From 01b7bec8dabe13f95231f37efd1f7bded2b846d0 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Fri, 5 Jul 2024 09:57:57 -0400 Subject: [PATCH 01/74] feat: implement in-memory-trie iterator and prefixed iterator --- lib/runtime/storage/trie.go | 82 +++++++----- lib/runtime/storage/trie_test.go | 18 +++ pkg/trie/inmemory/in_memory.go | 124 ++---------------- pkg/trie/inmemory/in_memory_test.go | 2 +- pkg/trie/inmemory/interator_test.go | 28 ++++ pkg/trie/inmemory/iterator.go | 195 ++++++++++++++++++++++++++++ pkg/trie/trie.go | 22 +++- pkg/trie/triedb/iterator.go | 2 +- pkg/trie/triedb/triedb.go | 14 +- pkg/trie/triedb/triedb_iterator.go | 21 ++- pkg/trie/triedb/triedb_test.go | 136 +++++++++---------- 11 files changed, 409 insertions(+), 235 deletions(-) create mode 100644 pkg/trie/inmemory/interator_test.go create mode 100644 pkg/trie/inmemory/iterator.go diff --git a/lib/runtime/storage/trie.go b/lib/runtime/storage/trie.go index bfec865029..f10c2da895 100644 --- a/lib/runtime/storage/trie.go +++ b/lib/runtime/storage/trie.go @@ -7,6 +7,7 @@ import ( "bytes" "container/list" "encoding/binary" + "errors" "fmt" "sort" "strings" @@ -212,30 +213,32 @@ func (t *TrieState) NextKey(key []byte) []byte { defer t.mtx.RUnlock() if currentTx := t.getCurrentTransaction(); currentTx != nil { - mainStateSortedKeys := make([]string, len(t.sortedKeys)) - copy(mainStateSortedKeys, t.sortedKeys) - - mainStateSortedKeys = slices.DeleteFunc(mainStateSortedKeys, func(s string) bool { - _, ok := currentTx.deletes[s] - return ok - }) - - allSortedKeys := append(mainStateSortedKeys, currentTx.sortedKeys...) - sort.Strings(allSortedKeys) - // Find key position - pos, found := slices.BinarySearch(allSortedKeys, string(key)) + pos, found := slices.BinarySearch(currentTx.sortedKeys, string(key)) if found { pos += 1 } + var nextKey []byte = nil + // Get next key based on that position - if pos < len(allSortedKeys) { - k := allSortedKeys[pos] - return []byte(k) + if pos < len(currentTx.sortedKeys) { + nextKey = []byte(currentTx.sortedKeys[pos]) } - return nil + nextKeyOnState := t.state.PrefixedIter(key).NextKeyFunc(func(nextKey []byte) bool { + _, deleted := currentTx.deletes[string(nextKey)] + return !deleted + }) + if nextKeyOnState == nil { + return nextKey + } + + if nextKey == nil || bytes.Compare(nextKeyOnState, nextKey) < 0 { + return nextKeyOnState + } + + return nextKey } return t.state.NextKey(key) @@ -529,41 +532,48 @@ func (t *TrieState) GetChildNextKey(keyToChild, key []byte) ([]byte, error) { } if childChanges := currentTx.childChangeSet[string(keyToChild)]; childChanges != nil { - mainStateChildTrieSortedKeys := t.childSortedKeys[string(keyToChild)] - childTrieSortedKeys := make([]string, len(mainStateChildTrieSortedKeys)) - copy(childTrieSortedKeys, mainStateChildTrieSortedKeys) - - childTrieSortedKeys = slices.DeleteFunc(childTrieSortedKeys, func(s string) bool { - _, ok := childChanges.deletes[s] - return ok - }) - - allSortedKeys := append(childTrieSortedKeys, childChanges.sortedKeys...) 
- sort.Strings(allSortedKeys) + var nextKey []byte = nil // Find key position - pos, found := slices.BinarySearch(allSortedKeys, string(key)) + pos, found := slices.BinarySearch(childChanges.sortedKeys, string(key)) if found { pos = pos + 1 } // Get next key based on that position - if pos < len(allSortedKeys) { - k := allSortedKeys[pos] - return []byte(k), nil + if pos < len(childChanges.sortedKeys) { + nextKey = []byte(childChanges.sortedKeys[pos]) } - return nil, nil + childTrie, err := t.state.GetChild(keyToChild) + if err != nil { + if errors.Is(err, trie.ErrChildTrieDoesNotExist) { + return nextKey, nil + } + return nil, err + } + + nextKeyOnState := childTrie.PrefixedIter(key).NextKeyFunc(func(nextKey []byte) bool { + _, ok := childChanges.deletes[string(nextKey)] + return !ok + }) + + if nextKeyOnState == nil { + return nextKey, nil + } + + if nextKey == nil || bytes.Compare(nextKeyOnState, nextKey) < 0 { + return nextKeyOnState, nil + } + + return nextKey, nil } } child, err := t.state.GetChild(keyToChild) - if err != nil { + if err != nil || child == nil { return nil, err } - if child == nil { - return nil, nil - } return child.NextKey(key), nil } diff --git a/lib/runtime/storage/trie_test.go b/lib/runtime/storage/trie_test.go index 2437d984ac..9d9b2f05f0 100644 --- a/lib/runtime/storage/trie_test.go +++ b/lib/runtime/storage/trie_test.go @@ -467,6 +467,24 @@ func TestNextKeys(t *testing.T) { underTransactionFn: func(t *testing.T, ts *TrieState) {}, expectedNextKey: nil, }, + "nothing_on_state_only_on_tx": { + searchKey: []byte("acc:abc123"), + keysOnState: [][]byte{}, + underTransactionFn: func(t *testing.T, ts *TrieState) { + require.NoError(t, ts.Put([]byte("acc:abc123:ddd"), []byte("0x10"))) + }, + expectedNextKey: []byte("acc:abc123:ddd"), + }, + "search_key_longer_but_next_key_exists": { + searchKey: []byte("abz"), + keysOnState: [][]byte{ + []byte("a"), + []byte("b"), + []byte("c"), + }, + underTransactionFn: func(t *testing.T, ts *TrieState) {}, + expectedNextKey: []byte("b"), + }, } for tname, tt := range cases { diff --git a/pkg/trie/inmemory/in_memory.go b/pkg/trie/inmemory/in_memory.go index 823c875081..955ff29588 100644 --- a/pkg/trie/inmemory/in_memory.go +++ b/pkg/trie/inmemory/in_memory.go @@ -31,6 +31,8 @@ type InMemoryTrie struct { deltas tracking.Delta } +var _ trie.Trie = (*InMemoryTrie)(nil) + // NewEmptyTrie creates a trie with a nil root func NewEmptyTrie() *InMemoryTrie { return NewTrie(nil, db.NewEmptyMemoryDB()) @@ -48,6 +50,14 @@ func NewTrie(root *node.Node, db db.Database) *InMemoryTrie { } } +func (t *InMemoryTrie) Iter() trie.TrieIterator { + return NewInMemoryTrieIterator(WithTrie(t)) +} + +func (t *InMemoryTrie) PrefixedIter(prefix []byte) trie.TrieIterator { + return NewInMemoryTrieIterator(WithTrie(t), WithCursorAt(codec.KeyLEToNibbles(prefix))) +} + func (t *InMemoryTrie) SetVersion(v trie.TrieLayout) { if v < t.version { panic("cannot regress trie version") @@ -225,14 +235,6 @@ func (t *InMemoryTrie) Hash() (rootHash common.Hash, err error) { return rootHash, nil } -// Entries returns all the key-value pairs in the trie as a map of keys to values -// where the keys are encoded in Little Endian. 
-func (t *InMemoryTrie) Entries() (keyValueMap map[string][]byte) { - keyValueMap = make(map[string][]byte) - t.buildEntriesMap(t.root, nil, keyValueMap) - return keyValueMap -} - func (t *InMemoryTrie) buildEntriesMap(currentNode *node.Node, prefix []byte, kv map[string][]byte) { if currentNode == nil { return @@ -261,97 +263,6 @@ func (t *InMemoryTrie) buildEntriesMap(currentNode *node.Node, prefix []byte, kv } } -// NextKey returns the next key in the trie in lexicographic order. -// It returns nil if no next key is found. -func (t *InMemoryTrie) NextKey(keyLE []byte) (nextKeyLE []byte) { - prefix := []byte(nil) - key := codec.KeyLEToNibbles(keyLE) - - nextKey := findNextKey(t.root, prefix, key) - if nextKey == nil { - return nil - } - - nextKeyLE = codec.NibblesToKeyLE(nextKey) - return nextKeyLE -} - -func findNextKey(parent *node.Node, prefix, searchKey []byte) (nextKey []byte) { - if parent == nil { - return nil - } - - if parent.Kind() == node.Leaf { - return findNextKeyLeaf(parent, prefix, searchKey) - } - return findNextKeyBranch(parent, prefix, searchKey) -} - -func findNextKeyLeaf(leaf *node.Node, prefix, searchKey []byte) (nextKey []byte) { - parentLeafKey := leaf.PartialKey - fullKey := concatenateSlices(prefix, parentLeafKey) - - if keyIsLexicographicallyBigger(searchKey, fullKey) { - return nil - } - - return fullKey -} - -func findNextKeyBranch(parentBranch *node.Node, prefix, searchKey []byte) (nextKey []byte) { - fullKey := concatenateSlices(prefix, parentBranch.PartialKey) - - if bytes.Equal(searchKey, fullKey) { - const startChildIndex = 0 - return findNextKeyChild(parentBranch.Children, startChildIndex, fullKey, searchKey) - } - - if keyIsLexicographicallyBigger(searchKey, fullKey) { - if len(searchKey) < len(fullKey) { - return nil - } else if len(searchKey) > len(fullKey) { - startChildIndex := searchKey[len(fullKey)] - return findNextKeyChild(parentBranch.Children, - startChildIndex, fullKey, searchKey) - } - } - - // search key is smaller than full key - if parentBranch.StorageValue != nil { - return fullKey - } - const startChildIndex = 0 - return findNextKeyChild(parentBranch.Children, startChildIndex, - fullKey, searchKey) -} - -func keyIsLexicographicallyBigger(key, key2 []byte) (bigger bool) { - if len(key) < len(key2) { - return bytes.Compare(key, key2[:len(key)]) == 1 - } - return bytes.Compare(key[:len(key2)], key2) != -1 -} - -// findNextKeyChild searches for a next key in the children -// given and returns a next key or nil if no next key is found. -func findNextKeyChild(children []*node.Node, startIndex byte, - fullKey, key []byte) (nextKey []byte) { - for i := startIndex; i < node.ChildrenCapacity; i++ { - child := children[i] - if child == nil { - continue - } - - childFullKey := concatenateSlices(fullKey, []byte{i}) - next := findNextKey(child, childFullKey, key) - if len(next) > 0 { - return next - } - } - - return nil -} - // Put inserts a value into the trie at the // key specified in little Endian format. func (t *InMemoryTrie) Put(keyLE, value []byte) (err error) { @@ -673,21 +584,6 @@ func LoadFromMap(data map[string]string, version trie.TrieLayout) (trie *InMemor return trie, nil } -// GetKeysWithPrefix returns all keys in little Endian -// format from nodes in the trie that have the given little -// Endian formatted prefix in their key. 
-func (t *InMemoryTrie) GetKeysWithPrefix(prefixLE []byte) (keysLE [][]byte) { - var prefixNibbles []byte - if len(prefixLE) > 0 { - prefixNibbles = codec.KeyLEToNibbles(prefixLE) - prefixNibbles = bytes.TrimSuffix(prefixNibbles, []byte{0}) - } - - prefix := []byte(nil) - key := prefixNibbles - return getKeysWithPrefix(t.root, prefix, key, keysLE) -} - // getKeysWithPrefix returns all keys in little Endian format that have the // prefix given. The prefix and key byte slices are in nibbles format. // TODO pass in map of keysLE if order is not needed. diff --git a/pkg/trie/inmemory/in_memory_test.go b/pkg/trie/inmemory/in_memory_test.go index a9dc7e09da..3a68248996 100644 --- a/pkg/trie/inmemory/in_memory_test.go +++ b/pkg/trie/inmemory/in_memory_test.go @@ -1067,7 +1067,7 @@ func Test_nextKey(t *testing.T) { originalTrie := testCase.trie.DeepCopy() - nextKey := findNextKey(testCase.trie.root, nil, testCase.key) + nextKey := findNextNode(testCase.trie.root, nil, testCase.key) assert.Equal(t, testCase.nextKey, nextKey) assert.Equal(t, *originalTrie, testCase.trie) // ensure no mutation diff --git a/pkg/trie/inmemory/interator_test.go b/pkg/trie/inmemory/interator_test.go new file mode 100644 index 0000000000..17c2ee013c --- /dev/null +++ b/pkg/trie/inmemory/interator_test.go @@ -0,0 +1,28 @@ +package inmemory + +import ( + "testing" + + "github.com/ChainSafe/gossamer/pkg/trie/codec" + "github.com/stretchr/testify/require" +) + +func TestInMemoryTrieIterator(t *testing.T) { + tt := NewEmptyTrie() + + tt.Put([]byte("some_other_storage:XCC:ZZZ"), []byte("0x10")) + tt.Put([]byte("yet_another_storage:BLABLA:YYY:JJJ"), []byte("0x10")) + tt.Put([]byte("account_storage:ABC:AAA"), []byte("0x10")) + tt.Put([]byte("account_storage:ABC:CCC"), []byte("0x10")) + tt.Put([]byte("account_storage:ABC:DDD"), []byte("0x10")) + tt.Put([]byte("account_storage:JJK:EEE"), []byte("0x10")) + + iter := NewInMemoryTrieIterator(WithTrie(tt)) + require.Equal(t, []byte("account_storage:ABC:AAA"), codec.NibblesToKeyLE((iter.NextEntry().Key))) + require.Equal(t, []byte("account_storage:ABC:CCC"), codec.NibblesToKeyLE((iter.NextEntry().Key))) + require.Equal(t, []byte("account_storage:ABC:DDD"), codec.NibblesToKeyLE((iter.NextEntry().Key))) + require.Equal(t, []byte("account_storage:ABC:EEE"), codec.NibblesToKeyLE((iter.NextEntry().Key))) + require.Equal(t, []byte("some_other_storage:XCC:ZZZ"), codec.NibblesToKeyLE((iter.NextEntry().Key))) + require.Equal(t, []byte("yet_another_storage:BLABLA:YYY:JJJ"), codec.NibblesToKeyLE((iter.NextEntry().Key))) + require.Nil(t, iter.NextEntry()) +} diff --git a/pkg/trie/inmemory/iterator.go b/pkg/trie/inmemory/iterator.go new file mode 100644 index 0000000000..ddeac44076 --- /dev/null +++ b/pkg/trie/inmemory/iterator.go @@ -0,0 +1,195 @@ +package inmemory + +import ( + "bytes" + "fmt" + + "github.com/ChainSafe/gossamer/pkg/trie" + "github.com/ChainSafe/gossamer/pkg/trie/codec" + "github.com/ChainSafe/gossamer/pkg/trie/node" +) + +type IterOpts func(*InMemoryTrieIterator) + +var WithTrie = func(tt *InMemoryTrie) IterOpts { + return func(imti *InMemoryTrieIterator) { + imti.trie = tt + } +} + +var WithCursorAt = func(cursor []byte) IterOpts { + return func(imti *InMemoryTrieIterator) { + imti.cursorAtKey = cursor + } +} + +var _ trie.TrieIterator = (*InMemoryTrieIterator)(nil) + +type InMemoryTrieIterator struct { + trie *InMemoryTrie + cursorAtKey []byte +} + +func NewInMemoryTrieIterator(opts ...IterOpts) *InMemoryTrieIterator { + iter := &InMemoryTrieIterator{ + trie: NewEmptyTrie(), + 
cursorAtKey: nil, + } + + for _, opt := range opts { + opt(iter) + } + + return iter +} + +func (t *InMemoryTrieIterator) NextEntry() *trie.Entry { + found := findNextNode(t.trie.root, []byte(nil), t.cursorAtKey) + if found != nil { + t.cursorAtKey = found.Key + } + return found +} + +func (t *InMemoryTrieIterator) NextKey() []byte { + entry := t.NextEntry() + if entry != nil { + return codec.NibblesToKeyLE(entry.Key) + } + return nil +} + +func (t *InMemoryTrieIterator) NextKeyFunc(predicate func(nextKey []byte) bool) (nextKey []byte) { + for entry := t.NextEntry(); entry != nil; entry = t.NextEntry() { + key := codec.NibblesToKeyLE(entry.Key) + if predicate(key) { + return key + } + } + return nil +} + +func (i *InMemoryTrieIterator) Seek(targetKey []byte) { + for key := i.NextKey(); bytes.Compare(key, targetKey) < 0; key = i.NextKey() { + } +} + +// Entries returns all the key-value pairs in the trie as a map of keys to values +// where the keys are encoded in Little Endian. +func (t *InMemoryTrie) Entries() (keyValueMap map[string][]byte) { + keyValueMap = make(map[string][]byte) + t.buildEntriesMap(t.root, nil, keyValueMap) + return keyValueMap +} + +// NextKey returns the next key in the trie in lexicographic order. +// It returns nil if no next key is found. +func (t *InMemoryTrie) NextKey(keyLE []byte) (nextKeyLE []byte) { + key := codec.KeyLEToNibbles(keyLE) + + iter := NewInMemoryTrieIterator(WithTrie(t), WithCursorAt(key)) + return iter.NextKey() +} + +func findNextNode(currentNode *node.Node, prefix, searchKey []byte) *trie.Entry { + if currentNode == nil { + return nil + } + + currentFullKey := bytes.Join([][]byte{prefix, currentNode.PartialKey}, nil) + + // if the keys are lexicographically equal then we will proceed + // in order to find the one that is lexicographically greater + // if the current node is a leaf then there is no other path + // if the current node is a branch then we can iterate over its children + switch currentNode.Kind() { + case node.Leaf: + // if search key lexicographically lower than the current full key + // then we should return the full key if it is not in the deletedKeys + if bytes.Compare(searchKey, currentFullKey) == -1 { + return &trie.Entry{Key: currentFullKey, Value: currentNode.StorageValue} + } + case node.Branch: + comparision := bytes.Compare(searchKey, currentFullKey) + + // if searchKey is lexicographically lower (-1) and the branch has a storage value then + // we found the next key, otherwise go over the children from the start + if comparision == -1 { + if currentNode.StorageValue != nil { + return &trie.Entry{Key: currentFullKey, Value: currentNode.StorageValue} + } + + return findNextKeyOnChildren( + currentNode, + currentFullKey, + searchKey, + 0, + ) + } + + // if searchKey is lexicographically equal (0) we should go over children from the start + if comparision == 0 { + return findNextKeyOnChildren( + currentNode, + currentFullKey, + searchKey, + 0, + ) + } + + // if searchKey is lexicographically greater (1) we should go over children starting from + // the last match between `searchKey` and `currentFullKey` + if comparision == 1 { + // search key is exhausted then return nil + if len(searchKey) < len(currentFullKey) { + return nil + } + + return findNextKeyOnChildren( + currentNode, + currentFullKey, + searchKey, + searchKey[len(currentFullKey)], + ) + } + default: + panic(fmt.Sprintf("node type not supported: %s", currentNode.Kind().String())) + } + + return nil +} + +func findNextKeyOnChildren(currentNode *node.Node, 
prefix, searchKey []byte, startingAt byte) *trie.Entry { + for i := startingAt; i < node.ChildrenCapacity; i++ { + child := currentNode.Children[i] + if child == nil { + continue + } + + next := findNextNode(child, + bytes.Join([][]byte{prefix, {byte(i)}}, nil), + searchKey, + ) + + if next != nil { + return next + } + } + + return nil +} + +// GetKeysWithPrefix returns all keys in little Endian +// format from nodes in the trie that have the given little +// Endian formatted prefix in their key. +func (t *InMemoryTrie) GetKeysWithPrefix(prefixLE []byte) (keysLE [][]byte) { + var prefixNibbles []byte + if len(prefixLE) > 0 { + prefixNibbles = codec.KeyLEToNibbles(prefixLE) + prefixNibbles = bytes.TrimSuffix(prefixNibbles, []byte{0}) + } + + prefix := []byte(nil) + key := prefixNibbles + return getKeysWithPrefix(t.root, prefix, key, keysLE) +} diff --git a/pkg/trie/trie.go b/pkg/trie/trie.go index 1eea7bd3e2..bd132cabed 100644 --- a/pkg/trie/trie.go +++ b/pkg/trie/trie.go @@ -35,9 +35,18 @@ type KVStoreWrite interface { } type TrieIterator interface { - Entries() (keyValueMap map[string][]byte) - NextKey(key []byte) []byte - GetKeysWithPrefix(prefix []byte) (keysLE [][]byte) + // NextKey performs a depth-first search on the trie and returns the next key + // and value based on the current state of the iterator. + NextEntry() (entry *Entry) + + // NextKey performs a depth-first search on the trie and returns the next key + // based on the current state of the iterator. + NextKey() (nextKey []byte) + + NextKeyFunc(func(nextKey []byte) bool) (nextKey []byte) + + // Seek moves the iterator to the first key that is greater than the target key. + Seek(targetKey []byte) } type PrefixTrieWrite interface { @@ -66,7 +75,12 @@ type TrieRead interface { KVStoreRead Hashable ChildTriesRead - TrieIterator + + Iter() TrieIterator + PrefixedIter(prefix []byte) TrieIterator + Entries() (keyValueMap map[string][]byte) + NextKey(key []byte) []byte + GetKeysWithPrefix(prefix []byte) (keysLE [][]byte) } type Trie interface { diff --git a/pkg/trie/triedb/iterator.go b/pkg/trie/triedb/iterator.go index 8d02a86361..193bbb3e3a 100644 --- a/pkg/trie/triedb/iterator.go +++ b/pkg/trie/triedb/iterator.go @@ -10,7 +10,7 @@ func (t *TrieDB) Entries() (keyValueMap map[string][]byte) { iter := NewTrieDBIterator(t) for entry := iter.NextEntry(); entry != nil; entry = iter.NextEntry() { - entries[string(entry.key)] = entry.value + entries[string(entry.Key)] = entry.Value } return entries diff --git a/pkg/trie/triedb/triedb.go b/pkg/trie/triedb/triedb.go index 67ed51c019..26c9b6acf0 100644 --- a/pkg/trie/triedb/triedb.go +++ b/pkg/trie/triedb/triedb.go @@ -25,11 +25,6 @@ var ( logger = log.NewFromGlobal(log.AddContext("pkg", "triedb")) ) -type entry struct { - key []byte - value []byte -} - // TrieDB is a DB-backed patricia merkle trie implementation // using lazy loading to fetch nodes type TrieDB struct { @@ -216,7 +211,6 @@ func (t *TrieDB) remove(keyNibbles []byte) error { // Delete deletes the given key from the trie func (t *TrieDB) Delete(key []byte) error { - keyNibbles := nibbles.KeyLEToNibbles(key) return t.remove(keyNibbles) } @@ -877,4 +871,12 @@ func (t *TrieDB) commitChild( } } +func (t *TrieDB) Iter() trie.TrieIterator { + return NewTrieDBIterator(t) +} + +func (t *TrieDB) PrefixedIter(prefix []byte) trie.TrieIterator { + return NewPrefixedTrieDBIterator(t, prefix) +} + var _ trie.TrieRead = (*TrieDB)(nil) diff --git a/pkg/trie/triedb/triedb_iterator.go b/pkg/trie/triedb/triedb_iterator.go index 
bfef62daa3..9172f03e1d 100644 --- a/pkg/trie/triedb/triedb_iterator.go +++ b/pkg/trie/triedb/triedb_iterator.go @@ -6,6 +6,7 @@ package triedb import ( "bytes" + "github.com/ChainSafe/gossamer/pkg/trie" nibbles "github.com/ChainSafe/gossamer/pkg/trie/codec" "github.com/ChainSafe/gossamer/pkg/trie/triedb/codec" ) @@ -80,7 +81,7 @@ func (i *TrieDBIterator) nextState() *iteratorState { return currentState } -func (i *TrieDBIterator) NextEntry() *entry { +func (i *TrieDBIterator) NextEntry() *trie.Entry { for len(i.nodeStack) > 0 { currentState := i.nextState() currentNode := currentState.node @@ -89,7 +90,7 @@ func (i *TrieDBIterator) NextEntry() *entry { case codec.Leaf: key := currentState.fullKeyNibbles(nil) value := i.db.Get(key) - return &entry{key: key, value: value} + return &trie.Entry{Key: key, Value: value} case codec.Branch: // Reverse iterate over children because we are using a LIFO stack // and we want to visit the leftmost child first @@ -106,7 +107,7 @@ func (i *TrieDBIterator) NextEntry() *entry { if n.GetValue() != nil { key := currentState.fullKeyNibbles(nil) value := i.db.Get(key) - return &entry{key: key, value: value} + return &trie.Entry{Key: key, Value: value} } } } @@ -119,13 +120,23 @@ func (i *TrieDBIterator) NextEntry() *entry { func (i *TrieDBIterator) NextKey() []byte { entry := i.NextEntry() if entry != nil { - return entry.key + return entry.Key + } + return nil +} + +func (i *TrieDBIterator) NextKeyFunc(predicate func(nextKey []byte) bool) (nextKey []byte) { + for entry := i.NextEntry(); entry != nil; entry = i.NextEntry() { + if predicate(entry.Key) { + return entry.Key + } } return nil } -// Seek moves the iterator to the first key that is greater than the target key. func (i *TrieDBIterator) Seek(targetKey []byte) { for key := i.NextKey(); bytes.Compare(key, targetKey) < 0; key = i.NextKey() { } } + +var _ trie.TrieIterator = (*TrieDBIterator)(nil) diff --git a/pkg/trie/triedb/triedb_test.go b/pkg/trie/triedb/triedb_test.go index 8c7e02bd2d..63a96269f4 100644 --- a/pkg/trie/triedb/triedb_test.go +++ b/pkg/trie/triedb/triedb_test.go @@ -15,13 +15,13 @@ func TestInsertions(t *testing.T) { t.Parallel() testCases := map[string]struct { - trieEntries []entry + trieEntries []trie.Entry key []byte value []byte stored NodeStorage }{ "nil_parent": { - trieEntries: []entry{}, + trieEntries: []trie.Entry{}, key: []byte{1}, value: []byte("leaf"), stored: NodeStorage{ @@ -36,10 +36,10 @@ func TestInsertions(t *testing.T) { }, }, "branch_parent": { - trieEntries: []entry{ + trieEntries: []trie.Entry{ { - key: []byte{1}, - value: []byte("branch"), + Key: []byte{1}, + Value: []byte("branch"), }, }, key: []byte{1, 0}, @@ -66,14 +66,14 @@ func TestInsertions(t *testing.T) { }, }, "branch_in_between_rearrange": { - trieEntries: []entry{ + trieEntries: []trie.Entry{ { - key: []byte{1}, - value: []byte("branch"), + Key: []byte{1}, + Value: []byte("branch"), }, { - key: []byte{1, 0, 1}, - value: []byte("leaf"), + Key: []byte{1, 0, 1}, + Value: []byte("leaf"), }, }, key: []byte{1, 0}, @@ -110,14 +110,14 @@ func TestInsertions(t *testing.T) { }, }, "branch_in_between": { - trieEntries: []entry{ + trieEntries: []trie.Entry{ { - key: []byte{1, 0}, - value: []byte("branch"), + Key: []byte{1, 0}, + Value: []byte("branch"), }, { - key: []byte{1, 0, 1}, - value: []byte("leaf"), + Key: []byte{1, 0, 1}, + Value: []byte("leaf"), }, }, key: []byte{1}, @@ -154,14 +154,14 @@ func TestInsertions(t *testing.T) { }, }, "override_branch_value": { - trieEntries: []entry{ + trieEntries: 
[]trie.Entry{ { - key: []byte{1}, - value: []byte("branch"), + Key: []byte{1}, + Value: []byte("branch"), }, { - key: []byte{1, 0}, - value: []byte("leaf"), + Key: []byte{1, 0}, + Value: []byte("leaf"), }, }, key: []byte{1}, @@ -188,14 +188,14 @@ func TestInsertions(t *testing.T) { }, }, "override_branch_value_same_value": { - trieEntries: []entry{ + trieEntries: []trie.Entry{ { - key: []byte{1}, - value: []byte("branch"), + Key: []byte{1}, + Value: []byte("branch"), }, { - key: []byte{1, 0}, - value: []byte("leaf"), + Key: []byte{1, 0}, + Value: []byte("leaf"), }, }, key: []byte{1}, @@ -222,14 +222,14 @@ func TestInsertions(t *testing.T) { }, }, "override_leaf_of_branch_value_same_value": { - trieEntries: []entry{ + trieEntries: []trie.Entry{ { - key: []byte{1}, - value: []byte("branch"), + Key: []byte{1}, + Value: []byte("branch"), }, { - key: []byte{1, 0}, - value: []byte("leaf"), + Key: []byte{1, 0}, + Value: []byte("leaf"), }, }, key: []byte{1, 0}, @@ -256,10 +256,10 @@ func TestInsertions(t *testing.T) { }, }, "override_leaf_parent": { - trieEntries: []entry{ + trieEntries: []trie.Entry{ { - key: []byte{1}, - value: []byte("leaf"), + Key: []byte{1}, + Value: []byte("leaf"), }, }, key: []byte{1}, @@ -276,10 +276,10 @@ func TestInsertions(t *testing.T) { }, }, "write_same_leaf_value_to_leaf_parent": { - trieEntries: []entry{ + trieEntries: []trie.Entry{ { - key: []byte{1}, - value: []byte("same"), + Key: []byte{1}, + Value: []byte("same"), }, }, key: []byte{1}, @@ -296,10 +296,10 @@ func TestInsertions(t *testing.T) { }, }, "write_leaf_as_divergent_child_next_to_parent_leaf": { - trieEntries: []entry{ + trieEntries: []trie.Entry{ { - key: []byte{1, 2}, - value: []byte("original leaf"), + Key: []byte{1, 2}, + Value: []byte("original leaf"), }, }, key: []byte{2, 3}, @@ -345,7 +345,7 @@ func TestInsertions(t *testing.T) { trie := NewEmptyTrieDB(inmemoryDB, nil) for _, entry := range testCase.trieEntries { - assert.NoError(t, trie.insert(entry.key, entry.value)) + assert.NoError(t, trie.insert(entry.Key, entry.Value)) } // Add new key-value pair @@ -362,15 +362,15 @@ func TestDeletes(t *testing.T) { t.Parallel() testCases := map[string]struct { - trieEntries []entry + trieEntries []trie.Entry key []byte expected NodeStorage }{ "nil_key": { - trieEntries: []entry{ + trieEntries: []trie.Entry{ { - key: []byte{1}, - value: []byte("leaf"), + Key: []byte{1}, + Value: []byte("leaf"), }, }, expected: NodeStorage{ @@ -391,10 +391,10 @@ func TestDeletes(t *testing.T) { }, }, "delete_leaf": { - trieEntries: []entry{ + trieEntries: []trie.Entry{ { - key: []byte{1}, - value: []byte("leaf"), + Key: []byte{1}, + Value: []byte("leaf"), }, }, key: []byte{1}, @@ -403,14 +403,14 @@ func TestDeletes(t *testing.T) { }, }, "delete_branch": { - trieEntries: []entry{ + trieEntries: []trie.Entry{ { - key: []byte{1}, - value: []byte("branch"), + Key: []byte{1}, + Value: []byte("branch"), }, { - key: []byte{1, 0}, - value: []byte("leaf"), + Key: []byte{1, 0}, + Value: []byte("leaf"), }, }, key: []byte{1}, @@ -427,14 +427,14 @@ func TestDeletes(t *testing.T) { }, }, "delete_branch_without_value_should_do_nothing": { - trieEntries: []entry{ + trieEntries: []trie.Entry{ { - key: []byte{1, 0}, - value: []byte("leaf1"), + Key: []byte{1, 0}, + Value: []byte("leaf1"), }, { - key: []byte{1, 1}, - value: []byte("leaf2"), + Key: []byte{1, 1}, + Value: []byte("leaf2"), }, }, key: []byte{1}, @@ -475,7 +475,7 @@ func TestDeletes(t *testing.T) { trie := NewEmptyTrieDB(inmemoryDB, nil) for _, entry := range testCase.trieEntries { 
- assert.NoError(t, trie.insert(entry.key, entry.value)) + assert.NoError(t, trie.insert(entry.Key, entry.Value)) } // Remove key @@ -492,16 +492,16 @@ func TestInsertAfterDelete(t *testing.T) { t.Parallel() testCases := map[string]struct { - trieEntries []entry + trieEntries []trie.Entry key []byte value []byte expected NodeStorage }{ "insert_leaf_after_delete": { - trieEntries: []entry{ + trieEntries: []trie.Entry{ { - key: []byte{1}, - value: []byte("leaf"), + Key: []byte{1}, + Value: []byte("leaf"), }, }, key: []byte{1}, @@ -518,14 +518,14 @@ func TestInsertAfterDelete(t *testing.T) { }, }, "insert_branch_after_delete": { - trieEntries: []entry{ + trieEntries: []trie.Entry{ { - key: []byte{1}, - value: []byte("branch"), + Key: []byte{1}, + Value: []byte("branch"), }, { - key: []byte{1, 0}, - value: []byte("leaf"), + Key: []byte{1, 0}, + Value: []byte("leaf"), }, }, key: []byte{1}, @@ -562,7 +562,7 @@ func TestInsertAfterDelete(t *testing.T) { trie := NewEmptyTrieDB(inmemoryDB, nil) for _, entry := range testCase.trieEntries { - assert.NoError(t, trie.insert(entry.key, entry.value)) + assert.NoError(t, trie.insert(entry.Key, entry.Value)) } // Remove key From d3004f9dfb198d8f5bfe2c88351811d610d85203 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Fri, 5 Jul 2024 10:03:00 -0400 Subject: [PATCH 02/74] chore: adjustment to fn replacement --- lib/runtime/storage/trie.go | 4 ++-- pkg/trie/inmemory/in_memory.go | 15 +++++++++++++++ pkg/trie/inmemory/iterator.go | 15 --------------- 3 files changed, 17 insertions(+), 17 deletions(-) diff --git a/lib/runtime/storage/trie.go b/lib/runtime/storage/trie.go index f10c2da895..1bf10f80b9 100644 --- a/lib/runtime/storage/trie.go +++ b/lib/runtime/storage/trie.go @@ -554,8 +554,8 @@ func (t *TrieState) GetChildNextKey(keyToChild, key []byte) ([]byte, error) { } nextKeyOnState := childTrie.PrefixedIter(key).NextKeyFunc(func(nextKey []byte) bool { - _, ok := childChanges.deletes[string(nextKey)] - return !ok + _, deleted := childChanges.deletes[string(nextKey)] + return !deleted }) if nextKeyOnState == nil { diff --git a/pkg/trie/inmemory/in_memory.go b/pkg/trie/inmemory/in_memory.go index 955ff29588..5326c10704 100644 --- a/pkg/trie/inmemory/in_memory.go +++ b/pkg/trie/inmemory/in_memory.go @@ -584,6 +584,21 @@ func LoadFromMap(data map[string]string, version trie.TrieLayout) (trie *InMemor return trie, nil } +// GetKeysWithPrefix returns all keys in little Endian +// format from nodes in the trie that have the given little +// Endian formatted prefix in their key. +func (t *InMemoryTrie) GetKeysWithPrefix(prefixLE []byte) (keysLE [][]byte) { + var prefixNibbles []byte + if len(prefixLE) > 0 { + prefixNibbles = codec.KeyLEToNibbles(prefixLE) + prefixNibbles = bytes.TrimSuffix(prefixNibbles, []byte{0}) + } + + prefix := []byte(nil) + key := prefixNibbles + return getKeysWithPrefix(t.root, prefix, key, keysLE) +} + // getKeysWithPrefix returns all keys in little Endian format that have the // prefix given. The prefix and key byte slices are in nibbles format. // TODO pass in map of keysLE if order is not needed. 
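The transaction-aware NextKey that patches 01 and 02 converge on merges two candidate keys: the next key recorded in the open transaction's sorted keys, and the next key on the backing state that the transaction has not deleted (found via PrefixedIter(key).NextKeyFunc). The lexicographically smaller non-nil candidate wins. Below is a minimal standalone sketch of that merge, assuming the trie.TrieIterator interface introduced in this series; nextKeyMerged and its txSortedKeys/txDeletes parameters are hypothetical stand-ins for the transaction's change set, not names from the patch.

package storage

import (
	"bytes"
	"slices"
)

// iterator narrows trie.TrieIterator to the one method this sketch needs.
type iterator interface {
	NextKeyFunc(predicate func(nextKey []byte) bool) []byte
}

// nextKeyMerged mirrors TrieState.NextKey when a transaction is open:
// candidate 1 comes from the transaction's own sorted keys, candidate 2 from
// the backing state via a prefixed iterator that skips keys the transaction
// deleted; the smaller non-nil candidate is returned.
func nextKeyMerged(key []byte, txSortedKeys []string,
	txDeletes map[string]struct{}, stateIter iterator) []byte {
	// Candidate 1: next key inside the transaction's change set.
	pos, found := slices.BinarySearch(txSortedKeys, string(key))
	if found {
		pos++ // exact match: the next key is the one after it
	}
	var txNext []byte
	if pos < len(txSortedKeys) {
		txNext = []byte(txSortedKeys[pos])
	}

	// Candidate 2: next key on state not shadowed by a transaction delete.
	stateNext := stateIter.NextKeyFunc(func(nextKey []byte) bool {
		_, deleted := txDeletes[string(nextKey)]
		return !deleted
	})

	switch {
	case stateNext == nil:
		return txNext
	case txNext == nil || bytes.Compare(stateNext, txNext) < 0:
		return stateNext
	default:
		return txNext
	}
}

NextKeyFunc keeps the deleted-key filtering inside a single trie traversal, so the state is walked at most once per NextKey call, instead of copying, filtering and re-sorting the combined key set as the removed sortedKeys-based code did.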
diff --git a/pkg/trie/inmemory/iterator.go b/pkg/trie/inmemory/iterator.go index ddeac44076..2568db9b0d 100644 --- a/pkg/trie/inmemory/iterator.go +++ b/pkg/trie/inmemory/iterator.go @@ -178,18 +178,3 @@ func findNextKeyOnChildren(currentNode *node.Node, prefix, searchKey []byte, sta return nil } - -// GetKeysWithPrefix returns all keys in little Endian -// format from nodes in the trie that have the given little -// Endian formatted prefix in their key. -func (t *InMemoryTrie) GetKeysWithPrefix(prefixLE []byte) (keysLE [][]byte) { - var prefixNibbles []byte - if len(prefixLE) > 0 { - prefixNibbles = codec.KeyLEToNibbles(prefixLE) - prefixNibbles = bytes.TrimSuffix(prefixNibbles, []byte{0}) - } - - prefix := []byte(nil) - key := prefixNibbles - return getKeysWithPrefix(t.root, prefix, key, keysLE) -} From 1bb4eda61a88847e736d69b06f538a67c741fc87 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Fri, 5 Jul 2024 10:12:20 -0400 Subject: [PATCH 03/74] chore: add license and fix test --- pkg/trie/inmemory/interator_test.go | 5 ++++- pkg/trie/inmemory/iterator.go | 3 +++ pkg/trie/trie.go | 5 ++++- 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/pkg/trie/inmemory/interator_test.go b/pkg/trie/inmemory/interator_test.go index 17c2ee013c..a0795de20a 100644 --- a/pkg/trie/inmemory/interator_test.go +++ b/pkg/trie/inmemory/interator_test.go @@ -1,3 +1,6 @@ +// Copyright 2024 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + package inmemory import ( @@ -21,7 +24,7 @@ func TestInMemoryTrieIterator(t *testing.T) { require.Equal(t, []byte("account_storage:ABC:AAA"), codec.NibblesToKeyLE((iter.NextEntry().Key))) require.Equal(t, []byte("account_storage:ABC:CCC"), codec.NibblesToKeyLE((iter.NextEntry().Key))) require.Equal(t, []byte("account_storage:ABC:DDD"), codec.NibblesToKeyLE((iter.NextEntry().Key))) - require.Equal(t, []byte("account_storage:ABC:EEE"), codec.NibblesToKeyLE((iter.NextEntry().Key))) + require.Equal(t, []byte("account_storage:JJK:EEE"), codec.NibblesToKeyLE((iter.NextEntry().Key))) require.Equal(t, []byte("some_other_storage:XCC:ZZZ"), codec.NibblesToKeyLE((iter.NextEntry().Key))) require.Equal(t, []byte("yet_another_storage:BLABLA:YYY:JJJ"), codec.NibblesToKeyLE((iter.NextEntry().Key))) require.Nil(t, iter.NextEntry()) diff --git a/pkg/trie/inmemory/iterator.go b/pkg/trie/inmemory/iterator.go index 2568db9b0d..08d7a0133d 100644 --- a/pkg/trie/inmemory/iterator.go +++ b/pkg/trie/inmemory/iterator.go @@ -1,3 +1,6 @@ +// Copyright 2024 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + package inmemory import ( diff --git a/pkg/trie/trie.go b/pkg/trie/trie.go index bd132cabed..038880d8f7 100644 --- a/pkg/trie/trie.go +++ b/pkg/trie/trie.go @@ -43,7 +43,9 @@ type TrieIterator interface { // based on the current state of the iterator. NextKey() (nextKey []byte) - NextKeyFunc(func(nextKey []byte) bool) (nextKey []byte) + // NextKeyFunc performs a depth-first search on the trie and returns the next key + // that satisfies the predicate based on the current state of the iterator. + NextKeyFunc(predicate func(nextKey []byte) bool) (nextKey []byte) // Seek moves the iterator to the first key that is greater than the target key. 
Seek(targetKey []byte) @@ -78,6 +80,7 @@ type TrieRead interface { Iter() TrieIterator PrefixedIter(prefix []byte) TrieIterator + Entries() (keyValueMap map[string][]byte) NextKey(key []byte) []byte GetKeysWithPrefix(prefix []byte) (keysLE [][]byte) From ef53a1f18fce8222e7880bc30a66ca141aaf5535 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Fri, 5 Jul 2024 14:02:59 -0400 Subject: [PATCH 04/74] chore: fix lint issues --- pkg/trie/inmemory/iterator.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pkg/trie/inmemory/iterator.go b/pkg/trie/inmemory/iterator.go index 08d7a0133d..bf08f6df71 100644 --- a/pkg/trie/inmemory/iterator.go +++ b/pkg/trie/inmemory/iterator.go @@ -113,11 +113,11 @@ func findNextNode(currentNode *node.Node, prefix, searchKey []byte) *trie.Entry return &trie.Entry{Key: currentFullKey, Value: currentNode.StorageValue} } case node.Branch: - comparision := bytes.Compare(searchKey, currentFullKey) + cmp := bytes.Compare(searchKey, currentFullKey) // if searchKey is lexicographically lower (-1) and the branch has a storage value then // we found the next key, otherwise go over the children from the start - if comparision == -1 { + if cmp == -1 { if currentNode.StorageValue != nil { return &trie.Entry{Key: currentFullKey, Value: currentNode.StorageValue} } @@ -131,7 +131,7 @@ func findNextNode(currentNode *node.Node, prefix, searchKey []byte) *trie.Entry } // if searchKey is lexicographically equal (0) we should go over children from the start - if comparision == 0 { + if cmp == 0 { return findNextKeyOnChildren( currentNode, currentFullKey, @@ -142,7 +142,7 @@ func findNextNode(currentNode *node.Node, prefix, searchKey []byte) *trie.Entry // if searchKey is lexicographically greater (1) we should go over children starting from // the last match between `searchKey` and `currentFullKey` - if comparision == 1 { + if cmp == 1 { // search key is exhausted then return nil if len(searchKey) < len(currentFullKey) { return nil @@ -170,7 +170,7 @@ func findNextKeyOnChildren(currentNode *node.Node, prefix, searchKey []byte, sta } next := findNextNode(child, - bytes.Join([][]byte{prefix, {byte(i)}}, nil), + bytes.Join([][]byte{prefix, {i}}, nil), searchKey, ) From 396b873b05c61d06eae46cfc3443f0be45a4877d Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Fri, 5 Jul 2024 18:50:33 -0400 Subject: [PATCH 05/74] feat: remove usage of `sorted keys` --- dot/network/discovery.go | 23 +- lib/runtime/storage/trie.go | 221 +++++------------- pkg/trie/inmemory/iterator.go | 19 +- .../{interator_test.go => iterator_test.go} | 29 +++ 4 files changed, 113 insertions(+), 179 deletions(-) rename pkg/trie/inmemory/{interator_test.go => iterator_test.go} (59%) diff --git a/dot/network/discovery.go b/dot/network/discovery.go index a4e047f450..d0e6422db7 100644 --- a/dot/network/discovery.go +++ b/dot/network/discovery.go @@ -8,6 +8,7 @@ import ( "fmt" "time" + "github.com/ChainSafe/gossamer/internal/log" ethmetrics "github.com/ethereum/go-ethereum/metrics" badger "github.com/ipfs/go-ds-badger2" kaddht "github.com/libp2p/go-libp2p-kad-dht" @@ -31,6 +32,8 @@ var ( tryAdvertiseTimeout = time.Second * 30 connectToPeersTimeout = time.Minute findPeersTimeout = time.Minute + + discoveryLogger = log.NewFromGlobal(log.AddContext("pkg", "network-discovery")) ) // discovery handles discovery of new peers via the kademlia DHT @@ -72,7 +75,7 @@ func (d *discovery) waitForPeers() (peers []peer.AddrInfo, err error) { for len(currentPeers) == 0 { select { case <-t.C: - 
logger.Debug("no peers yet, waiting to start DHT...") + discoveryLogger.Debug("no peers yet, waiting to start DHT...") // wait for peers to connect before starting DHT, otherwise DHT bootstrap nodes // will be empty and we will fail to fill the routing table case <-d.ctx.Done(): @@ -104,8 +107,8 @@ func (d *discovery) start() error { } d.bootnodes = peers } - logger.Debugf("starting DHT with bootnodes %v...", d.bootnodes) - logger.Debugf("V1ProtocolOverride %v...", d.pid+"/kad") + discoveryLogger.Debugf("starting DHT with bootnodes %v...", d.bootnodes) + discoveryLogger.Debugf("V1ProtocolOverride %v...", d.pid+"/kad") dhtOpts := []dual.Option{ dual.DHTOption(kaddht.Datastore(d.ds)), @@ -147,7 +150,7 @@ func (d *discovery) discoverAndAdvertise() error { go d.advertise() go d.checkPeerCount() - logger.Debug("DHT discovery started!") + discoveryLogger.Debug("DHT discovery started!") return nil } @@ -162,16 +165,16 @@ func (d *discovery) advertise() { timer.Stop() return case <-timer.C: - logger.Debug("advertising ourselves in the DHT...") + discoveryLogger.Debug("advertising ourselves in the DHT...") err := d.dht.Bootstrap(d.ctx) if err != nil { - logger.Warnf("failed to bootstrap DHT: %s", err) + discoveryLogger.Warnf("failed to bootstrap DHT: %s", err) continue } ttl, err = d.rd.Advertise(d.ctx, string(d.pid)) if err != nil { - logger.Warnf("failed to advertise in the DHT: %s", err) + discoveryLogger.Warnf("failed to advertise in the DHT: %s", err) ttl = tryAdvertiseTimeout } } @@ -197,10 +200,10 @@ func (d *discovery) checkPeerCount() { } func (d *discovery) findPeers() { - logger.Debug("attempting to find DHT peers...") + discoveryLogger.Debug("attempting to find DHT peers...") peerCh, err := d.rd.FindPeers(d.ctx, string(d.pid)) if err != nil { - logger.Warnf("failed to begin finding peers via DHT: %s", err) + discoveryLogger.Warnf("failed to begin finding peers via DHT: %s", err) return } @@ -216,7 +219,7 @@ func (d *discovery) findPeers() { continue } - logger.Tracef("found new peer %s via DHT", peer.ID) + discoveryLogger.Tracef("found new peer %s via DHT", peer.ID) d.h.Peerstore().AddAddrs(peer.ID, peer.Addrs, peerstore.PermanentAddrTTL) d.handler.AddPeer(0, peer.ID) } diff --git a/lib/runtime/storage/trie.go b/lib/runtime/storage/trie.go index 1bf10f80b9..7793d06f30 100644 --- a/lib/runtime/storage/trie.go +++ b/lib/runtime/storage/trie.go @@ -10,7 +10,6 @@ import ( "errors" "fmt" "sort" - "strings" "sync" "github.com/ChainSafe/gossamer/lib/common" @@ -24,21 +23,17 @@ import ( // If the execution of the call is successful, the changes will be applied to // the current `state` type TrieState struct { - mtx sync.RWMutex - state trie.Trie - transactions *list.List - sortedKeys []string - childSortedKeys map[string][]string + mtx sync.RWMutex + state trie.Trie + transactions *list.List } // NewTrieState initialises and returns a new TrieState instance func NewTrieState(initialState trie.Trie) *TrieState { transactions := list.New() return &TrieState{ - transactions: transactions, - state: initialState, - sortedKeys: make([]string, 0), - childSortedKeys: make(map[string][]string), + transactions: transactions, + state: initialState, } } @@ -98,25 +93,6 @@ func (t *TrieState) CommitTransaction() { // This is the last transaction so we apply all the changes to our state tx := t.transactions.Remove(t.transactions.Back()).(*storageDiff) tx.applyToTrie(t.state) - - // Update sorted keys - for _, k := range tx.sortedKeys { - t.addMainTrieSortedKey(k) - } - - for k := range tx.deletes { - 
t.removeMainTrieSortedKey(k) - } - - for childKey, childChanges := range tx.childChangeSet { - for _, k := range childChanges.sortedKeys { - t.addChildTrieSortedKey(childKey, k) - } - - for k := range childChanges.deletes { - t.removeChildTrieSortedKey(childKey, k) - } - } } } @@ -137,15 +113,9 @@ func (t *TrieState) Put(key, value []byte) (err error) { // if not, we apply the changes directly on our state trie if t.getCurrentTransaction() != nil { t.getCurrentTransaction().upsert(string(key), value) - } else { - err := t.state.Put(key, value) - if err != nil { - return err - } - t.addMainTrieSortedKey(string(key)) } - return nil + return t.state.Put(key, value) } // Get gets a value from the trie @@ -196,15 +166,9 @@ func (t *TrieState) Delete(key []byte) (err error) { if currentTx := t.getCurrentTransaction(); currentTx != nil { t.getCurrentTransaction().delete(string(key)) - } else { - err := t.state.Delete(key) - if err != nil { - return err - } - t.removeMainTrieSortedKey(string(key)) } - return nil + return t.state.Delete(key) } // NextKey returns the next key in the trie in lexicographical order. If it does not exist, it returns nil. @@ -250,16 +214,18 @@ func (t *TrieState) ClearPrefix(prefix []byte) error { defer t.mtx.Unlock() if currentTx := t.getCurrentTransaction(); currentTx != nil { - currentTx.clearPrefix(prefix, t.sortedKeys, -1) + keysOnState := make([]string, 0) + + iter := t.state.PrefixedIter(prefix) + for key := iter.NextKey(); bytes.HasPrefix(key, prefix); key = iter.NextKey() { + keysOnState = append(keysOnState, string(key)) + } + + currentTx.clearPrefix(prefix, keysOnState, -1) return nil } - err := t.state.ClearPrefix(prefix) - if err != nil { - return err - } - t.sortedKeys = removePrefixedSortedKey(t.sortedKeys, string(prefix), -1) - return nil + return t.state.ClearPrefix(prefix) } // ClearPrefixLimit deletes key-value pairs from the trie where the key starts with the given prefix till limit reached @@ -269,16 +235,18 @@ func (t *TrieState) ClearPrefixLimit(prefix []byte, limit uint32) ( defer t.mtx.Unlock() if currentTx := t.getCurrentTransaction(); currentTx != nil { - deleted, allDeleted = currentTx.clearPrefix(prefix, t.sortedKeys, int(limit)) + keysOnState := make([]string, 0) + + iter := t.state.PrefixedIter(prefix) + for key := iter.NextKey(); bytes.HasPrefix(key, prefix); key = iter.NextKey() { + keysOnState = append(keysOnState, string(key)) + } + + deleted, allDeleted = currentTx.clearPrefix(prefix, keysOnState, int(limit)) return deleted, allDeleted, nil } - deleted, allDeleted, err = t.state.ClearPrefixLimit(prefix, limit) - if err != nil { - return 0, false, err - } - t.sortedKeys = removePrefixedSortedKey(t.sortedKeys, string(prefix), int(limit)) - return + return t.state.ClearPrefixLimit(prefix, limit) } // TrieEntries returns every key-value pair in the trie @@ -316,12 +284,7 @@ func (t *TrieState) SetChildStorage(keyToChild, key, value []byte) error { return nil } - err := t.state.PutIntoChild(keyToChild, key, value) - if err != nil { - return err - } - t.addChildTrieSortedKey(string(keyToChild), string(key)) - return nil + return t.state.PutIntoChild(keyToChild, key, value) } func (t *TrieState) GetChildRoot(keyToChild []byte) (common.Hash, error) { @@ -362,12 +325,7 @@ func (t *TrieState) DeleteChild(keyToChild []byte) error { return nil } - err := t.state.DeleteChild(keyToChild) - if err != nil { - return err - } - delete(t.childSortedKeys, string(keyToChild)) - return nil + return t.state.DeleteChild(keyToChild) } // DeleteChildLimit 
deletes up to limit of database entries by lexicographic order. @@ -412,7 +370,6 @@ func (t *TrieState) DeleteChildLimit(key []byte, limit *[]byte) ( if err != nil { return 0, false, fmt.Errorf("deleting child trie: %w", err) } - delete(t.childSortedKeys, string(key)) return qtyEntries, true, nil } limitUint := binary.LittleEndian.Uint32(*limit) @@ -429,8 +386,6 @@ func (t *TrieState) DeleteChildLimit(key []byte, limit *[]byte) ( if err != nil { return deleted, allDeleted, fmt.Errorf("deleting from child trie located at key 0x%x: %w", key, err) } - - t.removeChildTrieSortedKey(string(key), k) deleted++ if deleted == limitUint { break @@ -458,7 +413,6 @@ func (t *TrieState) ClearChildStorage(keyToChild, key []byte) error { return err } - t.removeChildTrieSortedKey(string(keyToChild), string(key)) return nil } @@ -468,28 +422,34 @@ func (t *TrieState) ClearPrefixInChild(keyToChild, prefix []byte) error { defer t.mtx.Unlock() if currentTx := t.getCurrentTransaction(); currentTx != nil { - currentTx.clearPrefixInChild(string(keyToChild), prefix, t.childSortedKeys[string(keyToChild)], -1) + child, err := t.state.GetChild(keyToChild) + if err != nil { + if errors.Is(err, trie.ErrChildTrieDoesNotExist) { + currentTx.clearPrefixInChild(string(keyToChild), prefix, []string{}, -1) + return nil + } + return err + } + + var onStateKeys []string + iter := child.PrefixedIter(prefix) + for key := iter.NextKey(); bytes.HasPrefix(key, prefix); key = iter.NextKey() { + onStateKeys = append(onStateKeys, string(key)) + } + + currentTx.clearPrefixInChild(string(keyToChild), prefix, onStateKeys, -1) return nil } child, err := t.state.GetChild(keyToChild) - if err != nil { + if err != nil || child == nil { return err } - if child == nil { - return nil - } err = child.ClearPrefix(prefix) if err != nil { return fmt.Errorf("clearing prefix in child trie located at key 0x%x: %w", keyToChild, err) } - t.childSortedKeys[string(keyToChild)] = removePrefixedSortedKey( - t.childSortedKeys[string(keyToChild)], - string(prefix), - -1, - ) - return nil } @@ -498,8 +458,22 @@ func (t *TrieState) ClearPrefixInChildWithLimit(keyToChild, prefix []byte, limit defer t.mtx.Unlock() if currentTx := t.getCurrentTransaction(); currentTx != nil { - deleted, allDeleted := currentTx.clearPrefixInChild(string(keyToChild), prefix, - t.childSortedKeys[string(keyToChild)], int(limit)) + child, err := t.state.GetChild(keyToChild) + if err != nil { + if errors.Is(err, trie.ErrChildTrieDoesNotExist) { + deleted, allDeleted := currentTx.clearPrefixInChild(string(keyToChild), prefix, []string{}, -1) + return deleted, allDeleted, nil + } + return 0, false, err + } + + var onStateKeys []string + iter := child.PrefixedIter(prefix) + for key := iter.NextKey(); bytes.HasPrefix(key, prefix); key = iter.NextKey() { + onStateKeys = append(onStateKeys, string(key)) + } + + deleted, allDeleted := currentTx.clearPrefixInChild(string(keyToChild), prefix, onStateKeys, int(limit)) return deleted, allDeleted, nil } @@ -508,16 +482,7 @@ func (t *TrieState) ClearPrefixInChildWithLimit(keyToChild, prefix []byte, limit return 0, false, err } - deleted, allDeleted, err := child.ClearPrefixLimit(prefix, limit) - if err != nil { - return 0, false, err - } - t.childSortedKeys[string(keyToChild)] = removePrefixedSortedKey( - t.childSortedKeys[string(keyToChild)], - string(prefix), - -1, - ) - return deleted, allDeleted, nil + return child.ClearPrefixLimit(prefix, limit) } // GetChildNextKey returns the next lexicographical larger key from child storage. 
If it does not exist, it returns nil. @@ -645,67 +610,3 @@ func (t *TrieState) GetChangedNodeHashes() (inserted, deleted map[common.Hash]st return t.state.GetChangedNodeHashes() } - -func (t *TrieState) addMainTrieSortedKey(key string) { - t.sortedKeys = insertSortedKey(t.sortedKeys, key) -} - -func (t *TrieState) removeMainTrieSortedKey(key string) { - t.sortedKeys = removeSortedKey(t.sortedKeys, key) -} - -func (t *TrieState) addChildTrieSortedKey(keyToChild, key string) { - t.childSortedKeys[keyToChild] = insertSortedKey(t.childSortedKeys[keyToChild], key) -} - -func (t *TrieState) removeChildTrieSortedKey(keyToChild, key string) { - t.childSortedKeys[keyToChild] = removeSortedKey(t.childSortedKeys[keyToChild], key) -} - -func insertSortedKey(keys []string, key string) []string { - pos, found := slices.BinarySearch(keys, key) - if found { - return keys // key already exists - } - - keys = append(keys, "") - copy(keys[pos+1:], keys[pos:]) - keys[pos] = key - return keys -} - -func removeSortedKey(keys []string, key string) []string { - pos, found := slices.BinarySearch(keys, key) - - if found { - return append(keys[:pos], keys[pos+1:]...) - } - - return keys -} - -func removePrefixedSortedKey(keys []string, prefix string, limit int) []string { - if limit == 0 { - return keys - } - - amountDeleted := 0 - for { - pos, _ := slices.BinarySearch(keys, prefix) - if pos >= len(keys) { - break - } - - if !strings.HasPrefix(keys[pos], prefix) { - break - } - - keys = append(keys[:pos], keys[pos+1:]...) - amountDeleted++ - if limit > 0 && limit == amountDeleted { - break - } - } - - return keys -} diff --git a/pkg/trie/inmemory/iterator.go b/pkg/trie/inmemory/iterator.go index bf08f6df71..ccb4a3811c 100644 --- a/pkg/trie/inmemory/iterator.go +++ b/pkg/trie/inmemory/iterator.go @@ -46,24 +46,24 @@ func NewInMemoryTrieIterator(opts ...IterOpts) *InMemoryTrieIterator { return iter } -func (t *InMemoryTrieIterator) NextEntry() *trie.Entry { - found := findNextNode(t.trie.root, []byte(nil), t.cursorAtKey) +func (i *InMemoryTrieIterator) NextEntry() *trie.Entry { + found := findNextNode(i.trie.root, []byte(nil), i.cursorAtKey) if found != nil { - t.cursorAtKey = found.Key + i.cursorAtKey = found.Key } return found } -func (t *InMemoryTrieIterator) NextKey() []byte { - entry := t.NextEntry() +func (i *InMemoryTrieIterator) NextKey() []byte { + entry := i.NextEntry() if entry != nil { return codec.NibblesToKeyLE(entry.Key) } return nil } -func (t *InMemoryTrieIterator) NextKeyFunc(predicate func(nextKey []byte) bool) (nextKey []byte) { - for entry := t.NextEntry(); entry != nil; entry = t.NextEntry() { +func (i *InMemoryTrieIterator) NextKeyFunc(predicate func(nextKey []byte) bool) (nextKey []byte) { + for entry := i.NextEntry(); entry != nil; entry = i.NextEntry() { key := codec.NibblesToKeyLE(entry.Key) if predicate(key) { return key @@ -73,8 +73,9 @@ func (t *InMemoryTrieIterator) NextKeyFunc(predicate func(nextKey []byte) bool) } func (i *InMemoryTrieIterator) Seek(targetKey []byte) { - for key := i.NextKey(); bytes.Compare(key, targetKey) < 0; key = i.NextKey() { - } + i.NextKeyFunc(func(nextKey []byte) bool { + return bytes.Compare(nextKey, targetKey) >= 0 + }) } // Entries returns all the key-value pairs in the trie as a map of keys to values diff --git a/pkg/trie/inmemory/interator_test.go b/pkg/trie/inmemory/iterator_test.go similarity index 59% rename from pkg/trie/inmemory/interator_test.go rename to pkg/trie/inmemory/iterator_test.go index a0795de20a..e5a5937a88 100644 --- 
a/pkg/trie/inmemory/interator_test.go +++ b/pkg/trie/inmemory/iterator_test.go @@ -4,6 +4,7 @@ package inmemory import ( + "bytes" "testing" "github.com/ChainSafe/gossamer/pkg/trie/codec" @@ -29,3 +30,31 @@ func TestInMemoryTrieIterator(t *testing.T) { require.Equal(t, []byte("yet_another_storage:BLABLA:YYY:JJJ"), codec.NibblesToKeyLE((iter.NextEntry().Key))) require.Nil(t, iter.NextEntry()) } + +func TestInMemoryIteratorGetAllKeysWithPrefix(t *testing.T) { + tt := NewEmptyTrie() + + tt.Put([]byte("services_storage:serviceA:19090"), []byte("0x10")) + tt.Put([]byte("services_storage:serviceB:22222"), []byte("0x10")) + tt.Put([]byte("account_storage:ABC:AAA"), []byte("0x10")) + tt.Put([]byte("account_storage:ABC:CCC"), []byte("0x10")) + tt.Put([]byte("account_storage:ABC:DDD"), []byte("0x10")) + tt.Put([]byte("account_storage:JJK:EEE"), []byte("0x10")) + + prefix := []byte("account_storage") + iter := tt.PrefixedIter(prefix) + + keys := make([][]byte, 0) + for key := iter.NextKey(); bytes.HasPrefix(key, prefix); key = iter.NextKey() { + keys = append(keys, key) + } + + expectedKeys := [][]byte{ + []byte("account_storage:ABC:AAA"), + []byte("account_storage:ABC:CCC"), + []byte("account_storage:ABC:DDD"), + []byte("account_storage:JJK:EEE"), + } + + require.Equal(t, expectedKeys, keys) +} From 53288b30bc5a8cc4f30232af6b55d5b6fc6e1f16 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Sat, 6 Jul 2024 10:33:14 -0400 Subject: [PATCH 06/74] chore: remove unneeded logger --- dot/network/discovery.go | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-) diff --git a/dot/network/discovery.go b/dot/network/discovery.go index d0e6422db7..a4e047f450 100644 --- a/dot/network/discovery.go +++ b/dot/network/discovery.go @@ -8,7 +8,6 @@ import ( "fmt" "time" - "github.com/ChainSafe/gossamer/internal/log" ethmetrics "github.com/ethereum/go-ethereum/metrics" badger "github.com/ipfs/go-ds-badger2" kaddht "github.com/libp2p/go-libp2p-kad-dht" @@ -32,8 +31,6 @@ var ( tryAdvertiseTimeout = time.Second * 30 connectToPeersTimeout = time.Minute findPeersTimeout = time.Minute - - discoveryLogger = log.NewFromGlobal(log.AddContext("pkg", "network-discovery")) ) // discovery handles discovery of new peers via the kademlia DHT @@ -75,7 +72,7 @@ func (d *discovery) waitForPeers() (peers []peer.AddrInfo, err error) { for len(currentPeers) == 0 { select { case <-t.C: - discoveryLogger.Debug("no peers yet, waiting to start DHT...") + logger.Debug("no peers yet, waiting to start DHT...") // wait for peers to connect before starting DHT, otherwise DHT bootstrap nodes // will be empty and we will fail to fill the routing table case <-d.ctx.Done(): @@ -107,8 +104,8 @@ func (d *discovery) start() error { } d.bootnodes = peers } - discoveryLogger.Debugf("starting DHT with bootnodes %v...", d.bootnodes) - discoveryLogger.Debugf("V1ProtocolOverride %v...", d.pid+"/kad") + logger.Debugf("starting DHT with bootnodes %v...", d.bootnodes) + logger.Debugf("V1ProtocolOverride %v...", d.pid+"/kad") dhtOpts := []dual.Option{ dual.DHTOption(kaddht.Datastore(d.ds)), @@ -150,7 +147,7 @@ func (d *discovery) discoverAndAdvertise() error { go d.advertise() go d.checkPeerCount() - discoveryLogger.Debug("DHT discovery started!") + logger.Debug("DHT discovery started!") return nil } @@ -165,16 +162,16 @@ func (d *discovery) advertise() { timer.Stop() return case <-timer.C: - discoveryLogger.Debug("advertising ourselves in the DHT...") + logger.Debug("advertising ourselves in the DHT...") err := d.dht.Bootstrap(d.ctx) if 
err != nil { - discoveryLogger.Warnf("failed to bootstrap DHT: %s", err) + logger.Warnf("failed to bootstrap DHT: %s", err) continue } ttl, err = d.rd.Advertise(d.ctx, string(d.pid)) if err != nil { - discoveryLogger.Warnf("failed to advertise in the DHT: %s", err) + logger.Warnf("failed to advertise in the DHT: %s", err) ttl = tryAdvertiseTimeout } } @@ -200,10 +197,10 @@ func (d *discovery) checkPeerCount() { } func (d *discovery) findPeers() { - discoveryLogger.Debug("attempting to find DHT peers...") + logger.Debug("attempting to find DHT peers...") peerCh, err := d.rd.FindPeers(d.ctx, string(d.pid)) if err != nil { - discoveryLogger.Warnf("failed to begin finding peers via DHT: %s", err) + logger.Warnf("failed to begin finding peers via DHT: %s", err) return } @@ -219,7 +216,7 @@ func (d *discovery) findPeers() { continue } - discoveryLogger.Tracef("found new peer %s via DHT", peer.ID) + logger.Tracef("found new peer %s via DHT", peer.ID) d.h.Peerstore().AddAddrs(peer.ID, peer.Addrs, peerstore.PermanentAddrTTL) d.handler.AddPeer(0, peer.ID) } From c8da5839e82689e0013e9aa3345e48a7581fa340 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Sun, 7 Jul 2024 07:53:12 -0400 Subject: [PATCH 07/74] wip sync --- dot/network/service.go | 1 + dot/network/state.go | 2 + dot/node.go | 7 +- dot/services.go | 85 ++++++++------- dot/sync/chain_sync.go | 14 ++- dot/sync/syncer.go | 4 + dot/sync/worker_pool.go | 4 +- lib/sync/fullsync.go | 25 +++++ lib/sync/service.go | 162 ++++++++++++++++++++++++++++ lib/sync/worker.go | 90 ++++++++++++++++ lib/sync/worker_pool.go | 227 ++++++++++++++++++++++++++++++++++++++++ 11 files changed, 568 insertions(+), 53 deletions(-) create mode 100644 lib/sync/fullsync.go create mode 100644 lib/sync/service.go create mode 100644 lib/sync/worker.go create mode 100644 lib/sync/worker_pool.go diff --git a/dot/network/service.go b/dot/network/service.go index 984eeaddb2..4d19dfaa44 100644 --- a/dot/network/service.go +++ b/dot/network/service.go @@ -734,6 +734,7 @@ func (s *Service) processMessage(msg peerset.Message) { return } logger.Debugf("connection dropped successfully for peer %s", peerID) + s.syncer.OnConnectionClosed(peerID) } } diff --git a/dot/network/state.go b/dot/network/state.go index 78183d65d6..09e7d20f11 100644 --- a/dot/network/state.go +++ b/dot/network/state.go @@ -33,6 +33,8 @@ type Syncer interface { // CreateBlockResponse is called upon receipt of a BlockRequestMessage to create the response CreateBlockResponse(peer.ID, *BlockRequestMessage) (*BlockResponseMessage, error) + + OnConnectionClosed(peer.ID) } // TransactionHandler is the interface used by the transactions sub-protocol diff --git a/dot/node.go b/dot/node.go index 8b0352eb03..9e99ac30be 100644 --- a/dot/node.go +++ b/dot/node.go @@ -21,7 +21,6 @@ import ( "github.com/ChainSafe/gossamer/dot/rpc" "github.com/ChainSafe/gossamer/dot/state" "github.com/ChainSafe/gossamer/dot/state/pruner" - dotsync "github.com/ChainSafe/gossamer/dot/sync" "github.com/ChainSafe/gossamer/dot/system" "github.com/ChainSafe/gossamer/dot/telemetry" "github.com/ChainSafe/gossamer/dot/types" @@ -64,7 +63,7 @@ type nodeBuilderIface interface { net *network.Service, telemetryMailer Telemetry) (*grandpa.Service, error) newSyncService(config *cfg.Config, st *state.Service, finalityGadget BlockJustificationVerifier, verifier *babe.VerificationManager, cs *core.Service, net *network.Service, - telemetryMailer Telemetry) (*dotsync.Service, error) + telemetryMailer Telemetry) (network.Syncer, error) createBABEService(config 
*cfg.Config, st *state.Service, ks KeyStore, cs *core.Service, telemetryMailer Telemetry) (service *babe.Service, err error) createSystemService(cfg *types.SystemInfo, stateSrvc *state.Service) (*system.Service, error) @@ -382,7 +381,7 @@ func newNode(config *cfg.Config, networkSrvc.SetSyncer(syncer) networkSrvc.SetTransactionHandler(coreSrvc) } - nodeSrvcs = append(nodeSrvcs, syncer) + nodeSrvcs = append(nodeSrvcs, syncer.(service)) bp, err := builder.createBABEService(config, stateSrvc, ks.Babe, coreSrvc, telemetryMailer) if err != nil { @@ -402,7 +401,7 @@ func newNode(config *cfg.Config, blockProducer: bp, system: sysSrvc, blockFinality: fg, - syncer: syncer, + syncer: syncer.(rpc.SyncAPI), } rpcSrvc, err = builder.createRPCService(cRPCParams) if err != nil { diff --git a/dot/services.go b/dot/services.go index 827465b240..901daad3f8 100644 --- a/dot/services.go +++ b/dot/services.go @@ -7,7 +7,6 @@ import ( "errors" "fmt" "strings" - "time" cfg "github.com/ChainSafe/gossamer/config" @@ -17,7 +16,6 @@ import ( "github.com/ChainSafe/gossamer/dot/rpc" "github.com/ChainSafe/gossamer/dot/rpc/modules" "github.com/ChainSafe/gossamer/dot/state" - "github.com/ChainSafe/gossamer/dot/sync" "github.com/ChainSafe/gossamer/dot/system" "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/internal/database" @@ -35,6 +33,7 @@ import ( "github.com/ChainSafe/gossamer/lib/runtime" rtstorage "github.com/ChainSafe/gossamer/lib/runtime/storage" wazero_runtime "github.com/ChainSafe/gossamer/lib/runtime/wazero" + libsync "github.com/ChainSafe/gossamer/lib/sync" ) // BlockProducer to produce blocks @@ -54,7 +53,7 @@ type rpcServiceSettings struct { blockProducer BlockProducer system *system.Service blockFinality *grandpa.Service - syncer *sync.Service + syncer rpc.SyncAPI } func newInMemoryDB() (database.Database, error) { @@ -499,46 +498,46 @@ func (nodeBuilder) createBlockVerifier(st *state.Service) *babe.VerificationMana func (nodeBuilder) newSyncService(config *cfg.Config, st *state.Service, fg BlockJustificationVerifier, verifier *babe.VerificationManager, cs *core.Service, net *network.Service, telemetryMailer Telemetry) ( - *sync.Service, error) { - slotDuration, err := st.Epoch.GetSlotDuration() - if err != nil { - return nil, err - } - - genesisData, err := st.Base.LoadGenesisData() - if err != nil { - return nil, err - } - - syncLogLevel, err := log.ParseLevel(config.Log.Sync) - if err != nil { - return nil, fmt.Errorf("failed to parse sync log level: %w", err) - } - - const blockRequestTimeout = time.Second * 20 - requestMaker := net.GetRequestResponseProtocol( - network.SyncID, - blockRequestTimeout, - network.MaxBlockResponseSize) - - syncCfg := &sync.Config{ - LogLvl: syncLogLevel, - Network: net, - BlockState: st.Block, - StorageState: st.Storage, - TransactionState: st.Transaction, - FinalityGadget: fg, - BabeVerifier: verifier, - BlockImportHandler: cs, - MinPeers: config.Network.MinPeers, - MaxPeers: config.Network.MaxPeers, - SlotDuration: slotDuration, - Telemetry: telemetryMailer, - BadBlocks: genesisData.BadBlocks, - RequestMaker: requestMaker, - } - - return sync.NewService(syncCfg) + network.Syncer, error) { + // slotDuration, err := st.Epoch.GetSlotDuration() + // if err != nil { + // return nil, err + // } + + // genesisData, err := st.Base.LoadGenesisData() + // if err != nil { + // return nil, err + // } + + // syncLogLevel, err := log.ParseLevel(config.Log.Sync) + // if err != nil { + // return nil, fmt.Errorf("failed to parse sync log level: %w", err) + // } + 
+ // const blockRequestTimeout = time.Second * 20 + // requestMaker := net.GetRequestResponseProtocol( + // network.SyncID, + // blockRequestTimeout, + // network.MaxBlockResponseSize) + + // syncCfg := &sync.Config{ + // LogLvl: syncLogLevel, + // Network: net, + // BlockState: st.Block, + // StorageState: st.Storage, + // TransactionState: st.Transaction, + // FinalityGadget: fg, + // BabeVerifier: verifier, + // BlockImportHandler: cs, + // MinPeers: config.Network.MinPeers, + // MaxPeers: config.Network.MaxPeers, + // SlotDuration: slotDuration, + // Telemetry: telemetryMailer, + // BadBlocks: genesisData.BadBlocks, + // RequestMaker: requestMaker, + // } + + return libsync.NewSyncService(net, st.Block, &libsync.FullSyncStrategy{}, nil), nil } func (nodeBuilder) createDigestHandler(st *state.Service) (*digest.Handler, error) { diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index 80bf161001..e2f31e427e 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -649,6 +649,8 @@ taskResultLoop: if taskResult.err != nil { if !errors.Is(taskResult.err, network.ErrReceivedEmptyMessage) { + cs.workerPool.ignorePeerAsWorker(taskResult.who) + logger.Errorf("task result: peer(%s) error: %s", taskResult.who, taskResult.err) @@ -658,11 +660,13 @@ taskResultLoop: Reason: peerset.BadProtocolReason, }, who) } - } else if errors.Is(taskResult.err, network.ErrNilBlockInResponse) { - cs.network.ReportPeer(peerset.ReputationChange{ - Value: peerset.BadMessageValue, - Reason: peerset.BadMessageReason, - }, who) + + if errors.Is(taskResult.err, network.ErrNilBlockInResponse) { + cs.network.ReportPeer(peerset.ReputationChange{ + Value: peerset.BadMessageValue, + Reason: peerset.BadMessageReason, + }, who) + } } // TODO: avoid the same peer to get the same task diff --git a/dot/sync/syncer.go b/dot/sync/syncer.go index dd93d1383b..6eccf30064 100644 --- a/dot/sync/syncer.go +++ b/dot/sync/syncer.go @@ -179,6 +179,10 @@ func (s *Service) HandleBlockAnnounce(from peer.ID, msg *network.BlockAnnounceMe }) } +func (s *Service) OnConnectionClosed(who peer.ID) { + logger.Tracef("[NOT IMPLEMENTED] OnConnectionClosed: %s", who.String()) +} + // IsSynced exposes the synced state func (s *Service) IsSynced() bool { return s.chainSync.getSyncMode() == tip diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index 9bfad59dbd..3fc9558130 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -106,7 +106,9 @@ func (s *syncWorkerPool) useConnectedPeers() { s.mtx.Lock() defer s.mtx.Unlock() for _, connectedPeer := range connectedPeers { - s.newPeer(connectedPeer) + if _, shouldIgnore := s.ignorePeers[connectedPeer]; !shouldIgnore { + s.newPeer(connectedPeer) + } } } diff --git a/lib/sync/fullsync.go b/lib/sync/fullsync.go new file mode 100644 index 0000000000..4a54b4b0d3 --- /dev/null +++ b/lib/sync/fullsync.go @@ -0,0 +1,25 @@ +package sync + +import ( + "fmt" + + "github.com/ChainSafe/gossamer/dot/network" + "github.com/libp2p/go-libp2p/core/peer" +) + +var _ Strategy = (*FullSyncStrategy)(nil) + +type FullSyncStrategy struct{} + +func (*FullSyncStrategy) IsFinished() (bool, error) { + return false, nil +} + +func (*FullSyncStrategy) OnBlockAnnounce(from peer.ID, msg *network.BlockAnnounceMessage) error { + fmt.Printf("received block announce: %d", msg.Number) + return nil +} + +func (*FullSyncStrategy) NextActions() ([]*syncTask, error) { + return nil, nil +} diff --git a/lib/sync/service.go b/lib/sync/service.go new file mode 100644 index 0000000000..2294a6f188 --- /dev/null 
+++ b/lib/sync/service.go
@@ -0,0 +1,162 @@
+package sync
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/ChainSafe/gossamer/dot/network"
+	"github.com/ChainSafe/gossamer/dot/types"
+	"github.com/ChainSafe/gossamer/internal/log"
+	"github.com/libp2p/go-libp2p/core/peer"
+)
+
+var logger = log.NewFromGlobal(log.AddContext("pkg", "new-sync"))
+
+type Network interface {
+	AllConnectedPeersIDs() []peer.ID
+	BlockAnnounceHandshake(*types.Header) error
+}
+
+type BlockState interface {
+	BestBlockHeader() (*types.Header, error)
+}
+
+type Strategy interface {
+	OnBlockAnnounce(from peer.ID, msg *network.BlockAnnounceMessage) error
+	NextActions() ([]*syncTask, error)
+	IsFinished() (bool, error)
+}
+
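For orientation: any type satisfying the Strategy interface above can drive the sync
engine. A minimal sketch, assuming it lives in this lib/sync package next to the
unexported syncTask type — noopStrategy is a hypothetical name, not part of the patch:

package sync

import (
	"github.com/ChainSafe/gossamer/dot/network"
	"github.com/libp2p/go-libp2p/core/peer"
)

// noopStrategy accepts every block announcement, schedules no tasks and
// immediately reports itself as finished.
type noopStrategy struct{}

var _ Strategy = (*noopStrategy)(nil)

func (*noopStrategy) OnBlockAnnounce(peer.ID, *network.BlockAnnounceMessage) error { return nil }

func (*noopStrategy) NextActions() ([]*syncTask, error) { return nil, nil }

func (*noopStrategy) IsFinished() (bool, error) { return true, nil }
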
+type SyncService struct {
+	wg         sync.WaitGroup
+	network    Network
+	blockState BlockState
+
+	currentStrategy Strategy
+	defaultStrategy Strategy
+
+	workerPool        *syncWorkerPool
+	waitPeersDuration time.Duration
+	minPeers          int
+
+	stopCh chan struct{}
+}
+
+func NewSyncService(network Network, blockState BlockState,
+	currentStrategy, defaultStrategy Strategy) *SyncService {
+	return &SyncService{
+		network:           network,
+		blockState:        blockState,
+		currentStrategy:   currentStrategy,
+		defaultStrategy:   defaultStrategy,
+		workerPool:        newSyncWorkerPool(network),
+		waitPeersDuration: 2 * time.Second,
+		minPeers:          5,
+		stopCh:            make(chan struct{}),
+	}
+}
+
+func (s *SyncService) waitWorkers() {
+	waitPeersTimer := time.NewTimer(s.waitPeersDuration)
+
+	bestBlockHeader, err := s.blockState.BestBlockHeader()
+	if err != nil {
+		panic(fmt.Sprintf("failed to get best block header: %v", err))
+	}
+
+	for {
+		total := s.workerPool.totalWorkers()
+		logger.Info("waiting for peers...")
+		logger.Infof("total workers: %d, min peers: %d", total, s.minPeers)
+		if total >= s.minPeers {
+			return
+		}
+
+		err := s.network.BlockAnnounceHandshake(bestBlockHeader)
+		if err != nil && !errors.Is(err, network.ErrNoPeersConnected) {
+			logger.Errorf("retrieving target info from peers: %v", err)
+		}
+
+		select {
+		case <-waitPeersTimer.C:
+			waitPeersTimer.Reset(s.waitPeersDuration)
+
+		case <-s.stopCh:
+			return
+		}
+	}
+}
+
+func (s *SyncService) Start() error {
+	s.waitWorkers()
+
+	s.wg.Add(1)
+	go s.runSyncEngine()
+	return nil
+}
+
+func (s *SyncService) Stop() error {
+	// TODO: implement stop mechanism
+	close(s.stopCh)
+	s.wg.Wait()
+	return nil
+}
+
+func (s *SyncService) HandleBlockAnnounceHandshake(from peer.ID, msg *network.BlockAnnounceHandshake) error {
+	logger.Infof("receiving a block announce handshake: %s", from.String())
+	s.workerPool.fromBlockAnnounceHandshake(from, msg.BestBlockHash, uint(msg.BestBlockNumber))
+	return nil
+}
+
+func (s *SyncService) HandleBlockAnnounce(from peer.ID, msg *network.BlockAnnounceMessage) error {
+	return s.currentStrategy.OnBlockAnnounce(from, msg)
+}
+
+func (s *SyncService) OnConnectionClosed(who peer.ID) {
+	logger.Tracef("removing peer worker: %s", who.String())
+	s.workerPool.removeWorker(who)
+}
+
+func (s *SyncService) CreateBlockResponse(who peer.ID, req *network.BlockRequestMessage) (
+	*network.BlockResponseMessage, error) {
+	return nil, nil
+}
+
+func (s *SyncService) IsSynced() bool {
+	return false
+}
+
+func (s *SyncService) HighestBlock() uint {
+	return 0
+}
+
+func (s *SyncService) runSyncEngine() {
+	defer s.wg.Done()
+
+	logger.Infof("starting sync engine with strategy: %T", s.currentStrategy)
+	// TODO: need to handle stop channel
+	for {
+		tasks, err := s.currentStrategy.NextActions()
+		if err != nil {
+			panic(fmt.Sprintf("current sync strategy next actions failed with: %s", err.Error()))
+		}
+
+		s.workerPool.submitRequests(tasks)
+
+		done, err := s.currentStrategy.IsFinished()
+		if err != nil {
+			panic(fmt.Sprintf("current sync strategy failed with: %s", err.Error()))
+		}
+
+		if done {
+			if s.defaultStrategy == nil {
+				panic("nil default strategy")
+			}
+
+			s.currentStrategy = s.defaultStrategy
+			s.defaultStrategy = nil
+		}
+	}
+}
diff --git a/lib/sync/worker.go b/lib/sync/worker.go
new file mode 100644
index 0000000000..f2d7f3e088
--- /dev/null
+++ b/lib/sync/worker.go
@@ -0,0 +1,90 @@
+// Copyright 2023 ChainSafe Systems (ON)
+// SPDX-License-Identifier: LGPL-3.0-only
+
+package sync
+
+import (
+	"errors"
+	"sync"
+
+	"github.com/libp2p/go-libp2p/core/peer"
+)
+
+// ErrStopTimeout is an error indicating that the worker stop operation timed out.
+var ErrStopTimeout = errors.New("stop timeout")
+
+// worker represents a worker that processes sync tasks by making network requests to peers.
+// It manages synchronisation tasks between nodes in Polkadot's peer-to-peer network.
+// The primary goal of the worker is to handle and coordinate tasks related to network requests,
+// ensuring that nodes stay synchronised with the blockchain state.
+type worker struct {
+	// Status of the worker (e.g., available, busy, etc.)
+	status byte
+
+	// ID of the peer this worker is associated with
+	peerID peer.ID
+
+	// Channel used as a semaphore to limit concurrent tasks. By making the channel buffered with some size,
+	// the creator of the channel can control how many workers can work concurrently and send requests.
+	sharedGuard chan struct{}
+
+	stopCh chan struct{}
+}
+
+// newWorker creates and returns a new worker instance.
+func newWorker(pID peer.ID, sharedGuard chan struct{}, stopCh chan struct{}) *worker {
+	return &worker{
+		peerID:      pID,
+		sharedGuard: sharedGuard,
+		status:      available,
+		stopCh:      stopCh,
+	}
+}
+
+// run starts the worker to process tasks from the queue.
+// queue: Channel from which the worker receives tasks
+// wg: WaitGroup to signal when the worker has finished processing
+func (w *worker) run(queue chan *syncTask, wg *sync.WaitGroup) {
+	defer func() {
+		logger.Debugf("[STOPPED] worker %s", w.peerID)
+		wg.Done()
+	}()
+
+	for {
+		select {
+		case <-w.stopCh:
+			return
+		case task := <-queue:
+			executeRequest(w.peerID, task, w.sharedGuard)
+		}
+	}
+}
+
+// executeRequest processes a sync task by making a network request to a peer.
+// who: ID of the peer making the request +// requestMaker: Interface to make the network request +// task: Sync task to be processed +// sharedGuard: Channel used for concurrency control +func executeRequest(who peer.ID, task *syncTask, sharedGuard chan struct{}) { + defer func() { + <-sharedGuard // Release the semaphore slot after the request is processed + }() + + sharedGuard <- struct{}{} // Acquire a semaphore slot before starting the request + + request := task.request + logger.Debugf("[EXECUTING] worker %s, block request: %s", who, request) + err := task.requestMaker.Do(who, request, task.response) + if err != nil { + logger.Debugf("[ERR] worker %s, err: %s", who, err) + } + + task.resultCh <- &syncTaskResult{ + who: who, + request: request, + response: task.response, + err: err, + } + + logger.Debugf("[FINISHED] worker %s, response: %s", who, task.response.String()) +} diff --git a/lib/sync/worker_pool.go b/lib/sync/worker_pool.go new file mode 100644 index 0000000000..1dd928519c --- /dev/null +++ b/lib/sync/worker_pool.go @@ -0,0 +1,227 @@ +// Copyright 2023 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package sync + +import ( + "crypto/rand" + "fmt" + "math/big" + "sync" + "time" + + "github.com/ChainSafe/gossamer/dot/network" + "github.com/ChainSafe/gossamer/lib/common" + "github.com/libp2p/go-libp2p/core/peer" + "golang.org/x/exp/maps" +) + +const ( + available byte = iota + busy + punished +) + +const ( + punishmentBaseTimeout = 5 * time.Minute + maxRequestsAllowed uint = 60 +) + +type syncTask struct { + requestMaker network.RequestMaker + request network.Message + response network.ResponseMessage + resultCh chan<- *syncTaskResult +} + +type syncTaskResult struct { + who peer.ID + request network.Message + response network.ResponseMessage + err error +} + +type syncWorker struct { + stopCh chan struct{} + bestBlockHash common.Hash + bestBlockNumber uint + worker *worker + queue chan *syncTask +} + +func (s *syncWorker) stop() { + +} + +type syncWorkerPool struct { + mtx sync.RWMutex + wg sync.WaitGroup + + network Network + workers map[peer.ID]*syncWorker + ignorePeers map[peer.ID]struct{} + + sharedGuard chan struct{} +} + +func newSyncWorkerPool(net Network) *syncWorkerPool { + swp := &syncWorkerPool{ + network: net, + workers: make(map[peer.ID]*syncWorker), + ignorePeers: make(map[peer.ID]struct{}), + sharedGuard: make(chan struct{}, maxRequestsAllowed), + } + + return swp +} + +// stop will shutdown all the available workers goroutines +func (s *syncWorkerPool) stop() error { + s.mtx.RLock() + defer s.mtx.RUnlock() + + for _, sw := range s.workers { + close(sw.queue) + } + + allWorkersDoneCh := make(chan struct{}) + go func() { + defer close(allWorkersDoneCh) + s.wg.Wait() + }() + + timeoutTimer := time.NewTimer(30 * time.Second) + select { + case <-timeoutTimer.C: + return fmt.Errorf("timeout reached while finishing workers") + case <-allWorkersDoneCh: + if !timeoutTimer.Stop() { + <-timeoutTimer.C + } + + return nil + } +} + +func (s *syncWorkerPool) fromBlockAnnounceHandshake(who peer.ID, bestBlockHash common.Hash, bestBlockNumber uint) { + s.mtx.Lock() + defer s.mtx.Unlock() + + if _, ok := s.ignorePeers[who]; ok { + return + } + + syncPeer, has := s.workers[who] + if has { + syncPeer.bestBlockHash = bestBlockHash + syncPeer.bestBlockNumber = bestBlockNumber + return + } + + workerStopCh := make(chan struct{}) + worker := newWorker(who, s.sharedGuard, workerStopCh) + workerQueue := make(chan *syncTask, maxRequestsAllowed) + + s.wg.Add(1) + go 
worker.run(workerQueue, &s.wg) + + s.workers[who] = &syncWorker{ + worker: worker, + queue: workerQueue, + bestBlockHash: bestBlockHash, + bestBlockNumber: bestBlockNumber, + stopCh: workerStopCh, + } + logger.Tracef("potential worker added, total in the pool %d", len(s.workers)) +} + +func (s *syncWorkerPool) removeWorker(who peer.ID) { + s.mtx.Lock() + defer s.mtx.Unlock() + + worker, ok := s.workers[who] + if !ok { + return + } + + close(worker.stopCh) + delete(s.workers, who) +} + +// submitRequest given a request, the worker pool will get the peer given the peer.ID +// parameter or if nil the very first available worker or +// to perform the request, the response will be dispatch in the resultCh. +func (s *syncWorkerPool) submitRequest(request *network.BlockRequestMessage, + who *peer.ID, resultCh chan<- *syncTaskResult) { + + task := &syncTask{ + request: request, + resultCh: resultCh, + } + + // if the request is bounded to a specific peer then just + // request it and sent through its queue otherwise send + // the request in the general queue where all worker are + // listening on + s.mtx.RLock() + defer s.mtx.RUnlock() + + if who != nil { + syncWorker, inMap := s.workers[*who] + if inMap { + if syncWorker == nil { + panic("sync worker should not be nil") + } + syncWorker.queue <- task + return + } + } + + // if the exact peer is not specified then + // randomly select a worker and assign the + // task to it, if the amount of workers is + var selectedWorkerIdx int + workers := maps.Values(s.workers) + nBig, err := rand.Int(rand.Reader, big.NewInt(int64(len(workers)))) + if err != nil { + panic(fmt.Errorf("fail to get a random number: %w", err)) + } + selectedWorkerIdx = int(nBig.Int64()) + selectedWorker := workers[selectedWorkerIdx] + selectedWorker.queue <- task +} + +// submitRequests takes an set of requests and will submit to the pool through submitRequest +// the response will be dispatch in the resultCh +func (s *syncWorkerPool) submitRequests(tasks []*syncTask) { + s.mtx.RLock() + defer s.mtx.RUnlock() + + allWorkers := maps.Values(s.workers) + for idx, task := range tasks { + workerID := idx % len(allWorkers) + syncWorker := allWorkers[workerID] + + syncWorker.queue <- task + } +} + +func (s *syncWorkerPool) ignorePeerAsWorker(who peer.ID) { + s.mtx.Lock() + defer s.mtx.Unlock() + + worker, has := s.workers[who] + if has { + close(worker.queue) + delete(s.workers, who) + s.ignorePeers[who] = struct{}{} + } +} + +// totalWorkers only returns available or busy workers +func (s *syncWorkerPool) totalWorkers() (total int) { + s.mtx.RLock() + defer s.mtx.RUnlock() + + return len(s.workers) +} From a5910769aaa80a06b9501fb9edfb463dcf10039f Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Thu, 25 Jul 2024 19:56:00 -0400 Subject: [PATCH 08/74] chore: strategies does not report or block peers --- dot/services.go | 21 +- dot/sync/chain_sync.go | 102 --------- dot/sync/errors.go | 17 +- dot/sync/syncer.go | 6 - lib/sync/fullsync.go | 479 ++++++++++++++++++++++++++++++++++++++- lib/sync/peer_view.go | 146 ++++++++++++ lib/sync/service.go | 46 +++- lib/sync/service_test.go | 7 + lib/sync/worker.go | 58 ++--- lib/sync/worker_pool.go | 153 +++---------- 10 files changed, 734 insertions(+), 301 deletions(-) create mode 100644 lib/sync/peer_view.go create mode 100644 lib/sync/service_test.go diff --git a/dot/services.go b/dot/services.go index 901daad3f8..a14b809cac 100644 --- a/dot/services.go +++ b/dot/services.go @@ -7,6 +7,7 @@ import ( "errors" "fmt" "strings" + "time" cfg 
"github.com/ChainSafe/gossamer/config" @@ -514,11 +515,11 @@ func (nodeBuilder) newSyncService(config *cfg.Config, st *state.Service, fg Bloc // return nil, fmt.Errorf("failed to parse sync log level: %w", err) // } - // const blockRequestTimeout = time.Second * 20 - // requestMaker := net.GetRequestResponseProtocol( - // network.SyncID, - // blockRequestTimeout, - // network.MaxBlockResponseSize) + const blockRequestTimeout = 30 * time.Second + requestMaker := net.GetRequestResponseProtocol( + network.SyncID, + blockRequestTimeout, + network.MaxBlockResponseSize) // syncCfg := &sync.Config{ // LogLvl: syncLogLevel, @@ -537,7 +538,15 @@ func (nodeBuilder) newSyncService(config *cfg.Config, st *state.Service, fg Bloc // RequestMaker: requestMaker, // } - return libsync.NewSyncService(net, st.Block, &libsync.FullSyncStrategy{}, nil), nil + genesisHeader, err := st.Block.BestBlockHeader() + if err != nil { + return nil, fmt.Errorf("cannot get genesis header: %w", err) + } + + defaultStrategy := libsync.NewFullSyncStrategy(genesisHeader, requestMaker) + return libsync.NewSyncService(net, st.Block, + defaultStrategy, + defaultStrategy), nil } func (nodeBuilder) createDigestHandler(st *state.Service) (*digest.Handler, error) { diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index e2f31e427e..80cb07658f 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -964,108 +964,6 @@ func (cs *chainSync) handleBlock(block *types.Block, announceImportedBlock bool) return nil } -// validateResponseFields checks that the expected fields are in the block data -func validateResponseFields(requestedData byte, blocks []*types.BlockData) error { - for _, bd := range blocks { - if bd == nil { - return errNilBlockData - } - - if (requestedData&network.RequestedDataHeader) == network.RequestedDataHeader && bd.Header == nil { - return fmt.Errorf("%w: %s", errNilHeaderInResponse, bd.Hash) - } - - if (requestedData&network.RequestedDataBody) == network.RequestedDataBody && bd.Body == nil { - return fmt.Errorf("%w: %s", errNilBodyInResponse, bd.Hash) - } - - // if we requested strictly justification - if (requestedData|network.RequestedDataJustification) == network.RequestedDataJustification && - bd.Justification == nil { - return fmt.Errorf("%w: %s", errNilJustificationInResponse, bd.Hash) - } - } - - return nil -} - -func isResponseAChain(responseBlockData []*types.BlockData) bool { - if len(responseBlockData) < 2 { - return true - } - - previousBlockData := responseBlockData[0] - for _, currBlockData := range responseBlockData[1:] { - previousHash := previousBlockData.Header.Hash() - isParent := previousHash == currBlockData.Header.ParentHash - if !isParent { - return false - } - - previousBlockData = currBlockData - } - - return true -} - -// doResponseGrowsTheChain will check if the acquired blocks grows the current chain -// matching their parent hashes -func doResponseGrowsTheChain(response, ongoingChain []*types.BlockData, startAtBlock uint, expectedTotal uint32) bool { - // the ongoing chain does not have any element, we can safely insert an item in it - if len(ongoingChain) < 1 { - return true - } - - compareParentHash := func(parent, child *types.BlockData) bool { - return parent.Header.Hash() == child.Header.ParentHash - } - - firstBlockInResponse := response[0] - firstBlockExactIndex := firstBlockInResponse.Header.Number - startAtBlock - if firstBlockExactIndex != 0 && firstBlockExactIndex < uint(expectedTotal) { - leftElement := ongoingChain[firstBlockExactIndex-1] - if 
leftElement != nil && !compareParentHash(leftElement, firstBlockInResponse) { - return false - } - } - - switch { - // if the response contains only one block then we should check both sides - // for example, if the response contains only one block called X we should - // check if its parent hash matches with the left element as well as we should - // check if the right element contains X hash as its parent hash - // ... W <- X -> Y ... - // we can skip left side comparison if X is in the 0 index and we can skip - // right side comparison if X is in the last index - case len(response) == 1: - if uint32(firstBlockExactIndex+1) < expectedTotal { - rightElement := ongoingChain[firstBlockExactIndex+1] - if rightElement != nil && !compareParentHash(firstBlockInResponse, rightElement) { - return false - } - } - // if the response contains more than 1 block then we need to compare - // only the start and the end of the acquired response, for example - // let's say we receive a response [C, D, E] and we need to check - // if those values fits correctly: - // ... B <- C D E -> F - // we skip the left check if its index is equals to 0 and we skip the right - // check if it ends in the latest position of the ongoing array - case len(response) > 1: - lastBlockInResponse := response[len(response)-1] - lastBlockExactIndex := lastBlockInResponse.Header.Number - startAtBlock - - if uint32(lastBlockExactIndex+1) < expectedTotal { - rightElement := ongoingChain[lastBlockExactIndex+1] - if rightElement != nil && !compareParentHash(lastBlockInResponse, rightElement) { - return false - } - } - } - - return true -} - func (cs *chainSync) getHighestBlock() (highestBlock uint, err error) { if cs.peerViewSet.size() == 0 { return 0, errNoPeers diff --git a/dot/sync/errors.go b/dot/sync/errors.go index ea96cd84d1..cfe579c3ea 100644 --- a/dot/sync/errors.go +++ b/dot/sync/errors.go @@ -17,14 +17,11 @@ var ( errRequestStartTooHigh = errors.New("request start number is higher than our best block") // chainSync errors - errNilBlockData = errors.New("block data is nil") - errNilHeaderInResponse = errors.New("expected header, received none") - errNilBodyInResponse = errors.New("expected body, received none") - errNilJustificationInResponse = errors.New("expected justification, received none") - errNoPeers = errors.New("no peers to sync with") - errPeerOnInvalidFork = errors.New("peer is on an invalid fork") - errFailedToGetParent = errors.New("failed to get parent header") - errStartAndEndMismatch = errors.New("request start and end hash are not on the same chain") - errFailedToGetDescendant = errors.New("failed to find descendant block") - errAlreadyInDisjointSet = errors.New("already in disjoint set") + + errNoPeers = errors.New("no peers to sync with") + errPeerOnInvalidFork = errors.New("peer is on an invalid fork") + errFailedToGetParent = errors.New("failed to get parent header") + errStartAndEndMismatch = errors.New("request start and end hash are not on the same chain") + errFailedToGetDescendant = errors.New("failed to find descendant block") + errAlreadyInDisjointSet = errors.New("already in disjoint set") ) diff --git a/dot/sync/syncer.go b/dot/sync/syncer.go index 6eccf30064..bcc33272da 100644 --- a/dot/sync/syncer.go +++ b/dot/sync/syncer.go @@ -197,9 +197,3 @@ func (s *Service) HighestBlock() uint { } return highestBlock } - -func reverseBlockData(data []*types.BlockData) { - for i, j := 0, len(data)-1; i < j; i, j = i+1, j-1 { - data[i], data[j] = data[j], data[i] - } -} diff --git a/lib/sync/fullsync.go 
b/lib/sync/fullsync.go
index 4a54b4b0d3..83b1e2d51a 100644
--- a/lib/sync/fullsync.go
+++ b/lib/sync/fullsync.go
@@ -1,25 +1,492 @@
 package sync
 
 import (
+	"errors"
 	"fmt"
+	"slices"
+	"strings"
+	"time"
 
 	"github.com/ChainSafe/gossamer/dot/network"
+	"github.com/ChainSafe/gossamer/dot/peerset"
+	"github.com/ChainSafe/gossamer/dot/types"
+	"github.com/ChainSafe/gossamer/internal/database"
+	"github.com/ChainSafe/gossamer/lib/common/variadic"
 	"github.com/libp2p/go-libp2p/core/peer"
 )
 
+const blockRequestTimeout = 30 * time.Second
+
 var _ Strategy = (*FullSyncStrategy)(nil)
 
-type FullSyncStrategy struct{}
+var (
+	errNilBlockData               = errors.New("block data is nil")
+	errNilHeaderInResponse        = errors.New("expected header, received none")
+	errNilBodyInResponse          = errors.New("expected body, received none")
+	errNilJustificationInResponse = errors.New("expected justification, received none")
+)
+
+type FullSyncStrategy struct {
+	bestBlockHeader *types.Header
+	peers           *peerViewSet
+	reqMaker        network.RequestMaker
+	stopCh          chan struct{}
+}
+
+func NewFullSyncStrategy(startHeader *types.Header, reqMaker network.RequestMaker) *FullSyncStrategy {
+	return &FullSyncStrategy{
+		bestBlockHeader: startHeader,
+		reqMaker:        reqMaker,
+		peers: &peerViewSet{
+			view:   make(map[peer.ID]peerView),
+			target: 0,
+		},
+	}
+}
+
+func (f *FullSyncStrategy) incompleteBlocksSync() ([]*syncTask, error) {
+	panic("incompleteBlocksSync not implemented yet")
+}
+
+func (f *FullSyncStrategy) NextActions() ([]*syncTask, error) {
+	currentTarget := f.peers.getTarget()
+	// our best block is equal to or ahead of the current target
+	// we're not lagging behind, so let's build the set of
+	// incomplete blocks and request them
+	if uint32(f.bestBlockHeader.Number) >= currentTarget {
+		return f.incompleteBlocksSync()
+	}
+
+	startRequestAt := f.bestBlockHeader.Number + 1
+	targetBlockNumber := startRequestAt + uint(f.peers.len())*128
+
+	if targetBlockNumber > uint(currentTarget) {
+		targetBlockNumber = uint(currentTarget)
+	}
 
-func (*FullSyncStrategy) IsFinished() (bool, error) {
-	return false, nil
+	requests := network.NewAscendingBlockRequests(startRequestAt, targetBlockNumber,
+		network.BootstrapRequestData)
+
+	tasks := make([]*syncTask, len(requests))
+	for idx, req := range requests {
+		tasks[idx] = &syncTask{
+			request:      req,
+			response:     &network.BlockResponseMessage{},
+			requestMaker: f.reqMaker,
+		}
+	}
+
+	return tasks, nil
+}
+
+func (*FullSyncStrategy) IsFinished(results []*syncTaskResult) (bool, []Change, []peer.ID, error) {
+	return false, nil, nil, nil
+}
+
+func (f *FullSyncStrategy) OnBlockAnnounceHandshake(from peer.ID, msg *network.BlockAnnounceHandshake) error {
+	logger.Infof("received block announce handshake from %s: #%d (%s)",
+		from,
+		msg.BestBlockNumber,
+		msg.BestBlockHash.Short(),
+	)
+
+	f.peers.update(from, msg.BestBlockHash, msg.BestBlockNumber)
+	return nil
 }
 
 func (*FullSyncStrategy) OnBlockAnnounce(from peer.ID, msg *network.BlockAnnounceMessage) error {
-	fmt.Printf("received block announce: %d", msg.Number)
+	logger.Infof("received block announce: %d", msg.Number)
 	return nil
 }
 
-func (*FullSyncStrategy) NextActions() ([]*syncTask, error) {
-	return nil, nil
+var ErrResultsTimeout = errors.New("waiting results reached timeout")
+
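To make the request-batching arithmetic in NextActions above concrete, a hedged
sketch with hypothetical numbers (128 mirrors the batch size used there):

package main

import "fmt"

func main() {
	// A node at block 1_000 with 10 peers and a network target of 5_000:
	bestBlock, peers, target := uint(1_000), uint(10), uint(5_000)

	start := bestBlock + 1   // 1_001
	end := start + peers*128 // 2_281, roughly one 128-block request per peer
	if end > target {        // only capped when the target is closer than that
		end = target
	}
	fmt.Printf("ascending requests cover blocks %d through %d\n", start, end)
}
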
+// handleWorkersResults: every time we submit requests to workers, their
+// results should be computed here, and every cycle should end up with a
+// complete chain; whenever we identify any error from a worker we should
+// evaluate the error, re-insert the request in the queue and wait for it
+// to complete
+// TODO: handle only justification requests
+func (cs *FullSyncStrategy) handleWorkersResults(results []*syncTaskResult, origin BlockOrigin) error {
+	repChanges := make([]Change, 0)
+	blocks := make([]peer.ID, 0)
+
+	for _, result := range results {
+		if result.err != nil {
+			if !errors.Is(result.err, network.ErrReceivedEmptyMessage) {
+				blocks = append(blocks, result.who)
+
+				if strings.Contains(result.err.Error(), "protocols not supported") {
+					repChanges = append(repChanges, Change{
+						who: result.who,
+						rep: peerset.ReputationChange{
+							Value:  peerset.BadProtocolValue,
+							Reason: peerset.BadProtocolReason,
+						},
+					})
+				}
+
+				if errors.Is(result.err, network.ErrNilBlockInResponse) {
+					repChanges = append(repChanges, Change{
+						who: result.who,
+						rep: peerset.ReputationChange{
+							Value:  peerset.BadMessageValue,
+							Reason: peerset.BadMessageReason,
+						},
+					})
+				}
+			}
+			continue
+		}
+
+		request := result.request.(*network.BlockRequestMessage)
+		response := result.response.(*network.BlockResponseMessage)
+
+		if request.Direction == network.Descending {
+			// reverse blocks before pre-validating and placing in ready queue
+			reverseBlockData(response.BlockData)
+		}
+
+		err := validateResponseFields(request.RequestedData, response.BlockData)
+		if err != nil {
+			logger.Criticalf("validating fields: %s", err)
+			// TODO: check the reputation change for nil body in response
+			// and nil justification in response
+			if errors.Is(err, errNilHeaderInResponse) {
+				repChanges = append(repChanges, Change{
+					who: result.who,
+					rep: peerset.ReputationChange{
+						Value:  peerset.IncompleteHeaderValue,
+						Reason: peerset.IncompleteHeaderReason,
+					},
+				})
+			}
+
+			err = cs.submitRequest(taskResult.request, nil, workersResults)
+			if err != nil {
+				return err
+			}
+			continue taskResultLoop
+		}
+	}
+
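// The loop below is the result-handling flow carried over from dot/sync's
// chainSync: it blocks on workersResults, revalidates every response
// (field completeness, chain continuity, known bad blocks), and resubmits
// failed or short requests until waitingBlocks reaches zero.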
+taskResultLoop:
+	for waitingBlocks > 0 {
+		// in a case where we don't handle workers results we should check the pool
+		idleDuration := time.Minute
+		idleTimer := time.NewTimer(idleDuration)
+
+		select {
+		case <-cs.stopCh:
+			return nil
+
+		case <-idleTimer.C:
+			return ErrResultsTimeout
+
+		case taskResult := <-workersResults:
+			if !idleTimer.Stop() {
+				<-idleTimer.C
+			}
+
+			who := taskResult.who
+			request := taskResult.request
+			response := taskResult.response
+
+			logger.Debugf("task result: peer(%s), with error: %v, with response: %v",
+				taskResult.who, taskResult.err != nil, taskResult.response != nil)
+
+			if taskResult.err != nil {
+				if !errors.Is(taskResult.err, network.ErrReceivedEmptyMessage) {
+					cs.workerPool.ignorePeerAsWorker(taskResult.who)
+
+					logger.Errorf("task result: peer(%s) error: %s",
+						taskResult.who, taskResult.err)
+
+					if strings.Contains(taskResult.err.Error(), "protocols not supported") {
+						cs.network.ReportPeer(peerset.ReputationChange{
+							Value:  peerset.BadProtocolValue,
+							Reason: peerset.BadProtocolReason,
+						}, who)
+					}
+
+					if errors.Is(taskResult.err, network.ErrNilBlockInResponse) {
+						cs.network.ReportPeer(peerset.ReputationChange{
+							Value:  peerset.BadMessageValue,
+							Reason: peerset.BadMessageReason,
+						}, who)
+					}
+				}
+
+				// TODO: avoid giving the same peer the same task
+				err := cs.submitRequest(request, nil, workersResults)
+				if err != nil {
+					return err
+				}
+				continue
+			}
+
+			if request.Direction == network.Descending {
+				// reverse blocks before pre-validating and placing in ready queue
+				reverseBlockData(response.BlockData)
+			}
+
+			err := validateResponseFields(request.RequestedData, response.BlockData)
+			if err != nil {
+				logger.Criticalf("validating fields: %s", err)
+				// TODO: check the reputation change for nil body in response
+				// and nil justification in response
+				if errors.Is(err, errNilHeaderInResponse) {
+					cs.network.ReportPeer(peerset.ReputationChange{
+						Value:  peerset.IncompleteHeaderValue,
+						Reason: peerset.IncompleteHeaderReason,
+					}, who)
+				}
+
+				err = cs.submitRequest(taskResult.request, nil, workersResults)
+				if err != nil {
+					return err
+				}
+				continue taskResultLoop
+			}
+
+			isChain := isResponseAChain(response.BlockData)
+			if !isChain {
+				logger.Criticalf("response from %s is not a chain", who)
+				err = cs.submitRequest(taskResult.request, nil, workersResults)
+				if err != nil {
+					return err
+				}
+				continue taskResultLoop
+			}
+
+			grows := doResponseGrowsTheChain(response.BlockData, syncingChain,
+				startAtBlock, expectedSyncedBlocks)
+			if !grows {
+				logger.Criticalf("response from %s does not grow the ongoing chain", who)
+				err = cs.submitRequest(taskResult.request, nil, workersResults)
+				if err != nil {
+					return err
+				}
+				continue taskResultLoop
+			}
+
+			for _, blockInResponse := range response.BlockData {
+				if slices.Contains(cs.badBlocks, blockInResponse.Hash.String()) {
+					logger.Criticalf("%s sent a known bad block: %s (#%d)",
+						who, blockInResponse.Hash.String(), blockInResponse.Number())
+
+					cs.network.ReportPeer(peerset.ReputationChange{
+						Value:  peerset.BadBlockAnnouncementValue,
+						Reason: peerset.BadBlockAnnouncementReason,
+					}, who)
+
+					cs.workerPool.ignorePeerAsWorker(taskResult.who)
+					err = cs.submitRequest(taskResult.request, nil, workersResults)
+					if err != nil {
+						return err
+					}
+					continue taskResultLoop
+				}
+
+				blockExactIndex := blockInResponse.Header.Number - startAtBlock
+				if blockExactIndex < uint(expectedSyncedBlocks) {
+					syncingChain[blockExactIndex] = blockInResponse
+				}
+			}
+
+			// we need to check if we've filled all positions
+			// otherwise we should wait for more responses
+			waitingBlocks -= uint32(len(response.BlockData))
+
+			// we received a response without the desired amount of blocks
+			// we should include a new request to retrieve the missing blocks
+			if len(response.BlockData) < int(*request.Max) {
+				difference := uint32(int(*request.Max) - len(response.BlockData))
+				lastItem := response.BlockData[len(response.BlockData)-1]
+
+				startRequestNumber := uint32(lastItem.Header.Number + 1)
+				startAt, err := variadic.NewUint32OrHash(startRequestNumber)
+				if err != nil {
+					panic(err)
+				}
+
+				taskResult.request = &network.BlockRequestMessage{
+					RequestedData: network.BootstrapRequestData,
+					StartingBlock: *startAt,
+					Direction:     network.Ascending,
+					Max:           &difference,
+				}
+				err = cs.submitRequest(taskResult.request, nil, workersResults)
+				if err != nil {
+					return err
+				}
+				continue taskResultLoop
+			}
+		}
+	}
+
+	retrieveBlocksSeconds := time.Since(startTime).Seconds()
+	logger.Infof("🔽 retrieved %d blocks, took: %.2f seconds, starting process...",
+		expectedSyncedBlocks, retrieveBlocksSeconds)
+
+	// response was validated! place into ready block queue
+	for _, bd := range syncingChain {
+		// block is ready to be processed!
+		if err := cs.handleReadyBlock(bd, origin); err != nil {
+			return fmt.Errorf("while handling ready block: %w", err)
+		}
+	}
+
+	cs.showSyncStats(startTime, len(syncingChain))
+	return nil
+}
+
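showSyncStats is called above but not defined in this patch; a plausible shape for
it (purely illustrative, the real helper may differ) derives a blocks-per-second
rate from the elapsed time:

package main

import (
	"fmt"
	"time"
)

// printSyncStats is a hypothetical stand-in for showSyncStats: it reports
// how many blocks were imported and the effective import rate.
func printSyncStats(start time.Time, blocks int) {
	elapsed := time.Since(start).Seconds()
	if elapsed <= 0 {
		return
	}
	fmt.Printf("imported %d blocks in %.2fs (%.2f blocks/s)\n",
		blocks, elapsed, float64(blocks)/elapsed)
}

func main() {
	start := time.Now().Add(-10 * time.Second) // pretend the batch took ten seconds
	printSyncStats(start, 1_280)
}
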
+func (cs *FullSyncStrategy) handleReadyBlock(bd *types.BlockData, origin BlockOrigin) error {
+	// if header was not requested, get it from the pending set
+	// if we're expecting headers, validate should ensure we have a header
+	if bd.Header == nil {
+		block := cs.pendingBlocks.getBlock(bd.Hash)
+		if block == nil {
+			// block wasn't in the pending set!
+			// let's check the db as maybe we already processed it
+			has, err := cs.blockState.HasHeader(bd.Hash)
+			if err != nil && !errors.Is(err, database.ErrNotFound) {
+				logger.Debugf("failed to check if header is known for hash %s: %s", bd.Hash, err)
+				return err
+			}
+
+			if has {
+				logger.Tracef("ignoring block we've already processed, hash=%s", bd.Hash)
+				return err
+			}
+
+			// this is bad and shouldn't happen
+			logger.Errorf("block with unknown header is ready: hash=%s", bd.Hash)
+			return err
+		}
+
+		if block.header == nil {
+			logger.Errorf("new ready block number (unknown) with hash %s", bd.Hash)
+			return nil
+		}
+
+		bd.Header = block.header
+	}
+
+	err := cs.processBlockData(*bd, origin)
+	if err != nil {
+		// depending on the error, we might want to save this block for later
+		logger.Errorf("block data processing for block with hash %s failed: %s", bd.Hash, err)
+		return err
+	}
+
+	cs.pendingBlocks.removeBlock(bd.Hash)
+	return nil
+}
+
+func reverseBlockData(data []*types.BlockData) {
+	for i, j := 0, len(data)-1; i < j; i, j = i+1, j-1 {
+		data[i], data[j] = data[j], data[i]
+	}
+}
+
+// validateResponseFields checks that the expected fields are in the block data
+func validateResponseFields(requestedData byte, blocks []*types.BlockData) error {
+	for _, bd := range blocks {
+		if bd == nil {
+			return errNilBlockData
+		}
+
+		if (requestedData&network.RequestedDataHeader) == network.RequestedDataHeader && bd.Header == nil {
+			return fmt.Errorf("%w: %s", errNilHeaderInResponse, bd.Hash)
+		}
+
+		if (requestedData&network.RequestedDataBody) == network.RequestedDataBody && bd.Body == nil {
+			return fmt.Errorf("%w: %s", errNilBodyInResponse, bd.Hash)
+		}
+
+		// if we requested strictly justification
+		if (requestedData|network.RequestedDataJustification) == network.RequestedDataJustification &&
+			bd.Justification == nil {
+			return fmt.Errorf("%w: %s", errNilJustificationInResponse, bd.Hash)
+		}
+	}
+
+	return nil
+}
+
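The requestedData checks above are plain bit-flag tests; a self-contained sketch of
the same pattern (the flag values here are illustrative, the real constants live in
the network package):

package main

import "fmt"

const (
	requestedDataHeader byte = 1 << iota // 0b001
	requestedDataBody                    // 0b010
	requestedDataJustification           // 0b100
)

func main() {
	requested := requestedDataHeader | requestedDataBody

	// The header bit is set, so a response item with a nil header is invalid.
	fmt.Println(requested&requestedDataHeader == requestedDataHeader) // true

	// Equality under OR is how "strictly justification" is detected:
	// it holds only when no other bit is requested.
	fmt.Println(requested|requestedDataJustification == requestedDataJustification) // false
}
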
+func isResponseAChain(responseBlockData []*types.BlockData) bool {
+	if len(responseBlockData) < 2 {
+		return true
+	}
+
+	previousBlockData := responseBlockData[0]
+	for _, currBlockData := range responseBlockData[1:] {
+		previousHash := previousBlockData.Header.Hash()
+		isParent := previousHash == currBlockData.Header.ParentHash
+		if !isParent {
+			return false
+		}
+
+		previousBlockData = currBlockData
+	}
+
+	return true
+}
+
+// doResponseGrowsTheChain checks if the acquired blocks grow the current chain
+// by matching their parent hashes
+func doResponseGrowsTheChain(response, ongoingChain []*types.BlockData, startAtBlock uint, expectedTotal uint32) bool {
+	// the ongoing chain does not have any element, we can safely insert an item in it
+	if len(ongoingChain) < 1 {
+		return true
+	}
+
+	compareParentHash := func(parent, child *types.BlockData) bool {
+		return parent.Header.Hash() == child.Header.ParentHash
+	}
+
+	firstBlockInResponse := response[0]
+	firstBlockExactIndex := firstBlockInResponse.Header.Number - startAtBlock
+	if firstBlockExactIndex != 0 && firstBlockExactIndex < uint(expectedTotal) {
+		leftElement := ongoingChain[firstBlockExactIndex-1]
+		if leftElement != nil && !compareParentHash(leftElement, firstBlockInResponse) {
+			return false
+		}
+	}
+
+	switch {
+	// if the response contains only one block then we should check both sides
+	// for example, if the response contains only one block called X we should
+	// check if its parent hash matches with the left element as well as we should
+	// check if the right element contains X hash as its parent hash
+	// ... W <- X -> Y ...
+	// we can skip the left side comparison if X is at index 0 and we can skip
+	// the right side comparison if X is at the last index
+	case len(response) == 1:
+		if uint32(firstBlockExactIndex+1) < expectedTotal {
+			rightElement := ongoingChain[firstBlockExactIndex+1]
+			if rightElement != nil && !compareParentHash(firstBlockInResponse, rightElement) {
+				return false
+			}
+		}
+	// if the response contains more than 1 block then we need to compare
+	// only the start and the end of the acquired response, for example
+	// let's say we receive a response [C, D, E] and we need to check
+	// if those values fit correctly:
+	// ... B <- C D E -> F
+	// we skip the left check if its index equals 0 and we skip the right
+	// check if it ends at the latest position of the ongoing array
+	case len(response) > 1:
+		lastBlockInResponse := response[len(response)-1]
+		lastBlockExactIndex := lastBlockInResponse.Header.Number - startAtBlock
+
+		if uint32(lastBlockExactIndex+1) < expectedTotal {
+			rightElement := ongoingChain[lastBlockExactIndex+1]
+			if rightElement != nil && !compareParentHash(lastBlockInResponse, rightElement) {
+				return false
+			}
+		}
+	}
+
+	return true
+}
diff --git a/lib/sync/peer_view.go b/lib/sync/peer_view.go
new file mode 100644
index 0000000000..a44a0e40bb
--- /dev/null
+++ b/lib/sync/peer_view.go
@@ -0,0 +1,146 @@
+package sync
+
+import (
+	"math/big"
+	"sort"
+	"sync"
+
+	"github.com/ChainSafe/gossamer/lib/common"
+	"github.com/libp2p/go-libp2p/core/peer"
+	"golang.org/x/exp/maps"
+)
+
+type peerView struct {
+	bestBlockNumber uint32
+	bestBlockHash   common.Hash
+}
+
+type peerViewSet struct {
+	mtx    sync.RWMutex
+	view   map[peer.ID]peerView
+	target uint32
+}
+
+func (p *peerViewSet) update(peerID peer.ID, hash common.Hash, number uint32) {
+	p.mtx.Lock()
+	defer p.mtx.Unlock()
+
+	newView := peerView{
+		bestBlockHash:   hash,
+		bestBlockNumber: number,
+	}
+
+	view, ok := p.view[peerID]
+	if ok && view.bestBlockNumber >= newView.bestBlockNumber {
+		return
+	}
+
+	p.view[peerID] = newView
+}
+
+// getTarget takes the average of all peer views' best numbers
+func (p *peerViewSet) getTarget() uint32 {
+	p.mtx.RLock()
+	defer p.mtx.RUnlock()
+
+	if len(p.view) == 0 {
+		return p.target
+	}
+
+	numbers := make([]uint32, 0, len(p.view))
+	// we are going to sort the data, remove the outliers, and then return the avg of all the valid elements
+	for _, view := range maps.Values(p.view) {
+		numbers = append(numbers, view.bestBlockNumber)
+	}
+
+	sum, count := nonOutliersSumCount(numbers)
+	quotientBigInt := uint32(big.NewInt(0).Div(sum, big.NewInt(int64(count))).Uint64())
+
+	if p.target >= quotientBigInt {
+		return p.target
+	}
+
+	p.target = quotientBigInt // cache latest calculated target
+	return p.target
+}
+
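A worked example of the target computation above, with hypothetical views: if ten
peers report best blocks 100 through 108 plus a single stray report of 10_000,
nonOutliersSumCount (defined below) finds q1 = 102 and q3 = 107, so any value above
107 + 2*5 = 117 is discarded as an outlier; the target becomes 936/9 = 104 instead
of being dragged upwards by the bogus view.
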
+func (p *peerViewSet) len() int {
+	p.mtx.RLock()
+	defer p.mtx.RUnlock()
+	return len(p.view)
+}
+
+// nonOutliersSumCount calculates the sum and count of non-outlier elements
+// Explanation:
+// IQR outlier detection
+// Q25 = 25th_percentile
+// Q75 = 75th_percentile
+// IQR = Q75 - Q25 // inter-quartile range
+// If x > Q75 + 1.5 * IQR or x < Q25 - 1.5 * IQR THEN x is a mild outlier
+// If x > Q75 + 3.0 * IQR or x < Q25 - 3.0 * IQR THEN x is an extreme outlier
+// Ref: http://www.mathwords.com/o/outlier.htm
+//
+// returns: sum and count of all the non-outlier elements
+func nonOutliersSumCount(dataArrUint []uint32) (sum *big.Int, count uint) {
+	dataArr := make([]*big.Int, len(dataArrUint))
+	for i, v := range dataArrUint {
+		dataArr[i] = big.NewInt(int64(v))
+	}
+
+	length := len(dataArr)
+
+	switch length {
+	case 0:
+		return big.NewInt(0), 0
+	case 1:
+		return dataArr[0], 1
+	case 2:
+		return big.NewInt(0).Add(dataArr[0], dataArr[1]), 2
+	}
+
+	sort.Slice(dataArr, func(i, j int) bool {
+		return dataArr[i].Cmp(dataArr[j]) < 0
+	})
+
+	half := length / 2
+	firstHalf := dataArr[:half]
+	var secondHalf []*big.Int
+
+	if length%2 == 0 {
+		secondHalf = dataArr[half:]
+	} else {
+		secondHalf = dataArr[half+1:]
+	}
+
+	q1 := getMedian(firstHalf)
+	q3 := getMedian(secondHalf)
+
+	iqr := big.NewInt(0).Sub(q3, q1)
+	iqr1_5 := big.NewInt(0).Mul(iqr, big.NewInt(2)) // instead of 1.5 it is 2.0 due to the rounding
+	lower := big.NewInt(0).Sub(q1, iqr1_5)
+	upper := big.NewInt(0).Add(q3, iqr1_5)
+
+	sum = big.NewInt(0)
+	for _, v := range dataArr {
+		// collect valid (non-outlier) values
+		lowPass := v.Cmp(lower)
+		highPass := v.Cmp(upper)
+		if lowPass >= 0 && highPass <= 0 {
+			sum.Add(sum, v)
+			count++
+		}
+	}
+
+	return sum, count
+}
+
+func getMedian(data []*big.Int) *big.Int {
+	length := len(data)
+	half := length / 2
+	if length%2 == 0 {
+		sum := big.NewInt(0).Add(data[half], data[half-1])
+		return sum.Div(sum, big.NewInt(2))
+	}
+
+	return data[half]
+}
diff --git a/lib/sync/service.go b/lib/sync/service.go
index 2294a6f188..b23703faee 100644
--- a/lib/sync/service.go
+++ b/lib/sync/service.go
@@ -7,6 +7,7 @@ import (
 	"time"
 
 	"github.com/ChainSafe/gossamer/dot/network"
+	"github.com/ChainSafe/gossamer/dot/peerset"
 	"github.com/ChainSafe/gossamer/dot/types"
 	"github.com/ChainSafe/gossamer/internal/log"
 	"github.com/libp2p/go-libp2p/core/peer"
@@ -16,20 +17,37 @@ var logger = log.NewFromGlobal(log.AddContext("pkg", "new-sync"))
 
 type Network interface {
 	AllConnectedPeersIDs() []peer.ID
+	ReportPeer(change peerset.ReputationChange, p peer.ID)
 	BlockAnnounceHandshake(*types.Header) error
+	GetRequestResponseProtocol(subprotocol string, requestTimeout time.Duration,
+		maxResponseSize uint64) *network.RequestResponseProtocol
 }
 
 type BlockState interface {
 	BestBlockHeader() (*types.Header, error)
 }
 
+type Change struct {
+	who peer.ID
+	rep peerset.ReputationChange
+}
+
 type Strategy interface {
 	OnBlockAnnounce(from peer.ID, msg *network.BlockAnnounceMessage) error
+	OnBlockAnnounceHandshake(from peer.ID, msg *network.BlockAnnounceHandshake) error
 	NextActions() ([]*syncTask, error)
-	IsFinished() (bool, error)
+	IsFinished(results []*syncTaskResult) (done bool, repChanges []Change, blocks []peer.ID, err error)
 }
 
+type BlockOrigin byte
+
+const (
+	networkInitialSync BlockOrigin = iota
+	networkBroadcast
+)
+
 type SyncService struct {
+	mu         sync.Mutex
 	wg         sync.WaitGroup
 	network    Network
 	blockState BlockState
@@ -44,7 +62,8 @@ type SyncService struct {
 	stopCh chan struct{}
 }
 
-func NewSyncService(network Network, blockState BlockState,
+func NewSyncService(network Network,
+	blockState BlockState,
 	currentStrategy, defaultStrategy Strategy) *SyncService {
 	return &SyncService{
 		network:           network,
@@ -53,7 +72,7 @@ func 
NewSyncService(network Network, blockState BlockState, defaultStrategy: defaultStrategy, workerPool: newSyncWorkerPool(network), waitPeersDuration: 2 * time.Second, - minPeers: 5, + minPeers: 3, stopCh: make(chan struct{}), } } @@ -107,6 +126,10 @@ func (s *SyncService) Stop() error { func (s *SyncService) HandleBlockAnnounceHandshake(from peer.ID, msg *network.BlockAnnounceHandshake) error { logger.Infof("receiving a block announce handshake: %s", from.String()) s.workerPool.fromBlockAnnounceHandshake(from, msg.BestBlockHash, uint(msg.BestBlockNumber)) + + s.mu.Lock() + defer s.mu.Unlock() + s.currentStrategy.OnBlockAnnounceHandshake(from, msg) return nil } @@ -136,6 +159,7 @@ func (s *SyncService) runSyncEngine() { defer s.wg.Done() logger.Infof("starting sync engine with strategy: %T", s.currentStrategy) + // TODO: need to handle stop channel for { tasks, err := s.currentStrategy.NextActions() @@ -143,20 +167,30 @@ func (s *SyncService) runSyncEngine() { panic(fmt.Sprintf("current sync strategy next actions failed with: %s", err.Error())) } - s.workerPool.submitRequests(tasks) + logger.Infof("sending %d tasks", len(tasks)) + results := s.workerPool.submitRequests(tasks) - done, err := s.currentStrategy.IsFinished() + done, repChanges, blocks, err := s.currentStrategy.IsFinished(results) if err != nil { panic(fmt.Sprintf("current sync strategy failed with: %s", err.Error())) } + for _, change := range repChanges { + s.network.ReportPeer(change.rep, change.who) + } + + for _, block := range blocks { + s.workerPool.ignorePeerAsWorker(block) + } + if done { if s.defaultStrategy == nil { panic("nil default strategy") } + s.mu.Lock() s.currentStrategy = s.defaultStrategy - s.defaultStrategy = nil + s.mu.Unlock() } } } diff --git a/lib/sync/service_test.go b/lib/sync/service_test.go new file mode 100644 index 0000000000..25b8cf9817 --- /dev/null +++ b/lib/sync/service_test.go @@ -0,0 +1,7 @@ +package sync + +import "testing" + +func TestSyncService(t *testing.T) { + +} diff --git a/lib/sync/worker.go b/lib/sync/worker.go index f2d7f3e088..1560d6e62c 100644 --- a/lib/sync/worker.go +++ b/lib/sync/worker.go @@ -24,67 +24,41 @@ type worker struct { // ID of the peer this worker is associated with peerID peer.ID - // Channel used as a semaphore to limit concurrent tasks. By making the channel buffered with some size, - // the creator of the channel can control how many workers can work concurrently and send requests. - sharedGuard chan struct{} - stopCh chan struct{} } // newWorker creates and returns a new worker instance. -func newWorker(pID peer.ID, sharedGuard chan struct{}, stopCh chan struct{}) *worker { +func newWorker(pID peer.ID) *worker { return &worker{ - peerID: pID, - sharedGuard: sharedGuard, - status: available, - stopCh: stopCh, + peerID: pID, + status: available, } } -// run starts the worker to process tasks from the queue. -// queue: Channel from which the worker receives tasks -// wg: WaitGroup to signal when the worker has finished processing -func (w *worker) run(queue chan *syncTask, wg *sync.WaitGroup) { +func executeRequest(wg *sync.WaitGroup, who *worker, task *syncTask, resCh chan<- *syncTaskResult) { defer func() { - logger.Debugf("[STOPPED] worker %s", w.peerID) + who.status = available wg.Done() }() - for { - select { - case <-w.stopCh: - return - case task := <-queue: - executeRequest(w.peerID, task, w.sharedGuard) - } - } -} - -// executeRequest processes a sync task by making a network request to a peer. 
-// who: ID of the peer making the request -// requestMaker: Interface to make the network request -// task: Sync task to be processed -// sharedGuard: Channel used for concurrency control -func executeRequest(who peer.ID, task *syncTask, sharedGuard chan struct{}) { - defer func() { - <-sharedGuard // Release the semaphore slot after the request is processed - }() - - sharedGuard <- struct{}{} // Acquire a semaphore slot before starting the request - request := task.request logger.Debugf("[EXECUTING] worker %s, block request: %s", who, request) - err := task.requestMaker.Do(who, request, task.response) + err := task.requestMaker.Do(who.peerID, request, task.response) if err != nil { logger.Debugf("[ERR] worker %s, err: %s", who, err) + resCh <- &syncTaskResult{ + who: who.peerID, + request: request, + err: err, + response: nil, + } + return } - task.resultCh <- &syncTaskResult{ - who: who, + logger.Debugf("[FINISHED] worker %s, response: %s", who, task.response.String()) + resCh <- &syncTaskResult{ + who: who.peerID, request: request, response: task.response, - err: err, } - - logger.Debugf("[FINISHED] worker %s, response: %s", who, task.response.String()) } diff --git a/lib/sync/worker_pool.go b/lib/sync/worker_pool.go index 1dd928519c..7c390ab889 100644 --- a/lib/sync/worker_pool.go +++ b/lib/sync/worker_pool.go @@ -4,9 +4,6 @@ package sync import ( - "crypto/rand" - "fmt" - "math/big" "sync" "time" @@ -31,26 +28,13 @@ type syncTask struct { requestMaker network.RequestMaker request network.Message response network.ResponseMessage - resultCh chan<- *syncTaskResult } type syncTaskResult struct { who peer.ID + err error request network.Message response network.ResponseMessage - err error -} - -type syncWorker struct { - stopCh chan struct{} - bestBlockHash common.Hash - bestBlockNumber uint - worker *worker - queue chan *syncTask -} - -func (s *syncWorker) stop() { - } type syncWorkerPool struct { @@ -58,7 +42,7 @@ type syncWorkerPool struct { wg sync.WaitGroup network Network - workers map[peer.ID]*syncWorker + workers map[peer.ID]*worker ignorePeers map[peer.ID]struct{} sharedGuard chan struct{} @@ -67,7 +51,7 @@ type syncWorkerPool struct { func newSyncWorkerPool(net Network) *syncWorkerPool { swp := &syncWorkerPool{ network: net, - workers: make(map[peer.ID]*syncWorker), + workers: make(map[peer.ID]*worker), ignorePeers: make(map[peer.ID]struct{}), sharedGuard: make(chan struct{}, maxRequestsAllowed), } @@ -75,34 +59,6 @@ func newSyncWorkerPool(net Network) *syncWorkerPool { return swp } -// stop will shutdown all the available workers goroutines -func (s *syncWorkerPool) stop() error { - s.mtx.RLock() - defer s.mtx.RUnlock() - - for _, sw := range s.workers { - close(sw.queue) - } - - allWorkersDoneCh := make(chan struct{}) - go func() { - defer close(allWorkersDoneCh) - s.wg.Wait() - }() - - timeoutTimer := time.NewTimer(30 * time.Second) - select { - case <-timeoutTimer.C: - return fmt.Errorf("timeout reached while finishing workers") - case <-allWorkersDoneCh: - if !timeoutTimer.Stop() { - <-timeoutTimer.C - } - - return nil - } -} - func (s *syncWorkerPool) fromBlockAnnounceHandshake(who peer.ID, bestBlockHash common.Hash, bestBlockNumber uint) { s.mtx.Lock() defer s.mtx.Unlock() @@ -111,111 +67,62 @@ func (s *syncWorkerPool) fromBlockAnnounceHandshake(who peer.ID, bestBlockHash c return } - syncPeer, has := s.workers[who] + _, has := s.workers[who] if has { - syncPeer.bestBlockHash = bestBlockHash - syncPeer.bestBlockNumber = bestBlockNumber return } - workerStopCh := make(chan 
struct{}) - worker := newWorker(who, s.sharedGuard, workerStopCh) - workerQueue := make(chan *syncTask, maxRequestsAllowed) - - s.wg.Add(1) - go worker.run(workerQueue, &s.wg) - - s.workers[who] = &syncWorker{ - worker: worker, - queue: workerQueue, - bestBlockHash: bestBlockHash, - bestBlockNumber: bestBlockNumber, - stopCh: workerStopCh, - } + s.workers[who] = newWorker(who) logger.Tracef("potential worker added, total in the pool %d", len(s.workers)) } func (s *syncWorkerPool) removeWorker(who peer.ID) { s.mtx.Lock() defer s.mtx.Unlock() - - worker, ok := s.workers[who] - if !ok { - return - } - - close(worker.stopCh) delete(s.workers, who) } -// submitRequest given a request, the worker pool will get the peer given the peer.ID -// parameter or if nil the very first available worker or -// to perform the request, the response will be dispatch in the resultCh. -func (s *syncWorkerPool) submitRequest(request *network.BlockRequestMessage, - who *peer.ID, resultCh chan<- *syncTaskResult) { - - task := &syncTask{ - request: request, - resultCh: resultCh, - } - - // if the request is bounded to a specific peer then just - // request it and sent through its queue otherwise send - // the request in the general queue where all worker are - // listening on - s.mtx.RLock() - defer s.mtx.RUnlock() - - if who != nil { - syncWorker, inMap := s.workers[*who] - if inMap { - if syncWorker == nil { - panic("sync worker should not be nil") - } - syncWorker.queue <- task - return - } - } - - // if the exact peer is not specified then - // randomly select a worker and assign the - // task to it, if the amount of workers is - var selectedWorkerIdx int - workers := maps.Values(s.workers) - nBig, err := rand.Int(rand.Reader, big.NewInt(int64(len(workers)))) - if err != nil { - panic(fmt.Errorf("fail to get a random number: %w", err)) - } - selectedWorkerIdx = int(nBig.Int64()) - selectedWorker := workers[selectedWorkerIdx] - selectedWorker.queue <- task -} - // submitRequests takes an set of requests and will submit to the pool through submitRequest // the response will be dispatch in the resultCh -func (s *syncWorkerPool) submitRequests(tasks []*syncTask) { +func (s *syncWorkerPool) submitRequests(tasks []*syncTask) []*syncTaskResult { s.mtx.RLock() defer s.mtx.RUnlock() + wg := sync.WaitGroup{} + resCh := make(chan *syncTaskResult, len(tasks)) + allWorkers := maps.Values(s.workers) for idx, task := range tasks { workerID := idx % len(allWorkers) - syncWorker := allWorkers[workerID] + worker := allWorkers[workerID] + if worker.status != available { + continue + } + + worker.status = busy + wg.Add(1) + go executeRequest(&wg, worker, task, resCh) + } + + go func() { + wg.Wait() + close(resCh) + }() - syncWorker.queue <- task + results := make([]*syncTaskResult, 0) + for r := range resCh { + results = append(results, r) } + + return results } func (s *syncWorkerPool) ignorePeerAsWorker(who peer.ID) { s.mtx.Lock() defer s.mtx.Unlock() - worker, has := s.workers[who] - if has { - close(worker.queue) - delete(s.workers, who) - s.ignorePeers[who] = struct{}{} - } + delete(s.workers, who) + s.ignorePeers[who] = struct{}{} } // totalWorkers only returns available or busy workers From e9d1bcc476f15519c729727f0809f492f110a961 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Mon, 29 Jul 2024 09:34:20 -0400 Subject: [PATCH 09/74] wip: fullsync strategy working, need to polish the libsync servic --- dot/services.go | 40 +-- dot/sync/errors.go | 6 +- lib/sync/fullsync.go | 635 +++++++++++++++++++--------------- 
lib/sync/service.go | 27 +- lib/sync/worker.go | 11 +- lib/sync/worker_pool.go | 29 +- pkg/trie/inmemory/iterator.go | 2 +- 7 files changed, 426 insertions(+), 324 deletions(-) diff --git a/dot/services.go b/dot/services.go index a14b809cac..d3e61554b1 100644 --- a/dot/services.go +++ b/dot/services.go @@ -505,10 +505,10 @@ func (nodeBuilder) newSyncService(config *cfg.Config, st *state.Service, fg Bloc // return nil, err // } - // genesisData, err := st.Base.LoadGenesisData() - // if err != nil { - // return nil, err - // } + genesisData, err := st.Base.LoadGenesisData() + if err != nil { + return nil, err + } // syncLogLevel, err := log.ParseLevel(config.Log.Sync) // if err != nil { @@ -521,29 +521,25 @@ func (nodeBuilder) newSyncService(config *cfg.Config, st *state.Service, fg Bloc blockRequestTimeout, network.MaxBlockResponseSize) - // syncCfg := &sync.Config{ - // LogLvl: syncLogLevel, - // Network: net, - // BlockState: st.Block, - // StorageState: st.Storage, - // TransactionState: st.Transaction, - // FinalityGadget: fg, - // BabeVerifier: verifier, - // BlockImportHandler: cs, - // MinPeers: config.Network.MinPeers, - // MaxPeers: config.Network.MaxPeers, - // SlotDuration: slotDuration, - // Telemetry: telemetryMailer, - // BadBlocks: genesisData.BadBlocks, - // RequestMaker: requestMaker, - // } - genesisHeader, err := st.Block.BestBlockHeader() if err != nil { return nil, fmt.Errorf("cannot get genesis header: %w", err) } - defaultStrategy := libsync.NewFullSyncStrategy(genesisHeader, requestMaker) + syncCfg := &libsync.FullSyncConfig{ + StartHeader: genesisHeader, + BlockState: st.Block, + StorageState: st.Storage, + TransactionState: st.Transaction, + FinalityGadget: fg, + BabeVerifier: verifier, + BlockImportHandler: cs, + Telemetry: telemetryMailer, + BadBlocks: genesisData.BadBlocks, + RequestMaker: requestMaker, + } + + defaultStrategy := libsync.NewFullSyncStrategy(syncCfg) return libsync.NewSyncService(net, st.Block, defaultStrategy, defaultStrategy), nil diff --git a/dot/sync/errors.go b/dot/sync/errors.go index cfe579c3ea..92947ddef3 100644 --- a/dot/sync/errors.go +++ b/dot/sync/errors.go @@ -18,9 +18,9 @@ var ( // chainSync errors - errNoPeers = errors.New("no peers to sync with") - errPeerOnInvalidFork = errors.New("peer is on an invalid fork") - errFailedToGetParent = errors.New("failed to get parent header") + errNoPeers = errors.New("no peers to sync with") + errPeerOnInvalidFork = errors.New("peer is on an invalid fork") + errStartAndEndMismatch = errors.New("request start and end hash are not on the same chain") errFailedToGetDescendant = errors.New("failed to find descendant block") errAlreadyInDisjointSet = errors.New("already in disjoint set") diff --git a/lib/sync/fullsync.go b/lib/sync/fullsync.go index 83b1e2d51a..9b705f780d 100644 --- a/lib/sync/fullsync.go +++ b/lib/sync/fullsync.go @@ -1,42 +1,116 @@ package sync import ( + "bytes" + "encoding/json" "errors" "fmt" "slices" "strings" - "time" + "sync" "github.com/ChainSafe/gossamer/dot/network" "github.com/ChainSafe/gossamer/dot/peerset" + "github.com/ChainSafe/gossamer/dot/telemetry" "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/internal/database" - "github.com/ChainSafe/gossamer/lib/common/variadic" + "github.com/ChainSafe/gossamer/lib/common" + rtstorage "github.com/ChainSafe/gossamer/lib/runtime/storage" "github.com/libp2p/go-libp2p/core/peer" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" ) -const 
blockRequestTimeout = 30 * time.Second - var _ Strategy = (*FullSyncStrategy)(nil) var ( + errFailedToGetParent = errors.New("failed to get parent header") errNilBlockData = errors.New("block data is nil") errNilHeaderInResponse = errors.New("expected header, received none") errNilBodyInResponse = errors.New("expected body, received none") errNilJustificationInResponse = errors.New("expected justification, received none") + + blockSizeGauge = promauto.NewGauge(prometheus.GaugeOpts{ + Namespace: "gossamer_sync", + Name: "block_size", + Help: "represent the size of blocks synced", + }) +) + +type ( + // Telemetry is the telemetry client to send telemetry messages. + Telemetry interface { + SendMessage(msg json.Marshaler) + } + + // StorageState is the interface for the storage state + StorageState interface { + TrieState(root *common.Hash) (*rtstorage.TrieState, error) + sync.Locker + } + + // TransactionState is the interface for transaction queue methods + TransactionState interface { + RemoveExtrinsic(ext types.Extrinsic) + } + + // BabeVerifier deals with BABE block verification + BabeVerifier interface { + VerifyBlock(header *types.Header) error + } + + // FinalityGadget implements justification verification functionality + FinalityGadget interface { + VerifyBlockJustification(common.Hash, []byte) error + } + + // BlockImportHandler is the interface for the handler of newly imported blocks + BlockImportHandler interface { + HandleBlockImport(block *types.Block, state *rtstorage.TrieState, announce bool) error + } ) +// Config is the configuration for the sync Service. +type FullSyncConfig struct { + StartHeader *types.Header + BlockState BlockState + StorageState StorageState + FinalityGadget FinalityGadget + TransactionState TransactionState + BlockImportHandler BlockImportHandler + BabeVerifier BabeVerifier + Telemetry Telemetry + BadBlocks []string + RequestMaker network.RequestMaker +} + type FullSyncStrategy struct { - bestBlockHeader *types.Header - peers *peerViewSet - reqMaker network.RequestMaker - stopCh chan struct{} + bestBlockHeader *types.Header + missingRequests []*network.BlockRequestMessage + disjointBlocks [][]*types.BlockData + peers *peerViewSet + badBlocks []string + reqMaker network.RequestMaker + blockState BlockState + storageState StorageState + transactionState TransactionState + babeVerifier BabeVerifier + finalityGadget FinalityGadget + blockImportHandler BlockImportHandler + telemetry Telemetry } -func NewFullSyncStrategy(startHeader *types.Header, reqMaker network.RequestMaker) *FullSyncStrategy { +func NewFullSyncStrategy(cfg *FullSyncConfig) *FullSyncStrategy { return &FullSyncStrategy{ - bestBlockHeader: startHeader, - reqMaker: reqMaker, + badBlocks: cfg.BadBlocks, + bestBlockHeader: cfg.StartHeader, + reqMaker: cfg.RequestMaker, + blockState: cfg.BlockState, + storageState: cfg.StorageState, + transactionState: cfg.TransactionState, + babeVerifier: cfg.BabeVerifier, + finalityGadget: cfg.FinalityGadget, + blockImportHandler: cfg.BlockImportHandler, + telemetry: cfg.Telemetry, peers: &peerViewSet{ view: make(map[peer.ID]peerView), target: 0, @@ -49,6 +123,10 @@ func (f *FullSyncStrategy) incompleteBlocksSync() ([]*syncTask, error) { } func (f *FullSyncStrategy) NextActions() ([]*syncTask, error) { + if len(f.missingRequests) > 0 { + return f.createTasks(f.missingRequests), nil + } + currentTarget := f.peers.getTarget() // our best block is equal or ahead of current target // we're not legging behind, so let's set the set of @@ -58,7 +136,7 @@ func (f 
*FullSyncStrategy) NextActions() ([]*syncTask, error) { } startRequestAt := f.bestBlockHeader.Number + 1 - targetBlockNumber := startRequestAt + uint(f.peers.len())*128 + targetBlockNumber := startRequestAt + 60*128 if targetBlockNumber > uint(currentTarget) { targetBlockNumber = uint(currentTarget) @@ -66,7 +144,10 @@ func (f *FullSyncStrategy) NextActions() ([]*syncTask, error) { requests := network.NewAscendingBlockRequests(startRequestAt, targetBlockNumber, network.BootstrapRequestData) + return f.createTasks(requests), nil +} +func (f *FullSyncStrategy) createTasks(requests []*network.BlockRequestMessage) []*syncTask { tasks := make([]*syncTask, len(requests)) for idx, req := range requests { tasks[idx] = &syncTask{ @@ -75,12 +156,41 @@ func (f *FullSyncStrategy) NextActions() ([]*syncTask, error) { requestMaker: f.reqMaker, } } - - return tasks, nil + return tasks } -func (*FullSyncStrategy) IsFinished(results []*syncTaskResult) (bool, []Change, []peer.ID, error) { - return false, nil, nil, nil +func (f *FullSyncStrategy) IsFinished(results []*syncTaskResult) (bool, []Change, []peer.ID, error) { + repChanges, blocks, missingReq, validResp := validateResults(results, f.badBlocks) + f.missingRequests = missingReq + + if f.disjointBlocks == nil { + f.disjointBlocks = make([][]*types.BlockData, 0) + } + + // merge validResp with the current disjoint blocks + for _, resp := range validResp { + f.disjointBlocks = append(f.disjointBlocks, resp.BlockData) + } + + // given the validResponses, can we start importing the blocks or + // we should wait for the missing requests to fill the gap? + blocksToImport, disjointBlocks := blocksAvailable(f.bestBlockHeader.Hash(), f.bestBlockHeader.Number, f.disjointBlocks) + f.disjointBlocks = disjointBlocks + + if len(blocksToImport) > 0 { + for _, blockToImport := range blocksToImport { + fmt.Printf("handling block #%d (%s)\n", blockToImport.Header.Number, blockToImport.Hash.Short()) + err := f.handleReadyBlock(blockToImport, networkInitialSync) + if err != nil { + return false, nil, nil, fmt.Errorf("while handling ready block: %w", err) + } + f.bestBlockHeader = blockToImport.Header + } + } + + fmt.Printf("best block #%d (%s)\n", f.bestBlockHeader.Number, f.bestBlockHeader.Hash().String()) + + return false, repChanges, blocks, nil } func (f *FullSyncStrategy) OnBlockAnnounceHandshake(from peer.ID, msg *network.BlockAnnounceHandshake) error { @@ -101,16 +211,158 @@ func (*FullSyncStrategy) OnBlockAnnounce(from peer.ID, msg *network.BlockAnnounc var ErrResultsTimeout = errors.New("waiting results reached timeout") -// handleWorkersResults, every time we submit requests to workers they results should be computed here -// and every cicle we should endup with a complete chain, whenever we identify -// any error from a worker we should evaluate the error and re-insert the request -// in the queue and wait for it to completes -// TODO: handle only justification requests -func (cs *FullSyncStrategy) handleWorkersResults(results []*syncTaskResult, origin BlockOrigin) error { - repChanges := make([]Change, 0) - blocks := make([]peer.ID, 0) +func (f *FullSyncStrategy) handleReadyBlock(bd *types.BlockData, origin BlockOrigin) error { + err := f.processBlockData(*bd, origin) + if err != nil { + // depending on the error, we might want to save this block for later + logger.Errorf("processing block #%d (%s) failed: %s", bd.Header.Number, bd.Hash, err) + return err + } + + return nil +} + +// processBlockData processes the BlockData from a BlockResponse and +// returns 
the index of the last BlockData it handled on success, +// or the index of the block data that errored on failure. +// TODO: https://github.com/ChainSafe/gossamer/issues/3468 +func (f *FullSyncStrategy) processBlockData(blockData types.BlockData, origin BlockOrigin) error { + // while in bootstrap mode we don't need to broadcast block announcements + // TODO: set true if not in initial sync setup + announceImportedBlock := false + + if blockData.Header != nil { + if blockData.Body != nil { + err := f.processBlockDataWithHeaderAndBody(blockData, origin, announceImportedBlock) + if err != nil { + return fmt.Errorf("processing block data with header and body: %w", err) + } + } + + if blockData.Justification != nil && len(*blockData.Justification) > 0 { + err := f.handleJustification(blockData.Header, *blockData.Justification) + if err != nil { + return fmt.Errorf("handling justification: %w", err) + } + } + } + + err := f.blockState.CompareAndSetBlockData(&blockData) + if err != nil { + return fmt.Errorf("comparing and setting block data: %w", err) + } + + return nil +} + +func (f *FullSyncStrategy) processBlockDataWithHeaderAndBody(blockData types.BlockData, + origin BlockOrigin, announceImportedBlock bool) (err error) { + + if origin != networkInitialSync { + err = f.babeVerifier.VerifyBlock(blockData.Header) + if err != nil { + return fmt.Errorf("babe verifying block: %w", err) + } + } + + f.handleBody(blockData.Body) + + block := &types.Block{ + Header: *blockData.Header, + Body: *blockData.Body, + } + + err = f.handleBlock(block, announceImportedBlock) + if err != nil { + return fmt.Errorf("handling block: %w", err) + } + + return nil +} + +// handleHeader handles block bodies included in BlockResponses +func (f *FullSyncStrategy) handleBody(body *types.Body) { + acc := 0 + for _, ext := range *body { + acc += len(ext) + f.transactionState.RemoveExtrinsic(ext) + } + + blockSizeGauge.Set(float64(acc)) +} + +// handleHeader handles blocks (header+body) included in BlockResponses +func (f *FullSyncStrategy) handleBlock(block *types.Block, announceImportedBlock bool) error { + parent, err := f.blockState.GetHeader(block.Header.ParentHash) + if err != nil { + return fmt.Errorf("%w: %s", errFailedToGetParent, err) + } + + f.storageState.Lock() + defer f.storageState.Unlock() + + ts, err := f.storageState.TrieState(&parent.StateRoot) + if err != nil { + return err + } + + root := ts.MustRoot() + if !bytes.Equal(parent.StateRoot[:], root[:]) { + panic("parent state root does not match snapshot state root") + } + + rt, err := f.blockState.GetRuntime(parent.Hash()) + if err != nil { + return err + } + + rt.SetContextStorage(ts) + + _, err = rt.ExecuteBlock(block) + if err != nil { + return fmt.Errorf("failed to execute block %d: %w", block.Header.Number, err) + } + + if err = f.blockImportHandler.HandleBlockImport(block, ts, announceImportedBlock); err != nil { + return err + } + + blockHash := block.Header.Hash() + f.telemetry.SendMessage(telemetry.NewBlockImport( + &blockHash, + block.Header.Number, + "NetworkInitialSync")) + + return nil +} + +func (f *FullSyncStrategy) handleJustification(header *types.Header, justification []byte) (err error) { + headerHash := header.Hash() + err = f.finalityGadget.VerifyBlockJustification(headerHash, justification) + if err != nil { + return fmt.Errorf("verifying block number %d justification: %w", header.Number, err) + } + + err = f.blockState.SetJustification(headerHash, justification) + if err != nil { + return fmt.Errorf("setting justification for block 
number %d: %w", header.Number, err) + } + return nil +} + +func validateResults(results []*syncTaskResult, badBlocks []string) (repChanges []Change, blocks []peer.ID, + missingReqs []*network.BlockRequestMessage, validRes []*network.BlockResponseMessage) { + repChanges = make([]Change, 0) + blocks = make([]peer.ID, 0) + + missingReqs = make([]*network.BlockRequestMessage, 0, len(results)) + validRes = make([]*network.BlockResponseMessage, 0, len(results)) + +resultLoop: for _, result := range results { + request := result.request.(*network.BlockRequestMessage) + if result.err != nil { if !errors.Is(result.err, network.ErrReceivedEmptyMessage) { blocks = append(blocks, result.who) @@ -135,15 +387,15 @@ func (cs *FullSyncStrategy) handleWorkersResults(results []*syncTaskResult, orig }) } } + + missingReqs = append(missingReqs, request) continue } - request := result.request.(*network.BlockRequestMessage) response := result.response.(*network.BlockResponseMessage) - if request.Direction == network.Descending { // reverse blocks before pre-validating and placing in ready queue - reverseBlockData(response.BlockData) + slices.Reverse(response.BlockData) } err := validateResponseFields(request.RequestedData, response.BlockData) @@ -161,232 +413,103 @@ func (cs *FullSyncStrategy) handleWorkersResults(results []*syncTaskResult, orig }) } - err = cs.submitRequest(taskResult.request, nil, workersResults) - if err != nil { - return err - } - continue taskResultLoop + missingReqs = append(missingReqs, request) + continue } - } - -taskResultLoop: - for waitingBlocks > 0 { - // in a case where we don't handle workers results we should check the pool - idleDuration := time.Minute - idleTimer := time.NewTimer(idleDuration) - - select { - case <-cs.stopCh: - return nil - - case <-idleTimer.C: - return ErrResultsTimeout - - case taskResult := <-workersResults: - if !idleTimer.Stop() { - <-idleTimer.C - } - - who := taskResult.who - request := taskResult.request - response := taskResult.response - - logger.Debugf("task result: peer(%s), with error: %v, with response: %v", - taskResult.who, taskResult.err != nil, taskResult.response != nil) - - if taskResult.err != nil { - if !errors.Is(taskResult.err, network.ErrReceivedEmptyMessage) { - cs.workerPool.ignorePeerAsWorker(taskResult.who) - - logger.Errorf("task result: peer(%s) error: %s", - taskResult.who, taskResult.err) - - if strings.Contains(taskResult.err.Error(), "protocols not supported") { - cs.network.ReportPeer(peerset.ReputationChange{ - Value: peerset.BadProtocolValue, - Reason: peerset.BadProtocolReason, - }, who) - } - if errors.Is(taskResult.err, network.ErrNilBlockInResponse) { - cs.network.ReportPeer(peerset.ReputationChange{ - Value: peerset.BadMessageValue, - Reason: peerset.BadMessageReason, - }, who) - } - } - - // TODO: avoid the same peer to get the same task - err := cs.submitRequest(request, nil, workersResults) - if err != nil { - return err - } - continue - } - - if request.Direction == network.Descending { - // reverse blocks before pre-validating and placing in ready queue - reverseBlockData(response.BlockData) - } - - err := validateResponseFields(request.RequestedData, response.BlockData) - if err != nil { - logger.Criticalf("validating fields: %s", err) - // TODO: check the reputation change for nil body in response - // and nil justification in response - if errors.Is(err, errNilHeaderInResponse) { - cs.network.ReportPeer(peerset.ReputationChange{ - Value: peerset.IncompleteHeaderValue, - Reason: peerset.IncompleteHeaderReason, 
- }, who) - } - - err = cs.submitRequest(taskResult.request, nil, workersResults) - if err != nil { - return err - } - continue taskResultLoop - } - - isChain := isResponseAChain(response.BlockData) - if !isChain { - logger.Criticalf("response from %s is not a chain", who) - err = cs.submitRequest(taskResult.request, nil, workersResults) - if err != nil { - return err - } - continue taskResultLoop - } - - grows := doResponseGrowsTheChain(response.BlockData, syncingChain, - startAtBlock, expectedSyncedBlocks) - if !grows { - logger.Criticalf("response from %s does not grows the ongoing chain", who) - err = cs.submitRequest(taskResult.request, nil, workersResults) - if err != nil { - return err - } - continue taskResultLoop - } + if !isResponseAChain(response.BlockData) { + logger.Criticalf("response from %s is not a chain", result.who) + missingReqs = append(missingReqs, request) + continue + } - for _, blockInResponse := range response.BlockData { - if slices.Contains(cs.badBlocks, blockInResponse.Hash.String()) { - logger.Criticalf("%s sent a known bad block: %s (#%d)", - who, blockInResponse.Hash.String(), blockInResponse.Number()) + for _, block := range response.BlockData { + if slices.Contains(badBlocks, block.Hash.String()) { + logger.Criticalf("%s sent a known bad block: #%d (%s)", + result.who, block.Number(), block.Hash.String()) - cs.network.ReportPeer(peerset.ReputationChange{ + blocks = append(blocks, result.who) + repChanges = append(repChanges, Change{ + who: result.who, + rep: peerset.ReputationChange{ Value: peerset.BadBlockAnnouncementValue, Reason: peerset.BadBlockAnnouncementReason, - }, who) - - cs.workerPool.ignorePeerAsWorker(taskResult.who) - err = cs.submitRequest(taskResult.request, nil, workersResults) - if err != nil { - return err - } - continue taskResultLoop - } + }, + }) - blockExactIndex := blockInResponse.Header.Number - startAtBlock - if blockExactIndex < uint(expectedSyncedBlocks) { - syncingChain[blockExactIndex] = blockInResponse - } + missingReqs = append(missingReqs, request) + continue resultLoop } + } - // we need to check if we've filled all positions - // otherwise we should wait for more responses - waitingBlocks -= uint32(len(response.BlockData)) + validRes = append(validRes, response) + } - // we received a response without the desired amount of blocks - // we should include a new request to retrieve the missing blocks - if len(response.BlockData) < int(*request.Max) { - difference := uint32(int(*request.Max) - len(response.BlockData)) - lastItem := response.BlockData[len(response.BlockData)-1] + return repChanges, blocks, missingReqs, validRes +} - startRequestNumber := uint32(lastItem.Header.Number + 1) - startAt, err := variadic.NewUint32OrHash(startRequestNumber) - if err != nil { - panic(err) - } +// blocksAvailable given a set of responses, which are fragments of the chain we should +// check if there is fragments that can be imported or fragments that are disjoint (cannot be imported yet) +func blocksAvailable(blockHash common.Hash, blockNumber uint, responses [][]*types.BlockData) ( + []*types.BlockData, [][]*types.BlockData) { + if len(responses) == 0 { + return nil, nil + } - taskResult.request = &network.BlockRequestMessage{ - RequestedData: network.BootstrapRequestData, - StartingBlock: *startAt, - Direction: network.Ascending, - Max: &difference, - } - err = cs.submitRequest(taskResult.request, nil, workersResults) - if err != nil { - return err - } - continue taskResultLoop - } + slices.SortFunc(responses, func(a, b 
[]*types.BlockData) int { + if a[len(a)-1].Header.Number < b[0].Header.Number { + return -1 } - } + if a[len(a)-1].Header.Number == b[0].Header.Number { + return 0 + } + return 1 + }) - retreiveBlocksSeconds := time.Since(startTime).Seconds() - logger.Infof("🔽 retrieved %d blocks, took: %.2f seconds, starting process...", - expectedSyncedBlocks, retreiveBlocksSeconds) + type hashAndNumber struct { + hash common.Hash + number uint + } - // response was validated! place into ready block queue - for _, bd := range syncingChain { - // block is ready to be processed! - if err := cs.handleReadyBlock(bd, origin); err != nil { - return fmt.Errorf("while handling ready block: %w", err) - } + compareWith := hashAndNumber{ + hash: blockHash, + number: blockNumber, } - cs.showSyncStats(startTime, len(syncingChain)) - return nil -} + disjoints := false + lastIdx := 0 -func (cs *chainSync) handleReadyBlock(bd *types.BlockData, origin blockOrigin) error { - // if header was not requested, get it from the pending set - // if we're expecting headers, validate should ensure we have a header - if bd.Header == nil { - block := cs.pendingBlocks.getBlock(bd.Hash) - if block == nil { - // block wasn't in the pending set! - // let's check the db as maybe we already processed it - has, err := cs.blockState.HasHeader(bd.Hash) - if err != nil && !errors.Is(err, database.ErrNotFound) { - logger.Debugf("failed to check if header is known for hash %s: %s", bd.Hash, err) - return err - } + okFrag := make([]*types.BlockData, 0, len(responses)) + for idx, chain := range responses { + if len(chain) == 0 { + panic("unreachable") + } - if has { - logger.Tracef("ignoring block we've already processed, hash=%s", bd.Hash) - return err - } + incrementOne := (compareWith.number + 1) == chain[0].Header.Number + isParent := compareWith.hash == chain[0].Header.ParentHash - // this is bad and shouldn't happen - logger.Errorf("block with unknown header is ready: hash=%s", bd.Hash) - return err - } + fmt.Printf("checking: in response %d, compare with %d\n", chain[0].Header.Number, compareWith.number+1) + fmt.Printf("checking: in response %s, compare with %s\n", chain[0].Header.ParentHash, compareWith.hash) - if block.header == nil { - logger.Errorf("new ready block number (unknown) with hash %s", bd.Hash) - return nil + if incrementOne && isParent { + okFrag = append(okFrag, chain...) 
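// A worked trace of the fragment matching above, with hypothetical values.
// Say the local best block is #100 with hash H100 and the sorted responses
// hold three fragments:
//
//	[#101..#128], ParentHash(#101) == H100  -> contiguous, appended to okFrag
//	[#129..#256], ParentHash(#129) == H128  -> contiguous, appended to okFrag
//	[#260..#288]                            -> #257..#259 missing, disjoint
//
// After each contiguous fragment the cursor advances to that fragment's last
// block (the compareWith update just below), so this call returns
// #101..#256 as importable and keeps [#260..#288], and everything sorted
// after it, in the disjoint set for a later round.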
+ compareWith = hashAndNumber{ + hash: chain[len(chain)-1].Hash, + number: chain[len(chain)-1].Header.Number, + } + continue } - bd.Header = block.header + lastIdx = idx + disjoints = true + break } - err := cs.processBlockData(*bd, origin) - if err != nil { - // depending on the error, we might want to save this block for later - logger.Errorf("block data processing for block with hash %s failed: %s", bd.Hash, err) - return err + if disjoints { + return okFrag, responses[lastIdx:] } - cs.pendingBlocks.removeBlock(bd.Hash) - return nil -} - -func reverseBlockData(data []*types.BlockData) { - for i, j := 0, len(data)-1; i < j; i, j = i+1, j-1 { - data[i], data[j] = data[j], data[i] - } + return okFrag, nil } // validateResponseFields checks that the expected fields are in the block data @@ -432,61 +555,3 @@ func isResponseAChain(responseBlockData []*types.BlockData) bool { return true } - -// doResponseGrowsTheChain will check if the acquired blocks grows the current chain -// matching their parent hashes -func doResponseGrowsTheChain(response, ongoingChain []*types.BlockData, startAtBlock uint, expectedTotal uint32) bool { - // the ongoing chain does not have any element, we can safely insert an item in it - if len(ongoingChain) < 1 { - return true - } - - compareParentHash := func(parent, child *types.BlockData) bool { - return parent.Header.Hash() == child.Header.ParentHash - } - - firstBlockInResponse := response[0] - firstBlockExactIndex := firstBlockInResponse.Header.Number - startAtBlock - if firstBlockExactIndex != 0 && firstBlockExactIndex < uint(expectedTotal) { - leftElement := ongoingChain[firstBlockExactIndex-1] - if leftElement != nil && !compareParentHash(leftElement, firstBlockInResponse) { - return false - } - } - - switch { - // if the response contains only one block then we should check both sides - // for example, if the response contains only one block called X we should - // check if its parent hash matches with the left element as well as we should - // check if the right element contains X hash as its parent hash - // ... W <- X -> Y ... - // we can skip left side comparison if X is in the 0 index and we can skip - // right side comparison if X is in the last index - case len(response) == 1: - if uint32(firstBlockExactIndex+1) < expectedTotal { - rightElement := ongoingChain[firstBlockExactIndex+1] - if rightElement != nil && !compareParentHash(firstBlockInResponse, rightElement) { - return false - } - } - // if the response contains more than 1 block then we need to compare - // only the start and the end of the acquired response, for example - // let's say we receive a response [C, D, E] and we need to check - // if those values fits correctly: - // ... 
B <- C D E -> F - // we skip the left check if its index is equals to 0 and we skip the right - // check if it ends in the latest position of the ongoing array - case len(response) > 1: - lastBlockInResponse := response[len(response)-1] - lastBlockExactIndex := lastBlockInResponse.Header.Number - startAtBlock - - if uint32(lastBlockExactIndex+1) < expectedTotal { - rightElement := ongoingChain[lastBlockExactIndex+1] - if rightElement != nil && !compareParentHash(lastBlockInResponse, rightElement) { - return false - } - } - } - - return true -} diff --git a/lib/sync/service.go b/lib/sync/service.go index b23703faee..5d0b583b6b 100644 --- a/lib/sync/service.go +++ b/lib/sync/service.go @@ -10,6 +10,8 @@ import ( "github.com/ChainSafe/gossamer/dot/peerset" "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/internal/log" + "github.com/ChainSafe/gossamer/lib/common" + "github.com/ChainSafe/gossamer/lib/runtime" "github.com/libp2p/go-libp2p/core/peer" ) @@ -25,6 +27,29 @@ type Network interface { type BlockState interface { BestBlockHeader() (*types.Header, error) + BestBlockNumber() (number uint, err error) + CompareAndSetBlockData(bd *types.BlockData) error + GetBlockBody(common.Hash) (*types.Body, error) + GetHeader(common.Hash) (*types.Header, error) + HasHeader(hash common.Hash) (bool, error) + Range(startHash, endHash common.Hash) (hashes []common.Hash, err error) + RangeInMemory(start, end common.Hash) ([]common.Hash, error) + GetReceipt(common.Hash) ([]byte, error) + GetMessageQueue(common.Hash) ([]byte, error) + GetJustification(common.Hash) ([]byte, error) + SetJustification(hash common.Hash, data []byte) error + GetHashByNumber(blockNumber uint) (common.Hash, error) + GetBlockByHash(common.Hash) (*types.Block, error) + GetRuntime(blockHash common.Hash) (runtime runtime.Instance, err error) + StoreRuntime(blockHash common.Hash, runtime runtime.Instance) + GetHighestFinalisedHeader() (*types.Header, error) + GetFinalisedNotifierChannel() chan *types.FinalisationInfo + GetHeaderByNumber(num uint) (*types.Header, error) + GetAllBlocksAtNumber(num uint) ([]common.Hash, error) + IsDescendantOf(parent, child common.Hash) (bool, error) + + IsPaused() bool + Pause() error } type Change struct { @@ -72,7 +97,7 @@ func NewSyncService(network Network, defaultStrategy: defaultStrategy, workerPool: newSyncWorkerPool(network), waitPeersDuration: 2 * time.Second, - minPeers: 3, + minPeers: 1, stopCh: make(chan struct{}), } } diff --git a/lib/sync/worker.go b/lib/sync/worker.go index 1560d6e62c..d58a263dfc 100644 --- a/lib/sync/worker.go +++ b/lib/sync/worker.go @@ -23,8 +23,6 @@ type worker struct { // ID of the peer this worker is associated with peerID peer.ID - - stopCh chan struct{} } // newWorker creates and returns a new worker instance. 
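The hunk below threads a `guard chan struct{}` through executeRequest, and the worker_pool.go change after it sizes and fills that channel in submitRequests: a bounded fan-out/fan-in where the buffered guard caps in-flight requests, a WaitGroup tracks completion, and a buffered result channel is closed and drained once all workers finish. A minimal, self-contained sketch of the same pattern (all names here are illustrative, not gossamer's):

package main

import (
	"fmt"
	"sync"
)

// doOne stands in for executeRequest: on exit it releases the concurrency
// slot and marks the task done; it always sends exactly one result.
func doOne(wg *sync.WaitGroup, guard chan struct{}, id int, resCh chan<- int) {
	defer func() {
		<-guard // release the slot acquired by the dispatcher
		wg.Done()
	}()
	resCh <- id * id // pretend network work
}

func main() {
	const tasks, maxInFlight = 10, 3
	guard := make(chan struct{}, maxInFlight)
	resCh := make(chan int, tasks) // buffered: senders never block
	var wg sync.WaitGroup

	for i := 0; i < tasks; i++ {
		guard <- struct{}{} // acquire before spawning, bounding concurrency
		wg.Add(1)
		go doOne(&wg, guard, i, resCh)
	}

	go func() { wg.Wait(); close(resCh) }() // close only after every send
	for r := range resCh {
		fmt.Println(r)
	}
}

In the patch itself, acquisition happens in submitRequests before the goroutine is spawned and release happens inside executeRequest, which is the same split as the dispatcher/worker split above.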
@@ -35,17 +33,18 @@ func newWorker(pID peer.ID) *worker { } } -func executeRequest(wg *sync.WaitGroup, who *worker, task *syncTask, resCh chan<- *syncTaskResult) { +func executeRequest(wg *sync.WaitGroup, who *worker, task *syncTask, guard chan struct{}, resCh chan<- *syncTaskResult) { defer func() { who.status = available + <-guard wg.Done() }() request := task.request - logger.Debugf("[EXECUTING] worker %s, block request: %s", who, request) + logger.Infof("[EXECUTING] worker %s, block request: %s", who.peerID, request) err := task.requestMaker.Do(who.peerID, request, task.response) if err != nil { - logger.Debugf("[ERR] worker %s, err: %s", who, err) + logger.Infof("[ERR] worker %s, err: %s", who.peerID, err) resCh <- &syncTaskResult{ who: who.peerID, request: request, @@ -55,7 +54,7 @@ func executeRequest(wg *sync.WaitGroup, who *worker, task *syncTask, resCh chan< return } - logger.Debugf("[FINISHED] worker %s, response: %s", who, task.response.String()) + logger.Debugf("[FINISHED] worker %s, response: %s", who.peerID, task.response.String()) resCh <- &syncTaskResult{ who: who.peerID, request: request, diff --git a/lib/sync/worker_pool.go b/lib/sync/worker_pool.go index 7c390ab889..ee344acf7c 100644 --- a/lib/sync/worker_pool.go +++ b/lib/sync/worker_pool.go @@ -85,23 +85,40 @@ func (s *syncWorkerPool) removeWorker(who peer.ID) { // submitRequests takes an set of requests and will submit to the pool through submitRequest // the response will be dispatch in the resultCh func (s *syncWorkerPool) submitRequests(tasks []*syncTask) []*syncTaskResult { + peers := s.network.AllConnectedPeersIDs() + connectedPeers := make(map[peer.ID]*worker, len(peers)) + for _, peer := range peers { + connectedPeers[peer] = newWorker(peer) + } + s.mtx.RLock() defer s.mtx.RUnlock() wg := sync.WaitGroup{} resCh := make(chan *syncTaskResult, len(tasks)) - allWorkers := maps.Values(s.workers) + for pid, w := range s.workers { + _, ok := connectedPeers[pid] + if ok { + continue + } + connectedPeers[pid] = w + } + + allWorkers := maps.Values(connectedPeers) + if len(allWorkers) == 0 { + panic("TODO: no peers to sync, what should we do?") + } + + guard := make(chan struct{}, len(allWorkers)) for idx, task := range tasks { + guard <- struct{}{} + workerID := idx % len(allWorkers) worker := allWorkers[workerID] - if worker.status != available { - continue - } - worker.status = busy wg.Add(1) - go executeRequest(&wg, worker, task, resCh) + go executeRequest(&wg, worker, task, guard, resCh) } go func() { diff --git a/pkg/trie/inmemory/iterator.go b/pkg/trie/inmemory/iterator.go index ccb4a3811c..198d8f9e19 100644 --- a/pkg/trie/inmemory/iterator.go +++ b/pkg/trie/inmemory/iterator.go @@ -145,7 +145,7 @@ func findNextNode(currentNode *node.Node, prefix, searchKey []byte) *trie.Entry // the last match between `searchKey` and `currentFullKey` if cmp == 1 { // search key is exhausted then return nil - if len(searchKey) < len(currentFullKey) { + if len(searchKey) <= len(currentFullKey) { return nil } From 41dde9d168074d8b88cdef3981012e2f2f5ab472 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Mon, 29 Jul 2024 15:31:51 -0400 Subject: [PATCH 10/74] chore: bring sync status --- lib/sync/fullsync.go | 18 +++++++++++++++--- lib/sync/service.go | 17 +++++++++++++++++ lib/sync/worker.go | 35 +++++++---------------------------- lib/sync/worker_pool.go | 31 +++++++++++++------------------ 4 files changed, 52 insertions(+), 49 deletions(-) diff --git a/lib/sync/fullsync.go b/lib/sync/fullsync.go index 9b705f780d..6ffda3b04a 
100644 --- a/lib/sync/fullsync.go +++ b/lib/sync/fullsync.go @@ -8,6 +8,7 @@ import ( "slices" "strings" "sync" + "time" "github.com/ChainSafe/gossamer/dot/network" "github.com/ChainSafe/gossamer/dot/peerset" @@ -97,6 +98,9 @@ type FullSyncStrategy struct { finalityGadget FinalityGadget blockImportHandler BlockImportHandler telemetry Telemetry + + startedAt time.Time + syncedBlocks int } func NewFullSyncStrategy(cfg *FullSyncConfig) *FullSyncStrategy { @@ -123,6 +127,8 @@ func (f *FullSyncStrategy) incompleteBlocksSync() ([]*syncTask, error) { } func (f *FullSyncStrategy) NextActions() ([]*syncTask, error) { + f.startedAt = time.Now() + if len(f.missingRequests) > 0 { return f.createTasks(f.missingRequests), nil } @@ -179,7 +185,6 @@ func (f *FullSyncStrategy) IsFinished(results []*syncTaskResult) (bool, []Change if len(blocksToImport) > 0 { for _, blockToImport := range blocksToImport { - fmt.Printf("handling block #%d (%s)\n", blockToImport.Header.Number, blockToImport.Hash.Short()) err := f.handleReadyBlock(blockToImport, networkInitialSync) if err != nil { return false, nil, nil, fmt.Errorf("while handling ready block: %w", err) @@ -188,11 +193,18 @@ func (f *FullSyncStrategy) IsFinished(results []*syncTaskResult) (bool, []Change } } - fmt.Printf("best block #%d (%s)\n", f.bestBlockHeader.Number, f.bestBlockHeader.Hash().String()) - + f.syncedBlocks = len(blocksToImport) return false, repChanges, blocks, nil } +func (f *FullSyncStrategy) ShowMetrics() { + totalSyncAndImportSeconds := time.Since(f.startedAt).Seconds() + bps := float64(f.syncedBlocks) / totalSyncAndImportSeconds + logger.Infof("⛓️ synced %d blocks, "+ + "took: %.2f seconds, bps: %.2f blocks/second, target block number #%d", + f.syncedBlocks, totalSyncAndImportSeconds, bps, f.peers.getTarget()) +} + func (f *FullSyncStrategy) OnBlockAnnounceHandshake(from peer.ID, msg *network.BlockAnnounceHandshake) error { logger.Infof("received block announce from %s: #%d (%s)", from, diff --git a/lib/sync/service.go b/lib/sync/service.go index 5d0b583b6b..4393bdd58b 100644 --- a/lib/sync/service.go +++ b/lib/sync/service.go @@ -62,6 +62,7 @@ type Strategy interface { OnBlockAnnounceHandshake(from peer.ID, msg *network.BlockAnnounceHandshake) error NextActions() ([]*syncTask, error) IsFinished(results []*syncTaskResult) (done bool, repChanges []Change, blocks []peer.ID, err error) + ShowMetrics() } type BlockOrigin byte @@ -187,6 +188,20 @@ func (s *SyncService) runSyncEngine() { // TODO: need to handle stop channel for { + finalisedHeader, err := s.blockState.GetHighestFinalisedHeader() + if err != nil { + logger.Criticalf("getting highest finalized header: %w", err) + return + } + + logger.Infof( + "🚣 currently syncing, %d peers connected, last finalised #%d (%s) ", + len(s.network.AllConnectedPeersIDs()), + s.workerPool.totalWorkers(), + finalisedHeader.Number, + finalisedHeader.Hash().Short(), + ) + tasks, err := s.currentStrategy.NextActions() if err != nil { panic(fmt.Sprintf("current sync strategy next actions failed with: %s", err.Error())) @@ -208,6 +223,8 @@ func (s *SyncService) runSyncEngine() { s.workerPool.ignorePeerAsWorker(block) } + s.currentStrategy.ShowMetrics() + if done { if s.defaultStrategy == nil { panic("nil default strategy") diff --git a/lib/sync/worker.go b/lib/sync/worker.go index d58a263dfc..7898d95da4 100644 --- a/lib/sync/worker.go +++ b/lib/sync/worker.go @@ -13,40 +13,19 @@ import ( // ErrStopTimeout is an error indicating that the worker stop operation timed out. 
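// Taken together, the service.go hunks above settle runSyncEngine into a
// round-based loop: ask the strategy for tasks, fan them out through the
// pool, feed the results back, then let the strategy report its metrics.
// A compilable sketch of that round follows; syncTask, syncTaskResult and
// the two interfaces here are simplified stand-ins, not the real types.

package sketch

import "fmt"

type syncTask struct{ n uint }
type syncTaskResult struct{ n uint }

type strategy interface {
	NextActions() ([]syncTask, error)
	IsFinished([]syncTaskResult) (done bool, err error)
	ShowMetrics()
}

type workerPool interface {
	submitRequests([]syncTask) []syncTaskResult
}

// runRound mirrors one iteration of SyncService.runSyncEngine.
func runRound(s strategy, pool workerPool) (done bool, err error) {
	tasks, err := s.NextActions()
	if err != nil {
		return false, fmt.Errorf("next actions: %w", err)
	}

	results := pool.submitRequests(tasks)

	done, err = s.IsFinished(results)
	if err != nil {
		return false, fmt.Errorf("evaluating results: %w", err)
	}

	// the real loop also applies reputation changes and peer bans here,
	// using the extra values IsFinished returns in the actual interface

	s.ShowMetrics() // per-round stats: blocks synced, blocks/s, target
	return done, nil
}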
var ErrStopTimeout = errors.New("stop timeout") -// worker represents a worker that processes sync tasks by making network requests to peers. -// It manages the synchronisation tasks between nodes in the Polkadot's peer-to-peer network. -// The primary goal of the worker is to handle and coordinate tasks related to network requests, -// ensuring that nodes stay synchronised with the blockchain state -type worker struct { - // Status of the worker (e.g., available, busy, etc.) - status byte - - // ID of the peer this worker is associated with - peerID peer.ID -} - -// newWorker creates and returns a new worker instance. -func newWorker(pID peer.ID) *worker { - return &worker{ - peerID: pID, - status: available, - } -} - -func executeRequest(wg *sync.WaitGroup, who *worker, task *syncTask, guard chan struct{}, resCh chan<- *syncTaskResult) { +func executeRequest(wg *sync.WaitGroup, who peer.ID, task *syncTask, guard chan struct{}, resCh chan<- *syncTaskResult) { defer func() { - who.status = available <-guard wg.Done() }() request := task.request - logger.Infof("[EXECUTING] worker %s, block request: %s", who.peerID, request) - err := task.requestMaker.Do(who.peerID, request, task.response) + logger.Debugf("[EXECUTING] worker %s, block request: %s", who, request) + err := task.requestMaker.Do(who, request, task.response) if err != nil { - logger.Infof("[ERR] worker %s, err: %s", who.peerID, err) + logger.Debugf("[ERR] worker %s, err: %s", who, err) resCh <- &syncTaskResult{ - who: who.peerID, + who: who, request: request, err: err, response: nil, @@ -54,9 +33,9 @@ func executeRequest(wg *sync.WaitGroup, who *worker, task *syncTask, guard chan return } - logger.Debugf("[FINISHED] worker %s, response: %s", who.peerID, task.response.String()) + logger.Debugf("[FINISHED] worker %s, response: %s", who, task.response.String()) resCh <- &syncTaskResult{ - who: who.peerID, + who: who, request: request, response: task.response, } diff --git a/lib/sync/worker_pool.go b/lib/sync/worker_pool.go index ee344acf7c..2c8e9bd2cb 100644 --- a/lib/sync/worker_pool.go +++ b/lib/sync/worker_pool.go @@ -13,12 +13,6 @@ import ( "golang.org/x/exp/maps" ) -const ( - available byte = iota - busy - punished -) - const ( punishmentBaseTimeout = 5 * time.Minute maxRequestsAllowed uint = 60 @@ -42,7 +36,7 @@ type syncWorkerPool struct { wg sync.WaitGroup network Network - workers map[peer.ID]*worker + workers map[peer.ID]struct{} ignorePeers map[peer.ID]struct{} sharedGuard chan struct{} @@ -51,7 +45,7 @@ type syncWorkerPool struct { func newSyncWorkerPool(net Network) *syncWorkerPool { swp := &syncWorkerPool{ network: net, - workers: make(map[peer.ID]*worker), + workers: make(map[peer.ID]struct{}), ignorePeers: make(map[peer.ID]struct{}), sharedGuard: make(chan struct{}, maxRequestsAllowed), } @@ -72,23 +66,17 @@ func (s *syncWorkerPool) fromBlockAnnounceHandshake(who peer.ID, bestBlockHash c return } - s.workers[who] = newWorker(who) + s.workers[who] = struct{}{} logger.Tracef("potential worker added, total in the pool %d", len(s.workers)) } -func (s *syncWorkerPool) removeWorker(who peer.ID) { - s.mtx.Lock() - defer s.mtx.Unlock() - delete(s.workers, who) -} - // submitRequests takes an set of requests and will submit to the pool through submitRequest // the response will be dispatch in the resultCh func (s *syncWorkerPool) submitRequests(tasks []*syncTask) []*syncTaskResult { peers := s.network.AllConnectedPeersIDs() - connectedPeers := make(map[peer.ID]*worker, len(peers)) + connectedPeers := 
make(map[peer.ID]struct{}, len(peers)) for _, peer := range peers { - connectedPeers[peer] = newWorker(peer) + connectedPeers[peer] = struct{}{} } s.mtx.RLock() @@ -105,7 +93,7 @@ func (s *syncWorkerPool) submitRequests(tasks []*syncTask) []*syncTaskResult { connectedPeers[pid] = w } - allWorkers := maps.Values(connectedPeers) + allWorkers := maps.Keys(connectedPeers) if len(allWorkers) == 0 { panic("TODO: no peers to sync, what should we do?") } @@ -142,6 +130,13 @@ func (s *syncWorkerPool) ignorePeerAsWorker(who peer.ID) { s.ignorePeers[who] = struct{}{} } +func (s *syncWorkerPool) removeWorker(who peer.ID) { + s.mtx.Lock() + defer s.mtx.Unlock() + + delete(s.workers, who) +} + // totalWorkers only returns available or busy workers func (s *syncWorkerPool) totalWorkers() (total int) { s.mtx.RLock() From 88b7d3b71d4419f6acb36d9b6ac68e5d8a38a1d6 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Mon, 29 Jul 2024 16:39:25 -0400 Subject: [PATCH 11/74] chore: removing panics --- lib/sync/fullsync.go | 147 ----------------------------- lib/sync/fullsync_handle_block.go | 149 ++++++++++++++++++++++++++++++ lib/sync/fullsync_test.go | 1 + lib/sync/service.go | 20 ++-- lib/sync/worker_pool.go | 15 +-- 5 files changed, 171 insertions(+), 161 deletions(-) create mode 100644 lib/sync/fullsync_handle_block.go create mode 100644 lib/sync/fullsync_test.go diff --git a/lib/sync/fullsync.go b/lib/sync/fullsync.go index 6ffda3b04a..af7e85adf1 100644 --- a/lib/sync/fullsync.go +++ b/lib/sync/fullsync.go @@ -1,7 +1,6 @@ package sync import ( - "bytes" "encoding/json" "errors" "fmt" @@ -12,7 +11,6 @@ import ( "github.com/ChainSafe/gossamer/dot/network" "github.com/ChainSafe/gossamer/dot/peerset" - "github.com/ChainSafe/gossamer/dot/telemetry" "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/lib/common" rtstorage "github.com/ChainSafe/gossamer/lib/runtime/storage" @@ -221,148 +219,6 @@ func (*FullSyncStrategy) OnBlockAnnounce(from peer.ID, msg *network.BlockAnnounc return nil } -var ErrResultsTimeout = errors.New("waiting results reached timeout") - -func (f *FullSyncStrategy) handleReadyBlock(bd *types.BlockData, origin BlockOrigin) error { - err := f.processBlockData(*bd, origin) - if err != nil { - // depending on the error, we might want to save this block for later - logger.Errorf("processing block #%d (%s) failed: %s", bd.Header.Number, bd.Hash, err) - return err - } - - return nil -} - -// processBlockData processes the BlockData from a BlockResponse and -// returns the index of the last BlockData it handled on success, -// or the index of the block data that errored on failure. 
-// TODO: https://github.com/ChainSafe/gossamer/issues/3468 -func (f *FullSyncStrategy) processBlockData(blockData types.BlockData, origin BlockOrigin) error { - // while in bootstrap mode we don't need to broadcast block announcements - // TODO: set true if not in initial sync setup - announceImportedBlock := false - - if blockData.Header != nil { - if blockData.Body != nil { - err := f.processBlockDataWithHeaderAndBody(blockData, origin, announceImportedBlock) - if err != nil { - return fmt.Errorf("processing block data with header and body: %w", err) - } - } - - if blockData.Justification != nil && len(*blockData.Justification) > 0 { - err := f.handleJustification(blockData.Header, *blockData.Justification) - if err != nil { - return fmt.Errorf("handling justification: %w", err) - } - } - } - - err := f.blockState.CompareAndSetBlockData(&blockData) - if err != nil { - return fmt.Errorf("comparing and setting block data: %w", err) - } - - return nil -} - -func (f *FullSyncStrategy) processBlockDataWithHeaderAndBody(blockData types.BlockData, - origin BlockOrigin, announceImportedBlock bool) (err error) { - - if origin != networkInitialSync { - err = f.babeVerifier.VerifyBlock(blockData.Header) - if err != nil { - return fmt.Errorf("babe verifying block: %w", err) - } - } - - f.handleBody(blockData.Body) - - block := &types.Block{ - Header: *blockData.Header, - Body: *blockData.Body, - } - - err = f.handleBlock(block, announceImportedBlock) - if err != nil { - return fmt.Errorf("handling block: %w", err) - } - - return nil -} - -// handleHeader handles block bodies included in BlockResponses -func (f *FullSyncStrategy) handleBody(body *types.Body) { - acc := 0 - for _, ext := range *body { - acc += len(ext) - f.transactionState.RemoveExtrinsic(ext) - } - - blockSizeGauge.Set(float64(acc)) -} - -// handleHeader handles blocks (header+body) included in BlockResponses -func (f *FullSyncStrategy) handleBlock(block *types.Block, announceImportedBlock bool) error { - parent, err := f.blockState.GetHeader(block.Header.ParentHash) - if err != nil { - return fmt.Errorf("%w: %s", errFailedToGetParent, err) - } - - f.storageState.Lock() - defer f.storageState.Unlock() - - ts, err := f.storageState.TrieState(&parent.StateRoot) - if err != nil { - return err - } - - root := ts.MustRoot() - if !bytes.Equal(parent.StateRoot[:], root[:]) { - panic("parent state root does not match snapshot state root") - } - - rt, err := f.blockState.GetRuntime(parent.Hash()) - if err != nil { - return err - } - - rt.SetContextStorage(ts) - - _, err = rt.ExecuteBlock(block) - if err != nil { - return fmt.Errorf("failed to execute block %d: %w", block.Header.Number, err) - } - - if err = f.blockImportHandler.HandleBlockImport(block, ts, announceImportedBlock); err != nil { - return err - } - - blockHash := block.Header.Hash() - f.telemetry.SendMessage(telemetry.NewBlockImport( - &blockHash, - block.Header.Number, - "NetworkInitialSync")) - - return nil -} - -func (f *FullSyncStrategy) handleJustification(header *types.Header, justification []byte) (err error) { - headerHash := header.Hash() - err = f.finalityGadget.VerifyBlockJustification(headerHash, justification) - if err != nil { - return fmt.Errorf("verifying block number %d justification: %w", header.Number, err) - } - - err = f.blockState.SetJustification(headerHash, justification) - if err != nil { - return fmt.Errorf("setting justification for block number %d: %w", header.Number, err) - } - - return nil -} - func validateResults(results []*syncTaskResult, 
badBlocks []string) (repChanges []Change, blocks []peer.ID, missingReqs []*network.BlockRequestMessage, validRes []*network.BlockResponseMessage) { repChanges = make([]Change, 0) @@ -500,9 +356,6 @@ func blocksAvailable(blockHash common.Hash, blockNumber uint, responses [][]*typ incrementOne := (compareWith.number + 1) == chain[0].Header.Number isParent := compareWith.hash == chain[0].Header.ParentHash - fmt.Printf("checking: in response %d, compare with %d\n", chain[0].Header.Number, compareWith.number+1) - fmt.Printf("checking: in response %s, compare with %s\n", chain[0].Header.ParentHash, compareWith.hash) - if incrementOne && isParent { okFrag = append(okFrag, chain...) compareWith = hashAndNumber{ diff --git a/lib/sync/fullsync_handle_block.go b/lib/sync/fullsync_handle_block.go new file mode 100644 index 0000000000..4e4736a6c6 --- /dev/null +++ b/lib/sync/fullsync_handle_block.go @@ -0,0 +1,149 @@ +package sync + +import ( + "bytes" + "fmt" + + "github.com/ChainSafe/gossamer/dot/telemetry" + "github.com/ChainSafe/gossamer/dot/types" +) + +func (f *FullSyncStrategy) handleReadyBlock(bd *types.BlockData, origin BlockOrigin) error { + err := f.processBlockData(*bd, origin) + if err != nil { + // depending on the error, we might want to save this block for later + logger.Errorf("processing block #%d (%s) failed: %s", bd.Header.Number, bd.Hash, err) + return err + } + + return nil +} + +// processBlockData processes the BlockData from a BlockResponse and +// returns the index of the last BlockData it handled on success, +// or the index of the block data that errored on failure. +// TODO: https://github.com/ChainSafe/gossamer/issues/3468 +func (f *FullSyncStrategy) processBlockData(blockData types.BlockData, origin BlockOrigin) error { + // while in bootstrap mode we don't need to broadcast block announcements + // TODO: set true if not in initial sync setup + announceImportedBlock := false + + if blockData.Header != nil { + if blockData.Body != nil { + err := f.processBlockDataWithHeaderAndBody(blockData, origin, announceImportedBlock) + if err != nil { + return fmt.Errorf("processing block data with header and body: %w", err) + } + } + + if blockData.Justification != nil && len(*blockData.Justification) > 0 { + err := f.handleJustification(blockData.Header, *blockData.Justification) + if err != nil { + return fmt.Errorf("handling justification: %w", err) + } + } + } + + err := f.blockState.CompareAndSetBlockData(&blockData) + if err != nil { + return fmt.Errorf("comparing and setting block data: %w", err) + } + + return nil +} + +func (f *FullSyncStrategy) processBlockDataWithHeaderAndBody(blockData types.BlockData, + origin BlockOrigin, announceImportedBlock bool) (err error) { + + if origin != networkInitialSync { + err = f.babeVerifier.VerifyBlock(blockData.Header) + if err != nil { + return fmt.Errorf("babe verifying block: %w", err) + } + } + + f.handleBody(blockData.Body) + + block := &types.Block{ + Header: *blockData.Header, + Body: *blockData.Body, + } + + err = f.handleBlock(block, announceImportedBlock) + if err != nil { + return fmt.Errorf("handling block: %w", err) + } + + return nil +} + +// handleHeader handles block bodies included in BlockResponses +func (f *FullSyncStrategy) handleBody(body *types.Body) { + acc := 0 + for _, ext := range *body { + acc += len(ext) + f.transactionState.RemoveExtrinsic(ext) + } + + blockSizeGauge.Set(float64(acc)) +} + +// handleHeader handles blocks (header+body) included in BlockResponses +func (f *FullSyncStrategy) handleBlock(block 
*types.Block, announceImportedBlock bool) error { + parent, err := f.blockState.GetHeader(block.Header.ParentHash) + if err != nil { + return fmt.Errorf("%w: %s", errFailedToGetParent, err) + } + + f.storageState.Lock() + defer f.storageState.Unlock() + + ts, err := f.storageState.TrieState(&parent.StateRoot) + if err != nil { + return err + } + + root := ts.MustRoot() + if !bytes.Equal(parent.StateRoot[:], root[:]) { + panic("parent state root does not match snapshot state root") + } + + rt, err := f.blockState.GetRuntime(parent.Hash()) + if err != nil { + return err + } + + rt.SetContextStorage(ts) + + _, err = rt.ExecuteBlock(block) + if err != nil { + return fmt.Errorf("failed to execute block %d: %w", block.Header.Number, err) + } + + if err = f.blockImportHandler.HandleBlockImport(block, ts, announceImportedBlock); err != nil { + return err + } + + blockHash := block.Header.Hash() + f.telemetry.SendMessage(telemetry.NewBlockImport( + &blockHash, + block.Header.Number, + "NetworkInitialSync")) + + return nil +} + +func (f *FullSyncStrategy) handleJustification(header *types.Header, justification []byte) (err error) { + headerHash := header.Hash() + err = f.finalityGadget.VerifyBlockJustification(headerHash, justification) + if err != nil { + return fmt.Errorf("verifying block number %d justification: %w", header.Number, err) + } + + err = f.blockState.SetJustification(headerHash, justification) + if err != nil { + return fmt.Errorf("setting justification for block number %d: %w", header.Number, err) + } + + return nil +} diff --git a/lib/sync/fullsync_test.go b/lib/sync/fullsync_test.go new file mode 100644 index 0000000000..1ca2a85edd --- /dev/null +++ b/lib/sync/fullsync_test.go @@ -0,0 +1 @@ +package sync diff --git a/lib/sync/service.go b/lib/sync/service.go index 4393bdd58b..69b583e926 100644 --- a/lib/sync/service.go +++ b/lib/sync/service.go @@ -113,8 +113,8 @@ func (s *SyncService) waitWorkers() { for { total := s.workerPool.totalWorkers() - logger.Info("waiting peers...") - logger.Infof("total workers: %d, min peers: %d", total, s.minPeers) + logger.Debugf("waiting peers...") + logger.Debugf("total workers: %d, min peers: %d", total, s.minPeers) if total >= s.minPeers { return } @@ -151,7 +151,7 @@ func (s *SyncService) Stop() error { func (s *SyncService) HandleBlockAnnounceHandshake(from peer.ID, msg *network.BlockAnnounceHandshake) error { logger.Infof("receiving a block announce handshake: %s", from.String()) - s.workerPool.fromBlockAnnounceHandshake(from, msg.BestBlockHash, uint(msg.BestBlockNumber)) + s.workerPool.fromBlockAnnounceHandshake(from) s.mu.Lock() defer s.mu.Unlock() @@ -197,18 +197,21 @@ func (s *SyncService) runSyncEngine() { logger.Infof( "🚣 currently syncing, %d peers connected, last finalised #%d (%s) ", len(s.network.AllConnectedPeersIDs()), - s.workerPool.totalWorkers(), finalisedHeader.Number, finalisedHeader.Hash().Short(), ) tasks, err := s.currentStrategy.NextActions() if err != nil { - panic(fmt.Sprintf("current sync strategy next actions failed with: %s", err.Error())) + logger.Criticalf("current sync strategy next actions failed with: %s", err.Error()) + return } - logger.Infof("sending %d tasks", len(tasks)) - results := s.workerPool.submitRequests(tasks) + results, err := s.workerPool.submitRequests(tasks) + if err != nil { + logger.Criticalf("getting highest finalized header: %w", err) + return + } done, repChanges, blocks, err := s.currentStrategy.IsFinished(results) if err != nil { @@ -227,7 +230,8 @@ func (s *SyncService) runSyncEngine() { 
if done { if s.defaultStrategy == nil { - panic("nil default strategy") + logger.Criticalf("nil default strategy") + return } s.mu.Lock() diff --git a/lib/sync/worker_pool.go b/lib/sync/worker_pool.go index 2c8e9bd2cb..599a729164 100644 --- a/lib/sync/worker_pool.go +++ b/lib/sync/worker_pool.go @@ -4,15 +4,17 @@ package sync import ( + "errors" "sync" "time" "github.com/ChainSafe/gossamer/dot/network" - "github.com/ChainSafe/gossamer/lib/common" "github.com/libp2p/go-libp2p/core/peer" "golang.org/x/exp/maps" ) +var ErrNoPeersToMakeRequest = errors.New("no peers to make requests") + const ( punishmentBaseTimeout = 5 * time.Minute maxRequestsAllowed uint = 60 @@ -33,7 +35,6 @@ type syncTaskResult struct { type syncWorkerPool struct { mtx sync.RWMutex - wg sync.WaitGroup network Network workers map[peer.ID]struct{} @@ -53,7 +54,9 @@ func newSyncWorkerPool(net Network) *syncWorkerPool { return swp } -func (s *syncWorkerPool) fromBlockAnnounceHandshake(who peer.ID, bestBlockHash common.Hash, bestBlockNumber uint) { +// fromBlockAnnounceHandshake stores the peer which send us a handshake as +// a possible source for requesting blocks/state/warp proofs +func (s *syncWorkerPool) fromBlockAnnounceHandshake(who peer.ID) { s.mtx.Lock() defer s.mtx.Unlock() @@ -72,7 +75,7 @@ func (s *syncWorkerPool) fromBlockAnnounceHandshake(who peer.ID, bestBlockHash c // submitRequests takes an set of requests and will submit to the pool through submitRequest // the response will be dispatch in the resultCh -func (s *syncWorkerPool) submitRequests(tasks []*syncTask) []*syncTaskResult { +func (s *syncWorkerPool) submitRequests(tasks []*syncTask) ([]*syncTaskResult, error) { peers := s.network.AllConnectedPeersIDs() connectedPeers := make(map[peer.ID]struct{}, len(peers)) for _, peer := range peers { @@ -95,7 +98,7 @@ func (s *syncWorkerPool) submitRequests(tasks []*syncTask) []*syncTaskResult { allWorkers := maps.Keys(connectedPeers) if len(allWorkers) == 0 { - panic("TODO: no peers to sync, what should we do?") + return nil, ErrNoPeersToMakeRequest } guard := make(chan struct{}, len(allWorkers)) @@ -119,7 +122,7 @@ func (s *syncWorkerPool) submitRequests(tasks []*syncTask) []*syncTaskResult { results = append(results, r) } - return results + return results, nil } func (s *syncWorkerPool) ignorePeerAsWorker(who peer.ID) { From 996baaab49da9a2aa477ba6bef52849cbd9562c6 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Mon, 29 Jul 2024 16:40:59 -0400 Subject: [PATCH 12/74] chore: removing unneeded code --- lib/sync/peer_view.go | 6 ------ 1 file changed, 6 deletions(-) diff --git a/lib/sync/peer_view.go b/lib/sync/peer_view.go index a44a0e40bb..3ea309252d 100644 --- a/lib/sync/peer_view.go +++ b/lib/sync/peer_view.go @@ -64,12 +64,6 @@ func (p *peerViewSet) getTarget() uint32 { return p.target } -func (p *peerViewSet) len() int { - p.mtx.RLock() - defer p.mtx.RUnlock() - return len(p.view) -} - // nonOutliersSumCount calculates the sum and count of non-outlier elements // Explanation: // IQR outlier detection From c1e43c8f30e8c87ec9c23f879e6a138670cc2321 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Tue, 6 Aug 2024 15:25:37 -0400 Subject: [PATCH 13/74] chore: add test `TestFullSyncIsFinished` --- dot/network/message.go | 24 +- lib/common/variadic/uint32OrHash.go | 21 + lib/sync/fullsync.go | 599 +++++++++++------- lib/sync/fullsync_handle_block.go | 132 ++-- lib/sync/fullsync_test.go | 277 +++++++++ lib/sync/mocks_generate_test.go | 6 + lib/sync/mocks_test.go | 743 +++++++++++++++++++++++ 
lib/sync/peer_view.go | 12 +- lib/sync/request_queue.go | 38 ++ lib/sync/service.go | 27 +- lib/sync/worker.go | 7 +- lib/sync/worker_pool.go | 74 +-- lib/utils/utils.go | 6 + scripts/retrieve_block/retrieve_block.go | 6 +- 14 files changed, 1665 insertions(+), 307 deletions(-) create mode 100644 lib/sync/mocks_generate_test.go create mode 100644 lib/sync/mocks_test.go create mode 100644 lib/sync/request_queue.go diff --git a/dot/network/message.go b/dot/network/message.go index 13fd77326a..0296d20d64 100644 --- a/dot/network/message.go +++ b/dot/network/message.go @@ -84,6 +84,10 @@ type BlockRequestMessage struct { Max *uint32 } +func (bm *BlockRequestMessage) RequestField(req byte) bool { + return (bm.RequestedData & req) == req +} + // String formats a BlockRequestMessage as a string func (bm *BlockRequestMessage) String() string { max := uint32(0) @@ -92,7 +96,7 @@ func (bm *BlockRequestMessage) String() string { } return fmt.Sprintf("BlockRequestMessage RequestedData=%d StartingBlock=%v Direction=%d Max=%d", bm.RequestedData, - bm.StartingBlock, + bm.StartingBlock.String(), bm.Direction, max) } @@ -380,7 +384,7 @@ func NewAscendingBlockRequests(startNumber, targetNumber uint, requestedData byt return []*BlockRequestMessage{} } - diff := targetNumber - (startNumber - 1) + diff := targetNumber - startNumber // start and end block are the same, just request 1 block if diff == 0 { @@ -390,26 +394,10 @@ func NewAscendingBlockRequests(startNumber, targetNumber uint, requestedData byt } numRequests := diff / MaxBlocksInResponse - // we should check if the diff is in the maxResponseSize bounds - // otherwise we should increase the numRequests by one, take this - // example, we want to sync from 1 to 259, the diff is 259 - // then the num of requests is 2 (uint(259)/uint(128)) however two requests will - // retrieve only 256 blocks (each request can retrieve a max of 128 blocks), so we should - // create one more request to retrieve those missing blocks, 3 in this example. 
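// Worked arithmetic for both versions, using the 1..259 example from the
// comment above (MaxBlocksInResponse = 128):
//
//	old: diff = 259 - (1-1) = 259; numRequests = 259/128 = 2; the remainder
//	     259 % 128 = 3 bumps numRequests to 3, so the batches cover
//	     1-128, 129-256 and 257-259
//	new: diff = 259 - 1 = 258; numRequests = 258/128 = 2, so the batches
//	     cover only 1-128 and 129-256; the 257-259 tail is picked up on the
//	     next round, since the full-sync strategy recomputes its start from
//	     bestBlockHeader.Number + 1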
- missingBlocks := diff % MaxBlocksInResponse - if missingBlocks != 0 { - numRequests++ - } reqs := make([]*BlockRequestMessage, numRequests) for i := uint(0); i < numRequests; i++ { max := uint32(MaxBlocksInResponse) - - lastIteration := numRequests - 1 - if i == lastIteration && missingBlocks != 0 { - max = uint32(missingBlocks) - } - start := variadic.MustNewUint32OrHash(startNumber) reqs[i] = NewBlockRequest(*start, max, requestedData, Ascending) startNumber += uint(max) diff --git a/lib/common/variadic/uint32OrHash.go b/lib/common/variadic/uint32OrHash.go index 4a4c7a4025..2d1eb0fff3 100644 --- a/lib/common/variadic/uint32OrHash.go +++ b/lib/common/variadic/uint32OrHash.go @@ -6,6 +6,7 @@ package variadic import ( "encoding/binary" "errors" + "fmt" "io" "github.com/ChainSafe/gossamer/lib/common" @@ -16,6 +17,18 @@ type Uint32OrHash struct { value interface{} } +func FromHash(hash common.Hash) *Uint32OrHash { + return &Uint32OrHash{ + value: hash, + } +} + +func FromUint32(value uint32) *Uint32OrHash { + return &Uint32OrHash{ + value: value, + } +} + // NewUint32OrHash returns a new variadic.Uint32OrHash given an int, uint32, or Hash func NewUint32OrHash(value interface{}) (*Uint32OrHash, error) { switch v := value.(type) { @@ -97,6 +110,14 @@ func (x *Uint32OrHash) Hash() common.Hash { return x.value.(common.Hash) } +func (x *Uint32OrHash) String() string { + if x.IsHash() { + return x.Hash().String() + } + + return fmt.Sprintf("%d", x.value) +} + // IsUint32 returns true if the value is a uint32 func (x *Uint32OrHash) IsUint32() bool { if x == nil { diff --git a/lib/sync/fullsync.go b/lib/sync/fullsync.go index af7e85adf1..74ef7bdb2f 100644 --- a/lib/sync/fullsync.go +++ b/lib/sync/fullsync.go @@ -1,32 +1,35 @@ package sync import ( - "encoding/json" + "container/list" "errors" "fmt" "slices" - "strings" "sync" "time" "github.com/ChainSafe/gossamer/dot/network" "github.com/ChainSafe/gossamer/dot/peerset" "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/internal/database" "github.com/ChainSafe/gossamer/lib/common" - rtstorage "github.com/ChainSafe/gossamer/lib/runtime/storage" + "github.com/ChainSafe/gossamer/lib/common/variadic" + "github.com/libp2p/go-libp2p/core/peer" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" ) +const defaultNumOfTasks = 3 + var _ Strategy = (*FullSyncStrategy)(nil) var ( - errFailedToGetParent = errors.New("failed to get parent header") - errNilBlockData = errors.New("block data is nil") - errNilHeaderInResponse = errors.New("expected header, received none") - errNilBodyInResponse = errors.New("expected body, received none") - errNilJustificationInResponse = errors.New("expected justification, received none") + errFailedToGetParent = errors.New("failed to get parent header") + errNilHeaderInResponse = errors.New("expected header, received none") + errNilBodyInResponse = errors.New("expected body, received none") + errPeerOnInvalidFork = errors.New("peer is on an invalid fork") + errMismatchBestBlockAnnouncement = errors.New("mismatch best block announcement") blockSizeGauge = promauto.NewGauge(prometheus.GaugeOpts{ Namespace: "gossamer_sync", @@ -35,84 +38,124 @@ var ( }) ) -type ( - // Telemetry is the telemetry client to send telemetry messages. - Telemetry interface { - SendMessage(msg json.Marshaler) - } +// Config is the configuration for the sync Service. 
+type FullSyncConfig struct { + StartHeader *types.Header + StorageState StorageState + TransactionState TransactionState + BabeVerifier BabeVerifier + FinalityGadget FinalityGadget + BlockImportHandler BlockImportHandler + Telemetry Telemetry + BlockState BlockState + BadBlocks []string + NumOfTasks int + RequestMaker network.RequestMaker +} - // StorageState is the interface for the storage state - StorageState interface { - TrieState(root *common.Hash) (*rtstorage.TrieState, error) - sync.Locker - } +type unreadyBlocks struct { + mu sync.Mutex + incompleteBlocks map[common.Hash]*types.BlockData + disjointChains [][]*types.BlockData +} - // TransactionState is the interface for transaction queue methods - TransactionState interface { - RemoveExtrinsic(ext types.Extrinsic) +func (u *unreadyBlocks) newHeader(blockHeader *types.Header) { + u.mu.Lock() + defer u.mu.Unlock() + + blockHash := blockHeader.Hash() + u.incompleteBlocks[blockHash] = &types.BlockData{ + Hash: blockHash, + Header: blockHeader, } +} + +func (u *unreadyBlocks) newFragment(frag []*types.BlockData) { + u.mu.Lock() + defer u.mu.Unlock() + + u.disjointChains = append(u.disjointChains, frag) +} - // BabeVerifier deals with BABE block verification - BabeVerifier interface { - VerifyBlock(header *types.Header) error +func (u *unreadyBlocks) updateDisjointFragments(chain []*types.BlockData) ([]*types.BlockData, bool) { + u.mu.Lock() + defer u.mu.Unlock() + + indexToChange := -1 + for idx, disjointChain := range u.disjointChains { + lastBlockArriving := chain[len(chain)-1] + firstDisjointBlock := disjointChain[0] + if formsSequence(lastBlockArriving, firstDisjointBlock) { + indexToChange = idx + break + } } - // FinalityGadget implements justification verification functionality - FinalityGadget interface { - VerifyBlockJustification(common.Hash, []byte) error + if indexToChange >= 0 { + disjointChain := u.disjointChains[indexToChange] + u.disjointChains = append(u.disjointChains[:indexToChange], u.disjointChains[indexToChange+1:]...) + return append(chain, disjointChain...), true } - // BlockImportHandler is the interface for the handler of newly imported blocks - BlockImportHandler interface { - HandleBlockImport(block *types.Block, state *rtstorage.TrieState, announce bool) error + return nil, false +} + +func (u *unreadyBlocks) updateIncompleteBlocks(chain []*types.BlockData) []*types.BlockData { + u.mu.Lock() + defer u.mu.Unlock() + + completeBlocks := make([]*types.BlockData, 0) + for _, blockData := range chain { + incomplete, ok := u.incompleteBlocks[blockData.Hash] + if !ok { + continue + } + + incomplete.Body = blockData.Body + incomplete.Justification = blockData.Justification + + delete(u.incompleteBlocks, blockData.Hash) + completeBlocks = append(completeBlocks, incomplete) } -) -// Config is the configuration for the sync Service. 
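As a usage sketch of the unreadyBlocks bookkeeping above (completeAnnouncedBlock is a hypothetical helper, not part of this patch): a header learned from a block announcement is parked as incomplete, and a later body/justification response completes it.

// completeAnnouncedBlock parks the announced header and then returns the
// entries whose bodies arrived in the given response, ready for import.
func completeAnnouncedBlock(u *unreadyBlocks, announced *types.Header,
	response []*types.BlockData) []*types.BlockData {
	u.newHeader(announced)                    // header-only entry, keyed by hash
	return u.updateIncompleteBlocks(response) // now complete: header + body
}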
-type FullSyncConfig struct { - StartHeader *types.Header - BlockState BlockState - StorageState StorageState - FinalityGadget FinalityGadget - TransactionState TransactionState - BlockImportHandler BlockImportHandler - BabeVerifier BabeVerifier - Telemetry Telemetry - BadBlocks []string - RequestMaker network.RequestMaker + return completeBlocks +} + +type Importer interface { + handle(*types.BlockData, BlockOrigin) (imported bool, err error) } type FullSyncStrategy struct { - bestBlockHeader *types.Header - missingRequests []*network.BlockRequestMessage - disjointBlocks [][]*types.BlockData - peers *peerViewSet - badBlocks []string - reqMaker network.RequestMaker - blockState BlockState - storageState StorageState - transactionState TransactionState - babeVerifier BabeVerifier - finalityGadget FinalityGadget - blockImportHandler BlockImportHandler - telemetry Telemetry - - startedAt time.Time - syncedBlocks int + requestQueue *requestsQueue[*network.BlockRequestMessage] + unreadyBlocks *unreadyBlocks + peers *peerViewSet + badBlocks []string + reqMaker network.RequestMaker + blockState BlockState + numOfTasks int + startedAt time.Time + syncedBlocks int + importer Importer } func NewFullSyncStrategy(cfg *FullSyncConfig) *FullSyncStrategy { + if cfg.NumOfTasks == 0 { + cfg.NumOfTasks = defaultNumOfTasks + } + return &FullSyncStrategy{ - badBlocks: cfg.BadBlocks, - bestBlockHeader: cfg.StartHeader, - reqMaker: cfg.RequestMaker, - blockState: cfg.BlockState, - storageState: cfg.StorageState, - transactionState: cfg.TransactionState, - babeVerifier: cfg.BabeVerifier, - finalityGadget: cfg.FinalityGadget, - blockImportHandler: cfg.BlockImportHandler, - telemetry: cfg.Telemetry, + badBlocks: cfg.BadBlocks, + reqMaker: cfg.RequestMaker, + blockState: cfg.BlockState, + numOfTasks: cfg.NumOfTasks, + importer: newBlockImporter(cfg), + unreadyBlocks: &unreadyBlocks{ + incompleteBlocks: make(map[common.Hash]*types.BlockData), + disjointChains: make([][]*types.BlockData, 0), + }, + requestQueue: &requestsQueue[*network.BlockRequestMessage]{ + queue: list.New(), + }, peers: &peerViewSet{ view: make(map[peer.ID]peerView), target: 0, @@ -120,35 +163,44 @@ func NewFullSyncStrategy(cfg *FullSyncConfig) *FullSyncStrategy { } } -func (f *FullSyncStrategy) incompleteBlocksSync() ([]*syncTask, error) { - panic("incompleteBlocksSync not implemented yet") -} - func (f *FullSyncStrategy) NextActions() ([]*syncTask, error) { f.startedAt = time.Now() + f.syncedBlocks = 0 - if len(f.missingRequests) > 0 { - return f.createTasks(f.missingRequests), nil + if f.requestQueue.Len() > 0 { + message, _ := f.requestQueue.PopFront() + return f.createTasks([]*network.BlockRequestMessage{message}), nil } currentTarget := f.peers.getTarget() - // our best block is equal or ahead of current target - // we're not legging behind, so let's set the set of - // incomplete blocks and request them - if uint32(f.bestBlockHeader.Number) >= currentTarget { - return f.incompleteBlocksSync() + bestBlockHeader, err := f.blockState.BestBlockHeader() + if err != nil { + return nil, fmt.Errorf("while getting best block header") } - startRequestAt := f.bestBlockHeader.Number + 1 - targetBlockNumber := startRequestAt + 60*128 + // our best block is equal or ahead of current target. 
+	// from this node's point of view we are not lagging behind, so there's nothing to do
+	if uint32(bestBlockHeader.Number) >= currentTarget {
+		return nil, nil
+	}
+
+	startRequestAt := bestBlockHeader.Number + 1
+
+	// cap how far ahead we request: a single request can retrieve at most
+	// 128 blocks, so the target is at most 128 blocks ahead of the
+	// starting block, and never beyond the current sync target
+	targetBlockNumber := startRequestAt + 128
 	if targetBlockNumber > uint(currentTarget) {
 		targetBlockNumber = uint(currentTarget)
 	}
 
-	requests := network.NewAscendingBlockRequests(startRequestAt, targetBlockNumber,
+	ascendingBlockRequests := network.NewAscendingBlockRequests(startRequestAt, targetBlockNumber,
 		network.BootstrapRequestData)
-	return f.createTasks(requests), nil
+
+	return f.createTasks(ascendingBlockRequests), nil
 }
 
 func (f *FullSyncStrategy) createTasks(requests []*network.BlockRequestMessage) []*syncTask {
@@ -164,99 +216,234 @@ func (f *FullSyncStrategy) createTasks(requests []*network.BlockRequestMessage)
 }
 
 func (f *FullSyncStrategy) IsFinished(results []*syncTaskResult) (bool, []Change, []peer.ID, error) {
-	repChanges, blocks, missingReq, validResp := validateResults(results, f.badBlocks)
-	f.missingRequests = missingReq
+	repChanges, peersToIgnore, validResp := validateResults(results, f.badBlocks)
+
+	validBlocksUnderFragment := func(highestFinalizedNumber uint, fragmentBlocks []*types.BlockData) []*types.BlockData {
+		startFragmentFrom := -1
+		for idx, block := range fragmentBlocks {
+			if block.Header.Number > highestFinalizedNumber {
+				startFragmentFrom = idx
+				break
+			}
+		}
+
+		if startFragmentFrom < 0 {
+			return nil
+		}
 
-	if f.disjointBlocks == nil {
-		f.disjointBlocks = make([][]*types.BlockData, 0)
+		return fragmentBlocks[startFragmentFrom:]
 	}
 
-	// merge validResp with the current disjoint blocks
-	for _, resp := range validResp {
-		f.disjointBlocks = append(f.disjointBlocks, resp.BlockData)
+	highestFinalized, err := f.blockState.GetHighestFinalisedHeader()
+	if err != nil {
+		return false, nil, nil, fmt.Errorf("getting highest finalized header: %w", err)
	}
 
-	// given the validResponses, can we start importing the blocks or
-	// we should wait for the missing requests to fill the gap?
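A standalone sketch of what the validBlocksUnderFragment closure above computes (trimFinalized is a hypothetical name for illustration): it drops the prefix of a fragment that sits at or below the highest finalized block, since those blocks can no longer be imported.

// trimFinalized keeps only the suffix of the fragment whose block numbers
// are strictly above the highest finalized block number.
func trimFinalized(highestFinalized uint, fragment []*types.BlockData) []*types.BlockData {
	for idx, block := range fragment {
		if block.Header.Number > highestFinalized {
			return fragment[idx:]
		}
	}
	return nil // the whole fragment is at or below the finalized head
}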
-	blocksToImport, disjointBlocks := blocksAvailable(f.bestBlockHeader.Hash(), f.bestBlockHeader.Number, f.disjointBlocks)
-	f.disjointBlocks = disjointBlocks
+	readyBlocks := make([][]*types.BlockData, 0, len(validResp))
+	for _, reqRespData := range validResp {
+		// if Gossamer requested the header, then the response data should
+		// contain the full blocks to be imported; if Gossamer did not
+		// request the header, then the response should only contain the
+		// missing parts that will complete the unreadyBlocks entries,
+		// and once those blocks are complete we should be able to import them
+
+		if reqRespData.req.RequestField(network.RequestedDataHeader) {
+			updatedFragment, ok := f.unreadyBlocks.updateDisjointFragments(reqRespData.responseData)
+			if ok {
+				validBlocks := validBlocksUnderFragment(highestFinalized.Number, updatedFragment)
+				if len(validBlocks) > 0 {
+					readyBlocks = append(readyBlocks, validBlocks)
+				}
+			} else {
+				readyBlocks = append(readyBlocks, reqRespData.responseData)
+			}
+			continue
+		}
+
+		completedBlocks := f.unreadyBlocks.updateIncompleteBlocks(reqRespData.responseData)
+		readyBlocks = append(readyBlocks, completedBlocks)
+	}
+
+	// disjoint fragments are pieces of the chain that cannot be imported right now,
+	// either because their blocks are too far ahead or because they belong to forks
+	orderedFragments := sortFragmentsOfChain(readyBlocks)
+	orderedFragments = mergeFragmentsOfChain(orderedFragments)
+
+	nextBlocksToImport := make([]*types.BlockData, 0)
+	disjointFragments := make([][]*types.BlockData, 0)
+	for _, fragment := range orderedFragments {
+		ok, err := f.blockState.HasHeader(fragment[0].Header.ParentHash)
+		if err != nil && !errors.Is(err, database.ErrNotFound) {
+			return false, nil, nil, fmt.Errorf("checking block parent header: %w", err)
+		}
+
+		if ok {
+			nextBlocksToImport = append(nextBlocksToImport, fragment...)
+			continue
+		}
+
+		disjointFragments = append(disjointFragments, fragment)
+	}
 
-	if len(blocksToImport) > 0 {
-		for _, blockToImport := range blocksToImport {
-			err := f.handleReadyBlock(blockToImport, networkInitialSync)
+	for len(nextBlocksToImport) > 0 || len(disjointFragments) > 0 {
+		for _, blockToImport := range nextBlocksToImport {
+			imported, err := f.importer.handle(blockToImport, networkInitialSync)
 			if err != nil {
 				return false, nil, nil, fmt.Errorf("while handling ready block: %w", err)
 			}
-			f.bestBlockHeader = blockToImport.Header
+
+			if imported {
+				f.syncedBlocks += 1
+			}
+		}
+
+		nextBlocksToImport = make([]*types.BlockData, 0)
+
+		// check whether blocks from the disjoint set can be imported on their own forks:
+		// each fragment is a chain of blocks, so check whether the first
+		// block in the chain has a parent already known to us
+		for _, fragment := range disjointFragments {
+			highestFinalized, err := f.blockState.GetHighestFinalisedHeader()
+			if err != nil {
+				return false, nil, nil, fmt.Errorf("getting highest finalized header: %w", err)
+			}
+
+			validFragment := validBlocksUnderFragment(highestFinalized.Number, fragment)
+			if len(validFragment) == 0 {
+				continue
+			}
+
+			ok, err := f.blockState.HasHeader(validFragment[0].Header.ParentHash)
+			if err != nil && !errors.Is(err, database.ErrNotFound) {
+				return false, nil, nil, err
+			}
+
+			if !ok {
+				logger.Infof("starting an ancestor search from %s parent of #%d (%s)",
+					validFragment[0].Header.ParentHash,
+					validFragment[0].Header.Number,
+					validFragment[0].Header.Hash(),
+				)
+
+				f.unreadyBlocks.newFragment(validFragment)
+				request := network.NewBlockRequest(
+					*variadic.FromHash(validFragment[0].Header.ParentHash),
+					network.MaxBlocksInResponse,
+					network.BootstrapRequestData, network.Descending)
+
+				f.requestQueue.PushBack(request)
+			} else {
+				// insert them in the queue to be processed after the main chain
+				nextBlocksToImport = append(nextBlocksToImport, validFragment...)
+ } } + + disjointFragments = nil } - f.syncedBlocks = len(blocksToImport) - return false, repChanges, blocks, nil + return false, repChanges, peersToIgnore, nil } func (f *FullSyncStrategy) ShowMetrics() { totalSyncAndImportSeconds := time.Since(f.startedAt).Seconds() bps := float64(f.syncedBlocks) / totalSyncAndImportSeconds - logger.Infof("⛓️ synced %d blocks, "+ + logger.Infof("⛓️ synced %d blocks, disjoint fragments %d, incomplete blocks %d, "+ "took: %.2f seconds, bps: %.2f blocks/second, target block number #%d", - f.syncedBlocks, totalSyncAndImportSeconds, bps, f.peers.getTarget()) + f.syncedBlocks, len(f.unreadyBlocks.disjointChains), len(f.unreadyBlocks.incompleteBlocks), + totalSyncAndImportSeconds, bps, f.peers.getTarget()) } func (f *FullSyncStrategy) OnBlockAnnounceHandshake(from peer.ID, msg *network.BlockAnnounceHandshake) error { - logger.Infof("received block announce from %s: #%d (%s)", + f.peers.update(from, msg.BestBlockHash, msg.BestBlockNumber) + return nil +} + +func (f *FullSyncStrategy) OnBlockAnnounce(from peer.ID, msg *network.BlockAnnounceMessage) (repChange *Change, err error) { + if f.blockState.IsPaused() { + return nil, errors.New("blockstate service is paused") + } + + if msg.BestBlock { + peerView := f.peers.get(from) + if uint(peerView.bestBlockNumber) != msg.Number { + repChange = &Change{ + who: from, + rep: peerset.ReputationChange{ + Value: peerset.BadBlockAnnouncementValue, + Reason: peerset.BadBlockAnnouncementReason, + }, + } + return repChange, fmt.Errorf("%w: peer %s, on handshake #%d, on announce #%d", + errMismatchBestBlockAnnouncement, from, peerView.bestBlockNumber, msg.Number) + } + } + + currentTarget := f.peers.getTarget() + if msg.Number >= uint(currentTarget) { + return nil, nil + } + + blockAnnounceHeader := types.NewHeader(msg.ParentHash, msg.StateRoot, msg.ExtrinsicsRoot, msg.Number, msg.Digest) + blockAnnounceHeaderHash := blockAnnounceHeader.Hash() + + logger.Infof("received block announce from %s: #%d (%s) best block: %v", from, - msg.BestBlockNumber, - msg.BestBlockHash.Short(), + blockAnnounceHeader.Number, + blockAnnounceHeaderHash, + msg.BestBlock, ) - f.peers.update(from, msg.BestBlockHash, msg.BestBlockNumber) - return nil + // check if their best block is on an invalid chain, if it is, + // potentially downscore them for now, we can remove them from the syncing peers set + highestFinalized, err := f.blockState.GetHighestFinalisedHeader() + if err != nil { + return nil, fmt.Errorf("get highest finalised header: %w", err) + } + + if blockAnnounceHeader.Number <= highestFinalized.Number { + repChange = &Change{ + who: from, + rep: peerset.ReputationChange{ + Value: peerset.BadBlockAnnouncementValue, + Reason: peerset.BadBlockAnnouncementReason, + }, + } + return repChange, fmt.Errorf("%w: peer %s, block number #%d (%s)", + errPeerOnInvalidFork, from, blockAnnounceHeader.Number, blockAnnounceHeaderHash.String()) + } + + has, err := f.blockState.HasHeader(blockAnnounceHeaderHash) + if err != nil { + return nil, fmt.Errorf("checking if header exists: %w", err) + } + + if !has { + f.unreadyBlocks.newHeader(blockAnnounceHeader) + request := network.NewBlockRequest(*variadic.FromHash(blockAnnounceHeaderHash), + 1, network.RequestedDataBody+network.RequestedDataJustification, network.Ascending) + f.requestQueue.PushBack(request) + } + + return nil, nil } -func (*FullSyncStrategy) OnBlockAnnounce(from peer.ID, msg *network.BlockAnnounceMessage) error { - logger.Infof("received block announce: %d", msg.Number) - return nil +type 
RequestResponseData struct { + req *network.BlockRequestMessage + responseData []*types.BlockData } -func validateResults(results []*syncTaskResult, badBlocks []string) (repChanges []Change, blocks []peer.ID, - missingReqs []*network.BlockRequestMessage, validRes []*network.BlockResponseMessage) { - repChanges = make([]Change, 0) - blocks = make([]peer.ID, 0) +func validateResults(results []*syncTaskResult, badBlocks []string) (repChanges []Change, + peersToBlock []peer.ID, validRes []RequestResponseData) { - missingReqs = make([]*network.BlockRequestMessage, 0, len(results)) - validRes = make([]*network.BlockResponseMessage, 0, len(results)) + repChanges = make([]Change, 0) + peersToBlock = make([]peer.ID, 0) + validRes = make([]RequestResponseData, 0, len(results)) resultLoop: for _, result := range results { request := result.request.(*network.BlockRequestMessage) - if result.err != nil { - if !errors.Is(result.err, network.ErrReceivedEmptyMessage) { - blocks = append(blocks, result.who) - - if strings.Contains(result.err.Error(), "protocols not supported") { - repChanges = append(repChanges, Change{ - who: result.who, - rep: peerset.ReputationChange{ - Value: peerset.BadProtocolValue, - Reason: peerset.BadProtocolReason, - }, - }) - } - - if errors.Is(result.err, network.ErrNilBlockInResponse) { - repChanges = append(repChanges, Change{ - who: result.who, - rep: peerset.ReputationChange{ - Value: peerset.BadMessageValue, - Reason: peerset.BadMessageReason, - }, - }) - } - } - - missingReqs = append(missingReqs, request) + if !result.completed { continue } @@ -266,7 +453,7 @@ resultLoop: slices.Reverse(response.BlockData) } - err := validateResponseFields(request.RequestedData, response.BlockData) + err := validateResponseFields(request, response.BlockData) if err != nil { logger.Criticalf("validating fields: %s", err) // TODO: check the reputation change for nil body in response @@ -281,13 +468,23 @@ resultLoop: }) } - missingReqs = append(missingReqs, request) + //missingReqs = append(missingReqs, request) continue } - if !isResponseAChain(response.BlockData) { + // only check if the responses forms a chain if the response contains the headers + // of each block, othewise the response might only have the body/justification for + // a block + if request.RequestField(network.RequestedDataHeader) && !isResponseAChain(response.BlockData) { logger.Criticalf("response from %s is not a chain", result.who) - missingReqs = append(missingReqs, request) + repChanges = append(repChanges, Change{ + who: result.who, + rep: peerset.ReputationChange{ + Value: peerset.IncompleteHeaderValue, + Reason: peerset.IncompleteHeaderReason, + }, + }) + //missingReqs = append(missingReqs, request) continue } @@ -296,7 +493,7 @@ resultLoop: logger.Criticalf("%s sent a known bad block: #%d (%s)", result.who, block.Number(), block.Hash.String()) - blocks = append(blocks, result.who) + peersToBlock = append(peersToBlock, result.who) repChanges = append(repChanges, Change{ who: result.who, rep: peerset.ReputationChange{ @@ -305,98 +502,92 @@ resultLoop: }, }) - missingReqs = append(missingReqs, request) + //missingReqs = append(missingReqs, request) continue resultLoop } } - validRes = append(validRes, response) + validRes = append(validRes, RequestResponseData{ + req: request, + responseData: response.BlockData, + }) } - return repChanges, blocks, missingReqs, validRes + return repChanges, peersToBlock, validRes } -// blocksAvailable given a set of responses, which are fragments of the chain we should -// check if 
there are fragments that can be imported or fragments that are disjoint (cannot be imported yet)
-func blocksAvailable(blockHash common.Hash, blockNumber uint, responses [][]*types.BlockData) (
-	[]*types.BlockData, [][]*types.BlockData) {
+// sortFragmentsOfChain organizes the fragments so that
+// the older blocks can be imported first, while also guaranteeing that
+// forks are imported after the main chain by ordering them accordingly
+//
+// e.g. consider the following fragments of a chain:
+// [ {17} {1, 2, 3, 4, 5} {6, 7, 8, 9, 10} {8} {11, 12, 13, 14, 15, 16} ]
+//
+// note that there are fragments with single blocks and a fragment that is a fork (the {8});
+// after sorting these fragments we end up with:
+// [ {1, 2, 3, 4, 5} {6, 7, 8, 9, 10} {8} {11, 12, 13, 14, 15, 16} {17} ]
+func sortFragmentsOfChain(responses [][]*types.BlockData) [][]*types.BlockData {
 	if len(responses) == 0 {
-		return nil, nil
+		return nil
 	}
 
 	slices.SortFunc(responses, func(a, b []*types.BlockData) int {
-		if a[len(a)-1].Header.Number < b[0].Header.Number {
+		if a[0].Header.Number < b[0].Header.Number {
 			return -1
 		}
-		if a[len(a)-1].Header.Number == b[0].Header.Number {
+		if a[0].Header.Number == b[0].Header.Number {
 			return 0
 		}
 		return 1
 	})
 
-	type hashAndNumber struct {
-		hash   common.Hash
-		number uint
-	}
+	return responses
+}
 
-	compareWith := hashAndNumber{
-		hash:   blockHash,
-		number: blockNumber,
+// mergeFragmentsOfChain merges, within a sorted slice of fragments, those that
+// form a valid chain sequence, where the last block of one fragment is the
+// direct parent of the first block of the next, and keeps untouched any
+// fragments that do not form such a sequence.
+// Take as an example the following sorted slice:
+// [ {1, 2, 3, 4, 5} {6, 7, 8, 9, 10} {8} {11, 12, 13, 14, 15, 16} {17} ]
+// merge will transform it into the following slice:
+// [ {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17} {8} ]
+func mergeFragmentsOfChain(fragments [][]*types.BlockData) [][]*types.BlockData {
+	if len(fragments) == 0 {
+		return nil
 	}
 
-	disjoints := false
-	lastIdx := 0
+	mergedFragments := [][]*types.BlockData{fragments[0]}
+	for i := 1; i < len(fragments); i++ {
+		lastMerged := mergedFragments[len(mergedFragments)-1]
+		current := fragments[i]
 
-	okFrag := make([]*types.BlockData, 0, len(responses))
-	for idx, chain := range responses {
-		if len(chain) == 0 {
-			panic("unreachable")
-		}
-
-		incrementOne := (compareWith.number + 1) == chain[0].Header.Number
-		isParent := compareWith.hash == chain[0].Header.ParentHash
-
-		if incrementOne && isParent {
-			okFrag = append(okFrag, chain...)
-			compareWith = hashAndNumber{
-				hash:   chain[len(chain)-1].Hash,
-				number: chain[len(chain)-1].Header.Number,
-			}
-			continue
+		if formsSequence(lastMerged[len(lastMerged)-1], current[0]) {
+			mergedFragments[len(mergedFragments)-1] = append(lastMerged, current...)
+ } else { + mergedFragments = append(mergedFragments, current) } - - lastIdx = idx - disjoints = true - break } - if disjoints { - return okFrag, responses[lastIdx:] - } + return mergedFragments +} + +func formsSequence(last, curr *types.BlockData) bool { + incrementOne := (last.Header.Number + 1) == curr.Header.Number + isParent := last.Hash == curr.Header.ParentHash - return okFrag, nil + return incrementOne && isParent } // validateResponseFields checks that the expected fields are in the block data -func validateResponseFields(requestedData byte, blocks []*types.BlockData) error { +func validateResponseFields(req *network.BlockRequestMessage, blocks []*types.BlockData) error { for _, bd := range blocks { - if bd == nil { - return errNilBlockData - } - - if (requestedData&network.RequestedDataHeader) == network.RequestedDataHeader && bd.Header == nil { + if req.RequestField(network.RequestedDataHeader) && bd.Header == nil { return fmt.Errorf("%w: %s", errNilHeaderInResponse, bd.Hash) } - if (requestedData&network.RequestedDataBody) == network.RequestedDataBody && bd.Body == nil { + if req.RequestField(network.RequestedDataBody) && bd.Body == nil { return fmt.Errorf("%w: %s", errNilBodyInResponse, bd.Hash) } - - // if we requested strictly justification - if (requestedData|network.RequestedDataJustification) == network.RequestedDataJustification && - bd.Justification == nil { - return fmt.Errorf("%w: %s", errNilJustificationInResponse, bd.Hash) - } } return nil diff --git a/lib/sync/fullsync_handle_block.go b/lib/sync/fullsync_handle_block.go index 4e4736a6c6..4db4b25429 100644 --- a/lib/sync/fullsync_handle_block.go +++ b/lib/sync/fullsync_handle_block.go @@ -2,49 +2,118 @@ package sync import ( "bytes" + "encoding/json" + "errors" "fmt" + "sync" "github.com/ChainSafe/gossamer/dot/telemetry" "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/internal/database" + "github.com/ChainSafe/gossamer/lib/common" + rtstorage "github.com/ChainSafe/gossamer/lib/runtime/storage" ) -func (f *FullSyncStrategy) handleReadyBlock(bd *types.BlockData, origin BlockOrigin) error { - err := f.processBlockData(*bd, origin) +type ( + // Telemetry is the telemetry client to send telemetry messages. 
+ Telemetry interface { + SendMessage(msg json.Marshaler) + } + + // StorageState is the interface for the storage state + StorageState interface { + TrieState(root *common.Hash) (*rtstorage.TrieState, error) + sync.Locker + } + + // TransactionState is the interface for transaction queue methods + TransactionState interface { + RemoveExtrinsic(ext types.Extrinsic) + } + + // BabeVerifier deals with BABE block verification + BabeVerifier interface { + VerifyBlock(header *types.Header) error + } + + // FinalityGadget implements justification verification functionality + FinalityGadget interface { + VerifyBlockJustification(common.Hash, []byte) error + } + + // BlockImportHandler is the interface for the handler of newly imported blocks + BlockImportHandler interface { + HandleBlockImport(block *types.Block, state *rtstorage.TrieState, announce bool) error + } +) + +type blockImporter struct { + blockState BlockState + storageState StorageState + transactionState TransactionState + babeVerifier BabeVerifier + finalityGadget FinalityGadget + blockImportHandler BlockImportHandler + telemetry Telemetry +} + +func newBlockImporter(cfg *FullSyncConfig) *blockImporter { + return &blockImporter{ + storageState: cfg.StorageState, + transactionState: cfg.TransactionState, + babeVerifier: cfg.BabeVerifier, + finalityGadget: cfg.FinalityGadget, + blockImportHandler: cfg.BlockImportHandler, + telemetry: cfg.Telemetry, + } +} + +func (b *blockImporter) handle(bd *types.BlockData, origin BlockOrigin) (imported bool, err error) { + blockAlreadyExists, err := b.blockState.HasHeader(bd.Hash) + if err != nil && !errors.Is(err, database.ErrNotFound) { + return false, err + } + + if blockAlreadyExists { + return false, nil + } + + err = b.processBlockData(*bd, origin) if err != nil { // depending on the error, we might want to save this block for later logger.Errorf("processing block #%d (%s) failed: %s", bd.Header.Number, bd.Hash, err) - return err + return false, err } - return nil + return true, nil } // processBlockData processes the BlockData from a BlockResponse and // returns the index of the last BlockData it handled on success, // or the index of the block data that errored on failure. 
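A hypothetical caller-side sketch (importAll is not part of this patch) of how the duplicate-header guard in handle above is meant to be used: re-delivered blocks come back with imported=false and are skipped instead of being re-executed.

// importAll drives the importer over a batch and counts only the blocks
// that were actually imported, skipping already-known headers.
func importAll(imp Importer, blocks []*types.BlockData) (imported int, err error) {
	for _, bd := range blocks {
		ok, err := imp.handle(bd, networkInitialSync)
		if err != nil {
			return imported, fmt.Errorf("handling block %s: %w", bd.Hash, err)
		}
		if ok { // false means the header was already known: a no-op
			imported++
		}
	}
	return imported, nil
}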
// TODO: https://github.com/ChainSafe/gossamer/issues/3468 -func (f *FullSyncStrategy) processBlockData(blockData types.BlockData, origin BlockOrigin) error { +func (b *blockImporter) processBlockData(blockData types.BlockData, origin BlockOrigin) error { // while in bootstrap mode we don't need to broadcast block announcements // TODO: set true if not in initial sync setup announceImportedBlock := false if blockData.Header != nil { if blockData.Body != nil { - err := f.processBlockDataWithHeaderAndBody(blockData, origin, announceImportedBlock) + err := b.processBlockDataWithHeaderAndBody(blockData, origin, announceImportedBlock) if err != nil { return fmt.Errorf("processing block data with header and body: %w", err) } } if blockData.Justification != nil && len(*blockData.Justification) > 0 { - err := f.handleJustification(blockData.Header, *blockData.Justification) + err := b.handleJustification(blockData.Header, *blockData.Justification) if err != nil { return fmt.Errorf("handling justification: %w", err) } } } - err := f.blockState.CompareAndSetBlockData(&blockData) + err := b.blockState.CompareAndSetBlockData(&blockData) if err != nil { return fmt.Errorf("comparing and setting block data: %w", err) } @@ -52,24 +121,30 @@ func (f *FullSyncStrategy) processBlockData(blockData types.BlockData, origin Bl return nil } -func (f *FullSyncStrategy) processBlockDataWithHeaderAndBody(blockData types.BlockData, +func (b *blockImporter) processBlockDataWithHeaderAndBody(blockData types.BlockData, origin BlockOrigin, announceImportedBlock bool) (err error) { if origin != networkInitialSync { - err = f.babeVerifier.VerifyBlock(blockData.Header) + err = b.babeVerifier.VerifyBlock(blockData.Header) if err != nil { return fmt.Errorf("babe verifying block: %w", err) } } - f.handleBody(blockData.Body) + accBlockSize := 0 + for _, ext := range *blockData.Body { + accBlockSize += len(ext) + b.transactionState.RemoveExtrinsic(ext) + } + + blockSizeGauge.Set(float64(accBlockSize)) block := &types.Block{ Header: *blockData.Header, Body: *blockData.Body, } - err = f.handleBlock(block, announceImportedBlock) + err = b.handleBlock(block, announceImportedBlock) if err != nil { return fmt.Errorf("handling block: %w", err) } @@ -77,28 +152,17 @@ func (f *FullSyncStrategy) processBlockDataWithHeaderAndBody(blockData types.Blo return nil } -// handleHeader handles block bodies included in BlockResponses -func (f *FullSyncStrategy) handleBody(body *types.Body) { - acc := 0 - for _, ext := range *body { - acc += len(ext) - f.transactionState.RemoveExtrinsic(ext) - } - - blockSizeGauge.Set(float64(acc)) -} - // handleHeader handles blocks (header+body) included in BlockResponses -func (f *FullSyncStrategy) handleBlock(block *types.Block, announceImportedBlock bool) error { - parent, err := f.blockState.GetHeader(block.Header.ParentHash) +func (b *blockImporter) handleBlock(block *types.Block, announceImportedBlock bool) error { + parent, err := b.blockState.GetHeader(block.Header.ParentHash) if err != nil { return fmt.Errorf("%w: %s", errFailedToGetParent, err) } - f.storageState.Lock() - defer f.storageState.Unlock() + b.storageState.Lock() + defer b.storageState.Unlock() - ts, err := f.storageState.TrieState(&parent.StateRoot) + ts, err := b.storageState.TrieState(&parent.StateRoot) if err != nil { return err } @@ -108,7 +172,7 @@ func (f *FullSyncStrategy) handleBlock(block *types.Block, announceImportedBlock panic("parent state root does not match snapshot state root") } - rt, err := 
f.blockState.GetRuntime(parent.Hash()) + rt, err := b.blockState.GetRuntime(parent.Hash()) if err != nil { return err } @@ -120,12 +184,12 @@ func (f *FullSyncStrategy) handleBlock(block *types.Block, announceImportedBlock return fmt.Errorf("failed to execute block %d: %w", block.Header.Number, err) } - if err = f.blockImportHandler.HandleBlockImport(block, ts, announceImportedBlock); err != nil { + if err = b.blockImportHandler.HandleBlockImport(block, ts, announceImportedBlock); err != nil { return err } blockHash := block.Header.Hash() - f.telemetry.SendMessage(telemetry.NewBlockImport( + b.telemetry.SendMessage(telemetry.NewBlockImport( &blockHash, block.Header.Number, "NetworkInitialSync")) @@ -133,14 +197,14 @@ func (f *FullSyncStrategy) handleBlock(block *types.Block, announceImportedBlock return nil } -func (f *FullSyncStrategy) handleJustification(header *types.Header, justification []byte) (err error) { +func (b *blockImporter) handleJustification(header *types.Header, justification []byte) (err error) { headerHash := header.Hash() - err = f.finalityGadget.VerifyBlockJustification(headerHash, justification) + err = b.finalityGadget.VerifyBlockJustification(headerHash, justification) if err != nil { return fmt.Errorf("verifying block number %d justification: %w", header.Number, err) } - err = f.blockState.SetJustification(headerHash, justification) + err = b.blockState.SetJustification(headerHash, justification) if err != nil { return fmt.Errorf("setting justification for block number %d: %w", header.Number, err) } diff --git a/lib/sync/fullsync_test.go b/lib/sync/fullsync_test.go index 1ca2a85edd..6a450eda0d 100644 --- a/lib/sync/fullsync_test.go +++ b/lib/sync/fullsync_test.go @@ -1 +1,278 @@ package sync + +import ( + "container/list" + "os" + "testing" + + "github.com/ChainSafe/gossamer/dot/network" + "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/lib/common" + "github.com/ChainSafe/gossamer/lib/common/variadic" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" +) + +func TestFullSyncNextActions(t *testing.T) { + t.Run("best_block_greater_or_equal_current_target", func(t *testing.T) { + cfg := &FullSyncConfig{ + StartHeader: types.NewEmptyHeader(), + } + + fs := NewFullSyncStrategy(cfg) + task, err := fs.NextActions() + require.NoError(t, err) + require.Empty(t, task) + }) + + t.Run("target_block_greater_than_best_block", func(t *testing.T) { + cfg := &FullSyncConfig{ + StartHeader: types.NewEmptyHeader(), + NumOfTasks: 2, + } + + fs := NewFullSyncStrategy(cfg) + err := fs.OnBlockAnnounceHandshake(peer.ID("peer-A"), &network.BlockAnnounceHandshake{ + Roles: 1, + BestBlockNumber: 1024, + BestBlockHash: common.BytesToHash([]byte{0x01, 0x02}), + GenesisHash: common.BytesToHash([]byte{0x00, 0x01}), + }) + require.NoError(t, err) + + task, err := fs.NextActions() + require.NoError(t, err) + + // the current target is block 1024 (see the OnBlockAnnounceHandshake) + // since we cap the request to the max blocks we can retrieve which is 128 + // the we should have 2 requests start from 1 and request 128 and another + // request starting from 129 and requesting 128 + require.Len(t, task, 2) + request := task[0].request.(*network.BlockRequestMessage) + require.Equal(t, uint32(1), request.StartingBlock.Uint32()) + require.Equal(t, uint32(128), *request.Max) + + request = task[1].request.(*network.BlockRequestMessage) + require.Equal(t, uint32(129), request.StartingBlock.Uint32()) + require.Equal(t, 
uint32(128), *request.Max) + }) + + t.Run("having_requests_in_the_queue", func(t *testing.T) { + refTo := func(v uint32) *uint32 { + return &v + } + + cases := map[string]struct { + setupRequestQueue func(*testing.T) *requestsQueue[*network.BlockRequestMessage] + expectedTasksLen int + expectedTasks []*network.BlockRequestMessage + }{ + "should_have_one_from_request_queue_and_one_from_target_chasing": { + setupRequestQueue: func(t *testing.T) *requestsQueue[*network.BlockRequestMessage] { + request := network.NewAscendingBlockRequests( + 129, 129+128, + network.BootstrapRequestData) + require.Len(t, request, 1) + + rq := &requestsQueue[*network.BlockRequestMessage]{queue: list.New()} + for _, req := range request { + rq.PushBack(req) + } + return rq + }, + expectedTasksLen: 2, + expectedTasks: []*network.BlockRequestMessage{ + { + RequestedData: network.BootstrapRequestData, + StartingBlock: *variadic.FromUint32(129), + Direction: network.Ascending, + Max: refTo(128), + }, + { + RequestedData: network.BootstrapRequestData, + StartingBlock: *variadic.FromUint32(1), + Direction: network.Ascending, + Max: refTo(128), + }, + }, + }, + // creating a amount of 4 requests, but since we have a max num of + // request set to 2 (see FullSyncConfig) we should only have 2 tasks + "should_have_two_tasks": { + setupRequestQueue: func(t *testing.T) *requestsQueue[*network.BlockRequestMessage] { + request := network.NewAscendingBlockRequests( + 129, 129+(4*128), + network.BootstrapRequestData) + require.Len(t, request, 4) + + rq := &requestsQueue[*network.BlockRequestMessage]{queue: list.New()} + for _, req := range request { + rq.PushBack(req) + } + return rq + }, + expectedTasksLen: 2, + expectedTasks: []*network.BlockRequestMessage{ + { + RequestedData: network.BootstrapRequestData, + StartingBlock: *variadic.FromUint32(129), + Direction: network.Ascending, + Max: refTo(128), + }, + { + RequestedData: network.BootstrapRequestData, + StartingBlock: *variadic.FromUint32(257), + Direction: network.Ascending, + Max: refTo(128), + }, + }, + }, + } + + for tname, tt := range cases { + tt := tt + t.Run(tname, func(t *testing.T) { + cfg := &FullSyncConfig{ + StartHeader: types.NewEmptyHeader(), + NumOfTasks: 2, + } + fs := NewFullSyncStrategy(cfg) + fs.requestQueue = tt.setupRequestQueue(t) + + // introduce a peer and a target + err := fs.OnBlockAnnounceHandshake(peer.ID("peer-A"), &network.BlockAnnounceHandshake{ + Roles: 1, + BestBlockNumber: 1024, + BestBlockHash: common.BytesToHash([]byte{0x01, 0x02}), + GenesisHash: common.BytesToHash([]byte{0x00, 0x01}), + }) + require.NoError(t, err) + + tasks, err := fs.NextActions() + require.NoError(t, err) + require.Len(t, tasks, tt.expectedTasksLen) + for idx, task := range tasks { + require.Equal(t, task.request, tt.expectedTasks[idx]) + } + }) + } + }) +} + +func TestFullSyncIsFinished(t *testing.T) { + fstBlocksRaw, err := os.ReadFile("./test_data/westend_1_10_blocks.out") + require.NoError(t, err) + + fstTaskBlockResponse := &network.BlockResponseMessage{} + err = fstTaskBlockResponse.Decode(common.MustHexToBytes(string(fstBlocksRaw))) + require.NoError(t, err) + + sndBlocksRaw, err := os.ReadFile("./test_data/westend_129_256_blocks.out") + require.NoError(t, err) + + sndTaskBlockResponse := &network.BlockResponseMessage{} + err = sndTaskBlockResponse.Decode(common.MustHexToBytes(string(sndBlocksRaw))) + require.NoError(t, err) + + t.Run("requested_max_but_received_less_blocks", func(t *testing.T) { + syncTaskResults := []*syncTaskResult{ + // first task + // 1 
-> 10 + { + who: peer.ID("peerA"), + request: network.NewBlockRequest(*variadic.FromUint32(1), 128, + network.BootstrapRequestData, network.Ascending), + completed: true, + response: fstTaskBlockResponse, + }, + // there is gap from 11 -> 128 + // second task + // 129 -> 256 + { + who: peer.ID("peerA"), + request: network.NewBlockRequest(*variadic.FromUint32(1), 128, + network.BootstrapRequestData, network.Ascending), + completed: true, + response: sndTaskBlockResponse, + }, + } + + genesisHeader := types.NewHeader(fstTaskBlockResponse.BlockData[0].Header.ParentHash, + common.Hash{}, common.Hash{}, 0, types.NewDigest()) + + ctrl := gomock.NewController(t) + mockBlockState := NewMockBlockState(ctrl) + + mockBlockState.EXPECT().GetHighestFinalisedHeader(). + Return(genesisHeader, nil). + Times(3) + + mockBlockState.EXPECT(). + HasHeader(fstTaskBlockResponse.BlockData[0].Header.ParentHash). + Return(true, nil). + Times(2) + + mockBlockState.EXPECT(). + HasHeader(sndTaskBlockResponse.BlockData[0].Header.ParentHash). + Return(false, nil). + Times(2) + + mockImporter := NewMockImporter(ctrl) + mockImporter.EXPECT(). + handle(gomock.AssignableToTypeOf(&types.BlockData{}), networkInitialSync). + Return(true, nil). + Times(10 + 128 + 128) + + cfg := &FullSyncConfig{ + StartHeader: types.NewEmptyHeader(), + BlockState: mockBlockState, + } + + fs := NewFullSyncStrategy(cfg) + fs.importer = mockImporter + + done, _, _, err := fs.IsFinished(syncTaskResults) + require.NoError(t, err) + require.False(t, done) + + require.Len(t, fs.unreadyBlocks.incompleteBlocks, 0) + require.Len(t, fs.unreadyBlocks.disjointChains, 1) + require.Equal(t, fs.unreadyBlocks.disjointChains[0], sndTaskBlockResponse.BlockData) + + expectedAncestorRequest := network.NewBlockRequest( + *variadic.FromHash(sndTaskBlockResponse.BlockData[0].Header.ParentHash), + network.MaxBlocksInResponse, + network.BootstrapRequestData, network.Descending) + + message, ok := fs.requestQueue.PopFront() + require.True(t, ok) + require.Equal(t, expectedAncestorRequest, message) + + // ancestor search response + ancestorSearchBlocksRaw, err := os.ReadFile("./test_data/westend_ancestor_blocks.out") + require.NoError(t, err) + + ancestorSearchResponse := &network.BlockResponseMessage{} + err = ancestorSearchResponse.Decode(common.MustHexToBytes(string(ancestorSearchBlocksRaw))) + require.NoError(t, err) + + syncTaskResults = []*syncTaskResult{ + // ancestor search task + // 128 -> 1 + { + who: peer.ID("peerA"), + request: expectedAncestorRequest, + completed: true, + response: ancestorSearchResponse, + }, + } + + done, _, _, err = fs.IsFinished(syncTaskResults) + require.NoError(t, err) + require.False(t, done) + + require.Len(t, fs.unreadyBlocks.incompleteBlocks, 0) + require.Len(t, fs.unreadyBlocks.disjointChains, 0) + }) +} diff --git a/lib/sync/mocks_generate_test.go b/lib/sync/mocks_generate_test.go new file mode 100644 index 0000000000..c7c8d816de --- /dev/null +++ b/lib/sync/mocks_generate_test.go @@ -0,0 +1,6 @@ +// Copyright 2022 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package sync + +//go:generate mockgen -destination=mocks_test.go -package=$GOPACKAGE . Telemetry,BlockState,StorageState,TransactionState,BabeVerifier,FinalityGadget,BlockImportHandler,Network,Importer diff --git a/lib/sync/mocks_test.go b/lib/sync/mocks_test.go new file mode 100644 index 0000000000..f75c98918a --- /dev/null +++ b/lib/sync/mocks_test.go @@ -0,0 +1,743 @@ +// Code generated by MockGen. DO NOT EDIT. 
+// Source: github.com/ChainSafe/gossamer/lib/sync (interfaces: Telemetry,BlockState,StorageState,TransactionState,BabeVerifier,FinalityGadget,BlockImportHandler,Network,Importer) +// +// Generated by this command: +// +// mockgen -destination=mocks_test.go -package=sync . Telemetry,BlockState,StorageState,TransactionState,BabeVerifier,FinalityGadget,BlockImportHandler,Network,Importer +// + +// Package sync is a generated GoMock package. +package sync + +import ( + json "encoding/json" + reflect "reflect" + time "time" + + network "github.com/ChainSafe/gossamer/dot/network" + peerset "github.com/ChainSafe/gossamer/dot/peerset" + types "github.com/ChainSafe/gossamer/dot/types" + common "github.com/ChainSafe/gossamer/lib/common" + runtime "github.com/ChainSafe/gossamer/lib/runtime" + storage "github.com/ChainSafe/gossamer/lib/runtime/storage" + peer "github.com/libp2p/go-libp2p/core/peer" + gomock "go.uber.org/mock/gomock" +) + +// MockTelemetry is a mock of Telemetry interface. +type MockTelemetry struct { + ctrl *gomock.Controller + recorder *MockTelemetryMockRecorder +} + +// MockTelemetryMockRecorder is the mock recorder for MockTelemetry. +type MockTelemetryMockRecorder struct { + mock *MockTelemetry +} + +// NewMockTelemetry creates a new mock instance. +func NewMockTelemetry(ctrl *gomock.Controller) *MockTelemetry { + mock := &MockTelemetry{ctrl: ctrl} + mock.recorder = &MockTelemetryMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockTelemetry) EXPECT() *MockTelemetryMockRecorder { + return m.recorder +} + +// SendMessage mocks base method. +func (m *MockTelemetry) SendMessage(arg0 json.Marshaler) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SendMessage", arg0) +} + +// SendMessage indicates an expected call of SendMessage. +func (mr *MockTelemetryMockRecorder) SendMessage(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMessage", reflect.TypeOf((*MockTelemetry)(nil).SendMessage), arg0) +} + +// MockBlockState is a mock of BlockState interface. +type MockBlockState struct { + ctrl *gomock.Controller + recorder *MockBlockStateMockRecorder +} + +// MockBlockStateMockRecorder is the mock recorder for MockBlockState. +type MockBlockStateMockRecorder struct { + mock *MockBlockState +} + +// NewMockBlockState creates a new mock instance. +func NewMockBlockState(ctrl *gomock.Controller) *MockBlockState { + mock := &MockBlockState{ctrl: ctrl} + mock.recorder = &MockBlockStateMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockBlockState) EXPECT() *MockBlockStateMockRecorder { + return m.recorder +} + +// BestBlockHeader mocks base method. +func (m *MockBlockState) BestBlockHeader() (*types.Header, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BestBlockHeader") + ret0, _ := ret[0].(*types.Header) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BestBlockHeader indicates an expected call of BestBlockHeader. +func (mr *MockBlockStateMockRecorder) BestBlockHeader() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BestBlockHeader", reflect.TypeOf((*MockBlockState)(nil).BestBlockHeader)) +} + +// BestBlockNumber mocks base method. 
+func (m *MockBlockState) BestBlockNumber() (uint, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BestBlockNumber") + ret0, _ := ret[0].(uint) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BestBlockNumber indicates an expected call of BestBlockNumber. +func (mr *MockBlockStateMockRecorder) BestBlockNumber() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BestBlockNumber", reflect.TypeOf((*MockBlockState)(nil).BestBlockNumber)) +} + +// CompareAndSetBlockData mocks base method. +func (m *MockBlockState) CompareAndSetBlockData(arg0 *types.BlockData) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CompareAndSetBlockData", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// CompareAndSetBlockData indicates an expected call of CompareAndSetBlockData. +func (mr *MockBlockStateMockRecorder) CompareAndSetBlockData(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CompareAndSetBlockData", reflect.TypeOf((*MockBlockState)(nil).CompareAndSetBlockData), arg0) +} + +// GetAllBlocksAtNumber mocks base method. +func (m *MockBlockState) GetAllBlocksAtNumber(arg0 uint) ([]common.Hash, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAllBlocksAtNumber", arg0) + ret0, _ := ret[0].([]common.Hash) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAllBlocksAtNumber indicates an expected call of GetAllBlocksAtNumber. +func (mr *MockBlockStateMockRecorder) GetAllBlocksAtNumber(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllBlocksAtNumber", reflect.TypeOf((*MockBlockState)(nil).GetAllBlocksAtNumber), arg0) +} + +// GetBlockBody mocks base method. +func (m *MockBlockState) GetBlockBody(arg0 common.Hash) (*types.Body, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBlockBody", arg0) + ret0, _ := ret[0].(*types.Body) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBlockBody indicates an expected call of GetBlockBody. +func (mr *MockBlockStateMockRecorder) GetBlockBody(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockBody", reflect.TypeOf((*MockBlockState)(nil).GetBlockBody), arg0) +} + +// GetBlockByHash mocks base method. +func (m *MockBlockState) GetBlockByHash(arg0 common.Hash) (*types.Block, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBlockByHash", arg0) + ret0, _ := ret[0].(*types.Block) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBlockByHash indicates an expected call of GetBlockByHash. +func (mr *MockBlockStateMockRecorder) GetBlockByHash(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockByHash", reflect.TypeOf((*MockBlockState)(nil).GetBlockByHash), arg0) +} + +// GetFinalisedNotifierChannel mocks base method. +func (m *MockBlockState) GetFinalisedNotifierChannel() chan *types.FinalisationInfo { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetFinalisedNotifierChannel") + ret0, _ := ret[0].(chan *types.FinalisationInfo) + return ret0 +} + +// GetFinalisedNotifierChannel indicates an expected call of GetFinalisedNotifierChannel. 
+func (mr *MockBlockStateMockRecorder) GetFinalisedNotifierChannel() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFinalisedNotifierChannel", reflect.TypeOf((*MockBlockState)(nil).GetFinalisedNotifierChannel)) +} + +// GetHashByNumber mocks base method. +func (m *MockBlockState) GetHashByNumber(arg0 uint) (common.Hash, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetHashByNumber", arg0) + ret0, _ := ret[0].(common.Hash) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetHashByNumber indicates an expected call of GetHashByNumber. +func (mr *MockBlockStateMockRecorder) GetHashByNumber(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHashByNumber", reflect.TypeOf((*MockBlockState)(nil).GetHashByNumber), arg0) +} + +// GetHeader mocks base method. +func (m *MockBlockState) GetHeader(arg0 common.Hash) (*types.Header, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetHeader", arg0) + ret0, _ := ret[0].(*types.Header) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetHeader indicates an expected call of GetHeader. +func (mr *MockBlockStateMockRecorder) GetHeader(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHeader", reflect.TypeOf((*MockBlockState)(nil).GetHeader), arg0) +} + +// GetHeaderByNumber mocks base method. +func (m *MockBlockState) GetHeaderByNumber(arg0 uint) (*types.Header, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetHeaderByNumber", arg0) + ret0, _ := ret[0].(*types.Header) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetHeaderByNumber indicates an expected call of GetHeaderByNumber. +func (mr *MockBlockStateMockRecorder) GetHeaderByNumber(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHeaderByNumber", reflect.TypeOf((*MockBlockState)(nil).GetHeaderByNumber), arg0) +} + +// GetHighestFinalisedHeader mocks base method. +func (m *MockBlockState) GetHighestFinalisedHeader() (*types.Header, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetHighestFinalisedHeader") + ret0, _ := ret[0].(*types.Header) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetHighestFinalisedHeader indicates an expected call of GetHighestFinalisedHeader. +func (mr *MockBlockStateMockRecorder) GetHighestFinalisedHeader() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHighestFinalisedHeader", reflect.TypeOf((*MockBlockState)(nil).GetHighestFinalisedHeader)) +} + +// GetJustification mocks base method. +func (m *MockBlockState) GetJustification(arg0 common.Hash) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetJustification", arg0) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetJustification indicates an expected call of GetJustification. +func (mr *MockBlockStateMockRecorder) GetJustification(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetJustification", reflect.TypeOf((*MockBlockState)(nil).GetJustification), arg0) +} + +// GetMessageQueue mocks base method. 
+func (m *MockBlockState) GetMessageQueue(arg0 common.Hash) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMessageQueue", arg0) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetMessageQueue indicates an expected call of GetMessageQueue. +func (mr *MockBlockStateMockRecorder) GetMessageQueue(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMessageQueue", reflect.TypeOf((*MockBlockState)(nil).GetMessageQueue), arg0) +} + +// GetReceipt mocks base method. +func (m *MockBlockState) GetReceipt(arg0 common.Hash) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetReceipt", arg0) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetReceipt indicates an expected call of GetReceipt. +func (mr *MockBlockStateMockRecorder) GetReceipt(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetReceipt", reflect.TypeOf((*MockBlockState)(nil).GetReceipt), arg0) +} + +// GetRuntime mocks base method. +func (m *MockBlockState) GetRuntime(arg0 common.Hash) (runtime.Instance, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetRuntime", arg0) + ret0, _ := ret[0].(runtime.Instance) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetRuntime indicates an expected call of GetRuntime. +func (mr *MockBlockStateMockRecorder) GetRuntime(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRuntime", reflect.TypeOf((*MockBlockState)(nil).GetRuntime), arg0) +} + +// HasHeader mocks base method. +func (m *MockBlockState) HasHeader(arg0 common.Hash) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HasHeader", arg0) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HasHeader indicates an expected call of HasHeader. +func (mr *MockBlockStateMockRecorder) HasHeader(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasHeader", reflect.TypeOf((*MockBlockState)(nil).HasHeader), arg0) +} + +// IsDescendantOf mocks base method. +func (m *MockBlockState) IsDescendantOf(arg0, arg1 common.Hash) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsDescendantOf", arg0, arg1) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// IsDescendantOf indicates an expected call of IsDescendantOf. +func (mr *MockBlockStateMockRecorder) IsDescendantOf(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsDescendantOf", reflect.TypeOf((*MockBlockState)(nil).IsDescendantOf), arg0, arg1) +} + +// IsPaused mocks base method. +func (m *MockBlockState) IsPaused() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsPaused") + ret0, _ := ret[0].(bool) + return ret0 +} + +// IsPaused indicates an expected call of IsPaused. +func (mr *MockBlockStateMockRecorder) IsPaused() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsPaused", reflect.TypeOf((*MockBlockState)(nil).IsPaused)) +} + +// Pause mocks base method. +func (m *MockBlockState) Pause() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Pause") + ret0, _ := ret[0].(error) + return ret0 +} + +// Pause indicates an expected call of Pause. 
+func (mr *MockBlockStateMockRecorder) Pause() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Pause", reflect.TypeOf((*MockBlockState)(nil).Pause)) +} + +// Range mocks base method. +func (m *MockBlockState) Range(arg0, arg1 common.Hash) ([]common.Hash, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Range", arg0, arg1) + ret0, _ := ret[0].([]common.Hash) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Range indicates an expected call of Range. +func (mr *MockBlockStateMockRecorder) Range(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Range", reflect.TypeOf((*MockBlockState)(nil).Range), arg0, arg1) +} + +// RangeInMemory mocks base method. +func (m *MockBlockState) RangeInMemory(arg0, arg1 common.Hash) ([]common.Hash, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RangeInMemory", arg0, arg1) + ret0, _ := ret[0].([]common.Hash) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RangeInMemory indicates an expected call of RangeInMemory. +func (mr *MockBlockStateMockRecorder) RangeInMemory(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RangeInMemory", reflect.TypeOf((*MockBlockState)(nil).RangeInMemory), arg0, arg1) +} + +// SetJustification mocks base method. +func (m *MockBlockState) SetJustification(arg0 common.Hash, arg1 []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetJustification", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetJustification indicates an expected call of SetJustification. +func (mr *MockBlockStateMockRecorder) SetJustification(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetJustification", reflect.TypeOf((*MockBlockState)(nil).SetJustification), arg0, arg1) +} + +// StoreRuntime mocks base method. +func (m *MockBlockState) StoreRuntime(arg0 common.Hash, arg1 runtime.Instance) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "StoreRuntime", arg0, arg1) +} + +// StoreRuntime indicates an expected call of StoreRuntime. +func (mr *MockBlockStateMockRecorder) StoreRuntime(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StoreRuntime", reflect.TypeOf((*MockBlockState)(nil).StoreRuntime), arg0, arg1) +} + +// MockStorageState is a mock of StorageState interface. +type MockStorageState struct { + ctrl *gomock.Controller + recorder *MockStorageStateMockRecorder +} + +// MockStorageStateMockRecorder is the mock recorder for MockStorageState. +type MockStorageStateMockRecorder struct { + mock *MockStorageState +} + +// NewMockStorageState creates a new mock instance. +func NewMockStorageState(ctrl *gomock.Controller) *MockStorageState { + mock := &MockStorageState{ctrl: ctrl} + mock.recorder = &MockStorageStateMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockStorageState) EXPECT() *MockStorageStateMockRecorder { + return m.recorder +} + +// Lock mocks base method. +func (m *MockStorageState) Lock() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Lock") +} + +// Lock indicates an expected call of Lock. +func (mr *MockStorageStateMockRecorder) Lock() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Lock", reflect.TypeOf((*MockStorageState)(nil).Lock)) +} + +// TrieState mocks base method. 
+func (m *MockStorageState) TrieState(arg0 *common.Hash) (*storage.TrieState, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "TrieState", arg0) + ret0, _ := ret[0].(*storage.TrieState) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// TrieState indicates an expected call of TrieState. +func (mr *MockStorageStateMockRecorder) TrieState(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TrieState", reflect.TypeOf((*MockStorageState)(nil).TrieState), arg0) +} + +// Unlock mocks base method. +func (m *MockStorageState) Unlock() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Unlock") +} + +// Unlock indicates an expected call of Unlock. +func (mr *MockStorageStateMockRecorder) Unlock() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Unlock", reflect.TypeOf((*MockStorageState)(nil).Unlock)) +} + +// MockTransactionState is a mock of TransactionState interface. +type MockTransactionState struct { + ctrl *gomock.Controller + recorder *MockTransactionStateMockRecorder +} + +// MockTransactionStateMockRecorder is the mock recorder for MockTransactionState. +type MockTransactionStateMockRecorder struct { + mock *MockTransactionState +} + +// NewMockTransactionState creates a new mock instance. +func NewMockTransactionState(ctrl *gomock.Controller) *MockTransactionState { + mock := &MockTransactionState{ctrl: ctrl} + mock.recorder = &MockTransactionStateMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockTransactionState) EXPECT() *MockTransactionStateMockRecorder { + return m.recorder +} + +// RemoveExtrinsic mocks base method. +func (m *MockTransactionState) RemoveExtrinsic(arg0 types.Extrinsic) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "RemoveExtrinsic", arg0) +} + +// RemoveExtrinsic indicates an expected call of RemoveExtrinsic. +func (mr *MockTransactionStateMockRecorder) RemoveExtrinsic(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveExtrinsic", reflect.TypeOf((*MockTransactionState)(nil).RemoveExtrinsic), arg0) +} + +// MockBabeVerifier is a mock of BabeVerifier interface. +type MockBabeVerifier struct { + ctrl *gomock.Controller + recorder *MockBabeVerifierMockRecorder +} + +// MockBabeVerifierMockRecorder is the mock recorder for MockBabeVerifier. +type MockBabeVerifierMockRecorder struct { + mock *MockBabeVerifier +} + +// NewMockBabeVerifier creates a new mock instance. +func NewMockBabeVerifier(ctrl *gomock.Controller) *MockBabeVerifier { + mock := &MockBabeVerifier{ctrl: ctrl} + mock.recorder = &MockBabeVerifierMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockBabeVerifier) EXPECT() *MockBabeVerifierMockRecorder { + return m.recorder +} + +// VerifyBlock mocks base method. +func (m *MockBabeVerifier) VerifyBlock(arg0 *types.Header) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "VerifyBlock", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// VerifyBlock indicates an expected call of VerifyBlock. +func (mr *MockBabeVerifierMockRecorder) VerifyBlock(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyBlock", reflect.TypeOf((*MockBabeVerifier)(nil).VerifyBlock), arg0) +} + +// MockFinalityGadget is a mock of FinalityGadget interface. 
+type MockFinalityGadget struct { + ctrl *gomock.Controller + recorder *MockFinalityGadgetMockRecorder +} + +// MockFinalityGadgetMockRecorder is the mock recorder for MockFinalityGadget. +type MockFinalityGadgetMockRecorder struct { + mock *MockFinalityGadget +} + +// NewMockFinalityGadget creates a new mock instance. +func NewMockFinalityGadget(ctrl *gomock.Controller) *MockFinalityGadget { + mock := &MockFinalityGadget{ctrl: ctrl} + mock.recorder = &MockFinalityGadgetMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockFinalityGadget) EXPECT() *MockFinalityGadgetMockRecorder { + return m.recorder +} + +// VerifyBlockJustification mocks base method. +func (m *MockFinalityGadget) VerifyBlockJustification(arg0 common.Hash, arg1 []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "VerifyBlockJustification", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// VerifyBlockJustification indicates an expected call of VerifyBlockJustification. +func (mr *MockFinalityGadgetMockRecorder) VerifyBlockJustification(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyBlockJustification", reflect.TypeOf((*MockFinalityGadget)(nil).VerifyBlockJustification), arg0, arg1) +} + +// MockBlockImportHandler is a mock of BlockImportHandler interface. +type MockBlockImportHandler struct { + ctrl *gomock.Controller + recorder *MockBlockImportHandlerMockRecorder +} + +// MockBlockImportHandlerMockRecorder is the mock recorder for MockBlockImportHandler. +type MockBlockImportHandlerMockRecorder struct { + mock *MockBlockImportHandler +} + +// NewMockBlockImportHandler creates a new mock instance. +func NewMockBlockImportHandler(ctrl *gomock.Controller) *MockBlockImportHandler { + mock := &MockBlockImportHandler{ctrl: ctrl} + mock.recorder = &MockBlockImportHandlerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockBlockImportHandler) EXPECT() *MockBlockImportHandlerMockRecorder { + return m.recorder +} + +// HandleBlockImport mocks base method. +func (m *MockBlockImportHandler) HandleBlockImport(arg0 *types.Block, arg1 *storage.TrieState, arg2 bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HandleBlockImport", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// HandleBlockImport indicates an expected call of HandleBlockImport. +func (mr *MockBlockImportHandlerMockRecorder) HandleBlockImport(arg0, arg1, arg2 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HandleBlockImport", reflect.TypeOf((*MockBlockImportHandler)(nil).HandleBlockImport), arg0, arg1, arg2) +} + +// MockNetwork is a mock of Network interface. +type MockNetwork struct { + ctrl *gomock.Controller + recorder *MockNetworkMockRecorder +} + +// MockNetworkMockRecorder is the mock recorder for MockNetwork. +type MockNetworkMockRecorder struct { + mock *MockNetwork +} + +// NewMockNetwork creates a new mock instance. +func NewMockNetwork(ctrl *gomock.Controller) *MockNetwork { + mock := &MockNetwork{ctrl: ctrl} + mock.recorder = &MockNetworkMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockNetwork) EXPECT() *MockNetworkMockRecorder { + return m.recorder +} + +// AllConnectedPeersIDs mocks base method. 
+func (m *MockNetwork) AllConnectedPeersIDs() []peer.ID { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AllConnectedPeersIDs") + ret0, _ := ret[0].([]peer.ID) + return ret0 +} + +// AllConnectedPeersIDs indicates an expected call of AllConnectedPeersIDs. +func (mr *MockNetworkMockRecorder) AllConnectedPeersIDs() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllConnectedPeersIDs", reflect.TypeOf((*MockNetwork)(nil).AllConnectedPeersIDs)) +} + +// BlockAnnounceHandshake mocks base method. +func (m *MockNetwork) BlockAnnounceHandshake(arg0 *types.Header) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BlockAnnounceHandshake", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// BlockAnnounceHandshake indicates an expected call of BlockAnnounceHandshake. +func (mr *MockNetworkMockRecorder) BlockAnnounceHandshake(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BlockAnnounceHandshake", reflect.TypeOf((*MockNetwork)(nil).BlockAnnounceHandshake), arg0) +} + +// GetRequestResponseProtocol mocks base method. +func (m *MockNetwork) GetRequestResponseProtocol(arg0 string, arg1 time.Duration, arg2 uint64) *network.RequestResponseProtocol { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetRequestResponseProtocol", arg0, arg1, arg2) + ret0, _ := ret[0].(*network.RequestResponseProtocol) + return ret0 +} + +// GetRequestResponseProtocol indicates an expected call of GetRequestResponseProtocol. +func (mr *MockNetworkMockRecorder) GetRequestResponseProtocol(arg0, arg1, arg2 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRequestResponseProtocol", reflect.TypeOf((*MockNetwork)(nil).GetRequestResponseProtocol), arg0, arg1, arg2) +} + +// ReportPeer mocks base method. +func (m *MockNetwork) ReportPeer(arg0 peerset.ReputationChange, arg1 peer.ID) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "ReportPeer", arg0, arg1) +} + +// ReportPeer indicates an expected call of ReportPeer. +func (mr *MockNetworkMockRecorder) ReportPeer(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportPeer", reflect.TypeOf((*MockNetwork)(nil).ReportPeer), arg0, arg1) +} + +// MockImporter is a mock of Importer interface. +type MockImporter struct { + ctrl *gomock.Controller + recorder *MockImporterMockRecorder +} + +// MockImporterMockRecorder is the mock recorder for MockImporter. +type MockImporterMockRecorder struct { + mock *MockImporter +} + +// NewMockImporter creates a new mock instance. +func NewMockImporter(ctrl *gomock.Controller) *MockImporter { + mock := &MockImporter{ctrl: ctrl} + mock.recorder = &MockImporterMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockImporter) EXPECT() *MockImporterMockRecorder { + return m.recorder +} + +// handle mocks base method. +func (m *MockImporter) handle(arg0 *types.BlockData, arg1 BlockOrigin) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "handle", arg0, arg1) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// handle indicates an expected call of handle. 
+func (mr *MockImporterMockRecorder) handle(arg0, arg1 any) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "handle", reflect.TypeOf((*MockImporter)(nil).handle), arg0, arg1)
+}
diff --git a/lib/sync/peer_view.go b/lib/sync/peer_view.go
index 3ea309252d..628fba4ff6 100644
--- a/lib/sync/peer_view.go
+++ b/lib/sync/peer_view.go
@@ -21,13 +21,19 @@ type peerViewSet struct {
 	target uint32
 }
 
-func (p *peerViewSet) update(peerID peer.ID, hash common.Hash, number uint32) {
+func (p *peerViewSet) get(peerID peer.ID) peerView {
+	p.mtx.RLock()
+	defer p.mtx.RUnlock()
+	return p.view[peerID]
+}
+
+func (p *peerViewSet) update(peerID peer.ID, bestHash common.Hash, bestNumber uint32) {
 	p.mtx.Lock()
 	defer p.mtx.Unlock()
 
 	newView := peerView{
-		bestBlockHash:   hash,
-		bestBlockNumber: number,
+		bestBlockHash:   bestHash,
+		bestBlockNumber: bestNumber,
 	}
 
 	view, ok := p.view[peerID]
diff --git a/lib/sync/request_queue.go b/lib/sync/request_queue.go
new file mode 100644
index 0000000000..a483906f14
--- /dev/null
+++ b/lib/sync/request_queue.go
@@ -0,0 +1,38 @@
+package sync
+
+import (
+	"container/list"
+	"sync"
+)
+
+type requestsQueue[M any] struct {
+	mu    sync.RWMutex
+	queue *list.List
+}
+
+func (r *requestsQueue[M]) Len() int {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+	return r.queue.Len()
+}
+
+func (r *requestsQueue[M]) PopFront() (value M, ok bool) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	e := r.queue.Front()
+	if e == nil {
+		return value, false
+	}
+
+	r.queue.Remove(e)
+	return e.Value.(M), true
+}
+
+func (r *requestsQueue[M]) PushBack(message ...M) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	for _, m := range message {
+		r.queue.PushBack(m)
+	}
+}
diff --git a/lib/sync/service.go b/lib/sync/service.go
index 69b583e926..d17c8f6f33 100644
--- a/lib/sync/service.go
+++ b/lib/sync/service.go
@@ -58,7 +58,7 @@ type Change struct {
 }
 
 type Strategy interface {
-	OnBlockAnnounce(from peer.ID, msg *network.BlockAnnounceMessage) error
+	OnBlockAnnounce(from peer.ID, msg *network.BlockAnnounceMessage) (repChange *Change, err error)
 	OnBlockAnnounceHandshake(from peer.ID, msg *network.BlockAnnounceHandshake) error
 	NextActions() ([]*syncTask, error)
 	IsFinished(results []*syncTaskResult) (done bool, repChanges []Change, blocks []peer.ID, err error)
@@ -84,6 +84,7 @@ type SyncService struct {
 	workerPool        *syncWorkerPool
 	waitPeersDuration time.Duration
 	minPeers          int
+	slotDuration      time.Duration
 	stopCh            chan struct{}
 }
 
@@ -160,7 +161,16 @@ func (s *SyncService) HandleBlockAnnounceHandshake(from peer.ID, msg *network.Bl
 }
 
 func (s *SyncService) HandleBlockAnnounce(from peer.ID, msg *network.BlockAnnounceMessage) error {
-	return s.currentStrategy.OnBlockAnnounce(from, msg)
+	repChange, err := s.currentStrategy.OnBlockAnnounce(from, msg)
+	if repChange != nil {
+		s.network.ReportPeer(repChange.rep, repChange.who)
+	}
+
+	if err != nil {
+		return fmt.Errorf("while handling block announce: %w", err)
+	}
+
+	return nil
 }
 
 func (s *SyncService) OnConnectionClosed(who peer.ID) {
@@ -207,22 +217,29 @@ func (s *SyncService) runSyncEngine() {
 		return
 	}
 
+	if len(tasks) == 0 {
+		// sleep for the duration of one slot and try again
+		time.Sleep(s.slotDuration)
+		continue
+	}
+
 	results, err := s.workerPool.submitRequests(tasks)
 	if err != nil {
 		logger.Criticalf("getting highest finalized header: %w", err)
 		return
 	}
 
-	done, repChanges, blocks, err := s.currentStrategy.IsFinished(results)
+	done, repChanges, peersToIgnore, err := s.currentStrategy.IsFinished(results)
 	if err != nil {
-		panic(fmt.Sprintf("current sync strategy failed with: %s", err.Error()))
+		logger.Criticalf("current sync strategy failed with: %s", err.Error())
+		return
 	}
 
 	for _, change := range repChanges {
 		s.network.ReportPeer(change.rep, change.who)
 	}
 
-	for _, block := range blocks {
+	for _, block := range peersToIgnore {
 		s.workerPool.ignorePeerAsWorker(block)
 	}
 
diff --git a/lib/sync/worker.go b/lib/sync/worker.go
index 7898d95da4..c28215aa4a 100644
--- a/lib/sync/worker.go
+++ b/lib/sync/worker.go
@@ -20,20 +20,19 @@ func executeRequest(wg *sync.WaitGroup, who peer.ID, task *syncTask, guard chan
 	}()
 
 	request := task.request
-	logger.Debugf("[EXECUTING] worker %s, block request: %s", who, request)
+	//logger.Infof("[EXECUTING] worker %s, request: %s", who, request)
 	err := task.requestMaker.Do(who, request, task.response)
 	if err != nil {
-		logger.Debugf("[ERR] worker %s, err: %s", who, err)
+		logger.Infof("[ERR] worker %s, request: %s, err: %s", who, request, err.Error())
 		resCh <- &syncTaskResult{
 			who:      who,
 			request:  request,
-			err:      err,
 			response: nil,
 		}
 		return
 	}
 
-	logger.Debugf("[FINISHED] worker %s, response: %s", who, task.response.String())
+	logger.Infof("[FINISHED] worker %s, request: %s", who, request)
 	resCh <- &syncTaskResult{
 		who:      who,
 		request:  request,
diff --git a/lib/sync/worker_pool.go b/lib/sync/worker_pool.go
index 599a729164..c34e439f66 100644
--- a/lib/sync/worker_pool.go
+++ b/lib/sync/worker_pool.go
@@ -5,6 +5,7 @@ package sync
 
 import (
 	"errors"
+	"math/rand"
 	"sync"
 	"time"
 
@@ -27,10 +28,10 @@ type syncTask struct {
 }
 
 type syncTaskResult struct {
-	who      peer.ID
-	err      error
-	request  network.Message
-	response network.ResponseMessage
+	who       peer.ID
+	completed bool
+	request   network.Message
+	response  network.ResponseMessage
 }
 
 type syncWorkerPool struct {
@@ -85,41 +86,40 @@ func (s *syncWorkerPool) submitRequests(tasks []*syncTask) ([]*syncTaskResult, e
 	s.mtx.RLock()
 	defer s.mtx.RUnlock()
 
-	wg := sync.WaitGroup{}
-	resCh := make(chan *syncTaskResult, len(tasks))
-
-	for pid, w := range s.workers {
-		_, ok := connectedPeers[pid]
-		if ok {
-			continue
+	pids := append(maps.Keys(s.workers), peers...)
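
A note on the requestsQueue added in lib/sync/request_queue.go above: it is a mutex-guarded FIFO over container/list, and the patch ships no constructor, so the zero value would carry a nil *list.List and panic on first use. A minimal sketch of the initialisation callers need (newRequestsQueue is a hypothetical helper, not part of this patch):

package sync

import "container/list"

// newRequestsQueue is an illustrative constructor: it initialises the
// embedded list so that Len, PopFront and PushBack are safe to call.
func newRequestsQueue[M any]() *requestsQueue[M] {
	return &requestsQueue[M]{queue: list.New()}
}

PopFront recovers the element's concrete type via the e.Value.(M) assertion, which holds because PushBack only ever stores values of type M.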
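The reworked submitRequests continued below drops the goroutine-per-worker dispatch in favour of trying each task against a shuffled list of peers until one serves it. A standalone sketch of that shuffle-and-retry pattern, assuming only a generic do callback (trySubmit is illustrative, not the patch's exact code):

package sync

import "math/rand"

// trySubmit walks a shuffled copy of peers and returns the first peer
// whose do call succeeds; ok is false when every peer failed.
func trySubmit[P any](peers []P, do func(P) error) (served P, ok bool) {
	shuffled := make([]P, len(peers))
	copy(shuffled, peers)
	rand.Shuffle(len(shuffled), func(i, j int) {
		shuffled[i], shuffled[j] = shuffled[j], shuffled[i]
	})
	for _, p := range shuffled {
		if err := do(p); err != nil {
			continue // this peer failed, try the next one
		}
		return p, true
	}
	return served, false // zero value: no peer could serve the request
}

Shuffling spreads the request load across peers between calls, at the cost of running tasks sequentially rather than concurrently as before.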
+	rand.Shuffle(len(pids), func(i, j int) {
+		pids[i], pids[j] = pids[j], pids[i]
+	})
+
+	results := make([]*syncTaskResult, 0, len(tasks))
+	for _, task := range tasks {
+		completed := false
+		for _, pid := range pids {
+			logger.Infof("[EXECUTING] worker %s", pid)
+			err := task.requestMaker.Do(pid, task.request, task.response)
+			if err != nil {
+				logger.Infof("[ERR] worker %s, request: %s, err: %s", pid, task.request, err.Error())
+				continue
+			}
+
+			completed = true
+			results = append(results, &syncTaskResult{
+				who:       pid,
+				completed: completed,
+				request:   task.request,
+				response:  task.response,
+			})
+			logger.Infof("[FINISHED] worker %s, request: %s", pid, task.request)
+			break
 		}
-		connectedPeers[pid] = w
-	}
-
-	allWorkers := maps.Keys(connectedPeers)
-	if len(allWorkers) == 0 {
-		return nil, ErrNoPeersToMakeRequest
-	}
-
-	guard := make(chan struct{}, len(allWorkers))
-	for idx, task := range tasks {
-		guard <- struct{}{}
-		workerID := idx % len(allWorkers)
-		worker := allWorkers[workerID]
-
-		wg.Add(1)
-		go executeRequest(&wg, worker, task, guard, resCh)
-	}
-
-	go func() {
-		wg.Wait()
-		close(resCh)
-	}()
-
-	results := make([]*syncTaskResult, 0)
-	for r := range resCh {
-		results = append(results, r)
+		if !completed {
+			results = append(results, &syncTaskResult{
+				completed: completed,
+				request:   task.request,
+				response:  nil,
+			})
+		}
 	}
 
 	return results, nil
diff --git a/lib/utils/utils.go b/lib/utils/utils.go
index ef9d98b533..2ad78927cd 100644
--- a/lib/utils/utils.go
+++ b/lib/utils/utils.go
@@ -148,6 +148,12 @@ func GetWestendLocalRawGenesisPath(t *testing.T) string {
 	return filepath.Join(GetProjectRootPathTest(t), "chain", "westend-local", "westend-local-spec-raw.json")
 }
 
+// GetWestendRawGenesisPath gets the westend genesis raw path
+func GetWestendRawGenesisPath(t *testing.T) string {
+	t.Helper()
+	return filepath.Join(GetProjectRootPathTest(t), "chain", "westend", "westend-spec-raw.json")
+}
+
 // GetKusamaGenesisPath gets the Kusama genesis path
 func GetKusamaGenesisPath(t *testing.T) string {
 	t.Helper()
diff --git a/scripts/retrieve_block/retrieve_block.go b/scripts/retrieve_block/retrieve_block.go
index 598c1258f8..6f937849e8 100644
--- a/scripts/retrieve_block/retrieve_block.go
+++ b/scripts/retrieve_block/retrieve_block.go
@@ -270,15 +270,17 @@ func main() {
 	protocolID := protocol.ID(fmt.Sprintf("/%s/sync/2", chain.ProtocolID))
 
 	for _, bootnodesAddr := range bootnodes {
+		fmt.Println("connecting...")
 		err := p2pHost.Connect(ctx, bootnodesAddr)
 		if err != nil {
+			fmt.Printf("failed with: %s\n", err.Error())
 			continue
 		}
 
-		log.Printf("requesting from peer %s\n", bootnodesAddr.String())
+		fmt.Printf("requesting from peer %s\n", bootnodesAddr.String())
 		stream, err := p2pHost.NewStream(ctx, bootnodesAddr.ID, protocolID)
 		if err != nil {
-			log.Printf("WARN: failed to create stream using protocol %s: %s", protocolID, err.Error())
+			fmt.Printf("WARN: failed to create stream using protocol %s: %s\n", protocolID, err.Error())
 		}
 		defer stream.Close() //nolint:errcheck
 

From 9622544f89e6edf2b4384952c617cf1fc37a336a Mon Sep 17 00:00:00 2001
From: EclesioMeloJunior
Date: Tue, 6 Aug 2024 16:34:23 -0400
Subject: [PATCH 14/74] chore: use yaml file for test data

---
 lib/sync/fullsync_test.go             | 28 +++++++++++-------
 lib/sync/service.go                   |  6 ++--
 lib/sync/testdata/westend_blocks.yaml |  5 ++++
 lib/sync/worker.go                    | 41 ---------------------------
 lib/sync/worker_pool.go               | 26 +++++++----------
 5 files changed, 36 insertions(+), 70 deletions(-)
 create mode 100644 
lib/sync/testdata/westend_blocks.yaml delete mode 100644 lib/sync/worker.go diff --git a/lib/sync/fullsync_test.go b/lib/sync/fullsync_test.go index 6a450eda0d..c382b6deb0 100644 --- a/lib/sync/fullsync_test.go +++ b/lib/sync/fullsync_test.go @@ -2,7 +2,6 @@ package sync import ( "container/list" - "os" "testing" "github.com/ChainSafe/gossamer/dot/network" @@ -12,8 +11,20 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/require" "go.uber.org/mock/gomock" + "gopkg.in/yaml.v3" + + _ "embed" ) +//go:embed testdata/westend_blocks.yaml +var rawWestendBlocks []byte + +type WestendBlocks struct { + Blocks1To10 string `yaml:"blocks_1_to_10"` + Blocks129To256 string `yaml:"blocks_129_to_256"` + Blocks1To128 string `yaml:"blocks_1_to_128"` +} + func TestFullSyncNextActions(t *testing.T) { t.Run("best_block_greater_or_equal_current_target", func(t *testing.T) { cfg := &FullSyncConfig{ @@ -161,18 +172,16 @@ func TestFullSyncNextActions(t *testing.T) { } func TestFullSyncIsFinished(t *testing.T) { - fstBlocksRaw, err := os.ReadFile("./test_data/westend_1_10_blocks.out") + westendBlocks := &WestendBlocks{} + err := yaml.Unmarshal(rawWestendBlocks, westendBlocks) require.NoError(t, err) fstTaskBlockResponse := &network.BlockResponseMessage{} - err = fstTaskBlockResponse.Decode(common.MustHexToBytes(string(fstBlocksRaw))) - require.NoError(t, err) - - sndBlocksRaw, err := os.ReadFile("./test_data/westend_129_256_blocks.out") + err = fstTaskBlockResponse.Decode(common.MustHexToBytes(westendBlocks.Blocks1To10)) require.NoError(t, err) sndTaskBlockResponse := &network.BlockResponseMessage{} - err = sndTaskBlockResponse.Decode(common.MustHexToBytes(string(sndBlocksRaw))) + err = sndTaskBlockResponse.Decode(common.MustHexToBytes(westendBlocks.Blocks129To256)) require.NoError(t, err) t.Run("requested_max_but_received_less_blocks", func(t *testing.T) { @@ -250,11 +259,8 @@ func TestFullSyncIsFinished(t *testing.T) { require.Equal(t, expectedAncestorRequest, message) // ancestor search response - ancestorSearchBlocksRaw, err := os.ReadFile("./test_data/westend_ancestor_blocks.out") - require.NoError(t, err) - ancestorSearchResponse := &network.BlockResponseMessage{} - err = ancestorSearchResponse.Decode(common.MustHexToBytes(string(ancestorSearchBlocksRaw))) + err = ancestorSearchResponse.Decode(common.MustHexToBytes(westendBlocks.Blocks1To128)) require.NoError(t, err) syncTaskResults = []*syncTaskResult{ diff --git a/lib/sync/service.go b/lib/sync/service.go index d17c8f6f33..aa869376f6 100644 --- a/lib/sync/service.go +++ b/lib/sync/service.go @@ -106,7 +106,6 @@ func NewSyncService(network Network, func (s *SyncService) waitWorkers() { waitPeersTimer := time.NewTimer(s.waitPeersDuration) - bestBlockHeader, err := s.blockState.BestBlockHeader() if err != nil { panic(fmt.Sprintf("failed to get highest finalised header: %v", err)) @@ -152,10 +151,13 @@ func (s *SyncService) Stop() error { func (s *SyncService) HandleBlockAnnounceHandshake(from peer.ID, msg *network.BlockAnnounceHandshake) error { logger.Infof("receiving a block announce handshake: %s", from.String()) - s.workerPool.fromBlockAnnounceHandshake(from) + if err := s.workerPool.fromBlockAnnounceHandshake(from); err != nil { + return err + } s.mu.Lock() defer s.mu.Unlock() + s.currentStrategy.OnBlockAnnounceHandshake(from, msg) return nil } diff --git a/lib/sync/testdata/westend_blocks.yaml b/lib/sync/testdata/westend_blocks.yaml new file mode 100644 index 0000000000..84325c37cc --- /dev/null +++ 
b/lib/sync/testdata/westend_blocks.yaml @@ -0,0 +1,5 @@ +blocks_1_to_10: 0x0abe030a2044ef51c86927a1e2da55754dba9684dd6ff9bac8c61624ffe958be656c42e036128503e143f23803ac50e8f6f8e62695d1ce9e4e1d68aa36c1cd2cfd15340213f3423e04333f8c04dda25fa8d47474b253c6630d9ccb70380a71469d9a50f33c00dd2dbfa258f9a8dc3c75cb4566dc1419dadc2168465a7bee5d0006c6ede541b18cb1800c0642414245340200000000771dc20f00000000044241424509030110a8ddd0891e14725841cd1b5581d23806a97f41c28a25436db6473c86e15dcd4f01000000000000007ca58770eb41c1a68ef77e92255e4635fc11f665cb89aee469e920511c48343a010000000000000072bae70a1398c0ba52f815cc5dfbc9ec5c013771e541ae28e05d1129243e3001010000000000000074bfb70627416e6e6c4785e928ced384c6c06e5c8dd173a094bc3118da7b673e01000000000000000000000000000000000000000000000000000000000000000000000000000000054241424501019c32c3d037ef3e8231a1eb08a858fc6aa74a58f1e34c82ed08f2464567fec50db1f0cd197b6c5bb84f146eee6c24316168369d25eb40b642d4df5bbdd2b0838c1a0b280402000b1095925571011a0510040d00000ad6060a209b0211aadcef4bb65e69346cfd256ddd2abcb674271326b08f0975dac7c17bc712bc0144ef51c86927a1e2da55754dba9684dd6ff9bac8c61624ffe958be656c42e036086c697d4e2175d16627de2861303e21c2e987ec9e024cbb523d2fdc29da874dea733222ec5a02dbb1173287d5e8613203357d682f4fe63f9d25da118db5039276080642414245340200000000781dc20f000000000542414245010134de462d98e7cdd8dd8691794e6e935a4ec65f1a712d9e4382d0e2feadf68262467ed9f520c7cd4d7c259a18386bb214a5d23aefa49b4633803417bcd986548e1a0b280402000b80ac925571011a0510040d00001aec01a903040b00000000008c8812200d16ec6fe30767653a18d34902db4e6285b7fb3084a730bc95bf435ee65735ba107c782f6970342f33342e37332e35352e3138332f7463702f33303333342f7773706c2f6970342f33342e37332e35352e3138332f7463702f333033333368642f6970342f31302e302e312e3133362f7463702f333033333374702f6970342f31302e302e312e3133362f7463702f33303333342f7773000000000300000096f234c06c4a1d4018df2e5f5ed34f5428553944f308a2116ff2a4677ae5203be5a7dcb020ddbea1d00a80ebb4f307f479bd7ca242fa00ad018daedf76eb4f861ab501cd02040b00000000008c8812209f3f68a9c2a4bb9676447919685bca373214d0256db8ba126ab90e4e4eb2906908807c2f6970342f3130342e3135352e37392e39302f7463702f33303333342f777374702f6970342f3130342e3135352e37392e39302f7463702f33303333330000000002000000d492de037d3bc49ac5332c69aee509ac3e9ea608557d78033d81206eb1a0890454abcff60e9346750ac2013cd03846553082639c99ed0818992d6ff6bb610f811ab701d502040b00000000008c881220bbcf661928cde137d3dc501e0d521ad28ab6a8d058daa1acbce02608addceb880884802f6970342f33352e3230352e3134322e3132392f7463702f33303333342f777378742f6970342f33352e3230352e3134322e3132392f7463702f33303333330000000001000000e8219652e4ca1faf79034c08e708c2dfe135d5b6033a4a033f165b299698443908eb119c0c614f50e23259a65af2cd1cf49305abfc97b4a5ab240370651bc3880af5010a20d8c479815319121ae17e2879061de85eb792fa30b00bf365efb261ecffbeafca12bc019b0211aadcef4bb65e69346cfd256ddd2abcb674271326b08f0975dac7c17bc70c2bb0eb80aef1e145183bb641511425984328002986bf50a89844c893448c046427cc018a114ac99fb9a4d5fcbf9ab6565b06362e272da5e0bb78d9c9dedf8f6f080642414245340201000000791dc20f000000000542414245010166e906ef7e9eba0df82e5215522b27dfbb7ff9e9cfb5e68cb4a329cd6e5e39220da891b527b674e7e47805f4d4bb0a101403deb40e2dff5f1c29114afbedbe861a0b280402000bf0c3925571011a0510040d00000af5010a202243f93bf130fb7dca537cc1825717159139512a9bc7d635c3848af0a65fc0a112bc01d8c479815319121ae17e2879061de85eb792fa30b00bf365efb261ecffbeafca10438c8afb86a47f6e5895d45aa71277a2e269051481707004b564e2361ffda20fdd7b7310e2ba6cffa402b654100c677263b5d46bf123ec232ab0f3588029bb610806424142453402020000007a1dc20f00000000054241424501012aceb3caa0342781eff5cd80cf1700f6820d6d71bf9d
7d679d52b81057a0a26474b63036ccc885c219c541613d8494f47a9474b5dbb10032278b3f913c424c801a0b280402000b60db925571011a0510040d00000ad6020a20db8fea8c1a82feb981e935baa1a4b1d5b87fad03f15cfa40a9d341a2b8188965129d022243f93bf130fb7dca537cc1825717159139512a9bc7d635c3848af0a65fc0a1140d6645bab20bd411d81d63804b1db1a7d69b6ebd9fa03c48b05602c79eaf81369bfe1dda1f43481691b6212aeae4affadfd4b54cfab7e02594f98a4ce9f4ff15080642414245b50101030000007b1dc20f0000000086480dac7669ab3aeacd6f3c77e962be301fe5c6efebfcb16834abf29e50cf771bf12ace9ce3f1e97b0017144157a47c257220207406490a555ae3f3cfd5ff05a84c2af873f6503143873978e98961f4eabc2080f77c45ad6d065d58be6e9a02054241424501019cc6d652cec7f054320b9608dfcbde2d7e05320ebf3697c0154c8834f9babe0d4cad8a7bbc45e0ee9ca414aaa1d91e6af98c7c536db50391edd8d505ae2d658f1a0b280402000bd0f2925571011a0510040d00000ad6020a20ed77dd52a8f2dceadc8cd3f7c194bb8c72781c0726c02276b5bf2372b04acbf7129d02db8fea8c1a82feb981e935baa1a4b1d5b87fad03f15cfa40a9d341a2b818896518b17af4f793b5b1b6f094edbe4a3802c2395c9d6ace124645ff38f0e3191d3a913453c574e4ce8eb68d4e0cf669b31e05d83af82b31f61a2f48eab1d9c41cfaf7080642414245b50101030000007c1dc20f00000000f8b30a821656568e1c66e9183ec05c1627820c3d3648edee1effcc556fac86717658661c7e1978ddb6497367db7bec49bbdd63756932aec3b719d5ba5e4c3c09aeae4ace2ef06fd5298378c7ff30ec06f821b589d06eefdf9c640d81121ba2080542414245010102c69a1467ac5b92358cacb78202a6ee55c9f6c06c7bc7f4ae2b23c8adbf4575eddb68ed01777d96ff896c5a44859585d738b5e92e6565890cfce18db637988d1a0b280402000b400a935571011a0510040d00000af5010a208e309f167b7e0e7e53ff5f25a6c0a8d792f6a0800d609e11eeb6ba5f4265c12e12bc01ed77dd52a8f2dceadc8cd3f7c194bb8c72781c0726c02276b5bf2372b04acbf71c34a85349fb4f9d66c9f09d14d0d61d19c36c99c3c6882f66b82499101b28d42b3e67f48d0eb16c123523e24d0d5c47c722fd2a2e25091e6bc3ae8b3ed3c1bba00806424142453402000000007d1dc20f000000000542414245010160b508e46ba356790faede15a0d241726d06a319e88cc7e31c28423ce2f6bc13aff92a23a08fc07e4d65070e79416eced9b304e5446c03b24a85f0f6594146831a0b280402000bb021935571011a0510040d00000ad6020a207c990593b4a9f595a3a5bbea360531287994f8880e724fa62a4321d3bfa3160d129d028e309f167b7e0e7e53ff5f25a6c0a8d792f6a0800d609e11eeb6ba5f4265c12e209545a374939c79e3fd7a6bf4849b09e4a00d38a5366bb1d9dd06cf287d9892b2d9c4f22e061f7efc739ed99ddb5ce4de31fd19a1608af345b3243b49887da1c8080642414245b50101010000007e1dc20f0000000094c58d5e04e7338e760558d45923f19ddc232d956e2c57305bed3a238e86ab41a0917237f24a2274efe2cef44dfe252bdb9f912e528082ea86fa1b7e1a8328001facf4f6a07a5efcdc2f8055cd00cff88866b45058c78fd8ef2d315c8b3a350b0542414245010194147ef1722e8c61b5cfc01a6693154e732aec5ce689cadae029d5d10214ce1e7c5c7d64f5dfe94d75892fb5d3052bc83ccff599c8d8a53971039d63334322891a0b280402000b2039935571011a0510040d00000af5010a201d794413708ad4a52da8517123b9c919873f6066cf903800c6ba898cb2d0b7a712bc017c990593b4a9f595a3a5bbea360531287994f8880e724fa62a4321d3bfa3160d245d2974041eee45f5d628dfda86a98f3bc5640c14605628fba35cbea993176172d0f03eabe0ed013f6b5162666133aeb0e2eaae669690d187f18eb4a2c31a5b1c0806424142453402020000007f1dc20f00000000054241424501016e097b2be9fbf3f6f4e86598117bfda11e253be8f215dd62e83ec769594f7c74c569a26032561719b67803ce154325eb45dc0b5b5b35aee466aa2df217691b831a0b280402000b9050935571011a0510040d00000af5010a20bfcfcb1dbeeabf76c1edc73f8ea366e6c8cea3885a83058214a229f92658f25912bc011d794413708ad4a52da8517123b9c919873f6066cf903800c6ba898cb2d0b7a72892d6edc3f96041c2b6271f516f5054b837e6e61d67f99f0177a9d1d77e23d4e3b8cf653038f29ac18f6023172c25b31838daa78b87471a3fc467b2a9a004b727080642414245340200000000801dc20f00000000054241424501010a0b87e0038aa69f4fd0156a775dd3a3c7b1914b2
d7fbe45173f97db971fc2577905c677717056df9a066adebf419b9969e21c535929c7f5a6b70a58d36ac8871a0b280402000b0068935571011a0510040d0000 + +blocks_129_to_256: 0x0adf020a2083503a03488e849f6cd3c4ea3bdf0c2d9609be707385e294fcde109d64b3dad0129e025490ddb4f096e061a7e4c69761da48abb275c84d2e9b22ef29d60d7dd9085e8a0502e9626e8cd821ae4eed116e630d21c9d76eb4e42d6d76248c4e106edc8b826a55681911c37e0068eadde6a9b885e59d97b5f5ec82f092c85c7267fbb00ca7d1a3080642414245b5010103000000fe1dc20f00000000c427096d72423fad7f00aed3fcc5bff193c48c0e15919a70d1366390b3a74316a693c3c8d5127c42e90ebc61518eadf1ff8573d937a38604f3b054e32de6c606877fa3a3979ed88102b883f141f8783e3335ebc6bc10ec8055c2d0715f98cf06054241424501017a0c21f1dea58fa873ad13b224d009ad38d4d7028b8f90417c7d6afabbcd914ab59b2ee15e458bfd4c5c6926362ef5d5a47674c9d35fdd092738b040f242978b1a0b280402000b20f19e5571011a0614040900f9011a0510040d00000afe010a20a2b7c610cdaedae3f6a29ad58d52f6edb2b5477e9adb74bd9234da711108903112bd0183503a03488e849f6cd3c4ea3bdf0c2d9609be707385e294fcde109d64b3dad00902405e952b8d9c40ef701fa07fe630603c43eee4d45863c34519b74cec31af96bee385abfb579ba96820d5d7fcb02bcf7dcc9d874e54c4ae262a7bd552e0fff92b080642414245340201000000ff1dc20f00000000054241424501013e12f24a9c17c4be2a2882364489882d0c03550aa2b096b58fc172a5993a490991053cfbc0e7c205cd98c6bebd808591d166356fb2a468f8476d7eb42a56268f1a0b280402000b90089f5571011a0614040900fd011a0510040d00000ad7020a20187a56cbab0aa6ca28681df537b8e0226450c2e1fb9221c89cc47ae09c99556f129e02a2b7c610cdaedae3f6a29ad58d52f6edb2b5477e9adb74bd9234da71110890310d02e33635dba0f24cbe80fe487ae9c3a441073942c0f7c56c81b93a5c5da088f85828681f2b0a6073b438ff2fda5018a403d39d6e1137453013c5fa377afed11096080642414245b5010103000000001ec20f000000000ce76e14ee89c0a039f7bd52e5d4aea8b81f8e5b661cfcfcaf874bef92e466564876400fd254326579eaeea0d024660f02d1b22bc633109a82993714de1ebc0e229046683f69cde4aa4a0f506cf1f92fa9bc43081e14dc42e65b66bec2bfc305054241424501012671737491206faf1c31ac5ef7063920f612810671120f28c04c552cf636b4505f34115957ee1af1267399b5a2c5f4bec928c5767433b603b4c5150c9e5e89861a0b280402000b00209f5571011a0510040d00000adf020a20a8023029cfb0075289bdcc33a1a172677e06afa96d4f9695e6b6f05d097f06f9129e02187a56cbab0aa6ca28681df537b8e0226450c2e1fb9221c89cc47ae09c99556f11021a3b973a547351b48b37ac4a46d2d94653365c383b15d94bcc7baa1987385142dd3ad772a8ace0c8923549de8f31b37c00573b1f5473bed3e725a57abf1e9908080642414245b5010103000000011ec20f000000002cee6369ba45edd78f26563a437e4107ad8345c8250d48dea009b84d84ebae025e47621b4010344247cde9175a523594ae116e55de09f08723a72f0d8386ed00d336d88c017d1fe8ce34933248faadc839e262bbd7fdad8be0f04e2a0af92e080542414245010152f7ee482c79ee066118983e8b838f5c90422356a84f79dca898b0d1b5080273496fc77eac777262a3e7907bf7827bc53b30827245abd777b2691b8e0da6f1801a0b280402000b70379f5571011a061404090005021a0510040d00000adf020a20b68c19a0edec33708c64c5c6a8418b34cd5b211849313bc652c2b55c13f935e5129e02a8023029cfb0075289bdcc33a1a172677e06afa96d4f9695e6b6f05d097f06f91502377620df18a1cf6f19081e0277595963e88fa89a4b4f9fd69c8c51e9958530c2b5198e927f8088540d52567195aa5e015efbfa6a66f163fbc2e30515db164121080642414245b5010103000000021ec20f000000008030f6634b536977515e838e7636f5e8c796fbd5367429ff07b78dcf81d08567e0a8dce976378e8de87c39bb894d8290bd8e914fbae4098a26653f5602ed970c309bd4d3b9814ae6fc2780643d59aab89b5377b8ccdede92c05283a9e79035030542414245010128d99b3f7342c39e8b95d5005049429aec4a5191b9f9337ea45bef4f54673171b73c37ee74db5453a4463b96d944c3ab5814401784ca54efe71df3f25f4a5d871a0b280402000be04e9f5571011a061404090009021a0510040d00000afe010a20c249b12a3b855cb17706a3f430a173afff5b7c8fa1f9135ace63e02ee423661
c12bd01b68c19a0edec33708c64c5c6a8418b34cd5b211849313bc652c2b55c13f935e5190288dd75ee591f67f7ef8eef231d31e7ae41a2042727083389171de75d4b06334662dfd2bd231e345e024f68d5dbdb52fed76b83b22989187ebf51893dd04efa0e080642414245340203000000031ec20f000000000542414245010196f281f7b186161840a2f4590f5b387b6b67dc52e42cdf4dd0e99d32e1eb230fd9d3b52d38ea52cbdfea6c35f66c69bba8e898fa3f5e0b085ea5602f90fa9c831a0b280402000b50669f5571011a06140409000d021a0510040d00000afe010a20a97925e34a40440571f52a4e03f503e6b4c2a0b3b78088b91bc2f78ec1158e6812bd01c249b12a3b855cb17706a3f430a173afff5b7c8fa1f9135ace63e02ee423661c1d029ece8c8337d9776333f380db5757eb021e661f6fbe148b94806cb8ae94e50d1a88bb09f8f3d22b8daf4070406d88df3051a3952d2c6fe97927c10a4eb13fc5de080642414245340200000000041ec20f0000000005424142450101803eb89a359c8637d23c1f4562f0bef31be16d17699aa56ba7904e3c7207f40209f6d135429df5b636ed4e47d347cbd9bb342be56ee65369df86885e7fff3f861a0b280402000bc07d9f5571011a061404090011021a0510040d00000afe010a20d66ed8e5def58d6b1f646c15275543aa9d11cf440d2b2e2acd1b099d0a3aadaa12bd01a97925e34a40440571f52a4e03f503e6b4c2a0b3b78088b91bc2f78ec1158e6821028616e185d9de0c13bb2b5f5f1f4df57ab826e5b9a22e147c4a4cd0423a52170b71c2f39b50405f6d6671641ed3b44effca351e6560a375bc9db45f5a02cc26fb080642414245340200000000051ec20f0000000005424142450101aafc0f9f0d05b09547a78365df838a58add2c60713192db658414bc60f3727433836aeaa3e641fb3af67b9ac512c318f3995a0551876ae53b1aa1ddba804e98d1a0b280402000b30959f5571011a061404090015021a0510040d00000afe010a206ec9688a45bdf2259d68627771305f10c19fa2fbdb0c1ee44a55b840abddbd2712bd01d66ed8e5def58d6b1f646c15275543aa9d11cf440d2b2e2acd1b099d0a3aadaa2502b316a6ebd075b0793aa331015f2442bae7065f7c7e6b7abee7cf61b79d67a8a5d47c5031545e892684871f9e827f449fb6a2a90d5f584fa843bdc02b36b718ed080642414245340201000000061ec20f00000000054241424501010e10746240ee730222423e067f2adad13429cf6e4ba5b17a73079b3c02357866dc8e5841fc250a3c43288b29b1b5357c9ebfac918d127dc517a4bcabedcab3811a0b280402000ba0ac9f5571011a061404090019021a0510040d00000afe010a20c78d3d270ed12ce375790fe7693d087b827f5a61f91227e8ef93ecd9f5547cfa12bd016ec9688a45bdf2259d68627771305f10c19fa2fbdb0c1ee44a55b840abddbd2729026b286f92bb23acbc64870e8c4a265d103b801975a216840a2b614ccaff6f0ab3013dd8619105d908ac11eb23d4bd9d05ef334b5aabc336a38274641d41941024080642414245340203000000071ec20f0000000005424142450101ee753ef61934e0112636b28a5e084b821470d6a45612e29cbcfce7b362e10c789e4bc9722c88955dfbe798df686bd633dedae4d2bdbb023bbae2269f5cfe8a8f1a0b280402000b10c49f5571011a06140409001d021a0510040d00000afe010a20fcc606bb97c036294a8246c02da89ad228115d9f4dd7b0659e2f8956310112b912bd01c78d3d270ed12ce375790fe7693d087b827f5a61f91227e8ef93ecd9f5547cfa2d025c7e9372d535b7c6f31450229e96aaa0607873addf10a97fa57b92c111725c0fb9fbecec8e8e0469788590afac03badd3d126e6593c6100dfe85e28ae0c4debc080642414245340200000000081ec20f00000000054241424501013e8d957f0dab36a00ec38c44ccd1b003270a92aa0a4ab08b18f7d5f161748536d4abd3a001978c9dc16d524c2a29640ca66aceac9b0565529ebcef88d96c8a8c1a0b280402000b80db9f5571011a061404090021021a0510040d00000afe010a201edb541e7a15aeb9c6435a07c570d310259d713e808d9ab069874b9e57c5b7ff12bd01fcc606bb97c036294a8246c02da89ad228115d9f4dd7b0659e2f8956310112b93102d13e8507c2f9b605bb39d998792b3d3fb21d230b2763b438a39c5ad03d0588aab3f77f52439610e3ac25e5b254e4e393a3bd1a5dac23cf1c7cdf4c9c47c6a46f080642414245340202000000091ec20f00000000054241424501016a602b14eb46b3a6fc931baed025c7fdc1a4223129c22a62849183e919a7386c8f04f66a3ec6187336d8580ffaf50d930a9623d190e1860f33f5b134e8122d881a0b280402000bf0f29f5571011a061404090025021a0510040d00000afe010a205cf199eec39b0199627d
79bf7ca2662f8569a003f7a98f695b561a25bdb99e2712bd011edb541e7a15aeb9c6435a07c570d310259d713e808d9ab069874b9e57c5b7ff3502f735256645f33a34726295003b4b43678b6084cfb3c417bd02bb0a5771af70c8fcb5e2849ccd8c8cf79418a159e0f7b462ae1154296be39f74972c131682544f0806424142453402020000000a1ec20f0000000005424142450101e0951ee62279c031d05901278339d6b3c2ca052b99b587ddbeb9c40469608129576676812235f625acf3035899212bab6cef25694bcf3461a62690e4b832b78a1a0b280402000b600aa05571011a061404090029021a0510040d00000af6010a20857fb66c897b5535c83c535a465969da3de162c90b39320de3ef0cd0482fcb7812bd015cf199eec39b0199627d79bf7ca2662f8569a003f7a98f695b561a25bdb99e273902e12e428f175451bfb569df91b8964ef7fa7a84a4226c18193a425c58b58a8e7f3b56af14282408b94d04a00d14ae7dbf86b6ce1ccd3a90d1472c9c479037ccec0806424142453402010000000b1ec20f00000000054241424501011cc5c3d3aebd11f4fbf42367a5c685ecf0e23b5e1edc90ac2dfd47abe90e8e7b64f42523aeb55f09bebfc1da440e0fd2d05138f169186bc0cb0bc77f5ee5ac8a1a0b280402000bd021a05571011a0510040d00000afe010a200d9cc69a5e305f7901c42749768bc905632c1c4fa8896c4836bed8b60f26c8dc12bd01857fb66c897b5535c83c535a465969da3de162c90b39320de3ef0cd0482fcb783d027855e4803584bef513629947f45e40633a5d3015593e5f59c91f80cab0ea874436b93e089a773b7b28f2235b8d1e565927da1e4abae6a93d6597bb2cddd9ce070806424142453402000000000c1ec20f000000000542414245010108a74fbfdae24d2e8f193b06aecb41a8381b3ee5c04230869a6708545305c545b79b1c5a860e7ae098d69dbba9c591aaaa040d139b62f3545ab67ce0b231e6811a0b280402000b4039a05571011a061404090031021a0510040d00000afe010a20397dae292f1f3f74f88198a3411045a374c4bc785986fd4d03f6312fe43241e212bd010d9cc69a5e305f7901c42749768bc905632c1c4fa8896c4836bed8b60f26c8dc4102fcb84f718ddc8f79073de4822ff3aee83a7f6132b730d94875a78f0bfdda41102aa02d98eee530a2c0067c47594c97c91cf14967af678f7cb9c3c55315b0ed1c0806424142453402030000000d1ec20f0000000005424142450101161640d04cdaffee0cd0bac16a69e2322520e8019a3a73cfb9712076448c3343f4cdb04b1f9ec450a4e4cecb777784adfef4a197c83f7da914fb26c6b5f600871a0b280402000bb050a05571011a061404090035021a0510040d00000afe010a20d174bce3c4abdc9ca5e6b44e50b971bff73fa76e23f1d8be490df34bcd7e0cb912bd01397dae292f1f3f74f88198a3411045a374c4bc785986fd4d03f6312fe43241e24502cb6449fe9ca0384e3d7441bbdb5f9145940a7933a99812a3e37b34d6726aadc00eefa2d96d70a48367271adc2e2ef01a49df580767886f29c463f0bc492ccf730806424142453402000000000e1ec20f00000000054241424501017a3dcb5acdeb5ca077ea68388bdec5aaf5fa8f81f33bb33f83f350bab76dfe6310ca66ce09cbdd5080d0d65544074689c0c1bfb375238f1fbdb7142e3f70d28a1a0b280402000b2068a05571011a061404090039021a0510040d00000afe010a20952c8178f018ed644a28fbd6a04b7ff57499c37895c5824e286e1a83eb10b03c12bd01d174bce3c4abdc9ca5e6b44e50b971bff73fa76e23f1d8be490df34bcd7e0cb9490244b966fbc42c0640e0468a57fed57f7cf3ab5ca3c620a3a43ec0baa0df8e9de1fd87076032fc66d47ae2567c7cc22fc2027e621d81b1d9122070d529f44dc25a0806424142453402000000000f1ec20f0000000005424142450101bc75428fe8124777ad60793731fbbb3d52ff999b263d56aa6e61e41b5238a37b5c10022deedc1a9ffda0ad1c96d7b3d5d1aa9631295bf85aca43d126ad2e33861a0b280402000b907fa05571011a06140409003d021a0510040d00000afe010a20ed5bf966454710f0f516c9d6c0c8ab273913ce0c5e049f7e3a57b1202450699d12bd01952c8178f018ed644a28fbd6a04b7ff57499c37895c5824e286e1a83eb10b03c4d02bf2a9cc4f0ff60fa647836933ed92fe537ad7166425999955e58aa52fc65d6373ac105617701d234127d1397044652da85787dc6441fd19d1eb687d8e1d7440d080642414245340203000000101ec20f00000000054241424501015e1ce175afd9a1c7da1ecba6bb15de51c4b901642a3d6a41e23788430f235b05d90e02d033c24e7d83d15bb8c30a1d560b7f2346755e4ad6c03de084af4886831a0b280402000b0097a05571011a061404090041021a0510040d00000af
e010a20c618380c4043f406399e947e89773b1e279a15cb48d9358507b97fa15d5a9cdb12bd01ed5bf966454710f0f516c9d6c0c8ab273913ce0c5e049f7e3a57b1202450699d510274b3626b2401ac4021ae702be838412472eef9b8225e2a695e3a1fe4b638b64f4f2029feb78669b5446509b4dc5645e3708b3a528bad8d160f07212e849cfdcc080642414245340203000000111ec20f0000000005424142450101146c6671f09ff9dc9999c2b5aa23aa80187ad30c3ce0874d529db775350ff43344d7f6b0d668082eb31b583fee73ee16ca0a0a261557f0e06842e49776e3e5851a0b280402000b70aea05571011a061404090045021a0510040d00000afe010a20295a6a3adc49e22b8f09d66fdde7c1a35f28947ddaea98e57cf53e3e22bf73c812bd01c618380c4043f406399e947e89773b1e279a15cb48d9358507b97fa15d5a9cdb5502dc6fef7da42373567a1149503e7489a129680934077bb66f7c80fffa8f2475a6815d2fd75027dd27de916bdd2935121969d1c41c3966c02b51e3714c940cca48080642414245340203000000121ec20f00000000054241424501017e6c06d9dad1822e436555c1c1fafb0fc2c22940e1f6384265a833ac2d132a118f6ae42a463ba2f52df5201b4fd8dfb2e25c1d79c12c75b6f6785bbb86886a801a0b280402000be0c5a05571011a061404090049021a0510040d00000afe010a20e65d421e8d992c181cc59fd8a8d6405c65197774a6c074d3c70c81059ae36ad612bd01295a6a3adc49e22b8f09d66fdde7c1a35f28947ddaea98e57cf53e3e22bf73c859029f90a4bf30bb2e4909a12056bdb21ff1eeca77990160cfe99309092a9164b5bfbd309d125bea77e36e72798ce5928a1d32a215f7e637c9819116215df14c9f02080642414245340201000000131ec20f0000000005424142450101e0edeb4ae2a13383e2c880a02d52df11abc1f1f0eeb4d216b404d279c733d162eee17b8a4b2d0d44697633dc4468fc0b4f4d1737c22ce6f1383c7c823049038a1a0b280402000b50dda05571011a06140409004d021a0510040d00000ad7020a203f0ef11835499458cf37448c2b0a4c119a4be93ab09cdbfcc59fd3094a72ea46129e02e65d421e8d992c181cc59fd8a8d6405c65197774a6c074d3c70c81059ae36ad65d02dcd4616bdcc362088a3afdf46f4e95d98fd49fbdbe695fb6da61a854c8580f0124919a46c3ba58e0854111e22b96e1f0b4e41dcc80096401e8ad4c20a7ae4edd080642414245b5010103000000141ec20f000000004e565fa9a175e60af9ba60ed433f75247f91fe6f25de5a0913fd3b8fe024bc191ea521a35e9e85b4e24369e211b8bf528af2f953a1293fe857c1cf5ea4d19606545a0cba437d41f6aef310b06bac8d58e3ea4f818b193ec025faae9c996a7f09054241424501017a629e13f4d2a4aa2b07a00146d1bc8a86c715b175f31e674f64741c6c85b946d88256d65bb960059f3a7ae270b4ef03dfc0705f7faa19c718da14a218e8d3811a0b280402000bc0f4a05571011a0510040d00000afe010a20ec374354c93eea3d7caec8667e17320cb3635e724f983b505e31a123af21e04d12bd013f0ef11835499458cf37448c2b0a4c119a4be93ab09cdbfcc59fd3094a72ea466102e56513f66d0c1880755cd9f5af11234557006b71ac547f2bb8547eb47bb8012ee93ca0f7c76a054cad272c2637d8762b58c1060979bd79f4a79de23154d83085080642414245340203000000151ec20f000000000542414245010166998b540220593443bd7c30ada312d2bb41f6e28019b1e7072f56a72604b552df2d536e0e8a6fc2905ffac20b65d704b0e620195e7be9021f795641e2db618f1a0b280402000b300ca15571011a061404090055021a0510040d00000af6010a2098ab15181ec9c48cba023f344145462a792369702652bd988681191969946fa712bd01ec374354c93eea3d7caec8667e17320cb3635e724f983b505e31a123af21e04d6502ede7556baa33b00bfc4e8c2e2210e8a1f3de94cd47a5e912aa897a665e2cd93b02fadbf7fc0f5a6a5d3dfae44272d9f7c28ef03d1a7a5f37c9f0e0de3bef30d0080642414245340202000000161ec20f0000000005424142450101f87c3727b8d5a8b8af709b9795f2a051dd764fe4e3e795c7837656cb90541918ea0db9ab5c876392b0413f7abe4f10a4b4fd42e2ec0323038d319e4bcc5962821a0b280402000ba023a15571011a0510040d00000afe010a202a2683f913230e7db8675205ba0e5e658d534cf1c6aa47eb40f137e52dc8199212bd0198ab15181ec9c48cba023f344145462a792369702652bd988681191969946fa76902afa2b948fd6094a6c47767c7585a25ae01c825dc93f0003670f2668c0ff00f68ae21c32cbf128b5552b3568f4a6cb760f8077300c5aaa324b5506f0f9d41c416080642414245340203000000171ec20f0000
000005424142450101e6990939a676408d07633883aafb9ee20a4521714373b9857cd2efd90d5c716a0c3b414b6f7cd8437e3f247f9987940db8f6570e24e89b2827db138408dece8d1a0b280402000b103ba15571011a06140409005d021a0510040d00000afe010a202ce351a3675d291245c25ff8b0fb55b9ae21d2a550551faf65c55856d74f00aa12bd012a2683f913230e7db8675205ba0e5e658d534cf1c6aa47eb40f137e52dc819926d02ad60b78978868f3717d1a4c9d99aac7889f9d90b2bca9f1f143019d3f494f8c5ba327baadc537d515413c3990427065de56da5591c0d6228e8d0b9edbe35e664080642414245340203000000181ec20f000000000542414245010196d035fc73d7a132ace124a669a8ef4824295a9aa29a9e9d5b8e3f47880e77794807d9518c99c516e8314e920c4b5376c6ce8f2b497592db131e1e74a6989e811a0b280402000b8052a15571011a061404090061021a0510040d00000afe010a20a69b7623dd7715848cedaa3c673eb5a939346b40f41df558aead140ec7090f8712bd012ce351a3675d291245c25ff8b0fb55b9ae21d2a550551faf65c55856d74f00aa710232ba6a899265f731f4a3f4a3ef7d249b90b7eda2725b10cc2d198db78d32bbc4317b191725d8825c1ed4ca3220bcf045d0e60b26b96acfc6264fdfc52faaa48b080642414245340200000000191ec20f0000000005424142450101588067e6e2d94684f5ccc55d62ac550a7a7b407f22d539683f47dc838c2de30989b275a9d110d7a38dfd3d741b6f5fbbc278da51596167470427cafc67ca748e1a0b280402000bf069a15571011a061404090065021a0510040d00000afe010a20cdd360dcf7f529bb3e8798635965753e4f7ea52901018c2dd70e628c93a6d7e112bd01a69b7623dd7715848cedaa3c673eb5a939346b40f41df558aead140ec7090f8775023b7b6def6a6a94606c2f17e042c82ec2f86556c2170f564ec3812650f1c7c61159dd5f4b4023d262057da96a0cd03f3bb04f4d5c544c960ccb6de9cfbcb85fa30806424142453402030000001a1ec20f0000000005424142450101ce25c40031ac7c8d53c96cec20bd271d82edbb9d3aa8382e307b818cc3f40a1e652357b8992b36727e2336bb3c5764a27ec7a3192e534ff5693e495a9623b98b1a0b280402000b6081a15571011a061404090069021a0510040d00000adf020a200126fdb11d565cb6600a70ed907f8aa6e5d928ab2eb68f98059303bf0c93f8bd129e02cdd360dcf7f529bb3e8798635965753e4f7ea52901018c2dd70e628c93a6d7e17902521722d05ed11806708f6644a5108fb5f4c05c1919b1d2fb8388950c88ffaeee8493fa665ca36501837119bfc12cfc67fbde4313bdd79091c33c3e585ced3200080642414245b50101000000001b1ec20f00000000f22e2f9dfcd41b6350509fe930efa7ce85bf29357e5335a761ed96cadec16662dbce77bf83b88e670934c1b7990e5c8a384e8c612c6e35c48a9eed328f53cb00f3145a3bda4e83b11281747514f5254b4b8a8d0c28ea8cd5b8b3584e7e457e0e054241424501018aa6aa712ad978fc37a79dbb77c226c7249cf9f9433846de38151284e416687f3df6b113c09f2460b10d5c401bffcddb4a47a2334b568ef8e6fee2b18a674d8f1a0b280402000bd098a15571011a06140409006d021a0510040d00000afe010a20de75ab641f9f82393d65fe2a51c54ca86414baa537916d8bc3ebb8b9d4515c9912bd010126fdb11d565cb6600a70ed907f8aa6e5d928ab2eb68f98059303bf0c93f8bd7d02b075c0f6afbdb654653f32a75fe3fe8b9a602ddbb2c9a20a202f180bb33e4a9f9d99cf71a8fd9bf12cd93401deb680c665450568758fe9a973cdbe0cc76fc5330806424142453402030000001c1ec20f0000000005424142450101be7e15cb0631134992ce9bd8f37b62204bc71f57f7213ec619e9951aa6a0f84c1d8f7c534be9edba6ca5c90e445a448f5a927e35d186b58390bf449783c596861a0b280402000b40b0a15571011a061404090071021a0510040d00000afe010a20c5be9f31f33d92a915493155037e6bd4058114e4924828ffb46397d3010fae6712bd01de75ab641f9f82393d65fe2a51c54ca86414baa537916d8bc3ebb8b9d4515c998102cac25f68fb028bcf4ad1fb5b1bfa8b9b3ee44dd91f255d4acc1c986d3683d2c9b7c267ec97bf018fbc48d714ce69b4dce89a0f9a748009eee471059fdeea3b030806424142453402030000001d1ec20f0000000005424142450101b0d5360fc62abefe10302af30027d946cdf90103b92abe98af6326c3be536341a7200393963444355dc1f0ea4782d15b302c6991664598ecb1b4640f8f6d7f811a0b280402000bb0c7a15571011a061404090075021a0510040d00000afe010a2097a82b71db99f458ce411ed608da74d4b9166f53178b8e5b45ac98bfd5de418f1
2bd01c5be9f31f33d92a915493155037e6bd4058114e4924828ffb46397d3010fae67850238b406e9d19542b0deac5622dbddf3618202c95d6b8d5092aaa6901f898d9334e2c033526246c5108e3c2e3f526b371b4eaf52f3482811c2e28b236a607739f90806424142453402010000001e1ec20f0000000005424142450101da0518e23e5819a5e5007e3f912ae36f82e29b0ea87200753cd27358037b9358d90c7d937671ee95d8237aa51558dccd608405606113614c8533d5eb0f5bed8a1a0b280402000b20dfa15571011a061404090079021a0510040d00000af6010a20421084cb2218dc5f589424acc2d2f2e1d61ff647ff392a88dc8145cb6a04a62412bd0197a82b71db99f458ce411ed608da74d4b9166f53178b8e5b45ac98bfd5de418f8902185b487d55ee7780df8f7a804514b94adcba8c0280ca97edb3f080a65230c19d8387e2c40d0e2436332b67c5698ee921e2374e4f649f3633b217681a2daa08e40806424142453402000000001f1ec20f00000000054241424501018041342eed636161a563d49e469dc871178b7ff2f229cfa7311556d28b945212a845e0380c0441107e9ee20d17e4aaf4c22103cad00af142974dbf4dbb0b83861a0b280402000b90f6a15571011a0510040d00000afe010a205b26c0b4d0f769fed9106bb703e813c1e2ee9d0121ed40f858aaf9be2d21fab412bd01421084cb2218dc5f589424acc2d2f2e1d61ff647ff392a88dc8145cb6a04a6248d02a686e07c059faabddf2a16c1970e7328c69ce7684b748de502e6bd027685a97871800ad36bd85e6a6ce95502efa2141654cad4f28f1725174f0cd18b7f0f9876080642414245340203000000201ec20f00000000054241424501012e8a2f0ffee8695cfb83386ded146192392fffe3a3f8cab3364129168fd4302e1187eabe356f6e10c7b6796ed7794d38bd9e29c9cf661fe1f16192264b8d41801a0b280402000b000ea25571011a061404090081021a0510040d00000afe010a2096a6fd83499e2fbce468f0f3d71ddcf79c497a9527217a4f7db3d011d21bf54012bd015b26c0b4d0f769fed9106bb703e813c1e2ee9d0121ed40f858aaf9be2d21fab491023269f822d010e01c06411b46eeb355d3c8b8ee0b83e2f8fd8c73fcf6b901e9a8838b5d4f2a7dd32b8360538b72a952dee13c48e56230cc8a448be54d0913b6b4080642414245340200000000211ec20f00000000054241424501016c38d07dd1a1e4d352acce9c37ab48d638fd3fa6655812695d69af477a7b7b2a4816d19a8839a15e80fd60e11f41a26d384398f4af31519d5224e2f7f711be801a0b280402000b7025a25571011a061404090085021a0510040d00000adf020a20f16f175471eef7687054c7d44f22b0695ed95ef8311039212a2415c9498a2034129e0296a6fd83499e2fbce468f0f3d71ddcf79c497a9527217a4f7db3d011d21bf540950279cc55296f3390e480b48731504b1a37febbab606502d9c602ef9143702395da6e3f1913189a27f78b70d1298021777814b50db3917943a5dd102a27b4e2d9c7080642414245b5010103000000221ec20f00000000345ad8bb12d6136416c97da091b37ae0105b62ea10a871edf038c6a492dc8f1bed2111c95eb8ade7d4408c2fdd01bb7184c06c9723710e64d0f7bd07f4cec901f492dff95b1cd59761eb8ab53adf7bad0ee11b981677d732be7ecd06d66e130105424142450101360322299b5c978540ab8cab810d61b2df916960e98132a5b59165baccdfad6618837c6af6736c7062a344bf1840213dcda3cf76955b76b585df0ce8ae2e938f1a0b280402000be03ca25571011a061404090089021a0510040d00000afe010a20059e46534a14c99a7b598c0726867df331ee1022a8619395922285bd345cab3312bd01f16f175471eef7687054c7d44f22b0695ed95ef8311039212a2415c9498a2034990203f8a63c490ac61d2c56d1d57cfbd3e1c61aacb586612aac587ff0c8a6e46ecbdae9bdf935d57ff87973b9f447f8b9644654d3782665f76f46f79af47ddf2848080642414245340202000000231ec20f0000000005424142450101a0a34148ad9d7a83b5c06ead7efe75ed6346686f7e344ad2f98a56ffba67384a34c9a65a8c9774bfc95a6067a70eb10f73ffb4b85b8b626769d3d5924fec938b1a0b280402000b5054a25571011a06140409008d021a0510040d00000afe010a209820483b546611422c37ef45b3f4a0509d28d7964419be36d92f5af93c0b268212bd01059e46534a14c99a7b598c0726867df331ee1022a8619395922285bd345cab339d0216965309f8041fd895c7e7e41b664af9524f5bce3c858faef1ae3793e5b292889c8c7158ab80fe92f6c2b92805921bda8d41756a9c0e2c80fdb6e0cc2a438b43080642414245340202000000241ec20f0000000005424142450101d09e62831734f089ce1780500beef4e2b2fa21
+[…tail of the previous fixture value elided: several hundred lines of raw hex…]
+
+blocks_1_to_128: 0x0a… [value elided: one multi-kilobyte hex string of concatenated SCALE-encoded blocks used as triedb test data; BABE pre-runtime and seal digest markers (0x42414245, "BABE") recur throughout the dump, and the value is likewise truncated mid-line in the source]
86fe70705bfe75e657028e8a85d03e6f7197d02a7f415928ae71df71c6dd5ba8ea95def9a1f64890e84dded6f117b1cfd8eb26653080642414245340200000000e61dc20f00000000054241424501016232bc9d8b3c9e6780b6d19245a7a66a122a7bc371c2229b72fc55d3c824b20069cefeabd9ceccc024493d5e569f8e2a3a26a9cca9a837c37d60982d2f9b2c881a0b280402000ba0be9c5571011a061404090099011a0510040d00000afe010a206df4edf522a5da6a4039a0372e05412d67d34967128b1abd7e92db51f339749512bd01917e733cd736eb2052dfddeb4bc6593d095482bcac594437f05e668b3230d356a1010329b22072087d3f074741836e3bd2546871f0ad53ec38be6b5f0d7a26159dd13ba33fcf45a1ed93968f6a921349f811e3ae0987c320b173fb7ba22a174fbe6b080642414245340201000000e51dc20f000000000542414245010138bd108639f7335d310e7c4f33d7df5cd0f5ca8ec029ea3d91e13539fef37d6e2e521a4130d722219b1687064f9d922ff06219eb2f3c2a2e91c1f35ebbb7cb841a0b280402000b30a79c5571011a061404090095011a0510040d00000afe010a20917e733cd736eb2052dfddeb4bc6593d095482bcac594437f05e668b3230d35612bd01de8eb69db612068043b3cc39d4a8e61f88630439d807a61ac7235bbd7b58fcd09d011479614b83f6aebf4990593482abfdc597f70dc6757032931daed578ff6022ddf6a079b99612dc29ccf9c53dfaa0004c009dbe3e3e4cab6f467140d2a26a7364080642414245340202000000e41dc20f000000000542414245010102e49d5817c85bfbe6eb6945ca04d555452f25d10013e6adccff0a91aa5cfd57e1f0e8b6dee4c049d36666416ead607b612285bcbdaf0e91494a484639c27d851a0b280402000bc08f9c5571011a061404090091011a0510040d00000afe010a20de8eb69db612068043b3cc39d4a8e61f88630439d807a61ac7235bbd7b58fcd012bd012a35aae382d969c55a89e84be1e372f7cec58458899f5d0c64fb3a5d1be48871990120faf342490665fe158c77e5f34d3b63d9098cacefcae42f2fa660ec5f06437a1baa77c09a9165010b24b9398cc27b92d325d07301e17e1f3daa5b8104b0ef9f080642414245340202000000e31dc20f0000000005424142450101c04b700eac8ca79880d49b59893c0c56160f6bb27e715a9b406a56b7ec63a869fad98937aec8dd4e1b608c5bb08d8be17398d097d77c4cbca164a258fcaacf8c1a0b280402000b50789c5571011a06140409008d011a0510040d00000adf020a202a35aae382d969c55a89e84be1e372f7cec58458899f5d0c64fb3a5d1be48871129e02839cce3cffdc40fcfbafb6d02983ebc4941c2f24ee4de12417c9e70e43de3cc49501f795c017d3d081df592f732f43f192ba5ccbbef6904036e4e94480062552c1e326f72af6720afa64300562ff0af1f8b658c52d1046abaf8b5f98a5936b93deb3080642414245b5010103000000e21dc20f00000000801ff805d18ff1783d1353ba857daf9648788e2dbecae65d60ec3cf5483da05cd1bca0825221a18ad2b95d285696629884049e36be67dacdcaa594d4760ddb0913a4bc6269e040f58079b2f1dd3920fb5bd48ba945887e3f1802e77fdd56550b05424142450101fca5c8bb13bd2a17a08dad18a03af0697e960e747b5c006b71506bb84cb36f0d1f8d6b114f6be88e58c97fddb388367885bde8a8054de94c6f0c72ef8908f88f1a0b280402000be0609c5571011a061404090089011a0510040d00000af6010a20839cce3cffdc40fcfbafb6d02983ebc4941c2f24ee4de12417c9e70e43de3cc412bd019997604e7eb947a90b2e76c03c1c3983eb76ee03b106ce8ee584095bd2ec674c910137f305669ea4b93df839f7bb5012b7debc15c3044f99b1f1313bb8a6e02df257caaf7a2b59ca57736dc21d64591b3ff7ad045268f3ad19b0d34caf23736b31d2080642414245340203000000e11dc20f00000000054241424501016edc8b2d51fd6862e1f15e9bb2571d1304a45e153e916a8ca967a80501c59a5a3e3ee6da075cf6767ab8d8df8e8cbdbfe775092065d935855f9154ea1db7f68b1a0b280402000b70499c5571011a0510040d00000afe010a209997604e7eb947a90b2e76c03c1c3983eb76ee03b106ce8ee584095bd2ec674c12bd0181c54d95c95698646000c57d7b420f4b3fba64f96edf8bf950e4de329aab22fa8d01b33d6458552d17b7c0610015f9444bfcf0ff97415cd86e2721003557bcc24be00c87cedf276d0b74cbf9399719e33c9fe4fc32d0d27fd0dbd9c5bf47f620b93a080642414245340200000000e01dc20f00000000054241424501012c4b294f2f7095a226601e88c5d08e478e024b053dcc933d925747468a436039d867b5c162a4e50804ac053b90c69eae31ff1d4a21a56a0844665630f48a74811a0b28
0402000b00329c5571011a061404090081011a0510040d00000afe010a2081c54d95c95698646000c57d7b420f4b3fba64f96edf8bf950e4de329aab22fa12bd01c799d9b854c14e0ca3dc6e46500ca59b153cc80ace003a75cfc389098119a6ba890129fa01d98f5ddf2bfabf4bc98d18f79efcedee753c242b4c42f0048e2168ab4a6162251b2f2adf646e99ba55ab087c47eccdaa9c9b569988fb07a6e3a87d8134080642414245340203000000df1dc20f000000000542414245010150fd5db0e26b1c33aaf74ff85bf3e2cc5b29516af41b63a74703f6e34f730c2b66b82709ba5b0f6f4123e38eeb121f1cf314db0427ddfacd2273f8250adb99871a0b280402000b901a9c5571011a06140409007d011a0510040d00000adf020a20c799d9b854c14e0ca3dc6e46500ca59b153cc80ace003a75cfc389098119a6ba129e0291cbf9f4bc11733a55220bf591fb3d40aa02975948fd7818f58a02a3e466f4e985015e659d0c486ea9cc7e4a8af322a19eff1db40d729d1e573f246817cb4a019194fe7a0e6ddab725978911e792f8a4a1bdb6cd3741c7a02c3cecda621b682de759080642414245b5010100000000de1dc20f00000000d666e7535232867a73bab0212b53a5f00672f48f3877aaec6773fe0ec350cc0dbf149a413ec4a2f4c566d9e280254775ffd9ca2266a1b167d5594d0940f4dc01df6d4cf5a42f15740e80a9f5ea2d395f55b16a66abcff7b9618e5e8119efb2060542414245010170f7c7795ca7de2e9ed489b4ba48fe320bc5cbc8f8516570d11659720ead426553716f30255610206ef7ce8542ae8cc2d3171a4aeb100f810023d0a63da3d38a1a0b280402000b20039c5571011a061404090079011a0510040d00000afe010a2091cbf9f4bc11733a55220bf591fb3d40aa02975948fd7818f58a02a3e466f4e912bd01773d125de722d7c002cabcbf54a2cc85bb736ca628bbecbe83669be23899b9ce8101eac3606e2829baf462420acf2ab3f834d89a8f0a58f789d881a0f7ca5f789eb9f4f4e2a29982666aba90725fac9f0ed33d8975babb0cb7f3df635aeef9311833080642414245340202000000dd1dc20f00000000054241424501012a6d97d13ac496ec8d996f637e6c7f2a3cd97899f67215bc71944f0f3fc8245cf804f7a3ab8d37824cc0d7ea8bd8df034e791816ffa12d8a883f4b691bfefa861a0b280402000bb0eb9b5571011a061404090075011a0510040d00000afe010a20773d125de722d7c002cabcbf54a2cc85bb736ca628bbecbe83669be23899b9ce12bd0104e515e7b27d49c1b87c30c177b8006a9d3733fbdbc27c31f29cf6aeed18f0577d019dbc01b7b72597b1830bcb6c8ae575dc1e60e2f698860a8884362eb1cb014185ef3538666fd72acddc00143646b743be19270d1571a6ff100675e3152345bb52080642414245340201000000dc1dc20f0000000005424142450101264d74065638a4dc5c4eb74e887415b298dcd6755c2b6eccdcd882856de7073150e056750ecdfea493c1af2d799646a912d2d6ff0ed91d5234b6b40e47684d8d1a0b280402000b40d49b5571011a061404090071011a0510040d00000afe010a2004e515e7b27d49c1b87c30c177b8006a9d3733fbdbc27c31f29cf6aeed18f05712bd016005ca27b69990801e0ebead99eb7d68a92a9579c6cfb3eb13c50b3f4e717ea479013fc6fd51121c7122918de6bad6b9a8147547ee6c9c966cf9a1f5418614a4c001548346a79c2020ee9fe0f00adee62899649c6b5e42952f1c50e7b0751e35292b080642414245340202000000db1dc20f0000000005424142450101e0cbbc49f3e52364dce72c702379b221365473abffa2cc5f8a1c813cd2f0cc19f6b47dd566c606ccfb03ec2ae9f77e37f30d1189849b3a05c9dc4958a731318f1a0b280402000bd0bc9b5571011a06140409006d011a0510040d00000afe010a206005ca27b69990801e0ebead99eb7d68a92a9579c6cfb3eb13c50b3f4e717ea412bd014ed75f1d604570d73ba2cd67e3f7f8521035c18a53a93604d85fc20523c715807501ee177441625113e8abb2b5e990afb47072abe3fac946c024d3dd9833464c72695f993385cac71dc4dde2e083a182fbe9afde6c52c7153d7ab4912f9329af2cc7080642414245340201000000da1dc20f00000000054241424501016ca9978a6102f3f5897ed388a7f8fcd44fd80ba54ff5d1fd2fdd932ffbd5973101b311bac57cc1764af62b0b82285be3fe283551c6cd04ff41bddb30c16fc18e1a0b280402000b60a59b5571011a061404090069011a0510040d00000afe010a204ed75f1d604570d73ba2cd67e3f7f8521035c18a53a93604d85fc20523c7158012bd0174fcdd1f56d2f7cc422bbebc6c683f466e1f8388b05c0a79030e73145a7fe4ed710102cd2d593c9f88715292838cdf4eee6a683d3a167e74ca2c91b7dba74ecefaf6bd7c55797d8c85d
58316f91a90482a40b380a9f1a7b24302cfc5d9155715a41f080642414245340202000000d91dc20f0000000005424142450101467e2dee1119754ca83b68396299911f029a8e26fda7458e706d30cfe9124906738d8e11cca700d16dfe207e1d40aa53ed5b501f30c17dd46b4f3a0179b45e801a0b280402000bf08d9b5571011a061404090065011a0510040d00000af6010a2074fcdd1f56d2f7cc422bbebc6c683f466e1f8388b05c0a79030e73145a7fe4ed12bd01d736563183d0d35e852826b2612e7619d957dbbbf4e3d7e3d14cc4fd9a0f731f6d0169979b2dc1b541967ea2ababaeef3596ca464b7965c79ca8707a5266180fd18d4dbda443b418d2086ebf26be10fda61eea7bb147b6ecf9d4ca41a652c6cfa7fc080642414245340200000000d81dc20f0000000005424142450101e49c3d0f4b16ed0a7b3d709a9a45d65394f76f8bd4a5a7491e00da77bc636f1cad8bd6aaf7ff490721437137e2db96e53317038e76f8bee0561977475219be821a0b280402000b80769b5571011a0510040d00000afe010a20d736563183d0d35e852826b2612e7619d957dbbbf4e3d7e3d14cc4fd9a0f731f12bd01cd9c2a6eab0daba7945938797f75aaee1ae49dafc6f7cf96482c023e3d33342969010181817449521c19371ad712fbc35846f1c9bdf40197e4c1df567344b20d880d1b2fa1a9245ed586b4f0e2d611d7359a636c3823b47fc4d22576e19f3c6f00c6080642414245340201000000d71dc20f0000000005424142450101506c2e397de6d890999d4b12de7891a9f0a7511eb4e28e4d1d4bb156d0b1c7527ce93730f4e09ee113e340c1ae53addcec60c70c21728b0275aecd4144bd40861a0b280402000b105f9b5571011a06140409005d011a0510040d00000af6010a20cd9c2a6eab0daba7945938797f75aaee1ae49dafc6f7cf96482c023e3d33342912bd011ea37246b0e60c511776ab500d245d74fe1af61648fc558951936be09b7792116501f5393fc3e6f92b5b91e663813efd0841d7198d9f8b5bbb72c22b2186467ec2481337fca5d25c8e8bbc4c6f4778bf0228bba90c877af8f70ff4d7950565a9a300080642414245340202000000d61dc20f0000000005424142450101e04c2f6f3c52188ce5e4f7781194e47e8431bc144dce3bf8f64237a39f417365846bf563348540a61450fec3a28e9d83510544bede04f9c3c485bad68f62c58f1a0b280402000ba0479b5571011a0510040d00000afe010a201ea37246b0e60c511776ab500d245d74fe1af61648fc558951936be09b77921112bd01c6bef59b395d65c969772f99163bacbf67fc3dba9e7ff2b47e194900e569eaa56101d89bcb78f9f555aaa28e08fe7ab5d714b96c2c2b29e89e8e32e301ff844d89ee2f060bfffda5105e25b5751537f14e97718c0eb7eee8a1843ba169302f3fb32b080642414245340202000000d51dc20f0000000005424142450101b4d01cce068de228dafcfadc3686cc1da8ff0512e04f5b29e35210a35d1a373a3ab6465b62c4cfc132da4de74eac7eb70e466cf73792f844d9ae02368a602d851a0b280402000b30309b5571011a061404090055011a0510040d00000adf020a20c6bef59b395d65c969772f99163bacbf67fc3dba9e7ff2b47e194900e569eaa5129e024b937548ee583f5ffa7a3a7ed30b4ec94db6e3cffcbffed728f1d710497dfc6f5d010ff5618ca55d1e2eb206a1dd4b415a5c656da65767ed60abebbcc48c37e83422e85f6510053a533f9885f5e28afb996f0910e7e8668196abf76ffbaa7b2fc391080642414245b5010103000000d41dc20f0000000014210bef2249b1e358ce43c888db1113c8e0d6ee0c845d199399b51e81d00f518badf5afce0e416dbd37fbb6718ded820409d5e133104fb5e04dc49f8bb97f07a38b3936c2c1d58bcd6baa9afea38964f87e9d4cc6205f16ca22519be9e601090542414245010114f60ceff84f7ecd04a9222316aedff5a55c4afd4f41a78ecef500c0059c5b42e41c425f547718d2b5a68ed767ca1bda00548af20aa964046fa9ce7f690647821a0b280402000bc0189b5571011a061404090051011a0510040d00000adf020a204b937548ee583f5ffa7a3a7ed30b4ec94db6e3cffcbffed728f1d710497dfc6f129e02551b3c95d2ac89839edbb37dade28169eb5b5be176afb8cd050f8f41ffd03f095901b90290a649fd3bbedb2f357aaf57a4cace6a62a078c4c82650e812b31ee193a37bd837c52ca5494ac5cde05a9be707a708d9dd4c7ad9e9ef8dcf9b22ded5c79a080642414245b5010103000000d31dc20f0000000008722d9edf2695c81378f0597a978dbdd449d3684aa7cea4528ffce45900d82e8e9783b767f349e1e51d177a6e8eb9e78822a3737dd512bcc41bad547e12da0e2b3f471ab4df31a2fc5f62fde00b4bb9f7ec206fa27c7c4c49543ff9c466e000054241424501012ed86854bd0d
dded95c236a5c5227c72c30ebc2992ddc0820889ab021cdfce754417e45c67690cb14af2feae0b3787d5aa922a4789ed8940ad6517110b5c798d1a0b280402000b50019b5571011a06140409004d011a0510040d00000afe010a20551b3c95d2ac89839edbb37dade28169eb5b5be176afb8cd050f8f41ffd03f0912bd0171f182177ea18f8584cc4c2482199b0120b0a3635f2c14e0bc2f879d1729d679550155d5b6cc3729fa150a044233e100e4758c01b84323fdf401e2514bbe9dafb9cd2b0d452c229a7ff5eb04612fc59f277a2f03270241e85a43f8377d8911662d75080642414245340200000000d21dc20f0000000005424142450101ba502a764e1a7ccd22168d7a53837dc6fc234027ff2043f96ce35e0d8d9efa45b737a93f167c51a49deb627b56b5f8b3af76cf46dd3593ec3cfdae80f804b5891a0b280402000be0e99a5571011a061404090049011a0510040d00000afe010a2071f182177ea18f8584cc4c2482199b0120b0a3635f2c14e0bc2f879d1729d67912bd01351e87c12d14c71cd2a32a35c662e207dff5c7797160af13e2a7f33a8015a2da5101c1481f80fe727ccbf1e0848ecba69e38ddd29e39156e5f474aaa2d542fa01b842984386fe7820b1a362b4b600c93f47b7daab7ce0e1963c8e2bd61db2fcffa8f080642414245340203000000d11dc20f0000000005424142450101923d1c0630015d2b98724afa5f5001c4e7e0b6084a39b1893b7574ef4e6bd93921191e394ee9c9085553945f587062fbf1d029e89503ce45bfad68efab5435841a0b280402000b70d29a5571011a061404090045011a0510040d00000afe010a20351e87c12d14c71cd2a32a35c662e207dff5c7797160af13e2a7f33a8015a2da12bd0131d1a8d1bef7be687601e3a36369d9e53d0c2285e9397cf727b8664a2de29c974d0145ae470543e5118fafc7e4e8a21bd6447118d3dbf5cc054368e8c83b59736f47255bfb65da14432f0c3a2470443141e2c455c28ee556b6d46ba9636716f25f1e080642414245340203000000d01dc20f0000000005424142450101ca5ee06e99d5df3e5650cc6ff0b6c57431aea6043266e1d4038731747e0fcb25613210535268558fce2aa2166d3f8920857c0fdcfdd78ec1738d4a04e30b9e881a0b280402000b00bb9a5571011a061404090041011a0510040d00000afe010a2031d1a8d1bef7be687601e3a36369d9e53d0c2285e9397cf727b8664a2de29c9712bd01b1b7e5afe8713c8b9b28e4afa0bec4ae2f1308c664a969b8addb3aba19c7bcb14901d518e47cfd8e33069119a940fd02d49ae2dcef580da104fba9908dae195eb284a82ceecb3103a93da79c939a9c258e5305a5abe330a1bdc51a042dcfd5961005080642414245340203000000cf1dc20f0000000005424142450101ba2748ba17bbbc9f9353ac6975c9c08ff304229f6d9cff1bb8779e65b596f257e144447305fd431e24835c0e731965dc379da01badf799ae974f944b44ebf4831a0b280402000b91a39a5571011a06140409003d011a0510040d00000afe010a20b1b7e5afe8713c8b9b28e4afa0bec4ae2f1308c664a969b8addb3aba19c7bcb112bd017813fafd42b3a9f7f238b6b45294fbb67c2eeffe2d1aee69ee1e34841ecb03d04501871233dba17cd0b06b37028ba97969c609952b9d98ff467dbdc03380a204d024066f7f43daeea7fc24855ef2b5a9de34342933ce1bed75a2cbcb764372a5786d080642414245340201000000ce1dc20f00000000054241424501017cd39145d03898c3344816591d01229bb9eda6fe57fb90188aba4f1ca17ddd59ba0ef18623b347c3525a085c68220c08d75ac8d8faa84bfbd4d790a621e954831a0b280402000b208c9a5571011a061404090039011a0510040d00000af6010a207813fafd42b3a9f7f238b6b45294fbb67c2eeffe2d1aee69ee1e34841ecb03d012bd01ebc4b3e0d652387726b502ac6a91707fa6e19bcf01a641ca64c91210e2628c7d41015f28ec5fbf2f256623117c0cfa01de4cfe626e66f676e5c579b56764b586bd6ebe33fe6b349d59f7a1557e63abe04497ed6e07f393c45aeb6fce3308a9c73778080642414245340202000000cd1dc20f000000000542414245010124f01218e28a5db08d6ac4549c009a4272dadcab7907b0675faafc6c91a5c67c3cffc77b2bc5c30433cedb5f099a0015bea32c81cd79b37832e42458473bb1801a0b280402000bb0749a5571011a0510040d00000afe010a20ebc4b3e0d652387726b502ac6a91707fa6e19bcf01a641ca64c91210e2628c7d12bd0164b4560eac2ff33179a97c4654d22fa605191a76db296abb67c2f2a20f746f8b3d0110aa35e040b25c2dfbdbc29c688d5a70cfc4ec670608ee95f7afe6231bac6d33e6c9e3c3570039f0f1e042ee7c9bd59c17d0166accfb19de82d18e082efc7b37080642414245340202000000cc1dc20f0000000
00542414245010192446f74f34d2b364073132081d1518029ab08f054a3ca9c845655e186637b723daf60409c44946a77ac8be39990f944cee903687d1f18ed9fa8bda74094d0881a0b280402000b405d9a5571011a061404090031011a0510040d00000af6010a2064b4560eac2ff33179a97c4654d22fa605191a76db296abb67c2f2a20f746f8b12bd01b8724d785db13c4dbdd85e7efb71a7cc34956c4057ae71539196b42233ff0eb93901ecb0ec256107ef62f8fcdce3b9c5dbbe7dbd0ed112b4abb69718b4c3c45cff17a99bbb70bf0c1a36a5ad36f2677feeae1e9b07eacbdc87dc875d591a0fb1ac49080642414245340203000000cb1dc20f0000000005424142450101169c05a140142a1142bcff9a58e33b4112884b78524501c6c0504c3974a66430d58baa66116966d7af6d830df978afdfd6182665a81188a1aff0f28af35d68881a0b280402000bd0459a5571011a0510040d00000afe010a20b8724d785db13c4dbdd85e7efb71a7cc34956c4057ae71539196b42233ff0eb912bd012ae33d8e88755a0ddb7f2dd00ab952e3d7166c5f9cb3a24c9b63543550cc629b350104ece1ea78b941f321164f410a511f35cf69c9d67d8d540d3b8f837c7b2dcf0e6c609fa1c90d2461039240e14bd8b31f59a2bf2c761097bff5e358ad22ce1834080642414245340203000000ca1dc20f000000000542414245010186e9b6204e893692f270dcbcbd87b1a0eb9ad4145881df9d0ad73280a8ad19468cbc66b1a59381b7033bc4961decd6c1e428c0343edb387a7cf36297355097851a0b280402000b602e9a5571011a061404090029011a0510040d00000afe010a202ae33d8e88755a0ddb7f2dd00ab952e3d7166c5f9cb3a24c9b63543550cc629b12bd0125a30d365d4c4b164ee90970f35ab1bac67eef1ef9672a793ea100788a7722a53101783af4490bc7afe8cf038204cdb3b5c9443eae63d3f01261d2b3b7643e1adb57e266cacdca78b0e87376bcd33f902bea2fd27a432e353573dba3e968ec5da00f080642414245340203000000c91dc20f0000000005424142450101142908f9242eed3c2cf49f3e9fc6e6372562fcc51980998042badbc61595137e58c875d5482d0152b0b78296f3d544f85053a57f9008f7f0f0b71b031d7bed811a0b280402000bf0169a5571011a061404090025011a0510040d00000afe010a2025a30d365d4c4b164ee90970f35ab1bac67eef1ef9672a793ea100788a7722a512bd01d3f7bc764e422664fadac267597b9599f9486f8de6c44d9fb8df59381b48379e2d014a2e9399b6a2f30157cabf56da6cafdc88a1eca3b64214658ca62b16d9b5e1eb185a560a8a7f0d57fc6dba1d069fcb37d6b40e3574c52eec83cbde4845b39748080642414245340201000000c81dc20f00000000054241424501011c3e2400ea49a61bddbafa284c5a6d9b44af4aa2dee3d1dd2d92122ad5a6e177e72f7d4de8c5fcc443cd3cef50f40f17dbc1109f4c724fda6a9eacaf148617811a0b280402000b80ff995571011a061404090021011a0510040d00000adf020a20d3f7bc764e422664fadac267597b9599f9486f8de6c44d9fb8df59381b48379e129e02356e3bce52a7d124cb2bbd56b07f9a51a9369452725ec79401a14d53fad56d622901ce6a3c19622ec5092447b8bdefbc79ad01e5927113e74c30ad961bce79ef1a8d1ffca3dfde0dbd75988356be2209d0c205143decced95e25475a5c4fcb38397a080642414245b5010101000000c71dc20f00000000daa34c265e5ae6952ecba58e9536e14d50e246145c347c9b8fc72d9a81e5087c02b43374446984e084532ec758796179a7f960575740bd3f114eb1da83af430ec398b651902e30325210f383d1bd05984b34d4838d92f9391e435a91d6479707054241424501015620ae89383922cf57f16d9db94398fccd321851f93db828fe9878a90c118601b45374b3285db784df55c8505a4b3cc78dc4a0e7db9a703fbc652c3d5d1c89891a0b280402000b10e8995571011a06140409001d011a0510040d00000afe010a20356e3bce52a7d124cb2bbd56b07f9a51a9369452725ec79401a14d53fad56d6212bd015121bad1272843a9597fef780b5c3af3841d8029cff3993f906ca08c7c337d152501cfe070c607985e5d50d056515b4c934a9f66762f1fddc499c4278507dd0dd138bd8f12b549fdcde9e9ec80efc54f4caa20eb71088fe3ece6b92c7687131224ce080642414245340200000000c61dc20f0000000005424142450101b48b05db6f5314f541c2d00af9f752f73eab3da9204e55cbab3d75314ce2590c68a6fdd311dbbcdc649c3ce649f6aae17a3cb1fda33894681e8f4f578eebfb8f1a0b280402000ba0d0995571011a061404090019011a0510040d00000adf020a205121bad1272843a9597fef780b5c3af3841d8029cff3993f906ca08c7c337d15129e028076b83bf81868
0a67e13b1453628ff03ce912cddfea4bc0b34916229e6f188a21010f800e1c755327a9d49d8cddf3cdd5352111cd843e990ac294766aec21f70a946e2cf16aafdf0eac1b367547371f371717f0e1c836447fe54c924f8f01effb48080642414245b5010101000000c51dc20f00000000eeff013103e09457b7e75448ce35e7ee9d82917ca073239ec001d11e525ed26767a2c61d31aab85eef648a324ce1680646f9102dd411d396d54724ce78b3ca00513df460faf3d0104d2cbeb7a054ab0a6099005a0e6476e3f8b9d1c967d9a50405424142450101be41db09e2984d0778dbb48a312ad4f7f0dc20529847c8533e70f8242fccb701a651359cd9d0f1ca4f944154ba5b51d0f5ce530bda88b5faa937fcf5abe4588e1a0b280402000b30b9995571011a061404090015011a0510040d00000adf020a208076b83bf818680a67e13b1453628ff03ce912cddfea4bc0b34916229e6f188a129e02ffc694f6486564e6b1d38e1300150f5f586df02f968399e2b4513e42575168ca1d01ef78355204b5b342edd44df1c1521a6bf6c190a2e7a7fc4c7ef26fbdbc7468376314749939cef4bf26bd0804098a6e08d7984de1e2b24f099664264a2c0f6c9c080642414245b5010102000000c41dc20f00000000906cb27906005d69d190ceab6c1a641a0557608dd2a87a7faaf57f30524c0562b863b5f1e1115f33ef255df751f3949265147d412c28988673b41ba3fdfaf0082a0ff16cafdcd4f595230d200ea47dc93d6c7b6962292b551660f818fd35f40305424142450101161a3501ccaabd37cde898ceb7a7462ffb70f7b902c44b79b7a82bd17dc35b384c4a03fbc413b360f8d978b5a2a792b9a899832adfcce3921c8e84ed60921e8f1a0b280402000bc0a1995571011a061404090011011a0510040d00000adf020a20ffc694f6486564e6b1d38e1300150f5f586df02f968399e2b4513e42575168ca129e027437eb9c262e7fb83ed996911e1ec678148edcb64796934abc3486be672d666319017e3b4a30721d1faa9077fa31af596fe510ee86071f67bf4971b0ff97b371655393c2bfa4a359ef20533d52d494825e5d1c73f81f40a9c8c9f3c39f830eecf898080642414245b5010102000000c31dc20f000000007c2c586bed7d31f77ac0bcdb15c1bbe698cbb4aed9eb6b05ab3b099cf41a5357d6626475c36a53e33d4904d95a39f46cd1afa92072b515b8c769c7f2f9c7af0919e896da04b08f7bbf4f3e267606c6e3a501071758d42b6ac1365904a6fd250905424142450101a0944523b43bbcf76cde3553687faa18e326d6737407947a22679c1c3ffc97635ebca9ddb54d3318bef8022565d239a5f878f8e8dc5b228eb67f7e77c011e1801a0b280402000b508a995571011a06140409000d011a0510040d00000af6010a207437eb9c262e7fb83ed996911e1ec678148edcb64796934abc3486be672d666312bd01156ee4d205c168d314eb860ce3d631a00d7c84fb7ec5086dc9e0883b5e72e2ee1501e0c7e018d444fdd5a2db2634bfce88c0faec716d8c5b85cc8d51abc3dcd76004284aff16fed03c3d4f56488a70b23948ff71883cc430cc3f0b3c5fbf32ad4ce6080642414245340203000000c21dc20f000000000542414245010138acf56de3e9f0dfac98c3a4f5f15e7c2bd8449de9332f2e29ccfaa5c443c82c54668e1ef02cfa0aacff590f0d18d037172d46e18f2904a719c9450bfe17028b1a0b280402000be072995571011a0510040d00000afe010a20156ee4d205c168d314eb860ce3d631a00d7c84fb7ec5086dc9e0883b5e72e2ee12bd017c1c7b1a448ae768cedc21824e00f7a2dcf553ec671778aabc9ec0473685c7f811019eb123e0610c1fa46b8fc5b1d755df521d57d1a96800c781a398f8a54747b562dbcf588773825809fdec074f288f82f731e020a35ccde29f467f038d339a6063080642414245340202000000c11dc20f0000000005424142450101567dc7e1e4e76aadf12b7377ccbb652dd40a976b18af871588baa13f67f3110a9e002577f52ea4bf4832862d81e5c2298f9e8b87f2c9efa8cb42babdcf2637851a0b280402000b705b995571011a061404090005011a0510040d00000afe010a207c1c7b1a448ae768cedc21824e00f7a2dcf553ec671778aabc9ec0473685c7f812bd01ab88b62678d1d597bb9692493f7228d7ef2514856203bd1b233647b54ca8a3a80d0181a3e0979487817b74505788ee972f78fdf119e43fd8d7268127f0011473cd9c37466cf1dd5ad5173583260692e51c7feaacd7041d82e86bc895aba4806183bb080642414245340203000000c01dc20f00000000054241424501015cf4973779fccb01bccc98b06ee6e941dc1e0a4484745cc38203668c195f810e550d9905157577dd20adcca414c9582093fa295cdf0eb3880f78455b3936598c1a0b280402000b0044995571011a061404090001011a0510040d000
00afd010a20ab88b62678d1d597bb9692493f7228d7ef2514856203bd1b233647b54ca8a3a812bd01b1822e4189fe4a3e6b6db07f71af4b3d3f1168df0279ce4c4c2f6b0eea8f6346090145beb511fa64cd924f67616266be745c19e4ed57f8e82c81ef8411258b5378f1bad49d316149f983753b9d60d7f67b403cf19ab428b0ef6ffc282bc4933d1039080642414245340203000000bf1dc20f000000000542414245010132241a90c00393426f65b33156a6ddbe5dece069dde58d25c6223fbacc1c6c1ca7ea07e21d6a6f91af1a80dd391a207cc3395ded21e24f230a19fbf85ac90c841a0b280402000b902c995571011a0510040900fc1a0510040d00000afd010a20b1822e4189fe4a3e6b6db07f71af4b3d3f1168df0279ce4c4c2f6b0eea8f634612bd0112351b583c540b6eaad137dd39d9af8d9f7a9867cef0cc83c2fa9bb8770e54010501c9b167336f1cef3ffae5ddf95d048e765bbd18aaa59aaa02a0a0668d33f1b1a01d598f397719a4b4f58a0c74ef99c9bc2b0384a926f3e9e666740ba3c04b21fe080642414245340200000000be1dc20f00000000054241424501015cc5cbbd0b14f718121d1ea51effeba63f0f692e727952425ca5d23c3b3a4d760a68415a1d4b46972a2435d337736a450376c133c197332cb1183245974aff821a0b280402000b2015995571011a0510040900f81a0510040d00000afd010a2012351b583c540b6eaad137dd39d9af8d9f7a9867cef0cc83c2fa9bb8770e540112bd0194e1a68cb52317e78930b95dee3b08a2e62e6a2b29730fb9d475797184483a8b0101539ff369eeb1cf4412880c1203eff2866d75f1f8803cd3c9d4790294e770134452e8ddbc842d7ee82a43dc9cad16cbcd708dc1d5f0b0c5ca0e507e4b836e30ac080642414245340203000000bd1dc20f00000000054241424501019ef49d5da0a319c0a248a5bbe0f6fb8c39b53839cae2d52f7413e06de993d5333c4145799cfcaca427fa6d7b8dc71a6a22079662f655168cea39dfbd7f65f78e1a0b280402000bb0fd985571011a0510040900f41a0510040d00000afc010a2094e1a68cb52317e78930b95dee3b08a2e62e6a2b29730fb9d475797184483a8b12bc01133828dccf4b7d79180f51817cfd72f1ef0d0c97a7fd5a2f0115c2b2c5ced0effcbdb4827a7e592796b36f90962332baaa5b2faa7e8029a7681ded8a58f1c89de33c7cfd4d4fa1a542032d5d10e04866ab36da6b34df90f301311b3e68c4cdde40080642414245340202000000bc1dc20f0000000005424142450101e25896041bd6d61bc3816c91e702b7be55ec8aca700abb0cb1a98e2ca14a9d72fafd492750295fb3d9e87bfbeb3dff29a6bc57c8e4a0ea4e0fae5cd0ffac898f1a0b280402000b40e6985571011a0510040900f01a0510040d00000afc010a20133828dccf4b7d79180f51817cfd72f1ef0d0c97a7fd5a2f0115c2b2c5ced0ef12bc0154ad8e47a788cc635b85f740ccd8075059586ff5ce605c80eaa3521120d43a35f89879fb1d2dffa3f14e6b5c49c24dcc396b627474bc11a4aa9c66692854dfeb899e638f75210972669901b16eee2b1c806ea507d7d16995d4949f612f3fe52ae3080642414245340200000000bb1dc20f000000000542414245010110d9b2b529ccf2fdea24e9d7499d16aaed1a1023659c7a8fb29c3996fd25402a30e5503946e71162a535a48eb9c248b5987df473f17f1bd6b24cde37369109831a0b280402000bd0ce985571011a0510040900ec1a0510040d00000afc010a2054ad8e47a788cc635b85f740ccd8075059586ff5ce605c80eaa3521120d43a3512bc014ed0ae871ec1a1f1bb59f304a4303fe24d74822e7ea4fae61f0eea007a23d421f4ffb93d1e66b9b815bcb04de551fcffc36b74d0795fbc2f69b5d2912a0b3a402a5a56e1778a035110513a259b6bba57c614f838f750faa8ab5e3df8490a1baa8d080642414245340203000000ba1dc20f00000000054241424501014c34c38185d6b9eff0688f7f831efb5a0b35580d8f1506b740660c34b27c5b26cfe6e81548dc0266cc40b76113e1662c286dc75a2c05a759be703111f7b651871a0b280402000b60b7985571011a0510040900e81a0510040d00000ad6020a204ed0ae871ec1a1f1bb59f304a4303fe24d74822e7ea4fae61f0eea007a23d421129d02abfe6c642981aa5febe12ffd4b81280270fcf84a937129573cf7dc64afca41f0f04d533867d0e1e47344c7f0785822bce4826bcd9d2cb198c22dc83691e1d17cfa52e0f9f0ebfb4ada40bf3eb5a3d5db743991be02b1503003f667888429b5b8f1080642414245b5010102000000b91dc20f0000000070977497816a34e5a5844749dc7429e7b0c870a09f69af0281227a3e62f7b421a9c7d1ce9832f2a0daa8f14357b9ef1a01ab59da3cd485c3fc827ce6b07d0200319b8a8c2eb8e3a8057e6ef0807ef49e302f647c44d2
11aa8850fc171c4aab0e054241424501018c09d5de3fd9249f86ac89376e569a2e4f5be570ff3cc81b6c87d2611a49945d250b01580a127690a4d72e6d73fcce4da47de7b4de23dd4e18cbd2e959248d8b1a0b280402000bf09f985571011a0510040d00000afc010a20abfe6c642981aa5febe12ffd4b81280270fcf84a937129573cf7dc64afca41f012bc01a2fbb4e27d9279cf3771cb3b9464f5556834b453c2bec9763af71eb8f97811a7ec6e2a0ae5334da0b44a96716afec293917aee391fccda0036b40120edf4dc4b07b17d9f55abf5d9cd71117b7ff69dc09964ee5643fe9b6ed51673c3e8949cb041080642414245340200000000b81dc20f0000000005424142450101da853056e687127ef867c0cfa18e04792aa73d7300a0cf01c46868ab8e0f604b66cd07dd5c861863674442fd9543f3ce76753727f7e53c86a5a9aa19bcbbf98e1a0b280402000b8088985571011a0510040900e01a0510040d00000ad6020a20a2fbb4e27d9279cf3771cb3b9464f5556834b453c2bec9763af71eb8f97811a7129d02b6ebc8d83332e5bffe81c7035d6289b5a962accaf5496ba5cbb52366fb3fe4c7e8a85d4f37726c53996b54f99d32305996bf49ab4afbc77f8a258374407da17bd339ef4facd031864195008b920cff62da386b2e0b2364f0821b69f6f5667b87db080642414245b5010103000000b71dc20f00000000ca064962cd3a8f57e5f7ed1fbf9fb14786420364250aa8a30ee77a36198f527c1ac6d1d2eed6a3c977f9cac780fe7a31e2e2589c290d8d7b1fdfd1bb8b620804b7db90765f90feba1c35164595e454c04b4f382c7a1c808fa4f6c12f3d84730505424142450101f421a8931607dc2c560f1d8a0b2b92c90e36e4ba86d4490229dbb31c3342e90b33ea34ec04a0496d4328e2052e15c501fb46908fd176f1edc3dc26ed539ad6891a0b280402000b1071985571011a0510040d00000afc010a20b6ebc8d83332e5bffe81c7035d6289b5a962accaf5496ba5cbb52366fb3fe4c712bc0121e9236d0e794bc8168d210a5071ae96b9cdd917050b6792f17af940957753c2e4a2ca16b2d9ebf9d1bf58bade5b8b02f7e973dee60d4ef9d86c0122ca61e5b2ac20693c1f7205692d455f33f21d1b14f549f6e995c0e6e25b7168d7e6ebaaba56080642414245340202000000b61dc20f000000000542414245010156b150afd89fc10ec4f2898388851eed97a04ff98347345b422815217280e3290adac67604a70877803ef70f856f2d3aca881808624074835397130efcf69c871a0b280402000ba059985571011a0510040900d81a0510040d00000afc010a2021e9236d0e794bc8168d210a5071ae96b9cdd917050b6792f17af940957753c212bc01f035db458e49d2cb7d74e3d66c17ad2f5e35343c21e27a5973de98af14604470e027da04a4042b6d28767b0821945c10d24cfe24b70b21cf82a220a0571a39b50eba684c7f3874621a7e653f9e11523153708d1e58ba3688f23b92d5a6581effba080642414245340200000000b51dc20f0000000005424142450101f6c9655a29c9db659c92eebb37843f974e5e23bed8bb26faf190516d52eefa2df75f3162b7c0ec782fe236b0439f8fa30d39341e02b4a5a973bee5a7b110ba8f1a0b280402000b3042985571011a0510040900d41a0510040d00000afc010a20f035db458e49d2cb7d74e3d66c17ad2f5e35343c21e27a5973de98af1460447012bc01d522c1a81612602f34ca8bc85dd0b5325454bce9c9679a609dbf69dda56386d5dc18766525c7e792ec0331367f85e5a79018bb730fe1648bcd5eae14a14f01b30952ccf1ceed15fde24fa012583d6c8ed9a0cd894471040c1781cfa2e1c280a631080642414245340202000000b41dc20f0000000005424142450101027a2ce35968408d93cfaf8eb6a4926922df20d67416bc731aada94a05f95d49b6bb906c93817a154d876ea2b98fa32bdb3f31bd1138d72370c1f7354f1bf3821a0b280402000bc02a985571011a0510040900d01a0510040d00000afc010a20d522c1a81612602f34ca8bc85dd0b5325454bce9c9679a609dbf69dda56386d512bc0189894de43a4926cc76e2290cf78b201b21191464e755f48dbe9ceb2a670a0766d85f7e961e150ae8447cda12dbf1dc92a83713312cb4f0045b1fc4b6c3c5dbd10d06bdd10aba1364979ee0b234be6e49470aa6743e1d47005a7736d9f51f426a53080642414245340200000000b31dc20f0000000005424142450101266f2bd4cc1593f17cdfc0e762fa4440803ba79905ca3dc74ed36e2ea839350e706f5bb83f0bef082d4a04dfe4b465e7efe0e236f01ac923801c124fdace28861a0b280402000b5013985571011a0510040900cc1a0510040d00000afc010a2089894de43a4926cc76e2290cf78b201b21191464e755f48dbe9ceb2a670a076612bc0159551681710df676f35db91d3d5ef8fd5
bd01640ff2d061f0b4398842f5fb360d4ee382b2d5b5fb896b0f2074f51e25f55bd9b7ae9e2ea67c56010b53a404969fc7d5864fc9fb40212f442fa02b0b5b270fe8be9639de1c1e6d1fb9dcdf5f0789f080642414245340200000000b21dc20f0000000005424142450101985cdde018c0b446bbf468dd4f97c00a7421f125c2f54a228d941ba88c093b0454a4ed3ead88ea1483d6c879b4800dcae406add76caa2913dee9f3286565ba881a0b280402000be0fb975571011a0510040900c81a0510040d00000afc010a2059551681710df676f35db91d3d5ef8fd5bd01640ff2d061f0b4398842f5fb36012bc01eb209f84900561bf5db2a9ad511e8c10085550a94467517189ac53e1e0f2994dd0e28ff15ad98b56adc8fc182a144c08144a53d43012555c1c09e6db2facc004c1a857aae0d206cf514a1c1f9dce6f3972febd2d330d9ee860d1d94a67bc5546f8080642414245340202000000b11dc20f0000000005424142450101c44e7468dffe14d0e632753a0de5b322315060b4e44592fcf506880538dd803a7f308df03b055c271c90f004254d1f8fa1dc5b4b3173e1dd2b64d37aef44c4801a0b280402000b70e4975571011a0510040900c41a0510040d00000afc010a20eb209f84900561bf5db2a9ad511e8c10085550a94467517189ac53e1e0f2994d12bc01711e106dc7758d7acded313a70341f3ef8639930418ddd3438e3e72d49809c50cc33946c8de5f431ca6638deaebdbb463b19b32d5aee3df669aa5dc5cefad123f1b7d09ca34904fa43bb0a618bb777d3f071eb4a7c4f12ee1355e99eaa1fe0c95b080642414245340202000000b01dc20f0000000005424142450101901ea0091e323406fe9a5fd6f2b83b1589df7a964cc37af53ff0bd56dcfadb3317614bd9d3cd241a65229274bd5290a349f561346109c8e0449a9c6d990d7a8f1a0b280402000b00cd975571011a0510040900c01a0510040d00000afc010a20711e106dc7758d7acded313a70341f3ef8639930418ddd3438e3e72d49809c5012bc013b15b41cf015512311da383a008143c3b13beafbc3fc3bd1ebe14f3ff0136fccc8aaceb4576e4ca6082ff968fa42cb76cb432e0deabc4e1ee3c78d8217d59083d2d41cc7f52853c040bee2ec8ec54d8739930e11809554340e640c373cbf7a3a0d080642414245340200000000af1dc20f000000000542414245010196ed6b0e9d4471235c685053cdc6898f22176305daa5e47b952999385d47dc407b7a9a8ab4beb4957e5eff0407f8ee2a284ca02af7cbf34b86ed417f225103801a0b280402000b90b5975571011a0510040900bc1a0510040d00000afc010a203b15b41cf015512311da383a008143c3b13beafbc3fc3bd1ebe14f3ff0136fcc12bc01c43ae6270b8c3512a0cc9dda7b5bd72f5e080aa97494e054ff9cd857f70de541c4d13cb4204b14f858afd5cfd164dbb50787ab914ab2a18f2f7dad5df4b1ba8f7b2f44151cd448f650cc459a659232ce2ea2522a36aa7dbd4365b114b3df7e9f23080642414245340203000000ae1dc20f00000000054241424501017c5ff2ec84d567343ffd32706342bde56b9ed46dbe372fb06b31f1027c254c7a6df560d0c74369c579fb5a357b8a28000b7e9fab0b0687865f66fa40da32f88d1a0b280402000b209e975571011a0510040900b81a0510040d00000add020a20c43ae6270b8c3512a0cc9dda7b5bd72f5e080aa97494e054ff9cd857f70de541129d02321fb54dff389eb65be959af46958cfbe4e4a65e89be3a22a48bf8eb5984a94fc0b36e25a2c338c22d5e5e501287f9be66c7d1e2ab785aa4d3df2927cfb946a0f0aa16e4b280459a8f07e31b2e2d5c5abe6a4bcc1a913d235b0ab3c708c0829dcb080642414245b5010100000000ad1dc20f000000001850c3b7fef2f1cd41ab9e7628bad6170e642473d6cec7e71d23af5b230a3728d0855f9b064a6c33e78dfbcaf8b3ee3425850f9fb06e30a0393b0affcaf19a04c90ace79f553ba4792a345c8a19772ccfd55e8b41ee488e405524198c57c540c05424142450101482025ede24d681a71981a0562dca722ffb50a4495afbce849343f110b99d603101ae97a844da19e663b265b2ccd9906ae1af6f6a5b6bf6223040d173e8f89861a0b280402000bb086975571011a0510040900b41a0510040d00000af5010a20321fb54dff389eb65be959af46958cfbe4e4a65e89be3a22a48bf8eb5984a94f12bc01a8a009b3309b8b80475fdf1cdc8c8d41a776bf5877aff35fdd99a4736aa03c6dbcc61b7ba81b62db85d70487d051742c9a9c3273faab95864abc24b5a3184a3561c93d55ef9e804d63df03b9f65336bb1b3d593ade65dcc6da76056f335a2a561e080642414245340203000000ac1dc20f0000000005424142450101141b1b7b5a76d2b00d32dac732ea1f80f4ea88efb2ff558ce19af2058223ca5bdbcaac96c33d242cbda0f6
d854931b9641e5bddd98b94ef198c17041cf29ca841a0b280402000b406f975571011a0510040d00000afc010a20a8a009b3309b8b80475fdf1cdc8c8d41a776bf5877aff35fdd99a4736aa03c6d12bc012a2bc043d964a8f0c37483aefe2610373cfe628c545a8dcb0d2659ab44dedaceb85c60ea79bf501f1f15946a32d2484943502d6393b8cd195800b78092b90751d31668c91620083733f60155661074b8cd5f06a3ebe725688f416ece8189511c4d080642414245340202000000ab1dc20f00000000054241424501018211868fcac1dc26010ed2d34e0cade81ff7ee0b3b0f253cd20464ab4b4d2d2d80ad978ecca58265518c2f0e40db435590140ce939bae3fcb7cb0d278f85e7871a0b280402000bd057975571011a0510040900ac1a0510040d00000add020a202a2bc043d964a8f0c37483aefe2610373cfe628c545a8dcb0d2659ab44dedace129d02803c90a07631dae2a2cf420be6528a70f4a47c14b1b558eba31b1897e867bce8b4d339396e439411a2d430330e068b604f9a46debf5d007c81181c9ef8bad39ac841c3eb89b3c2bc5eea13fe600aa9a9570e0e6c9931eea5290d1c1a8de554f7ff080642414245b5010102000000aa1dc20f00000000cc39cfc902f1cb719a02b0998f7bd6b823b957c0ff126084fb5955c8128c0e02cad4992f3cb885cf480870e5ea006bdad98bdf5fed63adea6cab9da75a49b4032ecc7764b25099b6ed3fc242c5669ac2368ba38e69059a1bcfae9b5bd134da0d0542414245010192ce5cd9fc9fd589991965f547672fa2c20fe8a5f4cf28a82296fb445965e8782a816dbed77261fc59c449d4d6f55f93a9a4cb7eba1185e366566cefd61ca28c1a0b280402000b6040975571011a0510040900a81a0510040d00000afc010a20803c90a07631dae2a2cf420be6528a70f4a47c14b1b558eba31b1897e867bce812bc017a259db8c7c271a658ec911d588c2460eeb3944f2d4376626f0d6c035f68f6feb0114c29fb682495c700dfdb55bbd42e04f69f2f0e58ec330e71d6b4c8bd780af13a86f657ebcf8cbfdefb0f7d95de4cd00d19ac51811fa8c4835b282cd52e00b4080642414245340200000000a91dc20f00000000054241424501016e911f3c5d6dc800bcb427423b3968a65ab7d04cbd9f800d5a25d0d7aadebd575c85eac547aade9b1be785614c1e9a5e10319d11bec2c1e127da06eee138898b1a0b280402000bf028975571011a0510040900a41a0510040d00000afc010a207a259db8c7c271a658ec911d588c2460eeb3944f2d4376626f0d6c035f68f6fe12bc0105e48f9308a479b6851c18e8c76a43aafca1991315234a2100e293c63d8118d9ac60242b80cec0f9480aa41727a3940e1f37cefeee55bfadfa1089e1cae23f3476093220646f3b80a8e1538f3d538ba1f0167e1487ee9efdd49fdc4f8ec4d8af0c080642414245340200000000a81dc20f00000000054241424501010e0a136ab13518d92771f30ecce4bcf2f1494c84aa2dc37c6fbe1c0e9f075265838fbb826dba0ac0fb4fe20811b70177f9931ceeb1d6bbe9d0229470705c7e8f1a0b280402000b8011975571011a0510040900a01a0510040d00000afc010a2005e48f9308a479b6851c18e8c76a43aafca1991315234a2100e293c63d8118d912bc01ca6b49e3e29b8f4f0dbdf64e595c435e82d8115bd54b63218c1e36a1282d6415a8b5dcaadc8d43b53cf3d33772dbe735deb64c637b54382bc93220341b36cc0d0749c0c504592e6793d2b410a660e7cd2029ded8127bf11c06907a02887b7fe320080642414245340203000000a71dc20f00000000054241424501014c1d631e328b6fa659e79f8b4ee2977b0703dae8ea51bbfb9dd9eb4c7e4c5629b8b684884429ea80ca5422d36ead7025470e1b116e31ae44d81064eb6889f3801a0b280402000b10fa965571011a05100409009c1a0510040d00000afc010a20ca6b49e3e29b8f4f0dbdf64e595c435e82d8115bd54b63218c1e36a1282d641512bc0114f24c836198423ce235ed44c9289f36b4bed6bd5864145f816c636a7caa6d21a4b56059e7570975cbd71506e3ae8d984e63797583f9c299060cb07375914b13688ff01a8f4aa24d688a64d5d420e3ccfeb88656108040052f27f091fec22bff16080642414245340201000000a61dc20f0000000005424142450101b4e11d99ca6a70b85ddf47dbaaf457a96ac34b82448fc5ea01864ea439f4ae13fe2fe69d14065a894cce5f04e0825997b891fe1bcf363871e4d55e2290cdfe8c1a0b280402000ba0e2965571011a0510040900981a0510040d00000afc010a2014f24c836198423ce235ed44c9289f36b4bed6bd5864145f816c636a7caa6d2112bc01a380b227811139aa976f67007d720b88b824fc1fa791b91857e731b9e12a2455a0f3bec1e696176a3f6b8a5727b5bbf8722e85ab15d60f0a27ef6f418125447a39ff5e36a85
18443384945715a08234c33435002bfa522a6a201d2929d787960ca080642414245340202000000a51dc20f0000000005424142450101466cedecc51cf91b4dfe27683e550892496e8ccce36ebd681f62df1d0d4ab90249e4040122c5999017ffff1079b612c7cfb4cf4b2baa6a659a34524900782e8b1a0b280402000b30cb965571011a0510040900941a0510040d00000add020a20a380b227811139aa976f67007d720b88b824fc1fa791b91857e731b9e12a2455129d020cf9175dd4bf1a98b9c88e130bada4940665ba743a867bc4017497043d7f04a59c791996d25c9dd282cc23aa6878bfdb6d7b9034f1bd3aec5fc582ec339e830d098a0448930a20aa983bd72952d866bfbfcd77b4df76b8a5dcbe5ac755f4f92764080642414245b5010100000000a41dc20f00000000920ed3dc03f54eee30415196e246c7e85040fb69670608b1ec33eaf37842cc198d16a0a700d3297306dbac3d0700a3f7c523c74eec913896c9a5ec04a9355f010ffbcdc6bc683a5ae09267ab62f47f999926921134c52c1aea937e97243719080542414245010194cfb5c7a68dc758464dc34bda5e1e772990e585042d0228e6f5a31d2b090d19d5cceaa2751a816b89e46eb640dd7544b6ef99fb485c3b42bfe4aff189fd88861a0b280402000bc0b3965571011a0510040900901a0510040d00000afc010a200cf9175dd4bf1a98b9c88e130bada4940665ba743a867bc4017497043d7f04a512bc01a513721b28bd45dca0199efd32b7a90552cd5be2ef2707b09ed5ffa3fb6c7b6a98cadd120aea96240a5ab97bee57b1662c333a77d83e8323e44e2f9fa85591384c8eef63939cdf5146a4487d8a59a0895181286fe93f1551c2d45e31ccd870e35c080642414245340200000000a31dc20f0000000005424142450101a2b1ff09d9594f99d7afd39d41e3a0411d1cbbdd1d90b9e173d50af079af5d27eece76f0d849ab43eded4dc6b6980882a2d5254fb22643a98e0f560f9672288c1a0b280402000b509c965571011a05100409008c1a0510040d00000add020a20a513721b28bd45dca0199efd32b7a90552cd5be2ef2707b09ed5ffa3fb6c7b6a129d020293c2e4efeb8051202d82dcabe42acbdad4150067b34e9c71246769e6db85789409d9324b134705e7449f5dc75a582db4f07197fb83e2c29da877e8bdbf0dddfe2b483bdc29cf13192aad87817e81bc06b636b8a91efac53adf6b4a8bde2a5a10080642414245b5010101000000a21dc20f00000000a6dbe7ea29c87f85d567d6295d580ed0b39b944b0dd12c352f589d9b56e5bb29356b974973b7d377e9e1d18a34870922d3af8c02130be7071f9f01f793240b00ee56e39309e70e37eea4d4cbddb7b6eecfbc694ec63ae823c351d5a8b1c3300f054241424501010851358b48e5844ff3f39e2d00e7c901c05bc35108f9f02a30141ef4af6e1030c25ce92de0bdfed1a1be0384af6b3e457bcb2f8a00d165bc348a5244926ce5881a0b280402000be084965571011a0510040900881a0510040d00000af5010a200293c2e4efeb8051202d82dcabe42acbdad4150067b34e9c71246769e6db857812bc01faf1618afa5250aadeff7c043382c658ce0e8802ea814809ba69b18424ef205790d1218beb64bdb8b9571bc671e3d86255443adb2a12ccdc2c9d1aa28c1fe2141dc7f65de627c29a21ed2c820103f8dd9149fe32c2a60ab65c873835399d7a5ef6080642414245340203000000a11dc20f0000000005424142450101a498ea8bed5d1b6b32ca405f9ba0593dd345a87554a2da33e33c31ab2fce313f687b28c5d181a6d0e3e3f3c2ccc98a190f73d84220805f3049170a2e042ddc831a0b280402000b706d965571011a0510040d00000add020a20faf1618afa5250aadeff7c043382c658ce0e8802ea814809ba69b18424ef2057129d020def1a8a310321f1eb73e4fcdd4b863ea3b0db4c4e45a2b36ba7651c6e0111268c3ea558f93e09f243767cb50c5e4325c4588dadc33939f0290809b33bd58fd5e83da55a124ebd101d2d327013d0d583bcae76a41d4469977c1ade55d87bc28294080642414245b5010101000000a01dc20f0000000000d254219da1efd76fe8d07295a61f3b09472bf6776007278adc040a26b15f07776353556dd922cbe529dfd9f7b15a6c9f94f3d21ad2432cb7ec67272a8f6f0583aedf06b3eb0bb2078cb1438808be72661ab47f7533b766a7ad45915f36bc0505424142450101ac5642e58fd48f45de1d1e43797f4f470685bf98bc40ec01f0693c78f6af1322a38e5732c71b32fd320b62bd3b6043f72eb827a4dd2f027c3956157c055fe68b1a0b280402000b0056965571011a0510040900801a0510040d00000afc010a200def1a8a310321f1eb73e4fcdd4b863ea3b0db4c4e45a2b36ba7651c6e01112612bc0136616612ec8b83dcab4d43a0c028ca29abcbb2b9d40b7a920383c4f4f97fecde88
e242157a9b0c0a638675bb443d7ecebc5f45d31f94ac96437a70b0bc107184709871ae75330e6dad204ab6cfdbf2ff512302cb83b9e59f6ed5465d5c4d1a44180806424142453402000000009f1dc20f0000000005424142450101f080d53212111b17147101fc160dbacd37cf428de6f1d424ed2c137f3d64402468ecd469ac95bbc8aea6e0a0b9d58b7f9e6cc674636074d631beb20123bf468a1a0b280402000b903e965571011a05100409007c1a0510040d00000afc010a2036616612ec8b83dcab4d43a0c028ca29abcbb2b9d40b7a920383c4f4f97fecde12bc014d06fe626368780915d2309574f23db3d04614472bab862f6f06d80c1bf7465e84948aee83acd3f743bb4b5ad6bdb5c7b22928cabede0a463529ff720b19aaa5a910531c72c0b300fa09b55bae65da74cd252928d56601fde99a1ed014eb00cadb0806424142453402030000009e1dc20f0000000005424142450101b8cebfeb11b7649e343a026d4831842c07185e5219f488aa2db6a9131f6d6f4848f3ce54c37463190aa5d0e13d3d7e07671ec0aeb5278712295c1ed5b4b7b08b1a0b280402000b2027965571011a0510040900781a0510040d00000afc010a204d06fe626368780915d2309574f23db3d04614472bab862f6f06d80c1bf7465e12bc0131e8ef63d4d4c6b0ba977ec2cebd820f9edcd061caf3c19d27bd3fadce0ca710809f38a1f3c1690c3ce94a9f558800f73eeacc4bf61e2984d21b5c3128fa51bb4832dd3e5383dd2c612361129bdc6ece31928a55abf5d8ccfa21d164d8c39a125f0806424142453402000000009d1dc20f00000000054241424501013c82b3a44a313b871963bacd37a7748686dff72e653f3535572768301a54cf5ca77095d3fd6673a6acea6a64c7d96e8ba8ec9cc8c9cb631ccd4710ce0aa1da801a0b280402000bb00f965571011a0510040900741a0510040d00000afc010a2031e8ef63d4d4c6b0ba977ec2cebd820f9edcd061caf3c19d27bd3fadce0ca71012bc01be671057eff7da856c107c29c78d603ceac928640ae06ef72376ebcdd0d797137cfc5bed20032182425bd4a5d096a63833e183fca7e03f5e6362694c05db75cd83b76c124eff15ca1aef796f4c01f8d409d12c7311df0b7e5c82b64e3765281d700806424142453402000000009c1dc20f000000000542414245010160bea1c8d50d6ce91541bf0953ba6cc7c28d5a405c315956a4d7eff427945a5ee3591a39ca16ed386d2d2f769de227d0b38905ba527fab841f133383ff23f7891a0b280402000b40f8955571011a0510040900701a0510040d00000add020a20be671057eff7da856c107c29c78d603ceac928640ae06ef72376ebcdd0d79713129d02dc212268318e8d109e9c25049d2999e22d89325e14b468dc89a2ad54c9def3aa781db9b935bfb294c20d463fec6ae326fa016d1e3e16e0ff0d937c3f10aa3409e205595d6c304f6b05b114fa31574a6df7057e3b3d6ac9e776459072a987bea18b080642414245b50101020000009b1dc20f00000000c0f8f7def66c977f1f03dd796704ea6e17bcba160b182e034a7fdd8853555f27020709776221a308e9c113b1950786d320aaa01e70739c6f6fe1f75f9070e402a627bf3e95dd9bdb9a710a7ebf5d58088ee3c1a5b70d18707f8d78594793a20605424142450101f6bb22e9cd23ab6e7e70f720dd0ec0544a78451f3d6f45fed76a5bd9a62d7648e45aa8a56d4f03c9f26b872062bcf5142b41bae7783300c44986a9812ca8528b1a0b280402000bd0e0955571011a05100409006c1a0510040d00000af5010a20dc212268318e8d109e9c25049d2999e22d89325e14b468dc89a2ad54c9def3aa12bc01b2fd01a4d490714a0f23eb0f80e58d5dfb31f7963cebdc19f962a88228611623740a7a09302af6bb321d632f4f7c24c60a6aadae048929cb22e93702881947970f1e598dc78add5b01aa2b4887889c160c4e9b98a10c8fd1949b5ce17545972ace0806424142453402020000009a1dc20f00000000054241424501013ef755889cf8c683b5ebcb1abe6877dbcdad017f6b5f4c4e798efee81888bc031816f0a22c44a78afec366eb2a18df08056db446fb31d9145ea51ab9cac422841a0b280402000b60c9955571011a0510040d00000add020a20b2fd01a4d490714a0f23eb0f80e58d5dfb31f7963cebdc19f962a88228611623129d02e2a0ddde67b9ebdc6c1e4d8e9d5a70b30edc8018e63c9be6327c628b67e24144704c0003dda25a44368254a0985dd5a5e403a3565cae8f0b4c43ac8d51ee70ec3667fa52b63cb9880902116fd882d3eaa1f2bfac191e655303dd39e322f302f4c0080642414245b5010101000000991dc20f00000000b824116d5996b741dff2d285c3dbccea85396615b314339d263bb796e3efe456d3cc7c18c9d340a30f5bf5c04915f3dffce6361284c1bb14e752a3b862a43d02eef5ad336d6ee253f
8ae7682b6f125bd78345f5c40b75227c2ab865585acd00305424142450101fe65f24ce99306ca9cbf4135f73b05452e61595a49960336190b40d8fa24b8194b0d721253886d4c42652a67f900431b34b3e1371f7148fd7e3282247761a6871a0b280402000bf0b1955571011a0510040900641a0510040d00000af5010a20e2a0ddde67b9ebdc6c1e4d8e9d5a70b30edc8018e63c9be6327c628b67e2414412bc01cc19437359aa82dc8c8f75a72f26924f1eac9247bcc3b03ca3fc7c263c7a73476c693667fe8ee7927bad07fef9e3c91867045dd50dca6208e34c87abde801d1694020fb8247229d1637e1d920d9575734798cca46186d279349e24eec306568dbb080642414245340200000000981dc20f000000000542414245010102bdc519c9d0e45ab232c1cd5b715d703eb4435837174825f598c8b3c7ae4a6586541185dfd8493976aef44f4683304a53ffd89d9f58e2a2817ba13272f2a3801a0b280402000b809a955571011a0510040d00000afc010a20cc19437359aa82dc8c8f75a72f26924f1eac9247bcc3b03ca3fc7c263c7a734712bc0100e2630a6d54479635d5ee29b2641ab67f60347cdbac1c41fca9f743ac3b6ec2682d893a915d7b503612a37cd24883e676edc0c70100311e50cfc7724b9601d4fd57c0fa908fab04c316dc5996cb9e7db838cde1f8d931b71215262568cf459886080642414245340202000000971dc20f0000000005424142450101101f72822b00222bdee3cbeebb3d38f03f3172e7b1fbece1fc72f9f154ef341fadffc2c7cfbfd86b7fffa0224249119cef081fcbb85a29885864d51ef75edf8f1a0b280402000b1083955571011a05100409005c1a0510040d00000afc010a2000e2630a6d54479635d5ee29b2641ab67f60347cdbac1c41fca9f743ac3b6ec212bc01c9e5b740552655ffe7e152346d60e78a8676cf24a64e1f752230984131b8602a64f77fb966a2427461f0819cd366f9f18c9f32f48d3b416cbad3e32c478295b3f5f1ea8c08a692b360674336d5654c518892be526f4635d1e81ff3f70845bf8676080642414245340200000000961dc20f0000000005424142450101a4b8c50c1ee1aca029bbd0f88059f8b02022f466b2cbad33dc670f6eb5755f4c22cc23c7910347cd6f38d03abe3d0b8f5e737eadd27d1e6ecdbf07215a8d9e801a0b280402000bfd6b955571011a0510040900541a0510040d00000af5010a20c9e5b740552655ffe7e152346d60e78a8676cf24a64e1f752230984131b8602a12bc012b35df16d340767af794c69a2e2957b91c0951036d45bd5f3f2c53f9579580b660f4ee150e4375acde16dcb4af0b718440991143e0298edccb7913c7d42eb1ea387a12dcd493510935ab01f64e7e24e6a62797a373cfd0f1b9349140466f431920080642414245340200000000951dc20f0000000005424142450101ecb07f5e31db733987ee76a0b1a52b15ecd872c3895dfde31b21764a176b947c12b3e54ce065c62d8d5f3913b8b21385a439a075ca88c2d138fb72c9ff882e891a0b280402000b3054955571011a0510040d00000af5010a202b35df16d340767af794c69a2e2957b91c0951036d45bd5f3f2c53f9579580b612bc01fa86580900fd772c68b87065fdd9de66197802afe7e395e650317f97fe12088b5c74a63992beae8f138a817e2bd83f0961ee35ac70518e47ddd4b2ec3a66f267dccfb05e0086bb273629f68b3f2fe355c99adbfa4f3c6ecfe340bc17dcb3062206080642414245340200000000941dc20f0000000005424142450101ae0f673468d534148fd344d92daa690cfd2afbf64b0dd65970ce81e7d1c05d023b8f5e5061d97894143fb3c588bdcea29a2b08f483aa0a9e8c4683b09d11718c1a0b280402000bc03c955571011a0510040d00000af5010a20fa86580900fd772c68b87065fdd9de66197802afe7e395e650317f97fe12088b12bc01147537511c610544f9342a077c5582a5fbdb7ef4271f717d515779246230c265583f003baf079bc391b69830a2de76cd45608ed5770074a9f5c1e6b6347d9034347a0ac68d6680e81b4ea0f1ac9c959ae271f460b964f9ee2586f687d1b0feb6eb080642414245340200000000931dc20f000000000542414245010198d6a49f2ff608528a687e6689edc3ad3f0cf014976087852f6a7309c8ed852b0c2ea6cd232cf10f68b9ff807d99d46e6fa56d2499dcfbb16384037fcf8e8a8c1a0b280402000b5025955571011a0510040d00000af5010a20147537511c610544f9342a077c5582a5fbdb7ef4271f717d515779246230c26512bc0107dba8acbe4b0b12d820608cf0c2da14d5d3e6aa1adb9134503657dc6cd2fe4b54b9ae96bf865c0df1f6b7dce1b9bfca438867e0625b6dfadc1c5f06afc35993ba29e624916efa9bf566a7d37a764859cb99d948ef4ae09a55790ad23b79b96dcb080642414245340200000000901dc20f00
0000000542414245010166718fac5867465aee0a48f2a0ef14bd9f30da75dc02e4daf6ec1bb4fd598741ba11234ced5dec8c21a7b5e510ac9ebb224e992ea818ed0d1bd38fb92478f5871a0b280402000b00df945571011a0510040d00000af5010a2007dba8acbe4b0b12d820608cf0c2da14d5d3e6aa1adb9134503657dc6cd2fe4b12bc012e42da53f87a4b5a7890c74df608aec9065c0be0cc9f079cd1e490654e6e0038502efac51f432bc13a1212380e0454d1797184dd770724e44b44df558e49934eef142e1b0290762ba412b9813d2451cf060a3f62e5cf9c43ef57c6878b9c720b3e0806424142453402010000008f1dc20f0000000005424142450101beb5cd52e739ba985137e5db40f8030a736783b2eaac0a0bd3c8e7d33b84991d94ebca997cf219f2151c2c0c39ab924f6ddaf7e94dbdf840e6675470867585841a0b280402000b90c7945571011a0510040d00000af5010a202e42da53f87a4b5a7890c74df608aec9065c0be0cc9f079cd1e490654e6e003812bc017bb44983f7e1a2143641f7fefe3066e8ea3c71a43a4a00f0d8d1175b873ba2784cb40db8b5ee01917e4d62bd51b0c197a174d8fa17b99562d9311eb1e5deab0ffdf41e73435f8cedfe4239aa00e09da5f068f0e224a678da07489a52ede718a85a0806424142453402000000008c1dc20f00000000054241424501013aa454c3f62adaea5518af2053d9e9ae931bb8809a4ca8e19520aafa24d3bc12d4ed3bc22c0633c3e5a33b862e0a7b9683fd84a70408c3562f0e5398860852811a0b280402000b4081945571011a0510040d00000af5010a207bb44983f7e1a2143641f7fefe3066e8ea3c71a43a4a00f0d8d1175b873ba27812bc0186b0719eeaf485bc621e4b4f1f57302c6df6bdf0834ec555e4f9807b223838b448f0ceb3b323536c7f3d7a8c5b3deef6dbb4be5dde7f62027d95c9bfaf55dffc124b150a50491320573b7f8b54a716acb6b91f894dd4f4f6b789f566bb6d9d3c800806424142453402010000008b1dc20f00000000054241424501015aff692798ff63e7b3ae75499066c34a672fbcf9d0149384bc793e94b4322d26a92049e8c215b26eece908acbb6a7299ce429c69a0dfd5fbb938463caebb47861a0b280402000bd069945571011a0510040d00000af5010a2086b0719eeaf485bc621e4b4f1f57302c6df6bdf0834ec555e4f9807b223838b412bc01149d8c5e7d18e37b5692739abc331622725ec11feb5f0865461c7cb96bd3c22244dadc7eae8dd479e29ca5695862bbdd947e44a6f0cc4810584dd9d833dbd9a54db589e09c431f47199c94445668c5d01c2982ce32c051374394a38d55750a0ed5080642414245340200000000891dc20f0000000005424142450101c29989458da965c0f0d563cf81e8ba06b4d98ee552f6c210a25f037667eb153e3e5d142f0f14d6710fcaf67e6d67ff9c200a8ac2f46d10711e08376240db6b8e1a0b280402000bf03a945571011a0510040d00000af5010a20149d8c5e7d18e37b5692739abc331622725ec11feb5f0865461c7cb96bd3c22212bc0191df6be16e5df850d2264c958e7341843b12e7556b6babf04d469a06e9aca3f740968c53c00de90d544b0d5dec3abd544e57f63669f0cf82ad08d49aaa90f0a4b6b39cfa21b9210931750ee8d4d306d14be1045fe8ae2a8b60d745a8a94e172033080642414245340201000000871dc20f00000000054241424501015834f7b90820f9381e4402cec0bfc50f0130faa6a0194fd9ad595b1adb20e461063a30548df65dbf1a6dc18b8378a1e4298fc92529dd63fc65c3e3bf26eb9c811a0b280402000b100c945571011a0510040d00000af5010a2091df6be16e5df850d2264c958e7341843b12e7556b6babf04d469a06e9aca3f712bc01825a011380e1bf450ec155e1d514ebaa911ebb33dcc17f806334e0af14e50f533cef7d8418017cf1587efaa891a047f1318251755b997017878aff709d17b0f8c676086e3d348caf6231fd51b19d3c633bc0cf911cba3d909ef00a3c1e598f96f7080642414245340203000000851dc20f000000000542414245010136def4943a27ce1e40916138c3ba41b03dda24500b2a4a90e1ab49e02cd96e2ef2bbeafd0ec193dd3285591e2484786690901713d5cb2b54914e64dcbcfc02821a0b280402000b30dd935571011a0510040d00000af5010a20825a011380e1bf450ec155e1d514ebaa911ebb33dcc17f806334e0af14e50f5312bc01ea5403950e5fcfb76dc933b58e357e93c86855e8dec0c3f596a904a30c5928db388fc6785d5107bcda2fadcab5126f480c1f3513f319612efd1b822859d8f9bcd8bd1cd86a72d2187504781a2689309b5c2e4f8eabe6c26fddbda45c91e4188d92080642414245340202000000841dc20f000000000542414245010142829ff79786a1e7aebcc18f98be5989013585b156da2e40e4ed918401e4d77
ff61075a848f867c1c3a11aabf6f9eb56b0ad60104867d4ebdbc82f138fdf948a1a0b280402000bc0c5935571011a0510040d00000af5010a20ea5403950e5fcfb76dc933b58e357e93c86855e8dec0c3f596a904a30c5928db12bc01163815f475f884f6cfefe0ddadd72fb5ae2b2be38c8c0c54a79a817e159b9a8334b33116651302d684eae3db290c7fc3a8af4a03879b4cadee40c6e987015c52d08d4ae6039f4e7ceec1cd26987d7d1fddee4536c3a019dd0d3426ed04526e2513080642414245340200000000831dc20f0000000005424142450101f84cbf913eda2b32753f584477949e41895237a6e30b946d1420bfdfbfc16442c221d61550ada1b0d162e9157ed44e1f119c633ae3d42268a655b07e8f925f871a0b280402000b50ae935571011a0510040d00000ad6020a20163815f475f884f6cfefe0ddadd72fb5ae2b2be38c8c0c54a79a817e159b9a83129d02684ae9ed6d434638c4dbe05d5434c8245cc3f6ce28f00aba7dda435971a870d8301a47b0c2e7d461ef269302686b34ffefdabae3eedf8b4edeb4873ff427fd201bb4fd91c0603c91cef8845504805f2bd4d7922793ed2d3966de44f5c4b5608e1d080642414245b5010101000000821dc20f00000000fc2db52d323edb2b39a54da4ac32f98de3da889a1db02346b43414c037d70162fe130aa9c03eee862a250ef099ed58cfabf38cc33fd1a8fc9ee763f5e54c370d167d9c022aba23c8c80b48eb1e51015179fafb75e181d2acea25f2c67ab7c50a054241424501016e1f741895a5649bb3e88a481c26e59d11055e4ec2453290ec7e73683b54c408a2abb78143fa0850515765b53688e2d678c0389e8bee72fa75d3fe0c49b3028e1a0b280402000be596935571011a0510040d00000ad6020a20684ae9ed6d434638c4dbe05d5434c8245cc3f6ce28f00aba7dda435971a870d8129d02bfcfcb1dbeeabf76c1edc73f8ea366e6c8cea3885a83058214a229f92658f2592ca79ea368f72ee9306e2f5c93e3d3bc6bc07f243998edb1b93449970fff84d06a8717b431aa5b0b2860f4adb5c390784c94c4093cea887292e710a676260f3429080642414245b5010103000000811dc20f00000000da47124dee6a4c7bcb176d5ab87dc13b9e62301677b47f6380bfc863a6e11c0a852688a0f45556950e76eb6caa3c6f70a97fa83f74ba1f6852eff9c67b70e008b5b81fabc3f26796efa342aba6260e080e23f584a12388bc02ad3a3cce78020305424142450101f288bc0b20f81ec92bda0ddf19d4ccbacef2f8f29356cf530071b9131e2c6e03331a4a1e3cb881416784cf0d739fee907c019a605e178be612547929e1468c8f1a0b280402000b707f935571011a0510040d00000af5010a20bfcfcb1dbeeabf76c1edc73f8ea366e6c8cea3885a83058214a229f92658f25912bc011d794413708ad4a52da8517123b9c919873f6066cf903800c6ba898cb2d0b7a72892d6edc3f96041c2b6271f516f5054b837e6e61d67f99f0177a9d1d77e23d4e3b8cf653038f29ac18f6023172c25b31838daa78b87471a3fc467b2a9a004b727080642414245340200000000801dc20f00000000054241424501010a0b87e0038aa69f4fd0156a775dd3a3c7b1914b2d7fbe45173f97db971fc2577905c677717056df9a066adebf419b9969e21c535929c7f5a6b70a58d36ac8871a0b280402000b0068935571011a0510040d00000af5010a201d794413708ad4a52da8517123b9c919873f6066cf903800c6ba898cb2d0b7a712bc017c990593b4a9f595a3a5bbea360531287994f8880e724fa62a4321d3bfa3160d245d2974041eee45f5d628dfda86a98f3bc5640c14605628fba35cbea993176172d0f03eabe0ed013f6b5162666133aeb0e2eaae669690d187f18eb4a2c31a5b1c0806424142453402020000007f1dc20f00000000054241424501016e097b2be9fbf3f6f4e86598117bfda11e253be8f215dd62e83ec769594f7c74c569a26032561719b67803ce154325eb45dc0b5b5b35aee466aa2df217691b831a0b280402000b9050935571011a0510040d00000ad6020a207c990593b4a9f595a3a5bbea360531287994f8880e724fa62a4321d3bfa3160d129d028e309f167b7e0e7e53ff5f25a6c0a8d792f6a0800d609e11eeb6ba5f4265c12e209545a374939c79e3fd7a6bf4849b09e4a00d38a5366bb1d9dd06cf287d9892b2d9c4f22e061f7efc739ed99ddb5ce4de31fd19a1608af345b3243b49887da1c8080642414245b50101010000007e1dc20f0000000094c58d5e04e7338e760558d45923f19ddc232d956e2c57305bed3a238e86ab41a0917237f24a2274efe2cef44dfe252bdb9f912e528082ea86fa1b7e1a8328001facf4f6a07a5efcdc2f8055cd00cff88866b45058c78fd8ef2d315c8b3a350b0542414245010194147ef1722e8c61b5cfc01a6693154e732aec5ce689cadae029d5d10214
ce1e7c5c7d64f5dfe94d75892fb5d3052bc83ccff599c8d8a53971039d63334322891a0b280402000b2039935571011a0510040d00000af5010a208e309f167b7e0e7e53ff5f25a6c0a8d792f6a0800d609e11eeb6ba5f4265c12e12bc01ed77dd52a8f2dceadc8cd3f7c194bb8c72781c0726c02276b5bf2372b04acbf71c34a85349fb4f9d66c9f09d14d0d61d19c36c99c3c6882f66b82499101b28d42b3e67f48d0eb16c123523e24d0d5c47c722fd2a2e25091e6bc3ae8b3ed3c1bba00806424142453402000000007d1dc20f000000000542414245010160b508e46ba356790faede15a0d241726d06a319e88cc7e31c28423ce2f6bc13aff92a23a08fc07e4d65070e79416eced9b304e5446c03b24a85f0f6594146831a0b280402000bb021935571011a0510040d00000ad6020a20ed77dd52a8f2dceadc8cd3f7c194bb8c72781c0726c02276b5bf2372b04acbf7129d02db8fea8c1a82feb981e935baa1a4b1d5b87fad03f15cfa40a9d341a2b818896518b17af4f793b5b1b6f094edbe4a3802c2395c9d6ace124645ff38f0e3191d3a913453c574e4ce8eb68d4e0cf669b31e05d83af82b31f61a2f48eab1d9c41cfaf7080642414245b50101030000007c1dc20f00000000f8b30a821656568e1c66e9183ec05c1627820c3d3648edee1effcc556fac86717658661c7e1978ddb6497367db7bec49bbdd63756932aec3b719d5ba5e4c3c09aeae4ace2ef06fd5298378c7ff30ec06f821b589d06eefdf9c640d81121ba2080542414245010102c69a1467ac5b92358cacb78202a6ee55c9f6c06c7bc7f4ae2b23c8adbf4575eddb68ed01777d96ff896c5a44859585d738b5e92e6565890cfce18db637988d1a0b280402000b400a935571011a0510040d00000ad6020a20db8fea8c1a82feb981e935baa1a4b1d5b87fad03f15cfa40a9d341a2b8188965129d022243f93bf130fb7dca537cc1825717159139512a9bc7d635c3848af0a65fc0a1140d6645bab20bd411d81d63804b1db1a7d69b6ebd9fa03c48b05602c79eaf81369bfe1dda1f43481691b6212aeae4affadfd4b54cfab7e02594f98a4ce9f4ff15080642414245b50101030000007b1dc20f0000000086480dac7669ab3aeacd6f3c77e962be301fe5c6efebfcb16834abf29e50cf771bf12ace9ce3f1e97b0017144157a47c257220207406490a555ae3f3cfd5ff05a84c2af873f6503143873978e98961f4eabc2080f77c45ad6d065d58be6e9a02054241424501019cc6d652cec7f054320b9608dfcbde2d7e05320ebf3697c0154c8834f9babe0d4cad8a7bbc45e0ee9ca414aaa1d91e6af98c7c536db50391edd8d505ae2d658f1a0b280402000bd0f2925571011a0510040d00000af5010a202243f93bf130fb7dca537cc1825717159139512a9bc7d635c3848af0a65fc0a112bc01d8c479815319121ae17e2879061de85eb792fa30b00bf365efb261ecffbeafca10438c8afb86a47f6e5895d45aa71277a2e269051481707004b564e2361ffda20fdd7b7310e2ba6cffa402b654100c677263b5d46bf123ec232ab0f3588029bb610806424142453402020000007a1dc20f00000000054241424501012aceb3caa0342781eff5cd80cf1700f6820d6d71bf9d7d679d52b81057a0a26474b63036ccc885c219c541613d8494f47a9474b5dbb10032278b3f913c424c801a0b280402000b60db925571011a0510040d00000af5010a20d8c479815319121ae17e2879061de85eb792fa30b00bf365efb261ecffbeafca12bc019b0211aadcef4bb65e69346cfd256ddd2abcb674271326b08f0975dac7c17bc70c2bb0eb80aef1e145183bb641511425984328002986bf50a89844c893448c046427cc018a114ac99fb9a4d5fcbf9ab6565b06362e272da5e0bb78d9c9dedf8f6f080642414245340201000000791dc20f000000000542414245010166e906ef7e9eba0df82e5215522b27dfbb7ff9e9cfb5e68cb4a329cd6e5e39220da891b527b674e7e47805f4d4bb0a101403deb40e2dff5f1c29114afbedbe861a0b280402000bf0c3925571011a0510040d00000ad6060a209b0211aadcef4bb65e69346cfd256ddd2abcb674271326b08f0975dac7c17bc712bc0144ef51c86927a1e2da55754dba9684dd6ff9bac8c61624ffe958be656c42e036086c697d4e2175d16627de2861303e21c2e987ec9e024cbb523d2fdc29da874dea733222ec5a02dbb1173287d5e8613203357d682f4fe63f9d25da118db5039276080642414245340200000000781dc20f000000000542414245010134de462d98e7cdd8dd8691794e6e935a4ec65f1a712d9e4382d0e2feadf68262467ed9f520c7cd4d7c259a18386bb214a5d23aefa49b4633803417bcd986548e1a0b280402000b80ac925571011a0510040d00001aec01a903040b00000000008c8812200d16ec6fe30767653a18d34902db4e6285b7fb3084a730bc95b
f435ee65735ba107c782f6970342f33342e37332e35352e3138332f7463702f33303333342f7773706c2f6970342f33342e37332e35352e3138332f7463702f333033333368642f6970342f31302e302e312e3133362f7463702f333033333374702f6970342f31302e302e312e3133362f7463702f33303333342f7773000000000300000096f234c06c4a1d4018df2e5f5ed34f5428553944f308a2116ff2a4677ae5203be5a7dcb020ddbea1d00a80ebb4f307f479bd7ca242fa00ad018daedf76eb4f861ab501cd02040b00000000008c8812209f3f68a9c2a4bb9676447919685bca373214d0256db8ba126ab90e4e4eb2906908807c2f6970342f3130342e3135352e37392e39302f7463702f33303333342f777374702f6970342f3130342e3135352e37392e39302f7463702f33303333330000000002000000d492de037d3bc49ac5332c69aee509ac3e9ea608557d78033d81206eb1a0890454abcff60e9346750ac2013cd03846553082639c99ed0818992d6ff6bb610f811ab701d502040b00000000008c881220bbcf661928cde137d3dc501e0d521ad28ab6a8d058daa1acbce02608addceb880884802f6970342f33352e3230352e3134322e3132392f7463702f33303333342f777378742f6970342f33352e3230352e3134322e3132392f7463702f33303333330000000001000000e8219652e4ca1faf79034c08e708c2dfe135d5b6033a4a033f165b299698443908eb119c0c614f50e23259a65af2cd1cf49305abfc97b4a5ab240370651bc3880abe030a2044ef51c86927a1e2da55754dba9684dd6ff9bac8c61624ffe958be656c42e036128503e143f23803ac50e8f6f8e62695d1ce9e4e1d68aa36c1cd2cfd15340213f3423e04333f8c04dda25fa8d47474b253c6630d9ccb70380a71469d9a50f33c00dd2dbfa258f9a8dc3c75cb4566dc1419dadc2168465a7bee5d0006c6ede541b18cb1800c0642414245340200000000771dc20f00000000044241424509030110a8ddd0891e14725841cd1b5581d23806a97f41c28a25436db6473c86e15dcd4f01000000000000007ca58770eb41c1a68ef77e92255e4635fc11f665cb89aee469e920511c48343a010000000000000072bae70a1398c0ba52f815cc5dfbc9ec5c013771e541ae28e05d1129243e3001010000000000000074bfb70627416e6e6c4785e928ced384c6c06e5c8dd173a094bc3118da7b673e01000000000000000000000000000000000000000000000000000000000000000000000000000000054241424501019c32c3d037ef3e8231a1eb08a858fc6aa74a58f1e34c82ed08f2464567fec50db1f0cd197b6c5bb84f146eee6c24316168369d25eb40b642d4df5bbdd2b0838c1a0b280402000b1095925571011a0510040d0000 diff --git a/lib/sync/worker.go b/lib/sync/worker.go deleted file mode 100644 index c28215aa4a..0000000000 --- a/lib/sync/worker.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2023 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "errors" - "sync" - - "github.com/libp2p/go-libp2p/core/peer" -) - -// ErrStopTimeout is an error indicating that the worker stop operation timed out. 
-var ErrStopTimeout = errors.New("stop timeout")
-
-func executeRequest(wg *sync.WaitGroup, who peer.ID, task *syncTask, guard chan struct{}, resCh chan<- *syncTaskResult) {
-	defer func() {
-		<-guard
-		wg.Done()
-	}()
-
-	request := task.request
-	//logger.Infof("[EXECUTING] worker %s", who, request)
-	err := task.requestMaker.Do(who, request, task.response)
-	if err != nil {
-		logger.Infof("[ERR] worker %s, request: %s, err: %s", who, request, err.Error())
-		resCh <- &syncTaskResult{
-			who:      who,
-			request:  request,
-			response: nil,
-		}
-		return
-	}
-
-	logger.Infof("[FINISHED] worker %s, request: %s", who, request)
-	resCh <- &syncTaskResult{
-		who:      who,
-		request:  request,
-		response: task.response,
-	}
-}
diff --git a/lib/sync/worker_pool.go b/lib/sync/worker_pool.go
index c34e439f66..87ace80819 100644
--- a/lib/sync/worker_pool.go
+++ b/lib/sync/worker_pool.go
@@ -5,7 +5,6 @@ package sync
 
 import (
 	"errors"
-	"math/rand"
 	"sync"
 	"time"
 
@@ -14,7 +13,10 @@ import (
 	"golang.org/x/exp/maps"
 )
 
-var ErrNoPeersToMakeRequest = errors.New("no peers to make requests")
+var (
+	ErrNoPeersToMakeRequest = errors.New("no peers to make requests")
+	ErrPeerIgnored          = errors.New("peer ignored")
+)
 
 const (
 	punishmentBaseTimeout = 5 * time.Minute
@@ -57,41 +59,33 @@ func newSyncWorkerPool(net Network) *syncWorkerPool {
 
 // fromBlockAnnounceHandshake stores the peer which sent us a handshake as
 // a possible source for requesting blocks/state/warp proofs
-func (s *syncWorkerPool) fromBlockAnnounceHandshake(who peer.ID) {
+func (s *syncWorkerPool) fromBlockAnnounceHandshake(who peer.ID) error {
 	s.mtx.Lock()
 	defer s.mtx.Unlock()
 
 	if _, ok := s.ignorePeers[who]; ok {
-		return
+		return ErrPeerIgnored
 	}
 
 	_, has := s.workers[who]
 	if has {
-		return
+		return nil
 	}
 
 	s.workers[who] = struct{}{}
 	logger.Tracef("potential worker added, total in the pool %d", len(s.workers))
+	return nil
 }
 
 // submitRequests takes a set of requests and will submit them to the pool through submitRequest;
 // the responses will be dispatched in the resultCh
 func (s *syncWorkerPool) submitRequests(tasks []*syncTask) ([]*syncTaskResult, error) {
-	peers := s.network.AllConnectedPeersIDs()
-	connectedPeers := make(map[peer.ID]struct{}, len(peers))
-	for _, peer := range peers {
-		connectedPeers[peer] = struct{}{}
-	}
-
 	s.mtx.RLock()
 	defer s.mtx.RUnlock()
 
-	pids := append(maps.Keys(s.workers), peers...)
-	rand.Shuffle(len(pids), func(i, j int) {
-		pids[i], pids[j] = pids[j], pids[i]
-	})
-
+	pids := maps.Keys(s.workers)
 	results := make([]*syncTaskResult, 0, len(tasks))
+
 	for _, task := range tasks {
 		completed := false
 		for _, pid := range pids {

From 316214aeacc5e1c461c7bad1dc4cfc613789b04e Mon Sep 17 00:00:00 2001
From: EclesioMeloJunior
Date: Tue, 6 Aug 2024 18:19:06 -0400
Subject: [PATCH 15/74] chore: remove old sync package

---
 dot/core/service_integration_test.go          |   60 -
 dot/mock_node_builder_test.go                 |    5 +-
 dot/network/mock_syncer_test.go               |   12 +
 dot/node_integration_test.go                  |    4 +-
 dot/rpc/modules/mock_syncer_test.go           |   12 +
 dot/sync/chain_sync.go                        |  979 ---------
 dot/sync/chain_sync_test.go                   | 1899 -----------------
 dot/sync/disjoint_block_set.go                |  290 ---
 .../disjoint_block_set_integration_test.go    |  135 --
 dot/sync/disjoint_block_set_test.go           |  484 -----
 dot/sync/errors.go                            |   27 -
 dot/sync/interfaces.go                        |   88 -
 dot/sync/message.go                           |  415 ----
 dot/sync/message_integration_test.go          |  468 ----
 dot/sync/message_test.go                      |  388 ----
 dot/sync/mock_chain_sync_test.go              |  124 --
 dot/sync/mock_disjoint_block_set_test.go      |  190 --
 dot/sync/mock_request.go                      |   55 -
 dot/sync/mock_runtime_test.go                 |  439 ----
 dot/sync/mock_telemetry_test.go               |   52 -
 dot/sync/mocks_generate_test.go               |   11 -
 dot/sync/mocks_test.go                        |  667 ------
 dot/sync/outliers.go                          |   84 -
 dot/sync/outliers_test.go                     |   46 -
 dot/sync/peer_view.go                         |   98 -
 dot/sync/syncer.go                            |  199 --
 dot/sync/syncer_integration_test.go           |  212 --
 dot/sync/syncer_test.go                       |  435 ----
 dot/sync/test_helpers.go                      |   99 -
 dot/sync/worker.go                            |   86 -
 dot/sync/worker_pool.go                       |  233 --
 dot/sync/worker_pool_test.go                  |  247 ---
 dot/sync/worker_test.go                       |  126 --
 33 files changed, 28 insertions(+), 8641 deletions(-)
 delete mode 100644 dot/sync/chain_sync.go
 delete mode 100644 dot/sync/chain_sync_test.go
 delete mode 100644 dot/sync/disjoint_block_set.go
 delete mode 100644 dot/sync/disjoint_block_set_integration_test.go
 delete mode 100644 dot/sync/disjoint_block_set_test.go
 delete mode 100644 dot/sync/errors.go
 delete mode 100644 dot/sync/interfaces.go
 delete mode 100644 dot/sync/message.go
 delete mode 100644 dot/sync/message_integration_test.go
 delete mode 100644 dot/sync/message_test.go
 delete mode 100644 dot/sync/mock_chain_sync_test.go
 delete mode 100644 dot/sync/mock_disjoint_block_set_test.go
 delete mode 100644 dot/sync/mock_request.go
 delete mode 100644 dot/sync/mock_runtime_test.go
 delete mode 100644 dot/sync/mock_telemetry_test.go
 delete mode 100644 dot/sync/mocks_generate_test.go
 delete mode 100644 dot/sync/mocks_test.go
 delete mode 100644 dot/sync/outliers.go
 delete mode 100644 dot/sync/outliers_test.go
 delete mode 100644 dot/sync/peer_view.go
 delete mode 100644 dot/sync/syncer.go
 delete mode 100644 dot/sync/syncer_integration_test.go
 delete mode 100644 dot/sync/syncer_test.go
 delete mode 100644 dot/sync/test_helpers.go
 delete mode 100644 dot/sync/worker.go
 delete mode 100644 dot/sync/worker_pool.go
 delete mode 100644 dot/sync/worker_pool_test.go
 delete mode 100644 dot/sync/worker_test.go

diff --git a/dot/core/service_integration_test.go b/dot/core/service_integration_test.go
index 7a80bdb342..515b616c35 100644
--- a/dot/core/service_integration_test.go
+++ b/dot/core/service_integration_test.go
@@ -15,7 +15,6 @@ import (
 	"github.com/ChainSafe/gossamer/dot/network"
 	"github.com/ChainSafe/gossamer/dot/state"
-	"github.com/ChainSafe/gossamer/dot/sync"
 	"github.com/ChainSafe/gossamer/dot/types"
 	"github.com/ChainSafe/gossamer/lib/babe/inherents"
 	"github.com/ChainSafe/gossamer/lib/common"
@@ -210,65 +209,6 @@ func TestHandleChainReorg_NoReorg(t *testing.T) {
 	require.NoError(t, err)
 }
 
-func TestHandleChainReorg_WithReorg_Trans(t *testing.T) {
-	t.Skip() // TODO: tx fails to validate in handleChainReorg() with "Invalid transaction" (#1026)
-	s := NewTestService(t, nil)
-	bs := s.blockState
-
-	parent, err := bs.BestBlockHeader()
-	require.NoError(t, err)
-
-	bestBlockHash := s.blockState.BestBlockHash()
-	rt, err := s.blockState.GetRuntime(bestBlockHash)
-	require.NoError(t, err)
-
-	block1 := sync.BuildBlock(t, rt, parent, nil)
-	bs.StoreRuntime(block1.Header.Hash(), rt)
-	err = bs.AddBlock(block1)
-	require.NoError(t, err)
-
-	block2 := sync.BuildBlock(t, rt, &block1.Header, nil)
-	bs.StoreRuntime(block2.Header.Hash(), rt)
-	err = bs.AddBlock(block2)
-	require.NoError(t, err)
-
-	block3 := sync.BuildBlock(t, rt, &block2.Header, nil)
-	bs.StoreRuntime(block3.Header.Hash(), rt)
-	err = bs.AddBlock(block3)
-	require.NoError(t, err)
-
-	block4 := sync.BuildBlock(t, rt, &block3.Header, nil)
-	bs.StoreRuntime(block4.Header.Hash(), rt)
-	err = bs.AddBlock(block4)
-	require.NoError(t, err)
-
-	block5 := sync.BuildBlock(t, rt, &block4.Header, nil)
-	bs.StoreRuntime(block5.Header.Hash(), rt)
-	err = bs.AddBlock(block5)
-	require.NoError(t, err)
-
-	block31 := sync.BuildBlock(t, rt, &block2.Header, nil)
-	bs.StoreRuntime(block31.Header.Hash(), rt)
-	err = bs.AddBlock(block31)
-	require.NoError(t, err)
-
-	nonce := uint64(0)
-
-	// Add extrinsic to block `block41`
-	ext := createExtrinsic(t, rt, bs.(*state.BlockState).GenesisHash(), nonce)
-
-	block41 := sync.BuildBlock(t, rt, &block31.Header, ext)
-	bs.StoreRuntime(block41.Header.Hash(), rt)
-	err = bs.AddBlock(block41)
-	require.NoError(t, err)
-
-	err = s.handleChainReorg(block41.Header.Hash(), block5.Header.Hash())
-	require.NoError(t, err)
-
-	pending := s.transactionState.(*state.TransactionState).Pending()
-	require.Equal(t, 1, len(pending))
-}
-
 func TestHandleChainReorg_WithReorg_NoTransactions(t *testing.T) {
 	s := NewTestService(t, nil)
 	const height = 5
diff --git a/dot/mock_node_builder_test.go b/dot/mock_node_builder_test.go
index b6a87fd5cc..ad64164c1c 100644
--- a/dot/mock_node_builder_test.go
+++ b/dot/mock_node_builder_test.go
@@ -18,7 +18,6 @@ import (
 	network "github.com/ChainSafe/gossamer/dot/network"
 	rpc "github.com/ChainSafe/gossamer/dot/rpc"
 	state "github.com/ChainSafe/gossamer/dot/state"
-	sync "github.com/ChainSafe/gossamer/dot/sync"
 	system "github.com/ChainSafe/gossamer/dot/system"
 	types "github.com/ChainSafe/gossamer/dot/types"
 	babe "github.com/ChainSafe/gossamer/lib/babe"
@@ -229,10 +228,10 @@ func (mr *MocknodeBuilderIfaceMockRecorder) loadRuntime(config, ns, stateSrvc, k
 }
 
 // newSyncService mocks base method.
-func (m *MocknodeBuilderIface) newSyncService(config *config.Config, st *state.Service, finalityGadget BlockJustificationVerifier, verifier *babe.VerificationManager, cs *core.Service, net *network.Service, telemetryMailer Telemetry) (*sync.Service, error) { +func (m *MocknodeBuilderIface) newSyncService(config *config.Config, st *state.Service, finalityGadget BlockJustificationVerifier, verifier *babe.VerificationManager, cs *core.Service, net *network.Service, telemetryMailer Telemetry) (network.Syncer, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "newSyncService", config, st, finalityGadget, verifier, cs, net, telemetryMailer) - ret0, _ := ret[0].(*sync.Service) + ret0, _ := ret[0].(network.Syncer) ret1, _ := ret[1].(error) return ret0, ret1 } diff --git a/dot/network/mock_syncer_test.go b/dot/network/mock_syncer_test.go index 64101cf2ec..893f16ffcd 100644 --- a/dot/network/mock_syncer_test.go +++ b/dot/network/mock_syncer_test.go @@ -95,3 +95,15 @@ func (mr *MockSyncerMockRecorder) IsSynced() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsSynced", reflect.TypeOf((*MockSyncer)(nil).IsSynced)) } + +// OnConnectionClosed mocks base method. +func (m *MockSyncer) OnConnectionClosed(arg0 peer.ID) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "OnConnectionClosed", arg0) +} + +// OnConnectionClosed indicates an expected call of OnConnectionClosed. +func (mr *MockSyncerMockRecorder) OnConnectionClosed(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnConnectionClosed", reflect.TypeOf((*MockSyncer)(nil).OnConnectionClosed), arg0) +} diff --git a/dot/node_integration_test.go b/dot/node_integration_test.go index ad1bf47536..b2cf001f44 100644 --- a/dot/node_integration_test.go +++ b/dot/node_integration_test.go @@ -22,7 +22,6 @@ import ( digest "github.com/ChainSafe/gossamer/dot/digest" network "github.com/ChainSafe/gossamer/dot/network" "github.com/ChainSafe/gossamer/dot/state" - dotsync "github.com/ChainSafe/gossamer/dot/sync" system "github.com/ChainSafe/gossamer/dot/system" "github.com/ChainSafe/gossamer/dot/telemetry" "github.com/ChainSafe/gossamer/dot/types" @@ -36,6 +35,7 @@ import ( "github.com/ChainSafe/gossamer/lib/keystore" "github.com/ChainSafe/gossamer/lib/runtime" wazero_runtime "github.com/ChainSafe/gossamer/lib/runtime/wazero" + libsync "github.com/ChainSafe/gossamer/lib/sync" "github.com/ChainSafe/gossamer/pkg/trie" inmemory_trie "github.com/ChainSafe/gossamer/pkg/trie/inmemory" "github.com/stretchr/testify/assert" @@ -139,7 +139,7 @@ func TestNewNode(t *testing.T) { m.EXPECT().newSyncService(initConfig, gomock.AssignableToTypeOf(&state.Service{}), &grandpa.Service{}, &babe.VerificationManager{}, &core.Service{}, gomock.AssignableToTypeOf(&network.Service{}), gomock.AssignableToTypeOf(&telemetry.Mailer{})). - Return(&dotsync.Service{}, nil) + Return(&libsync.SyncService{}, nil) m.EXPECT().createBABEService(initConfig, gomock.AssignableToTypeOf(&state.Service{}), ks.Babe, &core.Service{}, gomock.AssignableToTypeOf(&telemetry.Mailer{})). 
Return(&babe.Service{}, nil) diff --git a/dot/rpc/modules/mock_syncer_test.go b/dot/rpc/modules/mock_syncer_test.go index 1a94839d78..458a4788dc 100644 --- a/dot/rpc/modules/mock_syncer_test.go +++ b/dot/rpc/modules/mock_syncer_test.go @@ -96,3 +96,15 @@ func (mr *MockSyncerMockRecorder) IsSynced() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsSynced", reflect.TypeOf((*MockSyncer)(nil).IsSynced)) } + +// OnConnectionClosed mocks base method. +func (m *MockSyncer) OnConnectionClosed(arg0 peer.ID) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "OnConnectionClosed", arg0) +} + +// OnConnectionClosed indicates an expected call of OnConnectionClosed. +func (mr *MockSyncerMockRecorder) OnConnectionClosed(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnConnectionClosed", reflect.TypeOf((*MockSyncer)(nil).OnConnectionClosed), arg0) +} diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go deleted file mode 100644 index c8e3c3c730..0000000000 --- a/dot/sync/chain_sync.go +++ /dev/null @@ -1,979 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "bytes" - "errors" - "fmt" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/libp2p/go-libp2p/core/peer" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - "golang.org/x/exp/slices" - - "github.com/ChainSafe/gossamer/dot/network" - "github.com/ChainSafe/gossamer/dot/peerset" - "github.com/ChainSafe/gossamer/dot/telemetry" - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/internal/database" - "github.com/ChainSafe/gossamer/lib/common" - "github.com/ChainSafe/gossamer/lib/common/variadic" -) - -var _ ChainSync = (*chainSync)(nil) - -type chainSyncState byte - -const ( - bootstrap chainSyncState = iota - tip -) - -type blockOrigin byte - -const ( - networkInitialSync blockOrigin = iota - networkBroadcast -) - -func (s chainSyncState) String() string { - switch s { - case bootstrap: - return "bootstrap" - case tip: - return "tip" - default: - return "unknown" - } -} - -var ( - pendingBlocksLimit = network.MaxBlocksInResponse * 32 - isSyncedGauge = promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: "gossamer_network_syncer", - Name: "is_synced", - Help: "bool representing whether the node is synced to the head of the chain", - }) - - blockSizeGauge = promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: "gossamer_sync", - Name: "block_size", - Help: "represent the size of blocks synced", - }) -) - -// ChainSync contains the methods used by the high-level service into the `chainSync` module -type ChainSync interface { - start() - stop() error - - // called upon receiving a BlockAnnounceHandshake - onBlockAnnounceHandshake(p peer.ID, hash common.Hash, number uint) error - - // getSyncMode returns the current syncing state - getSyncMode() chainSyncState - - // getHighestBlock returns the highest block or an error - getHighestBlock() (highestBlock uint, err error) - - onBlockAnnounce(announcedBlock) error -} - -type announcedBlock struct { - who peer.ID - header *types.Header -} - -type chainSync struct { - wg sync.WaitGroup - stopCh chan struct{} - - blockState BlockState - network Network - - workerPool *syncWorkerPool - - // tracks the latest state we know of from our peers, - // ie. 
their best block hash and number - peerViewSet *peerViewSet - - // disjoint set of blocks which are known but not ready to be processed - // ie. we only know the hash, number, or the parent block is unknown, or the body is unknown - // note: the block may have empty fields, as some data about it may be unknown - pendingBlocks DisjointBlockSet - - syncMode atomic.Value - - finalisedCh <-chan *types.FinalisationInfo - - minPeers int - slotDuration time.Duration - - storageState StorageState - transactionState TransactionState - babeVerifier BabeVerifier - finalityGadget FinalityGadget - blockImportHandler BlockImportHandler - telemetry Telemetry - badBlocks []string - requestMaker network.RequestMaker - waitPeersDuration time.Duration -} - -type chainSyncConfig struct { - bs BlockState - net Network - requestMaker network.RequestMaker - pendingBlocks DisjointBlockSet - minPeers, maxPeers int - slotDuration time.Duration - storageState StorageState - transactionState TransactionState - babeVerifier BabeVerifier - finalityGadget FinalityGadget - blockImportHandler BlockImportHandler - telemetry Telemetry - badBlocks []string - waitPeersDuration time.Duration -} - -func newChainSync(cfg chainSyncConfig) *chainSync { - atomicState := atomic.Value{} - atomicState.Store(tip) - return &chainSync{ - stopCh: make(chan struct{}), - storageState: cfg.storageState, - transactionState: cfg.transactionState, - babeVerifier: cfg.babeVerifier, - finalityGadget: cfg.finalityGadget, - blockImportHandler: cfg.blockImportHandler, - telemetry: cfg.telemetry, - blockState: cfg.bs, - network: cfg.net, - peerViewSet: newPeerViewSet(cfg.maxPeers), - pendingBlocks: cfg.pendingBlocks, - syncMode: atomicState, - finalisedCh: cfg.bs.GetFinalisedNotifierChannel(), - minPeers: cfg.minPeers, - slotDuration: cfg.slotDuration, - workerPool: newSyncWorkerPool(cfg.net, cfg.requestMaker), - badBlocks: cfg.badBlocks, - requestMaker: cfg.requestMaker, - waitPeersDuration: cfg.waitPeersDuration, - } -} - -func (cs *chainSync) waitWorkersAndTarget() { - waitPeersTimer := time.NewTimer(cs.waitPeersDuration) - - highestFinalizedHeader, err := cs.blockState.GetHighestFinalisedHeader() - if err != nil { - panic(fmt.Sprintf("failed to get highest finalised header: %v", err)) - } - - for { - cs.workerPool.useConnectedPeers() - totalAvailable := cs.workerPool.totalWorkers() - - if totalAvailable >= uint(cs.minPeers) && - cs.peerViewSet.getTarget() > 0 { - return - } - - err := cs.network.BlockAnnounceHandshake(highestFinalizedHeader) - if err != nil && !errors.Is(err, network.ErrNoPeersConnected) { - logger.Errorf("retrieving target info from peers: %v", err) - } - - select { - case <-waitPeersTimer.C: - waitPeersTimer.Reset(cs.waitPeersDuration) - - case <-cs.stopCh: - return - } - } -} - -func (cs *chainSync) start() { - // since the default status from sync mode is syncMode(tip) - isSyncedGauge.Set(1) - - cs.wg.Add(1) - go cs.pendingBlocks.run(cs.finalisedCh, cs.stopCh, &cs.wg) - - // wait until we have a minimal workers in the sync worker pool - cs.waitWorkersAndTarget() -} - -func (cs *chainSync) stop() error { - err := cs.workerPool.stop() - if err != nil { - return fmt.Errorf("stopping worker poll: %w", err) - } - - close(cs.stopCh) - allStopCh := make(chan struct{}) - go func() { - defer close(allStopCh) - cs.wg.Wait() - }() - - timeoutTimer := time.NewTimer(30 * time.Second) - - select { - case <-allStopCh: - if !timeoutTimer.Stop() { - <-timeoutTimer.C - } - return nil - case <-timeoutTimer.C: - return ErrStopTimeout - } -} - 
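
(Aside on the logic being removed: the deleted chainSync flips between bootstrap and tip mode based on whether the local best block trails the peers' announced target by more than network.MaxBlocksInResponse; per the comments in this file, that constant is 128. Below is a minimal, self-contained sketch of that check, with the constant inlined and all names chosen here purely for illustration rather than taken from the codebase.)

package main

import "fmt"

// maxBlocksInResponse stands in for network.MaxBlocksInResponse; the value
// 128 is an assumption taken from the comments in the removed code.
const maxBlocksInResponse uint = 128

// isBootstrap mirrors the shape of the deleted isBootstrapSync: batch
// (bootstrap) sync is used while the local best block trails the sync
// target by more than one full block response.
func isBootstrap(currentBlockNumber, syncTarget uint) bool {
	return currentBlockNumber+maxBlocksInResponse < syncTarget
}

func main() {
	fmt.Println(isBootstrap(100, 300)) // true: 200 blocks behind, stay in bootstrap mode
	fmt.Println(isBootstrap(250, 300)) // false: within 128 blocks, switch to tip mode
}
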
-func (cs *chainSync) isBootstrapSync(currentBlockNumber uint) bool { - syncTarget := cs.peerViewSet.getTarget() - return currentBlockNumber+network.MaxBlocksInResponse < syncTarget -} - -func (cs *chainSync) bootstrapSync() { - defer cs.wg.Done() - currentBlock, err := cs.blockState.GetHighestFinalisedHeader() - if err != nil { - panic("cannot find highest finalised header") - } - - for { - select { - case <-cs.stopCh: - logger.Warn("ending bootstrap sync, chain sync stop channel triggered") - return - default: - } - - isBootstrap := cs.isBootstrapSync(currentBlock.Number) - if isBootstrap { - cs.workerPool.useConnectedPeers() - err = cs.requestMaxBlocksFrom(currentBlock, networkInitialSync) - if err != nil { - if errors.Is(err, errBlockStatePaused) { - logger.Debugf("exiting bootstrap sync: %s", err) - return - } - logger.Errorf("requesting max blocks from best block header: %s", err) - } - - currentBlock, err = cs.blockState.BestBlockHeader() - if err != nil { - logger.Errorf("getting best block header: %v", err) - } - } else { - // we are less than 128 blocks behind the target we can use tip sync - cs.syncMode.Store(tip) - isSyncedGauge.Set(1) - logger.Infof("🔁 switched sync mode to %s", tip.String()) - return - } - } -} - -func (cs *chainSync) getSyncMode() chainSyncState { - return cs.syncMode.Load().(chainSyncState) -} - -// onBlockAnnounceHandshake sets a peer's best known block -func (cs *chainSync) onBlockAnnounceHandshake(who peer.ID, bestHash common.Hash, bestNumber uint) error { - cs.workerPool.fromBlockAnnounce(who) - cs.peerViewSet.update(who, bestHash, bestNumber) - - if cs.getSyncMode() == bootstrap { - return nil - } - - bestBlockHeader, err := cs.blockState.BestBlockHeader() - if err != nil { - return err - } - - isBootstrap := cs.isBootstrapSync(bestBlockHeader.Number) - if !isBootstrap { - return nil - } - - // we are more than 128 blocks behind the head, switch to bootstrap - cs.syncMode.Store(bootstrap) - isSyncedGauge.Set(0) - logger.Infof("🔁 switched sync mode to %s", bootstrap.String()) - - cs.wg.Add(1) - go cs.bootstrapSync() - return nil -} - -func (cs *chainSync) onBlockAnnounce(announced announcedBlock) error { - // TODO: https://github.com/ChainSafe/gossamer/issues/3432 - if cs.pendingBlocks.hasBlock(announced.header.Hash()) { - return fmt.Errorf("%w: block #%d (%s)", - errAlreadyInDisjointSet, announced.header.Number, announced.header.Hash()) - } - - err := cs.pendingBlocks.addHeader(announced.header) - if err != nil { - return fmt.Errorf("while adding pending block header: %w", err) - } - - if cs.getSyncMode() == bootstrap { - return nil - } - - bestBlockHeader, err := cs.blockState.BestBlockHeader() - if err != nil { - return fmt.Errorf("getting best block header: %w", err) - } - - isBootstrap := cs.isBootstrapSync(bestBlockHeader.Number) - if !isBootstrap { - return cs.requestAnnouncedBlock(bestBlockHeader, announced) - } - - return nil -} - -func (cs *chainSync) requestAnnouncedBlock(bestBlockHeader *types.Header, announce announcedBlock) error { - peerWhoAnnounced := announce.who - announcedHash := announce.header.Hash() - announcedNumber := announce.header.Number - - has, err := cs.blockState.HasHeader(announcedHash) - if err != nil { - return fmt.Errorf("checking if header exists: %s", err) - } - - if has { - return nil - } - - highestFinalizedHeader, err := cs.blockState.GetHighestFinalisedHeader() - if err != nil { - return fmt.Errorf("getting highest finalized header") - } - - // if the announced block contains a lower number than our best - // 
block header, let's check if it is greater than our latests - // finalized header, if so this block belongs to a fork chain - if announcedNumber < bestBlockHeader.Number { - // ignore the block if it has the same or lower number - // TODO: is it following the protocol to send a blockAnnounce with number < highestFinalized number? - if announcedNumber <= highestFinalizedHeader.Number { - return nil - } - - return cs.requestForkBlocks(bestBlockHeader, highestFinalizedHeader, announce.header, announce.who) - } - - err = cs.requestChainBlocks(announce.header, bestBlockHeader, peerWhoAnnounced) - if err != nil { - return fmt.Errorf("requesting chain blocks: %w", err) - } - - err = cs.requestPendingBlocks(highestFinalizedHeader) - if err != nil { - return fmt.Errorf("while requesting pending blocks") - } - - return nil -} - -func (cs *chainSync) requestChainBlocks(announcedHeader, bestBlockHeader *types.Header, - peerWhoAnnounced peer.ID) error { - gapLength := uint32(announcedHeader.Number - bestBlockHeader.Number) - startAtBlock := announcedHeader.Number - totalBlocks := uint32(1) - - var request *network.BlockRequestMessage - startingBlock := *variadic.MustNewUint32OrHash(announcedHeader.Hash()) - - if gapLength > 1 { - request = network.NewBlockRequest(startingBlock, gapLength, - network.BootstrapRequestData, network.Descending) - - startAtBlock = announcedHeader.Number - uint(*request.Max) + 1 - totalBlocks = *request.Max - - logger.Infof("requesting %d blocks from peer: %v, descending request from #%d (%s)", - gapLength, peerWhoAnnounced, announcedHeader.Number, announcedHeader.Hash().Short()) - } else { - request = network.NewBlockRequest(startingBlock, 1, network.BootstrapRequestData, network.Descending) - logger.Infof("requesting a single block from peer: %v with Number: #%d and Hash: (%s)", - peerWhoAnnounced, announcedHeader.Number, announcedHeader.Hash().Short()) - } - - resultsQueue := make(chan *syncTaskResult) - err := cs.submitRequest(request, &peerWhoAnnounced, resultsQueue) - if err != nil { - return err - } - err = cs.handleWorkersResults(resultsQueue, networkBroadcast, startAtBlock, totalBlocks) - if err != nil { - return fmt.Errorf("while handling workers results: %w", err) - } - - return nil -} - -func (cs *chainSync) requestForkBlocks(bestBlockHeader, highestFinalizedHeader, announcedHeader *types.Header, - peerWhoAnnounced peer.ID) error { - logger.Infof("block announce lower than best block #%d (%s) and greater highest finalized #%d (%s)", - bestBlockHeader.Number, bestBlockHeader.Hash().Short(), - highestFinalizedHeader.Number, highestFinalizedHeader.Hash().Short()) - - parentExists, err := cs.blockState.HasHeader(announcedHeader.ParentHash) - if err != nil && !errors.Is(err, database.ErrNotFound) { - return fmt.Errorf("while checking header exists: %w", err) - } - - gapLength := uint32(1) - startAtBlock := announcedHeader.Number - announcedHash := announcedHeader.Hash() - var request *network.BlockRequestMessage - startingBlock := *variadic.MustNewUint32OrHash(announcedHash) - - if parentExists { - request = network.NewBlockRequest(startingBlock, 1, network.BootstrapRequestData, network.Descending) - } else { - gapLength = uint32(announcedHeader.Number - highestFinalizedHeader.Number) - startAtBlock = highestFinalizedHeader.Number + 1 - request = network.NewBlockRequest(startingBlock, gapLength, network.BootstrapRequestData, network.Descending) - } - - logger.Infof("requesting %d fork blocks from peer: %v starting at #%d (%s)", - gapLength, peerWhoAnnounced, 
announcedHeader.Number, announcedHash.Short()) - - resultsQueue := make(chan *syncTaskResult) - err = cs.submitRequest(request, &peerWhoAnnounced, resultsQueue) - if err != nil { - return err - } - err = cs.handleWorkersResults(resultsQueue, networkBroadcast, startAtBlock, gapLength) - if err != nil { - return fmt.Errorf("while handling workers results: %w", err) - } - - return nil -} - -func (cs *chainSync) requestPendingBlocks(highestFinalizedHeader *types.Header) error { - pendingBlocksTotal := cs.pendingBlocks.size() - logger.Infof("total of pending blocks: %d", pendingBlocksTotal) - if pendingBlocksTotal < 1 { - return nil - } - - pendingBlocks := cs.pendingBlocks.getBlocks() - for _, pendingBlock := range pendingBlocks { - if pendingBlock.number <= highestFinalizedHeader.Number { - cs.pendingBlocks.removeBlock(pendingBlock.hash) - continue - } - - parentExists, err := cs.blockState.HasHeader(pendingBlock.header.ParentHash) - if err != nil { - return fmt.Errorf("getting pending block parent header: %w", err) - } - - if parentExists { - err := cs.handleReadyBlock(pendingBlock.toBlockData(), networkBroadcast) - if err != nil { - return fmt.Errorf("handling ready block: %w", err) - } - continue - } - - gapLength := pendingBlock.number - highestFinalizedHeader.Number - if gapLength > 128 { - logger.Warnf("gap of %d blocks, max expected: 128 block", gapLength) - gapLength = 128 - } - - descendingGapRequest := network.NewBlockRequest(*variadic.MustNewUint32OrHash(pendingBlock.hash), - uint32(gapLength), network.BootstrapRequestData, network.Descending) - startAtBlock := pendingBlock.number - uint(*descendingGapRequest.Max) + 1 - - // the `requests` in the tip sync are not related necessarily - // this is why we need to treat them separately - resultsQueue := make(chan *syncTaskResult) - err = cs.submitRequest(descendingGapRequest, nil, resultsQueue) - if err != nil { - return err - } - // TODO: we should handle the requests concurrently - // a way of achieve that is by constructing a new `handleWorkersResults` for - // handling only tip sync requests - err = cs.handleWorkersResults(resultsQueue, networkBroadcast, startAtBlock, *descendingGapRequest.Max) - if err != nil { - return fmt.Errorf("while handling workers results: %w", err) - } - } - - return nil -} - -func (cs *chainSync) requestMaxBlocksFrom(bestBlockHeader *types.Header, origin blockOrigin) error { //nolint:unparam - startRequestAt := bestBlockHeader.Number + 1 - - // targetBlockNumber is the virtual target we will request, however - // we should bound it to the real target which is collected through - // block announces received from other peers - targetBlockNumber := startRequestAt + maxRequestsAllowed*128 - realTarget := cs.peerViewSet.getTarget() - - if targetBlockNumber > realTarget { - targetBlockNumber = realTarget - } - - requests := network.NewAscendingBlockRequests(startRequestAt, targetBlockNumber, - network.BootstrapRequestData) - - var expectedAmountOfBlocks uint32 - for _, request := range requests { - if request.Max != nil { - expectedAmountOfBlocks += *request.Max - } - } - - resultsQueue, err := cs.submitRequests(requests) - if err != nil { - return err - } - err = cs.handleWorkersResults(resultsQueue, origin, startRequestAt, expectedAmountOfBlocks) - if err != nil { - return fmt.Errorf("while handling workers results: %w", err) - } - - return nil -} - -func (cs *chainSync) submitRequest( - request *network.BlockRequestMessage, - who *peer.ID, - resultCh chan<- *syncTaskResult, -) error { - if 
!cs.blockState.IsPaused() { - cs.workerPool.submitRequest(request, who, resultCh) - return nil - } - return fmt.Errorf("submitting request: %w", errBlockStatePaused) -} - -func (cs *chainSync) submitRequests(requests []*network.BlockRequestMessage) ( - resultCh chan *syncTaskResult, err error) { - if !cs.blockState.IsPaused() { - return cs.workerPool.submitRequests(requests), nil - } - return nil, fmt.Errorf("submitting requests: %w", errBlockStatePaused) -} - -func (cs *chainSync) showSyncStats(syncBegin time.Time, syncedBlocks int) { - finalisedHeader, err := cs.blockState.GetHighestFinalisedHeader() - if err != nil { - logger.Criticalf("getting highest finalized header: %w", err) - return - } - - totalSyncAndImportSeconds := time.Since(syncBegin).Seconds() - bps := float64(syncedBlocks) / totalSyncAndImportSeconds - logger.Infof("⛓️ synced %d blocks, "+ - "took: %.2f seconds, bps: %.2f blocks/second", - syncedBlocks, totalSyncAndImportSeconds, bps) - - logger.Infof( - "🚣 currently syncing, %d peers connected, "+ - "%d available workers, "+ - "target block number %d, "+ - "finalised #%d (%s) "+ - "sync mode: %s", - len(cs.network.Peers()), - cs.workerPool.totalWorkers(), - cs.peerViewSet.getTarget(), - finalisedHeader.Number, - finalisedHeader.Hash().Short(), - cs.getSyncMode().String(), - ) -} - -// handleWorkersResults, every time we submit requests to workers they results should be computed here -// and every cicle we should endup with a complete chain, whenever we identify -// any error from a worker we should evaluate the error and re-insert the request -// in the queue and wait for it to completes -// TODO: handle only justification requests -func (cs *chainSync) handleWorkersResults( - workersResults chan *syncTaskResult, origin blockOrigin, startAtBlock uint, expectedSyncedBlocks uint32) error { - startTime := time.Now() - syncingChain := make([]*types.BlockData, expectedSyncedBlocks) - // the total numbers of blocks is missing in the syncing chain - waitingBlocks := expectedSyncedBlocks - -taskResultLoop: - for waitingBlocks > 0 { - // in a case where we don't handle workers results we should check the pool - idleDuration := time.Minute - idleTimer := time.NewTimer(idleDuration) - - select { - case <-cs.stopCh: - return nil - - case <-idleTimer.C: - logger.Warnf("idle ticker triggered! 
checking pool") - cs.workerPool.useConnectedPeers() - continue - - case taskResult := <-workersResults: - if !idleTimer.Stop() { - <-idleTimer.C - } - - who := taskResult.who - request := taskResult.request - response := taskResult.response - - logger.Debugf("task result: peer(%s), with error: %v, with response: %v", - taskResult.who, taskResult.err != nil, taskResult.response != nil) - - if taskResult.err != nil { - if !errors.Is(taskResult.err, network.ErrReceivedEmptyMessage) { - cs.workerPool.ignorePeerAsWorker(taskResult.who) - - logger.Errorf("task result: peer(%s) error: %s", - taskResult.who, taskResult.err) - - if errors.Is(taskResult.err, network.ErrNilBlockInResponse) { - cs.network.ReportPeer(peerset.ReputationChange{ - Value: peerset.BadMessageValue, - Reason: peerset.BadMessageReason, - }, who) - } - - if strings.Contains(taskResult.err.Error(), "protocols not supported") { - cs.network.ReportPeer(peerset.ReputationChange{ - Value: peerset.BadProtocolValue, - Reason: peerset.BadProtocolReason, - }, who) - } - } - - err := cs.submitRequest(request, nil, workersResults) - if err != nil { - return err - } - continue - } - - if request.Direction == network.Descending { - // reverse blocks before pre-validating and placing in ready queue - reverseBlockData(response.BlockData) - } - - err := validateResponseFields(request.RequestedData, response.BlockData) - if err != nil { - logger.Criticalf("validating fields: %s", err) - // TODO: check the reputation change for nil body in response - // and nil justification in response - if errors.Is(err, errNilHeaderInResponse) { - cs.network.ReportPeer(peerset.ReputationChange{ - Value: peerset.IncompleteHeaderValue, - Reason: peerset.IncompleteHeaderReason, - }, who) - } - - err = cs.submitRequest(taskResult.request, nil, workersResults) - if err != nil { - return err - } - continue taskResultLoop - } - - isChain := isResponseAChain(response.BlockData) - if !isChain { - logger.Criticalf("response from %s is not a chain", who) - err = cs.submitRequest(taskResult.request, nil, workersResults) - if err != nil { - return err - } - continue taskResultLoop - } - - grows := doResponseGrowsTheChain(response.BlockData, syncingChain, - startAtBlock, expectedSyncedBlocks) - if !grows { - logger.Criticalf("response from %s does not grows the ongoing chain", who) - err = cs.submitRequest(taskResult.request, nil, workersResults) - if err != nil { - return err - } - continue taskResultLoop - } - - for _, blockInResponse := range response.BlockData { - if slices.Contains(cs.badBlocks, blockInResponse.Hash.String()) { - logger.Criticalf("%s sent a known bad block: %s (#%d)", - who, blockInResponse.Hash.String(), blockInResponse.Number()) - - cs.network.ReportPeer(peerset.ReputationChange{ - Value: peerset.BadBlockAnnouncementValue, - Reason: peerset.BadBlockAnnouncementReason, - }, who) - - cs.workerPool.ignorePeerAsWorker(taskResult.who) - err = cs.submitRequest(taskResult.request, nil, workersResults) - if err != nil { - return err - } - continue taskResultLoop - } - - blockExactIndex := blockInResponse.Header.Number - startAtBlock - if blockExactIndex < uint(expectedSyncedBlocks) { - syncingChain[blockExactIndex] = blockInResponse - } - } - - // we need to check if we've filled all positions - // otherwise we should wait for more responses - waitingBlocks -= uint32(len(response.BlockData)) - - // we received a response without the desired amount of blocks - // we should include a new request to retrieve the missing blocks - if len(response.BlockData) < 
int(*request.Max) { - difference := uint32(int(*request.Max) - len(response.BlockData)) - lastItem := response.BlockData[len(response.BlockData)-1] - - startRequestNumber := uint32(lastItem.Header.Number + 1) - startAt, err := variadic.NewUint32OrHash(startRequestNumber) - if err != nil { - panic(err) - } - - taskResult.request = &network.BlockRequestMessage{ - RequestedData: network.BootstrapRequestData, - StartingBlock: *startAt, - Direction: network.Ascending, - Max: &difference, - } - err = cs.submitRequest(taskResult.request, nil, workersResults) - if err != nil { - return err - } - continue taskResultLoop - } - } - } - - retreiveBlocksSeconds := time.Since(startTime).Seconds() - logger.Infof("🔽 retrieved %d blocks, took: %.2f seconds, starting process...", - expectedSyncedBlocks, retreiveBlocksSeconds) - - // response was validated! place into ready block queue - for _, bd := range syncingChain { - // block is ready to be processed! - if err := cs.handleReadyBlock(bd, origin); err != nil { - return fmt.Errorf("while handling ready block: %w", err) - } - } - - cs.showSyncStats(startTime, len(syncingChain)) - return nil -} - -func (cs *chainSync) handleReadyBlock(bd *types.BlockData, origin blockOrigin) error { - // if header was not requested, get it from the pending set - // if we're expecting headers, validate should ensure we have a header - if bd.Header == nil { - block := cs.pendingBlocks.getBlock(bd.Hash) - if block == nil { - // block wasn't in the pending set! - // let's check the db as maybe we already processed it - has, err := cs.blockState.HasHeader(bd.Hash) - if err != nil && !errors.Is(err, database.ErrNotFound) { - logger.Debugf("failed to check if header is known for hash %s: %s", bd.Hash, err) - return err - } - - if has { - logger.Tracef("ignoring block we've already processed, hash=%s", bd.Hash) - return err - } - - // this is bad and shouldn't happen - logger.Errorf("block with unknown header is ready: hash=%s", bd.Hash) - return err - } - - if block.header == nil { - logger.Errorf("new ready block number (unknown) with hash %s", bd.Hash) - return nil - } - - bd.Header = block.header - } - - err := cs.processBlockData(*bd, origin) - if err != nil { - // depending on the error, we might want to save this block for later - logger.Errorf("block data processing for block with hash %s failed: %s", bd.Hash, err) - return err - } - - cs.pendingBlocks.removeBlock(bd.Hash) - return nil -} - -// processBlockData processes the BlockData from a BlockResponse and -// returns the index of the last BlockData it handled on success, -// or the index of the block data that errored on failure. 
-// TODO: https://github.com/ChainSafe/gossamer/issues/3468 -func (cs *chainSync) processBlockData(blockData types.BlockData, origin blockOrigin) error { - // while in bootstrap mode we don't need to broadcast block announcements - announceImportedBlock := cs.getSyncMode() == tip - - if blockData.Header != nil { - if blockData.Body != nil { - err := cs.processBlockDataWithHeaderAndBody(blockData, origin, announceImportedBlock) - if err != nil { - return fmt.Errorf("processing block data with header and body: %w", err) - } - } - - if blockData.Justification != nil && len(*blockData.Justification) > 0 { - err := cs.handleJustification(blockData.Header, *blockData.Justification) - if err != nil { - return fmt.Errorf("handling justification: %w", err) - } - } - } - - err := cs.blockState.CompareAndSetBlockData(&blockData) - if err != nil { - return fmt.Errorf("comparing and setting block data: %w", err) - } - - return nil -} - -func (cs *chainSync) processBlockDataWithHeaderAndBody(blockData types.BlockData, - origin blockOrigin, announceImportedBlock bool) (err error) { - - if origin != networkInitialSync { - err = cs.babeVerifier.VerifyBlock(blockData.Header) - if err != nil { - return fmt.Errorf("babe verifying block: %w", err) - } - } - - cs.handleBody(blockData.Body) - - block := &types.Block{ - Header: *blockData.Header, - Body: *blockData.Body, - } - - err = cs.handleBlock(block, announceImportedBlock) - if err != nil { - return fmt.Errorf("handling block: %w", err) - } - - return nil -} - -// handleHeader handles block bodies included in BlockResponses -func (cs *chainSync) handleBody(body *types.Body) { - acc := 0 - for _, ext := range *body { - acc += len(ext) - cs.transactionState.RemoveExtrinsic(ext) - } - - blockSizeGauge.Set(float64(acc)) -} - -func (cs *chainSync) handleJustification(header *types.Header, justification []byte) (err error) { - headerHash := header.Hash() - err = cs.finalityGadget.VerifyBlockJustification(headerHash, justification) - if err != nil { - return fmt.Errorf("verifying block number %d justification: %w", header.Number, err) - } - - err = cs.blockState.SetJustification(headerHash, justification) - if err != nil { - return fmt.Errorf("setting justification for block number %d: %w", header.Number, err) - } - - return nil -} - -// handleHeader handles blocks (header+body) included in BlockResponses -func (cs *chainSync) handleBlock(block *types.Block, announceImportedBlock bool) error { - parent, err := cs.blockState.GetHeader(block.Header.ParentHash) - if err != nil { - return fmt.Errorf("%w: %s", errFailedToGetParent, err) - } - - cs.storageState.Lock() - defer cs.storageState.Unlock() - - ts, err := cs.storageState.TrieState(&parent.StateRoot) - if err != nil { - return err - } - - root := ts.MustRoot() - if !bytes.Equal(parent.StateRoot[:], root[:]) { - panic("parent state root does not match snapshot state root") - } - - rt, err := cs.blockState.GetRuntime(parent.Hash()) - if err != nil { - return err - } - - rt.SetContextStorage(ts) - - _, err = rt.ExecuteBlock(block) - if err != nil { - return fmt.Errorf("failed to execute block %d: %w", block.Header.Number, err) - } - - if err = cs.blockImportHandler.HandleBlockImport(block, ts, announceImportedBlock); err != nil { - return err - } - - blockHash := block.Header.Hash() - cs.telemetry.SendMessage(telemetry.NewBlockImport( - &blockHash, - block.Header.Number, - "NetworkInitialSync")) - - return nil -} - -func (cs *chainSync) getHighestBlock() (highestBlock uint, err error) { - if cs.peerViewSet.size() 
== 0 { - return 0, errNoPeers - } - - for _, ps := range cs.peerViewSet.values() { - if ps.number < highestBlock { - continue - } - highestBlock = ps.number - } - - return highestBlock, nil -} diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go deleted file mode 100644 index 79ea46ad56..0000000000 --- a/dot/sync/chain_sync_test.go +++ /dev/null @@ -1,1899 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "errors" - "fmt" - "sync/atomic" - "testing" - "time" - - "github.com/ChainSafe/gossamer/dot/network" - "github.com/ChainSafe/gossamer/dot/peerset" - "github.com/ChainSafe/gossamer/dot/telemetry" - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/common" - "github.com/ChainSafe/gossamer/lib/common/variadic" - "github.com/ChainSafe/gossamer/lib/runtime/storage" - "github.com/ChainSafe/gossamer/pkg/trie" - inmemory_trie "github.com/ChainSafe/gossamer/pkg/trie/inmemory" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.uber.org/mock/gomock" -) - -func Test_chainSyncState_String(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - s chainSyncState - want string - }{ - { - name: "case_bootstrap", - s: bootstrap, - want: "bootstrap", - }, - { - name: "case_tip", - s: tip, - want: "tip", - }, - { - name: "case_unknown", - s: 3, - want: "unknown", - }, - } - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - got := tt.s.String() - assert.Equal(t, tt.want, got) - }) - } -} - -func Test_chainSync_onBlockAnnounce(t *testing.T) { - t.Parallel() - const somePeer = peer.ID("abc") - - errTest := errors.New("test error") - emptyTrieState := storage.NewTrieState(inmemory_trie.NewEmptyTrie()) - block1AnnounceHeader := types.NewHeader(common.Hash{}, emptyTrieState.MustRoot(), - common.Hash{}, 1, nil) - block2AnnounceHeader := types.NewHeader(block1AnnounceHeader.Hash(), - emptyTrieState.MustRoot(), - common.Hash{}, 2, nil) - - testCases := map[string]struct { - waitBootstrapSync bool - chainSyncBuilder func(ctrl *gomock.Controller) *chainSync - peerID peer.ID - blockAnnounceHeader *types.Header - errWrapped error - errMessage string - expectedSyncMode chainSyncState - }{ - "announced_block_already_exists_in_disjoint_set": { - chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - pendingBlocks := NewMockDisjointBlockSet(ctrl) - pendingBlocks.EXPECT().hasBlock(block2AnnounceHeader.Hash()).Return(true) - return &chainSync{ - stopCh: make(chan struct{}), - pendingBlocks: pendingBlocks, - peerViewSet: newPeerViewSet(0), - workerPool: newSyncWorkerPool(NewMockNetwork(nil), NewMockRequestMaker(nil)), - } - }, - peerID: somePeer, - blockAnnounceHeader: block2AnnounceHeader, - errWrapped: errAlreadyInDisjointSet, - errMessage: fmt.Sprintf("already in disjoint set: block #%d (%s)", - block2AnnounceHeader.Number, block2AnnounceHeader.Hash()), - }, - "failed_to_add_announced_block_in_disjoint_set": { - chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - pendingBlocks := NewMockDisjointBlockSet(ctrl) - pendingBlocks.EXPECT().hasBlock(block2AnnounceHeader.Hash()).Return(false) - pendingBlocks.EXPECT().addHeader(block2AnnounceHeader).Return(errTest) - - return &chainSync{ - stopCh: make(chan struct{}), - pendingBlocks: pendingBlocks, - peerViewSet: newPeerViewSet(0), - workerPool: newSyncWorkerPool(NewMockNetwork(nil), NewMockRequestMaker(nil)), - } 
- }, - peerID: somePeer, - blockAnnounceHeader: block2AnnounceHeader, - errWrapped: errTest, - errMessage: "while adding pending block header: test error", - }, - "announced_block_while_in_bootstrap_mode": { - chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - pendingBlocks := NewMockDisjointBlockSet(ctrl) - pendingBlocks.EXPECT().hasBlock(block2AnnounceHeader.Hash()).Return(false) - pendingBlocks.EXPECT().addHeader(block2AnnounceHeader).Return(nil) - - state := atomic.Value{} - state.Store(bootstrap) - - return &chainSync{ - stopCh: make(chan struct{}), - pendingBlocks: pendingBlocks, - syncMode: state, - peerViewSet: newPeerViewSet(0), - workerPool: newSyncWorkerPool(NewMockNetwork(nil), NewMockRequestMaker(nil)), - } - }, - peerID: somePeer, - blockAnnounceHeader: block2AnnounceHeader, - }, - "announced_block_while_in_tip_mode": { - chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - pendingBlocksMock := NewMockDisjointBlockSet(ctrl) - pendingBlocksMock.EXPECT().hasBlock(block2AnnounceHeader.Hash()).Return(false) - pendingBlocksMock.EXPECT().addHeader(block2AnnounceHeader).Return(nil) - pendingBlocksMock.EXPECT().removeBlock(block2AnnounceHeader.Hash()) - pendingBlocksMock.EXPECT().size().Return(0) - - blockStateMock := NewMockBlockState(ctrl) - blockStateMock.EXPECT(). - HasHeader(block2AnnounceHeader.Hash()). - Return(false, nil) - blockStateMock.EXPECT().IsPaused().Return(false) - - blockStateMock.EXPECT(). - BestBlockHeader(). - Return(block1AnnounceHeader, nil) - - blockStateMock.EXPECT(). - GetHighestFinalisedHeader(). - Return(block2AnnounceHeader, nil). - Times(2) - - expectedRequest := network.NewBlockRequest(*variadic.MustNewUint32OrHash(block2AnnounceHeader.Hash()), - 1, network.BootstrapRequestData, network.Descending) - - fakeBlockBody := types.Body([]types.Extrinsic{}) - mockedBlockResponse := &network.BlockResponseMessage{ - BlockData: []*types.BlockData{ - { - Hash: block2AnnounceHeader.Hash(), - Header: block2AnnounceHeader, - Body: &fakeBlockBody, - }, - }, - } - - networkMock := NewMockNetwork(ctrl) - networkMock.EXPECT().Peers().Return([]common.PeerInfo{}) - - requestMaker := NewMockRequestMaker(ctrl) - requestMaker.EXPECT(). - Do(somePeer, expectedRequest, &network.BlockResponseMessage{}). 
- DoAndReturn(func(_, _, response any) any { - responsePtr := response.(*network.BlockResponseMessage) - *responsePtr = *mockedBlockResponse - return nil - }) - - babeVerifierMock := NewMockBabeVerifier(ctrl) - storageStateMock := NewMockStorageState(ctrl) - importHandlerMock := NewMockBlockImportHandler(ctrl) - telemetryMock := NewMockTelemetry(ctrl) - - const announceBlock = true - ensureSuccessfulBlockImportFlow(t, block1AnnounceHeader, mockedBlockResponse.BlockData, - blockStateMock, babeVerifierMock, storageStateMock, importHandlerMock, telemetryMock, - networkBroadcast, announceBlock) - - workerPool := newSyncWorkerPool(networkMock, requestMaker) - // include the peer who announced the block in the pool - workerPool.newPeer(somePeer) - - state := atomic.Value{} - state.Store(tip) - - return &chainSync{ - stopCh: make(chan struct{}), - pendingBlocks: pendingBlocksMock, - syncMode: state, - workerPool: workerPool, - network: networkMock, - blockState: blockStateMock, - babeVerifier: babeVerifierMock, - telemetry: telemetryMock, - storageState: storageStateMock, - blockImportHandler: importHandlerMock, - peerViewSet: newPeerViewSet(0), - } - }, - peerID: somePeer, - blockAnnounceHeader: block2AnnounceHeader, - }, - } - - for name, tt := range testCases { - tt := tt - t.Run(name, func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - - chainSync := tt.chainSyncBuilder(ctrl) - err := chainSync.onBlockAnnounce(announcedBlock{ - who: tt.peerID, - header: tt.blockAnnounceHeader, - }) - - assert.ErrorIs(t, err, tt.errWrapped) - if tt.errWrapped != nil { - assert.EqualError(t, err, tt.errMessage) - } - - if tt.waitBootstrapSync { - chainSync.wg.Wait() - err = chainSync.workerPool.stop() - require.NoError(t, err) - } - }) - } -} - -func Test_chainSync_onBlockAnnounceHandshake_tipModeNeedToCatchup(t *testing.T) { - ctrl := gomock.NewController(t) - const somePeer = peer.ID("abc") - - emptyTrieState := storage.NewTrieState(inmemory_trie.NewEmptyTrie()) - block1AnnounceHeader := types.NewHeader(common.Hash{}, emptyTrieState.MustRoot(), - common.Hash{}, 1, nil) - block2AnnounceHeader := types.NewHeader(block1AnnounceHeader.Hash(), - emptyTrieState.MustRoot(), - common.Hash{}, 130, nil) - - blockStateMock := NewMockBlockState(ctrl) - blockStateMock.EXPECT(). - BestBlockHeader(). - Return(block1AnnounceHeader, nil). - Times(2) - - blockStateMock.EXPECT(). - BestBlockHeader(). - Return(block2AnnounceHeader, nil). - Times(1) - - blockStateMock.EXPECT(). - GetHighestFinalisedHeader(). - Return(block1AnnounceHeader, nil). - Times(3) - - blockStateMock.EXPECT().IsPaused().Return(false).Times(2) - - expectedRequest := network.NewAscendingBlockRequests( - block1AnnounceHeader.Number+1, - block2AnnounceHeader.Number, network.BootstrapRequestData) - - networkMock := NewMockNetwork(ctrl) - networkMock.EXPECT().Peers().Return([]common.PeerInfo{}). - Times(2) - networkMock.EXPECT().AllConnectedPeersIDs().Return([]peer.ID{}).Times(2) - - firstMockedResponse := createSuccesfullBlockResponse(t, block1AnnounceHeader.Hash(), 2, 128) - latestItemFromMockedResponse := firstMockedResponse.BlockData[len(firstMockedResponse.BlockData)-1] - - secondMockedResponse := createSuccesfullBlockResponse(t, latestItemFromMockedResponse.Hash, - int(latestItemFromMockedResponse.Header.Number+1), 1) - - requestMaker := NewMockRequestMaker(ctrl) - requestMaker.EXPECT(). - Do(somePeer, expectedRequest[0], &network.BlockResponseMessage{}). 
- DoAndReturn(func(_, _, response any) any { - responsePtr := response.(*network.BlockResponseMessage) - *responsePtr = *firstMockedResponse - return nil - }).Times(2) - - requestMaker.EXPECT(). - Do(somePeer, expectedRequest[1], &network.BlockResponseMessage{}). - DoAndReturn(func(_, _, response any) any { - responsePtr := response.(*network.BlockResponseMessage) - *responsePtr = *secondMockedResponse - return nil - }).Times(2) - - babeVerifierMock := NewMockBabeVerifier(ctrl) - storageStateMock := NewMockStorageState(ctrl) - importHandlerMock := NewMockBlockImportHandler(ctrl) - telemetryMock := NewMockTelemetry(ctrl) - - const announceBlock = false - ensureSuccessfulBlockImportFlow(t, block1AnnounceHeader, firstMockedResponse.BlockData, - blockStateMock, babeVerifierMock, storageStateMock, importHandlerMock, telemetryMock, - networkInitialSync, announceBlock) - ensureSuccessfulBlockImportFlow(t, latestItemFromMockedResponse.Header, secondMockedResponse.BlockData, - blockStateMock, babeVerifierMock, storageStateMock, importHandlerMock, telemetryMock, - networkInitialSync, announceBlock) - - state := atomic.Value{} - state.Store(tip) - - stopCh := make(chan struct{}) - defer close(stopCh) - - chainSync := &chainSync{ - stopCh: stopCh, - peerViewSet: newPeerViewSet(10), - syncMode: state, - pendingBlocks: newDisjointBlockSet(0), - workerPool: newSyncWorkerPool(networkMock, requestMaker), - network: networkMock, - blockState: blockStateMock, - babeVerifier: babeVerifierMock, - telemetry: telemetryMock, - storageState: storageStateMock, - blockImportHandler: importHandlerMock, - } - - err := chainSync.onBlockAnnounceHandshake(somePeer, block2AnnounceHeader.Hash(), block2AnnounceHeader.Number) - require.NoError(t, err) - - chainSync.wg.Wait() - err = chainSync.workerPool.stop() - require.NoError(t, err) - - require.Equal(t, chainSync.getSyncMode(), tip) -} - -func TestChainSync_onBlockAnnounceHandshake_onBootstrapMode(t *testing.T) { - const randomHashString = "0x580d77a9136035a0bc3c3cd86286172f7f81291164c5914266073a30466fba21" - randomHash := common.MustHexToHash(randomHashString) - - testcases := map[string]struct { - newChainSync func(t *testing.T, ctrl *gomock.Controller) *chainSync - peerID peer.ID - bestHash common.Hash - bestNumber uint - shouldBeAWorker bool - workerStatus byte - }{ - "new_peer": { - newChainSync: func(t *testing.T, ctrl *gomock.Controller) *chainSync { - networkMock := NewMockNetwork(ctrl) - workerPool := newSyncWorkerPool(networkMock, NewMockRequestMaker(nil)) - - cs := newChainSyncTest(t, ctrl) - cs.syncMode.Store(bootstrap) - cs.workerPool = workerPool - return cs - }, - peerID: peer.ID("peer-test"), - bestHash: randomHash, - bestNumber: uint(20), - shouldBeAWorker: true, - workerStatus: available, - }, - "ignore_peer_should_not_be_included_in_the_workerpoll": { - newChainSync: func(t *testing.T, ctrl *gomock.Controller) *chainSync { - networkMock := NewMockNetwork(ctrl) - workerPool := newSyncWorkerPool(networkMock, NewMockRequestMaker(nil)) - workerPool.ignorePeers = map[peer.ID]struct{}{ - peer.ID("peer-test"): {}, - } - - cs := newChainSyncTest(t, ctrl) - cs.syncMode.Store(bootstrap) - cs.workerPool = workerPool - return cs - }, - peerID: peer.ID("peer-test"), - bestHash: randomHash, - bestNumber: uint(20), - shouldBeAWorker: false, - }, - "peer_already_exists_in_the_pool": { - newChainSync: func(t *testing.T, ctrl *gomock.Controller) *chainSync { - networkMock := NewMockNetwork(ctrl) - workerPool := newSyncWorkerPool(networkMock, NewMockRequestMaker(nil)) - 
workerPool.workers = map[peer.ID]*syncWorker{ - peer.ID("peer-test"): { - worker: &worker{status: available}, - }, - } - - cs := newChainSyncTest(t, ctrl) - cs.syncMode.Store(bootstrap) - cs.workerPool = workerPool - return cs - }, - peerID: peer.ID("peer-test"), - bestHash: randomHash, - bestNumber: uint(20), - shouldBeAWorker: true, - workerStatus: available, - }, - } - - for tname, tt := range testcases { - tt := tt - t.Run(tname, func(t *testing.T) { - ctrl := gomock.NewController(t) - cs := tt.newChainSync(t, ctrl) - cs.onBlockAnnounceHandshake(tt.peerID, tt.bestHash, tt.bestNumber) - - view, exists := cs.peerViewSet.find(tt.peerID) - require.True(t, exists) - require.Equal(t, tt.peerID, view.who) - require.Equal(t, tt.bestHash, view.hash) - require.Equal(t, tt.bestNumber, view.number) - - if tt.shouldBeAWorker { - syncWorker, exists := cs.workerPool.workers[tt.peerID] - require.True(t, exists) - require.Equal(t, tt.workerStatus, syncWorker.worker.status) - } else { - _, exists := cs.workerPool.workers[tt.peerID] - require.False(t, exists) - } - }) - } -} - -func newChainSyncTest(t *testing.T, ctrl *gomock.Controller) *chainSync { - t.Helper() - - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) - - cfg := chainSyncConfig{ - bs: mockBlockState, - pendingBlocks: newDisjointBlockSet(pendingBlocksLimit), - minPeers: 1, - maxPeers: 5, - slotDuration: 6 * time.Second, - } - - return newChainSync(cfg) -} - -func setupChainSyncToBootstrapMode(t *testing.T, blocksAhead uint, - bs BlockState, net Network, reqMaker network.RequestMaker, babeVerifier BabeVerifier, - storageState StorageState, blockImportHandler BlockImportHandler, telemetry Telemetry) *chainSync { - t.Helper() - mockedPeerID := []peer.ID{ - peer.ID("some_peer_1"), - peer.ID("some_peer_2"), - peer.ID("some_peer_3"), - } - - peerViewMap := map[peer.ID]peerView{} - for _, p := range mockedPeerID { - peerViewMap[p] = peerView{ - who: p, - hash: common.Hash{1, 2, 3}, - number: blocksAhead, - } - } - - cfg := chainSyncConfig{ - pendingBlocks: newDisjointBlockSet(pendingBlocksLimit), - minPeers: 1, - maxPeers: 5, - slotDuration: 6 * time.Second, - bs: bs, - net: net, - requestMaker: reqMaker, - babeVerifier: babeVerifier, - storageState: storageState, - blockImportHandler: blockImportHandler, - telemetry: telemetry, - } - - chainSync := newChainSync(cfg) - chainSync.peerViewSet = &peerViewSet{view: peerViewMap} - chainSync.syncMode.Store(bootstrap) - - return chainSync -} - -func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorker(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - - mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, - trie.EmptyHash, 0, types.NewDigest()) - - const blocksAhead = 128 - totalBlockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, blocksAhead) - mockedNetwork := NewMockNetwork(ctrl) - - workerPeerID := peer.ID("noot") - startingBlock := variadic.MustNewUint32OrHash(1) - max := uint32(128) - - mockedRequestMaker := NewMockRequestMaker(ctrl) - - expectedBlockRequestMessage := &network.BlockRequestMessage{ - RequestedData: network.BootstrapRequestData, - StartingBlock: *startingBlock, - Direction: network.Ascending, - Max: &max, - } - - mockedRequestMaker.EXPECT(). - Do(workerPeerID, expectedBlockRequestMessage, &network.BlockResponseMessage{}). 
- DoAndReturn(func(_, _, response any) any { - responsePtr := response.(*network.BlockResponseMessage) - *responsePtr = *totalBlockResponse - return nil - }) - - mockedBlockState := NewMockBlockState(ctrl) - mockedBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) - mockedBlockState.EXPECT().IsPaused().Return(false) - - mockBabeVerifier := NewMockBabeVerifier(ctrl) - mockStorageState := NewMockStorageState(ctrl) - mockImportHandler := NewMockBlockImportHandler(ctrl) - mockTelemetry := NewMockTelemetry(ctrl) - - mockedBlockState.EXPECT().GetHighestFinalisedHeader().Return(types.NewEmptyHeader(), nil).Times(1) - mockedNetwork.EXPECT().Peers().Return([]common.PeerInfo{}).Times(1) - - const announceBlock = false - // setup mocks for newly synced blocks that don't exist in our local database - ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, totalBlockResponse.BlockData, mockedBlockState, - mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock) - - // setup a chain sync which holds in its peer view map - // 3 peers, each one announcing block X as its best block number. - // We start this test with the genesis block being our best block, so - // we're far behind by X blocks and should execute a bootstrap - // sync requesting those blocks - cs := setupChainSyncToBootstrapMode(t, blocksAhead, - mockedBlockState, mockedNetwork, mockedRequestMaker, mockBabeVerifier, - mockStorageState, mockImportHandler, mockTelemetry) - - target := cs.peerViewSet.getTarget() - require.Equal(t, uint(128), target) - - // include a new worker in the worker pool set; this worker - // should be an available peer that will receive a block request - // (the worker pool handles the workers' management) - cs.workerPool.fromBlockAnnounce(peer.ID("noot")) - - err := cs.requestMaxBlocksFrom(mockedGenesisHeader, networkInitialSync) - require.NoError(t, err) - - err = cs.workerPool.stop() - require.NoError(t, err) -} - -func TestChainSync_BootstrapSync_SuccessfulSync_WithTwoWorkers(t *testing.T) { - t.Parallel() - - ctrl := gomock.NewController(t) - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) - mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, - trie.EmptyHash, 0, types.NewDigest()) - - mockNetwork := NewMockNetwork(ctrl) - mockRequestMaker := NewMockRequestMaker(ctrl) - - mockBabeVerifier := NewMockBabeVerifier(ctrl) - mockStorageState := NewMockStorageState(ctrl) - mockImportHandler := NewMockBlockImportHandler(ctrl) - mockTelemetry := NewMockTelemetry(ctrl) - - mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(types.NewEmptyHeader(), nil).Times(1) - mockBlockState.EXPECT().IsPaused().Return(false) - mockNetwork.EXPECT().Peers().Return([]common.PeerInfo{}).Times(1) - - // this test expects two workers, each responding to its request with 128 blocks, which means - // we should import 256 blocks in total - blockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, 256) - - // here we split the whole set in two parts; each one will be the "response" for each peer - worker1Response := &network.BlockResponseMessage{ - BlockData: blockResponse.BlockData[:128], - } - const announceBlock = false - // the first peer will respond with blocks 1 to 128, so ensureSuccessfulBlockImportFlow - // will set up the expectations starting from the genesis header until block 128 - ensureSuccessfulBlockImportFlow(t, 
mockedGenesisHeader, worker1Response.BlockData, mockBlockState, - mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock) - - worker2Response := &network.BlockResponseMessage{ - BlockData: blockResponse.BlockData[128:], - } - // the worker 2 will respond from block 129 to 256 so the ensureBlockImportFlow - // will setup the expectations starting from block 128, from previous worker, until block 256 - parent := worker1Response.BlockData[len(worker1Response.BlockData)-1] - ensureSuccessfulBlockImportFlow(t, parent.Header, worker2Response.BlockData, mockBlockState, - mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock) - - // we use gomock.Any since I cannot guarantee which peer picks which request - // but the first call to DoBlockRequest will return the first set and the second - // call will return the second set - mockRequestMaker.EXPECT(). - Do(gomock.Any(), gomock.Any(), &network.BlockResponseMessage{}). - DoAndReturn(func(_, _, response any) any { - responsePtr := response.(*network.BlockResponseMessage) - *responsePtr = *worker1Response - return nil - }) - - mockRequestMaker.EXPECT(). - Do(gomock.Any(), gomock.Any(), &network.BlockResponseMessage{}). - DoAndReturn(func(_, _, response any) any { - responsePtr := response.(*network.BlockResponseMessage) - *responsePtr = *worker2Response - return nil - }) - - // setup a chain sync which holds in its peer view map - // 3 peers, each one announce block 129 as its best block number. - // We start this test with genesis block being our best block, so - // we're far behind by 128 blocks, we should execute a bootstrap - // sync request those blocks - const blocksAhead = 256 - cs := setupChainSyncToBootstrapMode(t, blocksAhead, - mockBlockState, mockNetwork, mockRequestMaker, mockBabeVerifier, - mockStorageState, mockImportHandler, mockTelemetry) - - target := cs.peerViewSet.getTarget() - require.Equal(t, uint(blocksAhead), target) - - // include a new worker in the worker pool set, this worker - // should be an available peer that will receive a block request - // the worker pool executes the workers management - cs.workerPool.fromBlockAnnounce(peer.ID("noot")) - cs.workerPool.fromBlockAnnounce(peer.ID("noot2")) - - err := cs.requestMaxBlocksFrom(mockedGenesisHeader, networkInitialSync) - require.NoError(t, err) - - err = cs.workerPool.stop() - require.NoError(t, err) -} - -func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorkerFailing(t *testing.T) { - t.Parallel() - - ctrl := gomock.NewController(t) - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) - mockBlockState.EXPECT().IsPaused().Return(false).Times(2) - mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, - trie.EmptyHash, 0, types.NewDigest()) - - mockNetwork := NewMockNetwork(ctrl) - mockRequestMaker := NewMockRequestMaker(ctrl) - - mockBabeVerifier := NewMockBabeVerifier(ctrl) - mockStorageState := NewMockStorageState(ctrl) - mockImportHandler := NewMockBlockImportHandler(ctrl) - mockTelemetry := NewMockTelemetry(ctrl) - - mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(types.NewEmptyHeader(), nil).Times(1) - - mockNetwork.EXPECT().Peers().Return([]common.PeerInfo{}).Times(1) - - // this test expects two workers responding each request with 128 blocks which means - // we should import 256 blocks in total - blockResponse := 
createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, 256) - const announceBlock = false - - // here we split the whole set in two parts; each one will be the "response" for each peer - worker1Response := &network.BlockResponseMessage{ - BlockData: blockResponse.BlockData[:128], - } - - // the first peer will respond with blocks 1 to 128, so ensureSuccessfulBlockImportFlow - // will set up the expectations starting from the genesis header until block 128 - ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData, mockBlockState, - mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock) - - worker2Response := &network.BlockResponseMessage{ - BlockData: blockResponse.BlockData[128:], - } - // worker 2 will respond with blocks 129 to 256, so ensureSuccessfulBlockImportFlow - // will set up the expectations starting from block 128, the last block from the previous worker, until block 256 - parent := worker1Response.BlockData[len(worker1Response.BlockData)-1] - ensureSuccessfulBlockImportFlow(t, parent.Header, worker2Response.BlockData, mockBlockState, - mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock) - - // we use gomock.Any since we cannot guarantee which peer picks which request, - // but the first call to DoBlockRequest will return the first set and the second - // call will return the second set - doBlockRequestCount := atomic.Int32{} - mockRequestMaker.EXPECT(). - Do(gomock.Any(), gomock.Any(), &network.BlockResponseMessage{}). - DoAndReturn(func(peerID, _, response any) any { - // let's ensure that DoBlockRequest is called by - // peer.ID(alice) and peer.ID(bob). When bob calls, this method will fail; - // alice should then pick up the failed request and re-execute it, which will - // be the third call - responsePtr := response.(*network.BlockResponseMessage) - defer func() { doBlockRequestCount.Add(1) }() - - switch doBlockRequestCount.Load() { - case 0: - *responsePtr = *worker1Response - case 1: - return errors.New("a bad error while getting a response") - default: - *responsePtr = *worker2Response - } - return nil - - }).Times(3) - - // setup a chain sync which holds in its peer view map - // 3 peers, each one announcing block 129 as its best block number.
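As the DoAndReturn stub above spells out, a failed request is not dropped: it goes back to the queue and another available worker re-executes it, which is why the mock expects three calls for two requests. A rough sketch of that requeue loop under simplified assumptions (dispatch, do and the integer requests are hypothetical names, not the real worker-pool API):

package main

import (
	"errors"
	"fmt"
)

// dispatch assigns each request to a worker and requeues failures so
// that a different worker retries them on a later pass.
func dispatch(requests []int, workers []string,
	do func(worker string, req int) error) map[int]string {
	completedBy := make(map[int]string)
	tries := make(map[int]int)
	queue := append([]int(nil), requests...)
	for len(queue) > 0 {
		req := queue[0]
		queue = queue[1:]
		w := workers[(req+tries[req])%len(workers)] // rotate on retry
		tries[req]++
		if err := do(w, req); err != nil {
			queue = append(queue, req) // another worker picks it up later
			continue
		}
		completedBy[req] = w
	}
	return completedBy
}

func main() {
	do := func(worker string, req int) error {
		if worker == "bob" {
			return errors.New("a bad error while getting a response")
		}
		return nil
	}
	// bob fails its request and alice re-executes it: three calls in
	// total for two requests, matching the Times(3) expectation.
	fmt.Println(dispatch([]int{0, 1}, []string{"alice", "bob"}, do))
}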
- // We start this test with genesis block being our best block, so - // we're far behind by 128 blocks, we should execute a bootstrap - // sync request those blocks - const blocksAhead = 256 - cs := setupChainSyncToBootstrapMode(t, blocksAhead, - mockBlockState, mockNetwork, mockRequestMaker, mockBabeVerifier, - mockStorageState, mockImportHandler, mockTelemetry) - - target := cs.peerViewSet.getTarget() - require.Equal(t, uint(blocksAhead), target) - - // include a new worker in the worker pool set, this worker - // should be an available peer that will receive a block request - // the worker pool executes the workers management - cs.workerPool.fromBlockAnnounce(peer.ID("alice")) - cs.workerPool.fromBlockAnnounce(peer.ID("bob")) - - err := cs.requestMaxBlocksFrom(mockedGenesisHeader, networkInitialSync) - require.NoError(t, err) - - err = cs.workerPool.stop() - require.NoError(t, err) -} - -func TestChainSync_BootstrapSync_SuccessfulSync_WithProtocolNotSupported(t *testing.T) { - t.Parallel() - - ctrl := gomock.NewController(t) - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) - mockBlockState.EXPECT().IsPaused().Return(false).Times(2) - mockBlockState.EXPECT(). - GetHighestFinalisedHeader(). - Return(types.NewEmptyHeader(), nil). - Times(1) - mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, - trie.EmptyHash, 0, types.NewDigest()) - - mockNetwork := NewMockNetwork(ctrl) - mockNetwork.EXPECT().Peers().Return([]common.PeerInfo{}) - mockRequestMaker := NewMockRequestMaker(ctrl) - - mockBabeVerifier := NewMockBabeVerifier(ctrl) - mockStorageState := NewMockStorageState(ctrl) - mockImportHandler := NewMockBlockImportHandler(ctrl) - mockTelemetry := NewMockTelemetry(ctrl) - - // this test expects two workers responding each request with 128 blocks which means - // we should import 256 blocks in total - blockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, 256) - const announceBlock = false - - // here we split the whole set in two parts each one will be the "response" for each peer - worker1Response := &network.BlockResponseMessage{ - BlockData: blockResponse.BlockData[:128], - } - - // the first peer will respond the from the block 1 to 128 so the ensureBlockImportFlow - // will setup the expectations starting from the genesis header until block 128 - ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData, mockBlockState, - mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock) - - worker2Response := &network.BlockResponseMessage{ - BlockData: blockResponse.BlockData[128:], - } - // the worker 2 will respond from block 129 to 256 so the ensureBlockImportFlow - // will setup the expectations starting from block 128, from previous worker, until block 256 - parent := worker1Response.BlockData[len(worker1Response.BlockData)-1] - ensureSuccessfulBlockImportFlow(t, parent.Header, worker2Response.BlockData, mockBlockState, - mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock) - - // we use gomock.Any since I cannot guarantee which peer picks which request - // but the first call to DoBlockRequest will return the first set and the second - // call will return the second set - doBlockRequestCount := atomic.Int32{} - mockRequestMaker.EXPECT(). - Do(gomock.Any(), gomock.Any(), &network.BlockResponseMessage{}). 
- DoAndReturn(func(peerID, _, response any) any { - // lets ensure that the DoBlockRequest is called by - // peer.ID(alice) and peer.ID(bob). When bob calls, this method will fail - // then alice should pick the failed request and re-execute it which will - // be the third call - responsePtr := response.(*network.BlockResponseMessage) - defer func() { doBlockRequestCount.Add(1) }() - - switch doBlockRequestCount.Load() { - case 0: - *responsePtr = *worker1Response - case 1: - return errors.New("protocols not supported") - default: - *responsePtr = *worker2Response - } - - return nil - }).Times(3) - - // since some peer will fail with protocols not supported his - // reputation will be affected and - mockNetwork.EXPECT().ReportPeer(peerset.ReputationChange{ - Value: peerset.BadProtocolValue, - Reason: peerset.BadProtocolReason, - }, gomock.AssignableToTypeOf(peer.ID(""))) - // setup a chain sync which holds in its peer view map - // 3 peers, each one announce block 129 as its best block number. - // We start this test with genesis block being our best block, so - // we're far behind by 128 blocks, we should execute a bootstrap - // sync request those blocks - const blocksAhead = 256 - cs := setupChainSyncToBootstrapMode(t, blocksAhead, - mockBlockState, mockNetwork, mockRequestMaker, mockBabeVerifier, - mockStorageState, mockImportHandler, mockTelemetry) - - target := cs.peerViewSet.getTarget() - require.Equal(t, uint(blocksAhead), target) - - // include a new worker in the worker pool set, this worker - // should be an available peer that will receive a block request - // the worker pool executes the workers management - cs.workerPool.fromBlockAnnounce(peer.ID("alice")) - cs.workerPool.fromBlockAnnounce(peer.ID("bob")) - - err := cs.requestMaxBlocksFrom(mockedGenesisHeader, networkInitialSync) - require.NoError(t, err) - - err = cs.workerPool.stop() - require.NoError(t, err) -} - -func TestChainSync_BootstrapSync_SuccessfulSync_WithNilHeaderInResponse(t *testing.T) { - t.Parallel() - - ctrl := gomock.NewController(t) - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) - mockBlockState.EXPECT().IsPaused().Return(false).Times(2) - mockBlockState.EXPECT(). - GetHighestFinalisedHeader(). - Return(types.NewEmptyHeader(), nil). 
- Times(1) - mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, - trie.EmptyHash, 0, types.NewDigest()) - - mockNetwork := NewMockNetwork(ctrl) - mockNetwork.EXPECT().Peers().Return([]common.PeerInfo{}) - mockRequestMaker := NewMockRequestMaker(ctrl) - - mockBabeVerifier := NewMockBabeVerifier(ctrl) - mockStorageState := NewMockStorageState(ctrl) - mockImportHandler := NewMockBlockImportHandler(ctrl) - mockTelemetry := NewMockTelemetry(ctrl) - - // this test expects two workers responding each request with 128 blocks which means - // we should import 256 blocks in total - blockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, 256) - const announceBlock = false - - // here we split the whole set in two parts each one will be the "response" for each peer - worker1Response := &network.BlockResponseMessage{ - BlockData: blockResponse.BlockData[:128], - } - - // the first peer will respond the from the block 1 to 128 so the ensureBlockImportFlow - // will setup the expectations starting from the genesis header until block 128 - ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData, mockBlockState, - mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock) - - worker2Response := &network.BlockResponseMessage{ - BlockData: blockResponse.BlockData[128:], - } - // the worker 2 will respond from block 129 to 256 so the ensureBlockImportFlow - // will setup the expectations starting from block 128, from previous worker, until block 256 - parent := worker1Response.BlockData[127] - ensureSuccessfulBlockImportFlow(t, parent.Header, worker2Response.BlockData, mockBlockState, - mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock) - - // we use gomock.Any since I cannot guarantee which peer picks which request - // but the first call to DoBlockRequest will return the first set and the second - // call will return the second set - doBlockRequestCount := atomic.Int32{} - mockRequestMaker.EXPECT(). - Do(gomock.Any(), gomock.Any(), &network.BlockResponseMessage{}). - DoAndReturn(func(peerID, _, response any) any { - // lets ensure that the DoBlockRequest is called by - // peer.ID(alice) and peer.ID(bob). When bob calls, this method return an - // response item but without header as was requested - responsePtr := response.(*network.BlockResponseMessage) - defer func() { doBlockRequestCount.Add(1) }() - - switch doBlockRequestCount.Load() { - case 0: - *responsePtr = *worker1Response - case 1: - incompleteBlockData := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 128, 256) - incompleteBlockData.BlockData[0].Header = nil - - *responsePtr = *incompleteBlockData - default: - *responsePtr = *worker2Response - } - - return nil - }).Times(3) - - // since some peer will fail with protocols not supported his - // reputation will be affected and - mockNetwork.EXPECT().ReportPeer(peerset.ReputationChange{ - Value: peerset.IncompleteHeaderValue, - Reason: peerset.IncompleteHeaderReason, - }, gomock.AssignableToTypeOf(peer.ID(""))) - // setup a chain sync which holds in its peer view map - // 3 peers, each one announce block 129 as its best block number. 
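The nil-header scenario above reduces to a simple invariant: when header data was requested, every item in the response must carry a header, and the first item that does not fails the whole response, triggering the IncompleteHeader reputation change. A compact sketch of that per-item check with trimmed stand-in types (blockData here is not the real types.BlockData):

package main

import (
	"errors"
	"fmt"
)

// blockData is a trimmed stand-in for types.BlockData.
type blockData struct {
	hash   string
	header *struct{ number uint }
}

var errNilHeaderInResponse = errors.New("expected header, received none")

// validateHasHeaders fails on the first response item missing a
// header, identifying the offending block by its hash.
func validateHasHeaders(items []*blockData) error {
	for _, bd := range items {
		if bd.header == nil {
			return fmt.Errorf("%w: %s", errNilHeaderInResponse, bd.hash)
		}
	}
	return nil
}

func main() {
	h := &struct{ number uint }{number: 1}
	fmt.Println(validateHasHeaders([]*blockData{{hash: "0xaa", header: h}}))                  // <nil>
	fmt.Println(validateHasHeaders([]*blockData{{hash: "0xaa", header: h}, {hash: "0xbb"}})) // error for 0xbb
}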
- // We start this test with genesis block being our best block, so - // we're far behind by 128 blocks, we should execute a bootstrap - // sync request those blocks - const blocksAhead = 256 - cs := setupChainSyncToBootstrapMode(t, blocksAhead, - mockBlockState, mockNetwork, mockRequestMaker, mockBabeVerifier, - mockStorageState, mockImportHandler, mockTelemetry) - - target := cs.peerViewSet.getTarget() - require.Equal(t, uint(blocksAhead), target) - - // include a new worker in the worker pool set, this worker - // should be an available peer that will receive a block request - // the worker pool executes the workers management - cs.workerPool.fromBlockAnnounce(peer.ID("alice")) - cs.workerPool.fromBlockAnnounce(peer.ID("bob")) - - err := cs.requestMaxBlocksFrom(mockedGenesisHeader, networkInitialSync) - require.NoError(t, err) - - err = cs.workerPool.stop() - require.NoError(t, err) -} - -func TestChainSync_BootstrapSync_SuccessfulSync_WithNilBlockInResponse(t *testing.T) { - t.Parallel() - - ctrl := gomock.NewController(t) - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) - mockBlockState.EXPECT().IsPaused().Return(false).Times(2) - mockBlockState.EXPECT(). - GetHighestFinalisedHeader(). - Return(types.NewEmptyHeader(), nil). - Times(1) - mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, - trie.EmptyHash, 0, types.NewDigest()) - - mockBabeVerifier := NewMockBabeVerifier(ctrl) - mockStorageState := NewMockStorageState(ctrl) - mockImportHandler := NewMockBlockImportHandler(ctrl) - mockTelemetry := NewMockTelemetry(ctrl) - - blockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, 128) - const announceBlock = false - - workerResponse := &network.BlockResponseMessage{ - BlockData: blockResponse.BlockData, - } - - // the first peer will respond the from the block 1 to 128 so the ensureBlockImportFlow - // will setup the expectations starting from the genesis header until block 128 - ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, workerResponse.BlockData, mockBlockState, - mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock) - - doBlockRequestCount := atomic.Int32{} - mockRequestMaker := NewMockRequestMaker(ctrl) - mockRequestMaker.EXPECT(). - Do(gomock.Any(), gomock.Any(), &network.BlockResponseMessage{}). - DoAndReturn(func(peerID, _, response any) any { - // lets ensure that the DoBlockRequest is called by - // peer.ID(alice) and peer.ID(bob). 
When bob calls, this method returns - // network.ErrNilBlockInResponse, simulating a response that contains a nil block - responsePtr := response.(*network.BlockResponseMessage) - defer func() { doBlockRequestCount.Add(1) }() - - switch doBlockRequestCount.Load() { - case 0: - return network.ErrNilBlockInResponse - case 1: - *responsePtr = *workerResponse - } - - return nil - }).Times(2) - - mockNetwork := NewMockNetwork(ctrl) - mockNetwork.EXPECT().Peers().Return([]common.PeerInfo{}) - - // the offending peer's reputation will be affected - mockNetwork.EXPECT().ReportPeer(peerset.ReputationChange{ - Value: peerset.BadMessageValue, - Reason: peerset.BadMessageReason, - }, gomock.AssignableToTypeOf(peer.ID(""))) - - const blocksAhead = 128 - cs := setupChainSyncToBootstrapMode(t, blocksAhead, - mockBlockState, mockNetwork, mockRequestMaker, mockBabeVerifier, - mockStorageState, mockImportHandler, mockTelemetry) - - target := cs.peerViewSet.getTarget() - require.Equal(t, uint(blocksAhead), target) - - // include new workers in the worker pool set; each worker - // should be an available peer that will receive a block request - // (the worker pool handles the workers' management) - cs.workerPool.fromBlockAnnounce(peer.ID("alice")) - cs.workerPool.fromBlockAnnounce(peer.ID("bob")) - - err := cs.requestMaxBlocksFrom(mockedGenesisHeader, networkInitialSync) - require.NoError(t, err) - - err = cs.workerPool.stop() - require.NoError(t, err) -} - -func TestChainSync_BootstrapSync_SuccessfulSync_WithResponseIsNotAChain(t *testing.T) { - t.Parallel() - - ctrl := gomock.NewController(t) - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) - mockBlockState.EXPECT().IsPaused().Return(false).Times(2) - mockBlockState.EXPECT(). - GetHighestFinalisedHeader(). - Return(types.NewEmptyHeader(), nil). 
- Times(1) - mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, - trie.EmptyHash, 0, types.NewDigest()) - - mockNetwork := NewMockNetwork(ctrl) - mockNetwork.EXPECT().Peers().Return([]common.PeerInfo{}) - mockRequestMaker := NewMockRequestMaker(ctrl) - - mockBabeVerifier := NewMockBabeVerifier(ctrl) - mockStorageState := NewMockStorageState(ctrl) - mockImportHandler := NewMockBlockImportHandler(ctrl) - mockTelemetry := NewMockTelemetry(ctrl) - - // this test expects two workers responding each request with 128 blocks which means - // we should import 256 blocks in total - blockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, 256) - const announceBlock = false - - // here we split the whole set in two parts each one will be the "response" for each peer - worker1Response := &network.BlockResponseMessage{ - BlockData: blockResponse.BlockData[:128], - } - - // the first peer will respond the from the block 1 to 128 so the ensureBlockImportFlow - // will setup the expectations starting from the genesis header until block 128 - ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData, mockBlockState, - mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock) - - worker2Response := &network.BlockResponseMessage{ - BlockData: blockResponse.BlockData[128:], - } - // the worker 2 will respond from block 129 to 256 so the ensureBlockImportFlow - // will setup the expectations starting from block 128, from previous worker, until block 256 - parent := worker1Response.BlockData[127] - ensureSuccessfulBlockImportFlow(t, parent.Header, worker2Response.BlockData, mockBlockState, - mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock) - - // we use gomock.Any since I cannot guarantee which peer picks which request - // but the first call to DoBlockRequest will return the first set and the second - // call will return the second set - doBlockRequestCount := atomic.Int32{} - mockRequestMaker.EXPECT(). - Do(gomock.Any(), gomock.Any(), &network.BlockResponseMessage{}). - DoAndReturn(func(peerID, _, response any) any { - // lets ensure that the DoBlockRequest is called by - // peer.ID(alice) and peer.ID(bob). When bob calls, this method return an - // response that does not form an chain - responsePtr := response.(*network.BlockResponseMessage) - defer func() { doBlockRequestCount.Add(1) }() - - switch doBlockRequestCount.Load() { - case 0: - *responsePtr = *worker1Response - case 1: - notAChainBlockData := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 128, 256) - // swap positions to force the problem - notAChainBlockData.BlockData[0], notAChainBlockData.BlockData[130] = - notAChainBlockData.BlockData[130], notAChainBlockData.BlockData[0] - - *responsePtr = *notAChainBlockData - default: - *responsePtr = *worker2Response - } - - return nil - }).Times(3) - - // setup a chain sync which holds in its peer view map - // 3 peers, each one announce block 129 as its best block number. 
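Swapping two items is all it takes to trip the chain verification: each response item must carry a number exactly one higher than its predecessor and a parent hash equal to the predecessor's hash. A sketch of that check over simplified headers (the header struct and string hashes are illustrative only):

package main

import "fmt"

type header struct {
	number uint
	parent string
	hash   string
}

// isAChain checks that consecutive items are linked by number and
// parent hash, like the isResponseAChain helper exercised here.
func isAChain(items []header) bool {
	for i := 1; i < len(items); i++ {
		prev, cur := items[i-1], items[i]
		if cur.number != prev.number+1 || cur.parent != prev.hash {
			return false
		}
	}
	return true
}

func main() {
	a := header{number: 1, parent: "0x00", hash: "0x01"}
	b := header{number: 2, parent: "0x01", hash: "0x02"}
	c := header{number: 3, parent: "0x02", hash: "0x03"}
	fmt.Println(isAChain([]header{a, b, c})) // true
	fmt.Println(isAChain([]header{a, c, b})) // false: b and c were swapped
}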
- // We start this test with genesis block being our best block, so - // we're far behind by 128 blocks, we should execute a bootstrap - // sync request those blocks - const blocksAhead = 256 - cs := setupChainSyncToBootstrapMode(t, blocksAhead, - mockBlockState, mockNetwork, mockRequestMaker, mockBabeVerifier, - mockStorageState, mockImportHandler, mockTelemetry) - - target := cs.peerViewSet.getTarget() - - require.Equal(t, uint(blocksAhead), target) - - // include a new worker in the worker pool set, this worker - // should be an available peer that will receive a block request - // the worker pool executes the workers management - cs.workerPool.fromBlockAnnounce(peer.ID("alice")) - cs.workerPool.fromBlockAnnounce(peer.ID("bob")) - - err := cs.requestMaxBlocksFrom(mockedGenesisHeader, networkInitialSync) - require.NoError(t, err) - - err = cs.workerPool.stop() - require.NoError(t, err) -} - -func TestChainSync_BootstrapSync_SuccessfulSync_WithReceivedBadBlock(t *testing.T) { - t.Parallel() - - ctrl := gomock.NewController(t) - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) - mockBlockState.EXPECT().IsPaused().Return(false).Times(2) - mockBlockState.EXPECT(). - GetHighestFinalisedHeader(). - Return(types.NewEmptyHeader(), nil). - Times(1) - - mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, - trie.EmptyHash, 0, types.NewDigest()) - - mockNetwork := NewMockNetwork(ctrl) - mockNetwork.EXPECT().Peers().Return([]common.PeerInfo{}) - mockRequestMaker := NewMockRequestMaker(ctrl) - - mockBabeVerifier := NewMockBabeVerifier(ctrl) - mockStorageState := NewMockStorageState(ctrl) - mockImportHandler := NewMockBlockImportHandler(ctrl) - mockTelemetry := NewMockTelemetry(ctrl) - - // this test expects two workers responding each request with 128 blocks which means - // we should import 256 blocks in total - blockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, 256) - const announceBlock = false - - // here we split the whole set in two parts each one will be the "response" for each peer - worker1Response := &network.BlockResponseMessage{ - BlockData: blockResponse.BlockData[:128], - } - - // the first peer will respond the from the block 1 to 128 so the ensureBlockImportFlow - // will setup the expectations starting from the genesis header until block 128 - ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData, mockBlockState, - mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock) - - worker2Response := &network.BlockResponseMessage{ - BlockData: blockResponse.BlockData[128:], - } - // the worker 2 will respond from block 129 to 256 so the ensureBlockImportFlow - // will setup the expectations starting from block 128, from previous worker, until block 256 - parent := worker1Response.BlockData[len(worker1Response.BlockData)-1] - ensureSuccessfulBlockImportFlow(t, parent.Header, worker2Response.BlockData, mockBlockState, - mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock) - - fakeBadBlockHash := common.MustHexToHash("0x18767cb4bb4cc13bf119f6613aec5487d4c06a2e453de53d34aea6f3f1ee9855") - - // we use gomock.Any since I cannot guarantee which peer picks which request - // but the first call to DoBlockRequest will return the first set and the second - // call will return the second set - doBlockRequestCount := 
atomic.Int32{} - mockRequestMaker.EXPECT(). - Do(gomock.Any(), gomock.Any(), &network.BlockResponseMessage{}). - DoAndReturn(func(peerID, _, response any) any { - // let's ensure that DoBlockRequest is called by - // peer.ID(alice) and peer.ID(bob). When bob calls, this method returns a - // response that contains a known bad block - responsePtr := response.(*network.BlockResponseMessage) - defer func() { doBlockRequestCount.Add(1) }() - - switch doBlockRequestCount.Load() { - case 0: - *responsePtr = *worker1Response - case 1: - // use the first response's last item hash to produce the second response block data - // so we can guarantee that the second response continues the first response blocks - firstResponseLastItem := worker1Response.BlockData[len(worker1Response.BlockData)-1] - blockDataWithBadBlock := createSuccesfullBlockResponse(t, - firstResponseLastItem.Header.Hash(), - 129, - 128) - - // change the last item of the second response to be a bad block, so we guarantee that - // this second response is still a chain (changing the hash of a block in the middle of the - // response breaks the `isAChain` verification) - lastItem := len(blockDataWithBadBlock.BlockData) - 1 - blockDataWithBadBlock.BlockData[lastItem].Hash = fakeBadBlockHash - *responsePtr = *blockDataWithBadBlock - default: - *responsePtr = *worker2Response - } - - return nil - }).Times(3) - - mockNetwork.EXPECT().ReportPeer(peerset.ReputationChange{ - Value: peerset.BadBlockAnnouncementValue, - Reason: peerset.BadBlockAnnouncementReason, - }, gomock.AssignableToTypeOf(peer.ID(""))) - // setup a chain sync which holds in its peer view map - // 3 peers, each one announcing block 129 as its best block number. - // We start this test with the genesis block being our best block, so - // we're far behind by 128 blocks and should execute a bootstrap - // sync requesting those blocks - const blocksAhead = 256 - cs := setupChainSyncToBootstrapMode(t, blocksAhead, - mockBlockState, mockNetwork, mockRequestMaker, mockBabeVerifier, - mockStorageState, mockImportHandler, mockTelemetry) - - cs.badBlocks = []string{fakeBadBlockHash.String()} - - target := cs.peerViewSet.getTarget() - require.Equal(t, uint(blocksAhead), target) - - // include new workers in the worker pool set; each worker - // should be an available peer that will receive a block request - // (the worker pool handles the workers' management) - cs.workerPool.fromBlockAnnounce(peer.ID("alice")) - cs.workerPool.fromBlockAnnounce(peer.ID("bob")) - - err := cs.requestMaxBlocksFrom(mockedGenesisHeader, networkInitialSync) - require.NoError(t, err) - - err = cs.workerPool.stop() - require.NoError(t, err) - - // the bad peer should not be in the worker pool - // and should be in the ignore list - require.Len(t, cs.workerPool.workers, 1) - require.Len(t, cs.workerPool.ignorePeers, 1) -} - -func TestChainSync_BootstrapSync_SucessfulSync_ReceivedPartialBlockData(t *testing.T) { - t.Parallel() - - ctrl := gomock.NewController(t) - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) - mockBlockState.EXPECT().IsPaused().Return(false).Times(2) - mockBlockState.EXPECT(). - GetHighestFinalisedHeader(). - Return(types.NewEmptyHeader(), nil). 
- Times(1) - - mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, - trie.EmptyHash, 0, types.NewDigest()) - - mockNetwork := NewMockNetwork(ctrl) - mockNetwork.EXPECT().Peers().Return([]common.PeerInfo{}) - - mockRequestMaker := NewMockRequestMaker(ctrl) - - mockBabeVerifier := NewMockBabeVerifier(ctrl) - mockStorageState := NewMockStorageState(ctrl) - mockImportHandler := NewMockBlockImportHandler(ctrl) - mockTelemetry := NewMockTelemetry(ctrl) - - // create a set of 128 blocks - blockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, 128) - const announceBlock = false - - // the worker will return only part of the set - worker1Response := &network.BlockResponseMessage{ - BlockData: blockResponse.BlockData[:97], - } - - // the first peer will respond with blocks 1 to 97, so ensureSuccessfulBlockImportFlow - // will set up the expectations starting from the genesis header until block 97 - ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData, mockBlockState, - mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock) - - worker1MissingBlocksResponse := &network.BlockResponseMessage{ - BlockData: blockResponse.BlockData[97:], - } - - // last item from the previous response - parent := worker1Response.BlockData[96] - ensureSuccessfulBlockImportFlow(t, parent.Header, worker1MissingBlocksResponse.BlockData, mockBlockState, - mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock) - - doBlockRequestCount := 0 - mockRequestMaker.EXPECT(). - Do(gomock.Any(), gomock.Any(), &network.BlockResponseMessage{}). - DoAndReturn(func(peerID, _, response any) any { - // let's ensure that DoBlockRequest is called by - // peer.ID(alice). 
The first call will return only 97 blocks - // the handler should issue another call to retrieve the missing blocks - responsePtr := response.(*network.BlockResponseMessage) - defer func() { doBlockRequestCount++ }() - - if doBlockRequestCount == 0 { - *responsePtr = *worker1Response - } else { - *responsePtr = *worker1MissingBlocksResponse - } - - return nil - }).Times(2) - - const blocksAhead = 128 - cs := setupChainSyncToBootstrapMode(t, blocksAhead, - mockBlockState, mockNetwork, mockRequestMaker, mockBabeVerifier, - mockStorageState, mockImportHandler, mockTelemetry) - - target := cs.peerViewSet.getTarget() - require.Equal(t, uint(blocksAhead), target) - - cs.workerPool.fromBlockAnnounce(peer.ID("alice")) - - err := cs.requestMaxBlocksFrom(mockedGenesisHeader, networkInitialSync) - require.NoError(t, err) - - err = cs.workerPool.stop() - require.NoError(t, err) - - require.Len(t, cs.workerPool.workers, 1) - - _, ok := cs.workerPool.workers[peer.ID("alice")] - require.True(t, ok) -} - -func createSuccesfullBlockResponse(t *testing.T, parentHeader common.Hash, - startingAt, numBlocks int) *network.BlockResponseMessage { - t.Helper() - - response := new(network.BlockResponseMessage) - response.BlockData = make([]*types.BlockData, numBlocks) - - emptyTrieState := storage.NewTrieState(inmemory_trie.NewEmptyTrie()) - tsRoot := emptyTrieState.MustRoot() - - firstHeader := types.NewHeader(parentHeader, tsRoot, common.Hash{}, - uint(startingAt), nil) - response.BlockData[0] = &types.BlockData{ - Hash: firstHeader.Hash(), - Header: firstHeader, - Body: types.NewBody([]types.Extrinsic{}), - Justification: nil, - } - - parentHash := firstHeader.Hash() - for idx := 1; idx < numBlocks; idx++ { - blockNumber := idx + startingAt - header := types.NewHeader(parentHash, tsRoot, common.Hash{}, - uint(blockNumber), nil) - response.BlockData[idx] = &types.BlockData{ - Hash: header.Hash(), - Header: header, - Body: types.NewBody([]types.Extrinsic{}), - Justification: nil, - } - parentHash = header.Hash() - } - - return response -} - -// ensureSuccessfulBlockImportFlow will setup the expectations for method calls -// that happens while chain sync imports a block -func ensureSuccessfulBlockImportFlow(t *testing.T, parentHeader *types.Header, - blocksReceived []*types.BlockData, mockBlockState *MockBlockState, - mockBabeVerifier *MockBabeVerifier, mockStorageState *MockStorageState, - mockImportHandler *MockBlockImportHandler, mockTelemetry *MockTelemetry, origin blockOrigin, announceBlock bool) { - t.Helper() - - for idx, blockData := range blocksReceived { - if origin != networkInitialSync { - mockBabeVerifier.EXPECT().VerifyBlock(blockData.Header).Return(nil) - } - - var previousHeader *types.Header - if idx == 0 { - previousHeader = parentHeader - } else { - previousHeader = blocksReceived[idx-1].Header - } - - mockBlockState.EXPECT().GetHeader(blockData.Header.ParentHash).Return(previousHeader, nil).AnyTimes() - mockStorageState.EXPECT().Lock().AnyTimes() - mockStorageState.EXPECT().Unlock().AnyTimes() - - emptyTrieState := storage.NewTrieState(inmemory_trie.NewEmptyTrie()) - parentStateRoot := previousHeader.StateRoot - mockStorageState.EXPECT().TrieState(&parentStateRoot). - Return(emptyTrieState, nil).AnyTimes() - - ctrl := gomock.NewController(t) - mockRuntimeInstance := NewMockInstance(ctrl) - mockBlockState.EXPECT().GetRuntime(previousHeader.Hash()). 
- Return(mockRuntimeInstance, nil).AnyTimes() - - expectedBlock := &types.Block{ - Header: *blockData.Header, - Body: *blockData.Body, - } - - mockRuntimeInstance.EXPECT().SetContextStorage(emptyTrieState).AnyTimes() - mockRuntimeInstance.EXPECT().ExecuteBlock(expectedBlock). - Return(nil, nil).AnyTimes() - - mockImportHandler.EXPECT().HandleBlockImport(expectedBlock, emptyTrieState, announceBlock). - Return(nil).AnyTimes() - - blockHash := blockData.Header.Hash() - expectedTelemetryMessage := telemetry.NewBlockImport( - &blockHash, - blockData.Header.Number, - "NetworkInitialSync") - mockTelemetry.EXPECT().SendMessage(expectedTelemetryMessage).AnyTimes() - mockBlockState.EXPECT().CompareAndSetBlockData(blockData).Return(nil).AnyTimes() - } -} - -func TestChainSync_validateResponseFields(t *testing.T) { - t.Parallel() - - block1Header := &types.Header{ - ParentHash: common.MustHexToHash("0x00597cb4bb4cc13bf119f6613aec7642d4c06a2e453de53d34aea6f3f1eeb504"), - Number: 2, - } - - block2Header := &types.Header{ - ParentHash: block1Header.Hash(), - Number: 3, - } - - cases := map[string]struct { - wantErr error - errString string - setupChainSync func(t *testing.T) *chainSync - requestedData byte - blockData *types.BlockData - }{ - "requested_bootstrap_data_but_got_nil_header": { - wantErr: errNilHeaderInResponse, - errString: "expected header, received none: " + - block2Header.Hash().String(), - requestedData: network.BootstrapRequestData, - blockData: &types.BlockData{ - Hash: block2Header.Hash(), - Header: nil, - Body: &types.Body{}, - Justification: &[]byte{0}, - }, - setupChainSync: func(t *testing.T) *chainSync { - ctrl := gomock.NewController(t) - blockStateMock := NewMockBlockState(ctrl) - blockStateMock.EXPECT().HasHeader(block1Header.ParentHash).Return(true, nil) - - networkMock := NewMockNetwork(ctrl) - networkMock.EXPECT().ReportPeer(peerset.ReputationChange{ - Value: peerset.IncompleteHeaderValue, - Reason: peerset.IncompleteHeaderReason, - }, peer.ID("peer")) - - return &chainSync{ - blockState: blockStateMock, - network: networkMock, - } - }, - }, - "requested_bootstrap_data_but_got_nil_body": { - wantErr: errNilBodyInResponse, - errString: "expected body, received none: " + - block2Header.Hash().String(), - requestedData: network.BootstrapRequestData, - blockData: &types.BlockData{ - Hash: block2Header.Hash(), - Header: block2Header, - Body: nil, - Justification: &[]byte{0}, - }, - setupChainSync: func(t *testing.T) *chainSync { - ctrl := gomock.NewController(t) - blockStateMock := NewMockBlockState(ctrl) - blockStateMock.EXPECT().HasHeader(block1Header.ParentHash).Return(true, nil) - networkMock := NewMockNetwork(ctrl) - - return &chainSync{ - blockState: blockStateMock, - network: networkMock, - } - }, - }, - "requested_only_justification_but_got_nil": { - wantErr: errNilJustificationInResponse, - errString: "expected justification, received none: " + - block2Header.Hash().String(), - requestedData: network.RequestedDataJustification, - blockData: &types.BlockData{ - Hash: block2Header.Hash(), - Header: block2Header, - Body: nil, - Justification: nil, - }, - setupChainSync: func(t *testing.T) *chainSync { - ctrl := gomock.NewController(t) - blockStateMock := NewMockBlockState(ctrl) - blockStateMock.EXPECT().HasHeader(block1Header.ParentHash).Return(true, nil) - networkMock := NewMockNetwork(ctrl) - - return &chainSync{ - blockState: blockStateMock, - network: networkMock, - } - }, - }, - } - - for tname, tt := range cases { - tt := tt - t.Run(tname, func(t *testing.T) { - 
t.Parallel() - - err := validateResponseFields(tt.requestedData, []*types.BlockData{tt.blockData}) - require.ErrorIs(t, err, tt.wantErr) - if tt.errString != "" { - require.EqualError(t, err, tt.errString) - } - }) - } -} - -func TestChainSync_isResponseAChain(t *testing.T) { - t.Parallel() - - block1Header := &types.Header{ - ParentHash: common.MustHexToHash("0x00597cb4bb4cc13bf119f6613aec7642d4c06a2e453de53d34aea6f3f1eeb504"), - Number: 2, - } - - block2Header := &types.Header{ - ParentHash: block1Header.Hash(), - Number: 3, - } - - block4Header := &types.Header{ - ParentHash: common.MustHexToHash("0x198616547187613bf119f6613aec7642d4c06a2e453de53d34aea6f390788677"), - Number: 4, - } - - cases := map[string]struct { - expected bool - blockData []*types.BlockData - }{ - "not_a_chain": { - expected: false, - blockData: []*types.BlockData{ - { - Hash: block1Header.Hash(), - Header: block1Header, - Body: &types.Body{}, - Justification: &[]byte{0}, - }, - { - Hash: block2Header.Hash(), - Header: block2Header, - Body: &types.Body{}, - Justification: &[]byte{0}, - }, - { - Hash: block4Header.Hash(), - Header: block4Header, - Body: &types.Body{}, - Justification: &[]byte{0}, - }, - }, - }, - "is_a_chain": { - expected: true, - blockData: []*types.BlockData{ - { - Hash: block1Header.Hash(), - Header: block1Header, - Body: &types.Body{}, - Justification: &[]byte{0}, - }, - { - Hash: block2Header.Hash(), - Header: block2Header, - Body: &types.Body{}, - Justification: &[]byte{0}, - }, - }, - }, - } - - for tname, tt := range cases { - tt := tt - t.Run(tname, func(t *testing.T) { - t.Parallel() - output := isResponseAChain(tt.blockData) - require.Equal(t, tt.expected, output) - }) - } -} - -func TestChainSync_doResponseGrowsTheChain(t *testing.T) { - block1Header := types.NewHeader(common.Hash{}, common.Hash{}, common.Hash{}, 1, types.NewDigest()) - block2Header := types.NewHeader(block1Header.Hash(), common.Hash{}, common.Hash{}, 2, types.NewDigest()) - block3Header := types.NewHeader(block2Header.Hash(), common.Hash{}, common.Hash{}, 3, types.NewDigest()) - block4Header := types.NewHeader(block3Header.Hash(), common.Hash{}, common.Hash{}, 4, types.NewDigest()) - - testcases := map[string]struct { - response []*types.BlockData - ongoingChain []*types.BlockData - startAt uint - exepectedTotal uint32 - expectedOut bool - }{ - // the ongoing chain does not have any data so the response - // can be inserted in the ongoing chain without any problems - "empty_ongoing_chain": { - ongoingChain: []*types.BlockData{}, - expectedOut: true, - }, - - "one_in_response_growing_ongoing_chain_without_check": { - startAt: 1, - exepectedTotal: 3, - // the ongoing chain contains 3 positions, the block number 1 is at position 0 - ongoingChain: []*types.BlockData{ - {Header: types.NewHeader(common.Hash{}, common.Hash{}, common.Hash{}, 1, types.NewDigest())}, - nil, - nil, - }, - - // the response contains the block number 3 which should be placed in position 2 - // in the ongoing chain, which means that no comparison should be done to place - // block number 3 in the ongoing chain - response: []*types.BlockData{ - {Header: types.NewHeader(common.Hash{}, common.Hash{}, common.Hash{}, 3, types.NewDigest())}, - }, - expectedOut: true, - }, - - "one_in_response_growing_ongoing_chain_by_checking_neighbours": { - startAt: 1, - exepectedTotal: 3, - // the ongoing chain contains 3 positions, the block number 1 is at position 0 - ongoingChain: []*types.BlockData{ - {Header: block1Header}, - nil, - {Header: block3Header}, - }, - - 
// the response contains the block number 2 which should be placed in position 1 - // in the ongoing chain, which means that a comparison should be made to check - // if the parent hash of block 2 is the same hash of block 1 - response: []*types.BlockData{ - {Header: block2Header}, - }, - expectedOut: true, - }, - - "one_in_response_failed_to_grow_ongoing_chain": { - startAt: 1, - exepectedTotal: 3, - ongoingChain: []*types.BlockData{ - {Header: block1Header}, - nil, - nil, - }, - response: []*types.BlockData{ - {Header: types.NewHeader(common.Hash{}, common.Hash{}, common.Hash{}, 2, types.NewDigest())}, - }, - expectedOut: false, - }, - - "many_in_response_grow_ongoing_chain_only_left_check": { - startAt: 1, - exepectedTotal: 3, - ongoingChain: []*types.BlockData{ - {Header: block1Header}, - nil, - nil, - nil, - }, - response: []*types.BlockData{ - {Header: block2Header}, - {Header: block3Header}, - }, - expectedOut: true, - }, - - "many_in_response_grow_ongoing_chain_left_right_check": { - startAt: 1, - exepectedTotal: 3, - ongoingChain: []*types.BlockData{ - {Header: block1Header}, - nil, - nil, - {Header: block4Header}, - }, - response: []*types.BlockData{ - {Header: block2Header}, - {Header: block3Header}, - }, - expectedOut: true, - }, - } - - for tname, tt := range testcases { - tt := tt - - t.Run(tname, func(t *testing.T) { - out := doResponseGrowsTheChain(tt.response, tt.ongoingChain, tt.startAt, tt.exepectedTotal) - require.Equal(t, tt.expectedOut, out) - }) - } -} - -func TestChainSync_getHighestBlock(t *testing.T) { - t.Parallel() - - cases := map[string]struct { - expectedHighestBlock uint - wantErr error - chainSyncPeerViewSet *peerViewSet - }{ - "no_peer_view": { - wantErr: errNoPeers, - expectedHighestBlock: 0, - chainSyncPeerViewSet: newPeerViewSet(10), - }, - "highest_block": { - expectedHighestBlock: 500, - chainSyncPeerViewSet: &peerViewSet{ - view: map[peer.ID]peerView{ - peer.ID("peer-A"): { - number: 100, - }, - peer.ID("peer-B"): { - number: 500, - }, - }, - }, - }, - } - - for tname, tt := range cases { - tt := tt - t.Run(tname, func(t *testing.T) { - t.Parallel() - - chainSync := &chainSync{ - peerViewSet: tt.chainSyncPeerViewSet, - } - - highestBlock, err := chainSync.getHighestBlock() - require.ErrorIs(t, err, tt.wantErr) - require.Equal(t, tt.expectedHighestBlock, highestBlock) - }) - } -} -func TestChainSync_BootstrapSync_SuccessfulSync_WithInvalidJusticationBlock(t *testing.T) { - // TODO: https://github.com/ChainSafe/gossamer/issues/3468 - t.Skip() - t.Parallel() - - ctrl := gomock.NewController(t) - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) - mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, - trie.EmptyHash, 0, types.NewDigest()) - - mockNetwork := NewMockNetwork(ctrl) - mockRequestMaker := NewMockRequestMaker(ctrl) - - mockBabeVerifier := NewMockBabeVerifier(ctrl) - mockStorageState := NewMockStorageState(ctrl) - mockImportHandler := NewMockBlockImportHandler(ctrl) - mockTelemetry := NewMockTelemetry(ctrl) - mockFinalityGadget := NewMockFinalityGadget(ctrl) - - // this test expects two workers responding each request with 128 blocks which means - // we should import 256 blocks in total - blockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, 129) - const announceBlock = false - - invalidJustificationBlock := blockResponse.BlockData[90] - invalidJustification := &[]byte{0x01, 0x01, 0x01, 0x02} - 
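The justification test being assembled here depends on verification acting as a hard gate: a justification that fails verification aborts the sync round with the verifier's error instead of importing the block. A small sketch of that gate; importBlock, verify and doImport are hypothetical stand-ins for the finality-gadget wiring, not gossamer's API:

package main

import (
	"errors"
	"fmt"
)

// importBlock verifies any attached justification before the block is
// handed to the import pipeline; a verification error aborts the round.
func importBlock(hash string, justification []byte,
	verify func(hash string, j []byte) error,
	doImport func(hash string) error) error {
	if justification != nil {
		if err := verify(hash, justification); err != nil {
			return fmt.Errorf("verifying block justification: %w", err)
		}
	}
	return doImport(hash)
}

func main() {
	errVerify := errors.New("VerifyBlockJustification mock error")
	verify := func(string, []byte) error { return errVerify }
	doImport := func(string) error { return nil }

	err := importBlock("0xabc", []byte{0x01, 0x01, 0x01, 0x02}, verify, doImport)
	fmt.Println(errors.Is(err, errVerify)) // true: the block is never imported
}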
invalidJustificationBlock.Justification = invalidJustification - - // here we split the whole set in two parts each one will be the "response" for each peer - worker1Response := &network.BlockResponseMessage{ - BlockData: blockResponse.BlockData[:128], - } - - // the first peer will respond the from the block 1 to 128 so the ensureBlockImportFlow - // will setup the expectations starting from the genesis header until block 128 - ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData[:90], mockBlockState, - mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock) - - errVerifyBlockJustification := errors.New("VerifyBlockJustification mock error") - mockFinalityGadget.EXPECT(). - VerifyBlockJustification( - invalidJustificationBlock.Header.Hash(), - *invalidJustification). - Return(uint64(0), uint64(0), errVerifyBlockJustification) - - // we use gomock.Any since I cannot guarantee which peer picks which request - // but the first call to DoBlockRequest will return the first set and the second - // call will return the second set - mockRequestMaker.EXPECT(). - Do(gomock.Any(), gomock.Any(), &network.BlockResponseMessage{}). - DoAndReturn(func(peerID, _, response any) any { - responsePtr := response.(*network.BlockResponseMessage) - *responsePtr = *worker1Response - - fmt.Println("mocked request maker") - return nil - }) - - // setup a chain sync which holds in its peer view map - // 3 peers, each one announce block 129 as its best block number. - // We start this test with genesis block being our best block, so - // we're far behind by 128 blocks, we should execute a bootstrap - // sync request those blocks - const blocksAhead = 128 - cs := setupChainSyncToBootstrapMode(t, blocksAhead, - mockBlockState, mockNetwork, mockRequestMaker, mockBabeVerifier, - mockStorageState, mockImportHandler, mockTelemetry) - - cs.finalityGadget = mockFinalityGadget - - target := cs.peerViewSet.getTarget() - require.Equal(t, uint(blocksAhead), target) - - // include a new worker in the worker pool set, this worker - // should be an available peer that will receive a block request - // the worker pool executes the workers management - cs.workerPool.fromBlockAnnounce(peer.ID("alice")) - //cs.workerPool.fromBlockAnnounce(peer.ID("bob")) - - err := cs.requestMaxBlocksFrom(mockedGenesisHeader, networkInitialSync) - require.ErrorIs(t, err, errVerifyBlockJustification) - - err = cs.workerPool.stop() - require.NoError(t, err) - - // peer should be not in the worker pool - // peer should be in the ignore list - require.Len(t, cs.workerPool.workers, 1) -} diff --git a/dot/sync/disjoint_block_set.go b/dot/sync/disjoint_block_set.go deleted file mode 100644 index 95b9f7407b..0000000000 --- a/dot/sync/disjoint_block_set.go +++ /dev/null @@ -1,290 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "errors" - "sync" - "time" - - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/common" - "golang.org/x/exp/maps" -) - -const ( - // ttl is the time that a block can stay in this set before being cleared. - ttl = 10 * time.Minute - clearBlocksInterval = time.Minute -) - -var ( - errUnknownBlock = errors.New("cannot add justification for unknown block") - errSetAtLimit = errors.New("cannot add block; set is at capacity") -) - -// DisjointBlockSet represents a set of incomplete blocks, or blocks -// with an unknown parent. 
It is implemented by *disjointBlockSet. -type DisjointBlockSet interface { - run(finalisedCh <-chan *types.FinalisationInfo, stop <-chan struct{}, wg *sync.WaitGroup) - addHashAndNumber(hash common.Hash, number uint) error - addHeader(*types.Header) error - addBlock(*types.Block) error - addJustification(common.Hash, []byte) error - removeBlock(common.Hash) - removeLowerBlocks(num uint) - getBlock(common.Hash) *pendingBlock - getBlocks() []*pendingBlock - hasBlock(common.Hash) bool - size() int -} - -// pendingBlock stores a block that we know of but that is not yet ready to be processed. -// It is a different type than *types.Block because we may wish to set the block -// hash and number without knowing the entire header yet; -// this allows us to easily check which fields are missing. -type pendingBlock struct { - hash common.Hash - number uint - header *types.Header - body *types.Body - justification []byte - - // the time when this block should be cleared from the set. - // if the block is re-added to the set, this time gets updated. - clearAt time.Time -} - -func newPendingBlock(hash common.Hash, number uint, - header *types.Header, body *types.Body, clearAt time.Time) *pendingBlock { - return &pendingBlock{ - hash: hash, - number: number, - header: header, - body: body, - clearAt: clearAt, - } -} - -func (b *pendingBlock) toBlockData() *types.BlockData { - if b.justification == nil { - return &types.BlockData{ - Hash: b.hash, - Header: b.header, - Body: b.body, - } - } - - return &types.BlockData{ - Hash: b.hash, - Header: b.header, - Body: b.body, - Justification: &b.justification, - } -} - -// disjointBlockSet contains a list of incomplete (pending) blocks. -// The header may have empty fields: a block may have its hash and number only, -// or all of its header fields, or it may be complete. -// -// if the header is complete, but the body is missing, then we need to request -// the block body. -// -// if the block is complete, we may not know of its parent. 
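The doc comments above describe three completeness levels for a pending block (hash and number only, header known, fully complete), and the follow-up action depends on the level reached. A sketch of that decision with trimmed-down fields (pending and nextAction are illustrative names, not part of the set's API):

package main

import "fmt"

// pending is a trimmed stand-in for pendingBlock.
type pending struct {
	number uint
	header *struct{}
	body   *struct{}
}

// nextAction reports what the sync should request next for a pending
// block, following the completeness levels described above.
func nextAction(b pending) string {
	switch {
	case b.header == nil:
		return "request full header"
	case b.body == nil:
		return "request block body"
	default:
		return "ready to be processed"
	}
}

func main() {
	fmt.Println(nextAction(pending{number: 10}))                              // request full header
	fmt.Println(nextAction(pending{number: 10, header: &struct{}{}}))         // request block body
	fmt.Println(nextAction(pending{header: &struct{}{}, body: &struct{}{}})) // ready to be processed
}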
-// disjointBlockSet contains a list of incomplete (pending) blocks.
-// the header may have empty fields; they may have hash and number only,
-// or they may have all their header fields, or they may be complete.
-//
-// if the header is complete, but the body is missing, then we need to request
-// the block body.
-//
-// if the block is complete, we may not know of its parent.
-type disjointBlockSet struct {
-	sync.RWMutex
-	limit int
-
-	// map of block hash -> block data
-	blocks map[common.Hash]*pendingBlock
-
-	// map of parent hash -> child hashes
-	parentToChildren map[common.Hash]map[common.Hash]struct{}
-
-	timeNow func() time.Time
-}
-
-func newDisjointBlockSet(limit int) *disjointBlockSet {
-	return &disjointBlockSet{
-		blocks:           make(map[common.Hash]*pendingBlock),
-		parentToChildren: make(map[common.Hash]map[common.Hash]struct{}),
-		limit:            limit,
-		timeNow:          time.Now,
-	}
-}
-
-func (s *disjointBlockSet) run(finalisedCh <-chan *types.FinalisationInfo, stop <-chan struct{}, wg *sync.WaitGroup) {
-	ticker := time.NewTicker(clearBlocksInterval)
-	defer func() {
-		ticker.Stop()
-		wg.Done()
-	}()
-
-	for {
-		select {
-		case <-ticker.C:
-			s.clearBlocks()
-		case finalisedInfo := <-finalisedCh:
-			s.removeLowerBlocks(finalisedInfo.Header.Number)
-		case <-stop:
-			return
-		}
-	}
-}
-
-func (s *disjointBlockSet) clearBlocks() {
-	s.Lock()
-	defer s.Unlock()
-
-	for _, block := range s.blocks {
-		if s.timeNow().Sub(block.clearAt) > 0 {
-			s.removeBlockInner(block.hash)
-		}
-	}
-}
-
-func (s *disjointBlockSet) addToParentMap(parent, child common.Hash) {
-	children, has := s.parentToChildren[parent]
-	if !has {
-		children = make(map[common.Hash]struct{})
-		s.parentToChildren[parent] = children
-	}
-
-	children[child] = struct{}{}
-}
-
-func (s *disjointBlockSet) addHashAndNumber(hash common.Hash, number uint) error {
-	s.Lock()
-	defer s.Unlock()
-
-	if b, has := s.blocks[hash]; has {
-		b.clearAt = s.timeNow().Add(ttl)
-		return nil
-	}
-
-	if len(s.blocks) == s.limit {
-		return errSetAtLimit
-	}
-
-	s.blocks[hash] = newPendingBlock(hash, number, nil, nil, s.timeNow().Add(ttl))
-	return nil
-}
-
-func (s *disjointBlockSet) addHeader(header *types.Header) error {
-	s.Lock()
-	defer s.Unlock()
-
-	hash := header.Hash()
-	if b, has := s.blocks[hash]; has {
-		b.header = header
-		b.clearAt = s.timeNow().Add(ttl)
-		return nil
-	}
-
-	if len(s.blocks) == s.limit {
-		return errSetAtLimit
-	}
-
-	s.blocks[hash] = newPendingBlock(hash, header.Number, header, nil, s.timeNow().Add(ttl))
-	s.addToParentMap(header.ParentHash, hash)
-	return nil
-}
-
-func (s *disjointBlockSet) addBlock(block *types.Block) error {
-	s.Lock()
-	defer s.Unlock()
-
-	hash := block.Header.Hash()
-	if b, has := s.blocks[hash]; has {
-		b.header = &block.Header
-		b.body = &block.Body
-		b.clearAt = s.timeNow().Add(ttl)
-		return nil
-	}
-
-	if len(s.blocks) == s.limit {
-		return errSetAtLimit
-	}
-
-	s.blocks[hash] = newPendingBlock(hash, block.Header.Number, &block.Header, &block.Body, s.timeNow().Add(ttl))
-	s.addToParentMap(block.Header.ParentHash, hash)
-	return nil
-}
-
-func (s *disjointBlockSet) addJustification(hash common.Hash, just []byte) error {
-	s.Lock()
-	defer s.Unlock()
-
-	b, has := s.blocks[hash]
-	if has {
-		b.justification = just
-		b.clearAt = s.timeNow().Add(ttl)
-		return nil
-	}
-
-	// the block must already be known to the set if we are storing a justification for it
-	return errUnknownBlock
-}
-
-func (s *disjointBlockSet) removeBlock(hash common.Hash) {
-	s.Lock()
-	defer s.Unlock()
-	s.removeBlockInner(hash)
-}
-
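-// exampleDisjointBlockSetUsage is a hedged usage sketch (not called anywhere);
-// it shows the intended add -> complete -> query flow, assuming a header and
-// body obtained elsewhere. The limit of 512 is an arbitrary illustration value.
-func exampleDisjointBlockSetUsage(header *types.Header, body *types.Body) error {
-	set := newDisjointBlockSet(512)
-
-	// an announced header is stored as a pending block keyed by its hash;
-	// re-adding the same block later only refreshes its TTL (clearAt)
-	if err := set.addHeader(header); err != nil {
-		return err // errSetAtLimit once the set holds `limit` blocks
-	}
-
-	// when the body arrives, addBlock completes the same pending block in place
-	if err := set.addBlock(&types.Block{Header: *header, Body: *body}); err != nil {
-		return err
-	}
-
-	_ = set.hasBlock(header.Hash()) // true
-	_ = set.size()                  // 1
-	return nil
-}
-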
-// this function does not lock!!
-// it should only be called by other functions in this file that lock the set beforehand.
-func (s *disjointBlockSet) removeBlockInner(hash common.Hash) {
-	block, has := s.blocks[hash]
-	if !has {
-		return
-	}
-
-	// clear block from parent->child map if its parent was known
-	if block.header != nil {
-		delete(s.parentToChildren[block.header.ParentHash], hash)
-		if len(s.parentToChildren[block.header.ParentHash]) == 0 {
-			delete(s.parentToChildren, block.header.ParentHash)
-		}
-	}
-
-	delete(s.blocks, hash)
-}
-
-// removeLowerBlocks removes all blocks with a number equal to or less than the given number
-// from the set. it should be called when a new block is finalised to clean up the set.
-func (s *disjointBlockSet) removeLowerBlocks(num uint) {
-	blocks := s.getBlocks()
-	for _, block := range blocks {
-		if block.number <= num {
-			s.removeBlock(block.hash)
-		}
-	}
-}
-
-func (s *disjointBlockSet) hasBlock(hash common.Hash) bool {
-	s.RLock()
-	defer s.RUnlock()
-	_, has := s.blocks[hash]
-	return has
-}
-
-func (s *disjointBlockSet) size() int {
-	s.RLock()
-	defer s.RUnlock()
-	return len(s.blocks)
-}
-
-func (s *disjointBlockSet) getBlock(hash common.Hash) *pendingBlock {
-	s.RLock()
-	defer s.RUnlock()
-	return s.blocks[hash]
-}
-
-func (s *disjointBlockSet) getBlocks() []*pendingBlock {
-	s.RLock()
-	defer s.RUnlock()
-
-	return maps.Values(s.blocks)
-}
diff --git a/dot/sync/disjoint_block_set_integration_test.go b/dot/sync/disjoint_block_set_integration_test.go
deleted file mode 100644
index ec6745ba56..0000000000
--- a/dot/sync/disjoint_block_set_integration_test.go
+++ /dev/null
@@ -1,135 +0,0 @@
-//go:build integration
-
-// Copyright 2021 ChainSafe Systems (ON)
-// SPDX-License-Identifier: LGPL-3.0-only
-
-package sync
-
-import (
-	"testing"
-	"time"
-
-	"github.com/ChainSafe/gossamer/dot/types"
-	"github.com/ChainSafe/gossamer/lib/common"
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-)
-
-func TestDisjointBlockSet(t *testing.T) {
-	s := newDisjointBlockSet(pendingBlocksLimit)
-
-	hash := common.Hash{0xa, 0xb}
-	const number uint = 100
-	s.addHashAndNumber(hash, number)
-	require.True(t, s.hasBlock(hash))
-	require.Equal(t, 1, s.size())
-
-	expected := &pendingBlock{
-		hash:   hash,
-		number: number,
-	}
-	blocks := s.getBlocks()
-	require.Equal(t, 1, len(blocks))
-	assert.Greater(t, blocks[0].clearAt, time.Now().Add(ttl-time.Minute))
-	blocks[0].clearAt = time.Time{}
-	require.Equal(t, expected, blocks[0])
-
-	header := &types.Header{
-		Number: 100,
-	}
-	s.addHeader(header)
-	require.True(t, s.hasBlock(header.Hash()))
-	require.Equal(t, 2, s.size())
-	expected = &pendingBlock{
-		hash:   header.Hash(),
-		number: header.Number,
-		header: header,
-	}
-	block1 := s.getBlock(header.Hash())
-	assert.Greater(t, block1.clearAt, time.Now().Add(ttl-time.Minute))
-	block1.clearAt = time.Time{}
-	require.Equal(t, expected, block1)
-
-	header2 := &types.Header{
-		Number: 999,
-	}
-	s.addHashAndNumber(header2.Hash(), header2.Number)
-	require.Equal(t, 3, s.size())
-	s.addHeader(header2)
-	require.Equal(t, 3, s.size())
-	expected = &pendingBlock{
-		hash:   header2.Hash(),
-		number: header2.Number,
-		header: header2,
-	}
-	block2 := s.getBlock(header2.Hash())
-	assert.Greater(t, block2.clearAt, time.Now().Add(ttl-time.Minute))
-	block2.clearAt = time.Time{}
-	require.Equal(t, expected, block2)
-
-	block := &types.Block{
-		Header: *header2,
-		Body:   types.Body{{0xa}},
-	}
-	s.addBlock(block)
-	require.Equal(t, 3, s.size())
-	expected = &pendingBlock{
-		hash:   header2.Hash(),
-		number: header2.Number,
-		header: header2,
-		body:   &block.Body,
-	}
-	block3 :=
s.getBlock(header2.Hash()) - assert.Greater(t, block3.clearAt, time.Now().Add(ttl-time.Minute)) - block3.clearAt = time.Time{} - require.Equal(t, expected, block3) - - s.removeBlock(hash) - require.Equal(t, 2, s.size()) - require.False(t, s.hasBlock(hash)) - - s.removeLowerBlocks(998) - require.Equal(t, 1, s.size()) - require.False(t, s.hasBlock(header.Hash())) - require.True(t, s.hasBlock(header2.Hash())) -} - -func TestPendingBlock_toBlockData(t *testing.T) { - pb := &pendingBlock{ - hash: common.Hash{0xa, 0xb, 0xc}, - number: 1, - header: &types.Header{ - Number: 1, - }, - body: &types.Body{{0x1, 0x2, 0x3}}, - } - - expected := &types.BlockData{ - Hash: pb.hash, - Header: pb.header, - Body: pb.body, - } - - require.Equal(t, expected, pb.toBlockData()) -} - -func TestDisjointBlockSet_ClearBlocks(t *testing.T) { - s := newDisjointBlockSet(pendingBlocksLimit) - - testHashA := common.Hash{0} - testHashB := common.Hash{1} - - s.blocks[testHashA] = &pendingBlock{ - hash: testHashA, - clearAt: time.Unix(1000, 0), - } - s.blocks[testHashB] = &pendingBlock{ - hash: testHashB, - clearAt: time.Now().Add(ttl * 2), - } - - s.clearBlocks() - require.Equal(t, 1, len(s.blocks)) - _, has := s.blocks[testHashB] - require.True(t, has) -} diff --git a/dot/sync/disjoint_block_set_test.go b/dot/sync/disjoint_block_set_test.go deleted file mode 100644 index 4481a28a06..0000000000 --- a/dot/sync/disjoint_block_set_test.go +++ /dev/null @@ -1,484 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "errors" - "testing" - "time" - - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/common" - "github.com/stretchr/testify/assert" -) - -func Test_disjointBlockSet_addBlock(t *testing.T) { - t.Parallel() - - hashHeader := func(header types.Header) common.Hash { - return header.Hash() - } - setHashToHeader := func(header types.Header) *types.Header { - header.Hash() - return &header - } - - timeNow := func() time.Time { - return time.Unix(0, 0) - } - tests := map[string]struct { - disjointBlockSet *disjointBlockSet - block *types.Block - expectedDisjointBlockSet *disjointBlockSet - err error - }{ - "add_block_beyond_capacity": { - disjointBlockSet: &disjointBlockSet{}, - block: &types.Block{ - Header: types.Header{ - Number: 1, - }, - }, - expectedDisjointBlockSet: &disjointBlockSet{}, - err: errSetAtLimit, - }, - "add_block": { - disjointBlockSet: &disjointBlockSet{ - limit: 1, - blocks: make(map[common.Hash]*pendingBlock), - timeNow: timeNow, - parentToChildren: make(map[common.Hash]map[common.Hash]struct{}), - }, - block: &types.Block{ - Header: types.Header{ - Number: 1, - ParentHash: common.Hash{1}, - }, - Body: []types.Extrinsic{[]byte{1}}, - }, - expectedDisjointBlockSet: &disjointBlockSet{ - limit: 1, - blocks: map[common.Hash]*pendingBlock{ - hashHeader(types.Header{Number: 1, ParentHash: common.Hash{1}}): { - hash: hashHeader(types.Header{Number: 1, ParentHash: common.Hash{1}}), - number: 1, - header: setHashToHeader(types.Header{Number: 1, ParentHash: common.Hash{1}}), - body: &types.Body{{1}}, - clearAt: time.Unix(0, int64(ttl)), - }, - }, - parentToChildren: map[common.Hash]map[common.Hash]struct{}{ - {1}: { - hashHeader(types.Header{Number: 1, ParentHash: common.Hash{1}}): {}, - }, - }, - }, - }, - "has_block": { - disjointBlockSet: &disjointBlockSet{ - limit: 1, - blocks: map[common.Hash]*pendingBlock{ - hashHeader(types.Header{Number: 1, ParentHash: common.Hash{1}}): { - hash: 
hashHeader(types.Header{Number: 1, ParentHash: common.Hash{1}}), - number: 1, - header: setHashToHeader(types.Header{Number: 1, ParentHash: common.Hash{1}}), - body: &types.Body{{1}}, - clearAt: time.Unix(0, int64(ttl)), - }, - }, - timeNow: timeNow, - parentToChildren: make(map[common.Hash]map[common.Hash]struct{}), - }, - block: &types.Block{ - Header: types.Header{ - Number: 1, - ParentHash: common.Hash{1}, - }, - Body: []types.Extrinsic{[]byte{1}}, - }, - expectedDisjointBlockSet: &disjointBlockSet{ - limit: 1, - blocks: map[common.Hash]*pendingBlock{ - hashHeader(types.Header{Number: 1, ParentHash: common.Hash{1}}): { - hash: hashHeader(types.Header{Number: 1, ParentHash: common.Hash{1}}), - number: 1, - header: setHashToHeader(types.Header{Number: 1, ParentHash: common.Hash{1}}), - body: &types.Body{{1}}, - justification: nil, - clearAt: time.Unix(0, int64(ttl)), - }, - }, - parentToChildren: map[common.Hash]map[common.Hash]struct{}{}, - }, - }, - } - for name, tt := range tests { - tt := tt - t.Run(name, func(t *testing.T) { - t.Parallel() - err := tt.disjointBlockSet.addBlock(tt.block) - if tt.err != nil { - assert.EqualError(t, err, tt.err.Error()) - } else { - assert.NoError(t, err) - } - - tt.disjointBlockSet.timeNow = nil - assert.Equal(t, tt.expectedDisjointBlockSet, tt.disjointBlockSet) - }) - } -} - -func Test_disjointBlockSet_addHeader(t *testing.T) { - t.Parallel() - - hashHeader := func(header types.Header) common.Hash { - return header.Hash() - } - setHashToHeader := func(header types.Header) *types.Header { - header.Hash() - return &header - } - - tests := map[string]struct { - disjointBlockSet *disjointBlockSet - header *types.Header - expectedDisjointBlockSet *disjointBlockSet - err error - }{ - "add_header_beyond_capactiy": { - disjointBlockSet: &disjointBlockSet{}, - header: &types.Header{ - Number: 1, - }, - expectedDisjointBlockSet: &disjointBlockSet{}, - err: errors.New("cannot add block; set is at capacity"), - }, - "add_header": { - disjointBlockSet: &disjointBlockSet{ - blocks: make(map[common.Hash]*pendingBlock), - limit: 1, - timeNow: func() time.Time { return time.Unix(0, 0) }, - parentToChildren: make(map[common.Hash]map[common.Hash]struct{}), - }, - header: &types.Header{ - Number: 1, - ParentHash: common.Hash{1}, - }, - expectedDisjointBlockSet: &disjointBlockSet{ - limit: 1, - blocks: map[common.Hash]*pendingBlock{ - hashHeader(types.Header{Number: 1, ParentHash: common.Hash{1}}): { - hash: hashHeader(types.Header{Number: 1, ParentHash: common.Hash{1}}), - number: 1, - header: setHashToHeader(types.Header{Number: 1, ParentHash: common.Hash{1}}), - clearAt: time.Unix(0, int64(ttl)), - }, - }, - parentToChildren: map[common.Hash]map[common.Hash]struct{}{ - {1}: { - hashHeader(types.Header{Number: 1, ParentHash: common.Hash{1}}): {}, - }, - }, - }, - }, - "has_header": { - disjointBlockSet: &disjointBlockSet{ - blocks: map[common.Hash]*pendingBlock{ - hashHeader(types.Header{Number: 1, ParentHash: common.Hash{1}}): { - hash: hashHeader(types.Header{Number: 1, ParentHash: common.Hash{1}}), - number: 1, - header: setHashToHeader(types.Header{Number: 1, ParentHash: common.Hash{1}}), - clearAt: time.Unix(0, int64(ttl)), - }, - }, - limit: 1, - timeNow: func() time.Time { return time.Unix(0, 0) }, - parentToChildren: make(map[common.Hash]map[common.Hash]struct{}), - }, - header: &types.Header{ - Number: 1, - ParentHash: common.Hash{1}, - }, - expectedDisjointBlockSet: &disjointBlockSet{ - limit: 1, - blocks: map[common.Hash]*pendingBlock{ - 
hashHeader(types.Header{Number: 1, ParentHash: common.Hash{1}}): { - hash: hashHeader(types.Header{Number: 1, ParentHash: common.Hash{1}}), - number: 1, - header: setHashToHeader(types.Header{Number: 1, ParentHash: common.Hash{1}}), - clearAt: time.Unix(0, int64(ttl)), - }, - }, - parentToChildren: map[common.Hash]map[common.Hash]struct{}{}, - }, - }, - } - for name, tt := range tests { - tt := tt - t.Run(name, func(t *testing.T) { - t.Parallel() - err := tt.disjointBlockSet.addHeader(tt.header) - if tt.err != nil { - assert.EqualError(t, err, tt.err.Error()) - } else { - assert.NoError(t, err) - } - - tt.disjointBlockSet.timeNow = nil - assert.Equal(t, tt.expectedDisjointBlockSet, tt.disjointBlockSet) - }) - } -} - -func Test_disjointBlockSet_clearBlocks(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - disjointBlockSet *disjointBlockSet - remaining map[common.Hash]*pendingBlock - }{ - { - name: "base_case", - disjointBlockSet: &disjointBlockSet{ - limit: 0, - blocks: map[common.Hash]*pendingBlock{ - {1}: { - clearAt: time.Unix(1000, 0), - hash: common.Hash{1}, - }, - }, - timeNow: func() time.Time { return time.Unix(1001, 0) }, - }, - remaining: map[common.Hash]*pendingBlock{}, - }, - { - name: "remove_clear_one_block", - disjointBlockSet: &disjointBlockSet{ - limit: 0, - blocks: map[common.Hash]*pendingBlock{ - {1}: { - clearAt: time.Unix(1000, 0), - hash: common.Hash{1}, - }, - {2}: { - clearAt: time.Unix(1002, 0), - hash: common.Hash{2}, - }, - }, - timeNow: func() time.Time { return time.Unix(1001, 0) }, - }, - remaining: map[common.Hash]*pendingBlock{ - {2}: { - clearAt: time.Unix(1002, 0), - hash: common.Hash{2}, - }, - }, - }, - } - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - tt.disjointBlockSet.clearBlocks() - assert.Equal(t, tt.remaining, tt.disjointBlockSet.blocks) - }) - } -} - -func Test_disjointBlockSet_getBlocks(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - disjointBlockSet *disjointBlockSet - want []*pendingBlock - wantDisjointBlockSet *disjointBlockSet - }{ - { - name: "no blocks", - disjointBlockSet: &disjointBlockSet{}, - want: []*pendingBlock{}, - wantDisjointBlockSet: &disjointBlockSet{}, - }, - { - name: "base_case", - disjointBlockSet: &disjointBlockSet{ - blocks: map[common.Hash]*pendingBlock{ - {}: {}, - }, - }, - want: []*pendingBlock{{}}, - wantDisjointBlockSet: &disjointBlockSet{ - blocks: map[common.Hash]*pendingBlock{ - {}: {}, - }, - }, - }, - } - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - blocks := tt.disjointBlockSet.getBlocks() - assert.Equal(t, tt.want, blocks) - assert.Equal(t, tt.wantDisjointBlockSet, tt.disjointBlockSet) - }) - } -} - -func Test_disjointBlockSet_removeLowerBlocks(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - disjointBlockSet *disjointBlockSet - num uint - remaining map[common.Hash]*pendingBlock - wantDisjointBlockSet *disjointBlockSet - }{ - { - name: "number_0", - disjointBlockSet: &disjointBlockSet{ - blocks: map[common.Hash]*pendingBlock{ - {1}: { - hash: common.Hash{1}, - number: 1, - }, - {10}: { - hash: common.Hash{10}, - number: 10, - }, - }, - }, - num: 0, - remaining: map[common.Hash]*pendingBlock{ - {1}: { - hash: common.Hash{1}, - number: 1, - }, - {10}: { - hash: common.Hash{10}, - number: 10, - }, - }, - wantDisjointBlockSet: &disjointBlockSet{ - blocks: map[common.Hash]*pendingBlock{ - {1}: { - hash: common.Hash{1}, - number: 1, - }, - {10}: { - hash: 
common.Hash{10}, - number: 10, - }, - }, - }, - }, - { - name: "number_1", - disjointBlockSet: &disjointBlockSet{ - blocks: map[common.Hash]*pendingBlock{ - {1}: { - hash: common.Hash{1}, - number: 1, - }, - {10}: { - hash: common.Hash{10}, - number: 10, - }, - }, - }, - num: 1, - remaining: map[common.Hash]*pendingBlock{{10}: { - hash: common.Hash{10}, - number: 10, - }, - }, - wantDisjointBlockSet: &disjointBlockSet{ - blocks: map[common.Hash]*pendingBlock{ - {10}: { - hash: common.Hash{10}, - number: 10, - }, - }, - }, - }, - { - name: "number_11", - disjointBlockSet: &disjointBlockSet{ - blocks: map[common.Hash]*pendingBlock{ - {1}: { - hash: common.Hash{1}, - number: 1, - }, - {10}: { - hash: common.Hash{10}, - number: 10, - }, - }, - }, - num: 11, - remaining: map[common.Hash]*pendingBlock{}, - wantDisjointBlockSet: &disjointBlockSet{ - blocks: map[common.Hash]*pendingBlock{}, - }, - }, - } - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - tt.disjointBlockSet.removeLowerBlocks(tt.num) - assert.Equal(t, tt.remaining, tt.disjointBlockSet.blocks) - assert.Equal(t, tt.wantDisjointBlockSet, tt.disjointBlockSet) - }) - } -} - -func Test_disjointBlockSet_size(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - disjointBlockSet *disjointBlockSet - want int - }{ - { - name: "expect_0", - disjointBlockSet: &disjointBlockSet{ - blocks: map[common.Hash]*pendingBlock{}, - }, - want: 0, - }, - { - name: "expect_1", - disjointBlockSet: &disjointBlockSet{ - blocks: map[common.Hash]*pendingBlock{ - {1}: {hash: common.Hash{1}, number: 1}, - }, - }, - want: 1, - }, - { - name: "expect_2", - disjointBlockSet: &disjointBlockSet{ - blocks: map[common.Hash]*pendingBlock{ - {1}: {hash: common.Hash{1}, number: 1}, - {10}: {hash: common.Hash{10}, number: 10}, - }, - }, - want: 2, - }, - } - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - size := tt.disjointBlockSet.size() - assert.Equal(t, tt.want, size) - }) - } -} diff --git a/dot/sync/errors.go b/dot/sync/errors.go deleted file mode 100644 index 92947ddef3..0000000000 --- a/dot/sync/errors.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "errors" -) - -var ( - errBlockStatePaused = errors.New("blockstate service has been paused") - errMaxNumberOfSameRequest = errors.New("max number of same request reached") - - // ErrInvalidBlockRequest is returned when an invalid block request is received - ErrInvalidBlockRequest = errors.New("invalid block request") - errInvalidRequestDirection = errors.New("invalid request direction") - errRequestStartTooHigh = errors.New("request start number is higher than our best block") - - // chainSync errors - - errNoPeers = errors.New("no peers to sync with") - errPeerOnInvalidFork = errors.New("peer is on an invalid fork") - - errStartAndEndMismatch = errors.New("request start and end hash are not on the same chain") - errFailedToGetDescendant = errors.New("failed to find descendant block") - errAlreadyInDisjointSet = errors.New("already in disjoint set") -) diff --git a/dot/sync/interfaces.go b/dot/sync/interfaces.go deleted file mode 100644 index 03820704a5..0000000000 --- a/dot/sync/interfaces.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "encoding/json" - "sync" - - "github.com/ChainSafe/gossamer/dot/peerset" - 
"github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/common" - "github.com/ChainSafe/gossamer/lib/runtime" - rtstorage "github.com/ChainSafe/gossamer/lib/runtime/storage" - "github.com/libp2p/go-libp2p/core/peer" -) - -// BlockState is the interface for the block state -type BlockState interface { - BestBlockHeader() (*types.Header, error) - BestBlockNumber() (number uint, err error) - CompareAndSetBlockData(bd *types.BlockData) error - GetBlockBody(common.Hash) (*types.Body, error) - GetHeader(common.Hash) (*types.Header, error) - HasHeader(hash common.Hash) (bool, error) - Range(startHash, endHash common.Hash) (hashes []common.Hash, err error) - RangeInMemory(start, end common.Hash) ([]common.Hash, error) - GetReceipt(common.Hash) ([]byte, error) - GetMessageQueue(common.Hash) ([]byte, error) - GetJustification(common.Hash) ([]byte, error) - SetJustification(hash common.Hash, data []byte) error - GetHashByNumber(blockNumber uint) (common.Hash, error) - GetBlockByHash(common.Hash) (*types.Block, error) - GetRuntime(blockHash common.Hash) (runtime runtime.Instance, err error) - StoreRuntime(blockHash common.Hash, runtime runtime.Instance) - GetHighestFinalisedHeader() (*types.Header, error) - GetFinalisedNotifierChannel() chan *types.FinalisationInfo - GetHeaderByNumber(num uint) (*types.Header, error) - GetAllBlocksAtNumber(num uint) ([]common.Hash, error) - IsDescendantOf(parent, child common.Hash) (bool, error) - - IsPaused() bool - Pause() error -} - -// StorageState is the interface for the storage state -type StorageState interface { - TrieState(root *common.Hash) (*rtstorage.TrieState, error) - sync.Locker -} - -// TransactionState is the interface for transaction queue methods -type TransactionState interface { - RemoveExtrinsic(ext types.Extrinsic) -} - -// BabeVerifier deals with BABE block verification -type BabeVerifier interface { - VerifyBlock(header *types.Header) error -} - -// FinalityGadget implements justification verification functionality -type FinalityGadget interface { - VerifyBlockJustification(common.Hash, []byte) error -} - -// BlockImportHandler is the interface for the handler of newly imported blocks -type BlockImportHandler interface { - HandleBlockImport(block *types.Block, state *rtstorage.TrieState, announce bool) error -} - -// Network is the interface for the network -type Network interface { - // Peers returns a list of currently connected peers - Peers() []common.PeerInfo - - // ReportPeer reports peer based on the peer behaviour. - ReportPeer(change peerset.ReputationChange, p peer.ID) - - AllConnectedPeersIDs() []peer.ID - - BlockAnnounceHandshake(*types.Header) error -} - -// Telemetry is the telemetry client to send telemetry messages. 
-type Telemetry interface { - SendMessage(msg json.Marshaler) -} diff --git a/dot/sync/message.go b/dot/sync/message.go deleted file mode 100644 index df4b58a777..0000000000 --- a/dot/sync/message.go +++ /dev/null @@ -1,415 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "bytes" - "fmt" - - "github.com/ChainSafe/gossamer/dot/network" - "github.com/ChainSafe/gossamer/dot/peerset" - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/common" - "github.com/libp2p/go-libp2p/core/peer" -) - -const maxNumberOfSameRequestPerPeer uint = 2 - -// CreateBlockResponse creates a block response message from a block request message -func (s *Service) CreateBlockResponse(from peer.ID, req *network.BlockRequestMessage) ( - *network.BlockResponseMessage, error) { - logger.Debugf("sync request from %s: %s", from, req.String()) - - if !req.StartingBlock.IsUint32() && !req.StartingBlock.IsHash() { - return nil, ErrInvalidBlockRequest - } - - encodedRequest, err := req.Encode() - if err != nil { - return nil, fmt.Errorf("encoding request: %w", err) - } - - encodedKey := bytes.Join([][]byte{[]byte(from.String()), encodedRequest}, nil) - requestHash, err := common.Blake2bHash(encodedKey) - if err != nil { - return nil, fmt.Errorf("hashing encoded block request sync message: %w", err) - } - - numOfRequests := s.seenBlockSyncRequests.Get(requestHash) - - if numOfRequests > maxNumberOfSameRequestPerPeer { - s.network.ReportPeer(peerset.ReputationChange{ - Value: peerset.SameBlockSyncRequest, - Reason: peerset.SameBlockSyncRequestReason, - }, from) - - logger.Debugf("max number of same request reached by: %s", from.String()) - return nil, fmt.Errorf("%w: %s", errMaxNumberOfSameRequest, from.String()) - } - - s.seenBlockSyncRequests.Put(requestHash, numOfRequests+1) - - switch req.Direction { - case network.Ascending: - return s.handleAscendingRequest(req) - case network.Descending: - return s.handleDescendingRequest(req) - default: - return nil, errInvalidRequestDirection - } -} - -func (s *Service) handleAscendingRequest(req *network.BlockRequestMessage) (*network.BlockResponseMessage, error) { - var ( - max uint = network.MaxBlocksInResponse - startHash *common.Hash - startNumber uint - ) - - // determine maximum response size - if req.Max != nil && *req.Max < network.MaxBlocksInResponse { - max = uint(*req.Max) - } - - bestBlockNumber, err := s.blockState.BestBlockNumber() - if err != nil { - return nil, fmt.Errorf("getting best block for request: %w", err) - } - - switch startBlock := req.StartingBlock.Value().(type) { - case uint32: - if startBlock == 0 { - startBlock = 1 - } - - // if request start is higher than our best block, return error - if bestBlockNumber < uint(startBlock) { - return nil, errRequestStartTooHigh - } - - startNumber = uint(startBlock) - case common.Hash: - startHash = &startBlock - - // make sure we actually have the starting block - header, err := s.blockState.GetHeader(*startHash) - if err != nil { - return nil, fmt.Errorf("failed to get start block %s for request: %w", startHash, err) - } - - startNumber = header.Number - default: - return nil, ErrInvalidBlockRequest - } - - endNumber := startNumber + max - 1 - if endNumber > bestBlockNumber { - endNumber = bestBlockNumber - } - - var endHash *common.Hash - if startHash != nil { - eh, err := s.checkOrGetDescendantHash(*startHash, nil, endNumber) - if err != nil { - return nil, err - } - - endHash = &eh - } - - if startHash == nil { - 
logger.Debugf("handling block request: direction %s, "+ - "start block number: %d, "+ - "end block number: %d", - req.Direction, startNumber, endNumber) - - return s.handleAscendingByNumber(startNumber, endNumber, req.RequestedData) - } - - logger.Debugf("handling block request: direction %s, "+ - "start block hash: %s, "+ - "end block hash: %s", - req.Direction, *startHash, *endHash) - - return s.handleChainByHash(*startHash, *endHash, max, req.RequestedData, req.Direction) -} - -func (s *Service) handleDescendingRequest(req *network.BlockRequestMessage) (*network.BlockResponseMessage, error) { - var ( - startHash *common.Hash - startNumber uint - max uint = network.MaxBlocksInResponse - ) - - // determine maximum response size - if req.Max != nil && *req.Max < network.MaxBlocksInResponse { - max = uint(*req.Max) - } - - switch startBlock := req.StartingBlock.Value().(type) { - case uint32: - bestBlockNumber, err := s.blockState.BestBlockNumber() - if err != nil { - return nil, fmt.Errorf("failed to get best block %d for request: %w", bestBlockNumber, err) - } - - // if request start is higher than our best block, only return blocks from our best block and below - if bestBlockNumber < uint(startBlock) { - startNumber = bestBlockNumber - } else { - startNumber = uint(startBlock) - } - case common.Hash: - startHash = &startBlock - - // make sure we actually have the starting block - header, err := s.blockState.GetHeader(*startHash) - if err != nil { - return nil, fmt.Errorf("failed to get start block %s for request: %w", startHash, err) - } - - startNumber = header.Number - default: - return nil, ErrInvalidBlockRequest - } - - endNumber := uint(1) - if startNumber > max+1 { - endNumber = startNumber - max + 1 - } - - var endHash *common.Hash - if startHash != nil { - // need to get blocks by subchain if start hash is provided, get end hash - endHeader, err := s.blockState.GetHeaderByNumber(endNumber) - if err != nil { - return nil, fmt.Errorf("getting end block %d for request: %w", endNumber, err) - } - - hash := endHeader.Hash() - endHash = &hash - } - - if startHash == nil || endHash == nil { - logger.Debugf("handling BlockRequestMessage with direction %s "+ - "from start block with number %d to end block with number %d", - req.Direction, startNumber, endNumber) - return s.handleDescendingByNumber(startNumber, endNumber, req.RequestedData) - } - - logger.Debugf("handling block request message with direction %s "+ - "from start block with hash %s to end block with hash %s", - req.Direction, *startHash, *endHash) - return s.handleChainByHash(*endHash, *startHash, max, req.RequestedData, req.Direction) -} - -// checkOrGetDescendantHash checks if the provided `descendant` is -// on the same chain as the `ancestor`, if it's provided, otherwise -// it sets `descendant` to a block with number=`descendantNumber` that is a descendant of the ancestor. 
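-// For example (hypothetical chain): with ancestor = the canonical block 10,
-// descendant = nil and descendantNumber = 12, the canonical hash at number 12
-// is returned if it descends from block 10; otherwise every block at number 12
-// is scanned for one that does, and errFailedToGetDescendant is returned when
-// none matches.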
-// If used with an Ascending request, ancestor is the start block and descendant is the end block.
-// If used with a Descending request, ancestor is the end block and descendant is the start block.
-func (s *Service) checkOrGetDescendantHash(ancestor common.Hash,
-	descendant *common.Hash, descendantNumber uint) (common.Hash, error) {
-	// if `descendant` was provided, check that it's a descendant of `ancestor`
-	if descendant != nil {
-		header, err := s.blockState.GetHeader(ancestor)
-		if err != nil {
-			return common.Hash{}, fmt.Errorf("failed to get ancestor %s: %w", ancestor, err)
-		}
-
-		// if descendant number is lower than ancestor number, this is an error
-		if header.Number > descendantNumber {
-			return common.Hash{},
-				fmt.Errorf("invalid request, descendant number %d is lower than ancestor %d",
-					descendantNumber, header.Number)
-		}
-
-		// check that the provided descendant hash is actually a descendant of the ancestor
-		is, err := s.blockState.IsDescendantOf(ancestor, *descendant)
-		if err != nil {
-			return common.Hash{}, err
-		}
-
-		if !is {
-			return common.Hash{}, errStartAndEndMismatch
-		}
-
-		return *descendant, nil
-	}
-
-	// otherwise, get block on canonical chain by descendantNumber
-	hash, err := s.blockState.GetHashByNumber(descendantNumber)
-	if err != nil {
-		return common.Hash{}, err
-	}
-
-	// check if it's a descendant of the provided ancestor hash
-	is, err := s.blockState.IsDescendantOf(ancestor, hash)
-	if err != nil {
-		return common.Hash{}, err
-	}
-
-	if !is {
-		// if it's not a descendant, search for a block that has number=descendantNumber that is
-		hashes, err := s.blockState.GetAllBlocksAtNumber(descendantNumber)
-		if err != nil {
-			return common.Hash{}, fmt.Errorf("failed to get blocks at number %d: %w", descendantNumber, err)
-		}
-
-		for _, hash := range hashes {
-			is, err := s.blockState.IsDescendantOf(ancestor, hash)
-			if err != nil || !is {
-				continue
-			}
-
-			// this sets the descendant hash to the first block we find with descendantNumber;
-			// however, there might be multiple blocks that fit this criteria
-			h := common.Hash{}
-			copy(h[:], hash[:])
-			descendant = &h
-			break
-		}
-
-		if descendant == nil {
-			return common.Hash{}, fmt.Errorf("%w with number %d", errFailedToGetDescendant, descendantNumber)
-		}
-	} else {
-		// if it is, set descendant hash to our block w/ descendantNumber
-		descendant = &hash
-	}
-
-	logger.Tracef("determined descendant %s with number %d and ancestor %s",
-		*descendant, descendantNumber, ancestor)
-	return *descendant, nil
-}
-
-func (s *Service) handleAscendingByNumber(start, end uint,
-	requestedData byte) (*network.BlockResponseMessage, error) {
-	var err error
-	data := make([]*types.BlockData, (end-start)+1)
-
-	for i := uint(0); start+i <= end; i++ {
-		blockNumber := start + i
-		data[i], err = s.getBlockDataByNumber(blockNumber, requestedData)
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	return &network.BlockResponseMessage{
-		BlockData: data,
-	}, nil
-}
-
-func (s *Service) handleDescendingByNumber(start, end uint,
-	requestedData byte) (*network.BlockResponseMessage, error) {
-	var err error
-	data := make([]*types.BlockData, (start-end)+1)
-
-	for i := uint(0); start-i >= end; i++ {
-		blockNumber := start - i
-		data[i], err = s.getBlockDataByNumber(blockNumber, requestedData)
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	return &network.BlockResponseMessage{
-		BlockData: data,
-	}, nil
-}
-
-func (s *Service) handleChainByHash(ancestor, descendant common.Hash,
-	max uint, requestedData byte,
direction network.SyncDirection) ( - *network.BlockResponseMessage, error) { - subchain, err := s.blockState.Range(ancestor, descendant) - if err != nil { - return nil, fmt.Errorf("retrieving range: %w", err) - } - - // If the direction is descending, prune from the start. - // if the direction is ascending it should prune from the end. - if uint(len(subchain)) > max { - if direction == network.Ascending { - subchain = subchain[:max] - } else { - subchain = subchain[uint(len(subchain))-max:] - } - } - - data := make([]*types.BlockData, len(subchain)) - - for i, hash := range subchain { - data[i], err = s.getBlockData(hash, requestedData) - if err != nil { - return nil, err - } - } - - // reverse BlockData, if descending request - if direction == network.Descending { - reverseBlockData(data) - } - - return &network.BlockResponseMessage{ - BlockData: data, - }, nil -} - -func (s *Service) getBlockDataByNumber(num uint, requestedData byte) (*types.BlockData, error) { - hash, err := s.blockState.GetHashByNumber(num) - if err != nil { - return nil, err - } - - return s.getBlockData(hash, requestedData) -} - -func (s *Service) getBlockData(hash common.Hash, requestedData byte) (*types.BlockData, error) { - var err error - blockData := &types.BlockData{ - Hash: hash, - } - - if requestedData == 0 { - return blockData, nil - } - - if (requestedData & network.RequestedDataHeader) == 1 { - blockData.Header, err = s.blockState.GetHeader(hash) - if err != nil { - logger.Debugf("failed to get header for block with hash %s: %s", hash, err) - } - } - - if (requestedData&network.RequestedDataBody)>>1 == 1 { - blockData.Body, err = s.blockState.GetBlockBody(hash) - if err != nil { - logger.Debugf("failed to get body for block with hash %s: %s", hash, err) - } - } - - if (requestedData&network.RequestedDataReceipt)>>2 == 1 { - retData, err := s.blockState.GetReceipt(hash) - if err == nil && retData != nil { - blockData.Receipt = &retData - } - } - - if (requestedData&network.RequestedDataMessageQueue)>>3 == 1 { - retData, err := s.blockState.GetMessageQueue(hash) - if err == nil && retData != nil { - blockData.MessageQueue = &retData - } - } - - if (requestedData&network.RequestedDataJustification)>>4 == 1 { - retData, err := s.blockState.GetJustification(hash) - if err == nil && retData != nil { - blockData.Justification = &retData - } - } - - return blockData, nil -} diff --git a/dot/sync/message_integration_test.go b/dot/sync/message_integration_test.go deleted file mode 100644 index 317a9aff05..0000000000 --- a/dot/sync/message_integration_test.go +++ /dev/null @@ -1,468 +0,0 @@ -//go:build integration - -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "testing" - - "github.com/ChainSafe/gossamer/dot/network" - "github.com/ChainSafe/gossamer/dot/state" - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/common/variadic" - "github.com/ChainSafe/gossamer/pkg/trie" - "github.com/libp2p/go-libp2p/core/peer" - - "github.com/stretchr/testify/require" -) - -func addTestBlocksToState(t *testing.T, depth uint, blockState BlockState) { - previousHash := blockState.(*state.BlockState).BestBlockHash() - previousNum, err := blockState.BestBlockNumber() - require.NoError(t, err) - - digest := types.NewDigest() - prd, err := types.NewBabeSecondaryPlainPreDigest(0, 1).ToPreRuntimeDigest() - require.NoError(t, err) - err = digest.Add(*prd) - require.NoError(t, err) - - for i := uint(1); i <= depth; i++ { - block := 
&types.Block{ - Header: types.Header{ - ParentHash: previousHash, - Number: previousNum + i, - StateRoot: trie.EmptyHash, - Digest: digest, - }, - Body: types.Body{}, - } - - previousHash = block.Header.Hash() - - err := blockState.(*state.BlockState).AddBlock(block) - require.NoError(t, err) - } -} - -func TestService_CreateBlockResponse_MaxSize(t *testing.T) { - s := newTestSyncer(t) - addTestBlocksToState(t, network.MaxBlocksInResponse*2, s.blockState) - - // test ascending - start, err := variadic.NewUint32OrHash(1) - require.NoError(t, err) - - req := &network.BlockRequestMessage{ - RequestedData: 3, - StartingBlock: *start, - Direction: network.Ascending, - Max: nil, - } - - resp, err := s.CreateBlockResponse(peer.ID("alice"), req) - require.NoError(t, err) - require.Equal(t, int(network.MaxBlocksInResponse), len(resp.BlockData)) - require.Equal(t, uint(1), resp.BlockData[0].Number()) - require.Equal(t, uint(128), resp.BlockData[127].Number()) - - max := uint32(network.MaxBlocksInResponse + 100) - req = &network.BlockRequestMessage{ - RequestedData: 3, - StartingBlock: *start, - Direction: network.Ascending, - Max: &max, - } - - resp, err = s.CreateBlockResponse(peer.ID("alice"), req) - require.NoError(t, err) - require.Equal(t, int(network.MaxBlocksInResponse), len(resp.BlockData)) - require.Equal(t, uint(1), resp.BlockData[0].Number()) - require.Equal(t, uint(128), resp.BlockData[127].Number()) - - max = uint32(16) - req = &network.BlockRequestMessage{ - RequestedData: 3, - StartingBlock: *start, - Direction: network.Ascending, - Max: &max, - } - - resp, err = s.CreateBlockResponse(peer.ID("alice"), req) - require.NoError(t, err) - require.Equal(t, int(max), len(resp.BlockData)) - require.Equal(t, uint(1), resp.BlockData[0].Number()) - require.Equal(t, uint(16), resp.BlockData[15].Number()) - - // test descending - start, err = variadic.NewUint32OrHash(uint32(128)) - require.NoError(t, err) - - req = &network.BlockRequestMessage{ - RequestedData: 3, - StartingBlock: *start, - Direction: network.Descending, - Max: nil, - } - - resp, err = s.CreateBlockResponse(peer.ID("alice"), req) - require.NoError(t, err) - require.Equal(t, int(network.MaxBlocksInResponse), len(resp.BlockData)) - require.Equal(t, uint(128), resp.BlockData[0].Number()) - require.Equal(t, uint(1), resp.BlockData[127].Number()) - - max = uint32(network.MaxBlocksInResponse + 100) - start, err = variadic.NewUint32OrHash(uint32(256)) - require.NoError(t, err) - - req = &network.BlockRequestMessage{ - RequestedData: 3, - StartingBlock: *start, - Direction: network.Descending, - Max: &max, - } - - resp, err = s.CreateBlockResponse(peer.ID("alice"), req) - require.NoError(t, err) - require.Equal(t, int(network.MaxBlocksInResponse), len(resp.BlockData)) - require.Equal(t, uint(256), resp.BlockData[0].Number()) - require.Equal(t, uint(129), resp.BlockData[127].Number()) - - max = uint32(16) - req = &network.BlockRequestMessage{ - RequestedData: 3, - StartingBlock: *start, - Direction: network.Descending, - Max: &max, - } - - resp, err = s.CreateBlockResponse(peer.ID("alice"), req) - require.NoError(t, err) - require.Equal(t, int(max), len(resp.BlockData)) - require.Equal(t, uint(256), resp.BlockData[0].Number()) - require.Equal(t, uint(241), resp.BlockData[15].Number()) -} - -func TestService_CreateBlockResponse_StartHash(t *testing.T) { - s := newTestSyncer(t) - addTestBlocksToState(t, uint(network.MaxBlocksInResponse*2), s.blockState) - - // test ascending with nil endBlockHash - startHash, err := 
s.blockState.GetHashByNumber(1) - require.NoError(t, err) - - start, err := variadic.NewUint32OrHash(startHash) - require.NoError(t, err) - - req := &network.BlockRequestMessage{ - RequestedData: 3, - StartingBlock: *start, - Direction: network.Ascending, - Max: nil, - } - - resp, err := s.CreateBlockResponse(peer.ID("alice"), req) - require.NoError(t, err) - require.Equal(t, int(network.MaxBlocksInResponse), len(resp.BlockData)) - require.Equal(t, uint(1), resp.BlockData[0].Number()) - require.Equal(t, uint(128), resp.BlockData[127].Number()) - - // test descending with nil endBlockHash - startHash, err = s.blockState.GetHashByNumber(16) - require.NoError(t, err) - - start, err = variadic.NewUint32OrHash(startHash) - require.NoError(t, err) - - req = &network.BlockRequestMessage{ - RequestedData: 3, - StartingBlock: *start, - Direction: network.Descending, - Max: nil, - } - - resp, err = s.CreateBlockResponse(peer.ID("alice"), req) - require.NoError(t, err) - require.Equal(t, int(16), len(resp.BlockData)) - require.Equal(t, uint(16), resp.BlockData[0].Number()) - require.Equal(t, uint(1), resp.BlockData[15].Number()) - - req = &network.BlockRequestMessage{ - RequestedData: 3, - StartingBlock: *start, - Direction: network.Descending, - Max: nil, - } - - resp, err = s.CreateBlockResponse(peer.ID("alice"), req) - require.NoError(t, err) - require.Equal(t, int(16), len(resp.BlockData)) - require.Equal(t, uint(16), resp.BlockData[0].Number()) - require.Equal(t, uint(1), resp.BlockData[15].Number()) - - // test descending with nil endBlockHash and start > network.MaxBlocksInResponse - startHash, err = s.blockState.GetHashByNumber(256) - require.NoError(t, err) - - start, err = variadic.NewUint32OrHash(startHash) - require.NoError(t, err) - - req = &network.BlockRequestMessage{ - RequestedData: 3, - StartingBlock: *start, - Direction: network.Descending, - Max: nil, - } - - resp, err = s.CreateBlockResponse(peer.ID("alice"), req) - require.NoError(t, err) - require.Equal(t, int(network.MaxBlocksInResponse), len(resp.BlockData)) - require.Equal(t, uint(256), resp.BlockData[0].Number()) - require.Equal(t, uint(129), resp.BlockData[127].Number()) - - startHash, err = s.blockState.GetHashByNumber(128) - require.NoError(t, err) - - start, err = variadic.NewUint32OrHash(startHash) - require.NoError(t, err) - - req = &network.BlockRequestMessage{ - RequestedData: 3, - StartingBlock: *start, - Direction: network.Descending, - Max: nil, - } - - resp, err = s.CreateBlockResponse(peer.ID("alice"), req) - require.NoError(t, err) - require.Equal(t, network.MaxBlocksInResponse, len(resp.BlockData)) - require.Equal(t, uint(128), resp.BlockData[0].Number()) - require.Equal(t, uint(1), resp.BlockData[127].Number()) -} - -func TestService_checkOrGetDescendantHash_integration(t *testing.T) { - t.Parallel() - s := newTestSyncer(t) - branches := map[uint]int{ - 8: 1, - } - state.AddBlocksToStateWithFixedBranches(t, s.blockState.(*state.BlockState), 16, branches) - - // base case - ancestor, err := s.blockState.GetHashByNumber(1) - require.NoError(t, err) - descendant, err := s.blockState.GetHashByNumber(16) - require.NoError(t, err) - const descendantNumber uint = 16 - - res, err := s.checkOrGetDescendantHash(ancestor, &descendant, descendantNumber) - require.NoError(t, err) - require.Equal(t, descendant, res) - - // supply descendant that's not on canonical chain - leaves := s.blockState.(*state.BlockState).Leaves() - require.Equal(t, 2, len(leaves)) - - ancestor, err = s.blockState.GetHashByNumber(1) - 
require.NoError(t, err) - descendant, err = s.blockState.GetHashByNumber(descendantNumber) - require.NoError(t, err) - - for _, leaf := range leaves { - if leaf != descendant { - descendant = leaf - break - } - } - - res, err = s.checkOrGetDescendantHash(ancestor, &descendant, descendantNumber) - require.NoError(t, err) - require.Equal(t, descendant, res) - - // supply descedant that's not on same chain as ancestor - ancestor, err = s.blockState.GetHashByNumber(9) - require.NoError(t, err) - _, err = s.checkOrGetDescendantHash(ancestor, &descendant, descendantNumber) - require.Error(t, err) - - // don't supply descendant, should return block on canonical chain - // as ancestor is on canonical chain - expected, err := s.blockState.GetHashByNumber(descendantNumber) - require.NoError(t, err) - - res, err = s.checkOrGetDescendantHash(ancestor, nil, descendantNumber) - require.NoError(t, err) - require.Equal(t, expected, res) - - // don't supply descendant and provide ancestor not on canonical chain - // should return descendant block also not on canonical chain - block9s, err := s.blockState.GetAllBlocksAtNumber(9) - require.NoError(t, err) - canonical, err := s.blockState.GetHashByNumber(9) - require.NoError(t, err) - - // set ancestor to non-canonical block 9 - for _, block := range block9s { - if canonical != block { - ancestor = block - break - } - } - - // expected is non-canonical block 16 - for _, leaf := range leaves { - is, err := s.blockState.IsDescendantOf(ancestor, leaf) - require.NoError(t, err) - if is { - expected = leaf - break - } - } - - res, err = s.checkOrGetDescendantHash(ancestor, nil, descendantNumber) - require.NoError(t, err) - require.Equal(t, expected, res) -} - -func TestService_CreateBlockResponse_Fields(t *testing.T) { - s := newTestSyncer(t) - addTestBlocksToState(t, 2, s.blockState) - - bestHash := s.blockState.(*state.BlockState).BestBlockHash() - bestBlock, err := s.blockState.(*state.BlockState).GetBlockByNumber(1) - require.NoError(t, err) - - // set some nils and check no error is thrown - bds := &types.BlockData{ - Hash: bestHash, - Header: nil, - Receipt: nil, - MessageQueue: nil, - Justification: nil, - } - err = s.blockState.CompareAndSetBlockData(bds) - require.NoError(t, err) - - // set receipt message and justification - a := []byte("asdf") - b := []byte("ghjkl") - c := []byte("qwerty") - bds = &types.BlockData{ - Hash: bestHash, - Receipt: &a, - MessageQueue: &b, - Justification: &c, - } - - start, err := variadic.NewUint32OrHash(uint32(1)) - require.NoError(t, err) - - err = s.blockState.CompareAndSetBlockData(bds) - require.NoError(t, err) - - testCases := []struct { - description string - value *network.BlockRequestMessage - expectedMsgValue *network.BlockResponseMessage - }{ - { - description: "test get Header and Body", - value: &network.BlockRequestMessage{ - RequestedData: 3, - StartingBlock: *start, - Direction: network.Ascending, - Max: nil, - }, - expectedMsgValue: &network.BlockResponseMessage{ - BlockData: []*types.BlockData{ - { - Hash: bestHash, - Header: &bestBlock.Header, - Body: &bestBlock.Body, - }, - }, - }, - }, - { - description: "test get Header", - value: &network.BlockRequestMessage{ - RequestedData: 1, - StartingBlock: *start, - Direction: network.Ascending, - Max: nil, - }, - expectedMsgValue: &network.BlockResponseMessage{ - BlockData: []*types.BlockData{ - { - Hash: bestHash, - Header: &bestBlock.Header, - Body: nil, - }, - }, - }, - }, - { - description: "test get Receipt", - value: &network.BlockRequestMessage{ - 
RequestedData: 4, - StartingBlock: *start, - Direction: network.Ascending, - Max: nil, - }, - expectedMsgValue: &network.BlockResponseMessage{ - BlockData: []*types.BlockData{ - { - Hash: bestHash, - Header: nil, - Body: nil, - Receipt: bds.Receipt, - }, - }, - }, - }, - { - description: "test get MessageQueue", - value: &network.BlockRequestMessage{ - RequestedData: 8, - StartingBlock: *start, - Direction: network.Ascending, - Max: nil, - }, - expectedMsgValue: &network.BlockResponseMessage{ - BlockData: []*types.BlockData{ - { - Hash: bestHash, - Header: nil, - Body: nil, - MessageQueue: bds.MessageQueue, - }, - }, - }, - }, - } - - for _, test := range testCases { - t.Run(test.description, func(t *testing.T) { - resp, err := s.CreateBlockResponse(peer.ID("alice"), test.value) - require.NoError(t, err) - require.Len(t, resp.BlockData, 2) - require.Equal(t, test.expectedMsgValue.BlockData[0].Hash, bestHash) - require.Equal(t, test.expectedMsgValue.BlockData[0].Header, resp.BlockData[0].Header) - require.Equal(t, test.expectedMsgValue.BlockData[0].Body, resp.BlockData[0].Body) - - if test.expectedMsgValue.BlockData[0].Receipt != nil { - require.Equal(t, test.expectedMsgValue.BlockData[0].Receipt, resp.BlockData[1].Receipt) - } - - if test.expectedMsgValue.BlockData[0].MessageQueue != nil { - require.Equal(t, test.expectedMsgValue.BlockData[0].MessageQueue, resp.BlockData[1].MessageQueue) - } - - if test.expectedMsgValue.BlockData[0].Justification != nil { - require.Equal(t, test.expectedMsgValue.BlockData[0].Justification, resp.BlockData[1].Justification) - } - }) - } -} diff --git a/dot/sync/message_test.go b/dot/sync/message_test.go deleted file mode 100644 index 0559283ca4..0000000000 --- a/dot/sync/message_test.go +++ /dev/null @@ -1,388 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "errors" - "testing" - - "github.com/ChainSafe/gossamer/dot/network" - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/common" - "github.com/ChainSafe/gossamer/lib/common/variadic" - lrucache "github.com/ChainSafe/gossamer/lib/utils/lru-cache" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/stretchr/testify/assert" - "go.uber.org/mock/gomock" -) - -func TestService_CreateBlockResponse(t *testing.T) { - t.Parallel() - - type args struct { - req *network.BlockRequestMessage - } - tests := map[string]struct { - blockStateBuilder func(ctrl *gomock.Controller) BlockState - args args - want *network.BlockResponseMessage - err error - }{ - "invalid_block_request": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - return mockBlockState - }, - args: args{req: &network.BlockRequestMessage{}}, - err: ErrInvalidBlockRequest, - }, - "ascending_request_nil_startHash": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().BestBlockNumber().Return(uint(1), nil) - mockBlockState.EXPECT().GetHashByNumber(uint(1)).Return(common.Hash{1, 2}, nil) - return mockBlockState - }, - args: args{req: &network.BlockRequestMessage{ - StartingBlock: *variadic.MustNewUint32OrHash(uint32(0)), - Direction: network.Ascending, - }}, - want: &network.BlockResponseMessage{BlockData: []*types.BlockData{{ - Hash: common.Hash{1, 2}, - }}}, - }, - "ascending_request_start_number_higher": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := 
NewMockBlockState(ctrl) - mockBlockState.EXPECT().BestBlockNumber().Return(uint(1), nil) - return mockBlockState - }, - args: args{req: &network.BlockRequestMessage{ - StartingBlock: *variadic.MustNewUint32OrHash(2), - Direction: network.Ascending, - }}, - err: errRequestStartTooHigh, - want: nil, - }, - "descending_request_nil_startHash": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().BestBlockNumber().Return(uint(1), nil) - return mockBlockState - }, - args: args{req: &network.BlockRequestMessage{ - StartingBlock: *variadic.MustNewUint32OrHash(0), - Direction: network.Descending, - }}, - want: &network.BlockResponseMessage{BlockData: []*types.BlockData{}}, - }, - "descending_request_start_number_higher": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().BestBlockNumber().Return(uint(1), nil) - mockBlockState.EXPECT().GetHashByNumber(uint(1)).Return(common.Hash{1, 2}, nil) - return mockBlockState - }, - args: args{req: &network.BlockRequestMessage{ - StartingBlock: *variadic.MustNewUint32OrHash(2), - Direction: network.Descending, - }}, - err: nil, - want: &network.BlockResponseMessage{BlockData: []*types.BlockData{{ - Hash: common.Hash{1, 2}, - }}}, - }, - "ascending_request_startHash": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(&types.Header{ - Number: 1, - }, nil) - mockBlockState.EXPECT().BestBlockNumber().Return(uint(2), nil) - mockBlockState.EXPECT().GetHashByNumber(uint(2)).Return(common.Hash{1, 2, 3}, nil) - mockBlockState.EXPECT().IsDescendantOf(common.Hash{}, common.Hash{1, 2, 3}).Return(true, - nil) - mockBlockState.EXPECT().Range(common.Hash{}, common.Hash{1, 2, 3}).Return([]common.Hash{{1, - 2}}, - nil) - return mockBlockState - }, - args: args{req: &network.BlockRequestMessage{ - StartingBlock: *variadic.MustNewUint32OrHash(common.Hash{}), - Direction: network.Ascending, - }}, - want: &network.BlockResponseMessage{BlockData: []*types.BlockData{{ - Hash: common.Hash{1, 2}, - }}}, - }, - "descending_request_startHash": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(&types.Header{ - Number: 1, - }, nil) - mockBlockState.EXPECT().GetHeaderByNumber(uint(1)).Return(&types.Header{ - Number: 1, - }, nil) - mockBlockState.EXPECT().Range(common.MustHexToHash( - "0x6443a0b46e0412e626363028115a9f2cf963eeed526b8b33e5316f08b50d0dc3"), - common.Hash{}).Return([]common.Hash{{1, 2}}, nil) - return mockBlockState - }, - args: args{req: &network.BlockRequestMessage{ - StartingBlock: *variadic.MustNewUint32OrHash(common.Hash{}), - Direction: network.Descending, - }}, - want: &network.BlockResponseMessage{BlockData: []*types.BlockData{{ - Hash: common.Hash{1, 2}, - }}}, - }, - "invalid_direction": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - return nil - }, - args: args{ - req: &network.BlockRequestMessage{ - StartingBlock: *variadic.MustNewUint32OrHash(common.Hash{}), - Direction: network.SyncDirection(3), - }}, - err: errInvalidRequestDirection, - }, - } - for name, tt := range tests { - tt := tt - t.Run(name, func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - s := &Service{ - blockState: tt.blockStateBuilder(ctrl), - 
seenBlockSyncRequests: lrucache.NewLRUCache[common.Hash, uint](100), - } - got, err := s.CreateBlockResponse(peer.ID("alice"), tt.args.req) - if tt.err != nil { - assert.EqualError(t, err, tt.err.Error()) - } else { - assert.NoError(t, err) - } - assert.Equal(t, tt.want, got) - }) - } -} - -func TestService_checkOrGetDescendantHash(t *testing.T) { - t.Parallel() - - type args struct { - ancestor common.Hash - descendant *common.Hash - descendantNumber uint - } - tests := map[string]struct { - name string - blockStateBuilder func(ctrl *gomock.Controller) BlockState - args args - want common.Hash - expectedError error - }{ - "nil_descendant": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockStateBuilder := NewMockBlockState(ctrl) - mockStateBuilder.EXPECT().GetHashByNumber(uint(1)).Return(common.Hash{}, nil) - mockStateBuilder.EXPECT().IsDescendantOf(common.Hash{}, common.Hash{}).Return(true, nil) - return mockStateBuilder - }, - args: args{ancestor: common.Hash{}, descendant: nil, descendantNumber: 1}, - }, - "not_nil_descendant": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(&types.Header{}, nil) - mockBlockState.EXPECT().IsDescendantOf(common.Hash{}, common.Hash{1, 2}).Return(true, nil) - return mockBlockState - }, - args: args{ancestor: common.Hash{0}, descendant: &common.Hash{1, 2}, descendantNumber: 1}, - want: common.Hash{1, 2}, - }, - "descendant_greater_than_header": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetHeader(common.Hash{2}).Return(&types.Header{ - Number: 2, - }, nil) - return mockBlockState - }, - args: args{ancestor: common.Hash{2}, descendant: &common.Hash{1, 2}, descendantNumber: 1}, - want: common.Hash{}, - expectedError: errors.New("invalid request, descendant number 1 is lower than ancestor 2"), - }, - } - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - s := &Service{ - blockState: tt.blockStateBuilder(ctrl), - } - got, err := s.checkOrGetDescendantHash(tt.args.ancestor, tt.args.descendant, tt.args.descendantNumber) - if tt.expectedError != nil { - assert.EqualError(t, err, tt.expectedError.Error()) - } else { - assert.NoError(t, err) - } - assert.Equal(t, tt.want, got) - }) - } -} - -func TestService_getBlockData(t *testing.T) { - t.Parallel() - - type args struct { - hash common.Hash - requestedData byte - } - tests := map[string]struct { - blockStateBuilder func(ctrl *gomock.Controller) BlockState - args args - want *types.BlockData - err error - }{ - "requestedData_0": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - return nil - }, - args: args{ - hash: common.Hash{}, - requestedData: 0, - }, - want: &types.BlockData{}, - }, - "requestedData_RequestedDataHeader_error": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(nil, errors.New("empty hash")) - return mockBlockState - }, - args: args{ - hash: common.Hash{0}, - requestedData: network.RequestedDataHeader, - }, - want: &types.BlockData{ - Hash: common.Hash{}, - }, - }, - "requestedData_RequestedDataHeader": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - 
mockBlockState.EXPECT().GetHeader(common.Hash{1}).Return(&types.Header{ - Number: 2, - }, nil) - return mockBlockState - }, - args: args{ - hash: common.Hash{1}, - requestedData: network.RequestedDataHeader, - }, - want: &types.BlockData{ - Hash: common.Hash{1}, - Header: &types.Header{ - Number: 2, - }, - }, - }, - "requestedData_RequestedDataBody_error": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetBlockBody(common.Hash{}).Return(nil, errors.New("empty hash")) - return mockBlockState - }, - - args: args{ - hash: common.Hash{}, - requestedData: network.RequestedDataBody, - }, - want: &types.BlockData{ - Hash: common.Hash{}, - }, - }, - "requestedData_RequestedDataBody": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetBlockBody(common.Hash{1}).Return(&types.Body{[]byte{1}}, nil) - return mockBlockState - }, - args: args{ - hash: common.Hash{1}, - requestedData: network.RequestedDataBody, - }, - want: &types.BlockData{ - Hash: common.Hash{1}, - Body: &types.Body{[]byte{1}}, - }, - }, - "requestedData_RequestedDataReceipt": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetReceipt(common.Hash{1}).Return([]byte{1}, nil) - return mockBlockState - }, - args: args{ - hash: common.Hash{1}, - requestedData: network.RequestedDataReceipt, - }, - want: &types.BlockData{ - Hash: common.Hash{1}, - Receipt: &[]byte{1}, - }, - }, - "requestedData_RequestedDataMessageQueue": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetMessageQueue(common.Hash{2}).Return([]byte{2}, nil) - return mockBlockState - }, - args: args{ - hash: common.Hash{2}, - requestedData: network.RequestedDataMessageQueue, - }, - want: &types.BlockData{ - Hash: common.Hash{2}, - MessageQueue: &[]byte{2}, - }, - }, - "requestedData_RequestedDataJustification": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetJustification(common.Hash{3}).Return([]byte{3}, nil) - return mockBlockState - }, - args: args{ - hash: common.Hash{3}, - requestedData: network.RequestedDataJustification, - }, - want: &types.BlockData{ - Hash: common.Hash{3}, - Justification: &[]byte{3}, - }, - }, - } - for name, tt := range tests { - tt := tt - t.Run(name, func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - s := &Service{ - blockState: tt.blockStateBuilder(ctrl), - } - got, err := s.getBlockData(tt.args.hash, tt.args.requestedData) - if tt.err != nil { - assert.EqualError(t, err, tt.err.Error()) - } else { - assert.NoError(t, err) - } - assert.Equal(t, tt.want, got) - }) - } -} diff --git a/dot/sync/mock_chain_sync_test.go b/dot/sync/mock_chain_sync_test.go deleted file mode 100644 index b77771f18d..0000000000 --- a/dot/sync/mock_chain_sync_test.go +++ /dev/null @@ -1,124 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: chain_sync.go -// -// Generated by this command: -// -// mockgen -destination=mock_chain_sync_test.go -package sync -source chain_sync.go . ChainSync -// - -// Package sync is a generated GoMock package. 
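The deleted tests above all share one table-driven shape: each case is keyed by its subtest name and supplies a blockStateBuilder that wires gomock expectations onto a fresh mock, so expectations stay local to the case. A minimal sketch of that shape, assuming the package-local BlockState interface and its generated NewMockBlockState constructor are available; the single case here is illustrative only:

package sync

import (
	"testing"

	"github.com/stretchr/testify/assert"
	gomock "go.uber.org/mock/gomock"
)

func TestBlockStateBuilderPattern(t *testing.T) {
	t.Parallel()

	tests := map[string]struct {
		blockStateBuilder func(ctrl *gomock.Controller) BlockState
		want              uint
	}{
		// The map key doubles as the subtest name passed to t.Run below.
		"best_block_is_one": {
			blockStateBuilder: func(ctrl *gomock.Controller) BlockState {
				m := NewMockBlockState(ctrl)
				m.EXPECT().BestBlockNumber().Return(uint(1), nil)
				return m
			},
			want: 1,
		},
	}

	for name, tt := range tests {
		tt := tt
		t.Run(name, func(t *testing.T) {
			t.Parallel()
			ctrl := gomock.NewController(t)
			got, err := tt.blockStateBuilder(ctrl).BestBlockNumber()
			assert.NoError(t, err)
			assert.Equal(t, tt.want, got)
		})
	}
}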
-package sync - -import ( - reflect "reflect" - - common "github.com/ChainSafe/gossamer/lib/common" - peer "github.com/libp2p/go-libp2p/core/peer" - gomock "go.uber.org/mock/gomock" -) - -// MockChainSync is a mock of ChainSync interface. -type MockChainSync struct { - ctrl *gomock.Controller - recorder *MockChainSyncMockRecorder -} - -// MockChainSyncMockRecorder is the mock recorder for MockChainSync. -type MockChainSyncMockRecorder struct { - mock *MockChainSync -} - -// NewMockChainSync creates a new mock instance. -func NewMockChainSync(ctrl *gomock.Controller) *MockChainSync { - mock := &MockChainSync{ctrl: ctrl} - mock.recorder = &MockChainSyncMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockChainSync) EXPECT() *MockChainSyncMockRecorder { - return m.recorder -} - -// getHighestBlock mocks base method. -func (m *MockChainSync) getHighestBlock() (uint, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "getHighestBlock") - ret0, _ := ret[0].(uint) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// getHighestBlock indicates an expected call of getHighestBlock. -func (mr *MockChainSyncMockRecorder) getHighestBlock() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getHighestBlock", reflect.TypeOf((*MockChainSync)(nil).getHighestBlock)) -} - -// getSyncMode mocks base method. -func (m *MockChainSync) getSyncMode() chainSyncState { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "getSyncMode") - ret0, _ := ret[0].(chainSyncState) - return ret0 -} - -// getSyncMode indicates an expected call of getSyncMode. -func (mr *MockChainSyncMockRecorder) getSyncMode() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getSyncMode", reflect.TypeOf((*MockChainSync)(nil).getSyncMode)) -} - -// onBlockAnnounce mocks base method. -func (m *MockChainSync) onBlockAnnounce(arg0 announcedBlock) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "onBlockAnnounce", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// onBlockAnnounce indicates an expected call of onBlockAnnounce. -func (mr *MockChainSyncMockRecorder) onBlockAnnounce(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "onBlockAnnounce", reflect.TypeOf((*MockChainSync)(nil).onBlockAnnounce), arg0) -} - -// onBlockAnnounceHandshake mocks base method. -func (m *MockChainSync) onBlockAnnounceHandshake(p peer.ID, hash common.Hash, number uint) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "onBlockAnnounceHandshake", p, hash, number) - ret0, _ := ret[0].(error) - return ret0 -} - -// onBlockAnnounceHandshake indicates an expected call of onBlockAnnounceHandshake. -func (mr *MockChainSyncMockRecorder) onBlockAnnounceHandshake(p, hash, number any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "onBlockAnnounceHandshake", reflect.TypeOf((*MockChainSync)(nil).onBlockAnnounceHandshake), p, hash, number) -} - -// start mocks base method. -func (m *MockChainSync) start() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "start") -} - -// start indicates an expected call of start. -func (mr *MockChainSyncMockRecorder) start() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "start", reflect.TypeOf((*MockChainSync)(nil).start)) -} - -// stop mocks base method. 
-func (m *MockChainSync) stop() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "stop") - ret0, _ := ret[0].(error) - return ret0 -} - -// stop indicates an expected call of stop. -func (mr *MockChainSyncMockRecorder) stop() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "stop", reflect.TypeOf((*MockChainSync)(nil).stop)) -} diff --git a/dot/sync/mock_disjoint_block_set_test.go b/dot/sync/mock_disjoint_block_set_test.go deleted file mode 100644 index 90c3884369..0000000000 --- a/dot/sync/mock_disjoint_block_set_test.go +++ /dev/null @@ -1,190 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ChainSafe/gossamer/dot/sync (interfaces: DisjointBlockSet) -// -// Generated by this command: -// -// mockgen -destination=mock_disjoint_block_set_test.go -package=sync . DisjointBlockSet -// - -// Package sync is a generated GoMock package. -package sync - -import ( - reflect "reflect" - sync0 "sync" - - types "github.com/ChainSafe/gossamer/dot/types" - common "github.com/ChainSafe/gossamer/lib/common" - gomock "go.uber.org/mock/gomock" -) - -// MockDisjointBlockSet is a mock of DisjointBlockSet interface. -type MockDisjointBlockSet struct { - ctrl *gomock.Controller - recorder *MockDisjointBlockSetMockRecorder -} - -// MockDisjointBlockSetMockRecorder is the mock recorder for MockDisjointBlockSet. -type MockDisjointBlockSetMockRecorder struct { - mock *MockDisjointBlockSet -} - -// NewMockDisjointBlockSet creates a new mock instance. -func NewMockDisjointBlockSet(ctrl *gomock.Controller) *MockDisjointBlockSet { - mock := &MockDisjointBlockSet{ctrl: ctrl} - mock.recorder = &MockDisjointBlockSetMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockDisjointBlockSet) EXPECT() *MockDisjointBlockSetMockRecorder { - return m.recorder -} - -// addBlock mocks base method. -func (m *MockDisjointBlockSet) addBlock(arg0 *types.Block) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "addBlock", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// addBlock indicates an expected call of addBlock. -func (mr *MockDisjointBlockSetMockRecorder) addBlock(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "addBlock", reflect.TypeOf((*MockDisjointBlockSet)(nil).addBlock), arg0) -} - -// addHashAndNumber mocks base method. -func (m *MockDisjointBlockSet) addHashAndNumber(arg0 common.Hash, arg1 uint) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "addHashAndNumber", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// addHashAndNumber indicates an expected call of addHashAndNumber. -func (mr *MockDisjointBlockSetMockRecorder) addHashAndNumber(arg0, arg1 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "addHashAndNumber", reflect.TypeOf((*MockDisjointBlockSet)(nil).addHashAndNumber), arg0, arg1) -} - -// addHeader mocks base method. -func (m *MockDisjointBlockSet) addHeader(arg0 *types.Header) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "addHeader", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// addHeader indicates an expected call of addHeader. -func (mr *MockDisjointBlockSetMockRecorder) addHeader(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "addHeader", reflect.TypeOf((*MockDisjointBlockSet)(nil).addHeader), arg0) -} - -// addJustification mocks base method. 
-func (m *MockDisjointBlockSet) addJustification(arg0 common.Hash, arg1 []byte) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "addJustification", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// addJustification indicates an expected call of addJustification. -func (mr *MockDisjointBlockSetMockRecorder) addJustification(arg0, arg1 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "addJustification", reflect.TypeOf((*MockDisjointBlockSet)(nil).addJustification), arg0, arg1) -} - -// getBlock mocks base method. -func (m *MockDisjointBlockSet) getBlock(arg0 common.Hash) *pendingBlock { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "getBlock", arg0) - ret0, _ := ret[0].(*pendingBlock) - return ret0 -} - -// getBlock indicates an expected call of getBlock. -func (mr *MockDisjointBlockSetMockRecorder) getBlock(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getBlock", reflect.TypeOf((*MockDisjointBlockSet)(nil).getBlock), arg0) -} - -// getBlocks mocks base method. -func (m *MockDisjointBlockSet) getBlocks() []*pendingBlock { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "getBlocks") - ret0, _ := ret[0].([]*pendingBlock) - return ret0 -} - -// getBlocks indicates an expected call of getBlocks. -func (mr *MockDisjointBlockSetMockRecorder) getBlocks() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getBlocks", reflect.TypeOf((*MockDisjointBlockSet)(nil).getBlocks)) -} - -// hasBlock mocks base method. -func (m *MockDisjointBlockSet) hasBlock(arg0 common.Hash) bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "hasBlock", arg0) - ret0, _ := ret[0].(bool) - return ret0 -} - -// hasBlock indicates an expected call of hasBlock. -func (mr *MockDisjointBlockSetMockRecorder) hasBlock(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "hasBlock", reflect.TypeOf((*MockDisjointBlockSet)(nil).hasBlock), arg0) -} - -// removeBlock mocks base method. -func (m *MockDisjointBlockSet) removeBlock(arg0 common.Hash) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "removeBlock", arg0) -} - -// removeBlock indicates an expected call of removeBlock. -func (mr *MockDisjointBlockSetMockRecorder) removeBlock(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "removeBlock", reflect.TypeOf((*MockDisjointBlockSet)(nil).removeBlock), arg0) -} - -// removeLowerBlocks mocks base method. -func (m *MockDisjointBlockSet) removeLowerBlocks(arg0 uint) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "removeLowerBlocks", arg0) -} - -// removeLowerBlocks indicates an expected call of removeLowerBlocks. -func (mr *MockDisjointBlockSetMockRecorder) removeLowerBlocks(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "removeLowerBlocks", reflect.TypeOf((*MockDisjointBlockSet)(nil).removeLowerBlocks), arg0) -} - -// run mocks base method. -func (m *MockDisjointBlockSet) run(arg0 <-chan *types.FinalisationInfo, arg1 <-chan struct{}, arg2 *sync0.WaitGroup) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "run", arg0, arg1, arg2) -} - -// run indicates an expected call of run. 
-func (mr *MockDisjointBlockSetMockRecorder) run(arg0, arg1, arg2 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "run", reflect.TypeOf((*MockDisjointBlockSet)(nil).run), arg0, arg1, arg2) -} - -// size mocks base method. -func (m *MockDisjointBlockSet) size() int { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "size") - ret0, _ := ret[0].(int) - return ret0 -} - -// size indicates an expected call of size. -func (mr *MockDisjointBlockSetMockRecorder) size() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "size", reflect.TypeOf((*MockDisjointBlockSet)(nil).size)) -} diff --git a/dot/sync/mock_request.go b/dot/sync/mock_request.go deleted file mode 100644 index c3ecda53ba..0000000000 --- a/dot/sync/mock_request.go +++ /dev/null @@ -1,55 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ChainSafe/gossamer/dot/network (interfaces: RequestMaker) -// -// Generated by this command: -// -// mockgen -destination=mock_request.go -package sync github.com/ChainSafe/gossamer/dot/network RequestMaker -// - -// Package sync is a generated GoMock package. -package sync - -import ( - reflect "reflect" - - network "github.com/ChainSafe/gossamer/dot/network" - peer "github.com/libp2p/go-libp2p/core/peer" - gomock "go.uber.org/mock/gomock" -) - -// MockRequestMaker is a mock of RequestMaker interface. -type MockRequestMaker struct { - ctrl *gomock.Controller - recorder *MockRequestMakerMockRecorder -} - -// MockRequestMakerMockRecorder is the mock recorder for MockRequestMaker. -type MockRequestMakerMockRecorder struct { - mock *MockRequestMaker -} - -// NewMockRequestMaker creates a new mock instance. -func NewMockRequestMaker(ctrl *gomock.Controller) *MockRequestMaker { - mock := &MockRequestMaker{ctrl: ctrl} - mock.recorder = &MockRequestMakerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockRequestMaker) EXPECT() *MockRequestMakerMockRecorder { - return m.recorder -} - -// Do mocks base method. -func (m *MockRequestMaker) Do(arg0 peer.ID, arg1 network.Message, arg2 network.ResponseMessage) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Do", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 -} - -// Do indicates an expected call of Do. -func (mr *MockRequestMakerMockRecorder) Do(arg0, arg1, arg2 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Do", reflect.TypeOf((*MockRequestMaker)(nil).Do), arg0, arg1, arg2) -} diff --git a/dot/sync/mock_runtime_test.go b/dot/sync/mock_runtime_test.go deleted file mode 100644 index 2678a50c0c..0000000000 --- a/dot/sync/mock_runtime_test.go +++ /dev/null @@ -1,439 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ChainSafe/gossamer/lib/runtime (interfaces: Instance) -// -// Generated by this command: -// -// mockgen -destination=mock_runtime_test.go -package sync github.com/ChainSafe/gossamer/lib/runtime Instance -// - -// Package sync is a generated GoMock package. 
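As a usage note on the generated mocks being removed: each mock pairs with a recorder, EXPECT() registers an expected call on that recorder, and the gomock controller verifies at cleanup that every registered call actually happened. A hedged sketch against the RequestMaker mock above; the peer ID and the nil message arguments are illustrative only:

package sync

import (
	"testing"

	"github.com/libp2p/go-libp2p/core/peer"
	gomock "go.uber.org/mock/gomock"
)

func TestRequestMakerExpectation(t *testing.T) {
	ctrl := gomock.NewController(t)
	requestMaker := NewMockRequestMaker(ctrl)

	// Record one expected Do call; gomock.Any() matches the request and
	// response message arguments regardless of value.
	requestMaker.EXPECT().
		Do(peer.ID("alice"), gomock.Any(), gomock.Any()).
		Return(nil)

	// The mocked Do returns the canned nil error; a missing or unmatched
	// call would fail the test through the controller.
	if err := requestMaker.Do(peer.ID("alice"), nil, nil); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}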
-package sync - -import ( - reflect "reflect" - - types "github.com/ChainSafe/gossamer/dot/types" - common "github.com/ChainSafe/gossamer/lib/common" - ed25519 "github.com/ChainSafe/gossamer/lib/crypto/ed25519" - keystore "github.com/ChainSafe/gossamer/lib/keystore" - runtime "github.com/ChainSafe/gossamer/lib/runtime" - transaction "github.com/ChainSafe/gossamer/lib/transaction" - gomock "go.uber.org/mock/gomock" -) - -// MockInstance is a mock of Instance interface. -type MockInstance struct { - ctrl *gomock.Controller - recorder *MockInstanceMockRecorder -} - -// MockInstanceMockRecorder is the mock recorder for MockInstance. -type MockInstanceMockRecorder struct { - mock *MockInstance -} - -// NewMockInstance creates a new mock instance. -func NewMockInstance(ctrl *gomock.Controller) *MockInstance { - mock := &MockInstance{ctrl: ctrl} - mock.recorder = &MockInstanceMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockInstance) EXPECT() *MockInstanceMockRecorder { - return m.recorder -} - -// ApplyExtrinsic mocks base method. -func (m *MockInstance) ApplyExtrinsic(arg0 types.Extrinsic) ([]byte, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ApplyExtrinsic", arg0) - ret0, _ := ret[0].([]byte) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ApplyExtrinsic indicates an expected call of ApplyExtrinsic. -func (mr *MockInstanceMockRecorder) ApplyExtrinsic(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyExtrinsic", reflect.TypeOf((*MockInstance)(nil).ApplyExtrinsic), arg0) -} - -// BabeConfiguration mocks base method. -func (m *MockInstance) BabeConfiguration() (*types.BabeConfiguration, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "BabeConfiguration") - ret0, _ := ret[0].(*types.BabeConfiguration) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// BabeConfiguration indicates an expected call of BabeConfiguration. -func (mr *MockInstanceMockRecorder) BabeConfiguration() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BabeConfiguration", reflect.TypeOf((*MockInstance)(nil).BabeConfiguration)) -} - -// BabeGenerateKeyOwnershipProof mocks base method. -func (m *MockInstance) BabeGenerateKeyOwnershipProof(arg0 uint64, arg1 [32]byte) (types.OpaqueKeyOwnershipProof, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "BabeGenerateKeyOwnershipProof", arg0, arg1) - ret0, _ := ret[0].(types.OpaqueKeyOwnershipProof) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// BabeGenerateKeyOwnershipProof indicates an expected call of BabeGenerateKeyOwnershipProof. -func (mr *MockInstanceMockRecorder) BabeGenerateKeyOwnershipProof(arg0, arg1 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BabeGenerateKeyOwnershipProof", reflect.TypeOf((*MockInstance)(nil).BabeGenerateKeyOwnershipProof), arg0, arg1) -} - -// BabeSubmitReportEquivocationUnsignedExtrinsic mocks base method. -func (m *MockInstance) BabeSubmitReportEquivocationUnsignedExtrinsic(arg0 types.BabeEquivocationProof, arg1 types.OpaqueKeyOwnershipProof) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "BabeSubmitReportEquivocationUnsignedExtrinsic", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// BabeSubmitReportEquivocationUnsignedExtrinsic indicates an expected call of BabeSubmitReportEquivocationUnsignedExtrinsic. 
-func (mr *MockInstanceMockRecorder) BabeSubmitReportEquivocationUnsignedExtrinsic(arg0, arg1 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BabeSubmitReportEquivocationUnsignedExtrinsic", reflect.TypeOf((*MockInstance)(nil).BabeSubmitReportEquivocationUnsignedExtrinsic), arg0, arg1) -} - -// CheckInherents mocks base method. -func (m *MockInstance) CheckInherents() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "CheckInherents") -} - -// CheckInherents indicates an expected call of CheckInherents. -func (mr *MockInstanceMockRecorder) CheckInherents() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckInherents", reflect.TypeOf((*MockInstance)(nil).CheckInherents)) -} - -// DecodeSessionKeys mocks base method. -func (m *MockInstance) DecodeSessionKeys(arg0 []byte) ([]byte, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DecodeSessionKeys", arg0) - ret0, _ := ret[0].([]byte) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DecodeSessionKeys indicates an expected call of DecodeSessionKeys. -func (mr *MockInstanceMockRecorder) DecodeSessionKeys(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DecodeSessionKeys", reflect.TypeOf((*MockInstance)(nil).DecodeSessionKeys), arg0) -} - -// Exec mocks base method. -func (m *MockInstance) Exec(arg0 string, arg1 []byte) ([]byte, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Exec", arg0, arg1) - ret0, _ := ret[0].([]byte) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Exec indicates an expected call of Exec. -func (mr *MockInstanceMockRecorder) Exec(arg0, arg1 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Exec", reflect.TypeOf((*MockInstance)(nil).Exec), arg0, arg1) -} - -// ExecuteBlock mocks base method. -func (m *MockInstance) ExecuteBlock(arg0 *types.Block) ([]byte, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ExecuteBlock", arg0) - ret0, _ := ret[0].([]byte) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ExecuteBlock indicates an expected call of ExecuteBlock. -func (mr *MockInstanceMockRecorder) ExecuteBlock(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecuteBlock", reflect.TypeOf((*MockInstance)(nil).ExecuteBlock), arg0) -} - -// FinalizeBlock mocks base method. -func (m *MockInstance) FinalizeBlock() (*types.Header, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FinalizeBlock") - ret0, _ := ret[0].(*types.Header) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// FinalizeBlock indicates an expected call of FinalizeBlock. -func (mr *MockInstanceMockRecorder) FinalizeBlock() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FinalizeBlock", reflect.TypeOf((*MockInstance)(nil).FinalizeBlock)) -} - -// GenerateSessionKeys mocks base method. -func (m *MockInstance) GenerateSessionKeys() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "GenerateSessionKeys") -} - -// GenerateSessionKeys indicates an expected call of GenerateSessionKeys. -func (mr *MockInstanceMockRecorder) GenerateSessionKeys() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateSessionKeys", reflect.TypeOf((*MockInstance)(nil).GenerateSessionKeys)) -} - -// GetCodeHash mocks base method. 
-func (m *MockInstance) GetCodeHash() common.Hash { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetCodeHash") - ret0, _ := ret[0].(common.Hash) - return ret0 -} - -// GetCodeHash indicates an expected call of GetCodeHash. -func (mr *MockInstanceMockRecorder) GetCodeHash() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCodeHash", reflect.TypeOf((*MockInstance)(nil).GetCodeHash)) -} - -// GrandpaAuthorities mocks base method. -func (m *MockInstance) GrandpaAuthorities() ([]types.Authority, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GrandpaAuthorities") - ret0, _ := ret[0].([]types.Authority) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GrandpaAuthorities indicates an expected call of GrandpaAuthorities. -func (mr *MockInstanceMockRecorder) GrandpaAuthorities() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GrandpaAuthorities", reflect.TypeOf((*MockInstance)(nil).GrandpaAuthorities)) -} - -// GrandpaGenerateKeyOwnershipProof mocks base method. -func (m *MockInstance) GrandpaGenerateKeyOwnershipProof(arg0 uint64, arg1 ed25519.PublicKeyBytes) (types.GrandpaOpaqueKeyOwnershipProof, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GrandpaGenerateKeyOwnershipProof", arg0, arg1) - ret0, _ := ret[0].(types.GrandpaOpaqueKeyOwnershipProof) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GrandpaGenerateKeyOwnershipProof indicates an expected call of GrandpaGenerateKeyOwnershipProof. -func (mr *MockInstanceMockRecorder) GrandpaGenerateKeyOwnershipProof(arg0, arg1 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GrandpaGenerateKeyOwnershipProof", reflect.TypeOf((*MockInstance)(nil).GrandpaGenerateKeyOwnershipProof), arg0, arg1) -} - -// GrandpaSubmitReportEquivocationUnsignedExtrinsic mocks base method. -func (m *MockInstance) GrandpaSubmitReportEquivocationUnsignedExtrinsic(arg0 types.GrandpaEquivocationProof, arg1 types.GrandpaOpaqueKeyOwnershipProof) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GrandpaSubmitReportEquivocationUnsignedExtrinsic", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// GrandpaSubmitReportEquivocationUnsignedExtrinsic indicates an expected call of GrandpaSubmitReportEquivocationUnsignedExtrinsic. -func (mr *MockInstanceMockRecorder) GrandpaSubmitReportEquivocationUnsignedExtrinsic(arg0, arg1 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GrandpaSubmitReportEquivocationUnsignedExtrinsic", reflect.TypeOf((*MockInstance)(nil).GrandpaSubmitReportEquivocationUnsignedExtrinsic), arg0, arg1) -} - -// InherentExtrinsics mocks base method. -func (m *MockInstance) InherentExtrinsics(arg0 []byte) ([]byte, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "InherentExtrinsics", arg0) - ret0, _ := ret[0].([]byte) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// InherentExtrinsics indicates an expected call of InherentExtrinsics. -func (mr *MockInstanceMockRecorder) InherentExtrinsics(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InherentExtrinsics", reflect.TypeOf((*MockInstance)(nil).InherentExtrinsics), arg0) -} - -// InitializeBlock mocks base method. 
-func (m *MockInstance) InitializeBlock(arg0 *types.Header) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "InitializeBlock", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// InitializeBlock indicates an expected call of InitializeBlock. -func (mr *MockInstanceMockRecorder) InitializeBlock(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InitializeBlock", reflect.TypeOf((*MockInstance)(nil).InitializeBlock), arg0) -} - -// Keystore mocks base method. -func (m *MockInstance) Keystore() *keystore.GlobalKeystore { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Keystore") - ret0, _ := ret[0].(*keystore.GlobalKeystore) - return ret0 -} - -// Keystore indicates an expected call of Keystore. -func (mr *MockInstanceMockRecorder) Keystore() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Keystore", reflect.TypeOf((*MockInstance)(nil).Keystore)) -} - -// Metadata mocks base method. -func (m *MockInstance) Metadata() ([]byte, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Metadata") - ret0, _ := ret[0].([]byte) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Metadata indicates an expected call of Metadata. -func (mr *MockInstanceMockRecorder) Metadata() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Metadata", reflect.TypeOf((*MockInstance)(nil).Metadata)) -} - -// NetworkService mocks base method. -func (m *MockInstance) NetworkService() runtime.BasicNetwork { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NetworkService") - ret0, _ := ret[0].(runtime.BasicNetwork) - return ret0 -} - -// NetworkService indicates an expected call of NetworkService. -func (mr *MockInstanceMockRecorder) NetworkService() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetworkService", reflect.TypeOf((*MockInstance)(nil).NetworkService)) -} - -// NodeStorage mocks base method. -func (m *MockInstance) NodeStorage() runtime.NodeStorage { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NodeStorage") - ret0, _ := ret[0].(runtime.NodeStorage) - return ret0 -} - -// NodeStorage indicates an expected call of NodeStorage. -func (mr *MockInstanceMockRecorder) NodeStorage() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeStorage", reflect.TypeOf((*MockInstance)(nil).NodeStorage)) -} - -// OffchainWorker mocks base method. -func (m *MockInstance) OffchainWorker() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "OffchainWorker") -} - -// OffchainWorker indicates an expected call of OffchainWorker. -func (mr *MockInstanceMockRecorder) OffchainWorker() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OffchainWorker", reflect.TypeOf((*MockInstance)(nil).OffchainWorker)) -} - -// PaymentQueryInfo mocks base method. -func (m *MockInstance) PaymentQueryInfo(arg0 []byte) (*types.RuntimeDispatchInfo, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PaymentQueryInfo", arg0) - ret0, _ := ret[0].(*types.RuntimeDispatchInfo) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PaymentQueryInfo indicates an expected call of PaymentQueryInfo. -func (mr *MockInstanceMockRecorder) PaymentQueryInfo(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaymentQueryInfo", reflect.TypeOf((*MockInstance)(nil).PaymentQueryInfo), arg0) -} - -// RandomSeed mocks base method. 
-func (m *MockInstance) RandomSeed() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "RandomSeed") -} - -// RandomSeed indicates an expected call of RandomSeed. -func (mr *MockInstanceMockRecorder) RandomSeed() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RandomSeed", reflect.TypeOf((*MockInstance)(nil).RandomSeed)) -} - -// SetContextStorage mocks base method. -func (m *MockInstance) SetContextStorage(arg0 runtime.Storage) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetContextStorage", arg0) -} - -// SetContextStorage indicates an expected call of SetContextStorage. -func (mr *MockInstanceMockRecorder) SetContextStorage(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetContextStorage", reflect.TypeOf((*MockInstance)(nil).SetContextStorage), arg0) -} - -// Stop mocks base method. -func (m *MockInstance) Stop() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Stop") -} - -// Stop indicates an expected call of Stop. -func (mr *MockInstanceMockRecorder) Stop() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockInstance)(nil).Stop)) -} - -// ValidateTransaction mocks base method. -func (m *MockInstance) ValidateTransaction(arg0 types.Extrinsic) (*transaction.Validity, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ValidateTransaction", arg0) - ret0, _ := ret[0].(*transaction.Validity) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ValidateTransaction indicates an expected call of ValidateTransaction. -func (mr *MockInstanceMockRecorder) ValidateTransaction(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateTransaction", reflect.TypeOf((*MockInstance)(nil).ValidateTransaction), arg0) -} - -// Validator mocks base method. -func (m *MockInstance) Validator() bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Validator") - ret0, _ := ret[0].(bool) - return ret0 -} - -// Validator indicates an expected call of Validator. -func (mr *MockInstanceMockRecorder) Validator() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Validator", reflect.TypeOf((*MockInstance)(nil).Validator)) -} - -// Version mocks base method. -func (m *MockInstance) Version() (runtime.Version, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Version") - ret0, _ := ret[0].(runtime.Version) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Version indicates an expected call of Version. -func (mr *MockInstanceMockRecorder) Version() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockInstance)(nil).Version)) -} diff --git a/dot/sync/mock_telemetry_test.go b/dot/sync/mock_telemetry_test.go deleted file mode 100644 index db277f1f51..0000000000 --- a/dot/sync/mock_telemetry_test.go +++ /dev/null @@ -1,52 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ChainSafe/gossamer/dot/sync (interfaces: Telemetry) -// -// Generated by this command: -// -// mockgen -destination=mock_telemetry_test.go -package sync . Telemetry -// - -// Package sync is a generated GoMock package. -package sync - -import ( - json "encoding/json" - reflect "reflect" - - gomock "go.uber.org/mock/gomock" -) - -// MockTelemetry is a mock of Telemetry interface. 
-type MockTelemetry struct { - ctrl *gomock.Controller - recorder *MockTelemetryMockRecorder -} - -// MockTelemetryMockRecorder is the mock recorder for MockTelemetry. -type MockTelemetryMockRecorder struct { - mock *MockTelemetry -} - -// NewMockTelemetry creates a new mock instance. -func NewMockTelemetry(ctrl *gomock.Controller) *MockTelemetry { - mock := &MockTelemetry{ctrl: ctrl} - mock.recorder = &MockTelemetryMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockTelemetry) EXPECT() *MockTelemetryMockRecorder { - return m.recorder -} - -// SendMessage mocks base method. -func (m *MockTelemetry) SendMessage(arg0 json.Marshaler) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SendMessage", arg0) -} - -// SendMessage indicates an expected call of SendMessage. -func (mr *MockTelemetryMockRecorder) SendMessage(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMessage", reflect.TypeOf((*MockTelemetry)(nil).SendMessage), arg0) -} diff --git a/dot/sync/mocks_generate_test.go b/dot/sync/mocks_generate_test.go deleted file mode 100644 index e970742556..0000000000 --- a/dot/sync/mocks_generate_test.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2022 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -//go:generate mockgen -destination=mocks_test.go -package=$GOPACKAGE . BlockState,StorageState,TransactionState,BabeVerifier,FinalityGadget,BlockImportHandler,Network -//go:generate mockgen -destination=mock_telemetry_test.go -package $GOPACKAGE . Telemetry -//go:generate mockgen -destination=mock_runtime_test.go -package $GOPACKAGE github.com/ChainSafe/gossamer/lib/runtime Instance -//go:generate mockgen -destination=mock_chain_sync_test.go -package $GOPACKAGE -source chain_sync.go . ChainSync -//go:generate mockgen -destination=mock_disjoint_block_set_test.go -package=$GOPACKAGE . DisjointBlockSet -//go:generate mockgen -destination=mock_request.go -package $GOPACKAGE github.com/ChainSafe/gossamer/dot/network RequestMaker diff --git a/dot/sync/mocks_test.go b/dot/sync/mocks_test.go deleted file mode 100644 index bb57e94a7d..0000000000 --- a/dot/sync/mocks_test.go +++ /dev/null @@ -1,667 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ChainSafe/gossamer/dot/sync (interfaces: BlockState,StorageState,TransactionState,BabeVerifier,FinalityGadget,BlockImportHandler,Network) -// -// Generated by this command: -// -// mockgen -destination=mocks_test.go -package=sync . BlockState,StorageState,TransactionState,BabeVerifier,FinalityGadget,BlockImportHandler,Network -// - -// Package sync is a generated GoMock package. -package sync - -import ( - reflect "reflect" - - peerset "github.com/ChainSafe/gossamer/dot/peerset" - types "github.com/ChainSafe/gossamer/dot/types" - common "github.com/ChainSafe/gossamer/lib/common" - runtime "github.com/ChainSafe/gossamer/lib/runtime" - storage "github.com/ChainSafe/gossamer/lib/runtime/storage" - peer "github.com/libp2p/go-libp2p/core/peer" - gomock "go.uber.org/mock/gomock" -) - -// MockBlockState is a mock of BlockState interface. -type MockBlockState struct { - ctrl *gomock.Controller - recorder *MockBlockStateMockRecorder -} - -// MockBlockStateMockRecorder is the mock recorder for MockBlockState. -type MockBlockStateMockRecorder struct { - mock *MockBlockState -} - -// NewMockBlockState creates a new mock instance. 
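The mocks_generate_test.go directives above are what produce these files: go generate runs each mockgen line, which rewrites the destination mock from the named interfaces. A minimal sketch of the same mechanism with a hypothetical Example interface; the directive, file name, and interface here are placeholders, not part of this package:

package sync

// Running `go generate` in the package executes the directive below,
// invoking mockgen to regenerate mock_example_test.go from Example.
//go:generate mockgen -destination=mock_example_test.go -package=sync . Example

// Example is a stand-in interface; mockgen emits a MockExample type
// plus a recorder for it, mirroring the generated files in this diff.
type Example interface {
	Ping() error
}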
-func NewMockBlockState(ctrl *gomock.Controller) *MockBlockState { - mock := &MockBlockState{ctrl: ctrl} - mock.recorder = &MockBlockStateMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockBlockState) EXPECT() *MockBlockStateMockRecorder { - return m.recorder -} - -// BestBlockHeader mocks base method. -func (m *MockBlockState) BestBlockHeader() (*types.Header, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "BestBlockHeader") - ret0, _ := ret[0].(*types.Header) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// BestBlockHeader indicates an expected call of BestBlockHeader. -func (mr *MockBlockStateMockRecorder) BestBlockHeader() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BestBlockHeader", reflect.TypeOf((*MockBlockState)(nil).BestBlockHeader)) -} - -// BestBlockNumber mocks base method. -func (m *MockBlockState) BestBlockNumber() (uint, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "BestBlockNumber") - ret0, _ := ret[0].(uint) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// BestBlockNumber indicates an expected call of BestBlockNumber. -func (mr *MockBlockStateMockRecorder) BestBlockNumber() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BestBlockNumber", reflect.TypeOf((*MockBlockState)(nil).BestBlockNumber)) -} - -// CompareAndSetBlockData mocks base method. -func (m *MockBlockState) CompareAndSetBlockData(arg0 *types.BlockData) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CompareAndSetBlockData", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// CompareAndSetBlockData indicates an expected call of CompareAndSetBlockData. -func (mr *MockBlockStateMockRecorder) CompareAndSetBlockData(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CompareAndSetBlockData", reflect.TypeOf((*MockBlockState)(nil).CompareAndSetBlockData), arg0) -} - -// GetAllBlocksAtNumber mocks base method. -func (m *MockBlockState) GetAllBlocksAtNumber(arg0 uint) ([]common.Hash, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetAllBlocksAtNumber", arg0) - ret0, _ := ret[0].([]common.Hash) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetAllBlocksAtNumber indicates an expected call of GetAllBlocksAtNumber. -func (mr *MockBlockStateMockRecorder) GetAllBlocksAtNumber(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllBlocksAtNumber", reflect.TypeOf((*MockBlockState)(nil).GetAllBlocksAtNumber), arg0) -} - -// GetBlockBody mocks base method. -func (m *MockBlockState) GetBlockBody(arg0 common.Hash) (*types.Body, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBlockBody", arg0) - ret0, _ := ret[0].(*types.Body) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBlockBody indicates an expected call of GetBlockBody. -func (mr *MockBlockStateMockRecorder) GetBlockBody(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockBody", reflect.TypeOf((*MockBlockState)(nil).GetBlockBody), arg0) -} - -// GetBlockByHash mocks base method. 
-func (m *MockBlockState) GetBlockByHash(arg0 common.Hash) (*types.Block, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBlockByHash", arg0) - ret0, _ := ret[0].(*types.Block) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBlockByHash indicates an expected call of GetBlockByHash. -func (mr *MockBlockStateMockRecorder) GetBlockByHash(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockByHash", reflect.TypeOf((*MockBlockState)(nil).GetBlockByHash), arg0) -} - -// GetFinalisedNotifierChannel mocks base method. -func (m *MockBlockState) GetFinalisedNotifierChannel() chan *types.FinalisationInfo { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetFinalisedNotifierChannel") - ret0, _ := ret[0].(chan *types.FinalisationInfo) - return ret0 -} - -// GetFinalisedNotifierChannel indicates an expected call of GetFinalisedNotifierChannel. -func (mr *MockBlockStateMockRecorder) GetFinalisedNotifierChannel() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFinalisedNotifierChannel", reflect.TypeOf((*MockBlockState)(nil).GetFinalisedNotifierChannel)) -} - -// GetHashByNumber mocks base method. -func (m *MockBlockState) GetHashByNumber(arg0 uint) (common.Hash, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetHashByNumber", arg0) - ret0, _ := ret[0].(common.Hash) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetHashByNumber indicates an expected call of GetHashByNumber. -func (mr *MockBlockStateMockRecorder) GetHashByNumber(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHashByNumber", reflect.TypeOf((*MockBlockState)(nil).GetHashByNumber), arg0) -} - -// GetHeader mocks base method. -func (m *MockBlockState) GetHeader(arg0 common.Hash) (*types.Header, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetHeader", arg0) - ret0, _ := ret[0].(*types.Header) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetHeader indicates an expected call of GetHeader. -func (mr *MockBlockStateMockRecorder) GetHeader(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHeader", reflect.TypeOf((*MockBlockState)(nil).GetHeader), arg0) -} - -// GetHeaderByNumber mocks base method. -func (m *MockBlockState) GetHeaderByNumber(arg0 uint) (*types.Header, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetHeaderByNumber", arg0) - ret0, _ := ret[0].(*types.Header) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetHeaderByNumber indicates an expected call of GetHeaderByNumber. -func (mr *MockBlockStateMockRecorder) GetHeaderByNumber(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHeaderByNumber", reflect.TypeOf((*MockBlockState)(nil).GetHeaderByNumber), arg0) -} - -// GetHighestFinalisedHeader mocks base method. -func (m *MockBlockState) GetHighestFinalisedHeader() (*types.Header, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetHighestFinalisedHeader") - ret0, _ := ret[0].(*types.Header) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetHighestFinalisedHeader indicates an expected call of GetHighestFinalisedHeader. 
-func (mr *MockBlockStateMockRecorder) GetHighestFinalisedHeader() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHighestFinalisedHeader", reflect.TypeOf((*MockBlockState)(nil).GetHighestFinalisedHeader)) -} - -// GetJustification mocks base method. -func (m *MockBlockState) GetJustification(arg0 common.Hash) ([]byte, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetJustification", arg0) - ret0, _ := ret[0].([]byte) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetJustification indicates an expected call of GetJustification. -func (mr *MockBlockStateMockRecorder) GetJustification(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetJustification", reflect.TypeOf((*MockBlockState)(nil).GetJustification), arg0) -} - -// GetMessageQueue mocks base method. -func (m *MockBlockState) GetMessageQueue(arg0 common.Hash) ([]byte, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetMessageQueue", arg0) - ret0, _ := ret[0].([]byte) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetMessageQueue indicates an expected call of GetMessageQueue. -func (mr *MockBlockStateMockRecorder) GetMessageQueue(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMessageQueue", reflect.TypeOf((*MockBlockState)(nil).GetMessageQueue), arg0) -} - -// GetReceipt mocks base method. -func (m *MockBlockState) GetReceipt(arg0 common.Hash) ([]byte, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetReceipt", arg0) - ret0, _ := ret[0].([]byte) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetReceipt indicates an expected call of GetReceipt. -func (mr *MockBlockStateMockRecorder) GetReceipt(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetReceipt", reflect.TypeOf((*MockBlockState)(nil).GetReceipt), arg0) -} - -// GetRuntime mocks base method. -func (m *MockBlockState) GetRuntime(arg0 common.Hash) (runtime.Instance, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetRuntime", arg0) - ret0, _ := ret[0].(runtime.Instance) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetRuntime indicates an expected call of GetRuntime. -func (mr *MockBlockStateMockRecorder) GetRuntime(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRuntime", reflect.TypeOf((*MockBlockState)(nil).GetRuntime), arg0) -} - -// HasHeader mocks base method. -func (m *MockBlockState) HasHeader(arg0 common.Hash) (bool, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "HasHeader", arg0) - ret0, _ := ret[0].(bool) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// HasHeader indicates an expected call of HasHeader. -func (mr *MockBlockStateMockRecorder) HasHeader(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasHeader", reflect.TypeOf((*MockBlockState)(nil).HasHeader), arg0) -} - -// IsDescendantOf mocks base method. -func (m *MockBlockState) IsDescendantOf(arg0, arg1 common.Hash) (bool, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "IsDescendantOf", arg0, arg1) - ret0, _ := ret[0].(bool) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// IsDescendantOf indicates an expected call of IsDescendantOf. 
-func (mr *MockBlockStateMockRecorder) IsDescendantOf(arg0, arg1 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsDescendantOf", reflect.TypeOf((*MockBlockState)(nil).IsDescendantOf), arg0, arg1) -} - -// IsPaused mocks base method. -func (m *MockBlockState) IsPaused() bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "IsPaused") - ret0, _ := ret[0].(bool) - return ret0 -} - -// IsPaused indicates an expected call of IsPaused. -func (mr *MockBlockStateMockRecorder) IsPaused() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsPaused", reflect.TypeOf((*MockBlockState)(nil).IsPaused)) -} - -// Pause mocks base method. -func (m *MockBlockState) Pause() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Pause") - ret0, _ := ret[0].(error) - return ret0 -} - -// Pause indicates an expected call of Pause. -func (mr *MockBlockStateMockRecorder) Pause() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Pause", reflect.TypeOf((*MockBlockState)(nil).Pause)) -} - -// Range mocks base method. -func (m *MockBlockState) Range(arg0, arg1 common.Hash) ([]common.Hash, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Range", arg0, arg1) - ret0, _ := ret[0].([]common.Hash) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Range indicates an expected call of Range. -func (mr *MockBlockStateMockRecorder) Range(arg0, arg1 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Range", reflect.TypeOf((*MockBlockState)(nil).Range), arg0, arg1) -} - -// RangeInMemory mocks base method. -func (m *MockBlockState) RangeInMemory(arg0, arg1 common.Hash) ([]common.Hash, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RangeInMemory", arg0, arg1) - ret0, _ := ret[0].([]common.Hash) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// RangeInMemory indicates an expected call of RangeInMemory. -func (mr *MockBlockStateMockRecorder) RangeInMemory(arg0, arg1 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RangeInMemory", reflect.TypeOf((*MockBlockState)(nil).RangeInMemory), arg0, arg1) -} - -// SetJustification mocks base method. -func (m *MockBlockState) SetJustification(arg0 common.Hash, arg1 []byte) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetJustification", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// SetJustification indicates an expected call of SetJustification. -func (mr *MockBlockStateMockRecorder) SetJustification(arg0, arg1 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetJustification", reflect.TypeOf((*MockBlockState)(nil).SetJustification), arg0, arg1) -} - -// StoreRuntime mocks base method. -func (m *MockBlockState) StoreRuntime(arg0 common.Hash, arg1 runtime.Instance) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "StoreRuntime", arg0, arg1) -} - -// StoreRuntime indicates an expected call of StoreRuntime. -func (mr *MockBlockStateMockRecorder) StoreRuntime(arg0, arg1 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StoreRuntime", reflect.TypeOf((*MockBlockState)(nil).StoreRuntime), arg0, arg1) -} - -// MockStorageState is a mock of StorageState interface. 
-type MockStorageState struct {
-    ctrl     *gomock.Controller
-    recorder *MockStorageStateMockRecorder
-}
-
-// MockStorageStateMockRecorder is the mock recorder for MockStorageState.
-type MockStorageStateMockRecorder struct {
-    mock *MockStorageState
-}
-
-// NewMockStorageState creates a new mock instance.
-func NewMockStorageState(ctrl *gomock.Controller) *MockStorageState {
-    mock := &MockStorageState{ctrl: ctrl}
-    mock.recorder = &MockStorageStateMockRecorder{mock}
-    return mock
-}
-
-// EXPECT returns an object that allows the caller to indicate expected use.
-func (m *MockStorageState) EXPECT() *MockStorageStateMockRecorder {
-    return m.recorder
-}
-
-// Lock mocks base method.
-func (m *MockStorageState) Lock() {
-    m.ctrl.T.Helper()
-    m.ctrl.Call(m, "Lock")
-}
-
-// Lock indicates an expected call of Lock.
-func (mr *MockStorageStateMockRecorder) Lock() *gomock.Call {
-    mr.mock.ctrl.T.Helper()
-    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Lock", reflect.TypeOf((*MockStorageState)(nil).Lock))
-}
-
-// TrieState mocks base method.
-func (m *MockStorageState) TrieState(arg0 *common.Hash) (*storage.TrieState, error) {
-    m.ctrl.T.Helper()
-    ret := m.ctrl.Call(m, "TrieState", arg0)
-    ret0, _ := ret[0].(*storage.TrieState)
-    ret1, _ := ret[1].(error)
-    return ret0, ret1
-}
-
-// TrieState indicates an expected call of TrieState.
-func (mr *MockStorageStateMockRecorder) TrieState(arg0 any) *gomock.Call {
-    mr.mock.ctrl.T.Helper()
-    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TrieState", reflect.TypeOf((*MockStorageState)(nil).TrieState), arg0)
-}
-
-// Unlock mocks base method.
-func (m *MockStorageState) Unlock() {
-    m.ctrl.T.Helper()
-    m.ctrl.Call(m, "Unlock")
-}
-
-// Unlock indicates an expected call of Unlock.
-func (mr *MockStorageStateMockRecorder) Unlock() *gomock.Call {
-    mr.mock.ctrl.T.Helper()
-    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Unlock", reflect.TypeOf((*MockStorageState)(nil).Unlock))
-}
-
-// MockTransactionState is a mock of TransactionState interface.
-type MockTransactionState struct {
-    ctrl     *gomock.Controller
-    recorder *MockTransactionStateMockRecorder
-}
-
-// MockTransactionStateMockRecorder is the mock recorder for MockTransactionState.
-type MockTransactionStateMockRecorder struct {
-    mock *MockTransactionState
-}
-
-// NewMockTransactionState creates a new mock instance.
-func NewMockTransactionState(ctrl *gomock.Controller) *MockTransactionState {
-    mock := &MockTransactionState{ctrl: ctrl}
-    mock.recorder = &MockTransactionStateMockRecorder{mock}
-    return mock
-}
-
-// EXPECT returns an object that allows the caller to indicate expected use.
-func (m *MockTransactionState) EXPECT() *MockTransactionStateMockRecorder {
-    return m.recorder
-}
-
-// RemoveExtrinsic mocks base method.
-func (m *MockTransactionState) RemoveExtrinsic(arg0 types.Extrinsic) {
-    m.ctrl.T.Helper()
-    m.ctrl.Call(m, "RemoveExtrinsic", arg0)
-}
-
-// RemoveExtrinsic indicates an expected call of RemoveExtrinsic.
-func (mr *MockTransactionStateMockRecorder) RemoveExtrinsic(arg0 any) *gomock.Call {
-    mr.mock.ctrl.T.Helper()
-    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveExtrinsic", reflect.TypeOf((*MockTransactionState)(nil).RemoveExtrinsic), arg0)
-}
-
-// MockBabeVerifier is a mock of BabeVerifier interface.
-type MockBabeVerifier struct {
-    ctrl     *gomock.Controller
-    recorder *MockBabeVerifierMockRecorder
-}
-
-// MockBabeVerifierMockRecorder is the mock recorder for MockBabeVerifier.
-type MockBabeVerifierMockRecorder struct {
-    mock *MockBabeVerifier
-}
-
-// NewMockBabeVerifier creates a new mock instance.
-func NewMockBabeVerifier(ctrl *gomock.Controller) *MockBabeVerifier {
-    mock := &MockBabeVerifier{ctrl: ctrl}
-    mock.recorder = &MockBabeVerifierMockRecorder{mock}
-    return mock
-}
-
-// EXPECT returns an object that allows the caller to indicate expected use.
-func (m *MockBabeVerifier) EXPECT() *MockBabeVerifierMockRecorder {
-    return m.recorder
-}
-
-// VerifyBlock mocks base method.
-func (m *MockBabeVerifier) VerifyBlock(arg0 *types.Header) error {
-    m.ctrl.T.Helper()
-    ret := m.ctrl.Call(m, "VerifyBlock", arg0)
-    ret0, _ := ret[0].(error)
-    return ret0
-}
-
-// VerifyBlock indicates an expected call of VerifyBlock.
-func (mr *MockBabeVerifierMockRecorder) VerifyBlock(arg0 any) *gomock.Call {
-    mr.mock.ctrl.T.Helper()
-    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyBlock", reflect.TypeOf((*MockBabeVerifier)(nil).VerifyBlock), arg0)
-}
-
-// MockFinalityGadget is a mock of FinalityGadget interface.
-type MockFinalityGadget struct {
-    ctrl     *gomock.Controller
-    recorder *MockFinalityGadgetMockRecorder
-}
-
-// MockFinalityGadgetMockRecorder is the mock recorder for MockFinalityGadget.
-type MockFinalityGadgetMockRecorder struct {
-    mock *MockFinalityGadget
-}
-
-// NewMockFinalityGadget creates a new mock instance.
-func NewMockFinalityGadget(ctrl *gomock.Controller) *MockFinalityGadget {
-    mock := &MockFinalityGadget{ctrl: ctrl}
-    mock.recorder = &MockFinalityGadgetMockRecorder{mock}
-    return mock
-}
-
-// EXPECT returns an object that allows the caller to indicate expected use.
-func (m *MockFinalityGadget) EXPECT() *MockFinalityGadgetMockRecorder {
-    return m.recorder
-}
-
-// VerifyBlockJustification mocks base method.
-func (m *MockFinalityGadget) VerifyBlockJustification(arg0 common.Hash, arg1 []byte) error {
-    m.ctrl.T.Helper()
-    ret := m.ctrl.Call(m, "VerifyBlockJustification", arg0, arg1)
-    ret0, _ := ret[0].(error)
-    return ret0
-}
-
-// VerifyBlockJustification indicates an expected call of VerifyBlockJustification.
-func (mr *MockFinalityGadgetMockRecorder) VerifyBlockJustification(arg0, arg1 any) *gomock.Call {
-    mr.mock.ctrl.T.Helper()
-    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyBlockJustification", reflect.TypeOf((*MockFinalityGadget)(nil).VerifyBlockJustification), arg0, arg1)
-}
-
-// MockBlockImportHandler is a mock of BlockImportHandler interface.
-type MockBlockImportHandler struct {
-    ctrl     *gomock.Controller
-    recorder *MockBlockImportHandlerMockRecorder
-}
-
-// MockBlockImportHandlerMockRecorder is the mock recorder for MockBlockImportHandler.
-type MockBlockImportHandlerMockRecorder struct {
-    mock *MockBlockImportHandler
-}
-
-// NewMockBlockImportHandler creates a new mock instance.
-func NewMockBlockImportHandler(ctrl *gomock.Controller) *MockBlockImportHandler {
-    mock := &MockBlockImportHandler{ctrl: ctrl}
-    mock.recorder = &MockBlockImportHandlerMockRecorder{mock}
-    return mock
-}
-
-// EXPECT returns an object that allows the caller to indicate expected use.
-func (m *MockBlockImportHandler) EXPECT() *MockBlockImportHandlerMockRecorder {
-    return m.recorder
-}
-
-// HandleBlockImport mocks base method.
-func (m *MockBlockImportHandler) HandleBlockImport(arg0 *types.Block, arg1 *storage.TrieState, arg2 bool) error {
-    m.ctrl.T.Helper()
-    ret := m.ctrl.Call(m, "HandleBlockImport", arg0, arg1, arg2)
-    ret0, _ := ret[0].(error)
-    return ret0
-}
-
-// HandleBlockImport indicates an expected call of HandleBlockImport.
-func (mr *MockBlockImportHandlerMockRecorder) HandleBlockImport(arg0, arg1, arg2 any) *gomock.Call {
-    mr.mock.ctrl.T.Helper()
-    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HandleBlockImport", reflect.TypeOf((*MockBlockImportHandler)(nil).HandleBlockImport), arg0, arg1, arg2)
-}
-
-// MockNetwork is a mock of Network interface.
-type MockNetwork struct {
-    ctrl     *gomock.Controller
-    recorder *MockNetworkMockRecorder
-}
-
-// MockNetworkMockRecorder is the mock recorder for MockNetwork.
-type MockNetworkMockRecorder struct {
-    mock *MockNetwork
-}
-
-// NewMockNetwork creates a new mock instance.
-func NewMockNetwork(ctrl *gomock.Controller) *MockNetwork {
-    mock := &MockNetwork{ctrl: ctrl}
-    mock.recorder = &MockNetworkMockRecorder{mock}
-    return mock
-}
-
-// EXPECT returns an object that allows the caller to indicate expected use.
-func (m *MockNetwork) EXPECT() *MockNetworkMockRecorder {
-    return m.recorder
-}
-
-// AllConnectedPeersIDs mocks base method.
-func (m *MockNetwork) AllConnectedPeersIDs() []peer.ID {
-    m.ctrl.T.Helper()
-    ret := m.ctrl.Call(m, "AllConnectedPeersIDs")
-    ret0, _ := ret[0].([]peer.ID)
-    return ret0
-}
-
-// AllConnectedPeersIDs indicates an expected call of AllConnectedPeersIDs.
-func (mr *MockNetworkMockRecorder) AllConnectedPeersIDs() *gomock.Call {
-    mr.mock.ctrl.T.Helper()
-    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllConnectedPeersIDs", reflect.TypeOf((*MockNetwork)(nil).AllConnectedPeersIDs))
-}
-
-// BlockAnnounceHandshake mocks base method.
-func (m *MockNetwork) BlockAnnounceHandshake(arg0 *types.Header) error {
-    m.ctrl.T.Helper()
-    ret := m.ctrl.Call(m, "BlockAnnounceHandshake", arg0)
-    ret0, _ := ret[0].(error)
-    return ret0
-}
-
-// BlockAnnounceHandshake indicates an expected call of BlockAnnounceHandshake.
-func (mr *MockNetworkMockRecorder) BlockAnnounceHandshake(arg0 any) *gomock.Call {
-    mr.mock.ctrl.T.Helper()
-    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BlockAnnounceHandshake", reflect.TypeOf((*MockNetwork)(nil).BlockAnnounceHandshake), arg0)
-}
-
-// Peers mocks base method.
-func (m *MockNetwork) Peers() []common.PeerInfo {
-    m.ctrl.T.Helper()
-    ret := m.ctrl.Call(m, "Peers")
-    ret0, _ := ret[0].([]common.PeerInfo)
-    return ret0
-}
-
-// Peers indicates an expected call of Peers.
-func (mr *MockNetworkMockRecorder) Peers() *gomock.Call {
-    mr.mock.ctrl.T.Helper()
-    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Peers", reflect.TypeOf((*MockNetwork)(nil).Peers))
-}
-
-// ReportPeer mocks base method.
-func (m *MockNetwork) ReportPeer(arg0 peerset.ReputationChange, arg1 peer.ID) {
-    m.ctrl.T.Helper()
-    m.ctrl.Call(m, "ReportPeer", arg0, arg1)
-}
-
-// ReportPeer indicates an expected call of ReportPeer.
-func (mr *MockNetworkMockRecorder) ReportPeer(arg0, arg1 any) *gomock.Call {
-    mr.mock.ctrl.T.Helper()
-    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportPeer", reflect.TypeOf((*MockNetwork)(nil).ReportPeer), arg0, arg1)
-}
diff --git a/dot/sync/outliers.go b/dot/sync/outliers.go
deleted file mode 100644
index be33e69a1a..0000000000
--- a/dot/sync/outliers.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2021 ChainSafe Systems (ON)
-// SPDX-License-Identifier: LGPL-3.0-only
-
-package sync
-
-import (
-    "math/big"
-    "sort"
-)
-
-// nonOutliersSumCount calculates the sum and count of non-outlier elements
-// Explanation:
-// IQR outlier detection
-// Q25 = 25th_percentile
-// Q75 = 75th_percentile
-// IQR = Q75 - Q25 // inter-quartile range
-// If x > Q75 + 1.5 * IQR or x < Q25 - 1.5 * IQR THEN x is a mild outlier
-// If x > Q75 + 3.0 * IQR or x < Q25 - 3.0 * IQR THEN x is an extreme outlier
-// Ref: http://www.mathwords.com/o/outlier.htm
-//
-// returns: sum and count of all the non-outlier elements
-func nonOutliersSumCount(dataArrUint []uint) (sum *big.Int, count uint) {
-    dataArr := make([]*big.Int, len(dataArrUint))
-    for i, v := range dataArrUint {
-        dataArr[i] = big.NewInt(int64(v))
-    }
-
-    length := len(dataArr)
-
-    switch length {
-    case 0:
-        return big.NewInt(0), 0
-    case 1:
-        return dataArr[0], 1
-    case 2:
-        return big.NewInt(0).Add(dataArr[0], dataArr[1]), 2
-    }
-
-    sort.Slice(dataArr, func(i, j int) bool {
-        return dataArr[i].Cmp(dataArr[j]) < 0
-    })
-
-    half := length / 2
-    firstHalf := dataArr[:half]
-    var secondHalf []*big.Int
-
-    if length%2 == 0 {
-        secondHalf = dataArr[half:]
-    } else {
-        secondHalf = dataArr[half+1:]
-    }
-
-    q1 := getMedian(firstHalf)
-    q3 := getMedian(secondHalf)
-
-    iqr := big.NewInt(0).Sub(q3, q1)
-    iqr1_5 := big.NewInt(0).Mul(iqr, big.NewInt(2)) // 2.0 is used instead of 1.5 to compensate for integer rounding
-    lower := big.NewInt(0).Sub(q1, iqr1_5)
-    upper := big.NewInt(0).Add(q3, iqr1_5)
-
-    sum = big.NewInt(0)
-    for _, v := range dataArr {
-        // collect valid (non-outlier) values
-        lowPass := v.Cmp(lower)
-        highPass := v.Cmp(upper)
-        if lowPass >= 0 && highPass <= 0 {
-            sum.Add(sum, v)
-            count++
-        }
-    }
-
-    return sum, count
-}
-
-func getMedian(data []*big.Int) *big.Int {
-    length := len(data)
-    half := length / 2
-    if length%2 == 0 {
-        sum := big.NewInt(0).Add(data[half], data[half-1])
-        return sum.Div(sum, big.NewInt(2))
-    }
-
-    return data[half]
-}
diff --git a/dot/sync/outliers_test.go b/dot/sync/outliers_test.go
deleted file mode 100644
index a407d654d9..0000000000
--- a/dot/sync/outliers_test.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2021 ChainSafe Systems (ON)
-// SPDX-License-Identifier: LGPL-3.0-only
-
-package sync
-
-import (
-    "math/big"
-    "testing"
-
-    "github.com/stretchr/testify/assert"
-)
-
-func Test_nonOutliersSumCount(t *testing.T) {
-    tests := []struct {
-        name      string
-        dataArr   []uint
-        wantSum   *big.Int
-        wantCount uint
-    }{
-        {
-            name:      "case 0 outliers",
-            dataArr:   []uint{2, 5, 6, 9, 12},
-            wantSum:   big.NewInt(34),
-            wantCount: uint(5),
-        },
-        {
-            name:      "case 1 outliers",
-            dataArr:   []uint{100, 2, 260, 280, 220, 240, 250, 1000},
-            wantSum:   big.NewInt(1352),
-            wantCount: uint(7),
-        },
-        {
-            name:      "case 2 outliers",
-            dataArr:   []uint{5000, 500, 5560, 5580, 5520, 5540, 5550, 100000},
-            wantSum:   big.NewInt(32750),
-            wantCount: uint(6),
-        },
-    }
-    for _, tt := range tests {
-        t.Run(tt.name, func(t *testing.T) {
-            gotSum, gotCount := nonOutliersSumCount(tt.dataArr)
-            assert.Equal(t, tt.wantSum, gotSum)
-            assert.Equal(t, tt.wantCount, gotCount)
-        })
-    }
-}
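For reference, the IQR rule implemented by the deleted nonOutliersSumCount can be illustrated with plain integers. This is a minimal, self-contained sketch rather than the deleted implementation itself; it uses int instead of big.Int and reuses the sample values from the deleted test's "case 1 outliers":

    package main

    import (
        "fmt"
        "sort"
    )

    // median of an already-sorted slice
    func median(data []int) int {
        half := len(data) / 2
        if len(data)%2 == 0 {
            return (data[half] + data[half-1]) / 2
        }
        return data[half]
    }

    // iqrBounds mirrors the deleted helper's rule: values outside
    // [Q1 - 2*IQR, Q3 + 2*IQR] are treated as outliers (2 rather than
    // 1.5 to compensate for integer rounding).
    func iqrBounds(sorted []int) (lower, upper int) {
        half := len(sorted) / 2
        q1 := median(sorted[:half])
        var q3 int
        if len(sorted)%2 == 0 {
            q3 = median(sorted[half:])
        } else {
            q3 = median(sorted[half+1:])
        }
        iqr := q3 - q1
        return q1 - 2*iqr, q3 + 2*iqr
    }

    func main() {
        data := []int{100, 2, 260, 280, 220, 240, 250, 1000}
        sort.Ints(data)
        lower, upper := iqrBounds(data)
        fmt.Println(lower, upper) // -60 490: only 1000 falls outside, matching the test's sum 1352 / count 7
    }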
diff --git a/dot/sync/peer_view.go b/dot/sync/peer_view.go
deleted file mode 100644
index 3a06122555..0000000000
--- a/dot/sync/peer_view.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2024 ChainSafe Systems (ON)
-// SPDX-License-Identifier: LGPL-3.0-only
-
-package sync
-
-import (
-    "math/big"
-    "sync"
-
-    "github.com/ChainSafe/gossamer/lib/common"
-    "github.com/libp2p/go-libp2p/core/peer"
-    "golang.org/x/exp/maps"
-)
-
-// peerView tracks our peers' best reported blocks
-type peerView struct {
-    who    peer.ID
-    hash   common.Hash
-    number uint
-}
-
-type peerViewSet struct {
-    mtx    sync.RWMutex
-    view   map[peer.ID]peerView
-    target uint
-}
-
-// getTarget returns the average of the peer views' best block numbers
-func (p *peerViewSet) getTarget() uint {
-    p.mtx.RLock()
-    defer p.mtx.RUnlock()
-
-    if len(p.view) == 0 {
-        return p.target
-    }
-
-    numbers := make([]uint, 0, len(p.view))
-    // sort the data and remove the outliers, then return the average of the remaining elements
-    for _, view := range maps.Values(p.view) {
-        numbers = append(numbers, view.number)
-    }
-
-    sum, count := nonOutliersSumCount(numbers)
-    quotientBigInt := uint(big.NewInt(0).Div(sum, big.NewInt(int64(count))).Uint64())
-
-    if p.target >= quotientBigInt {
-        return p.target
-    }
-
-    p.target = quotientBigInt // cache latest calculated target
-    return p.target
-}
-
-func (p *peerViewSet) find(pID peer.ID) (view peerView, ok bool) {
-    p.mtx.RLock()
-    defer p.mtx.RUnlock()
-
-    view, ok = p.view[pID]
-    return view, ok
-}
-
-func (p *peerViewSet) size() int {
-    p.mtx.RLock()
-    defer p.mtx.RUnlock()
-
-    return len(p.view)
-}
-
-func (p *peerViewSet) values() []peerView {
-    p.mtx.RLock()
-    defer p.mtx.RUnlock()
-
-    return maps.Values(p.view)
-}
-
-func (p *peerViewSet) update(peerID peer.ID, hash common.Hash, number uint) {
-    p.mtx.Lock()
-    defer p.mtx.Unlock()
-
-    newView := peerView{
-        who:    peerID,
-        hash:   hash,
-        number: number,
-    }
-
-    view, ok := p.view[peerID]
-    if ok && view.number >= newView.number {
-        return
-    }
-
-    p.view[peerID] = newView
-}
-
-func newPeerViewSet(cap int) *peerViewSet {
-    return &peerViewSet{
-        view: make(map[peer.ID]peerView, cap),
-    }
-}
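The sync target derived above is monotonic: a freshly computed average of the peers' best numbers is adopted only when it exceeds the cached value. A compact sketch of that rule with plain uints (outlier removal elided; the function name is illustrative, not part of the codebase):

    // monotonicTarget returns the larger of the cached target and the
    // average of the peers' best block numbers.
    func monotonicTarget(cached uint, bests []uint) uint {
        if len(bests) == 0 {
            return cached
        }
        var sum uint
        for _, b := range bests {
            sum += b
        }
        avg := sum / uint(len(bests))
        if cached >= avg {
            return cached // never move the target backwards
        }
        return avg
    }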
diff --git a/dot/sync/syncer.go b/dot/sync/syncer.go
deleted file mode 100644
index bcc33272da..0000000000
--- a/dot/sync/syncer.go
+++ /dev/null
@@ -1,199 +0,0 @@
-// Copyright 2021 ChainSafe Systems (ON)
-// SPDX-License-Identifier: LGPL-3.0-only
-
-package sync
-
-import (
-    "errors"
-    "fmt"
-    "time"
-
-    "github.com/ChainSafe/gossamer/dot/network"
-    "github.com/ChainSafe/gossamer/dot/peerset"
-    "github.com/ChainSafe/gossamer/dot/types"
-    "github.com/ChainSafe/gossamer/lib/common"
-    lrucache "github.com/ChainSafe/gossamer/lib/utils/lru-cache"
-
-    "github.com/ChainSafe/gossamer/internal/database"
-    "github.com/ChainSafe/gossamer/internal/log"
-    "github.com/libp2p/go-libp2p/core/peer"
-)
-
-var logger = log.NewFromGlobal(log.AddContext("pkg", "sync"))
-
-// Service deals with chain syncing by sending block request messages and watching for responses.
-type Service struct {
-    blockState BlockState
-    chainSync  ChainSync
-    network    Network
-
-    seenBlockSyncRequests *lrucache.LRUCache[common.Hash, uint]
-}
-
-// Pause pauses the sync service
-func (s *Service) Pause() error {
-    return s.blockState.Pause()
-}
-
-// Config is the configuration for the sync Service.
-type Config struct {
-    LogLvl             log.Level
-    Network            Network
-    BlockState         BlockState
-    StorageState       StorageState
-    FinalityGadget     FinalityGadget
-    TransactionState   TransactionState
-    BlockImportHandler BlockImportHandler
-    BabeVerifier       BabeVerifier
-    MinPeers, MaxPeers int
-    SlotDuration       time.Duration
-    Telemetry          Telemetry
-    BadBlocks          []string
-    RequestMaker       network.RequestMaker
-}
-
-// NewService returns a new *sync.Service
-func NewService(cfg *Config) (*Service, error) {
-    logger.Patch(log.SetLevel(cfg.LogLvl))
-
-    pendingBlocks := newDisjointBlockSet(pendingBlocksLimit)
-
-    csCfg := chainSyncConfig{
-        bs:                 cfg.BlockState,
-        net:                cfg.Network,
-        pendingBlocks:      pendingBlocks,
-        minPeers:           cfg.MinPeers,
-        maxPeers:           cfg.MaxPeers,
-        slotDuration:       cfg.SlotDuration,
-        storageState:       cfg.StorageState,
-        transactionState:   cfg.TransactionState,
-        babeVerifier:       cfg.BabeVerifier,
-        finalityGadget:     cfg.FinalityGadget,
-        blockImportHandler: cfg.BlockImportHandler,
-        telemetry:          cfg.Telemetry,
-        badBlocks:          cfg.BadBlocks,
-        requestMaker:       cfg.RequestMaker,
-        waitPeersDuration:  100 * time.Millisecond,
-    }
-    chainSync := newChainSync(csCfg)
-
-    return &Service{
-        blockState:            cfg.BlockState,
-        chainSync:             chainSync,
-        network:               cfg.Network,
-        seenBlockSyncRequests: lrucache.NewLRUCache[common.Hash, uint](100),
-    }, nil
-}
-
-// Start begins the chainSync and chainProcessor modules. It begins syncing in bootstrap mode
-func (s *Service) Start() error {
-    go s.chainSync.start()
-    return nil
-}
-
-// Stop stops the chainSync and chainProcessor modules
-func (s *Service) Stop() error {
-    return s.chainSync.stop()
-}
-
-// HandleBlockAnnounceHandshake notifies the `chainSync` module that
-// we have received a BlockAnnounceHandshake from the given peer.
-func (s *Service) HandleBlockAnnounceHandshake(from peer.ID, msg *network.BlockAnnounceHandshake) error {
-    logger.Debugf("received block announce handshake from: %s, #%d (%s)",
-        from, msg.BestBlockNumber, msg.BestBlockHash.Short())
-    return s.chainSync.onBlockAnnounceHandshake(from, msg.BestBlockHash, uint(msg.BestBlockNumber))
-}
-
-// HandleBlockAnnounce notifies the `chainSync` module that we have received a block announcement from the given peer.
-func (s *Service) HandleBlockAnnounce(from peer.ID, msg *network.BlockAnnounceMessage) error {
-    blockAnnounceHeader := types.NewHeader(msg.ParentHash, msg.StateRoot, msg.ExtrinsicsRoot, msg.Number, msg.Digest)
-    blockAnnounceHeaderHash := blockAnnounceHeader.Hash()
-    logger.Debugf("received block announce from: %s, #%d (%s)", from,
-        blockAnnounceHeader.Number, blockAnnounceHeaderHash.Short())
-
-    if s.blockState.IsPaused() {
-        return errors.New("blockstate service is paused")
-    }
-
-    // if the peer reports a lower or equal best block number than us,
-    // check if they are on a fork or not
-    bestBlockHeader, err := s.blockState.BestBlockHeader()
-    if err != nil {
-        return fmt.Errorf("best block header: %w", err)
-    }
-
-    if blockAnnounceHeader.Number <= bestBlockHeader.Number {
-        // check if our block hash for that number is the same, if so, do nothing
-        // as we already have that block
-        ourHash, err := s.blockState.GetHashByNumber(blockAnnounceHeader.Number)
-        if err != nil && !errors.Is(err, database.ErrNotFound) {
-            return fmt.Errorf("get block hash by number: %w", err)
-        }
-
-        if ourHash == blockAnnounceHeaderHash {
-            return nil
-        }
-
-        // check if their best block is on an invalid chain, if it is,
-        // potentially downscore them
-        // for now, we can remove them from the syncing peers set
-        fin, err := s.blockState.GetHighestFinalisedHeader()
-        if err != nil {
-            return fmt.Errorf("get highest finalised header: %w", err)
-        }
-
-        // their block hash doesn't match ours for that number (ie. they are on a different
-        // chain), and also the highest finalised block is higher than that number.
-        // thus the peer is on an invalid chain
-        if fin.Number >= blockAnnounceHeader.Number && msg.BestBlock {
-            // TODO: downscore this peer, or temporarily don't sync from them? (#1399)
-            // perhaps we need another field in `peerState` to mark whether the state is valid or not
-            s.network.ReportPeer(peerset.ReputationChange{
-                Value:  peerset.BadBlockAnnouncementValue,
-                Reason: peerset.BadBlockAnnouncementReason,
-            }, from)
-            return fmt.Errorf("%w: for peer %s and block number %d",
-                errPeerOnInvalidFork, from, blockAnnounceHeader.Number)
-        }
-
-        // peer is on a fork, check if we have processed the fork already or not
-        // ie. is their block written to our db?
-        has, err := s.blockState.HasHeader(blockAnnounceHeaderHash)
-        if err != nil {
-            return fmt.Errorf("while checking if header exists: %w", err)
-        }
-
-        // if so, do nothing, as we already have their fork
-        if has {
-            return nil
-        }
-    }
-
-    // we assume that if a peer sends us a block announce for a certain block,
-    // that it also has the chain up until and including that block.
-    // this may not be a valid assumption, but perhaps we can assume that
-    // it is likely they will receive this block and its ancestors before us.
-    return s.chainSync.onBlockAnnounce(announcedBlock{
-        who:    from,
-        header: blockAnnounceHeader,
-    })
-}
-
-func (s *Service) OnConnectionClosed(who peer.ID) {
-    logger.Tracef("[NOT IMPLEMENTED] OnConnectionClosed: %s", who.String())
-}
-
-// IsSynced exposes the synced state
-func (s *Service) IsSynced() bool {
-    return s.chainSync.getSyncMode() == tip
-}
-
-// HighestBlock gets the highest known block number
-func (s *Service) HighestBlock() uint {
-    highestBlock, err := s.chainSync.getHighestBlock()
-    if err != nil {
-        logger.Warnf("failed to get the highest block: %s", err)
-        return 0
-    }
-    return highestBlock
-}
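The announce-handling policy deleted above boils down to a handful of ordered checks. A distilled sketch of that decision flow, with plain block numbers and booleans standing in for headers and state lookups (all names here are illustrative, not part of the codebase):

    // announceAction condenses the policy in the deleted HandleBlockAnnounce:
    // ignore blocks we already have, reject best-block claims that contradict
    // finality, and only hand genuinely new chains to the sync module.
    type announceAction int

    const (
        ignore announceAction = iota
        rejectInvalidFork
        syncFromPeer
    )

    func decide(announced, best, finalised uint, sameHash, isBest, haveHeader bool) announceAction {
        if announced > best {
            return syncFromPeer // ahead of us: hand over to chainSync
        }
        if sameHash {
            return ignore // we already have this exact block
        }
        if finalised >= announced && isBest {
            return rejectInvalidFork // their best block contradicts our finalised chain
        }
        if haveHeader {
            return ignore // fork already imported
        }
        return syncFromPeer
    }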
diff --git a/dot/sync/syncer_integration_test.go b/dot/sync/syncer_integration_test.go
deleted file mode 100644
index c10643da5e..0000000000
--- a/dot/sync/syncer_integration_test.go
+++ /dev/null
@@ -1,212 +0,0 @@
-//go:build integration
-
-// Copyright 2021 ChainSafe Systems (ON)
-// SPDX-License-Identifier: LGPL-3.0-only
-
-package sync
-
-import (
-    "errors"
-    "path/filepath"
-    "testing"
-
-    "github.com/ChainSafe/gossamer/dot/state"
-    "github.com/ChainSafe/gossamer/dot/types"
-    "github.com/ChainSafe/gossamer/internal/database"
-    "github.com/ChainSafe/gossamer/internal/log"
-    "github.com/ChainSafe/gossamer/lib/common"
-    "github.com/ChainSafe/gossamer/lib/genesis"
-    runtime "github.com/ChainSafe/gossamer/lib/runtime"
-    rtstorage "github.com/ChainSafe/gossamer/lib/runtime/storage"
-    wazero_runtime "github.com/ChainSafe/gossamer/lib/runtime/wazero"
-    "github.com/ChainSafe/gossamer/lib/utils"
-    "github.com/ChainSafe/gossamer/pkg/trie"
-    "github.com/ChainSafe/gossamer/tests/utils/config"
-
-    "github.com/stretchr/testify/require"
-    "go.uber.org/mock/gomock"
-)
-
-func newTestSyncer(t *testing.T) *Service {
-    ctrl := gomock.NewController(t)
-
-    mockTelemetryClient := NewMockTelemetry(ctrl)
-    mockTelemetryClient.EXPECT().SendMessage(gomock.Any()).AnyTimes()
-
-    wazero_runtime.DefaultTestLogLvl = log.Warn
-
-    cfg := &Config{}
-    testDatadirPath := t.TempDir()
-
-    scfg := state.Config{
-        Path:              testDatadirPath,
-        LogLevel:          log.Info,
-        Telemetry:         mockTelemetryClient,
-        GenesisBABEConfig: config.BABEConfigurationTestDefault,
-    }
-    stateSrvc := state.NewService(scfg)
-    stateSrvc.UseMemDB()
-
-    gen, genTrie, genHeader := newWestendDevGenesisWithTrieAndHeader(t)
-    err := stateSrvc.Initialise(&gen, &genHeader, genTrie)
-    require.NoError(t, err)
-
-    err = stateSrvc.Start()
-    require.NoError(t, err)
-
-    if cfg.BlockState == nil {
-        cfg.BlockState = stateSrvc.Block
-    }
-
-    if cfg.StorageState == nil {
-        cfg.StorageState = stateSrvc.Storage
-    }
-
-    // initialise runtime
-    genState := rtstorage.NewTrieState(genTrie)
-
-    rtCfg := wazero_runtime.Config{
-        Storage: genState,
-        LogLvl:  log.Critical,
-    }
-
-    if stateSrvc != nil {
-        rtCfg.NodeStorage.BaseDB = stateSrvc.Base
-    } else {
-        rtCfg.NodeStorage.BaseDB, err = database.LoadDatabase(filepath.Join(testDatadirPath, "offline_storage"), false)
-        require.NoError(t, err)
-    }
-
-    rtCfg.CodeHash, err = cfg.StorageState.(*state.InmemoryStorageState).LoadCodeHash(nil)
-    require.NoError(t, err)
-
-    instance, err := wazero_runtime.NewRuntimeFromGenesis(rtCfg)
-    require.NoError(t, err)
-
-    bestBlockHash := cfg.BlockState.(*state.BlockState).BestBlockHash()
-    cfg.BlockState.(*state.BlockState).StoreRuntime(bestBlockHash, instance)
-    blockImportHandler := NewMockBlockImportHandler(ctrl)
-    blockImportHandler.EXPECT().HandleBlockImport(gomock.AssignableToTypeOf(&types.Block{}),
-        gomock.AssignableToTypeOf(&rtstorage.TrieState{}), false).DoAndReturn(
-        func(block *types.Block, ts *rtstorage.TrieState, _ bool) error {
-            // storing updates the state trie nodes in the database
-            if err = stateSrvc.Storage.StoreTrie(ts, &block.Header); err != nil {
-                logger.Warnf("failed to store state trie for imported block %s: %s", block.Header.Hash(), err)
-                return err
-            }
-
-            // store block in database
-            err = stateSrvc.Block.AddBlock(block)
-            require.NoError(t, err)
-
-            stateSrvc.Block.StoreRuntime(block.Header.Hash(), instance)
-            logger.Debugf("imported block %s and stored state trie with root %s",
-                block.Header.Hash(), ts.MustRoot())
-            return nil
-        }).AnyTimes()
-    cfg.BlockImportHandler = blockImportHandler
-
-    cfg.TransactionState = stateSrvc.Transaction
-    mockBabeVerifier := NewMockBabeVerifier(ctrl)
-    mockBabeVerifier.EXPECT().VerifyBlock(gomock.AssignableToTypeOf(&types.Header{})).AnyTimes()
-    cfg.BabeVerifier = mockBabeVerifier
-    cfg.LogLvl = log.Trace
-    mockFinalityGadget := NewMockFinalityGadget(ctrl)
-    mockFinalityGadget.EXPECT().VerifyBlockJustification(gomock.AssignableToTypeOf(common.Hash{}),
-        gomock.AssignableToTypeOf([]byte{})).DoAndReturn(func(hash common.Hash, justification []byte) error {
-        return nil
-    }).AnyTimes()
-
-    cfg.FinalityGadget = mockFinalityGadget
-    cfg.Network = NewMockNetwork(ctrl)
-    cfg.Telemetry = mockTelemetryClient
-    cfg.RequestMaker = NewMockRequestMaker(ctrl)
-    syncer, err := NewService(cfg)
-    require.NoError(t, err)
-    return syncer
-}
-
-func newWestendDevGenesisWithTrieAndHeader(t *testing.T) (
-    gen genesis.Genesis, genesisTrie trie.Trie, genesisHeader types.Header) {
-    t.Helper()
-
-    genesisPath := utils.GetWestendDevRawGenesisPath(t)
-    genesisPtr, err := genesis.NewGenesisFromJSONRaw(genesisPath)
-    require.NoError(t, err)
-    gen = *genesisPtr
-
-    genesisTrie, err = runtime.NewTrieFromGenesis(gen)
-    require.NoError(t, err)
-
-    parentHash := common.NewHash([]byte{0})
-    stateRoot := genesisTrie.MustHash()
-    extrinsicRoot := trie.EmptyHash
-    const number = 0
-    digest := types.NewDigest()
-    genesisHeaderPtr := types.NewHeader(parentHash,
-        stateRoot, extrinsicRoot, number, digest)
-    genesisHeader = *genesisHeaderPtr
-
-    return gen, genesisTrie, genesisHeader
-}
-
-func TestHighestBlock(t *testing.T) {
-    type input struct {
-        highestBlock uint
-        err          error
-    }
-    type output struct {
-        highestBlock uint
-    }
-    type test struct {
-        name string
-        in   input
-        out  output
-    }
-    tests := []test{
-        {
-            name: "when_*chainSync.getHighestBlock()_returns_0,_error_should_return_0",
-            in: input{
-                highestBlock: 0,
-                err:          errors.New("fake error"),
-            },
-            out: output{
-                highestBlock: 0,
-            },
-        },
-        {
-            name: "when_*chainSync.getHighestBlock()_returns_0,_nil_should_return_0",
-            in: input{
-                highestBlock: 0,
-                err:          nil,
-            },
-            out: output{
-                highestBlock: 0,
-            },
-        },
-        {
-            name: "when_*chainSync.getHighestBlock()_returns_50,_nil_should_return_50",
-            in: input{
-                highestBlock: 50,
-                err:          nil,
-            },
-            out: output{
-                highestBlock: 50,
-            },
-        },
-    }
-    for _, ts := range tests {
-        t.Run(ts.name, func(t *testing.T) {
-            s := newTestSyncer(t)
-
-            ctrl := gomock.NewController(t)
-            chainSync := NewMockChainSync(ctrl)
-            chainSync.EXPECT().getHighestBlock().Return(ts.in.highestBlock, ts.in.err)
-
-            s.chainSync = chainSync
-
-            result := s.HighestBlock()
-            require.Equal(t, result, ts.out.highestBlock)
-        })
-    }
-}
diff --git a/dot/sync/syncer_test.go b/dot/sync/syncer_test.go
deleted file mode 100644
index a16a685d81..0000000000
--- a/dot/sync/syncer_test.go
+++ /dev/null
@@ -1,435 +0,0 @@
-// Copyright 2021 ChainSafe Systems (ON)
-// SPDX-License-Identifier: LGPL-3.0-only
-
-package sync
-
-import (
-    "errors"
-    "sync"
-    "testing"
-
-    "github.com/ChainSafe/gossamer/dot/network"
-    "github.com/ChainSafe/gossamer/dot/peerset"
-    "github.com/ChainSafe/gossamer/dot/types"
-    "github.com/ChainSafe/gossamer/lib/common"
-    "github.com/libp2p/go-libp2p/core/peer"
-    "github.com/stretchr/testify/assert"
-    "github.com/stretchr/testify/require"
-    "go.uber.org/mock/gomock"
-)
-
-func TestNewService(t *testing.T) {
-    t.Parallel()
-
-    tests := []struct {
-        name       string
-        cfgBuilder func(ctrl *gomock.Controller) *Config
-        want       *Service
-        err        error
-    }{
-        {
-            name: "working_example",
-            cfgBuilder: func(ctrl *gomock.Controller) *Config {
-                blockState := NewMockBlockState(ctrl)
-                blockState.EXPECT().GetFinalisedNotifierChannel().
-                    Return(make(chan *types.FinalisationInfo))
-                return &Config{
-                    BlockState: blockState,
-                }
-            },
-            want: &Service{},
-        },
-    }
-    for _, tt := range tests {
-        tt := tt
-        t.Run(tt.name, func(t *testing.T) {
-            t.Parallel()
-            ctrl := gomock.NewController(t)
-
-            config := tt.cfgBuilder(ctrl)
-
-            got, err := NewService(config)
-            if tt.err != nil {
-                assert.EqualError(t, err, tt.err.Error())
-            } else {
-                assert.NoError(t, err)
-            }
-            if tt.want != nil {
-                assert.NotNil(t, got)
-            }
-        })
-    }
-}
-
-func TestService_HandleBlockAnnounce(t *testing.T) {
-    t.Parallel()
-
-    errTest := errors.New("test error")
-    const somePeer = peer.ID("abc")
-
-    block1AnnounceHeader := types.NewHeader(common.Hash{}, common.Hash{},
-        common.Hash{}, 1, nil)
-    block2AnnounceHeader := types.NewHeader(common.Hash{}, common.Hash{},
-        common.Hash{}, 2, nil)
-
-    testCases := map[string]struct {
-        serviceBuilder      func(ctrl *gomock.Controller) *Service
-        peerID              peer.ID
-        blockAnnounceHeader *types.Header
-        errWrapped          error
-        errMessage          string
-    }{
-        "best_block_header_error": {
-            serviceBuilder: func(ctrl *gomock.Controller) *Service {
-                blockState := NewMockBlockState(ctrl)
-                blockState.EXPECT().IsPaused().Return(false)
-                blockState.EXPECT().BestBlockHeader().Return(nil, errTest)
-                return &Service{
-                    blockState: blockState,
-                }
-            },
-            peerID:              somePeer,
-            blockAnnounceHeader: block1AnnounceHeader,
-            errWrapped:          errTest,
-            errMessage:          "best block header: test error",
-        },
-        "number_smaller_than_best_block_number_get_hash_by_number_error": {
-            serviceBuilder: func(ctrl *gomock.Controller) *Service {
-                blockState := NewMockBlockState(ctrl)
-                blockState.EXPECT().IsPaused().Return(false)
-                bestBlockHeader := &types.Header{Number: 2}
-                blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil)
-                blockState.EXPECT().GetHashByNumber(uint(1)).Return(common.Hash{}, errTest)
-
-                return &Service{
-                    blockState: blockState,
-                }
-            },
-            peerID:              somePeer,
-            blockAnnounceHeader: block1AnnounceHeader,
-            errWrapped:          errTest,
-            errMessage:          "get block hash by number: test error",
-        },
-        "number_smaller_than_best_block_number_and_same_hash": {
-            serviceBuilder: func(ctrl *gomock.Controller) *Service {
-                blockState := NewMockBlockState(ctrl)
-                blockState.EXPECT().IsPaused().Return(false)
-                bestBlockHeader := &types.Header{Number: 2}
-                blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil)
-                blockState.EXPECT().GetHashByNumber(uint(1)).Return(block1AnnounceHeader.Hash(), nil)
-                return &Service{
-                    blockState: blockState,
-                }
-            },
-            peerID:              somePeer,
-            blockAnnounceHeader: block1AnnounceHeader,
-        },
-        "number_smaller_than_best_block_number_get_highest_finalised_header_error": {
-            serviceBuilder: func(ctrl *gomock.Controller) *Service {
-                blockState := NewMockBlockState(ctrl)
-                blockState.EXPECT().IsPaused().Return(false)
-                bestBlockHeader := &types.Header{Number: 2}
-                blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil)
-                blockState.EXPECT().GetHashByNumber(uint(1)).Return(common.Hash{2}, nil)
-                blockState.EXPECT().GetHighestFinalisedHeader().Return(nil, errTest)
-                return &Service{
-                    blockState: blockState,
-                }
-            },
-            peerID:              somePeer,
-            blockAnnounceHeader: block1AnnounceHeader,
-            errWrapped:          errTest,
-            errMessage:          "get highest finalised header: test error",
-        },
-        "number_smaller_than_best_block_announced_number_equals_finalised_number": {
-            serviceBuilder: func(ctrl *gomock.Controller) *Service {
-                blockState := NewMockBlockState(ctrl)
-                blockState.EXPECT().IsPaused().Return(false)
-                bestBlockHeader := &types.Header{Number: 2}
-                blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil)
-                blockState.EXPECT().GetHashByNumber(uint(1)).
-                    Return(common.Hash{2}, nil) // other hash than someHash
-                finalisedBlockHeader := &types.Header{Number: 1}
-                blockState.EXPECT().GetHighestFinalisedHeader().Return(finalisedBlockHeader, nil)
-                network := NewMockNetwork(ctrl)
-                network.EXPECT().ReportPeer(peerset.ReputationChange{
-                    Value:  peerset.BadBlockAnnouncementValue,
-                    Reason: peerset.BadBlockAnnouncementReason,
-                }, somePeer)
-                return &Service{
-                    blockState: blockState,
-                    network:    network,
-                }
-            },
-            peerID:              somePeer,
-            blockAnnounceHeader: block1AnnounceHeader,
-            errWrapped:          errPeerOnInvalidFork,
-            errMessage:          "peer is on an invalid fork: for peer ZiCa and block number 1",
-        },
-        "number_smaller_than_best_block_number_and_finalised_number_bigger_than_number": {
-            serviceBuilder: func(ctrl *gomock.Controller) *Service {
-                blockState := NewMockBlockState(ctrl)
-                blockState.EXPECT().IsPaused().Return(false)
-                bestBlockHeader := &types.Header{Number: 2}
-                blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil)
-                blockState.EXPECT().GetHashByNumber(uint(1)).
-                    Return(common.Hash{2}, nil) // other hash than someHash
-                finalisedBlockHeader := &types.Header{Number: 2}
-                blockState.EXPECT().GetHighestFinalisedHeader().Return(finalisedBlockHeader, nil)
-                network := NewMockNetwork(ctrl)
-                network.EXPECT().ReportPeer(peerset.ReputationChange{
-                    Value:  peerset.BadBlockAnnouncementValue,
-                    Reason: peerset.BadBlockAnnouncementReason,
-                }, somePeer)
-                return &Service{
-                    blockState: blockState,
-                    network:    network,
-                }
-            },
-            peerID:              somePeer,
-            blockAnnounceHeader: block1AnnounceHeader,
-            errWrapped:          errPeerOnInvalidFork,
-            errMessage:          "peer is on an invalid fork: for peer ZiCa and block number 1",
-        },
-        "number_smaller_than_best_block_number_and_" +
-            "finalised_number_smaller_than_number_and_" +
-            "has_header_error": {
-            serviceBuilder: func(ctrl *gomock.Controller) *Service {
-                blockState := NewMockBlockState(ctrl)
-                blockState.EXPECT().IsPaused().Return(false)
-                bestBlockHeader := &types.Header{Number: 3}
-                blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil)
-                blockState.EXPECT().GetHashByNumber(uint(2)).
-                    Return(common.Hash{5, 1, 2}, nil) // other hash than block2AnnounceHeader hash
-                finalisedBlockHeader := &types.Header{Number: 1}
-                blockState.EXPECT().GetHighestFinalisedHeader().Return(finalisedBlockHeader, nil)
-                blockState.EXPECT().HasHeader(block2AnnounceHeader.Hash()).Return(false, errTest)
-                return &Service{
-                    blockState: blockState,
-                }
-            },
-            peerID:              somePeer,
-            blockAnnounceHeader: block2AnnounceHeader,
-            errWrapped:          errTest,
-            errMessage:          "while checking if header exists: test error",
-        },
-        "number_smaller_than_best_block_number_and_" +
-            "finalised_number_smaller_than_number_and_" +
-            "has_the_hash": {
-            serviceBuilder: func(ctrl *gomock.Controller) *Service {
-                blockState := NewMockBlockState(ctrl)
-                blockState.EXPECT().IsPaused().Return(false)
-                bestBlockHeader := &types.Header{Number: 3}
-                blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil)
-                blockState.EXPECT().GetHashByNumber(uint(2)).
-                    Return(common.Hash{2}, nil) // other hash than someHash
-                finalisedBlockHeader := &types.Header{Number: 1}
-                blockState.EXPECT().GetHighestFinalisedHeader().Return(finalisedBlockHeader, nil)
-                blockState.EXPECT().HasHeader(block2AnnounceHeader.Hash()).Return(true, nil)
-                return &Service{
-                    blockState: blockState,
-                }
-            },
-            peerID:              somePeer,
-            blockAnnounceHeader: block2AnnounceHeader,
-        },
-        "number_bigger_than_best_block_number_added_in_disjoint_set_with_success": {
-            serviceBuilder: func(ctrl *gomock.Controller) *Service {
-                blockState := NewMockBlockState(ctrl)
-                blockState.EXPECT().IsPaused().Return(false)
-                bestBlockHeader := &types.Header{Number: 1}
-                blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil)
-                chainSyncMock := NewMockChainSync(ctrl)
-
-                expectedAnnouncedBlock := announcedBlock{
-                    who:    somePeer,
-                    header: block2AnnounceHeader,
-                }
-
-                chainSyncMock.EXPECT().onBlockAnnounce(expectedAnnouncedBlock).Return(nil)
-
-                return &Service{
-                    blockState: blockState,
-                    chainSync:  chainSyncMock,
-                }
-            },
-            peerID:              somePeer,
-            blockAnnounceHeader: block2AnnounceHeader,
-        },
-    }
-
-    for name, tt := range testCases {
-        tt := tt
-        t.Run(name, func(t *testing.T) {
-            t.Parallel()
-            ctrl := gomock.NewController(t)
-
-            service := tt.serviceBuilder(ctrl)
-
-            blockAnnounceMessage := &network.BlockAnnounceMessage{
-                ParentHash:     tt.blockAnnounceHeader.ParentHash,
-                Number:         tt.blockAnnounceHeader.Number,
-                StateRoot:      tt.blockAnnounceHeader.StateRoot,
-                ExtrinsicsRoot: tt.blockAnnounceHeader.ExtrinsicsRoot,
-                Digest:         tt.blockAnnounceHeader.Digest,
-                BestBlock:      true,
-            }
-            err := service.HandleBlockAnnounce(tt.peerID, blockAnnounceMessage)
-            assert.ErrorIs(t, err, tt.errWrapped)
-            if tt.errWrapped != nil {
-                assert.EqualError(t, err, tt.errMessage)
-            }
-        })
-    }
-}
-
-func Test_Service_HandleBlockAnnounceHandshake(t *testing.T) {
-    t.Parallel()
-
-    ctrl := gomock.NewController(t)
-    chainSync := NewMockChainSync(ctrl)
-    chainSync.EXPECT().onBlockAnnounceHandshake(peer.ID("peer"), common.Hash{1}, uint(2))
-
-    service := Service{
-        chainSync: chainSync,
-    }
-
-    message := &network.BlockAnnounceHandshake{
-        BestBlockHash:   common.Hash{1},
-        BestBlockNumber: 2,
-    }
-
-    err := service.HandleBlockAnnounceHandshake(peer.ID("peer"), message)
-    require.NoError(t, err)
-}
-
-func TestService_IsSynced(t *testing.T) {
-    t.Parallel()
-
-    testCases := map[string]struct {
-        serviceBuilder func(ctrl *gomock.Controller) Service
-        synced         bool
-    }{
-        "tip": {
-            serviceBuilder: func(ctrl *gomock.Controller) Service {
-                chainSync := NewMockChainSync(ctrl)
-                chainSync.EXPECT().getSyncMode().Return(tip)
-                return Service{
-                    chainSync: chainSync,
-                }
-            },
-            synced: true,
-        },
-        "not_tip": {
-            serviceBuilder: func(ctrl *gomock.Controller) Service {
-                chainSync := NewMockChainSync(ctrl)
-                chainSync.EXPECT().getSyncMode().Return(bootstrap)
-                return Service{
-                    chainSync: chainSync,
-                }
-            },
-        },
-    }
-
-    for name, testCase := range testCases {
-        testCase := testCase
-        t.Run(name, func(t *testing.T) {
-            t.Parallel()
-            ctrl := gomock.NewController(t)
-
-            service := testCase.serviceBuilder(ctrl)
-
-            synced := service.IsSynced()
-
-            assert.Equal(t, testCase.synced, synced)
-        })
-    }
-}
-
-func TestService_Start(t *testing.T) {
-    t.Parallel()
-    ctrl := gomock.NewController(t)
-
-    var allCalled sync.WaitGroup
-
-    chainSync := NewMockChainSync(ctrl)
-    allCalled.Add(1)
-    chainSync.EXPECT().start().DoAndReturn(func() {
-        allCalled.Done()
-    })
-
-    service := Service{
-        chainSync: chainSync,
-    }
-
-    err := service.Start()
-    allCalled.Wait()
-    assert.NoError(t, err)
-}
-
-func TestService_Stop(t *testing.T) {
-    t.Parallel()
-    ctrl := gomock.NewController(t)
-
-    chainSync := NewMockChainSync(ctrl)
-    chainSync.EXPECT().stop()
-    service := &Service{
-        chainSync: chainSync,
-    }
-
-    err := service.Stop()
-    assert.NoError(t, err)
-}
-
-func Test_reverseBlockData(t *testing.T) {
-    t.Parallel()
-
-    type args struct {
-        data []*types.BlockData
-    }
-    tests := []struct {
-        name     string
-        args     args
-        expected args
-    }{
-        {
-            name: "working_example",
-            args: args{data: []*types.BlockData{
-                {
-                    Hash: common.MustHexToHash("0x01"),
-                },
-                {
-                    Hash: common.MustHexToHash("0x02"),
-                }}},
-            expected: args{data: []*types.BlockData{{
-                Hash: common.MustHexToHash("0x02"),
-            }, {
-                Hash: common.MustHexToHash("0x01"),
-            }},
-            },
-        },
-    }
-    for _, tt := range tests {
-        tt := tt
-        t.Run(tt.name, func(t *testing.T) {
-            t.Parallel()
-            reverseBlockData(tt.args.data)
-            assert.Equal(t, tt.expected.data, tt.args.data)
-        })
-    }
-}
-
-func TestService_HighestBlock(t *testing.T) {
-    t.Parallel()
-    ctrl := gomock.NewController(t)
-
-    chainSync := NewMockChainSync(ctrl)
-    chainSync.EXPECT().getHighestBlock().Return(uint(2), nil)
-
-    service := &Service{
-        chainSync: chainSync,
-    }
-    highestBlock := service.HighestBlock()
-    const expected = uint(2)
-    assert.Equal(t, expected, highestBlock)
-}
diff --git a/dot/sync/test_helpers.go b/dot/sync/test_helpers.go
deleted file mode 100644
index 2e5a72664a..0000000000
--- a/dot/sync/test_helpers.go
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2021 ChainSafe Systems (ON)
-// SPDX-License-Identifier: LGPL-3.0-only
-
-package sync
-
-import (
-    "testing"
-    "time"
-
-    "github.com/ChainSafe/gossamer/dot/types"
-    "github.com/ChainSafe/gossamer/lib/transaction"
-    "github.com/ChainSafe/gossamer/pkg/scale"
-    "github.com/stretchr/testify/require"
-)
-
-// BuildBlockRuntime is the runtime interface to interact with
-// blocks and extrinsics.
-type BuildBlockRuntime interface {
-    InitializeBlock(header *types.Header) error
-    FinalizeBlock() (*types.Header, error)
-    InherentExtrinsics(data []byte) ([]byte, error)
-    ApplyExtrinsic(data types.Extrinsic) ([]byte, error)
-    ValidateTransaction(e types.Extrinsic) (*transaction.Validity, error)
-}
-
-// BuildBlock ...
-func BuildBlock(t *testing.T, instance BuildBlockRuntime, parent *types.Header, ext types.Extrinsic) *types.Block {
-    digest := types.NewDigest()
-    prd, err := types.NewBabeSecondaryPlainPreDigest(0, 1).ToPreRuntimeDigest()
-    require.NoError(t, err)
-    err = digest.Add(*prd)
-    require.NoError(t, err)
-    header := &types.Header{
-        ParentHash: parent.Hash(),
-        Number:     parent.Number + 1,
-        Digest:     digest,
-    }
-
-    err = instance.InitializeBlock(header)
-    require.NoError(t, err)
-
-    idata := types.NewInherentData()
-    err = idata.SetInherent(types.Timstap0, uint64(time.Now().Unix()))
-    require.NoError(t, err)
-
-    err = idata.SetInherent(types.Babeslot, uint64(1))
-    require.NoError(t, err)
-
-    ienc, err := idata.Encode()
-    require.NoError(t, err)
-
-    // Call BlockBuilder_inherent_extrinsics which returns the inherents as encoded extrinsics
-    inherentExts, err := instance.InherentExtrinsics(ienc)
-    require.NoError(t, err)
-
-    // decode inherent extrinsics
-    cp := make([]byte, len(inherentExts))
-    copy(cp, inherentExts)
-    var inExts [][]byte
-    err = scale.Unmarshal(cp, &inExts)
-    require.NoError(t, err)
-
-    // apply each inherent extrinsic
-    for _, inherent := range inExts {
-        in, err := scale.Marshal(inherent)
-        require.NoError(t, err)
-
-        ret, err := instance.ApplyExtrinsic(in)
-        require.NoError(t, err)
-        require.Equal(t, ret, []byte{0, 0})
-    }
-
-    body := types.Body(types.BytesArrayToExtrinsics(inExts))
-
-    if ext != nil {
-        // validate and apply extrinsic
-        var ret []byte
-
-        externalExt := types.Extrinsic(append([]byte{byte(types.TxnExternal)}, ext...))
-        _, err = instance.ValidateTransaction(externalExt)
-        require.NoError(t, err)
-
-        ret, err = instance.ApplyExtrinsic(ext)
-        require.NoError(t, err)
-        require.Equal(t, ret, []byte{0, 0})
-
-        body = append(body, ext)
-    }
-
-    res, err := instance.FinalizeBlock()
-    require.NoError(t, err)
-    res.Number = header.Number
-    res.Hash()
-
-    return &types.Block{
-        Header: *res,
-        Body:   body,
-    }
-}
diff --git a/dot/sync/worker.go b/dot/sync/worker.go
deleted file mode 100644
index d63fe8e7dd..0000000000
--- a/dot/sync/worker.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2023 ChainSafe Systems (ON)
-// SPDX-License-Identifier: LGPL-3.0-only
-
-package sync
-
-import (
-    "errors"
-    "sync"
-
-    "github.com/ChainSafe/gossamer/dot/network"
-    "github.com/libp2p/go-libp2p/core/peer"
-)
-
-// ErrStopTimeout is an error indicating that the worker stop operation timed out.
-var ErrStopTimeout = errors.New("stop timeout")
-
-// worker represents a worker that processes sync tasks by making network requests to peers.
-// It manages the synchronisation tasks between nodes in Polkadot's peer-to-peer network.
-// The primary goal of the worker is to handle and coordinate tasks related to network requests,
-// ensuring that nodes stay synchronised with the blockchain state
-type worker struct {
-    // Status of the worker (e.g., available, busy, etc.)
-    status byte
-
-    // ID of the peer this worker is associated with
-    peerID peer.ID
-
-    // Channel used as a semaphore to limit concurrent tasks. By making the channel buffered with some size,
-    // the creator of the channel can control how many workers can work concurrently and send requests.
-    sharedGuard chan struct{}
-
-    // Interface to make network requests
-    requestMaker network.RequestMaker
-}
-
-// newWorker creates and returns a new worker instance.
-func newWorker(pID peer.ID, sharedGuard chan struct{}, network network.RequestMaker) *worker {
-    return &worker{
-        peerID:       pID,
-        sharedGuard:  sharedGuard,
-        requestMaker: network,
-        status:       available,
-    }
-}
-
-// run starts the worker to process tasks from the queue.
-// queue: Channel from which the worker receives tasks
-// wg: WaitGroup to signal when the worker has finished processing
-func (w *worker) run(queue chan *syncTask, wg *sync.WaitGroup) {
-    defer func() {
-        logger.Debugf("[STOPPED] worker %s", w.peerID)
-        wg.Done()
-    }()
-
-    for task := range queue {
-        executeRequest(w.peerID, w.requestMaker, task, w.sharedGuard)
-    }
-}
-
-// executeRequest processes a sync task by making a network request to a peer.
-// who: ID of the peer making the request
-// requestMaker: Interface to make the network request
-// task: Sync task to be processed
-// sharedGuard: Channel used for concurrency control
-func executeRequest(who peer.ID, requestMaker network.RequestMaker,
-    task *syncTask, sharedGuard chan struct{}) {
-    defer func() {
-        <-sharedGuard // Release the semaphore slot after the request is processed
-    }()
-
-    sharedGuard <- struct{}{} // Acquire a semaphore slot before starting the request
-
-    request := task.request
-    logger.Debugf("[EXECUTING] worker %s, block request: %s\n", who, request)
-    response := new(network.BlockResponseMessage)
-    err := requestMaker.Do(who, request, response)
-
-    task.resultCh <- &syncTaskResult{
-        who:      who,
-        request:  request,
-        response: response,
-        err:      err,
-    }
-
-    logger.Debugf("[FINISHED] worker %s, err: %s, block data amount: %d", who, err, len(response.BlockData))
-}
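The deleted worker relies on a buffered channel used as a counting semaphore: executeRequest acquires a slot before the network call and releases it when done, so the channel's capacity bounds how many requests are in flight. A minimal standalone sketch of that pattern (capacity, loop count and sleep are arbitrary illustration values):

    package main

    import (
        "fmt"
        "sync"
        "time"
    )

    func main() {
        sharedGuard := make(chan struct{}, 2) // at most 2 requests in flight
        var wg sync.WaitGroup

        for i := 0; i < 5; i++ {
            wg.Add(1)
            go func(id int) {
                defer wg.Done()
                sharedGuard <- struct{}{}        // acquire a slot
                defer func() { <-sharedGuard }() // release it when done
                fmt.Println("worker", id, "executing")
                time.Sleep(100 * time.Millisecond) // stands in for the network request
            }(i)
        }
        wg.Wait()
    }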
while finishing workers") - case <-allWorkersDoneCh: - if !timeoutTimer.Stop() { - <-timeoutTimer.C - } - - return nil - } -} - -// useConnectedPeers will retrieve all connected peers -// through the network layer and use them as sources of blocks -func (s *syncWorkerPool) useConnectedPeers() { - connectedPeers := s.network.AllConnectedPeersIDs() - if len(connectedPeers) < 1 { - return - } - - s.mtx.Lock() - defer s.mtx.Unlock() - for _, connectedPeer := range connectedPeers { - if _, shouldIgnore := s.ignorePeers[connectedPeer]; !shouldIgnore { - s.newPeer(connectedPeer) - } - } -} - -func (s *syncWorkerPool) fromBlockAnnounce(who peer.ID) { - s.mtx.Lock() - defer s.mtx.Unlock() - s.newPeer(who) -} - -// newPeer a new peer will be included in the worker -// pool if it is not a peer to ignore or is not punished -func (s *syncWorkerPool) newPeer(who peer.ID) { - if _, ok := s.ignorePeers[who]; ok { - return - } - - _, has := s.workers[who] - if has { - return - } - - worker := newWorker(who, s.sharedGuard, s.requestMaker) - workerQueue := make(chan *syncTask, maxRequestsAllowed) - - s.wg.Add(1) - go worker.run(workerQueue, &s.wg) - - s.workers[who] = &syncWorker{ - worker: worker, - queue: workerQueue, - } - logger.Tracef("potential worker added, total in the pool %d", len(s.workers)) -} - -// submitRequest given a request, the worker pool will get the peer given the peer.ID -// parameter or if nil the very first available worker or -// to perform the request, the response will be dispatch in the resultCh. -func (s *syncWorkerPool) submitRequest(request *network.BlockRequestMessage, - who *peer.ID, resultCh chan<- *syncTaskResult) { - - task := &syncTask{ - request: request, - resultCh: resultCh, - } - - // if the request is bounded to a specific peer then just - // request it and sent through its queue otherwise send - // the request in the general queue where all worker are - // listening on - s.mtx.RLock() - defer s.mtx.RUnlock() - - if who != nil { - syncWorker, inMap := s.workers[*who] - if inMap { - if syncWorker == nil { - panic("sync worker should not be nil") - } - syncWorker.queue <- task - return - } - } - - // if the exact peer is not specified then - // randomly select a worker and assign the - // task to it, if the amount of workers is - var selectedWorkerIdx int - workers := maps.Values(s.workers) - nBig, err := rand.Int(rand.Reader, big.NewInt(int64(len(workers)))) - if err != nil { - panic(fmt.Errorf("fail to get a random number: %w", err)) - } - selectedWorkerIdx = int(nBig.Int64()) - selectedWorker := workers[selectedWorkerIdx] - selectedWorker.queue <- task -} - -// submitRequests takes an set of requests and will submit to the pool through submitRequest -// the response will be dispatch in the resultCh -func (s *syncWorkerPool) submitRequests(requests []*network.BlockRequestMessage) (resultCh chan *syncTaskResult) { - resultCh = make(chan *syncTaskResult, maxRequestsAllowed+1) - - s.mtx.RLock() - defer s.mtx.RUnlock() - - allWorkers := maps.Values(s.workers) - for idx, request := range requests { - workerID := idx % len(allWorkers) - syncWorker := allWorkers[workerID] - - syncWorker.queue <- &syncTask{ - request: request, - resultCh: resultCh, - } - } - - return resultCh -} - -func (s *syncWorkerPool) ignorePeerAsWorker(who peer.ID) { - s.mtx.Lock() - defer s.mtx.Unlock() - - worker, has := s.workers[who] - if has { - close(worker.queue) - delete(s.workers, who) - s.ignorePeers[who] = struct{}{} - } -} - -// totalWorkers only returns available or busy workers -func (s 
*syncWorkerPool) totalWorkers() (total uint) { - s.mtx.RLock() - defer s.mtx.RUnlock() - - for range s.workers { - total++ - } - - return total -} diff --git a/dot/sync/worker_pool_test.go b/dot/sync/worker_pool_test.go deleted file mode 100644 index a49bc7a575..0000000000 --- a/dot/sync/worker_pool_test.go +++ /dev/null @@ -1,247 +0,0 @@ -// Copyright 2023 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "testing" - "time" - - "github.com/ChainSafe/gossamer/dot/network" - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/common" - "github.com/ChainSafe/gossamer/lib/common/variadic" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/stretchr/testify/require" - "go.uber.org/mock/gomock" - "golang.org/x/exp/maps" -) - -func TestSyncWorkerPool_useConnectedPeers(t *testing.T) { - t.Parallel() - cases := map[string]struct { - setupWorkerPool func(t *testing.T) *syncWorkerPool - exepectedWorkers []peer.ID - }{ - "no_connected_peers": { - setupWorkerPool: func(t *testing.T) *syncWorkerPool { - ctrl := gomock.NewController(t) - networkMock := NewMockNetwork(ctrl) - networkMock.EXPECT(). - AllConnectedPeersIDs(). - Return([]peer.ID{}) - - return newSyncWorkerPool(networkMock, nil) - }, - exepectedWorkers: []peer.ID{}, - }, - "3_available_peers": { - setupWorkerPool: func(t *testing.T) *syncWorkerPool { - ctrl := gomock.NewController(t) - networkMock := NewMockNetwork(ctrl) - networkMock.EXPECT(). - AllConnectedPeersIDs(). - Return([]peer.ID{ - peer.ID("available-1"), - peer.ID("available-2"), - peer.ID("available-3"), - }) - return newSyncWorkerPool(networkMock, nil) - }, - exepectedWorkers: []peer.ID{ - peer.ID("available-1"), - peer.ID("available-2"), - peer.ID("available-3"), - }, - }, - "2_available_peers_1_to_ignore": { - setupWorkerPool: func(t *testing.T) *syncWorkerPool { - ctrl := gomock.NewController(t) - networkMock := NewMockNetwork(ctrl) - networkMock.EXPECT(). - AllConnectedPeersIDs(). - Return([]peer.ID{ - peer.ID("available-1"), - peer.ID("available-2"), - peer.ID("available-3"), - }) - workerPool := newSyncWorkerPool(networkMock, nil) - workerPool.ignorePeers[peer.ID("available-3")] = struct{}{} - return workerPool - }, - exepectedWorkers: []peer.ID{ - peer.ID("available-1"), - peer.ID("available-2"), - }, - }, - "peer_already_in_workers_set": { - setupWorkerPool: func(t *testing.T) *syncWorkerPool { - ctrl := gomock.NewController(t) - networkMock := NewMockNetwork(ctrl) - networkMock.EXPECT(). - AllConnectedPeersIDs(). 
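submitRequests above spreads a batch of requests over the current workers in round-robin order, using idx % len(workers). A stripped-down sketch of that assignment with simplified types (the function name and string payloads are illustrative only):

    // assign distributes requests over worker queues in round-robin order,
    // the same idx % len(queues) scheme used by the deleted submitRequests.
    func assign(requests []string, queues []chan string) {
        for idx, req := range requests {
            queues[idx%len(queues)] <- req
        }
    }

With two queues and five requests, queue 0 receives requests 0, 2 and 4 while queue 1 receives requests 1 and 3; a single queue simply receives everything in order, which is the situation exercised by the single-worker test below.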
diff --git a/dot/sync/worker_pool_test.go b/dot/sync/worker_pool_test.go
deleted file mode 100644
index a49bc7a575..0000000000
--- a/dot/sync/worker_pool_test.go
+++ /dev/null
@@ -1,247 +0,0 @@
-// Copyright 2023 ChainSafe Systems (ON)
-// SPDX-License-Identifier: LGPL-3.0-only
-
-package sync
-
-import (
-    "testing"
-    "time"
-
-    "github.com/ChainSafe/gossamer/dot/network"
-    "github.com/ChainSafe/gossamer/dot/types"
-    "github.com/ChainSafe/gossamer/lib/common"
-    "github.com/ChainSafe/gossamer/lib/common/variadic"
-    "github.com/libp2p/go-libp2p/core/peer"
-    "github.com/stretchr/testify/require"
-    "go.uber.org/mock/gomock"
-    "golang.org/x/exp/maps"
-)
-
-func TestSyncWorkerPool_useConnectedPeers(t *testing.T) {
-    t.Parallel()
-    cases := map[string]struct {
-        setupWorkerPool func(t *testing.T) *syncWorkerPool
-        expectedWorkers []peer.ID
-    }{
-        "no_connected_peers": {
-            setupWorkerPool: func(t *testing.T) *syncWorkerPool {
-                ctrl := gomock.NewController(t)
-                networkMock := NewMockNetwork(ctrl)
-                networkMock.EXPECT().
-                    AllConnectedPeersIDs().
-                    Return([]peer.ID{})
-
-                return newSyncWorkerPool(networkMock, nil)
-            },
-            expectedWorkers: []peer.ID{},
-        },
-        "3_available_peers": {
-            setupWorkerPool: func(t *testing.T) *syncWorkerPool {
-                ctrl := gomock.NewController(t)
-                networkMock := NewMockNetwork(ctrl)
-                networkMock.EXPECT().
-                    AllConnectedPeersIDs().
-                    Return([]peer.ID{
-                        peer.ID("available-1"),
-                        peer.ID("available-2"),
-                        peer.ID("available-3"),
-                    })
-                return newSyncWorkerPool(networkMock, nil)
-            },
-            expectedWorkers: []peer.ID{
-                peer.ID("available-1"),
-                peer.ID("available-2"),
-                peer.ID("available-3"),
-            },
-        },
-        "2_available_peers_1_to_ignore": {
-            setupWorkerPool: func(t *testing.T) *syncWorkerPool {
-                ctrl := gomock.NewController(t)
-                networkMock := NewMockNetwork(ctrl)
-                networkMock.EXPECT().
-                    AllConnectedPeersIDs().
-                    Return([]peer.ID{
-                        peer.ID("available-1"),
-                        peer.ID("available-2"),
-                        peer.ID("available-3"),
-                    })
-                workerPool := newSyncWorkerPool(networkMock, nil)
-                workerPool.ignorePeers[peer.ID("available-3")] = struct{}{}
-                return workerPool
-            },
-            expectedWorkers: []peer.ID{
-                peer.ID("available-1"),
-                peer.ID("available-2"),
-            },
-        },
-        "peer_already_in_workers_set": {
-            setupWorkerPool: func(t *testing.T) *syncWorkerPool {
-                ctrl := gomock.NewController(t)
-                networkMock := NewMockNetwork(ctrl)
-                networkMock.EXPECT().
-                    AllConnectedPeersIDs().
-                    Return([]peer.ID{
-                        peer.ID("available-1"),
-                        peer.ID("available-2"),
-                        peer.ID("available-3"),
-                    })
-                workerPool := newSyncWorkerPool(networkMock, nil)
-                syncWorker := &syncWorker{
-                    worker: &worker{},
-                    queue:  make(chan *syncTask),
-                }
-                workerPool.workers[peer.ID("available-3")] = syncWorker
-                return workerPool
-            },
-            expectedWorkers: []peer.ID{
-                peer.ID("available-1"),
-                peer.ID("available-2"),
-                peer.ID("available-3"),
-            },
-        },
-    }
-
-    for tname, tt := range cases {
-        tt := tt
-        t.Run(tname, func(t *testing.T) {
-            t.Parallel()
-
-            workerPool := tt.setupWorkerPool(t)
-            workerPool.useConnectedPeers()
-            defer workerPool.stop()
-
-            require.ElementsMatch(t,
-                maps.Keys(workerPool.workers),
-                tt.expectedWorkers)
-        })
-    }
-}
-
-func TestSyncWorkerPool_listenForRequests_submitRequest(t *testing.T) {
-    t.Parallel()
-
-    ctrl := gomock.NewController(t)
-    networkMock := NewMockNetwork(ctrl)
-    requestMakerMock := NewMockRequestMaker(ctrl)
-    workerPool := newSyncWorkerPool(networkMock, requestMakerMock)
-
-    availablePeer := peer.ID("available-peer")
-    workerPool.newPeer(availablePeer)
-    defer workerPool.stop()
-
-    blockHash := common.MustHexToHash("0x750646b852a29e5f3668959916a03d6243a3137e91d0cd36870364931030f707")
-    blockRequest := network.NewBlockRequest(*variadic.MustNewUint32OrHash(blockHash),
-        1, network.BootstrapRequestData, network.Descending)
-    mockedBlockResponse := &network.BlockResponseMessage{
-        BlockData: []*types.BlockData{
-            {
-                Hash: blockHash,
-                Header: &types.Header{
-                    ParentHash: common.
-                        MustHexToHash("0x5895897f12e1a670609929433ac7a69dcae90e0cc2d9c32c0dce0e2a5e5e614e"),
-                },
-            },
-        },
-    }
-
-    // mock the request maker to write the mocked
-    // block response into the provided response
-    requestMakerMock.EXPECT().
-        Do(availablePeer, blockRequest, &network.BlockResponseMessage{}).
-        DoAndReturn(func(_, _, response any) any {
-            responsePtr := response.(*network.BlockResponseMessage)
-            *responsePtr = *mockedBlockResponse
-            return nil
-        })
-
-    resultCh := make(chan *syncTaskResult)
-    workerPool.submitRequest(blockRequest, nil, resultCh)
-
-    syncTaskResult := <-resultCh
-    require.NoError(t, syncTaskResult.err)
-    require.Equal(t, syncTaskResult.who, availablePeer)
-    require.Equal(t, syncTaskResult.request, blockRequest)
-    require.Equal(t, syncTaskResult.response, mockedBlockResponse)
-
-}
-
-func TestSyncWorkerPool_singleWorker_multipleRequests(t *testing.T) {
-    t.Parallel()
-
-    ctrl := gomock.NewController(t)
-    networkMock := NewMockNetwork(ctrl)
-    requestMakerMock := NewMockRequestMaker(ctrl)
-    workerPool := newSyncWorkerPool(networkMock, requestMakerMock)
-    defer workerPool.stop()
-
-    availablePeer := peer.ID("available-peer")
-    workerPool.newPeer(availablePeer)
-
-    firstRequestBlockHash := common.MustHexToHash("0x750646b852a29e5f3668959916a03d6243a3137e91d0cd36870364931030f707")
-    firstBlockRequest := network.NewBlockRequest(*variadic.MustNewUint32OrHash(firstRequestBlockHash),
-        1, network.BootstrapRequestData, network.Descending)
-
-    secondRequestBlockHash := common.MustHexToHash("0x897646b852a29e5f3668959916a03d6243a3137e91d0cd36870364931030f707")
-    secondBlockRequest := network.NewBlockRequest(*variadic.MustNewUint32OrHash(secondRequestBlockHash),
-        1, network.BootstrapRequestData, network.Descending)
-
-    firstMockedBlockResponse := &network.BlockResponseMessage{
-        BlockData: []*types.BlockData{
-            {
-                Hash: firstRequestBlockHash,
-                Header: &types.Header{
-                    ParentHash: common.
-                        MustHexToHash("0x5895897f12e1a670609929433ac7a69dcae90e0cc2d9c32c0dce0e2a5e5e614e"),
-                },
-            },
-        },
-    }
-
-    secondMockedBlockResponse := &network.BlockResponseMessage{
-        BlockData: []*types.BlockData{
-            {
-                Hash: secondRequestBlockHash,
-                Header: &types.Header{
-                    ParentHash: common.
-                        MustHexToHash("0x8965897f12e1a670609929433ac7a69dcae90e0cc2d9c32c0dce0e2a5e5e614e"),
-                },
-            },
-        },
-    }
-
-    // introduce a timeout of 5s in the first response
-    // so we can simulate a busy peer
-    requestMakerMock.EXPECT().
-        Do(availablePeer, firstBlockRequest, &network.BlockResponseMessage{}).
-        DoAndReturn(func(_, _, response any) any {
-            time.Sleep(5 * time.Second)
-            responsePtr := response.(*network.BlockResponseMessage)
-            *responsePtr = *firstMockedBlockResponse
-            return nil
-        })
-
-    requestMakerMock.EXPECT().
-        Do(availablePeer, secondBlockRequest, &network.BlockResponseMessage{}).
-        DoAndReturn(func(_, _, response any) any {
-            responsePtr := response.(*network.BlockResponseMessage)
-            *responsePtr = *secondMockedBlockResponse
-            return nil
-        })
-
-    resultCh := workerPool.submitRequests(
-        []*network.BlockRequestMessage{firstBlockRequest, secondBlockRequest})
-
-    syncTaskResult := <-resultCh
-    require.NoError(t, syncTaskResult.err)
-    require.Equal(t, syncTaskResult.who, availablePeer)
-    require.Equal(t, syncTaskResult.request, firstBlockRequest)
-    require.Equal(t, syncTaskResult.response, firstMockedBlockResponse)
-
-    syncTaskResult = <-resultCh
-    require.NoError(t, syncTaskResult.err)
-    require.Equal(t, syncTaskResult.who, availablePeer)
-    require.Equal(t, syncTaskResult.request, secondBlockRequest)
-    require.Equal(t, syncTaskResult.response, secondMockedBlockResponse)
-
-    require.Equal(t, uint(1), workerPool.totalWorkers())
-}
diff --git a/dot/sync/worker_test.go b/dot/sync/worker_test.go
deleted file mode 100644
index e0318dcce8..0000000000
--- a/dot/sync/worker_test.go
+++ /dev/null
@@ -1,126 +0,0 @@
-// Copyright 2023 ChainSafe Systems (ON)
-// SPDX-License-Identifier: LGPL-3.0-only
-
-package sync
-
-import (
-    "sort"
-    "sync"
-    "testing"
-    "time"
-
-    "github.com/ChainSafe/gossamer/dot/network"
-    "github.com/libp2p/go-libp2p/core/peer"
-    "github.com/stretchr/testify/require"
-    "go.uber.org/mock/gomock"
-)
-
-func TestWorker(t *testing.T) {
-    peerA := peer.ID("peerA")
-    peerB := peer.ID("peerB")
-
-    ctrl := gomock.NewController(t)
-    m := uint32(60)
-    blockReq := &network.BlockRequestMessage{
-        RequestedData: 1,
-        Direction:     3,
-        Max:           &m,
-    }
-
-    // acquireOrFail is a test channel used to
-    // ensure the shared guard is working properly;
-    // it should have the same len as the shared guard
-    acquireOrFail := make(chan struct{}, 1)
-
-    reqMaker := NewMockRequestMaker(ctrl)
-    // define a mock expectation to peerA
-    reqMaker.EXPECT().
-        Do(peerA, blockReq, gomock.AssignableToTypeOf((*network.BlockResponseMessage)(nil))).
-        DoAndReturn(func(_, _, _ any) any {
-            select {
-            case acquireOrFail <- struct{}{}:
-                defer func() {
-                    <-acquireOrFail // release once it finishes
-                }()
-            default:
-                t.Errorf("should acquire the channel, otherwise the shared guard is not working")
-            }
-            time.Sleep(2 * time.Second)
-            return nil
-        }).
-        Return(nil)
-
-    // define a mock expectation to peerB
-    reqMaker.EXPECT().
-        Do(peerB, blockReq, gomock.AssignableToTypeOf((*network.BlockResponseMessage)(nil))).
-        DoAndReturn(func(_, _, _ any) any {
-            select {
-            case acquireOrFail <- struct{}{}:
-                defer func() {
-                    <-acquireOrFail // release once it finishes
-                }()
-            default:
-                t.Errorf("should acquire the channel, otherwise the shared guard is not working")
-            }
-            time.Sleep(2 * time.Second)
-            return nil
-        }).
-        Return(nil)
-
-    sharedGuard := make(chan struct{}, 1)
-
-    // instantiate the workers
-    fstWorker := newWorker(peerA, sharedGuard, reqMaker)
-    sndWorker := newWorker(peerB, sharedGuard, reqMaker)
-
-    wg := sync.WaitGroup{}
-    queue := make(chan *syncTask, 2)
-
-    // run two workers, but they shouldn't work concurrently,
-    // because sharedGuard is a buffered channel with capacity 1
-    wg.Add(2)
-    go fstWorker.run(queue, &wg)
-    go sndWorker.run(queue, &wg)
-
-    resultCh := make(chan *syncTaskResult)
-    queue <- &syncTask{
-        request:  blockReq,
-        resultCh: resultCh,
-    }
-    queue <- &syncTask{
-        request:  blockReq,
-        resultCh: resultCh,
-    }
-
-    // we are waiting 500 ms to guarantee that workers had time to read sync tasks from the queue
-    // and send the request. With this assertion we can be sure that even though we start 2 workers
-    // only one of them is working and sent a request
-    time.Sleep(500 * time.Millisecond)
-    require.Equal(t, 1, len(sharedGuard))
-
-    var actual []*syncTaskResult
-    result := <-resultCh
-    actual = append(actual, result)
-
-    time.Sleep(500 * time.Millisecond)
-    require.Equal(t, 1, len(sharedGuard))
-
-    result = <-resultCh
-    actual = append(actual, result)
-
-    expected := []*syncTaskResult{
-        {who: peerA, request: blockReq, response: new(network.BlockResponseMessage)},
-        {who: peerB, request: blockReq, response: new(network.BlockResponseMessage)},
-    }
-
-    sort.Slice(actual, func(i, j int) bool {
-        return actual[i].who < actual[j].who
-    })
-
-    require.Equal(t, expected, actual)
-
-    close(queue)
-    wg.Wait()
-
-    require.Equal(t, 0, len(sharedGuard)) // check that workers release lock
-}

From 959769ca54c417daca1d6c70c78e0e6abe564359 Mon Sep 17 00:00:00 2001
From: EclesioMeloJunior
Date: Wed, 7 Aug 2024 08:56:33 -0400
Subject: [PATCH 16/74] chore: create `unready_blocks.go`

---
 lib/sync/fullsync.go              | 72 ++--------------------
 lib/sync/fullsync_handle_block.go |  4 +-
 lib/sync/fullsync_test.go         |  3 ++
 lib/sync/peer_view.go             |  3 ++
 lib/sync/request_queue.go         |  3 ++
 lib/sync/service.go               |  3 ++
 lib/sync/service_test.go          |  3 ++
 lib/sync/unready_blocks.go        | 79 +++++++++++++++++++++++++++
 8 files changed, 100 insertions(+), 70 deletions(-)
 create mode 100644 lib/sync/unready_blocks.go

diff --git a/lib/sync/fullsync.go b/lib/sync/fullsync.go
index 74ef7bdb2f..325b447483 100644
--- a/lib/sync/fullsync.go
+++ b/lib/sync/fullsync.go
@@ -1,3 +1,6 @@
+// Copyright 2024 ChainSafe Systems (ON)
+// SPDX-License-Identifier: LGPL-3.0-only
+
 package sync
 
 import (
@@ -5,7 +8,6 @@ import (
     "errors"
     "fmt"
     "slices"
-    "sync"
     "time"
 
     "github.com/ChainSafe/gossamer/dot/network"
@@ -53,74 +55,6 @@ type FullSyncConfig struct {
     RequestMaker network.RequestMaker
 }
 
-type unreadyBlocks struct {
-    mu               sync.Mutex
-    incompleteBlocks map[common.Hash]*types.BlockData
-    disjointChains   [][]*types.BlockData
-}
-
-func (u *unreadyBlocks) newHeader(blockHeader *types.Header) {
-    u.mu.Lock()
-    defer u.mu.Unlock()
-
-    blockHash := blockHeader.Hash()
-    u.incompleteBlocks[blockHash] = &types.BlockData{
-        Hash:   blockHash,
-        Header: blockHeader,
-    }
-}
-
-func (u *unreadyBlocks) newFragment(frag []*types.BlockData) {
-    u.mu.Lock()
-    defer u.mu.Unlock()
-
-    u.disjointChains = append(u.disjointChains, frag)
-}
-
-func (u *unreadyBlocks) updateDisjointFragments(chain []*types.BlockData) ([]*types.BlockData, bool) {
-    u.mu.Lock()
-    defer u.mu.Unlock()
-
-    indexToChange := -1
-    for idx, disjointChain := range u.disjointChains {
-        lastBlockArriving := chain[len(chain)-1]
-        firstDisjointBlock := disjointChain[0]
-        if formsSequence(lastBlockArriving, firstDisjointBlock) {
-            indexToChange = idx
-            break
-        }
-    }
-
-    if indexToChange >= 0 {
-        disjointChain := u.disjointChains[indexToChange]
-        u.disjointChains = append(u.disjointChains[:indexToChange], u.disjointChains[indexToChange+1:]...)
-        return append(chain, disjointChain...), true
-    }
-
-    return nil, false
-}
-
-func (u *unreadyBlocks) updateIncompleteBlocks(chain []*types.BlockData) []*types.BlockData {
-    u.mu.Lock()
-    defer u.mu.Unlock()
-
-    completeBlocks := make([]*types.BlockData, 0)
-    for _, blockData := range chain {
-        incomplete, ok := u.incompleteBlocks[blockData.Hash]
-        if !ok {
-            continue
-        }
-
-        incomplete.Body = blockData.Body
-        incomplete.Justification = blockData.Justification
-
-        delete(u.incompleteBlocks, blockData.Hash)
-        completeBlocks = append(completeBlocks, incomplete)
-    }
-
-    return completeBlocks
-}
-
 type Importer interface {
     handle(*types.BlockData, BlockOrigin) (imported bool, err error)
 }
diff --git a/lib/sync/fullsync_handle_block.go b/lib/sync/fullsync_handle_block.go
index 4db4b25429..1d1d6f38f3 100644
--- a/lib/sync/fullsync_handle_block.go
+++ b/lib/sync/fullsync_handle_block.go
@@ -1,3 +1,6 @@
+// Copyright 2024 ChainSafe Systems (ON)
+// SPDX-License-Identifier: LGPL-3.0-only
+
 package sync
 
 import (
@@ -80,7 +83,6 @@ func (b *blockImporter) handle(bd *types.BlockData, origin BlockOrigin) (importe
     err = b.processBlockData(*bd, origin)
     if err != nil {
-        // depending on the error, we might want to save this block for later
         logger.Errorf("processing block #%d (%s) failed: %s", bd.Header.Number, bd.Hash, err)
         return false, err
     }
diff --git a/lib/sync/fullsync_test.go b/lib/sync/fullsync_test.go
index c382b6deb0..e89843dfa0 100644
--- a/lib/sync/fullsync_test.go
+++ b/lib/sync/fullsync_test.go
@@ -1,3 +1,6 @@
+// Copyright 2024 ChainSafe Systems (ON)
+// SPDX-License-Identifier: LGPL-3.0-only
+
 package sync
 
 import (
diff --git a/lib/sync/peer_view.go b/lib/sync/peer_view.go
index 628fba4ff6..9c66454bb4 100644
--- a/lib/sync/peer_view.go
+++ b/lib/sync/peer_view.go
@@ -1,3 +1,6 @@
+// Copyright 2024 ChainSafe Systems (ON)
+// SPDX-License-Identifier: LGPL-3.0-only
+
 package sync
 
 import (
diff --git a/lib/sync/request_queue.go b/lib/sync/request_queue.go
index a483906f14..85a387c4fb 100644
--- a/lib/sync/request_queue.go
+++ b/lib/sync/request_queue.go
@@ -1,3 +1,6 @@
+// Copyright 2024 ChainSafe Systems (ON)
+// SPDX-License-Identifier: LGPL-3.0-only
+
 package sync
 
 import (
diff --git a/lib/sync/service.go b/lib/sync/service.go
index aa869376f6..657e0cf2a1 100644
--- a/lib/sync/service.go
+++ b/lib/sync/service.go
@@ -1,3 +1,6 @@
+// Copyright 2024 ChainSafe Systems (ON)
+// SPDX-License-Identifier: LGPL-3.0-only
+
 package sync
 
 import (
diff --git a/lib/sync/service_test.go b/lib/sync/service_test.go
index 25b8cf9817..fb555613eb 100644
--- a/lib/sync/service_test.go
+++ b/lib/sync/service_test.go
@@ -1,3 +1,6 @@
+// Copyright 2024 ChainSafe Systems (ON)
+// SPDX-License-Identifier: LGPL-3.0-only
+
 package sync
 
 import "testing"
ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package sync + +import ( + "sync" + + "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/lib/common" +) + +type unreadyBlocks struct { + mu sync.Mutex + incompleteBlocks map[common.Hash]*types.BlockData + disjointChains [][]*types.BlockData +} + +func (u *unreadyBlocks) newHeader(blockHeader *types.Header) { + u.mu.Lock() + defer u.mu.Unlock() + + blockHash := blockHeader.Hash() + u.incompleteBlocks[blockHash] = &types.BlockData{ + Hash: blockHash, + Header: blockHeader, + } +} + +func (u *unreadyBlocks) newFragment(frag []*types.BlockData) { + u.mu.Lock() + defer u.mu.Unlock() + + u.disjointChains = append(u.disjointChains, frag) +} + +func (u *unreadyBlocks) updateDisjointFragments(chain []*types.BlockData) ([]*types.BlockData, bool) { + u.mu.Lock() + defer u.mu.Unlock() + + indexToChange := -1 + for idx, disjointChain := range u.disjointChains { + lastBlockArriving := chain[len(chain)-1] + firstDisjointBlock := disjointChain[0] + if formsSequence(lastBlockArriving, firstDisjointBlock) { + indexToChange = idx + break + } + } + + if indexToChange >= 0 { + disjointChain := u.disjointChains[indexToChange] + u.disjointChains = append(u.disjointChains[:indexToChange], u.disjointChains[indexToChange+1:]...) + return append(chain, disjointChain...), true + } + + return nil, false +} + +func (u *unreadyBlocks) updateIncompleteBlocks(chain []*types.BlockData) []*types.BlockData { + u.mu.Lock() + defer u.mu.Unlock() + + completeBlocks := make([]*types.BlockData, 0) + for _, blockData := range chain { + incomplete, ok := u.incompleteBlocks[blockData.Hash] + if !ok { + continue + } + + incomplete.Body = blockData.Body + incomplete.Justification = blockData.Justification + + delete(u.incompleteBlocks, blockData.Hash) + completeBlocks = append(completeBlocks, incomplete) + } + + return completeBlocks +} From b950cda8d586eb1552b70e07e60d0d4e9fd7cf82 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 7 Aug 2024 09:46:26 -0400 Subject: [PATCH 17/74] chore: fix lint --- lib/sync/fullsync.go | 8 ++++---- lib/sync/service.go | 7 +------ lib/sync/worker_pool.go | 4 ++-- 3 files changed, 7 insertions(+), 12 deletions(-) diff --git a/lib/sync/fullsync.go b/lib/sync/fullsync.go index 325b447483..26455aaff9 100644 --- a/lib/sync/fullsync.go +++ b/lib/sync/fullsync.go @@ -298,8 +298,8 @@ func (f *FullSyncStrategy) OnBlockAnnounce(from peer.ID, msg *network.BlockAnnou } if msg.BestBlock { - peerView := f.peers.get(from) - if uint(peerView.bestBlockNumber) != msg.Number { + pv := f.peers.get(from) + if uint(pv.bestBlockNumber) != msg.Number { repChange = &Change{ who: from, rep: peerset.ReputationChange{ @@ -308,7 +308,7 @@ func (f *FullSyncStrategy) OnBlockAnnounce(from peer.ID, msg *network.BlockAnnou }, } return repChange, fmt.Errorf("%w: peer %s, on handshake #%d, on announce #%d", - errMismatchBestBlockAnnouncement, from, peerView.bestBlockNumber, msg.Number) + errMismatchBestBlockAnnouncement, from, pv.bestBlockNumber, msg.Number) } } @@ -452,7 +452,7 @@ resultLoop: // sortFragmentsOfChain will organize the fragments // in a way we can import the older blocks first also guaranting that -// forks can be imported by organizing them to be after the main chain +// forks can be imported by organising them to be after the main chain // // e.g: consider the following fragment of chains // [ {17} {1, 2, 3, 4, 5} {6, 7, 8, 9, 10} {8} {11, 12, 13, 14, 15, 16} ]
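To make the fragment ordering described in the comment above concrete, the idea reduces to the following minimal, self-contained Go sketch. The blockData type here is a simplified stand-in for the real *types.BlockData (an assumption made so the snippet compiles on its own); the series' sortFragmentsOfChain compares the Header.Number of each fragment's first block in the same way.

// Illustrative sketch only; blockData stands in for *types.BlockData.
package main

import (
	"fmt"
	"slices"
)

type blockData struct{ number uint }

// sortFragments orders fragments by their first block number, so the main
// chain fragments are imported first and a fork fragment such as {8} lands
// right after the fragment that covers its height.
func sortFragments(fragments [][]blockData) [][]blockData {
	slices.SortFunc(fragments, func(a, b []blockData) int {
		switch {
		case a[0].number < b[0].number:
			return -1
		case a[0].number > b[0].number:
			return 1
		default:
			return 0
		}
	})
	return fragments
}

func main() {
	fragments := [][]blockData{
		{{17}},
		{{1}, {2}, {3}, {4}, {5}},
		{{6}, {7}, {8}, {9}, {10}},
		{{8}},
		{{11}, {12}, {13}, {14}, {15}, {16}},
	}
	for _, frag := range sortFragments(fragments) {
		fmt.Printf("fragment starting at #%d\n", frag[0].number)
	}
	// Prints fragments starting at #1, #6, #8, #11 and #17,
	// matching the ordering in the doc comment above.
}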
diff --git a/lib/sync/service.go b/lib/sync/service.go index 657e0cf2a1..a6fc98a363 100644 --- a/lib/sync/service.go +++ b/lib/sync/service.go @@ -228,12 +228,7 @@ func (s *SyncService) runSyncEngine() { continue } - results, err := s.workerPool.submitRequests(tasks) - if err != nil { - logger.Criticalf("getting highest finalized header: %w", err) - return - } - + results := s.workerPool.submitRequests(tasks) done, repChanges, peersToIgnore, err := s.currentStrategy.IsFinished(results) if err != nil { logger.Criticalf("current sync strategy failed with: %s", err.Error()) diff --git a/lib/sync/worker_pool.go b/lib/sync/worker_pool.go index 87ace80819..fbfe144cc9 100644 --- a/lib/sync/worker_pool.go +++ b/lib/sync/worker_pool.go @@ -79,7 +79,7 @@ func (s *syncWorkerPool) fromBlockAnnounceHandshake(who peer.ID) error { // submitRequests takes an set of requests and will submit to the pool through submitRequest // the response will be dispatch in the resultCh -func (s *syncWorkerPool) submitRequests(tasks []*syncTask) ([]*syncTaskResult, error) { +func (s *syncWorkerPool) submitRequests(tasks []*syncTask) []*syncTaskResult { s.mtx.RLock() defer s.mtx.RUnlock() @@ -116,7 +116,7 @@ func (s *syncWorkerPool) submitRequests(tasks []*syncTask) ([]*syncTaskResult, e } } - return results, nil + return results } func (s *syncWorkerPool) ignorePeerAsWorker(who peer.ID) { From 8479f23c17b6cff47afa732ea8a74f138c94894d Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 7 Aug 2024 15:09:40 -0400 Subject: [PATCH 18/74] chore: ignore forks when ancestor is behind highest finalized block --- lib/sync/fullsync.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/lib/sync/fullsync.go b/lib/sync/fullsync.go index 26455aaff9..4ab0776b68 100644 --- a/lib/sync/fullsync.go +++ b/lib/sync/fullsync.go @@ -85,7 +85,8 @@ func NewFullSyncStrategy(cfg *FullSyncConfig) *FullSyncStrategy { importer: newBlockImporter(cfg), unreadyBlocks: &unreadyBlocks{ incompleteBlocks: make(map[common.Hash]*types.BlockData), - disjointChains: make([][]*types.BlockData, 0), + // TODO: cap disjoitChains to don't grows indefinitelly + disjointChains: make([][]*types.BlockData, 0), }, requestQueue: &requestsQueue[*network.BlockRequestMessage]{ queue: list.New(), @@ -119,12 +120,6 @@ func (f *FullSyncStrategy) NextActions() ([]*syncTask, error) { } startRequestAt := bestBlockHeader.Number + 1 - - // here is where we cap the amount of tasks we will create - // f.numOfTasks - len(requests) gives us the remaining amount - // of requests and we multiply by 128 which is the max amount - // of blocks a single request can ask - // 257 + 2 * 128 = 513 targetBlockNumber := startRequestAt + 128 if targetBlockNumber > uint(currentTarget) { targetBlockNumber = uint(currentTarget) @@ -253,6 +248,12 @@ func (f *FullSyncStrategy) IsFinished(results []*syncTaskResult) (bool, []Change } if !ok { + // if the parent of this valid fragment is behind our latest finalized number + // then we can discard the whole fragment since it is an invalid fork + if (validFragment[0].Header.Number - 1) <= highestFinalized.Number { + continue + } + logger.Infof("starting an acestor search from %s parent of #%d (%s)", validFragment[0].Header.ParentHash, validFragment[0].Header.Number, @@ -264,7 +265,6 @@ *variadic.FromHash(validFragment[0].Header.ParentHash), network.MaxBlocksInResponse, network.BootstrapRequestData, network.Descending) - f.requestQueue.PushBack(request) } else { // inserting them in the queue to be processed after the main chain @@
-450,7 +450,7 @@ resultLoop: return repChanges, peersToBlock, validRes } -// sortFragmentsOfChain will organize the fragments +// sortFragmentsOfChain will organise the fragments // in a way we can import the older blocks first also guaranting that // forks can be imported by organising them to be after the main chain // From ec88b94fd7b1d03a50026396a2f6806f04e3d278 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 7 Aug 2024 15:20:50 -0400 Subject: [PATCH 19/74] chore: small updts --- lib/sync/fullsync.go | 2 +- lib/sync/service.go | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/lib/sync/fullsync.go b/lib/sync/fullsync.go index 4ab0776b68..67008770e2 100644 --- a/lib/sync/fullsync.go +++ b/lib/sync/fullsync.go @@ -85,7 +85,7 @@ func NewFullSyncStrategy(cfg *FullSyncConfig) *FullSyncStrategy { importer: newBlockImporter(cfg), unreadyBlocks: &unreadyBlocks{ incompleteBlocks: make(map[common.Hash]*types.BlockData), - // TODO: cap disjoitChains to don't grows indefinitelly + // TODO: cap disjoitChains to don't grows indefinitely disjointChains: make([][]*types.BlockData, 0), }, requestQueue: &requestsQueue[*network.BlockRequestMessage]{ diff --git a/lib/sync/service.go b/lib/sync/service.go index a6fc98a363..6529de64a9 100644 --- a/lib/sync/service.go +++ b/lib/sync/service.go @@ -161,8 +161,7 @@ func (s *SyncService) HandleBlockAnnounceHandshake(from peer.ID, msg *network.Bl s.mu.Lock() defer s.mu.Unlock() - s.currentStrategy.OnBlockAnnounceHandshake(from, msg) - return nil + return s.currentStrategy.OnBlockAnnounceHandshake(from, msg) } func (s *SyncService) HandleBlockAnnounce(from peer.ID, msg *network.BlockAnnounceMessage) error { From f29170578cfb6888cef241d00433be619027efdf Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Mon, 12 Aug 2024 14:53:25 -0400 Subject: [PATCH 20/74] chore: listening for stop channel on `runSyncEngine` --- lib/sync/fullsync_handle_block.go | 1 + lib/sync/service.go | 7 +++++++ 2 files changed, 8 insertions(+) diff --git a/lib/sync/fullsync_handle_block.go b/lib/sync/fullsync_handle_block.go index 1d1d6f38f3..f8a3493b11 100644 --- a/lib/sync/fullsync_handle_block.go +++ b/lib/sync/fullsync_handle_block.go @@ -62,6 +62,7 @@ type blockImporter struct { func newBlockImporter(cfg *FullSyncConfig) *blockImporter { return &blockImporter{ + blockState: cfg.BlockState, storageState: cfg.StorageState, transactionState: cfg.TransactionState, babeVerifier: cfg.BabeVerifier, diff --git a/lib/sync/service.go b/lib/sync/service.go index 6529de64a9..f04797cfaa 100644 --- a/lib/sync/service.go +++ b/lib/sync/service.go @@ -103,6 +103,7 @@ func NewSyncService(network Network, workerPool: newSyncWorkerPool(network), waitPeersDuration: 2 * time.Second, minPeers: 1, + slotDuration: 6 * time.Second, stopCh: make(chan struct{}), } } @@ -202,6 +203,12 @@ func (s *SyncService) runSyncEngine() { // TODO: need to handle stop channel for { + select { + case <-s.stopCh: + return + default: + } + finalisedHeader, err := s.blockState.GetHighestFinalisedHeader() if err != nil { logger.Criticalf("getting highest finalized header: %w", err) From 66f841214c2f3aa01bcc5f0381b95cfba052e305 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Fri, 23 Aug 2024 14:34:14 -0400 Subject: [PATCH 21/74] chore: address comments --- dot/network/message.go | 4 +- dot/network/message_test.go | 22 ++-- dot/services.go | 26 ++--- lib/common/variadic/uint32OrHash.go | 20 +--- lib/sync/configuration.go | 31 +++++ lib/sync/fullsync.go | 108 +++++++++--------- 
lib/sync/fullsync_handle_block.go | 2 - lib/sync/fullsync_test.go | 14 +-- lib/sync/service.go | 28 ++--- scripts/retrieve_block/retrieve_block.go | 4 +- scripts/retrieve_block/retrieve_block_test.go | 12 +- 11 files changed, 145 insertions(+), 126 deletions(-) create mode 100644 lib/sync/configuration.go diff --git a/dot/network/message.go b/dot/network/message.go index 0296d20d64..774d18cacb 100644 --- a/dot/network/message.go +++ b/dot/network/message.go @@ -389,7 +389,7 @@ func NewAscendingBlockRequests(startNumber, targetNumber uint, requestedData byt // start and end block are the same, just request 1 block if diff == 0 { return []*BlockRequestMessage{ - NewBlockRequest(*variadic.MustNewUint32OrHash(uint32(startNumber)), 1, requestedData, Ascending), + NewBlockRequest(*variadic.Uint32OrHashFrom(uint32(startNumber)), 1, requestedData, Ascending), } } @@ -398,7 +398,7 @@ func NewAscendingBlockRequests(startNumber, targetNumber uint, requestedData byt reqs := make([]*BlockRequestMessage, numRequests) for i := uint(0); i < numRequests; i++ { max := uint32(MaxBlocksInResponse) - start := variadic.MustNewUint32OrHash(startNumber) + start := variadic.Uint32OrHashFrom(startNumber) reqs[i] = NewBlockRequest(*start, max, requestedData, Ascending) startNumber += uint(max) } diff --git a/dot/network/message_test.go b/dot/network/message_test.go index e0d713b9a6..bfaf3203b0 100644 --- a/dot/network/message_test.go +++ b/dot/network/message_test.go @@ -445,7 +445,7 @@ func TestAscendingBlockRequest(t *testing.T) { expectedBlockRequestMessage: []*BlockRequestMessage{ { RequestedData: BootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(uint32(10)), + StartingBlock: *variadic.Uint32OrHashFrom(uint32(10)), Direction: Ascending, Max: &one, }, @@ -460,7 +460,7 @@ func TestAscendingBlockRequest(t *testing.T) { expectedBlockRequestMessage: []*BlockRequestMessage{ { RequestedData: BootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(uint32(1)), + StartingBlock: *variadic.Uint32OrHashFrom(uint32(1)), Direction: Ascending, Max: &maxResponseSize, }, @@ -474,25 +474,25 @@ func TestAscendingBlockRequest(t *testing.T) { expectedBlockRequestMessage: []*BlockRequestMessage{ { RequestedData: BootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(uint32(1)), + StartingBlock: *variadic.Uint32OrHashFrom(uint32(1)), Direction: Ascending, Max: &maxResponseSize, }, { RequestedData: BootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(uint32(129)), + StartingBlock: *variadic.Uint32OrHashFrom(uint32(129)), Direction: Ascending, Max: &maxResponseSize, }, { RequestedData: BootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(uint32(257)), + StartingBlock: *variadic.Uint32OrHashFrom(uint32(257)), Direction: Ascending, Max: &maxResponseSize, }, { RequestedData: BootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(uint32(385)), + StartingBlock: *variadic.Uint32OrHashFrom(uint32(385)), Direction: Ascending, Max: &maxResponseSize, }, @@ -506,31 +506,31 @@ func TestAscendingBlockRequest(t *testing.T) { expectedBlockRequestMessage: []*BlockRequestMessage{ { RequestedData: BootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(uint32(1)), + StartingBlock: *variadic.Uint32OrHashFrom(uint32(1)), Direction: Ascending, Max: &maxResponseSize, }, { RequestedData: BootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(uint32(129)), + StartingBlock: *variadic.Uint32OrHashFrom(uint32(129)), Direction: Ascending, Max: 
&maxResponseSize, }, { RequestedData: BootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(uint32(257)), + StartingBlock: *variadic.Uint32OrHashFrom(uint32(257)), Direction: Ascending, Max: &maxResponseSize, }, { RequestedData: BootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(uint32(385)), + StartingBlock: *variadic.Uint32OrHashFrom(uint32(385)), Direction: Ascending, Max: &maxResponseSize, }, { RequestedData: BootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(uint32(513)), + StartingBlock: *variadic.Uint32OrHashFrom(uint32(513)), Direction: Ascending, Max: &three, }, diff --git a/dot/services.go b/dot/services.go index d3e61554b1..2fa9d07a32 100644 --- a/dot/services.go +++ b/dot/services.go @@ -500,22 +500,17 @@ func (nodeBuilder) createBlockVerifier(st *state.Service) *babe.VerificationMana func (nodeBuilder) newSyncService(config *cfg.Config, st *state.Service, fg BlockJustificationVerifier, verifier *babe.VerificationManager, cs *core.Service, net *network.Service, telemetryMailer Telemetry) ( network.Syncer, error) { - // slotDuration, err := st.Epoch.GetSlotDuration() - // if err != nil { - // return nil, err - // } + slotDuration, err := st.Epoch.GetSlotDuration() + if err != nil { + return nil, err + } genesisData, err := st.Base.LoadGenesisData() if err != nil { return nil, err } - // syncLogLevel, err := log.ParseLevel(config.Log.Sync) - // if err != nil { - // return nil, fmt.Errorf("failed to parse sync log level: %w", err) - // } - - const blockRequestTimeout = 30 * time.Second + const blockRequestTimeout = 20 * time.Second requestMaker := net.GetRequestResponseProtocol( network.SyncID, blockRequestTimeout, @@ -538,11 +533,14 @@ func (nodeBuilder) newSyncService(config *cfg.Config, st *state.Service, fg Bloc BadBlocks: genesisData.BadBlocks, RequestMaker: requestMaker, } + fullSync := libsync.NewFullSyncStrategy(syncCfg) - defaultStrategy := libsync.NewFullSyncStrategy(syncCfg) - return libsync.NewSyncService(net, st.Block, - defaultStrategy, - defaultStrategy), nil + return libsync.NewSyncService( + libsync.WithNetwork(net), + libsync.WithBlockState(st.Block), + libsync.WithSlotDuration(slotDuration), + libsync.WithStrategies(fullSync, fullSync), + ), nil } func (nodeBuilder) createDigestHandler(st *state.Service) (*digest.Handler, error) { diff --git a/lib/common/variadic/uint32OrHash.go b/lib/common/variadic/uint32OrHash.go index 2d1eb0fff3..922ff67996 100644 --- a/lib/common/variadic/uint32OrHash.go +++ b/lib/common/variadic/uint32OrHash.go @@ -17,13 +17,8 @@ type Uint32OrHash struct { value interface{} } -func FromHash(hash common.Hash) *Uint32OrHash { - return &Uint32OrHash{ - value: hash, - } -} - -func FromUint32(value uint32) *Uint32OrHash { +// Uint32OrHashFrom returns a new variadic.Uint32OrHash given an int, uint32, or Hash +func Uint32OrHashFrom[T common.Hash | ~int | ~uint | ~uint32](value T) *Uint32OrHash { return &Uint32OrHash{ value: value, } @@ -53,17 +48,6 @@ func NewUint32OrHash(value interface{}) (*Uint32OrHash, error) { } } -// MustNewUint32OrHash returns a new variadic.Uint32OrHash given an int, uint32, or Hash -// It panics if the input value is invalid -func MustNewUint32OrHash(value interface{}) *Uint32OrHash { - val, err := NewUint32OrHash(value) - if err != nil { - panic(err) - } - - return val -} - // NewUint32OrHashFromBytes returns a new variadic.Uint32OrHash from an encoded variadic uint32 or hash func NewUint32OrHashFromBytes(data []byte) *Uint32OrHash { firstByte := data[0] diff --git 
a/lib/sync/configuration.go b/lib/sync/configuration.go new file mode 100644 index 0000000000..f2d4c2b6a1 --- /dev/null +++ b/lib/sync/configuration.go @@ -0,0 +1,31 @@ +package sync + +import "time" + +type ServiceConfig func(svc *SyncService) + +func WithStrategies(currentStrategy, defaultStrategy Strategy) ServiceConfig { + return func(svc *SyncService) { + svc.currentStrategy = currentStrategy + svc.defaultStrategy = defaultStrategy + } +} + +func WithNetwork(net Network) ServiceConfig { + return func(svc *SyncService) { + svc.network = net + svc.workerPool = newSyncWorkerPool(net) + } +} + +func WithBlockState(bs BlockState) ServiceConfig { + return func(svc *SyncService) { + svc.blockState = bs + } +} + +func WithSlotDuration(slotDuration time.Duration) ServiceConfig { + return func(svc *SyncService) { + svc.slotDuration = slotDuration + } +} diff --git a/lib/sync/fullsync.go b/lib/sync/fullsync.go index 67008770e2..a669eda976 100644 --- a/lib/sync/fullsync.go +++ b/lib/sync/fullsync.go @@ -55,10 +55,13 @@ type FullSyncConfig struct { RequestMaker network.RequestMaker } -type Importer interface { +type importer interface { handle(*types.BlockData, BlockOrigin) (imported bool, err error) } +// FullSyncStrategy protocol is the "default" protocol. +// Full sync works by listening to announced blocks and requesting the blocks +// from the announcing peers. type FullSyncStrategy struct { requestQueue *requestsQueue[*network.BlockRequestMessage] unreadyBlocks *unreadyBlocks @@ -69,7 +72,7 @@ type FullSyncStrategy struct { numOfTasks int startedAt time.Time syncedBlocks int - importer Importer + importer importer } func NewFullSyncStrategy(cfg *FullSyncConfig) *FullSyncStrategy { @@ -147,22 +150,6 @@ func (f *FullSyncStrategy) createTasks(requests []*network.BlockRequestMessage) func (f *FullSyncStrategy) IsFinished(results []*syncTaskResult) (bool, []Change, []peer.ID, error) { repChanges, peersToIgnore, validResp := validateResults(results, f.badBlocks) - validBlocksUnderFragment := func(highestFinalizedNumber uint, fragmentBlocks []*types.BlockData) []*types.BlockData { - startFragmentFrom := -1 - for idx, block := range fragmentBlocks { - if block.Header.Number > highestFinalizedNumber { - startFragmentFrom = idx - break - } - } - - if startFragmentFrom < 0 { - return nil - } - - return fragmentBlocks[startFragmentFrom:] - } - highestFinalized, err := f.blockState.GetHighestFinalisedHeader() if err != nil { return false, nil, nil, fmt.Errorf("getting highest finalized header") @@ -171,11 +158,10 @@ func (f *FullSyncStrategy) IsFinished(results []*syncTaskResult) (bool, []Change readyBlocks := make([][]*types.BlockData, 0, len(validResp)) for _, reqRespData := range validResp { // if Gossamer requested the header, then the response data should - // contains the full bocks to be imported - // if Gossamer don't requested the header, then the response shoul - // only contains the missing parts the will complete the unreadyBlocks + // contains the full blocks to be imported. 
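A note on the lib/sync/configuration.go file added above: the With* helpers are Go functional options, applied by NewSyncService after the defaults are set, so explicit options always win. A minimal, self-contained sketch of the pattern (simplified names, not the real SyncService):

package main

import (
	"fmt"
	"time"
)

// Option mutates a Service during construction, like sync.ServiceConfig.
type Option func(*Service)

// Service is a simplified stand-in for the real SyncService.
type Service struct {
	slotDuration time.Duration
	minPeers     int
}

func WithSlotDuration(d time.Duration) Option {
	return func(s *Service) { s.slotDuration = d }
}

// NewService sets defaults first and then applies each option in order.
func NewService(opts ...Option) *Service {
	svc := &Service{minPeers: 3} // default, cf. minPeersDefault
	for _, opt := range opts {
		opt(svc)
	}
	return svc
}

func main() {
	svc := NewService(WithSlotDuration(6 * time.Second))
	fmt.Println(svc.slotDuration, svc.minPeers) // 6s 3
}

This is what allows newSyncService in dot/services.go to pass any subset of WithNetwork, WithBlockState, WithSlotDuration and WithStrategies.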
+ // if Gossamer didn't request the header, then the response should + // only contain the missing parts that will complete the unreadyBlocks // and then with the blocks completed we should be able to import them - if reqRespData.req.RequestField(network.RequestedDataHeader) { updatedFragment, ok := f.unreadyBlocks.updateDisjointFragments(reqRespData.responseData) if ok { @@ -186,6 +172,7 @@ func (f *FullSyncStrategy) IsFinished(results []*syncTaskResult) (bool, []Change } else { readyBlocks = append(readyBlocks, reqRespData.responseData) } + continue } @@ -193,7 +180,7 @@ func (f *FullSyncStrategy) IsFinished(results []*syncTaskResult) (bool, []Change readyBlocks = append(readyBlocks, completedBlocks) } - // disjoint fragments are pieces of the chain that could not be imported rn + // disjoint fragments are pieces of the chain that could not be imported right now // because is blocks too far ahead or blocks that belongs to forks orderedFragments := sortFragmentsOfChain(readyBlocks) orderedFragments = mergeFragmentsOfChain(orderedFragments) @@ -262,7 +249,7 @@ func (f *FullSyncStrategy) IsFinished(results []*syncTaskResult) (bool, []Change f.unreadyBlocks.newFragment(validFragment) request := network.NewBlockRequest( - *variadic.FromHash(validFragment[0].Header.ParentHash), + *variadic.Uint32OrHashFrom(validFragment[0].Header.ParentHash), network.MaxBlocksInResponse, network.BootstrapRequestData, network.Descending) f.requestQueue.PushBack(request) @@ -297,29 +284,31 @@ func (f *FullSyncStrategy) OnBlockAnnounce(from peer.ID, msg *network.BlockAnnou return nil, errors.New("blockstate service is paused") } + currentTarget := f.peers.getTarget() + if msg.Number >= uint(currentTarget) { + return nil, nil + } + + blockAnnounceHeader := types.NewHeader(msg.ParentHash, msg.StateRoot, msg.ExtrinsicsRoot, msg.Number, msg.Digest) + blockAnnounceHeaderHash := blockAnnounceHeader.Hash() + if msg.BestBlock { pv := f.peers.get(from) - if uint(pv.bestBlockNumber) != msg.Number { + if uint(pv.bestBlockNumber) != msg.Number || blockAnnounceHeaderHash != pv.bestBlockHash { repChange = &Change{ who: from, rep: peerset.ReputationChange{ - Value: peerset.BadBlockAnnouncementValue, - Reason: peerset.BadBlockAnnouncementReason, + Value: peerset.BadMessageValue, + Reason: peerset.BadMessageReason, }, } - return repChange, fmt.Errorf("%w: peer %s, on handshake #%d, on announce #%d", - errMismatchBestBlockAnnouncement, from, pv.bestBlockNumber, msg.Number) + return repChange, fmt.Errorf("%w: peer %s, on handshake #%d (%s), on announce #%d (%s)", + errMismatchBestBlockAnnouncement, from, + pv.bestBlockNumber, pv.bestBlockHash.String(), + msg.Number, blockAnnounceHeaderHash.String()) } } - currentTarget := f.peers.getTarget() - if msg.Number >= uint(currentTarget) { - return nil, nil - } - - blockAnnounceHeader := types.NewHeader(msg.ParentHash, msg.StateRoot, msg.ExtrinsicsRoot, msg.Number, msg.Digest) - blockAnnounceHeaderHash := blockAnnounceHeader.Hash() - logger.Infof("received block announce from %s: #%d (%s) best block: %v", from, blockAnnounceHeader.Number, @@ -353,7 +342,7 @@ func (f *FullSyncStrategy) OnBlockAnnounce(from peer.ID, msg *network.BlockAnnou if !has { f.unreadyBlocks.newHeader(blockAnnounceHeader) - request := network.NewBlockRequest(*variadic.FromHash(blockAnnounceHeaderHash), + request := network.NewBlockRequest(*variadic.Uint32OrHashFrom(blockAnnounceHeaderHash), 1, network.RequestedDataBody+network.RequestedDataJustification, network.Ascending) f.requestQueue.PushBack(request) } @@ -389,7 
+378,7 @@ resultLoop: err := validateResponseFields(request, response.BlockData) if err != nil { - logger.Criticalf("validating fields: %s", err) + logger.Warnf("validating fields: %s", err) // TODO: check the reputation change for nil body in response // and nil justification in response if errors.Is(err, errNilHeaderInResponse) { @@ -402,7 +391,6 @@ resultLoop: }) } - //missingReqs = append(missingReqs, request) continue } @@ -410,7 +398,7 @@ resultLoop: // of each block, othewise the response might only have the body/justification for // a block if request.RequestField(network.RequestedDataHeader) && !isResponseAChain(response.BlockData) { - logger.Criticalf("response from %s is not a chain", result.who) + logger.Warnf("response from %s is not a chain", result.who) repChanges = append(repChanges, Change{ who: result.who, rep: peerset.ReputationChange{ @@ -418,13 +406,12 @@ resultLoop: Reason: peerset.IncompleteHeaderReason, }, }) - //missingReqs = append(missingReqs, request) continue } for _, block := range response.BlockData { if slices.Contains(badBlocks, block.Hash.String()) { - logger.Criticalf("%s sent a known bad block: #%d (%s)", + logger.Warnf("%s sent a known bad block: #%d (%s)", result.who, block.Number(), block.Hash.String()) peersToBlock = append(peersToBlock, result.who) @@ -436,7 +423,6 @@ resultLoop: }, }) - //missingReqs = append(missingReqs, request) continue resultLoop } } @@ -460,12 +446,12 @@ resultLoop: // note that we have fragments with single blocks, fragments with fork (in case of 8) // after sorting these fragments we end up with: // [ {1, 2, 3, 4, 5} {6, 7, 8, 9, 10} {8} {11, 12, 13, 14, 15, 16} {17} ] -func sortFragmentsOfChain(responses [][]*types.BlockData) [][]*types.BlockData { - if len(responses) == 0 { +func sortFragmentsOfChain(fragments [][]*types.BlockData) [][]*types.BlockData { + if len(fragments) == 0 { return nil } - slices.SortFunc(responses, func(a, b []*types.BlockData) int { + slices.SortFunc(fragments, func(a, b []*types.BlockData) int { if a[0].Header.Number < b[0].Header.Number { return -1 } @@ -475,7 +461,7 @@ func sortFragmentsOfChain(responses [][]*types.BlockData) [][]*types.BlockData { return 1 }) - return responses + return fragments } // mergeFragmentsOfChain merges a sorted slice of fragments that forms a valid @@ -505,9 +491,29 @@ func mergeFragmentsOfChain(fragments [][]*types.BlockData) [][]*types.BlockData return mergedFragments } -func formsSequence(last, curr *types.BlockData) bool { - incrementOne := (last.Header.Number + 1) == curr.Header.Number - isParent := last.Hash == curr.Header.ParentHash +// validBlocksUnderFragment ignore all blocks prior to the given last finalized number +func validBlocksUnderFragment(highestFinalizedNumber uint, fragmentBlocks []*types.BlockData) []*types.BlockData { + startFragmentFrom := -1 + for idx, block := range fragmentBlocks { + if block.Header.Number > highestFinalizedNumber { + startFragmentFrom = idx + break + } + } + + if startFragmentFrom < 0 { + return nil + } + + return fragmentBlocks[startFragmentFrom:] +} + +// formsSequence given two fragments of blocks, check if they forms a sequence +// by comparing the latest block from the prev fragment with the +// first block of the next fragment +func formsSequence(prev, next *types.BlockData) bool { + incrementOne := (prev.Header.Number + 1) == next.Header.Number + isParent := prev.Hash == next.Header.ParentHash return incrementOne && isParent } diff --git a/lib/sync/fullsync_handle_block.go b/lib/sync/fullsync_handle_block.go index 
f8a3493b11..7c7a563760 100644 --- a/lib/sync/fullsync_handle_block.go +++ b/lib/sync/fullsync_handle_block.go @@ -96,8 +96,6 @@ func (b *blockImporter) handle(bd *types.BlockData, origin BlockOrigin) (importe // or the index of the block data that errored on failure. // TODO: https://github.com/ChainSafe/gossamer/issues/3468 func (b *blockImporter) processBlockData(blockData types.BlockData, origin BlockOrigin) error { - // while in bootstrap mode we don't need to broadcast block announcements - // TODO: set true if not in initial sync setup announceImportedBlock := false if blockData.Header != nil { diff --git a/lib/sync/fullsync_test.go b/lib/sync/fullsync_test.go index e89843dfa0..4459ddd97a 100644 --- a/lib/sync/fullsync_test.go +++ b/lib/sync/fullsync_test.go @@ -99,13 +99,13 @@ func TestFullSyncNextActions(t *testing.T) { expectedTasks: []*network.BlockRequestMessage{ { RequestedData: network.BootstrapRequestData, - StartingBlock: *variadic.FromUint32(129), + StartingBlock: *variadic.Uint32OrHashFrom(129), Direction: network.Ascending, Max: refTo(128), }, { RequestedData: network.BootstrapRequestData, - StartingBlock: *variadic.FromUint32(1), + StartingBlock: *variadic.Uint32OrHashFrom(1), Direction: network.Ascending, Max: refTo(128), }, @@ -130,13 +130,13 @@ func TestFullSyncNextActions(t *testing.T) { expectedTasks: []*network.BlockRequestMessage{ { RequestedData: network.BootstrapRequestData, - StartingBlock: *variadic.FromUint32(129), + StartingBlock: *variadic.Uint32OrHashFrom(129), Direction: network.Ascending, Max: refTo(128), }, { RequestedData: network.BootstrapRequestData, - StartingBlock: *variadic.FromUint32(257), + StartingBlock: *variadic.Uint32OrHashFrom(257), Direction: network.Ascending, Max: refTo(128), }, @@ -193,7 +193,7 @@ func TestFullSyncIsFinished(t *testing.T) { // 1 -> 10 { who: peer.ID("peerA"), - request: network.NewBlockRequest(*variadic.FromUint32(1), 128, + request: network.NewBlockRequest(*variadic.Uint32OrHashFrom(1), 128, network.BootstrapRequestData, network.Ascending), completed: true, response: fstTaskBlockResponse, @@ -203,7 +203,7 @@ func TestFullSyncIsFinished(t *testing.T) { // 129 -> 256 { who: peer.ID("peerA"), - request: network.NewBlockRequest(*variadic.FromUint32(1), 128, + request: network.NewBlockRequest(*variadic.Uint32OrHashFrom(1), 128, network.BootstrapRequestData, network.Ascending), completed: true, response: sndTaskBlockResponse, @@ -253,7 +253,7 @@ func TestFullSyncIsFinished(t *testing.T) { require.Equal(t, fs.unreadyBlocks.disjointChains[0], sndTaskBlockResponse.BlockData) expectedAncestorRequest := network.NewBlockRequest( - *variadic.FromHash(sndTaskBlockResponse.BlockData[0].Header.ParentHash), + *variadic.Uint32OrHashFrom(sndTaskBlockResponse.BlockData[0].Header.ParentHash), network.MaxBlocksInResponse, network.BootstrapRequestData, network.Descending) diff --git a/lib/sync/service.go b/lib/sync/service.go index f04797cfaa..2a8dafe51c 100644 --- a/lib/sync/service.go +++ b/lib/sync/service.go @@ -18,6 +18,11 @@ import ( "github.com/libp2p/go-libp2p/core/peer" ) +const ( + waitPeersDefaultTimeout = 2 * time.Second + minPeersDefault = 3 +) + var logger = log.NewFromGlobal(log.AddContext("pkg", "new-sync")) type Network interface { @@ -92,20 +97,18 @@ type SyncService struct { stopCh chan struct{} } -func NewSyncService(network Network, - blockState BlockState, - currentStrategy, defaultStrategy Strategy) *SyncService { - return &SyncService{ - network: network, - blockState: blockState, - currentStrategy: currentStrategy, - 
defaultStrategy: defaultStrategy, - workerPool: newSyncWorkerPool(network), - waitPeersDuration: 2 * time.Second, - minPeers: 1, - slotDuration: 6 * time.Second, +func NewSyncService(cfgs ...ServiceConfig) *SyncService { + svc := &SyncService{ + minPeers: minPeersDefault, + waitPeersDuration: waitPeersDefaultTimeout, stopCh: make(chan struct{}), } + + for _, cfg := range cfgs { + cfg(svc) + } + + return svc } func (s *SyncService) waitWorkers() { @@ -147,7 +150,6 @@ func (s *SyncService) Start() error { } func (s *SyncService) Stop() error { - // TODO: implement stop mechanism close(s.stopCh) s.wg.Wait() return nil diff --git a/scripts/retrieve_block/retrieve_block.go b/scripts/retrieve_block/retrieve_block.go index 6f937849e8..316135235d 100644 --- a/scripts/retrieve_block/retrieve_block.go +++ b/scripts/retrieve_block/retrieve_block.go @@ -270,14 +270,14 @@ func main() { protocolID := protocol.ID(fmt.Sprintf("/%s/sync/2", chain.ProtocolID)) for _, bootnodesAddr := range bootnodes { - fmt.Println("connecting...") + log.Println("connecting...") err := p2pHost.Connect(ctx, bootnodesAddr) if err != nil { fmt.Printf("fail with: %s\n", err.Error()) continue } - fmt.Printf("requesting from peer %s\n", bootnodesAddr.String()) + log.Printf("requesting from peer %s\n", bootnodesAddr.String()) stream, err := p2pHost.NewStream(ctx, bootnodesAddr.ID, protocolID) if err != nil { fmt.Printf("WARN: failed to create stream using protocol %s: %s", protocolID, err.Error()) diff --git a/scripts/retrieve_block/retrieve_block_test.go b/scripts/retrieve_block/retrieve_block_test.go index 7f8c2c6be4..7ec4dc109a 100644 --- a/scripts/retrieve_block/retrieve_block_test.go +++ b/scripts/retrieve_block/retrieve_block_test.go @@ -20,35 +20,35 @@ func TestBuildRequestMessage(t *testing.T) { { arg: "10", expected: network.NewBlockRequest( - *variadic.MustNewUint32OrHash(uint(10)), 1, + *variadic.Uint32OrHashFrom(uint(10)), 1, network.BootstrapRequestData, network.Ascending), }, { arg: "0x9b0211aadcef4bb65e69346cfd256ddd2abcb674271326b08f0975dac7c17bc7", - expected: network.NewBlockRequest(*variadic.MustNewUint32OrHash( + expected: network.NewBlockRequest(*variadic.Uint32OrHashFrom( common.MustHexToHash("0x9b0211aadcef4bb65e69346cfd256ddd2abcb674271326b08f0975dac7c17bc7")), 1, network.BootstrapRequestData, network.Ascending), }, { arg: "0x9b0211aadcef4bb65e69346cfd256ddd2abcb674271326b08f0975dac7c17bc7,asc,20", - expected: network.NewBlockRequest(*variadic.MustNewUint32OrHash( + expected: network.NewBlockRequest(*variadic.Uint32OrHashFrom( common.MustHexToHash("0x9b0211aadcef4bb65e69346cfd256ddd2abcb674271326b08f0975dac7c17bc7")), 20, network.BootstrapRequestData, network.Ascending), }, { arg: "1,asc,20", - expected: network.NewBlockRequest(*variadic.MustNewUint32OrHash(uint(1)), + expected: network.NewBlockRequest(*variadic.Uint32OrHashFrom(uint(1)), 20, network.BootstrapRequestData, network.Ascending), }, { arg: "0x9b0211aadcef4bb65e69346cfd256ddd2abcb674271326b08f0975dac7c17bc7,desc,20", - expected: network.NewBlockRequest(*variadic.MustNewUint32OrHash( + expected: network.NewBlockRequest(*variadic.Uint32OrHashFrom( common.MustHexToHash("0x9b0211aadcef4bb65e69346cfd256ddd2abcb674271326b08f0975dac7c17bc7")), 20, network.BootstrapRequestData, network.Descending), }, { arg: "1,desc,20", - expected: network.NewBlockRequest(*variadic.MustNewUint32OrHash(uint(1)), + expected: network.NewBlockRequest(*variadic.Uint32OrHashFrom(uint(1)), 20, network.BootstrapRequestData, network.Descending), }, } From 
a3fc14e3786faf02dc078217e1cdf188d63bd1e0 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Mon, 26 Aug 2024 16:51:45 -0400 Subject: [PATCH 22/74] chore: resolve lll warn --- lib/sync/fullsync.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/sync/fullsync.go b/lib/sync/fullsync.go index c5ecd39684..d121117a89 100644 --- a/lib/sync/fullsync.go +++ b/lib/sync/fullsync.go @@ -280,7 +280,8 @@ func (f *FullSyncStrategy) OnBlockAnnounceHandshake(from peer.ID, msg *network.B return nil } -func (f *FullSyncStrategy) OnBlockAnnounce(from peer.ID, msg *network.BlockAnnounceMessage) (repChange *Change, err error) { +func (f *FullSyncStrategy) OnBlockAnnounce(from peer.ID, msg *network.BlockAnnounceMessage) ( + repChange *Change, err error) { if f.blockState.IsPaused() { return nil, errors.New("blockstate service is paused") } From 0ff9ec949df0b3177759dbb0e8a4e874320336ae Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Mon, 26 Aug 2024 16:55:45 -0400 Subject: [PATCH 23/74] chore: make license --- lib/sync/configuration.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/sync/configuration.go b/lib/sync/configuration.go index f2d4c2b6a1..2b0f394af9 100644 --- a/lib/sync/configuration.go +++ b/lib/sync/configuration.go @@ -1,3 +1,6 @@ +// Copyright 2024 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + package sync import "time" From f59e7f271f38e8e9c5ebb7c09775d70b5827619c Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Tue, 27 Aug 2024 08:33:10 -0400 Subject: [PATCH 24/74] chore: solve invalid block number type while encoding block request message --- dot/network/messages/block.go | 2 ++ lib/common/variadic/uint32OrHash.go | 46 +++++++++++++++++++++-------- lib/sync/fullsync.go | 4 +-- 3 files changed, 37 insertions(+), 15 deletions(-) diff --git a/dot/network/messages/block.go b/dot/network/messages/block.go index b36fe86dab..07fd5b61b8 100644 --- a/dot/network/messages/block.go +++ b/dot/network/messages/block.go @@ -146,6 +146,8 @@ func (bm *BlockRequestMessage) Encode() ([]byte, error) { MaxBlocks: max, } + bm.StartingBlock.Encode() + if bm.StartingBlock.IsHash() { hash := bm.StartingBlock.Hash() msg.FromBlock = &pb.BlockRequest_Hash{ diff --git a/lib/common/variadic/uint32OrHash.go b/lib/common/variadic/uint32OrHash.go index 922ff67996..e0cb4dfe05 100644 --- a/lib/common/variadic/uint32OrHash.go +++ b/lib/common/variadic/uint32OrHash.go @@ -4,6 +4,7 @@ package variadic import ( + "bytes" "encoding/binary" "errors" "fmt" @@ -12,6 +13,8 @@ import ( "github.com/ChainSafe/gossamer/lib/common" ) +var ErrUnsupportedType = errors.New("unsupported type") + // Uint32OrHash represents a variadic type that is either uint32 or common.Hash. type Uint32OrHash struct { value interface{} @@ -104,35 +107,52 @@ func (x *Uint32OrHash) String() string { // IsUint32 returns true if the value is a uint32 func (x *Uint32OrHash) IsUint32() bool { - if x == nil { + switch x.Value().(type) { + case int, uint, uint32: + return true + default: return false } - - _, is := x.value.(uint32) - return is } // Uint32 returns the value as a uint32. It panics if the value is not a uint32. 
func (x *Uint32OrHash) Uint32() uint32 { - if !x.IsUint32() { + var blockNumber uint32 + + switch c := x.Value().(type) { + case uint32: + blockNumber = c + case int: + blockNumber = uint32(c) + case uint: + blockNumber = uint32(c) + default: panic("value is not uint32") } - return x.value.(uint32) + return blockNumber } // Encode will encode a Uint32OrHash using SCALE func (x *Uint32OrHash) Encode() ([]byte, error) { - var encMsg []byte + var blockNumber uint32 + switch c := x.Value().(type) { - case uint32: - startingBlockByteArray := make([]byte, 4) - binary.LittleEndian.PutUint32(startingBlockByteArray, c) - encMsg = append(encMsg, append([]byte{1}, startingBlockByteArray...)...) case common.Hash: - encMsg = append(encMsg, append([]byte{0}, c.ToBytes()...)...) + return bytes.Join([][]byte{{0}, c.ToBytes()}, nil), nil + case uint32: + blockNumber = c + case int: + blockNumber = uint32(c) + case uint: + blockNumber = uint32(c) + default: + return nil, fmt.Errorf("%w: %T", ErrUnsupportedType, c) } - return encMsg, nil + + startingBlockByteArray := make([]byte, 4) + binary.LittleEndian.PutUint32(startingBlockByteArray, blockNumber) + return bytes.Join([][]byte{{1}, startingBlockByteArray}, nil), nil } // Decode decodes a value into a Uint32OrHash diff --git a/lib/sync/fullsync.go b/lib/sync/fullsync.go index d121117a89..1a0832e784 100644 --- a/lib/sync/fullsync.go +++ b/lib/sync/fullsync.go @@ -56,7 +56,7 @@ type FullSyncConfig struct { RequestMaker network.RequestMaker } -type importer interface { +type Importer interface { handle(*types.BlockData, BlockOrigin) (imported bool, err error) } @@ -73,7 +73,7 @@ type FullSyncStrategy struct { numOfTasks int startedAt time.Time syncedBlocks int - importer importer + importer Importer } func NewFullSyncStrategy(cfg *FullSyncConfig) *FullSyncStrategy { From 2acf7621aa68a6662690d814f58d135b5e8f7c85 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Tue, 27 Aug 2024 08:36:33 -0400 Subject: [PATCH 25/74] chore: update the ci to use `lib/sync` --- .github/workflows/integration-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index cc6e90185a..293bb6e1b4 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -32,7 +32,7 @@ jobs: github.com/ChainSafe/gossamer/dot/state, github.com/ChainSafe/gossamer/dot/digest, github.com/ChainSafe/gossamer/dot/network, - github.com/ChainSafe/gossamer/dot/sync, + github.com/ChainSafe/gossamer/lib/sync, github.com/ChainSafe/gossamer/lib/babe, github.com/ChainSafe/gossamer/lib/grandpa, ] From eb0ca275f1dd0ca02d033f2e6a038cac6620f402 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Tue, 27 Aug 2024 08:40:20 -0400 Subject: [PATCH 26/74] chore: simplify the message construction to use just uint32 --- dot/network/message_test.go | 2 +- dot/network/messages/block.go | 8 ++++---- lib/sync/fullsync.go | 3 ++- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/dot/network/message_test.go b/dot/network/message_test.go index cfcb215118..f28ac5ec44 100644 --- a/dot/network/message_test.go +++ b/dot/network/message_test.go @@ -429,7 +429,7 @@ func TestAscendingBlockRequest(t *testing.T) { three := uint32(3) maxResponseSize := uint32(messages.MaxBlocksInResponse) cases := map[string]struct { - startNumber, targetNumber uint + startNumber, targetNumber uint32 expectedBlockRequestMessage []*messages.BlockRequestMessage expectedTotalOfBlocksRequested uint32 }{ 
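A note on the wire format preserved by the Uint32OrHash.Encode rewrite above: the starting block is encoded as a one-byte tag followed by the payload, tag 0 for a 32-byte hash and tag 1 for a little-endian uint32 block number. A standalone sketch of that layout (simplified types, not the gossamer implementation):

package main

import (
	"encoding/binary"
	"fmt"
)

// encodeBlockNumber mirrors the tag-1 branch: byte 0x01, then the
// block number as four little-endian bytes.
func encodeBlockNumber(n uint32) []byte {
	payload := make([]byte, 4)
	binary.LittleEndian.PutUint32(payload, n)
	return append([]byte{1}, payload...)
}

// encodeBlockHash mirrors the tag-0 branch: byte 0x00, then the 32 hash bytes.
func encodeBlockHash(hash [32]byte) []byte {
	return append([]byte{0}, hash[:]...)
}

func main() {
	fmt.Printf("number 129 -> % x\n", encodeBlockNumber(129)) // 01 81 00 00 00
	var zero [32]byte // zero-valued hash, for illustration only
	fmt.Printf("hash -> % x ...\n", encodeBlockHash(zero)[:5])
}

Either branch produces the same bytes as before the refactor; the patch only widens the Go types accepted on the number side (int, uint, uint32) and reports ErrUnsupportedType otherwise.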
diff --git a/dot/network/messages/block.go b/dot/network/messages/block.go index 07fd5b61b8..18c09ed904 100644 --- a/dot/network/messages/block.go +++ b/dot/network/messages/block.go @@ -72,7 +72,7 @@ func NewBlockRequest(startingBlock variadic.Uint32OrHash, amount uint32, } } -func NewAscendingBlockRequests(startNumber, targetNumber uint, requestedData byte) []*BlockRequestMessage { +func NewAscendingBlockRequests(startNumber, targetNumber uint32, requestedData byte) []*BlockRequestMessage { if startNumber > targetNumber { return []*BlockRequestMessage{} } @@ -82,7 +82,7 @@ func NewAscendingBlockRequests(startNumber, targetNumber uint, requestedData byt // start and end block are the same, just request 1 block if diff == 0 { return []*BlockRequestMessage{ - NewBlockRequest(*variadic.Uint32OrHashFrom(uint32(startNumber)), 1, requestedData, Ascending), + NewBlockRequest(*variadic.Uint32OrHashFrom(startNumber), 1, requestedData, Ascending), } } @@ -99,7 +99,7 @@ func NewAscendingBlockRequests(startNumber, targetNumber uint, requestedData byt } reqs := make([]*BlockRequestMessage, numRequests) - for i := uint(0); i < numRequests; i++ { + for i := uint32(0); i < numRequests; i++ { max := uint32(MaxBlocksInResponse) lastIteration := numRequests - 1 @@ -109,7 +109,7 @@ func NewAscendingBlockRequests(startNumber, targetNumber uint, requestedData byt start := variadic.Uint32OrHashFrom(startNumber) reqs[i] = NewBlockRequest(*start, max, requestedData, Ascending) - startNumber += uint(max) + startNumber += max } return reqs diff --git a/lib/sync/fullsync.go b/lib/sync/fullsync.go index 1a0832e784..17c8a81664 100644 --- a/lib/sync/fullsync.go +++ b/lib/sync/fullsync.go @@ -130,7 +130,8 @@ func (f *FullSyncStrategy) NextActions() ([]*syncTask, error) { targetBlockNumber = uint(currentTarget) } - ascendingBlockRequests := messages.NewAscendingBlockRequests(startRequestAt, targetBlockNumber, + ascendingBlockRequests := messages.NewAscendingBlockRequests( + uint32(startRequestAt), uint32(targetBlockNumber), messages.BootstrapRequestData) return f.createTasks(ascendingBlockRequests), nil From 1d4295fa5f6b6dfcf21cc384d2ef5cfd8a58212c Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 28 Aug 2024 14:54:39 -0400 Subject: [PATCH 27/74] chore: re-add sync block response handler --- dot/node_integration_test.go | 4 +- dot/services.go | 22 +- {lib => dot}/sync/configuration.go | 0 {lib => dot}/sync/fullsync.go | 4 +- {lib => dot}/sync/fullsync_handle_block.go | 0 {lib => dot}/sync/fullsync_test.go | 74 +-- dot/sync/message.go | 428 ++++++++++++ dot/sync/message_integration_test.go | 610 ++++++++++++++++++ dot/sync/message_test.go | 389 +++++++++++ dot/sync/mock_request_maker.go | 55 ++ {lib => dot}/sync/mocks_generate_test.go | 1 + {lib => dot}/sync/mocks_test.go | 2 +- {lib => dot}/sync/peer_view.go | 0 {lib => dot}/sync/request_queue.go | 0 {lib => dot}/sync/service.go | 16 +- {lib => dot}/sync/service_test.go | 0 .../sync/testdata/westend_blocks.yaml | 0 {lib => dot}/sync/unready_blocks.go | 0 {lib => dot}/sync/worker_pool.go | 2 +- scripts/retrieve_block/retrieve_block.go | 4 +- 20 files changed, 1535 insertions(+), 76 deletions(-) rename {lib => dot}/sync/configuration.go (100%) rename {lib => dot}/sync/fullsync.go (99%) rename {lib => dot}/sync/fullsync_handle_block.go (100%) rename {lib => dot}/sync/fullsync_test.go (81%) create mode 100644 dot/sync/message.go create mode 100644 dot/sync/message_integration_test.go create mode 100644 dot/sync/message_test.go create mode 100644 
dot/sync/mock_request_maker.go rename {lib => dot}/sync/mocks_generate_test.go (68%) rename {lib => dot}/sync/mocks_test.go (99%) rename {lib => dot}/sync/peer_view.go (100%) rename {lib => dot}/sync/request_queue.go (100%) rename {lib => dot}/sync/service.go (95%) rename {lib => dot}/sync/service_test.go (100%) rename {lib => dot}/sync/testdata/westend_blocks.yaml (100%) rename {lib => dot}/sync/unready_blocks.go (100%) rename {lib => dot}/sync/worker_pool.go (99%) diff --git a/dot/node_integration_test.go b/dot/node_integration_test.go index b2cf001f44..7182c153ac 100644 --- a/dot/node_integration_test.go +++ b/dot/node_integration_test.go @@ -22,6 +22,7 @@ import ( digest "github.com/ChainSafe/gossamer/dot/digest" network "github.com/ChainSafe/gossamer/dot/network" "github.com/ChainSafe/gossamer/dot/state" + "github.com/ChainSafe/gossamer/dot/sync" system "github.com/ChainSafe/gossamer/dot/system" "github.com/ChainSafe/gossamer/dot/telemetry" "github.com/ChainSafe/gossamer/dot/types" @@ -35,7 +36,6 @@ import ( "github.com/ChainSafe/gossamer/lib/keystore" "github.com/ChainSafe/gossamer/lib/runtime" wazero_runtime "github.com/ChainSafe/gossamer/lib/runtime/wazero" - libsync "github.com/ChainSafe/gossamer/lib/sync" "github.com/ChainSafe/gossamer/pkg/trie" inmemory_trie "github.com/ChainSafe/gossamer/pkg/trie/inmemory" "github.com/stretchr/testify/assert" @@ -139,7 +139,7 @@ func TestNewNode(t *testing.T) { m.EXPECT().newSyncService(initConfig, gomock.AssignableToTypeOf(&state.Service{}), &grandpa.Service{}, &babe.VerificationManager{}, &core.Service{}, gomock.AssignableToTypeOf(&network.Service{}), gomock.AssignableToTypeOf(&telemetry.Mailer{})). - Return(&libsync.SyncService{}, nil) + Return(&sync.SyncService{}, nil) m.EXPECT().createBABEService(initConfig, gomock.AssignableToTypeOf(&state.Service{}), ks.Babe, &core.Service{}, gomock.AssignableToTypeOf(&telemetry.Mailer{})). 
Return(&babe.Service{}, nil) diff --git a/dot/services.go b/dot/services.go index 2fa9d07a32..fc6645cecd 100644 --- a/dot/services.go +++ b/dot/services.go @@ -17,6 +17,7 @@ import ( "github.com/ChainSafe/gossamer/dot/rpc" "github.com/ChainSafe/gossamer/dot/rpc/modules" "github.com/ChainSafe/gossamer/dot/state" + "github.com/ChainSafe/gossamer/dot/sync" "github.com/ChainSafe/gossamer/dot/system" "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/internal/database" @@ -34,7 +35,6 @@ import ( "github.com/ChainSafe/gossamer/lib/runtime" rtstorage "github.com/ChainSafe/gossamer/lib/runtime/storage" wazero_runtime "github.com/ChainSafe/gossamer/lib/runtime/wazero" - libsync "github.com/ChainSafe/gossamer/lib/sync" ) // BlockProducer to produce blocks @@ -516,13 +516,7 @@ func (nodeBuilder) newSyncService(config *cfg.Config, st *state.Service, fg Bloc blockRequestTimeout, network.MaxBlockResponseSize) - genesisHeader, err := st.Block.BestBlockHeader() - if err != nil { - return nil, fmt.Errorf("cannot get genesis header: %w", err) - } - - syncCfg := &libsync.FullSyncConfig{ - StartHeader: genesisHeader, + syncCfg := &sync.FullSyncConfig{ BlockState: st.Block, StorageState: st.Storage, TransactionState: st.Transaction, @@ -533,13 +527,13 @@ func (nodeBuilder) newSyncService(config *cfg.Config, st *state.Service, fg Bloc BadBlocks: genesisData.BadBlocks, RequestMaker: requestMaker, } - fullSync := libsync.NewFullSyncStrategy(syncCfg) + fullSync := sync.NewFullSyncStrategy(syncCfg) - return libsync.NewSyncService( - libsync.WithNetwork(net), - libsync.WithBlockState(st.Block), - libsync.WithSlotDuration(slotDuration), - libsync.WithStrategies(fullSync, fullSync), + return sync.NewSyncService( + sync.WithNetwork(net), + sync.WithBlockState(st.Block), + sync.WithSlotDuration(slotDuration), + sync.WithStrategies(fullSync, nil), ), nil } diff --git a/lib/sync/configuration.go b/dot/sync/configuration.go similarity index 100% rename from lib/sync/configuration.go rename to dot/sync/configuration.go diff --git a/lib/sync/fullsync.go b/dot/sync/fullsync.go similarity index 99% rename from lib/sync/fullsync.go rename to dot/sync/fullsync.go index 17c8a81664..2e7ebda0b3 100644 --- a/lib/sync/fullsync.go +++ b/dot/sync/fullsync.go @@ -43,7 +43,6 @@ var ( // Config is the configuration for the sync Service. 
type FullSyncConfig struct { - StartHeader *types.Header StorageState StorageState TransactionState TransactionState BabeVerifier BabeVerifier @@ -124,7 +123,7 @@ func (f *FullSyncStrategy) NextActions() ([]*syncTask, error) { } startRequestAt := bestBlockHeader.Number + 1 - targetBlockNumber := startRequestAt + 128 + targetBlockNumber := startRequestAt + 127 if targetBlockNumber > uint(currentTarget) { targetBlockNumber = uint(currentTarget) @@ -189,6 +188,7 @@ func (f *FullSyncStrategy) IsFinished(results []*syncTaskResult) (bool, []Change nextBlocksToImport := make([]*types.BlockData, 0) disjointFragments := make([][]*types.BlockData, 0) + for _, fragment := range orderedFragments { ok, err := f.blockState.HasHeader(fragment[0].Header.ParentHash) if err != nil && !errors.Is(err, database.ErrNotFound) { diff --git a/lib/sync/fullsync_handle_block.go b/dot/sync/fullsync_handle_block.go similarity index 100% rename from lib/sync/fullsync_handle_block.go rename to dot/sync/fullsync_handle_block.go diff --git a/lib/sync/fullsync_test.go b/dot/sync/fullsync_test.go similarity index 81% rename from lib/sync/fullsync_test.go rename to dot/sync/fullsync_test.go index 29313383c5..f2c654d7a3 100644 --- a/lib/sync/fullsync_test.go +++ b/dot/sync/fullsync_test.go @@ -31,8 +31,15 @@ type WestendBlocks struct { func TestFullSyncNextActions(t *testing.T) { t.Run("best_block_greater_or_equal_current_target", func(t *testing.T) { + // current target is 0 and best block is 0, then we should + // get an empty set of tasks + + mockBlockState := NewMockBlockState(gomock.NewController(t)) + mockBlockState.EXPECT().BestBlockHeader().Return( + types.NewEmptyHeader(), nil) + cfg := &FullSyncConfig{ - StartHeader: types.NewEmptyHeader(), + BlockState: mockBlockState, } fs := NewFullSyncStrategy(cfg) @@ -42,9 +49,12 @@ func TestFullSyncNextActions(t *testing.T) { }) t.Run("target_block_greater_than_best_block", func(t *testing.T) { + mockBlockState := NewMockBlockState(gomock.NewController(t)) + mockBlockState.EXPECT().BestBlockHeader().Return( + types.NewEmptyHeader(), nil) + cfg := &FullSyncConfig{ - StartHeader: types.NewEmptyHeader(), - NumOfTasks: 2, + BlockState: mockBlockState, } fs := NewFullSyncStrategy(cfg) @@ -59,18 +69,10 @@ func TestFullSyncNextActions(t *testing.T) { task, err := fs.NextActions() require.NoError(t, err) - // the current target is block 1024 (see the OnBlockAnnounceHandshake) - // since we cap the request to the max blocks we can retrieve which is 128 - // the we should have 2 requests start from 1 and request 128 and another - // request starting from 129 and requesting 128 - require.Len(t, task, 2) + require.Len(t, task, 1) request := task[0].request.(*messages.BlockRequestMessage) require.Equal(t, uint32(1), request.StartingBlock.Uint32()) require.Equal(t, uint32(128), *request.Max) - - request = task[1].request.(*messages.BlockRequestMessage) - require.Equal(t, uint32(129), request.StartingBlock.Uint32()) - require.Equal(t, uint32(128), *request.Max) }) t.Run("having_requests_in_the_queue", func(t *testing.T) { @@ -80,13 +82,13 @@ func TestFullSyncNextActions(t *testing.T) { cases := map[string]struct { setupRequestQueue func(*testing.T) *requestsQueue[*messages.BlockRequestMessage] - expectedTasksLen int + expectedQueueLen int expectedTasks []*messages.BlockRequestMessage }{ - "should_have_one_from_request_queue_and_one_from_target_chasing": { + "should_have_one_from_request_queue": { setupRequestQueue: func(t *testing.T) *requestsQueue[*messages.BlockRequestMessage] { request := 
messages.NewAscendingBlockRequests( - 129, 129+128, + 129, 129+127, messages.BootstrapRequestData) require.Len(t, request, 1) @@ -96,17 +98,11 @@ func TestFullSyncNextActions(t *testing.T) { } return rq }, - expectedTasksLen: 2, + expectedQueueLen: 0, expectedTasks: []*messages.BlockRequestMessage{ { RequestedData: messages.BootstrapRequestData, - StartingBlock: *variadic.Uint32OrHashFrom(129), - Direction: messages.Ascending, - Max: refTo(128), - }, - { - RequestedData: messages.BootstrapRequestData, - StartingBlock: *variadic.Uint32OrHashFrom(1), + StartingBlock: *variadic.Uint32OrHashFrom(uint32(129)), Direction: messages.Ascending, Max: refTo(128), }, @@ -114,10 +110,10 @@ func TestFullSyncNextActions(t *testing.T) { }, // creating a amount of 4 requests, but since we have a max num of // request set to 2 (see FullSyncConfig) we should only have 2 tasks - "should_have_two_tasks": { + "four_items_on_queue_should_pop_only_one": { setupRequestQueue: func(t *testing.T) *requestsQueue[*messages.BlockRequestMessage] { request := messages.NewAscendingBlockRequests( - 129, 129+(4*128), + 129, 129+(4*127), messages.BootstrapRequestData) require.Len(t, request, 4) @@ -127,17 +123,11 @@ func TestFullSyncNextActions(t *testing.T) { } return rq }, - expectedTasksLen: 2, + expectedQueueLen: 3, expectedTasks: []*messages.BlockRequestMessage{ { RequestedData: messages.BootstrapRequestData, - StartingBlock: *variadic.Uint32OrHashFrom(129), - Direction: messages.Ascending, - Max: refTo(128), - }, - { - RequestedData: messages.BootstrapRequestData, - StartingBlock: *variadic.Uint32OrHashFrom(257), + StartingBlock: *variadic.Uint32OrHashFrom(uint32(129)), Direction: messages.Ascending, Max: refTo(128), }, @@ -148,11 +138,7 @@ func TestFullSyncNextActions(t *testing.T) { for tname, tt := range cases { tt := tt t.Run(tname, func(t *testing.T) { - cfg := &FullSyncConfig{ - StartHeader: types.NewEmptyHeader(), - NumOfTasks: 2, - } - fs := NewFullSyncStrategy(cfg) + fs := NewFullSyncStrategy(&FullSyncConfig{}) fs.requestQueue = tt.setupRequestQueue(t) // introduce a peer and a target @@ -164,12 +150,11 @@ func TestFullSyncNextActions(t *testing.T) { }) require.NoError(t, err) - tasks, err := fs.NextActions() + task, err := fs.NextActions() require.NoError(t, err) - require.Len(t, tasks, tt.expectedTasksLen) - for idx, task := range tasks { - require.Equal(t, task.request, tt.expectedTasks[idx]) - } + + require.Equal(t, task[0].request, tt.expectedTasks[0]) + require.Equal(t, fs.requestQueue.Len(), tt.expectedQueueLen) }) } }) @@ -238,8 +223,7 @@ func TestFullSyncIsFinished(t *testing.T) { Times(10 + 128 + 128) cfg := &FullSyncConfig{ - StartHeader: types.NewEmptyHeader(), - BlockState: mockBlockState, + BlockState: mockBlockState, } fs := NewFullSyncStrategy(cfg) diff --git a/dot/sync/message.go b/dot/sync/message.go new file mode 100644 index 0000000000..f310708a24 --- /dev/null +++ b/dot/sync/message.go @@ -0,0 +1,428 @@ +// Copyright 2021 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package sync + +import ( + "bytes" + "errors" + "fmt" + "slices" + + "github.com/ChainSafe/gossamer/dot/network/messages" + "github.com/ChainSafe/gossamer/dot/peerset" + "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/lib/common" + "github.com/libp2p/go-libp2p/core/peer" +) + +const maxNumberOfSameRequestPerPeer uint = 2 + +var ( + ErrInvalidBlockRequest = errors.New("invalid block request") + errMaxNumberOfSameRequest = errors.New("max number of same request reached") + 
errInvalidRequestDirection = errors.New("invalid request direction") + errRequestStartTooHigh = errors.New("request start number is higher than our best block") + errStartAndEndNotOnChain = errors.New("request start and end hash are not on the same chain") + errFailedToGetDescendant = errors.New("failed to find descendant block") +) + +// CreateBlockResponse creates a block response message from a block request message +func (s *SyncService) CreateBlockResponse(from peer.ID, req *messages.BlockRequestMessage) ( + *messages.BlockResponseMessage, error) { + logger.Debugf("sync request from %s: %s", from, req.String()) + + if !req.StartingBlock.IsUint32() && !req.StartingBlock.IsHash() { + return nil, ErrInvalidBlockRequest + } + + encodedRequest, err := req.Encode() + if err != nil { + return nil, fmt.Errorf("encoding request: %w", err) + } + + encodedKey := bytes.Join([][]byte{[]byte(from.String()), encodedRequest}, nil) + requestHash, err := common.Blake2bHash(encodedKey) + if err != nil { + return nil, fmt.Errorf("hashing encoded block request sync message: %w", err) + } + + numOfRequests := s.seenBlockSyncRequests.Get(requestHash) + + if numOfRequests > maxNumberOfSameRequestPerPeer { + s.network.ReportPeer(peerset.ReputationChange{ + Value: peerset.SameBlockSyncRequest, + Reason: peerset.SameBlockSyncRequestReason, + }, from) + + logger.Debugf("max number of same request reached by: %s", from.String()) + return nil, fmt.Errorf("%w: %s", errMaxNumberOfSameRequest, from.String()) + } + + s.seenBlockSyncRequests.Put(requestHash, numOfRequests+1) + + switch req.Direction { + case messages.Ascending: + return s.handleAscendingRequest(req) + case messages.Descending: + return s.handleDescendingRequest(req) + default: + return nil, fmt.Errorf("%w: %v", errInvalidRequestDirection, req.Direction) + } +} + +func (s *SyncService) handleAscendingRequest(req *messages.BlockRequestMessage) (*messages.BlockResponseMessage, error) { + var ( + max uint = messages.MaxBlocksInResponse + startHash *common.Hash + startNumber uint + ) + + // determine maximum response size + if req.Max != nil && *req.Max < messages.MaxBlocksInResponse { + max = uint(*req.Max) + } + + bestBlockNumber, err := s.blockState.BestBlockNumber() + if err != nil { + return nil, fmt.Errorf("getting best block for request: %w", err) + } + + if req.StartingBlock.IsHash() { + startingBlockHash := req.StartingBlock.Hash() + startHash = &startingBlockHash + + // make sure we actually have the starting block + header, err := s.blockState.GetHeader(startingBlockHash) + if err != nil { + return nil, fmt.Errorf("failed to get start block %s for request: %w", startHash, err) + } + + startNumber = header.Number + } else if req.StartingBlock.IsUint32() { + startBlock := req.StartingBlock.Uint32() + if startBlock == 0 { + startBlock = 1 + } + + // if request start is higher than our best block, return error + if bestBlockNumber < uint(startBlock) { + return nil, errRequestStartTooHigh + } + + startNumber = uint(startBlock) + } else { + return nil, ErrInvalidBlockRequest + } + + endNumber := startNumber + max - 1 + if endNumber > bestBlockNumber { + endNumber = bestBlockNumber + } + + var endHash *common.Hash + if startHash != nil { + eh, err := s.checkOrGetDescendantHash(*startHash, nil, endNumber) + if err != nil { + return nil, err + } + + endHash = &eh + } + + if startHash == nil { + logger.Debugf("handling block request: direction %s, "+ + "start block number: %d, "+ + "end block number: %d", + req.Direction, startNumber, endNumber) + + return 
s.handleAscendingByNumber(startNumber, endNumber, req.RequestedData) + } + + logger.Debugf("handling block request: direction %s, "+ + "start block hash: %s, "+ + "end block hash: %s", + req.Direction, *startHash, *endHash) + + return s.handleChainByHash(*startHash, *endHash, max, req.RequestedData, req.Direction) +} + +func (s *SyncService) handleDescendingRequest(req *messages.BlockRequestMessage) (*messages.BlockResponseMessage, error) { + var ( + startHash *common.Hash + startNumber uint + max uint = messages.MaxBlocksInResponse + ) + + // determine maximum response size + if req.Max != nil && *req.Max < messages.MaxBlocksInResponse { + max = uint(*req.Max) + } + + if req.StartingBlock.IsHash() { + startingBlockHash := req.StartingBlock.Hash() + startHash = &startingBlockHash + + // make sure we actually have the starting block + header, err := s.blockState.GetHeader(*startHash) + if err != nil { + return nil, fmt.Errorf("failed to get start block %s for request: %w", startHash, err) + } + + startNumber = header.Number + } else if req.StartingBlock.IsUint32() { + startBlock := req.StartingBlock.Uint32() + bestBlockNumber, err := s.blockState.BestBlockNumber() + if err != nil { + return nil, fmt.Errorf("failed to get best block %d for request: %w", bestBlockNumber, err) + } + + // if request start is higher than our best block, only return blocks from our best block and below + if bestBlockNumber < uint(startBlock) { + startNumber = bestBlockNumber + } else { + startNumber = uint(startBlock) + } + } else { + return nil, ErrInvalidBlockRequest + } + + endNumber := uint(1) + if startNumber > max+1 { + endNumber = startNumber - max + 1 + } + + var endHash *common.Hash + if startHash != nil { + // need to get blocks by subchain if start hash is provided, get end hash + endHeader, err := s.blockState.GetHeaderByNumber(endNumber) + if err != nil { + return nil, fmt.Errorf("getting end block %d for request: %w", endNumber, err) + } + + hash := endHeader.Hash() + endHash = &hash + } + + if startHash == nil || endHash == nil { + logger.Debugf("handling BlockRequestMessage with direction %s "+ + "from start block with number %d to end block with number %d", + req.Direction, startNumber, endNumber) + return s.handleDescendingByNumber(startNumber, endNumber, req.RequestedData) + } + + logger.Debugf("handling block request message with direction %s "+ + "from start block with hash %s to end block with hash %s", + req.Direction, *startHash, *endHash) + return s.handleChainByHash(*endHash, *startHash, max, req.RequestedData, req.Direction) +} + +// checkOrGetDescendantHash checks if the provided `descendant` is +// on the same chain as the `ancestor`, if it's provided, otherwise +// it sets `descendant` to a block with number=`descendantNumber` that is a descendant of the ancestor. 
+// If used with an Ascending request, ancestor is the start block and descendant is the end block
+// If used with a Descending request, ancestor is the end block and descendant is the start block
+func (s *SyncService) checkOrGetDescendantHash(ancestor common.Hash,
+	descendant *common.Hash, descendantNumber uint) (common.Hash, error) {
+	// if `descendant` was provided, check that it's a descendant of `ancestor`
+	if descendant != nil {
+		header, err := s.blockState.GetHeader(ancestor)
+		if err != nil {
+			return common.Hash{}, fmt.Errorf("failed to get descendant %s: %w", *descendant, err)
+		}
+
+		// if the descendant number is lower than the ancestor number, this is an error
+		if header.Number > descendantNumber {
+			return common.Hash{},
+				fmt.Errorf("invalid request, descendant number %d is lower than ancestor %d",
+					descendantNumber, header.Number)
+		}
+
+		// check that the provided descendant hash is indeed a descendant of the ancestor
+		is, err := s.blockState.IsDescendantOf(ancestor, *descendant)
+		if err != nil {
+			return common.Hash{}, err
+		}
+
+		if !is {
+			return common.Hash{}, errStartAndEndNotOnChain
+		}
+
+		return *descendant, nil
+	}
+
+	// otherwise, get block on canonical chain by descendantNumber
+	hash, err := s.blockState.GetHashByNumber(descendantNumber)
+	if err != nil {
+		return common.Hash{}, err
+	}
+
+	// check if it's a descendant of the provided ancestor hash
+	is, err := s.blockState.IsDescendantOf(ancestor, hash)
+	if err != nil {
+		return common.Hash{}, err
+	}
+
+	if !is {
+		// if it's not a descendant, search for a block that has number=descendantNumber that is
+		hashes, err := s.blockState.GetAllBlocksAtNumber(descendantNumber)
+		if err != nil {
+			return common.Hash{}, fmt.Errorf("failed to get blocks at number %d: %w", descendantNumber, err)
+		}
+
+		for _, hash := range hashes {
+			is, err := s.blockState.IsDescendantOf(ancestor, hash)
+			if err != nil || !is {
+				continue
+			}
+
+			// this sets the descendant hash to the first block we find with
+			// descendantNumber; however, there might be multiple blocks that fit these criteria
+			h := common.Hash{}
+			copy(h[:], hash[:])
+			descendant = &h
+			break
+		}
+
+		if descendant == nil {
+			return common.Hash{}, fmt.Errorf("%w with number %d", errFailedToGetDescendant, descendantNumber)
+		}
+	} else {
+		// if it is, set descendant hash to our block w/ descendantNumber
+		descendant = &hash
+	}
+
+	logger.Tracef("determined descendant %s with number %d and ancestor %s",
+		*descendant, descendantNumber, ancestor)
+	return *descendant, nil
+}
+
+func (s *SyncService) handleAscendingByNumber(start, end uint,
+	requestedData byte) (*messages.BlockResponseMessage, error) {
+	var err error
+	data := make([]*types.BlockData, (end-start)+1)
+
+	for i := uint(0); start+i <= end; i++ {
+		blockNumber := start + i
+		data[i], err = s.getBlockDataByNumber(blockNumber, requestedData)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return &messages.BlockResponseMessage{
+		BlockData: data,
+	}, nil
+}
+
+func (s *SyncService) handleDescendingByNumber(start, end uint,
+	requestedData byte) (*messages.BlockResponseMessage, error) {
+	var err error
+	data := make([]*types.BlockData, (start-end)+1)
+
+	for i := uint(0); start-i >= end; i++ {
+		blockNumber := start - i
+		data[i], err = s.getBlockDataByNumber(blockNumber, requestedData)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return &messages.BlockResponseMessage{
+		BlockData: data,
+	}, nil
+}
+
+func (s *SyncService) handleChainByHash(ancestor, descendant common.Hash,
+	max uint,
requestedData byte, direction messages.SyncDirection) ( + *messages.BlockResponseMessage, error) { + subchain, err := s.blockState.Range(ancestor, descendant) + if err != nil { + return nil, fmt.Errorf("retrieving range: %w", err) + } + + // If the direction is descending, prune from the start. + // if the direction is ascending it should prune from the end. + if uint(len(subchain)) > max { + if direction == messages.Ascending { + subchain = subchain[:max] + } else { + subchain = subchain[uint(len(subchain))-max:] + } + } + + data := make([]*types.BlockData, len(subchain)) + + for i, hash := range subchain { + data[i], err = s.getBlockData(hash, requestedData) + if err != nil { + return nil, err + } + } + + // reverse BlockData, if descending request + if direction == messages.Descending { + slices.Reverse(data) + } + + return &messages.BlockResponseMessage{ + BlockData: data, + }, nil +} + +func (s *SyncService) getBlockDataByNumber(num uint, requestedData byte) (*types.BlockData, error) { + hash, err := s.blockState.GetHashByNumber(num) + if err != nil { + return nil, err + } + + return s.getBlockData(hash, requestedData) +} + +func (s *SyncService) getBlockData(hash common.Hash, requestedData byte) (*types.BlockData, error) { + var err error + blockData := &types.BlockData{ + Hash: hash, + } + + if requestedData == 0 { + return blockData, nil + } + + if (requestedData & messages.RequestedDataHeader) == 1 { + blockData.Header, err = s.blockState.GetHeader(hash) + if err != nil { + logger.Debugf("failed to get header for block with hash %s: %s", hash, err) + } + } + + if (requestedData&messages.RequestedDataBody)>>1 == 1 { + blockData.Body, err = s.blockState.GetBlockBody(hash) + if err != nil { + logger.Debugf("failed to get body for block with hash %s: %s", hash, err) + } + } + + if (requestedData&messages.RequestedDataReceipt)>>2 == 1 { + retData, err := s.blockState.GetReceipt(hash) + if err == nil && retData != nil { + blockData.Receipt = &retData + } + } + + if (requestedData&messages.RequestedDataMessageQueue)>>3 == 1 { + retData, err := s.blockState.GetMessageQueue(hash) + if err == nil && retData != nil { + blockData.MessageQueue = &retData + } + } + + if (requestedData&messages.RequestedDataJustification)>>4 == 1 { + retData, err := s.blockState.GetJustification(hash) + if err == nil && retData != nil { + blockData.Justification = &retData + } + } + + return blockData, nil +} diff --git a/dot/sync/message_integration_test.go b/dot/sync/message_integration_test.go new file mode 100644 index 0000000000..87d46c7d87 --- /dev/null +++ b/dot/sync/message_integration_test.go @@ -0,0 +1,610 @@ +//go:build integration + +// Copyright 2021 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package sync + +import ( + "path/filepath" + "testing" + "time" + + "github.com/ChainSafe/gossamer/dot/network/messages" + "github.com/ChainSafe/gossamer/dot/state" + "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/internal/database" + "github.com/ChainSafe/gossamer/internal/log" + "github.com/ChainSafe/gossamer/lib/common" + "github.com/ChainSafe/gossamer/lib/common/variadic" + "github.com/ChainSafe/gossamer/lib/genesis" + runtime "github.com/ChainSafe/gossamer/lib/runtime" + "github.com/ChainSafe/gossamer/lib/utils" + "github.com/ChainSafe/gossamer/pkg/trie" + "github.com/ChainSafe/gossamer/tests/utils/config" + "github.com/libp2p/go-libp2p/core/peer" + "go.uber.org/mock/gomock" + + rtstorage "github.com/ChainSafe/gossamer/lib/runtime/storage" + 
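+	// wazero_runtime provides the Wasm executor used to run the
+	// Westend-dev genesis runtime in these integration tests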
wazero_runtime "github.com/ChainSafe/gossamer/lib/runtime/wazero" + + "github.com/stretchr/testify/require" +) + +func newWestendDevGenesisWithTrieAndHeader(t *testing.T) ( + gen genesis.Genesis, genesisTrie trie.Trie, genesisHeader types.Header) { + t.Helper() + + genesisPath := utils.GetWestendDevRawGenesisPath(t) + genesisPtr, err := genesis.NewGenesisFromJSONRaw(genesisPath) + require.NoError(t, err) + gen = *genesisPtr + + genesisTrie, err = runtime.NewTrieFromGenesis(gen) + require.NoError(t, err) + + parentHash := common.NewHash([]byte{0}) + stateRoot := genesisTrie.MustHash() + extrinsicRoot := trie.EmptyHash + const number = 0 + digest := types.NewDigest() + genesisHeaderPtr := types.NewHeader(parentHash, + stateRoot, extrinsicRoot, number, digest) + genesisHeader = *genesisHeaderPtr + + return gen, genesisTrie, genesisHeader +} + +func newFullSyncService(t *testing.T) *SyncService { + ctrl := gomock.NewController(t) + + mockTelemetryClient := NewMockTelemetry(ctrl) + mockTelemetryClient.EXPECT().SendMessage(gomock.Any()).AnyTimes() + + wazero_runtime.DefaultTestLogLvl = log.Warn + + testDatadirPath := t.TempDir() + + scfg := state.Config{ + Path: testDatadirPath, + LogLevel: log.Info, + Telemetry: mockTelemetryClient, + GenesisBABEConfig: config.BABEConfigurationTestDefault, + } + stateSrvc := state.NewService(scfg) + stateSrvc.UseMemDB() + + gen, genTrie, genHeader := newWestendDevGenesisWithTrieAndHeader(t) + err := stateSrvc.Initialise(&gen, &genHeader, genTrie) + require.NoError(t, err) + + err = stateSrvc.Start() + require.NoError(t, err) + + // initialise runtime + genState := rtstorage.NewTrieState(genTrie) + + rtCfg := wazero_runtime.Config{ + Storage: genState, + LogLvl: log.Critical, + } + + if stateSrvc != nil { + rtCfg.NodeStorage.BaseDB = stateSrvc.Base + } else { + rtCfg.NodeStorage.BaseDB, err = database.LoadDatabase(filepath.Join(testDatadirPath, "offline_storage"), false) + require.NoError(t, err) + } + + rtCfg.CodeHash, err = stateSrvc.Storage.LoadCodeHash(nil) + require.NoError(t, err) + + instance, err := wazero_runtime.NewRuntimeFromGenesis(rtCfg) + require.NoError(t, err) + + bestBlockHash := stateSrvc.Block.BestBlockHash() + stateSrvc.Block.StoreRuntime(bestBlockHash, instance) + + blockImportHandler := NewMockBlockImportHandler(ctrl) + blockImportHandler.EXPECT().HandleBlockImport(gomock.AssignableToTypeOf(&types.Block{}), + gomock.AssignableToTypeOf(&rtstorage.TrieState{}), false).DoAndReturn( + func(block *types.Block, ts *rtstorage.TrieState, _ bool) error { + // store updates state trie nodes in database + if err = stateSrvc.Storage.StoreTrie(ts, &block.Header); err != nil { + logger.Warnf("failed to store state trie for imported block %s: %s", block.Header.Hash(), err) + return err + } + + // store block in database + err = stateSrvc.Block.AddBlock(block) + require.NoError(t, err) + + stateSrvc.Block.StoreRuntime(block.Header.Hash(), instance) + logger.Debugf("imported block %s and stored state trie with root %s", + block.Header.Hash(), ts.Trie().MustHash()) + return nil + }).AnyTimes() + + mockBabeVerifier := NewMockBabeVerifier(ctrl) + mockBabeVerifier.EXPECT().VerifyBlock(gomock.AssignableToTypeOf(&types.Header{})).AnyTimes() + + mockFinalityGadget := NewMockFinalityGadget(ctrl) + mockFinalityGadget.EXPECT().VerifyBlockJustification(gomock.AssignableToTypeOf(common.Hash{}), + gomock.AssignableToTypeOf([]byte{})).DoAndReturn(func(hash common.Hash, justification []byte) error { + return nil + }).AnyTimes() + + mockNetwork := NewMockNetwork(ctrl) + + 
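+	// wire the full-sync strategy to the real state services and the mocks
+	// above; the request maker is mocked since these tests never dial peers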
fullSyncCfg := &FullSyncConfig{ + BlockState: stateSrvc.Block, + StorageState: stateSrvc.Storage, + BlockImportHandler: blockImportHandler, + TransactionState: stateSrvc.Transaction, + BabeVerifier: mockBabeVerifier, + FinalityGadget: mockFinalityGadget, + Telemetry: mockTelemetryClient, + RequestMaker: NewMockRequestMaker(ctrl), + } + + fullSync := NewFullSyncStrategy(fullSyncCfg) + + serviceCfg := []ServiceConfig{ + WithBlockState(stateSrvc.Block), + WithNetwork(mockNetwork), + WithSlotDuration(6 * time.Second), + WithStrategies(fullSync, nil), + } + + syncer := NewSyncService(serviceCfg...) + return syncer +} + +func addTestBlocksToState(t *testing.T, depth uint, blockState BlockState) { + previousHash := blockState.(*state.BlockState).BestBlockHash() + previousNum, err := blockState.BestBlockNumber() + require.NoError(t, err) + + digest := types.NewDigest() + prd, err := types.NewBabeSecondaryPlainPreDigest(0, 1).ToPreRuntimeDigest() + require.NoError(t, err) + err = digest.Add(*prd) + require.NoError(t, err) + + for i := uint(1); i <= depth; i++ { + block := &types.Block{ + Header: types.Header{ + ParentHash: previousHash, + Number: previousNum + i, + StateRoot: trie.EmptyHash, + Digest: digest, + }, + Body: types.Body{}, + } + + previousHash = block.Header.Hash() + + err := blockState.(*state.BlockState).AddBlock(block) + require.NoError(t, err) + } +} + +func TestService_CreateBlockResponse_MaxSize(t *testing.T) { + s := newFullSyncService(t) + addTestBlocksToState(t, messages.MaxBlocksInResponse*2, s.blockState) + + // test ascending + start, err := variadic.NewUint32OrHash(1) + require.NoError(t, err) + + req := &messages.BlockRequestMessage{ + RequestedData: 3, + StartingBlock: *start, + Direction: messages.Ascending, + Max: nil, + } + + resp, err := s.CreateBlockResponse(peer.ID("alice"), req) + require.NoError(t, err) + require.Equal(t, int(messages.MaxBlocksInResponse), len(resp.BlockData)) + require.Equal(t, uint(1), resp.BlockData[0].Number()) + require.Equal(t, uint(128), resp.BlockData[127].Number()) + + max := uint32(messages.MaxBlocksInResponse + 100) + req = &messages.BlockRequestMessage{ + RequestedData: 3, + StartingBlock: *start, + Direction: messages.Ascending, + Max: &max, + } + + resp, err = s.CreateBlockResponse(peer.ID("alice"), req) + require.NoError(t, err) + require.Equal(t, int(messages.MaxBlocksInResponse), len(resp.BlockData)) + require.Equal(t, uint(1), resp.BlockData[0].Number()) + require.Equal(t, uint(128), resp.BlockData[127].Number()) + + max = uint32(16) + req = &messages.BlockRequestMessage{ + RequestedData: 3, + StartingBlock: *start, + Direction: messages.Ascending, + Max: &max, + } + + resp, err = s.CreateBlockResponse(peer.ID("alice"), req) + require.NoError(t, err) + require.Equal(t, int(max), len(resp.BlockData)) + require.Equal(t, uint(1), resp.BlockData[0].Number()) + require.Equal(t, uint(16), resp.BlockData[15].Number()) + + // test descending + start, err = variadic.NewUint32OrHash(uint32(128)) + require.NoError(t, err) + + req = &messages.BlockRequestMessage{ + RequestedData: 3, + StartingBlock: *start, + Direction: messages.Descending, + Max: nil, + } + + resp, err = s.CreateBlockResponse(peer.ID("alice"), req) + require.NoError(t, err) + require.Equal(t, int(messages.MaxBlocksInResponse), len(resp.BlockData)) + require.Equal(t, uint(128), resp.BlockData[0].Number()) + require.Equal(t, uint(1), resp.BlockData[127].Number()) + + max = uint32(messages.MaxBlocksInResponse + 100) + start, err = variadic.NewUint32OrHash(uint32(256)) + 
require.NoError(t, err) + + req = &messages.BlockRequestMessage{ + RequestedData: 3, + StartingBlock: *start, + Direction: messages.Descending, + Max: &max, + } + + resp, err = s.CreateBlockResponse(peer.ID("alice"), req) + require.NoError(t, err) + require.Equal(t, int(messages.MaxBlocksInResponse), len(resp.BlockData)) + require.Equal(t, uint(256), resp.BlockData[0].Number()) + require.Equal(t, uint(129), resp.BlockData[127].Number()) + + max = uint32(16) + req = &messages.BlockRequestMessage{ + RequestedData: 3, + StartingBlock: *start, + Direction: messages.Descending, + Max: &max, + } + + resp, err = s.CreateBlockResponse(peer.ID("alice"), req) + require.NoError(t, err) + require.Equal(t, int(max), len(resp.BlockData)) + require.Equal(t, uint(256), resp.BlockData[0].Number()) + require.Equal(t, uint(241), resp.BlockData[15].Number()) +} + +func TestService_CreateBlockResponse_StartHash(t *testing.T) { + s := newFullSyncService(t) + addTestBlocksToState(t, uint(messages.MaxBlocksInResponse*2), s.blockState) + + // test ascending with nil endBlockHash + startHash, err := s.blockState.GetHashByNumber(1) + require.NoError(t, err) + + start, err := variadic.NewUint32OrHash(startHash) + require.NoError(t, err) + + req := &messages.BlockRequestMessage{ + RequestedData: 3, + StartingBlock: *start, + Direction: messages.Ascending, + Max: nil, + } + + resp, err := s.CreateBlockResponse(peer.ID("alice"), req) + require.NoError(t, err) + require.Equal(t, int(messages.MaxBlocksInResponse), len(resp.BlockData)) + require.Equal(t, uint(1), resp.BlockData[0].Number()) + require.Equal(t, uint(128), resp.BlockData[127].Number()) + + // test descending with nil endBlockHash + startHash, err = s.blockState.GetHashByNumber(16) + require.NoError(t, err) + + start, err = variadic.NewUint32OrHash(startHash) + require.NoError(t, err) + + req = &messages.BlockRequestMessage{ + RequestedData: 3, + StartingBlock: *start, + Direction: messages.Descending, + Max: nil, + } + + resp, err = s.CreateBlockResponse(peer.ID("alice"), req) + require.NoError(t, err) + require.Equal(t, int(16), len(resp.BlockData)) + require.Equal(t, uint(16), resp.BlockData[0].Number()) + require.Equal(t, uint(1), resp.BlockData[15].Number()) + + req = &messages.BlockRequestMessage{ + RequestedData: 3, + StartingBlock: *start, + Direction: messages.Descending, + Max: nil, + } + + resp, err = s.CreateBlockResponse(peer.ID("alice"), req) + require.NoError(t, err) + require.Equal(t, int(16), len(resp.BlockData)) + require.Equal(t, uint(16), resp.BlockData[0].Number()) + require.Equal(t, uint(1), resp.BlockData[15].Number()) + + // test descending with nil endBlockHash and start > messages.MaxBlocksInResponse + startHash, err = s.blockState.GetHashByNumber(256) + require.NoError(t, err) + + start, err = variadic.NewUint32OrHash(startHash) + require.NoError(t, err) + + req = &messages.BlockRequestMessage{ + RequestedData: 3, + StartingBlock: *start, + Direction: messages.Descending, + Max: nil, + } + + resp, err = s.CreateBlockResponse(peer.ID("alice"), req) + require.NoError(t, err) + require.Equal(t, int(messages.MaxBlocksInResponse), len(resp.BlockData)) + require.Equal(t, uint(256), resp.BlockData[0].Number()) + require.Equal(t, uint(129), resp.BlockData[127].Number()) + + startHash, err = s.blockState.GetHashByNumber(128) + require.NoError(t, err) + + start, err = variadic.NewUint32OrHash(startHash) + require.NoError(t, err) + + req = &messages.BlockRequestMessage{ + RequestedData: 3, + StartingBlock: *start, + Direction: 
messages.Descending,
+		Max:           nil,
+	}
+
+	resp, err = s.CreateBlockResponse(peer.ID("alice"), req)
+	require.NoError(t, err)
+	require.Equal(t, messages.MaxBlocksInResponse, len(resp.BlockData))
+	require.Equal(t, uint(128), resp.BlockData[0].Number())
+	require.Equal(t, uint(1), resp.BlockData[127].Number())
+}
+
+func TestService_checkOrGetDescendantHash_integration(t *testing.T) {
+	t.Parallel()
+	s := newFullSyncService(t)
+	branches := map[uint]int{
+		8: 1,
+	}
+	state.AddBlocksToStateWithFixedBranches(t, s.blockState.(*state.BlockState), 16, branches)
+
+	// base case
+	ancestor, err := s.blockState.GetHashByNumber(1)
+	require.NoError(t, err)
+	descendant, err := s.blockState.GetHashByNumber(16)
+	require.NoError(t, err)
+	const descendantNumber uint = 16
+
+	res, err := s.checkOrGetDescendantHash(ancestor, &descendant, descendantNumber)
+	require.NoError(t, err)
+	require.Equal(t, descendant, res)
+
+	// supply descendant that's not on canonical chain
+	leaves := s.blockState.(*state.BlockState).Leaves()
+	require.Equal(t, 2, len(leaves))
+
+	ancestor, err = s.blockState.GetHashByNumber(1)
+	require.NoError(t, err)
+	descendant, err = s.blockState.GetHashByNumber(descendantNumber)
+	require.NoError(t, err)
+
+	for _, leaf := range leaves {
+		if leaf != descendant {
+			descendant = leaf
+			break
+		}
+	}
+
+	res, err = s.checkOrGetDescendantHash(ancestor, &descendant, descendantNumber)
+	require.NoError(t, err)
+	require.Equal(t, descendant, res)
+
+	// supply descendant that's not on the same chain as the ancestor
+	ancestor, err = s.blockState.GetHashByNumber(9)
+	require.NoError(t, err)
+	_, err = s.checkOrGetDescendantHash(ancestor, &descendant, descendantNumber)
+	require.Error(t, err)
+
+	// don't supply descendant, should return block on canonical chain
+	// as ancestor is on canonical chain
+	expected, err := s.blockState.GetHashByNumber(descendantNumber)
+	require.NoError(t, err)
+
+	res, err = s.checkOrGetDescendantHash(ancestor, nil, descendantNumber)
+	require.NoError(t, err)
+	require.Equal(t, expected, res)
+
+	// don't supply descendant and provide ancestor not on canonical chain
+	// should return descendant block also not on canonical chain
+	block9s, err := s.blockState.GetAllBlocksAtNumber(9)
+	require.NoError(t, err)
+	canonical, err := s.blockState.GetHashByNumber(9)
+	require.NoError(t, err)
+
+	// set ancestor to non-canonical block 9
+	for _, block := range block9s {
+		if canonical != block {
+			ancestor = block
+			break
+		}
+	}
+
+	// expected is non-canonical block 16
+	for _, leaf := range leaves {
+		is, err := s.blockState.IsDescendantOf(ancestor, leaf)
+		require.NoError(t, err)
+		if is {
+			expected = leaf
+			break
+		}
+	}
+
+	res, err = s.checkOrGetDescendantHash(ancestor, nil, descendantNumber)
+	require.NoError(t, err)
+	require.Equal(t, expected, res)
+}
+
+func TestService_CreateBlockResponse_Fields(t *testing.T) {
+	s := newFullSyncService(t)
+	addTestBlocksToState(t, 2, s.blockState)
+
+	bestHash := s.blockState.(*state.BlockState).BestBlockHash()
+	bestBlock, err := s.blockState.(*state.BlockState).GetBlockByNumber(1)
+	require.NoError(t, err)
+
+	// set some nils and check no error is thrown
+	bds := &types.BlockData{
+		Hash:          bestHash,
+		Header:        nil,
+		Receipt:       nil,
+		MessageQueue:  nil,
+		Justification: nil,
+	}
+	err = s.blockState.CompareAndSetBlockData(bds)
+	require.NoError(t, err)
+
+	// set receipt message and justification
+	a := []byte("asdf")
+	b := []byte("ghjkl")
+	c := []byte("qwerty")
+	bds = &types.BlockData{
+		Hash:    bestHash,
+		Receipt: &a,
+	
MessageQueue: &b, + Justification: &c, + } + + start, err := variadic.NewUint32OrHash(uint32(1)) + require.NoError(t, err) + + err = s.blockState.CompareAndSetBlockData(bds) + require.NoError(t, err) + + testCases := []struct { + description string + value *messages.BlockRequestMessage + expectedMsgValue *messages.BlockResponseMessage + }{ + { + description: "test get Header and Body", + value: &messages.BlockRequestMessage{ + RequestedData: 3, + StartingBlock: *start, + Direction: messages.Ascending, + Max: nil, + }, + expectedMsgValue: &messages.BlockResponseMessage{ + BlockData: []*types.BlockData{ + { + Hash: bestHash, + Header: &bestBlock.Header, + Body: &bestBlock.Body, + }, + }, + }, + }, + { + description: "test get Header", + value: &messages.BlockRequestMessage{ + RequestedData: 1, + StartingBlock: *start, + Direction: messages.Ascending, + Max: nil, + }, + expectedMsgValue: &messages.BlockResponseMessage{ + BlockData: []*types.BlockData{ + { + Hash: bestHash, + Header: &bestBlock.Header, + Body: nil, + }, + }, + }, + }, + { + description: "test get Receipt", + value: &messages.BlockRequestMessage{ + RequestedData: 4, + StartingBlock: *start, + Direction: messages.Ascending, + Max: nil, + }, + expectedMsgValue: &messages.BlockResponseMessage{ + BlockData: []*types.BlockData{ + { + Hash: bestHash, + Header: nil, + Body: nil, + Receipt: bds.Receipt, + }, + }, + }, + }, + { + description: "test get MessageQueue", + value: &messages.BlockRequestMessage{ + RequestedData: 8, + StartingBlock: *start, + Direction: messages.Ascending, + Max: nil, + }, + expectedMsgValue: &messages.BlockResponseMessage{ + BlockData: []*types.BlockData{ + { + Hash: bestHash, + Header: nil, + Body: nil, + MessageQueue: bds.MessageQueue, + }, + }, + }, + }, + } + + for _, test := range testCases { + t.Run(test.description, func(t *testing.T) { + resp, err := s.CreateBlockResponse(peer.ID("alice"), test.value) + require.NoError(t, err) + require.Len(t, resp.BlockData, 2) + require.Equal(t, test.expectedMsgValue.BlockData[0].Hash, bestHash) + require.Equal(t, test.expectedMsgValue.BlockData[0].Header, resp.BlockData[0].Header) + require.Equal(t, test.expectedMsgValue.BlockData[0].Body, resp.BlockData[0].Body) + + if test.expectedMsgValue.BlockData[0].Receipt != nil { + require.Equal(t, test.expectedMsgValue.BlockData[0].Receipt, resp.BlockData[1].Receipt) + } + + if test.expectedMsgValue.BlockData[0].MessageQueue != nil { + require.Equal(t, test.expectedMsgValue.BlockData[0].MessageQueue, resp.BlockData[1].MessageQueue) + } + + if test.expectedMsgValue.BlockData[0].Justification != nil { + require.Equal(t, test.expectedMsgValue.BlockData[0].Justification, resp.BlockData[1].Justification) + } + }) + } +} diff --git a/dot/sync/message_test.go b/dot/sync/message_test.go new file mode 100644 index 0000000000..26ff5f986f --- /dev/null +++ b/dot/sync/message_test.go @@ -0,0 +1,389 @@ +// Copyright 2021 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package sync + +import ( + "errors" + "fmt" + "testing" + + "github.com/ChainSafe/gossamer/dot/network/messages" + "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/lib/common" + "github.com/ChainSafe/gossamer/lib/common/variadic" + lrucache "github.com/ChainSafe/gossamer/lib/utils/lru-cache" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/assert" + "go.uber.org/mock/gomock" +) + +func TestService_CreateBlockResponse(t *testing.T) { + t.Parallel() + + type args struct { + req 
*messages.BlockRequestMessage + } + tests := map[string]struct { + blockStateBuilder func(ctrl *gomock.Controller) BlockState + args args + want *messages.BlockResponseMessage + err error + }{ + "invalid_block_request": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + return mockBlockState + }, + args: args{req: &messages.BlockRequestMessage{}}, + err: ErrInvalidBlockRequest, + }, + "ascending_request_nil_startHash": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().BestBlockNumber().Return(uint(1), nil) + mockBlockState.EXPECT().GetHashByNumber(uint(1)).Return(common.Hash{1, 2}, nil) + return mockBlockState + }, + args: args{req: &messages.BlockRequestMessage{ + StartingBlock: *variadic.Uint32OrHashFrom(uint32(0)), + Direction: messages.Ascending, + }}, + want: &messages.BlockResponseMessage{BlockData: []*types.BlockData{{ + Hash: common.Hash{1, 2}, + }}}, + }, + "ascending_request_start_number_higher": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().BestBlockNumber().Return(uint(1), nil) + return mockBlockState + }, + args: args{req: &messages.BlockRequestMessage{ + StartingBlock: *variadic.Uint32OrHashFrom(2), + Direction: messages.Ascending, + }}, + err: errRequestStartTooHigh, + want: nil, + }, + "descending_request_nil_startHash": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().BestBlockNumber().Return(uint(1), nil) + return mockBlockState + }, + args: args{req: &messages.BlockRequestMessage{ + StartingBlock: *variadic.Uint32OrHashFrom(0), + Direction: messages.Descending, + }}, + want: &messages.BlockResponseMessage{BlockData: []*types.BlockData{}}, + }, + "descending_request_start_number_higher": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().BestBlockNumber().Return(uint(1), nil) + mockBlockState.EXPECT().GetHashByNumber(uint(1)).Return(common.Hash{1, 2}, nil) + return mockBlockState + }, + args: args{req: &messages.BlockRequestMessage{ + StartingBlock: *variadic.Uint32OrHashFrom(2), + Direction: messages.Descending, + }}, + err: nil, + want: &messages.BlockResponseMessage{BlockData: []*types.BlockData{{ + Hash: common.Hash{1, 2}, + }}}, + }, + "ascending_request_startHash": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(&types.Header{ + Number: 1, + }, nil) + mockBlockState.EXPECT().BestBlockNumber().Return(uint(2), nil) + mockBlockState.EXPECT().GetHashByNumber(uint(2)).Return(common.Hash{1, 2, 3}, nil) + mockBlockState.EXPECT().IsDescendantOf(common.Hash{}, common.Hash{1, 2, 3}).Return(true, + nil) + mockBlockState.EXPECT().Range(common.Hash{}, common.Hash{1, 2, 3}).Return([]common.Hash{{1, + 2}}, + nil) + return mockBlockState + }, + args: args{req: &messages.BlockRequestMessage{ + StartingBlock: *variadic.Uint32OrHashFrom(common.Hash{}), + Direction: messages.Ascending, + }}, + want: &messages.BlockResponseMessage{BlockData: []*types.BlockData{{ + Hash: common.Hash{1, 2}, + }}}, + }, + "descending_request_startHash": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + 
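+				// mirrors handleDescendingRequest: resolve the start header by
+				// hash, derive the end hash from its number, then walk the
+				// chain with Range(endHash, startHash)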
mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(&types.Header{
+					Number: 1,
+				}, nil)
+				mockBlockState.EXPECT().GetHeaderByNumber(uint(1)).Return(&types.Header{
+					Number: 1,
+				}, nil)
+				mockBlockState.EXPECT().Range(common.MustHexToHash(
+					"0x6443a0b46e0412e626363028115a9f2cf963eeed526b8b33e5316f08b50d0dc3"),
+					common.Hash{}).Return([]common.Hash{{1, 2}}, nil)
+				return mockBlockState
+			},
+			args: args{req: &messages.BlockRequestMessage{
+				StartingBlock: *variadic.Uint32OrHashFrom(common.Hash{}),
+				Direction:     messages.Descending,
+			}},
+			want: &messages.BlockResponseMessage{BlockData: []*types.BlockData{{
+				Hash: common.Hash{1, 2},
+			}}},
+		},
+		"invalid_direction": {
+			blockStateBuilder: func(ctrl *gomock.Controller) BlockState {
+				return nil
+			},
+			args: args{
+				req: &messages.BlockRequestMessage{
+					StartingBlock: *variadic.Uint32OrHashFrom(common.Hash{}),
+					Direction:     messages.SyncDirection(3),
+				}},
+			err: fmt.Errorf("%w: 3", errInvalidRequestDirection),
+		},
+	}
+	for name, tt := range tests {
+		tt := tt
+		t.Run(name, func(t *testing.T) {
+			t.Parallel()
+			ctrl := gomock.NewController(t)
+			s := &SyncService{
+				blockState:            tt.blockStateBuilder(ctrl),
+				seenBlockSyncRequests: lrucache.NewLRUCache[common.Hash, uint](100),
+			}
+			got, err := s.CreateBlockResponse(peer.ID("alice"), tt.args.req)
+			if tt.err != nil {
+				assert.EqualError(t, err, tt.err.Error())
+			} else {
+				assert.NoError(t, err)
+			}
+			assert.Equal(t, tt.want, got)
+		})
+	}
+}
+
+func TestService_checkOrGetDescendantHash(t *testing.T) {
+	t.Parallel()
+
+	type args struct {
+		ancestor         common.Hash
+		descendant       *common.Hash
+		descendantNumber uint
+	}
+	tests := map[string]struct {
+		blockStateBuilder func(ctrl *gomock.Controller) BlockState
+		args              args
+		want              common.Hash
+		expectedError     error
+	}{
+		"nil_descendant": {
+			blockStateBuilder: func(ctrl *gomock.Controller) BlockState {
+				mockStateBuilder := NewMockBlockState(ctrl)
+				mockStateBuilder.EXPECT().GetHashByNumber(uint(1)).Return(common.Hash{}, nil)
+				mockStateBuilder.EXPECT().IsDescendantOf(common.Hash{}, common.Hash{}).Return(true, nil)
+				return mockStateBuilder
+			},
+			args: args{ancestor: common.Hash{}, descendant: nil, descendantNumber: 1},
+		},
+		"not_nil_descendant": {
+			blockStateBuilder: func(ctrl *gomock.Controller) BlockState {
+				mockBlockState := NewMockBlockState(ctrl)
+				mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(&types.Header{}, nil)
+				mockBlockState.EXPECT().IsDescendantOf(common.Hash{}, common.Hash{1, 2}).Return(true, nil)
+				return mockBlockState
+			},
+			args: args{ancestor: common.Hash{0}, descendant: &common.Hash{1, 2}, descendantNumber: 1},
+			want: common.Hash{1, 2},
+		},
+		"descendant_greater_than_header": {
+			blockStateBuilder: func(ctrl *gomock.Controller) BlockState {
+				mockBlockState := NewMockBlockState(ctrl)
+				mockBlockState.EXPECT().GetHeader(common.Hash{2}).Return(&types.Header{
+					Number: 2,
+				}, nil)
+				return mockBlockState
+			},
+			args:          args{ancestor: common.Hash{2}, descendant: &common.Hash{1, 2}, descendantNumber: 1},
+			want:          common.Hash{},
+			expectedError: errors.New("invalid request, descendant number 1 is lower than ancestor 2"),
+		},
+	}
+	for name, tt := range tests {
+		tt := tt
+		t.Run(name, func(t *testing.T) {
+			t.Parallel()
+			ctrl := gomock.NewController(t)
+			s := &SyncService{
+				blockState: tt.blockStateBuilder(ctrl),
+			}
+			got, err := s.checkOrGetDescendantHash(tt.args.ancestor, tt.args.descendant, tt.args.descendantNumber)
+			if tt.expectedError != nil {
+				assert.EqualError(t, err,
tt.expectedError.Error()) + } else { + assert.NoError(t, err) + } + assert.Equal(t, tt.want, got) + }) + } +} + +func TestService_getBlockData(t *testing.T) { + t.Parallel() + + type args struct { + hash common.Hash + requestedData byte + } + tests := map[string]struct { + blockStateBuilder func(ctrl *gomock.Controller) BlockState + args args + want *types.BlockData + err error + }{ + "requestedData_0": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + return nil + }, + args: args{ + hash: common.Hash{}, + requestedData: 0, + }, + want: &types.BlockData{}, + }, + "requestedData_RequestedDataHeader_error": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(nil, errors.New("empty hash")) + return mockBlockState + }, + args: args{ + hash: common.Hash{0}, + requestedData: messages.RequestedDataHeader, + }, + want: &types.BlockData{ + Hash: common.Hash{}, + }, + }, + "requestedData_RequestedDataHeader": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetHeader(common.Hash{1}).Return(&types.Header{ + Number: 2, + }, nil) + return mockBlockState + }, + args: args{ + hash: common.Hash{1}, + requestedData: messages.RequestedDataHeader, + }, + want: &types.BlockData{ + Hash: common.Hash{1}, + Header: &types.Header{ + Number: 2, + }, + }, + }, + "requestedData_RequestedDataBody_error": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetBlockBody(common.Hash{}).Return(nil, errors.New("empty hash")) + return mockBlockState + }, + + args: args{ + hash: common.Hash{}, + requestedData: messages.RequestedDataBody, + }, + want: &types.BlockData{ + Hash: common.Hash{}, + }, + }, + "requestedData_RequestedDataBody": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetBlockBody(common.Hash{1}).Return(&types.Body{[]byte{1}}, nil) + return mockBlockState + }, + args: args{ + hash: common.Hash{1}, + requestedData: messages.RequestedDataBody, + }, + want: &types.BlockData{ + Hash: common.Hash{1}, + Body: &types.Body{[]byte{1}}, + }, + }, + "requestedData_RequestedDataReceipt": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetReceipt(common.Hash{1}).Return([]byte{1}, nil) + return mockBlockState + }, + args: args{ + hash: common.Hash{1}, + requestedData: messages.RequestedDataReceipt, + }, + want: &types.BlockData{ + Hash: common.Hash{1}, + Receipt: &[]byte{1}, + }, + }, + "requestedData_RequestedDataMessageQueue": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetMessageQueue(common.Hash{2}).Return([]byte{2}, nil) + return mockBlockState + }, + args: args{ + hash: common.Hash{2}, + requestedData: messages.RequestedDataMessageQueue, + }, + want: &types.BlockData{ + Hash: common.Hash{2}, + MessageQueue: &[]byte{2}, + }, + }, + "requestedData_RequestedDataJustification": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetJustification(common.Hash{3}).Return([]byte{3}, nil) + return mockBlockState + }, + args: args{ + hash: common.Hash{3}, + requestedData: 
messages.RequestedDataJustification, + }, + want: &types.BlockData{ + Hash: common.Hash{3}, + Justification: &[]byte{3}, + }, + }, + } + for name, tt := range tests { + tt := tt + t.Run(name, func(t *testing.T) { + t.Parallel() + ctrl := gomock.NewController(t) + s := &SyncService{ + blockState: tt.blockStateBuilder(ctrl), + } + got, err := s.getBlockData(tt.args.hash, tt.args.requestedData) + if tt.err != nil { + assert.EqualError(t, err, tt.err.Error()) + } else { + assert.NoError(t, err) + } + assert.Equal(t, tt.want, got) + }) + } +} diff --git a/dot/sync/mock_request_maker.go b/dot/sync/mock_request_maker.go new file mode 100644 index 0000000000..4c78528d39 --- /dev/null +++ b/dot/sync/mock_request_maker.go @@ -0,0 +1,55 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ChainSafe/gossamer/dot/network (interfaces: RequestMaker) +// +// Generated by this command: +// +// mockgen -destination=mock_request_maker.go -package sync github.com/ChainSafe/gossamer/dot/network RequestMaker +// + +// Package sync is a generated GoMock package. +package sync + +import ( + reflect "reflect" + + messages "github.com/ChainSafe/gossamer/dot/network/messages" + peer "github.com/libp2p/go-libp2p/core/peer" + gomock "go.uber.org/mock/gomock" +) + +// MockRequestMaker is a mock of RequestMaker interface. +type MockRequestMaker struct { + ctrl *gomock.Controller + recorder *MockRequestMakerMockRecorder +} + +// MockRequestMakerMockRecorder is the mock recorder for MockRequestMaker. +type MockRequestMakerMockRecorder struct { + mock *MockRequestMaker +} + +// NewMockRequestMaker creates a new mock instance. +func NewMockRequestMaker(ctrl *gomock.Controller) *MockRequestMaker { + mock := &MockRequestMaker{ctrl: ctrl} + mock.recorder = &MockRequestMakerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockRequestMaker) EXPECT() *MockRequestMakerMockRecorder { + return m.recorder +} + +// Do mocks base method. +func (m *MockRequestMaker) Do(arg0 peer.ID, arg1, arg2 messages.P2PMessage) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Do", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// Do indicates an expected call of Do. +func (mr *MockRequestMakerMockRecorder) Do(arg0, arg1, arg2 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Do", reflect.TypeOf((*MockRequestMaker)(nil).Do), arg0, arg1, arg2) +} diff --git a/lib/sync/mocks_generate_test.go b/dot/sync/mocks_generate_test.go similarity index 68% rename from lib/sync/mocks_generate_test.go rename to dot/sync/mocks_generate_test.go index c7c8d816de..894b5747f6 100644 --- a/lib/sync/mocks_generate_test.go +++ b/dot/sync/mocks_generate_test.go @@ -4,3 +4,4 @@ package sync //go:generate mockgen -destination=mocks_test.go -package=$GOPACKAGE . Telemetry,BlockState,StorageState,TransactionState,BabeVerifier,FinalityGadget,BlockImportHandler,Network,Importer +//go:generate mockgen -destination=mock_request_maker.go -package $GOPACKAGE github.com/ChainSafe/gossamer/dot/network RequestMaker diff --git a/lib/sync/mocks_test.go b/dot/sync/mocks_test.go similarity index 99% rename from lib/sync/mocks_test.go rename to dot/sync/mocks_test.go index f75c98918a..dd8659dc8c 100644 --- a/lib/sync/mocks_test.go +++ b/dot/sync/mocks_test.go @@ -1,5 +1,5 @@ // Code generated by MockGen. DO NOT EDIT. 
-// Source: github.com/ChainSafe/gossamer/lib/sync (interfaces: Telemetry,BlockState,StorageState,TransactionState,BabeVerifier,FinalityGadget,BlockImportHandler,Network,Importer) +// Source: github.com/ChainSafe/gossamer/dot/sync (interfaces: Telemetry,BlockState,StorageState,TransactionState,BabeVerifier,FinalityGadget,BlockImportHandler,Network,Importer) // // Generated by this command: // diff --git a/lib/sync/peer_view.go b/dot/sync/peer_view.go similarity index 100% rename from lib/sync/peer_view.go rename to dot/sync/peer_view.go diff --git a/lib/sync/request_queue.go b/dot/sync/request_queue.go similarity index 100% rename from lib/sync/request_queue.go rename to dot/sync/request_queue.go diff --git a/lib/sync/service.go b/dot/sync/service.go similarity index 95% rename from lib/sync/service.go rename to dot/sync/service.go index 1b968ad24c..6e00b7178f 100644 --- a/lib/sync/service.go +++ b/dot/sync/service.go @@ -10,12 +10,12 @@ import ( "time" "github.com/ChainSafe/gossamer/dot/network" - "github.com/ChainSafe/gossamer/dot/network/messages" "github.com/ChainSafe/gossamer/dot/peerset" "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/internal/log" "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/lib/runtime" + lrucache "github.com/ChainSafe/gossamer/lib/utils/lru-cache" "github.com/libp2p/go-libp2p/core/peer" ) @@ -95,14 +95,17 @@ type SyncService struct { minPeers int slotDuration time.Duration + seenBlockSyncRequests *lrucache.LRUCache[common.Hash, uint] + stopCh chan struct{} } func NewSyncService(cfgs ...ServiceConfig) *SyncService { svc := &SyncService{ - minPeers: minPeersDefault, - waitPeersDuration: waitPeersDefaultTimeout, - stopCh: make(chan struct{}), + minPeers: minPeersDefault, + waitPeersDuration: waitPeersDefaultTimeout, + stopCh: make(chan struct{}), + seenBlockSyncRequests: lrucache.NewLRUCache[common.Hash, uint](100), } for _, cfg := range cfgs { @@ -186,11 +189,6 @@ func (s *SyncService) OnConnectionClosed(who peer.ID) { s.workerPool.removeWorker(who) } -func (s *SyncService) CreateBlockResponse(who peer.ID, req *messages.BlockRequestMessage) ( - *messages.BlockResponseMessage, error) { - return nil, nil -} - func (s *SyncService) IsSynced() bool { return false } diff --git a/lib/sync/service_test.go b/dot/sync/service_test.go similarity index 100% rename from lib/sync/service_test.go rename to dot/sync/service_test.go diff --git a/lib/sync/testdata/westend_blocks.yaml b/dot/sync/testdata/westend_blocks.yaml similarity index 100% rename from lib/sync/testdata/westend_blocks.yaml rename to dot/sync/testdata/westend_blocks.yaml diff --git a/lib/sync/unready_blocks.go b/dot/sync/unready_blocks.go similarity index 100% rename from lib/sync/unready_blocks.go rename to dot/sync/unready_blocks.go diff --git a/lib/sync/worker_pool.go b/dot/sync/worker_pool.go similarity index 99% rename from lib/sync/worker_pool.go rename to dot/sync/worker_pool.go index a12bbb0e48..edce7ab7c3 100644 --- a/lib/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -110,7 +110,7 @@ func (s *syncWorkerPool) submitRequests(tasks []*syncTask) []*syncTaskResult { if !completed { results = append(results, &syncTaskResult{ - completed: completed, + completed: false, request: task.request, response: nil, }) diff --git a/scripts/retrieve_block/retrieve_block.go b/scripts/retrieve_block/retrieve_block.go index 6fab6cd17c..c8e489ea75 100644 --- a/scripts/retrieve_block/retrieve_block.go +++ b/scripts/retrieve_block/retrieve_block.go @@ -116,14 +116,14 
@@ func main() { log.Println("connecting...") err := p2pHost.Connect(ctx, bootnodesAddr) if err != nil { - fmt.Printf("fail with: %s\n", err.Error()) + log.Printf("fail with: %s\n", err.Error()) continue } log.Printf("requesting from peer %s\n", bootnodesAddr.String()) stream, err := p2pHost.NewStream(ctx, bootnodesAddr.ID, protocolID) if err != nil { - fmt.Printf("WARN: failed to create stream using protocol %s: %s", protocolID, err.Error()) + log.Printf("WARN: failed to create stream using protocol %s: %s", protocolID, err.Error()) } defer stream.Close() //nolint:errcheck From 2048a9ee429f4d3cde43a680145edbce0000599e Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 28 Aug 2024 15:02:15 -0400 Subject: [PATCH 28/74] chore: implement `HighestBlock` and `IsSynced` --- dot/sync/fullsync.go | 11 +++++++++++ dot/sync/service.go | 12 +++++++++++- 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/dot/sync/fullsync.go b/dot/sync/fullsync.go index 2e7ebda0b3..c03b162b1c 100644 --- a/dot/sync/fullsync.go +++ b/dot/sync/fullsync.go @@ -353,6 +353,17 @@ func (f *FullSyncStrategy) OnBlockAnnounce(from peer.ID, msg *network.BlockAnnou return nil, nil } +func (f *FullSyncStrategy) IsSynced() bool { + highestBlock, err := f.blockState.BestBlockNumber() + if err != nil { + logger.Criticalf("cannot get best block number") + return false + } + + // TODO: research a better rule + return uint32(highestBlock) >= (f.peers.getTarget() - 128) +} + type RequestResponseData struct { req *messages.BlockRequestMessage responseData []*types.BlockData diff --git a/dot/sync/service.go b/dot/sync/service.go index 6e00b7178f..7c9bcfd84b 100644 --- a/dot/sync/service.go +++ b/dot/sync/service.go @@ -72,6 +72,7 @@ type Strategy interface { NextActions() ([]*syncTask, error) IsFinished(results []*syncTaskResult) (done bool, repChanges []Change, blocks []peer.ID, err error) ShowMetrics() + IsSynced() bool } type BlockOrigin byte @@ -190,11 +191,20 @@ func (s *SyncService) OnConnectionClosed(who peer.ID) { } func (s *SyncService) IsSynced() bool { + s.mu.Lock() + defer s.mu.Unlock() + + s.currentStrategy.IsSynced() return false } func (s *SyncService) HighestBlock() uint { - return 0 + highestBlock, err := s.blockState.BestBlockNumber() + if err != nil { + logger.Warnf("failed to get the highest block: %s", err) + return 0 + } + return highestBlock } func (s *SyncService) runSyncEngine() { From 625f00ed96514a6b5a920ead3e318350de3e460e Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 28 Aug 2024 15:10:02 -0400 Subject: [PATCH 29/74] chore: fix lint warns --- dot/network/messages/block.go | 2 +- dot/sync/message.go | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/dot/network/messages/block.go b/dot/network/messages/block.go index 18c09ed904..8b5a14cfae 100644 --- a/dot/network/messages/block.go +++ b/dot/network/messages/block.go @@ -104,7 +104,7 @@ func NewAscendingBlockRequests(startNumber, targetNumber uint32, requestedData b lastIteration := numRequests - 1 if i == lastIteration && missingBlocks != 0 { - max = uint32(missingBlocks) + max = missingBlocks } start := variadic.Uint32OrHashFrom(startNumber) diff --git a/dot/sync/message.go b/dot/sync/message.go index f310708a24..e9fb273bf6 100644 --- a/dot/sync/message.go +++ b/dot/sync/message.go @@ -71,7 +71,8 @@ func (s *SyncService) CreateBlockResponse(from peer.ID, req *messages.BlockReque } } -func (s *SyncService) handleAscendingRequest(req *messages.BlockRequestMessage) (*messages.BlockResponseMessage, error) { 
+func (s *SyncService) handleAscendingRequest(req *messages.BlockRequestMessage) (
+	*messages.BlockResponseMessage, error) {
 	var (
 		max         uint = messages.MaxBlocksInResponse
 		startHash   *common.Hash
@@ -147,7 +148,8 @@ func (s *SyncService) handleAscendingRequest(req *messages.BlockRequestMessage)
 	return s.handleChainByHash(*startHash, *endHash, max, req.RequestedData, req.Direction)
 }
 
-func (s *SyncService) handleDescendingRequest(req *messages.BlockRequestMessage) (*messages.BlockResponseMessage, error) {
+func (s *SyncService) handleDescendingRequest(req *messages.BlockRequestMessage) (
+	*messages.BlockResponseMessage, error) {
 	var (
 		startHash   *common.Hash
 		startNumber uint

From f4615e42170edf53fb18e76e7548842afc5f9dc5 Mon Sep 17 00:00:00 2001
From: EclesioMeloJunior
Date: Wed, 28 Aug 2024 15:17:32 -0400
Subject: [PATCH 30/74] chore: remove unneeded code

---
 dot/network/messages/block.go | 2 --
 1 file changed, 2 deletions(-)

diff --git a/dot/network/messages/block.go b/dot/network/messages/block.go
index 8b5a14cfae..25d6d13219 100644
--- a/dot/network/messages/block.go
+++ b/dot/network/messages/block.go
@@ -146,8 +146,6 @@ func (bm *BlockRequestMessage) Encode() ([]byte, error) {
 		MaxBlocks: max,
 	}
 
-	bm.StartingBlock.Encode()
-
 	if bm.StartingBlock.IsHash() {
 		hash := bm.StartingBlock.Hash()
 		msg.FromBlock = &pb.BlockRequest_Hash{

From 43eef8237f4c3c96691fad77f6b2587fbaf4788f Mon Sep 17 00:00:00 2001
From: EclesioMeloJunior
Date: Thu, 29 Aug 2024 08:20:51 -0400
Subject: [PATCH 31/74] chore: remove unneeded comment

---
 dot/sync/service.go | 1 -
 1 file changed, 1 deletion(-)

diff --git a/dot/sync/service.go b/dot/sync/service.go
index 7c9bcfd84b..3c5f504773 100644
--- a/dot/sync/service.go
+++ b/dot/sync/service.go
@@ -212,7 +212,6 @@ func (s *SyncService) runSyncEngine() {
 
 	logger.Infof("starting sync engine with strategy: %T", s.currentStrategy)
 
-	// TODO: need to handle stop channel
 	for {
 		select {
 		case <-s.stopCh:

From c22065f1d2ad5343a65f2dac1a5fc5d6a2128660 Mon Sep 17 00:00:00 2001
From: EclesioMeloJunior
Date: Fri, 30 Aug 2024 17:13:10 -0400
Subject: [PATCH 32/74] chore: introduce concurrency and perpetual tip sync chase

---
 dot/peerset/constants.go          |   5 +
 dot/sync/fullsync.go              | 154 ++++++++++++++++++------------
 dot/sync/fullsync_handle_block.go |  11 +--
 dot/sync/fullsync_test.go         |  59 +++++++++++-
 dot/sync/mocks_test.go            |  12 +++
 dot/sync/service.go               |  25 +++--
 dot/sync/unready_blocks.go        | 123 ++++++++++++++++++++----
 dot/sync/unready_blocks_test.go   | 118 +++++++++++++++++++++++
 dot/sync/worker_pool.go           | 102 ++++++++++++++------
 9 files changed, 486 insertions(+), 123 deletions(-)
 create mode 100644 dot/sync/unready_blocks_test.go

diff --git a/dot/peerset/constants.go b/dot/peerset/constants.go
index 88d3832110..d195b1e0db 100644
--- a/dot/peerset/constants.go
+++ b/dot/peerset/constants.go
@@ -38,6 +38,11 @@ const (
 	// GoodTransactionReason is the reason used for a good transaction.
 	GoodTransactionReason = "Good Transaction"
 
+	// NotRelevantBlockAnnounceValue is used when a peer announces a block that is not relevant to us
+	NotRelevantBlockAnnounceValue Reputation = -(1 << 2)
+	// NotRelevantBlockAnnounceReason is the reason used for irrelevant block announcements
+	NotRelevantBlockAnnounceReason = "Not Relevant Block Announce"
+
 	// BadTransactionValue used when transaction import was not performed.
 	BadTransactionValue Reputation = -(1 << 12)
 	// BadTransactionReason when transaction import was not performed.
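The duplicate-request guard added in dot/sync/message.go above boils down to simple bookkeeping: each (peer, request) pair is hashed with Blake2b over the peer ID concatenated with the encoded request, a counter for that hash is kept in a bounded LRU cache, and once the counter passes maxNumberOfSameRequestPerPeer the peer is reported with the SameBlockSyncRequest reputation change and the request is refused. A minimal stdlib-only sketch of the same idea follows; a plain map stands in for the LRU cache, sha256 stands in for Blake2b, and all names are illustrative rather than taken from the codebase:

    package main

    import (
        "crypto/sha256"
        "fmt"
    )

    const maxSameRequestPerPeer uint = 2

    // dedup counts how many times each (peer, request) pair has been seen.
    type dedup struct {
        seen map[[32]byte]uint
    }

    // allow reports whether the peer may repeat this request and bumps the
    // counter; past the threshold the caller would downscore the peer and
    // refuse to build a response.
    func (d *dedup) allow(peerID string, encodedReq []byte) bool {
        // key the counter by a hash of peerID||encodedRequest
        key := sha256.Sum256(append([]byte(peerID), encodedReq...))
        n := d.seen[key]
        if n > maxSameRequestPerPeer {
            return false
        }
        d.seen[key] = n + 1
        return true
    }

    func main() {
        d := &dedup{seen: make(map[[32]byte]uint)}
        for i := 0; i < 5; i++ {
            // prints true three times, then false twice
            fmt.Println(d.allow("alice", []byte{0x01}))
        }
    }

The bounded LRU cache in the patch plays the same role as this map but caps memory, so a peer cycling through many distinct requests cannot grow the table without limit, and benign repeats (such as the request sent twice in TestService_CreateBlockResponse_StartHash) stay under the threshold.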
diff --git a/dot/sync/fullsync.go b/dot/sync/fullsync.go index c03b162b1c..392d969037 100644 --- a/dot/sync/fullsync.go +++ b/dot/sync/fullsync.go @@ -15,7 +15,6 @@ import ( "github.com/ChainSafe/gossamer/dot/peerset" "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/internal/database" - "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/lib/common/variadic" "github.com/libp2p/go-libp2p/core/peer" @@ -28,11 +27,10 @@ const defaultNumOfTasks = 3 var _ Strategy = (*FullSyncStrategy)(nil) var ( - errFailedToGetParent = errors.New("failed to get parent header") - errNilHeaderInResponse = errors.New("expected header, received none") - errNilBodyInResponse = errors.New("expected body, received none") - errPeerOnInvalidFork = errors.New("peer is on an invalid fork") - errMismatchBestBlockAnnouncement = errors.New("mismatch best block announcement") + errFailedToGetParent = errors.New("failed to get parent header") + errNilHeaderInResponse = errors.New("expected header, received none") + errNilBodyInResponse = errors.New("expected body, received none") + errPeerOnInvalidFork = errors.New("peer is on an invalid fork") blockSizeGauge = promauto.NewGauge(prometheus.GaugeOpts{ Namespace: "gossamer_sync", @@ -81,16 +79,12 @@ func NewFullSyncStrategy(cfg *FullSyncConfig) *FullSyncStrategy { } return &FullSyncStrategy{ - badBlocks: cfg.BadBlocks, - reqMaker: cfg.RequestMaker, - blockState: cfg.BlockState, - numOfTasks: cfg.NumOfTasks, - importer: newBlockImporter(cfg), - unreadyBlocks: &unreadyBlocks{ - incompleteBlocks: make(map[common.Hash]*types.BlockData), - // TODO: cap disjoitChains to don't grows indefinitely - disjointChains: make([][]*types.BlockData, 0), - }, + badBlocks: cfg.BadBlocks, + reqMaker: cfg.RequestMaker, + blockState: cfg.BlockState, + numOfTasks: cfg.NumOfTasks, + importer: newBlockImporter(cfg), + unreadyBlocks: newUnreadyBlocks(), requestQueue: &requestsQueue[*messages.BlockRequestMessage]{ queue: list.New(), }, @@ -105,9 +99,12 @@ func (f *FullSyncStrategy) NextActions() ([]*syncTask, error) { f.startedAt = time.Now() f.syncedBlocks = 0 - if f.requestQueue.Len() > 0 { - message, _ := f.requestQueue.PopFront() - return f.createTasks([]*messages.BlockRequestMessage{message}), nil + messagesToSend := []*messages.BlockRequestMessage{} + for f.requestQueue.Len() > 0 { + msg, ok := f.requestQueue.PopFront() + if ok { + messagesToSend = append(messagesToSend, msg) + } } currentTarget := f.peers.getTarget() @@ -117,13 +114,22 @@ func (f *FullSyncStrategy) NextActions() ([]*syncTask, error) { } // our best block is equal or ahead of current target. 
- // in the nodes pov we are not legging behind so there's nothing to do + // in the node's pov we are not lagging behind so there's nothing to do + // or we didn't receive block announces, so let's ask for more blocks if uint32(bestBlockHeader.Number) >= currentTarget { - return nil, nil + ascendingBlockRequests := messages.NewBlockRequest( + *variadic.Uint32OrHashFrom(bestBlockHeader.Hash()), + messages.MaxBlocksInResponse, + messages.BootstrapRequestData, + messages.Ascending, + ) + + messagesToSend = append(messagesToSend, ascendingBlockRequests) + return f.createTasks(messagesToSend), nil } startRequestAt := bestBlockHeader.Number + 1 - targetBlockNumber := startRequestAt + 127 + targetBlockNumber := startRequestAt + maxRequestsAllowed*127 if targetBlockNumber > uint(currentTarget) { targetBlockNumber = uint(currentTarget) @@ -150,7 +156,9 @@ func (f *FullSyncStrategy) createTasks(requests []*messages.BlockRequestMessage) func (f *FullSyncStrategy) IsFinished(results []*syncTaskResult) (bool, []Change, []peer.ID, error) { repChanges, peersToIgnore, validResp := validateResults(results, f.badBlocks) + logger.Debugf("evaluating %d task results, %d valid responses", len(results), len(validResp)) + var highestFinalized *types.Header highestFinalized, err := f.blockState.GetHighestFinalisedHeader() if err != nil { return false, nil, nil, fmt.Errorf("getting highest finalized header") } @@ -203,6 +211,10 @@ func (f *FullSyncStrategy) IsFinished(results []*syncTaskResult) (bool, []Change disjointFragments = append(disjointFragments, fragment) } + logger.Debugf("blocks to import: %d, disjoint fragments: %d", len(nextBlocksToImport), len(disjointFragments)) + + // this loop's goal is to import ready blocks as well as + // update the highestFinalized header for len(nextBlocksToImport) > 0 || len(disjointFragments) > 0 { for _, blockToImport := range nextBlocksToImport { imported, err := f.importer.handle(blockToImport, networkInitialSync) @@ -216,16 +228,15 @@ func (f *FullSyncStrategy) IsFinished(results []*syncTaskResult) (bool, []Change } nextBlocksToImport = make([]*types.BlockData, 0) + highestFinalized, err = f.blockState.GetHighestFinalisedHeader() + if err != nil { + return false, nil, nil, fmt.Errorf("getting highest finalized header") + } // check if blocks from the disjoint set can be imported on their own forks // given that each fragment contains chains and these chains contain blocks // check if the first block in the chain contains a parent known by us for _, fragment := range disjointFragments { - highestFinalized, err := f.blockState.GetHighestFinalisedHeader() - if err != nil { - return false, nil, nil, fmt.Errorf("getting highest finalized header") - } - validFragment := validBlocksUnderFragment(highestFinalized.Number, fragment) if len(validFragment) == 0 { continue @@ -249,7 +260,7 @@ func (f *FullSyncStrategy) IsFinished(results []*syncTaskResult) (bool, []Change validFragment[0].Header.Hash(), ) - f.unreadyBlocks.newFragment(validFragment) + f.unreadyBlocks.newDisjointFragemnt(validFragment) request := messages.NewBlockRequest( *variadic.Uint32OrHashFrom(validFragment[0].Header.ParentHash), messages.MaxBlocksInResponse, @@ -264,6 +275,7 @@ func (f *FullSyncStrategy) IsFinished(results []*syncTaskResult) (bool, []Change disjointFragments = nil } + f.unreadyBlocks.removeIrrelevantFragments(highestFinalized.Number) return false, repChanges, peersToIgnore, nil } @@ -272,7 +284,7 @@ func (f *FullSyncStrategy) ShowMetrics() { bps := float64(f.syncedBlocks) / totalSyncAndImportSeconds
logger.Infof("⛓️ synced %d blocks, disjoint fragments %d, incomplete blocks %d, "+ "took: %.2f seconds, bps: %.2f blocks/second, target block number #%d", - f.syncedBlocks, len(f.unreadyBlocks.disjointChains), len(f.unreadyBlocks.incompleteBlocks), + f.syncedBlocks, len(f.unreadyBlocks.disjointFragments), len(f.unreadyBlocks.incompleteBlocks), totalSyncAndImportSeconds, bps, f.peers.getTarget()) } @@ -282,36 +294,14 @@ func (f *FullSyncStrategy) OnBlockAnnounceHandshake(from peer.ID, msg *network.B } func (f *FullSyncStrategy) OnBlockAnnounce(from peer.ID, msg *network.BlockAnnounceMessage) ( - repChange *Change, err error) { + gossip bool, repChange *Change, err error) { if f.blockState.IsPaused() { - return nil, errors.New("blockstate service is paused") - } - - currentTarget := f.peers.getTarget() - if msg.Number >= uint(currentTarget) { - return nil, nil + return false, nil, errors.New("blockstate service is paused") } blockAnnounceHeader := types.NewHeader(msg.ParentHash, msg.StateRoot, msg.ExtrinsicsRoot, msg.Number, msg.Digest) blockAnnounceHeaderHash := blockAnnounceHeader.Hash() - if msg.BestBlock { - pv := f.peers.get(from) - if uint(pv.bestBlockNumber) != msg.Number || blockAnnounceHeaderHash != pv.bestBlockHash { - repChange = &Change{ - who: from, - rep: peerset.ReputationChange{ - Value: peerset.BadMessageValue, - Reason: peerset.BadMessageReason, - }, - } - return repChange, fmt.Errorf("%w: peer %s, on handshake #%d (%s), on announce #%d (%s)", - errMismatchBestBlockAnnouncement, from, - pv.bestBlockNumber, pv.bestBlockHash.String(), - msg.Number, blockAnnounceHeaderHash.String()) - } - } - logger.Infof("received block announce from %s: #%d (%s) best block: %v", from, blockAnnounceHeader.Number, @@ -319,38 +309,76 @@ func (f *FullSyncStrategy) OnBlockAnnounce(from peer.ID, msg *network.BlockAnnou msg.BestBlock, ) - // check if their best block is on an invalid chain, if it is, - // potentially downscore them for now, we can remove them from the syncing peers set + if slices.Contains(f.badBlocks, blockAnnounceHeaderHash.String()) { + return false, &Change{ + who: from, + rep: peerset.ReputationChange{ + Value: peerset.BadBlockAnnouncementValue, + Reason: peerset.BadBlockAnnouncementReason, + }, + }, nil + } + highestFinalized, err := f.blockState.GetHighestFinalisedHeader() if err != nil { - return nil, fmt.Errorf("get highest finalised header: %w", err) + return false, nil, fmt.Errorf("get highest finalised header: %w", err) } - if blockAnnounceHeader.Number <= highestFinalized.Number { + // check if the announced block is relevant + if blockAnnounceHeader.Number <= highestFinalized.Number || f.blockAlreadyTracked(blockAnnounceHeader) { + logger.Debugf("announced block irrelevant #%d (%s)", blockAnnounceHeader.Number, blockAnnounceHeaderHash.Short()) repChange = &Change{ who: from, rep: peerset.ReputationChange{ - Value: peerset.BadBlockAnnouncementValue, - Reason: peerset.BadBlockAnnouncementReason, + Value: peerset.NotRelevantBlockAnnounceValue, + Reason: peerset.NotRelevantBlockAnnounceReason, }, } - return repChange, fmt.Errorf("%w: peer %s, block number #%d (%s)", + + return false, repChange, fmt.Errorf("%w: peer %s, block number #%d (%s)", errPeerOnInvalidFork, from, blockAnnounceHeader.Number, blockAnnounceHeaderHash.String()) } + logger.Debugf("relevant announced block #%d (%s)", blockAnnounceHeader.Number, blockAnnounceHeaderHash.Short()) + bestBlockHeader, err := f.blockState.BestBlockHeader() + if err != nil { + return false, nil, fmt.Errorf("get best block 
header: %w", err) + } + + // if we still far from aproaching the calculated target + // then we can ignore the block announce + ratioOfCompleteness := (bestBlockHeader.Number / uint(f.peers.getTarget())) * 100 + logger.Infof("ratio of completeness: %d", ratioOfCompleteness) + if ratioOfCompleteness < 80 { + return true, nil, nil + } + has, err := f.blockState.HasHeader(blockAnnounceHeaderHash) if err != nil { - return nil, fmt.Errorf("checking if header exists: %w", err) + return false, nil, fmt.Errorf("checking if header exists: %w", err) } if !has { - f.unreadyBlocks.newHeader(blockAnnounceHeader) + f.unreadyBlocks.newIncompleteBlock(blockAnnounceHeader) + logger.Infof("requesting announced block body #%d (%s)", blockAnnounceHeader.Number, blockAnnounceHeaderHash.Short()) request := messages.NewBlockRequest(*variadic.Uint32OrHashFrom(blockAnnounceHeaderHash), 1, messages.RequestedDataBody+messages.RequestedDataJustification, messages.Ascending) f.requestQueue.PushBack(request) } - return nil, nil + logger.Infof("block announced already exists #%d (%s)", blockAnnounceHeader.Number, blockAnnounceHeaderHash.Short()) + return true, &Change{ + who: from, + rep: peerset.ReputationChange{ + Value: peerset.NotRelevantBlockAnnounceValue, + Reason: peerset.NotRelevantBlockAnnounceReason, + }, + }, nil +} + +func (f *FullSyncStrategy) blockAlreadyTracked(announcedHeader *types.Header) bool { + return f.unreadyBlocks.isIncomplete(announcedHeader.Hash()) || + f.unreadyBlocks.inDisjointFragment(announcedHeader.Hash(), announcedHeader.Number) } func (f *FullSyncStrategy) IsSynced() bool { diff --git a/dot/sync/fullsync_handle_block.go b/dot/sync/fullsync_handle_block.go index a22f44a5a7..b504b829b7 100644 --- a/dot/sync/fullsync_handle_block.go +++ b/dot/sync/fullsync_handle_block.go @@ -96,11 +96,9 @@ func (b *blockImporter) handle(bd *types.BlockData, origin BlockOrigin) (importe // or the index of the block data that errored on failure. 
// TODO: https://github.com/ChainSafe/gossamer/issues/3468 func (b *blockImporter) processBlockData(blockData types.BlockData, origin BlockOrigin) error { - announceImportedBlock := false - if blockData.Header != nil { if blockData.Body != nil { - err := b.processBlockDataWithHeaderAndBody(blockData, origin, announceImportedBlock) + err := b.processBlockDataWithHeaderAndBody(blockData, origin) if err != nil { return fmt.Errorf("processing block data with header and body: %w", err) } @@ -123,7 +121,7 @@ func (b *blockImporter) processBlockData(blockData types.BlockData, origin Block } func (b *blockImporter) processBlockDataWithHeaderAndBody(blockData types.BlockData, - origin BlockOrigin, announceImportedBlock bool) (err error) { + origin BlockOrigin) (err error) { if origin != networkInitialSync { err = b.babeVerifier.VerifyBlock(blockData.Header) @@ -145,7 +143,7 @@ func (b *blockImporter) processBlockDataWithHeaderAndBody(blockData types.BlockD Body: *blockData.Body, } - err = b.handleBlock(block, announceImportedBlock) + err = b.handleBlock(block) if err != nil { return fmt.Errorf("handling block: %w", err) } @@ -154,7 +152,7 @@ func (b *blockImporter) processBlockDataWithHeaderAndBody(blockData types.BlockD } // handleBlock handles blocks (header+body) included in BlockResponses -func (b *blockImporter) handleBlock(block *types.Block, announceImportedBlock bool) error { +func (b *blockImporter) handleBlock(block *types.Block) error { parent, err := b.blockState.GetHeader(block.Header.ParentHash) if err != nil { return fmt.Errorf("%w: %s", errFailedToGetParent, err) @@ -185,6 +183,7 @@ func (b *blockImporter) handleBlock(block *types.Block, announceImportedBlock bo return fmt.Errorf("failed to execute block %d: %w", block.Header.Number, err) } + announceImportedBlock := false if err = b.blockImportHandler.HandleBlockImport(block, ts, announceImportedBlock); err != nil { return err } diff --git a/dot/sync/fullsync_test.go b/dot/sync/fullsync_test.go index f2c654d7a3..5127a01771 100644 --- a/dot/sync/fullsync_test.go +++ b/dot/sync/fullsync_test.go @@ -234,8 +234,8 @@ func TestFullSyncIsFinished(t *testing.T) { require.False(t, done) require.Len(t, fs.unreadyBlocks.incompleteBlocks, 0) - require.Len(t, fs.unreadyBlocks.disjointChains, 1) - require.Equal(t, fs.unreadyBlocks.disjointChains[0], sndTaskBlockResponse.BlockData) + require.Len(t, fs.unreadyBlocks.disjointFragments, 1) + require.Equal(t, fs.unreadyBlocks.disjointFragments[0], sndTaskBlockResponse.BlockData) expectedAncestorRequest := messages.NewBlockRequest( *variadic.Uint32OrHashFrom(sndTaskBlockResponse.BlockData[0].Header.ParentHash), @@ -267,6 +267,59 @@ func TestFullSyncIsFinished(t *testing.T) { require.False(t, done) require.Len(t, fs.unreadyBlocks.incompleteBlocks, 0) - require.Len(t, fs.unreadyBlocks.disjointChains, 0) + require.Len(t, fs.unreadyBlocks.disjointFragments, 0) + }) +} + +func TestFullSyncBlockAnnounce(t *testing.T) { + t.Run("announce_a_block_without_any_common_ancestor", func(t *testing.T) { + highestFinalizedHeader := &types.Header{ + ParentHash: common.BytesToHash([]byte{0}), + StateRoot: common.BytesToHash([]byte{3, 3, 3, 3}), + ExtrinsicsRoot: common.BytesToHash([]byte{4, 4, 4, 4}), + Number: 0, + Digest: types.NewDigest(), + } + + ctrl := gomock.NewController(t) + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().IsPaused().Return(false) + mockBlockState.EXPECT(). + GetHighestFinalisedHeader(). + Return(highestFinalizedHeader, nil) + + mockBlockState.EXPECT().
+ HasHeader(gomock.AnyOf(common.Hash{})). + Return(false, nil) + + fsCfg := &FullSyncConfig{ + BlockState: mockBlockState, + } + + fs := NewFullSyncStrategy(fsCfg) + + firstPeer := peer.ID("fst-peer") + firstHandshake := &network.BlockAnnounceHandshake{ + Roles: 1, + BestBlockNumber: 1024, + BestBlockHash: common.BytesToHash([]byte{0, 1, 2}), + GenesisHash: common.BytesToHash([]byte{1, 1, 1, 1}), + } + + err := fs.OnBlockAnnounceHandshake(firstPeer, firstHandshake) + require.NoError(t, err) + + firstBlockAnnounce := &network.BlockAnnounceMessage{ + ParentHash: common.BytesToHash([]byte{0, 1, 2}), + Number: 1024, + StateRoot: common.BytesToHash([]byte{3, 3, 3, 3}), + ExtrinsicsRoot: common.BytesToHash([]byte{4, 4, 4, 4}), + Digest: types.NewDigest(), + BestBlock: true, + } + + _, rep, err := fs.OnBlockAnnounce(firstPeer, firstBlockAnnounce) + require.NoError(t, err) + require.Nil(t, rep) }) } diff --git a/dot/sync/mocks_test.go b/dot/sync/mocks_test.go index dd8659dc8c..1a3c3d00bd 100644 --- a/dot/sync/mocks_test.go +++ b/dot/sync/mocks_test.go @@ -692,6 +692,18 @@ func (mr *MockNetworkMockRecorder) GetRequestResponseProtocol(arg0, arg1, arg2 a return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRequestResponseProtocol", reflect.TypeOf((*MockNetwork)(nil).GetRequestResponseProtocol), arg0, arg1, arg2) } +// GossipMessage mocks base method. +func (m *MockNetwork) GossipMessage(arg0 network.NotificationsMessage) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "GossipMessage", arg0) +} + +// GossipMessage indicates an expected call of GossipMessage. +func (mr *MockNetworkMockRecorder) GossipMessage(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GossipMessage", reflect.TypeOf((*MockNetwork)(nil).GossipMessage), arg0) +} + // ReportPeer mocks base method. 
func (m *MockNetwork) ReportPeer(arg0 peerset.ReputationChange, arg1 peer.ID) { m.ctrl.T.Helper() diff --git a/dot/sync/service.go b/dot/sync/service.go index 3c5f504773..f16f7e1582 100644 --- a/dot/sync/service.go +++ b/dot/sync/service.go @@ -24,7 +24,7 @@ const ( minPeersDefault = 3 ) -var logger = log.NewFromGlobal(log.AddContext("pkg", "new-sync")) +var logger = log.NewFromGlobal(log.AddContext("pkg", "sync")) type Network interface { AllConnectedPeersIDs() []peer.ID @@ -32,6 +32,7 @@ type Network interface { BlockAnnounceHandshake(*types.Header) error GetRequestResponseProtocol(subprotocol string, requestTimeout time.Duration, maxResponseSize uint64) *network.RequestResponseProtocol + GossipMessage(network.NotificationsMessage) } type BlockState interface { @@ -67,7 +68,7 @@ type Change struct { } type Strategy interface { - OnBlockAnnounce(from peer.ID, msg *network.BlockAnnounceMessage) (repChange *Change, err error) + OnBlockAnnounce(from peer.ID, msg *network.BlockAnnounceMessage) (gossip bool, repChange *Change, err error) OnBlockAnnounceHandshake(from peer.ID, msg *network.BlockAnnounceHandshake) error NextActions() ([]*syncTask, error) IsFinished(results []*syncTaskResult) (done bool, repChanges []Change, blocks []peer.ID, err error) @@ -173,13 +174,17 @@ func (s *SyncService) HandleBlockAnnounceHandshake(from peer.ID, msg *network.Bl } func (s *SyncService) HandleBlockAnnounce(from peer.ID, msg *network.BlockAnnounceMessage) error { - repChange, err := s.currentStrategy.OnBlockAnnounce(from, msg) + gossip, repChange, err := s.currentStrategy.OnBlockAnnounce(from, msg) + if err != nil { + return fmt.Errorf("while handling block announce: %w", err) + } + if repChange != nil { s.network.ReportPeer(repChange.rep, repChange.who) } - if err != nil { - return fmt.Errorf("while handling block announce: %w", err) + if gossip { + s.network.GossipMessage(msg) } return nil @@ -225,11 +230,19 @@ func (s *SyncService) runSyncEngine() { return } + bestBlockHeader, err := s.blockState.BestBlockHeader() + if err != nil { + logger.Criticalf("getting best block header: %w", err) + return + } + logger.Infof( - "🚣 currently syncing, %d peers connected, last finalised #%d (%s) ", + "🚣 currently syncing, %d peers connected, finalized #%d (%s), best #%d (%s)", len(s.network.AllConnectedPeersIDs()), finalisedHeader.Number, finalisedHeader.Hash().Short(), + bestBlockHeader.Number, + bestBlockHeader.Hash().Short(), ) tasks, err := s.currentStrategy.NextActions() diff --git a/dot/sync/unready_blocks.go b/dot/sync/unready_blocks.go index b17c5d6493..c7ac53f6f6 100644 --- a/dot/sync/unready_blocks.go +++ b/dot/sync/unready_blocks.go @@ -4,6 +4,8 @@ package sync import ( + "maps" + "slices" "sync" "github.com/ChainSafe/gossamer/dot/types" @@ -11,14 +13,21 @@ import ( ) type unreadyBlocks struct { - mu sync.Mutex - incompleteBlocks map[common.Hash]*types.BlockData - disjointChains [][]*types.BlockData + mtx sync.RWMutex + incompleteBlocks map[common.Hash]*types.BlockData + disjointFragments [][]*types.BlockData } -func (u *unreadyBlocks) newHeader(blockHeader *types.Header) { - u.mu.Lock() - defer u.mu.Unlock() +func newUnreadyBlocks() *unreadyBlocks { + return &unreadyBlocks{ + incompleteBlocks: make(map[common.Hash]*types.BlockData), + disjointFragments: make([][]*types.BlockData, 0), + } +} + +func (u *unreadyBlocks) newIncompleteBlock(blockHeader *types.Header) { + u.mtx.Lock() + defer u.mtx.Unlock() blockHash := blockHeader.Hash() u.incompleteBlocks[blockHash] = &types.BlockData{ @@ -27,19 +36,21 @@ 
func (u *unreadyBlocks) newHeader(blockHeader *types.Header) { } } -func (u *unreadyBlocks) newFragment(frag []*types.BlockData) { - u.mu.Lock() - defer u.mu.Unlock() - - u.disjointChains = append(u.disjointChains, frag) +func (u *unreadyBlocks) newDisjointFragemnt(frag []*types.BlockData) { + u.mtx.Lock() + defer u.mtx.Unlock() + u.disjointFragments = append(u.disjointFragments, frag) } +// updateDisjointFragments, given a set of blocks, checks if it +// connects to a disjoint fragment; if so, we remove the fragment from the +// disjoint set and return the fragment concatenated with the chain argument func (u *unreadyBlocks) updateDisjointFragments(chain []*types.BlockData) ([]*types.BlockData, bool) { - u.mu.Lock() - defer u.mu.Unlock() + u.mtx.Lock() + defer u.mtx.Unlock() indexToChange := -1 - for idx, disjointChain := range u.disjointChains { + for idx, disjointChain := range u.disjointFragments { lastBlockArriving := chain[len(chain)-1] firstDisjointBlock := disjointChain[0] if formsSequence(lastBlockArriving, firstDisjointBlock) { @@ -49,17 +60,20 @@ func (u *unreadyBlocks) updateDisjointFragments(chain []*types.BlockData) ([]*ty } if indexToChange >= 0 { - disjointChain := u.disjointChains[indexToChange] - u.disjointChains = append(u.disjointChains[:indexToChange], u.disjointChains[indexToChange+1:]...) + disjointChain := u.disjointFragments[indexToChange] + u.disjointFragments = append(u.disjointFragments[:indexToChange], u.disjointFragments[indexToChange+1:]...) return append(chain, disjointChain...), true } return nil, false } +// updateIncompleteBlocks, given a set of blocks, checks if they can fulfil +// incomplete blocks; the blocks that can be completed will be removed from +// the incompleteBlocks map and returned func (u *unreadyBlocks) updateIncompleteBlocks(chain []*types.BlockData) []*types.BlockData { - u.mu.Lock() - defer u.mu.Unlock() + u.mtx.Lock() + defer u.mtx.Unlock() completeBlocks := make([]*types.BlockData, 0) for _, blockData := range chain { @@ -77,3 +91,76 @@ func (u *unreadyBlocks) updateIncompleteBlocks(chain []*types.BlockData) []*type return completeBlocks } + +func (u *unreadyBlocks) isIncomplete(blockHash common.Hash) bool { + u.mtx.RLock() + defer u.mtx.Lock() + + _, ok := u.incompleteBlocks[blockHash] + return ok +} + +// inDisjointFragment iterates through the disjoint fragments and +// checks if the block hash and number already exist in one of them +func (u *unreadyBlocks) inDisjointFragment(blockHash common.Hash, blockNumber uint) bool { + u.mtx.RLock() + defer u.mtx.RUnlock() + + for _, frag := range u.disjointFragments { + target := &types.BlockData{Header: &types.Header{Number: blockNumber}} + idx, found := slices.BinarySearchFunc(frag, target, + func(a, b *types.BlockData) int { + switch { + case a.Header.Number == b.Header.Number: + return 0 + case a.Header.Number < b.Header.Number: + return -1 + default: + return 1 + } + }) + + if found && frag[idx].Hash == blockHash { + return true + } + } + + return false +} + +// removeIrrelevantFragments checks if there are blocks in the fragments that can be pruned +// given the finalised block number +func (u *unreadyBlocks) removeIrrelevantFragments(finalisedNumber uint) { + u.mtx.Lock() + defer u.mtx.Unlock() + + maps.DeleteFunc(u.incompleteBlocks, func(_ common.Hash, value *types.BlockData) bool { + return value.Header.Number <= finalisedNumber + }) + + idxsToRemove := make([]int, 0, len(u.disjointFragments)) + for fragmentIdx, fragment := range u.disjointFragments { + // the fragments are sorted in
ascending order + // starting from the latest item and going backwards + // we have a higher chance to find the idx that has + // a block with number lower or equal the finalised one + idx := len(fragment) - 1 + for idx >= 0 { + if fragment[idx].Header.Number <= finalisedNumber { + break + } + idx-- + } + + updatedFragment := fragment[idx+1:] + if len(updatedFragment) == 0 { + idxsToRemove = append(idxsToRemove, fragmentIdx) + } else { + u.disjointFragments[fragmentIdx] = updatedFragment + } + } + + for _, idx := range idxsToRemove { + u.disjointFragments = append(u.disjointFragments[:idx], u.disjointFragments[idx+1:]...) + } +} diff --git a/dot/sync/unready_blocks_test.go b/dot/sync/unready_blocks_test.go new file mode 100644 index 0000000000..74280c8c86 --- /dev/null +++ b/dot/sync/unready_blocks_test.go @@ -0,0 +1,118 @@ +package sync + +import ( + "testing" + + "github.com/ChainSafe/gossamer/dot/types" + "github.com/stretchr/testify/require" +) + +func TestUnreadyBlocks_removeIrrelevantFragments(t *testing.T) { + ub := newUnreadyBlocks() + ub.disjointFragments = [][]*types.BlockData{ + // first fragment + { + { + Header: &types.Header{ + Number: 192, + }, + }, + + { + Header: &types.Header{ + Number: 191, + }, + }, + + { + Header: &types.Header{ + Number: 190, + }, + }, + }, + + // second fragment + { + { + Header: &types.Header{ + Number: 253, + }, + }, + + { + Header: &types.Header{ + Number: 254, + }, + }, + + { + Header: &types.Header{ + Number: 255, + }, + }, + }, + + // third fragment + { + { + Header: &types.Header{ + Number: 1022, + }, + }, + + { + Header: &types.Header{ + Number: 1023, + }, + }, + + { + Header: &types.Header{ + Number: 1024, + }, + }, + }, + } + + // the first fragment should be removed + // the second fragment should have only 2 items + // the third fragment should not be affected + ub.removeIrrelevantFragments(253) + require.Len(t, ub.disjointFragments, 2) + + expectedSecondFrag := []*types.BlockData{ + { + Header: &types.Header{ + Number: 254, + }, + }, + + { + Header: &types.Header{ + Number: 255, + }, + }, + } + + expectedThirdFragment := []*types.BlockData{ + { + Header: &types.Header{ + Number: 1022, + }, + }, + + { + Header: &types.Header{ + Number: 1023, + }, + }, + + { + Header: &types.Header{ + Number: 1024, + }, + }, + } + require.Equal(t, ub.disjointFragments[0], expectedSecondFrag) + require.Equal(t, ub.disjointFragments[1], expectedThirdFragment) +} diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index edce7ab7c3..574973f21b 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -21,7 +21,7 @@ var ( const ( punishmentBaseTimeout = 5 * time.Minute - maxRequestsAllowed uint = 60 + maxRequestsAllowed uint = 3 ) type syncTask struct { @@ -81,43 +81,91 @@ func (s *syncWorkerPool) fromBlockAnnounceHandshake(who peer.ID) error { // submitRequests takes a set of requests and will submit them to the pool through submitRequest // the responses will be dispatched in the resultCh func (s *syncWorkerPool) submitRequests(tasks []*syncTask) []*syncTaskResult { + if len(tasks) == 0 { + return nil + } + s.mtx.RLock() defer s.mtx.RUnlock() pids := maps.Keys(s.workers) - results := make([]*syncTaskResult, 0, len(tasks)) + workerPool := make(chan peer.ID, len(pids)) + for _, worker := range pids { + workerPool <- worker + } + failedTasks := make(chan *syncTask, len(tasks)) + results := make(chan *syncTaskResult, len(tasks)) + + var wg sync.WaitGroup for _, task := range tasks { - completed := false - for _, pid := range pids { -
logger.Infof("[EXECUTING] worker %s", pid) - err := task.requestMaker.Do(pid, task.request, task.response) - if err != nil { - logger.Infof("[ERR] worker %s, request: %s, err: %s", pid, task.request, err.Error()) - continue - } + wg.Add(1) + go func(t *syncTask) { + defer wg.Done() + executeTask(t, workerPool, failedTasks, results) + }(task) + } - completed = true - results = append(results, &syncTaskResult{ - who: pid, - completed: completed, - request: task.request, - response: task.response, - }) - logger.Infof("[FINISHED] worker %s, request: %s", pid, task.request) - break + wg.Add(1) + go func() { + defer wg.Done() + for task := range failedTasks { + if len(workerPool) > 0 { + wg.Add(1) + go func(t *syncTask) { + defer wg.Done() + executeTask(t, workerPool, failedTasks, results) + }(task) + } else { + results <- &syncTaskResult{ + completed: false, + request: task.request, + response: nil, + } + } } + }() + + allResults := make(chan []*syncTaskResult, 1) + wg.Add(1) + go func(expectedResults int) { + defer wg.Done() + var taskResults []*syncTaskResult + for result := range results { + taskResults = append(taskResults, result) + if len(taskResults) == expectedResults { + close(failedTasks) + break + } + } + + allResults <- taskResults + }(len(tasks)) + + wg.Wait() + close(workerPool) + close(results) - if !completed { - results = append(results, &syncTaskResult{ - completed: false, - request: task.request, - response: nil, - }) + return <-allResults +} + +func executeTask(task *syncTask, workerPool chan peer.ID, failedTasks chan *syncTask, results chan *syncTaskResult) { + worker := <-workerPool + logger.Infof("[EXECUTING] worker %s", worker) + + err := task.requestMaker.Do(worker, task.request, task.response) + if err != nil { + logger.Infof("[ERR] worker %s, request: %s, err: %s", worker, task.request, err.Error()) + failedTasks <- task + } else { + logger.Infof("[FINISHED] worker %s, request: %s", worker, task.request) + results <- &syncTaskResult{ + who: worker, + completed: true, + request: task.request, + response: task.response, } } - - return results } func (s *syncWorkerPool) ignorePeerAsWorker(who peer.ID) { From 2f0b3a0400d8780e470dd1260063ef44ce53a4bd Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Sat, 31 Aug 2024 12:49:40 -0400 Subject: [PATCH 33/74] while reporting, create a node in peerset --- dot/peerset/peerstate.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/dot/peerset/peerstate.go b/dot/peerset/peerstate.go index 2ddf774567..8b69502f51 100644 --- a/dot/peerset/peerstate.go +++ b/dot/peerset/peerstate.go @@ -240,7 +240,8 @@ func (ps *PeersState) addReputation(peerID peer.ID, change ReputationChange) ( node, has := ps.nodes[peerID] if !has { - return 0, fmt.Errorf("%w: for peer id %s", ErrPeerDoesNotExist, peerID) + ps.insertPeer(0, peerID) + node = ps.nodes[peerID] } newReputation = node.addReputation(change.Value) From 195a9be91f4313d5ed5ef3d5bb960bd0078176a4 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Tue, 10 Sep 2024 12:22:07 -0400 Subject: [PATCH 34/74] chore: propagate block announce --- dot/network/notifications.go | 10 +++++----- dot/network/service.go | 25 +++++++++++++++++++++++++ dot/sync/fullsync.go | 2 +- dot/sync/service.go | 7 ++++--- 4 files changed, 35 insertions(+), 9 deletions(-) diff --git a/dot/network/notifications.go b/dot/network/notifications.go index b85634ecfd..f6afad66fb 100644 --- a/dot/network/notifications.go +++ b/dot/network/notifications.go @@ -272,21 +272,21 @@ func (s *Service) 
sendData(peer peer.ID, hs Handshake, info *notificationsProtoc stream, err := s.sendHandshake(peer, hs, info) if err != nil { - logger.Debugf("failed to send handshake to peer %s on protocol %s: %s", peer, info.protocolID, err) + logger.Infof("failed to send handshake to peer %s on protocol %s: %s", peer, info.protocolID, err) return } _, isConsensusMsg := msg.(*ConsensusMessage) if s.host.messageCache != nil && s.host.messageCache.exists(peer, msg) && !isConsensusMsg { - logger.Tracef("message has already been sent, ignoring: peer=%s msg=%s", peer, msg) + logger.Infof("message has already been sent, ignoring: peer=%s msg=%s", peer, msg) return } // we've completed the handshake with the peer, send message directly - logger.Tracef("sending message to peer %s using protocol %s: %s", peer, info.protocolID, msg) + logger.Infof("sending message to peer %s using protocol %s: %s", peer, info.protocolID, msg) if err := s.host.writeToStream(stream, msg); err != nil { - logger.Debugf("failed to send message to peer %s: %s", peer, err) + logger.Errorf("failed to send message to peer %s: %s", peer, err) // the stream was closed or reset, close it on our end and delete it from our peer's data if errors.Is(err, io.EOF) || errors.Is(err, network.ErrReset) { @@ -300,7 +300,7 @@ func (s *Service) sendData(peer peer.ID, hs Handshake, info *notificationsProtoc } } - logger.Tracef("successfully sent message on protocol %s to peer %s: message=", info.protocolID, peer, msg) + logger.Infof("successfully sent message on protocol %s to peer %s: message= %v", info.protocolID, peer, msg) s.host.cm.peerSetHandler.ReportPeer(peerset.ReputationChange{ Value: peerset.GossipSuccessValue, Reason: peerset.GossipSuccessReason, diff --git a/dot/network/service.go b/dot/network/service.go index 98169619b3..dd86e9f59c 100644 --- a/dot/network/service.go +++ b/dot/network/service.go @@ -562,6 +562,31 @@ func (s *Service) GossipMessage(msg NotificationsMessage) { logger.Errorf("message type %d not supported by any notifications protocol", msg.Type()) } +// GossipMessageExcluding gossips a notifications protocol message to all our peers except the excluded one +func (s *Service) GossipMessageExcluding(msg NotificationsMessage, excluding peer.ID) { + if s.host == nil || msg == nil || s.IsStopped() { + return + } + + logger.Infof("gossiping from host %s message of type %d: %s", + s.host.id(), msg.Type(), msg) + + // check if the message is part of a notifications protocol + s.notificationsMu.Lock() + defer s.notificationsMu.Unlock() + + for msgID, prtl := range s.notificationsProtocols { + if msg.Type() != msgID || prtl == nil { + continue + } + + s.broadcastExcluding(prtl, excluding, msg) + return + } + + logger.Errorf("message type %d not supported by any notifications protocol", msg.Type()) +} + // SendMessage sends a message to the given peer func (s *Service) SendMessage(to peer.ID, msg NotificationsMessage) error { s.notificationsMu.Lock() diff --git a/dot/sync/fullsync.go b/dot/sync/fullsync.go index 392d969037..8f074bb3a7 100644 --- a/dot/sync/fullsync.go +++ b/dot/sync/fullsync.go @@ -366,7 +366,7 @@ func (f *FullSyncStrategy) OnBlockAnnounce(from peer.ID, msg *network.BlockAnnou f.requestQueue.PushBack(request) } - logger.Infof("block announced already exists #%d (%s)", blockAnnounceHeader.Number, blockAnnounceHeaderHash.Short()) + logger.Infof("announced block already exists #%d (%s)", blockAnnounceHeader.Number, blockAnnounceHeaderHash.Short()) return true, &Change{ who: from, rep: peerset.ReputationChange{ diff --git a/dot/sync/service.go
b/dot/sync/service.go index f16f7e1582..4b87fe5fe8 100644 --- a/dot/sync/service.go +++ b/dot/sync/service.go @@ -32,7 +32,7 @@ type Network interface { BlockAnnounceHandshake(*types.Header) error GetRequestResponseProtocol(subprotocol string, requestTimeout time.Duration, maxResponseSize uint64) *network.RequestResponseProtocol - GossipMessage(network.NotificationsMessage) + GossipMessageExcluding(network.NotificationsMessage, peer.ID) } type BlockState interface { @@ -162,7 +162,7 @@ func (s *SyncService) Stop() error { } func (s *SyncService) HandleBlockAnnounceHandshake(from peer.ID, msg *network.BlockAnnounceHandshake) error { - logger.Infof("receiving a block announce handshake: %s", from.String()) + logger.Infof("receiving a block announce handshake from %s", from.String()) if err := s.workerPool.fromBlockAnnounceHandshake(from); err != nil { return err } @@ -184,7 +184,8 @@ func (s *SyncService) HandleBlockAnnounce(from peer.ID, msg *network.BlockAnnoun } if gossip { - s.network.GossipMessage(msg) + logger.Infof("propagating block announcement #%d excluding %s", msg.Number, from.String()) + s.network.GossipMessageExcluding(msg, from) } return nil From ec384c7422318b9355c637a8ee2efe3723e53b39 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Thu, 12 Sep 2024 12:13:10 -0400 Subject: [PATCH 35/74] wip: remove wrong mtx Lock, remove neighbors message --- dot/network/block_announce.go | 23 ++------ dot/network/host.go | 4 ++ dot/network/notifications.go | 12 +++- dot/network/service.go | 36 ++++-------- dot/sync/fullsync.go | 13 ++++- dot/sync/peer_view.go | 13 ++--- dot/sync/service.go | 103 ++++++++++++++++++--------------- dot/sync/unready_blocks.go | 2 +- lib/grandpa/message_handler.go | 84 +++++++++++++-------------- 9 files changed, 141 insertions(+), 149 deletions(-) diff --git a/dot/network/block_announce.go b/dot/network/block_announce.go index 9fb37c3ac5..e675760149 100644 --- a/dot/network/block_announce.go +++ b/dot/network/block_announce.go @@ -9,7 +9,6 @@ import ( "github.com/ChainSafe/gossamer/dot/peerset" "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/blocktree" "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/pkg/scale" @@ -153,6 +152,7 @@ func (s *Service) getBlockAnnounceHandshake() (Handshake, error) { } func (s *Service) validateBlockAnnounceHandshake(from peer.ID, hs Handshake) error { + logger.Info("validating block announce handshake") bhs, ok := hs.(*BlockAnnounceHandshake) if !ok { return errors.New("invalid handshake type") @@ -186,33 +186,18 @@ func (s *Service) validateBlockAnnounceHandshake(from peer.ID, hs Handshake) err np.peersData.setInboundHandshakeData(from, data) } - // if peer has higher best block than us, begin syncing - latestHeader, err := s.blockState.BestBlockHeader() - if err != nil { - return err - } - - // check if peer block number is greater than host block number - if latestHeader.Number >= uint(bhs.BestBlockNumber) { - return nil - } - return s.syncer.HandleBlockAnnounceHandshake(from, bhs) } // handleBlockAnnounceMessage handles BlockAnnounce messages // if some more blocks are required to sync the announced block, the node will open a sync stream // with its peer and send a BlockRequest message -func (s *Service) handleBlockAnnounceMessage(from peer.ID, msg NotificationsMessage) (propagate bool, err error) { +func (s *Service) handleBlockAnnounceMessage(from peer.ID, msg NotificationsMessage) (bool, error) { bam, ok := msg.(*BlockAnnounceMessage) if !ok { return false, 
errors.New("invalid message") } - err = s.syncer.HandleBlockAnnounce(from, bam) - if errors.Is(err, blocktree.ErrBlockExists) { - return true, nil - } - - return false, err + err := s.syncer.HandleBlockAnnounce(from, bam) + return true, err } diff --git a/dot/network/host.go b/dot/network/host.go index 97984ce576..85d1c7d093 100644 --- a/dot/network/host.go +++ b/dot/network/host.go @@ -370,6 +370,10 @@ func (h *host) writeToStream(s network.Stream, msg messages.P2PMessage) error { return err } + if len(encMsg) != sent { + logger.Criticalf("full message not sent: sent %d, message size %d", sent, len(encMsg)) + } + h.bwc.LogSentMessage(int64(sent)) return nil diff --git a/dot/network/notifications.go b/dot/network/notifications.go index f6afad66fb..53ce6ac0c3 100644 --- a/dot/network/notifications.go +++ b/dot/network/notifications.go @@ -227,9 +227,9 @@ func (s *Service) handleHandshake(info *notificationsProtocol, stream network.St logger.Tracef("receiver: sent handshake to peer %s using protocol %s", peer, info.protocolID) - if err := stream.CloseWrite(); err != nil { - return fmt.Errorf("failed to close stream for writing: %s", err) - } + // if err := stream.CloseWrite(); err != nil { + // return fmt.Errorf("failed to close stream for writing: %s", err) + // } return nil } @@ -300,6 +300,12 @@ func (s *Service) sendData(peer peer.ID, hs Handshake, info *notificationsProtoc } } + if info.protocolID == blockAnnounceID { + if err := stream.Close(); err != nil { + logger.Errorf("failed to close block announce notification stream: %w", err) + } + } + logger.Infof("successfully sent message on protocol %s to peer %s: message= %v", info.protocolID, peer, msg) s.host.cm.peerSetHandler.ReportPeer(peerset.ReputationChange{ Value: peerset.GossipSuccessValue, diff --git a/dot/network/service.go b/dot/network/service.go index dd86e9f59c..d7d9d8ed0b 100644 --- a/dot/network/service.go +++ b/dot/network/service.go @@ -787,36 +787,20 @@ func (s *Service) BlockAnnounceHandshake(header *types.Header) error { return ErrNoPeersConnected } + msg := &BlockAnnounceMessage{ + ParentHash: header.ParentHash, + Number: header.Number, + StateRoot: header.StateRoot, + ExtrinsicsRoot: header.ExtrinsicsRoot, + Digest: header.Digest, + BestBlock: true, + } + protocol, ok := s.notificationsProtocols[blockAnnounceMsgType] if !ok { panic("block announce message type not found") } - handshake, err := protocol.getHandshake() - if err != nil { - return fmt.Errorf("getting handshake: %w", err) - } - - wg := sync.WaitGroup{} - wg.Add(len(peers)) - for _, p := range peers { - protocol.peersData.setMutex(p) - - go func(p peer.ID) { - defer wg.Done() - stream, err := s.sendHandshake(p, handshake, protocol) - if err != nil { - logger.Tracef("sending block announce handshake: %s", err) - return - } - - response := protocol.peersData.getOutboundHandshakeData(p) - if response.received && response.validated { - closeOutboundStream(protocol, p, stream) - } - }(p) - } - - wg.Wait() + s.broadcastExcluding(protocol, peer.ID(""), msg) return nil } diff --git a/dot/sync/fullsync.go b/dot/sync/fullsync.go index 8f074bb3a7..598eae97e4 100644 --- a/dot/sync/fullsync.go +++ b/dot/sync/fullsync.go @@ -310,6 +310,9 @@ func (f *FullSyncStrategy) OnBlockAnnounce(from peer.ID, msg *network.BlockAnnou ) if slices.Contains(f.badBlocks, blockAnnounceHeaderHash.String()) { + logger.Infof("bad block receive from %s: #%d (%s) is a bad block", + from, blockAnnounceHeader.Number, blockAnnounceHeaderHash) + return false, &Change{ who: from, rep: 
peerset.ReputationChange{ @@ -319,6 +322,10 @@ func (f *FullSyncStrategy) OnBlockAnnounce(from peer.ID, msg *network.BlockAnnou }, nil } + if msg.BestBlock { + f.peers.update(from, blockAnnounceHeaderHash, uint32(blockAnnounceHeader.Number)) + } + highestFinalized, err := f.blockState.GetHighestFinalisedHeader() if err != nil { return false, nil, fmt.Errorf("get highest finalised header: %w", err) @@ -326,7 +333,7 @@ func (f *FullSyncStrategy) OnBlockAnnounce(from peer.ID, msg *network.BlockAnnou // check if the announced block is relevant if blockAnnounceHeader.Number <= highestFinalized.Number || f.blockAlreadyTracked(blockAnnounceHeader) { - logger.Debugf("announced block irrelevant #%d (%s)", blockAnnounceHeader.Number, blockAnnounceHeaderHash.Short()) + logger.Infof("announced block irrelevant #%d (%s)", blockAnnounceHeader.Number, blockAnnounceHeaderHash.Short()) repChange = &Change{ who: from, rep: peerset.ReputationChange{ @@ -339,7 +346,7 @@ func (f *FullSyncStrategy) OnBlockAnnounce(from peer.ID, msg *network.BlockAnnou errPeerOnInvalidFork, from, blockAnnounceHeader.Number, blockAnnounceHeaderHash.String()) } - logger.Debugf("relevant announced block #%d (%s)", blockAnnounceHeader.Number, blockAnnounceHeaderHash.Short()) + logger.Infof("relevant announced block #%d (%s)", blockAnnounceHeader.Number, blockAnnounceHeaderHash.Short()) bestBlockHeader, err := f.blockState.BestBlockHeader() if err != nil { return false, nil, fmt.Errorf("get best block header: %w", err) @@ -348,7 +355,7 @@ func (f *FullSyncStrategy) OnBlockAnnounce(from peer.ID, msg *network.BlockAnnou // if we are still far from reaching the calculated target // then we can ignore the block announce ratioOfCompleteness := (bestBlockHeader.Number * 100) / uint(f.peers.getTarget()) - logger.Infof("ratio of completeness: %d", ratioOfCompleteness) + logger.Infof("sync: ratio of completeness: %d", ratioOfCompleteness) if ratioOfCompleteness < 80 { return true, nil, nil } diff --git a/dot/sync/peer_view.go b/dot/sync/peer_view.go index 9c66454bb4..c15759c03a 100644 --- a/dot/sync/peer_view.go +++ b/dot/sync/peer_view.go @@ -24,12 +24,6 @@ type peerViewSet struct { target uint32 } -func (p *peerViewSet) get(peerID peer.ID) peerView { - p.mtx.RLock() - defer p.mtx.RUnlock() - return p.view[peerID] -} - func (p *peerViewSet) update(peerID peer.ID, bestHash common.Hash, bestNumber uint32) { p.mtx.Lock() defer p.mtx.Unlock() @@ -44,6 +38,7 @@ func (p *peerViewSet) update(peerID peer.ID, bestHash common.Hash, bestNumber ui return } + logger.Infof("updating peer %s view to #%d (%s)", peerID.String(), bestNumber, bestHash.Short()) p.view[peerID] = newView } @@ -56,10 +51,10 @@ func (p *peerViewSet) getTarget() uint32 { return p.target } - numbers := make([]uint32, 0, len(p.view)) + numbers := make([]uint32, len(p.view)) // we are going to sort the data and remove the outliers then we will return the avg of all the valid elements - for _, view := range maps.Values(p.view) { - numbers = append(numbers, view.bestBlockNumber) + for idx, view := range maps.Values(p.view) { + numbers[idx] = view.bestBlockNumber } sum, count := nonOutliersSumCount(numbers) diff --git a/dot/sync/service.go b/dot/sync/service.go index 4b87fe5fe8..b99a78ba33 100644 --- a/dot/sync/service.go +++ b/dot/sync/service.go @@ -132,9 +132,14 @@ func (s *SyncService) waitWorkers() { return } - err := s.network.BlockAnnounceHandshake(bestBlockHeader) - if err != nil && !errors.Is(err, network.ErrNoPeersConnected) { - logger.Errorf("retrieving target info from peers: 
%v", err) + err = s.network.BlockAnnounceHandshake(bestBlockHeader) + if err != nil { + if errors.Is(err, network.ErrNoPeersConnected) { + continue + } + + logger.Criticalf("waiting workers: %s", err.Error()) + break } select { @@ -174,7 +179,10 @@ func (s *SyncService) HandleBlockAnnounceHandshake(from peer.ID, msg *network.Bl } func (s *SyncService) HandleBlockAnnounce(from peer.ID, msg *network.BlockAnnounceMessage) error { - gossip, repChange, err := s.currentStrategy.OnBlockAnnounce(from, msg) + s.mu.Lock() + defer s.mu.Unlock() + + _, repChange, err := s.currentStrategy.OnBlockAnnounce(from, msg) if err != nil { return fmt.Errorf("while handling block announce: %w", err) } @@ -183,11 +191,6 @@ func (s *SyncService) HandleBlockAnnounce(from peer.ID, msg *network.BlockAnnoun s.network.ReportPeer(repChange.rep, repChange.who) } - if gossip { - logger.Infof("propagating block announcement #%d excluding %s", msg.Number, from.String()) - s.network.GossipMessageExcluding(msg, from) - } - return nil } @@ -215,49 +218,54 @@ func (s *SyncService) HighestBlock() uint { func (s *SyncService) runSyncEngine() { defer s.wg.Done() - logger.Infof("starting sync engine with strategy: %T", s.currentStrategy) - for { - select { - case <-s.stopCh: - return - default: - } + goto lockAndStart - finalisedHeader, err := s.blockState.GetHighestFinalisedHeader() - if err != nil { - logger.Criticalf("getting highest finalized header: %w", err) - return - } +lockAndStart: + s.mu.Lock() + logger.Info("starting process to acquire more blocks") - bestBlockHeader, err := s.blockState.BestBlockHeader() - if err != nil { - logger.Criticalf("getting best block header: %w", err) - return - } + select { + case <-s.stopCh: + return + default: + } - logger.Infof( - "🚣 currently syncing, %d peers connected, finalized #%d (%s), best #%d (%s)", - len(s.network.AllConnectedPeersIDs()), - finalisedHeader.Number, - finalisedHeader.Hash().Short(), - bestBlockHeader.Number, - bestBlockHeader.Hash().Short(), - ) + finalisedHeader, err := s.blockState.GetHighestFinalisedHeader() + if err != nil { + logger.Criticalf("getting highest finalized header: %w", err) + return + } - tasks, err := s.currentStrategy.NextActions() - if err != nil { - logger.Criticalf("current sync strategy next actions failed with: %s", err.Error()) - return - } + bestBlockHeader, err := s.blockState.BestBlockHeader() + if err != nil { + logger.Criticalf("getting best block header: %w", err) + return + } - if len(tasks) == 0 { - // sleep the amount of one slot and try - time.Sleep(s.slotDuration) - continue - } + logger.Infof( + "🚣 currently syncing, %d peers connected, finalized #%d (%s), best #%d (%s)", + len(s.network.AllConnectedPeersIDs()), + finalisedHeader.Number, + finalisedHeader.Hash().Short(), + bestBlockHeader.Number, + bestBlockHeader.Hash().Short(), + ) + tasks, err := s.currentStrategy.NextActions() + if err != nil { + logger.Criticalf("current sync strategy next actions failed with: %s", err.Error()) + return + } + + if len(tasks) == 0 { + // sleep the amount of one slot and try + time.Sleep(s.slotDuration) + goto loopBack + } + + { results := s.workerPool.submitRequests(tasks) done, repChanges, peersToIgnore, err := s.currentStrategy.IsFinished(results) if err != nil { @@ -281,9 +289,12 @@ func (s *SyncService) runSyncEngine() { return } - s.mu.Lock() s.currentStrategy = s.defaultStrategy - s.mu.Unlock() } } + +loopBack: + logger.Info("finish process to acquire more blocks") + s.mu.Unlock() + goto lockAndStart } diff --git 
a/dot/sync/unready_blocks.go b/dot/sync/unready_blocks.go index c7ac53f6f6..ac6a8a2cd7 100644 --- a/dot/sync/unready_blocks.go +++ b/dot/sync/unready_blocks.go @@ -94,7 +94,7 @@ func (u *unreadyBlocks) updateIncompleteBlocks(chain []*types.BlockData) []*type func (u *unreadyBlocks) isIncomplete(blockHash common.Hash) bool { u.mtx.RLock() - defer u.mtx.Lock() + defer u.mtx.RUnlock() _, ok := u.incompleteBlocks[blockHash] return ok diff --git a/lib/grandpa/message_handler.go b/lib/grandpa/message_handler.go index 7bb5e2b1c9..5d80d55929 100644 --- a/lib/grandpa/message_handler.go +++ b/lib/grandpa/message_handler.go @@ -80,48 +80,48 @@ func (h *MessageHandler) handleMessage(from peer.ID, m GrandpaMessage) (network. } func (h *MessageHandler) handleNeighbourMessage(msg *NeighbourPacketV1) error { - // TODO(#2931): this is a simple hack to ensure that the neighbour messages - // sent by gossamer are being received by substrate nodes - // not intended to be production code - round, setID := h.blockState.GetRoundAndSetID() - neighbourMessage := &NeighbourPacketV1{ - Round: round, - SetID: setID, - Number: uint32(h.grandpa.head.Number), - } - - cm, err := neighbourMessage.ToConsensusMessage() - if err != nil { - return fmt.Errorf("converting neighbour message to network message: %w", err) - } - - logger.Debugf("sending neighbour message: %v", neighbourMessage) - h.grandpa.network.GossipMessage(cm) - - currFinalized, err := h.blockState.GetFinalisedHeader(round, setID) - if err != nil { - return err - } - - // ignore neighbour messages where our best finalised number is greater than theirs - if currFinalized.Number >= uint(msg.Number) { - return nil - } - - // TODO; determine if there is some reason we don't receive justifications in responses near the head (usually), - // and remove the following code if it's fixed. (#1815) - head, err := h.blockState.BestBlockNumber() - if err != nil { - return err - } - - // ignore neighbour messages that are above our head - if uint(msg.Number) > head { - return nil - } - - logger.Debugf("got neighbour message with number %d, set id %d and round %d", msg.Number, msg.SetID, msg.Round) - // TODO: should we send a justification request here? potentially re-connect this to sync package? (#1815) + // // TODO(#2931): this is a simple hack to ensure that the neighbour messages + // // sent by gossamer are being received by substrate nodes + // // not intended to be production code + // round, setID := h.blockState.GetRoundAndSetID() + // neighbourMessage := &NeighbourPacketV1{ + // Round: round, + // SetID: setID, + // Number: uint32(h.grandpa.head.Number), + // } + + // cm, err := neighbourMessage.ToConsensusMessage() + // if err != nil { + // return fmt.Errorf("converting neighbour message to network message: %w", err) + // } + + // logger.Debugf("sending neighbour message: %v", neighbourMessage) + // h.grandpa.network.GossipMessage(cm) + + // currFinalized, err := h.blockState.GetFinalisedHeader(round, setID) + // if err != nil { + // return err + // } + + // // ignore neighbour messages where our best finalised number is greater than theirs + // if currFinalized.Number >= uint(msg.Number) { + // return nil + // } + + // // TODO; determine if there is some reason we don't receive justifications in responses near the head (usually), + // // and remove the following code if it's fixed. 
(#1815) + // head, err := h.blockState.BestBlockNumber() + // if err != nil { + // return err + // } + + // // ignore neighbour messages that are above our head + // if uint(msg.Number) > head { + // return nil + // } + + // logger.Debugf("got neighbour message with number %d, set id %d and round %d", msg.Number, msg.SetID, msg.Round) + // // TODO: should we send a justification request here? potentially re-connect this to sync package? (#1815) return nil } From a54572fbf5866de8d7eb88e0d78bc3a75c390f70 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Thu, 12 Sep 2024 19:32:04 -0400 Subject: [PATCH 36/74] chore: remove unneeded log level --- dot/network/block_announce.go | 1 - dot/network/notifications.go | 8 ++++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/dot/network/block_announce.go b/dot/network/block_announce.go index e675760149..315b72d4a1 100644 --- a/dot/network/block_announce.go +++ b/dot/network/block_announce.go @@ -152,7 +152,6 @@ func (s *Service) getBlockAnnounceHandshake() (Handshake, error) { } func (s *Service) validateBlockAnnounceHandshake(from peer.ID, hs Handshake) error { - logger.Info("validating block announce handshake") bhs, ok := hs.(*BlockAnnounceHandshake) if !ok { return errors.New("invalid handshake type") diff --git a/dot/network/notifications.go b/dot/network/notifications.go index 53ce6ac0c3..f58636276d 100644 --- a/dot/network/notifications.go +++ b/dot/network/notifications.go @@ -272,19 +272,19 @@ func (s *Service) sendData(peer peer.ID, hs Handshake, info *notificationsProtoc stream, err := s.sendHandshake(peer, hs, info) if err != nil { - logger.Infof("failed to send handshake to peer %s on protocol %s: %s", peer, info.protocolID, err) + logger.Tracef("failed to send handshake to peer %s on protocol %s: %s", peer, info.protocolID, err) return } _, isConsensusMsg := msg.(*ConsensusMessage) if s.host.messageCache != nil && s.host.messageCache.exists(peer, msg) && !isConsensusMsg { - logger.Infof("message has already been sent, ignoring: peer=%s msg=%s", peer, msg) + logger.Tracef("message has already been sent, ignoring: peer=%s msg=%s", peer, msg) return } // we've completed the handshake with the peer, send message directly - logger.Infof("sending message to peer %s using protocol %s: %s", peer, info.protocolID, msg) + logger.Tracef("sending message to peer %s using protocol %s: %s", peer, info.protocolID, msg) if err := s.host.writeToStream(stream, msg); err != nil { logger.Errorf("failed to send message to peer %s: %s", peer, err) @@ -306,7 +306,7 @@ func (s *Service) sendData(peer peer.ID, hs Handshake, info *notificationsProtoc } } - logger.Infof("successfully sent message on protocol %s to peer %s: message= %v", info.protocolID, peer, msg) + logger.Tracef("successfully sent message on protocol %s to peer %s: message= %v", info.protocolID, peer, msg) s.host.cm.peerSetHandler.ReportPeer(peerset.ReputationChange{ Value: peerset.GossipSuccessValue, Reason: peerset.GossipSuccessReason, From 8546176202026d663865e5c3c138cc340f5c3155 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Fri, 13 Sep 2024 09:09:08 -0400 Subject: [PATCH 37/74] chore: if ahead of target dont ask for more blocks --- dot/sync/fullsync.go | 16 ++++------------ dot/sync/service.go | 3 +-- 2 files changed, 5 insertions(+), 14 deletions(-) diff --git a/dot/sync/fullsync.go b/dot/sync/fullsync.go index 598eae97e4..1ee3b41a52 100644 --- a/dot/sync/fullsync.go +++ b/dot/sync/fullsync.go @@ -117,14 +117,6 @@ func (f *FullSyncStrategy) NextActions() 
([]*syncTask, error) { // in the node's pov we are not lagging behind so there's nothing to do // or we didn't receive block announces, so let's ask for more blocks if uint32(bestBlockHeader.Number) >= currentTarget { - ascendingBlockRequests := messages.NewBlockRequest( - *variadic.Uint32OrHashFrom(bestBlockHeader.Hash()), - messages.MaxBlocksInResponse, - messages.BootstrapRequestData, - messages.Ascending, - ) - - messagesToSend = append(messagesToSend, ascendingBlockRequests) return f.createTasks(messagesToSend), nil } @@ -143,13 +135,13 @@ func (f *FullSyncStrategy) NextActions() ([]*syncTask, error) { } func (f *FullSyncStrategy) createTasks(requests []*messages.BlockRequestMessage) []*syncTask { - tasks := make([]*syncTask, len(requests)) - for idx, req := range requests { - tasks[idx] = &syncTask{ + tasks := make([]*syncTask, 0, len(requests)) + for _, req := range requests { + tasks = append(tasks, &syncTask{ request: req, response: &messages.BlockResponseMessage{}, requestMaker: f.reqMaker, - } + }) } return tasks } diff --git a/dot/sync/service.go b/dot/sync/service.go index b99a78ba33..9171a46977 100644 --- a/dot/sync/service.go +++ b/dot/sync/service.go @@ -260,8 +260,6 @@ lockAndStart: } if len(tasks) == 0 { - // sleep the amount of one slot and try - time.Sleep(s.slotDuration) goto loopBack } @@ -296,5 +294,6 @@ lockAndStart: loopBack: logger.Info("finish process to acquire more blocks") s.mu.Unlock() + time.Sleep(s.slotDuration) goto lockAndStart } From 8f633258743600581f03bb6215333cfded5541ac Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Fri, 13 Sep 2024 09:09:47 -0400 Subject: [PATCH 38/74] chore: remove unneeded goto --- dot/sync/service.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/dot/sync/service.go b/dot/sync/service.go index 9171a46977..ba880c86fa 100644 --- a/dot/sync/service.go +++ b/dot/sync/service.go @@ -220,8 +220,6 @@ func (s *SyncService) runSyncEngine() { defer s.wg.Done() logger.Infof("starting sync engine with strategy: %T", s.currentStrategy) - goto lockAndStart - lockAndStart: s.mu.Lock() logger.Info("starting process to acquire more blocks") From 1f48f65066a4ef4ddc52134899bef92510ed32c8 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Fri, 13 Sep 2024 09:17:35 -0400 Subject: [PATCH 39/74] chore: adjust on block announce --- dot/sync/fullsync.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/dot/sync/fullsync.go b/dot/sync/fullsync.go index 1ee3b41a52..7b17455aa4 100644 --- a/dot/sync/fullsync.go +++ b/dot/sync/fullsync.go @@ -354,7 +354,9 @@ func (f *FullSyncStrategy) OnBlockAnnounce(from peer.ID, msg *network.BlockAnnou has, err := f.blockState.HasHeader(blockAnnounceHeaderHash) if err != nil { - return false, nil, fmt.Errorf("checking if header exists: %w", err) + if !errors.Is(err, database.ErrNotFound) { + return false, nil, fmt.Errorf("checking if header exists: %w", err) + } } if !has { From 399eb9bb03ecd429a4534222a2cb9b5e7e54f5de Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Fri, 13 Sep 2024 15:31:05 -0400 Subject: [PATCH 40/74] chore: rollback integration tests to use /dot/sync --- .github/workflows/integration-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index 293bb6e1b4..cc6e90185a 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -32,7 +32,7 @@ jobs: github.com/ChainSafe/gossamer/dot/state,
github.com/ChainSafe/gossamer/dot/digest, github.com/ChainSafe/gossamer/dot/network, - github.com/ChainSafe/gossamer/lib/sync, + github.com/ChainSafe/gossamer/dot/sync, github.com/ChainSafe/gossamer/lib/babe, github.com/ChainSafe/gossamer/lib/grandpa, ] From e554d3f9f2016483219e394e1a742168b95ef2d7 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Fri, 13 Sep 2024 15:34:54 -0400 Subject: [PATCH 41/74] chore: rollback changes on core pkg --- dot/core/service_integration_test.go | 133 +++++++++++++++++++++++++++ 1 file changed, 133 insertions(+) diff --git a/dot/core/service_integration_test.go b/dot/core/service_integration_test.go index 515b616c35..47a3fd6bb1 100644 --- a/dot/core/service_integration_test.go +++ b/dot/core/service_integration_test.go @@ -209,6 +209,139 @@ func TestHandleChainReorg_NoReorg(t *testing.T) { require.NoError(t, err) } +func TestHandleChainReorg_WithReorg_Trans(t *testing.T) { + t.Skip() // TODO: tx fails to validate in handleChainReorg() with "Invalid transaction" (#1026) + s := NewTestService(t, nil) + bs := s.blockState + + parent, err := bs.BestBlockHeader() + require.NoError(t, err) + + bestBlockHash := s.blockState.BestBlockHash() + rt, err := s.blockState.GetRuntime(bestBlockHash) + require.NoError(t, err) + + block1 := BuildBlock(t, rt, parent, nil) + bs.StoreRuntime(block1.Header.Hash(), rt) + err = bs.AddBlock(block1) + require.NoError(t, err) + + block2 := BuildBlock(t, rt, &block1.Header, nil) + bs.StoreRuntime(block2.Header.Hash(), rt) + err = bs.AddBlock(block2) + require.NoError(t, err) + + block3 := BuildBlock(t, rt, &block2.Header, nil) + bs.StoreRuntime(block3.Header.Hash(), rt) + err = bs.AddBlock(block3) + require.NoError(t, err) + + block4 := BuildBlock(t, rt, &block3.Header, nil) + bs.StoreRuntime(block4.Header.Hash(), rt) + err = bs.AddBlock(block4) + require.NoError(t, err) + + block5 := BuildBlock(t, rt, &block4.Header, nil) + bs.StoreRuntime(block5.Header.Hash(), rt) + err = bs.AddBlock(block5) + require.NoError(t, err) + + block31 := BuildBlock(t, rt, &block2.Header, nil) + bs.StoreRuntime(block31.Header.Hash(), rt) + err = bs.AddBlock(block31) + require.NoError(t, err) + + nonce := uint64(0) + + // Add extrinsic to block `block41` + ext := createExtrinsic(t, rt, bs.(*state.BlockState).GenesisHash(), nonce) + + block41 := BuildBlock(t, rt, &block31.Header, ext) + bs.StoreRuntime(block41.Header.Hash(), rt) + err = bs.AddBlock(block41) + require.NoError(t, err) + + err = s.handleChainReorg(block41.Header.Hash(), block5.Header.Hash()) + require.NoError(t, err) + + pending := s.transactionState.(*state.TransactionState).Pending() + require.Equal(t, 1, len(pending)) +} + +func BuildBlock(t *testing.T, instance runtime.Instance, parent *types.Header, ext types.Extrinsic) *types.Block { + digest := types.NewDigest() + prd, err := types.NewBabeSecondaryPlainPreDigest(0, 1).ToPreRuntimeDigest() + require.NoError(t, err) + err = digest.Add(*prd) + require.NoError(t, err) + header := &types.Header{ + ParentHash: parent.Hash(), + Number: parent.Number + 1, + Digest: digest, + } + + err = instance.InitializeBlock(header) + require.NoError(t, err) + + idata := types.NewInherentData() + err = idata.SetInherent(types.Timstap0, uint64(time.Now().Unix())) + require.NoError(t, err) + + err = idata.SetInherent(types.Babeslot, uint64(1)) + require.NoError(t, err) + + ienc, err := idata.Encode() + require.NoError(t, err) + + // Call BlockBuilder_inherent_extrinsics which returns the inherents as encoded extrinsics + inherentExts, err := 
instance.InherentExtrinsics(ienc) + require.NoError(t, err) + + // decode inherent extrinsics + cp := make([]byte, len(inherentExts)) + copy(cp, inherentExts) + var inExts [][]byte + err = scale.Unmarshal(cp, &inExts) + require.NoError(t, err) + + // apply each inherent extrinsic + for _, inherent := range inExts { + in, err := scale.Marshal(inherent) + require.NoError(t, err) + + ret, err := instance.ApplyExtrinsic(in) + require.NoError(t, err) + require.Equal(t, ret, []byte{0, 0}) + } + + body := types.Body(types.BytesArrayToExtrinsics(inExts)) + + if ext != nil { + // validate and apply extrinsic + var ret []byte + + externalExt := types.Extrinsic(append([]byte{byte(types.TxnExternal)}, ext...)) + _, err = instance.ValidateTransaction(externalExt) + require.NoError(t, err) + + ret, err = instance.ApplyExtrinsic(ext) + require.NoError(t, err) + require.Equal(t, ret, []byte{0, 0}) + + body = append(body, ext) + } + + res, err := instance.FinalizeBlock() + require.NoError(t, err) + res.Number = header.Number + res.Hash() + + return &types.Block{ + Header: *res, + Body: body, + } +} + func TestHandleChainReorg_WithReorg_NoTransactions(t *testing.T) { s := NewTestService(t, nil) const height = 5 From f233d050789bea7fc1815e967c75204f784ff62a Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Fri, 13 Sep 2024 15:40:12 -0400 Subject: [PATCH 42/74] chore: TODO(#2931) --- lib/grandpa/message_handler.go | 45 ++-------------------------------- 1 file changed, 2 insertions(+), 43 deletions(-) diff --git a/lib/grandpa/message_handler.go b/lib/grandpa/message_handler.go index 5d80d55929..bbc328108b 100644 --- a/lib/grandpa/message_handler.go +++ b/lib/grandpa/message_handler.go @@ -79,49 +79,8 @@ func (h *MessageHandler) handleMessage(from peer.ID, m GrandpaMessage) (network. } } -func (h *MessageHandler) handleNeighbourMessage(msg *NeighbourPacketV1) error { - // // TODO(#2931): this is a simple hack to ensure that the neighbour messages - // // sent by gossamer are being received by substrate nodes - // // not intended to be production code - // round, setID := h.blockState.GetRoundAndSetID() - // neighbourMessage := &NeighbourPacketV1{ - // Round: round, - // SetID: setID, - // Number: uint32(h.grandpa.head.Number), - // } - - // cm, err := neighbourMessage.ToConsensusMessage() - // if err != nil { - // return fmt.Errorf("converting neighbour message to network message: %w", err) - // } - - // logger.Debugf("sending neighbour message: %v", neighbourMessage) - // h.grandpa.network.GossipMessage(cm) - - // currFinalized, err := h.blockState.GetFinalisedHeader(round, setID) - // if err != nil { - // return err - // } - - // // ignore neighbour messages where our best finalised number is greater than theirs - // if currFinalized.Number >= uint(msg.Number) { - // return nil - // } - - // // TODO; determine if there is some reason we don't receive justifications in responses near the head (usually), - // // and remove the following code if it's fixed. (#1815) - // head, err := h.blockState.BestBlockNumber() - // if err != nil { - // return err - // } - - // // ignore neighbour messages that are above our head - // if uint(msg.Number) > head { - // return nil - // } - - // logger.Debugf("got neighbour message with number %d, set id %d and round %d", msg.Number, msg.SetID, msg.Round) - // // TODO: should we send a justification request here? potentially re-connect this to sync package? 
(#1815) +func (h *MessageHandler) handleNeighbourMessage(_ *NeighbourPacketV1) error { + // TODO(#2931) return nil } From 330444c8396d979ce090f01da3e4e0782abd5605 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Fri, 13 Sep 2024 16:09:47 -0400 Subject: [PATCH 43/74] chore: fix `removeIrrelevantFragments` out of bounds panic --- dot/sync/mocks_test.go | 12 +- dot/sync/unready_blocks.go | 16 +- dot/sync/unready_blocks_test.go | 180 +++++++++++++++-------- scripts/retrieve_block/retrieve_block.go | 1 - 4 files changed, 132 insertions(+), 77 deletions(-) diff --git a/dot/sync/mocks_test.go b/dot/sync/mocks_test.go index 1a3c3d00bd..e006ce4493 100644 --- a/dot/sync/mocks_test.go +++ b/dot/sync/mocks_test.go @@ -692,16 +692,16 @@ func (mr *MockNetworkMockRecorder) GetRequestResponseProtocol(arg0, arg1, arg2 a return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRequestResponseProtocol", reflect.TypeOf((*MockNetwork)(nil).GetRequestResponseProtocol), arg0, arg1, arg2) } -// GossipMessage mocks base method. -func (m *MockNetwork) GossipMessage(arg0 network.NotificationsMessage) { +// GossipMessageExcluding mocks base method. +func (m *MockNetwork) GossipMessageExcluding(arg0 network.NotificationsMessage, arg1 peer.ID) { m.ctrl.T.Helper() - m.ctrl.Call(m, "GossipMessage", arg0) + m.ctrl.Call(m, "GossipMessageExcluding", arg0, arg1) } -// GossipMessage indicates an expected call of GossipMessage. -func (mr *MockNetworkMockRecorder) GossipMessage(arg0 any) *gomock.Call { +// GossipMessageExcluding indicates an expected call of GossipMessageExcluding. +func (mr *MockNetworkMockRecorder) GossipMessageExcluding(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GossipMessage", reflect.TypeOf((*MockNetwork)(nil).GossipMessage), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GossipMessageExcluding", reflect.TypeOf((*MockNetwork)(nil).GossipMessageExcluding), arg0, arg1) } // ReportPeer mocks base method. diff --git a/dot/sync/unready_blocks.go b/dot/sync/unready_blocks.go index ac6a8a2cd7..0baaba9382 100644 --- a/dot/sync/unready_blocks.go +++ b/dot/sync/unready_blocks.go @@ -138,29 +138,25 @@ func (u *unreadyBlocks) removeIrrelevantFragments(finalisedNumber uint) { return value.Header.Number <= finalisedNumber }) - idxsToRemove := make([]int, 0, len(u.disjointFragments)) - for fragmentIdx, fragment := range u.disjointFragments { + fragmentIdx := 0 + for _, fragment := range u.disjointFragments { // the fragments are sorted in ascending order // starting from the latest item and going backwards // we have a higher chance to find the idx that has // a block with number lower or equal the finalised one idx := len(fragment) - 1 - for idx >= 0 { + for ; idx >= 0; idx-- { if fragment[idx].Header.Number <= finalisedNumber { break } - idx-- } updatedFragment := fragment[idx+1:] - if len(updatedFragment) == 0 { - idxsToRemove = append(idxsToRemove, fragmentIdx) - } else { + if len(updatedFragment) != 0 { u.disjointFragments[fragmentIdx] = updatedFragment + fragmentIdx++ } } - for _, idx := range idxsToRemove { - u.disjointFragments = append(u.disjointFragments[:idx], u.disjointFragments[idx+1:]...) 
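// Context for the fix above: deleting slice elements by indices collected
// in an earlier pass is unsafe, because each
// append(s[:idx], s[idx+1:]...) shifts every element after idx, leaving the
// remaining recorded indices stale and eventually out of bounds. The new
// code instead compacts in place with a write cursor (fragmentIdx) and
// truncates once at the end. A minimal sketch of the same filter-in-place
// idiom, using illustrative names that are not part of this patch:
//
//	kept := fragments[:0]
//	for _, frag := range fragments {
//		if len(frag) != 0 { // predicate: keep non-empty fragments
//			kept = append(kept, frag)
//		}
//	}
//	fragments = kept // backing array is reused; no per-element deletes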
- } + u.disjointFragments = u.disjointFragments[:fragmentIdx] } diff --git a/dot/sync/unready_blocks_test.go b/dot/sync/unready_blocks_test.go index 74280c8c86..b435bf6d1a 100644 --- a/dot/sync/unready_blocks_test.go +++ b/dot/sync/unready_blocks_test.go @@ -8,37 +8,109 @@ import ( ) func TestUnreadyBlocks_removeIrrelevantFragments(t *testing.T) { - ub := newUnreadyBlocks() - ub.disjointFragments = [][]*types.BlockData{ - // first fragment - { + t.Run("removing all disjoint fragments", func(t *testing.T) { + ub := newUnreadyBlocks() + ub.disjointFragments = [][]*types.BlockData{ { - Header: &types.Header{ - Number: 192, + { + Header: &types.Header{ + Number: 100, + }, }, }, - { - Header: &types.Header{ - Number: 191, + { + Header: &types.Header{ + Number: 99, + }, + }, + }, + { + { + Header: &types.Header{ + Number: 92, + }, + }, + }, + } + ub.removeIrrelevantFragments(100) + require.Empty(t, ub.disjointFragments) + }) + + t.Run("removing irrelevant fragments", func(t *testing.T) { + ub := newUnreadyBlocks() + ub.disjointFragments = [][]*types.BlockData{ + // first fragment + { + { + Header: &types.Header{ + Number: 192, + }, + }, + + { + Header: &types.Header{ + Number: 191, + }, + }, + + { + Header: &types.Header{ + Number: 190, + }, }, }, + // second fragment { - Header: &types.Header{ - Number: 190, + { + Header: &types.Header{ + Number: 253, + }, + }, + + { + Header: &types.Header{ + Number: 254, + }, + }, + + { + Header: &types.Header{ + Number: 255, + }, }, }, - }, - // second fragment - { + // third fragment { - Header: &types.Header{ - Number: 253, + { + Header: &types.Header{ + Number: 1022, + }, + }, + + { + Header: &types.Header{ + Number: 1023, + }, + }, + + { + Header: &types.Header{ + Number: 1024, + }, }, }, + } + // the first fragment should be removed + // the second fragment should have only 2 items + // the third fragment should not be affected + ub.removeIrrelevantFragments(253) + require.Len(t, ub.disjointFragments, 2) + + expectedSecondFrag := []*types.BlockData{ { Header: &types.Header{ Number: 254, @@ -50,10 +122,9 @@ func TestUnreadyBlocks_removeIrrelevantFragments(t *testing.T) { Number: 255, }, }, - }, + } - // third fragment - { + expectedThirdFragment := []*types.BlockData{ { Header: &types.Header{ Number: 1022, @@ -71,48 +142,37 @@ func TestUnreadyBlocks_removeIrrelevantFragments(t *testing.T) { Number: 1024, }, }, - }, - } - - // the first fragment should be removed - // the second fragment should have only 2 items - // the third frament shold not be affected - ub.removeIrrelevantFragments(253) - require.Len(t, ub.disjointFragments, 2) - - expectedSecondFrag := []*types.BlockData{ - { - Header: &types.Header{ - Number: 254, - }, - }, - - { - Header: &types.Header{ - Number: 255, - }, - }, - } - - expectedThirdFragment := []*types.BlockData{ - { - Header: &types.Header{ - Number: 1022, + } + require.Equal(t, ub.disjointFragments[0], expectedSecondFrag) + require.Equal(t, ub.disjointFragments[1], expectedThirdFragment) + }) + + t.Run("keep all fragments", func(t *testing.T) { + ub := newUnreadyBlocks() + ub.disjointFragments = [][]*types.BlockData{ + { + { + Header: &types.Header{ + Number: 101, + }, + }, }, - }, - - { - Header: &types.Header{ - Number: 1023, + { + { + Header: &types.Header{ + Number: 103, + }, + }, }, - }, - - { - Header: &types.Header{ - Number: 1024, + { + { + Header: &types.Header{ + Number: 104, + }, + }, }, - }, - } - require.Equal(t, ub.disjointFragments[0], expectedSecondFrag) - require.Equal(t, ub.disjointFragments[1],
expectedThirdFragment) + } + ub.removeIrrelevantFragments(100) + require.Len(t, ub.disjointFragments, 3) + }) } diff --git a/scripts/retrieve_block/retrieve_block.go b/scripts/retrieve_block/retrieve_block.go index 219f4f0b24..ef19f5124d 100644 --- a/scripts/retrieve_block/retrieve_block.go +++ b/scripts/retrieve_block/retrieve_block.go @@ -118,7 +118,6 @@ func main() { protocolID := protocol.ID(fmt.Sprintf("/%s/sync/2", chain.ProtocolID)) for _, bootnodesAddr := range bootnodes { - log.Println("connecting...") err := p2pHost.Connect(ctx, bootnodesAddr) if err != nil { log.Printf("fail with: %s\n", err.Error()) From b3434ec6884b0d93dd6f866961c9ef6a774831ef Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Fri, 13 Sep 2024 16:23:55 -0400 Subject: [PATCH 44/74] add license --- dot/sync/unready_blocks_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/dot/sync/unready_blocks_test.go b/dot/sync/unready_blocks_test.go index b435bf6d1a..66356cfc67 100644 --- a/dot/sync/unready_blocks_test.go +++ b/dot/sync/unready_blocks_test.go @@ -1,3 +1,6 @@ +// Copyright 2024 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + package sync import ( From 1b94210fb68fc52738f3f2fd96033239f9ccd4a6 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Fri, 13 Sep 2024 17:02:53 -0400 Subject: [PATCH 45/74] chore: solve mocks mismatch and full block importer --- dot/mock_node_builder_test.go | 7 +- dot/node.go | 1 + dot/sync/chain_sync.go | 1087 --------------- dot/sync/chain_sync_test.go | 1901 -------------------------- dot/sync/fullsync_handle_block.go | 47 +- dot/sync/interfaces.go | 90 -- dot/sync/message_integration_test.go | 9 +- dot/sync/service.go | 1 + dot/sync/syncer_integration_test.go | 213 --- 9 files changed, 35 insertions(+), 3321 deletions(-) delete mode 100644 dot/sync/chain_sync.go delete mode 100644 dot/sync/chain_sync_test.go delete mode 100644 dot/sync/interfaces.go delete mode 100644 dot/sync/syncer_integration_test.go diff --git a/dot/mock_node_builder_test.go b/dot/mock_node_builder_test.go index 06bfba5fba..f161ac6a6d 100644 --- a/dot/mock_node_builder_test.go +++ b/dot/mock_node_builder_test.go @@ -18,6 +18,7 @@ import ( network "github.com/ChainSafe/gossamer/dot/network" rpc "github.com/ChainSafe/gossamer/dot/rpc" state "github.com/ChainSafe/gossamer/dot/state" + sync "github.com/ChainSafe/gossamer/dot/sync" system "github.com/ChainSafe/gossamer/dot/system" types "github.com/ChainSafe/gossamer/dot/types" babe "github.com/ChainSafe/gossamer/lib/babe" @@ -228,11 +229,7 @@ func (mr *MocknodeBuilderIfaceMockRecorder) loadRuntime(config, ns, stateSrvc, k } // newSyncService mocks base method. 
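// Context for the hunk below: the conflict markers being deleted here had
// been committed into generated gomock code. After an interface change such
// as newSyncService's new signature, regenerating the mock avoids this kind
// of drift. A typical source-mode mockgen invocation, with hypothetical
// paths that would need to match this repo's go:generate directives:
//
//	mockgen -source=dot/node.go \
//		-destination=dot/mock_node_builder_test.go \
//		-package=dot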
-<<<<<<< HEAD -func (m *MocknodeBuilderIface) newSyncService(config *config.Config, st *state.Service, finalityGadget BlockJustificationVerifier, verifier *babe.VerificationManager, cs *core.Service, net *network.Service, telemetryMailer Telemetry) (network.Syncer, error) { -======= -func (m *MocknodeBuilderIface) newSyncService(config *config.Config, st *state.Service, finalityGadget sync.FinalityGadget, verifier *babe.VerificationManager, cs *core.Service, net *network.Service, telemetryMailer Telemetry) (*sync.Service, error) { ->>>>>>> development +func (m *MocknodeBuilderIface) newSyncService(config *config.Config, st *state.Service, finalityGadget sync.FinalityGadget, verifier *babe.VerificationManager, cs *core.Service, net *network.Service, telemetryMailer Telemetry) (network.Syncer, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "newSyncService", config, st, finalityGadget, verifier, cs, net, telemetryMailer) ret0, _ := ret[0].(network.Syncer) diff --git a/dot/node.go b/dot/node.go index aa6e656956..f420e86e52 100644 --- a/dot/node.go +++ b/dot/node.go @@ -21,6 +21,7 @@ import ( "github.com/ChainSafe/gossamer/dot/rpc" "github.com/ChainSafe/gossamer/dot/state" "github.com/ChainSafe/gossamer/dot/state/pruner" + dotsync "github.com/ChainSafe/gossamer/dot/sync" "github.com/ChainSafe/gossamer/dot/system" "github.com/ChainSafe/gossamer/dot/telemetry" "github.com/ChainSafe/gossamer/dot/types" diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go deleted file mode 100644 index a37240138a..0000000000 --- a/dot/sync/chain_sync.go +++ /dev/null @@ -1,1087 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "bytes" - "errors" - "fmt" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/libp2p/go-libp2p/core/peer" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - "golang.org/x/exp/slices" - - "github.com/ChainSafe/gossamer/dot/network" - "github.com/ChainSafe/gossamer/dot/network/messages" - "github.com/ChainSafe/gossamer/dot/peerset" - "github.com/ChainSafe/gossamer/dot/telemetry" - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/internal/database" - "github.com/ChainSafe/gossamer/lib/common" - "github.com/ChainSafe/gossamer/lib/common/variadic" -) - -var _ ChainSync = (*chainSync)(nil) - -type chainSyncState byte - -const ( - bootstrap chainSyncState = iota - tip -) - -type blockOrigin byte - -const ( - networkInitialSync blockOrigin = iota - networkBroadcast -) - -func (s chainSyncState) String() string { - switch s { - case bootstrap: - return "bootstrap" - case tip: - return "tip" - default: - return "unknown" - } -} - -var ( - pendingBlocksLimit = messages.MaxBlocksInResponse * 32 - isSyncedGauge = promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: "gossamer_network_syncer", - Name: "is_synced", - Help: "bool representing whether the node is synced to the head of the chain", - }) - - blockSizeGauge = promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: "gossamer_sync", - Name: "block_size", - Help: "represent the size of blocks synced", - }) -) - -// ChainSync contains the methods used by the high-level service into the `chainSync` module -type ChainSync interface { - start() - stop() error - - // called upon receiving a BlockAnnounceHandshake - onBlockAnnounceHandshake(p peer.ID, hash common.Hash, number uint) error - - // getSyncMode returns the current syncing state - getSyncMode() chainSyncState - - // 
getHighestBlock returns the highest block or an error - getHighestBlock() (highestBlock uint, err error) - - onBlockAnnounce(announcedBlock) error -} - -type announcedBlock struct { - who peer.ID - header *types.Header -} - -type chainSync struct { - wg sync.WaitGroup - stopCh chan struct{} - - blockState BlockState - network Network - - workerPool *syncWorkerPool - - // tracks the latest state we know of from our peers, - // ie. their best block hash and number - peerViewSet *peerViewSet - - // disjoint set of blocks which are known but not ready to be processed - // ie. we only know the hash, number, or the parent block is unknown, or the body is unknown - // note: the block may have empty fields, as some data about it may be unknown - pendingBlocks DisjointBlockSet - - syncMode atomic.Value - - finalisedCh <-chan *types.FinalisationInfo - - minPeers int - slotDuration time.Duration - - storageState StorageState - transactionState TransactionState - babeVerifier BabeVerifier - finalityGadget FinalityGadget - blockImportHandler BlockImportHandler - telemetry Telemetry - badBlocks []string - requestMaker network.RequestMaker - waitPeersDuration time.Duration -} - -type chainSyncConfig struct { - bs BlockState - net Network - requestMaker network.RequestMaker - pendingBlocks DisjointBlockSet - minPeers, maxPeers int - slotDuration time.Duration - storageState StorageState - transactionState TransactionState - babeVerifier BabeVerifier - finalityGadget FinalityGadget - blockImportHandler BlockImportHandler - telemetry Telemetry - badBlocks []string - waitPeersDuration time.Duration -} - -func newChainSync(cfg chainSyncConfig) *chainSync { - atomicState := atomic.Value{} - atomicState.Store(tip) - return &chainSync{ - stopCh: make(chan struct{}), - storageState: cfg.storageState, - transactionState: cfg.transactionState, - babeVerifier: cfg.babeVerifier, - finalityGadget: cfg.finalityGadget, - blockImportHandler: cfg.blockImportHandler, - telemetry: cfg.telemetry, - blockState: cfg.bs, - network: cfg.net, - peerViewSet: newPeerViewSet(cfg.maxPeers), - pendingBlocks: cfg.pendingBlocks, - syncMode: atomicState, - finalisedCh: cfg.bs.GetFinalisedNotifierChannel(), - minPeers: cfg.minPeers, - slotDuration: cfg.slotDuration, - workerPool: newSyncWorkerPool(cfg.net, cfg.requestMaker), - badBlocks: cfg.badBlocks, - requestMaker: cfg.requestMaker, - waitPeersDuration: cfg.waitPeersDuration, - } -} - -func (cs *chainSync) waitWorkersAndTarget() { - waitPeersTimer := time.NewTimer(cs.waitPeersDuration) - - highestFinalizedHeader, err := cs.blockState.GetHighestFinalisedHeader() - if err != nil { - panic(fmt.Sprintf("failed to get highest finalised header: %v", err)) - } - - for { - cs.workerPool.useConnectedPeers() - totalAvailable := cs.workerPool.totalWorkers() - - if totalAvailable >= uint(cs.minPeers) && - cs.peerViewSet.getTarget() > 0 { - return - } - - err := cs.network.BlockAnnounceHandshake(highestFinalizedHeader) - if err != nil && !errors.Is(err, network.ErrNoPeersConnected) { - logger.Errorf("retrieving target info from peers: %v", err) - } - - select { - case <-waitPeersTimer.C: - waitPeersTimer.Reset(cs.waitPeersDuration) - - case <-cs.stopCh: - return - } - } -} - -func (cs *chainSync) start() { - // since the default status from sync mode is syncMode(tip) - isSyncedGauge.Set(1) - - cs.wg.Add(1) - go cs.pendingBlocks.run(cs.finalisedCh, cs.stopCh, &cs.wg) - - // wait until we have a minimal workers in the sync worker pool - cs.waitWorkersAndTarget() -} - -func (cs *chainSync) stop() 
error { - err := cs.workerPool.stop() - if err != nil { - return fmt.Errorf("stopping worker poll: %w", err) - } - - close(cs.stopCh) - allStopCh := make(chan struct{}) - go func() { - defer close(allStopCh) - cs.wg.Wait() - }() - - timeoutTimer := time.NewTimer(30 * time.Second) - - select { - case <-allStopCh: - if !timeoutTimer.Stop() { - <-timeoutTimer.C - } - return nil - case <-timeoutTimer.C: - return ErrStopTimeout - } -} - -func (cs *chainSync) isBootstrapSync(currentBlockNumber uint) bool { - syncTarget := cs.peerViewSet.getTarget() - return currentBlockNumber+messages.MaxBlocksInResponse < syncTarget -} - -func (cs *chainSync) bootstrapSync() { - defer cs.wg.Done() - currentBlock, err := cs.blockState.GetHighestFinalisedHeader() - if err != nil { - panic("cannot find highest finalised header") - } - - for { - select { - case <-cs.stopCh: - logger.Warn("ending bootstrap sync, chain sync stop channel triggered") - return - default: - } - - isBootstrap := cs.isBootstrapSync(currentBlock.Number) - if isBootstrap { - cs.workerPool.useConnectedPeers() - err = cs.requestMaxBlocksFrom(currentBlock, networkInitialSync) - if err != nil { - if errors.Is(err, errBlockStatePaused) { - logger.Debugf("exiting bootstrap sync: %s", err) - return - } - logger.Errorf("requesting max blocks from best block header: %s", err) - } - - currentBlock, err = cs.blockState.BestBlockHeader() - if err != nil { - logger.Errorf("getting best block header: %v", err) - } - } else { - // we are less than 128 blocks behind the target we can use tip sync - cs.syncMode.Store(tip) - isSyncedGauge.Set(1) - logger.Infof("🔁 switched sync mode to %s", tip.String()) - return - } - } -} - -func (cs *chainSync) getSyncMode() chainSyncState { - return cs.syncMode.Load().(chainSyncState) -} - -// onBlockAnnounceHandshake sets a peer's best known block -func (cs *chainSync) onBlockAnnounceHandshake(who peer.ID, bestHash common.Hash, bestNumber uint) error { - cs.workerPool.fromBlockAnnounce(who) - cs.peerViewSet.update(who, bestHash, bestNumber) - - if cs.getSyncMode() == bootstrap { - return nil - } - - bestBlockHeader, err := cs.blockState.BestBlockHeader() - if err != nil { - return err - } - - isBootstrap := cs.isBootstrapSync(bestBlockHeader.Number) - if !isBootstrap { - return nil - } - - // we are more than 128 blocks behind the head, switch to bootstrap - cs.syncMode.Store(bootstrap) - isSyncedGauge.Set(0) - logger.Infof("🔁 switched sync mode to %s", bootstrap.String()) - - cs.wg.Add(1) - go cs.bootstrapSync() - return nil -} - -func (cs *chainSync) onBlockAnnounce(announced announcedBlock) error { - // TODO: https://github.com/ChainSafe/gossamer/issues/3432 - if cs.pendingBlocks.hasBlock(announced.header.Hash()) { - return fmt.Errorf("%w: block #%d (%s)", - errAlreadyInDisjointSet, announced.header.Number, announced.header.Hash()) - } - - err := cs.pendingBlocks.addHeader(announced.header) - if err != nil { - return fmt.Errorf("while adding pending block header: %w", err) - } - - if cs.getSyncMode() == bootstrap { - return nil - } - - bestBlockHeader, err := cs.blockState.BestBlockHeader() - if err != nil { - return fmt.Errorf("getting best block header: %w", err) - } - - isBootstrap := cs.isBootstrapSync(bestBlockHeader.Number) - if !isBootstrap { - return cs.requestAnnouncedBlock(bestBlockHeader, announced) - } - - return nil -} - -func (cs *chainSync) requestAnnouncedBlock(bestBlockHeader *types.Header, announce announcedBlock) error { - peerWhoAnnounced := announce.who - announcedHash := announce.header.Hash() - 
announcedNumber := announce.header.Number - - has, err := cs.blockState.HasHeader(announcedHash) - if err != nil { - return fmt.Errorf("checking if header exists: %s", err) - } - - if has { - return nil - } - - highestFinalizedHeader, err := cs.blockState.GetHighestFinalisedHeader() - if err != nil { - return fmt.Errorf("getting highest finalized header") - } - - // if the announced block contains a lower number than our best - // block header, let's check if it is greater than our latests - // finalized header, if so this block belongs to a fork chain - if announcedNumber < bestBlockHeader.Number { - // ignore the block if it has the same or lower number - // TODO: is it following the protocol to send a blockAnnounce with number < highestFinalized number? - if announcedNumber <= highestFinalizedHeader.Number { - return nil - } - - return cs.requestForkBlocks(bestBlockHeader, highestFinalizedHeader, announce.header, announce.who) - } - - err = cs.requestChainBlocks(announce.header, bestBlockHeader, peerWhoAnnounced) - if err != nil { - return fmt.Errorf("requesting chain blocks: %w", err) - } - - err = cs.requestPendingBlocks(highestFinalizedHeader) - if err != nil { - return fmt.Errorf("while requesting pending blocks") - } - - return nil -} - -func (cs *chainSync) requestChainBlocks(announcedHeader, bestBlockHeader *types.Header, - peerWhoAnnounced peer.ID) error { - gapLength := uint32(announcedHeader.Number - bestBlockHeader.Number) - startAtBlock := announcedHeader.Number - totalBlocks := uint32(1) - - var request *messages.BlockRequestMessage - startingBlock := *variadic.MustNewUint32OrHash(announcedHeader.Hash()) - - if gapLength > 1 { - request = messages.NewBlockRequest(startingBlock, gapLength, - messages.BootstrapRequestData, messages.Descending) - - startAtBlock = announcedHeader.Number - uint(*request.Max) + 1 - totalBlocks = *request.Max - - logger.Infof("requesting %d blocks from peer: %v, descending request from #%d (%s)", - gapLength, peerWhoAnnounced, announcedHeader.Number, announcedHeader.Hash().Short()) - } else { - request = messages.NewBlockRequest(startingBlock, 1, messages.BootstrapRequestData, messages.Descending) - logger.Infof("requesting a single block from peer: %v with Number: #%d and Hash: (%s)", - peerWhoAnnounced, announcedHeader.Number, announcedHeader.Hash().Short()) - } - - resultsQueue := make(chan *syncTaskResult) - err := cs.submitRequest(request, &peerWhoAnnounced, resultsQueue) - if err != nil { - return err - } - err = cs.handleWorkersResults(resultsQueue, networkBroadcast, startAtBlock, totalBlocks) - if err != nil { - return fmt.Errorf("while handling workers results: %w", err) - } - - return nil -} - -func (cs *chainSync) requestForkBlocks(bestBlockHeader, highestFinalizedHeader, announcedHeader *types.Header, - peerWhoAnnounced peer.ID) error { - logger.Infof("block announce lower than best block #%d (%s) and greater highest finalized #%d (%s)", - bestBlockHeader.Number, bestBlockHeader.Hash().Short(), - highestFinalizedHeader.Number, highestFinalizedHeader.Hash().Short()) - - parentExists, err := cs.blockState.HasHeader(announcedHeader.ParentHash) - if err != nil && !errors.Is(err, database.ErrNotFound) { - return fmt.Errorf("while checking header exists: %w", err) - } - - gapLength := uint32(1) - startAtBlock := announcedHeader.Number - announcedHash := announcedHeader.Hash() - var request *messages.BlockRequestMessage - startingBlock := *variadic.MustNewUint32OrHash(announcedHash) - - if parentExists { - request = 
messages.NewBlockRequest(startingBlock, 1, messages.BootstrapRequestData, messages.Descending) - } else { - gapLength = uint32(announcedHeader.Number - highestFinalizedHeader.Number) - startAtBlock = highestFinalizedHeader.Number + 1 - request = messages.NewBlockRequest(startingBlock, gapLength, messages.BootstrapRequestData, messages.Descending) - } - - logger.Infof("requesting %d fork blocks from peer: %v starting at #%d (%s)", - gapLength, peerWhoAnnounced, announcedHeader.Number, announcedHash.Short()) - - resultsQueue := make(chan *syncTaskResult) - err = cs.submitRequest(request, &peerWhoAnnounced, resultsQueue) - if err != nil { - return err - } - err = cs.handleWorkersResults(resultsQueue, networkBroadcast, startAtBlock, gapLength) - if err != nil { - return fmt.Errorf("while handling workers results: %w", err) - } - - return nil -} - -func (cs *chainSync) requestPendingBlocks(highestFinalizedHeader *types.Header) error { - pendingBlocksTotal := cs.pendingBlocks.size() - logger.Infof("total of pending blocks: %d", pendingBlocksTotal) - if pendingBlocksTotal < 1 { - return nil - } - - pendingBlocks := cs.pendingBlocks.getBlocks() - for _, pendingBlock := range pendingBlocks { - if pendingBlock.number <= highestFinalizedHeader.Number { - cs.pendingBlocks.removeBlock(pendingBlock.hash) - continue - } - - parentExists, err := cs.blockState.HasHeader(pendingBlock.header.ParentHash) - if err != nil { - return fmt.Errorf("getting pending block parent header: %w", err) - } - - if parentExists { - err := cs.handleReadyBlock(pendingBlock.toBlockData(), networkBroadcast) - if err != nil { - return fmt.Errorf("handling ready block: %w", err) - } - continue - } - - gapLength := pendingBlock.number - highestFinalizedHeader.Number - if gapLength > 128 { - logger.Warnf("gap of %d blocks, max expected: 128 block", gapLength) - gapLength = 128 - } - - descendingGapRequest := messages.NewBlockRequest(*variadic.MustNewUint32OrHash(pendingBlock.hash), - uint32(gapLength), messages.BootstrapRequestData, messages.Descending) - startAtBlock := pendingBlock.number - uint(*descendingGapRequest.Max) + 1 - - // the `requests` in the tip sync are not related necessarily - // this is why we need to treat them separately - resultsQueue := make(chan *syncTaskResult) - err = cs.submitRequest(descendingGapRequest, nil, resultsQueue) - if err != nil { - return err - } - // TODO: we should handle the requests concurrently - // a way of achieve that is by constructing a new `handleWorkersResults` for - // handling only tip sync requests - err = cs.handleWorkersResults(resultsQueue, networkBroadcast, startAtBlock, *descendingGapRequest.Max) - if err != nil { - return fmt.Errorf("while handling workers results: %w", err) - } - } - - return nil -} - -func (cs *chainSync) requestMaxBlocksFrom(bestBlockHeader *types.Header, origin blockOrigin) error { //nolint:unparam - startRequestAt := bestBlockHeader.Number + 1 - - // targetBlockNumber is the virtual target we will request, however - // we should bound it to the real target which is collected through - // block announces received from other peers - targetBlockNumber := startRequestAt + maxRequestsAllowed*128 - realTarget := cs.peerViewSet.getTarget() - - if targetBlockNumber > realTarget { - targetBlockNumber = realTarget - } - - requests := messages.NewAscendingBlockRequests(startRequestAt, targetBlockNumber, - messages.BootstrapRequestData) - - var expectedAmountOfBlocks uint32 - for _, request := range requests { - if request.Max != nil { - expectedAmountOfBlocks += 
*request.Max - } - } - - resultsQueue, err := cs.submitRequests(requests) - if err != nil { - return err - } - err = cs.handleWorkersResults(resultsQueue, origin, startRequestAt, expectedAmountOfBlocks) - if err != nil { - return fmt.Errorf("while handling workers results: %w", err) - } - - return nil -} - -func (cs *chainSync) submitRequest( - request *messages.BlockRequestMessage, - who *peer.ID, - resultCh chan<- *syncTaskResult, -) error { - if !cs.blockState.IsPaused() { - cs.workerPool.submitRequest(request, who, resultCh) - return nil - } - return fmt.Errorf("submitting request: %w", errBlockStatePaused) -} - -func (cs *chainSync) submitRequests(requests []*messages.BlockRequestMessage) ( - resultCh chan *syncTaskResult, err error) { - if !cs.blockState.IsPaused() { - return cs.workerPool.submitRequests(requests), nil - } - return nil, fmt.Errorf("submitting requests: %w", errBlockStatePaused) -} - -func (cs *chainSync) showSyncStats(syncBegin time.Time, syncedBlocks int) { - finalisedHeader, err := cs.blockState.GetHighestFinalisedHeader() - if err != nil { - logger.Criticalf("getting highest finalized header: %w", err) - return - } - - totalSyncAndImportSeconds := time.Since(syncBegin).Seconds() - bps := float64(syncedBlocks) / totalSyncAndImportSeconds - logger.Infof("⛓️ synced %d blocks, "+ - "took: %.2f seconds, bps: %.2f blocks/second", - syncedBlocks, totalSyncAndImportSeconds, bps) - - logger.Infof( - "🚣 currently syncing, %d peers connected, "+ - "%d available workers, "+ - "target block number %d, "+ - "finalised #%d (%s) "+ - "sync mode: %s", - len(cs.network.Peers()), - cs.workerPool.totalWorkers(), - cs.peerViewSet.getTarget(), - finalisedHeader.Number, - finalisedHeader.Hash().Short(), - cs.getSyncMode().String(), - ) -} - -// handleWorkersResults, every time we submit requests to workers they results should be computed here -// and every cicle we should endup with a complete chain, whenever we identify -// any error from a worker we should evaluate the error and re-insert the request -// in the queue and wait for it to completes -// TODO: handle only justification requests -func (cs *chainSync) handleWorkersResults( - workersResults chan *syncTaskResult, origin blockOrigin, startAtBlock uint, expectedSyncedBlocks uint32) error { - startTime := time.Now() - syncingChain := make([]*types.BlockData, expectedSyncedBlocks) - // the total numbers of blocks is missing in the syncing chain - waitingBlocks := expectedSyncedBlocks - -taskResultLoop: - for waitingBlocks > 0 { - // in a case where we don't handle workers results we should check the pool - idleDuration := time.Minute - idleTimer := time.NewTimer(idleDuration) - - select { - case <-cs.stopCh: - return nil - - case <-idleTimer.C: - logger.Warnf("idle ticker triggered! 
checking pool") - cs.workerPool.useConnectedPeers() - continue - - case taskResult := <-workersResults: - if !idleTimer.Stop() { - <-idleTimer.C - } - - who := taskResult.who - request := taskResult.request - response := taskResult.response - - logger.Debugf("task result: peer(%s), with error: %v, with response: %v", - taskResult.who, taskResult.err != nil, taskResult.response != nil) - - if taskResult.err != nil { - if !errors.Is(taskResult.err, network.ErrReceivedEmptyMessage) { - logger.Errorf("task result: peer(%s) error: %s", - taskResult.who, taskResult.err) - - if errors.Is(taskResult.err, messages.ErrNilBlockInResponse) { - cs.network.ReportPeer(peerset.ReputationChange{ - Value: peerset.BadMessageValue, - Reason: peerset.BadMessageReason, - }, who) - } - - if strings.Contains(taskResult.err.Error(), "protocols not supported") { - cs.network.ReportPeer(peerset.ReputationChange{ - Value: peerset.BadProtocolValue, - Reason: peerset.BadProtocolReason, - }, who) - } - } - - err := cs.submitRequest(request, nil, workersResults) - if err != nil { - return err - } - continue - } - - if request.Direction == messages.Descending { - // reverse blocks before pre-validating and placing in ready queue - reverseBlockData(response.BlockData) - } - - err := validateResponseFields(request.RequestedData, response.BlockData) - if err != nil { - logger.Criticalf("validating fields: %s", err) - // TODO: check the reputation change for nil body in response - // and nil justification in response - if errors.Is(err, errNilHeaderInResponse) { - cs.network.ReportPeer(peerset.ReputationChange{ - Value: peerset.IncompleteHeaderValue, - Reason: peerset.IncompleteHeaderReason, - }, who) - } - - err = cs.submitRequest(taskResult.request, nil, workersResults) - if err != nil { - return err - } - continue taskResultLoop - } - - isChain := isResponseAChain(response.BlockData) - if !isChain { - logger.Criticalf("response from %s is not a chain", who) - err = cs.submitRequest(taskResult.request, nil, workersResults) - if err != nil { - return err - } - continue taskResultLoop - } - - grows := doResponseGrowsTheChain(response.BlockData, syncingChain, - startAtBlock, expectedSyncedBlocks) - if !grows { - logger.Criticalf("response from %s does not grows the ongoing chain", who) - err = cs.submitRequest(taskResult.request, nil, workersResults) - if err != nil { - return err - } - continue taskResultLoop - } - - for _, blockInResponse := range response.BlockData { - if slices.Contains(cs.badBlocks, blockInResponse.Hash.String()) { - logger.Criticalf("%s sent a known bad block: %s (#%d)", - who, blockInResponse.Hash.String(), blockInResponse.Number()) - - cs.network.ReportPeer(peerset.ReputationChange{ - Value: peerset.BadBlockAnnouncementValue, - Reason: peerset.BadBlockAnnouncementReason, - }, who) - - cs.workerPool.ignorePeerAsWorker(taskResult.who) - err = cs.submitRequest(taskResult.request, nil, workersResults) - if err != nil { - return err - } - continue taskResultLoop - } - - blockExactIndex := blockInResponse.Header.Number - startAtBlock - if blockExactIndex < uint(expectedSyncedBlocks) { - syncingChain[blockExactIndex] = blockInResponse - } - } - - // we need to check if we've filled all positions - // otherwise we should wait for more responses - waitingBlocks -= uint32(len(response.BlockData)) - - // we received a response without the desired amount of blocks - // we should include a new request to retrieve the missing blocks - if len(response.BlockData) < int(*request.Max) { - difference := 
uint32(int(*request.Max) - len(response.BlockData)) - lastItem := response.BlockData[len(response.BlockData)-1] - - startRequestNumber := uint32(lastItem.Header.Number + 1) - startAt, err := variadic.NewUint32OrHash(startRequestNumber) - if err != nil { - panic(err) - } - - taskResult.request = &messages.BlockRequestMessage{ - RequestedData: messages.BootstrapRequestData, - StartingBlock: *startAt, - Direction: messages.Ascending, - Max: &difference, - } - err = cs.submitRequest(taskResult.request, nil, workersResults) - if err != nil { - return err - } - continue taskResultLoop - } - } - } - - retreiveBlocksSeconds := time.Since(startTime).Seconds() - logger.Infof("🔽 retrieved %d blocks, took: %.2f seconds, starting process...", - expectedSyncedBlocks, retreiveBlocksSeconds) - - // response was validated! place into ready block queue - for _, bd := range syncingChain { - // block is ready to be processed! - if err := cs.handleReadyBlock(bd, origin); err != nil { - return fmt.Errorf("while handling ready block: %w", err) - } - } - - cs.showSyncStats(startTime, len(syncingChain)) - return nil -} - -func (cs *chainSync) handleReadyBlock(bd *types.BlockData, origin blockOrigin) error { - // if header was not requested, get it from the pending set - // if we're expecting headers, validate should ensure we have a header - if bd.Header == nil { - block := cs.pendingBlocks.getBlock(bd.Hash) - if block == nil { - // block wasn't in the pending set! - // let's check the db as maybe we already processed it - has, err := cs.blockState.HasHeader(bd.Hash) - if err != nil && !errors.Is(err, database.ErrNotFound) { - logger.Debugf("failed to check if header is known for hash %s: %s", bd.Hash, err) - return err - } - - if has { - logger.Tracef("ignoring block we've already processed, hash=%s", bd.Hash) - return err - } - - // this is bad and shouldn't happen - logger.Errorf("block with unknown header is ready: hash=%s", bd.Hash) - return err - } - - if block.header == nil { - logger.Errorf("new ready block number (unknown) with hash %s", bd.Hash) - return nil - } - - bd.Header = block.header - } - - err := cs.processBlockData(*bd, origin) - if err != nil { - // depending on the error, we might want to save this block for later - logger.Errorf("block data processing for block with hash %s failed: %s", bd.Hash, err) - return err - } - - cs.pendingBlocks.removeBlock(bd.Hash) - return nil -} - -// processBlockData processes the BlockData from a BlockResponse and -// returns the index of the last BlockData it handled on success, -// or the index of the block data that errored on failure. 
-// TODO: https://github.com/ChainSafe/gossamer/issues/3468 -func (cs *chainSync) processBlockData(blockData types.BlockData, origin blockOrigin) error { - // while in bootstrap mode we don't need to broadcast block announcements - announceImportedBlock := cs.getSyncMode() == tip - - if blockData.Header != nil { - var ( - hasJustification = blockData.Justification != nil && len(*blockData.Justification) > 0 - round uint64 - setID uint64 - ) - - if hasJustification { - var err error - round, setID, err = cs.finalityGadget.VerifyBlockJustification( - blockData.Header.Hash(), blockData.Header.Number, *blockData.Justification) - if err != nil { - return fmt.Errorf("verifying justification: %w", err) - } - } - - if blockData.Body != nil { - err := cs.processBlockDataWithHeaderAndBody(blockData, origin, announceImportedBlock) - if err != nil { - return fmt.Errorf("processing block data with header and body: %w", err) - } - } - - if hasJustification { - header := blockData.Header - err := cs.blockState.SetFinalisedHash(header.Hash(), round, setID) - if err != nil { - return fmt.Errorf("setting finalised hash: %w", err) - } - err = cs.blockState.SetJustification(header.Hash(), *blockData.Justification) - if err != nil { - return fmt.Errorf("setting justification for block number %d: %w", header.Number, err) - } - - return nil - } - } - - err := cs.blockState.CompareAndSetBlockData(&blockData) - if err != nil { - return fmt.Errorf("comparing and setting block data: %w", err) - } - - return nil -} - -func (cs *chainSync) processBlockDataWithHeaderAndBody(blockData types.BlockData, - origin blockOrigin, announceImportedBlock bool) (err error) { - - if origin != networkInitialSync { - err = cs.babeVerifier.VerifyBlock(blockData.Header) - if err != nil { - return fmt.Errorf("babe verifying block: %w", err) - } - } - - cs.handleBody(blockData.Body) - - block := &types.Block{ - Header: *blockData.Header, - Body: *blockData.Body, - } - - err = cs.handleBlock(block, announceImportedBlock) - if err != nil { - return fmt.Errorf("handling block: %w", err) - } - - return nil -} - -// handleHeader handles block bodies included in BlockResponses -func (cs *chainSync) handleBody(body *types.Body) { - acc := 0 - for _, ext := range *body { - acc += len(ext) - cs.transactionState.RemoveExtrinsic(ext) - } - - blockSizeGauge.Set(float64(acc)) -} - -// handleHeader handles blocks (header+body) included in BlockResponses -func (cs *chainSync) handleBlock(block *types.Block, announceImportedBlock bool) error { - parent, err := cs.blockState.GetHeader(block.Header.ParentHash) - if err != nil { - return fmt.Errorf("%w: %s", errFailedToGetParent, err) - } - - cs.storageState.Lock() - defer cs.storageState.Unlock() - - ts, err := cs.storageState.TrieState(&parent.StateRoot) - if err != nil { - return err - } - - root := ts.Trie().MustHash() - if !bytes.Equal(parent.StateRoot[:], root[:]) { - panic("parent state root does not match snapshot state root") - } - - rt, err := cs.blockState.GetRuntime(parent.Hash()) - if err != nil { - return err - } - - rt.SetContextStorage(ts) - - _, err = rt.ExecuteBlock(block) - if err != nil { - return fmt.Errorf("failed to execute block %d: %w", block.Header.Number, err) - } - - if err = cs.blockImportHandler.HandleBlockImport(block, ts, announceImportedBlock); err != nil { - return err - } - - blockHash := block.Header.Hash() - cs.telemetry.SendMessage(telemetry.NewBlockImport( - &blockHash, - block.Header.Number, - "NetworkInitialSync")) - - return nil -} - -// validateResponseFields 
checks that the expected fields are in the block data -func validateResponseFields(requestedData byte, blocks []*types.BlockData) error { - for _, bd := range blocks { - if bd == nil { - return errNilBlockData - } - - if (requestedData&messages.RequestedDataHeader) == messages.RequestedDataHeader && bd.Header == nil { - return fmt.Errorf("%w: %s", errNilHeaderInResponse, bd.Hash) - } - - if (requestedData&messages.RequestedDataBody) == messages.RequestedDataBody && bd.Body == nil { - return fmt.Errorf("%w: %s", errNilBodyInResponse, bd.Hash) - } - - // if we requested strictly justification - if (requestedData|messages.RequestedDataJustification) == messages.RequestedDataJustification && - bd.Justification == nil { - return fmt.Errorf("%w: %s", errNilJustificationInResponse, bd.Hash) - } - } - - return nil -} - -func isResponseAChain(responseBlockData []*types.BlockData) bool { - if len(responseBlockData) < 2 { - return true - } - - previousBlockData := responseBlockData[0] - for _, currBlockData := range responseBlockData[1:] { - previousHash := previousBlockData.Header.Hash() - isParent := previousHash == currBlockData.Header.ParentHash - if !isParent { - return false - } - - previousBlockData = currBlockData - } - - return true -} - -// doResponseGrowsTheChain will check if the acquired blocks grows the current chain -// matching their parent hashes -func doResponseGrowsTheChain(response, ongoingChain []*types.BlockData, startAtBlock uint, expectedTotal uint32) bool { - // the ongoing chain does not have any element, we can safely insert an item in it - if len(ongoingChain) < 1 { - return true - } - - compareParentHash := func(parent, child *types.BlockData) bool { - return parent.Header.Hash() == child.Header.ParentHash - } - - firstBlockInResponse := response[0] - firstBlockExactIndex := firstBlockInResponse.Header.Number - startAtBlock - if firstBlockExactIndex != 0 && firstBlockExactIndex < uint(expectedTotal) { - leftElement := ongoingChain[firstBlockExactIndex-1] - if leftElement != nil && !compareParentHash(leftElement, firstBlockInResponse) { - return false - } - } - - switch { - // if the response contains only one block then we should check both sides - // for example, if the response contains only one block called X we should - // check if its parent hash matches with the left element as well as we should - // check if the right element contains X hash as its parent hash - // ... W <- X -> Y ... - // we can skip left side comparison if X is in the 0 index and we can skip - // right side comparison if X is in the last index - case len(response) == 1: - if uint32(firstBlockExactIndex+1) < expectedTotal { - rightElement := ongoingChain[firstBlockExactIndex+1] - if rightElement != nil && !compareParentHash(firstBlockInResponse, rightElement) { - return false - } - } - // if the response contains more than 1 block then we need to compare - // only the start and the end of the acquired response, for example - // let's say we receive a response [C, D, E] and we need to check - // if those values fits correctly: - // ... 
B <- C D E -> F - // we skip the left check if its index is equals to 0 and we skip the right - // check if it ends in the latest position of the ongoing array - case len(response) > 1: - lastBlockInResponse := response[len(response)-1] - lastBlockExactIndex := lastBlockInResponse.Header.Number - startAtBlock - - if uint32(lastBlockExactIndex+1) < expectedTotal { - rightElement := ongoingChain[lastBlockExactIndex+1] - if rightElement != nil && !compareParentHash(lastBlockInResponse, rightElement) { - return false - } - } - } - - return true -} - -func (cs *chainSync) getHighestBlock() (highestBlock uint, err error) { - if cs.peerViewSet.size() == 0 { - return 0, errNoPeers - } - - for _, ps := range cs.peerViewSet.values() { - if ps.number < highestBlock { - continue - } - highestBlock = ps.number - } - - return highestBlock, nil -} diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go deleted file mode 100644 index 4af6deac79..0000000000 --- a/dot/sync/chain_sync_test.go +++ /dev/null @@ -1,1901 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "errors" - "fmt" - "sync/atomic" - "testing" - "time" - - "github.com/ChainSafe/gossamer/dot/network" - "github.com/ChainSafe/gossamer/dot/network/messages" - "github.com/ChainSafe/gossamer/dot/peerset" - "github.com/ChainSafe/gossamer/dot/telemetry" - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/common" - "github.com/ChainSafe/gossamer/lib/common/variadic" - "github.com/ChainSafe/gossamer/lib/runtime/storage" - "github.com/ChainSafe/gossamer/pkg/trie" - inmemory_trie "github.com/ChainSafe/gossamer/pkg/trie/inmemory" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.uber.org/mock/gomock" -) - -func Test_chainSyncState_String(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - s chainSyncState - want string - }{ - { - name: "case_bootstrap", - s: bootstrap, - want: "bootstrap", - }, - { - name: "case_tip", - s: tip, - want: "tip", - }, - { - name: "case_unknown", - s: 3, - want: "unknown", - }, - } - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - got := tt.s.String() - assert.Equal(t, tt.want, got) - }) - } -} - -func Test_chainSync_onBlockAnnounce(t *testing.T) { - t.Parallel() - const somePeer = peer.ID("abc") - - errTest := errors.New("test error") - emptyTrieState := storage.NewTrieState(inmemory_trie.NewEmptyTrie()) - block1AnnounceHeader := types.NewHeader(common.Hash{}, emptyTrieState.Trie().MustHash(), - common.Hash{}, 1, nil) - block2AnnounceHeader := types.NewHeader(block1AnnounceHeader.Hash(), - emptyTrieState.Trie().MustHash(), - common.Hash{}, 2, nil) - - testCases := map[string]struct { - waitBootstrapSync bool - chainSyncBuilder func(ctrl *gomock.Controller) *chainSync - peerID peer.ID - blockAnnounceHeader *types.Header - errWrapped error - errMessage string - expectedSyncMode chainSyncState - }{ - "announced_block_already_exists_in_disjoint_set": { - chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - pendingBlocks := NewMockDisjointBlockSet(ctrl) - pendingBlocks.EXPECT().hasBlock(block2AnnounceHeader.Hash()).Return(true) - return &chainSync{ - stopCh: make(chan struct{}), - pendingBlocks: pendingBlocks, - peerViewSet: newPeerViewSet(0), - workerPool: newSyncWorkerPool(NewMockNetwork(nil), NewMockRequestMaker(nil)), - } - }, - peerID: somePeer, - 
blockAnnounceHeader: block2AnnounceHeader, - errWrapped: errAlreadyInDisjointSet, - errMessage: fmt.Sprintf("already in disjoint set: block #%d (%s)", - block2AnnounceHeader.Number, block2AnnounceHeader.Hash()), - }, - "failed_to_add_announced_block_in_disjoint_set": { - chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - pendingBlocks := NewMockDisjointBlockSet(ctrl) - pendingBlocks.EXPECT().hasBlock(block2AnnounceHeader.Hash()).Return(false) - pendingBlocks.EXPECT().addHeader(block2AnnounceHeader).Return(errTest) - - return &chainSync{ - stopCh: make(chan struct{}), - pendingBlocks: pendingBlocks, - peerViewSet: newPeerViewSet(0), - workerPool: newSyncWorkerPool(NewMockNetwork(nil), NewMockRequestMaker(nil)), - } - }, - peerID: somePeer, - blockAnnounceHeader: block2AnnounceHeader, - errWrapped: errTest, - errMessage: "while adding pending block header: test error", - }, - "announced_block_while_in_bootstrap_mode": { - chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - pendingBlocks := NewMockDisjointBlockSet(ctrl) - pendingBlocks.EXPECT().hasBlock(block2AnnounceHeader.Hash()).Return(false) - pendingBlocks.EXPECT().addHeader(block2AnnounceHeader).Return(nil) - - state := atomic.Value{} - state.Store(bootstrap) - - return &chainSync{ - stopCh: make(chan struct{}), - pendingBlocks: pendingBlocks, - syncMode: state, - peerViewSet: newPeerViewSet(0), - workerPool: newSyncWorkerPool(NewMockNetwork(nil), NewMockRequestMaker(nil)), - } - }, - peerID: somePeer, - blockAnnounceHeader: block2AnnounceHeader, - }, - "announced_block_while_in_tip_mode": { - chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - pendingBlocksMock := NewMockDisjointBlockSet(ctrl) - pendingBlocksMock.EXPECT().hasBlock(block2AnnounceHeader.Hash()).Return(false) - pendingBlocksMock.EXPECT().addHeader(block2AnnounceHeader).Return(nil) - pendingBlocksMock.EXPECT().removeBlock(block2AnnounceHeader.Hash()) - pendingBlocksMock.EXPECT().size().Return(0) - - blockStateMock := NewMockBlockState(ctrl) - blockStateMock.EXPECT(). - HasHeader(block2AnnounceHeader.Hash()). - Return(false, nil) - blockStateMock.EXPECT().IsPaused().Return(false) - - blockStateMock.EXPECT(). - BestBlockHeader(). - Return(block1AnnounceHeader, nil) - - blockStateMock.EXPECT(). - GetHighestFinalisedHeader(). - Return(block2AnnounceHeader, nil). - Times(2) - - expectedRequest := messages.NewBlockRequest(*variadic.MustNewUint32OrHash(block2AnnounceHeader.Hash()), - 1, messages.BootstrapRequestData, messages.Descending) - - fakeBlockBody := types.Body([]types.Extrinsic{}) - mockedBlockResponse := &messages.BlockResponseMessage{ - BlockData: []*types.BlockData{ - { - Hash: block2AnnounceHeader.Hash(), - Header: block2AnnounceHeader, - Body: &fakeBlockBody, - }, - }, - } - - networkMock := NewMockNetwork(ctrl) - networkMock.EXPECT().Peers().Return([]common.PeerInfo{}) - - requestMaker := NewMockRequestMaker(ctrl) - requestMaker.EXPECT(). - Do(somePeer, expectedRequest, &messages.BlockResponseMessage{}). 
- DoAndReturn(func(_, _, response any) any { - responsePtr := response.(*messages.BlockResponseMessage) - *responsePtr = *mockedBlockResponse - return nil - }) - - babeVerifierMock := NewMockBabeVerifier(ctrl) - storageStateMock := NewMockStorageState(ctrl) - importHandlerMock := NewMockBlockImportHandler(ctrl) - telemetryMock := NewMockTelemetry(ctrl) - - const announceBlock = true - ensureSuccessfulBlockImportFlow(t, block1AnnounceHeader, mockedBlockResponse.BlockData, - blockStateMock, babeVerifierMock, storageStateMock, importHandlerMock, telemetryMock, - networkBroadcast, announceBlock) - - workerPool := newSyncWorkerPool(networkMock, requestMaker) - // include the peer who announced the block in the pool - workerPool.newPeer(somePeer) - - state := atomic.Value{} - state.Store(tip) - - return &chainSync{ - stopCh: make(chan struct{}), - pendingBlocks: pendingBlocksMock, - syncMode: state, - workerPool: workerPool, - network: networkMock, - blockState: blockStateMock, - babeVerifier: babeVerifierMock, - telemetry: telemetryMock, - storageState: storageStateMock, - blockImportHandler: importHandlerMock, - peerViewSet: newPeerViewSet(0), - } - }, - peerID: somePeer, - blockAnnounceHeader: block2AnnounceHeader, - }, - } - - for name, tt := range testCases { - tt := tt - t.Run(name, func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - - chainSync := tt.chainSyncBuilder(ctrl) - err := chainSync.onBlockAnnounce(announcedBlock{ - who: tt.peerID, - header: tt.blockAnnounceHeader, - }) - - assert.ErrorIs(t, err, tt.errWrapped) - if tt.errWrapped != nil { - assert.EqualError(t, err, tt.errMessage) - } - - if tt.waitBootstrapSync { - chainSync.wg.Wait() - err = chainSync.workerPool.stop() - require.NoError(t, err) - } - }) - } -} - -func Test_chainSync_onBlockAnnounceHandshake_tipModeNeedToCatchup(t *testing.T) { - ctrl := gomock.NewController(t) - const somePeer = peer.ID("abc") - - emptyTrieState := storage.NewTrieState(inmemory_trie.NewEmptyTrie()) - block1AnnounceHeader := types.NewHeader(common.Hash{}, emptyTrieState.Trie().MustHash(), - common.Hash{}, 1, nil) - block2AnnounceHeader := types.NewHeader(block1AnnounceHeader.Hash(), - emptyTrieState.Trie().MustHash(), - common.Hash{}, 130, nil) - - blockStateMock := NewMockBlockState(ctrl) - blockStateMock.EXPECT(). - BestBlockHeader(). - Return(block1AnnounceHeader, nil). - Times(2) - - blockStateMock.EXPECT(). - BestBlockHeader(). - Return(block2AnnounceHeader, nil). - Times(1) - - blockStateMock.EXPECT(). - GetHighestFinalisedHeader(). - Return(block1AnnounceHeader, nil). - Times(3) - - blockStateMock.EXPECT().IsPaused().Return(false).Times(2) - - expectedRequest := messages.NewAscendingBlockRequests( - block1AnnounceHeader.Number+1, - block2AnnounceHeader.Number, messages.BootstrapRequestData) - - networkMock := NewMockNetwork(ctrl) - networkMock.EXPECT().Peers().Return([]common.PeerInfo{}). - Times(2) - networkMock.EXPECT().AllConnectedPeersIDs().Return([]peer.ID{}).Times(2) - - firstMockedResponse := createSuccesfullBlockResponse(t, block1AnnounceHeader.Hash(), 2, 128) - latestItemFromMockedResponse := firstMockedResponse.BlockData[len(firstMockedResponse.BlockData)-1] - - secondMockedResponse := createSuccesfullBlockResponse(t, latestItemFromMockedResponse.Hash, - int(latestItemFromMockedResponse.Header.Number+1), 1) - - requestMaker := NewMockRequestMaker(ctrl) - requestMaker.EXPECT(). - Do(somePeer, expectedRequest[0], &messages.BlockResponseMessage{}). 
- DoAndReturn(func(_, _, response any) any { - responsePtr := response.(*messages.BlockResponseMessage) - *responsePtr = *firstMockedResponse - return nil - }).Times(2) - - requestMaker.EXPECT(). - Do(somePeer, expectedRequest[1], &messages.BlockResponseMessage{}). - DoAndReturn(func(_, _, response any) any { - responsePtr := response.(*messages.BlockResponseMessage) - *responsePtr = *secondMockedResponse - return nil - }).Times(2) - - babeVerifierMock := NewMockBabeVerifier(ctrl) - storageStateMock := NewMockStorageState(ctrl) - importHandlerMock := NewMockBlockImportHandler(ctrl) - telemetryMock := NewMockTelemetry(ctrl) - - const announceBlock = false - ensureSuccessfulBlockImportFlow(t, block1AnnounceHeader, firstMockedResponse.BlockData, - blockStateMock, babeVerifierMock, storageStateMock, importHandlerMock, telemetryMock, - networkInitialSync, announceBlock) - ensureSuccessfulBlockImportFlow(t, latestItemFromMockedResponse.Header, secondMockedResponse.BlockData, - blockStateMock, babeVerifierMock, storageStateMock, importHandlerMock, telemetryMock, - networkInitialSync, announceBlock) - - state := atomic.Value{} - state.Store(tip) - - stopCh := make(chan struct{}) - defer close(stopCh) - - chainSync := &chainSync{ - stopCh: stopCh, - peerViewSet: newPeerViewSet(10), - syncMode: state, - pendingBlocks: newDisjointBlockSet(0), - workerPool: newSyncWorkerPool(networkMock, requestMaker), - network: networkMock, - blockState: blockStateMock, - babeVerifier: babeVerifierMock, - telemetry: telemetryMock, - storageState: storageStateMock, - blockImportHandler: importHandlerMock, - } - - err := chainSync.onBlockAnnounceHandshake(somePeer, block2AnnounceHeader.Hash(), block2AnnounceHeader.Number) - require.NoError(t, err) - - chainSync.wg.Wait() - err = chainSync.workerPool.stop() - require.NoError(t, err) - - require.Equal(t, chainSync.getSyncMode(), tip) -} - -func TestChainSync_onBlockAnnounceHandshake_onBootstrapMode(t *testing.T) { - const randomHashString = "0x580d77a9136035a0bc3c3cd86286172f7f81291164c5914266073a30466fba21" - randomHash := common.MustHexToHash(randomHashString) - - testcases := map[string]struct { - newChainSync func(t *testing.T, ctrl *gomock.Controller) *chainSync - peerID peer.ID - bestHash common.Hash - bestNumber uint - shouldBeAWorker bool - workerStatus byte - }{ - "new_peer": { - newChainSync: func(t *testing.T, ctrl *gomock.Controller) *chainSync { - networkMock := NewMockNetwork(ctrl) - workerPool := newSyncWorkerPool(networkMock, NewMockRequestMaker(nil)) - - cs := newChainSyncTest(t, ctrl) - cs.syncMode.Store(bootstrap) - cs.workerPool = workerPool - return cs - }, - peerID: peer.ID("peer-test"), - bestHash: randomHash, - bestNumber: uint(20), - shouldBeAWorker: true, - workerStatus: available, - }, - "ignore_peer_should_not_be_included_in_the_workerpoll": { - newChainSync: func(t *testing.T, ctrl *gomock.Controller) *chainSync { - networkMock := NewMockNetwork(ctrl) - workerPool := newSyncWorkerPool(networkMock, NewMockRequestMaker(nil)) - workerPool.ignorePeers = map[peer.ID]struct{}{ - peer.ID("peer-test"): {}, - } - - cs := newChainSyncTest(t, ctrl) - cs.syncMode.Store(bootstrap) - cs.workerPool = workerPool - return cs - }, - peerID: peer.ID("peer-test"), - bestHash: randomHash, - bestNumber: uint(20), - shouldBeAWorker: false, - }, - "peer_already_exists_in_the_pool": { - newChainSync: func(t *testing.T, ctrl *gomock.Controller) *chainSync { - networkMock := NewMockNetwork(ctrl) - workerPool := newSyncWorkerPool(networkMock, NewMockRequestMaker(nil)) 
- workerPool.workers = map[peer.ID]*syncWorker{ - peer.ID("peer-test"): { - worker: &worker{status: available}, - }, - } - - cs := newChainSyncTest(t, ctrl) - cs.syncMode.Store(bootstrap) - cs.workerPool = workerPool - return cs - }, - peerID: peer.ID("peer-test"), - bestHash: randomHash, - bestNumber: uint(20), - shouldBeAWorker: true, - workerStatus: available, - }, - } - - for tname, tt := range testcases { - tt := tt - t.Run(tname, func(t *testing.T) { - ctrl := gomock.NewController(t) - cs := tt.newChainSync(t, ctrl) - cs.onBlockAnnounceHandshake(tt.peerID, tt.bestHash, tt.bestNumber) - - view, exists := cs.peerViewSet.find(tt.peerID) - require.True(t, exists) - require.Equal(t, tt.peerID, view.who) - require.Equal(t, tt.bestHash, view.hash) - require.Equal(t, tt.bestNumber, view.number) - - if tt.shouldBeAWorker { - syncWorker, exists := cs.workerPool.workers[tt.peerID] - require.True(t, exists) - require.Equal(t, tt.workerStatus, syncWorker.worker.status) - } else { - _, exists := cs.workerPool.workers[tt.peerID] - require.False(t, exists) - } - }) - } -} - -func newChainSyncTest(t *testing.T, ctrl *gomock.Controller) *chainSync { - t.Helper() - - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) - - cfg := chainSyncConfig{ - bs: mockBlockState, - pendingBlocks: newDisjointBlockSet(pendingBlocksLimit), - minPeers: 1, - maxPeers: 5, - slotDuration: 6 * time.Second, - } - - return newChainSync(cfg) -} - -func setupChainSyncToBootstrapMode(t *testing.T, blocksAhead uint, - bs BlockState, net Network, reqMaker network.RequestMaker, babeVerifier BabeVerifier, - storageState StorageState, blockImportHandler BlockImportHandler, telemetry Telemetry) *chainSync { - t.Helper() - mockedPeerID := []peer.ID{ - peer.ID("some_peer_1"), - peer.ID("some_peer_2"), - peer.ID("some_peer_3"), - } - - peerViewMap := map[peer.ID]peerView{} - for _, p := range mockedPeerID { - peerViewMap[p] = peerView{ - who: p, - hash: common.Hash{1, 2, 3}, - number: blocksAhead, - } - } - - cfg := chainSyncConfig{ - pendingBlocks: newDisjointBlockSet(pendingBlocksLimit), - minPeers: 1, - maxPeers: 5, - slotDuration: 6 * time.Second, - bs: bs, - net: net, - requestMaker: reqMaker, - babeVerifier: babeVerifier, - storageState: storageState, - blockImportHandler: blockImportHandler, - telemetry: telemetry, - } - - chainSync := newChainSync(cfg) - chainSync.peerViewSet = &peerViewSet{view: peerViewMap} - chainSync.syncMode.Store(bootstrap) - - return chainSync -} - -func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorker(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - - mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, - trie.EmptyHash, 0, types.NewDigest()) - - const blocksAhead = 128 - totalBlockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, blocksAhead) - mockedNetwork := NewMockNetwork(ctrl) - - workerPeerID := peer.ID("noot") - startingBlock := variadic.MustNewUint32OrHash(1) - max := uint32(128) - - mockedRequestMaker := NewMockRequestMaker(ctrl) - - expectedBlockRequestMessage := &messages.BlockRequestMessage{ - RequestedData: messages.BootstrapRequestData, - StartingBlock: *startingBlock, - Direction: messages.Ascending, - Max: &max, - } - - mockedRequestMaker.EXPECT(). - Do(workerPeerID, expectedBlockRequestMessage, &messages.BlockResponseMessage{}). 
-		DoAndReturn(func(_, _, response any) any {
-			responsePtr := response.(*messages.BlockResponseMessage)
-			*responsePtr = *totalBlockResponse
-			return nil
-		})
-
-	mockedBlockState := NewMockBlockState(ctrl)
-	mockedBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo))
-	mockedBlockState.EXPECT().IsPaused().Return(false)
-
-	mockBabeVerifier := NewMockBabeVerifier(ctrl)
-	mockStorageState := NewMockStorageState(ctrl)
-	mockImportHandler := NewMockBlockImportHandler(ctrl)
-	mockTelemetry := NewMockTelemetry(ctrl)
-
-	mockedBlockState.EXPECT().GetHighestFinalisedHeader().Return(types.NewEmptyHeader(), nil).Times(1)
-	mockedNetwork.EXPECT().Peers().Return([]common.PeerInfo{}).Times(1)
-
-	const announceBlock = false
-	// set up mocks for newly synced blocks that don't exist in our local database
-	ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, totalBlockResponse.BlockData, mockedBlockState,
-		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock)
-
-	// set up a chain sync which holds in its peer view map
-	// 3 peers, each one announcing block X as its best block number.
-	// We start this test with the genesis block as our best block, so
-	// we're far behind by X blocks and should execute a bootstrap
-	// sync to request those blocks
-	cs := setupChainSyncToBootstrapMode(t, blocksAhead,
-		mockedBlockState, mockedNetwork, mockedRequestMaker, mockBabeVerifier,
-		mockStorageState, mockImportHandler, mockTelemetry)
-
-	target := cs.peerViewSet.getTarget()
-	require.Equal(t, uint(128), target)
-
-	// include a new worker in the worker pool set; this worker
-	// should be an available peer that will receive a block request.
-	// The worker pool handles the worker management.
-	cs.workerPool.fromBlockAnnounce(peer.ID("noot"))
-
-	err := cs.requestMaxBlocksFrom(mockedGenesisHeader, networkInitialSync)
-	require.NoError(t, err)
-
-	err = cs.workerPool.stop()
-	require.NoError(t, err)
-}
-
-func TestChainSync_BootstrapSync_SuccessfulSync_WithTwoWorkers(t *testing.T) {
-	t.Parallel()
-
-	ctrl := gomock.NewController(t)
-	mockBlockState := NewMockBlockState(ctrl)
-	mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo))
-	mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash,
-		trie.EmptyHash, 0, types.NewDigest())
-
-	mockNetwork := NewMockNetwork(ctrl)
-	mockRequestMaker := NewMockRequestMaker(ctrl)
-
-	mockBabeVerifier := NewMockBabeVerifier(ctrl)
-	mockStorageState := NewMockStorageState(ctrl)
-	mockImportHandler := NewMockBlockImportHandler(ctrl)
-	mockTelemetry := NewMockTelemetry(ctrl)
-
-	mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(types.NewEmptyHeader(), nil).Times(1)
-	mockBlockState.EXPECT().IsPaused().Return(false)
-	mockNetwork.EXPECT().Peers().Return([]common.PeerInfo{}).Times(1)
-
-	// this test expects two workers, each responding to a request with 128 blocks,
-	// so we should import 256 blocks in total
-	blockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, 256)
-
-	// here we split the whole set into two halves; each half will be the "response" of one peer
-	worker1Response := &messages.BlockResponseMessage{
-		BlockData: blockResponse.BlockData[:128],
-	}
-	const announceBlock = false
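The two-worker tests around this point all rely on the same arithmetic: a sync target 256 blocks ahead is covered by ascending requests of at most 128 blocks each, one per available worker. A minimal, self-contained sketch of that range splitting; the splitRange helper and workerTask type are hypothetical names, not part of this patch:

package main

import "fmt"

// workerTask describes one block request: where it starts and how many
// blocks it asks for (the network message caps this at 128).
type workerTask struct {
	startBlock uint
	amount     uint
}

// splitRange breaks the range [start, start+total) into tasks of at most
// maxPerRequest blocks, mirroring how a 256-block gap becomes two
// 128-block requests handed to two workers.
func splitRange(start, total, maxPerRequest uint) []workerTask {
	var tasks []workerTask
	for total > 0 {
		amount := maxPerRequest
		if total < amount {
			amount = total
		}
		tasks = append(tasks, workerTask{startBlock: start, amount: amount})
		start += amount
		total -= amount
	}
	return tasks
}

func main() {
	// Two tasks: blocks 1..128 and 129..256.
	fmt.Println(splitRange(1, 256, 128))
}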
-	// the first peer will respond with blocks 1 to 128, so ensureSuccessfulBlockImportFlow
-	// will set up the expectations from the genesis header up to block 128
-	ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData, mockBlockState,
-		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock)
-
-	worker2Response := &messages.BlockResponseMessage{
-		BlockData: blockResponse.BlockData[128:],
-	}
-	// worker 2 will respond with blocks 129 to 256, so ensureSuccessfulBlockImportFlow
-	// will set up the expectations starting from block 128 (the last block of the
-	// previous worker) up to block 256
-	parent := worker1Response.BlockData[len(worker1Response.BlockData)-1]
-	ensureSuccessfulBlockImportFlow(t, parent.Header, worker2Response.BlockData, mockBlockState,
-		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock)
-
-	// we use gomock.Any since we cannot guarantee which peer picks which request,
-	// but the first call to DoBlockRequest will return the first set and the second
-	// call will return the second set
-	mockRequestMaker.EXPECT().
-		Do(gomock.Any(), gomock.Any(), &messages.BlockResponseMessage{}).
-		DoAndReturn(func(_, _, response any) any {
-			responsePtr := response.(*messages.BlockResponseMessage)
-			*responsePtr = *worker1Response
-			return nil
-		})
-
-	mockRequestMaker.EXPECT().
-		Do(gomock.Any(), gomock.Any(), &messages.BlockResponseMessage{}).
-		DoAndReturn(func(_, _, response any) any {
-			responsePtr := response.(*messages.BlockResponseMessage)
-			*responsePtr = *worker2Response
-			return nil
-		})
-
-	// set up a chain sync which holds in its peer view map
-	// 3 peers, each one announcing block 256 as its best block number.
-	// We start this test with the genesis block as our best block, so
-	// we're far behind by 256 blocks and should execute a bootstrap
-	// sync to request those blocks
-	const blocksAhead = 256
-	cs := setupChainSyncToBootstrapMode(t, blocksAhead,
-		mockBlockState, mockNetwork, mockRequestMaker, mockBabeVerifier,
-		mockStorageState, mockImportHandler, mockTelemetry)
-
-	target := cs.peerViewSet.getTarget()
-	require.Equal(t, uint(blocksAhead), target)
-
-	// include two new workers in the worker pool set; each worker
-	// should be an available peer that will receive a block request.
-	// The worker pool handles the worker management.
-	cs.workerPool.fromBlockAnnounce(peer.ID("noot"))
-	cs.workerPool.fromBlockAnnounce(peer.ID("noot2"))
-
-	err := cs.requestMaxBlocksFrom(mockedGenesisHeader, networkInitialSync)
-	require.NoError(t, err)
-
-	err = cs.workerPool.stop()
-	require.NoError(t, err)
-}
-
-func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorkerFailing(t *testing.T) {
-	t.Parallel()
-
-	ctrl := gomock.NewController(t)
-	mockBlockState := NewMockBlockState(ctrl)
-	mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo))
-	mockBlockState.EXPECT().IsPaused().Return(false).Times(2)
-	mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash,
-		trie.EmptyHash, 0, types.NewDigest())
-
-	mockNetwork := NewMockNetwork(ctrl)
-	mockRequestMaker := NewMockRequestMaker(ctrl)
-
-	mockBabeVerifier := NewMockBabeVerifier(ctrl)
-	mockStorageState := NewMockStorageState(ctrl)
-	mockImportHandler := NewMockBlockImportHandler(ctrl)
-	mockTelemetry := NewMockTelemetry(ctrl)
-
-	mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(types.NewEmptyHeader(), nil).Times(1)
-
-	mockNetwork.EXPECT().Peers().Return([]common.PeerInfo{}).Times(1)
-
-	// this test expects two workers, each responding to a request with 128 blocks,
-	// so we should import 256 blocks in total
-	blockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, 256)
-	const announceBlock = false
-
-	// here we split the whole set into two halves; each half will be the "response" of one peer
-	worker1Response := &messages.BlockResponseMessage{
-		BlockData: blockResponse.BlockData[:128],
-	}
-
-	// the first peer will respond with blocks 1 to 128, so ensureSuccessfulBlockImportFlow
-	// will set up the expectations from the genesis header up to block 128
-	ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData, mockBlockState,
-		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock)
-
-	worker2Response := &messages.BlockResponseMessage{
-		BlockData: blockResponse.BlockData[128:],
-	}
-	// worker 2 will respond with blocks 129 to 256, so ensureSuccessfulBlockImportFlow
-	// will set up the expectations starting from block 128 (the last block of the
-	// previous worker) up to block 256
-	parent := worker1Response.BlockData[len(worker1Response.BlockData)-1]
-	ensureSuccessfulBlockImportFlow(t, parent.Header, worker2Response.BlockData, mockBlockState,
-		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock)
-
-	// we use gomock.Any since we cannot guarantee which peer picks which request,
-	// but the first call to DoBlockRequest will return the first set and the second
-	// call will return the second set
-	doBlockRequestCount := atomic.Int32{}
-	mockRequestMaker.EXPECT().
-		Do(gomock.Any(), gomock.Any(), &messages.BlockResponseMessage{}).
-		DoAndReturn(func(peerID, _, response any) any {
-			// let's ensure that DoBlockRequest is called by both
-			// peer.ID(alice) and peer.ID(bob). When bob calls, this method will fail,
-			// and alice should then pick up the failed request and re-execute it,
-			// which will be the third call
-			responsePtr := response.(*messages.BlockResponseMessage)
-			defer func() { doBlockRequestCount.Add(1) }()
-
-			switch doBlockRequestCount.Load() {
-			case 0:
-				*responsePtr = *worker1Response
-			case 1:
-				return errors.New("a bad error while getting a response")
-			default:
-				*responsePtr = *worker2Response
-			}
-			return nil
-		}).Times(3)
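The mock above scripts the failure/retry sequence this test asserts: the first request succeeds, the second fails, and the failed range is re-requested by the remaining worker. A minimal, single-goroutine sketch of that requeue-on-failure idea; the task type and askPeer function are hypothetical stand-ins, not the actual worker pool implementation:

package main

import (
	"errors"
	"fmt"
)

type task struct{ startBlock uint }

var bobFailed bool

// askPeer stands in for the real network request; it fails exactly once
// for the second range, mimicking bob's single failure in the test.
func askPeer(t task) (string, error) {
	if t.startBlock == 129 && !bobFailed {
		bobFailed = true
		return "", errors.New("a bad error while getting a response")
	}
	return fmt.Sprintf("blocks %d..%d", t.startBlock, t.startBlock+127), nil
}

func main() {
	tasks := make(chan task, 4)
	tasks <- task{1}
	tasks <- task{129}

	done := 0
	for done < 2 {
		t := <-tasks
		resp, err := askPeer(t)
		if err != nil {
			tasks <- t // requeue: another available worker retries the same range
			continue
		}
		done++
		fmt.Println("imported", resp)
	}
}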
-
-	// set up a chain sync which holds in its peer view map
-	// 3 peers, each one announcing block 256 as its best block number.
-	// We start this test with the genesis block as our best block, so
-	// we're far behind by 256 blocks and should execute a bootstrap
-	// sync to request those blocks
-	const blocksAhead = 256
-	cs := setupChainSyncToBootstrapMode(t, blocksAhead,
-		mockBlockState, mockNetwork, mockRequestMaker, mockBabeVerifier,
-		mockStorageState, mockImportHandler, mockTelemetry)
-
-	target := cs.peerViewSet.getTarget()
-	require.Equal(t, uint(blocksAhead), target)
-
-	// include two new workers in the worker pool set; each worker
-	// should be an available peer that will receive a block request.
-	// The worker pool handles the worker management.
-	cs.workerPool.fromBlockAnnounce(peer.ID("alice"))
-	cs.workerPool.fromBlockAnnounce(peer.ID("bob"))
-
-	err := cs.requestMaxBlocksFrom(mockedGenesisHeader, networkInitialSync)
-	require.NoError(t, err)
-
-	err = cs.workerPool.stop()
-	require.NoError(t, err)
-}
-
-func TestChainSync_BootstrapSync_SuccessfulSync_WithProtocolNotSupported(t *testing.T) {
-	t.Parallel()
-
-	ctrl := gomock.NewController(t)
-	mockBlockState := NewMockBlockState(ctrl)
-	mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo))
-	mockBlockState.EXPECT().IsPaused().Return(false).Times(2)
-	mockBlockState.EXPECT().
-		GetHighestFinalisedHeader().
-		Return(types.NewEmptyHeader(), nil).
-		Times(1)
-	mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash,
-		trie.EmptyHash, 0, types.NewDigest())
-
-	mockNetwork := NewMockNetwork(ctrl)
-	mockNetwork.EXPECT().Peers().Return([]common.PeerInfo{})
-	mockRequestMaker := NewMockRequestMaker(ctrl)
-
-	mockBabeVerifier := NewMockBabeVerifier(ctrl)
-	mockStorageState := NewMockStorageState(ctrl)
-	mockImportHandler := NewMockBlockImportHandler(ctrl)
-	mockTelemetry := NewMockTelemetry(ctrl)
-
-	// this test expects two workers, each responding to a request with 128 blocks,
-	// so we should import 256 blocks in total
-	blockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, 256)
-	const announceBlock = false
-
-	// here we split the whole set into two halves; each half will be the "response" of one peer
-	worker1Response := &messages.BlockResponseMessage{
-		BlockData: blockResponse.BlockData[:128],
-	}
-
-	// the first peer will respond with blocks 1 to 128, so ensureSuccessfulBlockImportFlow
-	// will set up the expectations from the genesis header up to block 128
-	ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData, mockBlockState,
-		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock)
-
-	worker2Response := &messages.BlockResponseMessage{
-		BlockData: blockResponse.BlockData[128:],
-	}
-	// worker 2 will respond with blocks 129 to 256, so ensureSuccessfulBlockImportFlow
-	// will set up the expectations starting from block 128 (the last block of the
-	// previous worker) up to block 256
-	parent := worker1Response.BlockData[len(worker1Response.BlockData)-1]
-	ensureSuccessfulBlockImportFlow(t, parent.Header, worker2Response.BlockData, mockBlockState,
-		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock)
-
-	// we use gomock.Any since we cannot guarantee which peer picks which request,
-	// but the first call to DoBlockRequest will return the first set and the second
-	// call will return the second set
-	doBlockRequestCount := atomic.Int32{}
-	mockRequestMaker.EXPECT().
-		Do(gomock.Any(), gomock.Any(), &messages.BlockResponseMessage{}).
-		DoAndReturn(func(peerID, _, response any) any {
-			// let's ensure that DoBlockRequest is called by both
-			// peer.ID(alice) and peer.ID(bob). When bob calls, this method will fail,
-			// and alice should then pick up the failed request and re-execute it,
-			// which will be the third call
-			responsePtr := response.(*messages.BlockResponseMessage)
-			defer func() { doBlockRequestCount.Add(1) }()
-
-			switch doBlockRequestCount.Load() {
-			case 0:
-				*responsePtr = *worker1Response
-			case 1:
-				return errors.New("protocols not supported")
-			default:
-				*responsePtr = *worker2Response
-			}
-
-			return nil
-		}).Times(3)
-
-	// since one peer will fail with "protocols not supported",
-	// its reputation will be reduced accordingly
-	mockNetwork.EXPECT().ReportPeer(peerset.ReputationChange{
-		Value:  peerset.BadProtocolValue,
-		Reason: peerset.BadProtocolReason,
-	}, gomock.AssignableToTypeOf(peer.ID("")))
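The ReportPeer expectation above pins down the punishment for this particular failure. Across these tests, each failure class maps to a specific reputation change; a sketch of that mapping as a hypothetical helper, not a function in the codebase (it assumes the surrounding package's imports; the Value/Reason constants come from the real dot/peerset package, errNilHeaderInResponse is asserted later in this file, and messages.ErrNilBlockInResponse is used below):

// reputationFor summarises which failure leads to which
// peerset.ReputationChange being reported for the offending peer.
func reputationFor(err error) *peerset.ReputationChange {
	switch {
	case strings.Contains(err.Error(), "protocols not supported"):
		// wrong protocol: the peer is useless for block requests
		return &peerset.ReputationChange{
			Value:  peerset.BadProtocolValue,
			Reason: peerset.BadProtocolReason,
		}
	case errors.Is(err, errNilHeaderInResponse):
		// response item missing the requested header
		return &peerset.ReputationChange{
			Value:  peerset.IncompleteHeaderValue,
			Reason: peerset.IncompleteHeaderReason,
		}
	case errors.Is(err, messages.ErrNilBlockInResponse):
		// malformed message carrying a nil block
		return &peerset.ReputationChange{
			Value:  peerset.BadMessageValue,
			Reason: peerset.BadMessageReason,
		}
	default:
		// transient network errors only cause the request to be retried
		return nil
	}
}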
-	// set up a chain sync which holds in its peer view map
-	// 3 peers, each one announcing block 256 as its best block number.
-	// We start this test with the genesis block as our best block, so
-	// we're far behind by 256 blocks and should execute a bootstrap
-	// sync to request those blocks
-	const blocksAhead = 256
-	cs := setupChainSyncToBootstrapMode(t, blocksAhead,
-		mockBlockState, mockNetwork, mockRequestMaker, mockBabeVerifier,
-		mockStorageState, mockImportHandler, mockTelemetry)
-
-	target := cs.peerViewSet.getTarget()
-	require.Equal(t, uint(blocksAhead), target)
-
-	// include two new workers in the worker pool set; each worker
-	// should be an available peer that will receive a block request.
-	// The worker pool handles the worker management.
-	cs.workerPool.fromBlockAnnounce(peer.ID("alice"))
-	cs.workerPool.fromBlockAnnounce(peer.ID("bob"))
-
-	err := cs.requestMaxBlocksFrom(mockedGenesisHeader, networkInitialSync)
-	require.NoError(t, err)
-
-	err = cs.workerPool.stop()
-	require.NoError(t, err)
-}
-
-func TestChainSync_BootstrapSync_SuccessfulSync_WithNilHeaderInResponse(t *testing.T) {
-	t.Parallel()
-
-	ctrl := gomock.NewController(t)
-	mockBlockState := NewMockBlockState(ctrl)
-	mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo))
-	mockBlockState.EXPECT().IsPaused().Return(false).Times(2)
-	mockBlockState.EXPECT().
-		GetHighestFinalisedHeader().
-		Return(types.NewEmptyHeader(), nil).
-		Times(1)
-	mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash,
-		trie.EmptyHash, 0, types.NewDigest())
-
-	mockNetwork := NewMockNetwork(ctrl)
-	mockNetwork.EXPECT().Peers().Return([]common.PeerInfo{})
-	mockRequestMaker := NewMockRequestMaker(ctrl)
-
-	mockBabeVerifier := NewMockBabeVerifier(ctrl)
-	mockStorageState := NewMockStorageState(ctrl)
-	mockImportHandler := NewMockBlockImportHandler(ctrl)
-	mockTelemetry := NewMockTelemetry(ctrl)
-
-	// this test expects two workers, each responding to a request with 128 blocks,
-	// so we should import 256 blocks in total
-	blockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, 256)
-	const announceBlock = false
-
-	// here we split the whole set into two halves; each half will be the "response" of one peer
-	worker1Response := &messages.BlockResponseMessage{
-		BlockData: blockResponse.BlockData[:128],
-	}
-
-	// the first peer will respond with blocks 1 to 128, so ensureSuccessfulBlockImportFlow
-	// will set up the expectations from the genesis header up to block 128
-	ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData, mockBlockState,
-		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock)
-
-	worker2Response := &messages.BlockResponseMessage{
-		BlockData: blockResponse.BlockData[128:],
-	}
-	// worker 2 will respond with blocks 129 to 256, so ensureSuccessfulBlockImportFlow
-	// will set up the expectations starting from block 128 (the last block of the
-	// previous worker) up to block 256
-	parent := worker1Response.BlockData[127]
-	ensureSuccessfulBlockImportFlow(t, parent.Header, worker2Response.BlockData, mockBlockState,
-		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock)
-
-	// we use gomock.Any since we cannot guarantee which peer picks which request,
-	// but the first call to DoBlockRequest will return the first set and the second
-	// call will return the second set
-	doBlockRequestCount := atomic.Int32{}
-	mockRequestMaker.EXPECT().
-		Do(gomock.Any(), gomock.Any(), &messages.BlockResponseMessage{}).
-		DoAndReturn(func(peerID, _, response any) any {
-			// let's ensure that DoBlockRequest is called by both
-			// peer.ID(alice) and peer.ID(bob). When bob calls, this method returns
-			// a response item without the requested header
-			responsePtr := response.(*messages.BlockResponseMessage)
-			defer func() { doBlockRequestCount.Add(1) }()
-
-			switch doBlockRequestCount.Load() {
-			case 0:
-				*responsePtr = *worker1Response
-			case 1:
-				incompleteBlockData := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 128, 256)
-				incompleteBlockData.BlockData[0].Header = nil
-
-				*responsePtr = *incompleteBlockData
-			default:
-				*responsePtr = *worker2Response
-			}
-
-			return nil
-		}).Times(3)
-
-	// since one peer will respond with a block missing its header,
-	// its reputation will be reduced accordingly
-	mockNetwork.EXPECT().ReportPeer(peerset.ReputationChange{
-		Value:  peerset.IncompleteHeaderValue,
-		Reason: peerset.IncompleteHeaderReason,
-	}, gomock.AssignableToTypeOf(peer.ID("")))
-	// set up a chain sync which holds in its peer view map
-	// 3 peers, each one announcing block 129 as its best block number.
- // We start this test with genesis block being our best block, so - // we're far behind by 128 blocks, we should execute a bootstrap - // sync request those blocks - const blocksAhead = 256 - cs := setupChainSyncToBootstrapMode(t, blocksAhead, - mockBlockState, mockNetwork, mockRequestMaker, mockBabeVerifier, - mockStorageState, mockImportHandler, mockTelemetry) - - target := cs.peerViewSet.getTarget() - require.Equal(t, uint(blocksAhead), target) - - // include a new worker in the worker pool set, this worker - // should be an available peer that will receive a block request - // the worker pool executes the workers management - cs.workerPool.fromBlockAnnounce(peer.ID("alice")) - cs.workerPool.fromBlockAnnounce(peer.ID("bob")) - - err := cs.requestMaxBlocksFrom(mockedGenesisHeader, networkInitialSync) - require.NoError(t, err) - - err = cs.workerPool.stop() - require.NoError(t, err) -} - -func TestChainSync_BootstrapSync_SuccessfulSync_WithNilBlockInResponse(t *testing.T) { - t.Parallel() - - ctrl := gomock.NewController(t) - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) - mockBlockState.EXPECT().IsPaused().Return(false).Times(2) - mockBlockState.EXPECT(). - GetHighestFinalisedHeader(). - Return(types.NewEmptyHeader(), nil). - Times(1) - mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, - trie.EmptyHash, 0, types.NewDigest()) - - mockBabeVerifier := NewMockBabeVerifier(ctrl) - mockStorageState := NewMockStorageState(ctrl) - mockImportHandler := NewMockBlockImportHandler(ctrl) - mockTelemetry := NewMockTelemetry(ctrl) - - blockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, 128) - const announceBlock = false - - workerResponse := &messages.BlockResponseMessage{ - BlockData: blockResponse.BlockData, - } - - // the first peer will respond the from the block 1 to 128 so the ensureBlockImportFlow - // will setup the expectations starting from the genesis header until block 128 - ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, workerResponse.BlockData, mockBlockState, - mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock) - - doBlockRequestCount := atomic.Int32{} - mockRequestMaker := NewMockRequestMaker(ctrl) - mockRequestMaker.EXPECT(). - Do(gomock.Any(), gomock.Any(), &messages.BlockResponseMessage{}). - DoAndReturn(func(peerID, _, response any) any { - // lets ensure that the DoBlockRequest is called by - // peer.ID(alice) and peer.ID(bob). 
-			// When bob calls, this method returns the
-			// messages.ErrNilBlockInResponse error, simulating a response
-			// that carries a nil block
-			responsePtr := response.(*messages.BlockResponseMessage)
-			defer func() { doBlockRequestCount.Add(1) }()
-
-			switch doBlockRequestCount.Load() {
-			case 0:
-				return messages.ErrNilBlockInResponse
-			case 1:
-				*responsePtr = *workerResponse
-			}
-
-			return nil
-		}).Times(2)
-
-	mockNetwork := NewMockNetwork(ctrl)
-	mockNetwork.EXPECT().Peers().Return([]common.PeerInfo{})
-
-	// the offending peer's reputation will be reduced
-	mockNetwork.EXPECT().ReportPeer(peerset.ReputationChange{
-		Value:  peerset.BadMessageValue,
-		Reason: peerset.BadMessageReason,
-	}, gomock.AssignableToTypeOf(peer.ID("")))
-
-	const blocksAhead = 128
-	cs := setupChainSyncToBootstrapMode(t, blocksAhead,
-		mockBlockState, mockNetwork, mockRequestMaker, mockBabeVerifier,
-		mockStorageState, mockImportHandler, mockTelemetry)
-
-	target := cs.peerViewSet.getTarget()
-	require.Equal(t, uint(blocksAhead), target)
-
-	// include two new workers in the worker pool set; each worker
-	// should be an available peer that will receive a block request.
-	// The worker pool handles the worker management.
-	cs.workerPool.fromBlockAnnounce(peer.ID("alice"))
-	cs.workerPool.fromBlockAnnounce(peer.ID("bob"))
-
-	err := cs.requestMaxBlocksFrom(mockedGenesisHeader, networkInitialSync)
-	require.NoError(t, err)
-
-	err = cs.workerPool.stop()
-	require.NoError(t, err)
-}
-
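The next test feeds the syncer a response whose blocks are out of order. The property being violated is simple chain continuity: each block's parent hash must equal the hash of the block before it, which is what the package's isResponseAChain checks (exercised directly in TestChainSync_isResponseAChain below). A minimal standalone sketch of the same idea, using simplified placeholder types rather than the real ones:

package main

import "fmt"

type blockData struct {
	hash       string
	parentHash string
}

// isChain reports whether every block's parentHash matches the hash of
// the preceding block, i.e. the response forms one contiguous chain.
func isChain(blocks []blockData) bool {
	for i := 1; i < len(blocks); i++ {
		if blocks[i].parentHash != blocks[i-1].hash {
			return false
		}
	}
	return true
}

func main() {
	ok := []blockData{{hash: "0x01", parentHash: "0x00"}, {hash: "0x02", parentHash: "0x01"}}
	// swapping two entries breaks continuity, as the test below forces
	swapped := []blockData{{hash: "0x02", parentHash: "0x01"}, {hash: "0x01", parentHash: "0x00"}}
	fmt.Println(isChain(ok), isChain(swapped)) // true false
}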
-func TestChainSync_BootstrapSync_SuccessfulSync_WithResponseIsNotAChain(t *testing.T) {
-	t.Parallel()
-
-	ctrl := gomock.NewController(t)
-	mockBlockState := NewMockBlockState(ctrl)
-	mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo))
-	mockBlockState.EXPECT().IsPaused().Return(false).Times(2)
-	mockBlockState.EXPECT().
-		GetHighestFinalisedHeader().
-		Return(types.NewEmptyHeader(), nil).
-		Times(1)
-	mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash,
-		trie.EmptyHash, 0, types.NewDigest())
-
-	mockNetwork := NewMockNetwork(ctrl)
-	mockNetwork.EXPECT().Peers().Return([]common.PeerInfo{})
-	mockRequestMaker := NewMockRequestMaker(ctrl)
-
-	mockBabeVerifier := NewMockBabeVerifier(ctrl)
-	mockStorageState := NewMockStorageState(ctrl)
-	mockImportHandler := NewMockBlockImportHandler(ctrl)
-	mockTelemetry := NewMockTelemetry(ctrl)
-
-	// this test expects two workers, each responding to a request with 128 blocks,
-	// so we should import 256 blocks in total
-	blockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, 256)
-	const announceBlock = false
-
-	// here we split the whole set into two halves; each half will be the "response" of one peer
-	worker1Response := &messages.BlockResponseMessage{
-		BlockData: blockResponse.BlockData[:128],
-	}
-
-	// the first peer will respond with blocks 1 to 128, so ensureSuccessfulBlockImportFlow
-	// will set up the expectations from the genesis header up to block 128
-	ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData, mockBlockState,
-		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock)
-
-	worker2Response := &messages.BlockResponseMessage{
-		BlockData: blockResponse.BlockData[128:],
-	}
-	// worker 2 will respond with blocks 129 to 256, so ensureSuccessfulBlockImportFlow
-	// will set up the expectations starting from block 128 (the last block of the
-	// previous worker) up to block 256
-	parent := worker1Response.BlockData[127]
-	ensureSuccessfulBlockImportFlow(t, parent.Header, worker2Response.BlockData, mockBlockState,
-		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock)
-
-	// we use gomock.Any since we cannot guarantee which peer picks which request,
-	// but the first call to DoBlockRequest will return the first set and the second
-	// call will return the second set
-	doBlockRequestCount := atomic.Int32{}
-	mockRequestMaker.EXPECT().
-		Do(gomock.Any(), gomock.Any(), &messages.BlockResponseMessage{}).
-		DoAndReturn(func(peerID, _, response any) any {
-			// let's ensure that DoBlockRequest is called by both
-			// peer.ID(alice) and peer.ID(bob). When bob calls, this method returns
-			// a response that does not form a chain
-			responsePtr := response.(*messages.BlockResponseMessage)
-			defer func() { doBlockRequestCount.Add(1) }()
-
-			switch doBlockRequestCount.Load() {
-			case 0:
-				*responsePtr = *worker1Response
-			case 1:
-				notAChainBlockData := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 128, 256)
-				// swap positions to break the chain
-				notAChainBlockData.BlockData[0], notAChainBlockData.BlockData[130] =
-					notAChainBlockData.BlockData[130], notAChainBlockData.BlockData[0]
-
-				*responsePtr = *notAChainBlockData
-			default:
-				*responsePtr = *worker2Response
-			}
-
-			return nil
-		}).Times(3)
-
-	// set up a chain sync which holds in its peer view map
-	// 3 peers, each one announcing block 129 as its best block number.
- // We start this test with genesis block being our best block, so - // we're far behind by 128 blocks, we should execute a bootstrap - // sync request those blocks - const blocksAhead = 256 - cs := setupChainSyncToBootstrapMode(t, blocksAhead, - mockBlockState, mockNetwork, mockRequestMaker, mockBabeVerifier, - mockStorageState, mockImportHandler, mockTelemetry) - - target := cs.peerViewSet.getTarget() - - require.Equal(t, uint(blocksAhead), target) - - // include a new worker in the worker pool set, this worker - // should be an available peer that will receive a block request - // the worker pool executes the workers management - cs.workerPool.fromBlockAnnounce(peer.ID("alice")) - cs.workerPool.fromBlockAnnounce(peer.ID("bob")) - - err := cs.requestMaxBlocksFrom(mockedGenesisHeader, networkInitialSync) - require.NoError(t, err) - - err = cs.workerPool.stop() - require.NoError(t, err) -} - -func TestChainSync_BootstrapSync_SuccessfulSync_WithReceivedBadBlock(t *testing.T) { - t.Parallel() - - ctrl := gomock.NewController(t) - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) - mockBlockState.EXPECT().IsPaused().Return(false).Times(2) - mockBlockState.EXPECT(). - GetHighestFinalisedHeader(). - Return(types.NewEmptyHeader(), nil). - Times(1) - - mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, - trie.EmptyHash, 0, types.NewDigest()) - - mockNetwork := NewMockNetwork(ctrl) - mockNetwork.EXPECT().Peers().Return([]common.PeerInfo{}) - mockRequestMaker := NewMockRequestMaker(ctrl) - - mockBabeVerifier := NewMockBabeVerifier(ctrl) - mockStorageState := NewMockStorageState(ctrl) - mockImportHandler := NewMockBlockImportHandler(ctrl) - mockTelemetry := NewMockTelemetry(ctrl) - - // this test expects two workers responding each request with 128 blocks which means - // we should import 256 blocks in total - blockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, 256) - const announceBlock = false - - // here we split the whole set in two parts each one will be the "response" for each peer - worker1Response := &messages.BlockResponseMessage{ - BlockData: blockResponse.BlockData[:128], - } - - // the first peer will respond the from the block 1 to 128 so the ensureBlockImportFlow - // will setup the expectations starting from the genesis header until block 128 - ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData, mockBlockState, - mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock) - - worker2Response := &messages.BlockResponseMessage{ - BlockData: blockResponse.BlockData[128:], - } - // the worker 2 will respond from block 129 to 256 so the ensureBlockImportFlow - // will setup the expectations starting from block 128, from previous worker, until block 256 - parent := worker1Response.BlockData[len(worker1Response.BlockData)-1] - ensureSuccessfulBlockImportFlow(t, parent.Header, worker2Response.BlockData, mockBlockState, - mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock) - - fakeBadBlockHash := common.MustHexToHash("0x18767cb4bb4cc13bf119f6613aec5487d4c06a2e453de53d34aea6f3f1ee9855") - - // we use gomock.Any since I cannot guarantee which peer picks which request - // but the first call to DoBlockRequest will return the first set and the second - // call will return the second set - doBlockRequestCount := 
atomic.Int32{}
-	mockRequestMaker.EXPECT().
-		Do(gomock.Any(), gomock.Any(), &messages.BlockResponseMessage{}).
-		DoAndReturn(func(peerID, _, response any) any {
-			// let's ensure that DoBlockRequest is called by both
-			// peer.ID(alice) and peer.ID(bob). When bob calls, this method returns
-			// a response that contains a known bad block
-			responsePtr := response.(*messages.BlockResponseMessage)
-			defer func() { doBlockRequestCount.Add(1) }()
-
-			switch doBlockRequestCount.Load() {
-			case 0:
-				*responsePtr = *worker1Response
-			case 1:
-				// use the first response's last item hash to produce the second
-				// response's block data, so we can guarantee that the second
-				// response continues the first response's blocks
-				firstResponseLastItem := worker1Response.BlockData[len(worker1Response.BlockData)-1]
-				blockDataWithBadBlock := createSuccesfullBlockResponse(t,
-					firstResponseLastItem.Header.Hash(),
-					129,
-					128)
-
-				// change the last item of the second response to be a bad block, so we
-				// guarantee that this second response is still a chain (changing the hash
-				// of a block in the middle of the response breaks the `isAChain` verification)
-				lastItem := len(blockDataWithBadBlock.BlockData) - 1
-				blockDataWithBadBlock.BlockData[lastItem].Hash = fakeBadBlockHash
-				*responsePtr = *blockDataWithBadBlock
-			default:
-				*responsePtr = *worker2Response
-			}
-
-			return nil
-		}).Times(3)
-
-	mockNetwork.EXPECT().ReportPeer(peerset.ReputationChange{
-		Value:  peerset.BadBlockAnnouncementValue,
-		Reason: peerset.BadBlockAnnouncementReason,
-	}, gomock.AssignableToTypeOf(peer.ID("")))
-	// set up a chain sync which holds in its peer view map
-	// 3 peers, each one announcing block 256 as its best block number.
-	// We start this test with the genesis block as our best block, so
-	// we're far behind by 256 blocks and should execute a bootstrap
-	// sync to request those blocks
-	const blocksAhead = 256
-	cs := setupChainSyncToBootstrapMode(t, blocksAhead,
-		mockBlockState, mockNetwork, mockRequestMaker, mockBabeVerifier,
-		mockStorageState, mockImportHandler, mockTelemetry)
-
-	cs.badBlocks = []string{fakeBadBlockHash.String()}
-
-	target := cs.peerViewSet.getTarget()
-	require.Equal(t, uint(blocksAhead), target)
-
-	// include two new workers in the worker pool set; each worker
-	// should be an available peer that will receive a block request.
-	// The worker pool handles the worker management.
-	cs.workerPool.fromBlockAnnounce(peer.ID("alice"))
-	cs.workerPool.fromBlockAnnounce(peer.ID("bob"))
-
-	err := cs.requestMaxBlocksFrom(mockedGenesisHeader, networkInitialSync)
-	require.NoError(t, err)
-
-	err = cs.workerPool.stop()
-	require.NoError(t, err)
-
-	// the offending peer should not be in the worker pool
-	// and should be in the ignore list
-	require.Len(t, cs.workerPool.workers, 1)
-	require.Len(t, cs.workerPool.ignorePeers, 1)
-}
-
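The bad-block test above seeds cs.badBlocks with one known-bad hash and then expects the peer that served it to be reported and moved to the ignore list. A standalone sketch of that screening step; the containsBadBlock helper is a hypothetical name for the idea, operating on hex-encoded hashes:

package main

import "fmt"

// containsBadBlock returns the first response hash that appears in the
// known-bad-block list, if any.
func containsBadBlock(responseHashes, badBlocks []string) (string, bool) {
	bad := make(map[string]struct{}, len(badBlocks))
	for _, h := range badBlocks {
		bad[h] = struct{}{}
	}
	for _, h := range responseHashes {
		if _, ok := bad[h]; ok {
			return h, true
		}
	}
	return "", false
}

func main() {
	badBlocks := []string{"0x18767cb4bb4cc13bf119f6613aec5487d4c06a2e453de53d34aea6f3f1ee9855"}
	response := []string{"0xaa", "0x18767cb4bb4cc13bf119f6613aec5487d4c06a2e453de53d34aea6f3f1ee9855"}
	if h, found := containsBadBlock(response, badBlocks); found {
		// in the real code the response is discarded and the sending
		// peer is reported and ignored
		fmt.Println("bad block in response:", h)
	}
}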
-func TestChainSync_BootstrapSync_SucessfulSync_ReceivedPartialBlockData(t *testing.T) {
-	t.Parallel()
-
-	ctrl := gomock.NewController(t)
-	mockBlockState := NewMockBlockState(ctrl)
-	mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo))
-	mockBlockState.EXPECT().IsPaused().Return(false).Times(2)
-	mockBlockState.EXPECT().
-		GetHighestFinalisedHeader().
-		Return(types.NewEmptyHeader(), nil).
-		Times(1)
-
-	mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash,
-		trie.EmptyHash, 0, types.NewDigest())
-
-	mockNetwork := NewMockNetwork(ctrl)
-	mockNetwork.EXPECT().Peers().Return([]common.PeerInfo{})
-
-	mockRequestMaker := NewMockRequestMaker(ctrl)
-
-	mockBabeVerifier := NewMockBabeVerifier(ctrl)
-	mockStorageState := NewMockStorageState(ctrl)
-	mockImportHandler := NewMockBlockImportHandler(ctrl)
-	mockTelemetry := NewMockTelemetry(ctrl)
-
-	// create a set of 128 blocks
-	blockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, 128)
-	const announceBlock = false
-
-	// the worker will return only part of the set
-	worker1Response := &messages.BlockResponseMessage{
-		BlockData: blockResponse.BlockData[:97],
-	}
-
-	// the first peer will respond with blocks 1 to 97, so ensureSuccessfulBlockImportFlow
-	// will set up the expectations from the genesis header up to block 97
-	ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData, mockBlockState,
-		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock)
-
-	worker1MissingBlocksResponse := &messages.BlockResponseMessage{
-		BlockData: blockResponse.BlockData[97:],
-	}
-
-	// last item from the previous response
-	parent := worker1Response.BlockData[96]
-	ensureSuccessfulBlockImportFlow(t, parent.Header, worker1MissingBlocksResponse.BlockData, mockBlockState,
-		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock)
-
-	doBlockRequestCount := 0
-	mockRequestMaker.EXPECT().
-		Do(gomock.Any(), gomock.Any(), &messages.BlockResponseMessage{}).
-		DoAndReturn(func(peerID, _, response any) any {
-			// let's ensure that the DoBlockRequest is called by
-			// peer.ID(alice). 
The first call will return only 97 blocks - // the handler should issue another call to retrieve the missing blocks - responsePtr := response.(*messages.BlockResponseMessage) - defer func() { doBlockRequestCount++ }() - - if doBlockRequestCount == 0 { - *responsePtr = *worker1Response - } else { - *responsePtr = *worker1MissingBlocksResponse - } - - return nil - }).Times(2) - - const blocksAhead = 128 - cs := setupChainSyncToBootstrapMode(t, blocksAhead, - mockBlockState, mockNetwork, mockRequestMaker, mockBabeVerifier, - mockStorageState, mockImportHandler, mockTelemetry) - - target := cs.peerViewSet.getTarget() - require.Equal(t, uint(blocksAhead), target) - - cs.workerPool.fromBlockAnnounce(peer.ID("alice")) - - err := cs.requestMaxBlocksFrom(mockedGenesisHeader, networkInitialSync) - require.NoError(t, err) - - err = cs.workerPool.stop() - require.NoError(t, err) - - require.Len(t, cs.workerPool.workers, 1) - - _, ok := cs.workerPool.workers[peer.ID("alice")] - require.True(t, ok) -} - -func createSuccesfullBlockResponse(t *testing.T, parentHeader common.Hash, - startingAt, numBlocks int) *messages.BlockResponseMessage { - t.Helper() - - response := new(messages.BlockResponseMessage) - response.BlockData = make([]*types.BlockData, numBlocks) - - emptyTrieState := storage.NewTrieState(inmemory_trie.NewEmptyTrie()) - tsRoot := emptyTrieState.Trie().MustHash() - - firstHeader := types.NewHeader(parentHeader, tsRoot, common.Hash{}, - uint(startingAt), nil) - response.BlockData[0] = &types.BlockData{ - Hash: firstHeader.Hash(), - Header: firstHeader, - Body: types.NewBody([]types.Extrinsic{}), - Justification: nil, - } - - parentHash := firstHeader.Hash() - for idx := 1; idx < numBlocks; idx++ { - blockNumber := idx + startingAt - header := types.NewHeader(parentHash, tsRoot, common.Hash{}, - uint(blockNumber), nil) - response.BlockData[idx] = &types.BlockData{ - Hash: header.Hash(), - Header: header, - Body: types.NewBody([]types.Extrinsic{}), - Justification: nil, - } - parentHash = header.Hash() - } - - return response -} - -// ensureSuccessfulBlockImportFlow will setup the expectations for method calls -// that happens while chain sync imports a block -func ensureSuccessfulBlockImportFlow(t *testing.T, parentHeader *types.Header, - blocksReceived []*types.BlockData, mockBlockState *MockBlockState, - mockBabeVerifier *MockBabeVerifier, mockStorageState *MockStorageState, - mockImportHandler *MockBlockImportHandler, mockTelemetry *MockTelemetry, origin blockOrigin, announceBlock bool) { - t.Helper() - - for idx, blockData := range blocksReceived { - if origin != networkInitialSync { - mockBabeVerifier.EXPECT().VerifyBlock(blockData.Header).Return(nil) - } - - var previousHeader *types.Header - if idx == 0 { - previousHeader = parentHeader - } else { - previousHeader = blocksReceived[idx-1].Header - } - - mockBlockState.EXPECT().GetHeader(blockData.Header.ParentHash).Return(previousHeader, nil).AnyTimes() - mockStorageState.EXPECT().Lock().AnyTimes() - mockStorageState.EXPECT().Unlock().AnyTimes() - - emptyTrieState := storage.NewTrieState(inmemory_trie.NewEmptyTrie()) - parentStateRoot := previousHeader.StateRoot - mockStorageState.EXPECT().TrieState(&parentStateRoot). - Return(emptyTrieState, nil).AnyTimes() - - ctrl := gomock.NewController(t) - mockRuntimeInstance := NewMockInstance(ctrl) - mockBlockState.EXPECT().GetRuntime(previousHeader.Hash()). 
- Return(mockRuntimeInstance, nil).AnyTimes() - - expectedBlock := &types.Block{ - Header: *blockData.Header, - Body: *blockData.Body, - } - - mockRuntimeInstance.EXPECT().SetContextStorage(emptyTrieState).AnyTimes() - mockRuntimeInstance.EXPECT().ExecuteBlock(expectedBlock). - Return(nil, nil).AnyTimes() - - mockImportHandler.EXPECT().HandleBlockImport(expectedBlock, emptyTrieState, announceBlock). - Return(nil).AnyTimes() - - blockHash := blockData.Header.Hash() - expectedTelemetryMessage := telemetry.NewBlockImport( - &blockHash, - blockData.Header.Number, - "NetworkInitialSync") - mockTelemetry.EXPECT().SendMessage(expectedTelemetryMessage).AnyTimes() - mockBlockState.EXPECT().CompareAndSetBlockData(blockData).Return(nil).AnyTimes() - } -} - -func TestChainSync_validateResponseFields(t *testing.T) { - t.Parallel() - - block1Header := &types.Header{ - ParentHash: common.MustHexToHash("0x00597cb4bb4cc13bf119f6613aec7642d4c06a2e453de53d34aea6f3f1eeb504"), - Number: 2, - } - - block2Header := &types.Header{ - ParentHash: block1Header.Hash(), - Number: 3, - } - - cases := map[string]struct { - wantErr error - errString string - setupChainSync func(t *testing.T) *chainSync - requestedData byte - blockData *types.BlockData - }{ - "requested_bootstrap_data_but_got_nil_header": { - wantErr: errNilHeaderInResponse, - errString: "expected header, received none: " + - block2Header.Hash().String(), - requestedData: messages.BootstrapRequestData, - blockData: &types.BlockData{ - Hash: block2Header.Hash(), - Header: nil, - Body: &types.Body{}, - Justification: &[]byte{0}, - }, - setupChainSync: func(t *testing.T) *chainSync { - ctrl := gomock.NewController(t) - blockStateMock := NewMockBlockState(ctrl) - blockStateMock.EXPECT().HasHeader(block1Header.ParentHash).Return(true, nil) - - networkMock := NewMockNetwork(ctrl) - networkMock.EXPECT().ReportPeer(peerset.ReputationChange{ - Value: peerset.IncompleteHeaderValue, - Reason: peerset.IncompleteHeaderReason, - }, peer.ID("peer")) - - return &chainSync{ - blockState: blockStateMock, - network: networkMock, - } - }, - }, - "requested_bootstrap_data_but_got_nil_body": { - wantErr: errNilBodyInResponse, - errString: "expected body, received none: " + - block2Header.Hash().String(), - requestedData: messages.BootstrapRequestData, - blockData: &types.BlockData{ - Hash: block2Header.Hash(), - Header: block2Header, - Body: nil, - Justification: &[]byte{0}, - }, - setupChainSync: func(t *testing.T) *chainSync { - ctrl := gomock.NewController(t) - blockStateMock := NewMockBlockState(ctrl) - blockStateMock.EXPECT().HasHeader(block1Header.ParentHash).Return(true, nil) - networkMock := NewMockNetwork(ctrl) - - return &chainSync{ - blockState: blockStateMock, - network: networkMock, - } - }, - }, - "requested_only_justification_but_got_nil": { - wantErr: errNilJustificationInResponse, - errString: "expected justification, received none: " + - block2Header.Hash().String(), - requestedData: messages.RequestedDataJustification, - blockData: &types.BlockData{ - Hash: block2Header.Hash(), - Header: block2Header, - Body: nil, - Justification: nil, - }, - setupChainSync: func(t *testing.T) *chainSync { - ctrl := gomock.NewController(t) - blockStateMock := NewMockBlockState(ctrl) - blockStateMock.EXPECT().HasHeader(block1Header.ParentHash).Return(true, nil) - networkMock := NewMockNetwork(ctrl) - - return &chainSync{ - blockState: blockStateMock, - network: networkMock, - } - }, - }, - } - - for tname, tt := range cases { - tt := tt - t.Run(tname, func(t *testing.T) { - 
t.Parallel() - - err := validateResponseFields(tt.requestedData, []*types.BlockData{tt.blockData}) - require.ErrorIs(t, err, tt.wantErr) - if tt.errString != "" { - require.EqualError(t, err, tt.errString) - } - }) - } -} - -func TestChainSync_isResponseAChain(t *testing.T) { - t.Parallel() - - block1Header := &types.Header{ - ParentHash: common.MustHexToHash("0x00597cb4bb4cc13bf119f6613aec7642d4c06a2e453de53d34aea6f3f1eeb504"), - Number: 2, - } - - block2Header := &types.Header{ - ParentHash: block1Header.Hash(), - Number: 3, - } - - block4Header := &types.Header{ - ParentHash: common.MustHexToHash("0x198616547187613bf119f6613aec7642d4c06a2e453de53d34aea6f390788677"), - Number: 4, - } - - cases := map[string]struct { - expected bool - blockData []*types.BlockData - }{ - "not_a_chain": { - expected: false, - blockData: []*types.BlockData{ - { - Hash: block1Header.Hash(), - Header: block1Header, - Body: &types.Body{}, - Justification: &[]byte{0}, - }, - { - Hash: block2Header.Hash(), - Header: block2Header, - Body: &types.Body{}, - Justification: &[]byte{0}, - }, - { - Hash: block4Header.Hash(), - Header: block4Header, - Body: &types.Body{}, - Justification: &[]byte{0}, - }, - }, - }, - "is_a_chain": { - expected: true, - blockData: []*types.BlockData{ - { - Hash: block1Header.Hash(), - Header: block1Header, - Body: &types.Body{}, - Justification: &[]byte{0}, - }, - { - Hash: block2Header.Hash(), - Header: block2Header, - Body: &types.Body{}, - Justification: &[]byte{0}, - }, - }, - }, - } - - for tname, tt := range cases { - tt := tt - t.Run(tname, func(t *testing.T) { - t.Parallel() - output := isResponseAChain(tt.blockData) - require.Equal(t, tt.expected, output) - }) - } -} - -func TestChainSync_doResponseGrowsTheChain(t *testing.T) { - block1Header := types.NewHeader(common.Hash{}, common.Hash{}, common.Hash{}, 1, types.NewDigest()) - block2Header := types.NewHeader(block1Header.Hash(), common.Hash{}, common.Hash{}, 2, types.NewDigest()) - block3Header := types.NewHeader(block2Header.Hash(), common.Hash{}, common.Hash{}, 3, types.NewDigest()) - block4Header := types.NewHeader(block3Header.Hash(), common.Hash{}, common.Hash{}, 4, types.NewDigest()) - - testcases := map[string]struct { - response []*types.BlockData - ongoingChain []*types.BlockData - startAt uint - exepectedTotal uint32 - expectedOut bool - }{ - // the ongoing chain does not have any data so the response - // can be inserted in the ongoing chain without any problems - "empty_ongoing_chain": { - ongoingChain: []*types.BlockData{}, - expectedOut: true, - }, - - "one_in_response_growing_ongoing_chain_without_check": { - startAt: 1, - exepectedTotal: 3, - // the ongoing chain contains 3 positions, the block number 1 is at position 0 - ongoingChain: []*types.BlockData{ - {Header: types.NewHeader(common.Hash{}, common.Hash{}, common.Hash{}, 1, types.NewDigest())}, - nil, - nil, - }, - - // the response contains the block number 3 which should be placed in position 2 - // in the ongoing chain, which means that no comparison should be done to place - // block number 3 in the ongoing chain - response: []*types.BlockData{ - {Header: types.NewHeader(common.Hash{}, common.Hash{}, common.Hash{}, 3, types.NewDigest())}, - }, - expectedOut: true, - }, - - "one_in_response_growing_ongoing_chain_by_checking_neighbours": { - startAt: 1, - exepectedTotal: 3, - // the ongoing chain contains 3 positions, the block number 1 is at position 0 - ongoingChain: []*types.BlockData{ - {Header: block1Header}, - nil, - {Header: block3Header}, - }, - - 
// the response contains the block number 2 which should be placed in position 1 - // in the ongoing chain, which means that a comparison should be made to check - // if the parent hash of block 2 is the same hash of block 1 - response: []*types.BlockData{ - {Header: block2Header}, - }, - expectedOut: true, - }, - - "one_in_response_failed_to_grow_ongoing_chain": { - startAt: 1, - exepectedTotal: 3, - ongoingChain: []*types.BlockData{ - {Header: block1Header}, - nil, - nil, - }, - response: []*types.BlockData{ - {Header: types.NewHeader(common.Hash{}, common.Hash{}, common.Hash{}, 2, types.NewDigest())}, - }, - expectedOut: false, - }, - - "many_in_response_grow_ongoing_chain_only_left_check": { - startAt: 1, - exepectedTotal: 3, - ongoingChain: []*types.BlockData{ - {Header: block1Header}, - nil, - nil, - nil, - }, - response: []*types.BlockData{ - {Header: block2Header}, - {Header: block3Header}, - }, - expectedOut: true, - }, - - "many_in_response_grow_ongoing_chain_left_right_check": { - startAt: 1, - exepectedTotal: 3, - ongoingChain: []*types.BlockData{ - {Header: block1Header}, - nil, - nil, - {Header: block4Header}, - }, - response: []*types.BlockData{ - {Header: block2Header}, - {Header: block3Header}, - }, - expectedOut: true, - }, - } - - for tname, tt := range testcases { - tt := tt - - t.Run(tname, func(t *testing.T) { - out := doResponseGrowsTheChain(tt.response, tt.ongoingChain, tt.startAt, tt.exepectedTotal) - require.Equal(t, tt.expectedOut, out) - }) - } -} - -func TestChainSync_getHighestBlock(t *testing.T) { - t.Parallel() - - cases := map[string]struct { - expectedHighestBlock uint - wantErr error - chainSyncPeerViewSet *peerViewSet - }{ - "no_peer_view": { - wantErr: errNoPeers, - expectedHighestBlock: 0, - chainSyncPeerViewSet: newPeerViewSet(10), - }, - "highest_block": { - expectedHighestBlock: 500, - chainSyncPeerViewSet: &peerViewSet{ - view: map[peer.ID]peerView{ - peer.ID("peer-A"): { - number: 100, - }, - peer.ID("peer-B"): { - number: 500, - }, - }, - }, - }, - } - - for tname, tt := range cases { - tt := tt - t.Run(tname, func(t *testing.T) { - t.Parallel() - - chainSync := &chainSync{ - peerViewSet: tt.chainSyncPeerViewSet, - } - - highestBlock, err := chainSync.getHighestBlock() - require.ErrorIs(t, err, tt.wantErr) - require.Equal(t, tt.expectedHighestBlock, highestBlock) - }) - } -} -func TestChainSync_BootstrapSync_SuccessfulSync_WithInvalidJusticationBlock(t *testing.T) { - // TODO: https://github.com/ChainSafe/gossamer/issues/3468 - t.Skip() - t.Parallel() - - ctrl := gomock.NewController(t) - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) - mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, - trie.EmptyHash, 0, types.NewDigest()) - - mockNetwork := NewMockNetwork(ctrl) - mockRequestMaker := NewMockRequestMaker(ctrl) - - mockBabeVerifier := NewMockBabeVerifier(ctrl) - mockStorageState := NewMockStorageState(ctrl) - mockImportHandler := NewMockBlockImportHandler(ctrl) - mockTelemetry := NewMockTelemetry(ctrl) - mockFinalityGadget := NewMockFinalityGadget(ctrl) - - // this test expects two workers responding each request with 128 blocks which means - // we should import 256 blocks in total - blockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, 129) - const announceBlock = false - - invalidJustificationBlock := blockResponse.BlockData[90] - invalidJustification := &[]byte{0x01, 0x01, 0x01, 0x02} - 
invalidJustificationBlock.Justification = invalidJustification - - // here we split the whole set in two parts each one will be the "response" for each peer - worker1Response := &messages.BlockResponseMessage{ - BlockData: blockResponse.BlockData[:128], - } - - // the first peer will respond the from the block 1 to 128 so the ensureBlockImportFlow - // will setup the expectations starting from the genesis header until block 128 - ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData[:90], mockBlockState, - mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock) - - errVerifyBlockJustification := errors.New("VerifyBlockJustification mock error") - mockFinalityGadget.EXPECT(). - VerifyBlockJustification( - invalidJustificationBlock.Header.Hash(), - invalidJustificationBlock.Header.Number, - *invalidJustification). - Return(uint64(0), uint64(0), errVerifyBlockJustification) - - // we use gomock.Any since I cannot guarantee which peer picks which request - // but the first call to DoBlockRequest will return the first set and the second - // call will return the second set - mockRequestMaker.EXPECT(). - Do(gomock.Any(), gomock.Any(), &messages.BlockResponseMessage{}). - DoAndReturn(func(peerID, _, response any) any { - responsePtr := response.(*messages.BlockResponseMessage) - *responsePtr = *worker1Response - - fmt.Println("mocked request maker") - return nil - }) - - // setup a chain sync which holds in its peer view map - // 3 peers, each one announce block 129 as its best block number. - // We start this test with genesis block being our best block, so - // we're far behind by 128 blocks, we should execute a bootstrap - // sync request those blocks - const blocksAhead = 128 - cs := setupChainSyncToBootstrapMode(t, blocksAhead, - mockBlockState, mockNetwork, mockRequestMaker, mockBabeVerifier, - mockStorageState, mockImportHandler, mockTelemetry) - - cs.finalityGadget = mockFinalityGadget - - target := cs.peerViewSet.getTarget() - require.Equal(t, uint(blocksAhead), target) - - // include a new worker in the worker pool set, this worker - // should be an available peer that will receive a block request - // the worker pool executes the workers management - cs.workerPool.fromBlockAnnounce(peer.ID("alice")) - //cs.workerPool.fromBlockAnnounce(peer.ID("bob")) - - err := cs.requestMaxBlocksFrom(mockedGenesisHeader, networkInitialSync) - require.ErrorIs(t, err, errVerifyBlockJustification) - - err = cs.workerPool.stop() - require.NoError(t, err) - - // peer should be not in the worker pool - // peer should be in the ignore list - require.Len(t, cs.workerPool.workers, 1) -} diff --git a/dot/sync/fullsync_handle_block.go b/dot/sync/fullsync_handle_block.go index b504b829b7..e12b9b816d 100644 --- a/dot/sync/fullsync_handle_block.go +++ b/dot/sync/fullsync_handle_block.go @@ -41,7 +41,7 @@ type ( // FinalityGadget implements justification verification functionality FinalityGadget interface { - VerifyBlockJustification(common.Hash, []byte) error + VerifyBlockJustification(common.Hash, uint, []byte) (round uint64, setID uint64, err error) } // BlockImportHandler is the interface for the handler of newly imported blocks @@ -94,9 +94,23 @@ func (b *blockImporter) handle(bd *types.BlockData, origin BlockOrigin) (importe // processBlockData processes the BlockData from a BlockResponse and // returns the index of the last BlockData it handled on success, // or the index of the block data that errored on failure. 
-// TODO: https://github.com/ChainSafe/gossamer/issues/3468 func (b *blockImporter) processBlockData(blockData types.BlockData, origin BlockOrigin) error { if blockData.Header != nil { + var ( + hasJustification = blockData.Justification != nil && len(*blockData.Justification) > 0 + round uint64 + setID uint64 + ) + + if hasJustification { + var err error + round, setID, err = b.finalityGadget.VerifyBlockJustification( + blockData.Header.Hash(), blockData.Header.Number, *blockData.Justification) + if err != nil { + return fmt.Errorf("verifying justification: %w", err) + } + } + if blockData.Body != nil { err := b.processBlockDataWithHeaderAndBody(blockData, origin) if err != nil { @@ -104,14 +118,20 @@ func (b *blockImporter) processBlockData(blockData types.BlockData, origin Block } } - if blockData.Justification != nil && len(*blockData.Justification) > 0 { - err := b.handleJustification(blockData.Header, *blockData.Justification) + if hasJustification { + header := blockData.Header + err := b.blockState.SetFinalisedHash(header.Hash(), round, setID) if err != nil { - return fmt.Errorf("handling justification: %w", err) + return fmt.Errorf("setting finalised hash: %w", err) } + err = b.blockState.SetJustification(header.Hash(), *blockData.Justification) + if err != nil { + return fmt.Errorf("setting justification for block number %d: %w", header.Number, err) + } + + return nil } } - err := b.blockState.CompareAndSetBlockData(&blockData) if err != nil { return fmt.Errorf("comparing and setting block data: %w", err) @@ -196,18 +216,3 @@ func (b *blockImporter) handleBlock(block *types.Block) error { return nil } - -func (b *blockImporter) handleJustification(header *types.Header, justification []byte) (err error) { - headerHash := header.Hash() - err = b.finalityGadget.VerifyBlockJustification(headerHash, justification) - if err != nil { - return fmt.Errorf("verifying block number %d justification: %w", header.Number, err) - } - - err = b.blockState.SetJustification(headerHash, justification) - if err != nil { - return fmt.Errorf("setting justification for block number %d: %w", header.Number, err) - } - - return nil -} diff --git a/dot/sync/interfaces.go b/dot/sync/interfaces.go deleted file mode 100644 index 03a03cda8e..0000000000 --- a/dot/sync/interfaces.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "encoding/json" - "sync" - - "github.com/ChainSafe/gossamer/dot/peerset" - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/common" - "github.com/ChainSafe/gossamer/lib/runtime" - rtstorage "github.com/ChainSafe/gossamer/lib/runtime/storage" - "github.com/libp2p/go-libp2p/core/peer" -) - -// BlockState is the interface for the block state -type BlockState interface { - BestBlockHeader() (*types.Header, error) - BestBlockNumber() (number uint, err error) - CompareAndSetBlockData(bd *types.BlockData) error - GetBlockBody(common.Hash) (*types.Body, error) - GetHeader(common.Hash) (*types.Header, error) - HasHeader(hash common.Hash) (bool, error) - Range(startHash, endHash common.Hash) (hashes []common.Hash, err error) - RangeInMemory(start, end common.Hash) ([]common.Hash, error) - GetReceipt(common.Hash) ([]byte, error) - GetMessageQueue(common.Hash) ([]byte, error) - GetJustification(common.Hash) ([]byte, error) - SetFinalisedHash(hash common.Hash, round uint64, setID uint64) error - SetJustification(hash common.Hash, data []byte) error - 
GetHashByNumber(blockNumber uint) (common.Hash, error) - GetBlockByHash(common.Hash) (*types.Block, error) - GetRuntime(blockHash common.Hash) (runtime runtime.Instance, err error) - StoreRuntime(blockHash common.Hash, runtime runtime.Instance) - GetHighestFinalisedHeader() (*types.Header, error) - GetFinalisedNotifierChannel() chan *types.FinalisationInfo - GetHeaderByNumber(num uint) (*types.Header, error) - GetAllBlocksAtNumber(num uint) ([]common.Hash, error) - IsDescendantOf(parent, child common.Hash) (bool, error) - - IsPaused() bool - Pause() error -} - -// StorageState is the interface for the storage state -type StorageState interface { - TrieState(root *common.Hash) (*rtstorage.TrieState, error) - sync.Locker -} - -// TransactionState is the interface for transaction queue methods -type TransactionState interface { - RemoveExtrinsic(ext types.Extrinsic) -} - -// BabeVerifier deals with BABE block verification -type BabeVerifier interface { - VerifyBlock(header *types.Header) error -} - -// FinalityGadget implements justification verification functionality -type FinalityGadget interface { - VerifyBlockJustification(finalizedHash common.Hash, finalizedNumber uint, encoded []byte) ( - round uint64, setID uint64, err error) -} - -// BlockImportHandler is the interface for the handler of newly imported blocks -type BlockImportHandler interface { - HandleBlockImport(block *types.Block, state *rtstorage.TrieState, announce bool) error -} - -// Network is the interface for the network -type Network interface { - // Peers returns a list of currently connected peers - Peers() []common.PeerInfo - - // ReportPeer reports peer based on the peer behaviour. - ReportPeer(change peerset.ReputationChange, p peer.ID) - - AllConnectedPeersIDs() []peer.ID - - BlockAnnounceHandshake(*types.Header) error -} - -// Telemetry is the telemetry client to send telemetry messages. -type Telemetry interface { - SendMessage(msg json.Marshaler) -} diff --git a/dot/sync/message_integration_test.go b/dot/sync/message_integration_test.go index 87d46c7d87..3a9e329a43 100644 --- a/dot/sync/message_integration_test.go +++ b/dot/sync/message_integration_test.go @@ -129,10 +129,11 @@ func newFullSyncService(t *testing.T) *SyncService { mockBabeVerifier.EXPECT().VerifyBlock(gomock.AssignableToTypeOf(&types.Header{})).AnyTimes() mockFinalityGadget := NewMockFinalityGadget(ctrl) - mockFinalityGadget.EXPECT().VerifyBlockJustification(gomock.AssignableToTypeOf(common.Hash{}), - gomock.AssignableToTypeOf([]byte{})).DoAndReturn(func(hash common.Hash, justification []byte) error { - return nil - }).AnyTimes() + mockFinalityGadget.EXPECT(). + VerifyBlockJustification(gomock.AssignableToTypeOf(common.Hash{}), + gomock.AssignableToTypeOf(uint(0)), gomock.AssignableToTypeOf([]byte{})). + Return(uint64(1), uint64(1), nil). 
+ AnyTimes() mockNetwork := NewMockNetwork(ctrl) diff --git a/dot/sync/service.go b/dot/sync/service.go index ba880c86fa..eb31414c01 100644 --- a/dot/sync/service.go +++ b/dot/sync/service.go @@ -47,6 +47,7 @@ type BlockState interface { GetReceipt(common.Hash) ([]byte, error) GetMessageQueue(common.Hash) ([]byte, error) GetJustification(common.Hash) ([]byte, error) + SetFinalisedHash(hash common.Hash, round uint64, setID uint64) error SetJustification(hash common.Hash, data []byte) error GetHashByNumber(blockNumber uint) (common.Hash, error) GetBlockByHash(common.Hash) (*types.Block, error) diff --git a/dot/sync/syncer_integration_test.go b/dot/sync/syncer_integration_test.go deleted file mode 100644 index 7361a5280e..0000000000 --- a/dot/sync/syncer_integration_test.go +++ /dev/null @@ -1,213 +0,0 @@ -//go:build integration - -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "errors" - "path/filepath" - "testing" - - "github.com/ChainSafe/gossamer/dot/state" - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/internal/database" - "github.com/ChainSafe/gossamer/internal/log" - "github.com/ChainSafe/gossamer/lib/common" - "github.com/ChainSafe/gossamer/lib/genesis" - runtime "github.com/ChainSafe/gossamer/lib/runtime" - rtstorage "github.com/ChainSafe/gossamer/lib/runtime/storage" - wazero_runtime "github.com/ChainSafe/gossamer/lib/runtime/wazero" - "github.com/ChainSafe/gossamer/lib/utils" - "github.com/ChainSafe/gossamer/pkg/trie" - "github.com/ChainSafe/gossamer/tests/utils/config" - - "github.com/stretchr/testify/require" - "go.uber.org/mock/gomock" -) - -func newTestSyncer(t *testing.T) *Service { - ctrl := gomock.NewController(t) - - mockTelemetryClient := NewMockTelemetry(ctrl) - mockTelemetryClient.EXPECT().SendMessage(gomock.Any()).AnyTimes() - - wazero_runtime.DefaultTestLogLvl = log.Warn - - cfg := &Config{} - testDatadirPath := t.TempDir() - - scfg := state.Config{ - Path: testDatadirPath, - LogLevel: log.Info, - Telemetry: mockTelemetryClient, - GenesisBABEConfig: config.BABEConfigurationTestDefault, - } - stateSrvc := state.NewService(scfg) - stateSrvc.UseMemDB() - - gen, genTrie, genHeader := newWestendDevGenesisWithTrieAndHeader(t) - err := stateSrvc.Initialise(&gen, &genHeader, genTrie) - require.NoError(t, err) - - err = stateSrvc.Start() - require.NoError(t, err) - - if cfg.BlockState == nil { - cfg.BlockState = stateSrvc.Block - } - - if cfg.StorageState == nil { - cfg.StorageState = stateSrvc.Storage - } - - // initialise runtime - genState := rtstorage.NewTrieState(genTrie) - - rtCfg := wazero_runtime.Config{ - Storage: genState, - LogLvl: log.Critical, - } - - if stateSrvc != nil { - rtCfg.NodeStorage.BaseDB = stateSrvc.Base - } else { - rtCfg.NodeStorage.BaseDB, err = database.LoadDatabase(filepath.Join(testDatadirPath, "offline_storage"), false) - require.NoError(t, err) - } - - rtCfg.CodeHash, err = cfg.StorageState.(*state.InmemoryStorageState).LoadCodeHash(nil) - require.NoError(t, err) - - instance, err := wazero_runtime.NewRuntimeFromGenesis(rtCfg) - require.NoError(t, err) - - bestBlockHash := cfg.BlockState.(*state.BlockState).BestBlockHash() - cfg.BlockState.(*state.BlockState).StoreRuntime(bestBlockHash, instance) - blockImportHandler := NewMockBlockImportHandler(ctrl) - blockImportHandler.EXPECT().HandleBlockImport(gomock.AssignableToTypeOf(&types.Block{}), - gomock.AssignableToTypeOf(&rtstorage.TrieState{}), false).DoAndReturn( - func(block *types.Block, ts 
*rtstorage.TrieState, _ bool) error { - // store updates state trie nodes in database - if err = stateSrvc.Storage.StoreTrie(ts, &block.Header); err != nil { - logger.Warnf("failed to store state trie for imported block %s: %s", block.Header.Hash(), err) - return err - } - - // store block in database - err = stateSrvc.Block.AddBlock(block) - require.NoError(t, err) - - stateSrvc.Block.StoreRuntime(block.Header.Hash(), instance) - logger.Debugf("imported block %s and stored state trie with root %s", - block.Header.Hash(), ts.Trie().MustHash()) - return nil - }).AnyTimes() - cfg.BlockImportHandler = blockImportHandler - - cfg.TransactionState = stateSrvc.Transaction - mockBabeVerifier := NewMockBabeVerifier(ctrl) - mockBabeVerifier.EXPECT().VerifyBlock(gomock.AssignableToTypeOf(&types.Header{})).AnyTimes() - cfg.BabeVerifier = mockBabeVerifier - cfg.LogLvl = log.Trace - mockFinalityGadget := NewMockFinalityGadget(ctrl) - mockFinalityGadget.EXPECT().VerifyBlockJustification(gomock.AssignableToTypeOf(common.Hash{}), - gomock.AssignableToTypeOf(uint(0)), gomock.AssignableToTypeOf([]byte{})). - DoAndReturn(func(hash common.Hash, justification []byte) error { - return nil - }).AnyTimes() - - cfg.FinalityGadget = mockFinalityGadget - cfg.Network = NewMockNetwork(ctrl) - cfg.Telemetry = mockTelemetryClient - cfg.RequestMaker = NewMockRequestMaker(ctrl) - syncer, err := NewService(cfg) - require.NoError(t, err) - return syncer -} - -func newWestendDevGenesisWithTrieAndHeader(t *testing.T) ( - gen genesis.Genesis, genesisTrie trie.Trie, genesisHeader types.Header) { - t.Helper() - - genesisPath := utils.GetWestendDevRawGenesisPath(t) - genesisPtr, err := genesis.NewGenesisFromJSONRaw(genesisPath) - require.NoError(t, err) - gen = *genesisPtr - - genesisTrie, err = runtime.NewTrieFromGenesis(gen) - require.NoError(t, err) - - parentHash := common.NewHash([]byte{0}) - stateRoot := genesisTrie.MustHash() - extrinsicRoot := trie.EmptyHash - const number = 0 - digest := types.NewDigest() - genesisHeaderPtr := types.NewHeader(parentHash, - stateRoot, extrinsicRoot, number, digest) - genesisHeader = *genesisHeaderPtr - - return gen, genesisTrie, genesisHeader -} - -func TestHighestBlock(t *testing.T) { - type input struct { - highestBlock uint - err error - } - type output struct { - highestBlock uint - } - type test struct { - name string - in input - out output - } - tests := []test{ - { - name: "when_*chainSync.getHighestBlock()_returns_0,_error_should_return_0", - in: input{ - highestBlock: 0, - err: errors.New("fake error"), - }, - out: output{ - highestBlock: 0, - }, - }, - { - name: "when_*chainSync.getHighestBlock()_returns_0,_nil_should_return_0", - in: input{ - highestBlock: 0, - err: nil, - }, - out: output{ - highestBlock: 0, - }, - }, - { - name: "when_*chainSync.getHighestBlock()_returns_50,_nil_should_return_50", - in: input{ - highestBlock: 50, - err: nil, - }, - out: output{ - highestBlock: 50, - }, - }, - } - for _, ts := range tests { - t.Run(ts.name, func(t *testing.T) { - s := newTestSyncer(t) - - ctrl := gomock.NewController(t) - chainSync := NewMockChainSync(ctrl) - chainSync.EXPECT().getHighestBlock().Return(ts.in.highestBlock, ts.in.err) - - s.chainSync = chainSync - - result := s.HighestBlock() - require.Equal(t, result, ts.out.highestBlock) - }) - } -} From d905e7dacf49f2a667169938068e8a99a38178a5 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Fri, 13 Sep 2024 17:09:50 -0400 Subject: [PATCH 46/74] chore: use snake case on testing names --- dot/sync/unready_blocks_test.go 
| 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/dot/sync/unready_blocks_test.go b/dot/sync/unready_blocks_test.go index 66356cfc67..b15019c362 100644 --- a/dot/sync/unready_blocks_test.go +++ b/dot/sync/unready_blocks_test.go @@ -11,7 +11,7 @@ import ( ) func TestUnreadyBlocks_removeIrrelevantFragments(t *testing.T) { - t.Run("removing all disjoint fragment", func(t *testing.T) { + t.Run("removing_all_disjoint_fragment", func(t *testing.T) { ub := newUnreadyBlocks() ub.disjointFragments = [][]*types.BlockData{ { @@ -40,7 +40,7 @@ func TestUnreadyBlocks_removeIrrelevantFragments(t *testing.T) { require.Empty(t, ub.disjointFragments) }) - t.Run("removing irrelevant fragments", func(t *testing.T) { + t.Run("removing_irrelevant_fragments", func(t *testing.T) { ub := newUnreadyBlocks() ub.disjointFragments = [][]*types.BlockData{ // first fragment @@ -150,7 +150,7 @@ func TestUnreadyBlocks_removeIrrelevantFragments(t *testing.T) { require.Equal(t, ub.disjointFragments[1], expectedThirdFragment) }) - t.Run("keep all fragments", func(t *testing.T) { + t.Run("keep_all_fragments", func(t *testing.T) { ub := newUnreadyBlocks() ub.disjointFragments = [][]*types.BlockData{ { From 8f86d9bafdc91a5f533c4dce60dfded3d3917f40 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Fri, 13 Sep 2024 17:17:50 -0400 Subject: [PATCH 47/74] chore: TestBuildRequestMessage --- scripts/retrieve_block/retrieve_block_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/retrieve_block/retrieve_block_test.go b/scripts/retrieve_block/retrieve_block_test.go index 2324fee644..6a1eca9e49 100644 --- a/scripts/retrieve_block/retrieve_block_test.go +++ b/scripts/retrieve_block/retrieve_block_test.go @@ -20,7 +20,7 @@ func TestBuildRequestMessage(t *testing.T) { { arg: "10", expected: messages.NewBlockRequest( - *variadic.Uint32OrHashFrom(uint(10)), 1, + *variadic.Uint32OrHashFrom(uint32(10)), 1, messages.BootstrapRequestData, messages.Ascending), }, { @@ -37,7 +37,7 @@ func TestBuildRequestMessage(t *testing.T) { }, { arg: "1,asc,20", - expected: messages.NewBlockRequest(*variadic.Uint32OrHashFrom(uint(1)), + expected: messages.NewBlockRequest(*variadic.Uint32OrHashFrom(uint32(1)), 20, messages.BootstrapRequestData, messages.Ascending), }, { @@ -48,7 +48,7 @@ func TestBuildRequestMessage(t *testing.T) { }, { arg: "1,desc,20", - expected: messages.NewBlockRequest(*variadic.Uint32OrHashFrom(uint(1)), + expected: messages.NewBlockRequest(*variadic.Uint32OrHashFrom(uint32(1)), 20, messages.BootstrapRequestData, messages.Descending), }, } From ed6c755bdc5c86f948b85cb60395d6c384e2b034 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Fri, 13 Sep 2024 17:44:31 -0400 Subject: [PATCH 48/74] chore: fix sync test --- dot/network/messages/block.go | 2 +- dot/sync/fullsync.go | 17 +++++---- dot/sync/fullsync_test.go | 66 ++++++++++++++++++++++------------- 3 files changed, 53 insertions(+), 32 deletions(-) diff --git a/dot/network/messages/block.go b/dot/network/messages/block.go index 25d6d13219..e67b0a381a 100644 --- a/dot/network/messages/block.go +++ b/dot/network/messages/block.go @@ -80,7 +80,7 @@ func NewAscendingBlockRequests(startNumber, targetNumber uint32, requestedData b diff := targetNumber - (startNumber - 1) // start and end block are the same, just request 1 block - if diff == 0 { + if diff == 1 { return []*BlockRequestMessage{ NewBlockRequest(*variadic.Uint32OrHashFrom(startNumber), 1, requestedData, Ascending), } diff --git a/dot/sync/fullsync.go 
b/dot/sync/fullsync.go
index 7b17455aa4..ae24a44076 100644
--- a/dot/sync/fullsync.go
+++ b/dot/sync/fullsync.go
@@ -99,12 +99,14 @@ func (f *FullSyncStrategy) NextActions() ([]*syncTask, error) {
 	f.startedAt = time.Now()
 	f.syncedBlocks = 0
 
-	messagesToSend := []*messages.BlockRequestMessage{}
-	for f.requestQueue.Len() > 0 {
+	reqsFromQueue := []*messages.BlockRequestMessage{}
+	for i := 0; i < int(f.numOfTasks); i++ {
 		msg, ok := f.requestQueue.PopFront()
-		if ok {
-			messagesToSend = append(messagesToSend, msg)
+		if !ok {
+			break
 		}
+
+		reqsFromQueue = append(reqsFromQueue, msg)
 	}
 
 	currentTarget := f.peers.getTarget()
@@ -117,11 +119,11 @@ func (f *FullSyncStrategy) NextActions() ([]*syncTask, error) {
 	// in the node's pov we are not legging behind so there's nothing to do
 	// or we didn't receive block announces, so lets ask for more blocks
 	if uint32(bestBlockHeader.Number) >= currentTarget {
-		return f.createTasks(messagesToSend), nil
+		return f.createTasks(reqsFromQueue), nil
 	}
 
 	startRequestAt := bestBlockHeader.Number + 1
-	targetBlockNumber := startRequestAt + maxRequestsAllowed*127
+	targetBlockNumber := startRequestAt + uint(f.numOfTasks)*127
 
 	if targetBlockNumber > uint(currentTarget) {
 		targetBlockNumber = uint(currentTarget)
@@ -130,8 +132,9 @@ func (f *FullSyncStrategy) NextActions() ([]*syncTask, error) {
 	ascendingBlockRequests := messages.NewAscendingBlockRequests(
 		uint32(startRequestAt), uint32(targetBlockNumber),
 		messages.BootstrapRequestData)
+	reqsFromQueue = append(reqsFromQueue, ascendingBlockRequests...)
 
-	return f.createTasks(ascendingBlockRequests), nil
+	return f.createTasks(reqsFromQueue), nil
 }
 
 func (f *FullSyncStrategy) createTasks(requests []*messages.BlockRequestMessage) []*syncTask {
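The NextActions hunk above bounds how much work is pulled from the request queue: it pops at most numOfTasks queued requests and only then tops the batch up with ascending bootstrap requests towards the target. A minimal, self-contained sketch of that drain-then-fill flow, using simplified stand-in types rather than the actual gossamer API (requires Go 1.21+ for the min builtin):

package main

import "fmt"

// request is a simplified stand-in for *messages.BlockRequestMessage.
type request struct{ start, max uint32 }

// nextRequests pops at most numOfTasks queued requests, then fills the
// remaining gap between best and target with ascending requests of up
// to 127 blocks each, mirroring the logic of the hunk above.
func nextRequests(queue []request, numOfTasks int, best, target uint32) []request {
	reqs := make([]request, 0, numOfTasks)
	for i := 0; i < numOfTasks && len(queue) > 0; i++ {
		reqs = append(reqs, queue[0])
		queue = queue[1:]
	}
	if best >= target {
		return reqs // nothing to catch up on, only queued work remains
	}
	for start := best + 1; start <= target; start += 127 {
		reqs = append(reqs, request{start: start, max: min(uint32(127), target-start+1)})
	}
	return reqs
}

func main() {
	queued := []request{{start: 129, max: 1}}
	fmt.Println(nextRequests(queued, 2, 0, 17)) // [{129 1} {1 17}]
}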
diff --git a/dot/sync/fullsync_test.go b/dot/sync/fullsync_test.go
index 5127a01771..654f3273ab 100644
--- a/dot/sync/fullsync_test.go
+++ b/dot/sync/fullsync_test.go
@@ -69,7 +69,7 @@ func TestFullSyncNextActions(t *testing.T) {
 		task, err := fs.NextActions()
 		require.NoError(t, err)
 
-		require.Len(t, task, 1)
+		require.Len(t, task, int(maxRequestsAllowed))
 		request := task[0].request.(*messages.BlockRequestMessage)
 		require.Equal(t, uint32(1), request.StartingBlock.Uint32())
 		require.Equal(t, uint32(128), *request.Max)
@@ -85,51 +85,61 @@ func TestFullSyncNextActions(t *testing.T) {
 		expectedQueueLen int
 		expectedTasks    []*messages.BlockRequestMessage
 	}{
-		"should_have_one_from_request_queue": {
+		"should_get_all_from_request_queue": {
 			setupRequestQueue: func(t *testing.T) *requestsQueue[*messages.BlockRequestMessage] {
-				request := messages.NewAscendingBlockRequests(
-					129, 129+127,
-					messages.BootstrapRequestData)
+				// insert a task to retrieve the block body of a single block
+				request := messages.NewAscendingBlockRequests(129, 129, messages.RequestedDataBody)
 				require.Len(t, request, 1)
 
 				rq := &requestsQueue[*messages.BlockRequestMessage]{queue: list.New()}
-				for _, req := range request {
-					rq.PushBack(req)
-				}
+				rq.PushBack(request[0])
 				return rq
 			},
 			expectedQueueLen: 0,
 			expectedTasks: []*messages.BlockRequestMessage{
 				{
-					RequestedData: messages.BootstrapRequestData,
+					RequestedData: messages.RequestedDataBody,
 					StartingBlock: *variadic.Uint32OrHashFrom(uint32(129)),
 					Direction:     messages.Ascending,
-					Max:           refTo(128),
+					Max:           refTo(1),
+				},
+				{
+					RequestedData: messages.BootstrapRequestData,
+					StartingBlock: *variadic.Uint32OrHashFrom(uint32(1)),
+					Direction:     messages.Ascending,
+					Max:           refTo(127),
 				},
 			},
 		},
-		// creating a amount of 4 requests, but since we have a max num of
-		// request set to 2 (see FullSyncConfig) we should only have 2 tasks
-		"four_items_on_queue_should_pop_only_one": {
+		"should_remain_1_in_request_queue": {
 			setupRequestQueue: func(t *testing.T) *requestsQueue[*messages.BlockRequestMessage] {
-				request := messages.NewAscendingBlockRequests(
-					129, 129+(4*127),
-					messages.BootstrapRequestData)
-				require.Len(t, request, 4)
-
 				rq := &requestsQueue[*messages.BlockRequestMessage]{queue: list.New()}
-				for _, req := range request {
-					rq.PushBack(req)
-				}
+
+				fstReqByHash := messages.NewBlockRequest(
+					*variadic.Uint32OrHashFrom(common.BytesToHash([]byte{0, 1, 1, 2})),
+					1, messages.RequestedDataBody, messages.Ascending)
+				rq.PushBack(fstReqByHash)
+
+				sndReqByHash := messages.NewBlockRequest(
+					*variadic.Uint32OrHashFrom(common.BytesToHash([]byte{1, 2, 2, 4})),
+					1, messages.RequestedDataBody, messages.Ascending)
+				rq.PushBack(sndReqByHash)
+
 				return rq
 			},
-			expectedQueueLen: 3,
+			expectedQueueLen: 1,
 			expectedTasks: []*messages.BlockRequestMessage{
+				{
+					RequestedData: messages.RequestedDataBody,
+					StartingBlock: *variadic.Uint32OrHashFrom(common.BytesToHash([]byte{0, 1, 1, 2})),
+					Direction:     messages.Ascending,
+					Max:           refTo(1),
+				},
 				{
 					RequestedData: messages.BootstrapRequestData,
-					StartingBlock: *variadic.Uint32OrHashFrom(uint32(129)),
+					StartingBlock: *variadic.Uint32OrHashFrom(uint32(1)),
 					Direction:     messages.Ascending,
-					Max:           refTo(128),
+					Max:           refTo(127),
 				},
 			},
 		},
@@ -140,6 +150,14 @@ func TestFullSyncNextActions(t *testing.T) {
 		t.Run(tname, func(t *testing.T) {
 			fs := NewFullSyncStrategy(&FullSyncConfig{})
 			fs.requestQueue = tt.setupRequestQueue(t)
+			fs.numOfTasks = 1
+
+			ctrl := gomock.NewController(t)
+			mockBlockState := NewMockBlockState(ctrl)
+			mockBlockState.EXPECT().
+				BestBlockHeader().
+				Return(&types.Header{Number: 0}, nil)
+			fs.blockState = mockBlockState
 
 			// introduce a peer and a target
 			err := fs.OnBlockAnnounceHandshake(peer.ID("peer-A"), &network.BlockAnnounceHandshake{

From 9dff75a8bac05296d4f47bde09183d6518b2c049 Mon Sep 17 00:00:00 2001
From: EclesioMeloJunior
Date: Tue, 17 Sep 2024 18:19:59 -0400
Subject: [PATCH 49/74] chore: address comments and lint

---
 dot/network/state.go      |  2 ++
 dot/services.go           |  9 +++--
 dot/sync/fullsync.go      | 15 ++++----
 dot/sync/request_queue.go |  6 ++--
 dot/sync/service.go       | 72 ++++++++++++++++++---------------------
 dot/sync/worker_pool.go   |  2 ++
 6 files changed, 52 insertions(+), 54 deletions(-)

diff --git a/dot/network/state.go b/dot/network/state.go
index 1358c9e1be..dc2c2d1f56 100644
--- a/dot/network/state.go
+++ b/dot/network/state.go
@@ -35,6 +35,8 @@ type Syncer interface {
 	// CreateBlockResponse is called upon receipt of a BlockRequestMessage to create the response
 	CreateBlockResponse(peer.ID, *messages.BlockRequestMessage) (*messages.BlockResponseMessage, error)
 
+	// OnConnectionClosed should be triggered whenever Gossamer closes a connection with another
+	// peer, normally used when the peer reputation is too low.
OnConnectionClosed(peer.ID) } diff --git a/dot/services.go b/dot/services.go index 8dca45d357..11d57cac0d 100644 --- a/dot/services.go +++ b/dot/services.go @@ -37,6 +37,8 @@ import ( wazero_runtime "github.com/ChainSafe/gossamer/lib/runtime/wazero" ) +const blockRequestTimeout = 20 * time.Second + // BlockProducer to produce blocks type BlockProducer interface { Pause() error @@ -510,11 +512,8 @@ func (nodeBuilder) newSyncService(config *cfg.Config, st *state.Service, fg sync return nil, err } - const blockRequestTimeout = 20 * time.Second - requestMaker := net.GetRequestResponseProtocol( - network.SyncID, - blockRequestTimeout, - network.MaxBlockResponseSize) + requestMaker := net.GetRequestResponseProtocol(network.SyncID, + blockRequestTimeout, network.MaxBlockResponseSize) syncCfg := &sync.FullSyncConfig{ BlockState: st.Block, diff --git a/dot/sync/fullsync.go b/dot/sync/fullsync.go index ae24a44076..9c142936ce 100644 --- a/dot/sync/fullsync.go +++ b/dot/sync/fullsync.go @@ -100,7 +100,7 @@ func (f *FullSyncStrategy) NextActions() ([]*syncTask, error) { f.syncedBlocks = 0 reqsFromQueue := []*messages.BlockRequestMessage{} - for i := 0; i < int(f.numOfTasks); i++ { + for i := 0; i < f.numOfTasks; i++ { msg, ok := f.requestQueue.PopFront() if !ok { break @@ -277,9 +277,9 @@ func (f *FullSyncStrategy) IsFinished(results []*syncTaskResult) (bool, []Change func (f *FullSyncStrategy) ShowMetrics() { totalSyncAndImportSeconds := time.Since(f.startedAt).Seconds() bps := float64(f.syncedBlocks) / totalSyncAndImportSeconds - logger.Infof("⛓️ synced %d blocks, disjoint fragments %d, incomplete blocks %d, "+ + logger.Infof("⛓️ synced %d blocks, tasks on queue %d, disjoint fragments %d, incomplete blocks %d, "+ "took: %.2f seconds, bps: %.2f blocks/second, target block number #%d", - f.syncedBlocks, len(f.unreadyBlocks.disjointFragments), len(f.unreadyBlocks.incompleteBlocks), + f.syncedBlocks, f.requestQueue.Len(), len(f.unreadyBlocks.disjointFragments), len(f.unreadyBlocks.incompleteBlocks), totalSyncAndImportSeconds, bps, f.peers.getTarget()) } @@ -349,9 +349,9 @@ func (f *FullSyncStrategy) OnBlockAnnounce(from peer.ID, msg *network.BlockAnnou // if we still far from aproaching the calculated target // then we can ignore the block announce - ratioOfCompleteness := (bestBlockHeader.Number / uint(f.peers.getTarget())) * 100 - logger.Infof("sync: ratio of completeness: %d", ratioOfCompleteness) - if ratioOfCompleteness < 80 { + mx := max(blockAnnounceHeader.Number, bestBlockHeader.Number) + mn := min(blockAnnounceHeader.Number, bestBlockHeader.Number) + if (mx - mn) > messages.MaxBlocksInResponse { return true, nil, nil } @@ -368,9 +368,10 @@ func (f *FullSyncStrategy) OnBlockAnnounce(from peer.ID, msg *network.BlockAnnou request := messages.NewBlockRequest(*variadic.Uint32OrHashFrom(blockAnnounceHeaderHash), 1, messages.RequestedDataBody+messages.RequestedDataJustification, messages.Ascending) f.requestQueue.PushBack(request) + } else { + logger.Infof("announced block already exists #%d (%s)", blockAnnounceHeader.Number, blockAnnounceHeaderHash.Short()) } - logger.Infof("announced block already exists #%d (%s)", blockAnnounceHeader.Number, blockAnnounceHeaderHash.Short()) return true, &Change{ who: from, rep: peerset.ReputationChange{ diff --git a/dot/sync/request_queue.go b/dot/sync/request_queue.go index 85a387c4fb..72f6030c4b 100644 --- a/dot/sync/request_queue.go +++ b/dot/sync/request_queue.go @@ -32,10 +32,8 @@ func (r *requestsQueue[M]) PopFront() (value M, ok bool) { return e.Value.(M), 
true } -func (r *requestsQueue[M]) PushBack(message ...M) { +func (r *requestsQueue[M]) PushBack(message M) { r.mu.Lock() defer r.mu.Unlock() - for _, m := range message { - r.queue.PushBack(m) - } + r.queue.PushBack(message) } diff --git a/dot/sync/service.go b/dot/sync/service.go index eb31414c01..97d7f0c51b 100644 --- a/dot/sync/service.go +++ b/dot/sync/service.go @@ -154,8 +154,6 @@ func (s *SyncService) waitWorkers() { } func (s *SyncService) Start() error { - s.waitWorkers() - s.wg.Add(1) go s.runSyncEngine() return nil @@ -219,17 +217,26 @@ func (s *SyncService) HighestBlock() uint { func (s *SyncService) runSyncEngine() { defer s.wg.Done() + s.waitWorkers() + logger.Infof("starting sync engine with strategy: %T", s.currentStrategy) -lockAndStart: - s.mu.Lock() - logger.Info("starting process to acquire more blocks") + for { + select { + case <-s.stopCh: + return + case <-time.After(s.slotDuration): + } - select { - case <-s.stopCh: - return - default: + s.runStrategy() } +} + +func (s *SyncService) runStrategy() { + s.mu.Lock() + defer s.mu.Unlock() + + logger.Tracef("running strategy: %T", s.currentStrategy) finalisedHeader, err := s.blockState.GetHighestFinalisedHeader() if err != nil { @@ -258,41 +265,30 @@ lockAndStart: return } + logger.Tracef("amount of tasks to process: %d", len(tasks)) if len(tasks) == 0 { - goto loopBack + return } - { - results := s.workerPool.submitRequests(tasks) - done, repChanges, peersToIgnore, err := s.currentStrategy.IsFinished(results) - if err != nil { - logger.Criticalf("current sync strategy failed with: %s", err.Error()) - return - } - - for _, change := range repChanges { - s.network.ReportPeer(change.rep, change.who) - } + results := s.workerPool.submitRequests(tasks) + done, repChanges, peersToIgnore, err := s.currentStrategy.IsFinished(results) + if err != nil { + logger.Criticalf("current sync strategy failed with: %s", err.Error()) + return + } - for _, block := range peersToIgnore { - s.workerPool.ignorePeerAsWorker(block) - } + for _, change := range repChanges { + s.network.ReportPeer(change.rep, change.who) + } - s.currentStrategy.ShowMetrics() + for _, block := range peersToIgnore { + s.workerPool.ignorePeerAsWorker(block) + } - if done { - if s.defaultStrategy == nil { - logger.Criticalf("nil default strategy") - return - } + s.currentStrategy.ShowMetrics() + logger.Trace("finish process to acquire more blocks") - s.currentStrategy = s.defaultStrategy - } + if done { + s.currentStrategy = s.defaultStrategy } - -loopBack: - logger.Info("finish process to acquire more blocks") - s.mu.Unlock() - time.Sleep(s.slotDuration) - goto lockAndStart } diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index 574973f21b..88fbd6bfc5 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -131,6 +131,7 @@ func (s *syncWorkerPool) submitRequests(tasks []*syncTask) []*syncTaskResult { go func(expectedResults int) { defer wg.Done() var taskResults []*syncTaskResult + for result := range results { taskResults = append(taskResults, result) if len(taskResults) == expectedResults { @@ -159,6 +160,7 @@ func executeTask(task *syncTask, workerPool chan peer.ID, failedTasks chan *sync failedTasks <- task } else { logger.Infof("[FINISHED] worker %s, request: %s", worker, task.request) + workerPool <- worker results <- &syncTaskResult{ who: worker, completed: true, From cc71723bb4b13f0d012a052fb3572341f1b63414 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Tue, 17 Sep 2024 18:46:00 -0400 Subject: [PATCH 50/74] chore: use 
`GossipSuccessValue` when a good block announce is received
---
 dot/sync/fullsync.go      |  4 +--
 dot/sync/fullsync_test.go | 74 +++++++++++++++++++++++++++++++++------
 2 files changed, 73 insertions(+), 5 deletions(-)

diff --git a/dot/sync/fullsync.go b/dot/sync/fullsync.go
index 9c142936ce..0a7fdaf7d5 100644
--- a/dot/sync/fullsync.go
+++ b/dot/sync/fullsync.go
@@ -375,8 +375,8 @@ func (f *FullSyncStrategy) OnBlockAnnounce(from peer.ID, msg *network.BlockAnnou
 	return true, &Change{
 		who: from,
 		rep: peerset.ReputationChange{
-			Value:  peerset.NotRelevantBlockAnnounceValue,
-			Reason: peerset.NotRelevantBlockAnnounceReason,
+			Value:  peerset.GossipSuccessValue,
+			Reason: peerset.GossipSuccessReason,
 		},
 	}, nil
 }
diff --git a/dot/sync/fullsync_test.go b/dot/sync/fullsync_test.go
index 654f3273ab..d96bd2e805 100644
--- a/dot/sync/fullsync_test.go
+++ b/dot/sync/fullsync_test.go
@@ -290,7 +290,7 @@ func TestFullSyncIsFinished(t *testing.T) {
 }
 
 func TestFullSyncBlockAnnounce(t *testing.T) {
-	t.Run("announce_a_block_without_any_commom_ancestor", func(t *testing.T) {
+	t.Run("announce_a_far_block_without_any_common_ancestor", func(t *testing.T) {
 		highestFinalizedHeader := &types.Header{
 			ParentHash:     common.BytesToHash([]byte{0}),
 			StateRoot:      common.BytesToHash([]byte{3, 3, 3, 3}),
			ExtrinsicsRoot: common.BytesToHash([]byte{4, 4, 4, 4}),
			Number:         0,
			Digest:         types.NewDigest(),
		}
@@ -307,8 +307,12 @@ func TestFullSyncBlockAnnounce(t *testing.T) {
 			Return(highestFinalizedHeader, nil)
 
 		mockBlockState.EXPECT().
-			HasHeader(gomock.AnyOf(common.Hash{})).
-			Return(false, nil)
+			BestBlockHeader().
+			Return(highestFinalizedHeader, nil)
 
 		fsCfg := &FullSyncConfig{
 			BlockState: mockBlockState,
@@ -327,6 +331,8 @@ func TestFullSyncBlockAnnounce(t *testing.T) {
 		err := fs.OnBlockAnnounceHandshake(firstPeer, firstHandshake)
 		require.NoError(t, err)
 
+		// still far from approaching the calculated target
+		// then we can ignore the block announce
 		firstBlockAnnounce := &network.BlockAnnounceMessage{
 			ParentHash: common.BytesToHash([]byte{0, 1, 2}),
 			Number:     1024,
@@ -339,5 +345,67 @@ func TestFullSyncBlockAnnounce(t *testing.T) {
 		_, rep, err := fs.OnBlockAnnounce(firstPeer, firstBlockAnnounce)
 		require.NoError(t, err)
 		require.Nil(t, rep)
+		require.Zero(t, fs.requestQueue.Len())
+	})
+
+	t.Run("announce_closer_valid_block_without_any_common_ancestor", func(t *testing.T) {
+		highestFinalizedHeader := &types.Header{
+			ParentHash:     common.BytesToHash([]byte{0}),
+			StateRoot:      common.BytesToHash([]byte{3, 3, 3, 3}),
+			ExtrinsicsRoot: common.BytesToHash([]byte{4, 4, 4, 4}),
+			Number:         0,
+			Digest:         types.NewDigest(),
+		}
+
+		ctrl := gomock.NewController(t)
+		mockBlockState := NewMockBlockState(ctrl)
+		mockBlockState.EXPECT().IsPaused().Return(false)
+		mockBlockState.EXPECT().
+			GetHighestFinalisedHeader().
+			Return(highestFinalizedHeader, nil)
+
+		mockBlockState.EXPECT().
+			BestBlockHeader().
+			Return(highestFinalizedHeader, nil)
+
+		mockBlockState.EXPECT().
+			HasHeader(gomock.AssignableToTypeOf(common.Hash{})).
+			Return(false, nil)
+
+		fsCfg := &FullSyncConfig{
+			BlockState: mockBlockState,
+		}
+
+		fs := NewFullSyncStrategy(fsCfg)
+
+		firstPeer := peer.ID("fst-peer")
+		firstHandshake := &network.BlockAnnounceHandshake{
+			Roles:           1,
+			BestBlockNumber: 17,
+			BestBlockHash:   common.BytesToHash([]byte{0, 1, 2}),
+			GenesisHash:     common.BytesToHash([]byte{1, 1, 1, 1}),
+		}
+
+		err := fs.OnBlockAnnounceHandshake(firstPeer, firstHandshake)
+		require.NoError(t, err)
+
+		// still far from approaching the calculated target
+		// then we can ignore the block announce
+		firstBlockAnnounce := &network.BlockAnnounceMessage{
+			ParentHash:     common.BytesToHash([]byte{0, 1, 2}),
+			Number:         17,
+			StateRoot:      common.BytesToHash([]byte{3, 3, 3, 3}),
+			ExtrinsicsRoot: common.BytesToHash([]byte{4, 4, 4, 4}),
+			Digest:         types.NewDigest(),
+			BestBlock:      true,
+		}
+
+		// the announced block 17 is not far from our best block (0) then
+		// we will consider it and start an ancestor search
+		_, rep, err := fs.OnBlockAnnounce(firstPeer, firstBlockAnnounce)
+		require.NoError(t, err)
+		require.Nil(t, rep)
+		require.Equal(t, 1, fs.requestQueue.Len())
+	})
 }

From 14224142d7c2c485d02044b20335bf9491287b0e Mon Sep 17 00:00:00 2001
From: EclesioMeloJunior
Date: Wed, 18 Sep 2024 20:08:41 -0400
Subject: [PATCH 51/74] improve test suite

---
 dot/sync/fullsync.go      |  23 ++++---
 dot/sync/fullsync_test.go | 132 +++++++++++++++++++++++++++++---------
 dot/sync/peer_view.go     |  17 ++---
 dot/sync/service.go       |   4 +-
 4 files changed, 122 insertions(+), 54 deletions(-)

diff --git a/dot/sync/fullsync.go b/dot/sync/fullsync.go
index 0a7fdaf7d5..5fdf14f7ed 100644
--- a/dot/sync/fullsync.go
+++ b/dot/sync/fullsync.go
@@ -288,10 +288,9 @@ func (f *FullSyncStrategy) OnBlockAnnounceHandshake(from peer.ID, msg *network.B
 	return nil
 }
 
-func (f *FullSyncStrategy) OnBlockAnnounce(from peer.ID, msg *network.BlockAnnounceMessage) (
-	gossip bool, repChange *Change, err error) {
+func (f *FullSyncStrategy) OnBlockAnnounce(from peer.ID, msg *network.BlockAnnounceMessage) (repChange *Change, err error) {
 	if f.blockState.IsPaused() {
-		return false, nil, errors.New("blockstate service is paused")
+		return nil, errors.New("blockstate service is paused")
 	}
 
 	blockAnnounceHeader := types.NewHeader(msg.ParentHash, msg.StateRoot, msg.ExtrinsicsRoot, msg.Number, msg.Digest)
@@ -308,7 +307,7 @@ func (f *FullSyncStrategy) OnBlockAnnounce(from peer.ID, msg *network.BlockAnnou
 		logger.Infof("bad block receive from %s: #%d (%s) is a bad block",
 			from, blockAnnounceHeader.Number, blockAnnounceHeaderHash)
 
-		return false, &Change{
+		return &Change{
 			who: from,
 			rep: peerset.ReputationChange{
 				Value:  peerset.BadBlockAnnouncementValue,
@@ -323,12 +322,12 @@ func (f *FullSyncStrategy) OnBlockAnnounce(from peer.ID, msg *network.BlockAnnou
 
 	highestFinalized, err := f.blockState.GetHighestFinalisedHeader()
 	if err != nil {
-		return false, nil, fmt.Errorf("get highest finalised header: %w", err)
+		return nil, fmt.Errorf("get highest finalised header: %w", err)
 	}
 
 	// check if the announced block is relevant
 	if blockAnnounceHeader.Number <= highestFinalized.Number || f.blockAlreadyTracked(blockAnnounceHeader) {
-		logger.Infof("announced block irrelevant #%d (%s)", blockAnnounceHeader.Number, blockAnnounceHeaderHash.Short())
+		logger.Infof("ignoring announced block #%d (%s)", blockAnnounceHeader.Number, blockAnnounceHeaderHash.Short())
 		repChange = &Change{
 			who: from,
 			rep: peerset.ReputationChange{
@@ -337,28 +336,28 @@ func (f *FullSyncStrategy) OnBlockAnnounce(from peer.ID, msg *network.BlockAnnou
 			},
 		}
 
-		return false, repChange, fmt.Errorf("%w: peer %s, block number #%d (%s)",
+		return repChange, fmt.Errorf("%w: peer %s, block number #%d (%s)",
 			errPeerOnInvalidFork, from, blockAnnounceHeader.Number, blockAnnounceHeaderHash.String())
 	}
 
 	logger.Infof("relevant announced block #%d (%s)", blockAnnounceHeader.Number, blockAnnounceHeaderHash.Short())
 	bestBlockHeader, err := f.blockState.BestBlockHeader()
 	if err != nil {
-		return false, nil, fmt.Errorf("get best block header: %w", err)
+		return nil, fmt.Errorf("get best block header: %w", err)
 	}
 
-	// if we still far from aproaching the calculated target
+	// if we are still far from approaching the announced block
 	// then we can ignore the block announce
 	mx := max(blockAnnounceHeader.Number, bestBlockHeader.Number)
 	mn := min(blockAnnounceHeader.Number, bestBlockHeader.Number)
 	if (mx - mn) > messages.MaxBlocksInResponse {
-		return true, nil, nil
+		return nil, nil
 	}
 
 	has, err := f.blockState.HasHeader(blockAnnounceHeaderHash)
 	if err != nil {
 		if !errors.Is(err, database.ErrNotFound) {
-			return false, nil, fmt.Errorf("checking if header exists: %w", err)
+			return nil, fmt.Errorf("checking if header exists: %w", err)
 		}
 	}
@@ -372,7 +371,7 @@ func (f *FullSyncStrategy) OnBlockAnnounce(from peer.ID, msg *network.BlockAnnou
 		logger.Infof("announced block already exists #%d (%s)", blockAnnounceHeader.Number, blockAnnounceHeaderHash.Short())
 	}
 
-	return true, &Change{
+	return &Change{
 		who: from,
 		rep: peerset.ReputationChange{
 			Value:  peerset.GossipSuccessValue,
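To make the new relevance rule concrete: an announce is only followed up with an ancestor search when the gap between the announced block and our best block fits inside a single block response. A tiny runnable sketch of that check, using a hard-coded constant as a stand-in for messages.MaxBlocksInResponse (requires Go 1.21+ for the max/min builtins, just like the hunk above):

package main

import "fmt"

const maxBlocksInResponse = 128 // stand-in for messages.MaxBlocksInResponse

// worthRequesting mirrors the mx/mn distance check introduced above:
// announces further away than one maximal block response are ignored.
func worthRequesting(announced, best uint) bool {
	mx := max(announced, best)
	mn := min(announced, best)
	return mx-mn <= maxBlocksInResponse
}

func main() {
	fmt.Println(worthRequesting(1024, 0)) // false: too far ahead, ignore the announce
	fmt.Println(worthRequesting(17, 0))   // true: close enough to request the block
}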
diff --git a/dot/sync/fullsync_test.go b/dot/sync/fullsync_test.go
index d96bd2e805..a6ede033a3 100644
--- a/dot/sync/fullsync_test.go
+++ b/dot/sync/fullsync_test.go
@@ -9,6 +9,7 @@ import (
 
 	"github.com/ChainSafe/gossamer/dot/network"
 	"github.com/ChainSafe/gossamer/dot/network/messages"
+	"github.com/ChainSafe/gossamer/dot/peerset"
 	"github.com/ChainSafe/gossamer/dot/types"
 	"github.com/ChainSafe/gossamer/lib/common"
 	"github.com/ChainSafe/gossamer/lib/common/variadic"
@@ -310,10 +311,6 @@ func TestFullSyncBlockAnnounce(t *testing.T) {
 			BestBlockHeader().
 			Return(highestFinalizedHeader, nil)
 
-		mockBlockState.EXPECT().
-			HasHeader(gomock.AnyOf(common.Hash{})).
-			Return(false, nil)
-
 		fsCfg := &FullSyncConfig{
 			BlockState: mockBlockState,
 		}
@@ -342,7 +339,7 @@ func TestFullSyncBlockAnnounce(t *testing.T) {
 			BestBlock: true,
 		}
 
-		_, rep, err := fs.OnBlockAnnounce(firstPeer, firstBlockAnnounce)
+		rep, err := fs.OnBlockAnnounce(firstPeer, firstBlockAnnounce)
 		require.NoError(t, err)
 		require.Nil(t, rep)
 		require.Zero(t, fs.requestQueue.Len())
@@ -359,39 +356,25 @@ func TestFullSyncBlockAnnounce(t *testing.T) {
 
 		ctrl := gomock.NewController(t)
 		mockBlockState := NewMockBlockState(ctrl)
-		mockBlockState.EXPECT().IsPaused().Return(false)
+		mockBlockState.EXPECT().IsPaused().Return(false).Times(2)
 		mockBlockState.EXPECT().
 			GetHighestFinalisedHeader().
-			Return(highestFinalizedHeader, nil)
+			Return(highestFinalizedHeader, nil).Times(2)
 
 		mockBlockState.EXPECT().
 			BestBlockHeader().
-			Return(highestFinalizedHeader, nil)
+			Return(highestFinalizedHeader, nil).Times(2)
 
 		mockBlockState.EXPECT().
 			HasHeader(gomock.AssignableToTypeOf(common.Hash{})).
 			Return(false, nil)
-
 		fsCfg := &FullSyncConfig{
 			BlockState: mockBlockState,
 		}
 
 		fs := NewFullSyncStrategy(fsCfg)
 
-		firstPeer := peer.ID("fst-peer")
-		firstHandshake := &network.BlockAnnounceHandshake{
-			Roles:           1,
-			BestBlockNumber: 17,
-			BestBlockHash:   common.BytesToHash([]byte{0, 1, 2}),
-			GenesisHash:     common.BytesToHash([]byte{1, 1, 1, 1}),
-		}
-
-		err := fs.OnBlockAnnounceHandshake(firstPeer, firstHandshake)
-		require.NoError(t, err)
-
-		// still far from approaching the calculated target
-		// then we can ignore the block announce
-		firstBlockAnnounce := &network.BlockAnnounceMessage{
+		announceOfBlock17 := &network.BlockAnnounceMessage{
 			ParentHash:     common.BytesToHash([]byte{0, 1, 2}),
 			Number:         17,
 			StateRoot:      common.BytesToHash([]byte{3, 3, 3, 3}),
 			ExtrinsicsRoot: common.BytesToHash([]byte{4, 4, 4, 4}),
 			Digest:         types.NewDigest(),
 			BestBlock:      true,
 		}
 
-		// the announced block 17 is not far from our best block (0) then
-		// we will consider it and start an ancestor search
-		_, rep, err := fs.OnBlockAnnounce(firstPeer, firstBlockAnnounce)
-		require.NoError(t, err)
-		require.Nil(t, rep)
-		require.Equal(t, 1, fs.requestQueue.Len())
+		t.Run("peer_announces_block_17", func(t *testing.T) {
+			firstPeer := peer.ID("fst-peer")
+			firstHandshake := &network.BlockAnnounceHandshake{
+				Roles:           1,
+				BestBlockNumber: 17,
+				BestBlockHash:   common.BytesToHash([]byte{0, 1, 2}),
+				GenesisHash:     common.BytesToHash([]byte{1, 1, 1, 1}),
+			}
+
+			err := fs.OnBlockAnnounceHandshake(firstPeer, firstHandshake)
+			require.NoError(t, err)
+
+			// still far from approaching the calculated target
+			// then we can ignore the block announce
+
+			// the announced block 17 is not far from our best block (0) then
+			// we will consider it and start an ancestor search
+			rep, err := fs.OnBlockAnnounce(firstPeer, announceOfBlock17)
+			require.NoError(t, err)
+
+			expectedReputation := &Change{
+				who: firstPeer,
+				rep: peerset.ReputationChange{
+					Value:  peerset.GossipSuccessValue,
+					Reason: peerset.GossipSuccessReason,
+				},
+			}
+			require.Equal(t, expectedReputation, rep)
+			require.Equal(t, 1, fs.requestQueue.Len())
+		})
+
+		t.Run("peer_B_announces_a_tracked_block", func(t *testing.T) {
+			sndPeer := peer.ID("snd-peer")
+			firstHandshake := &network.BlockAnnounceHandshake{
+				Roles:           1,
+				BestBlockNumber: 17,
+				BestBlockHash:   common.BytesToHash([]byte{0, 1, 2}),
+				GenesisHash:     common.BytesToHash([]byte{1, 1, 1, 1}),
+			}
+
+			err := fs.OnBlockAnnounceHandshake(sndPeer, firstHandshake)
+			require.NoError(t, err)
+
+			// the announced block 17 is already tracked by our node
+			// then we will ignore it
+			rep, err := fs.OnBlockAnnounce(sndPeer, announceOfBlock17)
+			require.ErrorIs(t, err, errPeerOnInvalidFork)
+
+			expectedReputation := &Change{
+				who: sndPeer,
+				rep: peerset.ReputationChange{
+					Value:  peerset.NotRelevantBlockAnnounceValue,
+					Reason: peerset.NotRelevantBlockAnnounceReason,
+				},
+			}
+			require.Equal(t, expectedReputation, rep)
+
+			// the queue should not change
+			require.Equal(t, 1, fs.requestQueue.Len())
+		})
+
+		t.Run("call_fullsync_next_actions_should_have_request_for_block_body", func(t *testing.T) {
+			refTo := func(v uint32) *uint32 {
+				return &v
+			}
+
+			tasks, err := fs.NextActions()
+			require.NoError(t, err)
+			require.Len(t, tasks, 2)
+
+			requests := make([]messages.P2PMessage, len(tasks))
+			for idx, task := range tasks {
+				requests[idx] = task.request
+			}
+
+			block17 := types.NewHeader(announceOfBlock17.ParentHash,
+				announceOfBlock17.StateRoot, announceOfBlock17.ExtrinsicsRoot,
+				announceOfBlock17.Number, announceOfBlock17.Digest)
+			block17Hash := 
block17.Hash() + + expectedRequests := []messages.P2PMessage{ + &messages.BlockRequestMessage{ + RequestedData: messages.RequestedDataBody + messages.RequestedDataJustification, + StartingBlock: *variadic.Uint32OrHashFrom(block17Hash), + Direction: messages.Ascending, + Max: refTo(1), + }, + &messages.BlockRequestMessage{ + RequestedData: messages.BootstrapRequestData, + StartingBlock: *variadic.Uint32OrHashFrom(uint32(1)), + Direction: messages.Ascending, + Max: refTo(17), + }, + } + + require.Equal(t, expectedRequests, requests) + }) }) } diff --git a/dot/sync/peer_view.go b/dot/sync/peer_view.go index c15759c03a..bd8f9312e2 100644 --- a/dot/sync/peer_view.go +++ b/dot/sync/peer_view.go @@ -51,20 +51,15 @@ func (p *peerViewSet) getTarget() uint32 { return p.target } - numbers := make([]uint32, len(p.view)) + currMax := p.target // we are going to sort the data and remove the outliers then we will return the avg of all the valid elements - for idx, view := range maps.Values(p.view) { - numbers[idx] = view.bestBlockNumber - } - - sum, count := nonOutliersSumCount(numbers) - quotientBigInt := uint32(big.NewInt(0).Div(sum, big.NewInt(int64(count))).Uint64()) - - if p.target >= quotientBigInt { - return p.target + for _, view := range maps.Values(p.view) { + if view.bestBlockNumber > currMax { + currMax = view.bestBlockNumber + } } - p.target = quotientBigInt // cache latest calculated target + p.target = currMax // cache latest calculated target return p.target } diff --git a/dot/sync/service.go b/dot/sync/service.go index 97d7f0c51b..1b6e837a63 100644 --- a/dot/sync/service.go +++ b/dot/sync/service.go @@ -69,7 +69,7 @@ type Change struct { } type Strategy interface { - OnBlockAnnounce(from peer.ID, msg *network.BlockAnnounceMessage) (gossip bool, repChange *Change, err error) + OnBlockAnnounce(from peer.ID, msg *network.BlockAnnounceMessage) (repChange *Change, err error) OnBlockAnnounceHandshake(from peer.ID, msg *network.BlockAnnounceHandshake) error NextActions() ([]*syncTask, error) IsFinished(results []*syncTaskResult) (done bool, repChanges []Change, blocks []peer.ID, err error) @@ -181,7 +181,7 @@ func (s *SyncService) HandleBlockAnnounce(from peer.ID, msg *network.BlockAnnoun s.mu.Lock() defer s.mu.Unlock() - _, repChange, err := s.currentStrategy.OnBlockAnnounce(from, msg) + repChange, err := s.currentStrategy.OnBlockAnnounce(from, msg) if err != nil { return fmt.Errorf("while handling block announce: %w", err) } From 2c119567abe54d013998df08a8f24d6f35dfd4ef Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 18 Sep 2024 20:16:21 -0400 Subject: [PATCH 52/74] chore: remove target calculation using mean --- dot/sync/peer_view.go | 77 ------------------------------------------- 1 file changed, 77 deletions(-) diff --git a/dot/sync/peer_view.go b/dot/sync/peer_view.go index bd8f9312e2..6a8c08ed50 100644 --- a/dot/sync/peer_view.go +++ b/dot/sync/peer_view.go @@ -4,8 +4,6 @@ package sync import ( - "math/big" - "sort" "sync" "github.com/ChainSafe/gossamer/lib/common" @@ -62,78 +60,3 @@ func (p *peerViewSet) getTarget() uint32 { p.target = currMax // cache latest calculated target return p.target } - -// nonOutliersSumCount calculates the sum and count of non-outlier elements -// Explanation: -// IQR outlier detection -// Q25 = 25th_percentile -// Q75 = 75th_percentile -// IQR = Q75 - Q25 // inter-quartile range -// If x > Q75 + 1.5 * IQR or x < Q25 - 1.5 * IQR THEN x is a mild outlier -// If x > Q75 + 3.0 * IQR or x < Q25 – 3.0 * IQR THEN x is a extreme outlier -// Ref: 
http://www.mathwords.com/o/outlier.htm -// -// returns: sum and count of all the non-outliers elements -func nonOutliersSumCount(dataArrUint []uint32) (sum *big.Int, count uint) { - dataArr := make([]*big.Int, len(dataArrUint)) - for i, v := range dataArrUint { - dataArr[i] = big.NewInt(int64(v)) - } - - length := len(dataArr) - - switch length { - case 0: - return big.NewInt(0), 0 - case 1: - return dataArr[0], 1 - case 2: - return big.NewInt(0).Add(dataArr[0], dataArr[1]), 2 - } - - sort.Slice(dataArr, func(i, j int) bool { - return dataArr[i].Cmp(dataArr[j]) < 0 - }) - - half := length / 2 - firstHalf := dataArr[:half] - var secondHalf []*big.Int - - if length%2 == 0 { - secondHalf = dataArr[half:] - } else { - secondHalf = dataArr[half+1:] - } - - q1 := getMedian(firstHalf) - q3 := getMedian(secondHalf) - - iqr := big.NewInt(0).Sub(q3, q1) - iqr1_5 := big.NewInt(0).Mul(iqr, big.NewInt(2)) // instead of 1.5 it is 2.0 due to the rounding - lower := big.NewInt(0).Sub(q1, iqr1_5) - upper := big.NewInt(0).Add(q3, iqr1_5) - - sum = big.NewInt(0) - for _, v := range dataArr { - // collect valid (non-outlier) values - lowPass := v.Cmp(lower) - highPass := v.Cmp(upper) - if lowPass >= 0 && highPass <= 0 { - sum.Add(sum, v) - count++ - } - } - - return sum, count -} - -func getMedian(data []*big.Int) *big.Int { - length := len(data) - half := length / 2 - if length%2 == 0 { - sum := big.NewInt(0).Add(data[half], data[half-1]) - return sum.Div(sum, big.NewInt(2)) - } - - return data[half] -} From b6a0b9dd3e427838691d1d790898e50f1e55d545 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Thu, 19 Sep 2024 14:53:20 -0400 Subject: [PATCH 53/74] chore: resolve lll --- dot/sync/fullsync.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/dot/sync/fullsync.go b/dot/sync/fullsync.go index 5fdf14f7ed..4b068a37b3 100644 --- a/dot/sync/fullsync.go +++ b/dot/sync/fullsync.go @@ -288,7 +288,8 @@ func (f *FullSyncStrategy) OnBlockAnnounceHandshake(from peer.ID, msg *network.B return nil } -func (f *FullSyncStrategy) OnBlockAnnounce(from peer.ID, msg *network.BlockAnnounceMessage) (repChange *Change, err error) { +func (f *FullSyncStrategy) OnBlockAnnounce(from peer.ID, msg *network.BlockAnnounceMessage) ( + repChange *Change, err error) { if f.blockState.IsPaused() { return nil, errors.New("blockstate service is paused") } From d3920d1be320ea470fda93fbc56b0a69ce9e3b16 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ecl=C3=A9sio=20Junior?= Date: Thu, 19 Sep 2024 18:25:48 -0400 Subject: [PATCH 54/74] Update dot/sync/peer_view.go Co-authored-by: Haiko Schol <539509+haikoschol@users.noreply.github.com> --- dot/sync/peer_view.go | 1 - 1 file changed, 1 deletion(-) diff --git a/dot/sync/peer_view.go b/dot/sync/peer_view.go index 6a8c08ed50..0e68e9d653 100644 --- a/dot/sync/peer_view.go +++ b/dot/sync/peer_view.go @@ -50,7 +50,6 @@ func (p *peerViewSet) getTarget() uint32 { } currMax := p.target - // we are going to sort the data and remove the outliers then we will return the avg of all the valid elements for _, view := range maps.Values(p.view) { if view.bestBlockNumber > currMax { currMax = view.bestBlockNumber From 793295c8d4073825451c5be4066778939c3811fb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ecl=C3=A9sio=20Junior?= Date: Thu, 19 Sep 2024 18:25:58 -0400 Subject: [PATCH 55/74] Update dot/sync/fullsync_handle_block.go Co-authored-by: Haiko Schol <539509+haikoschol@users.noreply.github.com> --- dot/sync/fullsync_handle_block.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/dot/sync/fullsync_handle_block.go b/dot/sync/fullsync_handle_block.go index e12b9b816d..bb86b57e2d 100644 --- a/dot/sync/fullsync_handle_block.go +++ b/dot/sync/fullsync_handle_block.go @@ -171,7 +171,7 @@ func (b *blockImporter) processBlockDataWithHeaderAndBody(blockData types.BlockD return nil } -// handleHeader handles blocks (header+body) included in BlockResponses +// handleBlock executes blocks and writes them to disk func (b *blockImporter) handleBlock(block *types.Block) error { parent, err := b.blockState.GetHeader(block.Header.ParentHash) if err != nil { From b3e6bf4ef330008654108028409c922d729d38fd Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Thu, 19 Sep 2024 18:57:36 -0400 Subject: [PATCH 56/74] chore: fix `TestHandleBlockAnnounceMessage` --- dot/network/block_announce.go | 2 +- .../block_announce_integration_test.go | 34 ++++++++++---- dot/sync/fullsync.go | 47 ++++++++----------- dot/sync/peer_view.go | 2 +- dot/sync/service.go | 9 ++-- dot/sync/unready_blocks.go | 5 +- dot/sync/worker_pool.go | 4 +- dot/types/block_data.go | 6 +++ 8 files changed, 60 insertions(+), 49 deletions(-) diff --git a/dot/network/block_announce.go b/dot/network/block_announce.go index 315b72d4a1..d3ceadbe2b 100644 --- a/dot/network/block_announce.go +++ b/dot/network/block_announce.go @@ -198,5 +198,5 @@ func (s *Service) handleBlockAnnounceMessage(from peer.ID, msg NotificationsMess } err := s.syncer.HandleBlockAnnounce(from, bam) - return true, err + return err == nil, err } diff --git a/dot/network/block_announce_integration_test.go b/dot/network/block_announce_integration_test.go index a32f6c55fb..1f11174405 100644 --- a/dot/network/block_announce_integration_test.go +++ b/dot/network/block_announce_integration_test.go @@ -6,10 +6,10 @@ package network import ( + "errors" "testing" "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/blocktree" "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/pkg/scale" gomock "go.uber.org/mock/gomock" @@ -131,22 +131,33 @@ func TestHandleBlockAnnounceMessage(t *testing.T) { t.Parallel() testCases := map[string]struct { - propagate bool - mockSyncer func(*testing.T, peer.ID, *BlockAnnounceMessage) Syncer + propagate bool + expectError bool + mockSyncer func(*testing.T, peer.ID, *BlockAnnounceMessage) Syncer }{ - "block_already_exists": { + "should_propagate": { mockSyncer: func(t *testing.T, peer peer.ID, blockAnnounceMessage *BlockAnnounceMessage) Syncer { ctrl := gomock.NewController(t) syncer := NewMockSyncer(ctrl) syncer.EXPECT(). HandleBlockAnnounce(peer, blockAnnounceMessage). - Return(blocktree.ErrBlockExists) + Return(nil) return syncer }, - propagate: true, + expectError: false, + propagate: true, }, - "block_does_not_exists": { - propagate: false, + "should_not_propagate": { + mockSyncer: func(t *testing.T, peer peer.ID, blockAnnounceMessage *BlockAnnounceMessage) Syncer { + ctrl := gomock.NewController(t) + syncer := NewMockSyncer(ctrl) + syncer.EXPECT(). + HandleBlockAnnounce(peer, blockAnnounceMessage). 
+ Return(errors.New("mocked error")) + return syncer + }, + expectError: true, + propagate: false, }, } @@ -175,8 +186,11 @@ func TestHandleBlockAnnounceMessage(t *testing.T) { service := createTestService(t, config) gotPropagate, err := service.handleBlockAnnounceMessage(peerID, msg) - - require.NoError(t, err) + if tt.expectError { + require.Error(t, err) + } else { + require.NoError(t, err) + } require.Equal(t, tt.propagate, gotPropagate) }) } diff --git a/dot/sync/fullsync.go b/dot/sync/fullsync.go index 4b068a37b3..e5c75cf609 100644 --- a/dot/sync/fullsync.go +++ b/dot/sync/fullsync.go @@ -31,6 +31,7 @@ var ( errNilHeaderInResponse = errors.New("expected header, received none") errNilBodyInResponse = errors.New("expected body, received none") errPeerOnInvalidFork = errors.New("peer is on an invalid fork") + errBadBlockReceived = errors.New("bad block received") blockSizeGauge = promauto.NewGauge(prometheus.GaugeOpts{ Namespace: "gossamer_sync", @@ -255,7 +256,7 @@ func (f *FullSyncStrategy) IsFinished(results []*syncTaskResult) (bool, []Change validFragment[0].Header.Hash(), ) - f.unreadyBlocks.newDisjointFragemnt(validFragment) + f.unreadyBlocks.newDisjointFragment(validFragment) request := messages.NewBlockRequest( *variadic.Uint32OrHashFrom(validFragment[0].Header.ParentHash), messages.MaxBlocksInResponse, @@ -305,7 +306,7 @@ func (f *FullSyncStrategy) OnBlockAnnounce(from peer.ID, msg *network.BlockAnnou ) if slices.Contains(f.badBlocks, blockAnnounceHeaderHash.String()) { - logger.Infof("bad block receive from %s: #%d (%s) is a bad block", + logger.Infof("bad block received from %s: #%d (%s) is a bad block", from, blockAnnounceHeader.Number, blockAnnounceHeaderHash) return &Change{ @@ -314,7 +315,7 @@ func (f *FullSyncStrategy) OnBlockAnnounce(from peer.ID, msg *network.BlockAnnou Value: peerset.BadBlockAnnouncementValue, Reason: peerset.BadBlockAnnouncementReason, }, - }, nil + }, errBadBlockReceived } if msg.BestBlock { @@ -393,8 +394,7 @@ func (f *FullSyncStrategy) IsSynced() bool { return false } - // TODO: research a better rule - return uint32(highestBlock) >= (f.peers.getTarget() - 128) + return uint32(highestBlock) >= f.peers.getTarget() } type RequestResponseData struct { @@ -511,12 +511,13 @@ func sortFragmentsOfChain(fragments [][]*types.BlockData) [][]*types.BlockData { return fragments } -// mergeFragmentsOfChain merges a sorted slice of fragments that forms a valid -// chain sequente which is the previous is the direct parent of the next block, -// and keep untouch fragments that does not forms such sequence, -// take as an example the following sorted slice. +// mergeFragmentsOfChain expects a sorted slice of fragments and merges those +// fragments for which the last block of the previous fragment is the direct parent of +// the first block of the next fragment. +// Fragments that are not part of this sequence (e.g. from forks) are left untouched. 
+// Take as an example the following sorted slice:
 // [ {1, 2, 3, 4, 5} {6, 7, 8, 9, 10} {8} {11, 12, 13, 14, 15, 16} {17} ]
-// merge will transform it in the following slice:
+// merge will transform it to the following slice:
 // [ {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17} {8} ]
 func mergeFragmentsOfChain(fragments [][]*types.BlockData) [][]*types.BlockData {
 	if len(fragments) == 0 {
@@ -525,13 +526,15 @@ func mergeFragmentsOfChain(fragments [][]*types.BlockData) [][]*types.BlockData
 	mergedFragments := [][]*types.BlockData{fragments[0]}
 	for i := 1; i < len(fragments); i++ {
-		lastMerged := mergedFragments[len(mergedFragments)-1]
-		current := fragments[i]
+		lastMergedFragment := mergedFragments[len(mergedFragments)-1]
+		currentFragment := fragments[i]
 
-		if formsSequence(lastMerged[len(lastMerged)-1], current[0]) {
-			mergedFragments[len(mergedFragments)-1] = append(lastMerged, current...)
+		lastBlock := lastMergedFragment[len(lastMergedFragment)-1]
+
+		if lastBlock.IsParent(currentFragment[0]) {
+			mergedFragments[len(mergedFragments)-1] = append(lastMergedFragment, currentFragment...)
 		} else {
-			mergedFragments = append(mergedFragments, current)
+			mergedFragments = append(mergedFragments, currentFragment)
 		}
 	}
 
@@ -555,16 +558,6 @@ func validBlocksUnderFragment(highestFinalizedNumber uint, fragmentBlocks []*typ
 	return fragmentBlocks[startFragmentFrom:]
 }
 
-// formsSequence given two fragments of blocks, check if they forms a sequence
-// by comparing the latest block from the prev fragment with the
-// first block of the next fragment
-func formsSequence(prev, next *types.BlockData) bool {
-	incrementOne := (prev.Header.Number + 1) == next.Header.Number
-	isParent := prev.Hash == next.Header.ParentHash
-
-	return incrementOne && isParent
-}
-
 // validateResponseFields checks that the expected fields are in the block data
 func validateResponseFields(req *messages.BlockRequestMessage, blocks []*types.BlockData) error {
 	for _, bd := range blocks {
@@ -587,9 +580,7 @@ func isResponseAChain(responseBlockData []*types.BlockData) bool {
 	previousBlockData := responseBlockData[0]
 	for _, currBlockData := range responseBlockData[1:] {
-		previousHash := previousBlockData.Header.Hash()
-		isParent := previousHash == currBlockData.Header.ParentHash
-		if !isParent {
+		if !previousBlockData.IsParent(currBlockData) {
 			return false
 		}
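Since the fragment merge is the trickiest part of this commit, here is a compact runnable illustration of the same loop using plain integers in place of *types.BlockData (a fragment merges into the previous one only when its first number directly follows the last number of the most recently merged fragment, the analogue of the IsParent check):

package main

import "fmt"

func mergeFragments(fragments [][]int) [][]int {
	if len(fragments) == 0 {
		return nil
	}
	merged := [][]int{fragments[0]}
	for _, current := range fragments[1:] {
		last := merged[len(merged)-1]
		// only the immediately preceding merged fragment is considered,
		// just like lastBlock.IsParent(currentFragment[0]) above
		if last[len(last)-1]+1 == current[0] {
			merged[len(merged)-1] = append(last, current...)
		} else {
			merged = append(merged, current)
		}
	}
	return merged
}

func main() {
	in := [][]int{{1, 2, 3}, {4, 5}, {7}, {8, 9}}
	fmt.Println(mergeFragments(in)) // [[1 2 3 4 5] [7 8 9]]
}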
s.currentStrategy.IsSynced()
-	return false
+	return s.currentStrategy.IsSynced()
 }
 
 func (s *SyncService) HighestBlock() uint {
diff --git a/dot/sync/unready_blocks.go b/dot/sync/unready_blocks.go
index 0baaba9382..58f477ff52 100644
--- a/dot/sync/unready_blocks.go
+++ b/dot/sync/unready_blocks.go
@@ -36,7 +36,7 @@ func (u *unreadyBlocks) newIncompleteBlock(blockHeader *types.Header) {
 	}
 }
 
-func (u *unreadyBlocks) newDisjointFragemnt(frag []*types.BlockData) {
+func (u *unreadyBlocks) newDisjointFragment(frag []*types.BlockData) {
 	u.mtx.Lock()
 	defer u.mtx.Unlock()
 	u.disjointFragments = append(u.disjointFragments, frag)
@@ -53,7 +53,8 @@ func (u *unreadyBlocks) updateDisjointFragments(chain []*types.BlockData) ([]*ty
 	for idx, disjointChain := range u.disjointFragments {
 		lastBlockArriving := chain[len(chain)-1]
 		firstDisjointBlock := disjointChain[0]
-		if formsSequence(lastBlockArriving, firstDisjointBlock) {
+
+		if lastBlockArriving.IsParent(firstDisjointBlock) {
 			indexToChange = idx
 			break
 		}
diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go
index 88fbd6bfc5..d0e501fb1c 100644
--- a/dot/sync/worker_pool.go
+++ b/dot/sync/worker_pool.go
@@ -78,8 +78,8 @@ func (s *syncWorkerPool) fromBlockAnnounceHandshake(who peer.ID) error {
 	return nil
 }
 
-// submitRequests takes an set of requests and will submit to the pool through submitRequest
-// the response will be dispatch in the resultCh
+// submitRequests blocks until all tasks have been completed or there are no workers
+// left in the pool to retry failed tasks
 func (s *syncWorkerPool) submitRequests(tasks []*syncTask) []*syncTaskResult {
 	if len(tasks) == 0 {
 		return nil
diff --git a/dot/types/block_data.go b/dot/types/block_data.go
index 35525c86d0..9bf471984b 100644
--- a/dot/types/block_data.go
+++ b/dot/types/block_data.go
@@ -31,6 +31,12 @@ func (bd *BlockData) Number() uint {
 	return bd.Header.Number
}
 
+// IsParent reports whether bd is the direct parent of other, i.e. whether
+// other's number is bd's number plus one and other's parent hash is bd's hash.
+func (bd *BlockData) IsParent(other *BlockData) bool {
+	incrementOne := (bd.Header.Number + 1) == other.Header.Number
+	isParent := bd.Hash == other.Header.ParentHash
+	return incrementOne && isParent
+}
+
 func (bd *BlockData) String() string {
 	str := fmt.Sprintf("Hash=%s ", bd.Hash)
 
From 04540d167623ec6d63f8b581facc0b84e84fff6e Mon Sep 17 00:00:00 2001
From: EclesioMeloJunior
Date: Thu, 19 Sep 2024 19:10:53 -0400
Subject: [PATCH 57/74] chore: fix `TestFullSyncIsFinished`

---
 dot/sync/fullsync.go      | 14 ++++++--------
 dot/sync/fullsync_test.go |  9 ++++++---
 2 files changed, 12 insertions(+), 11 deletions(-)

diff --git a/dot/sync/fullsync.go b/dot/sync/fullsync.go
index e5c75cf609..eb02990ec4 100644
--- a/dot/sync/fullsync.go
+++ b/dot/sync/fullsync.go
@@ -162,11 +162,10 @@ func (f *FullSyncStrategy) IsFinished(results []*syncTaskResult) (bool, []Change
 	readyBlocks := make([][]*types.BlockData, 0, len(validResp))
 	for _, reqRespData := range validResp {
 
-		// if Gossamer requested the header, then the response data should
-		// contains the full blocks to be imported.
-		// if Gossamer didn't request the header, then the response should
-		// only contain the missing parts that will complete the unreadyBlocks
-		// and then with the blocks completed we should be able to import them
+		// if Gossamer requested the header, then the response data should contain
+		// the full blocks to be imported. If Gossamer didn't request the header,
+		// then the response should only contain the missing parts that will
+		// complete the unreadyBlocks; once complete, those blocks can be imported
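+		// (hence the branch below: responses whose request asked for headers are
+		// first checked against unreadyBlocks.updateDisjointFragments)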
 		if reqRespData.req.RequestField(messages.RequestedDataHeader) {
 			updatedFragment, ok := f.unreadyBlocks.updateDisjointFragments(reqRespData.responseData)
 			if ok {
@@ -207,10 +206,9 @@ func (f *FullSyncStrategy) IsFinished(results []*syncTaskResult) (bool, []Change
 		disjointFragments = append(disjointFragments, fragment)
 	}
 
-	logger.Debugf("blocks to import: %d, disjoint fragments: %d", len(nextBlocksToImport), len(disjointFragments))
+	fmt.Printf("blocks to import: %d, disjoint fragments: %d\n", len(nextBlocksToImport), len(disjointFragments))
 
-	// this loop goal is to import ready blocks as well as
-	// update the highestFinalized header
+	// this loop's goal is to import ready blocks as well as update the highestFinalized header
 	for len(nextBlocksToImport) > 0 || len(disjointFragments) > 0 {
 		for _, blockToImport := range nextBlocksToImport {
 			imported, err := f.importer.handle(blockToImport, networkInitialSync)
diff --git a/dot/sync/fullsync_test.go b/dot/sync/fullsync_test.go
index a6ede033a3..72f73197ed 100644
--- a/dot/sync/fullsync_test.go
+++ b/dot/sync/fullsync_test.go
@@ -198,7 +198,7 @@ func TestFullSyncIsFinished(t *testing.T) {
 			// 1 -> 10
 			{
 				who: peer.ID("peerA"),
-				request: messages.NewBlockRequest(*variadic.Uint32OrHashFrom(1), 128,
+				request: messages.NewBlockRequest(*variadic.Uint32OrHashFrom(1), 127,
 					messages.BootstrapRequestData, messages.Ascending),
 				completed: true,
 				response: fstTaskBlockResponse,
 			},
 
 			// 129 -> 256
 			{
 				who: peer.ID("peerA"),
-				request: messages.NewBlockRequest(*variadic.Uint32OrHashFrom(1), 128,
+				request: messages.NewBlockRequest(*variadic.Uint32OrHashFrom(129), 127,
 					messages.BootstrapRequestData, messages.Ascending),
 				completed: true,
 				response: sndTaskBlockResponse,
 			},
 
@@ -223,7 +223,7 @@ func TestFullSyncIsFinished(t *testing.T) {
 
 	mockBlockState.EXPECT().GetHighestFinalisedHeader().
 		Return(genesisHeader, nil).
-		Times(3)
+		Times(4)
 
 	mockBlockState.EXPECT().
 		HasHeader(fstTaskBlockResponse.BlockData[0].Header.ParentHash).
@@ -252,9 +252,11 @@ func TestFullSyncIsFinished(t *testing.T) { require.NoError(t, err) require.False(t, done) + require.Equal(t, fs.requestQueue.Len(), 1) require.Len(t, fs.unreadyBlocks.incompleteBlocks, 0) require.Len(t, fs.unreadyBlocks.disjointFragments, 1) require.Equal(t, fs.unreadyBlocks.disjointFragments[0], sndTaskBlockResponse.BlockData) + require.Equal(t, len(fs.unreadyBlocks.disjointFragments[0]), len(sndTaskBlockResponse.BlockData)) expectedAncestorRequest := messages.NewBlockRequest( *variadic.Uint32OrHashFrom(sndTaskBlockResponse.BlockData[0].Header.ParentHash), @@ -285,6 +287,7 @@ func TestFullSyncIsFinished(t *testing.T) { require.NoError(t, err) require.False(t, done) + require.Equal(t, fs.requestQueue.Len(), 0) require.Len(t, fs.unreadyBlocks.incompleteBlocks, 0) require.Len(t, fs.unreadyBlocks.disjointFragments, 0) }) From 3263911e4d28ab1174e382c305c433f9fc374fbe Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Mon, 23 Sep 2024 10:00:09 -0400 Subject: [PATCH 58/74] chore: resolve all conflicts --- dot/sync/chain_sync.go | 1083 ------------------- dot/sync/chain_sync_test.go | 1900 ---------------------------------- dot/sync/fullsync.go | 7 +- dot/sync/fullsync_test.go | 27 +- dot/sync/service.go | 7 + dot/sync/worker_pool_test.go | 246 ----- 6 files changed, 23 insertions(+), 3247 deletions(-) delete mode 100644 dot/sync/chain_sync.go delete mode 100644 dot/sync/chain_sync_test.go delete mode 100644 dot/sync/worker_pool_test.go diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go deleted file mode 100644 index 75683b1b4b..0000000000 --- a/dot/sync/chain_sync.go +++ /dev/null @@ -1,1083 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "bytes" - "errors" - "fmt" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/libp2p/go-libp2p/core/peer" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - "golang.org/x/exp/slices" - - "github.com/ChainSafe/gossamer/dot/network" - "github.com/ChainSafe/gossamer/dot/network/messages" - "github.com/ChainSafe/gossamer/dot/peerset" - "github.com/ChainSafe/gossamer/dot/telemetry" - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/internal/database" - "github.com/ChainSafe/gossamer/lib/common" -) - -var _ ChainSync = (*chainSync)(nil) - -type chainSyncState byte - -const ( - bootstrap chainSyncState = iota - tip -) - -type blockOrigin byte - -const ( - networkInitialSync blockOrigin = iota - networkBroadcast -) - -func (s chainSyncState) String() string { - switch s { - case bootstrap: - return "bootstrap" - case tip: - return "tip" - default: - return "unknown" - } -} - -var ( - pendingBlocksLimit = messages.MaxBlocksInResponse * 32 - isSyncedGauge = promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: "gossamer_network_syncer", - Name: "is_synced", - Help: "bool representing whether the node is synced to the head of the chain", - }) - - blockSizeGauge = promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: "gossamer_sync", - Name: "block_size", - Help: "represent the size of blocks synced", - }) -) - -// ChainSync contains the methods used by the high-level service into the `chainSync` module -type ChainSync interface { - start() - stop() error - - // called upon receiving a BlockAnnounceHandshake - onBlockAnnounceHandshake(p peer.ID, hash common.Hash, number uint) error - - // getSyncMode returns the current syncing state - getSyncMode() chainSyncState - - 
// getHighestBlock returns the highest block or an error - getHighestBlock() (highestBlock uint, err error) - - onBlockAnnounce(announcedBlock) error -} - -type announcedBlock struct { - who peer.ID - header *types.Header -} - -type chainSync struct { - wg sync.WaitGroup - stopCh chan struct{} - - blockState BlockState - network Network - - workerPool *syncWorkerPool - - // tracks the latest state we know of from our peers, - // ie. their best block hash and number - peerViewSet *peerViewSet - - // disjoint set of blocks which are known but not ready to be processed - // ie. we only know the hash, number, or the parent block is unknown, or the body is unknown - // note: the block may have empty fields, as some data about it may be unknown - pendingBlocks DisjointBlockSet - - syncMode atomic.Value - - finalisedCh <-chan *types.FinalisationInfo - - minPeers int - slotDuration time.Duration - - storageState StorageState - transactionState TransactionState - babeVerifier BabeVerifier - finalityGadget FinalityGadget - blockImportHandler BlockImportHandler - telemetry Telemetry - badBlocks []string - requestMaker network.RequestMaker - waitPeersDuration time.Duration -} - -type chainSyncConfig struct { - bs BlockState - net Network - requestMaker network.RequestMaker - pendingBlocks DisjointBlockSet - minPeers, maxPeers int - slotDuration time.Duration - storageState StorageState - transactionState TransactionState - babeVerifier BabeVerifier - finalityGadget FinalityGadget - blockImportHandler BlockImportHandler - telemetry Telemetry - badBlocks []string - waitPeersDuration time.Duration -} - -func newChainSync(cfg chainSyncConfig) *chainSync { - atomicState := atomic.Value{} - atomicState.Store(tip) - return &chainSync{ - stopCh: make(chan struct{}), - storageState: cfg.storageState, - transactionState: cfg.transactionState, - babeVerifier: cfg.babeVerifier, - finalityGadget: cfg.finalityGadget, - blockImportHandler: cfg.blockImportHandler, - telemetry: cfg.telemetry, - blockState: cfg.bs, - network: cfg.net, - peerViewSet: newPeerViewSet(cfg.maxPeers), - pendingBlocks: cfg.pendingBlocks, - syncMode: atomicState, - finalisedCh: cfg.bs.GetFinalisedNotifierChannel(), - minPeers: cfg.minPeers, - slotDuration: cfg.slotDuration, - workerPool: newSyncWorkerPool(cfg.net, cfg.requestMaker), - badBlocks: cfg.badBlocks, - requestMaker: cfg.requestMaker, - waitPeersDuration: cfg.waitPeersDuration, - } -} - -func (cs *chainSync) waitWorkersAndTarget() { - waitPeersTimer := time.NewTimer(cs.waitPeersDuration) - - highestFinalizedHeader, err := cs.blockState.GetHighestFinalisedHeader() - if err != nil { - panic(fmt.Sprintf("failed to get highest finalised header: %v", err)) - } - - for { - cs.workerPool.useConnectedPeers() - totalAvailable := cs.workerPool.totalWorkers() - - if totalAvailable >= uint(cs.minPeers) && - cs.peerViewSet.getTarget() > 0 { - return - } - - err := cs.network.BlockAnnounceHandshake(highestFinalizedHeader) - if err != nil && !errors.Is(err, network.ErrNoPeersConnected) { - logger.Errorf("retrieving target info from peers: %v", err) - } - - select { - case <-waitPeersTimer.C: - waitPeersTimer.Reset(cs.waitPeersDuration) - - case <-cs.stopCh: - return - } - } -} - -func (cs *chainSync) start() { - // since the default status from sync mode is syncMode(tip) - isSyncedGauge.Set(1) - - cs.wg.Add(1) - go cs.pendingBlocks.run(cs.finalisedCh, cs.stopCh, &cs.wg) - - // wait until we have a minimal workers in the sync worker pool - cs.waitWorkersAndTarget() -} - -func (cs *chainSync) stop() 
error { - err := cs.workerPool.stop() - if err != nil { - return fmt.Errorf("stopping worker poll: %w", err) - } - - close(cs.stopCh) - allStopCh := make(chan struct{}) - go func() { - defer close(allStopCh) - cs.wg.Wait() - }() - - timeoutTimer := time.NewTimer(30 * time.Second) - - select { - case <-allStopCh: - if !timeoutTimer.Stop() { - <-timeoutTimer.C - } - return nil - case <-timeoutTimer.C: - return ErrStopTimeout - } -} - -func (cs *chainSync) isBootstrapSync(currentBlockNumber uint) bool { - syncTarget := cs.peerViewSet.getTarget() - return currentBlockNumber+messages.MaxBlocksInResponse < syncTarget -} - -func (cs *chainSync) bootstrapSync() { - defer cs.wg.Done() - currentBlock, err := cs.blockState.GetHighestFinalisedHeader() - if err != nil { - panic("cannot find highest finalised header") - } - - for { - select { - case <-cs.stopCh: - logger.Warn("ending bootstrap sync, chain sync stop channel triggered") - return - default: - } - - isBootstrap := cs.isBootstrapSync(currentBlock.Number) - if isBootstrap { - cs.workerPool.useConnectedPeers() - err = cs.requestMaxBlocksFrom(currentBlock, networkInitialSync) - if err != nil { - if errors.Is(err, errBlockStatePaused) { - logger.Debugf("exiting bootstrap sync: %s", err) - return - } - logger.Errorf("requesting max blocks from best block header: %s", err) - } - - currentBlock, err = cs.blockState.BestBlockHeader() - if err != nil { - logger.Errorf("getting best block header: %v", err) - } - } else { - // we are less than 128 blocks behind the target we can use tip sync - cs.syncMode.Store(tip) - isSyncedGauge.Set(1) - logger.Infof("🔁 switched sync mode to %s", tip.String()) - return - } - } -} - -func (cs *chainSync) getSyncMode() chainSyncState { - return cs.syncMode.Load().(chainSyncState) -} - -// onBlockAnnounceHandshake sets a peer's best known block -func (cs *chainSync) onBlockAnnounceHandshake(who peer.ID, bestHash common.Hash, bestNumber uint) error { - cs.workerPool.fromBlockAnnounce(who) - cs.peerViewSet.update(who, bestHash, bestNumber) - - if cs.getSyncMode() == bootstrap { - return nil - } - - bestBlockHeader, err := cs.blockState.BestBlockHeader() - if err != nil { - return err - } - - isBootstrap := cs.isBootstrapSync(bestBlockHeader.Number) - if !isBootstrap { - return nil - } - - // we are more than 128 blocks behind the head, switch to bootstrap - cs.syncMode.Store(bootstrap) - isSyncedGauge.Set(0) - logger.Infof("🔁 switched sync mode to %s", bootstrap.String()) - - cs.wg.Add(1) - go cs.bootstrapSync() - return nil -} - -func (cs *chainSync) onBlockAnnounce(announced announcedBlock) error { - // TODO: https://github.com/ChainSafe/gossamer/issues/3432 - if cs.pendingBlocks.hasBlock(announced.header.Hash()) { - return fmt.Errorf("%w: block #%d (%s)", - errAlreadyInDisjointSet, announced.header.Number, announced.header.Hash()) - } - - err := cs.pendingBlocks.addHeader(announced.header) - if err != nil { - return fmt.Errorf("while adding pending block header: %w", err) - } - - if cs.getSyncMode() == bootstrap { - return nil - } - - bestBlockHeader, err := cs.blockState.BestBlockHeader() - if err != nil { - return fmt.Errorf("getting best block header: %w", err) - } - - isBootstrap := cs.isBootstrapSync(bestBlockHeader.Number) - if !isBootstrap { - return cs.requestAnnouncedBlock(bestBlockHeader, announced) - } - - return nil -} - -func (cs *chainSync) requestAnnouncedBlock(bestBlockHeader *types.Header, announce announcedBlock) error { - peerWhoAnnounced := announce.who - announcedHash := announce.header.Hash() - 
announcedNumber := announce.header.Number - - has, err := cs.blockState.HasHeader(announcedHash) - if err != nil { - return fmt.Errorf("checking if header exists: %s", err) - } - - if has { - return nil - } - - highestFinalizedHeader, err := cs.blockState.GetHighestFinalisedHeader() - if err != nil { - return fmt.Errorf("getting highest finalized header") - } - - // if the announced block contains a lower number than our best - // block header, let's check if it is greater than our latests - // finalized header, if so this block belongs to a fork chain - if announcedNumber < bestBlockHeader.Number { - // ignore the block if it has the same or lower number - // TODO: is it following the protocol to send a blockAnnounce with number < highestFinalized number? - if announcedNumber <= highestFinalizedHeader.Number { - return nil - } - - return cs.requestForkBlocks(bestBlockHeader, highestFinalizedHeader, announce.header, announce.who) - } - - err = cs.requestChainBlocks(announce.header, bestBlockHeader, peerWhoAnnounced) - if err != nil { - return fmt.Errorf("requesting chain blocks: %w", err) - } - - err = cs.requestPendingBlocks(highestFinalizedHeader) - if err != nil { - return fmt.Errorf("while requesting pending blocks") - } - - return nil -} - -func (cs *chainSync) requestChainBlocks(announcedHeader, bestBlockHeader *types.Header, - peerWhoAnnounced peer.ID) error { - gapLength := uint32(announcedHeader.Number - bestBlockHeader.Number) - startAtBlock := announcedHeader.Number - totalBlocks := uint32(1) - - var request *messages.BlockRequestMessage - startingBlock := *messages.NewFromBlock(announcedHeader.Hash()) - - if gapLength > 1 { - request = messages.NewBlockRequest(startingBlock, gapLength, - messages.BootstrapRequestData, messages.Descending) - - startAtBlock = announcedHeader.Number - uint(*request.Max) + 1 - totalBlocks = *request.Max - - logger.Infof("requesting %d blocks from peer: %v, descending request from #%d (%s)", - gapLength, peerWhoAnnounced, announcedHeader.Number, announcedHeader.Hash().Short()) - } else { - request = messages.NewBlockRequest(startingBlock, 1, messages.BootstrapRequestData, messages.Descending) - logger.Infof("requesting a single block from peer: %v with Number: #%d and Hash: (%s)", - peerWhoAnnounced, announcedHeader.Number, announcedHeader.Hash().Short()) - } - - resultsQueue := make(chan *syncTaskResult) - err := cs.submitRequest(request, &peerWhoAnnounced, resultsQueue) - if err != nil { - return err - } - err = cs.handleWorkersResults(resultsQueue, networkBroadcast, startAtBlock, totalBlocks) - if err != nil { - return fmt.Errorf("while handling workers results: %w", err) - } - - return nil -} - -func (cs *chainSync) requestForkBlocks(bestBlockHeader, highestFinalizedHeader, announcedHeader *types.Header, - peerWhoAnnounced peer.ID) error { - logger.Infof("block announce lower than best block #%d (%s) and greater highest finalized #%d (%s)", - bestBlockHeader.Number, bestBlockHeader.Hash().Short(), - highestFinalizedHeader.Number, highestFinalizedHeader.Hash().Short()) - - parentExists, err := cs.blockState.HasHeader(announcedHeader.ParentHash) - if err != nil && !errors.Is(err, database.ErrNotFound) { - return fmt.Errorf("while checking header exists: %w", err) - } - - gapLength := uint32(1) - startAtBlock := announcedHeader.Number - announcedHash := announcedHeader.Hash() - var request *messages.BlockRequestMessage - startingBlock := *messages.NewFromBlock(announcedHash) - - if parentExists { - request = messages.NewBlockRequest(startingBlock, 
1, messages.BootstrapRequestData, messages.Descending) - } else { - gapLength = uint32(announcedHeader.Number - highestFinalizedHeader.Number) - startAtBlock = highestFinalizedHeader.Number + 1 - request = messages.NewBlockRequest(startingBlock, gapLength, messages.BootstrapRequestData, messages.Descending) - } - - logger.Infof("requesting %d fork blocks from peer: %v starting at #%d (%s)", - gapLength, peerWhoAnnounced, announcedHeader.Number, announcedHash.Short()) - - resultsQueue := make(chan *syncTaskResult) - err = cs.submitRequest(request, &peerWhoAnnounced, resultsQueue) - if err != nil { - return err - } - err = cs.handleWorkersResults(resultsQueue, networkBroadcast, startAtBlock, gapLength) - if err != nil { - return fmt.Errorf("while handling workers results: %w", err) - } - - return nil -} - -func (cs *chainSync) requestPendingBlocks(highestFinalizedHeader *types.Header) error { - pendingBlocksTotal := cs.pendingBlocks.size() - logger.Infof("total of pending blocks: %d", pendingBlocksTotal) - if pendingBlocksTotal < 1 { - return nil - } - - pendingBlocks := cs.pendingBlocks.getBlocks() - for _, pendingBlock := range pendingBlocks { - if pendingBlock.number <= highestFinalizedHeader.Number { - cs.pendingBlocks.removeBlock(pendingBlock.hash) - continue - } - - parentExists, err := cs.blockState.HasHeader(pendingBlock.header.ParentHash) - if err != nil { - return fmt.Errorf("getting pending block parent header: %w", err) - } - - if parentExists { - err := cs.handleReadyBlock(pendingBlock.toBlockData(), networkBroadcast) - if err != nil { - return fmt.Errorf("handling ready block: %w", err) - } - continue - } - - gapLength := pendingBlock.number - highestFinalizedHeader.Number - if gapLength > 128 { - logger.Warnf("gap of %d blocks, max expected: 128 block", gapLength) - gapLength = 128 - } - - descendingGapRequest := messages.NewBlockRequest(*messages.NewFromBlock(pendingBlock.hash), - uint32(gapLength), messages.BootstrapRequestData, messages.Descending) - startAtBlock := pendingBlock.number - uint(*descendingGapRequest.Max) + 1 - - // the `requests` in the tip sync are not related necessarily - // this is why we need to treat them separately - resultsQueue := make(chan *syncTaskResult) - err = cs.submitRequest(descendingGapRequest, nil, resultsQueue) - if err != nil { - return err - } - // TODO: we should handle the requests concurrently - // a way of achieve that is by constructing a new `handleWorkersResults` for - // handling only tip sync requests - err = cs.handleWorkersResults(resultsQueue, networkBroadcast, startAtBlock, *descendingGapRequest.Max) - if err != nil { - return fmt.Errorf("while handling workers results: %w", err) - } - } - - return nil -} - -func (cs *chainSync) requestMaxBlocksFrom(bestBlockHeader *types.Header, origin blockOrigin) error { //nolint:unparam - startRequestAt := bestBlockHeader.Number + 1 - - // targetBlockNumber is the virtual target we will request, however - // we should bound it to the real target which is collected through - // block announces received from other peers - targetBlockNumber := startRequestAt + maxRequestsAllowed*128 - realTarget := cs.peerViewSet.getTarget() - - if targetBlockNumber > realTarget { - targetBlockNumber = realTarget - } - - requests := messages.NewAscendingBlockRequests(startRequestAt, targetBlockNumber, - messages.BootstrapRequestData) - - var expectedAmountOfBlocks uint32 - for _, request := range requests { - if request.Max != nil { - expectedAmountOfBlocks += *request.Max - } - } - - resultsQueue, err := 
cs.submitRequests(requests) - if err != nil { - return err - } - err = cs.handleWorkersResults(resultsQueue, origin, startRequestAt, expectedAmountOfBlocks) - if err != nil { - return fmt.Errorf("while handling workers results: %w", err) - } - - return nil -} - -func (cs *chainSync) submitRequest( - request *messages.BlockRequestMessage, - who *peer.ID, - resultCh chan<- *syncTaskResult, -) error { - if !cs.blockState.IsPaused() { - cs.workerPool.submitRequest(request, who, resultCh) - return nil - } - return fmt.Errorf("submitting request: %w", errBlockStatePaused) -} - -func (cs *chainSync) submitRequests(requests []*messages.BlockRequestMessage) ( - resultCh chan *syncTaskResult, err error) { - if !cs.blockState.IsPaused() { - return cs.workerPool.submitRequests(requests), nil - } - return nil, fmt.Errorf("submitting requests: %w", errBlockStatePaused) -} - -func (cs *chainSync) showSyncStats(syncBegin time.Time, syncedBlocks int) { - finalisedHeader, err := cs.blockState.GetHighestFinalisedHeader() - if err != nil { - logger.Criticalf("getting highest finalized header: %w", err) - return - } - - totalSyncAndImportSeconds := time.Since(syncBegin).Seconds() - bps := float64(syncedBlocks) / totalSyncAndImportSeconds - logger.Infof("⛓️ synced %d blocks, "+ - "took: %.2f seconds, bps: %.2f blocks/second", - syncedBlocks, totalSyncAndImportSeconds, bps) - - logger.Infof( - "🚣 currently syncing, %d peers connected, "+ - "%d available workers, "+ - "target block number %d, "+ - "finalised #%d (%s) "+ - "sync mode: %s", - len(cs.network.Peers()), - cs.workerPool.totalWorkers(), - cs.peerViewSet.getTarget(), - finalisedHeader.Number, - finalisedHeader.Hash().Short(), - cs.getSyncMode().String(), - ) -} - -// handleWorkersResults, every time we submit requests to workers they results should be computed here -// and every cicle we should endup with a complete chain, whenever we identify -// any error from a worker we should evaluate the error and re-insert the request -// in the queue and wait for it to completes -// TODO: handle only justification requests -func (cs *chainSync) handleWorkersResults( - workersResults chan *syncTaskResult, origin blockOrigin, startAtBlock uint, expectedSyncedBlocks uint32) error { - startTime := time.Now() - syncingChain := make([]*types.BlockData, expectedSyncedBlocks) - // the total numbers of blocks is missing in the syncing chain - waitingBlocks := expectedSyncedBlocks - -taskResultLoop: - for waitingBlocks > 0 { - // in a case where we don't handle workers results we should check the pool - idleDuration := time.Minute - idleTimer := time.NewTimer(idleDuration) - - select { - case <-cs.stopCh: - return nil - - case <-idleTimer.C: - logger.Warnf("idle ticker triggered! 
checking pool") - cs.workerPool.useConnectedPeers() - continue - - case taskResult := <-workersResults: - if !idleTimer.Stop() { - <-idleTimer.C - } - - who := taskResult.who - request := taskResult.request - response := taskResult.response - - logger.Debugf("task result: peer(%s), with error: %v, with response: %v", - taskResult.who, taskResult.err != nil, taskResult.response != nil) - - if taskResult.err != nil { - if !errors.Is(taskResult.err, network.ErrReceivedEmptyMessage) { - logger.Errorf("task result: peer(%s) error: %s", - taskResult.who, taskResult.err) - - if errors.Is(taskResult.err, messages.ErrNilBlockInResponse) { - cs.network.ReportPeer(peerset.ReputationChange{ - Value: peerset.BadMessageValue, - Reason: peerset.BadMessageReason, - }, who) - } - - if strings.Contains(taskResult.err.Error(), "protocols not supported") { - cs.network.ReportPeer(peerset.ReputationChange{ - Value: peerset.BadProtocolValue, - Reason: peerset.BadProtocolReason, - }, who) - } - } - - err := cs.submitRequest(request, nil, workersResults) - if err != nil { - return err - } - continue - } - - if request.Direction == messages.Descending { - // reverse blocks before pre-validating and placing in ready queue - reverseBlockData(response.BlockData) - } - - err := validateResponseFields(request.RequestedData, response.BlockData) - if err != nil { - logger.Criticalf("validating fields: %s", err) - // TODO: check the reputation change for nil body in response - // and nil justification in response - if errors.Is(err, errNilHeaderInResponse) { - cs.network.ReportPeer(peerset.ReputationChange{ - Value: peerset.IncompleteHeaderValue, - Reason: peerset.IncompleteHeaderReason, - }, who) - } - - err = cs.submitRequest(taskResult.request, nil, workersResults) - if err != nil { - return err - } - continue taskResultLoop - } - - isChain := isResponseAChain(response.BlockData) - if !isChain { - logger.Criticalf("response from %s is not a chain", who) - err = cs.submitRequest(taskResult.request, nil, workersResults) - if err != nil { - return err - } - continue taskResultLoop - } - - grows := doResponseGrowsTheChain(response.BlockData, syncingChain, - startAtBlock, expectedSyncedBlocks) - if !grows { - logger.Criticalf("response from %s does not grows the ongoing chain", who) - err = cs.submitRequest(taskResult.request, nil, workersResults) - if err != nil { - return err - } - continue taskResultLoop - } - - for _, blockInResponse := range response.BlockData { - if slices.Contains(cs.badBlocks, blockInResponse.Hash.String()) { - logger.Criticalf("%s sent a known bad block: %s (#%d)", - who, blockInResponse.Hash.String(), blockInResponse.Number()) - - cs.network.ReportPeer(peerset.ReputationChange{ - Value: peerset.BadBlockAnnouncementValue, - Reason: peerset.BadBlockAnnouncementReason, - }, who) - - cs.workerPool.ignorePeerAsWorker(taskResult.who) - err = cs.submitRequest(taskResult.request, nil, workersResults) - if err != nil { - return err - } - continue taskResultLoop - } - - blockExactIndex := blockInResponse.Header.Number - startAtBlock - if blockExactIndex < uint(expectedSyncedBlocks) { - syncingChain[blockExactIndex] = blockInResponse - } - } - - // we need to check if we've filled all positions - // otherwise we should wait for more responses - waitingBlocks -= uint32(len(response.BlockData)) - - // we received a response without the desired amount of blocks - // we should include a new request to retrieve the missing blocks - if len(response.BlockData) < int(*request.Max) { - difference := 
uint32(int(*request.Max) - len(response.BlockData)) - lastItem := response.BlockData[len(response.BlockData)-1] - - startRequestNumber := lastItem.Header.Number + 1 - startAt := messages.NewFromBlock(startRequestNumber) - - taskResult.request = &messages.BlockRequestMessage{ - RequestedData: messages.BootstrapRequestData, - StartingBlock: *startAt, - Direction: messages.Ascending, - Max: &difference, - } - err = cs.submitRequest(taskResult.request, nil, workersResults) - if err != nil { - return err - } - continue taskResultLoop - } - } - } - - retreiveBlocksSeconds := time.Since(startTime).Seconds() - logger.Infof("🔽 retrieved %d blocks, took: %.2f seconds, starting process...", - expectedSyncedBlocks, retreiveBlocksSeconds) - - // response was validated! place into ready block queue - for _, bd := range syncingChain { - // block is ready to be processed! - if err := cs.handleReadyBlock(bd, origin); err != nil { - return fmt.Errorf("while handling ready block: %w", err) - } - } - - cs.showSyncStats(startTime, len(syncingChain)) - return nil -} - -func (cs *chainSync) handleReadyBlock(bd *types.BlockData, origin blockOrigin) error { - // if header was not requested, get it from the pending set - // if we're expecting headers, validate should ensure we have a header - if bd.Header == nil { - block := cs.pendingBlocks.getBlock(bd.Hash) - if block == nil { - // block wasn't in the pending set! - // let's check the db as maybe we already processed it - has, err := cs.blockState.HasHeader(bd.Hash) - if err != nil && !errors.Is(err, database.ErrNotFound) { - logger.Debugf("failed to check if header is known for hash %s: %s", bd.Hash, err) - return err - } - - if has { - logger.Tracef("ignoring block we've already processed, hash=%s", bd.Hash) - return err - } - - // this is bad and shouldn't happen - logger.Errorf("block with unknown header is ready: hash=%s", bd.Hash) - return err - } - - if block.header == nil { - logger.Errorf("new ready block number (unknown) with hash %s", bd.Hash) - return nil - } - - bd.Header = block.header - } - - err := cs.processBlockData(*bd, origin) - if err != nil { - // depending on the error, we might want to save this block for later - logger.Errorf("block data processing for block with hash %s failed: %s", bd.Hash, err) - return err - } - - cs.pendingBlocks.removeBlock(bd.Hash) - return nil -} - -// processBlockData processes the BlockData from a BlockResponse and -// returns the index of the last BlockData it handled on success, -// or the index of the block data that errored on failure. 
-// TODO: https://github.com/ChainSafe/gossamer/issues/3468 -func (cs *chainSync) processBlockData(blockData types.BlockData, origin blockOrigin) error { - // while in bootstrap mode we don't need to broadcast block announcements - announceImportedBlock := cs.getSyncMode() == tip - - if blockData.Header != nil { - var ( - hasJustification = blockData.Justification != nil && len(*blockData.Justification) > 0 - round uint64 - setID uint64 - ) - - if hasJustification { - var err error - round, setID, err = cs.finalityGadget.VerifyBlockJustification( - blockData.Header.Hash(), blockData.Header.Number, *blockData.Justification) - if err != nil { - return fmt.Errorf("verifying justification: %w", err) - } - } - - if blockData.Body != nil { - err := cs.processBlockDataWithHeaderAndBody(blockData, origin, announceImportedBlock) - if err != nil { - return fmt.Errorf("processing block data with header and body: %w", err) - } - } - - if hasJustification { - header := blockData.Header - err := cs.blockState.SetFinalisedHash(header.Hash(), round, setID) - if err != nil { - return fmt.Errorf("setting finalised hash: %w", err) - } - err = cs.blockState.SetJustification(header.Hash(), *blockData.Justification) - if err != nil { - return fmt.Errorf("setting justification for block number %d: %w", header.Number, err) - } - - return nil - } - } - - err := cs.blockState.CompareAndSetBlockData(&blockData) - if err != nil { - return fmt.Errorf("comparing and setting block data: %w", err) - } - - return nil -} - -func (cs *chainSync) processBlockDataWithHeaderAndBody(blockData types.BlockData, - origin blockOrigin, announceImportedBlock bool) (err error) { - - if origin != networkInitialSync { - err = cs.babeVerifier.VerifyBlock(blockData.Header) - if err != nil { - return fmt.Errorf("babe verifying block: %w", err) - } - } - - cs.handleBody(blockData.Body) - - block := &types.Block{ - Header: *blockData.Header, - Body: *blockData.Body, - } - - err = cs.handleBlock(block, announceImportedBlock) - if err != nil { - return fmt.Errorf("handling block: %w", err) - } - - return nil -} - -// handleHeader handles block bodies included in BlockResponses -func (cs *chainSync) handleBody(body *types.Body) { - acc := 0 - for _, ext := range *body { - acc += len(ext) - cs.transactionState.RemoveExtrinsic(ext) - } - - blockSizeGauge.Set(float64(acc)) -} - -// handleHeader handles blocks (header+body) included in BlockResponses -func (cs *chainSync) handleBlock(block *types.Block, announceImportedBlock bool) error { - parent, err := cs.blockState.GetHeader(block.Header.ParentHash) - if err != nil { - return fmt.Errorf("%w: %s", errFailedToGetParent, err) - } - - cs.storageState.Lock() - defer cs.storageState.Unlock() - - ts, err := cs.storageState.TrieState(&parent.StateRoot) - if err != nil { - return err - } - - root := ts.Trie().MustHash() - if !bytes.Equal(parent.StateRoot[:], root[:]) { - panic("parent state root does not match snapshot state root") - } - - rt, err := cs.blockState.GetRuntime(parent.Hash()) - if err != nil { - return err - } - - rt.SetContextStorage(ts) - - _, err = rt.ExecuteBlock(block) - if err != nil { - return fmt.Errorf("failed to execute block %d: %w", block.Header.Number, err) - } - - if err = cs.blockImportHandler.HandleBlockImport(block, ts, announceImportedBlock); err != nil { - return err - } - - blockHash := block.Header.Hash() - cs.telemetry.SendMessage(telemetry.NewBlockImport( - &blockHash, - block.Header.Number, - "NetworkInitialSync")) - - return nil -} - -// validateResponseFields 
checks that the expected fields are in the block data -func validateResponseFields(requestedData byte, blocks []*types.BlockData) error { - for _, bd := range blocks { - if bd == nil { - return errNilBlockData - } - - if (requestedData&messages.RequestedDataHeader) == messages.RequestedDataHeader && bd.Header == nil { - return fmt.Errorf("%w: %s", errNilHeaderInResponse, bd.Hash) - } - - if (requestedData&messages.RequestedDataBody) == messages.RequestedDataBody && bd.Body == nil { - return fmt.Errorf("%w: %s", errNilBodyInResponse, bd.Hash) - } - - // if we requested strictly justification - if (requestedData|messages.RequestedDataJustification) == messages.RequestedDataJustification && - bd.Justification == nil { - return fmt.Errorf("%w: %s", errNilJustificationInResponse, bd.Hash) - } - } - - return nil -} - -func isResponseAChain(responseBlockData []*types.BlockData) bool { - if len(responseBlockData) < 2 { - return true - } - - previousBlockData := responseBlockData[0] - for _, currBlockData := range responseBlockData[1:] { - previousHash := previousBlockData.Header.Hash() - isParent := previousHash == currBlockData.Header.ParentHash - if !isParent { - return false - } - - previousBlockData = currBlockData - } - - return true -} - -// doResponseGrowsTheChain will check if the acquired blocks grows the current chain -// matching their parent hashes -func doResponseGrowsTheChain(response, ongoingChain []*types.BlockData, startAtBlock uint, expectedTotal uint32) bool { - // the ongoing chain does not have any element, we can safely insert an item in it - if len(ongoingChain) < 1 { - return true - } - - compareParentHash := func(parent, child *types.BlockData) bool { - return parent.Header.Hash() == child.Header.ParentHash - } - - firstBlockInResponse := response[0] - firstBlockExactIndex := firstBlockInResponse.Header.Number - startAtBlock - if firstBlockExactIndex != 0 && firstBlockExactIndex < uint(expectedTotal) { - leftElement := ongoingChain[firstBlockExactIndex-1] - if leftElement != nil && !compareParentHash(leftElement, firstBlockInResponse) { - return false - } - } - - switch { - // if the response contains only one block then we should check both sides - // for example, if the response contains only one block called X we should - // check if its parent hash matches with the left element as well as we should - // check if the right element contains X hash as its parent hash - // ... W <- X -> Y ... - // we can skip left side comparison if X is in the 0 index and we can skip - // right side comparison if X is in the last index - case len(response) == 1: - if uint32(firstBlockExactIndex+1) < expectedTotal { - rightElement := ongoingChain[firstBlockExactIndex+1] - if rightElement != nil && !compareParentHash(firstBlockInResponse, rightElement) { - return false - } - } - // if the response contains more than 1 block then we need to compare - // only the start and the end of the acquired response, for example - // let's say we receive a response [C, D, E] and we need to check - // if those values fits correctly: - // ... 
B <- C D E -> F - // we skip the left check if its index is equals to 0 and we skip the right - // check if it ends in the latest position of the ongoing array - case len(response) > 1: - lastBlockInResponse := response[len(response)-1] - lastBlockExactIndex := lastBlockInResponse.Header.Number - startAtBlock - - if uint32(lastBlockExactIndex+1) < expectedTotal { - rightElement := ongoingChain[lastBlockExactIndex+1] - if rightElement != nil && !compareParentHash(lastBlockInResponse, rightElement) { - return false - } - } - } - - return true -} - -func (cs *chainSync) getHighestBlock() (highestBlock uint, err error) { - if cs.peerViewSet.size() == 0 { - return 0, errNoPeers - } - - for _, ps := range cs.peerViewSet.values() { - if ps.number < highestBlock { - continue - } - highestBlock = ps.number - } - - return highestBlock, nil -} diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go deleted file mode 100644 index e6e2ddd077..0000000000 --- a/dot/sync/chain_sync_test.go +++ /dev/null @@ -1,1900 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "errors" - "fmt" - "sync/atomic" - "testing" - "time" - - "github.com/ChainSafe/gossamer/dot/network" - "github.com/ChainSafe/gossamer/dot/network/messages" - "github.com/ChainSafe/gossamer/dot/peerset" - "github.com/ChainSafe/gossamer/dot/telemetry" - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/common" - "github.com/ChainSafe/gossamer/lib/runtime/storage" - "github.com/ChainSafe/gossamer/pkg/trie" - inmemory_trie "github.com/ChainSafe/gossamer/pkg/trie/inmemory" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.uber.org/mock/gomock" -) - -func Test_chainSyncState_String(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - s chainSyncState - want string - }{ - { - name: "case_bootstrap", - s: bootstrap, - want: "bootstrap", - }, - { - name: "case_tip", - s: tip, - want: "tip", - }, - { - name: "case_unknown", - s: 3, - want: "unknown", - }, - } - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - got := tt.s.String() - assert.Equal(t, tt.want, got) - }) - } -} - -func Test_chainSync_onBlockAnnounce(t *testing.T) { - t.Parallel() - const somePeer = peer.ID("abc") - - errTest := errors.New("test error") - emptyTrieState := storage.NewTrieState(inmemory_trie.NewEmptyTrie()) - block1AnnounceHeader := types.NewHeader(common.Hash{}, emptyTrieState.Trie().MustHash(), - common.Hash{}, 1, nil) - block2AnnounceHeader := types.NewHeader(block1AnnounceHeader.Hash(), - emptyTrieState.Trie().MustHash(), - common.Hash{}, 2, nil) - - testCases := map[string]struct { - waitBootstrapSync bool - chainSyncBuilder func(ctrl *gomock.Controller) *chainSync - peerID peer.ID - blockAnnounceHeader *types.Header - errWrapped error - errMessage string - expectedSyncMode chainSyncState - }{ - "announced_block_already_exists_in_disjoint_set": { - chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - pendingBlocks := NewMockDisjointBlockSet(ctrl) - pendingBlocks.EXPECT().hasBlock(block2AnnounceHeader.Hash()).Return(true) - return &chainSync{ - stopCh: make(chan struct{}), - pendingBlocks: pendingBlocks, - peerViewSet: newPeerViewSet(0), - workerPool: newSyncWorkerPool(NewMockNetwork(nil), NewMockRequestMaker(nil)), - } - }, - peerID: somePeer, - blockAnnounceHeader: block2AnnounceHeader, - errWrapped: 
errAlreadyInDisjointSet, - errMessage: fmt.Sprintf("already in disjoint set: block #%d (%s)", - block2AnnounceHeader.Number, block2AnnounceHeader.Hash()), - }, - "failed_to_add_announced_block_in_disjoint_set": { - chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - pendingBlocks := NewMockDisjointBlockSet(ctrl) - pendingBlocks.EXPECT().hasBlock(block2AnnounceHeader.Hash()).Return(false) - pendingBlocks.EXPECT().addHeader(block2AnnounceHeader).Return(errTest) - - return &chainSync{ - stopCh: make(chan struct{}), - pendingBlocks: pendingBlocks, - peerViewSet: newPeerViewSet(0), - workerPool: newSyncWorkerPool(NewMockNetwork(nil), NewMockRequestMaker(nil)), - } - }, - peerID: somePeer, - blockAnnounceHeader: block2AnnounceHeader, - errWrapped: errTest, - errMessage: "while adding pending block header: test error", - }, - "announced_block_while_in_bootstrap_mode": { - chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - pendingBlocks := NewMockDisjointBlockSet(ctrl) - pendingBlocks.EXPECT().hasBlock(block2AnnounceHeader.Hash()).Return(false) - pendingBlocks.EXPECT().addHeader(block2AnnounceHeader).Return(nil) - - state := atomic.Value{} - state.Store(bootstrap) - - return &chainSync{ - stopCh: make(chan struct{}), - pendingBlocks: pendingBlocks, - syncMode: state, - peerViewSet: newPeerViewSet(0), - workerPool: newSyncWorkerPool(NewMockNetwork(nil), NewMockRequestMaker(nil)), - } - }, - peerID: somePeer, - blockAnnounceHeader: block2AnnounceHeader, - }, - "announced_block_while_in_tip_mode": { - chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - pendingBlocksMock := NewMockDisjointBlockSet(ctrl) - pendingBlocksMock.EXPECT().hasBlock(block2AnnounceHeader.Hash()).Return(false) - pendingBlocksMock.EXPECT().addHeader(block2AnnounceHeader).Return(nil) - pendingBlocksMock.EXPECT().removeBlock(block2AnnounceHeader.Hash()) - pendingBlocksMock.EXPECT().size().Return(0) - - blockStateMock := NewMockBlockState(ctrl) - blockStateMock.EXPECT(). - HasHeader(block2AnnounceHeader.Hash()). - Return(false, nil) - blockStateMock.EXPECT().IsPaused().Return(false) - - blockStateMock.EXPECT(). - BestBlockHeader(). - Return(block1AnnounceHeader, nil) - - blockStateMock.EXPECT(). - GetHighestFinalisedHeader(). - Return(block2AnnounceHeader, nil). - Times(2) - - expectedRequest := messages.NewBlockRequest(*messages.NewFromBlock(block2AnnounceHeader.Hash()), - 1, messages.BootstrapRequestData, messages.Descending) - - fakeBlockBody := types.Body([]types.Extrinsic{}) - mockedBlockResponse := &messages.BlockResponseMessage{ - BlockData: []*types.BlockData{ - { - Hash: block2AnnounceHeader.Hash(), - Header: block2AnnounceHeader, - Body: &fakeBlockBody, - }, - }, - } - - networkMock := NewMockNetwork(ctrl) - networkMock.EXPECT().Peers().Return([]common.PeerInfo{}) - - requestMaker := NewMockRequestMaker(ctrl) - requestMaker.EXPECT(). - Do(somePeer, expectedRequest, &messages.BlockResponseMessage{}). 
- DoAndReturn(func(_, _, response any) any { - responsePtr := response.(*messages.BlockResponseMessage) - *responsePtr = *mockedBlockResponse - return nil - }) - - babeVerifierMock := NewMockBabeVerifier(ctrl) - storageStateMock := NewMockStorageState(ctrl) - importHandlerMock := NewMockBlockImportHandler(ctrl) - telemetryMock := NewMockTelemetry(ctrl) - - const announceBlock = true - ensureSuccessfulBlockImportFlow(t, block1AnnounceHeader, mockedBlockResponse.BlockData, - blockStateMock, babeVerifierMock, storageStateMock, importHandlerMock, telemetryMock, - networkBroadcast, announceBlock) - - workerPool := newSyncWorkerPool(networkMock, requestMaker) - // include the peer who announced the block in the pool - workerPool.newPeer(somePeer) - - state := atomic.Value{} - state.Store(tip) - - return &chainSync{ - stopCh: make(chan struct{}), - pendingBlocks: pendingBlocksMock, - syncMode: state, - workerPool: workerPool, - network: networkMock, - blockState: blockStateMock, - babeVerifier: babeVerifierMock, - telemetry: telemetryMock, - storageState: storageStateMock, - blockImportHandler: importHandlerMock, - peerViewSet: newPeerViewSet(0), - } - }, - peerID: somePeer, - blockAnnounceHeader: block2AnnounceHeader, - }, - } - - for name, tt := range testCases { - tt := tt - t.Run(name, func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - - chainSync := tt.chainSyncBuilder(ctrl) - err := chainSync.onBlockAnnounce(announcedBlock{ - who: tt.peerID, - header: tt.blockAnnounceHeader, - }) - - assert.ErrorIs(t, err, tt.errWrapped) - if tt.errWrapped != nil { - assert.EqualError(t, err, tt.errMessage) - } - - if tt.waitBootstrapSync { - chainSync.wg.Wait() - err = chainSync.workerPool.stop() - require.NoError(t, err) - } - }) - } -} - -func Test_chainSync_onBlockAnnounceHandshake_tipModeNeedToCatchup(t *testing.T) { - ctrl := gomock.NewController(t) - const somePeer = peer.ID("abc") - - emptyTrieState := storage.NewTrieState(inmemory_trie.NewEmptyTrie()) - block1AnnounceHeader := types.NewHeader(common.Hash{}, emptyTrieState.Trie().MustHash(), - common.Hash{}, 1, nil) - block2AnnounceHeader := types.NewHeader(block1AnnounceHeader.Hash(), - emptyTrieState.Trie().MustHash(), - common.Hash{}, 130, nil) - - blockStateMock := NewMockBlockState(ctrl) - blockStateMock.EXPECT(). - BestBlockHeader(). - Return(block1AnnounceHeader, nil). - Times(2) - - blockStateMock.EXPECT(). - BestBlockHeader(). - Return(block2AnnounceHeader, nil). - Times(1) - - blockStateMock.EXPECT(). - GetHighestFinalisedHeader(). - Return(block1AnnounceHeader, nil). - Times(3) - - blockStateMock.EXPECT().IsPaused().Return(false).Times(2) - - expectedRequest := messages.NewAscendingBlockRequests( - block1AnnounceHeader.Number+1, - block2AnnounceHeader.Number, messages.BootstrapRequestData) - - networkMock := NewMockNetwork(ctrl) - networkMock.EXPECT().Peers().Return([]common.PeerInfo{}). - Times(2) - networkMock.EXPECT().AllConnectedPeersIDs().Return([]peer.ID{}).Times(2) - - firstMockedResponse := createSuccesfullBlockResponse(t, block1AnnounceHeader.Hash(), 2, 128) - latestItemFromMockedResponse := firstMockedResponse.BlockData[len(firstMockedResponse.BlockData)-1] - - secondMockedResponse := createSuccesfullBlockResponse(t, latestItemFromMockedResponse.Hash, - int(latestItemFromMockedResponse.Header.Number+1), 1) - - requestMaker := NewMockRequestMaker(ctrl) - requestMaker.EXPECT(). - Do(somePeer, expectedRequest[0], &messages.BlockResponseMessage{}). 
- DoAndReturn(func(_, _, response any) any { - responsePtr := response.(*messages.BlockResponseMessage) - *responsePtr = *firstMockedResponse - return nil - }).Times(2) - - requestMaker.EXPECT(). - Do(somePeer, expectedRequest[1], &messages.BlockResponseMessage{}). - DoAndReturn(func(_, _, response any) any { - responsePtr := response.(*messages.BlockResponseMessage) - *responsePtr = *secondMockedResponse - return nil - }).Times(2) - - babeVerifierMock := NewMockBabeVerifier(ctrl) - storageStateMock := NewMockStorageState(ctrl) - importHandlerMock := NewMockBlockImportHandler(ctrl) - telemetryMock := NewMockTelemetry(ctrl) - - const announceBlock = false - ensureSuccessfulBlockImportFlow(t, block1AnnounceHeader, firstMockedResponse.BlockData, - blockStateMock, babeVerifierMock, storageStateMock, importHandlerMock, telemetryMock, - networkInitialSync, announceBlock) - ensureSuccessfulBlockImportFlow(t, latestItemFromMockedResponse.Header, secondMockedResponse.BlockData, - blockStateMock, babeVerifierMock, storageStateMock, importHandlerMock, telemetryMock, - networkInitialSync, announceBlock) - - state := atomic.Value{} - state.Store(tip) - - stopCh := make(chan struct{}) - defer close(stopCh) - - chainSync := &chainSync{ - stopCh: stopCh, - peerViewSet: newPeerViewSet(10), - syncMode: state, - pendingBlocks: newDisjointBlockSet(0), - workerPool: newSyncWorkerPool(networkMock, requestMaker), - network: networkMock, - blockState: blockStateMock, - babeVerifier: babeVerifierMock, - telemetry: telemetryMock, - storageState: storageStateMock, - blockImportHandler: importHandlerMock, - } - - err := chainSync.onBlockAnnounceHandshake(somePeer, block2AnnounceHeader.Hash(), block2AnnounceHeader.Number) - require.NoError(t, err) - - chainSync.wg.Wait() - err = chainSync.workerPool.stop() - require.NoError(t, err) - - require.Equal(t, chainSync.getSyncMode(), tip) -} - -func TestChainSync_onBlockAnnounceHandshake_onBootstrapMode(t *testing.T) { - const randomHashString = "0x580d77a9136035a0bc3c3cd86286172f7f81291164c5914266073a30466fba21" - randomHash := common.MustHexToHash(randomHashString) - - testcases := map[string]struct { - newChainSync func(t *testing.T, ctrl *gomock.Controller) *chainSync - peerID peer.ID - bestHash common.Hash - bestNumber uint - shouldBeAWorker bool - workerStatus byte - }{ - "new_peer": { - newChainSync: func(t *testing.T, ctrl *gomock.Controller) *chainSync { - networkMock := NewMockNetwork(ctrl) - workerPool := newSyncWorkerPool(networkMock, NewMockRequestMaker(nil)) - - cs := newChainSyncTest(t, ctrl) - cs.syncMode.Store(bootstrap) - cs.workerPool = workerPool - return cs - }, - peerID: peer.ID("peer-test"), - bestHash: randomHash, - bestNumber: uint(20), - shouldBeAWorker: true, - workerStatus: available, - }, - "ignore_peer_should_not_be_included_in_the_workerpoll": { - newChainSync: func(t *testing.T, ctrl *gomock.Controller) *chainSync { - networkMock := NewMockNetwork(ctrl) - workerPool := newSyncWorkerPool(networkMock, NewMockRequestMaker(nil)) - workerPool.ignorePeers = map[peer.ID]struct{}{ - peer.ID("peer-test"): {}, - } - - cs := newChainSyncTest(t, ctrl) - cs.syncMode.Store(bootstrap) - cs.workerPool = workerPool - return cs - }, - peerID: peer.ID("peer-test"), - bestHash: randomHash, - bestNumber: uint(20), - shouldBeAWorker: false, - }, - "peer_already_exists_in_the_pool": { - newChainSync: func(t *testing.T, ctrl *gomock.Controller) *chainSync { - networkMock := NewMockNetwork(ctrl) - workerPool := newSyncWorkerPool(networkMock, NewMockRequestMaker(nil)) 
- workerPool.workers = map[peer.ID]*syncWorker{ - peer.ID("peer-test"): { - worker: &worker{status: available}, - }, - } - - cs := newChainSyncTest(t, ctrl) - cs.syncMode.Store(bootstrap) - cs.workerPool = workerPool - return cs - }, - peerID: peer.ID("peer-test"), - bestHash: randomHash, - bestNumber: uint(20), - shouldBeAWorker: true, - workerStatus: available, - }, - } - - for tname, tt := range testcases { - tt := tt - t.Run(tname, func(t *testing.T) { - ctrl := gomock.NewController(t) - cs := tt.newChainSync(t, ctrl) - cs.onBlockAnnounceHandshake(tt.peerID, tt.bestHash, tt.bestNumber) - - view, exists := cs.peerViewSet.find(tt.peerID) - require.True(t, exists) - require.Equal(t, tt.peerID, view.who) - require.Equal(t, tt.bestHash, view.hash) - require.Equal(t, tt.bestNumber, view.number) - - if tt.shouldBeAWorker { - syncWorker, exists := cs.workerPool.workers[tt.peerID] - require.True(t, exists) - require.Equal(t, tt.workerStatus, syncWorker.worker.status) - } else { - _, exists := cs.workerPool.workers[tt.peerID] - require.False(t, exists) - } - }) - } -} - -func newChainSyncTest(t *testing.T, ctrl *gomock.Controller) *chainSync { - t.Helper() - - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) - - cfg := chainSyncConfig{ - bs: mockBlockState, - pendingBlocks: newDisjointBlockSet(pendingBlocksLimit), - minPeers: 1, - maxPeers: 5, - slotDuration: 6 * time.Second, - } - - return newChainSync(cfg) -} - -func setupChainSyncToBootstrapMode(t *testing.T, blocksAhead uint, - bs BlockState, net Network, reqMaker network.RequestMaker, babeVerifier BabeVerifier, - storageState StorageState, blockImportHandler BlockImportHandler, telemetry Telemetry) *chainSync { - t.Helper() - mockedPeerID := []peer.ID{ - peer.ID("some_peer_1"), - peer.ID("some_peer_2"), - peer.ID("some_peer_3"), - } - - peerViewMap := map[peer.ID]peerView{} - for _, p := range mockedPeerID { - peerViewMap[p] = peerView{ - who: p, - hash: common.Hash{1, 2, 3}, - number: blocksAhead, - } - } - - cfg := chainSyncConfig{ - pendingBlocks: newDisjointBlockSet(pendingBlocksLimit), - minPeers: 1, - maxPeers: 5, - slotDuration: 6 * time.Second, - bs: bs, - net: net, - requestMaker: reqMaker, - babeVerifier: babeVerifier, - storageState: storageState, - blockImportHandler: blockImportHandler, - telemetry: telemetry, - } - - chainSync := newChainSync(cfg) - chainSync.peerViewSet = &peerViewSet{view: peerViewMap} - chainSync.syncMode.Store(bootstrap) - - return chainSync -} - -func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorker(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - - mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, - trie.EmptyHash, 0, types.NewDigest()) - - const blocksAhead = 128 - totalBlockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, blocksAhead) - mockedNetwork := NewMockNetwork(ctrl) - - workerPeerID := peer.ID("noot") - startingBlock := messages.NewFromBlock(uint(1)) - max := uint32(128) - - mockedRequestMaker := NewMockRequestMaker(ctrl) - - expectedBlockRequestMessage := &messages.BlockRequestMessage{ - RequestedData: messages.BootstrapRequestData, - StartingBlock: *startingBlock, - Direction: messages.Ascending, - Max: &max, - } - - mockedRequestMaker.EXPECT(). - Do(workerPeerID, expectedBlockRequestMessage, &messages.BlockResponseMessage{}). 
- DoAndReturn(func(_, _, response any) any { - responsePtr := response.(*messages.BlockResponseMessage) - *responsePtr = *totalBlockResponse - return nil - }) - - mockedBlockState := NewMockBlockState(ctrl) - mockedBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) - mockedBlockState.EXPECT().IsPaused().Return(false) - - mockBabeVerifier := NewMockBabeVerifier(ctrl) - mockStorageState := NewMockStorageState(ctrl) - mockImportHandler := NewMockBlockImportHandler(ctrl) - mockTelemetry := NewMockTelemetry(ctrl) - - mockedBlockState.EXPECT().GetHighestFinalisedHeader().Return(types.NewEmptyHeader(), nil).Times(1) - mockedNetwork.EXPECT().Peers().Return([]common.PeerInfo{}).Times(1) - - const announceBlock = false - // setup mocks for new synced blocks that doesn't exists in our local database - ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, totalBlockResponse.BlockData, mockedBlockState, - mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock) - - // setup a chain sync which holds in its peer view map - // 3 peers, each one announce block X as its best block number. - // We start this test with genesis block being our best block, so - // we're far behind by X blocks, we should execute a bootstrap - // sync request those blocks - cs := setupChainSyncToBootstrapMode(t, blocksAhead, - mockedBlockState, mockedNetwork, mockedRequestMaker, mockBabeVerifier, - mockStorageState, mockImportHandler, mockTelemetry) - - target := cs.peerViewSet.getTarget() - require.Equal(t, uint(128), target) - - // include a new worker in the worker pool set, this worker - // should be an available peer that will receive a block request - // the worker pool executes the workers management - cs.workerPool.fromBlockAnnounce(peer.ID("noot")) - - err := cs.requestMaxBlocksFrom(mockedGenesisHeader, networkInitialSync) - require.NoError(t, err) - - err = cs.workerPool.stop() - require.NoError(t, err) -} - -func TestChainSync_BootstrapSync_SuccessfulSync_WithTwoWorkers(t *testing.T) { - t.Parallel() - - ctrl := gomock.NewController(t) - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) - mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, - trie.EmptyHash, 0, types.NewDigest()) - - mockNetwork := NewMockNetwork(ctrl) - mockRequestMaker := NewMockRequestMaker(ctrl) - - mockBabeVerifier := NewMockBabeVerifier(ctrl) - mockStorageState := NewMockStorageState(ctrl) - mockImportHandler := NewMockBlockImportHandler(ctrl) - mockTelemetry := NewMockTelemetry(ctrl) - - mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(types.NewEmptyHeader(), nil).Times(1) - mockBlockState.EXPECT().IsPaused().Return(false) - mockNetwork.EXPECT().Peers().Return([]common.PeerInfo{}).Times(1) - - // this test expects two workers responding each request with 128 blocks which means - // we should import 256 blocks in total - blockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, 256) - - // here we split the whole set in two parts each one will be the "response" for each peer - worker1Response := &messages.BlockResponseMessage{ - BlockData: blockResponse.BlockData[:128], - } - const announceBlock = false - // the first peer will respond the from the block 1 to 128 so the ensureBlockImportFlow - // will setup the expectations starting from the genesis header until block 128 - 
ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData, mockBlockState, - mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock) - - worker2Response := &messages.BlockResponseMessage{ - BlockData: blockResponse.BlockData[128:], - } - // the worker 2 will respond from block 129 to 256 so the ensureBlockImportFlow - // will setup the expectations starting from block 128, from previous worker, until block 256 - parent := worker1Response.BlockData[len(worker1Response.BlockData)-1] - ensureSuccessfulBlockImportFlow(t, parent.Header, worker2Response.BlockData, mockBlockState, - mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock) - - // we use gomock.Any since I cannot guarantee which peer picks which request - // but the first call to DoBlockRequest will return the first set and the second - // call will return the second set - mockRequestMaker.EXPECT(). - Do(gomock.Any(), gomock.Any(), &messages.BlockResponseMessage{}). - DoAndReturn(func(_, _, response any) any { - responsePtr := response.(*messages.BlockResponseMessage) - *responsePtr = *worker1Response - return nil - }) - - mockRequestMaker.EXPECT(). - Do(gomock.Any(), gomock.Any(), &messages.BlockResponseMessage{}). - DoAndReturn(func(_, _, response any) any { - responsePtr := response.(*messages.BlockResponseMessage) - *responsePtr = *worker2Response - return nil - }) - - // setup a chain sync which holds in its peer view map - // 3 peers, each one announce block 129 as its best block number. - // We start this test with genesis block being our best block, so - // we're far behind by 128 blocks, we should execute a bootstrap - // sync request those blocks - const blocksAhead = 256 - cs := setupChainSyncToBootstrapMode(t, blocksAhead, - mockBlockState, mockNetwork, mockRequestMaker, mockBabeVerifier, - mockStorageState, mockImportHandler, mockTelemetry) - - target := cs.peerViewSet.getTarget() - require.Equal(t, uint(blocksAhead), target) - - // include a new worker in the worker pool set, this worker - // should be an available peer that will receive a block request - // the worker pool executes the workers management - cs.workerPool.fromBlockAnnounce(peer.ID("noot")) - cs.workerPool.fromBlockAnnounce(peer.ID("noot2")) - - err := cs.requestMaxBlocksFrom(mockedGenesisHeader, networkInitialSync) - require.NoError(t, err) - - err = cs.workerPool.stop() - require.NoError(t, err) -} - -func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorkerFailing(t *testing.T) { - t.Parallel() - - ctrl := gomock.NewController(t) - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) - mockBlockState.EXPECT().IsPaused().Return(false).Times(2) - mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, - trie.EmptyHash, 0, types.NewDigest()) - - mockNetwork := NewMockNetwork(ctrl) - mockRequestMaker := NewMockRequestMaker(ctrl) - - mockBabeVerifier := NewMockBabeVerifier(ctrl) - mockStorageState := NewMockStorageState(ctrl) - mockImportHandler := NewMockBlockImportHandler(ctrl) - mockTelemetry := NewMockTelemetry(ctrl) - - mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(types.NewEmptyHeader(), nil).Times(1) - - mockNetwork.EXPECT().Peers().Return([]common.PeerInfo{}).Times(1) - - // this test expects two workers responding each request with 128 blocks which means - // we should import 256 blocks in total - 
blockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, 256)
- const announceBlock = false
-
- // here we split the whole set in two parts; each one will be the "response" for one peer
- worker1Response := &messages.BlockResponseMessage{
- BlockData: blockResponse.BlockData[:128],
- }
-
- // the first peer will respond with blocks 1 to 128, so the ensureBlockImportFlow
- // will set up the expectations starting from the genesis header until block 128
- ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData, mockBlockState,
- mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock)
-
- worker2Response := &messages.BlockResponseMessage{
- BlockData: blockResponse.BlockData[128:],
- }
- // worker 2 will respond with blocks 129 to 256, so the ensureBlockImportFlow
- // will set up the expectations starting from block 128 (the previous worker's last block) until block 256
- parent := worker1Response.BlockData[len(worker1Response.BlockData)-1]
- ensureSuccessfulBlockImportFlow(t, parent.Header, worker2Response.BlockData, mockBlockState,
- mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock)
-
- // we use gomock.Any since we cannot guarantee which peer picks which request,
- // but the first call to DoBlockRequest will return the first set and the second
- // call will return the second set
- doBlockRequestCount := atomic.Int32{}
- mockRequestMaker.EXPECT().
- Do(gomock.Any(), gomock.Any(), &messages.BlockResponseMessage{}).
- DoAndReturn(func(peerID, _, response any) any {
- // let's ensure that the DoBlockRequest is called by
- // peer.ID(alice) and peer.ID(bob). When bob calls, this method will fail;
- // alice should then pick up the failed request and re-execute it, which will
- // be the third call
- responsePtr := response.(*messages.BlockResponseMessage)
- defer func() { doBlockRequestCount.Add(1) }()
-
- switch doBlockRequestCount.Load() {
- case 0:
- *responsePtr = *worker1Response
- case 1:
- return errors.New("a bad error while getting a response")
- default:
- *responsePtr = *worker2Response
- }
- return nil
-
- }).Times(3)
-
- // setup a chain sync which holds in its peer view map
- // 3 peers, each one announcing block 129 as its best block number. 
- // We start this test with the genesis block as our best block, so
- // we're far behind by 128 blocks and should execute a bootstrap
- // sync to request those blocks
- const blocksAhead = 256
- cs := setupChainSyncToBootstrapMode(t, blocksAhead,
- mockBlockState, mockNetwork, mockRequestMaker, mockBabeVerifier,
- mockStorageState, mockImportHandler, mockTelemetry)
-
- target := cs.peerViewSet.getTarget()
- require.Equal(t, uint(blocksAhead), target)
-
- // include a new worker in the worker pool set; this worker
- // should be an available peer that will receive a block request.
- // the worker pool handles the worker management
- cs.workerPool.fromBlockAnnounce(peer.ID("alice"))
- cs.workerPool.fromBlockAnnounce(peer.ID("bob"))
-
- err := cs.requestMaxBlocksFrom(mockedGenesisHeader, networkInitialSync)
- require.NoError(t, err)
-
- err = cs.workerPool.stop()
- require.NoError(t, err)
-}
-
-func TestChainSync_BootstrapSync_SuccessfulSync_WithProtocolNotSupported(t *testing.T) {
- t.Parallel()
-
- ctrl := gomock.NewController(t)
- mockBlockState := NewMockBlockState(ctrl)
- mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo))
- mockBlockState.EXPECT().IsPaused().Return(false).Times(2)
- mockBlockState.EXPECT().
- GetHighestFinalisedHeader().
- Return(types.NewEmptyHeader(), nil).
- Times(1)
- mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash,
- trie.EmptyHash, 0, types.NewDigest())
-
- mockNetwork := NewMockNetwork(ctrl)
- mockNetwork.EXPECT().Peers().Return([]common.PeerInfo{})
- mockRequestMaker := NewMockRequestMaker(ctrl)
-
- mockBabeVerifier := NewMockBabeVerifier(ctrl)
- mockStorageState := NewMockStorageState(ctrl)
- mockImportHandler := NewMockBlockImportHandler(ctrl)
- mockTelemetry := NewMockTelemetry(ctrl)
-
- // this test expects two workers, each responding to a request with 128 blocks,
- // which means we should import 256 blocks in total
- blockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, 256)
- const announceBlock = false
-
- // here we split the whole set in two parts; each one will be the "response" for one peer
- worker1Response := &messages.BlockResponseMessage{
- BlockData: blockResponse.BlockData[:128],
- }
-
- // the first peer will respond with blocks 1 to 128, so the ensureBlockImportFlow
- // will set up the expectations starting from the genesis header until block 128
- ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData, mockBlockState,
- mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock)
-
- worker2Response := &messages.BlockResponseMessage{
- BlockData: blockResponse.BlockData[128:],
- }
- // worker 2 will respond with blocks 129 to 256, so the ensureBlockImportFlow
- // will set up the expectations starting from block 128 (the previous worker's last block) until block 256
- parent := worker1Response.BlockData[len(worker1Response.BlockData)-1]
- ensureSuccessfulBlockImportFlow(t, parent.Header, worker2Response.BlockData, mockBlockState,
- mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock)
-
- // we use gomock.Any since we cannot guarantee which peer picks which request,
- // but the first call to DoBlockRequest will return the first set and the second
- // call will return the second set
- doBlockRequestCount := atomic.Int32{}
- mockRequestMaker.EXPECT().
- Do(gomock.Any(), gomock.Any(), &messages.BlockResponseMessage{}). 
- DoAndReturn(func(peerID, _, response any) any {
- // let's ensure that the DoBlockRequest is called by
- // peer.ID(alice) and peer.ID(bob). When bob calls, this method will fail;
- // alice should then pick up the failed request and re-execute it, which will
- // be the third call
- responsePtr := response.(*messages.BlockResponseMessage)
- defer func() { doBlockRequestCount.Add(1) }()
-
- switch doBlockRequestCount.Load() {
- case 0:
- *responsePtr = *worker1Response
- case 1:
- return errors.New("protocols not supported")
- default:
- *responsePtr = *worker2Response
- }
-
- return nil
- }).Times(3)
-
- // since one peer will fail with "protocols not supported",
- // its reputation will be affected
- mockNetwork.EXPECT().ReportPeer(peerset.ReputationChange{
- Value: peerset.BadProtocolValue,
- Reason: peerset.BadProtocolReason,
- }, gomock.AssignableToTypeOf(peer.ID("")))
- // setup a chain sync which holds in its peer view map
- // 3 peers, each one announcing block 129 as its best block number.
- // We start this test with the genesis block as our best block, so
- // we're far behind by 128 blocks and should execute a bootstrap
- // sync to request those blocks
- const blocksAhead = 256
- cs := setupChainSyncToBootstrapMode(t, blocksAhead,
- mockBlockState, mockNetwork, mockRequestMaker, mockBabeVerifier,
- mockStorageState, mockImportHandler, mockTelemetry)
-
- target := cs.peerViewSet.getTarget()
- require.Equal(t, uint(blocksAhead), target)
-
- // include a new worker in the worker pool set; this worker
- // should be an available peer that will receive a block request.
- // the worker pool handles the worker management
- cs.workerPool.fromBlockAnnounce(peer.ID("alice"))
- cs.workerPool.fromBlockAnnounce(peer.ID("bob"))
-
- err := cs.requestMaxBlocksFrom(mockedGenesisHeader, networkInitialSync)
- require.NoError(t, err)
-
- err = cs.workerPool.stop()
- require.NoError(t, err)
-}
-
-func TestChainSync_BootstrapSync_SuccessfulSync_WithNilHeaderInResponse(t *testing.T) {
- t.Parallel()
-
- ctrl := gomock.NewController(t)
- mockBlockState := NewMockBlockState(ctrl)
- mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo))
- mockBlockState.EXPECT().IsPaused().Return(false).Times(2)
- mockBlockState.EXPECT().
- GetHighestFinalisedHeader().
- Return(types.NewEmptyHeader(), nil). 
- Times(1)
- mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash,
- trie.EmptyHash, 0, types.NewDigest())
-
- mockNetwork := NewMockNetwork(ctrl)
- mockNetwork.EXPECT().Peers().Return([]common.PeerInfo{})
- mockRequestMaker := NewMockRequestMaker(ctrl)
-
- mockBabeVerifier := NewMockBabeVerifier(ctrl)
- mockStorageState := NewMockStorageState(ctrl)
- mockImportHandler := NewMockBlockImportHandler(ctrl)
- mockTelemetry := NewMockTelemetry(ctrl)
-
- // this test expects two workers, each responding to a request with 128 blocks,
- // which means we should import 256 blocks in total
- blockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, 256)
- const announceBlock = false
-
- // here we split the whole set in two parts; each one will be the "response" for one peer
- worker1Response := &messages.BlockResponseMessage{
- BlockData: blockResponse.BlockData[:128],
- }
-
- // the first peer will respond with blocks 1 to 128, so the ensureBlockImportFlow
- // will set up the expectations starting from the genesis header until block 128
- ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData, mockBlockState,
- mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock)
-
- worker2Response := &messages.BlockResponseMessage{
- BlockData: blockResponse.BlockData[128:],
- }
- // worker 2 will respond with blocks 129 to 256, so the ensureBlockImportFlow
- // will set up the expectations starting from block 128 (the previous worker's last block) until block 256
- parent := worker1Response.BlockData[127]
- ensureSuccessfulBlockImportFlow(t, parent.Header, worker2Response.BlockData, mockBlockState,
- mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock)
-
- // we use gomock.Any since we cannot guarantee which peer picks which request,
- // but the first call to DoBlockRequest will return the first set and the second
- // call will return the second set
- doBlockRequestCount := atomic.Int32{}
- mockRequestMaker.EXPECT().
- Do(gomock.Any(), gomock.Any(), &messages.BlockResponseMessage{}).
- DoAndReturn(func(peerID, _, response any) any {
- // let's ensure that the DoBlockRequest is called by
- // peer.ID(alice) and peer.ID(bob). When bob calls, this method returns a
- // response item without the header that was requested
- responsePtr := response.(*messages.BlockResponseMessage)
- defer func() { doBlockRequestCount.Add(1) }()
-
- switch doBlockRequestCount.Load() {
- case 0:
- *responsePtr = *worker1Response
- case 1:
- incompleteBlockData := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 128, 256)
- incompleteBlockData.BlockData[0].Header = nil
-
- *responsePtr = *incompleteBlockData
- default:
- *responsePtr = *worker2Response
- }
-
- return nil
- }).Times(3)
-
- // since one peer will respond with an incomplete header,
- // its reputation will be affected
- mockNetwork.EXPECT().ReportPeer(peerset.ReputationChange{
- Value: peerset.IncompleteHeaderValue,
- Reason: peerset.IncompleteHeaderReason,
- }, gomock.AssignableToTypeOf(peer.ID("")))
- // setup a chain sync which holds in its peer view map
- // 3 peers, each one announcing block 129 as its best block number. 
- // We start this test with the genesis block as our best block, so
- // we're far behind by 128 blocks and should execute a bootstrap
- // sync to request those blocks
- const blocksAhead = 256
- cs := setupChainSyncToBootstrapMode(t, blocksAhead,
- mockBlockState, mockNetwork, mockRequestMaker, mockBabeVerifier,
- mockStorageState, mockImportHandler, mockTelemetry)
-
- target := cs.peerViewSet.getTarget()
- require.Equal(t, uint(blocksAhead), target)
-
- // include a new worker in the worker pool set; this worker
- // should be an available peer that will receive a block request.
- // the worker pool handles the worker management
- cs.workerPool.fromBlockAnnounce(peer.ID("alice"))
- cs.workerPool.fromBlockAnnounce(peer.ID("bob"))
-
- err := cs.requestMaxBlocksFrom(mockedGenesisHeader, networkInitialSync)
- require.NoError(t, err)
-
- err = cs.workerPool.stop()
- require.NoError(t, err)
-}
-
-func TestChainSync_BootstrapSync_SuccessfulSync_WithNilBlockInResponse(t *testing.T) {
- t.Parallel()
-
- ctrl := gomock.NewController(t)
- mockBlockState := NewMockBlockState(ctrl)
- mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo))
- mockBlockState.EXPECT().IsPaused().Return(false).Times(2)
- mockBlockState.EXPECT().
- GetHighestFinalisedHeader().
- Return(types.NewEmptyHeader(), nil).
- Times(1)
- mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash,
- trie.EmptyHash, 0, types.NewDigest())
-
- mockBabeVerifier := NewMockBabeVerifier(ctrl)
- mockStorageState := NewMockStorageState(ctrl)
- mockImportHandler := NewMockBlockImportHandler(ctrl)
- mockTelemetry := NewMockTelemetry(ctrl)
-
- blockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, 128)
- const announceBlock = false
-
- workerResponse := &messages.BlockResponseMessage{
- BlockData: blockResponse.BlockData,
- }
-
- // the first peer will respond with blocks 1 to 128, so the ensureBlockImportFlow
- // will set up the expectations starting from the genesis header until block 128
- ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, workerResponse.BlockData, mockBlockState,
- mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock)
-
- doBlockRequestCount := atomic.Int32{}
- mockRequestMaker := NewMockRequestMaker(ctrl)
- mockRequestMaker.EXPECT().
- Do(gomock.Any(), gomock.Any(), &messages.BlockResponseMessage{}).
- DoAndReturn(func(peerID, _, response any) any {
- // let's ensure that the DoBlockRequest is called by
- // peer.ID(alice) and peer.ID(bob). When bob calls, this method returns
- // a nil block in the response instead of the requested data
- responsePtr := response.(*messages.BlockResponseMessage)
- defer func() { doBlockRequestCount.Add(1) }()
-
- switch doBlockRequestCount.Load() {
- case 0:
- return messages.ErrNilBlockInResponse
- case 1:
- *responsePtr = *workerResponse
- }
-
- return nil
- }).Times(2)
-
- mockNetwork := NewMockNetwork(ctrl)
- mockNetwork.EXPECT().Peers().Return([]common.PeerInfo{})
-
- // the offending peer's reputation will be affected
- mockNetwork.EXPECT().ReportPeer(peerset.ReputationChange{
- Value: peerset.BadMessageValue,
- Reason: peerset.BadMessageReason,
- }, gomock.AssignableToTypeOf(peer.ID("")))
-
- const blocksAhead = 128
- cs := setupChainSyncToBootstrapMode(t, blocksAhead,
- mockBlockState, mockNetwork, mockRequestMaker, mockBabeVerifier,
- mockStorageState, mockImportHandler, mockTelemetry)
-
- target := cs.peerViewSet.getTarget()
- require.Equal(t, uint(blocksAhead), target)
-
- // include a new worker in the worker pool set; this worker
- // should be an available peer that will receive a block request.
- // the worker pool handles the worker management
- cs.workerPool.fromBlockAnnounce(peer.ID("alice"))
- cs.workerPool.fromBlockAnnounce(peer.ID("bob"))
-
- err := cs.requestMaxBlocksFrom(mockedGenesisHeader, networkInitialSync)
- require.NoError(t, err)
-
- err = cs.workerPool.stop()
- require.NoError(t, err)
-}
-
-func TestChainSync_BootstrapSync_SuccessfulSync_WithResponseIsNotAChain(t *testing.T) {
- t.Parallel()
-
- ctrl := gomock.NewController(t)
- mockBlockState := NewMockBlockState(ctrl)
- mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo))
- mockBlockState.EXPECT().IsPaused().Return(false).Times(2)
- mockBlockState.EXPECT().
- GetHighestFinalisedHeader().
- Return(types.NewEmptyHeader(), nil). 
- Times(1)
- mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash,
- trie.EmptyHash, 0, types.NewDigest())
-
- mockNetwork := NewMockNetwork(ctrl)
- mockNetwork.EXPECT().Peers().Return([]common.PeerInfo{})
- mockRequestMaker := NewMockRequestMaker(ctrl)
-
- mockBabeVerifier := NewMockBabeVerifier(ctrl)
- mockStorageState := NewMockStorageState(ctrl)
- mockImportHandler := NewMockBlockImportHandler(ctrl)
- mockTelemetry := NewMockTelemetry(ctrl)
-
- // this test expects two workers, each responding to a request with 128 blocks,
- // which means we should import 256 blocks in total
- blockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, 256)
- const announceBlock = false
-
- // here we split the whole set in two parts; each one will be the "response" for one peer
- worker1Response := &messages.BlockResponseMessage{
- BlockData: blockResponse.BlockData[:128],
- }
-
- // the first peer will respond with blocks 1 to 128, so the ensureBlockImportFlow
- // will set up the expectations starting from the genesis header until block 128
- ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData, mockBlockState,
- mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock)
-
- worker2Response := &messages.BlockResponseMessage{
- BlockData: blockResponse.BlockData[128:],
- }
- // worker 2 will respond with blocks 129 to 256, so the ensureBlockImportFlow
- // will set up the expectations starting from block 128 (the previous worker's last block) until block 256
- parent := worker1Response.BlockData[127]
- ensureSuccessfulBlockImportFlow(t, parent.Header, worker2Response.BlockData, mockBlockState,
- mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock)
-
- // we use gomock.Any since we cannot guarantee which peer picks which request,
- // but the first call to DoBlockRequest will return the first set and the second
- // call will return the second set
- doBlockRequestCount := atomic.Int32{}
- mockRequestMaker.EXPECT().
- Do(gomock.Any(), gomock.Any(), &messages.BlockResponseMessage{}).
- DoAndReturn(func(peerID, _, response any) any {
- // let's ensure that the DoBlockRequest is called by
- // peer.ID(alice) and peer.ID(bob). When bob calls, this method returns a
- // response that does not form a chain
- responsePtr := response.(*messages.BlockResponseMessage)
- defer func() { doBlockRequestCount.Add(1) }()
-
- switch doBlockRequestCount.Load() {
- case 0:
- *responsePtr = *worker1Response
- case 1:
- notAChainBlockData := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 128, 256)
- // swap positions to force the problem
- notAChainBlockData.BlockData[0], notAChainBlockData.BlockData[130] =
- notAChainBlockData.BlockData[130], notAChainBlockData.BlockData[0]
-
- *responsePtr = *notAChainBlockData
- default:
- *responsePtr = *worker2Response
- }
-
- return nil
- }).Times(3)
-
- // setup a chain sync which holds in its peer view map
- // 3 peers, each one announcing block 129 as its best block number. 
- // We start this test with the genesis block as our best block, so
- // we're far behind by 128 blocks and should execute a bootstrap
- // sync to request those blocks
- const blocksAhead = 256
- cs := setupChainSyncToBootstrapMode(t, blocksAhead,
- mockBlockState, mockNetwork, mockRequestMaker, mockBabeVerifier,
- mockStorageState, mockImportHandler, mockTelemetry)
-
- target := cs.peerViewSet.getTarget()
-
- require.Equal(t, uint(blocksAhead), target)
-
- // include a new worker in the worker pool set; this worker
- // should be an available peer that will receive a block request.
- // the worker pool handles the worker management
- cs.workerPool.fromBlockAnnounce(peer.ID("alice"))
- cs.workerPool.fromBlockAnnounce(peer.ID("bob"))
-
- err := cs.requestMaxBlocksFrom(mockedGenesisHeader, networkInitialSync)
- require.NoError(t, err)
-
- err = cs.workerPool.stop()
- require.NoError(t, err)
-}
-
-func TestChainSync_BootstrapSync_SuccessfulSync_WithReceivedBadBlock(t *testing.T) {
- t.Parallel()
-
- ctrl := gomock.NewController(t)
- mockBlockState := NewMockBlockState(ctrl)
- mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo))
- mockBlockState.EXPECT().IsPaused().Return(false).Times(2)
- mockBlockState.EXPECT().
- GetHighestFinalisedHeader().
- Return(types.NewEmptyHeader(), nil).
- Times(1)
-
- mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash,
- trie.EmptyHash, 0, types.NewDigest())
-
- mockNetwork := NewMockNetwork(ctrl)
- mockNetwork.EXPECT().Peers().Return([]common.PeerInfo{})
- mockRequestMaker := NewMockRequestMaker(ctrl)
-
- mockBabeVerifier := NewMockBabeVerifier(ctrl)
- mockStorageState := NewMockStorageState(ctrl)
- mockImportHandler := NewMockBlockImportHandler(ctrl)
- mockTelemetry := NewMockTelemetry(ctrl)
-
- // this test expects two workers, each responding to a request with 128 blocks,
- // which means we should import 256 blocks in total
- blockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, 256)
- const announceBlock = false
-
- // here we split the whole set in two parts; each one will be the "response" for one peer
- worker1Response := &messages.BlockResponseMessage{
- BlockData: blockResponse.BlockData[:128],
- }
-
- // the first peer will respond with blocks 1 to 128, so the ensureBlockImportFlow
- // will set up the expectations starting from the genesis header until block 128
- ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData, mockBlockState,
- mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock)
-
- worker2Response := &messages.BlockResponseMessage{
- BlockData: blockResponse.BlockData[128:],
- }
- // worker 2 will respond with blocks 129 to 256, so the ensureBlockImportFlow
- // will set up the expectations starting from block 128 (the previous worker's last block) until block 256
- parent := worker1Response.BlockData[len(worker1Response.BlockData)-1]
- ensureSuccessfulBlockImportFlow(t, parent.Header, worker2Response.BlockData, mockBlockState,
- mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock)
-
- fakeBadBlockHash := common.MustHexToHash("0x18767cb4bb4cc13bf119f6613aec5487d4c06a2e453de53d34aea6f3f1ee9855")
-
- // we use gomock.Any since we cannot guarantee which peer picks which request,
- // but the first call to DoBlockRequest will return the first set and the second
- // call will return the second set
- doBlockRequestCount := atomic.Int32{}
- mockRequestMaker.EXPECT().
- Do(gomock.Any(), gomock.Any(), &messages.BlockResponseMessage{}).
- DoAndReturn(func(peerID, _, response any) any {
- // let's ensure that the DoBlockRequest is called by
- // peer.ID(alice) and peer.ID(bob). When bob calls, this method returns a
- // response that contains a known bad block
- responsePtr := response.(*messages.BlockResponseMessage)
- defer func() { doBlockRequestCount.Add(1) }()
-
- switch doBlockRequestCount.Load() {
- case 0:
- *responsePtr = *worker1Response
- case 1:
- // use the first response's last item hash to produce the second response block data,
- // so we can guarantee that the second response continues the first response's blocks
- firstResponseLastItem := worker1Response.BlockData[len(worker1Response.BlockData)-1]
- blockDataWithBadBlock := createSuccesfullBlockResponse(t,
- firstResponseLastItem.Header.Hash(),
- 129,
- 128)
-
- // change the last item of the second response to be a bad block, so we guarantee that
- // this second response is still a chain (changing the hash of a block in the middle of the
- // response breaks the `isAChain` verification)
- lastItem := len(blockDataWithBadBlock.BlockData) - 1
- blockDataWithBadBlock.BlockData[lastItem].Hash = fakeBadBlockHash
- *responsePtr = *blockDataWithBadBlock
- default:
- *responsePtr = *worker2Response
- }
-
- return nil
- }).Times(3)
-
- mockNetwork.EXPECT().ReportPeer(peerset.ReputationChange{
- Value: peerset.BadBlockAnnouncementValue,
- Reason: peerset.BadBlockAnnouncementReason,
- }, gomock.AssignableToTypeOf(peer.ID("")))
- // setup a chain sync which holds in its peer view map
- // 3 peers, each one announcing block 129 as its best block number.
- // We start this test with the genesis block as our best block, so
- // we're far behind by 128 blocks and should execute a bootstrap
- // sync to request those blocks
- const blocksAhead = 256
- cs := setupChainSyncToBootstrapMode(t, blocksAhead,
- mockBlockState, mockNetwork, mockRequestMaker, mockBabeVerifier,
- mockStorageState, mockImportHandler, mockTelemetry)
-
- cs.badBlocks = []string{fakeBadBlockHash.String()}
-
- target := cs.peerViewSet.getTarget()
- require.Equal(t, uint(blocksAhead), target)
-
- // include a new worker in the worker pool set; this worker
- // should be an available peer that will receive a block request.
- // the worker pool handles the worker management
- cs.workerPool.fromBlockAnnounce(peer.ID("alice"))
- cs.workerPool.fromBlockAnnounce(peer.ID("bob"))
-
- err := cs.requestMaxBlocksFrom(mockedGenesisHeader, networkInitialSync)
- require.NoError(t, err)
-
- err = cs.workerPool.stop()
- require.NoError(t, err)
-
- // the bad peer should not be in the worker pool;
- // it should be in the ignore list
- require.Len(t, cs.workerPool.workers, 1)
- require.Len(t, cs.workerPool.ignorePeers, 1)
-}
-
-func TestChainSync_BootstrapSync_SucessfulSync_ReceivedPartialBlockData(t *testing.T) {
- t.Parallel()
-
- ctrl := gomock.NewController(t)
- mockBlockState := NewMockBlockState(ctrl)
- mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo))
- mockBlockState.EXPECT().IsPaused().Return(false).Times(2)
- mockBlockState.EXPECT().
- GetHighestFinalisedHeader().
- Return(types.NewEmptyHeader(), nil). 
- Times(1)
-
- mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash,
- trie.EmptyHash, 0, types.NewDigest())
-
- mockNetwork := NewMockNetwork(ctrl)
- mockNetwork.EXPECT().Peers().Return([]common.PeerInfo{})
-
- mockRequestMaker := NewMockRequestMaker(ctrl)
-
- mockBabeVerifier := NewMockBabeVerifier(ctrl)
- mockStorageState := NewMockStorageState(ctrl)
- mockImportHandler := NewMockBlockImportHandler(ctrl)
- mockTelemetry := NewMockTelemetry(ctrl)
-
- // create a set of 128 blocks
- blockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, 128)
- const announceBlock = false
-
- // the worker will return only part of the set
- worker1Response := &messages.BlockResponseMessage{
- BlockData: blockResponse.BlockData[:97],
- }
-
- // the first peer will respond with blocks 1 to 97, so the ensureBlockImportFlow
- // will set up the expectations starting from the genesis header until block 97
- ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData, mockBlockState,
- mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock)
-
- worker1MissingBlocksResponse := &messages.BlockResponseMessage{
- BlockData: blockResponse.BlockData[97:],
- }
-
- // last item from the previous response
- parent := worker1Response.BlockData[96]
- ensureSuccessfulBlockImportFlow(t, parent.Header, worker1MissingBlocksResponse.BlockData, mockBlockState,
- mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock)
-
- doBlockRequestCount := 0
- mockRequestMaker.EXPECT().
- Do(gomock.Any(), gomock.Any(), &messages.BlockResponseMessage{}).
- DoAndReturn(func(peerID, _, response any) any {
- // let's ensure that the DoBlockRequest is called by peer.ID(alice). 
The first call will return only 97 blocks - // the handler should issue another call to retrieve the missing blocks - responsePtr := response.(*messages.BlockResponseMessage) - defer func() { doBlockRequestCount++ }() - - if doBlockRequestCount == 0 { - *responsePtr = *worker1Response - } else { - *responsePtr = *worker1MissingBlocksResponse - } - - return nil - }).Times(2) - - const blocksAhead = 128 - cs := setupChainSyncToBootstrapMode(t, blocksAhead, - mockBlockState, mockNetwork, mockRequestMaker, mockBabeVerifier, - mockStorageState, mockImportHandler, mockTelemetry) - - target := cs.peerViewSet.getTarget() - require.Equal(t, uint(blocksAhead), target) - - cs.workerPool.fromBlockAnnounce(peer.ID("alice")) - - err := cs.requestMaxBlocksFrom(mockedGenesisHeader, networkInitialSync) - require.NoError(t, err) - - err = cs.workerPool.stop() - require.NoError(t, err) - - require.Len(t, cs.workerPool.workers, 1) - - _, ok := cs.workerPool.workers[peer.ID("alice")] - require.True(t, ok) -} - -func createSuccesfullBlockResponse(t *testing.T, parentHeader common.Hash, - startingAt, numBlocks int) *messages.BlockResponseMessage { - t.Helper() - - response := new(messages.BlockResponseMessage) - response.BlockData = make([]*types.BlockData, numBlocks) - - emptyTrieState := storage.NewTrieState(inmemory_trie.NewEmptyTrie()) - tsRoot := emptyTrieState.Trie().MustHash() - - firstHeader := types.NewHeader(parentHeader, tsRoot, common.Hash{}, - uint(startingAt), nil) - response.BlockData[0] = &types.BlockData{ - Hash: firstHeader.Hash(), - Header: firstHeader, - Body: types.NewBody([]types.Extrinsic{}), - Justification: nil, - } - - parentHash := firstHeader.Hash() - for idx := 1; idx < numBlocks; idx++ { - blockNumber := idx + startingAt - header := types.NewHeader(parentHash, tsRoot, common.Hash{}, - uint(blockNumber), nil) - response.BlockData[idx] = &types.BlockData{ - Hash: header.Hash(), - Header: header, - Body: types.NewBody([]types.Extrinsic{}), - Justification: nil, - } - parentHash = header.Hash() - } - - return response -} - -// ensureSuccessfulBlockImportFlow will setup the expectations for method calls -// that happens while chain sync imports a block -func ensureSuccessfulBlockImportFlow(t *testing.T, parentHeader *types.Header, - blocksReceived []*types.BlockData, mockBlockState *MockBlockState, - mockBabeVerifier *MockBabeVerifier, mockStorageState *MockStorageState, - mockImportHandler *MockBlockImportHandler, mockTelemetry *MockTelemetry, origin blockOrigin, announceBlock bool) { - t.Helper() - - for idx, blockData := range blocksReceived { - if origin != networkInitialSync { - mockBabeVerifier.EXPECT().VerifyBlock(blockData.Header).Return(nil) - } - - var previousHeader *types.Header - if idx == 0 { - previousHeader = parentHeader - } else { - previousHeader = blocksReceived[idx-1].Header - } - - mockBlockState.EXPECT().GetHeader(blockData.Header.ParentHash).Return(previousHeader, nil).AnyTimes() - mockStorageState.EXPECT().Lock().AnyTimes() - mockStorageState.EXPECT().Unlock().AnyTimes() - - emptyTrieState := storage.NewTrieState(inmemory_trie.NewEmptyTrie()) - parentStateRoot := previousHeader.StateRoot - mockStorageState.EXPECT().TrieState(&parentStateRoot). - Return(emptyTrieState, nil).AnyTimes() - - ctrl := gomock.NewController(t) - mockRuntimeInstance := NewMockInstance(ctrl) - mockBlockState.EXPECT().GetRuntime(previousHeader.Hash()). 
- Return(mockRuntimeInstance, nil).AnyTimes() - - expectedBlock := &types.Block{ - Header: *blockData.Header, - Body: *blockData.Body, - } - - mockRuntimeInstance.EXPECT().SetContextStorage(emptyTrieState).AnyTimes() - mockRuntimeInstance.EXPECT().ExecuteBlock(expectedBlock). - Return(nil, nil).AnyTimes() - - mockImportHandler.EXPECT().HandleBlockImport(expectedBlock, emptyTrieState, announceBlock). - Return(nil).AnyTimes() - - blockHash := blockData.Header.Hash() - expectedTelemetryMessage := telemetry.NewBlockImport( - &blockHash, - blockData.Header.Number, - "NetworkInitialSync") - mockTelemetry.EXPECT().SendMessage(expectedTelemetryMessage).AnyTimes() - mockBlockState.EXPECT().CompareAndSetBlockData(blockData).Return(nil).AnyTimes() - } -} - -func TestChainSync_validateResponseFields(t *testing.T) { - t.Parallel() - - block1Header := &types.Header{ - ParentHash: common.MustHexToHash("0x00597cb4bb4cc13bf119f6613aec7642d4c06a2e453de53d34aea6f3f1eeb504"), - Number: 2, - } - - block2Header := &types.Header{ - ParentHash: block1Header.Hash(), - Number: 3, - } - - cases := map[string]struct { - wantErr error - errString string - setupChainSync func(t *testing.T) *chainSync - requestedData byte - blockData *types.BlockData - }{ - "requested_bootstrap_data_but_got_nil_header": { - wantErr: errNilHeaderInResponse, - errString: "expected header, received none: " + - block2Header.Hash().String(), - requestedData: messages.BootstrapRequestData, - blockData: &types.BlockData{ - Hash: block2Header.Hash(), - Header: nil, - Body: &types.Body{}, - Justification: &[]byte{0}, - }, - setupChainSync: func(t *testing.T) *chainSync { - ctrl := gomock.NewController(t) - blockStateMock := NewMockBlockState(ctrl) - blockStateMock.EXPECT().HasHeader(block1Header.ParentHash).Return(true, nil) - - networkMock := NewMockNetwork(ctrl) - networkMock.EXPECT().ReportPeer(peerset.ReputationChange{ - Value: peerset.IncompleteHeaderValue, - Reason: peerset.IncompleteHeaderReason, - }, peer.ID("peer")) - - return &chainSync{ - blockState: blockStateMock, - network: networkMock, - } - }, - }, - "requested_bootstrap_data_but_got_nil_body": { - wantErr: errNilBodyInResponse, - errString: "expected body, received none: " + - block2Header.Hash().String(), - requestedData: messages.BootstrapRequestData, - blockData: &types.BlockData{ - Hash: block2Header.Hash(), - Header: block2Header, - Body: nil, - Justification: &[]byte{0}, - }, - setupChainSync: func(t *testing.T) *chainSync { - ctrl := gomock.NewController(t) - blockStateMock := NewMockBlockState(ctrl) - blockStateMock.EXPECT().HasHeader(block1Header.ParentHash).Return(true, nil) - networkMock := NewMockNetwork(ctrl) - - return &chainSync{ - blockState: blockStateMock, - network: networkMock, - } - }, - }, - "requested_only_justification_but_got_nil": { - wantErr: errNilJustificationInResponse, - errString: "expected justification, received none: " + - block2Header.Hash().String(), - requestedData: messages.RequestedDataJustification, - blockData: &types.BlockData{ - Hash: block2Header.Hash(), - Header: block2Header, - Body: nil, - Justification: nil, - }, - setupChainSync: func(t *testing.T) *chainSync { - ctrl := gomock.NewController(t) - blockStateMock := NewMockBlockState(ctrl) - blockStateMock.EXPECT().HasHeader(block1Header.ParentHash).Return(true, nil) - networkMock := NewMockNetwork(ctrl) - - return &chainSync{ - blockState: blockStateMock, - network: networkMock, - } - }, - }, - } - - for tname, tt := range cases { - tt := tt - t.Run(tname, func(t *testing.T) { - 
t.Parallel() - - err := validateResponseFields(tt.requestedData, []*types.BlockData{tt.blockData}) - require.ErrorIs(t, err, tt.wantErr) - if tt.errString != "" { - require.EqualError(t, err, tt.errString) - } - }) - } -} - -func TestChainSync_isResponseAChain(t *testing.T) { - t.Parallel() - - block1Header := &types.Header{ - ParentHash: common.MustHexToHash("0x00597cb4bb4cc13bf119f6613aec7642d4c06a2e453de53d34aea6f3f1eeb504"), - Number: 2, - } - - block2Header := &types.Header{ - ParentHash: block1Header.Hash(), - Number: 3, - } - - block4Header := &types.Header{ - ParentHash: common.MustHexToHash("0x198616547187613bf119f6613aec7642d4c06a2e453de53d34aea6f390788677"), - Number: 4, - } - - cases := map[string]struct { - expected bool - blockData []*types.BlockData - }{ - "not_a_chain": { - expected: false, - blockData: []*types.BlockData{ - { - Hash: block1Header.Hash(), - Header: block1Header, - Body: &types.Body{}, - Justification: &[]byte{0}, - }, - { - Hash: block2Header.Hash(), - Header: block2Header, - Body: &types.Body{}, - Justification: &[]byte{0}, - }, - { - Hash: block4Header.Hash(), - Header: block4Header, - Body: &types.Body{}, - Justification: &[]byte{0}, - }, - }, - }, - "is_a_chain": { - expected: true, - blockData: []*types.BlockData{ - { - Hash: block1Header.Hash(), - Header: block1Header, - Body: &types.Body{}, - Justification: &[]byte{0}, - }, - { - Hash: block2Header.Hash(), - Header: block2Header, - Body: &types.Body{}, - Justification: &[]byte{0}, - }, - }, - }, - } - - for tname, tt := range cases { - tt := tt - t.Run(tname, func(t *testing.T) { - t.Parallel() - output := isResponseAChain(tt.blockData) - require.Equal(t, tt.expected, output) - }) - } -} - -func TestChainSync_doResponseGrowsTheChain(t *testing.T) { - block1Header := types.NewHeader(common.Hash{}, common.Hash{}, common.Hash{}, 1, types.NewDigest()) - block2Header := types.NewHeader(block1Header.Hash(), common.Hash{}, common.Hash{}, 2, types.NewDigest()) - block3Header := types.NewHeader(block2Header.Hash(), common.Hash{}, common.Hash{}, 3, types.NewDigest()) - block4Header := types.NewHeader(block3Header.Hash(), common.Hash{}, common.Hash{}, 4, types.NewDigest()) - - testcases := map[string]struct { - response []*types.BlockData - ongoingChain []*types.BlockData - startAt uint - exepectedTotal uint32 - expectedOut bool - }{ - // the ongoing chain does not have any data so the response - // can be inserted in the ongoing chain without any problems - "empty_ongoing_chain": { - ongoingChain: []*types.BlockData{}, - expectedOut: true, - }, - - "one_in_response_growing_ongoing_chain_without_check": { - startAt: 1, - exepectedTotal: 3, - // the ongoing chain contains 3 positions, the block number 1 is at position 0 - ongoingChain: []*types.BlockData{ - {Header: types.NewHeader(common.Hash{}, common.Hash{}, common.Hash{}, 1, types.NewDigest())}, - nil, - nil, - }, - - // the response contains the block number 3 which should be placed in position 2 - // in the ongoing chain, which means that no comparison should be done to place - // block number 3 in the ongoing chain - response: []*types.BlockData{ - {Header: types.NewHeader(common.Hash{}, common.Hash{}, common.Hash{}, 3, types.NewDigest())}, - }, - expectedOut: true, - }, - - "one_in_response_growing_ongoing_chain_by_checking_neighbours": { - startAt: 1, - exepectedTotal: 3, - // the ongoing chain contains 3 positions, the block number 1 is at position 0 - ongoingChain: []*types.BlockData{ - {Header: block1Header}, - nil, - {Header: block3Header}, - }, - - 
// the response contains the block number 2 which should be placed in position 1 - // in the ongoing chain, which means that a comparison should be made to check - // if the parent hash of block 2 is the same hash of block 1 - response: []*types.BlockData{ - {Header: block2Header}, - }, - expectedOut: true, - }, - - "one_in_response_failed_to_grow_ongoing_chain": { - startAt: 1, - exepectedTotal: 3, - ongoingChain: []*types.BlockData{ - {Header: block1Header}, - nil, - nil, - }, - response: []*types.BlockData{ - {Header: types.NewHeader(common.Hash{}, common.Hash{}, common.Hash{}, 2, types.NewDigest())}, - }, - expectedOut: false, - }, - - "many_in_response_grow_ongoing_chain_only_left_check": { - startAt: 1, - exepectedTotal: 3, - ongoingChain: []*types.BlockData{ - {Header: block1Header}, - nil, - nil, - nil, - }, - response: []*types.BlockData{ - {Header: block2Header}, - {Header: block3Header}, - }, - expectedOut: true, - }, - - "many_in_response_grow_ongoing_chain_left_right_check": { - startAt: 1, - exepectedTotal: 3, - ongoingChain: []*types.BlockData{ - {Header: block1Header}, - nil, - nil, - {Header: block4Header}, - }, - response: []*types.BlockData{ - {Header: block2Header}, - {Header: block3Header}, - }, - expectedOut: true, - }, - } - - for tname, tt := range testcases { - tt := tt - - t.Run(tname, func(t *testing.T) { - out := doResponseGrowsTheChain(tt.response, tt.ongoingChain, tt.startAt, tt.exepectedTotal) - require.Equal(t, tt.expectedOut, out) - }) - } -} - -func TestChainSync_getHighestBlock(t *testing.T) { - t.Parallel() - - cases := map[string]struct { - expectedHighestBlock uint - wantErr error - chainSyncPeerViewSet *peerViewSet - }{ - "no_peer_view": { - wantErr: errNoPeers, - expectedHighestBlock: 0, - chainSyncPeerViewSet: newPeerViewSet(10), - }, - "highest_block": { - expectedHighestBlock: 500, - chainSyncPeerViewSet: &peerViewSet{ - view: map[peer.ID]peerView{ - peer.ID("peer-A"): { - number: 100, - }, - peer.ID("peer-B"): { - number: 500, - }, - }, - }, - }, - } - - for tname, tt := range cases { - tt := tt - t.Run(tname, func(t *testing.T) { - t.Parallel() - - chainSync := &chainSync{ - peerViewSet: tt.chainSyncPeerViewSet, - } - - highestBlock, err := chainSync.getHighestBlock() - require.ErrorIs(t, err, tt.wantErr) - require.Equal(t, tt.expectedHighestBlock, highestBlock) - }) - } -} -func TestChainSync_BootstrapSync_SuccessfulSync_WithInvalidJusticationBlock(t *testing.T) { - // TODO: https://github.com/ChainSafe/gossamer/issues/3468 - t.Skip() - t.Parallel() - - ctrl := gomock.NewController(t) - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) - mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, - trie.EmptyHash, 0, types.NewDigest()) - - mockNetwork := NewMockNetwork(ctrl) - mockRequestMaker := NewMockRequestMaker(ctrl) - - mockBabeVerifier := NewMockBabeVerifier(ctrl) - mockStorageState := NewMockStorageState(ctrl) - mockImportHandler := NewMockBlockImportHandler(ctrl) - mockTelemetry := NewMockTelemetry(ctrl) - mockFinalityGadget := NewMockFinalityGadget(ctrl) - - // this test expects two workers responding each request with 128 blocks which means - // we should import 256 blocks in total - blockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, 129) - const announceBlock = false - - invalidJustificationBlock := blockResponse.BlockData[90] - invalidJustification := &[]byte{0x01, 0x01, 0x01, 0x02} - 
invalidJustificationBlock.Justification = invalidJustification - - // here we split the whole set in two parts each one will be the "response" for each peer - worker1Response := &messages.BlockResponseMessage{ - BlockData: blockResponse.BlockData[:128], - } - - // the first peer will respond the from the block 1 to 128 so the ensureBlockImportFlow - // will setup the expectations starting from the genesis header until block 128 - ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData[:90], mockBlockState, - mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock) - - errVerifyBlockJustification := errors.New("VerifyBlockJustification mock error") - mockFinalityGadget.EXPECT(). - VerifyBlockJustification( - invalidJustificationBlock.Header.Hash(), - invalidJustificationBlock.Header.Number, - *invalidJustification). - Return(uint64(0), uint64(0), errVerifyBlockJustification) - - // we use gomock.Any since I cannot guarantee which peer picks which request - // but the first call to DoBlockRequest will return the first set and the second - // call will return the second set - mockRequestMaker.EXPECT(). - Do(gomock.Any(), gomock.Any(), &messages.BlockResponseMessage{}). - DoAndReturn(func(peerID, _, response any) any { - responsePtr := response.(*messages.BlockResponseMessage) - *responsePtr = *worker1Response - - fmt.Println("mocked request maker") - return nil - }) - - // setup a chain sync which holds in its peer view map - // 3 peers, each one announce block 129 as its best block number. - // We start this test with genesis block being our best block, so - // we're far behind by 128 blocks, we should execute a bootstrap - // sync request those blocks - const blocksAhead = 128 - cs := setupChainSyncToBootstrapMode(t, blocksAhead, - mockBlockState, mockNetwork, mockRequestMaker, mockBabeVerifier, - mockStorageState, mockImportHandler, mockTelemetry) - - cs.finalityGadget = mockFinalityGadget - - target := cs.peerViewSet.getTarget() - require.Equal(t, uint(blocksAhead), target) - - // include a new worker in the worker pool set, this worker - // should be an available peer that will receive a block request - // the worker pool executes the workers management - cs.workerPool.fromBlockAnnounce(peer.ID("alice")) - //cs.workerPool.fromBlockAnnounce(peer.ID("bob")) - - err := cs.requestMaxBlocksFrom(mockedGenesisHeader, networkInitialSync) - require.ErrorIs(t, err, errVerifyBlockJustification) - - err = cs.workerPool.stop() - require.NoError(t, err) - - // peer should be not in the worker pool - // peer should be in the ignore list - require.Len(t, cs.workerPool.workers, 1) -} diff --git a/dot/sync/fullsync.go b/dot/sync/fullsync.go index eb02990ec4..b20cda6328 100644 --- a/dot/sync/fullsync.go +++ b/dot/sync/fullsync.go @@ -15,7 +15,6 @@ import ( "github.com/ChainSafe/gossamer/dot/peerset" "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/internal/database" - "github.com/ChainSafe/gossamer/lib/common/variadic" "github.com/libp2p/go-libp2p/core/peer" "github.com/prometheus/client_golang/prometheus" @@ -131,7 +130,7 @@ func (f *FullSyncStrategy) NextActions() ([]*syncTask, error) { } ascendingBlockRequests := messages.NewAscendingBlockRequests( - uint32(startRequestAt), uint32(targetBlockNumber), + startRequestAt, targetBlockNumber, messages.BootstrapRequestData) reqsFromQueue = append(reqsFromQueue, ascendingBlockRequests...) 
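Editor's note on the hunks above: this patch replaces the old `variadic.Uint32OrHashFrom` starting-block constructor with `messages.NewFromBlock`. The sketch below is hypothetical and not part of the patch; it is inferred only from the call sites visible in this series (a `uint` or `common.Hash` argument and a `RawValue()` accessor), and `header` stands in for any block header in scope:

	// Minimal usage sketch, assuming NewFromBlock accepts either form:
	fromNumber := messages.NewFromBlock(uint(129))       // start request at block number 129
	fromHash := messages.NewFromBlock(header.ParentHash) // start request at a block hash
	req := messages.NewBlockRequest(*fromNumber, messages.MaxBlocksInResponse,
		messages.BootstrapRequestData, messages.Ascending)
	_ = req.StartingBlock.RawValue() // uint(129), matching the updated test assertions
	_ = fromHash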
@@ -256,7 +255,7 @@ func (f *FullSyncStrategy) IsFinished(results []*syncTaskResult) (bool, []Change f.unreadyBlocks.newDisjointFragment(validFragment) request := messages.NewBlockRequest( - *variadic.Uint32OrHashFrom(validFragment[0].Header.ParentHash), + *messages.NewFromBlock(validFragment[0].Header.ParentHash), messages.MaxBlocksInResponse, messages.BootstrapRequestData, messages.Descending) f.requestQueue.PushBack(request) @@ -364,7 +363,7 @@ func (f *FullSyncStrategy) OnBlockAnnounce(from peer.ID, msg *network.BlockAnnou if !has { f.unreadyBlocks.newIncompleteBlock(blockAnnounceHeader) logger.Infof("requesting announced block body #%d (%s)", blockAnnounceHeader.Number, blockAnnounceHeaderHash.Short()) - request := messages.NewBlockRequest(*variadic.Uint32OrHashFrom(blockAnnounceHeaderHash), + request := messages.NewBlockRequest(*messages.NewFromBlock(blockAnnounceHeaderHash), 1, messages.RequestedDataBody+messages.RequestedDataJustification, messages.Ascending) f.requestQueue.PushBack(request) } else { diff --git a/dot/sync/fullsync_test.go b/dot/sync/fullsync_test.go index 72f73197ed..b853a2af6c 100644 --- a/dot/sync/fullsync_test.go +++ b/dot/sync/fullsync_test.go @@ -12,7 +12,6 @@ import ( "github.com/ChainSafe/gossamer/dot/peerset" "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/lib/common" - "github.com/ChainSafe/gossamer/lib/common/variadic" "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/require" "go.uber.org/mock/gomock" @@ -72,8 +71,8 @@ func TestFullSyncNextActions(t *testing.T) { require.Len(t, task, int(maxRequestsAllowed)) request := task[0].request.(*messages.BlockRequestMessage) - require.Equal(t, uint32(1), request.StartingBlock.Uint32()) - require.Equal(t, uint32(128), *request.Max) + require.Equal(t, uint(1), request.StartingBlock.RawValue()) + require.Equal(t, uint(128), *request.Max) }) t.Run("having_requests_in_the_queue", func(t *testing.T) { @@ -100,13 +99,13 @@ func TestFullSyncNextActions(t *testing.T) { expectedTasks: []*messages.BlockRequestMessage{ { RequestedData: messages.RequestedDataBody, - StartingBlock: *variadic.Uint32OrHashFrom(uint32(129)), + StartingBlock: *messages.NewFromBlock(uint(129)), Direction: messages.Ascending, Max: refTo(1), }, { RequestedData: messages.BootstrapRequestData, - StartingBlock: *variadic.Uint32OrHashFrom(uint32(1)), + StartingBlock: *messages.NewFromBlock(uint(1)), Direction: messages.Ascending, Max: refTo(127), }, @@ -117,12 +116,12 @@ func TestFullSyncNextActions(t *testing.T) { rq := &requestsQueue[*messages.BlockRequestMessage]{queue: list.New()} fstReqByHash := messages.NewBlockRequest( - *variadic.Uint32OrHashFrom(common.BytesToHash([]byte{0, 1, 1, 2})), + *messages.NewFromBlock(common.BytesToHash([]byte{0, 1, 1, 2})), 1, messages.RequestedDataBody, messages.Ascending) rq.PushBack(fstReqByHash) sndReqByHash := messages.NewBlockRequest( - *variadic.Uint32OrHashFrom(common.BytesToHash([]byte{1, 2, 2, 4})), + *messages.NewFromBlock(common.BytesToHash([]byte{1, 2, 2, 4})), 1, messages.RequestedDataBody, messages.Ascending) rq.PushBack(sndReqByHash) @@ -132,13 +131,13 @@ func TestFullSyncNextActions(t *testing.T) { expectedTasks: []*messages.BlockRequestMessage{ { RequestedData: messages.RequestedDataBody, - StartingBlock: *variadic.Uint32OrHashFrom(common.BytesToHash([]byte{0, 1, 1, 2})), + StartingBlock: *messages.NewFromBlock(common.BytesToHash([]byte{0, 1, 1, 2})), Direction: messages.Ascending, Max: refTo(1), }, { RequestedData: messages.BootstrapRequestData, - 
StartingBlock: *variadic.Uint32OrHashFrom(uint32(1)), + StartingBlock: *messages.NewFromBlock(uint(1)), Direction: messages.Ascending, Max: refTo(127), }, @@ -198,7 +197,7 @@ func TestFullSyncIsFinished(t *testing.T) { // 1 -> 10 { who: peer.ID("peerA"), - request: messages.NewBlockRequest(*variadic.Uint32OrHashFrom(1), 127, + request: messages.NewBlockRequest(*messages.NewFromBlock(uint(1)), 127, messages.BootstrapRequestData, messages.Ascending), completed: true, response: fstTaskBlockResponse, @@ -208,7 +207,7 @@ func TestFullSyncIsFinished(t *testing.T) { // 129 -> 256 { who: peer.ID("peerA"), - request: messages.NewBlockRequest(*variadic.Uint32OrHashFrom(129), 127, + request: messages.NewBlockRequest(*messages.NewFromBlock(uint(129)), 127, messages.BootstrapRequestData, messages.Ascending), completed: true, response: sndTaskBlockResponse, @@ -259,7 +258,7 @@ func TestFullSyncIsFinished(t *testing.T) { require.Equal(t, len(fs.unreadyBlocks.disjointFragments[0]), len(sndTaskBlockResponse.BlockData)) expectedAncestorRequest := messages.NewBlockRequest( - *variadic.Uint32OrHashFrom(sndTaskBlockResponse.BlockData[0].Header.ParentHash), + *messages.NewFromBlock(sndTaskBlockResponse.BlockData[0].Header.ParentHash), messages.MaxBlocksInResponse, messages.BootstrapRequestData, messages.Descending) @@ -469,13 +468,13 @@ func TestFullSyncBlockAnnounce(t *testing.T) { expectedRequests := []messages.P2PMessage{ &messages.BlockRequestMessage{ RequestedData: messages.RequestedDataBody + messages.RequestedDataJustification, - StartingBlock: *variadic.Uint32OrHashFrom(block17Hash), + StartingBlock: *messages.NewFromBlock(block17Hash), Direction: messages.Ascending, Max: refTo(1), }, &messages.BlockRequestMessage{ RequestedData: messages.BootstrapRequestData, - StartingBlock: *variadic.Uint32OrHashFrom(uint32(1)), + StartingBlock: *messages.NewFromBlock(uint(1)), Direction: messages.Ascending, Max: refTo(17), }, diff --git a/dot/sync/service.go b/dot/sync/service.go index 35d505fce2..f2795688dd 100644 --- a/dot/sync/service.go +++ b/dot/sync/service.go @@ -26,6 +26,13 @@ const ( var logger = log.NewFromGlobal(log.AddContext("pkg", "sync")) +type BlockOrigin byte + +const ( + networkInitialSync BlockOrigin = iota + networkBroadcast +) + type Network interface { AllConnectedPeersIDs() []peer.ID ReportPeer(change peerset.ReputationChange, p peer.ID) diff --git a/dot/sync/worker_pool_test.go b/dot/sync/worker_pool_test.go deleted file mode 100644 index d28cb80b12..0000000000 --- a/dot/sync/worker_pool_test.go +++ /dev/null @@ -1,246 +0,0 @@ -// Copyright 2023 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "testing" - "time" - - "github.com/ChainSafe/gossamer/dot/network/messages" - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/common" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/stretchr/testify/require" - "go.uber.org/mock/gomock" - "golang.org/x/exp/maps" -) - -func TestSyncWorkerPool_useConnectedPeers(t *testing.T) { - t.Parallel() - cases := map[string]struct { - setupWorkerPool func(t *testing.T) *syncWorkerPool - exepectedWorkers []peer.ID - }{ - "no_connected_peers": { - setupWorkerPool: func(t *testing.T) *syncWorkerPool { - ctrl := gomock.NewController(t) - networkMock := NewMockNetwork(ctrl) - networkMock.EXPECT(). - AllConnectedPeersIDs(). 
- Return([]peer.ID{}) - - return newSyncWorkerPool(networkMock, nil) - }, - exepectedWorkers: []peer.ID{}, - }, - "3_available_peers": { - setupWorkerPool: func(t *testing.T) *syncWorkerPool { - ctrl := gomock.NewController(t) - networkMock := NewMockNetwork(ctrl) - networkMock.EXPECT(). - AllConnectedPeersIDs(). - Return([]peer.ID{ - peer.ID("available-1"), - peer.ID("available-2"), - peer.ID("available-3"), - }) - return newSyncWorkerPool(networkMock, nil) - }, - exepectedWorkers: []peer.ID{ - peer.ID("available-1"), - peer.ID("available-2"), - peer.ID("available-3"), - }, - }, - "2_available_peers_1_to_ignore": { - setupWorkerPool: func(t *testing.T) *syncWorkerPool { - ctrl := gomock.NewController(t) - networkMock := NewMockNetwork(ctrl) - networkMock.EXPECT(). - AllConnectedPeersIDs(). - Return([]peer.ID{ - peer.ID("available-1"), - peer.ID("available-2"), - peer.ID("available-3"), - }) - workerPool := newSyncWorkerPool(networkMock, nil) - workerPool.ignorePeers[peer.ID("available-3")] = struct{}{} - return workerPool - }, - exepectedWorkers: []peer.ID{ - peer.ID("available-1"), - peer.ID("available-2"), - }, - }, - "peer_already_in_workers_set": { - setupWorkerPool: func(t *testing.T) *syncWorkerPool { - ctrl := gomock.NewController(t) - networkMock := NewMockNetwork(ctrl) - networkMock.EXPECT(). - AllConnectedPeersIDs(). - Return([]peer.ID{ - peer.ID("available-1"), - peer.ID("available-2"), - peer.ID("available-3"), - }) - workerPool := newSyncWorkerPool(networkMock, nil) - syncWorker := &syncWorker{ - worker: &worker{}, - queue: make(chan *syncTask), - } - workerPool.workers[peer.ID("available-3")] = syncWorker - return workerPool - }, - exepectedWorkers: []peer.ID{ - peer.ID("available-1"), - peer.ID("available-2"), - peer.ID("available-3"), - }, - }, - } - - for tname, tt := range cases { - tt := tt - t.Run(tname, func(t *testing.T) { - t.Parallel() - - workerPool := tt.setupWorkerPool(t) - workerPool.useConnectedPeers() - defer workerPool.stop() - - require.ElementsMatch(t, - maps.Keys(workerPool.workers), - tt.exepectedWorkers) - }) - } -} - -func TestSyncWorkerPool_listenForRequests_submitRequest(t *testing.T) { - t.Parallel() - - ctrl := gomock.NewController(t) - networkMock := NewMockNetwork(ctrl) - requestMakerMock := NewMockRequestMaker(ctrl) - workerPool := newSyncWorkerPool(networkMock, requestMakerMock) - - availablePeer := peer.ID("available-peer") - workerPool.newPeer(availablePeer) - defer workerPool.stop() - - blockHash := common.MustHexToHash("0x750646b852a29e5f3668959916a03d6243a3137e91d0cd36870364931030f707") - blockRequest := messages.NewBlockRequest(*messages.NewFromBlock(blockHash), - 1, messages.BootstrapRequestData, messages.Ascending) - mockedBlockResponse := &messages.BlockResponseMessage{ - BlockData: []*types.BlockData{ - { - Hash: blockHash, - Header: &types.Header{ - ParentHash: common. - MustHexToHash("0x5895897f12e1a670609929433ac7a69dcae90e0cc2d9c32c0dce0e2a5e5e614e"), - }, - }, - }, - } - - // introduce a timeout of 5s then we can test the - // peer status change to busy - requestMakerMock.EXPECT(). - Do(availablePeer, blockRequest, &messages.BlockResponseMessage{}). 
- DoAndReturn(func(_, _, response any) any { - responsePtr := response.(*messages.BlockResponseMessage) - *responsePtr = *mockedBlockResponse - return nil - }) - - resultCh := make(chan *syncTaskResult) - workerPool.submitRequest(blockRequest, nil, resultCh) - - syncTaskResult := <-resultCh - require.NoError(t, syncTaskResult.err) - require.Equal(t, syncTaskResult.who, availablePeer) - require.Equal(t, syncTaskResult.request, blockRequest) - require.Equal(t, syncTaskResult.response, mockedBlockResponse) - -} - -func TestSyncWorkerPool_singleWorker_multipleRequests(t *testing.T) { - t.Parallel() - - ctrl := gomock.NewController(t) - networkMock := NewMockNetwork(ctrl) - requestMakerMock := NewMockRequestMaker(ctrl) - workerPool := newSyncWorkerPool(networkMock, requestMakerMock) - defer workerPool.stop() - - availablePeer := peer.ID("available-peer") - workerPool.newPeer(availablePeer) - - firstRequestBlockHash := common.MustHexToHash("0x750646b852a29e5f3668959916a03d6243a3137e91d0cd36870364931030f707") - firstBlockRequest := messages.NewBlockRequest(*messages.NewFromBlock(firstRequestBlockHash), - 1, messages.BootstrapRequestData, messages.Ascending) - - secondRequestBlockHash := common.MustHexToHash("0x897646b852a29e5f3668959916a03d6243a3137e91d0cd36870364931030f707") - secondBlockRequest := messages.NewBlockRequest(*messages.NewFromBlock(firstRequestBlockHash), - 1, messages.BootstrapRequestData, messages.Ascending) - - firstMockedBlockResponse := &messages.BlockResponseMessage{ - BlockData: []*types.BlockData{ - { - Hash: firstRequestBlockHash, - Header: &types.Header{ - ParentHash: common. - MustHexToHash("0x5895897f12e1a670609929433ac7a69dcae90e0cc2d9c32c0dce0e2a5e5e614e"), - }, - }, - }, - } - - secondMockedBlockResponse := &messages.BlockResponseMessage{ - BlockData: []*types.BlockData{ - { - Hash: secondRequestBlockHash, - Header: &types.Header{ - ParentHash: common. - MustHexToHash("0x8965897f12e1a670609929433ac7a69dcae90e0cc2d9c32c0dce0e2a5e5e614e"), - }, - }, - }, - } - - // introduce a timeout of 5s then we can test the - // then we can simulate a busy peer - requestMakerMock.EXPECT(). - Do(availablePeer, firstBlockRequest, &messages.BlockResponseMessage{}). - DoAndReturn(func(_, _, response any) any { - time.Sleep(5 * time.Second) - responsePtr := response.(*messages.BlockResponseMessage) - *responsePtr = *firstMockedBlockResponse - return nil - }) - - requestMakerMock.EXPECT(). - Do(availablePeer, firstBlockRequest, &messages.BlockResponseMessage{}). 
-		DoAndReturn(func(_, _, response any) any {
-			responsePtr := response.(*messages.BlockResponseMessage)
-			*responsePtr = *secondMockedBlockResponse
-			return nil
-		})
-
-	resultCh := workerPool.submitRequests(
-		[]*messages.BlockRequestMessage{firstBlockRequest, secondBlockRequest})
-
-	syncTaskResult := <-resultCh
-	require.NoError(t, syncTaskResult.err)
-	require.Equal(t, syncTaskResult.who, availablePeer)
-	require.Equal(t, syncTaskResult.request, firstBlockRequest)
-	require.Equal(t, syncTaskResult.response, firstMockedBlockResponse)
-
-	syncTaskResult = <-resultCh
-	require.NoError(t, syncTaskResult.err)
-	require.Equal(t, syncTaskResult.who, availablePeer)
-	require.Equal(t, syncTaskResult.request, secondBlockRequest)
-	require.Equal(t, syncTaskResult.response, secondMockedBlockResponse)
-
-	require.Equal(t, uint(1), workerPool.totalWorkers())
-}

From 2ad6b374268e1777fd36a9f6d7efe6c438c96d58 Mon Sep 17 00:00:00 2001
From: EclesioMeloJunior
Date: Mon, 23 Sep 2024 10:13:34 -0400
Subject: [PATCH 59/74] chore: rename `IsFinished -> Process` and add named returns

---
 dot/sync/fullsync.go      | 10 ++++++++--
 dot/sync/fullsync_test.go |  6 +++---
 dot/sync/service.go       |  4 ++--
 3 files changed, 13 insertions(+), 7 deletions(-)

diff --git a/dot/sync/fullsync.go b/dot/sync/fullsync.go
index b20cda6328..7ab075e401 100644
--- a/dot/sync/fullsync.go
+++ b/dot/sync/fullsync.go
@@ -149,12 +149,18 @@ func (f *FullSyncStrategy) createTasks(requests []*messages.BlockRequestMessage)
 	return tasks
 }
 
-func (f *FullSyncStrategy) IsFinished(results []*syncTaskResult) (bool, []Change, []peer.ID, error) {
+// Process receives the peer-to-peer block request responses and checks
+// whether the block data in each response can be imported into the state,
+// completes an incomplete block, or belongs to a disjoint block set.
+// It returns whether the strategy is finished, the peer reputations to change,
+// the peers to block/ban, and any error. FullSyncStrategy is intended to run as long as the node lives.
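+//
+// The resulting call pattern, as wired up in SyncService.runStrategy in
+// dot/sync/service.go (see the hunk below):
+//
+//	results := s.workerPool.submitRequests(tasks)
+//	done, repChanges, peersToIgnore, err := s.currentStrategy.Process(results)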
+func (f *FullSyncStrategy) Process(results []*syncTaskResult) ( + isFinished bool, reputations []Change, bans []peer.ID, err error) { repChanges, peersToIgnore, validResp := validateResults(results, f.badBlocks) logger.Debugf("evaluating %d task results, %d valid responses", len(results), len(validResp)) var highestFinalized *types.Header - highestFinalized, err := f.blockState.GetHighestFinalisedHeader() + highestFinalized, err = f.blockState.GetHighestFinalisedHeader() if err != nil { return false, nil, nil, fmt.Errorf("getting highest finalized header") } diff --git a/dot/sync/fullsync_test.go b/dot/sync/fullsync_test.go index b853a2af6c..1da1ac00b8 100644 --- a/dot/sync/fullsync_test.go +++ b/dot/sync/fullsync_test.go @@ -178,7 +178,7 @@ func TestFullSyncNextActions(t *testing.T) { }) } -func TestFullSyncIsFinished(t *testing.T) { +func TestFullSyncProcess(t *testing.T) { westendBlocks := &WestendBlocks{} err := yaml.Unmarshal(rawWestendBlocks, westendBlocks) require.NoError(t, err) @@ -247,7 +247,7 @@ func TestFullSyncIsFinished(t *testing.T) { fs := NewFullSyncStrategy(cfg) fs.importer = mockImporter - done, _, _, err := fs.IsFinished(syncTaskResults) + done, _, _, err := fs.Process(syncTaskResults) require.NoError(t, err) require.False(t, done) @@ -282,7 +282,7 @@ func TestFullSyncIsFinished(t *testing.T) { }, } - done, _, _, err = fs.IsFinished(syncTaskResults) + done, _, _, err = fs.Process(syncTaskResults) require.NoError(t, err) require.False(t, done) diff --git a/dot/sync/service.go b/dot/sync/service.go index f2795688dd..116976abef 100644 --- a/dot/sync/service.go +++ b/dot/sync/service.go @@ -79,7 +79,7 @@ type Strategy interface { OnBlockAnnounce(from peer.ID, msg *network.BlockAnnounceMessage) (repChange *Change, err error) OnBlockAnnounceHandshake(from peer.ID, msg *network.BlockAnnounceHandshake) error NextActions() ([]*syncTask, error) - IsFinished(results []*syncTaskResult) (done bool, repChanges []Change, blocks []peer.ID, err error) + Process(results []*syncTaskResult) (done bool, repChanges []Change, blocks []peer.ID, err error) ShowMetrics() IsSynced() bool } @@ -270,7 +270,7 @@ func (s *SyncService) runStrategy() { } results := s.workerPool.submitRequests(tasks) - done, repChanges, peersToIgnore, err := s.currentStrategy.IsFinished(results) + done, repChanges, peersToIgnore, err := s.currentStrategy.Process(results) if err != nil { logger.Criticalf("current sync strategy failed with: %s", err.Error()) return From 75a30cd5a0e7d0ee9fed974d4c1d47df882f2178 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Mon, 23 Sep 2024 10:18:35 -0400 Subject: [PATCH 60/74] chore: fix small lint err --- dot/sync/message.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dot/sync/message.go b/dot/sync/message.go index e37272f653..8ea81322f4 100644 --- a/dot/sync/message.go +++ b/dot/sync/message.go @@ -110,7 +110,7 @@ func (s *SyncService) handleAscendingRequest(req *messages.BlockRequestMessage) return nil, errRequestStartTooHigh } - startNumber = uint(startBlock) + startNumber = startBlock default: return nil, fmt.Errorf("%w, unexpected from block type: %T", ErrInvalidBlockRequest, req.StartingBlock.RawValue()) From dff8a08773b29c79e96485dd97d189fce41ffd01 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Mon, 23 Sep 2024 10:26:50 -0400 Subject: [PATCH 61/74] chore: fix `TestService_CreateBlockResponse` --- dot/network/messages/block.go | 2 +- dot/sync/message_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git 
a/dot/network/messages/block.go b/dot/network/messages/block.go index e10a3a2c12..486249e268 100644 --- a/dot/network/messages/block.go +++ b/dot/network/messages/block.go @@ -37,7 +37,7 @@ func (s SyncDirection) String() string { case Descending: return "descending" default: - return "undefined direction" + return fmt.Sprintf("undefined direction: %d", s) } } diff --git a/dot/sync/message_test.go b/dot/sync/message_test.go index 3973f8b6ac..caa304f9a9 100644 --- a/dot/sync/message_test.go +++ b/dot/sync/message_test.go @@ -196,7 +196,7 @@ func TestService_CreateBlockResponse(t *testing.T) { StartingBlock: *messages.NewFromBlock(common.Hash{}), Direction: messages.SyncDirection(3), }}, - err: fmt.Errorf("%w: 3", errInvalidRequestDirection), + err: fmt.Errorf("%w: undefined direction: 3", errInvalidRequestDirection), }, } for name, tt := range tests { From 50c4594f02873bec54f35ac270a78c297dc6862a Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Mon, 23 Sep 2024 10:27:56 -0400 Subject: [PATCH 62/74] chore: fix test `TestFullSyncNextActions` --- dot/sync/fullsync_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dot/sync/fullsync_test.go b/dot/sync/fullsync_test.go index 1da1ac00b8..8ccd35446b 100644 --- a/dot/sync/fullsync_test.go +++ b/dot/sync/fullsync_test.go @@ -72,7 +72,7 @@ func TestFullSyncNextActions(t *testing.T) { require.Len(t, task, int(maxRequestsAllowed)) request := task[0].request.(*messages.BlockRequestMessage) require.Equal(t, uint(1), request.StartingBlock.RawValue()) - require.Equal(t, uint(128), *request.Max) + require.Equal(t, uint32(128), *request.Max) }) t.Run("having_requests_in_the_queue", func(t *testing.T) { From ee3380ba9e3f280d6e9587fae38e39dfa6459c90 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Mon, 23 Sep 2024 10:38:49 -0400 Subject: [PATCH 63/74] chore: fix test `TestCreateNotificationsMessageHandler_BlockAnnounceHandshake` --- dot/network/helpers_test.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/dot/network/helpers_test.go b/dot/network/helpers_test.go index d48c5d4176..7962ccbbe9 100644 --- a/dot/network/helpers_test.go +++ b/dot/network/helpers_test.go @@ -257,6 +257,10 @@ func createTestService(t *testing.T, cfg *Config) (srvc *Service) { CreateBlockResponse(gomock.Any(), gomock.Any()). Return(newTestBlockResponseMessage(t), nil).AnyTimes() + syncer.EXPECT(). + OnConnectionClosed(gomock.AssignableToTypeOf(peer.ID(string("")))). 
+ AnyTimes() + syncer.EXPECT().IsSynced().Return(false).AnyTimes() cfg.Syncer = syncer } From 51e281cdadd8641a9119f528361d2c4d4cd6e5de Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Mon, 23 Sep 2024 19:46:23 -0400 Subject: [PATCH 64/74] chore: solve discovery test problem --- dot/peerset/peerstate.go | 1 + dot/services.go | 1 + dot/sync/configuration.go | 6 ++++++ dot/sync/fullsync.go | 4 ++-- dot/sync/service.go | 6 ++---- 5 files changed, 12 insertions(+), 6 deletions(-) diff --git a/dot/peerset/peerstate.go b/dot/peerset/peerstate.go index 8b69502f51..843af40472 100644 --- a/dot/peerset/peerstate.go +++ b/dot/peerset/peerstate.go @@ -85,6 +85,7 @@ func newNode(n int) *node { return &node{ state: sets, + reputation: 0, lastConnected: lastConnected, } } diff --git a/dot/services.go b/dot/services.go index 11d57cac0d..a5c3912f0e 100644 --- a/dot/services.go +++ b/dot/services.go @@ -533,6 +533,7 @@ func (nodeBuilder) newSyncService(config *cfg.Config, st *state.Service, fg sync sync.WithBlockState(st.Block), sync.WithSlotDuration(slotDuration), sync.WithStrategies(fullSync, nil), + sync.WithMinPeers(config.Network.MinPeers), ), nil } diff --git a/dot/sync/configuration.go b/dot/sync/configuration.go index 2b0f394af9..e144a87cbc 100644 --- a/dot/sync/configuration.go +++ b/dot/sync/configuration.go @@ -32,3 +32,9 @@ func WithSlotDuration(slotDuration time.Duration) ServiceConfig { svc.slotDuration = slotDuration } } + +func WithMinPeers(min int) ServiceConfig { + return func(svc *SyncService) { + svc.minPeers = min + } +} diff --git a/dot/sync/fullsync.go b/dot/sync/fullsync.go index 7ab075e401..6b89625bcc 100644 --- a/dot/sync/fullsync.go +++ b/dot/sync/fullsync.go @@ -341,8 +341,7 @@ func (f *FullSyncStrategy) OnBlockAnnounce(from peer.ID, msg *network.BlockAnnou }, } - return repChange, fmt.Errorf("%w: peer %s, block number #%d (%s)", - errPeerOnInvalidFork, from, blockAnnounceHeader.Number, blockAnnounceHeaderHash.String()) + return repChange, nil } logger.Infof("relevant announced block #%d (%s)", blockAnnounceHeader.Number, blockAnnounceHeaderHash.Short()) @@ -397,6 +396,7 @@ func (f *FullSyncStrategy) IsSynced() bool { return false } + logger.Infof("highest block: %d target %d", highestBlock, f.peers.getTarget()) return uint32(highestBlock) >= f.peers.getTarget() } diff --git a/dot/sync/service.go b/dot/sync/service.go index 116976abef..2ec0fd2297 100644 --- a/dot/sync/service.go +++ b/dot/sync/service.go @@ -20,7 +20,7 @@ import ( ) const ( - waitPeersDefaultTimeout = 2 * time.Second + waitPeersDefaultTimeout = 10 * time.Second minPeersDefault = 3 ) @@ -119,7 +119,6 @@ func NewSyncService(cfgs ...ServiceConfig) *SyncService { } func (s *SyncService) waitWorkers() { - waitPeersTimer := time.NewTimer(s.waitPeersDuration) bestBlockHeader, err := s.blockState.BestBlockHeader() if err != nil { panic(fmt.Sprintf("failed to get highest finalised header: %v", err)) @@ -127,8 +126,6 @@ func (s *SyncService) waitWorkers() { for { total := s.workerPool.totalWorkers() - logger.Debugf("waiting peers...") - logger.Debugf("total workers: %d, min peers: %d", total, s.minPeers) if total >= s.minPeers { return } @@ -143,6 +140,7 @@ func (s *SyncService) waitWorkers() { break } + waitPeersTimer := time.NewTimer(s.waitPeersDuration) select { case <-waitPeersTimer.C: waitPeersTimer.Reset(s.waitPeersDuration) From f68623d072b9cb3e35c0f056c856b41dc2177fad Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Mon, 23 Sep 2024 19:55:43 -0400 Subject: [PATCH 65/74] chore: fix 
`TestFullSyncBlockAnnounce` --- dot/sync/fullsync_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dot/sync/fullsync_test.go b/dot/sync/fullsync_test.go index 8ccd35446b..db15272a28 100644 --- a/dot/sync/fullsync_test.go +++ b/dot/sync/fullsync_test.go @@ -431,7 +431,7 @@ func TestFullSyncBlockAnnounce(t *testing.T) { // the announced block 17 is already tracked by our node // then we will ignore it rep, err := fs.OnBlockAnnounce(sndPeer, announceOfBlock17) - require.ErrorIs(t, err, errPeerOnInvalidFork) + require.NoError(t, err) expectedReputation := &Change{ who: sndPeer, From c7fc7b368ae4d780648dd0f45cb9c1cc0b05e967 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Mon, 23 Sep 2024 20:00:08 -0400 Subject: [PATCH 66/74] chore: `TestFullSyncBlockAnnounce` --- dot/sync/fullsync.go | 1 - 1 file changed, 1 deletion(-) diff --git a/dot/sync/fullsync.go b/dot/sync/fullsync.go index 6b89625bcc..10b1c4246a 100644 --- a/dot/sync/fullsync.go +++ b/dot/sync/fullsync.go @@ -29,7 +29,6 @@ var ( errFailedToGetParent = errors.New("failed to get parent header") errNilHeaderInResponse = errors.New("expected header, received none") errNilBodyInResponse = errors.New("expected body, received none") - errPeerOnInvalidFork = errors.New("peer is on an invalid fork") errBadBlockReceived = errors.New("bad block received") blockSizeGauge = promauto.NewGauge(prometheus.GaugeOpts{ From fc201d2348958f7ff2095c5122a00ee1a7b8d6d8 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Mon, 23 Sep 2024 20:00:30 -0400 Subject: [PATCH 67/74] chore: remove unneeded file --- dot/sync/service_test.go | 10 ---------- 1 file changed, 10 deletions(-) delete mode 100644 dot/sync/service_test.go diff --git a/dot/sync/service_test.go b/dot/sync/service_test.go deleted file mode 100644 index fb555613eb..0000000000 --- a/dot/sync/service_test.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2024 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import "testing" - -func TestSyncService(t *testing.T) { - -} From 6da0e7404747dc83deb56b29969704e3b3817125 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 25 Sep 2024 10:19:05 -0400 Subject: [PATCH 68/74] chore: fix deepsource --- dot/network/helpers_test.go | 2 +- dot/network/host.go | 2 +- dot/network/message_cache_integration_test.go | 2 +- dot/network/messages/block.go | 16 +++++------ dot/network/service.go | 2 +- dot/sync/fullsync.go | 15 ++++++----- dot/sync/fullsync_test.go | 6 ++--- dot/sync/message_test.go | 2 +- dot/sync/service.go | 4 +-- dot/sync/worker_pool.go | 27 +++++++++---------- lib/grandpa/message_handler.go | 2 +- scripts/retrieve_block/retrieve_block.go | 15 +++++++---- 12 files changed, 49 insertions(+), 46 deletions(-) diff --git a/dot/network/helpers_test.go b/dot/network/helpers_test.go index 7962ccbbe9..71f5513d51 100644 --- a/dot/network/helpers_test.go +++ b/dot/network/helpers_test.go @@ -65,7 +65,7 @@ func (s *testStreamHandler) handleMessage(stream libp2pnetwork.Stream, msg messa return s.writeToStream(stream, announceHandshake) } -func (s *testStreamHandler) writeToStream(stream libp2pnetwork.Stream, msg messages.P2PMessage) error { +func (*testStreamHandler) writeToStream(stream libp2pnetwork.Stream, msg messages.P2PMessage) error { encMsg, err := msg.Encode() if err != nil { return err diff --git a/dot/network/host.go b/dot/network/host.go index 85d1c7d093..94fb7e0a11 100644 --- a/dot/network/host.go +++ b/dot/network/host.go @@ -239,7 +239,7 @@ func newHost(ctx context.Context, cfg 
*Config) (*host, error) { NumCounters: int64(float64(cacheSize) * 0.05 * 2), MaxCost: int64(float64(cacheSize) * 0.95), BufferItems: 64, - Cost: func(value interface{}) int64 { + Cost: func(_ interface{}) int64 { return int64(1) }, } diff --git a/dot/network/message_cache_integration_test.go b/dot/network/message_cache_integration_test.go index 8eb346d276..19a224701e 100644 --- a/dot/network/message_cache_integration_test.go +++ b/dot/network/message_cache_integration_test.go @@ -24,7 +24,7 @@ func TestMessageCache(t *testing.T) { NumCounters: int64(float64(cacheSize) * 0.05 * 2), MaxCost: int64(float64(cacheSize) * 0.95), BufferItems: 64, - Cost: func(value interface{}) int64 { + Cost: func(_ interface{}) int64 { return int64(1) }, }, 800*time.Millisecond) diff --git a/dot/network/messages/block.go b/dot/network/messages/block.go index 486249e268..09c33467ad 100644 --- a/dot/network/messages/block.go +++ b/dot/network/messages/block.go @@ -64,11 +64,11 @@ var ( ErrNilBlockInResponse = errors.New("nil block in response") ) -type fromBlockType byte +type FromBlockType byte const ( - fromBlockNumber fromBlockType = iota - fromBlockHash + FromBlockNumber FromBlockType = iota + FromBlockHash ) type FromBlock struct { @@ -90,7 +90,7 @@ func (x *FromBlock) RawValue() any { } // Encode will encode a FromBlock into a 4 bytes representation -func (x *FromBlock) Encode() (fromBlockType, []byte) { +func (x *FromBlock) Encode() (FromBlockType, []byte) { switch rawValue := x.value.(type) { case uint: encoded := make([]byte, 4) @@ -98,9 +98,9 @@ func (x *FromBlock) Encode() (fromBlockType, []byte) { rawValue = math.MaxUint32 } binary.LittleEndian.PutUint32(encoded, uint32(rawValue)) - return fromBlockNumber, encoded + return FromBlockNumber, encoded case common.Hash: - return fromBlockHash, rawValue.ToBytes() + return FromBlockHash, rawValue.ToBytes() default: panic(fmt.Sprintf("unsupported FromBlock type: %T", x.value)) } @@ -202,11 +202,11 @@ func (bm *BlockRequestMessage) Encode() ([]byte, error) { protoType, encoded := bm.StartingBlock.Encode() switch protoType { - case fromBlockHash: + case FromBlockHash: msg.FromBlock = &pb.BlockRequest_Hash{ Hash: encoded, } - case fromBlockNumber: + case FromBlockNumber: msg.FromBlock = &pb.BlockRequest_Number{ Number: encoded, } diff --git a/dot/network/service.go b/dot/network/service.go index d7d9d8ed0b..5c333ace8c 100644 --- a/dot/network/service.go +++ b/dot/network/service.go @@ -562,7 +562,7 @@ func (s *Service) GossipMessage(msg NotificationsMessage) { logger.Errorf("message type %d not supported by any notifications protocol", msg.Type()) } -// GossipMessage gossips a notifications protocol message to our peers +// GossipMessageExcluding gossips a notifications protocol message to our peers func (s *Service) GossipMessageExcluding(msg NotificationsMessage, excluding peer.ID) { if s.host == nil || msg == nil || s.IsStopped() { return diff --git a/dot/sync/fullsync.go b/dot/sync/fullsync.go index 10b1c4246a..2fd48b0ad0 100644 --- a/dot/sync/fullsync.go +++ b/dot/sync/fullsync.go @@ -94,11 +94,12 @@ func NewFullSyncStrategy(cfg *FullSyncConfig) *FullSyncStrategy { } } -func (f *FullSyncStrategy) NextActions() ([]*syncTask, error) { +func (f *FullSyncStrategy) NextActions() ([]*SyncTask, error) { f.startedAt = time.Now() f.syncedBlocks = 0 - reqsFromQueue := []*messages.BlockRequestMessage{} + var reqsFromQueue []*messages.BlockRequestMessage + for i := 0; i < f.numOfTasks; i++ { msg, ok := f.requestQueue.PopFront() if !ok { @@ -136,10 +137,10 @@ func (f 
*FullSyncStrategy) NextActions() ([]*syncTask, error) { return f.createTasks(reqsFromQueue), nil } -func (f *FullSyncStrategy) createTasks(requests []*messages.BlockRequestMessage) []*syncTask { - tasks := make([]*syncTask, 0, len(requests)) +func (f *FullSyncStrategy) createTasks(requests []*messages.BlockRequestMessage) []*SyncTask { + tasks := make([]*SyncTask, 0, len(requests)) for _, req := range requests { - tasks = append(tasks, &syncTask{ + tasks = append(tasks, &SyncTask{ request: req, response: &messages.BlockResponseMessage{}, requestMaker: f.reqMaker, @@ -153,7 +154,7 @@ func (f *FullSyncStrategy) createTasks(requests []*messages.BlockRequestMessage) // or complete an incomplete block or is part of a disjoint block set which will // as a result it returns the if the strategy is finished, the peer reputations to change, // peers to block/ban, or an error. FullSyncStrategy is intended to run as long as the node lives. -func (f *FullSyncStrategy) Process(results []*syncTaskResult) ( +func (f *FullSyncStrategy) Process(results []*SyncTaskResult) ( isFinished bool, reputations []Change, bans []peer.ID, err error) { repChanges, peersToIgnore, validResp := validateResults(results, f.badBlocks) logger.Debugf("evaluating %d task results, %d valid responses", len(results), len(validResp)) @@ -404,7 +405,7 @@ type RequestResponseData struct { responseData []*types.BlockData } -func validateResults(results []*syncTaskResult, badBlocks []string) (repChanges []Change, +func validateResults(results []*SyncTaskResult, badBlocks []string) (repChanges []Change, peersToBlock []peer.ID, validRes []RequestResponseData) { repChanges = make([]Change, 0) diff --git a/dot/sync/fullsync_test.go b/dot/sync/fullsync_test.go index db15272a28..4d766d4baa 100644 --- a/dot/sync/fullsync_test.go +++ b/dot/sync/fullsync_test.go @@ -112,7 +112,7 @@ func TestFullSyncNextActions(t *testing.T) { }, }, "should_remain_1_in_request_queue": { - setupRequestQueue: func(t *testing.T) *requestsQueue[*messages.BlockRequestMessage] { + setupRequestQueue: func(_ *testing.T) *requestsQueue[*messages.BlockRequestMessage] { rq := &requestsQueue[*messages.BlockRequestMessage]{queue: list.New()} fstReqByHash := messages.NewBlockRequest( @@ -192,7 +192,7 @@ func TestFullSyncProcess(t *testing.T) { require.NoError(t, err) t.Run("requested_max_but_received_less_blocks", func(t *testing.T) { - syncTaskResults := []*syncTaskResult{ + syncTaskResults := []*SyncTaskResult{ // first task // 1 -> 10 { @@ -271,7 +271,7 @@ func TestFullSyncProcess(t *testing.T) { err = ancestorSearchResponse.Decode(common.MustHexToBytes(westendBlocks.Blocks1To128)) require.NoError(t, err) - syncTaskResults = []*syncTaskResult{ + syncTaskResults = []*SyncTaskResult{ // ancestor search task // 128 -> 1 { diff --git a/dot/sync/message_test.go b/dot/sync/message_test.go index caa304f9a9..f654f04509 100644 --- a/dot/sync/message_test.go +++ b/dot/sync/message_test.go @@ -187,7 +187,7 @@ func TestService_CreateBlockResponse(t *testing.T) { }}}, }, "invalid_direction": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + blockStateBuilder: func(_ *gomock.Controller) BlockState { return nil }, args: args{ diff --git a/dot/sync/service.go b/dot/sync/service.go index 2ec0fd2297..9b6d3fbf9f 100644 --- a/dot/sync/service.go +++ b/dot/sync/service.go @@ -78,8 +78,8 @@ type Change struct { type Strategy interface { OnBlockAnnounce(from peer.ID, msg *network.BlockAnnounceMessage) (repChange *Change, err error) OnBlockAnnounceHandshake(from peer.ID, msg 
*network.BlockAnnounceHandshake) error - NextActions() ([]*syncTask, error) - Process(results []*syncTaskResult) (done bool, repChanges []Change, blocks []peer.ID, err error) + NextActions() ([]*SyncTask, error) + Process(results []*SyncTaskResult) (done bool, repChanges []Change, blocks []peer.ID, err error) ShowMetrics() IsSynced() bool } diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index d0e501fb1c..bbc3d4bdfe 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -24,13 +24,13 @@ const ( maxRequestsAllowed uint = 3 ) -type syncTask struct { +type SyncTask struct { requestMaker network.RequestMaker request messages.P2PMessage response messages.P2PMessage } -type syncTaskResult struct { +type SyncTaskResult struct { who peer.ID completed bool request messages.P2PMessage @@ -43,8 +43,6 @@ type syncWorkerPool struct { network Network workers map[peer.ID]struct{} ignorePeers map[peer.ID]struct{} - - sharedGuard chan struct{} } func newSyncWorkerPool(net Network) *syncWorkerPool { @@ -52,7 +50,6 @@ func newSyncWorkerPool(net Network) *syncWorkerPool { network: net, workers: make(map[peer.ID]struct{}), ignorePeers: make(map[peer.ID]struct{}), - sharedGuard: make(chan struct{}, maxRequestsAllowed), } return swp @@ -80,7 +77,7 @@ func (s *syncWorkerPool) fromBlockAnnounceHandshake(who peer.ID) error { // submitRequests blocks until all tasks have been completed or there are no workers // left in the pool to retry failed tasks -func (s *syncWorkerPool) submitRequests(tasks []*syncTask) []*syncTaskResult { +func (s *syncWorkerPool) submitRequests(tasks []*SyncTask) []*SyncTaskResult { if len(tasks) == 0 { return nil } @@ -94,13 +91,13 @@ func (s *syncWorkerPool) submitRequests(tasks []*syncTask) []*syncTaskResult { workerPool <- worker } - failedTasks := make(chan *syncTask, len(tasks)) - results := make(chan *syncTaskResult, len(tasks)) + failedTasks := make(chan *SyncTask, len(tasks)) + results := make(chan *SyncTaskResult, len(tasks)) var wg sync.WaitGroup for _, task := range tasks { wg.Add(1) - go func(t *syncTask) { + go func(t *SyncTask) { defer wg.Done() executeTask(t, workerPool, failedTasks, results) }(task) @@ -112,12 +109,12 @@ func (s *syncWorkerPool) submitRequests(tasks []*syncTask) []*syncTaskResult { for task := range failedTasks { if len(workerPool) > 0 { wg.Add(1) - go func(t *syncTask) { + go func(t *SyncTask) { defer wg.Done() executeTask(t, workerPool, failedTasks, results) }(task) } else { - results <- &syncTaskResult{ + results <- &SyncTaskResult{ completed: false, request: task.request, response: nil, @@ -126,11 +123,11 @@ func (s *syncWorkerPool) submitRequests(tasks []*syncTask) []*syncTaskResult { } }() - allResults := make(chan []*syncTaskResult, 1) + allResults := make(chan []*SyncTaskResult, 1) wg.Add(1) go func(expectedResults int) { defer wg.Done() - var taskResults []*syncTaskResult + var taskResults []*SyncTaskResult for result := range results { taskResults = append(taskResults, result) @@ -150,7 +147,7 @@ func (s *syncWorkerPool) submitRequests(tasks []*syncTask) []*syncTaskResult { return <-allResults } -func executeTask(task *syncTask, workerPool chan peer.ID, failedTasks chan *syncTask, results chan *syncTaskResult) { +func executeTask(task *SyncTask, workerPool chan peer.ID, failedTasks chan *SyncTask, results chan *SyncTaskResult) { worker := <-workerPool logger.Infof("[EXECUTING] worker %s", worker) @@ -161,7 +158,7 @@ func executeTask(task *syncTask, workerPool chan peer.ID, failedTasks chan *sync } else { 
logger.Infof("[FINISHED] worker %s, request: %s", worker, task.request) workerPool <- worker - results <- &syncTaskResult{ + results <- &SyncTaskResult{ who: worker, completed: true, request: task.request, diff --git a/lib/grandpa/message_handler.go b/lib/grandpa/message_handler.go index c9ce389234..47837dc797 100644 --- a/lib/grandpa/message_handler.go +++ b/lib/grandpa/message_handler.go @@ -82,7 +82,7 @@ func (h *MessageHandler) handleMessage(from peer.ID, m GrandpaMessage) (network. } } -func (h *MessageHandler) handleNeighbourMessage(_ *NeighbourPacketV1) error { +func (*MessageHandler) handleNeighbourMessage(_ *NeighbourPacketV1) error { // TODO(#2931) return nil } diff --git a/scripts/retrieve_block/retrieve_block.go b/scripts/retrieve_block/retrieve_block.go index 2d67049c2e..38a31af304 100644 --- a/scripts/retrieve_block/retrieve_block.go +++ b/scripts/retrieve_block/retrieve_block.go @@ -29,7 +29,8 @@ func buildRequestMessage(arg string) *messages.BlockRequestMessage { amount, err := strconv.Atoi(params[2]) if err != nil || amount < 0 { - log.Fatalf("could not parse the amount of blocks, expected positive number got: %s", params[2]) + log.Printf("could not parse the amount of blocks, expected positive number got: %s\n", params[2]) + return nil } switch strings.ToLower(params[1]) { @@ -41,7 +42,7 @@ func buildRequestMessage(arg string) *messages.BlockRequestMessage { messages.BootstrapRequestData, messages.Descending) } - log.Fatalf("not supported direction: %s, use 'asc' for ascending or 'desc' for descending", params[1]) + log.Printf("not supported direction: %s, use 'asc' for ascending or 'desc' for descending\n", params[1]) return nil } @@ -52,7 +53,8 @@ func parseTargetBlock(arg string) messages.FromBlock { value, err := strconv.Atoi(arg) if err != nil { - log.Fatalf("\ntrying to convert %v to number: %s", arg, err.Error()) + log.Printf("\ntrying to convert %v to number: %s\n", arg, err.Error()) + return messages.FromBlock{} } return *messages.NewFromBlock(uint(value)) @@ -72,7 +74,8 @@ func waitAndStoreResponse(stream lip2pnetwork.Stream, outputFile string) bool { blockResponse := &messages.BlockResponseMessage{} err = blockResponse.Decode(output) if err != nil { - log.Fatalf("could not decode block response message: %s", err.Error()) + log.Printf("could not decode block response message: %s\n", err.Error()) + return false } resultOutput := strings.Builder{} @@ -85,8 +88,10 @@ func waitAndStoreResponse(stream lip2pnetwork.Stream, outputFile string) bool { log.Println(resultOutput.String()) err = os.WriteFile(outputFile, []byte(common.BytesToHex(output)), os.ModePerm) if err != nil { - log.Fatalf("failed to write response to file %s: %s", outputFile, err.Error()) + log.Printf("failed to write response to file %s: %s\n", outputFile, err.Error()) + return false } + return true } From 2f5c469e52d4ad4deea335d9aadd8c95a868b94f Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 25 Sep 2024 10:24:01 -0400 Subject: [PATCH 69/74] chore: define min peers to 1 --- dot/sync/service.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dot/sync/service.go b/dot/sync/service.go index 9b6d3fbf9f..e0c2b758a3 100644 --- a/dot/sync/service.go +++ b/dot/sync/service.go @@ -21,7 +21,7 @@ import ( const ( waitPeersDefaultTimeout = 10 * time.Second - minPeersDefault = 3 + minPeersDefault = 1 ) var logger = log.NewFromGlobal(log.AddContext("pkg", "sync")) From 266d147fea8dce4bbefc1e6ba8162133355e0a31 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 25 Sep 2024 
14:02:13 -0400 Subject: [PATCH 70/74] chore: fix retrieveFirstNonOriginBlockSlot when hash is not imported yet --- dot/network/messages/block.go | 15 +++++++++++++-- dot/state/epoch.go | 4 ++++ dot/sync/worker_pool.go | 4 ++-- 3 files changed, 19 insertions(+), 4 deletions(-) diff --git a/dot/network/messages/block.go b/dot/network/messages/block.go index 09c33467ad..b65a593594 100644 --- a/dot/network/messages/block.go +++ b/dot/network/messages/block.go @@ -89,6 +89,17 @@ func (x *FromBlock) RawValue() any { return x.value } +func (x *FromBlock) String() string { + switch rawValue := x.value.(type) { + case uint: + return fmt.Sprintf("%d", rawValue) + case common.Hash: + return rawValue.String() + default: + panic(fmt.Sprintf("unsupported FromBlock type: %T", x.value)) + } +} + // Encode will encode a FromBlock into a 4 bytes representation func (x *FromBlock) Encode() (FromBlockType, []byte) { switch rawValue := x.value.(type) { @@ -180,9 +191,9 @@ func (bm *BlockRequestMessage) String() string { if bm.Max != nil { max = *bm.Max } - return fmt.Sprintf("BlockRequestMessage RequestedData=%d StartingBlock=%v Direction=%d Max=%d", + return fmt.Sprintf("BlockRequestMessage RequestedData=%d StartingBlock=%s Direction=%d Max=%d", bm.RequestedData, - bm.StartingBlock, + bm.StartingBlock.String(), bm.Direction, max) } diff --git a/dot/state/epoch.go b/dot/state/epoch.go index 3b72244bb9..4e472d17cc 100644 --- a/dot/state/epoch.go +++ b/dot/state/epoch.go @@ -192,6 +192,10 @@ func (s *EpochState) GetEpochForBlock(header *types.Header) (uint64, error) { } chainFirstSlotNumber, err := s.retrieveFirstNonOriginBlockSlot(header.Hash()) + if errors.Is(err, database.ErrNotFound) { + chainFirstSlotNumber, err = s.retrieveFirstNonOriginBlockSlot(header.ParentHash) + } + if err != nil { return 0, fmt.Errorf("retrieving very first slot number: %w", err) } diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index bbc3d4bdfe..b11b726db7 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -153,10 +153,10 @@ func executeTask(task *SyncTask, workerPool chan peer.ID, failedTasks chan *Sync err := task.requestMaker.Do(worker, task.request, task.response) if err != nil { - logger.Infof("[ERR] worker %s, request: %s, err: %s", worker, task.request, err.Error()) + logger.Infof("[ERR] worker %s, request: %s, err: %s", worker, task.request.String(), err.Error()) failedTasks <- task } else { - logger.Infof("[FINISHED] worker %s, request: %s", worker, task.request) + logger.Infof("[FINISHED] worker %s, request: %s", worker, task.request.String()) workerPool <- worker results <- &SyncTaskResult{ who: worker, From 109ec036b83c37dfcd0a46670ae0ee4de4f8f41e Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Thu, 26 Sep 2024 08:28:20 -0400 Subject: [PATCH 71/74] chore: bring sync promo gauge back --- dot/sync/fullsync.go | 12 +----------- dot/sync/fullsync_handle_block.go | 8 ++++++++ dot/sync/service.go | 18 +++++++++++++++++- 3 files changed, 26 insertions(+), 12 deletions(-) diff --git a/dot/sync/fullsync.go b/dot/sync/fullsync.go index 2fd48b0ad0..749dc77ebe 100644 --- a/dot/sync/fullsync.go +++ b/dot/sync/fullsync.go @@ -17,8 +17,6 @@ import ( "github.com/ChainSafe/gossamer/internal/database" "github.com/libp2p/go-libp2p/core/peer" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" ) const defaultNumOfTasks = 3 @@ -30,12 +28,6 @@ var ( errNilHeaderInResponse = errors.New("expected header, received none") errNilBodyInResponse = 
errors.New("expected body, received none") errBadBlockReceived = errors.New("bad block received") - - blockSizeGauge = promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: "gossamer_sync", - Name: "block_size", - Help: "represent the size of blocks synced", - }) ) // Config is the configuration for the sync Service. @@ -211,8 +203,6 @@ func (f *FullSyncStrategy) Process(results []*SyncTaskResult) ( disjointFragments = append(disjointFragments, fragment) } - fmt.Printf("blocks to import: %d, disjoint fragments: %d\n", len(nextBlocksToImport), len(disjointFragments)) - // this loop goal is to import ready blocks as well as update the highestFinalized header for len(nextBlocksToImport) > 0 || len(disjointFragments) > 0 { for _, blockToImport := range nextBlocksToImport { @@ -397,7 +387,7 @@ func (f *FullSyncStrategy) IsSynced() bool { } logger.Infof("highest block: %d target %d", highestBlock, f.peers.getTarget()) - return uint32(highestBlock) >= f.peers.getTarget() + return uint32(highestBlock)+messages.MaxBlocksInResponse >= f.peers.getTarget() } type RequestResponseData struct { diff --git a/dot/sync/fullsync_handle_block.go b/dot/sync/fullsync_handle_block.go index bb86b57e2d..be9ee14a50 100644 --- a/dot/sync/fullsync_handle_block.go +++ b/dot/sync/fullsync_handle_block.go @@ -15,8 +15,16 @@ import ( "github.com/ChainSafe/gossamer/internal/database" "github.com/ChainSafe/gossamer/lib/common" rtstorage "github.com/ChainSafe/gossamer/lib/runtime/storage" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" ) +var blockSizeGauge = promauto.NewGauge(prometheus.GaugeOpts{ + Namespace: "gossamer_sync", + Name: "block_size", + Help: "represent the size of blocks synced", +}) + type ( // Telemetry is the telemetry client to send telemetry messages. 
Telemetry interface { diff --git a/dot/sync/service.go b/dot/sync/service.go index e0c2b758a3..11013ff9dc 100644 --- a/dot/sync/service.go +++ b/dot/sync/service.go @@ -17,6 +17,8 @@ import ( "github.com/ChainSafe/gossamer/lib/runtime" lrucache "github.com/ChainSafe/gossamer/lib/utils/lru-cache" "github.com/libp2p/go-libp2p/core/peer" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" ) const ( @@ -24,7 +26,15 @@ const ( minPeersDefault = 1 ) -var logger = log.NewFromGlobal(log.AddContext("pkg", "sync")) +var ( + isSyncedGauge = promauto.NewGauge(prometheus.GaugeOpts{ + Namespace: "gossamer_network_syncer", + Name: "is_synced", + Help: "bool representing whether the node is synced to the head of the chain", + }) + + logger = log.NewFromGlobal(log.AddContext("pkg", "sync")) +) type BlockOrigin byte @@ -226,6 +236,12 @@ func (s *SyncService) runSyncEngine() { } s.runStrategy() + + if s.IsSynced() { + isSyncedGauge.Set(1) + } else { + isSyncedGauge.Set(0) + } } } From 29b6a0aa3a63ef127b4fee17cdd4439398852e0f Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Thu, 26 Sep 2024 15:46:53 -0400 Subject: [PATCH 72/74] chore: address comments --- dot/network/notifications.go | 4 -- ...sync_handle_block.go => block_importer.go} | 2 +- dot/sync/fullsync.go | 20 +++---- dot/sync/fullsync_test.go | 8 ++- dot/sync/mock_importer.go | 55 +++++++++++++++++++ dot/sync/mocks_generate_test.go | 3 +- dot/sync/mocks_test.go | 42 +------------- 7 files changed, 74 insertions(+), 60 deletions(-) rename dot/sync/{fullsync_handle_block.go => block_importer.go} (98%) create mode 100644 dot/sync/mock_importer.go diff --git a/dot/network/notifications.go b/dot/network/notifications.go index f58636276d..a938a64661 100644 --- a/dot/network/notifications.go +++ b/dot/network/notifications.go @@ -227,10 +227,6 @@ func (s *Service) handleHandshake(info *notificationsProtocol, stream network.St logger.Tracef("receiver: sent handshake to peer %s using protocol %s", peer, info.protocolID) - // if err := stream.CloseWrite(); err != nil { - // return fmt.Errorf("failed to close stream for writing: %s", err) - // } - return nil } diff --git a/dot/sync/fullsync_handle_block.go b/dot/sync/block_importer.go similarity index 98% rename from dot/sync/fullsync_handle_block.go rename to dot/sync/block_importer.go index be9ee14a50..3e6ab4898f 100644 --- a/dot/sync/fullsync_handle_block.go +++ b/dot/sync/block_importer.go @@ -80,7 +80,7 @@ func newBlockImporter(cfg *FullSyncConfig) *blockImporter { } } -func (b *blockImporter) handle(bd *types.BlockData, origin BlockOrigin) (imported bool, err error) { +func (b *blockImporter) importBlock(bd *types.BlockData, origin BlockOrigin) (imported bool, err error) { blockAlreadyExists, err := b.blockState.HasHeader(bd.Hash) if err != nil && !errors.Is(err, database.ErrNotFound) { return false, err diff --git a/dot/sync/fullsync.go b/dot/sync/fullsync.go index 749dc77ebe..a0930d548d 100644 --- a/dot/sync/fullsync.go +++ b/dot/sync/fullsync.go @@ -44,8 +44,8 @@ type FullSyncConfig struct { RequestMaker network.RequestMaker } -type Importer interface { - handle(*types.BlockData, BlockOrigin) (imported bool, err error) +type importer interface { + importBlock(*types.BlockData, BlockOrigin) (imported bool, err error) } // FullSyncStrategy protocol is the "default" protocol. 
@@ -61,7 +61,7 @@ type FullSyncStrategy struct { numOfTasks int startedAt time.Time syncedBlocks int - importer Importer + blockImporter importer } func NewFullSyncStrategy(cfg *FullSyncConfig) *FullSyncStrategy { @@ -74,7 +74,7 @@ func NewFullSyncStrategy(cfg *FullSyncConfig) *FullSyncStrategy { reqMaker: cfg.RequestMaker, blockState: cfg.BlockState, numOfTasks: cfg.NumOfTasks, - importer: newBlockImporter(cfg), + blockImporter: newBlockImporter(cfg), unreadyBlocks: newUnreadyBlocks(), requestQueue: &requestsQueue[*messages.BlockRequestMessage]{ queue: list.New(), @@ -183,8 +183,8 @@ func (f *FullSyncStrategy) Process(results []*SyncTaskResult) ( // disjoint fragments are pieces of the chain that could not be imported right now // because is blocks too far ahead or blocks that belongs to forks - orderedFragments := sortFragmentsOfChain(readyBlocks) - orderedFragments = mergeFragmentsOfChain(orderedFragments) + sortFragmentsOfChain(readyBlocks) + orderedFragments := mergeFragmentsOfChain(readyBlocks) nextBlocksToImport := make([]*types.BlockData, 0) disjointFragments := make([][]*types.BlockData, 0) @@ -206,7 +206,7 @@ func (f *FullSyncStrategy) Process(results []*SyncTaskResult) ( // this loop goal is to import ready blocks as well as update the highestFinalized header for len(nextBlocksToImport) > 0 || len(disjointFragments) > 0 { for _, blockToImport := range nextBlocksToImport { - imported, err := f.importer.handle(blockToImport, networkInitialSync) + imported, err := f.blockImporter.importBlock(blockToImport, networkInitialSync) if err != nil { return false, nil, nil, fmt.Errorf("while handling ready block: %w", err) } @@ -486,9 +486,9 @@ resultLoop: // note that we have fragments with single blocks, fragments with fork (in case of 8) // after sorting these fragments we end up with: // [ {1, 2, 3, 4, 5} {6, 7, 8, 9, 10} {8} {11, 12, 13, 14, 15, 16} {17} ] -func sortFragmentsOfChain(fragments [][]*types.BlockData) [][]*types.BlockData { +func sortFragmentsOfChain(fragments [][]*types.BlockData) { if len(fragments) == 0 { - return nil + return } slices.SortFunc(fragments, func(a, b []*types.BlockData) int { @@ -500,8 +500,6 @@ func sortFragmentsOfChain(fragments [][]*types.BlockData) [][]*types.BlockData { } return 1 }) - - return fragments } // mergeFragmentsOfChain expects a sorted slice of fragments and merges those diff --git a/dot/sync/fullsync_test.go b/dot/sync/fullsync_test.go index 4d766d4baa..6f11abd290 100644 --- a/dot/sync/fullsync_test.go +++ b/dot/sync/fullsync_test.go @@ -20,6 +20,8 @@ import ( _ "embed" ) +type mockBlockImporter struct{} + //go:embed testdata/westend_blocks.yaml var rawWestendBlocks []byte @@ -234,9 +236,9 @@ func TestFullSyncProcess(t *testing.T) { Return(false, nil). Times(2) - mockImporter := NewMockImporter(ctrl) + mockImporter := NewMockimporter(ctrl) mockImporter.EXPECT(). - handle(gomock.AssignableToTypeOf(&types.BlockData{}), networkInitialSync). + importBlock(gomock.AssignableToTypeOf(&types.BlockData{}), networkInitialSync). Return(true, nil). Times(10 + 128 + 128) @@ -245,7 +247,7 @@ func TestFullSyncProcess(t *testing.T) { } fs := NewFullSyncStrategy(cfg) - fs.importer = mockImporter + fs.blockImporter = mockImporter done, _, _, err := fs.Process(syncTaskResults) require.NoError(t, err) diff --git a/dot/sync/mock_importer.go b/dot/sync/mock_importer.go new file mode 100644 index 0000000000..6fb953b8b6 --- /dev/null +++ b/dot/sync/mock_importer.go @@ -0,0 +1,55 @@ +// Code generated by MockGen. DO NOT EDIT. 
+// Source: fullsync.go +// +// Generated by this command: +// +// mockgen -destination=mock_importer.go -source=fullsync.go -package=sync +// + +// Package sync is a generated GoMock package. +package sync + +import ( + reflect "reflect" + + types "github.com/ChainSafe/gossamer/dot/types" + gomock "go.uber.org/mock/gomock" +) + +// Mockimporter is a mock of importer interface. +type Mockimporter struct { + ctrl *gomock.Controller + recorder *MockimporterMockRecorder +} + +// MockimporterMockRecorder is the mock recorder for Mockimporter. +type MockimporterMockRecorder struct { + mock *Mockimporter +} + +// NewMockimporter creates a new mock instance. +func NewMockimporter(ctrl *gomock.Controller) *Mockimporter { + mock := &Mockimporter{ctrl: ctrl} + mock.recorder = &MockimporterMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *Mockimporter) EXPECT() *MockimporterMockRecorder { + return m.recorder +} + +// importBlock mocks base method. +func (m *Mockimporter) importBlock(arg0 *types.BlockData, arg1 BlockOrigin) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "importBlock", arg0, arg1) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// importBlock indicates an expected call of importBlock. +func (mr *MockimporterMockRecorder) importBlock(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "importBlock", reflect.TypeOf((*Mockimporter)(nil).importBlock), arg0, arg1) +} diff --git a/dot/sync/mocks_generate_test.go b/dot/sync/mocks_generate_test.go index 894b5747f6..a8f52d172f 100644 --- a/dot/sync/mocks_generate_test.go +++ b/dot/sync/mocks_generate_test.go @@ -3,5 +3,6 @@ package sync -//go:generate mockgen -destination=mocks_test.go -package=$GOPACKAGE . Telemetry,BlockState,StorageState,TransactionState,BabeVerifier,FinalityGadget,BlockImportHandler,Network,Importer +//go:generate mockgen -destination=mocks_test.go -package=$GOPACKAGE . Telemetry,BlockState,StorageState,TransactionState,BabeVerifier,FinalityGadget,BlockImportHandler,Network //go:generate mockgen -destination=mock_request_maker.go -package $GOPACKAGE github.com/ChainSafe/gossamer/dot/network RequestMaker +//go:generate mockgen -destination=mock_importer.go -source=fullsync.go -package=sync diff --git a/dot/sync/mocks_test.go b/dot/sync/mocks_test.go index 6ad35f501c..ef04c575d7 100644 --- a/dot/sync/mocks_test.go +++ b/dot/sync/mocks_test.go @@ -1,9 +1,9 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ChainSafe/gossamer/dot/sync (interfaces: Telemetry,BlockState,StorageState,TransactionState,BabeVerifier,FinalityGadget,BlockImportHandler,Network,Importer) +// Source: github.com/ChainSafe/gossamer/dot/sync (interfaces: Telemetry,BlockState,StorageState,TransactionState,BabeVerifier,FinalityGadget,BlockImportHandler,Network) // // Generated by this command: // -// mockgen -destination=mocks_test.go -package=sync . Telemetry,BlockState,StorageState,TransactionState,BabeVerifier,FinalityGadget,BlockImportHandler,Network,Importer +// mockgen -destination=mocks_test.go -package=sync . Telemetry,BlockState,StorageState,TransactionState,BabeVerifier,FinalityGadget,BlockImportHandler,Network // // Package sync is a generated GoMock package. 
@@ -731,41 +731,3 @@ func (mr *MockNetworkMockRecorder) ReportPeer(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportPeer", reflect.TypeOf((*MockNetwork)(nil).ReportPeer), arg0, arg1) } - -// MockImporter is a mock of Importer interface. -type MockImporter struct { - ctrl *gomock.Controller - recorder *MockImporterMockRecorder -} - -// MockImporterMockRecorder is the mock recorder for MockImporter. -type MockImporterMockRecorder struct { - mock *MockImporter -} - -// NewMockImporter creates a new mock instance. -func NewMockImporter(ctrl *gomock.Controller) *MockImporter { - mock := &MockImporter{ctrl: ctrl} - mock.recorder = &MockImporterMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockImporter) EXPECT() *MockImporterMockRecorder { - return m.recorder -} - -// handle mocks base method. -func (m *MockImporter) handle(arg0 *types.BlockData, arg1 BlockOrigin) (bool, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "handle", arg0, arg1) - ret0, _ := ret[0].(bool) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// handle indicates an expected call of handle. -func (mr *MockImporterMockRecorder) handle(arg0, arg1 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "handle", reflect.TypeOf((*MockImporter)(nil).handle), arg0, arg1) -} From 294dfe23f11d8fc7be1c101ba2e3dedf0c411a4f Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Thu, 26 Sep 2024 17:14:14 -0400 Subject: [PATCH 73/74] chore: fix lint --- dot/sync/fullsync_test.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/dot/sync/fullsync_test.go b/dot/sync/fullsync_test.go index 6f11abd290..0c9bbd4122 100644 --- a/dot/sync/fullsync_test.go +++ b/dot/sync/fullsync_test.go @@ -20,8 +20,6 @@ import ( _ "embed" ) -type mockBlockImporter struct{} - //go:embed testdata/westend_blocks.yaml var rawWestendBlocks []byte From 4cbbb974819a0a68ae87268ed61d1d3612daa294 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Mon, 30 Sep 2024 10:08:37 -0400 Subject: [PATCH 74/74] chore: nits --- dot/network/block_announce.go | 3 ++- dot/network/host.go | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/dot/network/block_announce.go b/dot/network/block_announce.go index d3ceadbe2b..a526b2f7f5 100644 --- a/dot/network/block_announce.go +++ b/dot/network/block_announce.go @@ -198,5 +198,6 @@ func (s *Service) handleBlockAnnounceMessage(from peer.ID, msg NotificationsMess } err := s.syncer.HandleBlockAnnounce(from, bam) - return err == nil, err + shouldPropagate := err == nil + return shouldPropagate, err } diff --git a/dot/network/host.go b/dot/network/host.go index 9722047ec2..a838c585b0 100644 --- a/dot/network/host.go +++ b/dot/network/host.go @@ -371,7 +371,7 @@ func (h *host) writeToStream(s network.Stream, msg messages.P2PMessage) error { } if len(encMsg) != sent { - logger.Criticalf("full message not sent: sent %d, message size %d", sent, len(encMsg)) + logger.Errorf("full message not sent: sent %d, message size %d", sent, len(encMsg)) } h.bwc.LogSentMessage(int64(sent))
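Taken together, these patches settle the sync engine into one control loop: the current strategy's NextActions builds block request tasks, the worker pool's submitRequests fans them out to connected peers (retrying failed tasks while free workers remain), and Process consumes the results until the strategy reports it is done. The sketch below illustrates that control flow under heavily simplified, illustrative types; only the names Strategy, NextActions, Process, and the submit step correspond to dot/sync above, and the real Process additionally returns reputation changes and peers to ban.

package main

import (
	"errors"
	"fmt"
)

// Task and Result are simplified stand-ins for dot/sync's SyncTask and
// SyncTaskResult; the real types carry P2P request/response messages.
type Task struct{ ID int }

type Result struct {
	TaskID    int
	Completed bool
}

// Strategy mirrors the part of the dot/sync.Strategy interface that the
// engine loop depends on, with the result tuple reduced to (done, err).
type Strategy interface {
	NextActions() ([]*Task, error)
	Process(results []*Result) (done bool, err error)
}

// runSyncEngine mirrors SyncService.runStrategy: ask the strategy for
// tasks, execute them through the pool, feed the results back, repeat.
func runSyncEngine(s Strategy, submit func([]*Task) []*Result) error {
	for {
		tasks, err := s.NextActions()
		if err != nil {
			return fmt.Errorf("getting next actions: %w", err)
		}
		done, err := s.Process(submit(tasks))
		if err != nil {
			return fmt.Errorf("processing results: %w", err)
		}
		if done {
			return nil
		}
	}
}

// toyStrategy requests one task per round and finishes after a fixed
// number of rounds; it stands in for FullSyncStrategy.
type toyStrategy struct{ rounds int }

func (t *toyStrategy) NextActions() ([]*Task, error) {
	if t.rounds <= 0 {
		return nil, errors.New("nothing left to request")
	}
	return []*Task{{ID: t.rounds}}, nil
}

func (t *toyStrategy) Process(results []*Result) (bool, error) {
	for _, r := range results {
		fmt.Printf("task %d completed=%t\n", r.TaskID, r.Completed)
	}
	t.rounds--
	return t.rounds == 0, nil
}

func main() {
	// A real pool would fan tasks out to peers, drop failing workers and
	// retry their tasks; here every task simply succeeds in place.
	submit := func(tasks []*Task) []*Result {
		results := make([]*Result, 0, len(tasks))
		for _, task := range tasks {
			results = append(results, &Result{TaskID: task.ID, Completed: true})
		}
		return results
	}

	if err := runSyncEngine(&toyStrategy{rounds: 3}, submit); err != nil {
		fmt.Println("sync engine stopped:", err)
	}
}

After each pass the real service also updates the gossamer_network_syncer is_synced gauge reintroduced in PATCH 71, so the loop's progress is visible in metrics.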