diff --git a/cmd/util/cmd/checkpoint-list-tries/cmd.go b/cmd/util/cmd/checkpoint-list-tries/cmd.go index 105f7408fb7..bfad7c18bef 100644 --- a/cmd/util/cmd/checkpoint-list-tries/cmd.go +++ b/cmd/util/cmd/checkpoint-list-tries/cmd.go @@ -28,12 +28,12 @@ func init() { func run(*cobra.Command, []string) { - flattenedForest, err := wal.LoadCheckpoint(flagCheckpoint) + tries, err := wal.LoadCheckpoint(flagCheckpoint) if err != nil { log.Fatal().Err(err).Msg("error while loading checkpoint") } - for _, trie := range flattenedForest.Tries { - fmt.Printf("%x\n", trie.RootHash) + for _, trie := range tries { + fmt.Printf("%x\n", trie.RootHash()) } } diff --git a/cmd/util/cmd/read-execution-state/list-wals/cmd.go b/cmd/util/cmd/read-execution-state/list-wals/cmd.go index 6d71edd0da4..f2a91553a55 100644 --- a/cmd/util/cmd/read-execution-state/list-wals/cmd.go +++ b/cmd/util/cmd/read-execution-state/list-wals/cmd.go @@ -11,7 +11,7 @@ import ( "github.com/onflow/flow-go/ledger" "github.com/onflow/flow-go/ledger/common/pathfinder" "github.com/onflow/flow-go/ledger/complete" - "github.com/onflow/flow-go/ledger/complete/mtrie/flattener" + "github.com/onflow/flow-go/ledger/complete/mtrie/trie" "github.com/onflow/flow-go/ledger/complete/wal" "github.com/onflow/flow-go/module/metrics" ) @@ -52,7 +52,7 @@ func run(*cobra.Command, []string) { }() err = w.ReplayLogsOnly( - func(forestSequencing *flattener.FlattenedForest) error { + func(tries []*trie.MTrie) error { fmt.Printf("forest sequencing \n") return nil }, diff --git a/ledger/complete/mtrie/flattener/encoding.go b/ledger/complete/mtrie/flattener/encoding.go index 1e7a888dae3..c2e209c9632 100644 --- a/ledger/complete/mtrie/flattener/encoding.go +++ b/ledger/complete/mtrie/flattener/encoding.go @@ -1,11 +1,13 @@ package flattener import ( + "bytes" "fmt" "io" "github.com/onflow/flow-go/ledger" "github.com/onflow/flow-go/ledger/common/encoding" + "github.com/onflow/flow-go/ledger/common/hash" 
"github.com/onflow/flow-go/ledger/common/utils" "github.com/onflow/flow-go/ledger/complete/mtrie/node" "github.com/onflow/flow-go/ledger/complete/mtrie/trie" @@ -59,26 +61,27 @@ func EncodeNode(n *node.Node, lchildIndex uint64, rchildIndex uint64) []byte { return buf } -// ReadStorableNode reads a storable node from io -func ReadStorableNode(reader io.Reader) (*StorableNode, error) { +// ReadNode reconstructs a node from data read from reader. +// TODO: reuse read buffer +func ReadNode(reader io.Reader, getNode func(nodeIndex uint64) (*node.Node, error)) (*node.Node, error) { // reading version buf := make([]byte, 2) read, err := io.ReadFull(reader, buf) if err != nil { - return nil, fmt.Errorf("error reading storable node, cannot read version part: %w", err) + return nil, fmt.Errorf("failed to read serialized node, cannot read version part: %w", err) } if read != len(buf) { - return nil, fmt.Errorf("not enough bytes read %d expected %d", read, len(buf)) + return nil, fmt.Errorf("failed to read serialized node: not enough bytes read %d expected %d", read, len(buf)) } version, _, err := utils.ReadUint16(buf) if err != nil { - return nil, fmt.Errorf("error reading storable node: %w", err) + return nil, fmt.Errorf("failed to read serialized node: %w", err) } if version > encodingDecodingVersion { - return nil, fmt.Errorf("error reading storable node: unsuported version %d > %d", version, encodingDecodingVersion) + return nil, fmt.Errorf("failed to read serialized node: unsuported version %d > %d", version, encodingDecodingVersion) } // reading fixed-length part @@ -86,55 +89,99 @@ func ReadStorableNode(reader io.Reader) (*StorableNode, error) { read, err = io.ReadFull(reader, buf) if err != nil { - return nil, fmt.Errorf("error reading storable node, cannot read fixed-length part: %w", err) + return nil, fmt.Errorf("failed to read serialized node, cannot read fixed-length part: %w", err) } if read != len(buf) { - return nil, fmt.Errorf("not enough bytes read %d 
expected %d", read, len(buf)) + return nil, fmt.Errorf("failed to read serialized node: not enough bytes read %d expected %d", read, len(buf)) } - storableNode := &StorableNode{} + var height, maxDepth uint16 + var lchildIndex, rchildIndex, regCount uint64 + var path, hashValue, encPayload []byte - storableNode.Height, buf, err = utils.ReadUint16(buf) + height, buf, err = utils.ReadUint16(buf) if err != nil { - return nil, fmt.Errorf("error reading storable node: %w", err) + return nil, fmt.Errorf("failed to read serialized node: %w", err) } - storableNode.LIndex, buf, err = utils.ReadUint64(buf) + lchildIndex, buf, err = utils.ReadUint64(buf) if err != nil { - return nil, fmt.Errorf("error reading storable node: %w", err) + return nil, fmt.Errorf("failed to read serialized node: %w", err) } - storableNode.RIndex, buf, err = utils.ReadUint64(buf) + rchildIndex, buf, err = utils.ReadUint64(buf) if err != nil { - return nil, fmt.Errorf("error reading storable node: %w", err) + return nil, fmt.Errorf("failed to read serialized node: %w", err) } - storableNode.MaxDepth, buf, err = utils.ReadUint16(buf) + maxDepth, buf, err = utils.ReadUint16(buf) if err != nil { - return nil, fmt.Errorf("error reading storable node: %w", err) + return nil, fmt.Errorf("failed to read serialized node: %w", err) } - storableNode.RegCount, _, err = utils.ReadUint64(buf) + regCount, _, err = utils.ReadUint64(buf) if err != nil { - return nil, fmt.Errorf("error reading storable node: %w", err) + return nil, fmt.Errorf("failed to read serialized node: %w", err) } - storableNode.Path, err = utils.ReadShortDataFromReader(reader) + path, err = utils.ReadShortDataFromReader(reader) if err != nil { return nil, fmt.Errorf("cannot read key data: %w", err) } - storableNode.EncPayload, err = utils.ReadLongDataFromReader(reader) + encPayload, err = utils.ReadLongDataFromReader(reader) if err != nil { return nil, fmt.Errorf("cannot read value data: %w", err) } - storableNode.HashValue, err = 
utils.ReadShortDataFromReader(reader) + hashValue, err = utils.ReadShortDataFromReader(reader) if err != nil { return nil, fmt.Errorf("cannot read hashValue data: %w", err) } - return storableNode, nil + // Create (and copy) hash from raw data. + nodeHash, err := hash.ToHash(hashValue) + if err != nil { + return nil, fmt.Errorf("failed to decode hash from checkpoint: %w", err) + } + + if len(path) > 0 { + // Create (and copy) path from raw data. + path, err := ledger.ToPath(path) + if err != nil { + return nil, fmt.Errorf("failed to decode path from checkpoint: %w", err) + } + + // Decode payload (payload data isn't copied). + payload, err := encoding.DecodePayload(encPayload) + if err != nil { + return nil, fmt.Errorf("failed to decode payload from checkpoint: %w", err) + } + + // make a copy of payload + var pl *ledger.Payload + if payload != nil { + pl = payload.DeepCopy() + } + + n := node.NewNode(int(height), nil, nil, path, pl, nodeHash, maxDepth, regCount) + return n, nil + } + + // Get left child node by node index + lchild, err := getNode(lchildIndex) + if err != nil { + return nil, fmt.Errorf("failed to find left child node: %w", err) + } + + // Get right child node by node index + rchild, err := getNode(rchildIndex) + if err != nil { + return nil, fmt.Errorf("failed to find right child node: %w", err) + } + + n := node.NewNode(int(height), lchild, rchild, ledger.DummyPath, nil, nodeHash, maxDepth, regCount) + return n, nil } // EncodeTrie encodes trie root node @@ -162,9 +209,8 @@ func EncodeTrie(rootNode *node.Node, rootIndex uint64) []byte { return buf } -// ReadStorableTrie reads a storable trie from io -func ReadStorableTrie(reader io.Reader) (*StorableTrie, error) { - storableTrie := &StorableTrie{} +// ReadTrie reconstructs a trie from data read from reader. 
+func ReadTrie(reader io.Reader, getNode func(nodeIndex uint64) (*node.Node, error)) (*trie.MTrie, error) { // reading version buf := make([]byte, 2) @@ -199,13 +245,26 @@ func ReadStorableTrie(reader io.Reader) (*StorableTrie, error) { if err != nil { return nil, fmt.Errorf("cannot read root index data: %w", err) } - storableTrie.RootIndex = rootIndex - roothash, err := utils.ReadShortDataFromReader(reader) + readRootHash, err := utils.ReadShortDataFromReader(reader) if err != nil { return nil, fmt.Errorf("cannot read roothash data: %w", err) } - storableTrie.RootHash = roothash - return storableTrie, nil + rootNode, err := getNode(rootIndex) + if err != nil { + return nil, fmt.Errorf("cannot find root node: %w", err) + } + + mtrie, err := trie.NewMTrie(rootNode) + if err != nil { + return nil, fmt.Errorf("restoring trie failed: %w", err) + } + + rootHash := mtrie.RootHash() + if !bytes.Equal(readRootHash, rootHash[:]) { + return nil, fmt.Errorf("restoring trie failed: roothash doesn't match") + } + + return mtrie, nil } diff --git a/ledger/complete/mtrie/flattener/encoding_test.go b/ledger/complete/mtrie/flattener/encoding_test.go index d4c790fb52e..b64df5bde6f 100644 --- a/ledger/complete/mtrie/flattener/encoding_test.go +++ b/ledger/complete/mtrie/flattener/encoding_test.go @@ -2,79 +2,115 @@ package flattener_test import ( "bytes" + "fmt" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/ledger" - "github.com/onflow/flow-go/ledger/common/encoding" "github.com/onflow/flow-go/ledger/common/hash" "github.com/onflow/flow-go/ledger/common/utils" "github.com/onflow/flow-go/ledger/complete/mtrie/flattener" "github.com/onflow/flow-go/ledger/complete/mtrie/node" ) -func TestStorableNode(t *testing.T) { - path := utils.PathByUint8(3) - payload := utils.LightPayload8('A', 'a') - hashValue := hash.Hash([32]byte{4, 4, 4}) - - n := node.NewNode(2137, nil, nil, path, payload, hashValue, 7, 5000) - - storableNode 
:= &flattener.StorableNode{ - LIndex: 1, - RIndex: 2, - Height: 2137, - Path: path[:], - EncPayload: encoding.EncodePayload(payload), - HashValue: hashValue[:], - MaxDepth: 7, - RegCount: 5000, - } +func TestNodeSerialization(t *testing.T) { + + path1 := utils.PathByUint8(0) + payload1 := utils.LightPayload8('A', 'a') + hashValue1 := hash.Hash([32]byte{1, 1, 1}) + + path2 := utils.PathByUint8(1) + payload2 := utils.LightPayload8('B', 'b') + hashValue2 := hash.Hash([32]byte{2, 2, 2}) + + hashValue3 := hash.Hash([32]byte{3, 3, 3}) + + leafNode1 := node.NewNode(255, nil, nil, ledger.Path(path1), payload1, hashValue1, 0, 1) + leafNode2 := node.NewNode(255, nil, nil, ledger.Path(path2), payload2, hashValue2, 0, 1) + rootNode := node.NewNode(256, leafNode1, leafNode2, ledger.DummyPath, nil, hashValue3, 1, 2) // Version 0 - expected := []byte{ + expectedLeafNode1 := []byte{ 0, 0, // encoding version - 8, 89, // height - 0, 0, 0, 0, 0, 0, 0, 1, // LIndex - 0, 0, 0, 0, 0, 0, 0, 2, // RIndex - 0, 7, // max depth - 0, 0, 0, 0, 0, 0, 19, 136, // reg count + 0, 255, // height + 0, 0, 0, 0, 0, 0, 0, 0, // LIndex + 0, 0, 0, 0, 0, 0, 0, 0, // RIndex + 0, 0, // max depth + 0, 0, 0, 0, 0, 0, 0, 1, // reg count 0, 32, // path data len - 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // path data 0, 0, 0, 25, // payload data len 0, 0, 6, 0, 0, 0, 9, 0, 1, 0, 0, 0, 3, 0, 0, 65, 0, 0, 0, 0, 0, 0, 0, 1, 97, // payload data 0, 32, // hashValue length - 4, 4, 4, 0, 0, 0, 0, 0, + 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // hashValue } - t.Run("encode", func(t *testing.T) { - data := flattener.EncodeNode(n, 1, 2) - assert.Equal(t, expected, data) + // Version 0 + expectedRootNode := []byte{ + 0, 0, // encoding version + 1, 0, // height + 0, 0, 0, 0, 0, 0, 0, 1, // LIndex + 0, 0, 0, 0, 0, 0, 0, 2, // RIndex + 0, 1, // max depth + 0, 0, 0, 0, 
0, 0, 0, 2, // reg count + 0, 0, // path data len + 0, 0, 0, 0, // payload data len + 0, 32, // hashValue length + 3, 3, 3, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, // hashValue + } + + t.Run("encode leaf node", func(t *testing.T) { + data := flattener.EncodeNode(leafNode1, 0, 0) + assert.Equal(t, expectedLeafNode1, data) }) - t.Run("decode", func(t *testing.T) { - reader := bytes.NewReader(expected) - newStorableNode, err := flattener.ReadStorableNode(reader) + t.Run("encode interim node", func(t *testing.T) { + data := flattener.EncodeNode(rootNode, 1, 2) + assert.Equal(t, expectedRootNode, data) + }) + + t.Run("decode leaf node", func(t *testing.T) { + reader := bytes.NewReader(expectedLeafNode1) + newNode, err := flattener.ReadNode(reader, func(nodeIndex uint64) (*node.Node, error) { + if nodeIndex != 0 { + return nil, fmt.Errorf("expect child node index 0, got %d", nodeIndex) + } + return nil, nil + }) + require.NoError(t, err) + assert.Equal(t, leafNode1, newNode) + }) + + t.Run("decode interim node", func(t *testing.T) { + reader := bytes.NewReader(expectedRootNode) + newNode, err := flattener.ReadNode(reader, func(nodeIndex uint64) (*node.Node, error) { + switch nodeIndex { + case 1: + return leafNode1, nil + case 2: + return leafNode2, nil + default: + return nil, fmt.Errorf("unexpected child node index %d ", nodeIndex) + } + }) require.NoError(t, err) - assert.Equal(t, storableNode, newStorableNode) + assert.Equal(t, rootNode, newNode) }) } -func TestStorableTrie(t *testing.T) { +func TestTrieSerialization(t *testing.T) { hashValue := hash.Hash([32]byte{2, 2, 2}) - rootNode := node.NewNode(256, nil, nil, ledger.DummyPath, nil, hashValue, 7, 5000) - - storableTrie := &flattener.StorableTrie{ - RootIndex: 21, - RootHash: hashValue[:], - } + rootNodeIndex := uint64(21) // Version 0 expected := []byte{ @@ -88,19 +124,19 @@ func TestStorableTrie(t *testing.T) { } t.Run("encode", func(t *testing.T) { - data := 
flattener.EncodeTrie(rootNode, 21) - + data := flattener.EncodeTrie(rootNode, rootNodeIndex) assert.Equal(t, expected, data) }) t.Run("decode", func(t *testing.T) { - reader := bytes.NewReader(expected) - - newStorableNode, err := flattener.ReadStorableTrie(reader) + trie, err := flattener.ReadTrie(reader, func(nodeIndex uint64) (*node.Node, error) { + if nodeIndex != rootNodeIndex { + return nil, fmt.Errorf("unexpected root node index %d ", nodeIndex) + } + return rootNode, nil + }) require.NoError(t, err) - - assert.Equal(t, storableTrie, newStorableNode) + assert.Equal(t, rootNode, trie.RootNode()) }) - } diff --git a/ledger/complete/mtrie/flattener/forest.go b/ledger/complete/mtrie/flattener/forest.go deleted file mode 100644 index f9628eb9671..00000000000 --- a/ledger/complete/mtrie/flattener/forest.go +++ /dev/null @@ -1,191 +0,0 @@ -package flattener - -import ( - "bytes" - "encoding/hex" - "fmt" - - "github.com/onflow/flow-go/ledger" - "github.com/onflow/flow-go/ledger/common/encoding" - "github.com/onflow/flow-go/ledger/common/hash" - "github.com/onflow/flow-go/ledger/complete/mtrie" - "github.com/onflow/flow-go/ledger/complete/mtrie/node" - "github.com/onflow/flow-go/ledger/complete/mtrie/trie" -) - -// FlattenedForest represents an Forest as a flattened data structure. -// Specifically it consists of : -// * a list of storable nodes, where references to nodes are replaced by index in the slice -// * and a list of storable tries, each referencing their respective root node by index. -// 0 is a special index, meaning nil, but is included in this list for ease of use -// and removing would make it necessary to constantly add/subtract indexes -// -// As an important property, the nodes are listed in an order which satisfies -// Descendents-First-Relationship. 
The Descendents-First-Relationship has the -// following important property: -// When re-building the Trie from the sequence of nodes, one can build the trie on the fly, -// as for each node, the children have been previously encountered. -type FlattenedForest struct { - Nodes []*StorableNode - Tries []*StorableTrie -} - -// node2indexMap maps a node pointer to the node index in the serialization -type node2indexMap map[*node.Node]uint64 - -// FlattenForest returns forest FlattenedForest, which contains all nodes and tries of the Forest. -func FlattenForest(f *mtrie.Forest) (*FlattenedForest, error) { - tries, err := f.GetTries() - if err != nil { - return nil, fmt.Errorf("cannot get cached tries root hashes: %w", err) - } - - storableTries := make([]*StorableTrie, 0, len(tries)) - storableNodes := []*StorableNode{nil} // 0th element is nil - - // assign unique value to every node - allNodes := make(node2indexMap) - allNodes[nil] = 0 // 0th element is nil - - counter := uint64(1) // start from 1, as 0 marks nil - for _, t := range tries { - for itr := NewUniqueNodeIterator(t, allNodes); itr.Next(); { - n := itr.Value() - allNodes[n] = counter - counter++ - storableNode, err := toStorableNode(n, allNodes) - if err != nil { - return nil, fmt.Errorf("failed to construct storable node: %w", err) - } - storableNodes = append(storableNodes, storableNode) - } - //fix root nodes indices - // since we indexed all nodes, root must be present - storableTrie, err := toStorableTrie(t, allNodes) - if err != nil { - return nil, fmt.Errorf("failed to construct storable trie: %w", err) - } - storableTries = append(storableTries, storableTrie) - } - - return &FlattenedForest{ - Nodes: storableNodes, - Tries: storableTries, - }, nil -} - -func toStorableNode(node *node.Node, indexForNode node2indexMap) (*StorableNode, error) { - leftIndex, found := indexForNode[node.LeftChild()] - if !found { - hash := node.LeftChild().Hash() - return nil, fmt.Errorf("internal error: missing node 
with hash %s", hex.EncodeToString(hash[:])) - } - rightIndex, found := indexForNode[node.RightChild()] - if !found { - hash := node.RightChild().Hash() - return nil, fmt.Errorf("internal error: missing node with hash %s", hex.EncodeToString(hash[:])) - } - - hash := node.Hash() - // if node is a leaf, path is a slice of 32 bytes, otherwise path is nil - var path []byte - if node.IsLeaf() { - temp := *node.Path() - path = temp[:] - } - storableNode := &StorableNode{ - LIndex: leftIndex, - RIndex: rightIndex, - Height: uint16(node.Height()), - Path: path, - EncPayload: encoding.EncodePayload(node.Payload()), - HashValue: hash[:], - MaxDepth: node.MaxDepth(), - RegCount: node.RegCount(), - } - return storableNode, nil -} - -func toStorableTrie(mtrie *trie.MTrie, indexForNode node2indexMap) (*StorableTrie, error) { - rootIndex, found := indexForNode[mtrie.RootNode()] - if !found { - hash := mtrie.RootNode().Hash() - return nil, fmt.Errorf("internal error: missing node with hash %s", hex.EncodeToString(hash[:])) - } - hash := mtrie.RootHash() - storableTrie := &StorableTrie{ - RootIndex: rootIndex, - RootHash: hash[:], - } - - return storableTrie, nil -} - -// RebuildTries construct a forest from a storable FlattenedForest -func RebuildTries(flatForest *FlattenedForest) ([]*trie.MTrie, error) { - tries := make([]*trie.MTrie, 0, len(flatForest.Tries)) - nodes, err := RebuildNodes(flatForest.Nodes) - if err != nil { - return nil, fmt.Errorf("reconstructing nodes from storables failed: %w", err) - } - - //restore tries - for _, storableTrie := range flatForest.Tries { - mtrie, err := trie.NewMTrie(nodes[storableTrie.RootIndex]) - if err != nil { - return nil, fmt.Errorf("restoring trie failed: %w", err) - } - rootHash := mtrie.RootHash() - if !bytes.Equal(storableTrie.RootHash, rootHash[:]) { - return nil, fmt.Errorf("restoring trie failed: roothash doesn't match") - } - tries = append(tries, mtrie) - } - return tries, nil -} - -// RebuildNodes generates a list of Nodes 
from a sequence of StorableNodes. -// The sequence must obey the DESCENDANTS-FIRST-RELATIONSHIP -func RebuildNodes(storableNodes []*StorableNode) ([]*node.Node, error) { - nodes := make([]*node.Node, 0, len(storableNodes)) - for i, snode := range storableNodes { - if snode == nil { - nodes = append(nodes, nil) - continue - } - if (snode.LIndex >= uint64(i)) || (snode.RIndex >= uint64(i)) { - return nil, fmt.Errorf("sequence of StorableNodes does not satisfy Descendents-First-Relationship") - } - - if len(snode.Path) > 0 { - path, err := ledger.ToPath(snode.Path) - if err != nil { - return nil, fmt.Errorf("failed to decode a path of a storableNode %w", err) - } - payload, err := encoding.DecodePayload(snode.EncPayload) - if err != nil { - return nil, fmt.Errorf("failed to decode a payload for an storableNode %w", err) - } - nodeHash, err := hash.ToHash(snode.HashValue) - if err != nil { - return nil, fmt.Errorf("failed to decode a hash of a storableNode %w", err) - } - // make a copy of payload - var pl *ledger.Payload - if payload != nil { - pl = payload.DeepCopy() - } - - node := node.NewNode(int(snode.Height), nodes[snode.LIndex], nodes[snode.RIndex], path, pl, nodeHash, snode.MaxDepth, snode.RegCount) - nodes = append(nodes, node) - continue - } - nodeHash, err := hash.ToHash(snode.HashValue) - if err != nil { - return nil, fmt.Errorf("failed to decode a hash of a storableNode %w", err) - } - node := node.NewNode(int(snode.Height), nodes[snode.LIndex], nodes[snode.RIndex], ledger.DummyPath, nil, nodeHash, snode.MaxDepth, snode.RegCount) - nodes = append(nodes, node) - } - return nodes, nil -} diff --git a/ledger/complete/mtrie/flattener/forest_test.go b/ledger/complete/mtrie/flattener/forest_test.go deleted file mode 100644 index 4762dbb7bf0..00000000000 --- a/ledger/complete/mtrie/flattener/forest_test.go +++ /dev/null @@ -1,72 +0,0 @@ -package flattener_test - -import ( - "testing" - - "github.com/stretchr/testify/assert" - 
"github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/ledger" - "github.com/onflow/flow-go/ledger/common/utils" - "github.com/onflow/flow-go/ledger/complete/mtrie" - "github.com/onflow/flow-go/ledger/complete/mtrie/flattener" - "github.com/onflow/flow-go/module/metrics" -) - -func TestForestStoreAndLoad(t *testing.T) { - - metricsCollector := &metrics.NoopCollector{} - mForest, err := mtrie.NewForest(5, metricsCollector, nil) - require.NoError(t, err) - rootHash := mForest.GetEmptyRootHash() - - p1 := utils.PathByUint8(1) - v1 := utils.LightPayload8('A', 'a') - p2 := utils.PathByUint8(2) - v2 := utils.LightPayload8('B', 'b') - p3 := utils.PathByUint8(130) - v3 := utils.LightPayload8('C', 'c') - p4 := utils.PathByUint8(131) - v4 := utils.LightPayload8('D', 'd') - p5 := utils.PathByUint8(132) - v5 := utils.LightPayload8('E', 'e') - - paths := []ledger.Path{p1, p2, p3, p4, p5} - payloads := []*ledger.Payload{v1, v2, v3, v4, v5} - - update := &ledger.TrieUpdate{RootHash: rootHash, Paths: paths, Payloads: payloads} - rootHash, err = mForest.Update(update) - require.NoError(t, err) - - p6 := utils.PathByUint8(133) - v6 := utils.LightPayload8('F', 'f') - update = &ledger.TrieUpdate{RootHash: rootHash, Paths: []ledger.Path{p6}, Payloads: []*ledger.Payload{v6}} - rootHash, err = mForest.Update(update) - require.NoError(t, err) - - forestSequencing, err := flattener.FlattenForest(mForest) - require.NoError(t, err) - - newForest, err := mtrie.NewForest(5, metricsCollector, nil) - require.NoError(t, err) - - //forests are different - assert.NotEqual(t, mForest, newForest) - - rebuiltTries, err := flattener.RebuildTries(forestSequencing) - require.NoError(t, err) - err = newForest.AddTries(rebuiltTries) - require.NoError(t, err) - - //forests are the same now - assert.Equal(t, mForest, newForest) - - read := &ledger.TrieRead{RootHash: rootHash, Paths: paths} - retPayloads, err := mForest.Read(read) - require.NoError(t, err) - newRetPayloads, err := 
newForest.Read(read) - require.NoError(t, err) - for i := range paths { - require.True(t, retPayloads[i].Equals(newRetPayloads[i])) - } -} diff --git a/ledger/complete/mtrie/flattener/storables.go b/ledger/complete/mtrie/flattener/storables.go deleted file mode 100644 index 9f35a812228..00000000000 --- a/ledger/complete/mtrie/flattener/storables.go +++ /dev/null @@ -1,18 +0,0 @@ -package flattener - -type StorableNode struct { - LIndex uint64 - RIndex uint64 - Height uint16 // Height where the node is at - Path []byte // path - EncPayload []byte // encoded data for payload - HashValue []byte - MaxDepth uint16 - RegCount uint64 -} - -// StorableTrie is a data structure for storing trie -type StorableTrie struct { - RootIndex uint64 - RootHash []byte -} diff --git a/ledger/complete/mtrie/flattener/trie.go b/ledger/complete/mtrie/flattener/trie.go deleted file mode 100644 index ec53bc564b5..00000000000 --- a/ledger/complete/mtrie/flattener/trie.go +++ /dev/null @@ -1,74 +0,0 @@ -package flattener - -import ( - "fmt" - - "github.com/onflow/flow-go/ledger/complete/mtrie/node" - "github.com/onflow/flow-go/ledger/complete/mtrie/trie" -) - -// FlattenedTrie is similar to FlattenedForest except only including a single trie -type FlattenedTrie struct { - Nodes []*StorableNode - Trie *StorableTrie -} - -// ToFlattenedForestWithASingleTrie converts the flattenedTrie into a FlattenedForest with only one trie included -func (ft *FlattenedTrie) ToFlattenedForestWithASingleTrie() *FlattenedForest { - storableTries := make([]*StorableTrie, 1) - storableTries[0] = ft.Trie - return &FlattenedForest{ - Nodes: ft.Nodes, - Tries: storableTries, - } -} - -// FlattenTrie returns the trie as a FlattenedTrie, which contains all nodes of that trie. 
-func FlattenTrie(trie *trie.MTrie) (*FlattenedTrie, error) { - storableNodes := []*StorableNode{nil} // 0th element is nil - - // assign unique value to every node - allNodes := make(map[*node.Node]uint64) - allNodes[nil] = 0 // 0th element is nil - - counter := uint64(1) // start from 1, as 0 marks nil - for itr := NewNodeIterator(trie); itr.Next(); { - n := itr.Value() - // if node not in map - if _, has := allNodes[n]; !has { - allNodes[n] = counter - counter++ - storableNode, err := toStorableNode(n, allNodes) - if err != nil { - return nil, fmt.Errorf("failed to construct storable node: %w", err) - } - storableNodes = append(storableNodes, storableNode) - } - } - // fix root nodes indices - // since we indexed all nodes, root must be present - storableTrie, err := toStorableTrie(trie, allNodes) - if err != nil { - return nil, fmt.Errorf("failed to construct storable trie: %w", err) - } - - return &FlattenedTrie{ - Nodes: storableNodes, - Trie: storableTrie, - }, nil -} - -// RebuildTrie construct a trie from a storable FlattenedForest -func RebuildTrie(flatTrie *FlattenedTrie) (*trie.MTrie, error) { - nodes, err := RebuildNodes(flatTrie.Nodes) - if err != nil { - return nil, fmt.Errorf("reconstructing nodes from storables failed: %w", err) - } - - //restore tries - mtrie, err := trie.NewMTrie(nodes[flatTrie.Trie.RootIndex]) - if err != nil { - return nil, fmt.Errorf("restoring trie failed: %w", err) - } - return mtrie, nil -} diff --git a/ledger/complete/mtrie/flattener/trie_test.go b/ledger/complete/mtrie/flattener/trie_test.go deleted file mode 100644 index f2d0f573ff2..00000000000 --- a/ledger/complete/mtrie/flattener/trie_test.go +++ /dev/null @@ -1,56 +0,0 @@ -package flattener_test - -import ( - "io/ioutil" - "os" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/ledger" - "github.com/onflow/flow-go/ledger/common/utils" - "github.com/onflow/flow-go/ledger/complete/mtrie/flattener" 
- "github.com/onflow/flow-go/ledger/complete/mtrie/trie" -) - -func TestTrieStoreAndLoad(t *testing.T) { - dir, err := ioutil.TempDir("", "test-mtrie-") - require.NoError(t, err) - defer os.RemoveAll(dir) - - emptyTrie := trie.NewEmptyMTrie() - require.NoError(t, err) - - p1 := utils.PathByUint8(1) - v1 := utils.LightPayload8('A', 'a') - p2 := utils.PathByUint8(2) - v2 := utils.LightPayload8('B', 'b') - p3 := utils.PathByUint8(130) - v3 := utils.LightPayload8('C', 'c') - p4 := utils.PathByUint8(131) - v4 := utils.LightPayload8('D', 'd') - p5 := utils.PathByUint8(132) - v5 := utils.LightPayload8('E', 'e') - - paths := []ledger.Path{p1, p2, p3, p4, p5} - payloads := []ledger.Payload{*v1, *v2, *v3, *v4, *v5} - - newTrie, err := trie.NewTrieWithUpdatedRegisters(emptyTrie, paths, payloads, true) - require.NoError(t, err) - - flattedTrie, err := flattener.FlattenTrie(newTrie) - require.NoError(t, err) - - rebuiltTrie, err := flattener.RebuildTrie(flattedTrie) - require.NoError(t, err) - - //tries are the same now - assert.Equal(t, newTrie, rebuiltTrie) - - retPayloads := newTrie.UnsafeRead(paths) - newRetPayloads := rebuiltTrie.UnsafeRead(paths) - for i := range paths { - require.True(t, retPayloads[i].Equals(newRetPayloads[i])) - } -} diff --git a/ledger/complete/wal/checkpointer.go b/ledger/complete/wal/checkpointer.go index e115293fda9..e8f48bb51a0 100644 --- a/ledger/complete/wal/checkpointer.go +++ b/ledger/complete/wal/checkpointer.go @@ -167,18 +167,8 @@ func (c *Checkpointer) Checkpoint(to int, targetWriter func() (io.WriteCloser, e } err = c.wal.replay(0, to, - func(forestSequencing *flattener.FlattenedForest) error { - tries, err := flattener.RebuildTries(forestSequencing) - if err != nil { - return err - } - for _, t := range tries { - err := forest.AddTrie(t) - if err != nil { - return err - } - } - return nil + func(tries []*trie.MTrie) error { + return forest.AddTries(tries) }, func(update *ledger.TrieUpdate) error { _, err := forest.Update(update) @@ 
-254,7 +244,20 @@ func CreateCheckpointWriterForFile(dir, filename string) (io.WriteCloser, error) }, nil } -// StoreCheckpoint writes the given checkpoint to disk, and also append with a CRC32 file checksum for integrity check. +// StoreCheckpoint writes the given tries to checkpoint file, and also appends +// a CRC32 file checksum for integrity check. +// Checkpoint file consists of a flattened forest. Specifically, it consists of: +// * a list of encoded nodes, where references to other nodes are by list index. +// * a list of encoded tries, each referencing their respective root node by index. +// Referencing to other nodes by index 0 is a special case, meaning nil. +// +// As an important property, the nodes are listed in an order which satisfies +// Descendents-First-Relationship. The Descendents-First-Relationship has the +// following important property: +// When rebuilding the trie from the sequence of nodes, build the trie on the fly, +// as for each node, the children have been previously encountered. +// TODO: evaluate alternatives to CRC32 since checkpoint file is many GB in size. +// TODO: add concurrency if the performance gains are enough to offset complexity. 
func StoreCheckpoint(writer io.Writer, tries ...*trie.MTrie) error { var err error @@ -271,7 +274,7 @@ func StoreCheckpoint(writer io.Writer, tries ...*trie.MTrie) error { return fmt.Errorf("cannot write checkpoint header: %w", err) } - // assign unique value to every node + // assign unique index to every node allNodes := make(map[*node.Node]uint64) allNodes[nil] = 0 // 0th element is nil @@ -363,12 +366,12 @@ func StoreCheckpoint(writer io.Writer, tries ...*trie.MTrie) error { return nil } -func (c *Checkpointer) LoadCheckpoint(checkpoint int) (*flattener.FlattenedForest, error) { +func (c *Checkpointer) LoadCheckpoint(checkpoint int) ([]*trie.MTrie, error) { filepath := path.Join(c.dir, NumberToFilename(checkpoint)) return LoadCheckpoint(filepath) } -func (c *Checkpointer) LoadRootCheckpoint() (*flattener.FlattenedForest, error) { +func (c *Checkpointer) LoadRootCheckpoint() ([]*trie.MTrie, error) { filepath := path.Join(c.dir, bootstrap.FilenameWALRootCheckpoint) return LoadCheckpoint(filepath) } @@ -387,7 +390,7 @@ func (c *Checkpointer) RemoveCheckpoint(checkpoint int) error { return os.Remove(path.Join(c.dir, NumberToFilename(checkpoint))) } -func LoadCheckpoint(filepath string) (*flattener.FlattenedForest, error) { +func LoadCheckpoint(filepath string) ([]*trie.MTrie, error) { file, err := os.Open(filepath) if err != nil { return nil, fmt.Errorf("cannot open checkpoint file %s: %w", filepath, err) @@ -399,7 +402,7 @@ func LoadCheckpoint(filepath string) (*flattener.FlattenedForest, error) { return readCheckpoint(file) } -func readCheckpoint(f *os.File) (*flattener.FlattenedForest, error) { +func readCheckpoint(f *os.File) ([]*trie.MTrie, error) { // Read header: magic (2 bytes) + version (2 bytes) header := make([]byte, 4) _, err := io.ReadFull(f, header) @@ -430,10 +433,10 @@ func readCheckpoint(f *os.File) (*flattener.FlattenedForest, error) { } } -// readCheckpointV3AndEarlier deserializes checkpoint file (version 3 and earlier) and returns flattened 
forest. +// readCheckpointV3AndEarlier deserializes checkpoint file (version 3 and earlier) and returns a list of tries. // Header (magic and version) are verified by the caller. // TODO: return []*trie.MTrie directly without conversion to FlattenedForest. -func readCheckpointV3AndEarlier(f *os.File, version uint16) (*flattener.FlattenedForest, error) { +func readCheckpointV3AndEarlier(f *os.File, version uint16) ([]*trie.MTrie, error) { var bufReader io.Reader = bufio.NewReader(f) crcReader := NewCRC32Reader(bufReader) @@ -461,24 +464,33 @@ func readCheckpointV3AndEarlier(f *os.File, version uint16) (*flattener.Flattene nodesCount, pos := readUint64(header, nodesCountOffset) triesCount, _ := readUint16(header, pos) - nodes := make([]*flattener.StorableNode, nodesCount+1) //+1 for 0 index meaning nil - tries := make([]*flattener.StorableTrie, triesCount) + nodes := make([]*node.Node, nodesCount+1) //+1 for 0 index meaning nil + tries := make([]*trie.MTrie, triesCount) for i := uint64(1); i <= nodesCount; i++ { - storableNode, err := flattener.ReadStorableNode(reader) + n, err := flattener.ReadNode(reader, func(nodeIndex uint64) (*node.Node, error) { + if nodeIndex >= uint64(i) { + return nil, fmt.Errorf("sequence of stored nodes does not satisfy Descendents-First-Relationship") + } + return nodes[nodeIndex], nil + }) if err != nil { - return nil, fmt.Errorf("cannot read storable node %d: %w", i, err) + return nil, fmt.Errorf("cannot read node %d: %w", i, err) } - nodes[i] = storableNode + nodes[i] = n } - // TODO version ? 
for i := uint16(0); i < triesCount; i++ { - storableTrie, err := flattener.ReadStorableTrie(reader) + trie, err := flattener.ReadTrie(reader, func(nodeIndex uint64) (*node.Node, error) { + if nodeIndex >= uint64(len(nodes)) { + return nil, fmt.Errorf("sequence of stored nodes doesn't contain node") + } + return nodes[nodeIndex], nil + }) if err != nil { return nil, fmt.Errorf("cannot read storable trie %d: %w", i, err) } - tries[i] = storableTrie + tries[i] = trie } if version == VersionV3 { @@ -496,16 +508,13 @@ func readCheckpointV3AndEarlier(f *os.File, version uint16) (*flattener.Flattene } } - return &flattener.FlattenedForest{ - Nodes: nodes, - Tries: tries, - }, nil + return tries, nil } -// readCheckpointV4 deserializes checkpoint file (version 4) and returns flattened forest. +// readCheckpointV4 deserializes checkpoint file (version 4) and returns a list of tries. // Checkpoint file header (magic and version) are verified by the caller. -func readCheckpointV4(f *os.File) (*flattener.FlattenedForest, error) { +func readCheckpointV4(f *os.File) ([]*trie.MTrie, error) { // Read footer to get node count and trie count @@ -547,23 +556,34 @@ func readCheckpointV4(f *os.File) (*flattener.FlattenedForest, error) { return nil, fmt.Errorf("cannot read header bytes: %w", err) } - nodes := make([]*flattener.StorableNode, nodesCount+1) //+1 for 0 index meaning nil - tries := make([]*flattener.StorableTrie, triesCount) + // nodes' element at index 0 is special, meaning nil. 
+ nodes := make([]*node.Node, nodesCount+1) //+1 for 0 index meaning nil + tries := make([]*trie.MTrie, triesCount) for i := uint64(1); i <= nodesCount; i++ { - storableNode, err := flattener.ReadStorableNode(reader) + n, err := flattener.ReadNode(reader, func(nodeIndex uint64) (*node.Node, error) { + if nodeIndex >= uint64(i) { + return nil, fmt.Errorf("sequence of stored nodes does not satisfy Descendents-First-Relationship") + } + return nodes[nodeIndex], nil + }) if err != nil { - return nil, fmt.Errorf("cannot read storable node %d: %w", i, err) + return nil, fmt.Errorf("cannot read node %d: %w", i, err) } - nodes[i] = storableNode + nodes[i] = n } for i := uint16(0); i < triesCount; i++ { - storableTrie, err := flattener.ReadStorableTrie(reader) + trie, err := flattener.ReadTrie(reader, func(nodeIndex uint64) (*node.Node, error) { + if nodeIndex >= uint64(len(nodes)) { + return nil, fmt.Errorf("sequence of stored nodes doesn't contain node") + } + return nodes[nodeIndex], nil + }) if err != nil { return nil, fmt.Errorf("cannot read storable trie %d: %w", i, err) } - tries[i] = storableTrie + tries[i] = trie } // Read footer again for crc32 computation @@ -586,10 +606,7 @@ func readCheckpointV4(f *os.File) (*flattener.FlattenedForest, error) { return nil, fmt.Errorf("checkpoint checksum failed! 
File contains %x but read data checksums to %x", readCrc32, calculatedCrc32) } - return &flattener.FlattenedForest{ - Nodes: nodes, - Tries: tries, - }, nil + return tries, nil } func writeUint16(buffer []byte, location int, value uint16) int { diff --git a/ledger/complete/wal/checkpointer_test.go b/ledger/complete/wal/checkpointer_test.go index 4576cca97f3..80c2374ea5f 100644 --- a/ledger/complete/wal/checkpointer_test.go +++ b/ledger/complete/wal/checkpointer_test.go @@ -22,7 +22,6 @@ import ( "github.com/onflow/flow-go/ledger/common/utils" "github.com/onflow/flow-go/ledger/complete" "github.com/onflow/flow-go/ledger/complete/mtrie" - "github.com/onflow/flow-go/ledger/complete/mtrie/flattener" "github.com/onflow/flow-go/ledger/complete/mtrie/trie" realWAL "github.com/onflow/flow-go/ledger/complete/wal" "github.com/onflow/flow-go/module/metrics" @@ -182,7 +181,7 @@ func Test_Checkpointing(t *testing.T) { require.NoError(t, err) err = wal2.Replay( - func(forestSequencing *flattener.FlattenedForest) error { + func(tries []*trie.MTrie) error { return fmt.Errorf("I should fail as there should be no checkpoints") }, func(update *ledger.TrieUpdate) error { @@ -216,8 +215,8 @@ func Test_Checkpointing(t *testing.T) { require.NoError(t, err) err = wal3.Replay( - func(forestSequencing *flattener.FlattenedForest) error { - return loadIntoForest(f3, forestSequencing) + func(tries []*trie.MTrie) error { + return f3.AddTries(tries) }, func(update *ledger.TrieUpdate) error { return fmt.Errorf("I should fail as there should be no updates") @@ -298,8 +297,8 @@ func Test_Checkpointing(t *testing.T) { updatesLeft := 1 // there should be only one update err = wal5.Replay( - func(forestSequencing *flattener.FlattenedForest) error { - return loadIntoForest(f5, forestSequencing) + func(tries []*trie.MTrie) error { + return f5.AddTries(tries) }, func(update *ledger.TrieUpdate) error { if updatesLeft == 0 { @@ -560,20 +559,6 @@ func Test_StoringLoadingCheckpoints(t *testing.T) { }) } 
-func loadIntoForest(forest *mtrie.Forest, forestSequencing *flattener.FlattenedForest) error { - tries, err := flattener.RebuildTries(forestSequencing) - if err != nil { - return err - } - for _, t := range tries { - err := forest.AddTrie(t) - if err != nil { - return err - } - } - return nil -} - type writeCloserWithErrors struct { writeError error closeError error diff --git a/ledger/complete/wal/checkpointer_versioning_test.go b/ledger/complete/wal/checkpointer_versioning_test.go index e5f86cedeca..81e412ba1c0 100644 --- a/ledger/complete/wal/checkpointer_versioning_test.go +++ b/ledger/complete/wal/checkpointer_versioning_test.go @@ -7,57 +7,29 @@ import ( "github.com/stretchr/testify/require" "github.com/onflow/flow-go/ledger" - "github.com/onflow/flow-go/ledger/complete/mtrie/flattener" ) -var v1Forest = &flattener.FlattenedForest{ - Nodes: []*flattener.StorableNode{ - nil, // node 0 is special and skipped - { - LIndex: 0, - RIndex: 0, - Height: 0, - Path: []byte{1}, - EncPayload: []byte{2}, - HashValue: []byte{3}, - MaxDepth: 1, - RegCount: 1, - }, { - LIndex: 1, - RIndex: 2, - Height: 3, - Path: []byte{11}, - EncPayload: []byte{22}, - HashValue: []byte{33}, - MaxDepth: 11, - RegCount: 11, - }, - }, - Tries: []*flattener.StorableTrie{ - { - RootIndex: 0, - RootHash: []byte{4}, - }, - { - RootIndex: 1, - RootHash: []byte{44}, - }, - }, -} - func Test_LoadingV1Checkpoint(t *testing.T) { - forest, err := LoadCheckpoint("test_data/checkpoint.v1") + expectedRootHash := [4]ledger.RootHash{ + mustToHash("568f4ec740fe3b5de88034cb7b1fbddb41548b068f31aebc8ae9189e429c5749"), // empty trie root hash + mustToHash("f53f9696b85b7428227f1b39f40b2ce07c175f58dea2b86cb6f84dc7c9fbeabd"), + mustToHash("7ac8daf34733cce3d5d03b5a1afde33a572249f81c45da91106412e94661e109"), + mustToHash("63df641430e5e0745c3d99ece6ac209467ccfdb77e362e7490a830db8e8803ae"), + } + + tries, err := LoadCheckpoint("test_data/checkpoint.v1") require.NoError(t, err) + require.Equal(t, len(expectedRootHash), 
len(tries)) - require.Equal(t, v1Forest, forest) + for i, trie := range tries { + require.Equal(t, expectedRootHash[i], trie.RootHash()) + require.True(t, trie.RootNode().VerifyCachedHash()) + } } func Test_LoadingV3Checkpoint(t *testing.T) { - forest, err := LoadCheckpoint("test_data/checkpoint.v3") - require.NoError(t, err) - expectedRootHash := [4]ledger.RootHash{ mustToHash("568f4ec740fe3b5de88034cb7b1fbddb41548b068f31aebc8ae9189e429c5749"), // empty trie root hash mustToHash("f53f9696b85b7428227f1b39f40b2ce07c175f58dea2b86cb6f84dc7c9fbeabd"), @@ -65,7 +37,7 @@ func Test_LoadingV3Checkpoint(t *testing.T) { mustToHash("63df641430e5e0745c3d99ece6ac209467ccfdb77e362e7490a830db8e8803ae"), } - tries, err := flattener.RebuildTries(forest) + tries, err := LoadCheckpoint("test_data/checkpoint.v3") require.NoError(t, err) require.Equal(t, len(expectedRootHash), len(tries)) diff --git a/ledger/complete/wal/compactor_test.go b/ledger/complete/wal/compactor_test.go index 8abab425ead..efcbb0160c5 100644 --- a/ledger/complete/wal/compactor_test.go +++ b/ledger/complete/wal/compactor_test.go @@ -14,7 +14,6 @@ import ( "github.com/onflow/flow-go/ledger" "github.com/onflow/flow-go/ledger/common/utils" "github.com/onflow/flow-go/ledger/complete/mtrie" - "github.com/onflow/flow-go/ledger/complete/mtrie/flattener" "github.com/onflow/flow-go/ledger/complete/mtrie/trie" "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/module/metrics" @@ -168,8 +167,8 @@ func Test_Compactor(t *testing.T) { require.NoError(t, err) err = wal2.Replay( - func(forestSequencing *flattener.FlattenedForest) error { - return loadIntoForest(f2, forestSequencing) + func(tries []*trie.MTrie) error { + return f2.AddTries(tries) }, func(update *ledger.TrieUpdate) error { _, err := f2.Update(update) @@ -330,17 +329,3 @@ func Test_Compactor_checkpointInterval(t *testing.T) { }) }) } - -func loadIntoForest(forest *mtrie.Forest, forestSequencing *flattener.FlattenedForest) error { - tries, err := 
flattener.RebuildTries(forestSequencing) - if err != nil { - return err - } - for _, t := range tries { - err := forest.AddTrie(t) - if err != nil { - return err - } - } - return nil -} diff --git a/ledger/complete/wal/fixtures/noopwal.go b/ledger/complete/wal/fixtures/noopwal.go index 3f88fc6e557..8f705efdbf2 100644 --- a/ledger/complete/wal/fixtures/noopwal.go +++ b/ledger/complete/wal/fixtures/noopwal.go @@ -3,7 +3,7 @@ package fixtures import ( "github.com/onflow/flow-go/ledger" "github.com/onflow/flow-go/ledger/complete/mtrie" - "github.com/onflow/flow-go/ledger/complete/mtrie/flattener" + "github.com/onflow/flow-go/ledger/complete/mtrie/trie" "github.com/onflow/flow-go/ledger/complete/wal" ) @@ -37,10 +37,10 @@ func (w *NoopWAL) ReplayOnForest(forest *mtrie.Forest) error { return nil } func (w *NoopWAL) Segments() (first, last int, err error) { return 0, 0, nil } -func (w *NoopWAL) Replay(checkpointFn func(forestSequencing *flattener.FlattenedForest) error, updateFn func(update *ledger.TrieUpdate) error, deleteFn func(ledger.RootHash) error) error { +func (w *NoopWAL) Replay(checkpointFn func(tries []*trie.MTrie) error, updateFn func(update *ledger.TrieUpdate) error, deleteFn func(ledger.RootHash) error) error { return nil } -func (w *NoopWAL) ReplayLogsOnly(checkpointFn func(forestSequencing *flattener.FlattenedForest) error, updateFn func(update *ledger.TrieUpdate) error, deleteFn func(rootHash ledger.RootHash) error) error { +func (w *NoopWAL) ReplayLogsOnly(checkpointFn func(tries []*trie.MTrie) error, updateFn func(update *ledger.TrieUpdate) error, deleteFn func(rootHash ledger.RootHash) error) error { return nil } diff --git a/ledger/complete/wal/test_data/checkpoint.v1 b/ledger/complete/wal/test_data/checkpoint.v1 index c5567395b83..86f6d15684a 100644 Binary files a/ledger/complete/wal/test_data/checkpoint.v1 and b/ledger/complete/wal/test_data/checkpoint.v1 differ diff --git a/ledger/complete/wal/wal.go b/ledger/complete/wal/wal.go index 
3ce1bdbd7aa..63b4891c599 100644 --- a/ledger/complete/wal/wal.go +++ b/ledger/complete/wal/wal.go @@ -11,7 +11,7 @@ import ( "github.com/onflow/flow-go/ledger" "github.com/onflow/flow-go/ledger/complete/mtrie" - "github.com/onflow/flow-go/ledger/complete/mtrie/flattener" + "github.com/onflow/flow-go/ledger/complete/mtrie/trie" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/utils/io" ) @@ -105,12 +105,8 @@ func (w *DiskWAL) RecordDelete(rootHash ledger.RootHash) error { func (w *DiskWAL) ReplayOnForest(forest *mtrie.Forest) error { return w.Replay( - func(forestSequencing *flattener.FlattenedForest) error { - rebuiltTries, err := flattener.RebuildTries(forestSequencing) - if err != nil { - return fmt.Errorf("rebuilding forest from sequenced nodes failed: %w", err) - } - err = forest.AddTries(rebuiltTries) + func(tries []*trie.MTrie) error { + err := forest.AddTries(tries) if err != nil { return fmt.Errorf("adding rebuilt tries to forest failed: %w", err) } @@ -132,7 +128,7 @@ func (w *DiskWAL) Segments() (first, last int, err error) { } func (w *DiskWAL) Replay( - checkpointFn func(forestSequencing *flattener.FlattenedForest) error, + checkpointFn func(tries []*trie.MTrie) error, updateFn func(update *ledger.TrieUpdate) error, deleteFn func(ledger.RootHash) error, ) error { @@ -144,7 +140,7 @@ func (w *DiskWAL) Replay( } func (w *DiskWAL) ReplayLogsOnly( - checkpointFn func(forestSequencing *flattener.FlattenedForest) error, + checkpointFn func(tries []*trie.MTrie) error, updateFn func(update *ledger.TrieUpdate) error, deleteFn func(rootHash ledger.RootHash) error, ) error { @@ -157,7 +153,7 @@ func (w *DiskWAL) ReplayLogsOnly( func (w *DiskWAL) replay( from, to int, - checkpointFn func(forestSequencing *flattener.FlattenedForest) error, + checkpointFn func(tries []*trie.MTrie) error, updateFn func(update *ledger.TrieUpdate) error, deleteFn func(rootHash ledger.RootHash) error, useCheckpoints bool, @@ -343,12 +339,12 @@ type LedgerWAL interface { 
ReplayOnForest(forest *mtrie.Forest) error Segments() (first, last int, err error) Replay( - checkpointFn func(forestSequencing *flattener.FlattenedForest) error, + checkpointFn func(tries []*trie.MTrie) error, updateFn func(update *ledger.TrieUpdate) error, deleteFn func(ledger.RootHash) error, ) error ReplayLogsOnly( - checkpointFn func(forestSequencing *flattener.FlattenedForest) error, + checkpointFn func(tries []*trie.MTrie) error, updateFn func(update *ledger.TrieUpdate) error, deleteFn func(rootHash ledger.RootHash) error, ) error