Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

(WIP) [EN Performance] Split Node into LeafNode and InterimNode #2265

Closed
wants to merge 2 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 3 additions & 3 deletions ledger/complete/mtrie/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -251,7 +251,7 @@ FUNCTION Update(height Int, node Node, paths []Path, payloads []Payload, compact
// If a compactLeaf from a higher height is carried over, then we are necessarily in case 2.a
// (node == nil and only one register to create)
if compactLeaf != nil {
return NewLeaf(compactLeaf.path, compactLeaf.payload, height)
return NewLeafNode(compactLeaf.path, compactLeaf.payload, height)
}
// No updates to make, re-use the same sub-trie
return node
Expand All @@ -260,7 +260,7 @@ FUNCTION Update(height Int, node Node, paths []Path, payloads []Payload, compact
// The remaining sub-case of 2.a (node == nil and only one register to create):
// the register payload is the input and no compactified leaf is to be carried over.
if len(paths) == 1 && node == nil && compactLeaf == nil {
return NewLeaf(paths[0], payloads[0], height)
return NewLeafNode(paths[0], payloads[0], height)
}

// case 1: we reach a non-nil leaf. Per Lemma, compactLeaf is necessarily nil
Expand All @@ -269,7 +269,7 @@ FUNCTION Update(height Int, node Node, paths []Path, payloads []Payload, compact
if len(paths) == 1 { // case 1.a.i
// the resource-exhaustion counter-measure
if node.payload != payloads[i] {
return NewLeaf(paths[i], payloads[i], height)
return NewLeafNode(paths[i], payloads[i], height)
}
return node // re-cycle the same node
}
Expand Down
28 changes: 8 additions & 20 deletions ledger/complete/mtrie/flattener/encoding.go
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ const (

const payloadEncodingVersion = 1

// encodeLeafNode encodes leaf node in the following format:
// EncodeLeafNode encodes leaf node in the following format:
// - node type (1 byte)
// - height (2 bytes)
// - hash (32 bytes)
Expand All @@ -48,7 +48,7 @@ const payloadEncodingVersion = 1
// WARNING: The returned buffer is likely to share the same underlying array as
// the scratch buffer. Caller is responsible for copying or using returned buffer
// before scratch buffer is used again.
func encodeLeafNode(n *node.Node, scratch []byte) []byte {
func EncodeLeafNode(n *node.LeafNode, scratch []byte) []byte {

encPayloadSize := encoding.EncodedPayloadLengthWithoutPrefix(n.Payload(), payloadEncodingVersion)

Expand Down Expand Up @@ -99,7 +99,7 @@ func encodeLeafNode(n *node.Node, scratch []byte) []byte {
return buf
}

// encodeInterimNode encodes interim node in the following format:
// EncodeInterimNode encodes interim node in the following format:
// - node type (1 byte)
// - height (2 bytes)
// - hash (32 bytes)
Expand All @@ -112,7 +112,7 @@ func encodeLeafNode(n *node.Node, scratch []byte) []byte {
// WARNING: The returned buffer is likely to share the same underlying array as
// the scratch buffer. Caller is responsible for copying or using returned buffer
// before scratch buffer is used again.
func encodeInterimNode(n *node.Node, lchildIndex uint64, rchildIndex uint64, scratch []byte) []byte {
func EncodeInterimNode(n *node.InterimNode, lchildIndex uint64, rchildIndex uint64, scratch []byte) []byte {

const encodedNodeSize = encNodeTypeSize +
encHeightSize +
Expand Down Expand Up @@ -155,24 +155,12 @@ func encodeInterimNode(n *node.Node, lchildIndex uint64, rchildIndex uint64, scr
return buf[:pos]
}

// EncodeNode encodes node.
// Scratch buffer is used to avoid allocs.
// WARNING: The returned buffer is likely to share the same underlying array as
// the scratch buffer. Caller is responsible for copying or using returned buffer
// before scratch buffer is used again.
func EncodeNode(n *node.Node, lchildIndex uint64, rchildIndex uint64, scratch []byte) []byte {
if n.IsLeaf() {
return encodeLeafNode(n, scratch)
}
return encodeInterimNode(n, lchildIndex, rchildIndex, scratch)
}

// ReadNode reconstructs a node from data read from reader.
// Scratch buffer is used to avoid allocs. It should be used directly instead
// of using append. This function uses len(scratch) and ignores cap(scratch),
// so any extra capacity will not be utilized.
// If len(scratch) < 1024, then a new buffer will be allocated and used.
func ReadNode(reader io.Reader, scratch []byte, getNode func(nodeIndex uint64) (*node.Node, error)) (*node.Node, error) {
func ReadNode(reader io.Reader, scratch []byte, getNode func(nodeIndex uint64) (node.Node, error)) (node.Node, error) {

// minBufSize should be large enough for interim node and leaf node with small payload.
// minBufSize is a failsafe and is only used when len(scratch) is much smaller
Expand Down Expand Up @@ -232,7 +220,7 @@ func ReadNode(reader io.Reader, scratch []byte, getNode func(nodeIndex uint64) (
return nil, fmt.Errorf("failed to read and decode payload of serialized node: %w", err)
}

node := node.NewNode(int(height), nil, nil, path, payload, nodeHash)
node := node.NewLeafNodeWithHash(path, payload, int(height), nodeHash)
return node, nil
}

Expand Down Expand Up @@ -265,7 +253,7 @@ func ReadNode(reader io.Reader, scratch []byte, getNode func(nodeIndex uint64) (
return nil, fmt.Errorf("failed to find right child node of serialized node: %w", err)
}

n := node.NewNode(int(height), lchild, rchild, ledger.DummyPath, nil, nodeHash)
n := node.NewInterimNodeWithHash(int(height), lchild, rchild, nodeHash)
return n, nil
}

Expand Down Expand Up @@ -307,7 +295,7 @@ func EncodeTrie(trie *trie.MTrie, rootIndex uint64, scratch []byte) []byte {
}

// ReadTrie reconstructs a trie from data read from reader.
func ReadTrie(reader io.Reader, scratch []byte, getNode func(nodeIndex uint64) (*node.Node, error)) (*trie.MTrie, error) {
func ReadTrie(reader io.Reader, scratch []byte, getNode func(nodeIndex uint64) (node.Node, error)) (*trie.MTrie, error) {

if len(scratch) < encodedTrieSize {
scratch = make([]byte, encodedTrieSize)
Expand Down
36 changes: 18 additions & 18 deletions ledger/complete/mtrie/flattener/encoding_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,20 +24,20 @@ func TestLeafNodeEncodingDecoding(t *testing.T) {
path1 := utils.PathByUint8(0)
payload1 := (*ledger.Payload)(nil)
hashValue1 := hash.Hash([32]byte{1, 1, 1})
leafNodeNilPayload := node.NewNode(255, nil, nil, ledger.Path(path1), payload1, hashValue1)
leafNodeNilPayload := node.NewLeafNodeWithHash(ledger.Path(path1), payload1, 255, hashValue1)

// Leaf node with empty payload (not nil)
// EmptyPayload() not used because decoded payload's value is empty slice (not nil)
path2 := utils.PathByUint8(1)
payload2 := &ledger.Payload{Value: []byte{}}
hashValue2 := hash.Hash([32]byte{2, 2, 2})
leafNodeEmptyPayload := node.NewNode(255, nil, nil, ledger.Path(path2), payload2, hashValue2)
leafNodeEmptyPayload := node.NewLeafNodeWithHash(ledger.Path(path2), payload2, 255, hashValue2)

// Leaf node with payload
path3 := utils.PathByUint8(2)
payload3 := utils.LightPayload8('A', 'a')
hashValue3 := hash.Hash([32]byte{3, 3, 3})
leafNodePayload := node.NewNode(255, nil, nil, ledger.Path(path3), payload3, hashValue3)
leafNodePayload := node.NewLeafNodeWithHash(ledger.Path(path3), payload3, 255, hashValue3)

encodedLeafNodeNilPayload := []byte{
0x00, // node type
Expand Down Expand Up @@ -88,7 +88,7 @@ func TestLeafNodeEncodingDecoding(t *testing.T) {

testCases := []struct {
name string
node *node.Node
node *node.LeafNode
encodedNode []byte
}{
{"nil payload", leafNodeNilPayload, encodedLeafNodeNilPayload},
Expand All @@ -106,7 +106,7 @@ func TestLeafNodeEncodingDecoding(t *testing.T) {
}

for _, scratch := range scratchBuffers {
encodedNode := flattener.EncodeNode(tc.node, 0, 0, scratch)
encodedNode := flattener.EncodeLeafNode(tc.node, scratch)
assert.Equal(t, tc.encodedNode, encodedNode)

if len(scratch) > 0 {
Expand All @@ -131,7 +131,7 @@ func TestLeafNodeEncodingDecoding(t *testing.T) {

for _, scratch := range scratchBuffers {
reader := bytes.NewReader(tc.encodedNode)
newNode, err := flattener.ReadNode(reader, scratch, func(nodeIndex uint64) (*node.Node, error) {
newNode, err := flattener.ReadNode(reader, scratch, func(nodeIndex uint64) (node.Node, error) {
return nil, fmt.Errorf("no call expected")
})
require.NoError(t, err)
Expand Down Expand Up @@ -162,9 +162,9 @@ func TestRandomLeafNodeEncodingDecoding(t *testing.T) {
var hashValue hash.Hash
rand.Read(hashValue[:])

n := node.NewNode(height, nil, nil, paths[i], payloads[i], hashValue)
n := node.NewLeafNodeWithHash(paths[i], payloads[i], height, hashValue)

encodedNode := flattener.EncodeNode(n, 0, 0, writeScratch)
encodedNode := flattener.EncodeLeafNode(n, writeScratch)

if len(writeScratch) >= len(encodedNode) {
// reuse scratch buffer
Expand All @@ -175,7 +175,7 @@ func TestRandomLeafNodeEncodingDecoding(t *testing.T) {
}

reader := bytes.NewReader(encodedNode)
newNode, err := flattener.ReadNode(reader, readScratch, func(nodeIndex uint64) (*node.Node, error) {
newNode, err := flattener.ReadNode(reader, readScratch, func(nodeIndex uint64) (node.Node, error) {
return nil, fmt.Errorf("no call expected")
})
require.NoError(t, err)
Expand All @@ -193,17 +193,17 @@ func TestInterimNodeEncodingDecoding(t *testing.T) {
path1 := utils.PathByUint8(0)
payload1 := utils.LightPayload8('A', 'a')
hashValue1 := hash.Hash([32]byte{1, 1, 1})
leafNode1 := node.NewNode(255, nil, nil, ledger.Path(path1), payload1, hashValue1)
leafNode1 := node.NewLeafNodeWithHash(ledger.Path(path1), payload1, 255, hashValue1)

// Child node
path2 := utils.PathByUint8(1)
payload2 := utils.LightPayload8('B', 'b')
hashValue2 := hash.Hash([32]byte{2, 2, 2})
leafNode2 := node.NewNode(255, nil, nil, ledger.Path(path2), payload2, hashValue2)
leafNode2 := node.NewLeafNodeWithHash(ledger.Path(path2), payload2, 255, hashValue2)

// Interim node
hashValue3 := hash.Hash([32]byte{3, 3, 3})
interimNode := node.NewNode(256, leafNode1, leafNode2, ledger.DummyPath, nil, hashValue3)
interimNode := node.NewInterimNodeWithHash(256, leafNode1, leafNode2, hashValue3)

encodedInterimNode := []byte{
0x01, // node type
Expand All @@ -225,7 +225,7 @@ func TestInterimNodeEncodingDecoding(t *testing.T) {
}

for _, scratch := range scratchBuffers {
data := flattener.EncodeNode(interimNode, lchildIndex, rchildIndex, scratch)
data := flattener.EncodeInterimNode(interimNode, lchildIndex, rchildIndex, scratch)
assert.Equal(t, encodedInterimNode, data)
}
})
Expand All @@ -240,7 +240,7 @@ func TestInterimNodeEncodingDecoding(t *testing.T) {

for _, scratch := range scratchBuffers {
reader := bytes.NewReader(encodedInterimNode)
newNode, err := flattener.ReadNode(reader, scratch, func(nodeIndex uint64) (*node.Node, error) {
newNode, err := flattener.ReadNode(reader, scratch, func(nodeIndex uint64) (node.Node, error) {
switch nodeIndex {
case lchildIndex:
return leafNode1, nil
Expand All @@ -261,7 +261,7 @@ func TestInterimNodeEncodingDecoding(t *testing.T) {
scratch := make([]byte, 1024)

reader := bytes.NewReader(encodedInterimNode)
newNode, err := flattener.ReadNode(reader, scratch, func(nodeIndex uint64) (*node.Node, error) {
newNode, err := flattener.ReadNode(reader, scratch, func(nodeIndex uint64) (node.Node, error) {
return nil, nodeNotFoundError
})
require.Nil(t, newNode)
Expand All @@ -273,7 +273,7 @@ func TestTrieEncodingDecoding(t *testing.T) {
rootNodeNilIndex := uint64(20)

hashValue := hash.Hash([32]byte{2, 2, 2})
rootNode := node.NewNode(256, nil, nil, ledger.DummyPath, nil, hashValue)
rootNode := node.NewLeafNodeWithHash(ledger.DummyPath, nil, 256, hashValue)
rootNodeIndex := uint64(21)

mtrie, err := trie.NewMTrie(rootNode, 7, 1234)
Expand Down Expand Up @@ -345,7 +345,7 @@ func TestTrieEncodingDecoding(t *testing.T) {

for _, scratch := range scratchBuffers {
reader := bytes.NewReader(tc.encodedTrie)
trie, err := flattener.ReadTrie(reader, scratch, func(nodeIndex uint64) (*node.Node, error) {
trie, err := flattener.ReadTrie(reader, scratch, func(nodeIndex uint64) (node.Node, error) {
if nodeIndex != tc.rootNodeIndex {
return nil, fmt.Errorf("unexpected root node index %d ", nodeIndex)
}
Expand All @@ -364,7 +364,7 @@ func TestTrieEncodingDecoding(t *testing.T) {
scratch := make([]byte, 1024)

reader := bytes.NewReader(tc.encodedTrie)
newNode, err := flattener.ReadTrie(reader, scratch, func(nodeIndex uint64) (*node.Node, error) {
newNode, err := flattener.ReadTrie(reader, scratch, func(nodeIndex uint64) (node.Node, error) {
return nil, nodeNotFoundError
})
require.Nil(t, newNode)
Expand Down
8 changes: 4 additions & 4 deletions ledger/complete/mtrie/flattener/encoding_v3.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ import (
const encodingDecodingVersion = uint16(0)

// getNodeFunc returns node by nodeIndex along with node's regCount and regSize.
type getNodeFunc func(nodeIndex uint64) (n *node.Node, regCount uint64, regSize uint64, err error)
type getNodeFunc func(nodeIndex uint64) (n node.Node, regCount uint64, regSize uint64, err error)

// ReadNodeFromCheckpointV3AndEarlier returns a node reconstructed from data in
// checkpoint v3 and earlier versions. It also returns node's regCount and regSize.
Expand All @@ -33,7 +33,7 @@ type getNodeFunc func(nodeIndex uint64) (n *node.Node, regCount uint64, regSize
// - path (2 bytes + 32 bytes)
// - payload (4 bytes + n bytes)
// - hash (2 bytes + 32 bytes)
func ReadNodeFromCheckpointV3AndEarlier(reader io.Reader, getNode getNodeFunc) (*node.Node, uint64, uint64, error) {
func ReadNodeFromCheckpointV3AndEarlier(reader io.Reader, getNode getNodeFunc) (node.Node, uint64, uint64, error) {

// Read version (2 bytes)
buf := make([]byte, 2)
Expand Down Expand Up @@ -147,7 +147,7 @@ func ReadNodeFromCheckpointV3AndEarlier(reader io.Reader, getNode getNodeFunc) (
pl = payload.DeepCopy()
}

n := node.NewNode(int(height), nil, nil, path, pl, nodeHash)
n := node.NewLeafNodeWithHash(path, pl, int(height), nodeHash)

// Leaf node has 1 register and register size is payload size.
return n, 1, uint64(pl.Size()), nil
Expand All @@ -165,7 +165,7 @@ func ReadNodeFromCheckpointV3AndEarlier(reader io.Reader, getNode getNodeFunc) (
return nil, 0, 0, fmt.Errorf("failed to find right child node of serialized node in v3: %w", err)
}

n := node.NewNode(int(height), lchild, rchild, ledger.DummyPath, nil, nodeHash)
n := node.NewInterimNodeWithHash(int(height), lchild, rchild, nodeHash)
return n, lchildRegCount + rchildRegCount, lchildRegSize + rchildRegSize, nil
}

Expand Down
14 changes: 7 additions & 7 deletions ledger/complete/mtrie/flattener/encoding_v3_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,10 +22,10 @@ func TestNodeV3Decoding(t *testing.T) {
const leafNode1Index = 1
const leafNode2Index = 2

leafNode1 := node.NewNode(255, nil, nil, utils.PathByUint8(0), utils.LightPayload8('A', 'a'), hash.Hash([32]byte{1, 1, 1}))
leafNode2 := node.NewNode(255, nil, nil, utils.PathByUint8(1), utils.LightPayload8('B', 'b'), hash.Hash([32]byte{2, 2, 2}))
leafNode1 := node.NewLeafNodeWithHash(utils.PathByUint8(0), utils.LightPayload8('A', 'a'), 255, hash.Hash([32]byte{1, 1, 1}))
leafNode2 := node.NewLeafNodeWithHash(utils.PathByUint8(1), utils.LightPayload8('B', 'b'), 255, hash.Hash([32]byte{2, 2, 2}))

interimNode := node.NewNode(256, leafNode1, leafNode2, ledger.DummyPath, nil, hash.Hash([32]byte{3, 3, 3}))
interimNode := node.NewInterimNodeWithHash(256, leafNode1, leafNode2, hash.Hash([32]byte{3, 3, 3}))

encodedLeafNode1 := []byte{
0x00, 0x00, // encoding version
Expand Down Expand Up @@ -69,7 +69,7 @@ func TestNodeV3Decoding(t *testing.T) {

t.Run("leaf node", func(t *testing.T) {
reader := bytes.NewReader(encodedLeafNode1)
newNode, regCount, regSize, err := flattener.ReadNodeFromCheckpointV3AndEarlier(reader, func(nodeIndex uint64) (*node.Node, uint64, uint64, error) {
newNode, regCount, regSize, err := flattener.ReadNodeFromCheckpointV3AndEarlier(reader, func(nodeIndex uint64) (node.Node, uint64, uint64, error) {
return nil, 0, 0, fmt.Errorf("no call expected")
})
require.NoError(t, err)
Expand All @@ -80,7 +80,7 @@ func TestNodeV3Decoding(t *testing.T) {

t.Run("interim node", func(t *testing.T) {
reader := bytes.NewReader(encodedInterimNode)
newNode, regCount, regSize, err := flattener.ReadNodeFromCheckpointV3AndEarlier(reader, func(nodeIndex uint64) (*node.Node, uint64, uint64, error) {
newNode, regCount, regSize, err := flattener.ReadNodeFromCheckpointV3AndEarlier(reader, func(nodeIndex uint64) (node.Node, uint64, uint64, error) {
switch nodeIndex {
case leafNode1Index:
return leafNode1, 1, uint64(leafNode1.Payload().Size()), nil
Expand All @@ -104,7 +104,7 @@ func TestTrieV3Decoding(t *testing.T) {
const rootNodeRegSize = 1000000

hashValue := hash.Hash([32]byte{2, 2, 2})
rootNode := node.NewNode(256, nil, nil, ledger.DummyPath, nil, hashValue)
rootNode := node.NewLeafNodeWithHash(ledger.DummyPath, nil, 256, hashValue)

expected := []byte{
0x00, 0x00, // encoding version
Expand All @@ -118,7 +118,7 @@ func TestTrieV3Decoding(t *testing.T) {

reader := bytes.NewReader(expected)

trie, err := flattener.ReadTrieFromCheckpointV3AndEarlier(reader, func(nodeIndex uint64) (*node.Node, uint64, uint64, error) {
trie, err := flattener.ReadTrieFromCheckpointV3AndEarlier(reader, func(nodeIndex uint64) (node.Node, uint64, uint64, error) {
switch nodeIndex {
case rootNodeIndex:
return rootNode, rootNodeRegCount, rootNodeRegSize, nil
Expand Down
6 changes: 3 additions & 3 deletions ledger/complete/mtrie/flattener/encoding_v4.go
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ const (
// - hash (32 bytes)
// - path (32 bytes)
// - payload (4 bytes + n bytes)
func ReadNodeFromCheckpointV4(reader io.Reader, scratch []byte, getNode getNodeFunc) (*node.Node, uint64, uint64, error) {
func ReadNodeFromCheckpointV4(reader io.Reader, scratch []byte, getNode getNodeFunc) (node.Node, uint64, uint64, error) {

// minBufSize should be large enough for interim node and leaf node with small payload.
// minBufSize is a failsafe and is only used when len(scratch) is much smaller
Expand Down Expand Up @@ -114,7 +114,7 @@ func ReadNodeFromCheckpointV4(reader io.Reader, scratch []byte, getNode getNodeF
return nil, 0, 0, fmt.Errorf("failed to read and decode payload of serialized node: %w", err)
}

n := node.NewNode(int(height), nil, nil, path, payload, nodeHash)
n := node.NewLeafNodeWithHash(path, payload, int(height), nodeHash)

// Leaf node has 1 register and register size is payload size.
return n, 1, uint64(payload.Size()), nil
Expand Down Expand Up @@ -149,7 +149,7 @@ func ReadNodeFromCheckpointV4(reader io.Reader, scratch []byte, getNode getNodeF
return nil, 0, 0, fmt.Errorf("failed to find right child node of serialized node: %w", err)
}

n := node.NewNode(int(height), lchild, rchild, ledger.DummyPath, nil, nodeHash)
n := node.NewInterimNodeWithHash(int(height), lchild, rchild, nodeHash)
return n, lchildRegCount + rchildRegCount, lchildRegSize + rchildRegSize, nil
}

Expand Down
Loading