From 4c97d825c30a9ce2c9c581e5e0a9c34940f84b53 Mon Sep 17 00:00:00 2001 From: awskii Date: Thu, 1 Aug 2024 16:10:04 +0100 Subject: [PATCH 1/5] commitment: use Update for domain IO --- erigon-lib/commitment/bin_patricia_hashed.go | 3627 ++++++++--------- .../commitment/bin_patricia_hashed_test.go | 524 +-- erigon-lib/commitment/commitment.go | 66 +- erigon-lib/commitment/commitment_test.go | 6 +- erigon-lib/commitment/hex_patricia_hashed.go | 404 +- .../commitment/hex_patricia_hashed_test.go | 20 +- .../commitment/patricia_state_mock_test.go | 77 +- erigon-lib/state/domain_shared.go | 58 +- 8 files changed, 2323 insertions(+), 2459 deletions(-) diff --git a/erigon-lib/commitment/bin_patricia_hashed.go b/erigon-lib/commitment/bin_patricia_hashed.go index f39d967452c..5e35fcb7e07 100644 --- a/erigon-lib/commitment/bin_patricia_hashed.go +++ b/erigon-lib/commitment/bin_patricia_hashed.go @@ -1,3 +1,4 @@ +//build +ignore // Copyright 2022 The Erigon Authors // This file is part of Erigon. // @@ -16,1827 +17,1805 @@ package commitment -import ( - "bytes" - "context" - "encoding/binary" - "encoding/hex" - "errors" - "fmt" - "io" - "math/bits" - "path/filepath" - "sort" - - "github.com/holiman/uint256" - "golang.org/x/crypto/sha3" - - "github.com/erigontech/erigon-lib/common" - "github.com/erigontech/erigon-lib/common/length" - "github.com/erigontech/erigon-lib/etl" - "github.com/erigontech/erigon-lib/log/v3" - "github.com/erigontech/erigon-lib/rlp" -) - -const ( - maxKeySize = 512 - halfKeySize = maxKeySize / 2 - maxChild = 2 -) - -type bitstring []uint8 - -// converts slice of nibbles (lowest 4 bits of each byte) to bitstring -func hexToBin(hex []byte) bitstring { - bin := make([]byte, 4*len(hex)) - for i := range bin { - if hex[i/4]&(1<<(3-i%4)) != 0 { - bin[i] = 1 - } - } - return bin -} - -// encodes bitstring to its compact representation -func binToCompact(bin []byte) []byte { - compact := make([]byte, 2+common.BitLenToByteLen(len(bin))) - binary.BigEndian.PutUint16(compact, uint16(len(bin))) - for i := 0; i < len(bin); i++ { - if bin[i] != 0 { - compact[2+i/8] |= byte(1) << (i % 8) - } - } - return compact -} - -// decodes compact bitstring representation into actual bitstring -func compactToBin(compact []byte) []byte { - bin := make([]byte, binary.BigEndian.Uint16(compact)) - for i := 0; i < len(bin); i++ { - if compact[2+i/8]&(byte(1)<<(i%8)) == 0 { - bin[i] = 0 - } else { - bin[i] = 1 - } - } - return bin -} - -// BinHashed implements commitment based on patricia merkle tree with radix 16, -// with keys pre-hashed by keccak256 -type BinPatriciaHashed struct { - root BinaryCell // Root cell of the tree - // Rows of the grid correspond to the level of depth in the patricia tree - // Columns of the grid correspond to pointers to the nodes further from the root - grid [maxKeySize][maxChild]BinaryCell // First halfKeySize rows of this grid are for account trie, and next halfKeySize rows are for storage trie - // How many rows (starting from row 0) are currently active and have corresponding selected columns - // Last active row does not have selected column - activeRows int - // Length of the key that reflects current positioning of the grid. 
It maybe larger than number of active rows, - // if a account leaf cell represents multiple nibbles in the key - currentKeyLen int - currentKey [maxKeySize]byte // For each row indicates which column is currently selected - depths [maxKeySize]int // For each row, the depth of cells in that row - rootChecked bool // Set to false if it is not known whether the root is empty, set to true if it is checked - rootTouched bool - rootPresent bool - branchBefore [maxKeySize]bool // For each row, whether there was a branch node in the database loaded in unfold - touchMap [maxKeySize]uint16 // For each row, bitmap of cells that were either present before modification, or modified or deleted - afterMap [maxKeySize]uint16 // For each row, bitmap of cells that were present after modification - keccak keccakState - keccak2 keccakState - accountKeyLen int - trace bool - hashAuxBuffer [maxKeySize]byte // buffer to compute cell hash or write hash-related things - auxBuffer *bytes.Buffer // auxiliary buffer used during branch updates encoding - - branchEncoder *BranchEncoder - ctx PatriciaContext - - // Function used to fetch account with given plain key - accountFn func(plainKey []byte, cell *BinaryCell) error - // Function used to fetch account with given plain key - storageFn func(plainKey []byte, cell *BinaryCell) error -} - -func NewBinPatriciaHashed(accountKeyLen int, ctx PatriciaContext, tmpdir string) *BinPatriciaHashed { - bph := &BinPatriciaHashed{ - keccak: sha3.NewLegacyKeccak256().(keccakState), - keccak2: sha3.NewLegacyKeccak256().(keccakState), - accountKeyLen: accountKeyLen, - accountFn: wrapAccountStorageFn(ctx.GetAccount), - storageFn: wrapAccountStorageFn(ctx.GetStorage), - auxBuffer: bytes.NewBuffer(make([]byte, 8192)), - ctx: ctx, - } - bph.branchEncoder = NewBranchEncoder(1024, filepath.Join(tmpdir, "branch-encoder")) - - return bph - -} - -type BinaryCell struct { - h [length.Hash]byte // cell hash - hl int // Length of the hash (or embedded) - apk [length.Addr]byte // account plain key - apl int // length of account plain key - spk [length.Addr + length.Hash]byte // storage plain key - spl int // length of the storage plain key - downHashedKey [maxKeySize]byte - downHashedLen int - extension [halfKeySize]byte - extLen int - Nonce uint64 - Balance uint256.Int - CodeHash [length.Hash]byte // hash of the bytecode - Storage [length.Hash]byte - StorageLen int - Delete bool -} - -func (cell *BinaryCell) unwrapToHexCell() (cl *Cell) { - cl = new(Cell) - cl.Balance = *cell.Balance.Clone() - cl.Nonce = cell.Nonce - cl.StorageLen = cell.StorageLen - cl.accountPlainKeyLen = cell.apl - cl.storagePlainKeyLen = cell.spl - cl.HashLen = cell.hl - - copy(cl.accountPlainKey[:], cell.apk[:]) - copy(cl.storagePlainKey[:], cell.spk[:]) - copy(cl.hash[:], cell.h[:]) - - if cell.extLen > 0 { - compactedExt := binToCompact(cell.extension[:cell.extLen]) - copy(cl.extension[:], compactedExt) - cl.extLen = len(compactedExt) - } - if cell.downHashedLen > 0 { - compactedDHK := binToCompact(cell.downHashedKey[:cell.downHashedLen]) - copy(cl.downHashedKey[:], compactedDHK) - cl.downHashedLen = len(compactedDHK) - } - - copy(cl.CodeHash[:], cell.CodeHash[:]) - copy(cl.Storage[:], cell.Storage[:]) - cl.Delete = cell.Delete - return cl -} - -var ( // TODO REEAVL - EmptyBinRootHash, _ = hex.DecodeString("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") - EmptyBinCodeHash, _ = hex.DecodeString("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470") -) - -func (cell *BinaryCell) fillEmpty() 
{ - cell.apl = 0 - cell.spl = 0 - cell.downHashedLen = 0 - cell.extLen = 0 - cell.hl = 0 - cell.Nonce = 0 - cell.Balance.Clear() - copy(cell.CodeHash[:], EmptyCodeHash) - cell.StorageLen = 0 - cell.Delete = false -} - -func (cell *BinaryCell) fillFromUpperCell(upBinaryCell *BinaryCell, depth, depthIncrement int) { - if upBinaryCell.downHashedLen >= depthIncrement { - cell.downHashedLen = upBinaryCell.downHashedLen - depthIncrement - } else { - cell.downHashedLen = 0 - } - if upBinaryCell.downHashedLen > depthIncrement { - copy(cell.downHashedKey[:], upBinaryCell.downHashedKey[depthIncrement:upBinaryCell.downHashedLen]) - } - if upBinaryCell.extLen >= depthIncrement { - cell.extLen = upBinaryCell.extLen - depthIncrement - } else { - cell.extLen = 0 - } - if upBinaryCell.extLen > depthIncrement { - copy(cell.extension[:], upBinaryCell.extension[depthIncrement:upBinaryCell.extLen]) - } - if depth <= halfKeySize { - cell.apl = upBinaryCell.apl - if upBinaryCell.apl > 0 { - copy(cell.apk[:], upBinaryCell.apk[:cell.apl]) - cell.Balance.Set(&upBinaryCell.Balance) - cell.Nonce = upBinaryCell.Nonce - copy(cell.CodeHash[:], upBinaryCell.CodeHash[:]) - cell.extLen = upBinaryCell.extLen - if upBinaryCell.extLen > 0 { - copy(cell.extension[:], upBinaryCell.extension[:upBinaryCell.extLen]) - } - } - } else { - cell.apl = 0 - } - cell.spl = upBinaryCell.spl - if upBinaryCell.spl > 0 { - copy(cell.spk[:], upBinaryCell.spk[:upBinaryCell.spl]) - cell.StorageLen = upBinaryCell.StorageLen - if upBinaryCell.StorageLen > 0 { - copy(cell.Storage[:], upBinaryCell.Storage[:upBinaryCell.StorageLen]) - } - } - cell.hl = upBinaryCell.hl - if upBinaryCell.hl > 0 { - copy(cell.h[:], upBinaryCell.h[:upBinaryCell.hl]) - } -} - -func (cell *BinaryCell) fillFromLowerBinaryCell(lowBinaryCell *BinaryCell, lowDepth int, preExtension []byte, nibble int) { - if lowBinaryCell.apl > 0 || lowDepth < halfKeySize { - cell.apl = lowBinaryCell.apl - } - if lowBinaryCell.apl > 0 { - copy(cell.apk[:], lowBinaryCell.apk[:cell.apl]) - cell.Balance.Set(&lowBinaryCell.Balance) - cell.Nonce = lowBinaryCell.Nonce - copy(cell.CodeHash[:], lowBinaryCell.CodeHash[:]) - } - cell.spl = lowBinaryCell.spl - if lowBinaryCell.spl > 0 { - copy(cell.spk[:], lowBinaryCell.spk[:cell.spl]) - cell.StorageLen = lowBinaryCell.StorageLen - if lowBinaryCell.StorageLen > 0 { - copy(cell.Storage[:], lowBinaryCell.Storage[:lowBinaryCell.StorageLen]) - } - } - if lowBinaryCell.hl > 0 { - if (lowBinaryCell.apl == 0 && lowDepth < halfKeySize) || (lowBinaryCell.spl == 0 && lowDepth > halfKeySize) { - // Extension is related to either accounts branch node, or storage branch node, we prepend it by preExtension | nibble - if len(preExtension) > 0 { - copy(cell.extension[:], preExtension) - } - cell.extension[len(preExtension)] = byte(nibble) - if lowBinaryCell.extLen > 0 { - copy(cell.extension[1+len(preExtension):], lowBinaryCell.extension[:lowBinaryCell.extLen]) - } - cell.extLen = lowBinaryCell.extLen + 1 + len(preExtension) - } else { - // Extension is related to a storage branch node, so we copy it upwards as is - cell.extLen = lowBinaryCell.extLen - if lowBinaryCell.extLen > 0 { - copy(cell.extension[:], lowBinaryCell.extension[:lowBinaryCell.extLen]) - } - } - } - cell.hl = lowBinaryCell.hl - if lowBinaryCell.hl > 0 { - copy(cell.h[:], lowBinaryCell.h[:lowBinaryCell.hl]) - } -} - -func (cell *BinaryCell) deriveHashedKeys(depth int, keccak keccakState, accountKeyLen int) error { - extraLen := 0 - if cell.apl > 0 { - if depth > halfKeySize { - return 
errors.New("deriveHashedKeys accountPlainKey present at depth > halfKeySize") - } - extraLen = halfKeySize - depth - } - if cell.spl > 0 { - if depth >= halfKeySize { - extraLen = maxKeySize - depth - } else { - extraLen += halfKeySize - } - } - if extraLen > 0 { - if cell.downHashedLen > 0 { - copy(cell.downHashedKey[extraLen:], cell.downHashedKey[:cell.downHashedLen]) - } - cell.downHashedLen += extraLen - var hashedKeyOffset, downOffset int - if cell.apl > 0 { - if err := binHashKey(keccak, cell.apk[:cell.apl], cell.downHashedKey[:], depth); err != nil { - return err - } - downOffset = halfKeySize - depth - } - if cell.spl > 0 { - if depth >= halfKeySize { - hashedKeyOffset = depth - halfKeySize - } - if err := binHashKey(keccak, cell.spk[accountKeyLen:cell.spl], cell.downHashedKey[downOffset:], hashedKeyOffset); err != nil { - return err - } - } - } - return nil -} - -func (cell *BinaryCell) fillFromFields(data []byte, pos int, fieldBits PartFlags) (int, error) { - if fieldBits&HashedKeyPart != 0 { - l, n := binary.Uvarint(data[pos:]) - if n == 0 { - return 0, errors.New("fillFromFields buffer too small for hashedKey len") - } else if n < 0 { - return 0, errors.New("fillFromFields value overflow for hashedKey len") - } - pos += n - if len(data) < pos+int(l) { - return 0, fmt.Errorf("fillFromFields buffer too small for hashedKey exp %d got %d", pos+int(l), len(data)) - } - cell.downHashedLen = int(l) - cell.extLen = int(l) - if l > 0 { - copy(cell.downHashedKey[:], data[pos:pos+int(l)]) - copy(cell.extension[:], data[pos:pos+int(l)]) - pos += int(l) - } - } else { - cell.downHashedLen = 0 - cell.extLen = 0 - } - if fieldBits&AccountPlainPart != 0 { - l, n := binary.Uvarint(data[pos:]) - if n == 0 { - return 0, errors.New("fillFromFields buffer too small for accountPlainKey len") - } else if n < 0 { - return 0, errors.New("fillFromFields value overflow for accountPlainKey len") - } - pos += n - if len(data) < pos+int(l) { - return 0, errors.New("fillFromFields buffer too small for accountPlainKey") - } - cell.apl = int(l) - if l > 0 { - copy(cell.apk[:], data[pos:pos+int(l)]) - pos += int(l) - } - } else { - cell.apl = 0 - } - if fieldBits&StoragePlainPart != 0 { - l, n := binary.Uvarint(data[pos:]) - if n == 0 { - return 0, errors.New("fillFromFields buffer too small for storagePlainKey len") - } else if n < 0 { - return 0, errors.New("fillFromFields value overflow for storagePlainKey len") - } - pos += n - if len(data) < pos+int(l) { - return 0, errors.New("fillFromFields buffer too small for storagePlainKey") - } - cell.spl = int(l) - if l > 0 { - copy(cell.spk[:], data[pos:pos+int(l)]) - pos += int(l) - } - } else { - cell.spl = 0 - } - if fieldBits&HashPart != 0 { - l, n := binary.Uvarint(data[pos:]) - if n == 0 { - return 0, errors.New("fillFromFields buffer too small for hash len") - } else if n < 0 { - return 0, errors.New("fillFromFields value overflow for hash len") - } - pos += n - if len(data) < pos+int(l) { - return 0, errors.New("fillFromFields buffer too small for hash") - } - cell.hl = int(l) - if l > 0 { - copy(cell.h[:], data[pos:pos+int(l)]) - pos += int(l) - } - } else { - cell.hl = 0 - } - return pos, nil -} - -func (cell *BinaryCell) setStorage(value []byte) { - cell.StorageLen = len(value) - if len(value) > 0 { - copy(cell.Storage[:], value) - } -} - -func (cell *BinaryCell) setAccountFields(codeHash []byte, balance *uint256.Int, nonce uint64) { - copy(cell.CodeHash[:], codeHash) - - cell.Balance.SetBytes(balance.Bytes()) - cell.Nonce = nonce -} - -func (cell 
*BinaryCell) accountForHashing(buffer []byte, storageRootHash [length.Hash]byte) int { - balanceBytes := 0 - if !cell.Balance.LtUint64(128) { - balanceBytes = cell.Balance.ByteLen() - } - - var nonceBytes int - if cell.Nonce < 128 && cell.Nonce != 0 { - nonceBytes = 0 - } else { - nonceBytes = common.BitLenToByteLen(bits.Len64(cell.Nonce)) - } - - var structLength = uint(balanceBytes + nonceBytes + 2) - structLength += 66 // Two 32-byte arrays + 2 prefixes - - var pos int - if structLength < 56 { - buffer[0] = byte(192 + structLength) - pos = 1 - } else { - lengthBytes := common.BitLenToByteLen(bits.Len(structLength)) - buffer[0] = byte(247 + lengthBytes) - - for i := lengthBytes; i > 0; i-- { - buffer[i] = byte(structLength) - structLength >>= 8 - } - - pos = lengthBytes + 1 - } - - // Encoding nonce - if cell.Nonce < 128 && cell.Nonce != 0 { - buffer[pos] = byte(cell.Nonce) - } else { - buffer[pos] = byte(128 + nonceBytes) - var nonce = cell.Nonce - for i := nonceBytes; i > 0; i-- { - buffer[pos+i] = byte(nonce) - nonce >>= 8 - } - } - pos += 1 + nonceBytes - - // Encoding balance - if cell.Balance.LtUint64(128) && !cell.Balance.IsZero() { - buffer[pos] = byte(cell.Balance.Uint64()) - pos++ - } else { - buffer[pos] = byte(128 + balanceBytes) - pos++ - cell.Balance.WriteToSlice(buffer[pos : pos+balanceBytes]) - pos += balanceBytes - } - - // Encoding Root and CodeHash - buffer[pos] = 128 + 32 - pos++ - copy(buffer[pos:], storageRootHash[:]) - pos += 32 - buffer[pos] = 128 + 32 - pos++ - copy(buffer[pos:], cell.CodeHash[:]) - pos += 32 - return pos -} - -func (bph *BinPatriciaHashed) ResetContext(ctx PatriciaContext) {} - -func (bph *BinPatriciaHashed) completeLeafHash(buf, keyPrefix []byte, kp, kl, compactLen int, key []byte, compact0 byte, ni int, val rlp.RlpSerializable, singleton bool) ([]byte, error) { - totalLen := kp + kl + val.DoubleRLPLen() - var lenPrefix [4]byte - pt := rlp.GenerateStructLen(lenPrefix[:], totalLen) - embedded := !singleton && totalLen+pt < length.Hash - var writer io.Writer - if embedded { - //bph.byteArrayWriter.Setup(buf) - bph.auxBuffer.Reset() - writer = bph.auxBuffer - } else { - bph.keccak.Reset() - writer = bph.keccak - } - if _, err := writer.Write(lenPrefix[:pt]); err != nil { - return nil, err - } - if _, err := writer.Write(keyPrefix[:kp]); err != nil { - return nil, err - } - var b [1]byte - b[0] = compact0 - if _, err := writer.Write(b[:]); err != nil { - return nil, err - } - for i := 1; i < compactLen; i++ { - b[0] = key[ni]*16 + key[ni+1] - if _, err := writer.Write(b[:]); err != nil { - return nil, err - } - ni += 2 - } - var prefixBuf [8]byte - if err := val.ToDoubleRLP(writer, prefixBuf[:]); err != nil { - return nil, err - } - if embedded { - buf = bph.auxBuffer.Bytes() - } else { - var hashBuf [33]byte - hashBuf[0] = 0x80 + length.Hash - if _, err := bph.keccak.Read(hashBuf[1:]); err != nil { - return nil, err - } - buf = append(buf, hashBuf[:]...) 
- } - return buf, nil -} - -func (bph *BinPatriciaHashed) leafHashWithKeyVal(buf, key []byte, val rlp.RlpSerializableBytes, singleton bool) ([]byte, error) { - // Compute the total length of binary representation - var kp, kl int - // Write key - var compactLen int - var ni int - var compact0 byte - compactLen = (len(key)-1)/2 + 1 - if len(key)&1 == 0 { - compact0 = 0x30 + key[0] // Odd: (3<<4) + first nibble - ni = 1 - } else { - compact0 = 0x20 - } - var keyPrefix [1]byte - if compactLen > 1 { - keyPrefix[0] = 0x80 + byte(compactLen) - kp = 1 - kl = compactLen - } else { - kl = 1 - } - return bph.completeLeafHash(buf, keyPrefix[:], kp, kl, compactLen, key, compact0, ni, val, singleton) -} - -func (bph *BinPatriciaHashed) accountLeafHashWithKey(buf, key []byte, val rlp.RlpSerializable) ([]byte, error) { - // Compute the total length of binary representation - var kp, kl int - // Write key - var compactLen int - var ni int - var compact0 byte - if hasTerm(key) { - compactLen = (len(key)-1)/2 + 1 - if len(key)&1 == 0 { - compact0 = 48 + key[0] // Odd (1<<4) + first nibble - ni = 1 - } else { - compact0 = 32 - } - } else { - compactLen = len(key)/2 + 1 - if len(key)&1 == 1 { - compact0 = 16 + key[0] // Odd (1<<4) + first nibble - ni = 1 - } - } - var keyPrefix [1]byte - if compactLen > 1 { - keyPrefix[0] = byte(128 + compactLen) - kp = 1 - kl = compactLen - } else { - kl = 1 - } - return bph.completeLeafHash(buf, keyPrefix[:], kp, kl, compactLen, key, compact0, ni, val, true) -} - -func (bph *BinPatriciaHashed) extensionHash(key []byte, hash []byte) ([length.Hash]byte, error) { - var hashBuf [length.Hash]byte - - // Compute the total length of binary representation - var kp, kl int - // Write key - var compactLen int - var ni int - var compact0 byte - if hasTerm(key) { - compactLen = (len(key)-1)/2 + 1 - if len(key)&1 == 0 { - compact0 = 0x30 + key[0] // Odd: (3<<4) + first nibble - ni = 1 - } else { - compact0 = 0x20 - } - } else { - compactLen = len(key)/2 + 1 - if len(key)&1 == 1 { - compact0 = 0x10 + key[0] // Odd: (1<<4) + first nibble - ni = 1 - } - } - var keyPrefix [1]byte - if compactLen > 1 { - keyPrefix[0] = 0x80 + byte(compactLen) - kp = 1 - kl = compactLen - } else { - kl = 1 - } - totalLen := kp + kl + 33 - var lenPrefix [4]byte - pt := rlp.GenerateStructLen(lenPrefix[:], totalLen) - bph.keccak.Reset() - if _, err := bph.keccak.Write(lenPrefix[:pt]); err != nil { - return hashBuf, err - } - if _, err := bph.keccak.Write(keyPrefix[:kp]); err != nil { - return hashBuf, err - } - var b [1]byte - b[0] = compact0 - if _, err := bph.keccak.Write(b[:]); err != nil { - return hashBuf, err - } - for i := 1; i < compactLen; i++ { - b[0] = key[ni]*16 + key[ni+1] - if _, err := bph.keccak.Write(b[:]); err != nil { - return hashBuf, err - } - ni += 2 - } - b[0] = 0x80 + length.Hash - if _, err := bph.keccak.Write(b[:]); err != nil { - return hashBuf, err - } - if _, err := bph.keccak.Write(hash); err != nil { - return hashBuf, err - } - // Replace previous hash with the new one - if _, err := bph.keccak.Read(hashBuf[:]); err != nil { - return hashBuf, err - } - return hashBuf, nil -} - -func (bph *BinPatriciaHashed) computeBinaryCellHashLen(cell *BinaryCell, depth int) int { - if cell.spl > 0 && depth >= halfKeySize { - keyLen := 128 - depth + 1 // Length of hex key with terminator character - var kp, kl int - compactLen := (keyLen-1)/2 + 1 - if compactLen > 1 { - kp = 1 - kl = compactLen - } else { - kl = 1 - } - val := rlp.RlpSerializableBytes(cell.Storage[:cell.StorageLen]) - totalLen := 
kp + kl + val.DoubleRLPLen() - var lenPrefix [4]byte - pt := rlp.GenerateStructLen(lenPrefix[:], totalLen) - if totalLen+pt < length.Hash { - return totalLen + pt - } - } - return length.Hash + 1 -} - -func (bph *BinPatriciaHashed) computeBinaryCellHash(cell *BinaryCell, depth int, buf []byte) ([]byte, error) { - var err error - var storageRootHash [length.Hash]byte - storageRootHashIsSet := false - if cell.spl > 0 { - var hashedKeyOffset int - if depth >= halfKeySize { - hashedKeyOffset = depth - halfKeySize - } - singleton := depth <= halfKeySize - if err := binHashKey(bph.keccak, cell.spk[bph.accountKeyLen:cell.spl], cell.downHashedKey[:], hashedKeyOffset); err != nil { - return nil, err - } - cell.downHashedKey[halfKeySize-hashedKeyOffset] = 16 // Add terminator - if singleton { - if bph.trace { - fmt.Printf("leafHashWithKeyVal(singleton) for [%x]=>[%x]\n", cell.downHashedKey[:halfKeySize-hashedKeyOffset+1], cell.Storage[:cell.StorageLen]) - } - aux := make([]byte, 0, 33) - if aux, err = bph.leafHashWithKeyVal(aux, cell.downHashedKey[:halfKeySize-hashedKeyOffset+1], cell.Storage[:cell.StorageLen], true); err != nil { - return nil, err - } - storageRootHash = *(*[length.Hash]byte)(aux[1:]) - storageRootHashIsSet = true - } else { - if bph.trace { - fmt.Printf("leafHashWithKeyVal for [%x]=>[%x]\n", cell.downHashedKey[:halfKeySize-hashedKeyOffset+1], cell.Storage[:cell.StorageLen]) - } - return bph.leafHashWithKeyVal(buf, cell.downHashedKey[:halfKeySize-hashedKeyOffset+1], cell.Storage[:cell.StorageLen], false) - } - } - if cell.apl > 0 { - if err := binHashKey(bph.keccak, cell.apk[:cell.apl], cell.downHashedKey[:], depth); err != nil { - return nil, err - } - cell.downHashedKey[halfKeySize-depth] = 16 // Add terminator - if !storageRootHashIsSet { - if cell.extLen > 0 { - // Extension - if cell.hl > 0 { - if bph.trace { - fmt.Printf("extensionHash for [%x]=>[%x]\n", cell.extension[:cell.extLen], cell.h[:cell.hl]) - } - if storageRootHash, err = bph.extensionHash(cell.extension[:cell.extLen], cell.h[:cell.hl]); err != nil { - return nil, err - } - } else { - return nil, errors.New("computeBinaryCellHash extension without hash") - } - } else if cell.hl > 0 { - storageRootHash = cell.h - } else { - storageRootHash = *(*[length.Hash]byte)(EmptyRootHash) - } - } - var valBuf [128]byte - valLen := cell.accountForHashing(valBuf[:], storageRootHash) - if bph.trace { - fmt.Printf("accountLeafHashWithKey for [%x]=>[%x]\n", cell.downHashedKey[:halfKeySize+1-depth], rlp.RlpEncodedBytes(valBuf[:valLen])) - } - return bph.accountLeafHashWithKey(buf, cell.downHashedKey[:halfKeySize+1-depth], rlp.RlpEncodedBytes(valBuf[:valLen])) - } - buf = append(buf, 0x80+32) - if cell.extLen > 0 { - // Extension - if cell.hl > 0 { - if bph.trace { - fmt.Printf("extensionHash for [%x]=>[%x]\n", cell.extension[:cell.extLen], cell.h[:cell.hl]) - } - var hash [length.Hash]byte - if hash, err = bph.extensionHash(cell.extension[:cell.extLen], cell.h[:cell.hl]); err != nil { - return nil, err - } - buf = append(buf, hash[:]...) - } else { - return nil, errors.New("computeBinaryCellHash extension without hash") - } - } else if cell.hl > 0 { - buf = append(buf, cell.h[:cell.hl]...) - } else { - buf = append(buf, EmptyRootHash...) 
- } - return buf, nil -} - -func (bph *BinPatriciaHashed) needUnfolding(hashedKey []byte) int { - var cell *BinaryCell - var depth int - if bph.activeRows == 0 { - if bph.trace { - fmt.Printf("needUnfolding root, rootChecked = %t\n", bph.rootChecked) - } - if bph.rootChecked && bph.root.downHashedLen == 0 && bph.root.hl == 0 { - // Previously checked, empty root, no unfolding needed - return 0 - } - cell = &bph.root - if cell.downHashedLen == 0 && cell.hl == 0 && !bph.rootChecked { - // Need to attempt to unfold the root - return 1 - } - } else { - col := int(hashedKey[bph.currentKeyLen]) - cell = &bph.grid[bph.activeRows-1][col] - depth = bph.depths[bph.activeRows-1] - if bph.trace { - fmt.Printf("needUnfolding cell (%d, %x), currentKey=[%x], depth=%d, cell.h=[%x]\n", bph.activeRows-1, col, bph.currentKey[:bph.currentKeyLen], depth, cell.h[:cell.hl]) - } - } - if len(hashedKey) <= depth { - return 0 - } - if cell.downHashedLen == 0 { - if cell.hl == 0 { - // cell is empty, no need to unfold further - return 0 - } - // unfold branch node - return 1 - } - cpl := commonPrefixLen(hashedKey[depth:], cell.downHashedKey[:cell.downHashedLen-1]) - if bph.trace { - fmt.Printf("cpl=%d, cell.downHashedKey=[%x], depth=%d, hashedKey[depth:]=[%x]\n", cpl, cell.downHashedKey[:cell.downHashedLen], depth, hashedKey[depth:]) - } - unfolding := cpl + 1 - if depth < halfKeySize && depth+unfolding > halfKeySize { - // This is to make sure that unfolding always breaks at the level where storage subtrees start - unfolding = halfKeySize - depth - if bph.trace { - fmt.Printf("adjusted unfolding=%d\n", unfolding) - } - } - return unfolding -} - -// unfoldBranchNode returns true if unfolding has been done -func (bph *BinPatriciaHashed) unfoldBranchNode(row int, deleted bool, depth int) (bool, error) { - branchData, _, err := bph.ctx.GetBranch(binToCompact(bph.currentKey[:bph.currentKeyLen])) - if err != nil { - return false, err - } - if len(branchData) >= 2 { - branchData = branchData[2:] // skip touch map and hold aftermap and rest - } - if !bph.rootChecked && bph.currentKeyLen == 0 && len(branchData) == 0 { - // Special case - empty or deleted root - bph.rootChecked = true - return false, nil - } - if len(branchData) == 0 { - log.Warn("got empty branch data during unfold", "row", row, "depth", depth, "deleted", deleted) - } - bph.branchBefore[row] = true - bitmap := binary.BigEndian.Uint16(branchData[0:]) - pos := 2 - if deleted { - // All cells come as deleted (touched but not present after) - bph.afterMap[row] = 0 - bph.touchMap[row] = bitmap - } else { - bph.afterMap[row] = bitmap - bph.touchMap[row] = 0 - } - //fmt.Printf("unfoldBranchNode [%x], afterMap = [%016b], touchMap = [%016b]\n", branchData, bph.afterMap[row], bph.touchMap[row]) - // Loop iterating over the set bits of modMask - for bitset, j := bitmap, 0; bitset != 0; j++ { - bit := bitset & -bitset - nibble := bits.TrailingZeros16(bit) - cell := &bph.grid[row][nibble] - fieldBits := branchData[pos] - pos++ - var err error - if pos, err = cell.fillFromFields(branchData, pos, PartFlags(fieldBits)); err != nil { - return false, fmt.Errorf("prefix [%x], branchData[%x]: %w", bph.currentKey[:bph.currentKeyLen], branchData, err) - } - if bph.trace { - fmt.Printf("cell (%d, %x) depth=%d, hash=[%x], a=[%x], s=[%x], ex=[%x]\n", row, nibble, depth, cell.h[:cell.hl], cell.apk[:cell.apl], cell.spk[:cell.spl], cell.extension[:cell.extLen]) - } - if cell.apl > 0 { - if err := bph.accountFn(cell.apk[:cell.apl], cell); err != nil { - return false, err - } - if 
bph.trace { - fmt.Printf("GetAccount[%x] return balance=%d, nonce=%d code=%x\n", cell.apk[:cell.apl], &cell.Balance, cell.Nonce, cell.CodeHash[:]) - } - } - if cell.spl > 0 { - if err := bph.storageFn(cell.spk[:cell.spl], cell); err != nil { - return false, err - } - } - if err = cell.deriveHashedKeys(depth, bph.keccak, bph.accountKeyLen); err != nil { - return false, err - } - bitset ^= bit - } - return true, nil -} - -func (bph *BinPatriciaHashed) unfold(hashedKey []byte, unfolding int) error { - if bph.trace { - fmt.Printf("unfold %d: activeRows: %d\n", unfolding, bph.activeRows) - } - var upCell *BinaryCell - var touched, present bool - var col byte - var upDepth, depth int - if bph.activeRows == 0 { - if bph.rootChecked && bph.root.hl == 0 && bph.root.downHashedLen == 0 { - // No unfolding for empty root - return nil - } - upCell = &bph.root - touched = bph.rootTouched - present = bph.rootPresent - if bph.trace { - fmt.Printf("unfold root, touched %t, present %t, column %d\n", touched, present, col) - } - } else { - upDepth = bph.depths[bph.activeRows-1] - col = hashedKey[upDepth-1] - upCell = &bph.grid[bph.activeRows-1][col] - touched = bph.touchMap[bph.activeRows-1]&(uint16(1)<= unfolding { - depth = upDepth + unfolding - nibble := upCell.downHashedKey[unfolding-1] - if touched { - bph.touchMap[row] = uint16(1) << nibble - } - if present { - bph.afterMap[row] = uint16(1) << nibble - } - cell := &bph.grid[row][nibble] - cell.fillFromUpperCell(upCell, depth, unfolding) - if bph.trace { - fmt.Printf("cell (%d, %x) depth=%d\n", row, nibble, depth) - } - if row >= halfKeySize { - cell.apl = 0 - } - if unfolding > 1 { - copy(bph.currentKey[bph.currentKeyLen:], upCell.downHashedKey[:unfolding-1]) - } - bph.currentKeyLen += unfolding - 1 - } else { - // upCell.downHashedLen < unfolding - depth = upDepth + upCell.downHashedLen - nibble := upCell.downHashedKey[upCell.downHashedLen-1] - if touched { - bph.touchMap[row] = uint16(1) << nibble - } - if present { - bph.afterMap[row] = uint16(1) << nibble - } - cell := &bph.grid[row][nibble] - cell.fillFromUpperCell(upCell, depth, upCell.downHashedLen) - if bph.trace { - fmt.Printf("cell (%d, %x) depth=%d\n", row, nibble, depth) - } - if row >= halfKeySize { - cell.apl = 0 - } - if upCell.downHashedLen > 1 { - copy(bph.currentKey[bph.currentKeyLen:], upCell.downHashedKey[:upCell.downHashedLen-1]) - } - bph.currentKeyLen += upCell.downHashedLen - 1 - } - bph.depths[bph.activeRows] = depth - bph.activeRows++ - return nil -} - -func (bph *BinPatriciaHashed) needFolding(hashedKey []byte) bool { - return !bytes.HasPrefix(hashedKey, bph.currentKey[:bph.currentKeyLen]) -} - -// The purpose of fold is to reduce hph.currentKey[:hph.currentKeyLen]. 
It should be invoked -// until that current key becomes a prefix of hashedKey that we will proccess next -// (in other words until the needFolding function returns 0) -func (bph *BinPatriciaHashed) fold() (err error) { - updateKeyLen := bph.currentKeyLen - if bph.activeRows == 0 { - return errors.New("cannot fold - no active rows") - } - if bph.trace { - fmt.Printf("fold: activeRows: %d, currentKey: [%x], touchMap: %016b, afterMap: %016b\n", bph.activeRows, bph.currentKey[:bph.currentKeyLen], bph.touchMap[bph.activeRows-1], bph.afterMap[bph.activeRows-1]) - } - // Move information to the row above - row := bph.activeRows - 1 - var upBinaryCell *BinaryCell - var col int - var upDepth int - if bph.activeRows == 1 { - if bph.trace { - fmt.Printf("upcell is root\n") - } - upBinaryCell = &bph.root - } else { - upDepth = bph.depths[bph.activeRows-2] - col = int(bph.currentKey[upDepth-1]) - if bph.trace { - fmt.Printf("upcell is (%d x %x), upDepth=%d\n", row-1, col, upDepth) - } - upBinaryCell = &bph.grid[row-1][col] - } - - depth := bph.depths[bph.activeRows-1] - updateKey := binToCompact(bph.currentKey[:updateKeyLen]) - partsCount := bits.OnesCount16(bph.afterMap[row]) - - if bph.trace { - fmt.Printf("touchMap[%d]=%016b, afterMap[%d]=%016b\n", row, bph.touchMap[row], row, bph.afterMap[row]) - } - switch partsCount { - case 0: - // Everything deleted - if bph.touchMap[row] != 0 { - if row == 0 { - // Root is deleted because the tree is empty - bph.rootTouched = true - bph.rootPresent = false - } else if upDepth == halfKeySize { - // Special case - all storage items of an account have been deleted, but it does not automatically delete the account, just makes it empty storage - // Therefore we are not propagating deletion upwards, but turn it into a modification - bph.touchMap[row-1] |= uint16(1) << col - } else { - // Deletion is propagated upwards - bph.touchMap[row-1] |= uint16(1) << col - bph.afterMap[row-1] &^= uint16(1) << col - } - } - upBinaryCell.hl = 0 - upBinaryCell.apl = 0 - upBinaryCell.spl = 0 - upBinaryCell.extLen = 0 - upBinaryCell.downHashedLen = 0 - if bph.branchBefore[row] { - _, err = bph.branchEncoder.CollectUpdate(bph.ctx, updateKey, 0, bph.touchMap[row], 0, RetrieveCellNoop) - if err != nil { - return fmt.Errorf("failed to encode leaf node update: %w", err) - } - } - bph.activeRows-- - if upDepth > 0 { - bph.currentKeyLen = upDepth - 1 - } else { - bph.currentKeyLen = 0 - } - case 1: - // Leaf or extension node - if bph.touchMap[row] != 0 { - // any modifications - if row == 0 { - bph.rootTouched = true - } else { - // Modifiction is propagated upwards - bph.touchMap[row-1] |= uint16(1) << col - } - } - nibble := bits.TrailingZeros16(bph.afterMap[row]) - cell := &bph.grid[row][nibble] - upBinaryCell.extLen = 0 - upBinaryCell.fillFromLowerBinaryCell(cell, depth, bph.currentKey[upDepth:bph.currentKeyLen], nibble) - // Delete if it existed - if bph.branchBefore[row] { - _, err = bph.branchEncoder.CollectUpdate(bph.ctx, updateKey, 0, bph.touchMap[row], 0, RetrieveCellNoop) - if err != nil { - return fmt.Errorf("failed to encode leaf node update: %w", err) - } - } - bph.activeRows-- - if upDepth > 0 { - bph.currentKeyLen = upDepth - 1 - } else { - bph.currentKeyLen = 0 - } - default: - // Branch node - if bph.touchMap[row] != 0 { - // any modifications - if row == 0 { - bph.rootTouched = true - } else { - // Modifiction is propagated upwards - bph.touchMap[row-1] |= uint16(1) << col - } - } - bitmap := bph.touchMap[row] & bph.afterMap[row] - if !bph.branchBefore[row] { - // There 
was no branch node before, so we need to touch even the singular child that existed - bph.touchMap[row] |= bph.afterMap[row] - bitmap |= bph.afterMap[row] - } - // Calculate total length of all hashes - totalBranchLen := 17 - partsCount // For every empty cell, one byte - for bitset, j := bph.afterMap[row], 0; bitset != 0; j++ { - bit := bitset & -bitset - nibble := bits.TrailingZeros16(bit) - cell := &bph.grid[row][nibble] - totalBranchLen += bph.computeBinaryCellHashLen(cell, depth) - bitset ^= bit - } - - bph.keccak2.Reset() - pt := rlp.GenerateStructLen(bph.hashAuxBuffer[:], totalBranchLen) - if _, err := bph.keccak2.Write(bph.hashAuxBuffer[:pt]); err != nil { - return err - } - - b := [...]byte{0x80} - cellGetter := func(nibble int, skip bool) (*Cell, error) { - if skip { - if _, err := bph.keccak2.Write(b[:]); err != nil { - return nil, fmt.Errorf("failed to write empty nibble to hash: %w", err) - } - if bph.trace { - fmt.Printf("%x: empty(%d,%x)\n", nibble, row, nibble) - } - return nil, nil - } - cell := &bph.grid[row][nibble] - cellHash, err := bph.computeBinaryCellHash(cell, depth, bph.hashAuxBuffer[:0]) - if err != nil { - return nil, err - } - if bph.trace { - fmt.Printf("%x: computeBinaryCellHash(%d,%x,depth=%d)=[%x]\n", nibble, row, nibble, depth, cellHash) - } - if _, err := bph.keccak2.Write(cellHash); err != nil { - return nil, err - } - - // TODO extension and downHashedKey should be encoded to hex format and vice versa, data loss due to array sizes - return cell.unwrapToHexCell(), nil - } - - var lastNibble int - var err error - _ = cellGetter - - lastNibble, err = bph.branchEncoder.CollectUpdate(bph.ctx, updateKey, bitmap, bph.touchMap[row], bph.afterMap[row], cellGetter) - if err != nil { - return fmt.Errorf("failed to encode branch update: %w", err) - } - for i := lastNibble; i <= maxChild; i++ { - if _, err := bph.keccak2.Write(b[:]); err != nil { - return err - } - if bph.trace { - fmt.Printf("%x: empty(%d,%x)\n", i, row, i) - } - } - upBinaryCell.extLen = depth - upDepth - 1 - upBinaryCell.downHashedLen = upBinaryCell.extLen - if upBinaryCell.extLen > 0 { - copy(upBinaryCell.extension[:], bph.currentKey[upDepth:bph.currentKeyLen]) - copy(upBinaryCell.downHashedKey[:], bph.currentKey[upDepth:bph.currentKeyLen]) - } - if depth < halfKeySize { - upBinaryCell.apl = 0 - } - upBinaryCell.spl = 0 - upBinaryCell.hl = 32 - if _, err := bph.keccak2.Read(upBinaryCell.h[:]); err != nil { - return err - } - if bph.trace { - fmt.Printf("} [%x]\n", upBinaryCell.h[:]) - } - bph.activeRows-- - if upDepth > 0 { - bph.currentKeyLen = upDepth - 1 - } else { - bph.currentKeyLen = 0 - } - } - return nil -} - -func (bph *BinPatriciaHashed) deleteBinaryCell(hashedKey []byte) { - if bph.trace { - fmt.Printf("deleteBinaryCell, activeRows = %d\n", bph.activeRows) - } - var cell *BinaryCell - if bph.activeRows == 0 { - // Remove the root - cell = &bph.root - bph.rootTouched = true - bph.rootPresent = false - } else { - row := bph.activeRows - 1 - if bph.depths[row] < len(hashedKey) { - if bph.trace { - fmt.Printf("deleteBinaryCell skipping spurious delete depth=%d, len(hashedKey)=%d\n", bph.depths[row], len(hashedKey)) - } - return - } - col := int(hashedKey[bph.currentKeyLen]) - cell = &bph.grid[row][col] - if bph.afterMap[row]&(uint16(1)< 0; unfolding = bph.needUnfolding(hashedKey) { - if err := bph.unfold(hashedKey, unfolding); err != nil { - return nil, fmt.Errorf("unfold: %w", err) - } - } - - // Update the cell - stagedBinaryCell.fillEmpty() - if len(plainKey) == bph.accountKeyLen { - 
if err := bph.accountFn(plainKey, stagedBinaryCell); err != nil { - return nil, fmt.Errorf("GetAccount for key %x failed: %w", plainKey, err) - } - if !stagedBinaryCell.Delete { - cell := bph.updateBinaryCell(plainKey, hashedKey) - cell.setAccountFields(stagedBinaryCell.CodeHash[:], &stagedBinaryCell.Balance, stagedBinaryCell.Nonce) - - if bph.trace { - fmt.Printf("GetAccount reading key %x => balance=%d nonce=%v codeHash=%x\n", cell.apk, &cell.Balance, cell.Nonce, cell.CodeHash) - } - } - } else { - if err = bph.storageFn(plainKey, stagedBinaryCell); err != nil { - return nil, fmt.Errorf("GetStorage for key %x failed: %w", plainKey, err) - } - if !stagedBinaryCell.Delete { - bph.updateBinaryCell(plainKey, hashedKey).setStorage(stagedBinaryCell.Storage[:stagedBinaryCell.StorageLen]) - if bph.trace { - fmt.Printf("GetStorage reading key %x => %x\n", plainKey, stagedBinaryCell.Storage[:stagedBinaryCell.StorageLen]) - } - } - } - - if stagedBinaryCell.Delete { - if bph.trace { - fmt.Printf("delete cell %x hash %x\n", plainKey, hashedKey) - } - bph.deleteBinaryCell(hashedKey) - } - } - // Folding everything up to the root - for bph.activeRows > 0 { - if err := bph.fold(); err != nil { - return nil, fmt.Errorf("final fold: %w", err) - } - } - - rootHash, err = bph.RootHash() - if err != nil { - return nil, fmt.Errorf("root hash evaluation failed: %w", err) - } - err = bph.branchEncoder.Load(bph.ctx, etl.TransformArgs{Quit: ctx.Done()}) - if err != nil { - return nil, fmt.Errorf("branch update failed: %w", err) - } - return rootHash, nil -} - -func (bph *BinPatriciaHashed) SetTrace(trace bool) { bph.trace = trace } - -func (bph *BinPatriciaHashed) Variant() TrieVariant { return VariantBinPatriciaTrie } - -// Reset allows BinPatriciaHashed instance to be reused for the new commitment calculation -func (bph *BinPatriciaHashed) Reset() { - bph.rootChecked = false - bph.root.hl = 0 - bph.root.downHashedLen = 0 - bph.root.apl = 0 - bph.root.spl = 0 - bph.root.extLen = 0 - copy(bph.root.CodeHash[:], EmptyCodeHash) - bph.root.StorageLen = 0 - bph.root.Balance.Clear() - bph.root.Nonce = 0 - bph.rootTouched = false - bph.rootPresent = true -} - -func (c *BinaryCell) bytes() []byte { - var pos = 1 - size := 1 + c.hl + 1 + c.apl + c.spl + 1 + c.downHashedLen + 1 + c.extLen + 1 // max size - buf := make([]byte, size) - - var flags uint8 - if c.hl != 0 { - flags |= 1 - buf[pos] = byte(c.hl) - pos++ - copy(buf[pos:pos+c.hl], c.h[:]) - pos += c.hl - } - if c.apl != 0 { - flags |= 2 - buf[pos] = byte(c.hl) - pos++ - copy(buf[pos:pos+c.apl], c.apk[:]) - pos += c.apl - } - if c.spl != 0 { - flags |= 4 - buf[pos] = byte(c.spl) - pos++ - copy(buf[pos:pos+c.spl], c.spk[:]) - pos += c.spl - } - if c.downHashedLen != 0 { - flags |= 8 - buf[pos] = byte(c.downHashedLen) - pos++ - copy(buf[pos:pos+c.downHashedLen], c.downHashedKey[:]) - pos += c.downHashedLen - } - if c.extLen != 0 { - flags |= 16 - buf[pos] = byte(c.extLen) - pos++ - copy(buf[pos:pos+c.downHashedLen], c.downHashedKey[:]) - //pos += c.downHashedLen - } - buf[0] = flags - return buf -} - -func (c *BinaryCell) decodeBytes(buf []byte) error { - if len(buf) < 1 { - return errors.New("invalid buffer size to contain BinaryCell (at least 1 byte expected)") - } - c.fillEmpty() - - var pos int - flags := buf[pos] - pos++ - - if flags&1 != 0 { - c.hl = int(buf[pos]) - pos++ - copy(c.h[:], buf[pos:pos+c.hl]) - pos += c.hl - } - if flags&2 != 0 { - c.apl = int(buf[pos]) - pos++ - copy(c.apk[:], buf[pos:pos+c.apl]) - pos += c.apl - } - if flags&4 != 0 { - c.spl = 
int(buf[pos]) - pos++ - copy(c.spk[:], buf[pos:pos+c.spl]) - pos += c.spl - } - if flags&8 != 0 { - c.downHashedLen = int(buf[pos]) - pos++ - copy(c.downHashedKey[:], buf[pos:pos+c.downHashedLen]) - pos += c.downHashedLen - } - if flags&16 != 0 { - c.extLen = int(buf[pos]) - pos++ - copy(c.extension[:], buf[pos:pos+c.extLen]) - //pos += c.extLen - } - return nil -} - -// Encode current state of hph into bytes -func (bph *BinPatriciaHashed) EncodeCurrentState(buf []byte) ([]byte, error) { - s := binState{ - CurrentKeyLen: int16(bph.currentKeyLen), - RootChecked: bph.rootChecked, - RootTouched: bph.rootTouched, - RootPresent: bph.rootPresent, - Root: make([]byte, 0), - } - - s.Root = bph.root.bytes() - copy(s.CurrentKey[:], bph.currentKey[:]) - copy(s.Depths[:], bph.depths[:]) - copy(s.BranchBefore[:], bph.branchBefore[:]) - copy(s.TouchMap[:], bph.touchMap[:]) - copy(s.AfterMap[:], bph.afterMap[:]) - - return s.Encode(buf) -} - -// buf expected to be encoded hph state. Decode state and set up hph to that state. -func (bph *BinPatriciaHashed) SetState(buf []byte) error { - if bph.activeRows != 0 { - return errors.New("has active rows, could not reset state") - } - - var s state - if err := s.Decode(buf); err != nil { - return err - } - - bph.Reset() - - if err := bph.root.decodeBytes(s.Root); err != nil { - return err - } - - bph.rootChecked = s.RootChecked - bph.rootTouched = s.RootTouched - bph.rootPresent = s.RootPresent - - copy(bph.depths[:], s.Depths[:]) - copy(bph.branchBefore[:], s.BranchBefore[:]) - copy(bph.touchMap[:], s.TouchMap[:]) - copy(bph.afterMap[:], s.AfterMap[:]) - - return nil -} - -func (bph *BinPatriciaHashed) ProcessTree(ctx context.Context, t *UpdateTree, lp string) (rootHash []byte, err error) { - panic("not implemented") -} - -func (bph *BinPatriciaHashed) ProcessUpdates(ctx context.Context, plainKeys [][]byte, updates []Update) (rootHash []byte, err error) { - for i, pk := range plainKeys { - updates[i].hashedKey = hexToBin(pk) - updates[i].plainKey = pk - } - - sort.Slice(updates, func(i, j int) bool { - return bytes.Compare(updates[i].hashedKey, updates[j].hashedKey) < 0 - }) - - for i, plainKey := range plainKeys { - select { - case <-ctx.Done(): - return nil, ctx.Err() - default: - } - update := updates[i] - if bph.trace { - fmt.Printf("plainKey=[%x], hashedKey=[%x], currentKey=[%x]\n", update.plainKey, update.hashedKey, bph.currentKey[:bph.currentKeyLen]) - } - // Keep folding until the currentKey is the prefix of the key we modify - for bph.needFolding(update.hashedKey) { - if err := bph.fold(); err != nil { - return nil, fmt.Errorf("fold: %w", err) - } - } - // Now unfold until we step on an empty cell - for unfolding := bph.needUnfolding(update.hashedKey); unfolding > 0; unfolding = bph.needUnfolding(update.hashedKey) { - if err := bph.unfold(update.hashedKey, unfolding); err != nil { - return nil, fmt.Errorf("unfold: %w", err) - } - } - - // Update the cell - if update.Flags == DeleteUpdate { - bph.deleteBinaryCell(update.hashedKey) - if bph.trace { - fmt.Printf("key %x deleted\n", update.plainKey) - } - } else { - cell := bph.updateBinaryCell(update.plainKey, update.hashedKey) - if bph.trace { - fmt.Printf("GetAccount updated key %x =>", plainKey) - } - if update.Flags&BalanceUpdate != 0 { - if bph.trace { - fmt.Printf(" balance=%d", &update.Balance) - } - cell.Balance.Set(&update.Balance) - } - if update.Flags&NonceUpdate != 0 { - if bph.trace { - fmt.Printf(" nonce=%d", update.Nonce) - } - cell.Nonce = update.Nonce - } - if update.Flags&CodeUpdate != 0 
{ - if bph.trace { - fmt.Printf(" codeHash=%x", update.CodeHashOrStorage) - } - copy(cell.CodeHash[:], update.CodeHashOrStorage[:]) - } - if bph.trace { - fmt.Printf("\n") - } - if update.Flags&StorageUpdate != 0 { - cell.setStorage(update.CodeHashOrStorage[:update.ValLength]) - if bph.trace { - fmt.Printf("GetStorage filled key %x => %x\n", plainKey, update.CodeHashOrStorage[:update.ValLength]) - } - } - } - } - // Folding everything up to the root - for bph.activeRows > 0 { - if err := bph.fold(); err != nil { - return nil, fmt.Errorf("final fold: %w", err) - } - } - - rootHash, err = bph.RootHash() - if err != nil { - return nil, fmt.Errorf("root hash evaluation failed: %w", err) - } - - err = bph.branchEncoder.Load(bph.ctx, etl.TransformArgs{Quit: ctx.Done()}) - if err != nil { - return nil, fmt.Errorf("branch update failed: %w", err) - } - - return rootHash, nil -} - -// Hashes provided key and expands resulting hash into nibbles (each byte split into two nibbles by 4 bits) -func (bph *BinPatriciaHashed) hashAndNibblizeKey2(key []byte) []byte { //nolint - hashedKey := make([]byte, length.Hash) - - bph.keccak.Reset() - bph.keccak.Write(key[:length.Addr]) - bph.keccak.Read(hashedKey[:length.Hash]) - - if len(key[length.Addr:]) > 0 { - hashedKey = append(hashedKey, make([]byte, length.Hash)...) - bph.keccak.Reset() - bph.keccak.Write(key[length.Addr:]) - bph.keccak.Read(hashedKey[length.Hash:]) - } - - nibblized := make([]byte, len(hashedKey)*2) - for i, b := range hashedKey { - nibblized[i*2] = (b >> 4) & 0xf - nibblized[i*2+1] = b & 0xf - } - return nibblized -} - -func binHashKey(keccak keccakState, plainKey []byte, dest []byte, hashedKeyOffset int) error { - keccak.Reset() - var hashBufBack [length.Hash]byte - hashBuf := hashBufBack[:] - if _, err := keccak.Write(plainKey); err != nil { - return err - } - if _, err := keccak.Read(hashBuf); err != nil { - return err - } - for k := hashedKeyOffset; k < 256; k++ { - if hashBuf[k/8]&(1<<(7-k%8)) == 0 { - dest[k-hashedKeyOffset] = 0 - } else { - dest[k-hashedKeyOffset] = 1 - } - } - return nil -} - -func wrapAccountStorageFn(fn func([]byte, *Cell) error) func(pk []byte, bc *BinaryCell) error { - return func(pk []byte, bc *BinaryCell) error { - cl := bc.unwrapToHexCell() - - if err := fn(pk, cl); err != nil { - return err - } - - bc.Balance = *cl.Balance.Clone() - bc.Nonce = cl.Nonce - bc.StorageLen = cl.StorageLen - bc.apl = cl.accountPlainKeyLen - bc.spl = cl.storagePlainKeyLen - bc.hl = cl.HashLen - copy(bc.apk[:], cl.accountPlainKey[:]) - copy(bc.spk[:], cl.storagePlainKey[:]) - copy(bc.h[:], cl.hash[:]) - - if cl.extLen > 0 { - binExt := compactToBin(cl.extension[:cl.extLen]) - copy(bc.extension[:], binExt) - bc.extLen = len(binExt) - } - if cl.downHashedLen > 0 { - bindhk := compactToBin(cl.downHashedKey[:cl.downHashedLen]) - copy(bc.downHashedKey[:], bindhk) - bc.downHashedLen = len(bindhk) - } - - copy(bc.CodeHash[:], cl.CodeHash[:]) - copy(bc.Storage[:], cl.Storage[:]) - bc.Delete = cl.Delete - return nil - } -} - -// represents state of the tree -type binState struct { - TouchMap [maxKeySize]uint16 // For each row, bitmap of cells that were either present before modification, or modified or deleted - AfterMap [maxKeySize]uint16 // For each row, bitmap of cells that were present after modification - CurrentKeyLen int16 - Root []byte // encoded root cell - RootChecked bool // Set to false if it is not known whether the root is empty, set to true if it is checked - RootTouched bool - RootPresent bool - BranchBefore [maxKeySize]bool 
// For each row, whether there was a branch node in the database loaded in unfold - CurrentKey [maxKeySize]byte // For each row indicates which column is currently selected - Depths [maxKeySize]int // For each row, the depth of cells in that row -} - -func (s *binState) Encode(buf []byte) ([]byte, error) { - var rootFlags stateRootFlag - if s.RootPresent { - rootFlags |= stateRootPresent - } - if s.RootChecked { - rootFlags |= stateRootChecked - } - if s.RootTouched { - rootFlags |= stateRootTouched - } - - ee := bytes.NewBuffer(buf) - if err := binary.Write(ee, binary.BigEndian, s.CurrentKeyLen); err != nil { - return nil, fmt.Errorf("encode currentKeyLen: %w", err) - } - if err := binary.Write(ee, binary.BigEndian, int8(rootFlags)); err != nil { - return nil, fmt.Errorf("encode rootFlags: %w", err) - } - if n, err := ee.Write(s.CurrentKey[:]); err != nil || n != len(s.CurrentKey) { - return nil, fmt.Errorf("encode currentKey: %w", err) - } - if err := binary.Write(ee, binary.BigEndian, uint16(len(s.Root))); err != nil { - return nil, fmt.Errorf("encode root len: %w", err) - } - if n, err := ee.Write(s.Root); err != nil || n != len(s.Root) { - return nil, fmt.Errorf("encode root: %w", err) - } - d := make([]byte, len(s.Depths)) - for i := 0; i < len(s.Depths); i++ { - d[i] = byte(s.Depths[i]) - } - if n, err := ee.Write(d); err != nil || n != len(s.Depths) { - return nil, fmt.Errorf("encode depths: %w", err) - } - if err := binary.Write(ee, binary.BigEndian, s.TouchMap); err != nil { - return nil, fmt.Errorf("encode touchMap: %w", err) - } - if err := binary.Write(ee, binary.BigEndian, s.AfterMap); err != nil { - return nil, fmt.Errorf("encode afterMap: %w", err) - } - - var before1, before2 uint64 - for i := 0; i < halfKeySize; i++ { - if s.BranchBefore[i] { - before1 |= 1 << i - } - } - for i, j := halfKeySize, 0; i < maxKeySize; i, j = i+1, j+1 { - if s.BranchBefore[i] { - before2 |= 1 << j - } - } - if err := binary.Write(ee, binary.BigEndian, before1); err != nil { - return nil, fmt.Errorf("encode branchBefore_1: %w", err) - } - if err := binary.Write(ee, binary.BigEndian, before2); err != nil { - return nil, fmt.Errorf("encode branchBefore_2: %w", err) - } - return ee.Bytes(), nil -} - -func (s *binState) Decode(buf []byte) error { - aux := bytes.NewBuffer(buf) - if err := binary.Read(aux, binary.BigEndian, &s.CurrentKeyLen); err != nil { - return fmt.Errorf("currentKeyLen: %w", err) - } - var rootFlags stateRootFlag - if err := binary.Read(aux, binary.BigEndian, &rootFlags); err != nil { - return fmt.Errorf("rootFlags: %w", err) - } - - if rootFlags&stateRootPresent != 0 { - s.RootPresent = true - } - if rootFlags&stateRootTouched != 0 { - s.RootTouched = true - } - if rootFlags&stateRootChecked != 0 { - s.RootChecked = true - } - if n, err := aux.Read(s.CurrentKey[:]); err != nil || n != maxKeySize { - return fmt.Errorf("currentKey: %w", err) - } - var rootSize uint16 - if err := binary.Read(aux, binary.BigEndian, &rootSize); err != nil { - return fmt.Errorf("root size: %w", err) - } - s.Root = make([]byte, rootSize) - if _, err := aux.Read(s.Root); err != nil { - return fmt.Errorf("root: %w", err) - } - d := make([]byte, len(s.Depths)) - if err := binary.Read(aux, binary.BigEndian, &d); err != nil { - return fmt.Errorf("depths: %w", err) - } - for i := 0; i < len(s.Depths); i++ { - s.Depths[i] = int(d[i]) - } - if err := binary.Read(aux, binary.BigEndian, &s.TouchMap); err != nil { - return fmt.Errorf("touchMap: %w", err) - } - if err := binary.Read(aux, binary.BigEndian, 
&s.AfterMap); err != nil { - return fmt.Errorf("afterMap: %w", err) - } - var branch1, branch2 uint64 - if err := binary.Read(aux, binary.BigEndian, &branch1); err != nil { - return fmt.Errorf("branchBefore1: %w", err) - } - if err := binary.Read(aux, binary.BigEndian, &branch2); err != nil { - return fmt.Errorf("branchBefore2: %w", err) - } - - // TODO invalid branch encode - for i := 0; i < halfKeySize; i++ { - if branch1&(1< 0 { +// compactedExt := binToCompact(cell.extension[:cell.extLen]) +// copy(cl.extension[:], compactedExt) +// cl.extLen = len(compactedExt) +// } +// if cell.downHashedLen > 0 { +// compactedDHK := binToCompact(cell.downHashedKey[:cell.downHashedLen]) +// copy(cl.downHashedKey[:], compactedDHK) +// cl.downHashedLen = len(compactedDHK) +// } +// +// copy(cl.CodeHash[:], cell.CodeHash[:]) +// copy(cl.Storage[:], cell.Storage[:]) +// cl.Delete = cell.Delete +// return cl +//} +// +//var ( // TODO REEAVL +// EmptyBinRootHash, _ = hex.DecodeString("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") +// EmptyBinCodeHash, _ = hex.DecodeString("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470") +//) +// +//func (cell *BinaryCell) fillEmpty() { +// cell.apl = 0 +// cell.spl = 0 +// cell.downHashedLen = 0 +// cell.extLen = 0 +// cell.hl = 0 +// cell.Nonce = 0 +// cell.Balance.Clear() +// copy(cell.CodeHash[:], EmptyCodeHash) +// cell.StorageLen = 0 +// cell.Delete = false +//} +// +//func (cell *BinaryCell) fillFromUpperCell(upBinaryCell *BinaryCell, depth, depthIncrement int) { +// if upBinaryCell.downHashedLen >= depthIncrement { +// cell.downHashedLen = upBinaryCell.downHashedLen - depthIncrement +// } else { +// cell.downHashedLen = 0 +// } +// if upBinaryCell.downHashedLen > depthIncrement { +// copy(cell.downHashedKey[:], upBinaryCell.downHashedKey[depthIncrement:upBinaryCell.downHashedLen]) +// } +// if upBinaryCell.extLen >= depthIncrement { +// cell.extLen = upBinaryCell.extLen - depthIncrement +// } else { +// cell.extLen = 0 +// } +// if upBinaryCell.extLen > depthIncrement { +// copy(cell.extension[:], upBinaryCell.extension[depthIncrement:upBinaryCell.extLen]) +// } +// if depth <= halfKeySize { +// cell.apl = upBinaryCell.apl +// if upBinaryCell.apl > 0 { +// copy(cell.apk[:], upBinaryCell.apk[:cell.apl]) +// cell.Balance.Set(&upBinaryCell.Balance) +// cell.Nonce = upBinaryCell.Nonce +// copy(cell.CodeHash[:], upBinaryCell.CodeHash[:]) +// cell.extLen = upBinaryCell.extLen +// if upBinaryCell.extLen > 0 { +// copy(cell.extension[:], upBinaryCell.extension[:upBinaryCell.extLen]) +// } +// } +// } else { +// cell.apl = 0 +// } +// cell.spl = upBinaryCell.spl +// if upBinaryCell.spl > 0 { +// copy(cell.spk[:], upBinaryCell.spk[:upBinaryCell.spl]) +// cell.StorageLen = upBinaryCell.StorageLen +// if upBinaryCell.StorageLen > 0 { +// copy(cell.Storage[:], upBinaryCell.Storage[:upBinaryCell.StorageLen]) +// } +// } +// cell.hl = upBinaryCell.hl +// if upBinaryCell.hl > 0 { +// copy(cell.h[:], upBinaryCell.h[:upBinaryCell.hl]) +// } +//} +// +//func (cell *BinaryCell) fillFromLowerBinaryCell(lowBinaryCell *BinaryCell, lowDepth int, preExtension []byte, nibble int) { +// if lowBinaryCell.apl > 0 || lowDepth < halfKeySize { +// cell.apl = lowBinaryCell.apl +// } +// if lowBinaryCell.apl > 0 { +// copy(cell.apk[:], lowBinaryCell.apk[:cell.apl]) +// cell.Balance.Set(&lowBinaryCell.Balance) +// cell.Nonce = lowBinaryCell.Nonce +// copy(cell.CodeHash[:], lowBinaryCell.CodeHash[:]) +// } +// cell.spl = lowBinaryCell.spl +// if 
lowBinaryCell.spl > 0 { +// copy(cell.spk[:], lowBinaryCell.spk[:cell.spl]) +// cell.StorageLen = lowBinaryCell.StorageLen +// if lowBinaryCell.StorageLen > 0 { +// copy(cell.Storage[:], lowBinaryCell.Storage[:lowBinaryCell.StorageLen]) +// } +// } +// if lowBinaryCell.hl > 0 { +// if (lowBinaryCell.apl == 0 && lowDepth < halfKeySize) || (lowBinaryCell.spl == 0 && lowDepth > halfKeySize) { +// // Extension is related to either accounts branch node, or storage branch node, we prepend it by preExtension | nibble +// if len(preExtension) > 0 { +// copy(cell.extension[:], preExtension) +// } +// cell.extension[len(preExtension)] = byte(nibble) +// if lowBinaryCell.extLen > 0 { +// copy(cell.extension[1+len(preExtension):], lowBinaryCell.extension[:lowBinaryCell.extLen]) +// } +// cell.extLen = lowBinaryCell.extLen + 1 + len(preExtension) +// } else { +// // Extension is related to a storage branch node, so we copy it upwards as is +// cell.extLen = lowBinaryCell.extLen +// if lowBinaryCell.extLen > 0 { +// copy(cell.extension[:], lowBinaryCell.extension[:lowBinaryCell.extLen]) +// } +// } +// } +// cell.hl = lowBinaryCell.hl +// if lowBinaryCell.hl > 0 { +// copy(cell.h[:], lowBinaryCell.h[:lowBinaryCell.hl]) +// } +//} +// +//func (cell *BinaryCell) deriveHashedKeys(depth int, keccak keccakState, accountKeyLen int) error { +// extraLen := 0 +// if cell.apl > 0 { +// if depth > halfKeySize { +// return errors.New("deriveHashedKeys accountPlainKey present at depth > halfKeySize") +// } +// extraLen = halfKeySize - depth +// } +// if cell.spl > 0 { +// if depth >= halfKeySize { +// extraLen = maxKeySize - depth +// } else { +// extraLen += halfKeySize +// } +// } +// if extraLen > 0 { +// if cell.downHashedLen > 0 { +// copy(cell.downHashedKey[extraLen:], cell.downHashedKey[:cell.downHashedLen]) +// } +// cell.downHashedLen += extraLen +// var hashedKeyOffset, downOffset int +// if cell.apl > 0 { +// if err := binHashKey(keccak, cell.apk[:cell.apl], cell.downHashedKey[:], depth); err != nil { +// return err +// } +// downOffset = halfKeySize - depth +// } +// if cell.spl > 0 { +// if depth >= halfKeySize { +// hashedKeyOffset = depth - halfKeySize +// } +// if err := binHashKey(keccak, cell.spk[accountKeyLen:cell.spl], cell.downHashedKey[downOffset:], hashedKeyOffset); err != nil { +// return err +// } +// } +// } +// return nil +//} +// +//func (cell *BinaryCell) fillFromFields(data []byte, pos int, fieldBits PartFlags) (int, error) { +// if fieldBits&HashedKeyPart != 0 { +// l, n := binary.Uvarint(data[pos:]) +// if n == 0 { +// return 0, errors.New("fillFromFields buffer too small for hashedKey len") +// } else if n < 0 { +// return 0, errors.New("fillFromFields value overflow for hashedKey len") +// } +// pos += n +// if len(data) < pos+int(l) { +// return 0, fmt.Errorf("fillFromFields buffer too small for hashedKey exp %d got %d", pos+int(l), len(data)) +// } +// cell.downHashedLen = int(l) +// cell.extLen = int(l) +// if l > 0 { +// copy(cell.downHashedKey[:], data[pos:pos+int(l)]) +// copy(cell.extension[:], data[pos:pos+int(l)]) +// pos += int(l) +// } +// } else { +// cell.downHashedLen = 0 +// cell.extLen = 0 +// } +// if fieldBits&AccountPlainPart != 0 { +// l, n := binary.Uvarint(data[pos:]) +// if n == 0 { +// return 0, errors.New("fillFromFields buffer too small for accountPlainKey len") +// } else if n < 0 { +// return 0, errors.New("fillFromFields value overflow for accountPlainKey len") +// } +// pos += n +// if len(data) < pos+int(l) { +// return 0, errors.New("fillFromFields 
buffer too small for accountPlainKey") +// } +// cell.apl = int(l) +// if l > 0 { +// copy(cell.apk[:], data[pos:pos+int(l)]) +// pos += int(l) +// } +// } else { +// cell.apl = 0 +// } +// if fieldBits&StoragePlainPart != 0 { +// l, n := binary.Uvarint(data[pos:]) +// if n == 0 { +// return 0, errors.New("fillFromFields buffer too small for storagePlainKey len") +// } else if n < 0 { +// return 0, errors.New("fillFromFields value overflow for storagePlainKey len") +// } +// pos += n +// if len(data) < pos+int(l) { +// return 0, errors.New("fillFromFields buffer too small for storagePlainKey") +// } +// cell.spl = int(l) +// if l > 0 { +// copy(cell.spk[:], data[pos:pos+int(l)]) +// pos += int(l) +// } +// } else { +// cell.spl = 0 +// } +// if fieldBits&HashPart != 0 { +// l, n := binary.Uvarint(data[pos:]) +// if n == 0 { +// return 0, errors.New("fillFromFields buffer too small for hash len") +// } else if n < 0 { +// return 0, errors.New("fillFromFields value overflow for hash len") +// } +// pos += n +// if len(data) < pos+int(l) { +// return 0, errors.New("fillFromFields buffer too small for hash") +// } +// cell.hl = int(l) +// if l > 0 { +// copy(cell.h[:], data[pos:pos+int(l)]) +// pos += int(l) +// } +// } else { +// cell.hl = 0 +// } +// return pos, nil +//} +// +//func (cell *BinaryCell) setStorage(value []byte) { +// cell.StorageLen = len(value) +// if len(value) > 0 { +// copy(cell.Storage[:], value) +// } +//} +// +//func (cell *BinaryCell) setAccountFields(codeHash []byte, balance *uint256.Int, nonce uint64) { +// copy(cell.CodeHash[:], codeHash) +// +// cell.Balance.SetBytes(balance.Bytes()) +// cell.Nonce = nonce +//} +// +//func (cell *BinaryCell) accountForHashing(buffer []byte, storageRootHash [length.Hash]byte) int { +// balanceBytes := 0 +// if !cell.Balance.LtUint64(128) { +// balanceBytes = cell.Balance.ByteLen() +// } +// +// var nonceBytes int +// if cell.Nonce < 128 && cell.Nonce != 0 { +// nonceBytes = 0 +// } else { +// nonceBytes = common.BitLenToByteLen(bits.Len64(cell.Nonce)) +// } +// +// var structLength = uint(balanceBytes + nonceBytes + 2) +// structLength += 66 // Two 32-byte arrays + 2 prefixes +// +// var pos int +// if structLength < 56 { +// buffer[0] = byte(192 + structLength) +// pos = 1 +// } else { +// lengthBytes := common.BitLenToByteLen(bits.Len(structLength)) +// buffer[0] = byte(247 + lengthBytes) +// +// for i := lengthBytes; i > 0; i-- { +// buffer[i] = byte(structLength) +// structLength >>= 8 +// } +// +// pos = lengthBytes + 1 +// } +// +// // Encoding nonce +// if cell.Nonce < 128 && cell.Nonce != 0 { +// buffer[pos] = byte(cell.Nonce) +// } else { +// buffer[pos] = byte(128 + nonceBytes) +// var nonce = cell.Nonce +// for i := nonceBytes; i > 0; i-- { +// buffer[pos+i] = byte(nonce) +// nonce >>= 8 +// } +// } +// pos += 1 + nonceBytes +// +// // Encoding balance +// if cell.Balance.LtUint64(128) && !cell.Balance.IsZero() { +// buffer[pos] = byte(cell.Balance.Uint64()) +// pos++ +// } else { +// buffer[pos] = byte(128 + balanceBytes) +// pos++ +// cell.Balance.WriteToSlice(buffer[pos : pos+balanceBytes]) +// pos += balanceBytes +// } +// +// // Encoding Root and CodeHash +// buffer[pos] = 128 + 32 +// pos++ +// copy(buffer[pos:], storageRootHash[:]) +// pos += 32 +// buffer[pos] = 128 + 32 +// pos++ +// copy(buffer[pos:], cell.CodeHash[:]) +// pos += 32 +// return pos +//} +// +//func (bph *BinPatriciaHashed) ResetContext(ctx PatriciaContext) {} +// +//func (bph *BinPatriciaHashed) completeLeafHash(buf, keyPrefix []byte, kp, kl, 
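
The field decoding around this point reads each cell field as a uvarint length followed by that many bytes. Below is a minimal standalone sketch of that pattern using only encoding/binary; readField and its error messages are illustrative helpers, not code from this patch.

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// readField decodes one uvarint-length-prefixed field starting at pos and
// returns the field bytes together with the position just past it.
func readField(data []byte, pos int) ([]byte, int, error) {
	l, n := binary.Uvarint(data[pos:])
	if n == 0 {
		return nil, 0, errors.New("buffer too small for length prefix")
	}
	if n < 0 {
		return nil, 0, errors.New("length prefix overflows uint64")
	}
	pos += n
	if len(data) < pos+int(l) {
		return nil, 0, fmt.Errorf("buffer too small: need %d, have %d", pos+int(l), len(data))
	}
	return data[pos : pos+int(l)], pos + int(l), nil
}

func main() {
	// Encode two fields the same way: uvarint length followed by payload.
	var buf []byte
	for _, f := range [][]byte{[]byte("abc"), []byte("de")} {
		buf = binary.AppendUvarint(buf, uint64(len(f)))
		buf = append(buf, f...)
	}
	for pos := 0; pos < len(buf); {
		field, next, err := readField(buf, pos)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s\n", field)
		pos = next
	}
}
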
compactLen int, key []byte, compact0 byte, ni int, val rlp.RlpSerializable, singleton bool) ([]byte, error) { +// totalLen := kp + kl + val.DoubleRLPLen() +// var lenPrefix [4]byte +// pt := rlp.GenerateStructLen(lenPrefix[:], totalLen) +// embedded := !singleton && totalLen+pt < length.Hash +// var writer io.Writer +// if embedded { +// //bph.byteArrayWriter.Setup(buf) +// bph.auxBuffer.Reset() +// writer = bph.auxBuffer +// } else { +// bph.keccak.Reset() +// writer = bph.keccak +// } +// if _, err := writer.Write(lenPrefix[:pt]); err != nil { +// return nil, err +// } +// if _, err := writer.Write(keyPrefix[:kp]); err != nil { +// return nil, err +// } +// var b [1]byte +// b[0] = compact0 +// if _, err := writer.Write(b[:]); err != nil { +// return nil, err +// } +// for i := 1; i < compactLen; i++ { +// b[0] = key[ni]*16 + key[ni+1] +// if _, err := writer.Write(b[:]); err != nil { +// return nil, err +// } +// ni += 2 +// } +// var prefixBuf [8]byte +// if err := val.ToDoubleRLP(writer, prefixBuf[:]); err != nil { +// return nil, err +// } +// if embedded { +// buf = bph.auxBuffer.Bytes() +// } else { +// var hashBuf [33]byte +// hashBuf[0] = 0x80 + length.Hash +// if _, err := bph.keccak.Read(hashBuf[1:]); err != nil { +// return nil, err +// } +// buf = append(buf, hashBuf[:]...) +// } +// return buf, nil +//} +// +//func (bph *BinPatriciaHashed) leafHashWithKeyVal(buf, key []byte, val rlp.RlpSerializableBytes, singleton bool) ([]byte, error) { +// // Compute the total length of binary representation +// var kp, kl int +// // Write key +// var compactLen int +// var ni int +// var compact0 byte +// compactLen = (len(key)-1)/2 + 1 +// if len(key)&1 == 0 { +// compact0 = 0x30 + key[0] // Odd: (3<<4) + first nibble +// ni = 1 +// } else { +// compact0 = 0x20 +// } +// var keyPrefix [1]byte +// if compactLen > 1 { +// keyPrefix[0] = 0x80 + byte(compactLen) +// kp = 1 +// kl = compactLen +// } else { +// kl = 1 +// } +// return bph.completeLeafHash(buf, keyPrefix[:], kp, kl, compactLen, key, compact0, ni, val, singleton) +//} +// +//func (bph *BinPatriciaHashed) accountLeafHashWithKey(buf, key []byte, val rlp.RlpSerializable) ([]byte, error) { +// // Compute the total length of binary representation +// var kp, kl int +// // Write key +// var compactLen int +// var ni int +// var compact0 byte +// if hasTerm(key) { +// compactLen = (len(key)-1)/2 + 1 +// if len(key)&1 == 0 { +// compact0 = 48 + key[0] // Odd (1<<4) + first nibble +// ni = 1 +// } else { +// compact0 = 32 +// } +// } else { +// compactLen = len(key)/2 + 1 +// if len(key)&1 == 1 { +// compact0 = 16 + key[0] // Odd (1<<4) + first nibble +// ni = 1 +// } +// } +// var keyPrefix [1]byte +// if compactLen > 1 { +// keyPrefix[0] = byte(128 + compactLen) +// kp = 1 +// kl = compactLen +// } else { +// kl = 1 +// } +// return bph.completeLeafHash(buf, keyPrefix[:], kp, kl, compactLen, key, compact0, ni, val, true) +//} +// +//func (bph *BinPatriciaHashed) extensionHash(key []byte, hash []byte) ([length.Hash]byte, error) { +// var hashBuf [length.Hash]byte +// +// // Compute the total length of binary representation +// var kp, kl int +// // Write key +// var compactLen int +// var ni int +// var compact0 byte +// if hasTerm(key) { +// compactLen = (len(key)-1)/2 + 1 +// if len(key)&1 == 0 { +// compact0 = 0x30 + key[0] // Odd: (3<<4) + first nibble +// ni = 1 +// } else { +// compact0 = 0x20 +// } +// } else { +// compactLen = len(key)/2 + 1 +// if len(key)&1 == 1 { +// compact0 = 0x10 + key[0] // Odd: (1<<4) + first nibble +// 
ni = 1 +// } +// } +// var keyPrefix [1]byte +// if compactLen > 1 { +// keyPrefix[0] = 0x80 + byte(compactLen) +// kp = 1 +// kl = compactLen +// } else { +// kl = 1 +// } +// totalLen := kp + kl + 33 +// var lenPrefix [4]byte +// pt := rlp.GenerateStructLen(lenPrefix[:], totalLen) +// bph.keccak.Reset() +// if _, err := bph.keccak.Write(lenPrefix[:pt]); err != nil { +// return hashBuf, err +// } +// if _, err := bph.keccak.Write(keyPrefix[:kp]); err != nil { +// return hashBuf, err +// } +// var b [1]byte +// b[0] = compact0 +// if _, err := bph.keccak.Write(b[:]); err != nil { +// return hashBuf, err +// } +// for i := 1; i < compactLen; i++ { +// b[0] = key[ni]*16 + key[ni+1] +// if _, err := bph.keccak.Write(b[:]); err != nil { +// return hashBuf, err +// } +// ni += 2 +// } +// b[0] = 0x80 + length.Hash +// if _, err := bph.keccak.Write(b[:]); err != nil { +// return hashBuf, err +// } +// if _, err := bph.keccak.Write(hash); err != nil { +// return hashBuf, err +// } +// // Replace previous hash with the new one +// if _, err := bph.keccak.Read(hashBuf[:]); err != nil { +// return hashBuf, err +// } +// return hashBuf, nil +//} +// +//func (bph *BinPatriciaHashed) computeBinaryCellHashLen(cell *BinaryCell, depth int) int { +// if cell.spl > 0 && depth >= halfKeySize { +// keyLen := 128 - depth + 1 // Length of hex key with terminator character +// var kp, kl int +// compactLen := (keyLen-1)/2 + 1 +// if compactLen > 1 { +// kp = 1 +// kl = compactLen +// } else { +// kl = 1 +// } +// val := rlp.RlpSerializableBytes(cell.Storage[:cell.StorageLen]) +// totalLen := kp + kl + val.DoubleRLPLen() +// var lenPrefix [4]byte +// pt := rlp.GenerateStructLen(lenPrefix[:], totalLen) +// if totalLen+pt < length.Hash { +// return totalLen + pt +// } +// } +// return length.Hash + 1 +//} +// +//func (bph *BinPatriciaHashed) computeBinaryCellHash(cell *BinaryCell, depth int, buf []byte) ([]byte, error) { +// var err error +// var storageRootHash [length.Hash]byte +// storageRootHashIsSet := false +// if cell.spl > 0 { +// var hashedKeyOffset int +// if depth >= halfKeySize { +// hashedKeyOffset = depth - halfKeySize +// } +// singleton := depth <= halfKeySize +// if err := binHashKey(bph.keccak, cell.spk[bph.accountKeyLen:cell.spl], cell.downHashedKey[:], hashedKeyOffset); err != nil { +// return nil, err +// } +// cell.downHashedKey[halfKeySize-hashedKeyOffset] = 16 // Add terminator +// if singleton { +// if bph.trace { +// fmt.Printf("leafHashWithKeyVal(singleton) for [%x]=>[%x]\n", cell.downHashedKey[:halfKeySize-hashedKeyOffset+1], cell.Storage[:cell.StorageLen]) +// } +// aux := make([]byte, 0, 33) +// if aux, err = bph.leafHashWithKeyVal(aux, cell.downHashedKey[:halfKeySize-hashedKeyOffset+1], cell.Storage[:cell.StorageLen], true); err != nil { +// return nil, err +// } +// storageRootHash = *(*[length.Hash]byte)(aux[1:]) +// storageRootHashIsSet = true +// } else { +// if bph.trace { +// fmt.Printf("leafHashWithKeyVal for [%x]=>[%x]\n", cell.downHashedKey[:halfKeySize-hashedKeyOffset+1], cell.Storage[:cell.StorageLen]) +// } +// return bph.leafHashWithKeyVal(buf, cell.downHashedKey[:halfKeySize-hashedKeyOffset+1], cell.Storage[:cell.StorageLen], false) +// } +// } +// if cell.apl > 0 { +// if err := binHashKey(bph.keccak, cell.apk[:cell.apl], cell.downHashedKey[:], depth); err != nil { +// return nil, err +// } +// cell.downHashedKey[halfKeySize-depth] = 16 // Add terminator +// if !storageRootHashIsSet { +// if cell.extLen > 0 { +// // Extension +// if cell.hl > 0 { +// if bph.trace { +// 
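
The leaf and extension hashing around this point packs nibble keys into the usual hex-prefix ("compact") form before hashing. A self-contained sketch of that encoding follows, assuming the standard scheme (0x20 terminator flag, 0x10 odd-length flag); hexPrefix is an illustrative helper, not part of this patch.

package main

import "fmt"

// hexPrefix packs a slice of nibbles (values 0..15) into compact hex-prefix
// form: the first byte carries a terminator flag (0x20) and, for odd-length
// keys, an odd flag (0x10) plus the first nibble; the remaining nibbles are
// packed two per byte.
func hexPrefix(nibbles []byte, terminating bool) []byte {
	var flag byte
	if terminating {
		flag = 0x20
	}
	out := make([]byte, 0, len(nibbles)/2+1)
	if len(nibbles)%2 == 1 {
		out = append(out, flag|0x10|nibbles[0])
		nibbles = nibbles[1:]
	} else {
		out = append(out, flag)
	}
	for i := 0; i < len(nibbles); i += 2 {
		out = append(out, nibbles[i]<<4|nibbles[i+1])
	}
	return out
}

func main() {
	fmt.Printf("%x\n", hexPrefix([]byte{0x1, 0x2, 0x3}, false)) // 1123
	fmt.Printf("%x\n", hexPrefix([]byte{0xa, 0xb}, true))       // 20ab
}
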
fmt.Printf("extensionHash for [%x]=>[%x]\n", cell.extension[:cell.extLen], cell.h[:cell.hl]) +// } +// if storageRootHash, err = bph.extensionHash(cell.extension[:cell.extLen], cell.h[:cell.hl]); err != nil { +// return nil, err +// } +// } else { +// return nil, errors.New("computeBinaryCellHash extension without hash") +// } +// } else if cell.hl > 0 { +// storageRootHash = cell.h +// } else { +// storageRootHash = *(*[length.Hash]byte)(EmptyRootHash) +// } +// } +// var valBuf [128]byte +// valLen := cell.accountForHashing(valBuf[:], storageRootHash) +// if bph.trace { +// fmt.Printf("accountLeafHashWithKey for [%x]=>[%x]\n", cell.downHashedKey[:halfKeySize+1-depth], rlp.RlpEncodedBytes(valBuf[:valLen])) +// } +// return bph.accountLeafHashWithKey(buf, cell.downHashedKey[:halfKeySize+1-depth], rlp.RlpEncodedBytes(valBuf[:valLen])) +// } +// buf = append(buf, 0x80+32) +// if cell.extLen > 0 { +// // Extension +// if cell.hl > 0 { +// if bph.trace { +// fmt.Printf("extensionHash for [%x]=>[%x]\n", cell.extension[:cell.extLen], cell.h[:cell.hl]) +// } +// var hash [length.Hash]byte +// if hash, err = bph.extensionHash(cell.extension[:cell.extLen], cell.h[:cell.hl]); err != nil { +// return nil, err +// } +// buf = append(buf, hash[:]...) +// } else { +// return nil, errors.New("computeBinaryCellHash extension without hash") +// } +// } else if cell.hl > 0 { +// buf = append(buf, cell.h[:cell.hl]...) +// } else { +// buf = append(buf, EmptyRootHash...) +// } +// return buf, nil +//} +// +//func (bph *BinPatriciaHashed) needUnfolding(hashedKey []byte) int { +// var cell *BinaryCell +// var depth int +// if bph.activeRows == 0 { +// if bph.trace { +// fmt.Printf("needUnfolding root, rootChecked = %t\n", bph.rootChecked) +// } +// if bph.rootChecked && bph.root.downHashedLen == 0 && bph.root.hl == 0 { +// // Previously checked, empty root, no unfolding needed +// return 0 +// } +// cell = &bph.root +// if cell.downHashedLen == 0 && cell.hl == 0 && !bph.rootChecked { +// // Need to attempt to unfold the root +// return 1 +// } +// } else { +// col := int(hashedKey[bph.currentKeyLen]) +// cell = &bph.grid[bph.activeRows-1][col] +// depth = bph.depths[bph.activeRows-1] +// if bph.trace { +// fmt.Printf("needUnfolding cell (%d, %x), currentKey=[%x], depth=%d, cell.h=[%x]\n", bph.activeRows-1, col, bph.currentKey[:bph.currentKeyLen], depth, cell.h[:cell.hl]) +// } +// } +// if len(hashedKey) <= depth { +// return 0 +// } +// if cell.downHashedLen == 0 { +// if cell.hl == 0 { +// // cell is empty, no need to unfold further +// return 0 +// } +// // unfold branch node +// return 1 +// } +// cpl := commonPrefixLen(hashedKey[depth:], cell.downHashedKey[:cell.downHashedLen-1]) +// if bph.trace { +// fmt.Printf("cpl=%d, cell.downHashedKey=[%x], depth=%d, hashedKey[depth:]=[%x]\n", cpl, cell.downHashedKey[:cell.downHashedLen], depth, hashedKey[depth:]) +// } +// unfolding := cpl + 1 +// if depth < halfKeySize && depth+unfolding > halfKeySize { +// // This is to make sure that unfolding always breaks at the level where storage subtrees start +// unfolding = halfKeySize - depth +// if bph.trace { +// fmt.Printf("adjusted unfolding=%d\n", unfolding) +// } +// } +// return unfolding +//} +// +//// unfoldBranchNode returns true if unfolding has been done +//func (bph *BinPatriciaHashed) unfoldBranchNode(row int, deleted bool, depth int) (bool, error) { +// branchData, _, err := bph.ctx.GetBranch(binToCompact(bph.currentKey[:bph.currentKeyLen])) +// if err != nil { +// return false, err +// } +// if 
len(branchData) >= 2 { +// branchData = branchData[2:] // skip touch map and hold aftermap and rest +// } +// if !bph.rootChecked && bph.currentKeyLen == 0 && len(branchData) == 0 { +// // Special case - empty or deleted root +// bph.rootChecked = true +// return false, nil +// } +// if len(branchData) == 0 { +// log.Warn("got empty branch data during unfold", "row", row, "depth", depth, "deleted", deleted) +// } +// bph.branchBefore[row] = true +// bitmap := binary.BigEndian.Uint16(branchData[0:]) +// pos := 2 +// if deleted { +// // All cells come as deleted (touched but not present after) +// bph.afterMap[row] = 0 +// bph.touchMap[row] = bitmap +// } else { +// bph.afterMap[row] = bitmap +// bph.touchMap[row] = 0 +// } +// //fmt.Printf("unfoldBranchNode [%x], afterMap = [%016b], touchMap = [%016b]\n", branchData, bph.afterMap[row], bph.touchMap[row]) +// // Loop iterating over the set bits of modMask +// for bitset, j := bitmap, 0; bitset != 0; j++ { +// bit := bitset & -bitset +// nibble := bits.TrailingZeros16(bit) +// cell := &bph.grid[row][nibble] +// fieldBits := branchData[pos] +// pos++ +// var err error +// if pos, err = cell.fillFromFields(branchData, pos, PartFlags(fieldBits)); err != nil { +// return false, fmt.Errorf("prefix [%x], branchData[%x]: %w", bph.currentKey[:bph.currentKeyLen], branchData, err) +// } +// if bph.trace { +// fmt.Printf("cell (%d, %x) depth=%d, hash=[%x], a=[%x], s=[%x], ex=[%x]\n", row, nibble, depth, cell.h[:cell.hl], cell.apk[:cell.apl], cell.spk[:cell.spl], cell.extension[:cell.extLen]) +// } +// if cell.apl > 0 { +// if err := bph.accountFn(cell.apk[:cell.apl], cell); err != nil { +// return false, err +// } +// if bph.trace { +// fmt.Printf("GetAccount[%x] return balance=%d, nonce=%d code=%x\n", cell.apk[:cell.apl], &cell.Balance, cell.Nonce, cell.CodeHash[:]) +// } +// } +// if cell.spl > 0 { +// if err := bph.storageFn(cell.spk[:cell.spl], cell); err != nil { +// return false, err +// } +// } +// if err = cell.deriveHashedKeys(depth, bph.keccak, bph.accountKeyLen); err != nil { +// return false, err +// } +// bitset ^= bit +// } +// return true, nil +//} +// +//func (bph *BinPatriciaHashed) unfold(hashedKey []byte, unfolding int) error { +// if bph.trace { +// fmt.Printf("unfold %d: activeRows: %d\n", unfolding, bph.activeRows) +// } +// var upCell *BinaryCell +// var touched, present bool +// var col byte +// var upDepth, depth int +// if bph.activeRows == 0 { +// if bph.rootChecked && bph.root.hl == 0 && bph.root.downHashedLen == 0 { +// // No unfolding for empty root +// return nil +// } +// upCell = &bph.root +// touched = bph.rootTouched +// present = bph.rootPresent +// if bph.trace { +// fmt.Printf("unfold root, touched %t, present %t, column %d\n", touched, present, col) +// } +// } else { +// upDepth = bph.depths[bph.activeRows-1] +// col = hashedKey[upDepth-1] +// upCell = &bph.grid[bph.activeRows-1][col] +// touched = bph.touchMap[bph.activeRows-1]&(uint16(1)<= unfolding { +// depth = upDepth + unfolding +// nibble := upCell.downHashedKey[unfolding-1] +// if touched { +// bph.touchMap[row] = uint16(1) << nibble +// } +// if present { +// bph.afterMap[row] = uint16(1) << nibble +// } +// cell := &bph.grid[row][nibble] +// cell.fillFromUpperCell(upCell, depth, unfolding) +// if bph.trace { +// fmt.Printf("cell (%d, %x) depth=%d\n", row, nibble, depth) +// } +// if row >= halfKeySize { +// cell.apl = 0 +// } +// if unfolding > 1 { +// copy(bph.currentKey[bph.currentKeyLen:], upCell.downHashedKey[:unfolding-1]) +// } +// 
bph.currentKeyLen += unfolding - 1 +// } else { +// // upCell.downHashedLen < unfolding +// depth = upDepth + upCell.downHashedLen +// nibble := upCell.downHashedKey[upCell.downHashedLen-1] +// if touched { +// bph.touchMap[row] = uint16(1) << nibble +// } +// if present { +// bph.afterMap[row] = uint16(1) << nibble +// } +// cell := &bph.grid[row][nibble] +// cell.fillFromUpperCell(upCell, depth, upCell.downHashedLen) +// if bph.trace { +// fmt.Printf("cell (%d, %x) depth=%d\n", row, nibble, depth) +// } +// if row >= halfKeySize { +// cell.apl = 0 +// } +// if upCell.downHashedLen > 1 { +// copy(bph.currentKey[bph.currentKeyLen:], upCell.downHashedKey[:upCell.downHashedLen-1]) +// } +// bph.currentKeyLen += upCell.downHashedLen - 1 +// } +// bph.depths[bph.activeRows] = depth +// bph.activeRows++ +// return nil +//} +// +//func (bph *BinPatriciaHashed) needFolding(hashedKey []byte) bool { +// return !bytes.HasPrefix(hashedKey, bph.currentKey[:bph.currentKeyLen]) +//} +// +//// The purpose of fold is to reduce hph.currentKey[:hph.currentKeyLen]. It should be invoked +//// until that current key becomes a prefix of hashedKey that we will proccess next +//// (in other words until the needFolding function returns 0) +//func (bph *BinPatriciaHashed) fold() (err error) { +// updateKeyLen := bph.currentKeyLen +// if bph.activeRows == 0 { +// return errors.New("cannot fold - no active rows") +// } +// if bph.trace { +// fmt.Printf("fold: activeRows: %d, currentKey: [%x], touchMap: %016b, afterMap: %016b\n", bph.activeRows, bph.currentKey[:bph.currentKeyLen], bph.touchMap[bph.activeRows-1], bph.afterMap[bph.activeRows-1]) +// } +// // Move information to the row above +// row := bph.activeRows - 1 +// var upBinaryCell *BinaryCell +// var col int +// var upDepth int +// if bph.activeRows == 1 { +// if bph.trace { +// fmt.Printf("upcell is root\n") +// } +// upBinaryCell = &bph.root +// } else { +// upDepth = bph.depths[bph.activeRows-2] +// col = int(bph.currentKey[upDepth-1]) +// if bph.trace { +// fmt.Printf("upcell is (%d x %x), upDepth=%d\n", row-1, col, upDepth) +// } +// upBinaryCell = &bph.grid[row-1][col] +// } +// +// depth := bph.depths[bph.activeRows-1] +// updateKey := binToCompact(bph.currentKey[:updateKeyLen]) +// partsCount := bits.OnesCount16(bph.afterMap[row]) +// +// if bph.trace { +// fmt.Printf("touchMap[%d]=%016b, afterMap[%d]=%016b\n", row, bph.touchMap[row], row, bph.afterMap[row]) +// } +// switch partsCount { +// case 0: +// // Everything deleted +// if bph.touchMap[row] != 0 { +// if row == 0 { +// // Root is deleted because the tree is empty +// bph.rootTouched = true +// bph.rootPresent = false +// } else if upDepth == halfKeySize { +// // Special case - all storage items of an account have been deleted, but it does not automatically delete the account, just makes it empty storage +// // Therefore we are not propagating deletion upwards, but turn it into a modification +// bph.touchMap[row-1] |= uint16(1) << col +// } else { +// // Deletion is propagated upwards +// bph.touchMap[row-1] |= uint16(1) << col +// bph.afterMap[row-1] &^= uint16(1) << col +// } +// } +// upBinaryCell.hl = 0 +// upBinaryCell.apl = 0 +// upBinaryCell.spl = 0 +// upBinaryCell.extLen = 0 +// upBinaryCell.downHashedLen = 0 +// if bph.branchBefore[row] { +// _, err = bph.branchEncoder.CollectUpdate(bph.ctx, updateKey, 0, bph.touchMap[row], 0, RetrieveCellNoop) +// if err != nil { +// return fmt.Errorf("failed to encode leaf node update: %w", err) +// } +// } +// bph.activeRows-- +// if upDepth > 0 
{ +// bph.currentKeyLen = upDepth - 1 +// } else { +// bph.currentKeyLen = 0 +// } +// case 1: +// // Leaf or extension node +// if bph.touchMap[row] != 0 { +// // any modifications +// if row == 0 { +// bph.rootTouched = true +// } else { +// // Modifiction is propagated upwards +// bph.touchMap[row-1] |= uint16(1) << col +// } +// } +// nibble := bits.TrailingZeros16(bph.afterMap[row]) +// cell := &bph.grid[row][nibble] +// upBinaryCell.extLen = 0 +// upBinaryCell.fillFromLowerBinaryCell(cell, depth, bph.currentKey[upDepth:bph.currentKeyLen], nibble) +// // Delete if it existed +// if bph.branchBefore[row] { +// _, err = bph.branchEncoder.CollectUpdate(bph.ctx, updateKey, 0, bph.touchMap[row], 0, RetrieveCellNoop) +// if err != nil { +// return fmt.Errorf("failed to encode leaf node update: %w", err) +// } +// } +// bph.activeRows-- +// if upDepth > 0 { +// bph.currentKeyLen = upDepth - 1 +// } else { +// bph.currentKeyLen = 0 +// } +// default: +// // Branch node +// if bph.touchMap[row] != 0 { +// // any modifications +// if row == 0 { +// bph.rootTouched = true +// } else { +// // Modifiction is propagated upwards +// bph.touchMap[row-1] |= uint16(1) << col +// } +// } +// bitmap := bph.touchMap[row] & bph.afterMap[row] +// if !bph.branchBefore[row] { +// // There was no branch node before, so we need to touch even the singular child that existed +// bph.touchMap[row] |= bph.afterMap[row] +// bitmap |= bph.afterMap[row] +// } +// // Calculate total length of all hashes +// totalBranchLen := 17 - partsCount // For every empty cell, one byte +// for bitset, j := bph.afterMap[row], 0; bitset != 0; j++ { +// bit := bitset & -bitset +// nibble := bits.TrailingZeros16(bit) +// cell := &bph.grid[row][nibble] +// totalBranchLen += bph.computeBinaryCellHashLen(cell, depth) +// bitset ^= bit +// } +// +// bph.keccak2.Reset() +// pt := rlp.GenerateStructLen(bph.hashAuxBuffer[:], totalBranchLen) +// if _, err := bph.keccak2.Write(bph.hashAuxBuffer[:pt]); err != nil { +// return err +// } +// +// b := [...]byte{0x80} +// cellGetter := func(nibble int, skip bool) (*Cell, error) { +// if skip { +// if _, err := bph.keccak2.Write(b[:]); err != nil { +// return nil, fmt.Errorf("failed to write empty nibble to hash: %w", err) +// } +// if bph.trace { +// fmt.Printf("%x: empty(%d,%x)\n", nibble, row, nibble) +// } +// return nil, nil +// } +// cell := &bph.grid[row][nibble] +// cellHash, err := bph.computeBinaryCellHash(cell, depth, bph.hashAuxBuffer[:0]) +// if err != nil { +// return nil, err +// } +// if bph.trace { +// fmt.Printf("%x: computeBinaryCellHash(%d,%x,depth=%d)=[%x]\n", nibble, row, nibble, depth, cellHash) +// } +// if _, err := bph.keccak2.Write(cellHash); err != nil { +// return nil, err +// } +// +// // TODO extension and downHashedKey should be encoded to hex format and vice versa, data loss due to array sizes +// return cell.unwrapToHexCell(), nil +// } +// +// var lastNibble int +// var err error +// _ = cellGetter +// +// lastNibble, err = bph.branchEncoder.CollectUpdate(bph.ctx, updateKey, bitmap, bph.touchMap[row], bph.afterMap[row], cellGetter) +// if err != nil { +// return fmt.Errorf("failed to encode branch update: %w", err) +// } +// for i := lastNibble; i <= maxChild; i++ { +// if _, err := bph.keccak2.Write(b[:]); err != nil { +// return err +// } +// if bph.trace { +// fmt.Printf("%x: empty(%d,%x)\n", i, row, i) +// } +// } +// upBinaryCell.extLen = depth - upDepth - 1 +// upBinaryCell.downHashedLen = upBinaryCell.extLen +// if upBinaryCell.extLen > 0 { +// 
copy(upBinaryCell.extension[:], bph.currentKey[upDepth:bph.currentKeyLen]) +// copy(upBinaryCell.downHashedKey[:], bph.currentKey[upDepth:bph.currentKeyLen]) +// } +// if depth < halfKeySize { +// upBinaryCell.apl = 0 +// } +// upBinaryCell.spl = 0 +// upBinaryCell.hl = 32 +// if _, err := bph.keccak2.Read(upBinaryCell.h[:]); err != nil { +// return err +// } +// if bph.trace { +// fmt.Printf("} [%x]\n", upBinaryCell.h[:]) +// } +// bph.activeRows-- +// if upDepth > 0 { +// bph.currentKeyLen = upDepth - 1 +// } else { +// bph.currentKeyLen = 0 +// } +// } +// return nil +//} +// +//func (bph *BinPatriciaHashed) deleteBinaryCell(hashedKey []byte) { +// if bph.trace { +// fmt.Printf("deleteBinaryCell, activeRows = %d\n", bph.activeRows) +// } +// var cell *BinaryCell +// if bph.activeRows == 0 { +// // Remove the root +// cell = &bph.root +// bph.rootTouched = true +// bph.rootPresent = false +// } else { +// row := bph.activeRows - 1 +// if bph.depths[row] < len(hashedKey) { +// if bph.trace { +// fmt.Printf("deleteBinaryCell skipping spurious delete depth=%d, len(hashedKey)=%d\n", bph.depths[row], len(hashedKey)) +// } +// return +// } +// col := int(hashedKey[bph.currentKeyLen]) +// cell = &bph.grid[row][col] +// if bph.afterMap[row]&(uint16(1)< 0; unfolding = bph.needUnfolding(hashedKey) { +// if err := bph.unfold(hashedKey, unfolding); err != nil { +// return nil, fmt.Errorf("unfold: %w", err) +// } +// } +// +// // Update the cell +// stagedBinaryCell.fillEmpty() +// if len(plainKey) == bph.accountKeyLen { +// if err := bph.accountFn(plainKey, stagedBinaryCell); err != nil { +// return nil, fmt.Errorf("GetAccount for key %x failed: %w", plainKey, err) +// } +// if !stagedBinaryCell.Delete { +// cell := bph.updateBinaryCell(plainKey, hashedKey) +// cell.setAccountFields(stagedBinaryCell.CodeHash[:], &stagedBinaryCell.Balance, stagedBinaryCell.Nonce) +// +// if bph.trace { +// fmt.Printf("GetAccount reading key %x => balance=%d nonce=%v codeHash=%x\n", cell.apk, &cell.Balance, cell.Nonce, cell.CodeHash) +// } +// } +// } else { +// if err = bph.storageFn(plainKey, stagedBinaryCell); err != nil { +// return nil, fmt.Errorf("GetStorage for key %x failed: %w", plainKey, err) +// } +// if !stagedBinaryCell.Delete { +// bph.updateBinaryCell(plainKey, hashedKey).setStorage(stagedBinaryCell.Storage[:stagedBinaryCell.StorageLen]) +// if bph.trace { +// fmt.Printf("GetStorage reading key %x => %x\n", plainKey, stagedBinaryCell.Storage[:stagedBinaryCell.StorageLen]) +// } +// } +// } +// +// if stagedBinaryCell.Delete { +// if bph.trace { +// fmt.Printf("delete cell %x hash %x\n", plainKey, hashedKey) +// } +// bph.deleteBinaryCell(hashedKey) +// } +// } +// // Folding everything up to the root +// for bph.activeRows > 0 { +// if err := bph.fold(); err != nil { +// return nil, fmt.Errorf("final fold: %w", err) +// } +// } +// +// rootHash, err = bph.RootHash() +// if err != nil { +// return nil, fmt.Errorf("root hash evaluation failed: %w", err) +// } +// err = bph.branchEncoder.Load(bph.ctx, etl.TransformArgs{Quit: ctx.Done()}) +// if err != nil { +// return nil, fmt.Errorf("branch update failed: %w", err) +// } +// return rootHash, nil +//} +// +//func (bph *BinPatriciaHashed) SetTrace(trace bool) { bph.trace = trace } +// +//func (bph *BinPatriciaHashed) Variant() TrieVariant { return VariantBinPatriciaTrie } +// +//// Reset allows BinPatriciaHashed instance to be reused for the new commitment calculation +//func (bph *BinPatriciaHashed) Reset() { +// bph.rootChecked = false +// bph.root.hl = 0 
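
fold and unfoldBranchNode above repeatedly walk the 16-bit touch/after bitmaps one set bit at a time. The isolate-lowest-bit idiom they rely on is shown standalone below; the bitmap value is arbitrary example data.

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// A 16-bit bitmap where each set bit marks an occupied nibble/column.
	bitmap := uint16(0b0010_0100_0000_1001)
	// Visit set bits from lowest to highest: isolate the lowest set bit,
	// convert it to a nibble index, then clear it and continue.
	for bitset := bitmap; bitset != 0; {
		bit := bitset & -bitset
		nibble := bits.TrailingZeros16(bit)
		fmt.Printf("occupied nibble %x\n", nibble)
		bitset ^= bit
	}
}
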
+// bph.root.downHashedLen = 0 +// bph.root.apl = 0 +// bph.root.spl = 0 +// bph.root.extLen = 0 +// copy(bph.root.CodeHash[:], EmptyCodeHash) +// bph.root.StorageLen = 0 +// bph.root.Balance.Clear() +// bph.root.Nonce = 0 +// bph.rootTouched = false +// bph.rootPresent = true +//} +// +//func (c *BinaryCell) bytes() []byte { +// var pos = 1 +// size := 1 + c.hl + 1 + c.apl + c.spl + 1 + c.downHashedLen + 1 + c.extLen + 1 // max size +// buf := make([]byte, size) +// +// var flags uint8 +// if c.hl != 0 { +// flags |= 1 +// buf[pos] = byte(c.hl) +// pos++ +// copy(buf[pos:pos+c.hl], c.h[:]) +// pos += c.hl +// } +// if c.apl != 0 { +// flags |= 2 +// buf[pos] = byte(c.hl) +// pos++ +// copy(buf[pos:pos+c.apl], c.apk[:]) +// pos += c.apl +// } +// if c.spl != 0 { +// flags |= 4 +// buf[pos] = byte(c.spl) +// pos++ +// copy(buf[pos:pos+c.spl], c.spk[:]) +// pos += c.spl +// } +// if c.downHashedLen != 0 { +// flags |= 8 +// buf[pos] = byte(c.downHashedLen) +// pos++ +// copy(buf[pos:pos+c.downHashedLen], c.downHashedKey[:]) +// pos += c.downHashedLen +// } +// if c.extLen != 0 { +// flags |= 16 +// buf[pos] = byte(c.extLen) +// pos++ +// copy(buf[pos:pos+c.downHashedLen], c.downHashedKey[:]) +// //pos += c.downHashedLen +// } +// buf[0] = flags +// return buf +//} +// +//func (c *BinaryCell) decodeBytes(buf []byte) error { +// if len(buf) < 1 { +// return errors.New("invalid buffer size to contain BinaryCell (at least 1 byte expected)") +// } +// c.fillEmpty() +// +// var pos int +// flags := buf[pos] +// pos++ +// +// if flags&1 != 0 { +// c.hl = int(buf[pos]) +// pos++ +// copy(c.h[:], buf[pos:pos+c.hl]) +// pos += c.hl +// } +// if flags&2 != 0 { +// c.apl = int(buf[pos]) +// pos++ +// copy(c.apk[:], buf[pos:pos+c.apl]) +// pos += c.apl +// } +// if flags&4 != 0 { +// c.spl = int(buf[pos]) +// pos++ +// copy(c.spk[:], buf[pos:pos+c.spl]) +// pos += c.spl +// } +// if flags&8 != 0 { +// c.downHashedLen = int(buf[pos]) +// pos++ +// copy(c.downHashedKey[:], buf[pos:pos+c.downHashedLen]) +// pos += c.downHashedLen +// } +// if flags&16 != 0 { +// c.extLen = int(buf[pos]) +// pos++ +// copy(c.extension[:], buf[pos:pos+c.extLen]) +// //pos += c.extLen +// } +// return nil +//} +// +//// Encode current state of hph into bytes +//func (bph *BinPatriciaHashed) EncodeCurrentState(buf []byte) ([]byte, error) { +// s := binState{ +// CurrentKeyLen: int16(bph.currentKeyLen), +// RootChecked: bph.rootChecked, +// RootTouched: bph.rootTouched, +// RootPresent: bph.rootPresent, +// Root: make([]byte, 0), +// } +// +// s.Root = bph.root.bytes() +// copy(s.CurrentKey[:], bph.currentKey[:]) +// copy(s.Depths[:], bph.depths[:]) +// copy(s.BranchBefore[:], bph.branchBefore[:]) +// copy(s.TouchMap[:], bph.touchMap[:]) +// copy(s.AfterMap[:], bph.afterMap[:]) +// +// return s.Encode(buf) +//} +// +//// buf expected to be encoded hph state. Decode state and set up hph to that state. 
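
The cell bytes/decodeBytes pair above serializes a cell as a flags byte followed by one-byte-length-prefixed fields. A reduced two-field round trip of the same layout is sketched below; encodeFields/decodeFields and the field choice are illustrative, not the patch's own encoding.

package main

import (
	"errors"
	"fmt"
)

// encodeFields writes a leading flags byte, then, for each present field,
// a one-byte length followed by the payload.
func encodeFields(hash, key []byte) []byte {
	buf := []byte{0}
	if len(hash) > 0 {
		buf[0] |= 1
		buf = append(buf, byte(len(hash)))
		buf = append(buf, hash...)
	}
	if len(key) > 0 {
		buf[0] |= 2
		buf = append(buf, byte(len(key)))
		buf = append(buf, key...)
	}
	return buf
}

func decodeFields(buf []byte) (hash, key []byte, err error) {
	if len(buf) < 1 {
		return nil, nil, errors.New("buffer too small for flags byte")
	}
	flags, pos := buf[0], 1
	next := func() ([]byte, error) {
		if pos >= len(buf) || pos+1+int(buf[pos]) > len(buf) {
			return nil, errors.New("truncated field")
		}
		l := int(buf[pos])
		pos++
		field := buf[pos : pos+l]
		pos += l
		return field, nil
	}
	if flags&1 != 0 {
		if hash, err = next(); err != nil {
			return nil, nil, err
		}
	}
	if flags&2 != 0 {
		if key, err = next(); err != nil {
			return nil, nil, err
		}
	}
	return hash, key, nil
}

func main() {
	enc := encodeFields([]byte{0xde, 0xad}, []byte{0xbe, 0xef, 0x01})
	h, k, err := decodeFields(enc)
	fmt.Printf("%x %x %v\n", h, k, err)
}
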
+//func (bph *BinPatriciaHashed) SetState(buf []byte) error { +// if bph.activeRows != 0 { +// return errors.New("has active rows, could not reset state") +// } +// +// var s state +// if err := s.Decode(buf); err != nil { +// return err +// } +// +// bph.Reset() +// +// if err := bph.root.decodeBytes(s.Root); err != nil { +// return err +// } +// +// bph.rootChecked = s.RootChecked +// bph.rootTouched = s.RootTouched +// bph.rootPresent = s.RootPresent +// +// copy(bph.depths[:], s.Depths[:]) +// copy(bph.branchBefore[:], s.BranchBefore[:]) +// copy(bph.touchMap[:], s.TouchMap[:]) +// copy(bph.afterMap[:], s.AfterMap[:]) +// +// return nil +//} +// +//func (bph *BinPatriciaHashed) ProcessTree(ctx context.Context, t *UpdateTree, lp string) (rootHash []byte, err error) { +// panic("not implemented") +//} +// +//func (bph *BinPatriciaHashed) ProcessUpdates(ctx context.Context, plainKeys [][]byte, updates []Update) (rootHash []byte, err error) { +// for i, pk := range plainKeys { +// updates[i].hashedKey = hexToBin(pk) +// updates[i].plainKey = pk +// } +// +// sort.Slice(updates, func(i, j int) bool { +// return bytes.Compare(updates[i].hashedKey, updates[j].hashedKey) < 0 +// }) +// +// for i, plainKey := range plainKeys { +// select { +// case <-ctx.Done(): +// return nil, ctx.Err() +// default: +// } +// update := updates[i] +// if bph.trace { +// fmt.Printf("plainKey=[%x], hashedKey=[%x], currentKey=[%x]\n", update.plainKey, update.hashedKey, bph.currentKey[:bph.currentKeyLen]) +// } +// // Keep folding until the currentKey is the prefix of the key we modify +// for bph.needFolding(update.hashedKey) { +// if err := bph.fold(); err != nil { +// return nil, fmt.Errorf("fold: %w", err) +// } +// } +// // Now unfold until we step on an empty cell +// for unfolding := bph.needUnfolding(update.hashedKey); unfolding > 0; unfolding = bph.needUnfolding(update.hashedKey) { +// if err := bph.unfold(update.hashedKey, unfolding); err != nil { +// return nil, fmt.Errorf("unfold: %w", err) +// } +// } +// +// // Update the cell +// if update.Flags == DeleteUpdate { +// bph.deleteBinaryCell(update.hashedKey) +// if bph.trace { +// fmt.Printf("key %x deleted\n", update.plainKey) +// } +// } else { +// cell := bph.updateBinaryCell(update.plainKey, update.hashedKey) +// if bph.trace { +// fmt.Printf("GetAccount updated key %x =>", plainKey) +// } +// if update.Flags&BalanceUpdate != 0 { +// if bph.trace { +// fmt.Printf(" balance=%d", &update.Balance) +// } +// cell.Balance.Set(&update.Balance) +// } +// if update.Flags&NonceUpdate != 0 { +// if bph.trace { +// fmt.Printf(" nonce=%d", update.Nonce) +// } +// cell.Nonce = update.Nonce +// } +// if update.Flags&CodeUpdate != 0 { +// if bph.trace { +// fmt.Printf(" codeHash=%x", update.CodeHash) +// } +// copy(cell.CodeHash[:], update.CodeHash[:]) +// } +// if bph.trace { +// fmt.Printf("\n") +// } +// if update.Flags&StorageUpdate != 0 { +// cell.setStorage(update.CodeHash[:update.StorageLen]) +// if bph.trace { +// fmt.Printf("GetStorage filled key %x => %x\n", plainKey, update.CodeHash[:update.StorageLen]) +// } +// } +// } +// } +// // Folding everything up to the root +// for bph.activeRows > 0 { +// if err := bph.fold(); err != nil { +// return nil, fmt.Errorf("final fold: %w", err) +// } +// } +// +// rootHash, err = bph.RootHash() +// if err != nil { +// return nil, fmt.Errorf("root hash evaluation failed: %w", err) +// } +// +// err = bph.branchEncoder.Load(bph.ctx, etl.TransformArgs{Quit: ctx.Done()}) +// if err != nil { +// return nil, 
fmt.Errorf("branch update failed: %w", err) +// } +// +// return rootHash, nil +//} +// +//// Hashes provided key and expands resulting hash into nibbles (each byte split into two nibbles by 4 bits) +//func (bph *BinPatriciaHashed) hashAndNibblizeKey2(key []byte) []byte { //nolint +// hashedKey := make([]byte, length.Hash) +// +// bph.keccak.Reset() +// bph.keccak.Write(key[:length.Addr]) +// bph.keccak.Read(hashedKey[:length.Hash]) +// +// if len(key[length.Addr:]) > 0 { +// hashedKey = append(hashedKey, make([]byte, length.Hash)...) +// bph.keccak.Reset() +// bph.keccak.Write(key[length.Addr:]) +// bph.keccak.Read(hashedKey[length.Hash:]) +// } +// +// nibblized := make([]byte, len(hashedKey)*2) +// for i, b := range hashedKey { +// nibblized[i*2] = (b >> 4) & 0xf +// nibblized[i*2+1] = b & 0xf +// } +// return nibblized +//} +// +//func binHashKey(keccak keccakState, plainKey []byte, dest []byte, hashedKeyOffset int) error { +// keccak.Reset() +// var hashBufBack [length.Hash]byte +// hashBuf := hashBufBack[:] +// if _, err := keccak.Write(plainKey); err != nil { +// return err +// } +// if _, err := keccak.Read(hashBuf); err != nil { +// return err +// } +// for k := hashedKeyOffset; k < 256; k++ { +// if hashBuf[k/8]&(1<<(7-k%8)) == 0 { +// dest[k-hashedKeyOffset] = 0 +// } else { +// dest[k-hashedKeyOffset] = 1 +// } +// } +// return nil +//} +// +//func wrapAccountStorageFn(fn func([]byte, *Cell) error) func(pk []byte, bc *BinaryCell) error { +// return func(pk []byte, bc *BinaryCell) error { +// cl := bc.unwrapToHexCell() +// +// if err := fn(pk, cl); err != nil { +// return err +// } +// +// bc.Balance = *cl.Balance.Clone() +// bc.Nonce = cl.Nonce +// bc.StorageLen = cl.StorageLen +// bc.apl = cl.accountPlainKeyLen +// bc.spl = cl.storagePlainKeyLen +// bc.hl = cl.hashLen +// copy(bc.apk[:], cl.accountPlainKey[:]) +// copy(bc.spk[:], cl.storagePlainKey[:]) +// copy(bc.h[:], cl.hash[:]) +// +// if cl.extLen > 0 { +// binExt := compactToBin(cl.extension[:cl.extLen]) +// copy(bc.extension[:], binExt) +// bc.extLen = len(binExt) +// } +// if cl.downHashedLen > 0 { +// bindhk := compactToBin(cl.downHashedKey[:cl.downHashedLen]) +// copy(bc.downHashedKey[:], bindhk) +// bc.downHashedLen = len(bindhk) +// } +// +// copy(bc.CodeHash[:], cl.CodeHash[:]) +// copy(bc.Storage[:], cl.Storage[:]) +// bc.Delete = cl.Delete +// return nil +// } +//} +// +//// represents state of the tree +//type binState struct { +// TouchMap [maxKeySize]uint16 // For each row, bitmap of cells that were either present before modification, or modified or deleted +// AfterMap [maxKeySize]uint16 // For each row, bitmap of cells that were present after modification +// CurrentKeyLen int16 +// Root []byte // encoded root cell +// RootChecked bool // Set to false if it is not known whether the root is empty, set to true if it is checked +// RootTouched bool +// RootPresent bool +// BranchBefore [maxKeySize]bool // For each row, whether there was a branch node in the database loaded in unfold +// CurrentKey [maxKeySize]byte // For each row indicates which column is currently selected +// Depths [maxKeySize]int // For each row, the depth of cells in that row +//} +// +//func (s *binState) Encode(buf []byte) ([]byte, error) { +// var rootFlags stateRootFlag +// if s.RootPresent { +// rootFlags |= stateRootPresent +// } +// if s.RootChecked { +// rootFlags |= stateRootChecked +// } +// if s.RootTouched { +// rootFlags |= stateRootTouched +// } +// +// ee := bytes.NewBuffer(buf) +// if err := binary.Write(ee, 
binary.BigEndian, s.CurrentKeyLen); err != nil { +// return nil, fmt.Errorf("encode currentKeyLen: %w", err) +// } +// if err := binary.Write(ee, binary.BigEndian, int8(rootFlags)); err != nil { +// return nil, fmt.Errorf("encode rootFlags: %w", err) +// } +// if n, err := ee.Write(s.CurrentKey[:]); err != nil || n != len(s.CurrentKey) { +// return nil, fmt.Errorf("encode currentKey: %w", err) +// } +// if err := binary.Write(ee, binary.BigEndian, uint16(len(s.Root))); err != nil { +// return nil, fmt.Errorf("encode root len: %w", err) +// } +// if n, err := ee.Write(s.Root); err != nil || n != len(s.Root) { +// return nil, fmt.Errorf("encode root: %w", err) +// } +// d := make([]byte, len(s.Depths)) +// for i := 0; i < len(s.Depths); i++ { +// d[i] = byte(s.Depths[i]) +// } +// if n, err := ee.Write(d); err != nil || n != len(s.Depths) { +// return nil, fmt.Errorf("encode depths: %w", err) +// } +// if err := binary.Write(ee, binary.BigEndian, s.TouchMap); err != nil { +// return nil, fmt.Errorf("encode touchMap: %w", err) +// } +// if err := binary.Write(ee, binary.BigEndian, s.AfterMap); err != nil { +// return nil, fmt.Errorf("encode afterMap: %w", err) +// } +// +// var before1, before2 uint64 +// for i := 0; i < halfKeySize; i++ { +// if s.BranchBefore[i] { +// before1 |= 1 << i +// } +// } +// for i, j := halfKeySize, 0; i < maxKeySize; i, j = i+1, j+1 { +// if s.BranchBefore[i] { +// before2 |= 1 << j +// } +// } +// if err := binary.Write(ee, binary.BigEndian, before1); err != nil { +// return nil, fmt.Errorf("encode branchBefore_1: %w", err) +// } +// if err := binary.Write(ee, binary.BigEndian, before2); err != nil { +// return nil, fmt.Errorf("encode branchBefore_2: %w", err) +// } +// return ee.Bytes(), nil +//} +// +//func (s *binState) Decode(buf []byte) error { +// aux := bytes.NewBuffer(buf) +// if err := binary.Read(aux, binary.BigEndian, &s.CurrentKeyLen); err != nil { +// return fmt.Errorf("currentKeyLen: %w", err) +// } +// var rootFlags stateRootFlag +// if err := binary.Read(aux, binary.BigEndian, &rootFlags); err != nil { +// return fmt.Errorf("rootFlags: %w", err) +// } +// +// if rootFlags&stateRootPresent != 0 { +// s.RootPresent = true +// } +// if rootFlags&stateRootTouched != 0 { +// s.RootTouched = true +// } +// if rootFlags&stateRootChecked != 0 { +// s.RootChecked = true +// } +// if n, err := aux.Read(s.CurrentKey[:]); err != nil || n != maxKeySize { +// return fmt.Errorf("currentKey: %w", err) +// } +// var rootSize uint16 +// if err := binary.Read(aux, binary.BigEndian, &rootSize); err != nil { +// return fmt.Errorf("root size: %w", err) +// } +// s.Root = make([]byte, rootSize) +// if _, err := aux.Read(s.Root); err != nil { +// return fmt.Errorf("root: %w", err) +// } +// d := make([]byte, len(s.Depths)) +// if err := binary.Read(aux, binary.BigEndian, &d); err != nil { +// return fmt.Errorf("depths: %w", err) +// } +// for i := 0; i < len(s.Depths); i++ { +// s.Depths[i] = int(d[i]) +// } +// if err := binary.Read(aux, binary.BigEndian, &s.TouchMap); err != nil { +// return fmt.Errorf("touchMap: %w", err) +// } +// if err := binary.Read(aux, binary.BigEndian, &s.AfterMap); err != nil { +// return fmt.Errorf("afterMap: %w", err) +// } +// var branch1, branch2 uint64 +// if err := binary.Read(aux, binary.BigEndian, &branch1); err != nil { +// return fmt.Errorf("branchBefore1: %w", err) +// } +// if err := binary.Read(aux, binary.BigEndian, &branch2); err != nil { +// return fmt.Errorf("branchBefore2: %w", err) +// } +// +// // TODO invalid branch 
encode +// for i := 0; i < halfKeySize; i++ { +// if branch1&(1< %s\n", CompactedKeyToHex([]byte(key)), branchNodeUpdate.String()) - } -} - -func Test_BinPatriciaHashed_UniqueRepresentation(t *testing.T) { - t.Skip() - ctx := context.Background() - - ms := NewMockState(t) - ms2 := NewMockState(t) - - plainKeys, updates := NewUpdateBuilder(). - Balance("f5", 4). - Balance("ff", 900234). - Balance("04", 1233). - Storage("04", "01", "0401"). - Balance("ba", 065606). - Balance("00", 4). - Balance("01", 5). - Balance("02", 6). - Balance("03", 7). - Storage("03", "56", "050505"). - Balance("05", 9). - Storage("03", "87", "060606"). - Balance("b9", 6). - Nonce("ff", 169356). - Storage("05", "02", "8989"). - Storage("f5", "04", "9898"). - Build() - - trieOne := NewBinPatriciaHashed(1, ms, ms.TempDir()) - trieTwo := NewBinPatriciaHashed(1, ms2, ms2.TempDir()) - - trieOne.SetTrace(true) - trieTwo.SetTrace(true) - - // single sequential update - roots := make([][]byte, 0) - // branchNodeUpdatesOne := make(map[string]BranchData) - fmt.Printf("1. Trie sequential update generated following branch updates\n") - for i := 0; i < len(updates); i++ { - if err := ms.applyPlainUpdates(plainKeys[i:i+1], updates[i:i+1]); err != nil { - t.Fatal(err) - } - - sequentialRoot, err := trieOne.ProcessKeys(ctx, plainKeys[i:i+1], "") - require.NoError(t, err) - roots = append(roots, sequentialRoot) - - //ms.applyBranchNodeUpdates(branchNodeUpdates) - //renderUpdates(branchNodeUpdates) - } - - err := ms2.applyPlainUpdates(plainKeys, updates) - require.NoError(t, err) - - fmt.Printf("\n2. Trie batch update generated following branch updates\n") - // batch update - batchRoot, err := trieTwo.ProcessKeys(ctx, plainKeys, "") - require.NoError(t, err) - //renderUpdates(branchNodeUpdatesTwo) - - fmt.Printf("\n sequential roots:\n") - for i, rh := range roots { - fmt.Printf("%2d %+v\n", i, hex.EncodeToString(rh)) - } - - //ms2.applyBranchNodeUpdates(branchNodeUpdatesTwo) - - require.EqualValues(t, batchRoot, roots[len(roots)-1], - "expected equal roots, got sequential [%v] != batch [%v]", hex.EncodeToString(roots[len(roots)-1]), hex.EncodeToString(batchRoot)) - require.Lenf(t, batchRoot, 32, "root hash length should be equal to 32 bytes") -} -func Test_BinPatriciaHashed_EmptyState(t *testing.T) { - ctx := context.Background() - ms := NewMockState(t) - hph := NewBinPatriciaHashed(1, ms, ms.TempDir()) - hph.SetTrace(false) - plainKeys, updates := NewUpdateBuilder(). - Balance("00", 4). - Balance("01", 5). - Balance("02", 6). - Balance("03", 7). - Balance("04", 8). - Storage("04", "01", "0401"). - Storage("03", "56", "050505"). - Storage("03", "57", "060606"). - Balance("05", 9). - Storage("05", "02", "8989"). - Storage("05", "04", "9898"). - Build() - - err := ms.applyPlainUpdates(plainKeys, updates) - require.NoError(t, err) - - firstRootHash, err := hph.ProcessKeys(ctx, plainKeys, "") - require.NoError(t, err) - - t.Logf("root hash %x\n", firstRootHash) - - //ms.applyBranchNodeUpdates(branchNodeUpdates) - - fmt.Printf("1. Generated updates\n") - //renderUpdates(branchNodeUpdates) - - // More updates - hph.Reset() - hph.SetTrace(false) - plainKeys, updates = NewUpdateBuilder(). - Storage("03", "58", "050505"). - Build() - err = ms.applyPlainUpdates(plainKeys, updates) - require.NoError(t, err) - - secondRootHash, err := hph.ProcessKeys(ctx, plainKeys, "") - require.NoError(t, err) - require.NotEqualValues(t, firstRootHash, secondRootHash) - - //ms.applyBranchNodeUpdates(branchNodeUpdates) - fmt.Printf("2. 
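
The state Encode/Decode around this point persists the per-row branchBefore flags as packed 64-bit words. A generic sketch of packing a bool slice into words and back is below (one bit per flag, 64 flags per word); packBools/unpackBools are illustrative helpers, not the patch's encoding.

package main

import "fmt"

// packBools packs flags into 64-bit words, one bit per flag, 64 flags per word.
func packBools(flags []bool) []uint64 {
	words := make([]uint64, (len(flags)+63)/64)
	for i, f := range flags {
		if f {
			words[i/64] |= 1 << (i % 64)
		}
	}
	return words
}

// unpackBools reverses packBools for a known flag count.
func unpackBools(words []uint64, n int) []bool {
	flags := make([]bool, n)
	for i := range flags {
		flags[i] = words[i/64]&(1<<(i%64)) != 0
	}
	return flags
}

func main() {
	in := make([]bool, 512) // e.g. one flag per trie row
	in[0], in[63], in[64], in[511] = true, true, true, true
	out := unpackBools(packBools(in), len(in))
	fmt.Println(out[0], out[63], out[64], out[511], out[1])
}
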
Generated single update\n") - //renderUpdates(branchNodeUpdates) - - // More updates - //hph.Reset() // one update - no need to reset - hph.SetTrace(false) - plainKeys, updates = NewUpdateBuilder(). - Storage("03", "58", "070807"). - Build() - err = ms.applyPlainUpdates(plainKeys, updates) - require.NoError(t, err) - - thirdRootHash, err := hph.ProcessKeys(ctx, plainKeys, "") - require.NoError(t, err) - require.NotEqualValues(t, secondRootHash, thirdRootHash) - - //ms.applyBranchNodeUpdates(branchNodeUpdates) - fmt.Printf("3. Generated single update\n") - //renderUpdates(branchNodeUpdates) -} - -func Test_BinPatriciaHashed_EmptyUpdateState(t *testing.T) { - ctx := context.Background() - ms := NewMockState(t) - hph := NewBinPatriciaHashed(1, ms, ms.TempDir()) - hph.SetTrace(false) - plainKeys, updates := NewUpdateBuilder(). - Balance("00", 4). - Nonce("00", 246462653). - Balance("01", 5). - CodeHash("03", "aaaaaaaaaaf7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a870"). - Delete("00"). - Storage("04", "01", "0401"). - Storage("03", "56", "050505"). - Build() - - err := ms.applyPlainUpdates(plainKeys, updates) - require.NoError(t, err) - - hashBeforeEmptyUpdate, err := hph.ProcessKeys(ctx, plainKeys, "") - require.NoError(t, err) - require.NotEmpty(t, hashBeforeEmptyUpdate) - - //ms.applyBranchNodeUpdates(branchNodeUpdates) - - fmt.Println("1. Updates applied") - //renderUpdates(branchNodeUpdates) - - // generate empty updates and do NOT reset tree - hph.SetTrace(true) - - plainKeys, updates = NewUpdateBuilder().Build() - - err = ms.applyPlainUpdates(plainKeys, updates) - require.NoError(t, err) - - hashAfterEmptyUpdate, err := hph.ProcessKeys(ctx, plainKeys, "") - require.NoError(t, err) - - //ms.applyBranchNodeUpdates(branchNodeUpdates) - fmt.Println("2. Empty updates applied without state reset") - - require.EqualValues(t, hashBeforeEmptyUpdate, hashAfterEmptyUpdate) -} +//import ( +// "context" +// "encoding/hex" +// "fmt" +// "slices" +// "testing" +// +// "github.com/stretchr/testify/require" +// +// "github.com/erigontech/erigon-lib/common/length" +//) +// +//func Test_BinPatriciaTrie_UniqueRepresentation(t *testing.T) { +// t.Skip() +// ctx := context.Background() +// +// ms := NewMockState(t) +// ms2 := NewMockState(t) +// +// trie := NewBinPatriciaHashed(length.Addr, ms, ms.TempDir()) +// trieBatch := NewBinPatriciaHashed(length.Addr, ms2, ms2.TempDir()) +// +// plainKeys, updates := NewUpdateBuilder(). +// Balance("e25652aaa6b9417973d325f9a1246b48ff9420bf", 12). +// Balance("cdd0a12034e978f7eccda72bd1bd89a8142b704e", 120000). +// Balance("5bb6abae12c87592b940458437526cb6cad60d50", 170). +// Nonce("5bb6abae12c87592b940458437526cb6cad60d50", 152512). +// Balance("2fcb355beb0ea2b5fcf3b62a24e2faaff1c8d0c0", 100000). +// Balance("463510be61a7ccde354509c0ab813e599ee3fc8a", 200000). +// Balance("cd3e804beea486038609f88f399140dfbe059ef3", 200000). +// Storage("cd3e804beea486038609f88f399140dfbe059ef3", "01023402", "98"). +// Balance("82c88c189d5deeba0ad11463b80b44139bd519c1", 300000). +// Balance("0647e43e8f9ba3fb8b14ad30796b7553d667c858", 400000). +// Delete("cdd0a12034e978f7eccda72bd1bd89a8142b704e"). +// Balance("06548d648c23b12f2e9bfd1bae274b658be208f4", 500000). +// Balance("e5417f49640cf8a0b1d6e38f9dfdc00196e99e8b", 600000). +// Nonce("825ac9fa5d015ec7c6b4cbbc50f78d619d255ea7", 184). +// Build() +// +// ms.applyPlainUpdates(plainKeys, updates) +// ms2.applyPlainUpdates(plainKeys, updates) +// +// fmt.Println("1. 
Running sequential updates over the bin trie") +// var seqHash []byte +// for i := 0; i < len(updates); i++ { +// sh, err := trie.ProcessKeys(ctx, plainKeys[i:i+1], "") +// require.NoError(t, err) +// require.Len(t, sh, length.Hash) +// // WARN! provided sequential branch updates are incorrect - lead to deletion of prefixes (afterMap is zero) +// // while root hashes are equal +// //renderUpdates(branchNodeUpdates) +// +// fmt.Printf("h=%x\n", sh) +// seqHash = sh +// } +// +// fmt.Println("2. Running batch updates over the bin trie") +// +// batchHash, err := trieBatch.ProcessKeys(ctx, plainKeys, "") +// require.NoError(t, err) +// //ms2.applyBranchNodeUpdates(branchBatchUpdates) +// +// //renderUpdates(branchBatchUpdates) +// +// require.EqualValues(t, seqHash, batchHash) +// // require.EqualValues(t, seqHash, batchHash) +// +// // expectedHash, _ := hex.DecodeString("3ed2b89c0f9c6ebc7fa11a181baac21aa0236b12bb4492c708562cb3e40c7c9e") +// // require.EqualValues(t, expectedHash, seqHash) +//} +// +//func renderUpdates(branchNodeUpdates map[string]BranchData) { +// keys := make([]string, 0, len(branchNodeUpdates)) +// for key := range branchNodeUpdates { +// keys = append(keys, key) +// } +// slices.Sort(keys) +// for _, key := range keys { +// branchNodeUpdate := branchNodeUpdates[key] +// fmt.Printf("%x => %s\n", CompactedKeyToHex([]byte(key)), branchNodeUpdate.String()) +// } +//} +// +//func Test_BinPatriciaHashed_UniqueRepresentation(t *testing.T) { +// t.Skip() +// ctx := context.Background() +// +// ms := NewMockState(t) +// ms2 := NewMockState(t) +// +// plainKeys, updates := NewUpdateBuilder(). +// Balance("f5", 4). +// Balance("ff", 900234). +// Balance("04", 1233). +// Storage("04", "01", "0401"). +// Balance("ba", 065606). +// Balance("00", 4). +// Balance("01", 5). +// Balance("02", 6). +// Balance("03", 7). +// Storage("03", "56", "050505"). +// Balance("05", 9). +// Storage("03", "87", "060606"). +// Balance("b9", 6). +// Nonce("ff", 169356). +// Storage("05", "02", "8989"). +// Storage("f5", "04", "9898"). +// Build() +// +// trieOne := NewBinPatriciaHashed(1, ms, ms.TempDir()) +// trieTwo := NewBinPatriciaHashed(1, ms2, ms2.TempDir()) +// +// trieOne.SetTrace(true) +// trieTwo.SetTrace(true) +// +// // single sequential update +// roots := make([][]byte, 0) +// // branchNodeUpdatesOne := make(map[string]BranchData) +// fmt.Printf("1. Trie sequential update generated following branch updates\n") +// for i := 0; i < len(updates); i++ { +// if err := ms.applyPlainUpdates(plainKeys[i:i+1], updates[i:i+1]); err != nil { +// t.Fatal(err) +// } +// +// sequentialRoot, err := trieOne.ProcessKeys(ctx, plainKeys[i:i+1], "") +// require.NoError(t, err) +// roots = append(roots, sequentialRoot) +// +// //ms.applyBranchNodeUpdates(branchNodeUpdates) +// //renderUpdates(branchNodeUpdates) +// } +// +// err := ms2.applyPlainUpdates(plainKeys, updates) +// require.NoError(t, err) +// +// fmt.Printf("\n2. 
Trie batch update generated following branch updates\n") +// // batch update +// batchRoot, err := trieTwo.ProcessKeys(ctx, plainKeys, "") +// require.NoError(t, err) +// //renderUpdates(branchNodeUpdatesTwo) +// +// fmt.Printf("\n sequential roots:\n") +// for i, rh := range roots { +// fmt.Printf("%2d %+v\n", i, hex.EncodeToString(rh)) +// } +// +// //ms2.applyBranchNodeUpdates(branchNodeUpdatesTwo) +// +// require.EqualValues(t, batchRoot, roots[len(roots)-1], +// "expected equal roots, got sequential [%v] != batch [%v]", hex.EncodeToString(roots[len(roots)-1]), hex.EncodeToString(batchRoot)) +// require.Lenf(t, batchRoot, 32, "root hash length should be equal to 32 bytes") +//} +//func Test_BinPatriciaHashed_EmptyState(t *testing.T) { +// ctx := context.Background() +// ms := NewMockState(t) +// hph := NewBinPatriciaHashed(1, ms, ms.TempDir()) +// hph.SetTrace(false) +// plainKeys, updates := NewUpdateBuilder(). +// Balance("00", 4). +// Balance("01", 5). +// Balance("02", 6). +// Balance("03", 7). +// Balance("04", 8). +// Storage("04", "01", "0401"). +// Storage("03", "56", "050505"). +// Storage("03", "57", "060606"). +// Balance("05", 9). +// Storage("05", "02", "8989"). +// Storage("05", "04", "9898"). +// Build() +// +// err := ms.applyPlainUpdates(plainKeys, updates) +// require.NoError(t, err) +// +// firstRootHash, err := hph.ProcessKeys(ctx, plainKeys, "") +// require.NoError(t, err) +// +// t.Logf("root hash %x\n", firstRootHash) +// +// //ms.applyBranchNodeUpdates(branchNodeUpdates) +// +// fmt.Printf("1. Generated updates\n") +// //renderUpdates(branchNodeUpdates) +// +// // More updates +// hph.Reset() +// hph.SetTrace(false) +// plainKeys, updates = NewUpdateBuilder(). +// Storage("03", "58", "050505"). +// Build() +// err = ms.applyPlainUpdates(plainKeys, updates) +// require.NoError(t, err) +// +// secondRootHash, err := hph.ProcessKeys(ctx, plainKeys, "") +// require.NoError(t, err) +// require.NotEqualValues(t, firstRootHash, secondRootHash) +// +// //ms.applyBranchNodeUpdates(branchNodeUpdates) +// fmt.Printf("2. Generated single update\n") +// //renderUpdates(branchNodeUpdates) +// +// // More updates +// //hph.Reset() // one update - no need to reset +// hph.SetTrace(false) +// plainKeys, updates = NewUpdateBuilder(). +// Storage("03", "58", "070807"). +// Build() +// err = ms.applyPlainUpdates(plainKeys, updates) +// require.NoError(t, err) +// +// thirdRootHash, err := hph.ProcessKeys(ctx, plainKeys, "") +// require.NoError(t, err) +// require.NotEqualValues(t, secondRootHash, thirdRootHash) +// +// //ms.applyBranchNodeUpdates(branchNodeUpdates) +// fmt.Printf("3. Generated single update\n") +// //renderUpdates(branchNodeUpdates) +//} +// +//func Test_BinPatriciaHashed_EmptyUpdateState(t *testing.T) { +// ctx := context.Background() +// ms := NewMockState(t) +// hph := NewBinPatriciaHashed(1, ms, ms.TempDir()) +// hph.SetTrace(false) +// plainKeys, updates := NewUpdateBuilder(). +// Balance("00", 4). +// Nonce("00", 246462653). +// Balance("01", 5). +// CodeHash("03", "aaaaaaaaaaf7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a870"). +// Delete("00"). +// Storage("04", "01", "0401"). +// Storage("03", "56", "050505"). +// Build() +// +// err := ms.applyPlainUpdates(plainKeys, updates) +// require.NoError(t, err) +// +// hashBeforeEmptyUpdate, err := hph.ProcessKeys(ctx, plainKeys, "") +// require.NoError(t, err) +// require.NotEmpty(t, hashBeforeEmptyUpdate) +// +// //ms.applyBranchNodeUpdates(branchNodeUpdates) +// +// fmt.Println("1. 
Updates applied") +// //renderUpdates(branchNodeUpdates) +// +// // generate empty updates and do NOT reset tree +// hph.SetTrace(true) +// +// plainKeys, updates = NewUpdateBuilder().Build() +// +// err = ms.applyPlainUpdates(plainKeys, updates) +// require.NoError(t, err) +// +// hashAfterEmptyUpdate, err := hph.ProcessKeys(ctx, plainKeys, "") +// require.NoError(t, err) +// +// //ms.applyBranchNodeUpdates(branchNodeUpdates) +// fmt.Println("2. Empty updates applied without state reset") +// +// require.EqualValues(t, hashBeforeEmptyUpdate, hashAfterEmptyUpdate) +//} diff --git a/erigon-lib/commitment/commitment.go b/erigon-lib/commitment/commitment.go index dd79402ccc2..c42b0145bed 100644 --- a/erigon-lib/commitment/commitment.go +++ b/erigon-lib/commitment/commitment.go @@ -74,12 +74,12 @@ type PatriciaContext interface { // For each cell, it sets the cell type, clears the modified flag, fills the hash, // and for the extension, account, and leaf type, the `l` and `k` GetBranch(prefix []byte) ([]byte, uint64, error) - // fetch account with given plain key - GetAccount(plainKey []byte, cell *Cell) error - // fetch storage with given plain key - GetStorage(plainKey []byte, cell *Cell) error // store branch data PutBranch(prefix []byte, data []byte, prevData []byte, prevStep uint64) error + // fetch account with given plain key + GetAccount(plainKey []byte) (*Update, error) + // fetch storage with given plain key + GetStorage(plainKey []byte) (*Update, error) } type TrieVariant string @@ -94,10 +94,11 @@ const ( func InitializeTrieAndUpdateTree(tv TrieVariant, mode Mode, tmpdir string) (Trie, *UpdateTree) { switch tv { case VariantBinPatriciaTrie: - trie := NewBinPatriciaHashed(length.Addr, nil, tmpdir) - fn := func(key []byte) []byte { return hexToBin(key) } - tree := NewUpdateTree(mode, tmpdir, fn) - return trie, tree + //trie := NewBinPatriciaHashed(length.Addr, nil, tmpdir) + //fn := func(key []byte) []byte { return hexToBin(key) } + //tree := NewUpdateTree(mode, tmpdir, fn) + //return trie, tree + panic("omg") case VariantHexPatriciaTrie: fallthrough default: @@ -157,8 +158,8 @@ func (branchData BranchData) String() string { fmt.Fprintf(&sb, "%sstoragePlainKey=[%x]", comma, cell.storagePlainKey[:cell.storagePlainKeyLen]) comma = "," } - if cell.HashLen > 0 { - fmt.Fprintf(&sb, "%shash=[%x]", comma, cell.hash[:cell.HashLen]) + if cell.hashLen > 0 { + fmt.Fprintf(&sb, "%shash=[%x]", comma, cell.hash[:cell.hashLen]) } sb.WriteString("}\n") } @@ -301,7 +302,7 @@ func (be *BranchEncoder) EncodeBranch(bitmap, touchMap, afterMap uint16, readCel if cell.storagePlainKeyLen > 0 { fieldBits |= StoragePlainPart } - if cell.HashLen > 0 { + if cell.hashLen > 0 { fieldBits |= HashPart } if err := be.buf.WriteByte(byte(fieldBits)); err != nil { @@ -323,7 +324,7 @@ func (be *BranchEncoder) EncodeBranch(bitmap, touchMap, afterMap uint16, readCel } } if fieldBits&HashPart != 0 { - if err := putUvarAndVal(uint64(cell.HashLen), cell.hash[:cell.HashLen]); err != nil { + if err := putUvarAndVal(uint64(cell.hashLen), cell.hash[:cell.hashLen]); err != nil { return nil, 0, err } } @@ -758,8 +759,8 @@ func DecodeBranchAndCollectStat(key, branch []byte, tv TrieVariant) *BranchStat case c.storagePlainKeyLen > 0: stat.SPKSize += uint64(c.storagePlainKeyLen) stat.SPKCount++ - case c.HashLen > 0: - stat.HashSize += uint64(c.HashLen) + case c.hashLen > 0: + stat.HashSize += uint64(c.hashLen) stat.HashCount++ default: panic("no plain key" + fmt.Sprintf("#+%v", c)) @@ -847,7 +848,7 @@ func NewUpdateTree(m Mode, tmpdir 
string, hasher keyHasher) *UpdateTree { func (t *UpdateTree) TouchPlainKey(key, val []byte, fn func(c *KeyUpdate, val []byte)) { switch t.mode { case ModeUpdate: - pivot, updated := &KeyUpdate{plainKey: key}, false + pivot, updated := &KeyUpdate{plainKey: key, update: new(Update)}, false t.tree.DescendLessOrEqual(pivot, func(item *KeyUpdate) bool { if bytes.Equal(item.plainKey, pivot.plainKey) { @@ -896,41 +897,38 @@ func (t *UpdateTree) TouchAccount(c *KeyUpdate, val []byte) { c.update.Balance.Set(balance) c.update.Flags |= BalanceUpdate } - if !bytes.Equal(chash, c.update.CodeHashOrStorage[:]) { + if !bytes.Equal(chash, c.update.CodeHash[:]) { if len(chash) == 0 { - c.update.ValLength = length.Hash - copy(c.update.CodeHashOrStorage[:], EmptyCodeHash) + copy(c.update.CodeHash[:], EmptyCodeHash) } else { - copy(c.update.CodeHashOrStorage[:], chash) - c.update.ValLength = length.Hash c.update.Flags |= CodeUpdate + copy(c.update.CodeHash[:], chash) } } } func (t *UpdateTree) TouchStorage(c *KeyUpdate, val []byte) { - c.update.ValLength = len(val) + c.update.StorageLen = len(val) if len(val) == 0 { c.update.Flags = DeleteUpdate } else { c.update.Flags |= StorageUpdate - copy(c.update.CodeHashOrStorage[:], val) + copy(c.update.Storage[:], val) } } func (t *UpdateTree) TouchCode(c *KeyUpdate, val []byte) { - t.keccak.Reset() - t.keccak.Write(val) - t.keccak.Read(c.update.CodeHashOrStorage[:]) - if c.update.Flags == DeleteUpdate && len(val) == 0 { - c.update.Flags = DeleteUpdate - c.update.ValLength = 0 + c.update.Flags |= CodeUpdate + if len(val) == 0 { + if c.update.Flags == 0 || c.update.Flags == DeleteUpdate { + c.update.Flags = DeleteUpdate + } + copy(c.update.CodeHash[:], EmptyCodeHash) return } - c.update.ValLength = length.Hash - if len(val) != 0 { - c.update.Flags |= CodeUpdate - } + t.keccak.Reset() + t.keccak.Write(val) + t.keccak.Read(c.update.CodeHash[:]) } func (t *UpdateTree) Close() { @@ -1008,7 +1006,7 @@ func (t *UpdateTree) List(clear bool) ([][]byte, []Update) { updates := make([]Update, t.tree.Len()) i := 0 t.tree.Ascend(func(item *KeyUpdate) bool { - plainKeys[i], updates[i] = item.plainKey, item.update + plainKeys[i], updates[i] = item.plainKey, *item.update i++ return true }) @@ -1023,7 +1021,7 @@ func (t *UpdateTree) List(clear bool) ([][]byte, []Update) { type KeyUpdate struct { plainKey []byte - update Update + update *Update } func keyUpdateLessFn(i, j *KeyUpdate) bool { diff --git a/erigon-lib/commitment/commitment_test.go b/erigon-lib/commitment/commitment_test.go index cbf2ef54643..b1a668db62c 100644 --- a/erigon-lib/commitment/commitment_test.go +++ b/erigon-lib/commitment/commitment_test.go @@ -37,10 +37,10 @@ func generateCellRow(tb testing.TB, size int) (row []*Cell, bitmap uint16) { var bm uint16 for i := 0; i < len(row); i++ { row[i] = new(Cell) - row[i].HashLen = 32 + row[i].hashLen = 32 n, err := rand.Read(row[i].hash[:]) require.NoError(tb, err) - require.EqualValues(tb, row[i].HashLen, n) + require.EqualValues(tb, row[i].hashLen, n) th := rand.Intn(120) switch { @@ -373,7 +373,7 @@ func TestUpdateTree_TouchPlainKey(t *testing.T) { for i := 0; i < len(sortedUniqUpds); i++ { require.EqualValues(t, sortedUniqUpds[i].key, pk[i]) - require.EqualValues(t, sortedUniqUpds[i].val, upd[i].CodeHashOrStorage[:upd[i].ValLength]) + require.EqualValues(t, sortedUniqUpds[i].val, upd[i].Storage[:upd[i].StorageLen]) } pk, upd = utDirect.List(true) diff --git a/erigon-lib/commitment/hex_patricia_hashed.go b/erigon-lib/commitment/hex_patricia_hashed.go index 
dfaee4c5b31..2a5512d33cd 100644 --- a/erigon-lib/commitment/hex_patricia_hashed.go +++ b/erigon-lib/commitment/hex_patricia_hashed.go @@ -99,22 +99,17 @@ func NewHexPatriciaHashed(accountKeyLen int, ctx PatriciaContext, tmpdir string) } type Cell struct { - Balance uint256.Int - Nonce uint64 - HashLen int // Length of the hash (or embedded) - StorageLen int - accountPlainKeyLen int // length of account plain key - storagePlainKeyLen int // length of the storage plain key - downHashedLen int - extLen int downHashedKey [128]byte extension [64]byte accountPlainKey [length.Addr]byte // account plain key storagePlainKey [length.Addr + length.Hash]byte // storage plain key hash [length.Hash]byte // cell hash - CodeHash [length.Hash]byte // hash of the bytecode - Storage [length.Hash]byte - Delete bool + hashLen int // Length of the hash (or embedded) + accountPlainKeyLen int // length of account plain key + storagePlainKeyLen int // length of the storage plain key + downHashedLen int + extLen int + Update } var ( @@ -128,12 +123,27 @@ func (cell *Cell) reset() { cell.storagePlainKeyLen = 0 cell.downHashedLen = 0 cell.extLen = 0 - cell.HashLen = 0 - cell.Nonce = 0 - cell.Balance.Clear() - copy(cell.CodeHash[:], EmptyCodeHash) - cell.StorageLen = 0 - cell.Delete = false + cell.hashLen = 0 + cell.Update.Reset() +} + +func (cell *Cell) setFromUpdate(update *Update) { + if update.Flags == DeleteUpdate { + cell.Update.Flags = DeleteUpdate + return + } + if update.Flags&BalanceUpdate != 0 { + cell.Balance.Set(&update.Balance) + } + if update.Flags&NonceUpdate != 0 { + cell.Nonce = update.Nonce + } + if update.Flags&CodeUpdate != 0 { + copy(cell.CodeHash[:], update.CodeHash[:]) + } + if update.Flags&StorageUpdate != 0 { + cell.setStorage(update.Storage[:update.StorageLen]) + } } func (cell *Cell) fillFromUpperCell(upCell *Cell, depth, depthIncrement int) { @@ -176,9 +186,9 @@ func (cell *Cell) fillFromUpperCell(upCell *Cell, depth, depthIncrement int) { copy(cell.Storage[:], upCell.Storage[:upCell.StorageLen]) } } - cell.HashLen = upCell.HashLen - if upCell.HashLen > 0 { - copy(cell.hash[:], upCell.hash[:upCell.HashLen]) + cell.hashLen = upCell.hashLen + if upCell.hashLen > 0 { + copy(cell.hash[:], upCell.hash[:upCell.hashLen]) } } @@ -200,7 +210,7 @@ func (cell *Cell) fillFromLowerCell(lowCell *Cell, lowDepth int, preExtension [] copy(cell.Storage[:], lowCell.Storage[:lowCell.StorageLen]) } } - if lowCell.HashLen > 0 { + if lowCell.hashLen > 0 { if (lowCell.accountPlainKeyLen == 0 && lowDepth < 64) || (lowCell.storagePlainKeyLen == 0 && lowDepth > 64) { // Extension is related to either accounts branch node, or storage branch node, we prepend it by preExtension | nibble if len(preExtension) > 0 { @@ -219,9 +229,9 @@ func (cell *Cell) fillFromLowerCell(lowCell *Cell, lowDepth int, preExtension [] } } } - cell.HashLen = lowCell.HashLen - if lowCell.HashLen > 0 { - copy(cell.hash[:], lowCell.hash[:lowCell.HashLen]) + cell.hashLen = lowCell.hashLen + if lowCell.hashLen > 0 { + copy(cell.hash[:], lowCell.hash[:lowCell.hashLen]) } } @@ -372,13 +382,13 @@ func (cell *Cell) fillFromFields(data []byte, pos int, fieldBits PartFlags) (int if len(data) < pos+int(l) { return 0, errors.New("fillFromFields buffer too small for hash") } - cell.HashLen = int(l) + cell.hashLen = int(l) if l > 0 { copy(cell.hash[:], data[pos:pos+int(l)]) pos += int(l) } } else { - cell.HashLen = 0 + cell.hashLen = 0 } return pos, nil } @@ -712,17 +722,17 @@ func (hph *HexPatriciaHashed) computeCellHash(cell *Cell, depth int, buf []byte) 
if !storageRootHashIsSet { if cell.extLen > 0 { // Extension - if cell.HashLen > 0 { + if cell.hashLen > 0 { if hph.trace { - fmt.Printf("extensionHash for [%x]=>[%x]\n", cell.extension[:cell.extLen], cell.hash[:cell.HashLen]) + fmt.Printf("extensionHash for [%x]=>[%x]\n", cell.extension[:cell.extLen], cell.hash[:cell.hashLen]) } - if storageRootHash, err = hph.extensionHash(cell.extension[:cell.extLen], cell.hash[:cell.HashLen]); err != nil { + if storageRootHash, err = hph.extensionHash(cell.extension[:cell.extLen], cell.hash[:cell.hashLen]); err != nil { return nil, err } } else { return nil, errors.New("computeCellHash extension without hash") } - } else if cell.HashLen > 0 { + } else if cell.hashLen > 0 { storageRootHash = cell.hash } else { storageRootHash = *(*[length.Hash]byte)(EmptyRootHash) @@ -738,20 +748,20 @@ func (hph *HexPatriciaHashed) computeCellHash(cell *Cell, depth int, buf []byte) buf = append(buf, 0x80+32) if cell.extLen > 0 { // Extension - if cell.HashLen > 0 { + if cell.hashLen > 0 { if hph.trace { - fmt.Printf("extensionHash for [%x]=>[%x]\n", cell.extension[:cell.extLen], cell.hash[:cell.HashLen]) + fmt.Printf("extensionHash for [%x]=>[%x]\n", cell.extension[:cell.extLen], cell.hash[:cell.hashLen]) } var hash [length.Hash]byte - if hash, err = hph.extensionHash(cell.extension[:cell.extLen], cell.hash[:cell.HashLen]); err != nil { + if hash, err = hph.extensionHash(cell.extension[:cell.extLen], cell.hash[:cell.hashLen]); err != nil { return nil, err } buf = append(buf, hash[:]...) } else { return nil, errors.New("computeCellHash extension without hash") } - } else if cell.HashLen > 0 { - buf = append(buf, cell.hash[:cell.HashLen]...) + } else if cell.hashLen > 0 { + buf = append(buf, cell.hash[:cell.hashLen]...) //} else if storageRootHashIsSet { // buf = append(buf, storageRootHash[:]...) 
// copy(cell.h[:], storageRootHash[:]) @@ -768,7 +778,7 @@ func (hph *HexPatriciaHashed) needUnfolding(hashedKey []byte) int { if hph.trace { fmt.Printf("needUnfolding root, rootChecked = %t\n", hph.rootChecked) } - if hph.root.downHashedLen == 0 && hph.root.HashLen == 0 { + if hph.root.downHashedLen == 0 && hph.root.hashLen == 0 { if hph.rootChecked { // Previously checked, empty root, no unfolding needed return 0 @@ -782,14 +792,14 @@ func (hph *HexPatriciaHashed) needUnfolding(hashedKey []byte) int { cell = &hph.grid[hph.activeRows-1][col] depth = hph.depths[hph.activeRows-1] if hph.trace { - fmt.Printf("needUnfolding cell (%d, %x), currentKey=[%x], depth=%d, cell.hash=[%x]\n", hph.activeRows-1, col, hph.currentKey[:hph.currentKeyLen], depth, cell.hash[:cell.HashLen]) + fmt.Printf("needUnfolding cell (%d, %x), currentKey=[%x], depth=%d, cell.hash=[%x]\n", hph.activeRows-1, col, hph.currentKey[:hph.currentKeyLen], depth, cell.hash[:cell.hashLen]) } } if len(hashedKey) <= depth { return 0 } if cell.downHashedLen == 0 { - if cell.HashLen == 0 { + if cell.hashLen == 0 { // cell is empty, no need to unfold further return 0 } @@ -862,20 +872,24 @@ func (hph *HexPatriciaHashed) unfoldBranchNode(row int, deleted bool, depth int) return false, fmt.Errorf("prefix [%x], branchData[%x]: %w", hph.currentKey[:hph.currentKeyLen], branchData, err) } if hph.trace { - fmt.Printf("cell (%d, %x) depth=%d, hash=[%x], accountPlainKey=[%x], storagePlainKey=[%x], extension=[%x]\n", row, nibble, depth, cell.hash[:cell.HashLen], cell.accountPlainKey[:cell.accountPlainKeyLen], cell.storagePlainKey[:cell.storagePlainKeyLen], cell.extension[:cell.extLen]) + fmt.Printf("cell (%d, %x) depth=%d, hash=[%x], accountPlainKey=[%x], storagePlainKey=[%x], extension=[%x]\n", row, nibble, depth, cell.hash[:cell.hashLen], cell.accountPlainKey[:cell.accountPlainKeyLen], cell.storagePlainKey[:cell.storagePlainKeyLen], cell.extension[:cell.extLen]) } if cell.accountPlainKeyLen > 0 { - if err = hph.ctx.GetAccount(cell.accountPlainKey[:cell.accountPlainKeyLen], cell); err != nil { + update, err := hph.ctx.GetAccount(cell.accountPlainKey[:cell.accountPlainKeyLen]) + if err != nil { return false, fmt.Errorf("unfoldBranchNode GetAccount: %w", err) } + cell.setFromUpdate(update) if hph.trace { fmt.Printf("GetAccount[%x] return balance=%d, nonce=%d code=%x\n", cell.accountPlainKey[:cell.accountPlainKeyLen], &cell.Balance, cell.Nonce, cell.CodeHash[:]) } } if cell.storagePlainKeyLen > 0 { - if err = hph.ctx.GetStorage(cell.storagePlainKey[:cell.storagePlainKeyLen], cell); err != nil { + update, err := hph.ctx.GetStorage(cell.storagePlainKey[:cell.storagePlainKeyLen]) + if err != nil { return false, fmt.Errorf("unfoldBranchNode GetAccount: %w", err) } + cell.setFromUpdate(update) } if err = cell.deriveHashedKeys(depth, hph.keccak, hph.accountKeyLen); err != nil { return false, err @@ -894,7 +908,7 @@ func (hph *HexPatriciaHashed) unfold(hashedKey []byte, unfolding int) error { var col byte var upDepth, depth int if hph.activeRows == 0 { - if hph.rootChecked && hph.root.HashLen == 0 && hph.root.downHashedLen == 0 { + if hph.rootChecked && hph.root.hashLen == 0 && hph.root.downHashedLen == 0 { // No unfolding for empty root return nil } @@ -1043,7 +1057,7 @@ func (hph *HexPatriciaHashed) fold() (err error) { hph.afterMap[row-1] &^= (uint16(1) << col) } } - upCell.HashLen = 0 + upCell.hashLen = 0 upCell.accountPlainKeyLen = 0 upCell.storagePlainKeyLen = 0 upCell.extLen = 0 @@ -1172,7 +1186,7 @@ func (hph *HexPatriciaHashed) fold() (err 
error) { upCell.accountPlainKeyLen = 0 } upCell.storagePlainKeyLen = 0 - upCell.HashLen = 32 + upCell.hashLen = 32 if _, err := hph.keccak2.Read(upCell.hash[:]); err != nil { return err } @@ -1194,11 +1208,9 @@ func (hph *HexPatriciaHashed) deleteCell(hashedKey []byte) { fmt.Printf("deleteCell, activeRows = %d\n", hph.activeRows) } var cell *Cell - if hph.activeRows == 0 { - // Remove the root + if hph.activeRows == 0 { // Remove the root cell = &hph.root - hph.rootTouched = true - hph.rootPresent = false + hph.rootTouched, hph.rootPresent = true, false } else { row := hph.activeRows - 1 if hph.depths[row] < len(hashedKey) { @@ -1209,10 +1221,11 @@ func (hph *HexPatriciaHashed) deleteCell(hashedKey []byte) { } col := int(hashedKey[hph.currentKeyLen]) cell = &hph.grid[row][col] - if hph.afterMap[row]&(uint16(1)< %s\n", plainKey, u.String()) + } return cell } @@ -1275,14 +1300,14 @@ func (hph *HexPatriciaHashed) RootHash() ([]byte, error) { func (hph *HexPatriciaHashed) ProcessTree(ctx context.Context, tree *UpdateTree, logPrefix string) (rootHash []byte, err error) { var ( - stagedCell = new(Cell) - logEvery = time.NewTicker(20 * time.Second) + m runtime.MemStats + ki uint64 + update *Update - m runtime.MemStats - ki uint64 + updatesCount = tree.Size() + logEvery = time.NewTicker(20 * time.Second) ) defer logEvery.Stop() - updatesCount := tree.Size() err = tree.HashSort(ctx, func(hashedKey, plainKey []byte) error { select { @@ -1310,37 +1335,19 @@ func (hph *HexPatriciaHashed) ProcessTree(ctx context.Context, tree *UpdateTree, } // Update the cell - stagedCell.reset() if len(plainKey) == hph.accountKeyLen { - if err := hph.ctx.GetAccount(plainKey, stagedCell); err != nil { + update, err = hph.ctx.GetAccount(plainKey) + if err != nil { return fmt.Errorf("GetAccount for key %x failed: %w", plainKey, err) } - if !stagedCell.Delete { - cell := hph.updateCell(plainKey, hashedKey) - cell.setAccountFields(stagedCell.CodeHash[:], &stagedCell.Balance, stagedCell.Nonce) - - if hph.trace { - fmt.Printf("GetAccount update key %x => balance=%d nonce=%v codeHash=%x\n", cell.accountPlainKey, &cell.Balance, cell.Nonce, cell.CodeHash) - } - } } else { - if err = hph.ctx.GetStorage(plainKey, stagedCell); err != nil { + update, err = hph.ctx.GetStorage(plainKey) + if err != nil { return fmt.Errorf("GetStorage for key %x failed: %w", plainKey, err) } - if !stagedCell.Delete { - hph.updateCell(plainKey, hashedKey).setStorage(stagedCell.Storage[:stagedCell.StorageLen]) - if hph.trace { - fmt.Printf("GetStorage reading key %x => %x\n", plainKey, stagedCell.Storage[:stagedCell.StorageLen]) - } - } } + hph.updateCell(plainKey, hashedKey, update) - if stagedCell.Delete { - if hph.trace { - fmt.Printf("delete cell %x hash %x\n", plainKey, hashedKey) - } - hph.deleteCell(hashedKey) - } mxKeys.Inc() ki++ return nil @@ -1387,7 +1394,8 @@ func (hph *HexPatriciaHashed) ProcessKeys(ctx context.Context, plainKeys [][]byt defer logEvery.Stop() var m runtime.MemStats - stagedCell := new(Cell) + //stagedCell := new(Cell) + var update *Update for i, hashedKey := range hashedKeys { select { case <-ctx.Done(): @@ -1415,37 +1423,18 @@ func (hph *HexPatriciaHashed) ProcessKeys(ctx context.Context, plainKeys [][]byt } // Update the cell - stagedCell.reset() if len(plainKey) == hph.accountKeyLen { - if err := hph.ctx.GetAccount(plainKey, stagedCell); err != nil { + update, err = hph.ctx.GetAccount(plainKey) + if err != nil { return nil, fmt.Errorf("GetAccount for key %x failed: %w", plainKey, err) } - if !stagedCell.Delete { - cell 
:= hph.updateCell(plainKey, hashedKey) - cell.setAccountFields(stagedCell.CodeHash[:], &stagedCell.Balance, stagedCell.Nonce) - - if hph.trace { - fmt.Printf("GetAccount update key %x => balance=%d nonce=%v codeHash=%x\n", cell.accountPlainKey, &cell.Balance, cell.Nonce, cell.CodeHash) - } - } } else { - if err = hph.ctx.GetStorage(plainKey, stagedCell); err != nil { + update, err = hph.ctx.GetStorage(plainKey) + if err != nil { return nil, fmt.Errorf("GetStorage for key %x failed: %w", plainKey, err) } - if !stagedCell.Delete { - hph.updateCell(plainKey, hashedKey).setStorage(stagedCell.Storage[:stagedCell.StorageLen]) - if hph.trace { - fmt.Printf("GetStorage reading key %x => %x\n", plainKey, stagedCell.Storage[:stagedCell.StorageLen]) - } - } - } - - if stagedCell.Delete { - if hph.trace { - fmt.Printf("delete cell %x hash %x\n", plainKey, hashedKey) - } - hph.deleteCell(hashedKey) } + hph.updateCell(plainKey, hashedKey, update) mxKeys.Inc() } // Folding everything up to the root @@ -1485,6 +1474,7 @@ func (hph *HexPatriciaHashed) ProcessUpdates(ctx context.Context, plainKeys [][] return nil, ctx.Err() default: } + if hph.trace { fmt.Printf("(%d/%d) key=[%x] %s hashedKey=[%x] currentKey=[%x]\n", i+1, len(updates), update.plainKey, update.String(), update.hashedKey, hph.currentKey[:hph.currentKeyLen]) @@ -1502,45 +1492,7 @@ func (hph *HexPatriciaHashed) ProcessUpdates(ctx context.Context, plainKeys [][] } } - // Update the cell - if update.Flags == DeleteUpdate { - hph.deleteCell(update.hashedKey) - if hph.trace { - fmt.Printf("delete cell %x hash %x\n", update.plainKey, update.hashedKey) - } - } else { - cell := hph.updateCell(update.plainKey, update.hashedKey) - if hph.trace && len(update.plainKey) == hph.accountKeyLen { - fmt.Printf("GetAccount updated key %x =>", update.plainKey) - } - if update.Flags&BalanceUpdate != 0 { - if hph.trace { - fmt.Printf(" balance=%d", &update.Balance) - } - cell.Balance.Set(&update.Balance) - } - if update.Flags&NonceUpdate != 0 { - if hph.trace { - fmt.Printf(" nonce=%d", update.Nonce) - } - cell.Nonce = update.Nonce - } - if update.Flags&CodeUpdate != 0 { - if hph.trace { - fmt.Printf(" codeHash=%x", update.CodeHashOrStorage) - } - copy(cell.CodeHash[:], update.CodeHashOrStorage[:update.ValLength]) - } - if hph.trace { - fmt.Printf("\n") - } - if update.Flags&StorageUpdate != 0 { - cell.setStorage(update.CodeHashOrStorage[:update.ValLength]) - if hph.trace { - fmt.Printf("\rstorage set %x => %x\n", update.plainKey, update.CodeHashOrStorage[:update.ValLength]) - } - } - } + hph.updateCell(update.plainKey, update.hashedKey, &updates[i]) mxKeys.Inc() } @@ -1568,15 +1520,7 @@ func (hph *HexPatriciaHashed) Variant() TrieVariant { return VariantHexPatriciaT // Reset allows HexPatriciaHashed instance to be reused for the new commitment calculation func (hph *HexPatriciaHashed) Reset() { - hph.root.HashLen = 0 - hph.root.downHashedLen = 0 - hph.root.accountPlainKeyLen = 0 - hph.root.storagePlainKeyLen = 0 - hph.root.extLen = 0 - copy(hph.root.CodeHash[:], EmptyCodeHash) - hph.root.StorageLen = 0 - hph.root.Balance.Clear() - hph.root.Nonce = 0 + hph.root.reset() hph.rootTouched = false hph.rootChecked = false hph.rootPresent = true @@ -1723,16 +1667,16 @@ func (s *state) Decode(buf []byte) error { func (cell *Cell) Encode() []byte { var pos = 1 - size := pos + 5 + cell.HashLen + cell.accountPlainKeyLen + cell.storagePlainKeyLen + cell.downHashedLen + cell.extLen // max size + size := pos + 5 + cell.hashLen + cell.accountPlainKeyLen + cell.storagePlainKeyLen 
+ cell.downHashedLen + cell.extLen // max size buf := make([]byte, size) var flags uint8 - if cell.HashLen != 0 { + if cell.hashLen != 0 { flags |= cellFlagHash - buf[pos] = byte(cell.HashLen) + buf[pos] = byte(cell.hashLen) pos++ - copy(buf[pos:pos+cell.HashLen], cell.hash[:]) - pos += cell.HashLen + copy(buf[pos:pos+cell.hashLen], cell.hash[:]) + pos += cell.hashLen } if cell.accountPlainKeyLen != 0 { flags |= cellFlagAccount @@ -1762,7 +1706,7 @@ func (cell *Cell) Encode() []byte { copy(buf[pos:pos+cell.extLen], cell.extension[:]) pos += cell.extLen //nolint } - if cell.Delete { + if cell.Deleted() { flags |= cellFlagDelete } buf[0] = flags @@ -1789,10 +1733,10 @@ func (cell *Cell) Decode(buf []byte) error { pos++ if flags&cellFlagHash != 0 { - cell.HashLen = int(buf[pos]) + cell.hashLen = int(buf[pos]) pos++ - copy(cell.hash[:], buf[pos:pos+cell.HashLen]) - pos += cell.HashLen + copy(cell.hash[:], buf[pos:pos+cell.hashLen]) + pos += cell.hashLen } if flags&cellFlagAccount != 0 { cell.accountPlainKeyLen = int(buf[pos]) @@ -1819,7 +1763,8 @@ func (cell *Cell) Decode(buf []byte) error { pos += cell.extLen //nolint } if flags&cellFlagDelete != 0 { - cell.Delete = true + panic("deleted cell should not be encoded") + cell.Update.Flags = DeleteUpdate } return nil } @@ -1889,17 +1834,22 @@ func (hph *HexPatriciaHashed) SetState(buf []byte) error { if hph.ctx == nil { panic("nil ctx") } - if err := hph.ctx.GetAccount(hph.root.accountPlainKey[:hph.root.accountPlainKeyLen], &hph.root); err != nil { + + update, err := hph.ctx.GetAccount(hph.root.accountPlainKey[:hph.root.accountPlainKeyLen]) + if err != nil { return err } + hph.root.setFromUpdate(update) } if hph.root.storagePlainKeyLen > 0 { if hph.ctx == nil { panic("nil ctx") } - if err := hph.ctx.GetStorage(hph.root.storagePlainKey[:hph.root.storagePlainKeyLen], &hph.root); err != nil { + update, err := hph.ctx.GetStorage(hph.root.storagePlainKey[:hph.root.storagePlainKeyLen]) + if err != nil { return err } + hph.root.setFromUpdate(update) //hph.root.deriveHashedKeys(0, hph.keccak, hph.accountKeyLen) } @@ -2058,21 +2008,22 @@ func (uf UpdateFlags) String() string { } type Update struct { - hashedKey []byte - plainKey []byte - Flags UpdateFlags - Balance uint256.Int - Nonce uint64 - ValLength int - CodeHashOrStorage [length.Hash]byte + hashedKey []byte + plainKey []byte + CodeHash [length.Hash]byte + Storage [length.Hash]byte + StorageLen int + Flags UpdateFlags + Balance uint256.Int + Nonce uint64 } func (u *Update) Reset() { u.Flags = 0 u.Balance.Clear() u.Nonce = 0 - u.ValLength = 0 - copy(u.CodeHashOrStorage[:], EmptyCodeHash) + u.StorageLen = 0 + copy(u.CodeHash[:], EmptyCodeHash) } func (u *Update) Merge(b *Update) { @@ -2090,77 +2041,12 @@ func (u *Update) Merge(b *Update) { } if b.Flags&CodeUpdate != 0 { u.Flags |= CodeUpdate - copy(u.CodeHashOrStorage[:], b.CodeHashOrStorage[:]) - u.ValLength = b.ValLength + copy(u.CodeHash[:], b.CodeHash[:]) } if b.Flags&StorageUpdate != 0 { u.Flags |= StorageUpdate - copy(u.CodeHashOrStorage[:], b.CodeHashOrStorage[:]) - u.ValLength = b.ValLength - } -} - -func (u *Update) DecodeForStorage(enc []byte) { - //u.Reset() - - //balance := new(uint256.Int) - // - //if len(enc) > 0 { - // pos := 0 - // nonceBytes := int(enc[pos]) - // pos++ - // if nonceBytes > 0 { - // nonce := bytesToUint64(enc[pos : pos+nonceBytes]) - // if u.Nonce != nonce { - // u.Flags |= NonceUpdate - // } - // u.Nonce = nonce - // pos += nonceBytes - // } - // balanceBytes := int(enc[pos]) - // pos++ - // if balanceBytes > 0 { - // 
balance.SetBytes(enc[pos : pos+balanceBytes]) - // if u.Balance.Cmp(balance) != 0 { - // u.Flags |= BalanceUpdate - // } - // u.Balance.Set(balance) - // pos += balanceBytes - // } - // codeHashBytes := int(enc[pos]) - // pos++ - // - // if codeHashBytes > 0 { - // if !bytes.Equal(u.CodeHashOrStorage[:], enc[pos:pos+codeHashBytes]) { - // u.Flags |= CodeUpdate - // copy(u.CodeHashOrStorage[:], enc[pos:pos+codeHashBytes]) - // u.ValLength = length.Hash - // } - // } - //} - //return - - pos := 0 - nonceBytes := int(enc[pos]) - pos++ - if nonceBytes > 0 { - u.Nonce = bytesToUint64(enc[pos : pos+nonceBytes]) - u.Flags |= NonceUpdate - pos += nonceBytes - } - balanceBytes := int(enc[pos]) - pos++ - if balanceBytes > 0 { - u.Balance.SetBytes(enc[pos : pos+balanceBytes]) - u.Flags |= BalanceUpdate - pos += balanceBytes - } - codeHashBytes := int(enc[pos]) - pos++ - if codeHashBytes > 0 { - copy(u.CodeHashOrStorage[:], enc[pos:pos+codeHashBytes]) - u.ValLength = length.Hash - u.Flags |= CodeUpdate + copy(u.Storage[:], b.Storage[:b.StorageLen]) + u.StorageLen = b.StorageLen } } @@ -2175,22 +2061,28 @@ func (u *Update) Encode(buf []byte, numBuf []byte) []byte { buf = append(buf, numBuf[:n]...) } if u.Flags&CodeUpdate != 0 { - buf = append(buf, u.CodeHashOrStorage[:]...) + buf = append(buf, u.CodeHash[:]...) } if u.Flags&StorageUpdate != 0 { - n := binary.PutUvarint(numBuf, uint64(u.ValLength)) + n := binary.PutUvarint(numBuf, uint64(u.StorageLen)) buf = append(buf, numBuf[:n]...) - if u.ValLength > 0 { - buf = append(buf, u.CodeHashOrStorage[:u.ValLength]...) + if u.StorageLen > 0 { + buf = append(buf, u.Storage[:u.StorageLen]...) } } return buf } +func (u *Update) Deleted() bool { + return u.Flags == DeleteUpdate +} + func (u *Update) Decode(buf []byte, pos int) (int, error) { if len(buf) < pos+1 { return 0, errors.New("decode Update: buffer too small for flags") } + u.Reset() + u.Flags = UpdateFlags(buf[pos]) pos++ if u.Flags&BalanceUpdate != 0 { @@ -2220,9 +2112,8 @@ func (u *Update) Decode(buf []byte, pos int) (int, error) { if len(buf) < pos+length.Hash { return 0, errors.New("decode Update: buffer too small for codeHash") } - copy(u.CodeHashOrStorage[:], buf[pos:pos+32]) + copy(u.CodeHash[:], buf[pos:pos+32]) pos += length.Hash - u.ValLength = length.Hash } if u.Flags&StorageUpdate != 0 { l, n := binary.Uvarint(buf[pos:]) @@ -2236,9 +2127,9 @@ func (u *Update) Decode(buf []byte, pos int) (int, error) { if len(buf) < pos+int(l) { return 0, errors.New("decode Update: buffer too small for storage") } - u.ValLength = int(l) - copy(u.CodeHashOrStorage[:], buf[pos:pos+int(l)]) - pos += int(l) + u.StorageLen = int(l) + copy(u.Storage[:], buf[pos:pos+u.StorageLen]) + pos += u.StorageLen } return pos, nil } @@ -2246,6 +2137,9 @@ func (u *Update) Decode(buf []byte, pos int) (int, error) { func (u *Update) String() string { var sb strings.Builder sb.WriteString(fmt.Sprintf("Flags: [%s]", u.Flags)) + if u.Deleted() { + sb.WriteString(", DELETED") + } if u.Flags&BalanceUpdate != 0 { sb.WriteString(fmt.Sprintf(", Balance: [%d]", &u.Balance)) } @@ -2253,10 +2147,10 @@ func (u *Update) String() string { sb.WriteString(fmt.Sprintf(", Nonce: [%d]", u.Nonce)) } if u.Flags&CodeUpdate != 0 { - sb.WriteString(fmt.Sprintf(", CodeHash: [%x]", u.CodeHashOrStorage)) + sb.WriteString(fmt.Sprintf(", CodeHash: [%x]", u.CodeHash)) } if u.Flags&StorageUpdate != 0 { - sb.WriteString(fmt.Sprintf(", Storage: [%x]", u.CodeHashOrStorage[:u.ValLength])) + sb.WriteString(fmt.Sprintf(", Storage: [%x]", u.Storage[:u.StorageLen])) } 
return sb.String() } diff --git a/erigon-lib/commitment/hex_patricia_hashed_test.go b/erigon-lib/commitment/hex_patricia_hashed_test.go index da27e2e09d4..7f96a957201 100644 --- a/erigon-lib/commitment/hex_patricia_hashed_test.go +++ b/erigon-lib/commitment/hex_patricia_hashed_test.go @@ -503,9 +503,11 @@ func Test_HexPatriciaHashed_Sepolia(t *testing.T) { func Test_Cell_EncodeDecode(t *testing.T) { rnd := rand.New(rand.NewSource(time.Now().UnixMilli())) first := &Cell{ - Nonce: rnd.Uint64(), - HashLen: length.Hash, - StorageLen: rnd.Intn(33), + //Nonce: rnd.Uint64(), + //StorageLen: rnd.Intn(33), + //CodeHash: [32]byte{}, + //Storage: [32]byte{}, + hashLen: length.Hash, accountPlainKeyLen: length.Addr, storagePlainKeyLen: length.Addr + length.Hash, downHashedLen: rnd.Intn(129), @@ -514,8 +516,6 @@ func Test_Cell_EncodeDecode(t *testing.T) { extension: [64]byte{}, storagePlainKey: [52]byte{}, hash: [32]byte{}, - CodeHash: [32]byte{}, - Storage: [32]byte{}, accountPlainKey: [20]byte{}, } b := uint256.NewInt(rnd.Uint64()) @@ -528,9 +528,9 @@ func Test_Cell_EncodeDecode(t *testing.T) { rnd.Read(first.hash[:]) rnd.Read(first.CodeHash[:]) rnd.Read(first.Storage[:first.StorageLen]) - if rnd.Intn(100) > 50 { - first.Delete = true - } + //if rnd.Intn(100) > 50 { + // first.Delete = true + //} second := &Cell{} second.Decode(first.Encode()) @@ -539,13 +539,13 @@ func Test_Cell_EncodeDecode(t *testing.T) { require.EqualValues(t, first.downHashedKey[:], second.downHashedKey[:]) require.EqualValues(t, first.accountPlainKeyLen, second.accountPlainKeyLen) require.EqualValues(t, first.storagePlainKeyLen, second.storagePlainKeyLen) - require.EqualValues(t, first.HashLen, second.HashLen) + require.EqualValues(t, first.hashLen, second.hashLen) require.EqualValues(t, first.accountPlainKey[:], second.accountPlainKey[:]) require.EqualValues(t, first.storagePlainKey[:], second.storagePlainKey[:]) require.EqualValues(t, first.hash[:], second.hash[:]) require.EqualValues(t, first.extension[:first.extLen], second.extension[:second.extLen]) // encode doesn't code Nonce, Balance, CodeHash and Storage - require.EqualValues(t, first.Delete, second.Delete) + //require.EqualValues(t, first.Delete, second.Delete) } func Test_HexPatriciaHashed_StateEncode(t *testing.T) { diff --git a/erigon-lib/commitment/patricia_state_mock_test.go b/erigon-lib/commitment/patricia_state_mock_test.go index 62af90a955f..b7641ea1783 100644 --- a/erigon-lib/commitment/patricia_state_mock_test.go +++ b/erigon-lib/commitment/patricia_state_mock_test.go @@ -66,90 +66,67 @@ func (ms *MockState) GetBranch(prefix []byte) ([]byte, uint64, error) { return nil, 0, nil } -func (ms *MockState) GetAccount(plainKey []byte, cell *Cell) error { +func (ms *MockState) GetAccount(plainKey []byte) (*Update, error) { exBytes, ok := ms.sm[string(plainKey[:])] if !ok { ms.t.Logf("GetAccount not found key [%x]", plainKey) - cell.Delete = true - return nil + u := new(Update) + u.Flags = DeleteUpdate + return u, nil } + var ex Update pos, err := ex.Decode(exBytes, 0) if err != nil { ms.t.Fatalf("GetAccount decode existing [%x], bytes: [%x]: %v", plainKey, exBytes, err) - return nil + return nil, nil } if pos != len(exBytes) { ms.t.Fatalf("GetAccount key [%x] leftover %d bytes in [%x], comsumed %x", plainKey, len(exBytes)-pos, exBytes, pos) - return nil + return nil, nil } if ex.Flags&StorageUpdate != 0 { ms.t.Logf("GetAccount reading storage item for key [%x]", plainKey) - return errors.New("storage read by GetAccount") + return nil, errors.New("storage read by 
GetAccount") } if ex.Flags&DeleteUpdate != 0 { ms.t.Fatalf("GetAccount reading deleted account for key [%x]", plainKey) - return nil + return nil, nil } - if ex.Flags&BalanceUpdate != 0 { - cell.Balance.Set(&ex.Balance) - } else { - cell.Balance.Clear() - } - if ex.Flags&NonceUpdate != 0 { - cell.Nonce = ex.Nonce - } else { - cell.Nonce = 0 - } - if ex.Flags&CodeUpdate != 0 { - copy(cell.CodeHash[:], ex.CodeHashOrStorage[:]) - } else { - copy(cell.CodeHash[:], EmptyCodeHash) - } - return nil + return &ex, nil } -func (ms *MockState) GetStorage(plainKey []byte, cell *Cell) error { +func (ms *MockState) GetStorage(plainKey []byte) (*Update, error) { exBytes, ok := ms.sm[string(plainKey[:])] if !ok { ms.t.Logf("GetStorage not found key [%x]", plainKey) - cell.Delete = true - return nil + u := new(Update) + u.Flags = DeleteUpdate + return u, nil } var ex Update pos, err := ex.Decode(exBytes, 0) if err != nil { ms.t.Fatalf("GetStorage decode existing [%x], bytes: [%x]: %v", plainKey, exBytes, err) - return nil + return nil, nil } if pos != len(exBytes) { ms.t.Fatalf("GetStorage key [%x] leftover bytes in [%x], comsumed %x", plainKey, exBytes, pos) - return nil + return nil, nil } if ex.Flags&BalanceUpdate != 0 { ms.t.Logf("GetStorage reading balance for key [%x]", plainKey) - return nil + return nil, nil } if ex.Flags&NonceUpdate != 0 { ms.t.Fatalf("GetStorage reading nonce for key [%x]", plainKey) - return nil + return nil, nil } if ex.Flags&CodeUpdate != 0 { ms.t.Fatalf("GetStorage reading codeHash for key [%x]", plainKey) - return nil - } - if ex.Flags&DeleteUpdate != 0 { - ms.t.Fatalf("GetStorage reading deleted item for key [%x]", plainKey) - return nil + return nil, nil } - if ex.Flags&StorageUpdate != 0 { - copy(cell.Storage[:], ex.CodeHashOrStorage[:]) - cell.StorageLen = len(ex.CodeHashOrStorage) - } else { - cell.StorageLen = 0 - cell.Storage = [length.Hash]byte{} - } - return nil + return &ex, nil } func (ms *MockState) applyPlainUpdates(plainKeys [][]byte, updates []Update) error { @@ -177,12 +154,12 @@ func (ms *MockState) applyPlainUpdates(plainKeys [][]byte, updates []Update) err } if update.Flags&CodeUpdate != 0 { ex.Flags |= CodeUpdate - copy(ex.CodeHashOrStorage[:], update.CodeHashOrStorage[:]) + copy(ex.CodeHash[:], update.CodeHash[:]) } if update.Flags&StorageUpdate != 0 { ex.Flags |= StorageUpdate - copy(ex.CodeHashOrStorage[:], update.CodeHashOrStorage[:]) - ex.ValLength = update.ValLength + copy(ex.Storage[:], update.Storage[:]) + ex.StorageLen = update.StorageLen } ms.sm[string(key)] = ex.Encode(nil, ms.numBuf[:]) } else { @@ -422,7 +399,7 @@ func (ub *UpdateBuilder) Build() (plainKeys [][]byte, updates []Update) { } if codeHash, ok := ub.codeHashes[string(key)]; ok { u.Flags |= CodeUpdate - copy(u.CodeHashOrStorage[:], codeHash[:]) + copy(u.CodeHash[:], codeHash[:]) } if _, del := ub.deletes[string(key)]; del { u.Flags = DeleteUpdate @@ -438,9 +415,9 @@ func (ub *UpdateBuilder) Build() (plainKeys [][]byte, updates []Update) { if sm, ok1 := ub.storages[string(key)]; ok1 { if storage, ok2 := sm[string(key2)]; ok2 { u.Flags |= StorageUpdate - u.CodeHashOrStorage = [length.Hash]byte{} - u.ValLength = len(storage) - copy(u.CodeHashOrStorage[:], storage) + u.CodeHash = [length.Hash]byte{} + u.StorageLen = len(storage) + copy(u.CodeHash[:], storage) } } } diff --git a/erigon-lib/state/domain_shared.go b/erigon-lib/state/domain_shared.go index 68fd2601e31..426ac18625a 100644 --- a/erigon-lib/state/domain_shared.go +++ b/erigon-lib/state/domain_shared.go @@ -1088,51 +1088,67 @@ 
func (sdc *SharedDomainsCommitmentContext) PutBranch(prefix []byte, data []byte, return sdc.sharedDomains.updateCommitmentData(prefix, data, prevData, prevStep) } -func (sdc *SharedDomainsCommitmentContext) GetAccount(plainKey []byte, cell *commitment.Cell) error { +func (sdc *SharedDomainsCommitmentContext) GetAccount(plainKey []byte) (*commitment.Update, error) { encAccount, _, err := sdc.sharedDomains.DomainGet(kv.AccountsDomain, plainKey, nil) if err != nil { - return fmt.Errorf("GetAccount failed: %w", err) + return nil, fmt.Errorf("GetAccount failed: %w", err) } - cell.Nonce = 0 - cell.Balance.Clear() + u := new(commitment.Update) + u.Reset() + if len(encAccount) > 0 { nonce, balance, chash := types.DecodeAccountBytesV3(encAccount) - cell.Nonce = nonce - cell.Balance.Set(balance) + u.Flags |= commitment.NonceUpdate + u.Nonce = nonce + u.Flags |= commitment.BalanceUpdate + u.Balance.Set(balance) if len(chash) > 0 { - copy(cell.CodeHash[:], chash) + u.Flags |= commitment.CodeUpdate + copy(u.CodeHash[:], chash) } } - if bytes.Equal(cell.CodeHash[:], commitment.EmptyCodeHash) { - cell.Delete = len(encAccount) == 0 - return nil + if bytes.Equal(u.CodeHash[:], commitment.EmptyCodeHash) { + if len(encAccount) == 0 { + u.Flags = commitment.DeleteUpdate + } + return u, nil } code, _, err := sdc.sharedDomains.DomainGet(kv.CodeDomain, plainKey, nil) if err != nil { - return fmt.Errorf("GetAccount: failed to read latest code: %w", err) + return nil, fmt.Errorf("GetAccount/Code: failed to read latest code: %w", err) } if len(code) > 0 { sdc.keccak.Reset() sdc.keccak.Write(code) - sdc.keccak.Read(cell.CodeHash[:]) + sdc.keccak.Read(u.CodeHash[:]) + u.Flags |= commitment.CodeUpdate + } else { - cell.CodeHash = commitment.EmptyCodeHashArray + copy(u.CodeHash[:], commitment.EmptyCodeHashArray[:]) } - cell.Delete = len(encAccount) == 0 && len(code) == 0 - return nil + + if len(encAccount) == 0 && len(code) == 0 { + u.Flags = commitment.DeleteUpdate + } + return u, nil } -func (sdc *SharedDomainsCommitmentContext) GetStorage(plainKey []byte, cell *commitment.Cell) error { +func (sdc *SharedDomainsCommitmentContext) GetStorage(plainKey []byte) (*commitment.Update, error) { // Look in the summary table first enc, _, err := sdc.sharedDomains.DomainGet(kv.StorageDomain, plainKey, nil) if err != nil { - return err + return nil, err } - cell.StorageLen = len(enc) - copy(cell.Storage[:], enc) - cell.Delete = cell.StorageLen == 0 - return nil + u := new(commitment.Update) + u.StorageLen = len(enc) + if len(enc) == 0 { + u.Flags = commitment.DeleteUpdate + } else { + u.Flags |= commitment.StorageUpdate + copy(u.Storage[:u.StorageLen], enc) + } + return u, nil } func (sdc *SharedDomainsCommitmentContext) Reset() { From 08b6eabf3909f084f2dd0acac134303952e17d95 Mon Sep 17 00:00:00 2001 From: awskii Date: Thu, 1 Aug 2024 17:38:11 +0100 Subject: [PATCH 2/5] lint --- erigon-lib/commitment/hex_patricia_hashed.go | 30 +++++++------------- 1 file changed, 10 insertions(+), 20 deletions(-) diff --git a/erigon-lib/commitment/hex_patricia_hashed.go b/erigon-lib/commitment/hex_patricia_hashed.go index b8aa46a9be2..871a42dfe92 100644 --- a/erigon-lib/commitment/hex_patricia_hashed.go +++ b/erigon-lib/commitment/hex_patricia_hashed.go @@ -400,16 +400,6 @@ func (cell *Cell) setStorage(value []byte) { } } -func (cell *Cell) setAccountFields(codeHash []byte, balance *uint256.Int, nonce uint64) { - if len(codeHash) == 0 { - codeHash = common.Copy(EmptyCodeHash) - } - copy(cell.CodeHash[:], codeHash) - - 
cell.Balance.SetBytes(balance.Bytes()) - cell.Nonce = nonce -} - func (cell *Cell) accountForHashing(buffer []byte, storageRootHash [length.Hash]byte) int { balanceBytes := 0 if !cell.Balance.LtUint64(128) { @@ -1763,7 +1753,7 @@ func (cell *Cell) Decode(buf []byte) error { pos += cell.extLen //nolint } if flags&cellFlagDelete != 0 { - panic("deleted cell should not be encoded") + log.Warn("deleted cell should not be encoded", "cell", cell.String()) cell.Update.Flags = DeleteUpdate } return nil @@ -1856,15 +1846,15 @@ func (hph *HexPatriciaHashed) SetState(buf []byte) error { return nil } -func bytesToUint64(buf []byte) (x uint64) { - for i, b := range buf { - x = x<<8 + uint64(b) - if i == 7 { - return - } - } - return -} +//func bytesToUint64(buf []byte) (x uint64) { +// for i, b := range buf { +// x = x<<8 + uint64(b) +// if i == 7 { +// return +// } +// } +// return +//} func hexToCompact(key []byte) []byte { zeroByte, keyPos, keyLen := makeCompactZeroByte(key) From d8c44f474a3369afbb7811053883118eb2f022eb Mon Sep 17 00:00:00 2001 From: awskii Date: Thu, 1 Aug 2024 18:46:40 +0100 Subject: [PATCH 3/5] added test --- .../commitment/hex_patricia_hashed_test.go | 78 +++++++++++++++++++ 1 file changed, 78 insertions(+) diff --git a/erigon-lib/commitment/hex_patricia_hashed_test.go b/erigon-lib/commitment/hex_patricia_hashed_test.go index 7f96a957201..df0ea9387ed 100644 --- a/erigon-lib/commitment/hex_patricia_hashed_test.go +++ b/erigon-lib/commitment/hex_patricia_hashed_test.go @@ -966,3 +966,81 @@ func Test_HexPatriciaHashed_ProcessUpdates_UniqueRepresentationInTheMiddle(t *te "expected equal roots, got sequential [%v] != batch [%v]", hex.EncodeToString(roots[len(roots)-1]), hex.EncodeToString(batchRoot)) require.Lenf(t, batchRoot, 32, "root hash length should be equal to 32 bytes") } + +func TestUpdate_EncodeDecode(t *testing.T) { + updates := []Update{ + {Flags: BalanceUpdate, Balance: *uint256.NewInt(123), CodeHash: [32]byte(EmptyCodeHash)}, + {Flags: BalanceUpdate | NonceUpdate, Balance: *uint256.NewInt(45639015), Nonce: 123, CodeHash: [32]byte(EmptyCodeHash)}, + {Flags: BalanceUpdate | NonceUpdate | CodeUpdate, Balance: *uint256.NewInt(45639015), Nonce: 123, + CodeHash: [length.Hash]byte{ + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20}}, + {Flags: StorageUpdate, Storage: [length.Hash]byte{0x21, 0x22, 0x23, 0x24}, StorageLen: 4, CodeHash: [32]byte(EmptyCodeHash)}, + {Flags: DeleteUpdate, CodeHash: [32]byte(EmptyCodeHash)}, + } + + var numBuf [10]byte + for i, update := range updates { + encoded := update.Encode(nil, numBuf[:]) + + decoded := Update{} + n, err := decoded.Decode(encoded, 0) + require.NoError(t, err, i) + require.Equal(t, len(encoded), n, i) + + require.Equal(t, update.Flags, decoded.Flags, i) + require.Equal(t, update.Balance, decoded.Balance, i) + require.Equal(t, update.Nonce, decoded.Nonce, i) + require.Equal(t, update.CodeHash, decoded.CodeHash, i) + require.Equal(t, update.Storage, decoded.Storage, i) + require.Equal(t, update.StorageLen, decoded.StorageLen, i) + } +} + +func TestUpdate_Merge(t *testing.T) { + type tcase struct { + a, b, e Update + } + + updates := []tcase{ + { + a: Update{Flags: BalanceUpdate, Balance: *uint256.NewInt(123), CodeHash: [32]byte(EmptyCodeHash)}, + b: Update{Flags: BalanceUpdate | NonceUpdate, Balance: *uint256.NewInt(45639015), Nonce: 123, CodeHash: [32]byte(EmptyCodeHash)}, + e: 
Update{Flags: BalanceUpdate | NonceUpdate, Balance: *uint256.NewInt(45639015), Nonce: 123, CodeHash: [32]byte(EmptyCodeHash)}, + }, + { + a: Update{Flags: BalanceUpdate | NonceUpdate, Balance: *uint256.NewInt(45639015), Nonce: 123, CodeHash: [32]byte(EmptyCodeHash)}, + b: Update{Flags: BalanceUpdate | NonceUpdate | CodeUpdate, Balance: *uint256.NewInt(1000000), Nonce: 547, CodeHash: [32]byte(EmptyCodeHash)}, + e: Update{Flags: BalanceUpdate | NonceUpdate | CodeUpdate, Balance: *uint256.NewInt(1000000), Nonce: 547, CodeHash: [32]byte(EmptyCodeHash)}, + }, + { + a: Update{Flags: BalanceUpdate | NonceUpdate | CodeUpdate, Balance: *uint256.NewInt(4568314), Nonce: 123, CodeHash: [32]byte(EmptyCodeHash)}, + b: Update{Flags: BalanceUpdate | NonceUpdate | CodeUpdate, Balance: *uint256.NewInt(45639015), Nonce: 124, + CodeHash: [length.Hash]byte{ + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20}}, + e: Update{Flags: BalanceUpdate | NonceUpdate | CodeUpdate, Balance: *uint256.NewInt(45639015), Nonce: 124, CodeHash: [length.Hash]byte{ + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20}}, + }, + { + a: Update{Flags: StorageUpdate, Storage: [length.Hash]byte{0x21, 0x22, 0x23, 0x24}, StorageLen: 4, CodeHash: [32]byte(EmptyCodeHash)}, + b: Update{Flags: DeleteUpdate, CodeHash: [32]byte(EmptyCodeHash)}, + e: Update{Flags: DeleteUpdate, CodeHash: [32]byte(EmptyCodeHash)}, + }, + } + + var numBuf [10]byte + for i, tc := range updates { + tc.a.Merge(&tc.b) + encA := tc.a.Encode(nil, numBuf[:]) + encE := tc.e.Encode(nil, numBuf[:]) + require.EqualValues(t, encE, encA, i) + } +} From ab501d73231f65a4e71fe94651d31732886851f8 Mon Sep 17 00:00:00 2001 From: awskii Date: Tue, 6 Aug 2024 16:50:53 +0100 Subject: [PATCH 4/5] save --- erigon-lib/commitment/hex_patricia_hashed.go | 26 +----- .../commitment/hex_patricia_hashed_test.go | 81 +++++++++++++++++++ 2 files changed, 82 insertions(+), 25 deletions(-) diff --git a/erigon-lib/commitment/hex_patricia_hashed.go b/erigon-lib/commitment/hex_patricia_hashed.go index 871a42dfe92..3f0963f00ae 100644 --- a/erigon-lib/commitment/hex_patricia_hashed.go +++ b/erigon-lib/commitment/hex_patricia_hashed.go @@ -127,24 +127,7 @@ func (cell *Cell) reset() { cell.Update.Reset() } -func (cell *Cell) setFromUpdate(update *Update) { - if update.Flags == DeleteUpdate { - cell.Update.Flags = DeleteUpdate - return - } - if update.Flags&BalanceUpdate != 0 { - cell.Balance.Set(&update.Balance) - } - if update.Flags&NonceUpdate != 0 { - cell.Nonce = update.Nonce - } - if update.Flags&CodeUpdate != 0 { - copy(cell.CodeHash[:], update.CodeHash[:]) - } - if update.Flags&StorageUpdate != 0 { - cell.setStorage(update.Storage[:update.StorageLen]) - } -} +func (cell *Cell) setFromUpdate(update *Update) { cell.Update.Merge(update) } func (cell *Cell) fillFromUpperCell(upCell *Cell, depth, depthIncrement int) { if upCell.downHashedLen >= depthIncrement { @@ -393,13 +376,6 @@ func (cell *Cell) fillFromFields(data []byte, pos int, fieldBits PartFlags) (int return pos, nil } -func (cell *Cell) setStorage(value []byte) { - cell.StorageLen = len(value) - if len(value) > 0 { - copy(cell.Storage[:], value) - } -} - func (cell *Cell) accountForHashing(buffer []byte, storageRootHash [length.Hash]byte) int { 
balanceBytes := 0 if !cell.Balance.LtUint64(128) { diff --git a/erigon-lib/commitment/hex_patricia_hashed_test.go b/erigon-lib/commitment/hex_patricia_hashed_test.go index df0ea9387ed..9741f08c70f 100644 --- a/erigon-lib/commitment/hex_patricia_hashed_test.go +++ b/erigon-lib/commitment/hex_patricia_hashed_test.go @@ -1044,3 +1044,84 @@ func TestUpdate_Merge(t *testing.T) { require.EqualValues(t, encE, encA, i) } } + +func TestCell_setFromUpdate(t *testing.T) { + rnd := rand.New(rand.NewSource(42)) + + b := uint256.NewInt(rnd.Uint64()) + update := Update{} + update.Reset() + + update.Balance = *b + update.Nonce = rand.Uint64() + rnd.Read(update.CodeHash[:]) + update.Flags = BalanceUpdate | NonceUpdate | CodeUpdate + + target := new(Cell) + target.setFromUpdate(&update) + require.True(t, update.Balance.Eq(&target.Balance)) + require.EqualValues(t, update.Nonce, target.Nonce) + require.EqualValues(t, update.CodeHash, target.CodeHash) + require.EqualValues(t, 0, target.StorageLen) + + update.Reset() + + update.Balance.SetUint64(0) + update.Nonce = rand.Uint64() + rnd.Read(update.CodeHash[:]) + update.Flags = NonceUpdate | CodeUpdate + + target.reset() + target.setFromUpdate(&update) + + require.True(t, update.Balance.Eq(&target.Balance)) + require.EqualValues(t, update.Nonce, target.Nonce) + require.EqualValues(t, update.CodeHash, target.CodeHash) + require.EqualValues(t, 0, target.StorageLen) + + update.Reset() + + update.Balance.SetUint64(rnd.Uint64() + rnd.Uint64()) + update.Nonce = rand.Uint64() + rnd.Read(update.Storage[:]) + update.StorageLen = len(update.Storage) + update.Flags = NonceUpdate | BalanceUpdate | StorageUpdate + + target.reset() + target.setFromUpdate(&update) + + require.True(t, update.Balance.Eq(&target.Balance)) + require.EqualValues(t, update.Nonce, target.Nonce) + require.EqualValues(t, update.CodeHash, target.CodeHash) + require.EqualValues(t, update.StorageLen, target.StorageLen) + require.EqualValues(t, update.Storage[:update.StorageLen], target.Storage[:target.StorageLen]) + + update.Reset() + + update.Balance.SetUint64(rnd.Uint64() + rnd.Uint64()) + update.Nonce = rand.Uint64() + rnd.Read(update.Storage[:rnd.Intn(len(update.Storage))]) + update.StorageLen = len(update.Storage) + update.Flags = NonceUpdate | BalanceUpdate | StorageUpdate + + target.reset() + target.setFromUpdate(&update) + + require.True(t, update.Balance.Eq(&target.Balance)) + require.EqualValues(t, update.Nonce, target.Nonce) + require.EqualValues(t, update.CodeHash, target.CodeHash) + require.EqualValues(t, EmptyCodeHashArray[:], target.CodeHash) + require.EqualValues(t, update.StorageLen, target.StorageLen) + require.EqualValues(t, update.Storage[:update.StorageLen], target.Storage[:target.StorageLen]) + + update.Reset() + update.Flags = DeleteUpdate + target.reset() + target.setFromUpdate(&update) + + require.True(t, update.Balance.Eq(&target.Balance)) + require.EqualValues(t, update.Nonce, target.Nonce) + require.EqualValues(t, EmptyCodeHashArray[:], target.CodeHash) + require.EqualValues(t, update.StorageLen, target.StorageLen) + require.EqualValues(t, update.Storage[:update.StorageLen], target.Storage[:target.StorageLen]) +} From 8b39a380b53f9ef5e1aa48ce99c5e1b3a0b600bd Mon Sep 17 00:00:00 2001 From: awskii Date: Tue, 6 Aug 2024 16:53:59 +0100 Subject: [PATCH 5/5] save --- erigon-lib/commitment/hex_patricia_hashed.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/erigon-lib/commitment/hex_patricia_hashed.go b/erigon-lib/commitment/hex_patricia_hashed.go 
index 3f0963f00ae..7f0d853d0d4 100644 --- a/erigon-lib/commitment/hex_patricia_hashed.go +++ b/erigon-lib/commitment/hex_patricia_hashed.go @@ -1185,19 +1185,19 @@ func (hph *HexPatriciaHashed) deleteCell(hashedKey []byte) { } return } - col := int(hashedKey[hph.currentKeyLen]) - cell = &hph.grid[row][col] - nib := uint16(1) << col - if hph.afterMap[row]&nib != 0 { + nibble := int(hashedKey[hph.currentKeyLen]) + cell = &hph.grid[row][nibble] + col := uint16(1) << nibble + if hph.afterMap[row]&col != 0 { // Prevent "spurios deletions", i.e. deletion of absent items - hph.touchMap[row] |= nib - hph.afterMap[row] &^= nib + hph.touchMap[row] |= col + hph.afterMap[row] &^= col if hph.trace { - fmt.Printf("deleteCell setting (%d, %x)\n", row, col) + fmt.Printf("deleteCell setting (%d, %x)\n", row, nibble) } } else { if hph.trace { - fmt.Printf("deleteCell ignoring (%d, %x)\n", row, col) + fmt.Printf("deleteCell ignoring (%d, %x)\n", row, nibble) } } }
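
For readers tracking the central interface change in this series (PatriciaContext.GetAccount/GetStorage now return a *Update instead of populating a *Cell, with cells absorbing reads via setFromUpdate/Merge), the following is a minimal, illustrative sketch of a caller-side context written against the new signatures. It is not part of the patch: it assumes only the exported Update fields and flag constants visible above, and the memCtx type, its maps, and the package name are hypothetical.

// Hypothetical sketch (not from this patch): an in-memory context showing the
// new read path, where account/storage reads return *commitment.Update values
// instead of mutating a *Cell.
package example

import (
	"github.com/erigontech/erigon-lib/commitment"
	"github.com/holiman/uint256"
)

// memCtx is a stand-in for a real PatriciaContext implementation; only the two
// getters changed by this patch are sketched here.
type memCtx struct {
	accounts map[string]struct {
		nonce    uint64
		balance  uint256.Int
		codeHash [32]byte
	}
	storage map[string][]byte
}

// GetAccount mirrors the shape used in this patch: a missing key is reported
// as a DeleteUpdate, a present one sets the relevant account flags.
func (m *memCtx) GetAccount(plainKey []byte) (*commitment.Update, error) {
	u := new(commitment.Update)
	u.Reset()
	acc, ok := m.accounts[string(plainKey)]
	if !ok {
		u.Flags = commitment.DeleteUpdate
		return u, nil
	}
	u.Flags = commitment.NonceUpdate | commitment.BalanceUpdate | commitment.CodeUpdate
	u.Nonce = acc.nonce
	u.Balance.Set(&acc.balance)
	copy(u.CodeHash[:], acc.codeHash[:])
	return u, nil
}

// GetStorage returns a StorageUpdate for present slots and a DeleteUpdate
// for empty or missing ones.
func (m *memCtx) GetStorage(plainKey []byte) (*commitment.Update, error) {
	u := new(commitment.Update)
	u.Reset()
	val, ok := m.storage[string(plainKey)]
	if !ok || len(val) == 0 {
		u.Flags = commitment.DeleteUpdate
		return u, nil
	}
	u.Flags = commitment.StorageUpdate
	u.StorageLen = copy(u.Storage[:], val)
	return u, nil
}

As in the SharedDomainsCommitmentContext changes above, a missing key is surfaced as DeleteUpdate rather than a nil update, so the trie folds the cell away instead of having to special-case nil reads.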