Skip to content

Commit

Permalink
Minor hashing-related optimizations.
Browse files Browse the repository at this point in the history
This commit contains three classes of optimizations:
 - Reducing the number of unnecessary hash copies
 - Improving the performance of the DoubleSha256 function
 - Making a couple of minor optimizations to the ShaHash functions

The first class is a result of the Bytes function on a ShaHash making a
copy of the bytes before returning them.  It really should have been named
CloneBytes, but that would break the API now.

To address this, a comment has been added to the function which explicitly
calls out the copy behavior.  In addition, all call sites of .Bytes on a
ShaHash in the code base have been updated to simply slice the array when
a copy is not needed.  This saves a significant amount of data copying.

The second optimization modifies the DoubleSha256 function to directly use
fastsha256.Sum256 instead of the hasher interface.  This reduces the
number of allocations needed.  A benchmark for the function has been added
as well.

old: BenchmarkDoubleSha256  500000   3691 ns/op   192 B/op   3 allocs/op
new: BenchmarkDoubleSha256  500000   3081 ns/op    32 B/op   1 allocs/op

The final optimizations are for the ShaHash IsEqual and SetBytes functions
which have been modified to make use of the fact the type is an array and
remove an unneeded subslice.
  • Loading branch information
davecgh committed Apr 4, 2015
1 parent 07176c8 commit 8269246
Show file tree
Hide file tree
Showing 9 changed files with 46 additions and 30 deletions.
7 changes: 3 additions & 4 deletions blockchain/difficulty.go
Original file line number Diff line number Diff line change
Expand Up @@ -57,15 +57,14 @@ var (
// perform math comparisons.
func ShaHashToBig(hash *wire.ShaHash) *big.Int {
	// A ShaHash is in little-endian, but the big package wants the bytes
	// in big-endian, so reverse them.  Dereferencing the pointer copies
	// the underlying array by value, so reversing the local buffer in
	// place cannot modify the caller's hash.
	buf := *hash
	blen := len(buf)
	for i := 0; i < blen/2; i++ {
		buf[i], buf[blen-1-i] = buf[blen-1-i], buf[i]
	}

	// SetBytes interprets the slice as an unsigned big-endian integer.
	return new(big.Int).SetBytes(buf[:])
}

// CompactToBig converts a compact representation of a whole number N to an
Expand Down
4 changes: 2 additions & 2 deletions blockchain/merkle.go
Original file line number Diff line number Diff line change
Expand Up @@ -31,8 +31,8 @@ func nextPowerOfTwo(n int) int {
func HashMerkleBranches(left *wire.ShaHash, right *wire.ShaHash) *wire.ShaHash {
// Concatenate the left and right nodes.
var sha [wire.HashSize * 2]byte
copy(sha[:wire.HashSize], left.Bytes())
copy(sha[wire.HashSize:], right.Bytes())
copy(sha[:wire.HashSize], left[:])
copy(sha[wire.HashSize:], right[:])

// Create a new sha hash from the double sha 256. Ignore the error
// here since SetBytes can't fail here due to the fact DoubleSha256
Expand Down
7 changes: 3 additions & 4 deletions database/ldb/block.go
Original file line number Diff line number Diff line change
Expand Up @@ -134,10 +134,9 @@ func (db *LevelDb) setBlk(sha *wire.ShaHash, blkHeight int64, buf []byte) {
shaKey := shaBlkToKey(sha)
blkKey := int64ToKey(blkHeight)

shaB := sha.Bytes()
blkVal := make([]byte, len(shaB)+len(buf))
copy(blkVal[0:], shaB)
copy(blkVal[len(shaB):], buf)
blkVal := make([]byte, len(sha)+len(buf))
copy(blkVal[0:], sha[:])
copy(blkVal[len(sha):], buf)

db.lBatch().Put(shaKey, lw[:])
db.lBatch().Put(blkKey, blkVal)
Expand Down
17 changes: 9 additions & 8 deletions database/ldb/leveldb.go
Original file line number Diff line number Diff line change
Expand Up @@ -641,8 +641,7 @@ func int64ToKey(keyint int64) []byte {
}

// shaBlkToKey returns the database key for the provided block hash.  It
// slices the hash array directly, which avoids the copy that sha.Bytes
// would make; the returned slice aliases the caller's hash and must not
// be modified.
func shaBlkToKey(sha *wire.ShaHash) []byte {
	return sha[:]
}

// These are used here and in tx.go's deleteOldAddrIndex() to prevent deletion
Expand All @@ -651,15 +650,17 @@ var recordSuffixTx = []byte{'t', 'x'}
var recordSuffixSpentTx = []byte{'s', 'x'}

// shaTxToKey returns the database key for the provided transaction hash.
// The key is the hash followed by the "tx" record suffix.  A fresh,
// exactly-sized slice is allocated so the result does not alias the hash.
func shaTxToKey(sha *wire.ShaHash) []byte {
	key := make([]byte, len(sha)+len(recordSuffixTx))
	copy(key, sha[:])
	copy(key[len(sha):], recordSuffixTx)
	return key
}

// shaSpentTxToKey returns the database key for the provided spent
// transaction hash.  The key is the hash followed by the "sx" record
// suffix.  A fresh, exactly-sized slice is allocated so the result does
// not alias the hash.
func shaSpentTxToKey(sha *wire.ShaHash) []byte {
	key := make([]byte, len(sha)+len(recordSuffixSpentTx))
	copy(key, sha[:])
	copy(key[len(sha):], recordSuffixSpentTx)
	return key
}

func (db *LevelDb) lBatch() *leveldb.Batch {
Expand Down
2 changes: 1 addition & 1 deletion database/ldb/tx.go
Original file line number Diff line number Diff line change
Expand Up @@ -537,7 +537,7 @@ func (db *LevelDb) UpdateAddrIndexForBlock(blkSha *wire.ShaHash, blkHeight int64

// Update tip of addrindex.
newIndexTip := make([]byte, 40, 40)
copy(newIndexTip[0:32], blkSha.Bytes())
copy(newIndexTip[0:32], blkSha[:])
binary.LittleEndian.PutUint64(newIndexTip[32:40], uint64(blkHeight))
batch.Put(addrIndexMetaDataKey, newIndexTip)

Expand Down
4 changes: 2 additions & 2 deletions server.go
Original file line number Diff line number Diff line change
Expand Up @@ -215,13 +215,13 @@ func (s *server) handleUpdatePeerHeights(state *peerState, umsg updatePeerHeight
return
}

latestBlkSha := p.lastAnnouncedBlock.Bytes()
latestBlkSha := p.lastAnnouncedBlock[:]
p.StatsMtx.Unlock()

// If the peer has recently announced a block, and this block
// matches our newly accepted block, then update their block
// height.
if bytes.Equal(latestBlkSha, umsg.newSha.Bytes()) {
if bytes.Equal(latestBlkSha, umsg.newSha[:]) {
p.UpdateLastBlockHeight(umsg.newHeight)
p.UpdateLastAnnouncedBlock(nil)
}
Expand Down
17 changes: 17 additions & 0 deletions wire/bench_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -392,3 +392,20 @@ func BenchmarkTxSha(b *testing.B) {
genesisCoinbaseTx.TxSha()
}
}

// BenchmarkDoubleSha256 performs a benchmark on how long it takes to perform
// a double sha 256.
func BenchmarkDoubleSha256(b *testing.B) {
	// Serialize the transaction once up front since it is only setup and
	// should not be included in the timed portion of the benchmark.
	var buf bytes.Buffer
	if err := genesisCoinbaseTx.Serialize(&buf); err != nil {
		// Fatalf (rather than Errorf+return) aborts the benchmark
		// immediately on a setup failure.
		b.Fatalf("Serialize: unexpected error: %v", err)
	}
	txBytes := buf.Bytes()
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		_ = DoubleSha256(txBytes)
	}
}
9 changes: 3 additions & 6 deletions wire/common.go
Original file line number Diff line number Diff line change
Expand Up @@ -522,10 +522,7 @@ func RandomUint64() (uint64, error) {

// DoubleSha256 calculates sha256(sha256(b)) and returns the resulting bytes.
//
// Sum256 is used instead of the hash.Hash interface since it returns the
// digest as a stack-allocated array, avoiding the per-call allocations the
// hasher's Sum(nil) would make.
func DoubleSha256(b []byte) []byte {
	first := fastsha256.Sum256(b)
	second := fastsha256.Sum256(first[:])
	return second[:]
}
9 changes: 6 additions & 3 deletions wire/shahash.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,6 @@
package wire

import (
"bytes"
"encoding/hex"
"fmt"
)
Expand Down Expand Up @@ -34,6 +33,10 @@ func (hash ShaHash) String() string {
}

// Bytes returns the bytes which represent the hash as a byte slice.
//
// NOTE: This makes a copy of the bytes and should have probably been named
// CloneBytes. It is generally cheaper to just slice the hash directly thereby
// reusing the same bytes rather than calling this method.
func (hash *ShaHash) Bytes() []byte {
newHash := make([]byte, HashSize)
copy(newHash, hash[:])
Expand All @@ -49,14 +52,14 @@ func (hash *ShaHash) SetBytes(newHash []byte) error {
return fmt.Errorf("invalid sha length of %v, want %v", nhlen,
HashSize)
}
copy(hash[:], newHash[0:HashSize])
copy(hash[:], newHash)

return nil
}

// IsEqual returns true if target is the same as hash.
func (hash *ShaHash) IsEqual(target *ShaHash) bool {
	// ShaHash is an array type, so the == operator performs a fast
	// element-wise comparison with no allocations.  Note both receivers
	// are dereferenced, so a nil hash or target will panic, matching the
	// previous bytes.Equal(hash[:], target[:]) behavior.
	return *hash == *target
}

// NewShaHash returns a new ShaHash from a byte slice. An error is returned if
Expand Down

0 comments on commit 8269246

Please sign in to comment.