Skip to content

Commit

Permalink
Fix btcsuite#340 and DropAfterBlockBySha/NewestSha bug.
Browse files Browse the repository at this point in the history
- Delete spent TX in setclearSpentData when unspent by block
  disconnect on reorg.

- Test spent TX deletion when reorg causes block disconnect.

- Test for correct NewestSha results after DropAfterBlockBySha.

- Fix DropAfterBlockBySha to update info for NewestSha.
  • Loading branch information
aakselrod committed Mar 27, 2015
1 parent ead3915 commit ccb7d4c
Show file tree
Hide file tree
Showing 4 changed files with 196 additions and 1 deletion.
9 changes: 9 additions & 0 deletions database/db_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -177,3 +177,12 @@ func TestInterface(t *testing.T) {
}
}
}

// TestReorganization performs reorganization tests for each supported DB type
func TestReorganization(t *testing.T) {
	for _, dbType := range database.SupportedDBs() {
		// Skip any backend explicitly excluded from the test matrix.
		if _, skip := ignoreDbTypes[dbType]; skip {
			continue
		}
		testReorganization(t, dbType)
	}
}
6 changes: 5 additions & 1 deletion database/ldb/leveldb.go
Original file line number Diff line number Diff line change
Expand Up @@ -348,6 +348,10 @@ func (db *LevelDb) DropAfterBlockBySha(sha *wire.ShaHash) (rerr error) {
db.lBatch().Delete(int64ToKey(height))
}

// update the last block cache
db.lastBlkShaCached = true
db.lastBlkSha = *sha
db.lastBlkIdx = keepidx
db.nextBlock = keepidx + 1

return nil
Expand Down Expand Up @@ -546,7 +550,7 @@ func (db *LevelDb) setclearSpentData(txsha *wire.ShaHash, idx uint32, set bool)
spentTxList[len(spentTxList)-1] = nil
if len(spentTxList) == 1 {
// write entry to delete tx from spent pool
// XXX
db.txSpentUpdateMap[*txsha] = &spentTxUpdate{delete: true}
} else {
spentTxList = spentTxList[:len(spentTxList)-1]
// XXX format sTxList and set update Table
Expand Down
182 changes: 182 additions & 0 deletions database/reorg_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,182 @@
// Copyright (c) 2013-2014 Conformal Systems LLC.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package database_test

import (
"compress/bzip2"
"encoding/binary"
"io"
"os"
"path/filepath"
"strings"
"testing"

"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil"
)

// testReorganization performs reorganization tests for the passed DB type.
// Much of the setup is copied from the blockchain package, but the test looks
// to see if each TX in each block in the best chain can be fetched using
// FetchTxBySha. If not, then there's a bug.
func testReorganization(t *testing.T, dbType string) {
	db, teardown, err := createDB(dbType, "reorganization", true)
	if err != nil {
		t.Errorf("Failed to create test database (%s) %v", dbType, err)
		return
	}
	defer teardown()

	blocks, err := loadReorgBlocks("reorgblocks.bz2")
	if err != nil {
		t.Errorf("Error loading file: %v", err)
		return
	}

	// Insert the first three blocks to establish the initial chain.
	for i := int64(0); i <= 2; i++ {
		blkHash, err := blocks[i].Sha()
		if err != nil {
			t.Errorf("Error getting SHA for block %d: %v", i, err)
			return
		}
		if _, err = db.InsertBlock(blocks[i]); err != nil {
			t.Errorf("Error inserting block %d (%v): %v", i, blkHash, err)
			return
		}
	}

	// Disconnect blocks 2 and 1 in turn, simulating the disconnect phase of
	// a reorganization, and verify the newest-block info after each drop.
	for i := int64(1); i >= 0; i-- {
		blkHash, err := blocks[i].Sha()
		if err != nil {
			t.Errorf("Error getting SHA for block %d: %v", i, err)
			return
		}
		err = db.DropAfterBlockBySha(blkHash)
		if err != nil {
			t.Errorf("Error removing block %d for reorganization: %v", i, err)
			return
		}
		// Exercise NewestSha() to make sure DropAfterBlockBySha() updates the
		// info correctly
		maxHash, blkHeight, err := db.NewestSha()
		if err != nil {
			t.Errorf("Error getting newest block info: %v", err)
			return
		}
		if !maxHash.IsEqual(blkHash) || blkHeight != i {
			t.Errorf(
				"NewestSha returned %v (%v), expected %v (%v)",
				blkHeight,
				maxHash,
				i,
				blkHash,
			)
			return
		}
	}

	// Connect the side-chain blocks (the "A" chain), completing the reorg.
	for i := int64(3); i < int64(len(blocks)); i++ {
		blkHash, err := blocks[i].Sha()
		if err != nil {
			t.Errorf("Error getting SHA for block %dA: %v", i-2, err)
			return
		}
		if _, err = db.InsertBlock(blocks[i]); err != nil {
			t.Errorf("Error inserting block %dA (%v): %v", i-2, blkHash, err)
			return
		}
	}

	_, maxHeight, err := db.NewestSha()
	if err != nil {
		t.Errorf("Error getting newest block info: %v", err)
		return
	}

	// Every transaction in every block of the final best chain must be
	// fetchable by hash; a failure here indicates stale spent-TX data left
	// behind by the reorganization (the bug this test guards against).
	for i := int64(0); i <= maxHeight; i++ {
		blkHash, err := db.FetchBlockShaByHeight(i)
		if err != nil {
			t.Errorf("Error fetching SHA for block %d: %v", i, err)
			return
		}
		block, err := db.FetchBlockBySha(blkHash)
		if err != nil {
			t.Errorf("Error fetching block %d (%v): %v", i, blkHash, err)
			return
		}
		for _, tx := range block.Transactions() {
			if _, err := db.FetchTxBySha(tx.Sha()); err != nil {
				t.Errorf("Error fetching transaction %v: %v", tx.Sha(), err)
				return
			}
		}
	}
}

// loadReorgBlocks reads files containing bitcoin block data (bzipped but
// otherwise in the format bitcoind writes) from disk and returns them as an
// array of btcutil.Block. This is copied from the blockchain package, which
// itself largely borrowed it from the test code in this package.
func loadReorgBlocks(filename string) (blocks []*btcutil.Block, err error) {
	filename = filepath.Join("testdata/", filename)

	var network = wire.SimNet
	var dr io.Reader
	var fi io.ReadCloser

	fi, err = os.Open(filename)
	if err != nil {
		return
	}
	defer fi.Close()

	// Transparently decompress bzip2 files.
	if strings.HasSuffix(filename, ".bz2") {
		dr = bzip2.NewReader(fi)
	} else {
		dr = fi
	}

	for {
		// Each block is preceded by a 4-byte network magic and a 4-byte
		// little-endian length, matching bitcoind's blk*.dat format.
		var rintbuf uint32
		err = binary.Read(dr, binary.LittleEndian, &rintbuf)
		if err == io.EOF {
			// Hit end of file at the expected offset: no warning.
			err = nil
			break
		}
		if err != nil {
			break
		}
		if rintbuf != uint32(network) {
			break
		}

		// Read the serialized block length, checking the error before
		// trusting the value.
		err = binary.Read(dr, binary.LittleEndian, &rintbuf)
		if err != nil {
			break
		}
		blocklen := rintbuf

		// Read exactly blocklen bytes; io.ReadFull errors on short reads,
		// unlike a bare Read whose result was previously ignored.
		rbytes := make([]byte, blocklen)
		if _, err = io.ReadFull(dr, rbytes); err != nil {
			break
		}

		var block *btcutil.Block
		block, err = btcutil.NewBlockFromBytes(rbytes)
		if err != nil {
			return
		}
		blocks = append(blocks, block)
	}

	return
}
Binary file added database/testdata/reorgblocks.bz2
Binary file not shown.

0 comments on commit ccb7d4c

Please sign in to comment.