diff --git a/database2/README.md b/database2/README.md
new file mode 100644
index 0000000000..f10131a804
--- /dev/null
+++ b/database2/README.md
@@ -0,0 +1,77 @@
+database
+========
+
+[![Build Status](https://travis-ci.org/btcsuite/btcd.png?branch=master)]
+(https://travis-ci.org/btcsuite/btcd)
+
+Package database provides a block and metadata storage database.
+
+Please note that this package is intended to enable btcd to support different
+database backends; it is not something a client can access directly, as only
+one entity can have the database open at a time (for most database backends),
+and that entity will be btcd.
+
+When a client wants programmatic access to the data provided by btcd, they'll
+likely want to use the [btcrpcclient](https://github.com/btcsuite/btcrpcclient)
+package which makes use of the [JSON-RPC API]
+(https://github.com/btcsuite/btcd/tree/master/docs/json_rpc_api.md).
+
+However, this package could be extremely useful for any applications requiring
+Bitcoin block storage capabilities.
+
+As of April 2015, there are over 350,000 blocks in the Bitcoin block chain and
+over 64 million transactions (which turns out to be over 35GB of data). This
+package provides a database layer to store and retrieve this data in a simple
+and efficient manner.
+
+The default backend, ffboltdb, has a strong focus on speed, efficiency, and
+robustness. It makes use of zero-copy memory mapping for the metadata, flat
+files for block storage, and checksums in key areas to ensure data integrity.
+
+## Feature Overview
+
+- Key/value metadata store
+- Bitcoin block storage
+- Efficient retrieval of block headers and regions (transactions, scripts, etc)
+- Read-only and read-write transactions with both manual and managed modes
+- Nested buckets
+- Iteration support including cursors with seek capability
+- Supports registration of backend databases
+- Comprehensive test coverage
+
+## Documentation
+
+[![GoDoc](https://godoc.org/github.com/btcsuite/btcd/database?status.png)]
+(http://godoc.org/github.com/btcsuite/btcd/database)
+
+Full `go doc` style documentation for the project can be viewed online without
+installing this package by using the GoDoc site here:
+http://godoc.org/github.com/btcsuite/btcd/database
+
+You can also view the documentation locally once the package is installed with
+the `godoc` tool by running `godoc -http=":6060"` and pointing your browser to
+http://localhost:6060/pkg/github.com/btcsuite/btcd/database
+
+## Installation
+
+```bash
+$ go get github.com/btcsuite/btcd/database
+```
+
+## Examples

+* [Basic Usage Example]
+  (http://godoc.org/github.com/btcsuite/btcd/database#example-package--BasicUsage)
+  Demonstrates creating a new database and using a managed read-write
+  transaction to store and retrieve metadata. A condensed inline version is
+  shown below.
+
+* [Block Storage and Retrieval Example]
+  (http://godoc.org/github.com/btcsuite/btcd/database#example-package--BlockStorageAndRetrieval)
+  Demonstrates creating a new database, using a managed read-write transaction
+  to store a block, and then using a managed read-only transaction to fetch the
+  block.
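+
+As a quick, self-contained sketch of the same pattern (adapted from the Basic
+Usage example; the temporary database path and key/value names here are only
+illustrative), the following program creates a database with the default
+ffboltdb backend and stores and reads back a metadata key inside a managed
+read-write transaction:
+
+```Go
+package main
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/btcsuite/btcd/database2"
+	_ "github.com/btcsuite/btcd/database2/ffboltdb"
+	"github.com/btcsuite/btcd/wire"
+)
+
+func main() {
+	// Create a database and schedule it to be closed and removed on exit.
+	dbPath := filepath.Join(os.TempDir(), "readme-example")
+	db, err := database.Create("ffboltdb", dbPath, wire.MainNet)
+	if err != nil {
+		fmt.Println(err)
+		return
+	}
+	defer os.RemoveAll(dbPath)
+	defer db.Close()
+
+	// Store a key and read it back within one managed transaction.  The
+	// transaction is committed when the closure returns nil and rolled
+	// back if it returns an error.
+	err = db.Update(func(tx database.Tx) error {
+		meta := tx.Metadata()
+		if err := meta.Put([]byte("mykey"), []byte("myvalue")); err != nil {
+			return err
+		}
+		fmt.Printf("mykey = %s\n", meta.Get([]byte("mykey")))
+		return nil
+	})
+	if err != nil {
+		fmt.Println(err)
+	}
+}
+```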
+
+## License
+
+Package database is licensed under the [copyfree](http://copyfree.org) ISC
+License.
diff --git a/database2/cmd/dbtool/fetchblock.go b/database2/cmd/dbtool/fetchblock.go
new file mode 100644
index 0000000000..8fab07fdb5
--- /dev/null
+++ b/database2/cmd/dbtool/fetchblock.go
@@ -0,0 +1,62 @@
+// Copyright (c) 2015 Conformal Systems LLC.
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	"encoding/hex"
+	"errors"
+	"time"
+
+	"github.com/btcsuite/btcd/database2"
+	"github.com/btcsuite/btcd/wire"
+)
+
+// fetchBlockCmd defines the configuration options for the fetchblock command.
+type fetchBlockCmd struct{}
+
+var (
+	// fetchBlockCfg defines the configuration options for the command.
+	fetchBlockCfg = fetchBlockCmd{}
+)
+
+// Execute is the main entry point for the command.  It's invoked by the parser.
+func (cmd *fetchBlockCmd) Execute(args []string) error {
+	// Setup the global config options and ensure they are valid.
+	if err := setupGlobalConfig(); err != nil {
+		return err
+	}
+
+	if len(args) != 1 {
+		return errors.New("required block hash parameter not specified")
+	}
+	blockHash, err := wire.NewShaHashFromStr(args[0])
+	if err != nil {
+		return err
+	}
+
+	// Load the block database.
+	db, err := loadBlockDB()
+	if err != nil {
+		return err
+	}
+	defer db.Close()
+
+	return db.View(func(tx database.Tx) error {
+		log.Infof("Fetching block %s", blockHash)
+		startTime := time.Now()
+		blockBytes, err := tx.FetchBlock(blockHash)
+		if err != nil {
+			return err
+		}
+		log.Infof("Loaded block in %v", time.Now().Sub(startTime))
+		log.Infof("Block Hex: %s", hex.EncodeToString(blockBytes))
+		return nil
+	})
+}
+
+// Usage overrides the usage display for the command.
+func (cmd *fetchBlockCmd) Usage() string {
+	return "<block-hash>"
+}
diff --git a/database2/cmd/dbtool/fetchblockregion.go b/database2/cmd/dbtool/fetchblockregion.go
new file mode 100644
index 0000000000..f8ee0af221
--- /dev/null
+++ b/database2/cmd/dbtool/fetchblockregion.go
@@ -0,0 +1,89 @@
+// Copyright (c) 2015 Conformal Systems LLC.
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	"encoding/hex"
+	"errors"
+	"strconv"
+	"time"
+
+	"github.com/btcsuite/btcd/database2"
+	"github.com/btcsuite/btcd/wire"
+)
+
+// blockRegionCmd defines the configuration options for the fetchblockregion
+// command.
+type blockRegionCmd struct{}
+
+var (
+	// blockRegionCfg defines the configuration options for the command.
+	blockRegionCfg = blockRegionCmd{}
+)
+
+// Execute is the main entry point for the command.  It's invoked by the parser.
+func (cmd *blockRegionCmd) Execute(args []string) error {
+	// Setup the global config options and ensure they are valid.
+	if err := setupGlobalConfig(); err != nil {
+		return err
+	}
+
+	// Ensure expected arguments.
+	if len(args) < 1 {
+		return errors.New("required block hash parameter not specified")
+	}
+	if len(args) < 2 {
+		return errors.New("required start offset parameter not " +
+			"specified")
+	}
+	if len(args) < 3 {
+		return errors.New("required region length parameter not " +
+			"specified")
+	}
+
+	// Parse arguments.
+	blockHash, err := wire.NewShaHashFromStr(args[0])
+	if err != nil {
+		return err
+	}
+	startOffset, err := strconv.ParseUint(args[1], 10, 32)
+	if err != nil {
+		return err
+	}
+	regionLen, err := strconv.ParseUint(args[2], 10, 32)
+	if err != nil {
+		return err
+	}
+
+	// Load the block database.
+	db, err := loadBlockDB()
+	if err != nil {
+		return err
+	}
+	defer db.Close()
+
+	return db.View(func(tx database.Tx) error {
+		log.Infof("Fetching block region %s<%d:%d>", blockHash,
+			startOffset, regionLen)
+		region := database.BlockRegion{
+			Hash:   blockHash,
+			Offset: uint32(startOffset),
+			Len:    uint32(regionLen),
+		}
+		startTime := time.Now()
+		regionBytes, err := tx.FetchBlockRegion(&region)
+		if err != nil {
+			return err
+		}
+		log.Infof("Loaded block region in %v", time.Now().Sub(startTime))
+		log.Infof("Region Hex: %s", hex.EncodeToString(regionBytes))
+		return nil
+	})
+}
+
+// Usage overrides the usage display for the command.
+func (cmd *blockRegionCmd) Usage() string {
+	return "<block-hash> <start-offset> <region-length>"
+}
diff --git a/database2/cmd/dbtool/globalconfig.go b/database2/cmd/dbtool/globalconfig.go
new file mode 100644
index 0000000000..4de64dda4a
--- /dev/null
+++ b/database2/cmd/dbtool/globalconfig.go
@@ -0,0 +1,121 @@
+// Copyright (c) 2015 Conformal Systems LLC.
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/btcsuite/btcd/chaincfg"
+	"github.com/btcsuite/btcd/database2"
+	_ "github.com/btcsuite/btcd/database2/ffboltdb"
+	"github.com/btcsuite/btcd/wire"
+	"github.com/btcsuite/btcutil"
+)
+
+var (
+	btcdHomeDir     = btcutil.AppDataDir("btcd", false)
+	knownDbTypes    = database.SupportedDrivers()
+	activeNetParams = &chaincfg.MainNetParams
+
+	// Default global config.
+	cfg = &config{
+		DataDir: filepath.Join(btcdHomeDir, "data"),
+		DbType:  "ffboltdb",
+	}
+)
+
+// config defines the global configuration options.
+type config struct {
+	DataDir        string `short:"b" long:"datadir" description:"Location of the btcd data directory"`
+	DbType         string `long:"dbtype" description:"Database backend to use for the Block Chain"`
+	TestNet3       bool   `long:"testnet" description:"Use the test network"`
+	RegressionTest bool   `long:"regtest" description:"Use the regression test network"`
+	SimNet         bool   `long:"simnet" description:"Use the simulation test network"`
+}
+
+// fileExists reports whether the named file or directory exists.
+func fileExists(name string) bool {
+	if _, err := os.Stat(name); err != nil {
+		if os.IsNotExist(err) {
+			return false
+		}
+	}
+	return true
+}
+
+// validDbType returns whether or not dbType is a supported database type.
+func validDbType(dbType string) bool {
+	for _, knownType := range knownDbTypes {
+		if dbType == knownType {
+			return true
+		}
+	}
+
+	return false
+}
+
+// netName returns the name used when referring to a bitcoin network.  At the
+// time of writing, btcd currently places blocks for testnet version 3 in the
+// data and log directory "testnet", which does not match the Name field of the
+// chaincfg parameters.  This function can be used to override this directory
+// name as "testnet" when the passed active network matches wire.TestNet3.
+//
+// A proper upgrade to move the data and log directories for this network to
+// "testnet3" is planned for the future, at which point this function can be
+// removed and the network parameter's name used instead.
+func netName(chainParams *chaincfg.Params) string {
+	switch chainParams.Net {
+	case wire.TestNet3:
+		return "testnet"
+	default:
+		return chainParams.Name
+	}
+}
+
+// setupGlobalConfig examines the global configuration options for any
+// conditions which are invalid as well as performs any additional setup
+// necessary after the initial parse.
+func setupGlobalConfig() error {
+	// Multiple networks can't be selected simultaneously.
+	// Count number of network flags passed; assign active network params
+	// while we're at it.
+	numNets := 0
+	if cfg.TestNet3 {
+		numNets++
+		activeNetParams = &chaincfg.TestNet3Params
+	}
+	if cfg.RegressionTest {
+		numNets++
+		activeNetParams = &chaincfg.RegressionNetParams
+	}
+	if cfg.SimNet {
+		numNets++
+		activeNetParams = &chaincfg.SimNetParams
+	}
+	if numNets > 1 {
+		return errors.New("The testnet, regtest, and simnet params " +
+			"can't be used together -- choose one of the three")
+	}
+
+	// Validate database type.
+	if !validDbType(cfg.DbType) {
+		str := "The specified database type [%v] is invalid -- " +
+			"supported types %v"
+		return fmt.Errorf(str, cfg.DbType, knownDbTypes)
+	}
+
+	// Append the network type to the data directory so it is "namespaced"
+	// per network.  In addition to the block database, there are other
+	// pieces of data that are saved to disk such as address manager state.
+	// All data is specific to a network, so namespacing the data directory
+	// means each individual piece of serialized data does not have to
+	// worry about changing names per network and such.
+	cfg.DataDir = filepath.Join(cfg.DataDir, netName(activeNetParams))
+
+	return nil
+}
diff --git a/database2/cmd/dbtool/insecureimport.go b/database2/cmd/dbtool/insecureimport.go
new file mode 100644
index 0000000000..192371a9ae
--- /dev/null
+++ b/database2/cmd/dbtool/insecureimport.go
@@ -0,0 +1,406 @@
+// Copyright (c) 2015 Conformal Systems LLC.
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	"encoding/binary"
+	"fmt"
+	"io"
+	"os"
+	"sync"
+	"time"
+
+	"github.com/btcsuite/btcd/database2"
+	"github.com/btcsuite/btcd/wire"
+	"github.com/btcsuite/btcutil"
+)
+
+// importCmd defines the configuration options for the insecureimport command.
+type importCmd struct {
+	InFile   string `short:"i" long:"infile" description:"File containing the block(s)"`
+	Progress int    `short:"p" long:"progress" description:"Show a progress message each time this number of seconds have passed -- Use 0 to disable progress announcements"`
+}
+
+var (
+	// importCfg defines the configuration options for the command.
+	importCfg = importCmd{
+		InFile:   "bootstrap.dat",
+		Progress: 10,
+	}
+
+	// zeroHash is simply a hash with all zeros.  It is defined here to
+	// avoid creating it multiple times.
+	zeroHash = wire.ShaHash{}
+)
+
+// importResults houses the stats and result of an import operation.
+type importResults struct {
+	blocksProcessed int64
+	blocksImported  int64
+	err             error
+}
+
+// blockImporter houses information about an ongoing import from a block data
+// file to the block database.
+type blockImporter struct {
+	db                database.DB
+	r                 io.ReadSeeker
+	processQueue      chan []byte
+	doneChan          chan bool
+	errChan           chan error
+	quit              chan struct{}
+	wg                sync.WaitGroup
+	blocksProcessed   int64
+	blocksImported    int64
+	receivedLogBlocks int64
+	receivedLogTx     int64
+	lastHeight        int64
+	lastBlockTime     time.Time
+	lastLogTime       time.Time
+}
+
+// readBlock reads the next block from the input file.
+func (bi *blockImporter) readBlock() ([]byte, error) {
+	// The block file format is:
+	//  <network> <block length> <serialized block>
+	var net uint32
+	err := binary.Read(bi.r, binary.LittleEndian, &net)
+	if err != nil {
+		if err != io.EOF {
+			return nil, err
+		}
+
+		// No block and no error means there are no more blocks to read.
+ return nil, nil + } + if net != uint32(activeNetParams.Net) { + return nil, fmt.Errorf("network mismatch -- got %x, want %x", + net, uint32(activeNetParams.Net)) + } + + // Read the block length and ensure it is sane. + var blockLen uint32 + if err := binary.Read(bi.r, binary.LittleEndian, &blockLen); err != nil { + return nil, err + } + if blockLen > wire.MaxBlockPayload { + return nil, fmt.Errorf("block payload of %d bytes is larger "+ + "than the max allowed %d bytes", blockLen, + wire.MaxBlockPayload) + } + + serializedBlock := make([]byte, blockLen) + if _, err := io.ReadFull(bi.r, serializedBlock); err != nil { + return nil, err + } + + return serializedBlock, nil +} + +// processBlock potentially imports the block into the database. It first +// deserializes the raw block while checking for errors. Already known blocks +// are skipped and orphan blocks are considered errors. Returns whether the +// block was imported along with any potential errors. +// +// NOTE: This is not a safe import as it does not verify chain rules. +func (bi *blockImporter) processBlock(serializedBlock []byte) (bool, error) { + // Deserialize the block which includes checks for malformed blocks. + block, err := btcutil.NewBlockFromBytes(serializedBlock) + if err != nil { + return false, err + } + + blockSha, err := block.Sha() + if err != nil { + return false, err + } + + // update progress statistics + bi.lastBlockTime = block.MsgBlock().Header.Timestamp + bi.receivedLogTx += int64(len(block.MsgBlock().Transactions)) + + // Skip blocks that already exist. + var exists bool + err = bi.db.View(func(tx database.Tx) error { + exists, err = tx.HasBlock(blockSha) + if err != nil { + return err + } + return nil + }) + if err != nil { + return false, err + } + if exists { + return false, nil + } + + // Don't bother trying to process orphans. + prevHash := &block.MsgBlock().Header.PrevBlock + if !prevHash.IsEqual(&zeroHash) { + var exists bool + err := bi.db.View(func(tx database.Tx) error { + exists, err = tx.HasBlock(prevHash) + if err != nil { + return err + } + return nil + }) + if err != nil { + return false, err + } + if !exists { + return false, fmt.Errorf("import file contains block "+ + "%v which does not link to the available "+ + "block chain", prevHash) + } + } + + // Put the blocks into the database with no checking of chain rules. + err = bi.db.Update(func(tx database.Tx) error { + return tx.StoreBlock(block) + }) + if err != nil { + return false, err + } + + return true, nil +} + +// readHandler is the main handler for reading blocks from the import file. +// This allows block processing to take place in parallel with block reads. +// It must be run as a goroutine. +func (bi *blockImporter) readHandler() { +out: + for { + // Read the next block from the file and if anything goes wrong + // notify the status handler with the error and bail. + serializedBlock, err := bi.readBlock() + if err != nil { + bi.errChan <- fmt.Errorf("Error reading from input "+ + "file: %v", err.Error()) + break out + } + + // A nil block with no error means we're done. + if serializedBlock == nil { + break out + } + + // Send the block or quit if we've been signalled to exit by + // the status handler due to an error elsewhere. + select { + case bi.processQueue <- serializedBlock: + case <-bi.quit: + break out + } + } + + // Close the processing channel to signal no more blocks are coming. + close(bi.processQueue) + bi.wg.Done() +} + +// logProgress logs block progress as an information message. 
In order to +// prevent spam, it limits logging to one message every importCfg.Progress +// seconds with duration and totals included. +func (bi *blockImporter) logProgress() { + bi.receivedLogBlocks++ + + now := time.Now() + duration := now.Sub(bi.lastLogTime) + if duration < time.Second*time.Duration(importCfg.Progress) { + return + } + + // Truncate the duration to 10s of milliseconds. + durationMillis := int64(duration / time.Millisecond) + tDuration := 10 * time.Millisecond * time.Duration(durationMillis/10) + + // Log information about new block height. + blockStr := "blocks" + if bi.receivedLogBlocks == 1 { + blockStr = "block" + } + txStr := "transactions" + if bi.receivedLogTx == 1 { + txStr = "transaction" + } + log.Infof("Processed %d %s in the last %s (%d %s, height %d, %s)", + bi.receivedLogBlocks, blockStr, tDuration, bi.receivedLogTx, + txStr, bi.lastHeight, bi.lastBlockTime) + + bi.receivedLogBlocks = 0 + bi.receivedLogTx = 0 + bi.lastLogTime = now +} + +// processHandler is the main handler for processing blocks. This allows block +// processing to take place in parallel with block reads from the import file. +// It must be run as a goroutine. +func (bi *blockImporter) processHandler() { +out: + for { + select { + case serializedBlock, ok := <-bi.processQueue: + // We're done when the channel is closed. + if !ok { + break out + } + + bi.blocksProcessed++ + bi.lastHeight++ + imported, err := bi.processBlock(serializedBlock) + if err != nil { + bi.errChan <- err + break out + } + + if imported { + bi.blocksImported++ + } + + bi.logProgress() + + case <-bi.quit: + break out + } + } + bi.wg.Done() +} + +// statusHandler waits for updates from the import operation and notifies +// the passed doneChan with the results of the import. It also causes all +// goroutines to exit if an error is reported from any of them. +func (bi *blockImporter) statusHandler(resultsChan chan *importResults) { + select { + // An error from either of the goroutines means we're done so signal + // caller with the error and signal all goroutines to quit. + case err := <-bi.errChan: + resultsChan <- &importResults{ + blocksProcessed: bi.blocksProcessed, + blocksImported: bi.blocksImported, + err: err, + } + close(bi.quit) + + // The import finished normally. + case <-bi.doneChan: + resultsChan <- &importResults{ + blocksProcessed: bi.blocksProcessed, + blocksImported: bi.blocksImported, + err: nil, + } + } +} + +// Import is the core function which handles importing the blocks from the file +// associated with the block importer to the database. It returns a channel +// on which the results will be returned when the operation has completed. +func (bi *blockImporter) Import() chan *importResults { + // Start up the read and process handling goroutines. This setup allows + // blocks to be read from disk in parallel while being processed. + bi.wg.Add(2) + go bi.readHandler() + go bi.processHandler() + + // Wait for the import to finish in a separate goroutine and signal + // the status handler when done. + go func() { + bi.wg.Wait() + bi.doneChan <- true + }() + + // Start the status handler and return the result channel that it will + // send the results on when the import is done. + resultChan := make(chan *importResults) + go bi.statusHandler(resultChan) + return resultChan +} + +// newBlockImporter returns a new importer for the provided file reader seeker +// and database. 
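+// The returned importer is idle until Import is called, which starts the
+// read and process goroutines and returns a channel on which a single
+// *importResults is delivered when the operation finishes.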
+func newBlockImporter(db database.DB, r io.ReadSeeker) *blockImporter { + return &blockImporter{ + db: db, + r: r, + processQueue: make(chan []byte, 2), + doneChan: make(chan bool), + errChan: make(chan error), + quit: make(chan struct{}), + lastLogTime: time.Now(), + } +} + +// Execute is the main entry point for the command. It's invoked by the parser. +func (cmd *importCmd) Execute(args []string) error { + // Setup the global config options and ensure they are valid. + if err := setupGlobalConfig(); err != nil { + return err + } + + // Ensure the specified block file exists. + if !fileExists(cmd.InFile) { + str := "The specified block file [%v] does not exist" + return fmt.Errorf(str, cmd.InFile) + } + + // Load the block database. + db, err := loadBlockDB() + if err != nil { + return err + } + defer db.Close() + + // Ensure the database is sync'd and closed on Ctrl+C. + addInterruptHandler(func() { + log.Infof("Gracefully shutting down the database...") + db.Close() + }) + + fi, err := os.Open(importCfg.InFile) + if err != nil { + return err + } + defer fi.Close() + + // Create a block importer for the database and input file and start it. + // The results channel returned from start will contain an error if + // anything went wrong. + importer := newBlockImporter(db, fi) + + // Perform the import asynchronously and signal the main goroutine when + // done. This allows blocks to be processed and read in parallel. The + // results channel returned from Import contains the statistics about + // the import including an error if something went wrong. This is done + // in a separate goroutine rather than waiting directly so the main + // goroutine can be signaled for shutdown by either completion, error, + // or from the main interrupt handler. This is necessary since the main + // goroutine must be kept running long enough for the interrupt handler + // goroutine to finish. + go func() { + log.Info("Starting import") + resultsChan := importer.Import() + results := <-resultsChan + if results.err != nil { + dbErr, ok := results.err.(database.Error) + if ok && dbErr.ErrorCode != database.ErrDbNotOpen { + shutdownChannel <- results.err + return + } + } + + log.Infof("Processed a total of %d blocks (%d imported, %d "+ + "already known)", results.blocksProcessed, + results.blocksImported, + results.blocksProcessed-results.blocksImported) + shutdownChannel <- nil + }() + + // Wait for shutdown signal from either a normal completion or from the + // interrupt handler. + err = <-shutdownChannel + return err +} diff --git a/database2/cmd/dbtool/loadheaders.go b/database2/cmd/dbtool/loadheaders.go new file mode 100644 index 0000000000..5b246dbe02 --- /dev/null +++ b/database2/cmd/dbtool/loadheaders.go @@ -0,0 +1,101 @@ +// Copyright (c) 2015 Conformal Systems LLC. +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package main + +import ( + "time" + + "github.com/btcsuite/btcd/database2" + "github.com/btcsuite/btcd/wire" +) + +// headersCmd defines the configuration options for the loadheaders command. +type headersCmd struct { + Bulk bool `long:"bulk" description:"Use bulk loading of headers instead of one at a time"` +} + +var ( + // headersCfg defines the configuration options for the command. + headersCfg = headersCmd{ + Bulk: false, + } +) + +// Execute is the main entry point for the command. It's invoked by the parser. +func (cmd *headersCmd) Execute(args []string) error { + // Setup the global config options and ensure they are valid. 
+ if err := setupGlobalConfig(); err != nil { + return err + } + + // Load the block database. + db, err := loadBlockDB() + if err != nil { + return err + } + defer db.Close() + + // NOTE: This code will only work for ffboltdb. Ideally the package + // using the database would keep a metadata index of its own. + blockIdxName := []byte("ffboltdb-blockidx") + if !headersCfg.Bulk { + err = db.View(func(tx database.Tx) error { + totalHdrs := 0 + blockIdxBucket := tx.Metadata().Bucket(blockIdxName) + blockIdxBucket.ForEach(func(k, v []byte) error { + totalHdrs++ + return nil + }) + log.Infof("Loading headers for %d blocks...", totalHdrs) + numLoaded := 0 + startTime := time.Now() + blockIdxBucket.ForEach(func(k, v []byte) error { + var hash wire.ShaHash + copy(hash[:], k) + _, err := tx.FetchBlockHeader(&hash) + if err != nil { + return err + } + numLoaded++ + return nil + }) + log.Infof("Loaded %d headers in %v", numLoaded, + time.Now().Sub(startTime)) + return nil + }) + if err != nil { + return err + } + + return nil + } + + // Bulk load headers. + err = db.View(func(tx database.Tx) error { + blockIdxBucket := tx.Metadata().Bucket(blockIdxName) + hashes := make([]wire.ShaHash, 0, 500000) + blockIdxBucket.ForEach(func(k, v []byte) error { + var hash wire.ShaHash + copy(hash[:], k) + hashes = append(hashes, hash) + return nil + }) + + log.Infof("Loading headers for %d blocks...", len(hashes)) + startTime := time.Now() + hdrs, err := tx.FetchBlockHeaders(hashes) + if err != nil { + return err + } + log.Infof("Loaded %d headers in %v", len(hdrs), + time.Now().Sub(startTime)) + return nil + }) + if err != nil { + return err + } + + return nil +} diff --git a/database2/cmd/dbtool/main.go b/database2/cmd/dbtool/main.go new file mode 100644 index 0000000000..f1370b07ea --- /dev/null +++ b/database2/cmd/dbtool/main.go @@ -0,0 +1,116 @@ +// Copyright (c) 2015 Conformal Systems LLC. +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package main + +import ( + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/btcsuite/btcd/database2" + "github.com/btcsuite/btclog" + flags "github.com/btcsuite/go-flags" +) + +const ( + // blockDbNamePrefix is the prefix for the btcd block database. + blockDbNamePrefix = "blocks" +) + +var ( + log btclog.Logger + shutdownChannel = make(chan error) +) + +// loadBlockDB opens the block database and returns a handle to it. +func loadBlockDB() (database.DB, error) { + // The database name is based on the database type. + dbName := blockDbNamePrefix + "_" + cfg.DbType + dbPath := filepath.Join(cfg.DataDir, dbName) + + log.Infof("Loading block database from '%s'", dbPath) + db, err := database.Open(cfg.DbType, dbPath, activeNetParams.Net) + if err != nil { + // Return the error if it's not because the database doesn't + // exist. + if dbErr, ok := err.(database.Error); !ok || dbErr.ErrorCode != + database.ErrDbDoesNotExist { + + return nil, err + } + + // Create the db if it does not exist. + err = os.MkdirAll(cfg.DataDir, 0700) + if err != nil { + return nil, err + } + db, err = database.Create(cfg.DbType, dbPath, activeNetParams.Net) + if err != nil { + return nil, err + } + } + + log.Info("Block database loaded") + return db, nil +} + +// realMain is the real main function for the utility. It is necessary to work +// around the fact that deferred functions do not run when os.Exit() is called. +func realMain() error { + // Setup logging. 
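+	// A backend logger that is flushed on exit feeds both the tool's own
+	// subsystem logger and the database package's logger, which is set to
+	// debug level so backend diagnostics are visible.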
+ backendLogger := btclog.NewDefaultBackendLogger() + defer backendLogger.Flush() + log = btclog.NewSubsystemLogger(backendLogger, "") + dbLog := btclog.NewSubsystemLogger(backendLogger, "BCDB: ") + dbLog.SetLevel(btclog.DebugLvl) + database.UseLogger(dbLog) + + // Setup the parser options and commands. + appName := filepath.Base(os.Args[0]) + appName = strings.TrimSuffix(appName, filepath.Ext(appName)) + parserFlags := flags.Options(flags.HelpFlag | flags.PassDoubleDash) + parser := flags.NewNamedParser(appName, parserFlags) + parser.AddGroup("Global Options", "", cfg) + parser.AddCommand("insecureimport", + "Insecurely import bulk block data from bootstrap.dat", + "Insecurely import bulk block data from bootstrap.dat. "+ + "WARNING: This is NOT secure because it does NOT "+ + "verify chain rules. It is only provided for testing "+ + "purposes.", &importCfg) + parser.AddCommand("loadheaders", + "Time how long to load headers for all blocks in the database", + "", &headersCfg) + parser.AddCommand("fetchblock", + "Fetch the specific block hash from the database", "", + &fetchBlockCfg) + parser.AddCommand("fetchblockregion", + "Fetch the specified block region from the database", "", + &blockRegionCfg) + + // Parse command line and invoke the Execute function for the specified + // command. + if _, err := parser.Parse(); err != nil { + if e, ok := err.(*flags.Error); ok && e.Type == flags.ErrHelp { + parser.WriteHelp(os.Stderr) + } else { + log.Error(err) + } + + return err + } + + return nil +} + +func main() { + // Use all processor cores. + runtime.GOMAXPROCS(runtime.NumCPU()) + + // Work around defer not working after os.Exit() + if err := realMain(); err != nil { + os.Exit(1) + } +} diff --git a/database2/cmd/dbtool/signal.go b/database2/cmd/dbtool/signal.go new file mode 100644 index 0000000000..6f9510fb6e --- /dev/null +++ b/database2/cmd/dbtool/signal.go @@ -0,0 +1,82 @@ +// Copyright (c) 2013-2014 Conformal Systems LLC. +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package main + +import ( + "os" + "os/signal" +) + +// interruptChannel is used to receive SIGINT (Ctrl+C) signals. +var interruptChannel chan os.Signal + +// addHandlerChannel is used to add an interrupt handler to the list of handlers +// to be invoked on SIGINT (Ctrl+C) signals. +var addHandlerChannel = make(chan func()) + +// mainInterruptHandler listens for SIGINT (Ctrl+C) signals on the +// interruptChannel and invokes the registered interruptCallbacks accordingly. +// It also listens for callback registration. It must be run as a goroutine. +func mainInterruptHandler() { + // interruptCallbacks is a list of callbacks to invoke when a + // SIGINT (Ctrl+C) is received. + var interruptCallbacks []func() + + // isShutdown is a flag which is used to indicate whether or not + // the shutdown signal has already been received and hence any future + // attempts to add a new interrupt handler should invoke them + // immediately. + var isShutdown bool + + for { + select { + case <-interruptChannel: + // Ignore more than one shutdown signal. + if isShutdown { + log.Infof("Received SIGINT (Ctrl+C). " + + "Already shutting down...") + continue + } + + isShutdown = true + log.Infof("Received SIGINT (Ctrl+C). Shutting down...") + + // Run handlers in LIFO order. + for i := range interruptCallbacks { + idx := len(interruptCallbacks) - 1 - i + callback := interruptCallbacks[idx] + callback() + } + + // Signal the main goroutine to shutdown. 
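+			// The send happens from a separate goroutine since
+			// shutdownChannel is unbuffered; this keeps the
+			// interrupt handler loop free to process additional
+			// signals.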
+			go func() {
+				shutdownChannel <- nil
+			}()
+
+		case handler := <-addHandlerChannel:
+			// The shutdown signal has already been received, so
+			// just invoke any new handlers immediately.
+			if isShutdown {
+				handler()
+			}
+
+			interruptCallbacks = append(interruptCallbacks, handler)
+		}
+	}
+}
+
+// addInterruptHandler adds a handler to call when a SIGINT (Ctrl+C) is
+// received.
+func addInterruptHandler(handler func()) {
+	// Create the channel and start the main interrupt handler which invokes
+	// all other callbacks and exits if not already done.
+	if interruptChannel == nil {
+		interruptChannel = make(chan os.Signal, 1)
+		signal.Notify(interruptChannel, os.Interrupt)
+		go mainInterruptHandler()
+	}
+
+	addHandlerChannel <- handler
+}
diff --git a/database2/doc.go b/database2/doc.go
new file mode 100644
index 0000000000..ee8ff6936a
--- /dev/null
+++ b/database2/doc.go
@@ -0,0 +1,94 @@
+// Copyright (c) 2015 Conformal Systems LLC.
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+/*
+Package database provides a block and metadata storage database.
+
+Overview
+
+As of April 2015, there are over 350,000 blocks in the Bitcoin block chain and
+over 64 million transactions (which turns out to be over 35GB of data).  This
+package provides a database layer to store and retrieve this data in a simple
+and efficient manner.
+
+The default backend, ffboltdb, has a strong focus on speed, efficiency, and
+robustness.  It makes use of zero-copy memory mapping for the metadata, flat
+files for block storage, and checksums in key areas to ensure data integrity.
+
+A quick overview of the features database provides is as follows:
+
+ - Key/value metadata store
+ - Bitcoin block storage
+ - Efficient retrieval of block headers and regions (transactions, scripts, etc)
+ - Read-only and read-write transactions with both manual and managed modes
+ - Nested buckets
+ - Supports registration of backend databases
+ - Comprehensive test coverage
+
+Database
+
+The main entry point is the DB interface.  It exposes functionality for
+transactional-based access and storage of metadata and block data.  It is
+obtained via the Create and Open functions which take a database type string
+that identifies the specific database driver (backend) to use as well as
+arguments specific to the specified driver.
+
+Namespaces
+
+The Namespace interface is an abstraction that provides facilities for obtaining
+transactions (the Tx interface) that are the basis of all database reads and
+writes.  Unlike some database interfaces that support reading and writing
+without transactions, this interface requires transactions even when only
+reading or writing a single key.
+
+The Begin function provides an unmanaged transaction while the View and Update
+functions provide a managed transaction.  These are described in more detail
+below.
+
+Transactions
+
+The Tx interface provides facilities for rolling back or committing changes that
+took place while the transaction was active.  It also provides the root metadata
+bucket under which all keys, values, and nested buckets are stored.  A
+transaction can either be read-only or read-write and managed or unmanaged.
+
+Managed versus Unmanaged Transactions
+
+A managed transaction is one where the caller provides a function to execute
+within the context of the transaction and the commit or rollback is handled
+automatically depending on whether or not the provided function returns an
+error.  Attempting to manually call Rollback or Commit on the managed
+transaction will result in a panic.
+
+An unmanaged transaction, on the other hand, requires the caller to manually
+call Commit or Rollback when they are finished with it.  Leaving transactions
+open for long periods of time can have several adverse effects, so it is
+recommended that managed transactions are used instead.
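+
+For example, assuming an open DB instance named db (the key, value, and the
+writable flag passed to Begin are illustrative here), a managed update and its
+unmanaged equivalent look like this:
+
+	// Managed: commit and rollback are handled automatically.
+	if err := db.Update(func(tx database.Tx) error {
+		return tx.Metadata().Put([]byte("mykey"), []byte("myvalue"))
+	}); err != nil {
+		return err
+	}
+
+	// Unmanaged: the caller must commit or roll back explicitly.
+	tx, err := db.Begin(true)
+	if err != nil {
+		return err
+	}
+	if err := tx.Metadata().Put([]byte("mykey"), []byte("myvalue")); err != nil {
+		tx.Rollback()
+		return err
+	}
+	return tx.Commit()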
+
+Buckets
+
+The Bucket interface provides the ability to manipulate key/value pairs and
+nested buckets as well as iterate through them.
+
+The Get, Put, and Delete functions work with key/value pairs, while the Bucket,
+CreateBucket, CreateBucketIfNotExists, and DeleteBucket functions work with
+buckets.  The ForEach function allows the caller to provide a function to be
+called with each key/value pair and nested bucket in the current bucket.
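+
+A short sketch of typical bucket usage inside a managed transaction (the
+bucket and key names are illustrative, and fmt is assumed to be imported):
+
+	err := db.Update(func(tx database.Tx) error {
+		// Create (or reuse) a nested bucket under the root metadata
+		// bucket.
+		bucket, err := tx.Metadata().CreateBucketIfNotExists([]byte("mybucket"))
+		if err != nil {
+			return err
+		}
+		if err := bucket.Put([]byte("mykey"), []byte("myvalue")); err != nil {
+			return err
+		}
+
+		// Iterate all key/value pairs in the bucket.
+		return bucket.ForEach(func(k, v []byte) error {
+			fmt.Printf("%s: %s\n", k, v)
+			return nil
+		})
+	})
+	// Handle err as appropriate for the caller.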
+
+Metadata Bucket
+
+As discussed above, all of the functions which are used to manipulate key/value
+pairs and nested buckets exist on the Bucket interface.  The root metadata
+bucket is the upper-most bucket in which data is stored and is created at the
+same time as the database.  Use the Metadata function on the Tx interface
+to retrieve it.
+
+Nested Buckets
+
+The CreateBucket and CreateBucketIfNotExists functions on the Bucket interface
+provide the ability to create an arbitrary number of nested buckets.  It is
+a good idea to avoid a lot of buckets with little data in them as it could lead
+to poor page utilization depending on the specific driver in use.
+*/
+package database
diff --git a/database2/driver.go b/database2/driver.go
new file mode 100644
index 0000000000..77b7110e30
--- /dev/null
+++ b/database2/driver.go
@@ -0,0 +1,85 @@
+// Copyright (c) 2015 Conformal Systems LLC.
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+// Parts of this interface were inspired heavily by the excellent boltdb project
+// at https://github.com/boltdb/bolt by Ben B. Johnson.
+
+package database
+
+import "fmt"
+
+// Driver defines a structure for backend drivers to use when they register
+// themselves as a backend which implements the DB interface.
+type Driver struct {
+	// DbType is the identifier used to uniquely identify a specific
+	// database driver.  There can be only one driver with the same name.
+	DbType string
+
+	// Create is the function that will be invoked with all user-specified
+	// arguments to create the database.  This function must return
+	// ErrDbExists if the database already exists.
+	Create func(args ...interface{}) (DB, error)
+
+	// Open is the function that will be invoked with all user-specified
+	// arguments to open the database.  This function must return
+	// ErrDbDoesNotExist if the database has not already been created.
+	Open func(args ...interface{}) (DB, error)
+}
+
+// drivers holds all of the registered database backends.
+var drivers = make(map[string]*Driver)
+
+// RegisterDriver adds a backend database driver to available interfaces.
+// ErrDbTypeRegistered will be returned if the database type for the driver has
+// already been registered.
+func RegisterDriver(driver Driver) error {
+	if _, exists := drivers[driver.DbType]; exists {
+		str := fmt.Sprintf("driver %q is already registered",
+			driver.DbType)
+		return makeError(ErrDbTypeRegistered, str, nil)
+	}
+
+	drivers[driver.DbType] = &driver
+	return nil
+}
+
+// SupportedDrivers returns a slice of strings that represent the database
+// drivers that have been registered and are therefore supported.
+func SupportedDrivers() []string {
+	supportedDBs := make([]string, 0, len(drivers))
+	for _, drv := range drivers {
+		supportedDBs = append(supportedDBs, drv.DbType)
+	}
+	return supportedDBs
+}
+
+// Create initializes and opens a database for the specified type.  The
+// arguments are specific to the database type driver.  See the documentation
+// for the database driver for further details.
+//
+// ErrDbUnknownType will be returned if the database type is not registered.
+func Create(dbType string, args ...interface{}) (DB, error) {
+	drv, exists := drivers[dbType]
+	if !exists {
+		str := fmt.Sprintf("driver %q is not registered", dbType)
+		return nil, makeError(ErrDbUnknownType, str, nil)
+	}
+
+	return drv.Create(args...)
+}
+
+// Open opens an existing database for the specified type.  The arguments are
+// specific to the database type driver.  See the documentation for the database
+// driver for further details.
+//
+// ErrDbUnknownType will be returned if the database type is not registered.
+func Open(dbType string, args ...interface{}) (DB, error) {
+	drv, exists := drivers[dbType]
+	if !exists {
+		str := fmt.Sprintf("driver %q is not registered", dbType)
+		return nil, makeError(ErrDbUnknownType, str, nil)
+	}
+
+	return drv.Open(args...)
+}
diff --git a/database2/driver_test.go b/database2/driver_test.go
new file mode 100644
index 0000000000..3ed67eaed8
--- /dev/null
+++ b/database2/driver_test.go
@@ -0,0 +1,136 @@
+// Copyright (c) 2015 Conformal Systems LLC.
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package database_test
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/btcsuite/btcd/database2"
+	_ "github.com/btcsuite/btcd/database2/ffboltdb"
+)
+
+var (
+	// ignoreDbTypes are types which should be ignored when running tests
+	// that iterate all supported DB types.  This allows some tests to add
+	// bogus drivers for testing purposes while still allowing other tests
+	// to easily iterate all supported drivers.
+	ignoreDbTypes = map[string]bool{"createopenfail": true}
+)
+
+// checkDbError ensures the passed error is a database.Error with an error code
+// that matches the passed error code.
+func checkDbError(t *testing.T, testName string, gotErr error, wantErrCode database.ErrorCode) bool {
+	dbErr, ok := gotErr.(database.Error)
+	if !ok {
+		t.Errorf("%s: unexpected error type - got %T, want %T",
+			testName, gotErr, database.Error{})
+		return false
+	}
+	if dbErr.ErrorCode != wantErrCode {
+		t.Errorf("%s: unexpected error code - got %s (%s), want %s",
+			testName, dbErr.ErrorCode, dbErr.Description,
+			wantErrCode)
+		return false
+	}
+
+	return true
+}
+
+// TestAddDuplicateDriver ensures that adding a duplicate driver does not
+// overwrite an existing one.
+func TestAddDuplicateDriver(t *testing.T) {
+	supportedDrivers := database.SupportedDrivers()
+	if len(supportedDrivers) == 0 {
+		t.Errorf("no backends to test")
+		return
+	}
+	dbType := supportedDrivers[0]
+
+	// bogusCreateDB is a function which acts as a bogus create and open
+	// driver function and intentionally returns a failure that can be
+	// detected if the interface allows a duplicate driver to overwrite an
+	// existing one.
+	bogusCreateDB := func(args ...interface{}) (database.DB, error) {
+		return nil, fmt.Errorf("duplicate driver allowed for database "+
+			"type [%v]", dbType)
+	}
+
+	// Create a driver that tries to replace an existing one.
+	// Set its create and open functions to a function that causes a test
+	// failure if they are invoked.
+	driver := database.Driver{
+		DbType: dbType,
+		Create: bogusCreateDB,
+		Open:   bogusCreateDB,
+	}
+	testName := "duplicate driver registration"
+	err := database.RegisterDriver(driver)
+	if !checkDbError(t, testName, err, database.ErrDbTypeRegistered) {
+		return
+	}
+}
+
+// TestCreateOpenFail ensures that errors which occur while opening or closing
+// a database are handled properly.
+func TestCreateOpenFail(t *testing.T) {
+	// bogusCreateDB is a function which acts as a bogus create and open
+	// driver function that intentionally returns a failure which can be
+	// detected.
+	dbType := "createopenfail"
+	openError := fmt.Errorf("failed to create or open database for "+
+		"database type [%v]", dbType)
+	bogusCreateDB := func(args ...interface{}) (database.DB, error) {
+		return nil, openError
+	}
+
+	// Create and add driver that intentionally fails when created or opened
+	// to ensure errors on database open and create are handled properly.
+	driver := database.Driver{
+		DbType: dbType,
+		Create: bogusCreateDB,
+		Open:   bogusCreateDB,
+	}
+	database.RegisterDriver(driver)
+
+	// Ensure creating a database with the new type fails with the expected
+	// error.
+	_, err := database.Create(dbType)
+	if err != openError {
+		t.Errorf("expected error not received - got: %v, want %v", err,
+			openError)
+		return
+	}
+
+	// Ensure opening a database with the new type fails with the expected
+	// error.
+	_, err = database.Open(dbType)
+	if err != openError {
+		t.Errorf("expected error not received - got: %v, want %v", err,
+			openError)
+		return
+	}
+}
+
+// TestCreateOpenUnsupported ensures that attempting to create or open an
+// unsupported database type is handled properly.
+func TestCreateOpenUnsupported(t *testing.T) {
+	// Ensure creating a database with an unsupported type fails with the
+	// expected error.
+	testName := "create with unsupported database type"
+	dbType := "unsupported"
+	_, err := database.Create(dbType)
+	if !checkDbError(t, testName, err, database.ErrDbUnknownType) {
+		return
+	}
+
+	// Ensure opening a database with an unsupported type fails with the
+	// expected error.
+	testName = "open with unsupported database type"
+	_, err = database.Open(dbType)
+	if !checkDbError(t, testName, err, database.ErrDbUnknownType) {
+		return
+	}
+}
diff --git a/database2/error.go b/database2/error.go
new file mode 100644
index 0000000000..324ffe7248
--- /dev/null
+++ b/database2/error.go
@@ -0,0 +1,197 @@
+// Copyright (c) 2015 Conformal Systems LLC.
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package database
+
+import "fmt"
+
+// ErrorCode identifies a kind of error.
+type ErrorCode int
+
+// These constants are used to identify a specific database Error.
+const (
+	// **************************************
+	// Errors related to driver registration.
+	// **************************************
+
+	// ErrDbTypeRegistered indicates two different database drivers
+	// attempt to register with the same database type.
+	ErrDbTypeRegistered ErrorCode = iota
+
+	// *************************************
+	// Errors related to database functions.
+	// *************************************
+
+	// ErrDbUnknownType indicates there is no driver registered for
+	// the specified database type.
+	ErrDbUnknownType
+
+	// ErrDbDoesNotExist indicates open is called for a database that
+	// does not exist.
+	ErrDbDoesNotExist
+
+	// ErrDbExists indicates create is called for a database that
+	// already exists.
+	ErrDbExists
+
+	// ErrDbNotOpen indicates a database instance is accessed before
+	// it is opened or after it is closed.
+	ErrDbNotOpen
+
+	// ErrDbAlreadyOpen indicates open was called on a database that
+	// is already open.
+	ErrDbAlreadyOpen
+
+	// ErrInvalid indicates the specified database is not valid.
+	ErrInvalid
+
+	// ErrCorruption indicates a checksum failure occurred which invariably
+	// means the database is corrupt.
+	ErrCorruption
+
+	// ****************************************
+	// Errors related to database transactions.
+	// ****************************************
+
+	// ErrTxClosed indicates an attempt was made to commit or rollback a
+	// transaction that has already had one of those operations performed.
+	ErrTxClosed
+
+	// ErrTxNotWritable indicates an operation that requires write access to
+	// the database was attempted against a read-only transaction.
+	ErrTxNotWritable
+
+	// **************************************
+	// Errors related to metadata operations.
+	// **************************************
+
+	// ErrBucketNotFound indicates an attempt to access a bucket that has
+	// not been created yet.
+	ErrBucketNotFound
+
+	// ErrBucketExists indicates an attempt to create a bucket that already
+	// exists.
+	ErrBucketExists
+
+	// ErrBucketNameRequired indicates an attempt to create a bucket with a
+	// blank name.
+	ErrBucketNameRequired
+
+	// ErrKeyRequired indicates an attempt to insert a zero-length key.
+	ErrKeyRequired
+
+	// ErrKeyTooLarge indicates an attempt to insert a key that is larger
+	// than the max allowed key size.  The max key size depends on the
+	// specific backend driver being used.  As a general rule, key sizes
+	// should be relatively short, so this should rarely be an issue.
+	ErrKeyTooLarge
+
+	// ErrValueTooLarge indicates an attempt to insert a value that is
+	// larger than the max allowed value size.  The max value size depends
+	// on the specific backend driver being used.
+	ErrValueTooLarge
+
+	// ErrIncompatibleValue indicates the value in question is invalid for
+	// the specific requested operation.  For example, trying to create or
+	// delete a bucket with an existing non-bucket key, attempting to create
+	// or delete a non-bucket key with an existing bucket key, or trying to
+	// delete a value via a cursor when it points to a nested bucket.
+	ErrIncompatibleValue
+
+	// ***************************************
+	// Errors related to block I/O operations.
+	// ***************************************
+
+	// ErrBlockNotFound indicates a block with the provided hash does not
+	// exist in the database.
+	ErrBlockNotFound
+
+	// ErrBlockExists indicates a block with the provided hash already
+	// exists in the database.
+	ErrBlockExists
+
+	// ErrBlockRegionInvalid indicates a region that exceeds the bounds of
+	// the specified block was requested.  When the hash provided by the
+	// region does not correspond to an existing block, the error will be
+	// ErrBlockNotFound instead.
+	ErrBlockRegionInvalid
+
+	// ***********************************
+	// Support for driver-specific errors.
+	// ***********************************
+
+	// ErrDriverSpecific indicates the Err field is a driver-specific error.
+	// This provides a mechanism for drivers to plug-in their own custom
+	// errors for any situations which aren't already covered by the error
+	// codes provided by this package.
+ ErrDriverSpecific + + // numErrorCodes is the maximum error code number used in tests. + numErrorCodes +) + +// Map of ErrorCode values back to their constant names for pretty printing. +var errorCodeStrings = map[ErrorCode]string{ + ErrDbTypeRegistered: "ErrDbTypeRegistered", + ErrDbUnknownType: "ErrDbUnknownType", + ErrDbDoesNotExist: "ErrDbDoesNotExist", + ErrDbExists: "ErrDbExists", + ErrDbNotOpen: "ErrDbNotOpen", + ErrDbAlreadyOpen: "ErrDbAlreadyOpen", + ErrInvalid: "ErrInvalid", + ErrCorruption: "ErrCorruption", + ErrTxClosed: "ErrTxClosed", + ErrTxNotWritable: "ErrTxNotWritable", + ErrBucketNotFound: "ErrBucketNotFound", + ErrBucketExists: "ErrBucketExists", + ErrBucketNameRequired: "ErrBucketNameRequired", + ErrKeyRequired: "ErrKeyRequired", + ErrKeyTooLarge: "ErrKeyTooLarge", + ErrValueTooLarge: "ErrValueTooLarge", + ErrIncompatibleValue: "ErrIncompatibleValue", + ErrBlockNotFound: "ErrBlockNotFound", + ErrBlockExists: "ErrBlockExists", + ErrBlockRegionInvalid: "ErrBlockRegionInvalid", + ErrDriverSpecific: "ErrDriverSpecific", +} + +// String returns the ErrorCode as a human-readable name. +func (e ErrorCode) String() string { + if s := errorCodeStrings[e]; s != "" { + return s + } + return fmt.Sprintf("Unknown ErrorCode (%d)", int(e)) +} + +// Error provides a single type for errors that can happen during database +// operation. It is used to indicate several types of failures including errors +// with caller requests such as specifying invalid block regions or attempting +// to access data against closed database transactions, driver errors, errors +// retrieving data, and errors communicating with database servers. +// +// The caller can use type assertions to determine if an error is an Error and +// access the ErrorCode field to ascertain the specific reason for the failure. +// +// The ErrDriverSpecific error code will also have the Err field set with the +// underlying error. Depending on the backend driver, the Err field might be +// set to the underlying error for other error codes as well. +type Error struct { + ErrorCode ErrorCode // Describes the kind of error + Description string // Human readable description of the issue + Err error // Underlying error +} + +// Error satisfies the error interface and prints human-readable errors. +func (e Error) Error() string { + if e.Err != nil { + return e.Description + ": " + e.Err.Error() + } + return e.Description +} + +// makeError creates an Error given a set of arguments. The error code must +// be one of the error codes provided by this package. +func makeError(c ErrorCode, desc string, err error) Error { + return Error{ErrorCode: c, Description: desc, Err: err} +} diff --git a/database2/error_test.go b/database2/error_test.go new file mode 100644 index 0000000000..564e13f745 --- /dev/null +++ b/database2/error_test.go @@ -0,0 +1,97 @@ +// Copyright (c) 2015 Conformal Systems LLC. +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package database_test + +import ( + "errors" + "testing" + + "github.com/btcsuite/btcd/database2" +) + +// TestErrorCodeStringer tests the stringized output for the ErrorCode type. 
+func TestErrorCodeStringer(t *testing.T) { + tests := []struct { + in database.ErrorCode + want string + }{ + {database.ErrDbTypeRegistered, "ErrDbTypeRegistered"}, + {database.ErrDbUnknownType, "ErrDbUnknownType"}, + {database.ErrDbDoesNotExist, "ErrDbDoesNotExist"}, + {database.ErrDbExists, "ErrDbExists"}, + {database.ErrDbNotOpen, "ErrDbNotOpen"}, + {database.ErrDbAlreadyOpen, "ErrDbAlreadyOpen"}, + {database.ErrInvalid, "ErrInvalid"}, + {database.ErrCorruption, "ErrCorruption"}, + {database.ErrTxClosed, "ErrTxClosed"}, + {database.ErrTxNotWritable, "ErrTxNotWritable"}, + {database.ErrBucketNotFound, "ErrBucketNotFound"}, + {database.ErrBucketExists, "ErrBucketExists"}, + {database.ErrBucketNameRequired, "ErrBucketNameRequired"}, + {database.ErrKeyRequired, "ErrKeyRequired"}, + {database.ErrKeyTooLarge, "ErrKeyTooLarge"}, + {database.ErrValueTooLarge, "ErrValueTooLarge"}, + {database.ErrIncompatibleValue, "ErrIncompatibleValue"}, + {database.ErrBlockNotFound, "ErrBlockNotFound"}, + {database.ErrBlockExists, "ErrBlockExists"}, + {database.ErrBlockRegionInvalid, "ErrBlockRegionInvalid"}, + {database.ErrDriverSpecific, "ErrDriverSpecific"}, + + {0xffff, "Unknown ErrorCode (65535)"}, + } + + // Detect additional error codes that don't have the stringer added. + if len(tests)-1 != int(database.TstNumErrorCodes) { + t.Errorf("It appears an error code was added without adding " + + "an associated stringer test") + } + + t.Logf("Running %d tests", len(tests)) + for i, test := range tests { + result := test.in.String() + if result != test.want { + t.Errorf("String #%d\ngot: %s\nwant: %s", i, result, + test.want) + continue + } + } +} + +// TestError tests the error output for the Error type. +func TestError(t *testing.T) { + t.Parallel() + + tests := []struct { + in database.Error + want string + }{ + { + database.Error{Description: "some error"}, + "some error", + }, + { + database.Error{Description: "human-readable error"}, + "human-readable error", + }, + { + database.Error{ + ErrorCode: database.ErrDriverSpecific, + Description: "some error", + Err: errors.New("driver-specific error"), + }, + "some error: driver-specific error", + }, + } + + t.Logf("Running %d tests", len(tests)) + for i, test := range tests { + result := test.in.Error() + if result != test.want { + t.Errorf("Error #%d\n got: %s want: %s", i, result, + test.want) + continue + } + } +} diff --git a/database2/example_test.go b/database2/example_test.go new file mode 100644 index 0000000000..64c09b1026 --- /dev/null +++ b/database2/example_test.go @@ -0,0 +1,177 @@ +// Copyright (c) 2015 Conformal Systems LLC. +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package database_test + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + + "github.com/btcsuite/btcd/chaincfg" + "github.com/btcsuite/btcd/database2" + _ "github.com/btcsuite/btcd/database2/ffboltdb" + "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btcutil" +) + +// This example demonstrates creating a new database. +func ExampleCreate() { + // This example assumes the ffboltdb driver is imported. + // + // import ( + // "github.com/btcsuite/btcd/database" + // _ "github.com/btcsuite/btcd/database/ffboltdb" + // ) + + // Create a database and schedule it to be closed and removed on exit. + // Typically you wouldn't want to remove the database right away like + // this, nor put it in the temp directory, but it's done here to ensure + // the example cleans up after itself. 
+	dbPath := filepath.Join(os.TempDir(), "examplecreate")
+	db, err := database.Create("ffboltdb", dbPath, wire.MainNet)
+	if err != nil {
+		fmt.Println(err)
+		return
+	}
+	defer os.RemoveAll(dbPath)
+	defer db.Close()
+
+	// Output:
+}
+
+// This example demonstrates creating a new database and using a managed
+// read-write transaction to store and retrieve metadata.
+func Example_basicUsage() {
+	// This example assumes the ffboltdb driver is imported.
+	//
+	// import (
+	// 	"github.com/btcsuite/btcd/database"
+	// 	_ "github.com/btcsuite/btcd/database/ffboltdb"
+	// )
+
+	// Create a database and schedule it to be closed and removed on exit.
+	// Typically you wouldn't want to remove the database right away like
+	// this, nor put it in the temp directory, but it's done here to ensure
+	// the example cleans up after itself.
+	dbPath := filepath.Join(os.TempDir(), "exampleusage")
+	db, err := database.Create("ffboltdb", dbPath, wire.MainNet)
+	if err != nil {
+		fmt.Println(err)
+		return
+	}
+	defer os.RemoveAll(dbPath)
+	defer db.Close()
+
+	// Use the Update function of the database to perform a managed
+	// read-write transaction.  The transaction will automatically be rolled
+	// back if the supplied inner function returns a non-nil error.
+	err = db.Update(func(tx database.Tx) error {
+		// Store a key/value pair directly in the metadata bucket.
+		// Typically a nested bucket would be used for a given feature,
+		// but this example is using the metadata bucket directly for
+		// simplicity.
+		key := []byte("mykey")
+		value := []byte("myvalue")
+		if err := tx.Metadata().Put(key, value); err != nil {
+			return err
+		}
+
+		// Read the key back and ensure it matches.
+		if !bytes.Equal(tx.Metadata().Get(key), value) {
+			return fmt.Errorf("unexpected value for key '%s'", key)
+		}
+
+		// Create a new nested bucket under the metadata bucket.
+		nestedBucketKey := []byte("mybucket")
+		nestedBucket, err := tx.Metadata().CreateBucket(nestedBucketKey)
+		if err != nil {
+			return err
+		}
+
+		// The key from above that was set in the metadata bucket does
+		// not exist in this new nested bucket.
+		if nestedBucket.Get(key) != nil {
+			return fmt.Errorf("key '%s' unexpectedly has a value "+
+				"in the nested bucket", key)
+		}
+
+		return nil
+	})
+	if err != nil {
+		fmt.Println(err)
+		return
+	}
+
+	// Output:
+}
+
+// This example demonstrates creating a new database, using a managed read-write
+// transaction to store a block, and using a managed read-only transaction to
+// fetch the block.
+func Example_blockStorageAndRetrieval() {
+	// This example assumes the ffboltdb driver is imported.
+	//
+	// import (
+	// 	"github.com/btcsuite/btcd/database"
+	// 	_ "github.com/btcsuite/btcd/database/ffboltdb"
+	// )
+
+	// Create a database and schedule it to be closed and removed on exit.
+	// Typically you wouldn't want to remove the database right away like
+	// this, nor put it in the temp directory, but it's done here to ensure
+	// the example cleans up after itself.
+	dbPath := filepath.Join(os.TempDir(), "exampleblkstorage")
+	db, err := database.Create("ffboltdb", dbPath, wire.MainNet)
+	if err != nil {
+		fmt.Println(err)
+		return
+	}
+	defer os.RemoveAll(dbPath)
+	defer db.Close()
+
+	// Use the Update function of the database to perform a managed
+	// read-write transaction and store a genesis block in the database as
+	// an example.
+	err = db.Update(func(tx database.Tx) error {
+		genesisBlock := chaincfg.MainNetParams.GenesisBlock
+		return tx.StoreBlock(btcutil.NewBlock(genesisBlock))
+	})
+	if err != nil {
+		fmt.Println(err)
+		return
+	}
+
+	// Use the View function of the database to perform a managed read-only
+	// transaction and fetch the block stored above.
+	var loadedBlockBytes []byte
+	err = db.View(func(tx database.Tx) error {
+		genesisHash := chaincfg.MainNetParams.GenesisHash
+		blockBytes, err := tx.FetchBlock(genesisHash)
+		if err != nil {
+			return err
+		}
+
+		// As documented, all data fetched from the database is only
+		// valid during a database transaction in order to support
+		// zero-copy backends. Thus, make a copy of the data so it
+		// can be used outside of the transaction.
+		loadedBlockBytes = make([]byte, len(blockBytes))
+		copy(loadedBlockBytes, blockBytes)
+		return nil
+	})
+	if err != nil {
+		fmt.Println(err)
+		return
+	}
+
+	// Typically at this point, the block could be deserialized via the
+	// wire.MsgBlock.Deserialize function or used in its serialized form
+	// depending on need. However, for this example, just display the
+	// number of serialized bytes to show it was loaded as expected.
+	fmt.Printf("Serialized block size: %d bytes\n", len(loadedBlockBytes))
+
+	// Output:
+	// Serialized block size: 285 bytes
+}
diff --git a/database2/export_test.go b/database2/export_test.go
new file mode 100644
index 0000000000..077ca4e708
--- /dev/null
+++ b/database2/export_test.go
@@ -0,0 +1,17 @@
+// Copyright (c) 2015 Conformal Systems LLC.
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+/*
+This test file is part of the database package rather than the database_test
+package so it can bridge access to the internals to properly test cases which
+are either not possible or can't reliably be tested via the public interface.
+The functions, constants, and variables are only exported while the tests are
+being run.
+*/
+
+package database
+
+// TstNumErrorCodes makes the internal numErrorCodes parameter available to the
+// test package.
+const TstNumErrorCodes = numErrorCodes
diff --git a/database2/ffboltdb/README.md b/database2/ffboltdb/README.md
new file mode 100644
index 0000000000..ff4f1b56d2
--- /dev/null
+++ b/database2/ffboltdb/README.md
@@ -0,0 +1,53 @@
+ffboltdb
+========
+
+[![Build Status](https://travis-ci.org/btcsuite/btcd.png?branch=master)]
+(https://travis-ci.org/btcsuite/btcd)
+
+Package ffboltdb implements a driver for the database package that uses boltdb
+for the backing metadata and flat files for block storage.
+
+This driver is the recommended driver for use with btcd. It has a strong focus
+on speed, efficiency, and robustness. It makes use of zero-copy memory mapping
+for the metadata, flat files for block storage, and checksums in key areas to
+ensure data integrity.
+
+Package ffboltdb is licensed under the copyfree ISC license.
+
+## Usage
+
+This package is a driver to the database package and provides the database type
+of "ffboltdb". The Open and Create functions take the database path as a
+string and the block network.
+
+```Go
+db, err := database.Open("ffboltdb", "path/to/database", wire.MainNet)
+if err != nil {
+	// Handle error
+}
+```
+
+```Go
+db, err := database.Create("ffboltdb", "path/to/database", wire.MainNet)
+if err != nil {
+	// Handle error
+}
+```
+
+## Documentation
+
+[![GoDoc](https://godoc.org/github.com/btcsuite/btcd/database/ffboltdb?status.png)]
+(http://godoc.org/github.com/btcsuite/btcd/database/ffboltdb)
+
+Full `go doc` style documentation for the project can be viewed online without
+installing this package by using the GoDoc site here:
+http://godoc.org/github.com/btcsuite/btcd/database/ffboltdb
+
+You can also view the documentation locally once the package is installed with
+the `godoc` tool by running `godoc -http=":6060"` and pointing your browser to
+http://localhost:6060/pkg/github.com/btcsuite/btcd/database/ffboltdb
+
+## License
+
+Package ffboltdb is licensed under the [copyfree](http://copyfree.org) ISC
+License.
diff --git a/database2/ffboltdb/bench_test.go b/database2/ffboltdb/bench_test.go
new file mode 100644
index 0000000000..fa02ced3e6
--- /dev/null
+++ b/database2/ffboltdb/bench_test.go
@@ -0,0 +1,97 @@
+// Copyright (c) 2015 Conformal Systems LLC.
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package ffboltdb
+
+import (
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/btcsuite/btcd/chaincfg"
+	"github.com/btcsuite/btcd/database2"
+	"github.com/btcsuite/btcutil"
+)
+
+// BenchmarkBlockHeader benchmarks how long it takes to load the mainnet genesis
+// block header.
+func BenchmarkBlockHeader(b *testing.B) {
+	// Start by creating a new database and populating it with the mainnet
+	// genesis block.
+	dbPath := filepath.Join(os.TempDir(), "ffboltdb-benchblkhdr")
+	_ = os.RemoveAll(dbPath)
+	db, err := database.Create("ffboltdb", dbPath, blockDataNet)
+	if err != nil {
+		b.Fatal(err)
+	}
+	defer os.RemoveAll(dbPath)
+	defer db.Close()
+	err = db.Update(func(tx database.Tx) error {
+		block := btcutil.NewBlock(chaincfg.MainNetParams.GenesisBlock)
+		if err := tx.StoreBlock(block); err != nil {
+			return err
+		}
+		return nil
+	})
+	if err != nil {
+		b.Fatal(err)
+	}
+
+	b.ReportAllocs()
+	b.ResetTimer()
+	err = db.View(func(tx database.Tx) error {
+		blockHash := chaincfg.MainNetParams.GenesisHash
+		for i := 0; i < b.N; i++ {
+			_, err := tx.FetchBlockHeader(blockHash)
+			if err != nil {
+				return err
+			}
+		}
+		return nil
+	})
+	if err != nil {
+		b.Fatal(err)
+	}
+}
+
+// BenchmarkBlock benchmarks how long it takes to load the mainnet genesis
+// block.
+func BenchmarkBlock(b *testing.B) {
+	// Start by creating a new database and populating it with the mainnet
+	// genesis block.
+	dbPath := filepath.Join(os.TempDir(), "ffboltdb-benchblk")
+	_ = os.RemoveAll(dbPath)
+	db, err := database.Create("ffboltdb", dbPath, blockDataNet)
+	if err != nil {
+		b.Fatal(err)
+	}
+	defer os.RemoveAll(dbPath)
+	defer db.Close()
+	err = db.Update(func(tx database.Tx) error {
+		block := btcutil.NewBlock(chaincfg.MainNetParams.GenesisBlock)
+		if err := tx.StoreBlock(block); err != nil {
+			return err
+		}
+		return nil
+	})
+	if err != nil {
+		b.Fatal(err)
+	}
+
+	b.ReportAllocs()
+	b.ResetTimer()
+	err = db.View(func(tx database.Tx) error {
+		blockHash := chaincfg.MainNetParams.GenesisHash
+		for i := 0; i < b.N; i++ {
+			_, err := tx.FetchBlock(blockHash)
+			if err != nil {
+				return err
+			}
+		}
+		return nil
+	})
+	if err != nil {
+		b.Fatal(err)
+	}
+}
diff --git a/database2/ffboltdb/blockio.go b/database2/ffboltdb/blockio.go
new file mode 100644
index 0000000000..259e1dc946
--- /dev/null
+++ b/database2/ffboltdb/blockio.go
@@ -0,0 +1,746 @@
+// Copyright (c) 2015 Conformal Systems LLC.
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+// This file contains the implementation functions for reading, writing, and
+// otherwise working with the flat files that house the actual blocks.
+
+package ffboltdb
+
+import (
+	"container/list"
+	"encoding/binary"
+	"fmt"
+	"hash/crc32"
+	"io"
+	"os"
+	"path/filepath"
+	"sync"
+
+	"github.com/btcsuite/btcd/database2"
+	"github.com/btcsuite/btcd/wire"
+)
+
+const (
+	// The Bitcoin protocol encodes block height as int32, so the max number
+	// of blocks is 2^31. Max block size per the protocol is 32MiB per
+	// block. So the theoretical max at the time this comment was written
+	// is 64PiB (pebibytes). With files @ 512MiB each, this would require
+	// a maximum of 134,217,728 files. Thus, choose 9 digits of precision
+	// for the filenames. An additional benefit is 9 digits provides 10^9
+	// files @ 512MiB each for a total of ~476.84PiB (roughly 7.4 times the
+	// current theoretical max), so there is room for the max block size to
+	// grow in the future.
+	blockFilenameTemplate = "%09d.fdb"
+
+	// maxOpenFiles is the max number of open files to maintain in the
+	// open blocks cache. Note that this does not include the current
+	// write file, so there will typically be one more than this value open.
+	maxOpenFiles = 25
+
+	// maxBlockFileSize is the maximum size for each file used to store
+	// blocks.
+	//
+	// NOTE: The current code uses uint32 for all offsets, so this value
+	// must be less than 2^32 (4 GiB). This is also why it's a typed
+	// constant.
+	maxBlockFileSize uint32 = 512 * 1024 * 1024 // 512 MiB
+
+	// blockLocSize is the number of bytes of serialized block location
+	// data that is stored in the block index.
+	//
+	// The serialized block location format is:
+	//
+	//  [0:4]  Block file (4 bytes)
+	//  [4:8]  File offset (4 bytes)
+	//  [8:12] Block length (4 bytes)
+	blockLocSize = 12
+)
+
+var (
+	// castagnoli houses the Castagnoli polynomial used for CRC-32 checksums.
+	castagnoli = crc32.MakeTable(crc32.Castagnoli)
+)
+
+// filer is an interface which acts very similarly to a *os.File and is
+// typically implemented by it. It exists so the test code can provide mock
+// files for properly testing corruption and file system issues.
+type filer interface {
+	io.Closer
+	io.WriterAt
+	io.ReaderAt
+	Truncate(size int64) error
+	Sync() error
+}
+
+// lockableFile represents a block file on disk that has been opened for either
+// read or read/write access. It also contains a read-write mutex to support
+// multiple concurrent readers.
+type lockableFile struct {
+	sync.RWMutex
+	file filer
+}
+
+// writeCursor represents the current file and offset of the block file on disk
+// for performing all writes. It also contains a read-write mutex to support
+// multiple concurrent readers which can reuse the file handle.
+type writeCursor struct {
+	sync.RWMutex
+
+	// curFile is the current block file that will be appended to when
+	// writing new blocks.
+	curFile *lockableFile
+
+	// curFileNum is the current block file number and is used to allow
+	// readers to use the same open file handle.
+	curFileNum uint32
+
+	// curOffset is the offset in the current write block file where the
+	// next new block will be written.
+	curOffset uint32
+}
+
+// blockStore houses information used to handle reading and writing blocks (and
+// part of blocks) into flat files with support for multiple concurrent readers.
+type blockStore struct {
+	// network is the specific network to use in the flat files for each
+	// block.
+	network wire.BitcoinNet
+
+	// basePath is the base path used for the flat block files and metadata.
+	basePath string
+
+	// maxBlockFileSize is the maximum size for each file used to store
+	// blocks. It is defined on the store so the whitebox tests can
+	// override the value.
+	maxBlockFileSize uint32
+
+	// The following fields are related to the flat files which hold the
+	// actual blocks. The number of open files is limited by maxOpenFiles.
+	//
+	// obfMutex protects concurrent access to the openBlockFiles map. It is
+	// a RWMutex so multiple readers can simultaneously access open files.
+	//
+	// openBlockFiles houses the open file handles for existing block files
+	// which have been opened read-only along with an individual RWMutex.
+	// This scheme allows multiple concurrent readers to the same file while
+	// preventing the file from being closed out from under them.
+	//
+	// lruMutex protects concurrent access to the least recently used list
+	// and lookup map.
+	//
+	// openBlocksLRU tracks how the open files are referenced by pushing the
+	// least recently used files to the end of the list. When a file needs
+	// to be closed due to exceeding the max number of allowed open files,
+	// the one at the back of the list is closed.
+	//
+	// fileNumToLRUElem is a mapping between a specific block file number
+	// and the associated list element on the least recently used list.
+	//
+	// Thus, with the combination of these fields, the database supports
+	// concurrent non-blocking reads across multiple and individual files
+	// along with intelligently limiting the number of open file handles by
+	// closing the least recently used files as needed.
+	//
+	// NOTE: The locking order used throughout is well-defined and MUST be
+	// followed. Failure to do so could lead to deadlocks. In particular,
+	// the locking order is as follows:
+	//  1) obfMutex
+	//  2) lruMutex
+	//  3) writeCursor mutex
+	//  4) specific file mutexes
+	//
+	// None of the mutexes are required to be locked at the same time, and
+	// often aren't. However, if they are to be locked simultaneously, they
+	// MUST be locked in the order previously specified.
+	//
+	// Due to the high performance and multi-read concurrency requirements,
+	// write locks should only be held for the minimum time necessary.
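+	//
+	// As an illustrative sketch only (not code from this package), an
+	// operation that hypothetically needed every lock at once would
+	// acquire and release them like so:
+	//
+	//	s.obfMutex.Lock()
+	//	s.lruMutex.Lock()
+	//	s.writeCursor.Lock()
+	//	blockFile.Lock()
+	//	// ... critical section ...
+	//	blockFile.Unlock()
+	//	s.writeCursor.Unlock()
+	//	s.lruMutex.Unlock()
+	//	s.obfMutex.Unlock()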
+	obfMutex         sync.RWMutex
+	lruMutex         sync.Mutex
+	openBlocksLRU    *list.List
+	fileNumToLRUElem map[uint32]*list.Element
+	openBlockFiles   map[uint32]*lockableFile
+
+	// writeCursor houses the state for the current file and location that
+	// new blocks are written to.
+	writeCursor *writeCursor
+
+	// These functions are set to openFile, openWriteFile, and deleteFile by
+	// default, but are exposed here to allow the whitebox tests to replace
+	// them when working with mock files.
+	openFileFunc      func(fileNum uint32) (*lockableFile, error)
+	openWriteFileFunc func(fileNum uint32) (filer, error)
+	deleteFileFunc    func(fileNum uint32) error
+}
+
+// blockLocation identifies a particular block file and location.
+type blockLocation struct {
+	blockFileNum uint32
+	fileOffset   uint32
+	blockLen     uint32
+}
+
+// deserializeBlockLoc deserializes the passed serialized block location
+// information. This is data stored into the block index metadata for each
+// block. The serialized data passed to this function MUST be at least
+// blockLocSize bytes or it will panic. The error check is avoided here
+// because this information will always be coming from the block index which
+// includes a checksum to detect corruption. Thus it is safe to use this
+// unchecked here.
+func deserializeBlockLoc(serializedLoc []byte) blockLocation {
+	// The serialized block location format is:
+	//
+	//  [0:4]  Block file (4 bytes)
+	//  [4:8]  File offset (4 bytes)
+	//  [8:12] Block length (4 bytes)
+	return blockLocation{
+		blockFileNum: byteOrder.Uint32(serializedLoc[0:4]),
+		fileOffset:   byteOrder.Uint32(serializedLoc[4:8]),
+		blockLen:     byteOrder.Uint32(serializedLoc[8:12]),
+	}
+}
+
+// serializeBlockLoc returns the serialization of the passed block location.
+// This is data to be stored into the block index metadata for each block.
+func serializeBlockLoc(loc blockLocation) []byte {
+	// The serialized block location format is:
+	//
+	//  [0:4]  Block file (4 bytes)
+	//  [4:8]  File offset (4 bytes)
+	//  [8:12] Block length (4 bytes)
+	var serializedData [12]byte
+	byteOrder.PutUint32(serializedData[0:4], loc.blockFileNum)
+	byteOrder.PutUint32(serializedData[4:8], loc.fileOffset)
+	byteOrder.PutUint32(serializedData[8:12], loc.blockLen)
+	return serializedData[:]
+}
+
+// blockFilePath returns the file path for the provided block file number.
+func blockFilePath(dbPath string, fileNum uint32) string {
+	fileName := fmt.Sprintf(blockFilenameTemplate, fileNum)
+	return filepath.Join(dbPath, fileName)
+}
+
+// openWriteFile returns a file handle for the passed flat file number in
+// read/write mode. The file will be created if needed. It is typically used
+// for the current file that will have all new data appended. Unlike openFile,
+// this function does not keep track of the open file and it is not subject to
+// the maxOpenFiles limit.
+func (s *blockStore) openWriteFile(fileNum uint32) (filer, error) {
+	// The current block file needs to be read-write so it is possible to
+	// append to it. Also, it shouldn't be part of the least recently used
+	// file tracking.
+	filePath := blockFilePath(s.basePath, fileNum)
+	file, err := os.OpenFile(filePath, os.O_RDWR|os.O_CREATE, 0666)
+	if err != nil {
+		str := fmt.Sprintf("failed to open file %q: %v", filePath, err)
+		return nil, makeDbErr(database.ErrDriverSpecific, str, err)
+	}
+
+	return file, nil
+}
+
+// openFile returns a read-only file handle for the passed flat file number.
+// The function also keeps track of the open files, performs least recently
+// used tracking, and limits the number of open files to maxOpenFiles by
+// closing the least recently used file as needed.
+//
+// This function MUST be called with the overall files mutex (s.obfMutex)
+// locked for WRITES.
+func (s *blockStore) openFile(fileNum uint32) (*lockableFile, error) {
+	// Open the appropriate file as read-only.
+	filePath := blockFilePath(s.basePath, fileNum)
+	file, err := os.Open(filePath)
+	if err != nil {
+		return nil, makeDbErr(database.ErrDriverSpecific, err.Error(),
+			err)
+	}
+	blockFile := &lockableFile{file: file}
+
+	// Close the least recently used file if the file count exceeds the max
+	// allowed open files. This is not done until after the file is
+	// successfully opened so that no files need to be closed if the open
+	// fails.
+	//
+	// A write lock is required on the LRU list here to protect against
+	// modifications happening as already open files are read from and
+	// shuffled to the front of the list.
+	//
+	// Also, add the file that was just opened to the front of the least
+	// recently used list to indicate it is the most recently used file and
+	// therefore should be closed last.
+	s.lruMutex.Lock()
+	lruList := s.openBlocksLRU
+	if lruList.Len() >= maxOpenFiles {
+		lruFileNum := lruList.Remove(lruList.Back()).(uint32)
+		oldBlockFile := s.openBlockFiles[lruFileNum]
+
+		// Close the old file under the write lock for the file in case
+		// any readers are currently reading from it so it's not closed
+		// out from under them.
+		oldBlockFile.Lock()
+		_ = oldBlockFile.file.Close()
+		oldBlockFile.Unlock()
+
+		delete(s.openBlockFiles, lruFileNum)
+		delete(s.fileNumToLRUElem, lruFileNum)
+	}
+	s.fileNumToLRUElem[fileNum] = lruList.PushFront(fileNum)
+	s.lruMutex.Unlock()
+
+	// Store a reference to it in the open block files map.
+	s.openBlockFiles[fileNum] = blockFile
+
+	return blockFile, nil
+}
+
+// deleteFile removes the block file for the passed flat file number. The file
+// must already be closed and it is the responsibility of the caller to do any
+// other state cleanup necessary.
+func (s *blockStore) deleteFile(fileNum uint32) error {
+	filePath := blockFilePath(s.basePath, fileNum)
+	if err := os.Remove(filePath); err != nil {
+		return makeDbErr(database.ErrDriverSpecific, err.Error(), err)
+	}
+
+	return nil
+}
+
+// blockFile attempts to return an existing file handle for the passed flat file
+// number if it is already open as well as marking it as most recently used. It
+// will also open the file when it's not already open subject to the rules
+// described in openFile.
+//
+// NOTE: The returned block file will already have the read lock acquired and
+// the caller MUST call .RUnlock() to release it once it has finished all read
+// operations. This is necessary because otherwise it would be possible for a
+// separate goroutine to close the file after it is returned from here, but
+// before the caller has acquired a read lock.
+func (s *blockStore) blockFile(fileNum uint32) (*lockableFile, error) {
+	// When the requested block file is open for writes, return it.
+	wc := s.writeCursor
+	wc.RLock()
+	if fileNum == wc.curFileNum && wc.curFile.file != nil {
+		obf := wc.curFile
+		obf.RLock()
+		wc.RUnlock()
+		return obf, nil
+	}
+	wc.RUnlock()
+
+	// Try to return an open file under the overall files read lock.
+	s.obfMutex.RLock()
+	if obf, ok := s.openBlockFiles[fileNum]; ok {
+		s.lruMutex.Lock()
+		s.openBlocksLRU.MoveToFront(s.fileNumToLRUElem[fileNum])
+		s.lruMutex.Unlock()
+
+		obf.RLock()
+		s.obfMutex.RUnlock()
+		return obf, nil
+	}
+	s.obfMutex.RUnlock()
+
+	// Since the file isn't open already, need to check the open block files
+	// map again under write lock in case multiple readers got here and a
+	// separate one is already opening the file.
+	s.obfMutex.Lock()
+	if obf, ok := s.openBlockFiles[fileNum]; ok {
+		obf.RLock()
+		s.obfMutex.Unlock()
+		return obf, nil
+	}
+
+	// The file isn't open, so open it while potentially closing the least
+	// recently used one as needed.
+	obf, err := s.openFileFunc(fileNum)
+	if err != nil {
+		s.obfMutex.Unlock()
+		return nil, err
+	}
+	obf.RLock()
+	s.obfMutex.Unlock()
+	return obf, nil
+}
+
+// writeBlock appends the specified raw block bytes to the store's write cursor
+// location and increments it accordingly. When the block would exceed the max
+// file size for the current flat file, this function will close the current
+// file, create the next file, update the write cursor, and write the block to
+// the new file.
+//
+// The write cursor will also be advanced the number of bytes actually written
+// in the event of failure.
+//
+// Format: <network><block length><serialized block><checksum>
+func (s *blockStore) writeBlock(rawBlock []byte) (blockLocation, error) {
+	// Compute how many bytes will be written.
+	// 4 bytes for block network + 4 bytes for block length +
+	// length of raw block + 4 bytes for checksum.
+	blockLen := uint32(len(rawBlock))
+	fullLen := blockLen + 12
+
+	// Move to the next block file if adding the new block would exceed the
+	// max allowed size for the current block file. Also detect overflow
+	// to be paranoid, even though it isn't possible currently, since
+	// numbers might change in the future to make it possible.
+	//
+	// NOTE: The writeCursor.curOffset field isn't protected by the mutex
+	// since it's only read/changed in this function which can only be
+	// called during a write transaction, of which there can be only one at
+	// a time.
+	wc := s.writeCursor
+	finalOffset := wc.curOffset + fullLen
+	if finalOffset < wc.curOffset || finalOffset > s.maxBlockFileSize {
+		// This is done under the write cursor lock since the curFileNum
+		// field is accessed elsewhere by readers.
+		//
+		// Close the current write file to force a read-only reopen
+		// with LRU tracking. The close is done under the write lock
+		// for the file to prevent it from being closed out from under
+		// any readers currently reading from it.
+		wc.Lock()
+		wc.curFile.Lock()
+		if wc.curFile.file != nil {
+			_ = wc.curFile.file.Close()
+			wc.curFile.file = nil
+		}
+		wc.curFile.Unlock()
+
+		// Start writes into next file.
+		wc.curFileNum++
+		wc.curOffset = 0
+		wc.Unlock()
+	}
+
+	// All writes are done under the write lock for the file to ensure any
+	// readers are finished and blocked first.
+	wc.curFile.Lock()
+	defer wc.curFile.Unlock()
+
+	// Open the current file if needed. This will typically only be the
+	// case when moving to the next file to write to or on initial database
+	// load. However, it might also be the case if rollbacks happened after
+	// file writes started during a transaction commit.
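+	//
+	// For reference, the on-disk record layout produced by the write
+	// sequence below is (a sketch derived from the code rather than a
+	// separate specification):
+	//
+	//	[0:4]      Bitcoin network (little endian)
+	//	[4:8]      Block length n (little endian)
+	//	[8:8+n]    Raw serialized block
+	//	[8+n:12+n] Castagnoli CRC-32 of all preceding bytes (big endian)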
+	if wc.curFile.file == nil {
+		file, err := s.openWriteFileFunc(wc.curFileNum)
+		if err != nil {
+			return blockLocation{}, err
+		}
+		wc.curFile.file = file
+	}
+
+	// Bitcoin network.
+	origOffset := wc.curOffset
+	hasher := crc32.New(castagnoli)
+	var scratch [4]byte
+	byteOrder.PutUint32(scratch[:], uint32(s.network))
+	n, err := wc.curFile.file.WriteAt(scratch[:], int64(wc.curOffset))
+	wc.curOffset += uint32(n)
+	if err != nil {
+		str := fmt.Sprintf("failed to write network to file %d at "+
+			"offset %d: %v", wc.curFileNum, wc.curOffset, err)
+		return blockLocation{}, makeDbErr(database.ErrDriverSpecific,
+			str, err)
+	}
+	_, _ = hasher.Write(scratch[:])
+
+	// Block length.
+	byteOrder.PutUint32(scratch[:], blockLen)
+	n, err = wc.curFile.file.WriteAt(scratch[:], int64(wc.curOffset))
+	wc.curOffset += uint32(n)
+	if err != nil {
+		str := fmt.Sprintf("failed to write block length to file %d "+
+			"at offset %d: %v", wc.curFileNum, wc.curOffset, err)
+		return blockLocation{}, makeDbErr(database.ErrDriverSpecific,
+			str, err)
+	}
+	_, _ = hasher.Write(scratch[:])
+
+	// Serialized block.
+	n, err = wc.curFile.file.WriteAt(rawBlock, int64(wc.curOffset))
+	wc.curOffset += uint32(n)
+	if err != nil {
+		str := fmt.Sprintf("failed to write block to file %d at "+
+			"offset %d: %v", wc.curFileNum, wc.curOffset, err)
+		return blockLocation{}, makeDbErr(database.ErrDriverSpecific,
+			str, err)
+	}
+	_, _ = hasher.Write(rawBlock)
+
+	// Castagnoli CRC-32 as a checksum of all of the previous data.
+	n, err = wc.curFile.file.WriteAt(hasher.Sum(nil), int64(wc.curOffset))
+	wc.curOffset += uint32(n)
+	if err != nil {
+		str := fmt.Sprintf("failed to write checksum to file %d at "+
+			"offset %d: %v", wc.curFileNum, wc.curOffset, err)
+		return blockLocation{}, makeDbErr(database.ErrDriverSpecific,
+			str, err)
+	}
+
+	// Sync the file to disk.
+	if err := wc.curFile.file.Sync(); err != nil {
+		str := fmt.Sprintf("failed to sync file %d: %v", wc.curFileNum,
+			err)
+		return blockLocation{}, makeDbErr(database.ErrDriverSpecific,
+			str, err)
+	}
+
+	loc := blockLocation{
+		blockFileNum: wc.curFileNum,
+		fileOffset:   origOffset,
+		blockLen:     fullLen,
+	}
+	return loc, nil
+}
+
+// readBlock reads the specified block record and returns the serialized block.
+// It ensures the integrity of the block data by checking that the serialized
+// network matches the current network associated with the block store and
+// comparing the calculated checksum against the one stored in the flat file.
+// This function also automatically handles all file management such as opening
+// and closing files as necessary to stay within the maximum allowed open files
+// limit.
+//
+// Returns ErrDriverSpecific if the data fails to read for any reason and
+// ErrCorruption if the checksum of the read data doesn't match the checksum
+// read from the file.
+//
+// Format: <network><block length><serialized block><checksum>
+func (s *blockStore) readBlock(hash *wire.ShaHash, loc blockLocation) ([]byte, error) {
+	// Get the referenced block file handle opening the file as needed. The
+	// function also handles closing files as needed to avoid going over the
+	// max allowed open files.
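+	//
+	// NOTE: Per the blockFile documentation, the returned file already has
+	// its read lock held, so every return path below must release it with
+	// RUnlock.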
+ blockFile, err := s.blockFile(loc.blockFileNum) + if err != nil { + return nil, err + } + + serializedData := make([]byte, loc.blockLen) + n, err := blockFile.file.ReadAt(serializedData, int64(loc.fileOffset)) + if err != nil { + blockFile.RUnlock() + str := fmt.Sprintf("failed to read block %s from file %d, "+ + "offset %d: %v", hash, loc.blockFileNum, loc.fileOffset, + err) + return nil, makeDbErr(database.ErrDriverSpecific, str, err) + } + blockFile.RUnlock() + + // Calculate the checksum of the read data and ensure it matches the + // serialized checksum. This will detect any data corruption in the + // flat file without having to do much more expensive merkle root + // calculations on the loaded block. + serializedChecksum := binary.BigEndian.Uint32(serializedData[n-4:]) + calculatedChecksum := crc32.Checksum(serializedData[:n-4], castagnoli) + if serializedChecksum != calculatedChecksum { + str := fmt.Sprintf("block data for block %s checksum "+ + "does not match - got %x, want %x", hash, + calculatedChecksum, serializedChecksum) + return nil, makeDbErr(database.ErrCorruption, str, nil) + } + + // The network associated with the block must match the current active + // network, otherwise somebody probably put the block files for the + // wrong network in the directory. + serializedNet := byteOrder.Uint32(serializedData[:4]) + if serializedNet != uint32(s.network) { + str := fmt.Sprintf("block data for block %s is for the "+ + "wrong network - got %d, want %d", hash, serializedNet, + uint32(s.network)) + return nil, makeDbErr(database.ErrDriverSpecific, str, nil) + } + + // The raw block excludes the network, length of the block, and + // checksum. + return serializedData[8 : n-4], nil +} + +// readBlockRegion reads the specified amount of data at the provided offset for +// a given block location. The offset is relative to the start of the +// serialized block (as opposed to the beginning of the block record). This +// function automatically handles all file management such as opening and +// closing files as necessary to stay within the maximum allowed open files +// limit. +// +// Returns ErrDriverSpecific if the data fails to read for any reason. +func (s *blockStore) readBlockRegion(loc blockLocation, offset, numBytes uint32) ([]byte, error) { + // Get the referenced block file handle opening the file as needed. The + // function also handles closing files as needed to avoid going over the + // max allowed open files. + blockFile, err := s.blockFile(loc.blockFileNum) + if err != nil { + return nil, err + } + + // Regions are offsets into the actual block, however the serialized + // data for a block includes an initial 4 bytes for network + 4 bytes + // for block length. Thus, add 8 bytes to adjust. + readOffset := loc.fileOffset + 8 + offset + serializedData := make([]byte, numBytes) + _, err = blockFile.file.ReadAt(serializedData, int64(readOffset)) + if err != nil { + blockFile.RUnlock() + str := fmt.Sprintf("failed to read region from block file %d, "+ + "offset %d, len %d: %v", loc.blockFileNum, readOffset, + numBytes, err) + return nil, makeDbErr(database.ErrDriverSpecific, str, err) + } + blockFile.RUnlock() + + return serializedData, nil +} + +// handleRollback rolls the block files on disk back to the provided file number +// and offset. This involves potentially deleting and truncating the files that +// were partially written. 
+//
+// There are effectively two scenarios to consider here:
+//  1) Transient write failures from which recovery is possible
+//  2) More permanent failures such as hard disk death and/or removal
+//
+// In either case, the write cursor will be repositioned to the old block file
+// offset regardless of any other errors that occur while attempting to undo
+// writes.
+//
+// For the first scenario, this will lead to any data which failed to be undone
+// being overwritten and thus behaves as desired as the system continues to run.
+//
+// For the second scenario, the metadata which stores the current write cursor
+// position within the block files will not have been updated yet and thus if
+// the system eventually recovers (perhaps the hard drive is reconnected), it
+// will also lead to any data which failed to be undone being overwritten and
+// thus behaves as desired.
+//
+// Therefore, any errors are simply logged at a warning level rather than being
+// returned since there is nothing more that could be done about it anyway.
+func (s *blockStore) handleRollback(oldBlockFileNum, oldBlockOffset uint32) {
+	// Grab the write cursor mutex since it is modified throughout this
+	// function.
+	wc := s.writeCursor
+	wc.Lock()
+	defer wc.Unlock()
+
+	// Nothing to do if the rollback point is the same as the current write
+	// cursor.
+	if wc.curFileNum == oldBlockFileNum && wc.curOffset == oldBlockOffset {
+		return
+	}
+
+	// Regardless of any failures that happen below, reposition the write
+	// cursor to the old block file and offset.
+	defer func() {
+		wc.curFileNum = oldBlockFileNum
+		wc.curOffset = oldBlockOffset
+	}()
+
+	log.Debugf("ROLLBACK: Rolling back to file %d, offset %d",
+		oldBlockFileNum, oldBlockOffset)
+
+	// Close the current write file if it needs to be deleted. Then delete
+	// all files that are newer than the provided rollback file while
+	// also moving the write cursor file backwards accordingly.
+	if wc.curFileNum > oldBlockFileNum {
+		wc.curFile.Lock()
+		if wc.curFile.file != nil {
+			_ = wc.curFile.file.Close()
+			wc.curFile.file = nil
+		}
+		wc.curFile.Unlock()
+	}
+	for ; wc.curFileNum > oldBlockFileNum; wc.curFileNum-- {
+		if err := s.deleteFileFunc(wc.curFileNum); err != nil {
+			_ = log.Warnf("ROLLBACK: Failed to delete block file "+
+				"number %d: %v", wc.curFileNum, err)
+			return
+		}
+	}
+
+	// Open the file for the current write cursor if needed.
+	wc.curFile.Lock()
+	if wc.curFile.file == nil {
+		obf, err := s.openWriteFileFunc(wc.curFileNum)
+		if err != nil {
+			wc.curFile.Unlock()
+			_ = log.Warnf("ROLLBACK: %v", err)
+			return
+		}
+		wc.curFile.file = obf
+	}
+
+	// Truncate the file to the provided rollback offset.
+	if err := wc.curFile.file.Truncate(int64(oldBlockOffset)); err != nil {
+		wc.curFile.Unlock()
+		_ = log.Warnf("ROLLBACK: Failed to truncate file %d: %v",
+			wc.curFileNum, err)
+		return
+	}
+
+	// Sync the file to disk.
+	if err := wc.curFile.file.Sync(); err != nil {
+		wc.curFile.Unlock()
+		_ = log.Warnf("ROLLBACK: Failed to sync file %d: %v",
+			wc.curFileNum, err)
+		return
+	}
+	wc.curFile.Unlock()
+	return
+}
+
+// scanBlockFiles searches the database directory for all flat block files to
+// find the end of the most recent file. This position is considered the
+// current write cursor which is also stored in the metadata. Thus, it is used
+// to detect unexpected shutdowns in the middle of writes so the block files
+// can be reconciled.
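+//
+// For example (illustrative only, assuming a hypothetical on-disk state):
+// with files 000000000.fdb and 000000001.fdb present and the latter being
+// 1000 bytes long, the function returns (1, 1000). When no block files exist
+// at all, it returns (-1, 0).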
+func scanBlockFiles(dbPath string) (int, uint32) {
+	lastFile := -1
+	fileLen := uint32(0)
+	for i := 0; ; i++ {
+		filePath := blockFilePath(dbPath, uint32(i))
+		st, err := os.Stat(filePath)
+		if err != nil {
+			break
+		}
+		lastFile = i
+
+		fileLen = uint32(st.Size())
+	}
+
+	log.Tracef("Scan found latest block file #%d with length %d", lastFile,
+		fileLen)
+	return lastFile, fileLen
+}
+
+// newBlockStore returns a new block store with the current block file number
+// and offset set and all fields initialized.
+func newBlockStore(basePath string, network wire.BitcoinNet) *blockStore {
+	// Look for the end of the latest block file to determine what the
+	// write cursor position is from the viewpoint of the block files on
+	// disk.
+	fileNum, fileOff := scanBlockFiles(basePath)
+	if fileNum == -1 {
+		fileNum = 0
+		fileOff = 0
+	}
+
+	store := &blockStore{
+		network:          network,
+		basePath:         basePath,
+		maxBlockFileSize: maxBlockFileSize,
+		openBlockFiles:   make(map[uint32]*lockableFile),
+		openBlocksLRU:    list.New(),
+		fileNumToLRUElem: make(map[uint32]*list.Element),
+
+		writeCursor: &writeCursor{
+			curFile:    &lockableFile{},
+			curFileNum: uint32(fileNum),
+			curOffset:  uint32(fileOff),
+		},
+	}
+	store.openFileFunc = store.openFile
+	store.openWriteFileFunc = store.openWriteFile
+	store.deleteFileFunc = store.deleteFile
+	return store
+}
diff --git a/database2/ffboltdb/db.go b/database2/ffboltdb/db.go
new file mode 100644
index 0000000000..c2f4166496
--- /dev/null
+++ b/database2/ffboltdb/db.go
@@ -0,0 +1,1515 @@
+// Copyright (c) 2015 Conformal Systems LLC.
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package ffboltdb
+
+import (
+	"encoding/binary"
+	"fmt"
+	"hash/crc32"
+	"os"
+	"path/filepath"
+	"sort"
+	"sync"
+
+	"github.com/btcsuite/bolt"
+	"github.com/btcsuite/btcd/database2"
+	"github.com/btcsuite/btcd/wire"
+	"github.com/btcsuite/btcutil"
+)
+
+const (
+	// metadataDbName is the name used for the metadata database.
+	metadataDbName = "metadata.db"
+
+	// blockHdrSize is the size of a block header. This is simply the
+	// constant from wire and is only provided here for convenience since
+	// wire.MaxBlockHeaderPayload is quite long.
+	blockHdrSize = wire.MaxBlockHeaderPayload
+
+	// blockHdrOffset and checksumOffset define the offsets into a block
+	// index row for the block header and block row checksum, respectively.
+	//
+	// The serialized block index row format is:
+	//
+	//  <block location><block header><checksum>
+	blockHdrOffset = blockLocSize
+	checksumOffset = blockLocSize + blockHdrSize
+)
+
+var (
+	// byteOrder is the preferred byte order used throughout the database
+	// and block files. Sometimes big endian will be used to allow ordered,
+	// byte-sortable integer values.
+	byteOrder = binary.LittleEndian
+
+	// metadataBucketName is the top-level bucket used for all metadata.
+	metadataBucketName = []byte("metadata")
+
+	// blockIdxBucketName is the bucket used internally to track block
+	// metadata.
+	blockIdxBucketName = []byte("ffboltdb-blockidx")
+
+	// writeLocKeyName is the key used to store the current write file
+	// location.
+	writeLocKeyName = []byte("ffboltdb-writeloc")
+)
+
+// Common error strings.
+var (
+	// errDbNotOpenStr is the text to use for the database.ErrDbNotOpen
+	// error code.
+	errDbNotOpenStr = "database is not open"
+
+	// errTxClosedStr is the text to use for the database.ErrTxClosed error
+	// code.
+	errTxClosedStr = "database tx is closed"
+)
+
+// bulkFetchData allows a block location to be specified along with the
+// index it was requested from. This in turn allows the bulk data loading
+// functions to sort the data accesses based on the location to improve
+// performance while keeping track of which result the data is for.
+type bulkFetchData struct {
+	*blockLocation
+	replyIndex int
+}
+
+// bulkFetchDataSorter implements sort.Interface to allow a slice of
+// bulkFetchData to be sorted. In particular it sorts by file and then
+// offset so that reads from files are grouped and linear.
+type bulkFetchDataSorter []bulkFetchData
+
+// Len returns the number of items in the slice. It is part of the
+// sort.Interface implementation.
+func (s bulkFetchDataSorter) Len() int {
+	return len(s)
+}
+
+// Swap swaps the items at the passed indices. It is part of the
+// sort.Interface implementation.
+func (s bulkFetchDataSorter) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+// Less returns whether the item with index i should sort before the item with
+// index j. It is part of the sort.Interface implementation.
+func (s bulkFetchDataSorter) Less(i, j int) bool {
+	if s[i].blockFileNum < s[j].blockFileNum {
+		return true
+	}
+	if s[i].blockFileNum > s[j].blockFileNum {
+		return false
+	}
+
+	return s[i].fileOffset < s[j].fileOffset
+}
+
+// makeDbErr creates a database.Error given a set of arguments.
+func makeDbErr(c database.ErrorCode, desc string, err error) database.Error {
+	return database.Error{ErrorCode: c, Description: desc, Err: err}
+}
+
+// convertErr converts the passed bolt error into a database error with an
+// equivalent error code and the passed description. It also sets the passed
+// error as the underlying error.
+func convertErr(desc string, boltErr error) database.Error {
+	// Use the driver-specific error code by default. The code below will
+	// update this with the converted bolt error if it's recognized.
+	var code = database.ErrDriverSpecific
+
+	switch boltErr {
+	// Database open/create errors.
+	case bolt.ErrDatabaseNotOpen:
+		code = database.ErrDbNotOpen
+	case bolt.ErrInvalid:
+		code = database.ErrInvalid
+
+	// Transaction errors.
+	case bolt.ErrTxNotWritable:
+		code = database.ErrTxNotWritable
+	case bolt.ErrTxClosed:
+		code = database.ErrTxClosed
+
+	// Value/bucket errors.
+	case bolt.ErrBucketNotFound:
+		code = database.ErrBucketNotFound
+	case bolt.ErrBucketExists:
+		code = database.ErrBucketExists
+	case bolt.ErrBucketNameRequired:
+		code = database.ErrBucketNameRequired
+	case bolt.ErrKeyRequired:
+		code = database.ErrKeyRequired
+	case bolt.ErrKeyTooLarge:
+		code = database.ErrKeyTooLarge
+	case bolt.ErrValueTooLarge:
+		code = database.ErrValueTooLarge
+	case bolt.ErrIncompatibleValue:
+		code = database.ErrIncompatibleValue
+	}
+
+	return database.Error{ErrorCode: code, Description: desc, Err: boltErr}
+}
+
+// cursor is an internal type used to represent a cursor over key/value pairs
+// and nested buckets of a bucket and implements the database.Cursor interface.
+type cursor struct {
+	bucket     *bucket
+	boltCursor *bolt.Cursor
+}
+
+// Enforce cursor implements the database.Cursor interface.
+var _ database.Cursor = (*cursor)(nil)
+
+// Bucket returns the bucket the cursor was created for.
+//
+// This function is part of the database.Cursor interface implementation.
+func (c *cursor) Bucket() database.Bucket {
+	// Ensure transaction state is valid.
+ if err := c.bucket.tx.checkClosed(); err != nil { + return nil + } + + return c.bucket +} + +// Delete removes the current key/value pair the cursor is at without +// invalidating the cursor. +// +// Returns the following errors as required by the interface contract: +// - ErrIncompatibleValue if attempted when the cursor points to a nested +// bucket +// - ErrTxNotWritable if attempted against a read-only transaction +// - ErrTxClosed if the transaction has already been closed +// +// This function is part of the database.Cursor interface implementation. +func (c *cursor) Delete() error { + // Ensure transaction state is valid. + if err := c.bucket.tx.checkClosed(); err != nil { + return err + } + + if err := c.boltCursor.Delete(); err != nil { + str := "failed to delete cursor key" + return convertErr(str, err) + } + + return nil +} + +// First positions the cursor at the first key/value pair and returns the pair. +// +// This function is part of the database.Cursor interface implementation. +func (c *cursor) First() (key, value []byte) { + // Ensure transaction state is valid. + if err := c.bucket.tx.checkClosed(); err != nil { + return nil, nil + } + + return c.boltCursor.First() +} + +// Last positions the cursor at the last key/value pair and returns the pair. +// +// This function is part of the database.Cursor interface implementation. +func (c *cursor) Last() (key, value []byte) { + // Ensure transaction state is valid. + if err := c.bucket.tx.checkClosed(); err != nil { + return nil, nil + } + + return c.boltCursor.Last() +} + +// Next moves the cursor one key/value pair forward and returns the pair. +// +// This function is part of the database.Cursor interface implementation. +func (c *cursor) Next() (key, value []byte) { + // Ensure transaction state is valid. + if err := c.bucket.tx.checkClosed(); err != nil { + return nil, nil + } + + return c.boltCursor.Next() +} + +// Prev moves the cursor one key/value pair backward and returns the pair. +// +// This function is part of the database.Cursor interface implementation. +func (c *cursor) Prev() (key, value []byte) { + // Ensure transaction state is valid. + if err := c.bucket.tx.checkClosed(); err != nil { + return nil, nil + } + + return c.boltCursor.Prev() +} + +// Seek positions the cursor at the passed seek key. When the key does not +// exist, the cursor is moved to the next key after seek. Returns the new pair. +// +// This function is part of the database.Cursor interface implementation. +func (c *cursor) Seek(seek []byte) (key, value []byte) { + // Ensure transaction state is valid. + if err := c.bucket.tx.checkClosed(); err != nil { + return nil, nil + } + + return c.boltCursor.Seek(seek) +} + +// bucket is an internal type used to represent a collection of key/value pairs +// and implements the database.Bucket interface. +type bucket struct { + tx *transaction + boltBucket *bolt.Bucket +} + +// Enforce bucket implements the database.Bucket interface. +var _ database.Bucket = (*bucket)(nil) + +// Bucket retrieves a nested bucket with the given key. Returns nil if +// the bucket does not exist. +// +// This function is part of the database.Bucket interface implementation. +func (b *bucket) Bucket(key []byte) database.Bucket { + // Ensure transaction state is valid. + if err := b.tx.checkClosed(); err != nil { + return nil + } + + // This nil check is intentional so the return value can be checked + // against nil directly. 
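+	// (If the possibly-nil *bolt.Bucket were wrapped unconditionally, the
+	// returned database.Bucket interface value would compare non-nil even
+	// when the underlying pointer is nil, which is a classic Go pitfall.)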
+	boltBucket := b.boltBucket.Bucket(key)
+	if boltBucket == nil {
+		return nil
+	}
+	return &bucket{tx: b.tx, boltBucket: boltBucket}
+}
+
+// CreateBucket creates and returns a new nested bucket with the given key.
+//
+// Returns the following errors as required by the interface contract:
+//  - ErrBucketExists if the bucket already exists
+//  - ErrBucketNameRequired if the key is empty
+//  - ErrIncompatibleValue if the key is otherwise invalid for the particular
+//    implementation
+//  - ErrTxNotWritable if attempted against a read-only transaction
+//  - ErrTxClosed if the transaction has already been closed
+//
+// This function is part of the database.Bucket interface implementation.
+func (b *bucket) CreateBucket(key []byte) (database.Bucket, error) {
+	// Ensure transaction state is valid.
+	if err := b.tx.checkClosed(); err != nil {
+		return nil, err
+	}
+
+	boltBucket, err := b.boltBucket.CreateBucket(key)
+	if err != nil {
+		str := fmt.Sprintf("failed to create bucket with key %q", key)
+		return nil, convertErr(str, err)
+	}
+	return &bucket{tx: b.tx, boltBucket: boltBucket}, nil
+}
+
+// CreateBucketIfNotExists creates and returns a new nested bucket with the
+// given key if it does not already exist.
+//
+// Returns the following errors as required by the interface contract:
+//  - ErrBucketNameRequired if the key is empty
+//  - ErrIncompatibleValue if the key is otherwise invalid for the particular
+//    implementation
+//  - ErrTxNotWritable if attempted against a read-only transaction
+//  - ErrTxClosed if the transaction has already been closed
+//
+// This function is part of the database.Bucket interface implementation.
+func (b *bucket) CreateBucketIfNotExists(key []byte) (database.Bucket, error) {
+	// Ensure transaction state is valid.
+	if err := b.tx.checkClosed(); err != nil {
+		return nil, err
+	}
+
+	boltBucket, err := b.boltBucket.CreateBucketIfNotExists(key)
+	if err != nil {
+		str := fmt.Sprintf("failed to create bucket with key %q", key)
+		return nil, convertErr(str, err)
+	}
+	return &bucket{tx: b.tx, boltBucket: boltBucket}, nil
+}
+
+// DeleteBucket removes a nested bucket with the given key.
+//
+// Returns the following errors as required by the interface contract:
+//  - ErrBucketNotFound if the specified bucket does not exist
+//  - ErrTxNotWritable if attempted against a read-only transaction
+//  - ErrTxClosed if the transaction has already been closed
+//
+// This function is part of the database.Bucket interface implementation.
+func (b *bucket) DeleteBucket(key []byte) error {
+	// Ensure transaction state is valid.
+	if err := b.tx.checkClosed(); err != nil {
+		return err
+	}
+
+	err := b.boltBucket.DeleteBucket(key)
+	if err != nil {
+		str := fmt.Sprintf("failed to delete bucket %q", key)
+		return convertErr(str, err)
+	}
+
+	return nil
+}
+
+// Cursor returns a new cursor, allowing for iteration over the bucket's
+// key/value pairs and nested buckets in forward or backward order.
+//
+// This function is part of the database.Bucket interface implementation.
+func (b *bucket) Cursor() database.Cursor {
+	return &cursor{bucket: b, boltCursor: b.boltBucket.Cursor()}
+}
+
+// ForEach invokes the passed function with every key/value pair in the bucket.
+// This includes nested buckets, in which case the value is nil, but it does not
+// include the key/value pairs within those nested buckets.
+//
+// Returns the following errors as required by the interface contract:
+//  - ErrTxClosed if the transaction has already been closed
+//
+// NOTE: The values returned by this function are only valid during a
+// transaction. Attempting to access them after a transaction has ended will
+// likely result in an access violation.
+//
+// This function is part of the database.Bucket interface implementation.
+func (b *bucket) ForEach(fn func(k, v []byte) error) error {
+	// Ensure transaction state is valid.
+	if err := b.tx.checkClosed(); err != nil {
+		return err
+	}
+
+	// Keep track of whether the caller returned an error so it can be
+	// differentiated from a bolt error which needs to be converted.
+	var callerErr error
+	err := b.boltBucket.ForEach(func(k, v []byte) error {
+		callerErr = fn(k, v)
+		return callerErr
+	})
+	if callerErr != nil {
+		return callerErr
+	}
+	if err != nil {
+		str := "failed while iterating bucket"
+		return convertErr(str, err)
+	}
+
+	return nil
+}
+
+// Writable returns whether or not the bucket is writable.
+//
+// This function is part of the database.Bucket interface implementation.
+func (b *bucket) Writable() bool {
+	return b.tx.writable
+}
+
+// Put saves the specified key/value pair to the bucket. Keys that do not
+// already exist are added and keys that already exist are overwritten.
+//
+// Returns the following errors as required by the interface contract:
+//  - ErrKeyRequired if the key is empty
+//  - ErrIncompatibleValue if the key is the same as an existing bucket
+//  - ErrTxNotWritable if attempted against a read-only transaction
+//  - ErrTxClosed if the transaction has already been closed
+//
+// This function is part of the database.Bucket interface implementation.
+func (b *bucket) Put(key, value []byte) error {
+	// Ensure transaction state is valid.
+	if err := b.tx.checkClosed(); err != nil {
+		return err
+	}
+
+	err := b.boltBucket.Put(key, value)
+	if err != nil {
+		str := fmt.Sprintf("failed to put value for key %q", key)
+		return convertErr(str, err)
+	}
+
+	return nil
+}
+
+// Get returns the value for the given key. Returns nil if the key does
+// not exist in this bucket.
+//
+// NOTE: The value returned by this function is only valid during a
+// transaction. Attempting to access it after a transaction has ended
+// will likely result in an access violation.
+//
+// This function is part of the database.Bucket interface implementation.
+func (b *bucket) Get(key []byte) []byte {
+	// Ensure transaction state is valid.
+	if err := b.tx.checkClosed(); err != nil {
+		return nil
+	}
+
+	return b.boltBucket.Get(key)
+}
+
+// Delete removes the specified key from the bucket. Deleting a key that does
+// not exist does not return an error.
+//
+// Returns the following errors as required by the interface contract:
+//  - ErrKeyRequired if the key is empty
+//  - ErrIncompatibleValue if the key is the same as an existing bucket
+//  - ErrTxNotWritable if attempted against a read-only transaction
+//  - ErrTxClosed if the transaction has already been closed
+//
+// This function is part of the database.Bucket interface implementation.
+func (b *bucket) Delete(key []byte) error {
+	// Ensure transaction state is valid.
+	if err := b.tx.checkClosed(); err != nil {
+		return err
+	}
+
+	err := b.boltBucket.Delete(key)
+	if err != nil {
+		str := fmt.Sprintf("failed to delete key %q", key)
+		return convertErr(str, err)
+	}
+
+	return nil
+}
+
+// pendingBlock houses a block that will be written to disk when the database
+// transaction is committed.
+type pendingBlock struct {
+	hash  *wire.ShaHash
+	bytes []byte
+}
+
+// transaction represents a database transaction. It can either be read-only or
+// read-write and implements the database.Tx interface. The transaction
+// provides a root bucket against which all read and writes occur.
+type transaction struct {
+	managed        bool     // Is the transaction managed?
+	closed         bool     // Is the transaction closed?
+	writable       bool     // Is the transaction writable?
+	db             *db      // DB instance the tx was created from.
+	boltTx         *bolt.Tx // Underlying bolt tx for metadata storage.
+	metaBucket     *bucket  // The metadata bucket in underlying bolt DB.
+	blockIdxBucket *bucket  // The block index bucket.
+
+	// Blocks that need to be stored on commit. The pendingBlocks map is
+	// kept to allow quick lookups of pending data by block hash.
+	pendingBlocks    map[wire.ShaHash]int
+	pendingBlockData []pendingBlock
+}
+
+// Enforce transaction implements the database.Tx interface.
+var _ database.Tx = (*transaction)(nil)
+
+// checkClosed returns an error if the database or transaction is closed.
+func (tx *transaction) checkClosed() error {
+	// The transaction is no longer valid if it has been closed.
+	if tx.closed {
+		return makeDbErr(database.ErrTxClosed, errTxClosedStr, nil)
+	}
+
+	return nil
+}
+
+// Metadata returns the top-most bucket for all metadata storage.
+//
+// This function is part of the database.Tx interface implementation.
+func (tx *transaction) Metadata() database.Bucket {
+	return tx.metaBucket
+}
+
+// hasBlock returns whether or not a block with the given hash exists.
+func (tx *transaction) hasBlock(hash *wire.ShaHash) bool {
+	// Return true if the block is pending to be written on commit since
+	// it exists from the viewpoint of this transaction.
+	if tx.pendingBlocks != nil {
+		if _, exists := tx.pendingBlocks[*hash]; exists {
+			return true
+		}
+	}
+
+	// Bolt is zero-copy so this doesn't incur additional overhead of
+	// loading the entry.
+	return tx.blockIdxBucket.Get(hash[:]) != nil
+}
+
+// StoreBlock stores the provided block into the database. There are no checks
+// to ensure the block connects to a previous block, contains double spends, or
+// any additional functionality such as transaction indexing. It simply stores
+// the block in the database.
+//
+// Returns the following errors as required by the interface contract:
+//  - ErrBlockExists when the block hash already exists
+//  - ErrTxNotWritable if attempted against a read-only transaction
+//  - ErrTxClosed if the transaction has already been closed
+//
+// This function is part of the database.Tx interface implementation.
+func (tx *transaction) StoreBlock(block *btcutil.Block) error {
+	// Ensure transaction state is valid.
+	if err := tx.checkClosed(); err != nil {
+		return err
+	}
+
+	// Ensure the transaction is writable.
+	if !tx.writable {
+		str := "store block requires a writable database transaction"
+		return makeDbErr(database.ErrTxNotWritable, str, nil)
+	}
+
+	blockHash, err := block.Sha()
+	if err != nil {
+		str := "failed to get hash for block"
+		return makeDbErr(database.ErrDriverSpecific, str, err)
+	}
+
+	// Reject the block if it already exists.
+ if tx.pendingBlocks != nil { + if _, exists := tx.pendingBlocks[*blockHash]; exists { + str := fmt.Sprintf("block %s already exists", blockHash) + return makeDbErr(database.ErrBlockExists, str, nil) + } + } + if tx.hasBlock(blockHash) { + str := fmt.Sprintf("block %s already exists", blockHash) + return makeDbErr(database.ErrBlockExists, str, nil) + } + + blockBytes, err := block.Bytes() + if err != nil { + str := fmt.Sprintf("failed to get serialized bytes for block %s", + blockHash) + return makeDbErr(database.ErrDriverSpecific, str, err) + } + + // Add the block to be stored to the list of pending blocks to store + // when the transaction is committed. Also, add it to pending blocks + // map so it is easy to determine the block is pending based on the + // block hash. + if tx.pendingBlocks == nil { + tx.pendingBlocks = make(map[wire.ShaHash]int) + } + tx.pendingBlocks[*blockHash] = len(tx.pendingBlockData) + tx.pendingBlockData = append(tx.pendingBlockData, pendingBlock{ + hash: blockHash, + bytes: blockBytes, + }) + log.Tracef("Added block %s to pending blocks", blockHash) + + return nil +} + +// HasBlock returns whether or not a block with the given hash exists in the +// database. +// +// Returns the following errors as required by the interface contract: +// - ErrTxClosed if the transaction has already been closed +// +// This function is part of the database.Tx interface implementation. +func (tx *transaction) HasBlock(hash *wire.ShaHash) (bool, error) { + // Ensure transaction state is valid. + if err := tx.checkClosed(); err != nil { + return false, err + } + + return tx.hasBlock(hash), nil +} + +// HasBlocks returns whether or not the blocks with the provided hashes +// exist in the database. +// +// Returns the following errors as required by the interface contract: +// - ErrTxClosed if the transaction has already been closed +// +// This function is part of the database.Tx interface implementation. +func (tx *transaction) HasBlocks(hashes []wire.ShaHash) ([]bool, error) { + // Ensure transaction state is valid. + if err := tx.checkClosed(); err != nil { + return nil, err + } + + results := make([]bool, len(hashes)) + for i := range hashes { + results[i] = tx.hasBlock(&hashes[i]) + } + + return results, nil +} + +// fetchBlockRow fetches the metadata stored in the block index for the provided +// hash. It will return ErrBlockNotFound if there is no entry and ErrCorruption +// if the checksum of the entry doesn't match. +func (tx *transaction) fetchBlockRow(hash *wire.ShaHash) ([]byte, error) { + blockRow := tx.blockIdxBucket.Get(hash[:]) + if blockRow == nil { + str := fmt.Sprintf("block %s does not exist", hash) + return nil, makeDbErr(database.ErrBlockNotFound, str, nil) + } + + // Ensure the block row checksum matches. The checksum is at the end. + gotChecksum := crc32.Checksum(blockRow[:checksumOffset], castagnoli) + wantChecksumBytes := blockRow[checksumOffset : checksumOffset+4] + wantChecksum := byteOrder.Uint32(wantChecksumBytes) + if gotChecksum != wantChecksum { + str := fmt.Sprintf("metadata for block %s does not match "+ + "the expected checksum - got %d, want %d", hash, + gotChecksum, wantChecksum) + return nil, makeDbErr(database.ErrCorruption, str, nil) + } + + return blockRow, nil +} + +// FetchBlockHeader returns the raw serialized bytes for the block header +// identified by the given hash. The raw bytes are in the format returned by +// Serialize on a wire.BlockHeader. 
+//
+// Returns the following errors as required by the interface contract:
+// - ErrBlockNotFound if the requested block hash does not exist
+// - ErrTxClosed if the transaction has already been closed
+// - ErrCorruption if the database has somehow become corrupted
+//
+// NOTE: The data returned by this function is only valid during a
+// database transaction.  Attempting to access it after a transaction
+// has ended results in undefined behavior.  This constraint prevents
+// additional data copies and allows support for memory-mapped database
+// implementations.
+//
+// This function is part of the database.Tx interface implementation.
+func (tx *transaction) FetchBlockHeader(hash *wire.ShaHash) ([]byte, error) {
+	// Ensure transaction state is valid.
+	if err := tx.checkClosed(); err != nil {
+		return nil, err
+	}
+
+	// When the block is pending to be written on commit return the bytes
+	// from there.
+	if tx.pendingBlocks != nil {
+		if idx, exists := tx.pendingBlocks[*hash]; exists {
+			blockBytes := tx.pendingBlockData[idx].bytes
+			return blockBytes[0:blockHdrSize:blockHdrSize], nil
+		}
+	}
+
+	// Fetch the block index row and slice off the header.  Notice the use
+	// of the cap on the subslice to prevent the caller from accidentally
+	// appending into the db data.
+	blockRow, err := tx.fetchBlockRow(hash)
+	if err != nil {
+		return nil, err
+	}
+	endOffset := blockLocSize + blockHdrSize
+	return blockRow[blockLocSize:endOffset:endOffset], nil
+}
+
+// FetchBlockHeaders returns the raw serialized bytes for the block headers
+// identified by the given hashes.  The raw bytes are in the format returned by
+// Serialize on a wire.BlockHeader.
+//
+// Returns the following errors as required by the interface contract:
+// - ErrBlockNotFound if the requested block hash does not exist
+// - ErrTxClosed if the transaction has already been closed
+// - ErrCorruption if the database has somehow become corrupted
+//
+// NOTE: The data returned by this function is only valid during a database
+// transaction.  Attempting to access it after a transaction has ended results
+// in undefined behavior.  This constraint prevents additional data copies and
+// allows support for memory-mapped database implementations.
+//
+// This function is part of the database.Tx interface implementation.
+func (tx *transaction) FetchBlockHeaders(hashes []wire.ShaHash) ([][]byte, error) {
+	// Ensure transaction state is valid.
+	if err := tx.checkClosed(); err != nil {
+		return nil, err
+	}
+
+	// NOTE: This could check for the existence of all blocks before
+	// loading any of the headers, which would be faster in the failure
+	// case; however, callers will not typically be calling this function
+	// with invalid values, so optimize for the common case.
+
+	// Load the headers.
+	headers := make([][]byte, len(hashes))
+	for i := range hashes {
+		hash := &hashes[i]
+
+		// When the block is pending to be written on commit return the
+		// bytes from there.
+		if tx.pendingBlocks != nil {
+			if idx, exists := tx.pendingBlocks[*hash]; exists {
+				blkBytes := tx.pendingBlockData[idx].bytes
+				headers[i] = blkBytes[0:blockHdrSize:blockHdrSize]
+				continue
+			}
+		}
+
+		// Fetch the block index row and slice off the header.  Notice
+		// the use of the cap on the subslice to prevent the caller
+		// from accidentally appending into the db data.
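+		//
+		// For example (offsets illustrative), a three-index subslice
+		// such as blockRow[12:92:92] has len == cap == 80, so any
+		// subsequent append by the caller is forced to reallocate
+		// rather than overwrite the checksum bytes that follow the
+		// header in the same underlying array.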
+		blockRow, err := tx.fetchBlockRow(hash)
+		if err != nil {
+			return nil, err
+		}
+		endOffset := blockLocSize + blockHdrSize
+		headers[i] = blockRow[blockLocSize:endOffset:endOffset]
+	}
+
+	return headers, nil
+}
+
+// FetchBlock returns the raw serialized bytes for the block identified by the
+// given hash.  The raw bytes are in the format returned by Serialize on a
+// wire.MsgBlock.
+//
+// Returns the following errors as required by the interface contract:
+// - ErrBlockNotFound if the requested block hash does not exist
+// - ErrTxClosed if the transaction has already been closed
+// - ErrCorruption if the database has somehow become corrupted
+//
+// In addition, returns ErrDriverSpecific if any failures occur when reading
+// the block files.
+//
+// NOTE: The data returned by this function is only valid during a database
+// transaction.  Attempting to access it after a transaction has ended results
+// in undefined behavior.  This constraint prevents additional data copies and
+// allows support for memory-mapped database implementations.
+//
+// This function is part of the database.Tx interface implementation.
+func (tx *transaction) FetchBlock(hash *wire.ShaHash) ([]byte, error) {
+	// Ensure transaction state is valid.
+	if err := tx.checkClosed(); err != nil {
+		return nil, err
+	}
+
+	// When the block is pending to be written on commit return the bytes
+	// from there.
+	if tx.pendingBlocks != nil {
+		if idx, exists := tx.pendingBlocks[*hash]; exists {
+			return tx.pendingBlockData[idx].bytes, nil
+		}
+	}
+
+	// Lookup the location of the block in the files from the block index.
+	blockRow, err := tx.fetchBlockRow(hash)
+	if err != nil {
+		return nil, err
+	}
+	location := deserializeBlockLoc(blockRow)
+
+	// Read the block from the appropriate location.  The function also
+	// performs a checksum over the data to detect data corruption.
+	blockBytes, err := tx.db.store.readBlock(hash, location)
+	if err != nil {
+		return nil, err
+	}
+
+	return blockBytes, nil
+}
+
+// FetchBlocks returns the raw serialized bytes for the blocks identified by
+// the given hashes.  The raw bytes are in the format returned by Serialize on
+// a wire.MsgBlock.
+//
+// Returns the following errors as required by the interface contract:
+// - ErrBlockNotFound if the requested block hash does not exist
+// - ErrTxClosed if the transaction has already been closed
+// - ErrCorruption if the database has somehow become corrupted
+//
+// In addition, returns ErrDriverSpecific if any failures occur when reading
+// the block files.
+//
+// NOTE: The data returned by this function is only valid during a database
+// transaction.  Attempting to access it after a transaction has ended results
+// in undefined behavior.  This constraint prevents additional data copies and
+// allows support for memory-mapped database implementations.
+//
+// This function is part of the database.Tx interface implementation.
+func (tx *transaction) FetchBlocks(hashes []wire.ShaHash) ([][]byte, error) {
+	// Ensure transaction state is valid.
+	if err := tx.checkClosed(); err != nil {
+		return nil, err
+	}
+
+	// NOTE: This could check for the existence of all blocks before
+	// loading any of them, which would be faster in the failure case;
+	// however, callers will not typically be calling this function with
+	// invalid values, so optimize for the common case.
+
+	// Load the blocks.
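+	// Each block is loaded via FetchBlock, so blocks which are still
+	// pending within this transaction are served from memory while the
+	// rest are read from the flat files.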
+	blocks := make([][]byte, len(hashes))
+	for i := range hashes {
+		var err error
+		blocks[i], err = tx.FetchBlock(&hashes[i])
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return blocks, nil
+}
+
+// fetchPendingRegion attempts to fetch the provided region from the blocks
+// which are pending to be written on commit.  It will return nil for the byte
+// slice when the region references a block which is not pending.  When the
+// region does reference a pending block, it is bounds checked and
+// ErrBlockRegionInvalid is returned if the region is invalid.
+func (tx *transaction) fetchPendingRegion(region *database.BlockRegion) ([]byte, error) {
+	// When the block is pending to be written on commit return the bytes
+	// from there.
+	if idx, exists := tx.pendingBlocks[*region.Hash]; exists {
+		// Ensure the region is within the bounds of the block.
+		blockBytes := tx.pendingBlockData[idx].bytes
+		blockLen := uint32(len(blockBytes))
+		endOffset := region.Offset + region.Len
+		if endOffset < region.Offset || endOffset > blockLen {
+			str := fmt.Sprintf("block %s region offset %d, "+
+				"length %d exceeds block length of %d",
+				region.Hash, region.Offset, region.Len,
+				blockLen)
+			return nil, makeDbErr(database.ErrBlockRegionInvalid,
+				str, nil)
+		}
+
+		return blockBytes[region.Offset:endOffset:endOffset], nil
+	}
+
+	return nil, nil
+}
+
+// FetchBlockRegion returns the raw serialized bytes for the given block region.
+//
+// For example, it is possible to directly extract Bitcoin transactions and/or
+// scripts from a block with this function.  Depending on the backend
+// implementation, this can provide significant savings by avoiding the need to
+// load entire blocks.
+//
+// The raw bytes are in the format returned by Serialize on a wire.MsgBlock and
+// the Offset field in the provided BlockRegion is zero-based and relative to
+// the start of the block (byte 0).
+//
+// Returns the following errors as required by the interface contract:
+// - ErrBlockNotFound if the requested block hash does not exist
+// - ErrBlockRegionInvalid if the region exceeds the bounds of the associated
+//   block
+// - ErrTxClosed if the transaction has already been closed
+// - ErrCorruption if the database has somehow become corrupted
+//
+// In addition, returns ErrDriverSpecific if any failures occur when reading
+// the block files.
+//
+// NOTE: The data returned by this function is only valid during a database
+// transaction.  Attempting to access it after a transaction has ended results
+// in undefined behavior.  This constraint prevents additional data copies and
+// allows support for memory-mapped database implementations.
+//
+// This function is part of the database.Tx interface implementation.
+func (tx *transaction) FetchBlockRegion(region *database.BlockRegion) ([]byte, error) {
+	// Ensure transaction state is valid.
+	if err := tx.checkClosed(); err != nil {
+		return nil, err
+	}
+
+	// When the block is pending to be written on commit return the bytes
+	// from there.
+	if tx.pendingBlocks != nil {
+		regionBytes, err := tx.fetchPendingRegion(region)
+		if err != nil {
+			return nil, err
+		}
+		if regionBytes != nil {
+			return regionBytes, nil
+		}
+	}
+
+	// Lookup the location of the block in the files from the block index.
+	blockRow, err := tx.fetchBlockRow(region.Hash)
+	if err != nil {
+		return nil, err
+	}
+	location := deserializeBlockLoc(blockRow)
+
+	// Ensure the region is within the bounds of the block.
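+	// Note that the endOffset < region.Offset comparison below guards
+	// against uint32 overflow: an Offset of 0xfffffff0 with a Len of 0x20
+	// wraps endOffset around to 0x10, which would otherwise pass the
+	// length check.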
+	endOffset := region.Offset + region.Len
+	if endOffset < region.Offset || endOffset > location.blockLen {
+		str := fmt.Sprintf("block %s region offset %d, length %d "+
+			"exceeds block length of %d", region.Hash,
+			region.Offset, region.Len, location.blockLen)
+		return nil, makeDbErr(database.ErrBlockRegionInvalid, str, nil)
+	}
+
+	// Read the region from the appropriate disk block file.
+	regionBytes, err := tx.db.store.readBlockRegion(location, region.Offset,
+		region.Len)
+	if err != nil {
+		return nil, err
+	}
+
+	return regionBytes, nil
+}
+
+// FetchBlockRegions returns the raw serialized bytes for the given block
+// regions.
+//
+// For example, it is possible to directly extract Bitcoin transactions and/or
+// scripts from various blocks with this function.  Depending on the backend
+// implementation, this can provide significant savings by avoiding the need to
+// load entire blocks.
+//
+// The raw bytes are in the format returned by Serialize on a wire.MsgBlock and
+// the Offset fields in the provided BlockRegions are zero-based and relative
+// to the start of the block (byte 0).
+//
+// Returns the following errors as required by the interface contract:
+// - ErrBlockNotFound if the requested block hash does not exist
+// - ErrBlockRegionInvalid if one or more regions exceed the bounds of the
+//   associated block
+// - ErrTxClosed if the transaction has already been closed
+// - ErrCorruption if the database has somehow become corrupted
+//
+// In addition, returns ErrDriverSpecific if any failures occur when reading
+// the block files.
+//
+// NOTE: The data returned by this function is only valid during a database
+// transaction.  Attempting to access it after a transaction has ended results
+// in undefined behavior.  This constraint prevents additional data copies and
+// allows support for memory-mapped database implementations.
+//
+// This function is part of the database.Tx interface implementation.
+func (tx *transaction) FetchBlockRegions(regions []database.BlockRegion) ([][]byte, error) {
+	// Ensure transaction state is valid.
+	if err := tx.checkClosed(); err != nil {
+		return nil, err
+	}
+
+	// NOTE: This could check for the existence of all blocks before
+	// deserializing the locations and building up the fetch list, which
+	// would be faster in the failure case; however, callers will not
+	// typically be calling this function with invalid values, so optimize
+	// for the common case.
+
+	// NOTE: A potential optimization here would be to combine adjacent
+	// regions to reduce the number of reads.
+
+	// In order to improve efficiency of loading the bulk data, first grab
+	// the block location for all of the requested block hashes and sort
+	// the reads by filenum:offset so that all reads are grouped by file
+	// and linear within each file.  This can result in quite a significant
+	// performance increase depending on how spread out the requested
+	// hashes are by reducing the number of file open/closes and random
+	// accesses needed.  The fetchList is intentionally allocated with a
+	// cap because some of the regions might be fetched from the pending
+	// blocks and hence there is no need to fetch those from disk.
+	blockRegions := make([][]byte, len(regions))
+	fetchList := make([]bulkFetchData, 0, len(regions))
+	for i := range regions {
+		region := &regions[i]
+
+		// When the block is pending to be written on commit grab the
+		// bytes from there.
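+		// fetchPendingRegion returns a nil slice and a nil error when
+		// the region's block is not pending, in which case the lookup
+		// falls through to the block index and flat files below.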
+		if tx.pendingBlocks != nil {
+			regionBytes, err := tx.fetchPendingRegion(region)
+			if err != nil {
+				return nil, err
+			}
+			if regionBytes != nil {
+				blockRegions[i] = regionBytes
+				continue
+			}
+		}
+
+		// Lookup the location of the block in the files from the block
+		// index.
+		blockRow, err := tx.fetchBlockRow(region.Hash)
+		if err != nil {
+			return nil, err
+		}
+		location := deserializeBlockLoc(blockRow)
+
+		// Ensure the region is within the bounds of the block.
+		endOffset := region.Offset + region.Len
+		if endOffset < region.Offset || endOffset > location.blockLen {
+			str := fmt.Sprintf("block %s region offset %d, length "+
+				"%d exceeds block length of %d", region.Hash,
+				region.Offset, region.Len, location.blockLen)
+			return nil, makeDbErr(database.ErrBlockRegionInvalid,
+				str, nil)
+		}
+
+		fetchList = append(fetchList, bulkFetchData{&location, i})
+	}
+	sort.Sort(bulkFetchDataSorter(fetchList))
+
+	// Read all of the regions in the fetch list and set the results.
+	for i := range fetchList {
+		fetchData := &fetchList[i]
+		ri := fetchData.replyIndex
+		region := &regions[ri]
+		location := fetchData.blockLocation
+		regionBytes, err := tx.db.store.readBlockRegion(*location,
+			region.Offset, region.Len)
+		if err != nil {
+			return nil, err
+		}
+		blockRegions[ri] = regionBytes
+	}
+
+	return blockRegions, nil
+}
+
+// close marks the transaction closed, releases any pending data, and releases
+// the transaction read lock.
+func (tx *transaction) close() {
+	tx.closed = true
+
+	// Clear pending blocks that would have been written on commit.
+	tx.pendingBlocks = nil
+	tx.pendingBlockData = nil
+
+	tx.db.mtx.RUnlock()
+}
+
+// serializeBlockRow serializes a block row into a format suitable for storage
+// into the block index.
+func serializeBlockRow(blockLoc blockLocation, blockHdr []byte) []byte {
+	// The serialized block index row format is:
+	//
+	//   [0:blockLocSize]                          Block location
+	//   [blockLocSize:blockLocSize+blockHdrSize]  Block header
+	//   [checksumOffset:checksumOffset+4]         Castagnoli CRC-32 checksum
+	serializedRow := make([]byte, blockLocSize+blockHdrSize+4)
+	copy(serializedRow, serializeBlockLoc(blockLoc))
+	copy(serializedRow[blockHdrOffset:], blockHdr)
+	checksum := crc32.Checksum(serializedRow[:checksumOffset], castagnoli)
+	byteOrder.PutUint32(serializedRow[checksumOffset:], checksum)
+	return serializedRow
+}
+
+// writePendingAndCommit writes pending block data to the flat block files,
+// updates the metadata with their locations as well as the new current write
+// location, and commits the metadata to the underlying bolt database.  It also
+// properly handles rollback in the case of failures.
+//
+// This function MUST only be called when there is pending data to be written.
+func (tx *transaction) writePendingAndCommit() error {
+	// Save the current block store write position for potential rollback.
+	// These variables are only updated here and there can only be one
+	// write transaction active at a time, so it's safe to store them for
+	// potential rollback.
+	wc := tx.db.store.writeCursor
+	wc.RLock()
+	oldBlkFileNum := wc.curFileNum
+	oldBlkOffset := wc.curOffset
+	wc.RUnlock()
+
+	// rollback is a closure that is used to roll back all writes to the
+	// block files.  It also optionally rolls back the underlying bolt
+	// transaction.
+	rollback := func(rollbackBolt bool) {
+		// Rollback any modification made to the block files and the
+		// underlying bolt transaction if needed.
+		tx.db.store.handleRollback(oldBlkFileNum, oldBlkOffset)
+		if rollbackBolt {
+			_ = tx.boltTx.Rollback()
+		}
+	}
+
+	// Loop through all of the pending blocks to store and write them.
+	for _, blockData := range tx.pendingBlockData {
+		log.Tracef("Storing block %s", blockData.hash)
+		location, err := tx.db.store.writeBlock(blockData.bytes)
+		if err != nil {
+			rollback(true)
+			return err
+		}
+
+		// Add a record in the block index for the block.  The record
+		// includes the location information needed to locate the block
+		// on the filesystem as well as the block header since they are
+		// so commonly needed.
+		blockHdr := blockData.bytes[0:blockHdrSize]
+		blockRow := serializeBlockRow(location, blockHdr)
+		err = tx.blockIdxBucket.Put(blockData.hash[:], blockRow)
+		if err != nil {
+			rollback(true)
+			return err
+		}
+	}
+
+	// Update the metadata for the current write file and offset.
+	writeRow := serializeWriteRow(wc.curFileNum, wc.curOffset)
+	if err := tx.metaBucket.Put(writeLocKeyName, writeRow); err != nil {
+		rollback(true)
+		return convertErr("failed to store write cursor", err)
+	}
+
+	// Commit metadata updates.
+	if err := tx.boltTx.Commit(); err != nil {
+		rollback(false)
+		return convertErr("failed to commit transaction", err)
+	}
+
+	return nil
+}
+
+// rollback rolls back the underlying bolt transaction and closes this
+// transaction.  It is separated mainly so the code that panics on attempts to
+// commit or rollback a managed transaction can perform the rollback first.
+func (tx *transaction) rollback() error {
+	// Regardless of whether the rollback succeeds, the transaction is
+	// closed on return.
+	defer tx.close()
+
+	if err := tx.boltTx.Rollback(); err != nil {
+		return convertErr("failed to rollback underlying bolt tx", err)
+	}
+
+	return nil
+}
+
+// Commit commits all changes that have been made through the root bucket and
+// all of its sub-buckets to persistent storage.
+//
+// This function is part of the database.Tx interface implementation.
+func (tx *transaction) Commit() error {
+	// Prevent commits on managed transactions.
+	if tx.managed {
+		_ = tx.rollback()
+		panic("managed transaction commit not allowed")
+	}
+
+	// Ensure transaction state is valid.
+	if err := tx.checkClosed(); err != nil {
+		return err
+	}
+
+	// Regardless of whether the commit succeeds, the transaction is closed
+	// on return.  This is done as a defer since some of the committing
+	// code requires the transaction to be open.
+	defer tx.close()
+
+	// Ensure the transaction is writable.
+	if !tx.writable {
+		str := "Commit requires a writable database transaction"
+		return makeDbErr(database.ErrTxNotWritable, str, nil)
+	}
+
+	// When there is no pending block data to be written, just commit the
+	// underlying bolt transaction and exit.
+	if len(tx.pendingBlockData) == 0 {
+		if err := tx.boltTx.Commit(); err != nil {
+			return convertErr("failed to commit transaction", err)
+		}
+
+		return nil
+	}
+
+	// Otherwise, there is pending block data to be written, so write it
+	// along with the necessary metadata.  The function will roll back if
+	// any errors occur.
+	return tx.writePendingAndCommit()
+}
+
+// Rollback undoes all changes that have been made to the root bucket and all
+// of its sub-buckets.
+//
+// This function is part of the database.Tx interface implementation.
+func (tx *transaction) Rollback() error {
+	// Prevent rollbacks on managed transactions.
+	if tx.managed {
+		_ = tx.rollback()
+		panic("managed transaction rollback not allowed")
+	}
+
+	// Ensure transaction state is valid.
+	if err := tx.checkClosed(); err != nil {
+		return err
+	}
+
+	return tx.rollback()
+}
+
+// db represents a collection of namespaces which are persisted and implements
+// the database.DB interface.  All database access is performed through
+// transactions which are obtained from the database.
+type db struct {
+	mtx    sync.RWMutex // Protect concurrent access.
+	closed bool         // Is the database closed?
+	boltDB *bolt.DB     // The underlying bolt DB for metadata.
+	store  *blockStore  // Handles read/writing blocks to flat files.
+}
+
+// Enforce db implements the database.DB interface.
+var _ database.DB = (*db)(nil)
+
+// Type returns the database driver type the current database instance was
+// created with.
+//
+// This function is part of the database.DB interface implementation.
+func (db *db) Type() string {
+	return dbType
+}
+
+// begin is the implementation function for the Begin database method.  See its
+// documentation for more details.
+//
+// This function is only separate because it returns the internal transaction
+// which is used by the managed transaction code while the database method
+// returns the interface.
+func (db *db) begin(writable bool) (*transaction, error) {
+	// Whenever a new transaction is started, grab a read lock against the
+	// database to ensure Close will wait for the transaction to finish.
+	// This lock will not be released until the transaction is closed (via
+	// Rollback or Commit).
+	db.mtx.RLock()
+	if db.closed {
+		db.mtx.RUnlock()
+		return nil, makeDbErr(database.ErrDbNotOpen, errDbNotOpenStr,
+			nil)
+	}
+
+	// Bolt already handles allowing multiple concurrent read transactions
+	// while only allowing a single write transaction, so make use of that
+	// functionality.
+	boltTx, err := db.boltDB.Begin(writable)
+	if err != nil {
+		db.mtx.RUnlock()
+		str := "failed to open transaction"
+		return nil, convertErr(str, err)
+	}
+
+	metaBucket := boltTx.Bucket(metadataBucketName)
+	blockIdxBucket := metaBucket.Bucket(blockIdxBucketName)
+	tx := &transaction{
+		writable:       writable,
+		db:             db,
+		boltTx:         boltTx,
+		metaBucket:     &bucket{boltBucket: metaBucket},
+		blockIdxBucket: &bucket{boltBucket: blockIdxBucket},
+	}
+	tx.metaBucket.tx = tx
+	tx.blockIdxBucket.tx = tx
+	return tx, nil
+}
+
+// Begin starts a transaction which is either read-only or read-write depending
+// on the specified flag.  Multiple read-only transactions can be started
+// simultaneously while only a single read-write transaction can be started at
+// a time.  The call will block when starting a read-write transaction when one
+// is already open.
+//
+// NOTE: The transaction must be closed by calling Rollback or Commit on it
+// when it is no longer needed.  Failure to do so will result in unclaimed
+// memory.
+//
+// This function is part of the database.DB interface implementation.
+func (db *db) Begin(writable bool) (database.Tx, error) {
+	return db.begin(writable)
+}
+
+// rollbackOnPanic rolls the passed transaction back if the code in the calling
+// function panics.  This is needed since the mutex on a transaction must be
+// released and a panic in called code would prevent that from happening.
+//
+// NOTE: This can only be handled manually for managed transactions since they
+// control the life-cycle of the transaction.  As the documentation on Begin
+// calls out, callers opting to use manual transactions will have to ensure the
+// transaction is rolled back on panic if they desire that functionality as
+// well; otherwise, the database will fail to close since the read lock will
+// never be released.
+func rollbackOnPanic(tx *transaction) {
+	if err := recover(); err != nil {
+		tx.managed = false
+		_ = tx.Rollback()
+		panic(err)
+	}
+}
+
+// View invokes the passed function in the context of a managed read-only
+// transaction with the root bucket for the namespace.  Any errors returned
+// from the user-supplied function are returned from this function.
+//
+// This function is part of the database.DB interface implementation.
+func (db *db) View(fn func(database.Tx) error) error {
+	// Start a read-only transaction.
+	tx, err := db.begin(false)
+	if err != nil {
+		return err
+	}
+
+	// Since the user-provided function might panic, ensure the transaction
+	// releases all mutexes and resources.  There is no guarantee the
+	// caller won't use recover and keep going.  Thus, the database must
+	// still be in a usable state on panics due to user issues.
+	defer rollbackOnPanic(tx)
+
+	tx.managed = true
+	err = fn(tx)
+	tx.managed = false
+	if err != nil {
+		// The error is ignored here because nothing was written yet
+		// and regardless of a rollback failure, the tx is closed now
+		// anyways.
+		_ = tx.Rollback()
+		return err
+	}
+
+	return tx.Rollback()
+}
+
+// Update invokes the passed function in the context of a managed read-write
+// transaction with the root bucket for the namespace.  Any errors returned
+// from the user-supplied function will cause the transaction to be rolled back
+// and are returned from this function.  Otherwise, the transaction is
+// committed when the user-supplied function returns a nil error.
+//
+// This function is part of the database.DB interface implementation.
+func (db *db) Update(fn func(database.Tx) error) error {
+	// Start a read-write transaction.
+	tx, err := db.begin(true)
+	if err != nil {
+		return err
+	}
+
+	// Since the user-provided function might panic, ensure the transaction
+	// releases all mutexes and resources.  There is no guarantee the
+	// caller won't use recover and keep going.  Thus, the database must
+	// still be in a usable state on panics due to user issues.
+	defer rollbackOnPanic(tx)
+
+	tx.managed = true
+	err = fn(tx)
+	tx.managed = false
+	if err != nil {
+		// The error is ignored here because nothing was written yet
+		// and regardless of a rollback failure, the tx is closed now
+		// anyways.
+		_ = tx.Rollback()
+		return err
+	}
+
+	return tx.Commit()
+}
+
+// Close cleanly shuts down the database and syncs all data.  Any data in
+// database transactions which have not been committed will be lost, so it is
+// important to ensure all transactions are finalized prior to calling this
+// function if that data is intended to be stored.
+//
+// This function is part of the database.DB interface implementation.
+func (db *db) Close() error {
+	// Since all transactions have a read lock on this mutex, this will
+	// cause Close to wait for all readers to complete.
+	db.mtx.Lock()
+	defer db.mtx.Unlock()
+
+	if db.closed {
+		return makeDbErr(database.ErrDbNotOpen, errDbNotOpenStr, nil)
+	}
+	db.closed = true
+
+	// NOTE: Since the above lock waits for all transactions to finish and
+	// prevents any new ones from being started, it is safe to clear all
+	// state without the individual locks.
+
+	// Close any open flat files that house the blocks.
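+	// The file currently open for writing is tracked by the write cursor
+	// separately from the cache of open read-only files, so both are
+	// closed here.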
+	wc := db.store.writeCursor
+	if wc.curFile.file != nil {
+		_ = wc.curFile.file.Close()
+		wc.curFile.file = nil
+	}
+	for _, blockFile := range db.store.openBlockFiles {
+		_ = blockFile.file.Close()
+	}
+	db.store.openBlockFiles = nil
+	db.store.openBlocksLRU.Init()
+	db.store.fileNumToLRUElem = nil
+
+	if err := db.boltDB.Close(); err != nil {
+		str := "failed to close underlying bolt database"
+		return convertErr(str, err)
+	}
+
+	return nil
+}
+
+// fileExists reports whether the named file or directory exists.
+func fileExists(name string) bool {
+	if _, err := os.Stat(name); err != nil {
+		if os.IsNotExist(err) {
+			return false
+		}
+	}
+	return true
+}
+
+// initBoltDB creates the initial buckets and values used by the package.  This
+// is mainly in a separate function for testing purposes.
+func initBoltDB(boltDB *bolt.DB) error {
+	err := boltDB.Update(func(tx *bolt.Tx) error {
+		// All metadata is housed in the metadata bucket at the
+		// root of the database.
+		metaBucket, err := tx.CreateBucket(metadataBucketName)
+		if err != nil {
+			return err
+		}
+
+		// Create the internal block index bucket.
+		_, err = metaBucket.CreateBucket(blockIdxBucketName)
+		if err != nil {
+			return err
+		}
+
+		// The starting block file write cursor location is file num 0,
+		// offset 0.
+		return metaBucket.Put(writeLocKeyName, serializeWriteRow(0, 0))
+	})
+	if err != nil {
+		str := fmt.Sprintf("failed to initialize metadata database: %v",
+			err)
+		return convertErr(str, err)
+	}
+
+	return nil
+}
+
+// openDB opens the database at the provided path.  database.ErrDbDoesNotExist
+// is returned if the database doesn't exist and the create flag is not set.
+func openDB(dbPath string, network wire.BitcoinNet, create bool) (database.DB, error) {
+	// Set the logger to the database's logger.
+	log = database.GetLog()
+
+	// Error if the database doesn't exist and the create flag is not set.
+	metadataDbPath := filepath.Join(dbPath, metadataDbName)
+	dbExists := fileExists(metadataDbPath)
+	if !create && !dbExists {
+		str := fmt.Sprintf("database %q does not exist", metadataDbPath)
+		return nil, makeDbErr(database.ErrDbDoesNotExist, str, nil)
+	}
+
+	// Ensure the full path to the database exists.
+	if !dbExists {
+		// The error can be ignored here since the call to bolt.Open
+		// will fail if the directory couldn't be created.
+		_ = os.MkdirAll(dbPath, 0700)
+	}
+
+	// Open the bolt metadata database (will create it if needed).
+	boltDB, err := bolt.Open(metadataDbPath, 0600, nil)
+	if err != nil {
+		return nil, convertErr(err.Error(), err)
+	}
+
+	// Create the block store which includes scanning the existing flat
+	// block files to find what the current write cursor position is
+	// according to the data that is actually on disk.
+	store := newBlockStore(dbPath, network)
+	pdb := &db{boltDB: boltDB, store: store}
+
+	// Perform any reconciliation needed between the block and metadata as
+	// well as bolt database initialization, if needed.
+	return reconcileDB(pdb, create)
+}
diff --git a/database2/ffboltdb/doc.go b/database2/ffboltdb/doc.go
new file mode 100644
index 0000000000..ebd1174c27
--- /dev/null
+++ b/database2/ffboltdb/doc.go
@@ -0,0 +1,30 @@
+// Copyright (c) 2015 Conformal Systems LLC.
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+/*
+Package ffboltdb implements a driver for the database package that uses boltdb
+for the backing metadata and flat files for block storage.
+
+This driver is the recommended driver for use with btcd.  It has a strong focus
+on speed, efficiency, and robustness.  It makes use of zero-copy memory mapping
+for the metadata, flat files for block storage, and checksums in key areas to
+ensure data integrity.
+
+Usage
+
+This package is a driver to the database package and provides the database type
+of "ffboltdb".  The parameters the Open and Create functions take are the
+database path as a string and the block network:
+
+	db, err := database.Open("ffboltdb", "path/to/database", wire.MainNet)
+	if err != nil {
+		// Handle error
+	}
+
+	db, err := database.Create("ffboltdb", "path/to/database", wire.MainNet)
+	if err != nil {
+		// Handle error
+	}
+*/
+package ffboltdb
diff --git a/database2/ffboltdb/driver.go b/database2/ffboltdb/driver.go
new file mode 100644
index 0000000000..4a0acbb250
--- /dev/null
+++ b/database2/ffboltdb/driver.go
@@ -0,0 +1,77 @@
+// Copyright (c) 2015 Conformal Systems LLC.
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package ffboltdb
+
+import (
+	"fmt"
+
+	"github.com/btcsuite/btcd/database2"
+	"github.com/btcsuite/btcd/wire"
+	"github.com/btcsuite/btclog"
+)
+
+var log = btclog.Disabled
+
+const (
+	dbType = "ffboltdb"
+)
+
+// parseArgs parses the arguments from the database Open/Create methods.
+func parseArgs(funcName string, args ...interface{}) (string, wire.BitcoinNet, error) {
+	if len(args) != 2 {
+		return "", 0, fmt.Errorf("invalid arguments to %s.%s -- "+
+			"expected database path and block network", dbType,
+			funcName)
+	}
+
+	dbPath, ok := args[0].(string)
+	if !ok {
+		return "", 0, fmt.Errorf("first argument to %s.%s is invalid -- "+
+			"expected database path string", dbType, funcName)
+	}
+
+	network, ok := args[1].(wire.BitcoinNet)
+	if !ok {
+		return "", 0, fmt.Errorf("second argument to %s.%s is invalid -- "+
+			"expected block network", dbType, funcName)
+	}
+
+	return dbPath, network, nil
+}
+
+// openDBDriver is the callback provided during driver registration that opens
+// an existing database for use.
+func openDBDriver(args ...interface{}) (database.DB, error) {
+	dbPath, network, err := parseArgs("Open", args...)
+	if err != nil {
+		return nil, err
+	}
+
+	return openDB(dbPath, network, false)
+}
+
+// createDBDriver is the callback provided during driver registration that
+// creates, initializes, and opens a database for use.
+func createDBDriver(args ...interface{}) (database.DB, error) {
+	dbPath, network, err := parseArgs("Create", args...)
+	if err != nil {
+		return nil, err
+	}
+
+	return openDB(dbPath, network, true)
+}
+
+func init() {
+	// Register the driver.
+	driver := database.Driver{
+		DbType: dbType,
+		Create: createDBDriver,
+		Open:   openDBDriver,
+	}
+	if err := database.RegisterDriver(driver); err != nil {
+		panic(fmt.Sprintf("Failed to register database driver '%s': %v",
+			dbType, err))
+	}
+}
diff --git a/database2/ffboltdb/driver_test.go b/database2/ffboltdb/driver_test.go
new file mode 100644
index 0000000000..32a861fcd9
--- /dev/null
+++ b/database2/ffboltdb/driver_test.go
@@ -0,0 +1,288 @@
+// Copyright (c) 2015 Conformal Systems LLC.
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package ffboltdb_test
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"reflect"
+	"runtime"
+	"testing"
+
+	"github.com/btcsuite/btcd/chaincfg"
+	"github.com/btcsuite/btcd/database2"
+	"github.com/btcsuite/btcd/database2/ffboltdb"
+	"github.com/btcsuite/btcutil"
+)
+
+// dbType is the database type name for this driver.
+const dbType = "ffboltdb" + +// TestCreateOpenFail ensures that errors related to creating and opening a +// database are handled properly. +func TestCreateOpenFail(t *testing.T) { + t.Parallel() + + // Ensure that attempting to open a database that doesn't exist returns + // the expected error. + wantErrCode := database.ErrDbDoesNotExist + _, err := database.Open(dbType, "noexist", blockDataNet) + if !checkDbError(t, "Open", err, wantErrCode) { + return + } + + // Ensure that attempting to open a database with the wrong number of + // parameters returns the expected error. + wantErr := fmt.Errorf("invalid arguments to %s.Open -- expected "+ + "database path and block network", dbType) + _, err = database.Open(dbType, 1, 2, 3) + if err.Error() != wantErr.Error() { + t.Errorf("Open: did not receive expected error - got %v, "+ + "want %v", err, wantErr) + return + } + + // Ensure that attempting to open a database with an invalid type for + // the first parameter returns the expected error. + wantErr = fmt.Errorf("first argument to %s.Open is invalid -- "+ + "expected database path string", dbType) + _, err = database.Open(dbType, 1, blockDataNet) + if err.Error() != wantErr.Error() { + t.Errorf("Open: did not receive expected error - got %v, "+ + "want %v", err, wantErr) + return + } + + // Ensure that attempting to open a database with an invalid type for + // the second parameter returns the expected error. + wantErr = fmt.Errorf("second argument to %s.Open is invalid -- "+ + "expected block network", dbType) + _, err = database.Open(dbType, "noexist", "invalid") + if err.Error() != wantErr.Error() { + t.Errorf("Open: did not receive expected error - got %v, "+ + "want %v", err, wantErr) + return + } + + // Ensure that attempting to create a database with the wrong number of + // parameters returns the expected error. + wantErr = fmt.Errorf("invalid arguments to %s.Create -- expected "+ + "database path and block network", dbType) + _, err = database.Create(dbType, 1, 2, 3) + if err.Error() != wantErr.Error() { + t.Errorf("Create: did not receive expected error - got %v, "+ + "want %v", err, wantErr) + return + } + + // Ensure that attempting to create a database with an invalid type for + // the first parameter returns the expected error. + wantErr = fmt.Errorf("first argument to %s.Create is invalid -- "+ + "expected database path string", dbType) + _, err = database.Create(dbType, 1, blockDataNet) + if err.Error() != wantErr.Error() { + t.Errorf("Create: did not receive expected error - got %v, "+ + "want %v", err, wantErr) + return + } + + // Ensure that attempting to create a database with an invalid type for + // the second parameter returns the expected error. + wantErr = fmt.Errorf("second argument to %s.Create is invalid -- "+ + "expected block network", dbType) + _, err = database.Create(dbType, "noexist", "invalid") + if err.Error() != wantErr.Error() { + t.Errorf("Create: did not receive expected error - got %v, "+ + "want %v", err, wantErr) + return + } + + // Ensure operations against a closed database return the expected + // error. 
+	dbPath := filepath.Join(os.TempDir(), "ffboltdb-createfail")
+	_ = os.RemoveAll(dbPath)
+	db, err := database.Create(dbType, dbPath, blockDataNet)
+	if err != nil {
+		t.Errorf("Create: unexpected error: %v", err)
+		return
+	}
+	defer os.RemoveAll(dbPath)
+	db.Close()
+
+	wantErrCode = database.ErrDbNotOpen
+	err = db.View(func(tx database.Tx) error {
+		return nil
+	})
+	if !checkDbError(t, "View", err, wantErrCode) {
+		return
+	}
+
+	wantErrCode = database.ErrDbNotOpen
+	err = db.Update(func(tx database.Tx) error {
+		return nil
+	})
+	if !checkDbError(t, "Update", err, wantErrCode) {
+		return
+	}
+
+	wantErrCode = database.ErrDbNotOpen
+	_, err = db.Begin(false)
+	if !checkDbError(t, "Begin(false)", err, wantErrCode) {
+		return
+	}
+
+	wantErrCode = database.ErrDbNotOpen
+	_, err = db.Begin(true)
+	if !checkDbError(t, "Begin(true)", err, wantErrCode) {
+		return
+	}
+
+	wantErrCode = database.ErrDbNotOpen
+	err = db.Close()
+	if !checkDbError(t, "Close", err, wantErrCode) {
+		return
+	}
+}
+
+// TestPersistence ensures that values stored are still valid after closing and
+// reopening the database.
+func TestPersistence(t *testing.T) {
+	t.Parallel()
+
+	// Create a new database to run tests against.
+	dbPath := filepath.Join(os.TempDir(), "ffboltdb-persistencetest")
+	_ = os.RemoveAll(dbPath)
+	db, err := database.Create(dbType, dbPath, blockDataNet)
+	if err != nil {
+		t.Errorf("Failed to create test database (%s) %v", dbType, err)
+		return
+	}
+	defer os.RemoveAll(dbPath)
+	defer db.Close()
+
+	// Create a bucket, put some values into it, and store a block so they
+	// can be tested for existence on re-open.
+	bucket1Key := []byte("bucket1")
+	storeValues := map[string]string{
+		"b1key1": "foo1",
+		"b1key2": "foo2",
+		"b1key3": "foo3",
+	}
+	genesisBlock := btcutil.NewBlock(chaincfg.MainNetParams.GenesisBlock)
+	genesisHash := chaincfg.MainNetParams.GenesisHash
+	err = db.Update(func(tx database.Tx) error {
+		metadataBucket := tx.Metadata()
+		if metadataBucket == nil {
+			return fmt.Errorf("Metadata: unexpected nil bucket")
+		}
+
+		bucket1, err := metadataBucket.CreateBucket(bucket1Key)
+		if err != nil {
+			return fmt.Errorf("CreateBucket: unexpected error: %v",
+				err)
+		}
+
+		for k, v := range storeValues {
+			err := bucket1.Put([]byte(k), []byte(v))
+			if err != nil {
+				return fmt.Errorf("Put: unexpected error: %v",
+					err)
+			}
+		}
+
+		if err := tx.StoreBlock(genesisBlock); err != nil {
+			return fmt.Errorf("StoreBlock: unexpected error: %v",
+				err)
+		}
+
+		return nil
+	})
+	if err != nil {
+		t.Errorf("Update: unexpected error: %v", err)
+		return
+	}
+
+	// Close and reopen the database to ensure the values persist.
+	db.Close()
+	db, err = database.Open(dbType, dbPath, blockDataNet)
+	if err != nil {
+		t.Errorf("Failed to open test database (%s) %v", dbType, err)
+		return
+	}
+	defer db.Close()
+
+	// Ensure the values previously stored in the metadata bucket still
+	// exist and are correct.
+	err = db.View(func(tx database.Tx) error {
+		metadataBucket := tx.Metadata()
+		if metadataBucket == nil {
+			return fmt.Errorf("Metadata: unexpected nil bucket")
+		}
+
+		bucket1 := metadataBucket.Bucket(bucket1Key)
+		if bucket1 == nil {
+			return fmt.Errorf("Bucket1: unexpected nil bucket")
+		}
+
+		for k, v := range storeValues {
+			gotVal := bucket1.Get([]byte(k))
+			if !reflect.DeepEqual(gotVal, []byte(v)) {
+				return fmt.Errorf("Get: key '%s' does not "+
+					"match expected value - got %s, want %s",
+					k, gotVal, v)
+			}
+		}
+
+		genesisBlockBytes, _ := genesisBlock.Bytes()
+		gotBytes, err := tx.FetchBlock(genesisHash)
+		if err != nil {
+			return fmt.Errorf("FetchBlock: unexpected error: %v",
+				err)
+		}
+		if !reflect.DeepEqual(gotBytes, genesisBlockBytes) {
+			return fmt.Errorf("FetchBlock: stored block mismatch")
+		}
+
+		return nil
+	})
+	if err != nil {
+		t.Errorf("View: unexpected error: %v", err)
+		return
+	}
+}
+
+// TestInterface performs all interface tests for this database driver.
+func TestInterface(t *testing.T) {
+	t.Parallel()
+
+	// Create a new database to run tests against.
+	dbPath := filepath.Join(os.TempDir(), "ffboltdb-interfacetest")
+	_ = os.RemoveAll(dbPath)
+	db, err := database.Create(dbType, dbPath, blockDataNet)
+	if err != nil {
+		t.Errorf("Failed to create test database (%s) %v", dbType, err)
+		return
+	}
+	defer os.RemoveAll(dbPath)
+	defer db.Close()
+
+	// Ensure the driver type is the expected value.
+	gotDbType := db.Type()
+	if gotDbType != dbType {
+		t.Errorf("Type: unexpected driver type - got %v, want %v",
+			gotDbType, dbType)
+		return
+	}
+
+	// Run all of the interface tests against the database.
+	runtime.GOMAXPROCS(runtime.NumCPU())
+
+	// Change the maximum file size to a small value to force multiple flat
+	// files with the test data set.
+	ffboltdb.TstRunWithMaxBlockFileSize(db, 2048, func() {
+		testInterface(t, db)
+	})
+}
diff --git a/database2/ffboltdb/export_test.go b/database2/ffboltdb/export_test.go
new file mode 100644
index 0000000000..c688c68cfb
--- /dev/null
+++ b/database2/ffboltdb/export_test.go
@@ -0,0 +1,26 @@
+// Copyright (c) 2015 Conformal Systems LLC.
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+/*
+This test file is part of the ffboltdb package rather than the ffboltdb_test
+package so it can bridge access to the internals to properly test cases which
+are either not possible or can't reliably be tested via the public interface.
+The functions are only exported while the tests are being run.
+*/
+
+package ffboltdb
+
+import "github.com/btcsuite/btcd/database2"
+
+// TstRunWithMaxBlockFileSize runs the passed function with the maximum allowed
+// file size for the database set to the provided value.  The value will be set
+// back to the original value upon completion.
+func TstRunWithMaxBlockFileSize(idb database.DB, size uint32, fn func()) {
+	ffboltdb := idb.(*db)
+	origSize := ffboltdb.store.maxBlockFileSize
+
+	ffboltdb.store.maxBlockFileSize = size
+	fn()
+	ffboltdb.store.maxBlockFileSize = origSize
+}
diff --git a/database2/ffboltdb/interface_test.go b/database2/ffboltdb/interface_test.go
new file mode 100644
index 0000000000..e42ffd6eb4
--- /dev/null
+++ b/database2/ffboltdb/interface_test.go
@@ -0,0 +1,2283 @@
+// Copyright (c) 2015 Conformal Systems LLC.
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+// This file is intended to be copied into each backend driver directory.  Each
+// driver should have its own driver_test.go file which creates a database and
+// invokes the testInterface function in this file to ensure the driver
+// properly implements the interface.
+//
+// NOTE: When copying this file into the backend driver folder, the package
+// name will need to be changed accordingly.
+
+package ffboltdb_test
+
+import (
+	"bytes"
+	"compress/bzip2"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"reflect"
+	"sync/atomic"
+	"testing"
+	"time"
+
+	"github.com/btcsuite/btcd/chaincfg"
+	"github.com/btcsuite/btcd/database2"
+	"github.com/btcsuite/btcd/wire"
+	"github.com/btcsuite/btcutil"
+)
+
+var (
+	// blockDataNet is the expected network in the test block data.
+	blockDataNet = wire.MainNet
+
+	// blockDataFile is the path to a file containing the first 256 blocks
+	// of the block chain.
+	blockDataFile = filepath.Join("..", "testdata", "blocks1-256.bz2")
+
+	// errSubTestFail is used to signal that a sub test returned false.
+	errSubTestFail = fmt.Errorf("sub test failure")
+)
+
+// loadBlocks loads the blocks contained in the testdata directory and returns
+// a slice of them.
+func loadBlocks(t *testing.T, dataFile string, network wire.BitcoinNet) ([]*btcutil.Block, error) {
+	// Open the file that contains the blocks for reading.
+	fi, err := os.Open(dataFile)
+	if err != nil {
+		t.Errorf("failed to open file %v, err %v", dataFile, err)
+		return nil, err
+	}
+	defer func() {
+		if err := fi.Close(); err != nil {
+			t.Errorf("failed to close file %v %v", dataFile,
+				err)
+		}
+	}()
+	dr := bzip2.NewReader(fi)
+
+	// Set the first block as the genesis block.
+	blocks := make([]*btcutil.Block, 0, 256)
+	genesis := btcutil.NewBlock(chaincfg.MainNetParams.GenesisBlock)
+	blocks = append(blocks, genesis)
+
+	// Load the remaining blocks.
+	for height := 1; ; height++ {
+		var net uint32
+		err := binary.Read(dr, binary.LittleEndian, &net)
+		if err == io.EOF {
+			// Hit end of file at the expected offset.  No error.
+			break
+		}
+		if err != nil {
+			t.Errorf("Failed to load network type for block %d: %v",
+				height, err)
+			return nil, err
+		}
+		if net != uint32(network) {
+			t.Errorf("Block doesn't match network: %v expects %v",
+				net, network)
+			return nil, fmt.Errorf("block network mismatch")
+		}
+
+		var blockLen uint32
+		err = binary.Read(dr, binary.LittleEndian, &blockLen)
+		if err != nil {
+			t.Errorf("Failed to load block size for block %d: %v",
+				height, err)
+			return nil, err
+		}
+
+		// Read the block.
+		blockBytes := make([]byte, blockLen)
+		_, err = io.ReadFull(dr, blockBytes)
+		if err != nil {
+			t.Errorf("Failed to load block %d: %v", height, err)
+			return nil, err
+		}
+
+		// Deserialize and store the block.
+		block, err := btcutil.NewBlockFromBytes(blockBytes)
+		if err != nil {
+			t.Errorf("Failed to parse block %v: %v", height, err)
+			return nil, err
+		}
+		blocks = append(blocks, block)
+	}
+
+	return blocks, nil
+}
+
+// checkDbError ensures the passed error is a database.Error with an error code
+// that matches the passed error code.
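+//
+// For example (pattern used throughout these tests):
+//
+//	if !checkDbError(t, "Open", err, database.ErrDbDoesNotExist) {
+//		return
+//	}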
+func checkDbError(t *testing.T, testName string, gotErr error, wantErrCode database.ErrorCode) bool {
+	dbErr, ok := gotErr.(database.Error)
+	if !ok {
+		t.Errorf("%s: unexpected error type - got %T, want %T",
+			testName, gotErr, database.Error{})
+		return false
+	}
+	if dbErr.ErrorCode != wantErrCode {
+		t.Errorf("%s: unexpected error code - got %s (%s), want %s",
+			testName, dbErr.ErrorCode, dbErr.Description,
+			wantErrCode)
+		return false
+	}
+
+	return true
+}
+
+// testContext is used to store context information about a running test which
+// is passed into helper functions.
+type testContext struct {
+	t           *testing.T
+	db          database.DB
+	bucketDepth int
+	isWritable  bool
+	blocks      []*btcutil.Block
+}
+
+// keyPair houses a key/value pair.  It is used over maps so ordering can be
+// maintained.
+type keyPair struct {
+	key   string
+	value string
+}
+
+// lookupKey is a convenience method to look up the requested key from the
+// provided keypair slice along with whether or not the key was found.
+func lookupKey(key string, values []keyPair) (string, bool) {
+	for _, item := range values {
+		if item.key == key {
+			return item.value, true
+		}
+	}
+
+	return "", false
+}
+
+// rollbackValues returns a copy of the provided keypairs with all values set
+// to an empty string.  This is used to test that values are properly rolled
+// back.
+func rollbackValues(values []keyPair) []keyPair {
+	ret := make([]keyPair, len(values))
+	copy(ret, values)
+	for i := range ret {
+		ret[i].value = ""
+	}
+	return ret
+}
+
+// testCursorKeyPair checks that the provided key and value match the expected
+// keypair at the provided index.  It also ensures the index is in range for
+// the provided slice of expected keypairs.
+func testCursorKeyPair(tc *testContext, k, v []byte, index int, values []keyPair) bool {
+	if index >= len(values) || index < 0 {
+		tc.t.Errorf("Cursor: exceeded the expected range of values - "+
+			"index %d, num values %d", index, len(values))
+		return false
+	}
+
+	pair := &values[index]
+	kString := string(k)
+	if kString != pair.key {
+		tc.t.Errorf("Mismatched cursor key: index %d does not match "+
+			"the expected key - got %q, want %q", index, kString,
+			pair.key)
+		return false
+	}
+	vString := string(v)
+	if vString != pair.value {
+		tc.t.Errorf("Mismatched cursor value: index %d does not match "+
+			"the expected value - got %q, want %q", index,
+			vString, pair.value)
+		return false
+	}
+
+	return true
+}
+
+// testGetValues checks that all of the provided key/value pairs can be
+// retrieved from the database and the retrieved values match the provided
+// values.
+func testGetValues(tc *testContext, bucket database.Bucket, values []keyPair) bool {
+	for _, item := range values {
+		var vBytes []byte
+		if item.value != "" {
+			vBytes = []byte(item.value)
+		}
+
+		gotValue := bucket.Get([]byte(item.key))
+		if !reflect.DeepEqual(gotValue, vBytes) {
+			tc.t.Errorf("Get: unexpected value - got %s, want %s",
+				gotValue, vBytes)
+			return false
+		}
+	}
+
+	return true
+}
+
+// testPutValues stores all of the provided key/value pairs in the provided
+// bucket while checking for errors.
+func testPutValues(tc *testContext, bucket database.Bucket, values []keyPair) bool {
+	for _, item := range values {
+		var vBytes []byte
+		if item.value != "" {
+			vBytes = []byte(item.value)
+		}
+		if err := bucket.Put([]byte(item.key), vBytes); err != nil {
+			tc.t.Errorf("Put: unexpected error: %v", err)
+			return false
+		}
+	}
+
+	return true
+}
+
+// testDeleteValues removes all of the provided key/value pairs from the
+// provided bucket.
+func testDeleteValues(tc *testContext, bucket database.Bucket, values []keyPair) bool {
+	for _, item := range values {
+		if err := bucket.Delete([]byte(item.key)); err != nil {
+			tc.t.Errorf("Delete: unexpected error: %v", err)
+			return false
+		}
+	}
+
+	return true
+}
+
+// testCursorInterface ensures the cursor interface is working properly by
+// exercising all of its functions on the passed bucket.
+func testCursorInterface(tc *testContext, bucket database.Bucket) bool {
+	// Ensure a cursor can be obtained for the bucket.
+	cursor := bucket.Cursor()
+	if cursor == nil {
+		tc.t.Error("Bucket.Cursor: unexpected nil cursor returned")
+		return false
+	}
+
+	// Ensure the cursor returns the same bucket it was created for.
+	if cursor.Bucket() != bucket {
+		tc.t.Error("Cursor.Bucket: does not match the bucket it was " +
+			"created for")
+		return false
+	}
+
+	if tc.isWritable {
+		unsortedValues := []keyPair{
+			{"cursor", "val1"},
+			{"abcd", "val1"},
+			{"bcd", "val1"},
+		}
+		sortedValues := []keyPair{
+			{"abcd", "val1"},
+			{"bcd", "val1"},
+			{"cursor", "val1"},
+		}
+
+		// Store the values to be used in the cursor tests in unsorted
+		// order and ensure they were actually stored.
+		if !testPutValues(tc, bucket, unsortedValues) {
+			return false
+		}
+		if !testGetValues(tc, bucket, unsortedValues) {
+			return false
+		}
+
+		// Ensure the cursor returns all items in byte-sorted order
+		// when iterating forward.
+		curIdx := 0
+		for k, v := cursor.First(); k != nil; k, v = cursor.Next() {
+			if !testCursorKeyPair(tc, k, v, curIdx, sortedValues) {
+				return false
+			}
+			curIdx++
+		}
+		if curIdx != len(unsortedValues) {
+			tc.t.Errorf("Cursor: expected to iterate %d values, "+
+				"but only iterated %d", len(unsortedValues),
+				curIdx)
+			return false
+		}
+
+		// Ensure the cursor returns all items in reverse byte-sorted
+		// order when iterating in reverse.
+		curIdx = len(sortedValues) - 1
+		for k, v := cursor.Last(); k != nil; k, v = cursor.Prev() {
+			if !testCursorKeyPair(tc, k, v, curIdx, sortedValues) {
+				return false
+			}
+			curIdx--
+		}
+		if curIdx > -1 {
+			tc.t.Errorf("Reverse cursor: expected to iterate %d "+
+				"values, but only iterated %d",
+				len(sortedValues), len(sortedValues)-(curIdx+1))
+			return false
+		}
+
+		// Ensure forward iteration works as expected after seeking.
+		middleIdx := (len(sortedValues) - 1) / 2
+		seekKey := []byte(sortedValues[middleIdx].key)
+		curIdx = middleIdx
+		for k, v := cursor.Seek(seekKey); k != nil; k, v = cursor.Next() {
+			if !testCursorKeyPair(tc, k, v, curIdx, sortedValues) {
+				return false
+			}
+			curIdx++
+		}
+		if curIdx != len(sortedValues) {
+			tc.t.Errorf("Cursor after seek: expected to iterate "+
+				"%d values, but only iterated %d",
+				len(sortedValues)-middleIdx, curIdx-middleIdx)
+			return false
+		}
+
+		// Ensure reverse iteration works as expected after seeking.
+		curIdx = middleIdx
+		for k, v := cursor.Seek(seekKey); k != nil; k, v = cursor.Prev() {
+			if !testCursorKeyPair(tc, k, v, curIdx, sortedValues) {
+				return false
+			}
+			curIdx--
+		}
+		if curIdx > -1 {
+			tc.t.Errorf("Reverse cursor after seek: expected to "+
+				"iterate %d values, but only iterated %d",
+				len(sortedValues)-middleIdx, middleIdx-curIdx)
+			return false
+		}
+
+		// Ensure the cursor deletes items properly.
+		k, _ := cursor.First()
+		if err := cursor.Delete(); err != nil {
+			tc.t.Errorf("Cursor.Delete: unexpected error: %v", err)
+			return false
+		}
+		if val := bucket.Get(k); val != nil {
+			tc.t.Errorf("Cursor.Delete: value for key %q was not "+
+				"deleted", k)
+			return false
+		}
+	}
+
+	return true
+}
+
+// testNestedBucket reruns the testBucketInterface against a nested bucket
+// along with a counter to only test a couple of levels deep.
+func testNestedBucket(tc *testContext, testBucket database.Bucket) bool {
+	// Don't go more than 2 nested levels deep.
+	if tc.bucketDepth > 1 {
+		return true
+	}
+
+	tc.bucketDepth++
+	defer func() {
+		tc.bucketDepth--
+	}()
+	if !testBucketInterface(tc, testBucket) {
+		return false
+	}
+
+	return true
+}
+
+// testBucketInterface ensures the bucket interface is working properly by
+// exercising all of its functions.  This includes the cursor interface for
+// the cursor returned from the bucket.
+func testBucketInterface(tc *testContext, bucket database.Bucket) bool {
+	if bucket.Writable() != tc.isWritable {
+		tc.t.Errorf("Bucket writable state does not match.")
+		return false
+	}
+
+	if tc.isWritable {
+		// keyValues holds the keys and values to use when putting
+		// values into the bucket.
+		var keyValues = []keyPair{
+			{"bucketkey1", "foo1"},
+			{"bucketkey2", "foo2"},
+			{"bucketkey3", "foo3"},
+		}
+		if !testPutValues(tc, bucket, keyValues) {
+			return false
+		}
+
+		if !testGetValues(tc, bucket, keyValues) {
+			return false
+		}
+
+		// Ensure errors returned from the user-supplied ForEach
+		// function are returned.
+		forEachError := fmt.Errorf("example foreach error")
+		err := bucket.ForEach(func(k, v []byte) error {
+			return forEachError
+		})
+		if err != forEachError {
+			tc.t.Errorf("ForEach: inner function error not "+
+				"returned - got %v, want %v", err, forEachError)
+			return false
+		}
+
+		// Iterate all of the keys using ForEach while making sure the
+		// stored values are the expected values.
+		keysFound := make(map[string]struct{}, len(keyValues))
+		err = bucket.ForEach(func(k, v []byte) error {
+			kString := string(k)
+			wantV, found := lookupKey(kString, keyValues)
+			if !found {
+				return fmt.Errorf("ForEach: key '%s' should "+
+					"exist", kString)
+			}
+
+			if !reflect.DeepEqual(v, []byte(wantV)) {
+				return fmt.Errorf("ForEach: value for key '%s' "+
+					"does not match - got %s, want %s",
+					kString, v, wantV)
+			}
+
+			keysFound[kString] = struct{}{}
+			return nil
+		})
+		if err != nil {
+			tc.t.Errorf("%v", err)
+			return false
+		}
+
+		// Ensure all keys were iterated.
+		for _, item := range keyValues {
+			if _, ok := keysFound[item.key]; !ok {
+				tc.t.Errorf("ForEach: key '%s' was not iterated "+
+					"when it should have been", item.key)
+				return false
+			}
+		}
+
+		// Delete the keys and ensure they were deleted.
+		if !testDeleteValues(tc, bucket, keyValues) {
+			return false
+		}
+		if !testGetValues(tc, bucket, rollbackValues(keyValues)) {
+			return false
+		}
+
+		// Ensure creating a new bucket works as expected.
+ testBucketName := []byte("testbucket") + testBucket, err := bucket.CreateBucket(testBucketName) + if err != nil { + tc.t.Errorf("CreateBucket: unexpected error: %v", err) + return false + } + if !testNestedBucket(tc, testBucket) { + return false + } + + // Ensure creating a bucket that already exists fails with the + // expected error. + wantErrCode := database.ErrBucketExists + _, err = bucket.CreateBucket(testBucketName) + if !checkDbError(tc.t, "CreateBucket", err, wantErrCode) { + return false + } + + // Ensure CreateBucketIfNotExists returns an existing bucket. + testBucket, err = bucket.CreateBucketIfNotExists(testBucketName) + if err != nil { + tc.t.Errorf("CreateBucketIfNotExists: unexpected "+ + "error: %v", err) + return false + } + if !testNestedBucket(tc, testBucket) { + return false + } + + // Ensure retrieving an existing bucket works as expected. + testBucket = bucket.Bucket(testBucketName) + if !testNestedBucket(tc, testBucket) { + return false + } + + // Ensure deleting a bucket works as intended. + if err := bucket.DeleteBucket(testBucketName); err != nil { + tc.t.Errorf("DeleteBucket: unexpected error: %v", err) + return false + } + if b := bucket.Bucket(testBucketName); b != nil { + tc.t.Errorf("DeleteBucket: bucket '%s' still exists", + testBucketName) + return false + } + + // Ensure deleting a bucket that doesn't exist returns the + // expected error. + wantErrCode = database.ErrBucketNotFound + err = bucket.DeleteBucket(testBucketName) + if !checkDbError(tc.t, "DeleteBucket", err, wantErrCode) { + return false + } + + // Ensure CreateBucketIfNotExists creates a new bucket when + // it doesn't already exist. + testBucket, err = bucket.CreateBucketIfNotExists(testBucketName) + if err != nil { + tc.t.Errorf("CreateBucketIfNotExists: unexpected "+ + "error: %v", err) + return false + } + if !testNestedBucket(tc, testBucket) { + return false + } + + // Ensure the cursor interface works as expected. + if !testCursorInterface(tc, testBucket) { + return false + } + + // Delete the test bucket to avoid leaving it around for future + // calls. + if err := bucket.DeleteBucket(testBucketName); err != nil { + tc.t.Errorf("DeleteBucket: unexpected error: %v", err) + return false + } + if b := bucket.Bucket(testBucketName); b != nil { + tc.t.Errorf("DeleteBucket: bucket '%s' still exists", + testBucketName) + return false + } + } else { + // Put should fail with bucket that is not writable. + testName := "unwritable tx put" + wantErrCode := database.ErrTxNotWritable + failBytes := []byte("fail") + err := bucket.Put(failBytes, failBytes) + if !checkDbError(tc.t, testName, err, wantErrCode) { + return false + } + + // Delete should fail with bucket that is not writable. + testName = "unwritable tx delete" + err = bucket.Delete(failBytes) + if !checkDbError(tc.t, testName, err, wantErrCode) { + return false + } + + // CreateBucket should fail with bucket that is not writable. + testName = "unwritable tx create bucket" + _, err = bucket.CreateBucket(failBytes) + if !checkDbError(tc.t, testName, err, wantErrCode) { + return false + } + + // CreateBucketIfNotExists should fail with bucket that is not + // writable. + testName = "unwritable tx create bucket if not exists" + _, err = bucket.CreateBucketIfNotExists(failBytes) + if !checkDbError(tc.t, testName, err, wantErrCode) { + return false + } + + // DeleteBucket should fail with bucket that is not writable. 
+		testName = "unwritable tx delete bucket"
+		err = bucket.DeleteBucket(failBytes)
+		if !checkDbError(tc.t, testName, err, wantErrCode) {
+			return false
+		}
+
+		// Ensure the cursor interface works as expected with read-only
+		// buckets.
+		if !testCursorInterface(tc, bucket) {
+			return false
+		}
+	}
+
+	return true
+}
+
+// rollbackOnPanic rolls the passed transaction back if the code in the calling
+// function panics. This is useful in case the tests unexpectedly panic which
+// would leave any manually created transactions with the database mutex locked
+// thereby leading to a deadlock and masking the real reason for the panic. It
+// also logs a test error and repanics so the original panic can be traced.
+func rollbackOnPanic(t *testing.T, tx database.Tx) {
+	if err := recover(); err != nil {
+		t.Errorf("Unexpected panic: %v", err)
+		_ = tx.Rollback()
+		panic(err)
+	}
+}
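The next helper drives the manual Begin/Commit/Rollback flow directly, while other tests in this file use the managed View and Update wrappers around the same lifecycle. As a quick reference, here is a sketch contrasting the two styles; the function name putBothWays is hypothetical and it assumes only the database.DB interface these tests exercise.

```go
// putBothWays stores one key via a managed transaction and another via
// a manual transaction. Illustrative sketch only.
func putBothWays(db database.DB) error {
	// Managed: Update commits on a nil return and rolls back when an
	// error is returned; calling Commit or Rollback inside the closure
	// panics, as testManagedTxPanics verifies below.
	err := db.Update(func(tx database.Tx) error {
		return tx.Metadata().Put([]byte("managed"), []byte("v"))
	})
	if err != nil {
		return err
	}

	// Manual: the caller owns the transaction and must end it.
	tx, err := db.Begin(true)
	if err != nil {
		return err
	}
	if err := tx.Metadata().Put([]byte("manual"), []byte("v")); err != nil {
		_ = tx.Rollback()
		return err
	}
	return tx.Commit()
}
```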
+
+// testMetadataManualTxInterface ensures that the manual transactions metadata
+// interface works as expected.
+func testMetadataManualTxInterface(tc *testContext) bool {
+	// populateValues tests that populating values works as expected.
+	//
+	// When the writable flag is false, a read-only transaction is created,
+	// standard bucket tests for read-only transactions are performed, and
+	// the Commit function is checked to ensure it fails as expected.
+	//
+	// Otherwise, a read-write transaction is created, the values are
+	// written, standard bucket tests for read-write transactions are
+	// performed, and then the transaction is either committed or rolled
+	// back depending on the flag.
+	bucket1Name := []byte("bucket1")
+	populateValues := func(writable, rollback bool, putValues []keyPair) bool {
+		tx, err := tc.db.Begin(writable)
+		if err != nil {
+			tc.t.Errorf("Begin: unexpected error %v", err)
+			return false
+		}
+		defer rollbackOnPanic(tc.t, tx)
+
+		metadataBucket := tx.Metadata()
+		if metadataBucket == nil {
+			tc.t.Errorf("Metadata: unexpected nil bucket")
+			_ = tx.Rollback()
+			return false
+		}
+
+		bucket1 := metadataBucket.Bucket(bucket1Name)
+		if bucket1 == nil {
+			tc.t.Errorf("Bucket1: unexpected nil bucket")
+			return false
+		}
+
+		tc.isWritable = writable
+		if !testBucketInterface(tc, bucket1) {
+			_ = tx.Rollback()
+			return false
+		}
+
+		if !writable {
+			// The transaction is not writable, so it should fail
+			// the commit.
+			testName := "unwritable tx commit"
+			wantErrCode := database.ErrTxNotWritable
+			err := tx.Commit()
+			if !checkDbError(tc.t, testName, err, wantErrCode) {
+				_ = tx.Rollback()
+				return false
+			}
+		} else {
+			if !testPutValues(tc, bucket1, putValues) {
+				return false
+			}
+
+			if rollback {
+				// Rollback the transaction.
+				if err := tx.Rollback(); err != nil {
+					tc.t.Errorf("Rollback: unexpected "+
+						"error %v", err)
+					return false
+				}
+			} else {
+				// The commit should succeed.
+				if err := tx.Commit(); err != nil {
+					tc.t.Errorf("Commit: unexpected error "+
+						"%v", err)
+					return false
+				}
+			}
+		}
+
+		return true
+	}
+
+	// checkValues starts a read-only transaction and checks that all of
+	// the key/value pairs specified in the expectedValues parameter match
+	// what's in the database.
+	checkValues := func(expectedValues []keyPair) bool {
+		tx, err := tc.db.Begin(false)
+		if err != nil {
+			tc.t.Errorf("Begin: unexpected error %v", err)
+			return false
+		}
+		defer rollbackOnPanic(tc.t, tx)
+
+		metadataBucket := tx.Metadata()
+		if metadataBucket == nil {
+			tc.t.Errorf("Metadata: unexpected nil bucket")
+			_ = tx.Rollback()
+			return false
+		}
+
+		bucket1 := metadataBucket.Bucket(bucket1Name)
+		if bucket1 == nil {
+			tc.t.Errorf("Bucket1: unexpected nil bucket")
+			return false
+		}
+
+		if !testGetValues(tc, bucket1, expectedValues) {
+			_ = tx.Rollback()
+			return false
+		}
+
+		// Rollback the read-only transaction.
+		if err := tx.Rollback(); err != nil {
+			tc.t.Errorf("Rollback: unexpected error %v", err)
+			return false
+		}
+
+		return true
+	}
+
+	// deleteValues starts a read-write transaction and deletes the keys
+	// in the passed key/value pairs.
+	deleteValues := func(values []keyPair) bool {
+		tx, err := tc.db.Begin(true)
+		if err != nil {
+			tc.t.Errorf("Begin: unexpected error %v", err)
+			return false
+		}
+		defer rollbackOnPanic(tc.t, tx)
+
+		metadataBucket := tx.Metadata()
+		if metadataBucket == nil {
+			tc.t.Errorf("Metadata: unexpected nil bucket")
+			_ = tx.Rollback()
+			return false
+		}
+
+		bucket1 := metadataBucket.Bucket(bucket1Name)
+		if bucket1 == nil {
+			tc.t.Errorf("Bucket1: unexpected nil bucket")
+			return false
+		}
+
+		// Delete the keys and ensure they were deleted.
+		if !testDeleteValues(tc, bucket1, values) {
+			_ = tx.Rollback()
+			return false
+		}
+		if !testGetValues(tc, bucket1, rollbackValues(values)) {
+			_ = tx.Rollback()
+			return false
+		}
+
+		// Commit the changes and ensure it was successful.
+		if err := tx.Commit(); err != nil {
+			tc.t.Errorf("Commit: unexpected error %v", err)
+			return false
+		}
+
+		return true
+	}
+
+	// keyValues holds the keys and values to use when putting values into a
+	// bucket.
+	var keyValues = []keyPair{
+		{"umtxkey1", "foo1"},
+		{"umtxkey2", "foo2"},
+		{"umtxkey3", "foo3"},
+	}
+
+	// Ensure that attempting to populate the values using a read-only
+	// transaction fails as expected.
+	if !populateValues(false, true, keyValues) {
+		return false
+	}
+	if !checkValues(rollbackValues(keyValues)) {
+		return false
+	}
+
+	// Ensure that populating the values using a read-write transaction and
+	// then rolling it back yields the expected values.
+	if !populateValues(true, true, keyValues) {
+		return false
+	}
+	if !checkValues(rollbackValues(keyValues)) {
+		return false
+	}
+
+	// Ensure that populating the values using a read-write transaction and
+	// then committing it stores the expected values.
+	if !populateValues(true, false, keyValues) {
+		return false
+	}
+	if !checkValues(keyValues) {
+		return false
+	}
+
+	// Clean up the keys.
+	if !deleteValues(keyValues) {
+		return false
+	}
+
+	return true
+}
+
+// testManagedTxPanics ensures calling Rollback or Commit inside a managed
+// transaction panics.
+func testManagedTxPanics(tc *testContext) bool {
+	testPanic := func(fn func()) (paniced bool) {
+		// Set up a defer to catch the expected panic and update the
+		// return variable.
+		defer func() {
+			if err := recover(); err != nil {
+				paniced = true
+			}
+		}()
+
+		fn()
+		return false
+	}
+
+	// Ensure calling Commit on a managed read-only transaction panics.
+	paniced := testPanic(func() {
+		tc.db.View(func(tx database.Tx) error {
+			tx.Commit()
+			return nil
+		})
+	})
+	if !paniced {
+		tc.t.Error("Commit called inside View did not panic")
+		return false
+	}
+
+	// Ensure calling Rollback on a managed read-only transaction panics.
+ paniced = testPanic(func() { + tc.db.View(func(tx database.Tx) error { + tx.Rollback() + return nil + }) + }) + if !paniced { + tc.t.Error("Rollback called inside View did not panic") + return false + } + + // Ensure calling Commit on a managed read-write transaction panics. + paniced = testPanic(func() { + tc.db.Update(func(tx database.Tx) error { + tx.Commit() + return nil + }) + }) + if !paniced { + tc.t.Error("Commit called inside Update did not panic") + return false + } + + // Ensure calling Rollback on a managed read-write transaction panics. + paniced = testPanic(func() { + tc.db.Update(func(tx database.Tx) error { + tx.Rollback() + return nil + }) + }) + if !paniced { + tc.t.Error("Rollback called inside Update did not panic") + return false + } + + return true +} + +// testMetadataTxInterface tests all facets of the managed read/write and +// manual transaction metadata interfaces as well as the bucket interfaces under +// them. +func testMetadataTxInterface(tc *testContext) bool { + if !testManagedTxPanics(tc) { + return false + } + + bucket1Name := []byte("bucket1") + err := tc.db.Update(func(tx database.Tx) error { + _, err := tx.Metadata().CreateBucket(bucket1Name) + return err + }) + if err != nil { + tc.t.Errorf("Update: unexpected error creating bucket: %v", err) + return false + } + + if !testMetadataManualTxInterface(tc) { + return false + } + + // keyValues holds the keys and values to use when putting values + // into a bucket. + var keyValues = []keyPair{ + {"mtxkey1", "foo1"}, + {"mtxkey2", "foo2"}, + {"mtxkey3", "foo3"}, + } + + // Test the bucket interface via a managed read-only transaction. + err = tc.db.View(func(tx database.Tx) error { + metadataBucket := tx.Metadata() + if metadataBucket == nil { + return fmt.Errorf("Metadata: unexpected nil bucket") + } + + bucket1 := metadataBucket.Bucket(bucket1Name) + if bucket1 == nil { + return fmt.Errorf("Bucket1: unexpected nil bucket") + } + + tc.isWritable = false + if !testBucketInterface(tc, bucket1) { + return errSubTestFail + } + + return nil + }) + if err != nil { + if err != errSubTestFail { + tc.t.Errorf("%v", err) + } + return false + } + + // Ensure errors returned from the user-supplied View function are + // returned. + viewError := fmt.Errorf("example view error") + err = tc.db.View(func(tx database.Tx) error { + return viewError + }) + if err != viewError { + tc.t.Errorf("View: inner function error not returned - got "+ + "%v, want %v", err, viewError) + return false + } + + // Test the bucket interface via a managed read-write transaction. + // Also, put a series of values and force a rollback so the following + // code can ensure the values were not stored. + forceRollbackError := fmt.Errorf("force rollback") + err = tc.db.Update(func(tx database.Tx) error { + metadataBucket := tx.Metadata() + if metadataBucket == nil { + return fmt.Errorf("Metadata: unexpected nil bucket") + } + + bucket1 := metadataBucket.Bucket(bucket1Name) + if bucket1 == nil { + return fmt.Errorf("Bucket1: unexpected nil bucket") + } + + tc.isWritable = true + if !testBucketInterface(tc, bucket1) { + return errSubTestFail + } + + if !testPutValues(tc, bucket1, keyValues) { + return errSubTestFail + } + + // Return an error to force a rollback. 
+ return forceRollbackError + }) + if err != forceRollbackError { + if err == errSubTestFail { + return false + } + + tc.t.Errorf("Update: inner function error not returned - got "+ + "%v, want %v", err, forceRollbackError) + return false + } + + // Ensure the values that should not have been stored due to the forced + // rollback above were not actually stored. + err = tc.db.View(func(tx database.Tx) error { + metadataBucket := tx.Metadata() + if metadataBucket == nil { + return fmt.Errorf("Metadata: unexpected nil bucket") + } + + if !testGetValues(tc, metadataBucket, rollbackValues(keyValues)) { + return errSubTestFail + } + + return nil + }) + if err != nil { + if err != errSubTestFail { + tc.t.Errorf("%v", err) + } + return false + } + + // Store a series of values via a managed read-write transaction. + err = tc.db.Update(func(tx database.Tx) error { + metadataBucket := tx.Metadata() + if metadataBucket == nil { + return fmt.Errorf("Metadata: unexpected nil bucket") + } + + bucket1 := metadataBucket.Bucket(bucket1Name) + if bucket1 == nil { + return fmt.Errorf("Bucket1: unexpected nil bucket") + } + + if !testPutValues(tc, bucket1, keyValues) { + return errSubTestFail + } + + return nil + }) + if err != nil { + if err != errSubTestFail { + tc.t.Errorf("%v", err) + } + return false + } + + // Ensure the values stored above were committed as expected. + err = tc.db.View(func(tx database.Tx) error { + metadataBucket := tx.Metadata() + if metadataBucket == nil { + return fmt.Errorf("Metadata: unexpected nil bucket") + } + + bucket1 := metadataBucket.Bucket(bucket1Name) + if bucket1 == nil { + return fmt.Errorf("Bucket1: unexpected nil bucket") + } + + if !testGetValues(tc, bucket1, keyValues) { + return errSubTestFail + } + + return nil + }) + if err != nil { + if err != errSubTestFail { + tc.t.Errorf("%v", err) + } + return false + } + + // Clean up the values stored above in a managed read-write transaction. + err = tc.db.Update(func(tx database.Tx) error { + metadataBucket := tx.Metadata() + if metadataBucket == nil { + return fmt.Errorf("Metadata: unexpected nil bucket") + } + + bucket1 := metadataBucket.Bucket(bucket1Name) + if bucket1 == nil { + return fmt.Errorf("Bucket1: unexpected nil bucket") + } + + if !testDeleteValues(tc, bucket1, keyValues) { + return errSubTestFail + } + + return nil + }) + if err != nil { + if err != errSubTestFail { + tc.t.Errorf("%v", err) + } + return false + } + + return true +} + +// testFetchBlockIOMissing ensures that all of the block retrieval API functions +// work as expected when requesting blocks that don't exist. +func testFetchBlockIOMissing(tc *testContext, tx database.Tx) bool { + wantErrCode := database.ErrBlockNotFound + + // --------------------- + // Non-bulk Block IO API + // --------------------- + + // Test the individual block APIs one block at a time to ensure they + // return the expected error. Also, build the data needed to test the + // bulk APIs below while looping. + allBlockHashes := make([]wire.ShaHash, len(tc.blocks)) + allBlockRegions := make([]database.BlockRegion, len(tc.blocks)) + for i, block := range tc.blocks { + blockHash, err := block.Sha() + if err != nil { + tc.t.Errorf("block.Sha #%d: unexpected error: %v", i, + err) + return false + } + allBlockHashes[i] = *blockHash + + txLocs, err := block.TxLoc() + if err != nil { + tc.t.Errorf("block.TxLoc(%d): unexpected error: %v", i, + err) + return false + } + + // Ensure FetchBlock returns expected error. 
+		testName := fmt.Sprintf("FetchBlock #%d on missing block", i)
+		_, err = tx.FetchBlock(blockHash)
+		if !checkDbError(tc.t, testName, err, wantErrCode) {
+			return false
+		}
+
+		// Ensure FetchBlockHeader returns expected error.
+		testName = fmt.Sprintf("FetchBlockHeader #%d on missing block",
+			i)
+		_, err = tx.FetchBlockHeader(blockHash)
+		if !checkDbError(tc.t, testName, err, wantErrCode) {
+			return false
+		}
+
+		// Ensure the first transaction fetched as a block region from
+		// the database returns the expected error.
+		testName = fmt.Sprintf("FetchBlockRegion #%d on missing block",
+			i)
+		region := database.BlockRegion{
+			Hash:   blockHash,
+			Offset: uint32(txLocs[0].TxStart),
+			Len:    uint32(txLocs[0].TxLen),
+		}
+		allBlockRegions[i] = region
+		_, err = tx.FetchBlockRegion(&region)
+		if !checkDbError(tc.t, testName, err, wantErrCode) {
+			return false
+		}
+
+		// Ensure HasBlock returns false.
+		hasBlock, err := tx.HasBlock(blockHash)
+		if err != nil {
+			tc.t.Errorf("HasBlock #%d: unexpected err: %v", i, err)
+			return false
+		}
+		if hasBlock {
+			tc.t.Errorf("HasBlock #%d: should not have block", i)
+			return false
+		}
+	}
+
+	// -----------------
+	// Bulk Block IO API
+	// -----------------
+
+	// Ensure FetchBlocks returns expected error.
+	testName := "FetchBlocks on missing blocks"
+	_, err := tx.FetchBlocks(allBlockHashes)
+	if !checkDbError(tc.t, testName, err, wantErrCode) {
+		return false
+	}
+
+	// Ensure FetchBlockHeaders returns expected error.
+	testName = "FetchBlockHeaders on missing blocks"
+	_, err = tx.FetchBlockHeaders(allBlockHashes)
+	if !checkDbError(tc.t, testName, err, wantErrCode) {
+		return false
+	}
+
+	// Ensure FetchBlockRegions returns expected error.
+	testName = "FetchBlockRegions on missing blocks"
+	_, err = tx.FetchBlockRegions(allBlockRegions)
+	if !checkDbError(tc.t, testName, err, wantErrCode) {
+		return false
+	}
+
+	// Ensure HasBlocks returns false for all blocks.
+	hasBlocks, err := tx.HasBlocks(allBlockHashes)
+	if err != nil {
+		tc.t.Errorf("HasBlocks: unexpected err: %v", err)
+		return false
+	}
+	for i, hasBlock := range hasBlocks {
+		if hasBlock {
+			tc.t.Errorf("HasBlocks #%d: should not have block", i)
+			return false
+		}
+	}
+
+	return true
+}
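Every checkDbError call in the helper above leans on the package convention that failures surface as a database.Error value whose ErrorCode identifies the condition. Outside the test helpers, callers can branch on a code with something like the following sketch; the function name is made up, but the type assertion mirrors what checkDbError does elsewhere in this file.

```go
// isBlockNotFound reports whether err is this package's typed error
// carrying the ErrBlockNotFound code. Illustrative sketch only.
func isBlockNotFound(err error) bool {
	dbErr, ok := err.(database.Error)
	return ok && dbErr.ErrorCode == database.ErrBlockNotFound
}
```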
+
+// testFetchBlockIO ensures all of the block retrieval API functions work as
+// expected for the provided set of blocks. The blocks must already be stored
+// in the database, or at least stored into the passed transaction. It also
+// tests several error conditions such as ensuring the expected errors are
+// returned when fetching blocks, headers, and regions that don't exist.
+func testFetchBlockIO(tc *testContext, tx database.Tx) bool {
+	// ---------------------
+	// Non-bulk Block IO API
+	// ---------------------
+
+	// Test the individual block APIs one block at a time. Also, build the
+	// data needed to test the bulk APIs below while looping.
+	allBlockHashes := make([]wire.ShaHash, len(tc.blocks))
+	allBlockBytes := make([][]byte, len(tc.blocks))
+	allBlockTxLocs := make([][]wire.TxLoc, len(tc.blocks))
+	allBlockRegions := make([]database.BlockRegion, len(tc.blocks))
+	for i, block := range tc.blocks {
+		blockHash, err := block.Sha()
+		if err != nil {
+			tc.t.Errorf("block.Sha(%d): unexpected error: %v", i,
+				err)
+			return false
+		}
+		allBlockHashes[i] = *blockHash
+
+		blockBytes, err := block.Bytes()
+		if err != nil {
+			tc.t.Errorf("block.Bytes(%d): unexpected error: %v", i,
+				err)
+			return false
+		}
+		allBlockBytes[i] = blockBytes
+
+		txLocs, err := block.TxLoc()
+		if err != nil {
+			tc.t.Errorf("block.TxLoc(%d): unexpected error: %v", i,
+				err)
+			return false
+		}
+		allBlockTxLocs[i] = txLocs
+
+		// Ensure the block data fetched from the database matches the
+		// expected bytes.
+		gotBlockBytes, err := tx.FetchBlock(blockHash)
+		if err != nil {
+			tc.t.Errorf("FetchBlock(%s): unexpected error: %v",
+				blockHash, err)
+			return false
+		}
+		if !bytes.Equal(gotBlockBytes, blockBytes) {
+			tc.t.Errorf("FetchBlock(%s): bytes mismatch: got %x, "+
+				"want %x", blockHash, gotBlockBytes, blockBytes)
+			return false
+		}
+
+		// Ensure the block header fetched from the database matches the
+		// expected bytes.
+		wantHeaderBytes := blockBytes[0:wire.MaxBlockHeaderPayload]
+		gotHeaderBytes, err := tx.FetchBlockHeader(blockHash)
+		if err != nil {
+			tc.t.Errorf("FetchBlockHeader(%s): unexpected error: %v",
+				blockHash, err)
+			return false
+		}
+		if !bytes.Equal(gotHeaderBytes, wantHeaderBytes) {
+			tc.t.Errorf("FetchBlockHeader(%s): bytes mismatch: "+
+				"got %x, want %x", blockHash, gotHeaderBytes,
+				wantHeaderBytes)
+			return false
+		}
+
+		// Ensure the first transaction fetched as a block region from
+		// the database matches the expected bytes.
+		region := database.BlockRegion{
+			Hash:   blockHash,
+			Offset: uint32(txLocs[0].TxStart),
+			Len:    uint32(txLocs[0].TxLen),
+		}
+		allBlockRegions[i] = region
+		endRegionOffset := region.Offset + region.Len
+		wantRegionBytes := blockBytes[region.Offset:endRegionOffset]
+		gotRegionBytes, err := tx.FetchBlockRegion(&region)
+		if err != nil {
+			tc.t.Errorf("FetchBlockRegion(%s): unexpected error: %v",
+				blockHash, err)
+			return false
+		}
+		if !bytes.Equal(gotRegionBytes, wantRegionBytes) {
+			tc.t.Errorf("FetchBlockRegion(%s): bytes mismatch: "+
+				"got %x, want %x", blockHash, gotRegionBytes,
+				wantRegionBytes)
+			return false
+		}
+
+		// Ensure the database reports that it has the block.
+		hasBlock, err := tx.HasBlock(blockHash)
+		if err != nil {
+			tc.t.Errorf("HasBlock(%s): unexpected error: %v",
+				blockHash, err)
+			return false
+		}
+		if !hasBlock {
+			tc.t.Errorf("HasBlock(%s): database claims it doesn't "+
+				"have the block when it should", blockHash)
+			return false
+		}
+
+		// -----------------------
+		// Invalid blocks/regions.
+		// -----------------------
+
+		// Ensure fetching a block that doesn't exist returns the
+		// expected error.
+		badBlockHash := &wire.ShaHash{}
+		testName := fmt.Sprintf("FetchBlock(%s) invalid block",
+			badBlockHash)
+		wantErrCode := database.ErrBlockNotFound
+		_, err = tx.FetchBlock(badBlockHash)
+		if !checkDbError(tc.t, testName, err, wantErrCode) {
+			return false
+		}
+
+		// Ensure fetching a block header that doesn't exist returns
+		// the expected error.
+		testName = fmt.Sprintf("FetchBlockHeader(%s) invalid block",
+			badBlockHash)
+		_, err = tx.FetchBlockHeader(badBlockHash)
+		if !checkDbError(tc.t, testName, err, wantErrCode) {
+			return false
+		}
+
+		// Ensure fetching a block region in a block that doesn't exist
+		// returns the expected error.
+		testName = fmt.Sprintf("FetchBlockRegion(%s) invalid hash",
+			badBlockHash)
+		wantErrCode = database.ErrBlockNotFound
+		region.Hash = badBlockHash
+		region.Offset = ^uint32(0)
+		_, err = tx.FetchBlockRegion(&region)
+		if !checkDbError(tc.t, testName, err, wantErrCode) {
+			return false
+		}
+
+		// Ensure fetching a block region that is out of bounds returns
+		// the expected error.
+		testName = fmt.Sprintf("FetchBlockRegion(%s) invalid region",
+			blockHash)
+		wantErrCode = database.ErrBlockRegionInvalid
+		region.Hash = blockHash
+		region.Offset = ^uint32(0)
+		_, err = tx.FetchBlockRegion(&region)
+		if !checkDbError(tc.t, testName, err, wantErrCode) {
+			return false
+		}
+	}
+
+	// -----------------
+	// Bulk Block IO API
+	// -----------------
+
+	// Ensure the bulk block data fetched from the database matches the
+	// expected bytes.
+	blockData, err := tx.FetchBlocks(allBlockHashes)
+	if err != nil {
+		tc.t.Errorf("FetchBlocks: unexpected error: %v", err)
+		return false
+	}
+	if len(blockData) != len(allBlockBytes) {
+		tc.t.Errorf("FetchBlocks: unexpected number of results - got "+
+			"%d, want %d", len(blockData), len(allBlockBytes))
+		return false
+	}
+	for i := 0; i < len(blockData); i++ {
+		blockHash := allBlockHashes[i]
+		wantBlockBytes := allBlockBytes[i]
+		gotBlockBytes := blockData[i]
+		if !bytes.Equal(gotBlockBytes, wantBlockBytes) {
+			tc.t.Errorf("FetchBlocks(%s): bytes mismatch: got %x, "+
+				"want %x", blockHash, gotBlockBytes,
+				wantBlockBytes)
+			return false
+		}
+	}
+
+	// Ensure the bulk block headers fetched from the database match the
+	// expected bytes.
+	blockHeaderData, err := tx.FetchBlockHeaders(allBlockHashes)
+	if err != nil {
+		tc.t.Errorf("FetchBlockHeaders: unexpected error: %v", err)
+		return false
+	}
+	if len(blockHeaderData) != len(allBlockBytes) {
+		tc.t.Errorf("FetchBlockHeaders: unexpected number of results "+
+			"- got %d, want %d", len(blockHeaderData),
+			len(allBlockBytes))
+		return false
+	}
+	for i := 0; i < len(blockHeaderData); i++ {
+		blockHash := allBlockHashes[i]
+		wantHeaderBytes := allBlockBytes[i][0:wire.MaxBlockHeaderPayload]
+		gotHeaderBytes := blockHeaderData[i]
+		if !bytes.Equal(gotHeaderBytes, wantHeaderBytes) {
+			tc.t.Errorf("FetchBlockHeaders(%s): bytes mismatch: "+
+				"got %x, want %x", blockHash, gotHeaderBytes,
+				wantHeaderBytes)
+			return false
+		}
+	}
+
+	// Ensure the first transaction of every block fetched in bulk block
+	// regions from the database matches the expected bytes.
+	allRegionBytes, err := tx.FetchBlockRegions(allBlockRegions)
+	if err != nil {
+		tc.t.Errorf("FetchBlockRegions: unexpected error: %v", err)
+		return false
+	}
+	if len(allRegionBytes) != len(allBlockRegions) {
+		tc.t.Errorf("FetchBlockRegions: unexpected number of results "+
+			"- got %d, want %d", len(allRegionBytes),
+			len(allBlockRegions))
+		return false
+	}
+	for i, gotRegionBytes := range allRegionBytes {
+		region := &allBlockRegions[i]
+		endRegionOffset := region.Offset + region.Len
+		wantRegionBytes := blockData[i][region.Offset:endRegionOffset]
+		if !bytes.Equal(gotRegionBytes, wantRegionBytes) {
+			tc.t.Errorf("FetchBlockRegions(%d): bytes mismatch: "+
+				"got %x, want %x", i, gotRegionBytes,
+				wantRegionBytes)
+			return false
+		}
+	}
+
+	// Ensure the bulk determination of whether a set of block hashes are in
+	// the database returns true for all loaded blocks.
+	hasBlocks, err := tx.HasBlocks(allBlockHashes)
+	if err != nil {
+		tc.t.Errorf("HasBlocks: unexpected error: %v", err)
+		return false
+	}
+	for i, hasBlock := range hasBlocks {
+		if !hasBlock {
+			tc.t.Errorf("HasBlocks(%d): should have block", i)
+			return false
+		}
+	}
+
+	// -----------------------
+	// Invalid blocks/regions.
+	// -----------------------
+
+	// Ensure fetching blocks for which one doesn't exist returns the
+	// expected error.
+	testName := "FetchBlocks invalid hash"
+	badBlockHashes := make([]wire.ShaHash, len(allBlockHashes)+1)
+	copy(badBlockHashes, allBlockHashes)
+	badBlockHashes[len(badBlockHashes)-1] = wire.ShaHash{}
+	wantErrCode := database.ErrBlockNotFound
+	_, err = tx.FetchBlocks(badBlockHashes)
+	if !checkDbError(tc.t, testName, err, wantErrCode) {
+		return false
+	}
+
+	// Ensure fetching block headers for which one doesn't exist returns the
+	// expected error.
+	testName = "FetchBlockHeaders invalid hash"
+	_, err = tx.FetchBlockHeaders(badBlockHashes)
+	if !checkDbError(tc.t, testName, err, wantErrCode) {
+		return false
+	}
+
+	// Ensure fetching block regions for which one of the blocks doesn't
+	// exist returns the expected error.
+	testName = "FetchBlockRegions invalid hash"
+	badBlockRegions := make([]database.BlockRegion, len(allBlockRegions)+1)
+	copy(badBlockRegions, allBlockRegions)
+	badBlockRegions[len(badBlockRegions)-1].Hash = &wire.ShaHash{}
+	wantErrCode = database.ErrBlockNotFound
+	_, err = tx.FetchBlockRegions(badBlockRegions)
+	if !checkDbError(tc.t, testName, err, wantErrCode) {
+		return false
+	}
+
+	// Ensure fetching block regions that are out of bounds returns the
+	// expected error.
+	testName = "FetchBlockRegions invalid regions"
+	badBlockRegions = badBlockRegions[:len(badBlockRegions)-1]
+	for i := range badBlockRegions {
+		badBlockRegions[i].Offset = ^uint32(0)
+	}
+	wantErrCode = database.ErrBlockRegionInvalid
+	_, err = tx.FetchBlockRegions(badBlockRegions)
+	if !checkDbError(tc.t, testName, err, wantErrCode) {
+		return false
+	}
+
+	return true
+}
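A database.BlockRegion, as used heavily above, simply names a byte range within a stored block by hash, offset, and length, which is why a transaction's wire.TxLoc maps directly onto one. Here is a hedged sketch of fetching a block's first transaction that way; the helper name is illustrative and it assumes the same APIs this file exercises.

```go
// fetchFirstTx returns the serialized first transaction (the coinbase)
// of the passed block by requesting only that region of the stored
// block. Illustrative sketch only.
func fetchFirstTx(tx database.Tx, block *btcutil.Block) ([]byte, error) {
	blockHash, err := block.Sha()
	if err != nil {
		return nil, err
	}
	txLocs, err := block.TxLoc()
	if err != nil {
		return nil, err
	}
	region := database.BlockRegion{
		Hash:   blockHash,
		Offset: uint32(txLocs[0].TxStart),
		Len:    uint32(txLocs[0].TxLen),
	}
	return tx.FetchBlockRegion(&region)
}
```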
+
+// testBlockIOTxInterface ensures that the block IO interface works as expected
+// for both managed read/write and manual transactions. This function leaves
+// all of the stored blocks in the database.
+func testBlockIOTxInterface(tc *testContext) bool {
+	// Ensure attempting to store a block with a read-only transaction fails
+	// with the expected error.
+	err := tc.db.View(func(tx database.Tx) error {
+		wantErrCode := database.ErrTxNotWritable
+		for i, block := range tc.blocks {
+			testName := fmt.Sprintf("StoreBlock(%d) on ro tx", i)
+			err := tx.StoreBlock(block)
+			if !checkDbError(tc.t, testName, err, wantErrCode) {
+				return errSubTestFail
+			}
+		}
+
+		return nil
+	})
+	if err != nil {
+		if err != errSubTestFail {
+			tc.t.Errorf("%v", err)
+		}
+		return false
+	}
+
+	// Populate the database with loaded blocks and ensure all of the data
+	// fetching APIs work properly on them within the transaction before a
+	// commit or rollback. Then, force a rollback so the code below can
+	// ensure none of the data actually gets stored.
+	forceRollbackError := fmt.Errorf("force rollback")
+	err = tc.db.Update(func(tx database.Tx) error {
+		// Store all blocks in the same transaction.
+		for i, block := range tc.blocks {
+			err := tx.StoreBlock(block)
+			if err != nil {
+				tc.t.Errorf("StoreBlock #%d: unexpected error: "+
+					"%v", i, err)
+				return errSubTestFail
+			}
+		}
+
+		// Ensure attempting to store the same block again, before the
+		// transaction has been committed, returns the expected error.
+		wantErrCode := database.ErrBlockExists
+		for i, block := range tc.blocks {
+			testName := fmt.Sprintf("duplicate block entry #%d "+
+				"(before commit)", i)
+			err := tx.StoreBlock(block)
+			if !checkDbError(tc.t, testName, err, wantErrCode) {
+				return errSubTestFail
+			}
+		}
+
+		// Ensure that all data fetches from the stored blocks before
+		// the transaction has been committed work as expected.
+		if !testFetchBlockIO(tc, tx) {
+			return errSubTestFail
+		}
+
+		return forceRollbackError
+	})
+	if err != forceRollbackError {
+		if err == errSubTestFail {
+			return false
+		}
+
+		tc.t.Errorf("Update: inner function error not returned - got "+
+			"%v, want %v", err, forceRollbackError)
+		return false
+	}
+
+	// Ensure the rollback was successful.
+	err = tc.db.View(func(tx database.Tx) error {
+		if !testFetchBlockIOMissing(tc, tx) {
+			return errSubTestFail
+		}
+		return nil
+	})
+	if err != nil {
+		if err != errSubTestFail {
+			tc.t.Errorf("%v", err)
+		}
+		return false
+	}
+
+	// Populate the database with loaded blocks and ensure all of the data
+	// fetching APIs work properly.
+	err = tc.db.Update(func(tx database.Tx) error {
+		// Store a bunch of blocks in the same transaction.
+		for i, block := range tc.blocks {
+			err := tx.StoreBlock(block)
+			if err != nil {
+				tc.t.Errorf("StoreBlock #%d: unexpected error: "+
+					"%v", i, err)
+				return errSubTestFail
+			}
+		}
+
+		// Ensure attempting to store the same block again while in the
+		// same transaction, but before it has been committed, returns
+		// the expected error.
+		for i, block := range tc.blocks {
+			testName := fmt.Sprintf("duplicate block entry #%d "+
+				"(before commit)", i)
+			wantErrCode := database.ErrBlockExists
+			err := tx.StoreBlock(block)
+			if !checkDbError(tc.t, testName, err, wantErrCode) {
+				return errSubTestFail
+			}
+		}
+
+		// Ensure that all data fetches from the stored blocks before
+		// the transaction has been committed work as expected.
+		if !testFetchBlockIO(tc, tx) {
+			return errSubTestFail
+		}
+
+		return nil
+	})
+	if err != nil {
+		if err != errSubTestFail {
+			tc.t.Errorf("%v", err)
+		}
+		return false
+	}
+
+	// Ensure all data fetch tests work as expected using a managed
+	// read-only transaction after the data was successfully committed
+	// above.
+	err = tc.db.View(func(tx database.Tx) error {
+		if !testFetchBlockIO(tc, tx) {
+			return errSubTestFail
+		}
+
+		return nil
+	})
+	if err != nil {
+		if err != errSubTestFail {
+			tc.t.Errorf("%v", err)
+		}
+		return false
+	}
+
+	// Ensure all data fetch tests work as expected using a managed
+	// read-write transaction after the data was successfully committed
+	// above.
+	err = tc.db.Update(func(tx database.Tx) error {
+		if !testFetchBlockIO(tc, tx) {
+			return errSubTestFail
+		}
+
+		// Ensure attempting to store existing blocks again returns the
+		// expected error. Note that this is different from the
+		// previous version since this is a new transaction after the
+		// blocks have been committed.
+		wantErrCode := database.ErrBlockExists
+		for i, block := range tc.blocks {
+			testName := fmt.Sprintf("duplicate block entry #%d "+
+				"(after commit)", i)
+			err := tx.StoreBlock(block)
+			if !checkDbError(tc.t, testName, err, wantErrCode) {
+				return errSubTestFail
+			}
+		}
+
+		return nil
+	})
+	if err != nil {
+		if err != errSubTestFail {
+			tc.t.Errorf("%v", err)
+		}
+		return false
+	}
+
+	return true
+}
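The function above exercises the canonical write path for block data: StoreBlock inside a read-write transaction, with a nil return committing the result. A minimal sketch of the usual store-then-fetch round trip follows; the helper name storeAndFetch is made up, and it assumes only this file's imports.

```go
// storeAndFetch stores a block and reads its serialized bytes back in
// a single managed read-write transaction. Illustrative sketch only.
func storeAndFetch(db database.DB, block *btcutil.Block) ([]byte, error) {
	var blockBytes []byte
	err := db.Update(func(tx database.Tx) error {
		if err := tx.StoreBlock(block); err != nil {
			return err
		}
		blockHash, err := block.Sha()
		if err != nil {
			return err
		}
		blockBytes, err = tx.FetchBlock(blockHash)
		return err
	})
	return blockBytes, err
}
```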
+
+// testClosedTxInterface ensures that both the metadata and block IO API
+// functions behave as expected when attempted against a closed transaction.
+func testClosedTxInterface(tc *testContext, tx database.Tx) bool {
+	wantErrCode := database.ErrTxClosed
+	bucket := tx.Metadata()
+	cursor := tx.Metadata().Cursor()
+	bucketName := []byte("closedtxbucket")
+	keyName := []byte("closedtxkey")
+
+	// ------------
+	// Metadata API
+	// ------------
+
+	// Ensure that attempting to get an existing bucket returns nil when the
+	// transaction is closed.
+	if b := bucket.Bucket(bucketName); b != nil {
+		tc.t.Errorf("Bucket: did not return nil on closed tx")
+		return false
+	}
+
+	// Ensure CreateBucket returns expected error.
+	testName := "CreateBucket on closed tx"
+	_, err := bucket.CreateBucket(bucketName)
+	if !checkDbError(tc.t, testName, err, wantErrCode) {
+		return false
+	}
+
+	// Ensure CreateBucketIfNotExists returns expected error.
+	testName = "CreateBucketIfNotExists on closed tx"
+	_, err = bucket.CreateBucketIfNotExists(bucketName)
+	if !checkDbError(tc.t, testName, err, wantErrCode) {
+		return false
+	}
+
+	// Ensure Delete returns expected error.
+	testName = "Delete on closed tx"
+	err = bucket.Delete(keyName)
+	if !checkDbError(tc.t, testName, err, wantErrCode) {
+		return false
+	}
+
+	// Ensure DeleteBucket returns expected error.
+	testName = "DeleteBucket on closed tx"
+	err = bucket.DeleteBucket(bucketName)
+	if !checkDbError(tc.t, testName, err, wantErrCode) {
+		return false
+	}
+
+	// Ensure ForEach returns expected error.
+	testName = "ForEach on closed tx"
+	err = bucket.ForEach(nil)
+	if !checkDbError(tc.t, testName, err, wantErrCode) {
+		return false
+	}
+
+	// Ensure Get returns nil on a closed transaction.
+	if k := bucket.Get(keyName); k != nil {
+		tc.t.Errorf("Get: did not return nil on closed tx")
+		return false
+	}
+
+	// Ensure Put returns expected error.
+	testName = "Put on closed tx"
+	err = bucket.Put(keyName, []byte("test"))
+	if !checkDbError(tc.t, testName, err, wantErrCode) {
+		return false
+	}
+
+	// -------------------
+	// Metadata Cursor API
+	// -------------------
+
+	// Ensure attempting to get a bucket from a cursor on a closed tx gives
+	// back nil.
+	if b := cursor.Bucket(); b != nil {
+		tc.t.Error("Cursor.Bucket: returned non-nil on closed tx")
+		return false
+	}
+
+	// Ensure Cursor.Delete returns expected error.
+	testName = "Cursor.Delete on closed tx"
+	err = cursor.Delete()
+	if !checkDbError(tc.t, testName, err, wantErrCode) {
+		return false
+	}
+
+	// Ensure Cursor.First on a closed tx gives back nil key/value.
+	if k, v := cursor.First(); k != nil || v != nil {
+		tc.t.Error("Cursor.First: key and/or value are not nil on " +
+			"closed tx")
+		return false
+	}
+
+	// Ensure Cursor.Last on a closed tx gives back nil key/value.
+	if k, v := cursor.Last(); k != nil || v != nil {
+		tc.t.Error("Cursor.Last: key and/or value are not nil on " +
+			"closed tx")
+		return false
+	}
+
+	// Ensure Cursor.Next on a closed tx gives back nil key/value.
+	if k, v := cursor.Next(); k != nil || v != nil {
+		tc.t.Error("Cursor.Next: key and/or value are not nil on " +
+			"closed tx")
+		return false
+	}
+
+	// Ensure Cursor.Prev on a closed tx gives back nil key/value.
+	if k, v := cursor.Prev(); k != nil || v != nil {
+		tc.t.Error("Cursor.Prev: key and/or value are not nil on " +
+			"closed tx")
+		return false
+	}
+
+	// Ensure Cursor.Seek on a closed tx gives back nil key/value.
+	if k, v := cursor.Seek([]byte{}); k != nil || v != nil {
+		tc.t.Error("Cursor.Seek: key and/or value are not nil on " +
+			"closed tx")
+		return false
+	}
+
+	// ---------------------
+	// Non-bulk Block IO API
+	// ---------------------
+
+	// Test the individual block APIs one block at a time to ensure they
+	// return the expected error. Also, build the data needed to test the
+	// bulk APIs below while looping.
+	allBlockHashes := make([]wire.ShaHash, len(tc.blocks))
+	allBlockRegions := make([]database.BlockRegion, len(tc.blocks))
+	for i, block := range tc.blocks {
+		blockHash, err := block.Sha()
+		if err != nil {
+			tc.t.Errorf("block.Sha #%d: unexpected error: %v", i,
+				err)
+			return false
+		}
+		allBlockHashes[i] = *blockHash
+
+		txLocs, err := block.TxLoc()
+		if err != nil {
+			tc.t.Errorf("block.TxLoc(%d): unexpected error: %v", i,
+				err)
+			return false
+		}
+
+		// Ensure StoreBlock returns expected error.
+		testName = "StoreBlock on closed tx"
+		err = tx.StoreBlock(block)
+		if !checkDbError(tc.t, testName, err, wantErrCode) {
+			return false
+		}
+
+		// Ensure FetchBlock returns expected error.
+		testName = fmt.Sprintf("FetchBlock #%d on closed tx", i)
+		_, err = tx.FetchBlock(blockHash)
+		if !checkDbError(tc.t, testName, err, wantErrCode) {
+			return false
+		}
+
+		// Ensure FetchBlockHeader returns expected error.
+		testName = fmt.Sprintf("FetchBlockHeader #%d on closed tx", i)
+		_, err = tx.FetchBlockHeader(blockHash)
+		if !checkDbError(tc.t, testName, err, wantErrCode) {
+			return false
+		}
+
+		// Ensure the first transaction fetched as a block region from
+		// the database returns the expected error.
+		testName = fmt.Sprintf("FetchBlockRegion #%d on closed tx", i)
+		region := database.BlockRegion{
+			Hash:   blockHash,
+			Offset: uint32(txLocs[0].TxStart),
+			Len:    uint32(txLocs[0].TxLen),
+		}
+		allBlockRegions[i] = region
+		_, err = tx.FetchBlockRegion(&region)
+		if !checkDbError(tc.t, testName, err, wantErrCode) {
+			return false
+		}
+
+		// Ensure HasBlock returns expected error.
+		testName = fmt.Sprintf("HasBlock #%d on closed tx", i)
+		_, err = tx.HasBlock(blockHash)
+		if !checkDbError(tc.t, testName, err, wantErrCode) {
+			return false
+		}
+	}
+
+	// -----------------
+	// Bulk Block IO API
+	// -----------------
+
+	// Ensure FetchBlocks returns expected error.
+	testName = "FetchBlocks on closed tx"
+	_, err = tx.FetchBlocks(allBlockHashes)
+	if !checkDbError(tc.t, testName, err, wantErrCode) {
+		return false
+	}
+
+	// Ensure FetchBlockHeaders returns expected error.
+	testName = "FetchBlockHeaders on closed tx"
+	_, err = tx.FetchBlockHeaders(allBlockHashes)
+	if !checkDbError(tc.t, testName, err, wantErrCode) {
+		return false
+	}
+
+	// Ensure FetchBlockRegions returns expected error.
+	testName = "FetchBlockRegions on closed tx"
+	_, err = tx.FetchBlockRegions(allBlockRegions)
+	if !checkDbError(tc.t, testName, err, wantErrCode) {
+		return false
+	}
+
+	// Ensure HasBlocks returns expected error.
+	testName = "HasBlocks on closed tx"
+	_, err = tx.HasBlocks(allBlockHashes)
+	if !checkDbError(tc.t, testName, err, wantErrCode) {
+		return false
+	}
+
+	// ---------------
+	// Commit/Rollback
+	// ---------------
+
+	// Ensure that attempting to rollback or commit a transaction that is
+	// already closed returns the expected error.
+	err = tx.Rollback()
+	if !checkDbError(tc.t, "closed tx rollback", err, wantErrCode) {
+		return false
+	}
+	err = tx.Commit()
+	if !checkDbError(tc.t, "closed tx commit", err, wantErrCode) {
+		return false
+	}
+
+	return true
+}
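The contract exercised above can be summarized in one line: once a transaction has been committed or rolled back, lookups return nil values and everything else fails with database.ErrTxClosed. A compact sketch of probing that contract (illustrative only; it assumes a transaction that has already been closed):

```go
// probeClosed demonstrates the closed-transaction behavior on a tx that
// has already been committed or rolled back. Illustrative sketch only.
func probeClosed(tx database.Tx) {
	// Lookups on a closed transaction yield nil rather than an error.
	if v := tx.Metadata().Get([]byte("anykey")); v != nil {
		fmt.Println("unexpected value on closed tx")
	}

	// Mutations report the typed ErrTxClosed code.
	err := tx.Metadata().Put([]byte("anykey"), []byte("v"))
	if dbErr, ok := err.(database.Error); ok {
		fmt.Println("closed:", dbErr.ErrorCode == database.ErrTxClosed)
	}
}
```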
+
+// testTxClosed ensures that both the metadata and block IO API functions
+// behave as expected when attempted against closed read-only and read-write
+// transactions.
+func testTxClosed(tc *testContext) bool {
+	bucketName := []byte("closedtxbucket")
+	keyName := []byte("closedtxkey")
+
+	// Start a transaction, create a bucket and key used for testing, and
+	// immediately perform a commit on it so it is closed.
+	tx, err := tc.db.Begin(true)
+	if err != nil {
+		tc.t.Errorf("Begin(true): unexpected error: %v", err)
+		return false
+	}
+	defer rollbackOnPanic(tc.t, tx)
+	if _, err := tx.Metadata().CreateBucket(bucketName); err != nil {
+		tc.t.Errorf("CreateBucket: unexpected error: %v", err)
+		return false
+	}
+	if err := tx.Metadata().Put(keyName, []byte("test")); err != nil {
+		tc.t.Errorf("Put: unexpected error: %v", err)
+		return false
+	}
+	if err := tx.Commit(); err != nil {
+		tc.t.Errorf("Commit: unexpected error: %v", err)
+		return false
+	}
+
+	// Ensure invoking all of the functions on the closed read-write
+	// transaction behaves as expected.
+	if !testClosedTxInterface(tc, tx) {
+		return false
+	}
+
+	// Repeat the tests with a rolled-back read-only transaction.
+	tx, err = tc.db.Begin(false)
+	if err != nil {
+		tc.t.Errorf("Begin(false): unexpected error: %v", err)
+		return false
+	}
+	defer rollbackOnPanic(tc.t, tx)
+	if err := tx.Rollback(); err != nil {
+		tc.t.Errorf("Rollback: unexpected error: %v", err)
+		return false
+	}
+
+	// Ensure invoking all of the functions on the closed read-only
+	// transaction behaves as expected.
+	return testClosedTxInterface(tc, tx)
+}
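testConcurrency below rests on a simple timing argument: if N readers that each sleep for a duration s really execute concurrently, the total elapsed time stays near s instead of N*s. A stripped-down sketch of that measurement, using the same half-of-serial threshold as the test (the helper name is made up and it assumes only the database.DB interface used throughout these tests):

```go
// readersRunConcurrently launches n sleeping readers and reports
// whether they appear to have overlapped in time. Illustrative sketch.
func readersRunConcurrently(db database.DB, n int, sleep time.Duration) bool {
	done := make(chan struct{}, n)
	start := time.Now()
	for i := 0; i < n; i++ {
		go func() {
			// Errors are ignored here since the sketch only
			// measures timing.
			_ = db.View(func(tx database.Tx) error {
				time.Sleep(sleep)
				return nil
			})
			done <- struct{}{}
		}()
	}
	for i := 0; i < n; i++ {
		<-done
	}

	// Serial execution would take roughly n*sleep; concurrent readers
	// should finish in well under half of that.
	return time.Since(start) <= sleep*time.Duration(n/2)
}
```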
+
+// testConcurrency ensures the database properly supports concurrent readers
+// and only a single writer. It also ensures views act as snapshots at the
+// time they are acquired.
+func testConcurrency(tc *testContext) bool {
+	// sleepTime is how long each of the concurrent readers should sleep to
+	// aid in detection of whether or not the data is actually being read
+	// concurrently. It starts with a sane lower bound.
+	var sleepTime = time.Millisecond * 250
+
+	// Determine approximately how long it takes for a single block read.
+	// When it's longer than the default minimum sleep time, adjust the
+	// sleep time to help prevent durations that are too short which would
+	// cause erroneous test failures on slower systems.
+	startTime := time.Now()
+	err := tc.db.View(func(tx database.Tx) error {
+		blockHash, _ := tc.blocks[0].Sha()
+		_, err := tx.FetchBlock(blockHash)
+		if err != nil {
+			return err
+		}
+		return nil
+	})
+	if err != nil {
+		tc.t.Errorf("Unexpected error in view: %v", err)
+		return false
+	}
+	elapsed := time.Now().Sub(startTime)
+	if sleepTime < elapsed {
+		sleepTime = elapsed
+	}
+	tc.t.Logf("Time to load block 0: %v, using sleep time: %v", elapsed,
+		sleepTime)
+
+	// reader takes a block number to load and a channel to return the
+	// result of the operation on. It is used below to launch multiple
+	// concurrent readers.
+	numReaders := len(tc.blocks)
+	resultChan := make(chan bool, numReaders)
+	reader := func(blockNum int) {
+		err := tc.db.View(func(tx database.Tx) error {
+			time.Sleep(sleepTime)
+			blockHash, _ := tc.blocks[blockNum].Sha()
+			_, err := tx.FetchBlock(blockHash)
+			if err != nil {
+				return err
+			}
+			return nil
+		})
+		if err != nil {
+			tc.t.Errorf("Unexpected error in concurrent view: %v",
+				err)
+			resultChan <- false
+			return
+		}
+		resultChan <- true
+	}
+
+	// Start up several concurrent readers for the same block and wait for
+	// the results.
+	startTime = time.Now()
+	for i := 0; i < numReaders; i++ {
+		go reader(0)
+	}
+	for i := 0; i < numReaders; i++ {
+		if result := <-resultChan; !result {
+			return false
+		}
+	}
+	elapsed = time.Now().Sub(startTime)
+	tc.t.Logf("%d concurrent reads of same block elapsed: %v", numReaders,
+		elapsed)
+
+	// Consider it a failure if it took longer than half the time it would
+	// take with no concurrency.
+	if elapsed > sleepTime*time.Duration(numReaders/2) {
+		tc.t.Errorf("Concurrent views for same block did not appear to "+
+			"run simultaneously: elapsed %v", elapsed)
+		return false
+	}
+
+	// Start up several concurrent readers for different blocks and wait for
+	// the results.
+	startTime = time.Now()
+	for i := 0; i < numReaders; i++ {
+		go reader(i)
+	}
+	for i := 0; i < numReaders; i++ {
+		if result := <-resultChan; !result {
+			return false
+		}
+	}
+	elapsed = time.Now().Sub(startTime)
+	tc.t.Logf("%d concurrent reads of different blocks elapsed: %v",
+		numReaders, elapsed)
+
+	// Consider it a failure if it took longer than half the time it would
+	// take with no concurrency.
+	if elapsed > sleepTime*time.Duration(numReaders/2) {
+		tc.t.Errorf("Concurrent views for different blocks did not "+
+			"appear to run simultaneously: elapsed %v", elapsed)
+		return false
+	}
+
+	// Start up a few readers and wait for them to acquire views. Each
+	// reader waits for a signal from the writer to be finished to ensure
+	// that the data written by the writer is not seen by the view since it
+	// was started before the data was set.
+	concurrentKey := []byte("notthere")
+	concurrentVal := []byte("someval")
+	started := make(chan struct{})
+	writeComplete := make(chan struct{})
+	reader = func(blockNum int) {
+		err := tc.db.View(func(tx database.Tx) error {
+			started <- struct{}{}
+
+			// Wait for the writer to complete.
+			<-writeComplete
+
+			// Since this reader was created before the write took
+			// place, the data written should not be visible.
+			val := tx.Metadata().Get(concurrentKey)
+			if val != nil {
+				return fmt.Errorf("%s should not be visible",
+					concurrentKey)
+			}
+			return nil
+		})
+		if err != nil {
+			tc.t.Errorf("Unexpected error in concurrent view: %v",
+				err)
+			resultChan <- false
+			return
+		}
+		resultChan <- true
+	}
+	for i := 0; i < numReaders; i++ {
+		go reader(0)
+	}
+	for i := 0; i < numReaders; i++ {
+		<-started
+	}
+
+	// All readers are started and waiting for completion of the writer.
+	// Set some data the readers are expecting to not find and signal the
+	// readers the write is done by closing the writeComplete channel.
+	err = tc.db.Update(func(tx database.Tx) error {
+		err := tx.Metadata().Put(concurrentKey, concurrentVal)
+		if err != nil {
+			return err
+		}
+		return nil
+	})
+	if err != nil {
+		tc.t.Errorf("Unexpected error in update: %v", err)
+		return false
+	}
+	close(writeComplete)
+
+	// Wait for reader results.
+	for i := 0; i < numReaders; i++ {
+		if result := <-resultChan; !result {
+			return false
+		}
+	}
+
+	// Start a few writers and ensure the total time is at least the
+	// writeSleepTime * numWriters. This ensures only one write transaction
+	// can be active at a time.
+	writeSleepTime := time.Millisecond * 250
+	writer := func() {
+		err := tc.db.Update(func(tx database.Tx) error {
+			time.Sleep(writeSleepTime)
+			return nil
+		})
+		if err != nil {
+			tc.t.Errorf("Unexpected error in concurrent update: %v",
+				err)
+			resultChan <- false
+			return
+		}
+		resultChan <- true
+	}
+	numWriters := 3
+	startTime = time.Now()
+	for i := 0; i < numWriters; i++ {
+		go writer()
+	}
+	for i := 0; i < numWriters; i++ {
+		if result := <-resultChan; !result {
+			return false
+		}
+	}
+	elapsed = time.Now().Sub(startTime)
+	tc.t.Logf("%d concurrent writers elapsed using sleep time %v: %v",
+		numWriters, writeSleepTime, elapsed)
+
+	// The total time must have been at least the sum of all sleeps if the
+	// writes blocked properly.
+	if elapsed < writeSleepTime*time.Duration(numWriters) {
+		tc.t.Errorf("Concurrent writes appeared to run simultaneously: "+
+			"elapsed %v", elapsed)
+		return false
+	}
+
+	return true
+}
+
+// testConcurrentClose ensures that closing the database with open transactions
+// blocks until the transactions are finished.
+//
+// The database will be closed upon returning from this function.
+func testConcurrentClose(tc *testContext) bool {
+	// Start up a few readers and wait for them to acquire views. Each
+	// reader waits for a signal to complete to ensure the transactions stay
+	// open until they are explicitly signalled to be closed.
+	var activeReaders int32
+	numReaders := 3
+	started := make(chan struct{})
+	finishReaders := make(chan struct{})
+	resultChan := make(chan bool, numReaders+1)
+	reader := func() {
+		err := tc.db.View(func(tx database.Tx) error {
+			atomic.AddInt32(&activeReaders, 1)
+			started <- struct{}{}
+			<-finishReaders
+			atomic.AddInt32(&activeReaders, -1)
+			return nil
+		})
+		if err != nil {
+			tc.t.Errorf("Unexpected error in concurrent view: %v",
+				err)
+			resultChan <- false
+			return
+		}
+		resultChan <- true
+	}
+	for i := 0; i < numReaders; i++ {
+		go reader()
+	}
+	for i := 0; i < numReaders; i++ {
+		<-started
+	}
+
+	// Close the database in a separate goroutine. This should block until
+	// the transactions are finished. Once the close has taken place, the
+	// dbClosed channel is closed to signal the main goroutine below.
+	dbClosed := make(chan struct{})
+	go func() {
+		started <- struct{}{}
+		err := tc.db.Close()
+		close(dbClosed)
+		if err != nil {
+			tc.t.Errorf("Unexpected error in Close: %v", err)
+			resultChan <- false
+			return
+		}
+		resultChan <- true
+	}()
+	<-started
+
+	// Wait a short period and then signal the reader transactions to
+	// finish. When the db closed channel is received, ensure there are no
+	// active readers open.
+	time.AfterFunc(time.Millisecond*250, func() { close(finishReaders) })
+	<-dbClosed
+	if nr := atomic.LoadInt32(&activeReaders); nr != 0 {
+		tc.t.Errorf("Close did not appear to block with active "+
+			"readers: %d active", nr)
+		return false
+	}
+
+	// Wait for all results.
+	for i := 0; i < numReaders+1; i++ {
+		if result := <-resultChan; !result {
+			return false
+		}
+	}
+
+	return true
+}
+
+// testInterface performs tests for the various interfaces of the database
+// package which require state in the database for the given database type.
+func testInterface(t *testing.T, db database.DB) {
+	// Create a test context to pass around.
+	context := testContext{t: t, db: db}
+
+	// Load the test blocks and store in the test context for use throughout
+	// the tests.
+	blocks, err := loadBlocks(t, blockDataFile, blockDataNet)
+	if err != nil {
+		t.Errorf("loadBlocks: Unexpected error: %v", err)
+		return
+	}
+	context.blocks = blocks
+
+	// Test the transaction metadata interface including managed and manual
+	// transactions as well as buckets.
+	if !testMetadataTxInterface(&context) {
+		return
+	}
+
+	// Test the transaction block IO interface using managed and manual
+	// transactions. This function leaves all of the stored blocks in the
+	// database since they're used later.
+	if !testBlockIOTxInterface(&context) {
+		return
+	}
+
+	// Test that all of the transaction interface functions work as
+	// expected against a closed transaction.
+	if !testTxClosed(&context) {
+		return
+	}
+
+	// Test that the database properly supports concurrency.
+	if !testConcurrency(&context) {
+		return
+	}
+
+	// Test that closing the database with open transactions blocks until
+	// the transactions are finished.
+	//
+	// The database will be closed upon returning from this function, so it
+	// must be the last thing called.
+	testConcurrentClose(&context)
+}
diff --git a/database2/ffboltdb/mockfile_test.go b/database2/ffboltdb/mockfile_test.go
new file mode 100644
index 0000000000..bf089c6707
--- /dev/null
+++ b/database2/ffboltdb/mockfile_test.go
@@ -0,0 +1,163 @@
+// Copyright (c) 2015 Conformal Systems LLC.
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+// This file is part of the ffboltdb package rather than the ffboltdb_test
+// package as it is part of the whitebox testing.
+
+package ffboltdb
+
+import (
+	"errors"
+	"io"
+	"sync"
+)
+
+// Errors used for the mock file.
+var (
+	// errMockFileClosed is used to indicate a mock file is closed.
+	errMockFileClosed = errors.New("file closed")
+
+	// errInvalidOffset is used to indicate an offset that is out of range
+	// for the file was provided.
+	errInvalidOffset = errors.New("invalid offset")
+
+	// errSyncFail is used to indicate simulated sync failure.
+	errSyncFail = errors.New("simulated sync failure")
+)
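The mock that follows works because the flat-file block store depends on a small filer abstraction rather than on *os.File directly, so the tests can substitute an in-memory implementation and trigger failures deterministically. The actual interface is defined elsewhere in this package; judging from the methods mockFile implements, its shape is presumably close to this sketch:

```go
// A plausible shape for the package's filer interface, inferred from
// the methods mockFile provides; the authoritative definition lives in
// the non-test sources of this package.
type filer interface {
	io.Closer
	io.ReaderAt
	io.WriterAt
	Truncate(size int64) error
	Sync() error
}
```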
+
+// mockFile implements the filer interface and is used to force failures in
+// the database code related to reading and writing from the flat block files.
+// A maxSize of -1 is unlimited.
+type mockFile struct {
+	sync.RWMutex
+	maxSize      int64
+	data         []byte
+	forceSyncErr bool
+	closed       bool
+}
+
+// Close closes the mock file without releasing any data associated with it.
+// This allows it to be "reopened" without losing the data.
+//
+// This is part of the filer implementation.
+func (f *mockFile) Close() error {
+	f.Lock()
+	defer f.Unlock()
+
+	if f.closed {
+		return errMockFileClosed
+	}
+	f.closed = true
+	return nil
+}
+
+// ReadAt reads len(b) bytes from the mock file starting at byte offset off. It
+// returns the number of bytes read and the error, if any. ReadAt always
+// returns a non-nil error when n < len(b). At end of file, that error is
+// io.EOF.
+//
+// This is part of the filer implementation.
+func (f *mockFile) ReadAt(b []byte, off int64) (int, error) {
+	f.RLock()
+	defer f.RUnlock()
+
+	if f.closed {
+		return 0, errMockFileClosed
+	}
+	maxSize := int64(len(f.data))
+	if f.maxSize > -1 && maxSize > f.maxSize {
+		maxSize = f.maxSize
+	}
+	if off < 0 || off > maxSize {
+		return 0, errInvalidOffset
+	}
+
+	// Limit to the max size field, if set.
+	numToRead := int64(len(b))
+	endOffset := off + numToRead
+	if endOffset > maxSize {
+		numToRead = maxSize - off
+	}
+
+	copy(b, f.data[off:off+numToRead])
+	if numToRead < int64(len(b)) {
+		return int(numToRead), io.EOF
+	}
+	return int(numToRead), nil
+}
+
+// Truncate changes the size of the mock file.
+//
+// This is part of the filer implementation.
+func (f *mockFile) Truncate(size int64) error {
+	f.Lock()
+	defer f.Unlock()
+
+	if f.closed {
+		return errMockFileClosed
+	}
+	maxSize := int64(len(f.data))
+	if f.maxSize > -1 && maxSize > f.maxSize {
+		maxSize = f.maxSize
+	}
+	if size > maxSize {
+		return errInvalidOffset
+	}
+
+	f.data = f.data[:size]
+	return nil
+}
+
+// WriteAt writes len(b) bytes to the mock file starting at byte offset off.
+// It returns the number of bytes written and an error, if any. WriteAt
+// returns a non-nil error any time n != len(b).
+//
+// This is part of the filer implementation.
+func (f *mockFile) WriteAt(b []byte, off int64) (int, error) {
+	f.Lock()
+	defer f.Unlock()
+
+	if f.closed {
+		return 0, errMockFileClosed
+	}
+	maxSize := f.maxSize
+	if maxSize < 0 {
+		maxSize = 100 * 1024 // 100KiB
+	}
+	if off < 0 || off > maxSize {
+		return 0, errInvalidOffset
+	}
+
+	// Limit to the max size field, if set, and grow the slice if needed.
+	numToWrite := int64(len(b))
+	if off+numToWrite > maxSize {
+		numToWrite = maxSize - off
+	}
+	if off+numToWrite > int64(len(f.data)) {
+		newData := make([]byte, off+numToWrite)
+		copy(newData, f.data)
+		f.data = newData
+	}
+
+	copy(f.data[off:], b[:numToWrite])
+	if numToWrite < int64(len(b)) {
+		return int(numToWrite), io.EOF
+	}
+	return int(numToWrite), nil
+}
+
+// Sync doesn't do anything for mock files. However, it will return an error
+// if the mock file's forceSyncErr flag is set.
+//
+// This is part of the filer implementation.
+func (f *mockFile) Sync() error {
+	if f.forceSyncErr {
+		return errSyncFail
+	}
+
+	return nil
+}
+
+// Ensure the mockFile type implements the filer interface.
+var _ filer = (*mockFile)(nil)
diff --git a/database2/ffboltdb/reconcile.go b/database2/ffboltdb/reconcile.go
new file mode 100644
index 0000000000..d65d5135ca
--- /dev/null
+++ b/database2/ffboltdb/reconcile.go
@@ -0,0 +1,117 @@
+// Copyright (c) 2015 Conformal Systems LLC.
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package ffboltdb
+
+import (
+	"fmt"
+	"hash/crc32"
+
+	"github.com/btcsuite/btcd/database2"
+)
+
+// The serialized write cursor location format is:
+//
+//  [0:4]  Block file (4 bytes)
+//  [4:8]  File offset (4 bytes)
+//  [8:12] Castagnoli CRC-32 checksum (4 bytes)
+
+// serializeWriteRow serializes the current block file and offset where new
+// data will be written into a format suitable for storage in the metadata.
+func serializeWriteRow(curBlockFileNum, curFileOffset uint32) []byte {
+	var serializedRow [12]byte
+	byteOrder.PutUint32(serializedRow[0:4], curBlockFileNum)
+	byteOrder.PutUint32(serializedRow[4:8], curFileOffset)
+	checksum := crc32.Checksum(serializedRow[:8], castagnoli)
+	byteOrder.PutUint32(serializedRow[8:12], checksum)
+	return serializedRow[:]
+}
+
+// deserializeWriteRow deserializes the write cursor location stored in the
+// metadata. Returns ErrCorruption if the checksum of the entry doesn't match.
+func deserializeWriteRow(writeRow []byte) (uint32, uint32, error) {
+	// Ensure the checksum matches. The checksum is at the end.
+	gotChecksum := crc32.Checksum(writeRow[:8], castagnoli)
+	wantChecksumBytes := writeRow[8:12]
+	wantChecksum := byteOrder.Uint32(wantChecksumBytes)
+	if gotChecksum != wantChecksum {
+		str := fmt.Sprintf("metadata for write cursor does not match "+
+			"the expected checksum - got %d, want %d", gotChecksum,
+			wantChecksum)
+		return 0, 0, makeDbErr(database.ErrCorruption, str, nil)
+	}
+
+	fileNum := byteOrder.Uint32(writeRow[0:4])
+	fileOffset := byteOrder.Uint32(writeRow[4:8])
+	return fileNum, fileOffset, nil
+}
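These two helpers are easy to sanity-check in isolation since the row layout is fixed at 12 bytes with the Castagnoli CRC-32 covering the first 8. Here is a sketch of a round-trip test one could put in the package's test files (not part of this diff; it assumes the standard testing import):

```go
func TestWriteRowRoundTrip(t *testing.T) {
	// Serialize file 3, offset 65536 and read it back; deserialization
	// re-verifies the Castagnoli CRC-32 over the first 8 bytes.
	row := serializeWriteRow(3, 65536)
	fileNum, offset, err := deserializeWriteRow(row)
	if err != nil || fileNum != 3 || offset != 65536 {
		t.Fatalf("round trip failed: %d %d %v", fileNum, offset, err)
	}

	// Corrupting any checksummed byte must yield ErrCorruption.
	row[0] ^= 0x01
	if _, _, err := deserializeWriteRow(row); err == nil {
		t.Fatal("expected checksum mismatch error")
	}
}
```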
+
+// reconcileDB reconciles the metadata with the flat block files on disk.  It
+// will also initialize the bolt database if the create flag is set.
+func reconcileDB(pdb *db, create bool) (database.DB, error) {
+	// Perform initial internal bucket and value creation during database
+	// creation.
+	if create {
+		if err := initBoltDB(pdb.boltDB); err != nil {
+			return nil, err
+		}
+	}
+
+	// Load the current write cursor position from the metadata.
+	var curFileNum, curOffset uint32
+	err := pdb.View(func(tx database.Tx) error {
+		writeRow := tx.Metadata().Get(writeLocKeyName)
+		if writeRow == nil {
+			str := "write cursor does not exist"
+			return makeDbErr(database.ErrCorruption, str, nil)
+		}
+
+		var err error
+		curFileNum, curOffset, err = deserializeWriteRow(writeRow)
+		return err
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// When the write cursor position found by scanning the block files on
+	// disk is AFTER the position the metadata believes to be true, truncate
+	// the files on disk to match the metadata.  This can be a fairly common
+	// occurrence in unclean shutdown scenarios while the block files are in
+	// the middle of being written.  Since the metadata isn't updated until
+	// after the block data is written, this is effectively just a rollback
+	// to the known good point before the unclean shutdown.
+	wc := pdb.store.writeCursor
+	if wc.curFileNum > curFileNum || (wc.curFileNum == curFileNum &&
+		wc.curOffset > curOffset) {
+
+		log.Info("Detected unclean shutdown - Repairing...")
+		log.Debugf("Metadata claims file %d, offset %d. Block data is "+
+			"at file %d, offset %d", curFileNum, curOffset,
+			wc.curFileNum, wc.curOffset)
+		pdb.store.handleRollback(curFileNum, curOffset)
+		log.Infof("Database sync complete")
+	}
+
+	// When the write cursor position found by scanning the block files on
+	// disk is BEFORE the position the metadata believes to be true, return
+	// a corruption error.  Since sync is called after each block is written
+	// and before the metadata is updated, this should only happen in the
+	// case of missing, deleted, or truncated block files, which generally
+	// is not an easily recoverable scenario.  In the future, it might be
+	// possible to rescan and rebuild the metadata from the block files,
+	// however, that would need to happen with coordination from a higher
+	// layer since it could invalidate other metadata.
+	if wc.curFileNum < curFileNum || (wc.curFileNum == curFileNum &&
+		wc.curOffset < curOffset) {
+
+		str := fmt.Sprintf("metadata claims file %d, offset %d, but "+
+			"block data is at file %d, offset %d", curFileNum,
+			curOffset, wc.curFileNum, wc.curOffset)
+		log.Warnf("***Database corruption detected***: %v", str)
+		return nil, makeDbErr(database.ErrCorruption, str, nil)
+	}
+
+	return pdb, nil
+}
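Since the write-row format is just two fixed-width fields plus a trailing CRC, it can be exercised in isolation. Below is a self-contained sketch of the round trip `reconcileDB` depends on, assuming the package's `byteOrder` is little-endian (the standalone names are illustrative):

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
	"hash/crc32"
)

var castagnoli = crc32.MakeTable(crc32.Castagnoli)

// serializeWriteRow packs the block file number and offset, then appends a
// Castagnoli CRC-32 over the first 8 bytes.
func serializeWriteRow(fileNum, offset uint32) []byte {
	var row [12]byte
	binary.LittleEndian.PutUint32(row[0:4], fileNum)
	binary.LittleEndian.PutUint32(row[4:8], offset)
	binary.LittleEndian.PutUint32(row[8:12], crc32.Checksum(row[:8], castagnoli))
	return row[:]
}

// deserializeWriteRow verifies the checksum before trusting the fields.
func deserializeWriteRow(row []byte) (uint32, uint32, error) {
	if len(row) != 12 {
		return 0, 0, errors.New("malformed write row")
	}
	want := binary.LittleEndian.Uint32(row[8:12])
	if crc32.Checksum(row[:8], castagnoli) != want {
		return 0, 0, errors.New("write row checksum mismatch")
	}
	return binary.LittleEndian.Uint32(row[0:4]), binary.LittleEndian.Uint32(row[4:8]), nil
}

func main() {
	row := serializeWriteRow(3, 52137)
	fmt.Println(deserializeWriteRow(row)) // 3 52137 <nil>

	row[5] ^= 0x01 // flip one bit in the offset field
	_, _, err := deserializeWriteRow(row)
	fmt.Println(err) // write row checksum mismatch
}
```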
diff --git a/database2/ffboltdb/whitebox_test.go b/database2/ffboltdb/whitebox_test.go
new file mode 100644
index 0000000000..5cd2abb467
--- /dev/null
+++ b/database2/ffboltdb/whitebox_test.go
@@ -0,0 +1,796 @@
+// Copyright (c) 2015 Conformal Systems LLC.
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+// This file is part of the ffboltdb package rather than the ffboltdb_test
+// package as it provides whitebox testing.
+
+package ffboltdb
+
+import (
+	"compress/bzip2"
+	"encoding/binary"
+	"fmt"
+	"hash/crc32"
+	"io"
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/btcsuite/bolt"
+	"github.com/btcsuite/btcd/chaincfg"
+	"github.com/btcsuite/btcd/database2"
+	"github.com/btcsuite/btcd/wire"
+	"github.com/btcsuite/btcutil"
+)
+
+var (
+	// blockDataNet is the expected network in the test block data.
+	blockDataNet = wire.MainNet
+
+	// blockDataFile is the path to a file containing the first 256 blocks
+	// of the block chain.
+	blockDataFile = filepath.Join("..", "testdata", "blocks1-256.bz2")
+
+	// errSubTestFail is used to signal that a sub test returned false.
+	errSubTestFail = fmt.Errorf("sub test failure")
+)
+
+// loadBlocks loads the blocks contained in the testdata directory and returns
+// a slice of them.
+func loadBlocks(t *testing.T, dataFile string, network wire.BitcoinNet) ([]*btcutil.Block, error) {
+	// Open the file that contains the blocks for reading.
+	fi, err := os.Open(dataFile)
+	if err != nil {
+		t.Errorf("failed to open file %v, err %v", dataFile, err)
+		return nil, err
+	}
+	defer func() {
+		if err := fi.Close(); err != nil {
+			t.Errorf("failed to close file %v %v", dataFile,
+				err)
+		}
+	}()
+	dr := bzip2.NewReader(fi)
+
+	// Set the first block as the genesis block.
+	blocks := make([]*btcutil.Block, 0, 256)
+	genesis := btcutil.NewBlock(chaincfg.MainNetParams.GenesisBlock)
+	blocks = append(blocks, genesis)
+
+	// Load the remaining blocks.
+	for height := 1; ; height++ {
+		var net uint32
+		err := binary.Read(dr, binary.LittleEndian, &net)
+		if err == io.EOF {
+			// Hit end of file at the expected offset.  No error.
+			break
+		}
+		if err != nil {
+			t.Errorf("Failed to load network type for block %d: %v",
+				height, err)
+			return nil, err
+		}
+		if net != uint32(network) {
+			err := fmt.Errorf("block %d doesn't match network: "+
+				"%v expects %v", height, net, network)
+			t.Errorf("%v", err)
+			return nil, err
+		}
+
+		var blockLen uint32
+		err = binary.Read(dr, binary.LittleEndian, &blockLen)
+		if err != nil {
+			t.Errorf("Failed to load block size for block %d: %v",
+				height, err)
+			return nil, err
+		}
+
+		// Read the block.
+		blockBytes := make([]byte, blockLen)
+		_, err = io.ReadFull(dr, blockBytes)
+		if err != nil {
+			t.Errorf("Failed to load block %d: %v", height, err)
+			return nil, err
+		}
+
+		// Deserialize and store the block.
+		block, err := btcutil.NewBlockFromBytes(blockBytes)
+		if err != nil {
+			t.Errorf("Failed to parse block %v: %v", height, err)
+			return nil, err
+		}
+		blocks = append(blocks, block)
+	}
+
+	return blocks, nil
+}
+
+// checkDbError ensures the passed error is a database.Error with an error code
+// that matches the passed error code.
+func checkDbError(t *testing.T, testName string, gotErr error, wantErrCode database.ErrorCode) bool {
+	dbErr, ok := gotErr.(database.Error)
+	if !ok {
+		t.Errorf("%s: unexpected error type - got %T, want %T",
+			testName, gotErr, database.Error{})
+		return false
+	}
+	if dbErr.ErrorCode != wantErrCode {
+		t.Errorf("%s: unexpected error code - got %s (%s), want %s",
+			testName, dbErr.ErrorCode, dbErr.Description,
+			wantErrCode)
+		return false
+	}
+
+	return true
+}
+
+// testContext is used to store context information about a running test which
+// is passed into helper functions.
+type testContext struct {
+	t            *testing.T
+	db           database.DB
+	files        map[uint32]*lockableFile
+	maxFileSizes map[uint32]int64
+	blocks       []*btcutil.Block
+}
+
+// TestConvertErr ensures the bolt error to database error conversion works as
+// expected.
+func TestConvertErr(t *testing.T) {
+	t.Parallel()
+
+	tests := []struct {
+		boltErr     error
+		wantErrCode database.ErrorCode
+	}{
+		{bolt.ErrDatabaseNotOpen, database.ErrDbNotOpen},
+		{bolt.ErrInvalid, database.ErrInvalid},
+		{bolt.ErrTxNotWritable, database.ErrTxNotWritable},
+		{bolt.ErrTxClosed, database.ErrTxClosed},
+		{bolt.ErrBucketNotFound, database.ErrBucketNotFound},
+		{bolt.ErrBucketExists, database.ErrBucketExists},
+		{bolt.ErrBucketNameRequired, database.ErrBucketNameRequired},
+		{bolt.ErrKeyRequired, database.ErrKeyRequired},
+		{bolt.ErrKeyTooLarge, database.ErrKeyTooLarge},
+		{bolt.ErrValueTooLarge, database.ErrValueTooLarge},
+		{bolt.ErrIncompatibleValue, database.ErrIncompatibleValue},
+	}
+
+	for i, test := range tests {
+		gotErr := convertErr("test", test.boltErr)
+		if gotErr.ErrorCode != test.wantErrCode {
+			t.Errorf("convertErr #%d unexpected error - got %v, "+
+				"want %v", i, gotErr.ErrorCode, test.wantErrCode)
+			continue
+		}
+	}
+}
+
+// TestCornerCases ensures several corner cases which can happen when opening
+// a database and/or block files work as expected.
+func TestCornerCases(t *testing.T) {
+	t.Parallel()
+
+	// Create a file at the database path to force the open below to fail.
+	dbPath := filepath.Join(os.TempDir(), "ffboltdb-errors")
+	_ = os.RemoveAll(dbPath)
+	fi, err := os.Create(dbPath)
+	if err != nil {
+		t.Errorf("os.Create: unexpected error: %v", err)
+		return
+	}
+	fi.Close()
+
+	// Ensure creating a new database fails when a file exists where a
+	// directory is needed.
+	testName := "openDB: fail due to file at target location"
+	wantErrCode := database.ErrDriverSpecific
+	idb, err := openDB(dbPath, blockDataNet, true)
+	if !checkDbError(t, testName, err, wantErrCode) {
+		if err == nil {
+			idb.Close()
+		}
+		_ = os.RemoveAll(dbPath)
+		return
+	}
+
+	// Remove the file and create the database to run tests against.  It
+	// should be successful this time.
+	_ = os.RemoveAll(dbPath)
+	idb, err = openDB(dbPath, blockDataNet, true)
+	if err != nil {
+		t.Errorf("openDB: unexpected error: %v", err)
+		return
+	}
+	defer os.RemoveAll(dbPath)
+	defer idb.Close()
+
+	// Ensure attempting to write to a file that can't be created returns
+	// the expected error.
+	testName = "writeBlock: open file failure"
+	filePath := blockFilePath(dbPath, 0)
+	if err := os.Mkdir(filePath, 0755); err != nil {
+		t.Errorf("os.Mkdir: unexpected error: %v", err)
+		return
+	}
+	store := idb.(*db).store
+	_, err = store.writeBlock([]byte{0x00})
+	if !checkDbError(t, testName, err, database.ErrDriverSpecific) {
+		return
+	}
+	_ = os.RemoveAll(filePath)
+
+	// Ensure initialization errors in the underlying bolt database work as
+	// expected.
+	testName = "initBoltDB: reinitialization"
+	wantErrCode = database.ErrBucketExists
+	boltDB := idb.(*db).boltDB
+	err = initBoltDB(boltDB)
+	if !checkDbError(t, testName, err, wantErrCode) {
+		return
+	}
+
+	// Start a transaction and close the underlying bolt transaction out
+	// from under it.
+	dbTx, err := idb.Begin(true)
+	if err != nil {
+		t.Errorf("Begin: unexpected error: %v", err)
+		return
+	}
+	dbTx.(*transaction).boltTx.Rollback()
+
+	// Ensure errors in the underlying bolt database during a transaction
+	// commit are handled properly.
+	testName = "Commit: underlying bolt error"
+	wantErrCode = database.ErrTxClosed
+	err = dbTx.Commit()
+	if !checkDbError(t, testName, err, wantErrCode) {
+		return
+	}
+
+	// Reopen the transaction just enough to force a rollback failure due
+	// to the underlying bolt tx being closed.
+	dbTx.(*transaction).db.mtx.RLock()
+	dbTx.(*transaction).closed = false
+
+	// Ensure errors in the underlying bolt database during a transaction
+	// rollback are handled properly.
+	testName = "Rollback: underlying bolt error"
+	err = dbTx.Rollback()
+	if !checkDbError(t, testName, err, wantErrCode) {
+		return
+	}
+
+	// Ensure errors in ForEach due to the underlying bolt database are
+	// handled properly.
+	err = idb.Update(func(tx database.Tx) error {
+		// Close the underlying bolt transaction out from under the
+		// transaction instance.
+		tx.(*transaction).boltTx.Rollback()
+
+		wantErrCode = database.ErrTxClosed
+		err = tx.Metadata().ForEach(func(k, v []byte) error {
+			return nil
+		})
+		if !checkDbError(t, testName, err, wantErrCode) {
+			return errSubTestFail
+		}
+
+		// The Update is expected to fail since the underlying bolt
+		// transaction was closed.
+		return errSubTestFail
+	})
+	if err != nil {
+		if err != errSubTestFail {
+			t.Errorf("Update: unexpected error: %v", err)
+		}
+		return
+	}
+
+	// Close the underlying bolt database out from under the database
+	// instance.
+	boltDB.Close()
+
+	// Ensure View handles errors in the underlying bolt database
+	// properly.
+	testName = "View: underlying bolt error"
+	wantErrCode = database.ErrDbNotOpen
+	err = idb.View(func(tx database.Tx) error {
+		return nil
+	})
+	if !checkDbError(t, testName, err, wantErrCode) {
+		return
+	}
+
+	// Ensure Update handles errors in the underlying bolt database
+	// properly.
+	testName = "Update: underlying bolt error"
+	err = idb.Update(func(tx database.Tx) error {
+		return nil
+	})
+	if !checkDbError(t, testName, err, wantErrCode) {
+		return
+	}
+}
+
+// resetDatabase removes everything from the opened database associated with
+// the test context, including all metadata and the mock files.
+func resetDatabase(tc *testContext) bool {
+	// Reset the metadata.
+	err := tc.db.Update(func(tx database.Tx) error {
+		err := tx.Metadata().ForEach(func(k, v []byte) error {
+			if v == nil {
+				return tx.Metadata().DeleteBucket(k)
+			}
+
+			return tx.Metadata().Delete(k)
+		})
+		if err != nil {
+			return err
+		}
+		_, err = tx.Metadata().CreateBucket(blockIdxBucketName)
+		return err
+	})
+	if err != nil {
+		tc.t.Errorf("Update: unexpected error: %v", err)
+		return false
+	}
+
+	// Reset the mock files.
+	store := tc.db.(*db).store
+	wc := store.writeCursor
+	wc.curFile.Lock()
+	if wc.curFile.file != nil {
+		wc.curFile.file.Close()
+		wc.curFile.file = nil
+	}
+	wc.curFile.Unlock()
+	wc.Lock()
+	wc.curFileNum = 0
+	wc.curOffset = 0
+	wc.Unlock()
+	tc.files = make(map[uint32]*lockableFile)
+	tc.maxFileSizes = make(map[uint32]int64)
+	return true
+}
+
+// testWriteFailures tests various failure paths when writing to the block
+// files.
+func testWriteFailures(tc *testContext) bool {
+	if !resetDatabase(tc) {
+		return false
+	}
+
+	// Ensure file sync errors during writeBlock return the expected error.
+	store := tc.db.(*db).store
+	testName := "writeBlock: file sync failure"
+	store.writeCursor.Lock()
+	oldFile := store.writeCursor.curFile
+	store.writeCursor.curFile = &lockableFile{
+		file: &mockFile{forceSyncErr: true, maxSize: -1},
+	}
+	store.writeCursor.Unlock()
+	_, err := store.writeBlock([]byte{0x00})
+	if !checkDbError(tc.t, testName, err, database.ErrDriverSpecific) {
+		return false
+	}
+	store.writeCursor.Lock()
+	store.writeCursor.curFile = oldFile
+	store.writeCursor.Unlock()
+
+	// Force errors in the various error paths when writing data by using
+	// mock files with a limited max size.
+	block0Bytes, _ := tc.blocks[0].Bytes()
+	tests := []struct {
+		fileNum uint32
+		maxSize int64
+	}{
+		// Force an error when writing the network bytes.
+		{fileNum: 0, maxSize: 2},
+
+		// Force an error when writing the block size.
+		{fileNum: 0, maxSize: 6},
+
+		// Force an error when writing the block.
+		{fileNum: 0, maxSize: 17},
+
+		// Force an error when writing the checksum.
+		{fileNum: 0, maxSize: int64(len(block0Bytes)) + 10},
+
+		// Force an error after writing enough blocks to force multiple
+		// files.
+		{fileNum: 15, maxSize: 1},
+	}
+
+	for i, test := range tests {
+		if !resetDatabase(tc) {
+			return false
+		}
+
+		// Ensure that storing the blocks using a mock file which fails
+		// the write fails when the transaction is committed, not when
+		// the block is stored.
+		tc.maxFileSizes = map[uint32]int64{test.fileNum: test.maxSize}
+		err := tc.db.Update(func(tx database.Tx) error {
+			for i, block := range tc.blocks {
+				err := tx.StoreBlock(block)
+				if err != nil {
+					tc.t.Errorf("StoreBlock (%d): unexpected "+
+						"error: %v", i, err)
+					return errSubTestFail
+				}
+			}
+
+			return nil
+		})
+		testName := fmt.Sprintf("Force update commit failure - test "+
+			"%d, fileNum %d, maxsize %d", i, test.fileNum,
+			test.maxSize)
+		if !checkDbError(tc.t, testName, err, database.ErrDriverSpecific) {
+			tc.t.Errorf("%v", err)
+			return false
+		}
+
+		// Ensure the commit rollback removed all extra files and data.
+		if len(tc.files) != 1 {
+			tc.t.Errorf("Update rollback: new files not removed - "+
+				"want 1 file, got %d", len(tc.files))
+			return false
+		}
+		if _, ok := tc.files[0]; !ok {
+			tc.t.Error("Update rollback: file 0 does not exist")
+			return false
+		}
+		file := tc.files[0].file.(*mockFile)
+		if len(file.data) != 0 {
+			tc.t.Errorf("Update rollback: file did not truncate - "+
+				"want len 0, got len %d", len(file.data))
+			return false
+		}
+	}
+
+	return true
+}
+
+// testBlockFileErrors ensures the database returns expected errors with
+// various file-related issues such as closed and missing files.
+func testBlockFileErrors(tc *testContext) bool {
+	if !resetDatabase(tc) {
+		return false
+	}
+
+	// Ensure errors in blockFile and openFile when requesting invalid file
+	// numbers.
+	store := tc.db.(*db).store
+	testName := "blockFile invalid file open"
+	_, err := store.blockFile(^uint32(0))
+	if !checkDbError(tc.t, testName, err, database.ErrDriverSpecific) {
+		return false
+	}
+	testName = "openFile invalid file open"
+	_, err = store.openFile(^uint32(0))
+	if !checkDbError(tc.t, testName, err, database.ErrDriverSpecific) {
+		return false
+	}
+
+	// Insert the first block into the mock file.
+	err = tc.db.Update(func(tx database.Tx) error {
+		err := tx.StoreBlock(tc.blocks[0])
+		if err != nil {
+			tc.t.Errorf("StoreBlock: unexpected error: %v", err)
+			return errSubTestFail
+		}
+
+		return nil
+	})
+	if err != nil {
+		if err != errSubTestFail {
+			tc.t.Errorf("Update: unexpected error: %v", err)
+		}
+		return false
+	}
+
+	// Ensure errors in readBlock and readBlockRegion when requesting a file
+	// number that doesn't exist.
+	block0Hash, _ := tc.blocks[0].Sha()
+	testName = "readBlock invalid file number"
+	invalidLoc := blockLocation{
+		blockFileNum: ^uint32(0),
+		blockLen:     80,
+	}
+	_, err = store.readBlock(block0Hash, invalidLoc)
+	if !checkDbError(tc.t, testName, err, database.ErrDriverSpecific) {
+		return false
+	}
+	testName = "readBlockRegion invalid file number"
+	_, err = store.readBlockRegion(invalidLoc, 0, 80)
+	if !checkDbError(tc.t, testName, err, database.ErrDriverSpecific) {
+		return false
+	}
+
+	// Close the block file out from under the database.
+	store.writeCursor.curFile.Lock()
+	store.writeCursor.curFile.file.Close()
+	store.writeCursor.curFile.Unlock()
+
+	// Ensure FetchBlock and FetchBlockRegion(s) fail since the underlying
+	// file they need to read from has been closed.
+	err = tc.db.View(func(tx database.Tx) error {
+		testName = "FetchBlock closed file"
+		wantErrCode := database.ErrDriverSpecific
+		_, err := tx.FetchBlock(block0Hash)
+		if !checkDbError(tc.t, testName, err, wantErrCode) {
+			return errSubTestFail
+		}
+
+		testName = "FetchBlockRegion closed file"
+		regions := []database.BlockRegion{
+			{
+				Hash:   block0Hash,
+				Len:    80,
+				Offset: 0,
+			},
+		}
+		_, err = tx.FetchBlockRegion(&regions[0])
+		if !checkDbError(tc.t, testName, err, wantErrCode) {
+			return errSubTestFail
+		}
+
+		testName = "FetchBlockRegions closed file"
+		_, err = tx.FetchBlockRegions(regions)
+		if !checkDbError(tc.t, testName, err, wantErrCode) {
+			return errSubTestFail
+		}
+
+		return nil
+	})
+	if err != nil {
+		if err != errSubTestFail {
+			tc.t.Errorf("View: unexpected error: %v", err)
+		}
+		return false
+	}
+
+	return true
+}
+
+// testCorruption ensures the database returns expected errors under various
+// corruption scenarios.
+func testCorruption(tc *testContext) bool {
+	if !resetDatabase(tc) {
+		return false
+	}
+
+	// Insert the first block into the mock file.
+ err := tc.db.Update(func(tx database.Tx) error { + err := tx.StoreBlock(tc.blocks[0]) + if err != nil { + tc.t.Errorf("StoreBlock: unexpected error: %v", err) + return errSubTestFail + } + + return nil + }) + if err != nil { + if err != errSubTestFail { + tc.t.Errorf("Update: unexpected error: %v", err) + } + return false + } + + // Ensure corruption is detected by intentionally modifying the bytes + // stored to the mock file and reading the block. + block0Bytes, _ := tc.blocks[0].Bytes() + block0Hash, _ := tc.blocks[0].Sha() + tests := []struct { + offset uint32 + fixChecksum bool + wantErrCode database.ErrorCode + }{ + // One of the network bytes. The checksum needs to be fixed so + // the invalid network is detected. + {2, true, database.ErrDriverSpecific}, + + // The same network byte, but this time don't fix the checksum + // to ensure the corruption is detected. + {2, false, database.ErrCorruption}, + + // One of the block length bytes. + {6, false, database.ErrCorruption}, + + // Random header byte. + {17, false, database.ErrCorruption}, + + // Random transaction byte. + {90, false, database.ErrCorruption}, + + // Random checksum byte. + {uint32(len(block0Bytes)) + 10, false, database.ErrCorruption}, + } + err = tc.db.View(func(tx database.Tx) error { + data := tc.files[0].file.(*mockFile).data + for i, test := range tests { + // Corrupt the byte at the offset by a single bit. + data[test.offset] ^= 0x10 + + // Fix the checksum if requested to force other errors. + fileLen := len(data) + var oldChecksumBytes [4]byte + copy(oldChecksumBytes[:], data[fileLen-4:]) + if test.fixChecksum { + toSum := data[:fileLen-4] + cksum := crc32.Checksum(toSum, castagnoli) + binary.BigEndian.PutUint32(data[fileLen-4:], cksum) + } + + testName := fmt.Sprintf("FetchBlock (test #%d): "+ + "corruption", i) + _, err := tx.FetchBlock(block0Hash) + if !checkDbError(tc.t, testName, err, test.wantErrCode) { + return errSubTestFail + } + + // Reset the corrupted data back to the original. + data[test.offset] ^= 0x10 + if test.fixChecksum { + copy(data[fileLen-4:], oldChecksumBytes[:]) + } + } + + return nil + }) + if err != nil { + if err != errSubTestFail { + tc.t.Errorf("View: unexpected error: %v", err) + } + return false + } + + // Modify the checksum in the block row index and ensure the expected + // error is received when reading the block row. + err = tc.db.Update(func(tx database.Tx) error { + // Intentionally corrupt the block row entry. + blockIdxBucket := tx.Metadata().Bucket(blockIdxBucketName) + oldBlockRow := blockIdxBucket.Get(block0Hash[:]) + blockRow := make([]byte, len(oldBlockRow)) + copy(blockRow, oldBlockRow) + blockRow[3] ^= 0x20 + err := blockIdxBucket.Put(block0Hash[:], blockRow) + if err != nil { + tc.t.Errorf("Put: Unexpected error: %v", err) + return errSubTestFail + } + + // Ensure attempting to fetch block data for the block with the + // corrupted block row returns the expected error. + testName := "FetchBlock with corrupted block row" + wantErrCode := database.ErrCorruption + _, err = tx.FetchBlock(block0Hash) + if !checkDbError(tc.t, testName, err, wantErrCode) { + return errSubTestFail + } + + // Put the uncorrupted block row entry back. 
+		err = blockIdxBucket.Put(block0Hash[:], oldBlockRow)
+		if err != nil {
+			tc.t.Errorf("Put: Unexpected error: %v", err)
+			return errSubTestFail
+		}
+
+		return nil
+	})
+	if err != nil {
+		if err != errSubTestFail {
+			tc.t.Errorf("Update: unexpected error: %v", err)
+		}
+		return false
+	}
+
+	return true
+}
+
+// TestFailureScenarios ensures several failure scenarios such as database
+// corruption, block file write failures, and rollback failures are handled
+// correctly.
+func TestFailureScenarios(t *testing.T) {
+	// Create a new database to run tests against.
+	dbPath := filepath.Join(os.TempDir(), "ffboltdb-failurescenarios")
+	_ = os.RemoveAll(dbPath)
+	idb, err := database.Create(dbType, dbPath, blockDataNet)
+	if err != nil {
+		t.Errorf("Failed to create test database (%s) %v", dbType, err)
+		return
+	}
+	defer os.RemoveAll(dbPath)
+	defer idb.Close()
+
+	// Create a test context to pass around.
+	tc := &testContext{
+		t:            t,
+		db:           idb,
+		files:        make(map[uint32]*lockableFile),
+		maxFileSizes: make(map[uint32]int64),
+	}
+
+	// Change the maximum file size to a small value to force multiple flat
+	// files with the test data set and replace the file-related functions
+	// to make use of mock files in memory.  This allows injection of
+	// various file-related errors.
+	store := idb.(*db).store
+	store.maxBlockFileSize = 1024 // 1KiB
+	store.openWriteFileFunc = func(fileNum uint32) (filer, error) {
+		if file, ok := tc.files[fileNum]; ok {
+			// "Reopen" the file.
+			file.Lock()
+			mock := file.file.(*mockFile)
+			mock.Lock()
+			mock.closed = false
+			mock.Unlock()
+			file.Unlock()
+			return mock, nil
+		}
+
+		// Limit the max size of the mock file as specified in the test
+		// context.
+		maxSize := int64(-1)
+		if maxFileSize, ok := tc.maxFileSizes[fileNum]; ok {
+			maxSize = maxFileSize
+		}
+		file := &mockFile{maxSize: maxSize}
+		tc.files[fileNum] = &lockableFile{file: file}
+		return file, nil
+	}
+	store.openFileFunc = func(fileNum uint32) (*lockableFile, error) {
+		// Force error when trying to open max file num.
+		if fileNum == ^uint32(0) {
+			return nil, makeDbErr(database.ErrDriverSpecific,
+				"test", nil)
+		}
+		if file, ok := tc.files[fileNum]; ok {
+			// "Reopen" the file.
+			file.Lock()
+			mock := file.file.(*mockFile)
+			mock.Lock()
+			mock.closed = false
+			mock.Unlock()
+			file.Unlock()
+			return file, nil
+		}
+		file := &lockableFile{file: &mockFile{}}
+		tc.files[fileNum] = file
+		return file, nil
+	}
+	store.deleteFileFunc = func(fileNum uint32) error {
+		if file, ok := tc.files[fileNum]; ok {
+			file.Lock()
+			file.file.Close()
+			file.Unlock()
+			delete(tc.files, fileNum)
+			return nil
+		}
+
+		str := fmt.Sprintf("file %d does not exist", fileNum)
+		return makeDbErr(database.ErrDriverSpecific, str, nil)
+	}
+
+	// Load the test blocks and save in the test context for use throughout
+	// the tests.
+	blocks, err := loadBlocks(t, blockDataFile, blockDataNet)
+	if err != nil {
+		t.Errorf("loadBlocks: Unexpected error: %v", err)
+		return
+	}
+	tc.blocks = blocks
+
+	// Test various failure paths when writing to the block files.
+	if !testWriteFailures(tc) {
+		return
+	}
+
+	// Test various file-related issues such as closed and missing files.
+	if !testBlockFileErrors(tc) {
+		return
+	}
+
+	// Test various corruption scenarios.
+	testCorruption(tc)
+}
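Before the interface definition that follows, note that the `checkDbError` helper above uses exactly the type assertion client code would use against the error-code contracts documented below. A hedged sketch (the helper and the hand-built error are illustrative):

```go
package main

import (
	"fmt"

	"github.com/btcsuite/btcd/database2"
)

// isBlockNotFound reports whether err is a database.Error carrying the
// ErrBlockNotFound code -- the same assertion checkDbError performs.
func isBlockNotFound(err error) bool {
	dbErr, ok := err.(database.Error)
	return ok && dbErr.ErrorCode == database.ErrBlockNotFound
}

func main() {
	var err error = database.Error{ErrorCode: database.ErrBlockNotFound}
	fmt.Println(isBlockNotFound(err)) // true
}
```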
diff --git a/database2/interface.go b/database2/interface.go
new file mode 100644
index 0000000000..8b3fb74364
--- /dev/null
+++ b/database2/interface.go
@@ -0,0 +1,419 @@
+// Copyright (c) 2015 Conformal Systems LLC.
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+// Parts of this interface were inspired heavily by the excellent boltdb project
+// at https://github.com/boltdb/bolt by Ben B. Johnson.
+
+package database
+
+import (
+	"github.com/btcsuite/btcd/wire"
+	"github.com/btcsuite/btcutil"
+)
+
+// Cursor represents a cursor over key/value pairs and nested buckets of a
+// bucket.
+//
+// Note that open cursors are not tracked on bucket changes and any
+// modifications to the bucket, with the exception of Cursor.Delete, invalidate
+// the cursor.  After invalidation, the cursor must be repositioned, or the keys
+// and values returned may be unpredictable.
+type Cursor interface {
+	// Bucket returns the bucket the cursor was created for.
+	Bucket() Bucket
+
+	// Delete removes the current key/value pair the cursor is at without
+	// invalidating the cursor.
+	//
+	// The interface contract guarantees at least the following errors will
+	// be returned (other implementation-specific errors are possible):
+	//   - ErrIncompatibleValue if attempted when the cursor points to a
+	//     nested bucket
+	//   - ErrTxNotWritable if attempted against a read-only transaction
+	//   - ErrTxClosed if the transaction has already been closed
+	Delete() error
+
+	// First positions the cursor at the first key/value pair and returns
+	// the pair.
+	First() (key, value []byte)
+
+	// Last positions the cursor at the last key/value pair and returns the
+	// pair.
+	Last() (key, value []byte)
+
+	// Next moves the cursor one key/value pair forward and returns the new
+	// pair.
+	Next() (key, value []byte)
+
+	// Prev moves the cursor one key/value pair backward and returns the new
+	// pair.
+	Prev() (key, value []byte)
+
+	// Seek positions the cursor at the passed seek key.  When the key does
+	// not exist, the cursor is moved to the next key after seek.  Returns
+	// the new pair.
+	Seek(seek []byte) (key, value []byte)
+}
+
+// Bucket represents a collection of key/value pairs.
+type Bucket interface {
+	// Bucket retrieves a nested bucket with the given key.  Returns nil if
+	// the bucket does not exist.
+	Bucket(key []byte) Bucket
+
+	// CreateBucket creates and returns a new nested bucket with the given
+	// key.
+	//
+	// The interface contract guarantees at least the following errors will
+	// be returned (other implementation-specific errors are possible):
+	//   - ErrBucketExists if the bucket already exists
+	//   - ErrBucketNameRequired if the key is empty
+	//   - ErrIncompatibleValue if the key is otherwise invalid for the
+	//     particular implementation
+	//   - ErrTxNotWritable if attempted against a read-only transaction
+	//   - ErrTxClosed if the transaction has already been closed
+	CreateBucket(key []byte) (Bucket, error)
+
+	// CreateBucketIfNotExists creates and returns a new nested bucket with
+	// the given key if it does not already exist.
+	//
+	// The interface contract guarantees at least the following errors will
+	// be returned (other implementation-specific errors are possible):
+	//   - ErrBucketNameRequired if the key is empty
+	//   - ErrIncompatibleValue if the key is otherwise invalid for the
+	//     particular implementation
+	//   - ErrTxNotWritable if attempted against a read-only transaction
+	//   - ErrTxClosed if the transaction has already been closed
+	CreateBucketIfNotExists(key []byte) (Bucket, error)
+
+	// DeleteBucket removes a nested bucket with the given key.
+	//
+	// The interface contract guarantees at least the following errors will
+	// be returned (other implementation-specific errors are possible):
+	//   - ErrBucketNotFound if the specified bucket does not exist
+	//   - ErrTxNotWritable if attempted against a read-only transaction
+	//   - ErrTxClosed if the transaction has already been closed
+	DeleteBucket(key []byte) error
+
+	// ForEach invokes the passed function with every key/value pair in
+	// the bucket.  This includes nested buckets, in which case the value
+	// is nil, but it does not include the key/value pairs within those
+	// nested buckets.
+	//
+	// The interface contract guarantees at least the following errors will
+	// be returned (other implementation-specific errors are possible):
+	//   - ErrTxClosed if the transaction has already been closed
+	//
+	// NOTE: The values returned by this function are only valid during a
+	// transaction.  Attempting to access them after a transaction has ended
+	// results in undefined behavior.  This constraint prevents additional
+	// data copies and allows support for memory-mapped database
+	// implementations.
+	ForEach(func(k, v []byte) error) error
+
+	// Cursor returns a new cursor, allowing for iteration over the bucket's
+	// key/value pairs and nested buckets in forward or backward order.
+	Cursor() Cursor
+
+	// Writable returns whether or not the bucket is writable.
+	Writable() bool
+
+	// Put saves the specified key/value pair to the bucket.  Keys that do
+	// not already exist are added and keys that already exist are
+	// overwritten.
+	//
+	// The interface contract guarantees at least the following errors will
+	// be returned (other implementation-specific errors are possible):
+	//   - ErrKeyRequired if the key is empty
+	//   - ErrIncompatibleValue if the key is the same as an existing bucket
+	//   - ErrTxNotWritable if attempted against a read-only transaction
+	//   - ErrTxClosed if the transaction has already been closed
+	Put(key, value []byte) error
+
+	// Get returns the value for the given key.  Returns nil if the key does
+	// not exist in this bucket.
+	//
+	// NOTE: The value returned by this function is only valid during a
+	// transaction.  Attempting to access it after a transaction has ended
+	// results in undefined behavior.  This constraint prevents additional
+	// data copies and allows support for memory-mapped database
+	// implementations.
+	Get(key []byte) []byte
+
+	// Delete removes the specified key from the bucket.  Deleting a key
+	// that does not exist does not return an error.
+	//
+	// The interface contract guarantees at least the following errors will
+	// be returned (other implementation-specific errors are possible):
+	//   - ErrKeyRequired if the key is empty
+	//   - ErrIncompatibleValue if the key is the same as an existing bucket
+	//   - ErrTxNotWritable if attempted against a read-only transaction
+	//   - ErrTxClosed if the transaction has already been closed
+	Delete(key []byte) error
+}
+
+// BlockRegion specifies a particular region of a block identified by the
+// specified hash, given an offset and length.
+type BlockRegion struct {
+	Hash   *wire.ShaHash
+	Offset uint32
+	Len    uint32
+}
+
+// Tx represents a database transaction.  It can be either read-only or
+// read-write.  The transaction provides a metadata bucket against which all
+// reads and writes occur.
+//
+// As would be expected with a transaction, no changes will be saved to the
+// database until it has been committed.  The transaction will only provide a
+// view of the database at the time it was created.  Transactions should not
+// be long running operations.
+type Tx interface {
+	// Metadata returns the top-most bucket for all metadata storage.
+	Metadata() Bucket
+
+	// StoreBlock stores the provided block into the database.  There are no
+	// checks to ensure the block connects to a previous block, contains
+	// double spends, or any additional functionality such as transaction
+	// indexing.  It simply stores the block in the database.
+	//
+	// The interface contract guarantees at least the following errors will
+	// be returned (other implementation-specific errors are possible):
+	//   - ErrBlockExists when the block hash already exists
+	//   - ErrTxNotWritable if attempted against a read-only transaction
+	//   - ErrTxClosed if the transaction has already been closed
+	//
+	// Other errors are possible depending on the implementation.
+	StoreBlock(block *btcutil.Block) error
+
+	// HasBlock returns whether or not a block with the given hash exists
+	// in the database.
+	//
+	// The interface contract guarantees at least the following errors will
+	// be returned (other implementation-specific errors are possible):
+	//   - ErrTxClosed if the transaction has already been closed
+	//
+	// Other errors are possible depending on the implementation.
+	HasBlock(hash *wire.ShaHash) (bool, error)
+
+	// HasBlocks returns whether or not the blocks with the provided hashes
+	// exist in the database.
+	//
+	// The interface contract guarantees at least the following errors will
+	// be returned (other implementation-specific errors are possible):
+	//   - ErrTxClosed if the transaction has already been closed
+	//
+	// Other errors are possible depending on the implementation.
+	HasBlocks(hashes []wire.ShaHash) ([]bool, error)
+
+	// FetchBlockHeader returns the raw serialized bytes for the block
+	// header identified by the given hash.  The raw bytes are in the format
+	// returned by Serialize on a wire.BlockHeader.
+	//
+	// It is highly recommended to use this function (or FetchBlockHeaders)
+	// to obtain block headers over the FetchBlockRegion(s) functions since
+	// it provides the backend drivers the freedom to perform very specific
+	// optimizations which can result in significant speed advantages when
+	// working with headers.
+	//
+	// The interface contract guarantees at least the following errors will
+	// be returned (other implementation-specific errors are possible):
+	//   - ErrBlockNotFound if the requested block hash does not exist
+	//   - ErrTxClosed if the transaction has already been closed
+	//   - ErrCorruption if the database has somehow become corrupted
+	//
+	// NOTE: The data returned by this function is only valid during a
+	// database transaction.  Attempting to access it after a transaction
+	// has ended results in undefined behavior.  This constraint prevents
+	// additional data copies and allows support for memory-mapped database
+	// implementations.
+	FetchBlockHeader(hash *wire.ShaHash) ([]byte, error)
+
+	// FetchBlockHeaders returns the raw serialized bytes for the block
+	// headers identified by the given hashes.  The raw bytes are in the
+	// format returned by Serialize on a wire.BlockHeader.
+	//
+	// It is highly recommended to use this function (or FetchBlockHeader)
+	// to obtain block headers over the FetchBlockRegion(s) functions since
+	// it provides the backend drivers the freedom to perform very specific
+	// optimizations which can result in significant speed advantages when
+	// working with headers.
+	//
+	// Furthermore, depending on the specific implementation, this function
+	// can be more efficient for bulk loading multiple block headers than
+	// loading them one-by-one with FetchBlockHeader.
+	//
+	// The interface contract guarantees at least the following errors will
+	// be returned (other implementation-specific errors are possible):
+	//   - ErrBlockNotFound if any of the requested block hashes do not
+	//     exist
+	//   - ErrTxClosed if the transaction has already been closed
+	//   - ErrCorruption if the database has somehow become corrupted
+	//
+	// NOTE: The data returned by this function is only valid during a
+	// database transaction.  Attempting to access it after a transaction
+	// has ended results in undefined behavior.  This constraint prevents
+	// additional data copies and allows support for memory-mapped database
+	// implementations.
+	FetchBlockHeaders(hashes []wire.ShaHash) ([][]byte, error)
+
+	// FetchBlock returns the raw serialized bytes for the block identified
+	// by the given hash.  The raw bytes are in the format returned by
+	// Serialize on a wire.MsgBlock.
+	//
+	// The interface contract guarantees at least the following errors will
+	// be returned (other implementation-specific errors are possible):
+	//   - ErrBlockNotFound if the requested block hash does not exist
+	//   - ErrTxClosed if the transaction has already been closed
+	//   - ErrCorruption if the database has somehow become corrupted
+	//
+	// NOTE: The data returned by this function is only valid during a
+	// database transaction.  Attempting to access it after a transaction
+	// has ended results in undefined behavior.  This constraint prevents
+	// additional data copies and allows support for memory-mapped database
+	// implementations.
+	FetchBlock(hash *wire.ShaHash) ([]byte, error)
+
+	// FetchBlocks returns the raw serialized bytes for the blocks
+	// identified by the given hashes.  The raw bytes are in the format
+	// returned by Serialize on a wire.MsgBlock.
+	//
+	// The interface contract guarantees at least the following errors will
+	// be returned (other implementation-specific errors are possible):
+	//   - ErrBlockNotFound if any of the requested block hashes do not
+	//     exist
+	//   - ErrTxClosed if the transaction has already been closed
+	//   - ErrCorruption if the database has somehow become corrupted
+	//
+	// NOTE: The data returned by this function is only valid during a
+	// database transaction.  Attempting to access it after a transaction
+	// has ended results in undefined behavior.  This constraint prevents
+	// additional data copies and allows support for memory-mapped database
+	// implementations.
+	FetchBlocks(hashes []wire.ShaHash) ([][]byte, error)
+
+	// FetchBlockRegion returns the raw serialized bytes for the given
+	// block region.
+	//
+	// For example, it is possible to directly extract Bitcoin transactions
+	// and/or scripts from a block with this function.  Depending on the
+	// backend implementation, this can provide significant savings by
+	// avoiding the need to load entire blocks.
+	//
+	// The raw bytes are in the format returned by Serialize on a
+	// wire.MsgBlock and the Offset field in the provided BlockRegion is
+	// zero-based and relative to the start of the block (byte 0).
+	//
+	// The interface contract guarantees at least the following errors will
+	// be returned (other implementation-specific errors are possible):
+	//   - ErrBlockNotFound if the requested block hash does not exist
+	//   - ErrBlockRegionInvalid if the region exceeds the bounds of the
+	//     associated block
+	//   - ErrTxClosed if the transaction has already been closed
+	//   - ErrCorruption if the database has somehow become corrupted
+	//
+	// NOTE: The data returned by this function is only valid during a
+	// database transaction.  Attempting to access it after a transaction
+	// has ended results in undefined behavior.  This constraint prevents
+	// additional data copies and allows support for memory-mapped database
+	// implementations.
+	FetchBlockRegion(region *BlockRegion) ([]byte, error)
+
+	// FetchBlockRegions returns the raw serialized bytes for the given
+	// block regions.
+	//
+	// For example, it is possible to directly extract Bitcoin transactions
+	// and/or scripts from various blocks with this function.  Depending on
+	// the backend implementation, this can provide significant savings by
+	// avoiding the need to load entire blocks.
+	//
+	// The raw bytes are in the format returned by Serialize on a
+	// wire.MsgBlock and the Offset fields in the provided BlockRegions are
+	// zero-based and relative to the start of the block (byte 0).
+	//
+	// The interface contract guarantees at least the following errors will
+	// be returned (other implementation-specific errors are possible):
+	//   - ErrBlockNotFound if any of the requested block hashes do not
+	//     exist
+	//   - ErrBlockRegionInvalid if one or more regions exceed the bounds
+	//     of the associated block
+	//   - ErrTxClosed if the transaction has already been closed
+	//   - ErrCorruption if the database has somehow become corrupted
+	//
+	// NOTE: The data returned by this function is only valid during a
+	// database transaction.  Attempting to access it after a transaction
+	// has ended results in undefined behavior.  This constraint prevents
+	// additional data copies and allows support for memory-mapped database
+	// implementations.
+	FetchBlockRegions(regions []BlockRegion) ([][]byte, error)
+
+	// ******************************************************************
+	// Methods related to both atomic metadata storage and block storage.
+	// ******************************************************************
+
+	// Commit commits all changes that have been made to the metadata or
+	// block storage to persistent storage.  Calling this function on a
+	// managed transaction will result in a panic.
+	Commit() error
+
+	// Rollback undoes all changes that have been made to the metadata or
+	// block storage.  Calling this function on a managed transaction will
+	// result in a panic.
+	Rollback() error
+}
+
+// DB provides a generic interface that is used to store bitcoin blocks and
+// related metadata.  This interface is intended to be agnostic to the actual
+// mechanism used for backend data storage.  The RegisterDriver function can
+// be used to add a new backend data storage method.
+//
+// This interface is divided into two distinct categories of functionality.
+//
+// The first category is atomic metadata storage with bucket support.  This
+// is accomplished through the use of database transactions.
+//
+// The second category is generic block storage.  This functionality is
+// intentionally separate because the mechanism used for block storage may or
+// may not be the same mechanism used for metadata storage.  For example, it
+// is often more efficient to store the block data as flat files while the
+// metadata is kept in a database.  However, this interface aims to be generic
+// enough to support blocks in the database too, if needed by a particular
+// backend.
+type DB interface {
+	// Type returns the database driver type the current database instance
+	// was created with.
+	Type() string
+
+	// Begin starts a transaction which is either read-only or read-write
+	// depending on the specified flag.  Multiple read-only transactions
+	// can be started simultaneously while only a single read-write
+	// transaction can be started at a time.  The call will block when
+	// starting a read-write transaction when one is already open.
+	//
+	// NOTE: The transaction must be closed by calling Rollback or Commit on
+	// it when it is no longer needed.  Failure to do so can result in
+	// unclaimed memory and/or inability to close the database due to locks
+	// depending on the specific database implementation.
+	Begin(writable bool) (Tx, error)
+
+	// View invokes the passed function in the context of a managed
+	// read-only transaction.  Any errors returned from the user-supplied
+	// function are returned from this function.
+	//
+	// Calling Rollback or Commit on the transaction passed to the
+	// user-supplied function will result in a panic.
+	View(fn func(tx Tx) error) error
+
+	// Update invokes the passed function in the context of a managed
+	// read-write transaction.  Any errors returned from the user-supplied
+	// function will cause the transaction to be rolled back and are
+	// returned from this function.  Otherwise, the transaction is committed
+	// when the user-supplied function returns a nil error.
+	//
+	// Calling Rollback or Commit on the transaction passed to the
+	// user-supplied function will result in a panic.
+	Update(fn func(tx Tx) error) error
+
+	// Close cleanly shuts down the database and syncs all data.  It will
+	// block until all database transactions have been finalized (rolled
+	// back or committed).
+	Close() error
+}
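View and Update are the intended entry points for most callers. The sketch below mirrors the Basic Usage example the README links to; the driver name, path, and the blank import that registers the ffboltdb driver are assumptions about the surrounding wiring:

```go
package main

import (
	"fmt"

	"github.com/btcsuite/btcd/database2"
	_ "github.com/btcsuite/btcd/database2/ffboltdb" // assumed driver registration
	"github.com/btcsuite/btcd/wire"
)

func main() {
	db, err := database.Create("ffboltdb", "/tmp/exampledb", wire.MainNet)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer db.Close()

	// Managed read-write transaction: committed when the closure returns
	// nil, rolled back when it returns an error.
	err = db.Update(func(tx database.Tx) error {
		return tx.Metadata().Put([]byte("mykey"), []byte("myvalue"))
	})
	if err != nil {
		fmt.Println(err)
		return
	}

	// Managed read-only transaction.
	err = db.View(func(tx database.Tx) error {
		fmt.Printf("mykey = %s\n", tx.Metadata().Get([]byte("mykey")))
		return nil
	})
	if err != nil {
		fmt.Println(err)
	}
}
```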
diff --git a/database2/log.go b/database2/log.go
new file mode 100644
index 0000000000..b390bc4010
--- /dev/null
+++ b/database2/log.go
@@ -0,0 +1,63 @@
+// Copyright (c) 2013-2014 Conformal Systems LLC.
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package database
+
+import (
+	"errors"
+	"io"
+
+	"github.com/btcsuite/btclog"
+)
+
+// log is a logger that is initialized with no output filters.  This
+// means the package will not perform any logging by default until the caller
+// requests it.
+var log btclog.Logger
+
+// The default amount of logging is none.
+func init() {
+	DisableLog()
+}
+
+// DisableLog disables all library log output.  Logging output is disabled
+// by default until either UseLogger or SetLogWriter are called.
+func DisableLog() {
+	log = btclog.Disabled
+}
+
+// UseLogger uses a specified Logger to output package logging info.
+// This should be used in preference to SetLogWriter if the caller is also
+// using btclog.
+func UseLogger(logger btclog.Logger) {
+	log = logger
+}
+
+// SetLogWriter uses a specified io.Writer to output package logging info.
+// This allows a caller to direct package logging output without needing a
+// dependency on seelog.  If the caller is also using btclog, UseLogger should
+// be used instead.
+func SetLogWriter(w io.Writer, level string) error {
+	if w == nil {
+		return errors.New("nil writer")
+	}
+
+	lvl, ok := btclog.LogLevelFromString(level)
+	if !ok {
+		return errors.New("invalid log level")
+	}
+
+	l, err := btclog.NewLoggerFromWriter(w, lvl)
+	if err != nil {
+		return err
+	}
+
+	UseLogger(l)
+	return nil
+}
+
+// GetLog returns the currently active logger.
+func GetLog() btclog.Logger {
+	return log
+}
diff --git a/database2/log_test.go b/database2/log_test.go
new file mode 100644
index 0000000000..8a9d24e84e
--- /dev/null
+++ b/database2/log_test.go
@@ -0,0 +1,67 @@
+// Copyright (c) 2015 Conformal Systems LLC.
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package database_test
+
+import (
+	"errors"
+	"io"
+	"os"
+	"testing"
+
+	"github.com/btcsuite/btcd/database2"
+)
+
+// TestSetLogWriter ensures the SetLogWriter function works as expected.
+func TestSetLogWriter(t *testing.T) {
+	tests := []struct {
+		name     string
+		w        io.Writer
+		level    string
+		expected error
+	}{
+		{
+			name:     "nil writer",
+			w:        nil,
+			level:    "trace",
+			expected: errors.New("nil writer"),
+		},
+		{
+			name:     "invalid log level",
+			w:        os.Stdout,
+			level:    "wrong",
+			expected: errors.New("invalid log level"),
+		},
+		{
+			name:     "use off level",
+			w:        os.Stdout,
+			level:    "off",
+			expected: errors.New("min level can't be greater than max. Got min: 6, max: 5"),
+		},
+		{
+			name:     "pass",
+			w:        os.Stdout,
+			level:    "debug",
+			expected: nil,
+		},
+	}
+
+	t.Logf("Running %d tests", len(tests))
+	for i, test := range tests {
+		err := database.SetLogWriter(test.w, test.level)
+		if err != nil {
+			if err.Error() != test.expected.Error() {
+				t.Errorf("SetLogWriter #%d (%s) wrong result\n"+
+					"got: %v\nwant: %v", i, test.name, err,
+					test.expected)
+			}
+		} else {
+			if test.expected != nil {
+				t.Errorf("SetLogWriter #%d (%s) wrong result\n"+
+					"got: %v\nwant: %v", i, test.name, err,
+					test.expected)
+			}
+		}
+	}
+}
diff --git a/database2/testdata/blocks1-256.bz2 b/database2/testdata/blocks1-256.bz2
new file mode 100644
index 0000000000..6b8bda4429
Binary files /dev/null and b/database2/testdata/blocks1-256.bz2 differ
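For completeness, wiring the logger exposed by log.go into an application is a one-liner; a usage sketch matching the levels exercised by TestSetLogWriter:

```go
package main

import (
	"os"

	"github.com/btcsuite/btcd/database2"
)

func main() {
	// Send package logging to stdout at debug level.  "off" disables
	// output again, and a nil writer is rejected, as the tests above show.
	if err := database.SetLogWriter(os.Stdout, "debug"); err != nil {
		panic(err)
	}
}
```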