Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Alignment w/mainline & Core, RPC, P2P improvements #139

Closed
wants to merge 3 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 6 additions & 6 deletions addrmgr/addrmanager.go
Original file line number Diff line number Diff line change
Expand Up @@ -101,11 +101,11 @@ const (
const (
// needAddressThreshold is the number of addresses under which the
// address manager will claim to need more addresses.
needAddressThreshold = 1000
needAddressThreshold = 3000

// dumpAddressInterval is the interval used to dump the address
// cache to disk for future use.
dumpAddressInterval = time.Minute * 10
dumpAddressInterval = 2 * time.Minute

// triedBucketSize is the maximum number of addresses in each
// tried address bucket.
Expand Down Expand Up @@ -137,15 +137,15 @@ const (

// numMissingDays is the number of days before which we assume an
// address has vanished if we have not seen it announced in that long.
numMissingDays = 30
numMissingDays = 14

// numRetries is the number of tries without a single success before
// we assume an address is bad.
numRetries = 3
numRetries = 5

// maxFailures is the maximum number of failures we will accept without
// a success before considering an address bad.
maxFailures = 10
maxFailures = 15

// minBadDays is the number of days since the last success before we
// will consider evicting an address.
Expand All @@ -154,7 +154,7 @@ const (
// getAddrMax is the most addresses that we will send in response
// to a getAddr (in practice the most addresses we will return from a
// call to AddressCache()).
getAddrMax = 2500
getAddrMax = 5000

// getAddrPercent is the percentage of total addresses known that we
// will share with a call to AddressCache.
Expand Down
6 changes: 0 additions & 6 deletions addrmgr/addrmanager_internal_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -92,16 +92,11 @@ func assertAddrs(t *testing.T, addrMgr *AddrManager,
// TestAddrManagerSerialization ensures that we can properly serialize and
// deserialize the manager's current address cache.
func TestAddrManagerSerialization(t *testing.T) {
t.Parallel()

// We'll start by creating our address manager backed by a temporary
// directory.
tempDir, err := ioutil.TempDir("", "addrmgr")
if err != nil {
t.Fatalf("unable to create temp dir: %v", err)
}
defer os.RemoveAll(tempDir)

addrMgr := New(tempDir, nil)

// We'll be adding 5 random addresses to the manager.
Expand Down Expand Up @@ -132,7 +127,6 @@ func TestAddrManagerSerialization(t *testing.T) {
// TestAddrManagerV1ToV2 ensures that we can properly upgrade the serialized
// version of the address manager from v1 to v2.
func TestAddrManagerV1ToV2(t *testing.T) {
t.Parallel()

// We'll start by creating our address manager backed by a temporary
// directory.
Expand Down
2 changes: 1 addition & 1 deletion addrmgr/addrmanager_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -254,7 +254,7 @@ func TestConnected(t *testing.T) {

func TestNeedMoreAddresses(t *testing.T) {
n := addrmgr.New("testneedmoreaddresses", lookupFunc)
addrsToAdd := 1500
addrsToAdd := 5000
b := n.NeedMoreAddresses()
if !b {
t.Errorf("Expected that we need more addresses")
Expand Down
8 changes: 4 additions & 4 deletions addrmgr/knownaddress_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -97,13 +97,13 @@ func TestIsBad(t *testing.T) {
t.Errorf("test case 7: addresses more than a month old are bad.")
}

//It has failed at least three times and never succeeded.
if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(minutesOldNa, 3, minutesOld, zeroTime, true, 0)) {
//It has failed at least five times and never succeeded.
if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(minutesOldNa, 5, minutesOld, zeroTime, true, 0)) {
t.Errorf("test case 8: addresses that have never succeeded are bad.")
}

//It has failed ten times in the last week
if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(minutesOldNa, 10, minutesOld, monthOld, true, 0)) {
//It has failed fifteen times in the last week
if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(minutesOldNa, 15, minutesOld, monthOld, true, 0)) {
t.Errorf("test case 9: addresses that have not succeeded in too long are bad.")
}

Expand Down
2 changes: 2 additions & 0 deletions blockchain/accept.go
Original file line number Diff line number Diff line change
Expand Up @@ -87,9 +87,11 @@ func (b *BlockChain) maybeAcceptBlock(block *btcutil.Block, flags BehaviorFlags)
// Notify the caller that the new block was accepted into the block
// chain. The caller would typically want to react by relaying the
// inventory to other peers.
b.notificationLock.Lock()
b.chainLock.Unlock()
b.sendNotification(NTBlockAccepted, block)
b.chainLock.Lock()
b.notificationLock.Unlock()

return isMainChain, nil
}
15 changes: 14 additions & 1 deletion blockchain/chain.go
Original file line number Diff line number Diff line change
Expand Up @@ -158,6 +158,9 @@ type BlockChain struct {
// chain state can be quickly reconstructed on load.
stateLock sync.RWMutex
stateSnapshot *BestState
// notificationLock is used to make sure notifications are sent
// serially and protect against double mutex unlock panics during reorg.
notificationLock sync.Mutex

// The following caches are used to efficiently keep track of the
// current deployment threshold state of each rule change deployment.
Expand Down Expand Up @@ -685,9 +688,13 @@ func (b *BlockChain) connectBlock(node *blockNode, block *btcutil.Block,
// Notify the caller that the block was connected to the main chain.
// The caller would typically want to react with actions such as
// updating wallets.
b.notificationLock.Lock()
b.chainLock.Unlock()
b.sendNotification(NTBlockConnected, block)
b.chainLock.Lock()
b.notificationLock.Unlock()
b.stateLock.Lock()
defer b.stateLock.Unlock()

return nil
}
Expand Down Expand Up @@ -810,9 +817,11 @@ func (b *BlockChain) disconnectBlock(node *blockNode, block *btcutil.Block, view
// Notify the caller that the block was disconnected from the main
// chain. The caller would typically want to react with actions such as
// updating wallets.
b.notificationLock.Lock()
b.chainLock.Unlock()
b.sendNotification(NTBlockDisconnected, block)
b.chainLock.Lock()
b.notificationLock.Unlock()

return nil
}
Expand Down Expand Up @@ -1076,8 +1085,12 @@ func (b *BlockChain) reorganizeChain(detachNodes, attachNodes *list.List) er.R {
if err != nil {
return err
}
b.notificationLock.Lock()
b.chainLock.Unlock()
b.sendNotification(NTBlockAccepted, block)
b.chainLock.Lock()
b.notificationLock.Unlock()
}

// Log the point where the chain forked and old and new best chain
// heads.
if forkNode != nil {
Expand Down
38 changes: 0 additions & 38 deletions blockchain/difficulty.go
Original file line number Diff line number Diff line change
Expand Up @@ -154,44 +154,6 @@ func CalcWork(bits uint32) *big.Int {
return new(big.Int).Div(oneLsh256, denominator)
}

// calcEasiestDifficulty calculates the easiest possible difficulty that a block
// can have given starting difficulty bits and a duration. It is mainly used to
// verify that claimed proof of work by a block is sane as compared to a
// known good checkpoint.
func (b *BlockChain) calcEasiestDifficulty(bits uint32, duration time.Duration) uint32 {
// Convert types used in the calculations below.
durationVal := int64(duration / time.Second)
adjustmentFactor := big.NewInt(b.chainParams.RetargetAdjustmentFactor)

// The test network rules allow minimum difficulty blocks after more
// than twice the desired amount of time needed to generate a block has
// elapsed.
if b.chainParams.ReduceMinDifficulty {
reductionTime := int64(b.chainParams.MinDiffReductionTime /
time.Second)
if durationVal > reductionTime {
return b.chainParams.PowLimitBits
}
}

// Since easier difficulty equates to higher numbers, the easiest
// difficulty for a given duration is the largest value possible given
// the number of retargets for the duration and starting difficulty
// multiplied by the max adjustment factor.
newTarget := CompactToBig(bits)
for durationVal > 0 && newTarget.Cmp(b.chainParams.PowLimit) < 0 {
newTarget.Mul(newTarget, adjustmentFactor)
durationVal -= b.maxRetargetTimespan
}

// Limit new value to the proof of work limit.
if newTarget.Cmp(b.chainParams.PowLimit) > 0 {
newTarget.Set(b.chainParams.PowLimit)
}

return BigToCompact(newTarget)
}

// findPrevTestNetDifficulty returns the difficulty of the previous block which
// did not have the special testnet minimum difficulty rule applied.
//
Expand Down
45 changes: 1 addition & 44 deletions blockchain/process.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,6 @@ package blockchain

import (
"fmt"
"time"

"github.com/pkt-cash/pktd/btcutil/er"
"github.com/pkt-cash/pktd/wire/ruleerror"
Expand Down Expand Up @@ -152,8 +151,6 @@ func (b *BlockChain) ProcessBlock(block *btcutil.Block, flags BehaviorFlags) (bo
b.chainLock.Lock()
defer b.chainLock.Unlock()

fastAdd := flags&BFFastAdd == BFFastAdd

blockHash := block.Hash()
log.Tracef("Processing block %v", blockHash)

Expand All @@ -179,48 +176,8 @@ func (b *BlockChain) ProcessBlock(block *btcutil.Block, flags BehaviorFlags) (bo
return false, false, err
}

blockHeader := &block.MsgBlock().Header

// Find the previous checkpoint and perform some additional checks based
// on the checkpoint. This provides a few nice properties such as
// preventing old side chain blocks before the last checkpoint,
// rejecting easy to mine, but otherwise bogus, blocks that could be
// used to eat memory, and ensuring expected (versus claimed) proof of
// work requirements since the previous checkpoint are met.
checkpointNode, err := b.findPreviousCheckpoint()
if err != nil {
return false, false, err
}
if checkpointNode != nil {
// Ensure the block timestamp is after the checkpoint timestamp.
checkpointTime := time.Unix(checkpointNode.timestamp, 0)
if blockHeader.Timestamp.Before(checkpointTime) {
str := fmt.Sprintf("block %v has timestamp %v before "+
"last checkpoint timestamp %v", blockHash,
blockHeader.Timestamp, checkpointTime)
return false, false, ruleerror.ErrCheckpointTimeTooOld.New(str, nil)
}
if !fastAdd {
// Even though the checks prior to now have already ensured the
// proof of work exceeds the claimed amount, the claimed amount
// is a field in the block header which could be forged. This
// check ensures the proof of work is at least the minimum
// expected based on elapsed time since the last checkpoint and
// maximum adjustment allowed by the retarget rules.
duration := blockHeader.Timestamp.Sub(checkpointTime)
requiredTarget := CompactToBig(b.calcEasiestDifficulty(
checkpointNode.bits, duration))
currentTarget := CompactToBig(blockHeader.Bits)
if currentTarget.Cmp(requiredTarget) > 0 {
str := fmt.Sprintf("block target difficulty of %064x "+
"is too low when compared to the previous "+
"checkpoint", currentTarget)
return false, false, ruleerror.ErrDifficultyTooLow.New(str, nil)
}
}
}

// Handle orphan blocks.
blockHeader := &block.MsgBlock().Header
prevHash := &blockHeader.PrevBlock
prevHashExists, err := b.blockExists(prevHash)
if err != nil {
Expand Down
3 changes: 3 additions & 0 deletions cmd/btcctl/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -213,6 +213,9 @@ func loadConfig() (*config, []string, er.R) {
"indicates that a parameter should be read "+
"from the\nnext unread line from standard "+
"input.")
if e.Type == flags.ErrHelp {
os.Exit(0)
}
return nil, nil, er.E(err)
}
}
Expand Down
15 changes: 7 additions & 8 deletions connmgr/connmanager.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,8 +17,9 @@ import (

// maxFailedAttempts is the maximum number of successive failed connection
// attempts after which network failure is assumed and new connections will
// be delayed by the configured retry duration.
const maxFailedAttempts = 10
// be delayed by the configured retry duration. We use 15 to normalize with
// the Satoshi code.
const maxFailedAttempts = 15

var (
//ErrDialNil is used to indicate that Dial cannot be nil in the configuration.
Expand All @@ -27,16 +28,17 @@ var (
// maxRetryDuration is the max duration of time retrying of a persistent
// connection is allowed to grow to. This is necessary since the retry
// logic uses a backoff mechanism which increases the interval base times
// the number of retries that have been done.
maxRetryDuration = time.Minute * 5
// the number of retries that have been done. Changing from 5 minutes to
// 10 minutes to normalize with current BTC core.
maxRetryDuration = 10 * time.Minute

// defaultRetryDuration is the default duration of time for retrying
// persistent connections.
defaultRetryDuration = time.Second * 5

// defaultTargetOutbound is the default number of outbound connections to
// maintain.
defaultTargetOutbound = uint32(8)
defaultTargetOutbound = uint32(14)
)

// ConnState represents the state of the requested connection.
Expand Down Expand Up @@ -415,7 +417,6 @@ func (cm *ConnManager) Connect(c *ConnReq) {
if atomic.LoadInt32(&cm.stop) != 0 {
return
}

// During the time we wait for retry there is a chance that
// this connection was already canceled
if c.State() == ConnCanceled {
Expand All @@ -425,10 +426,8 @@ func (cm *ConnManager) Connect(c *ConnReq) {
cm.NewConnReq()
return
}

if atomic.LoadUint64(&c.id) == 0 {
atomic.StoreUint64(&c.id, atomic.AddUint64(&cm.connReqCount, 1))

// Submit a request of a pending connection attempt to the
// connection manager. By registering the id before the
// connection is even established, we'll be able to later
Expand Down
27 changes: 24 additions & 3 deletions connmgr/connmanager_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -134,6 +134,25 @@ func TestStartStop(t *testing.T) {
}
}

// TestConnReqString ensures that ConnReq.String() does not crash
func TestConnReqString(t *testing.T) {
defer func() {
if r := recover(); r != nil {
t.Fatalf("ConnReq.String crashed %v", r)
}
}()
cr1 := &ConnReq{
Addr: &net.TCPAddr{
IP: net.ParseIP("127.0.0.1"),
Port: 18555,
},
Permanent: true,
}
_ = cr1.String()
cr2 := &ConnReq{}
_ = cr2.String()
}

// TestConnectMode tests that the connection manager works in the connect mode.
//
// In connect mode, automatic connections are disabled, so we test that
Expand Down Expand Up @@ -301,8 +320,10 @@ func TestRetryPermanent(t *testing.T) {

// TestMaxRetryDuration tests the maximum retry duration.
//
// We have a timed dialer which initially returns err but after RetryDuration
// hits maxRetryDuration returns a mock conn.
// We have a timed dialer which initially returns
// err but after RetryDuration hits maxRetryDuration
// returns a mock conn. We set TargetOutbound to zero
// because we only want the connection we requested.
func TestMaxRetryDuration(t *testing.T) {
networkUp := make(chan struct{})
time.AfterFunc(5*time.Millisecond, func() {
Expand All @@ -320,7 +341,7 @@ func TestMaxRetryDuration(t *testing.T) {
connected := make(chan *ConnReq)
cmgr, err := New(&Config{
RetryDuration: time.Millisecond,
TargetOutbound: 1,
TargetOutbound: 0,
Dial: timedDialer,
OnConnection: func(c *ConnReq, conn net.Conn) {
connected <- c
Expand Down
Loading