Commit

test: add regression test
hacdias committed May 24, 2023
1 parent 1c7f5d6 commit 93aa0f8
Showing 4 changed files with 101 additions and 40 deletions.
7 changes: 4 additions & 3 deletions config/internal.go
@@ -2,9 +2,10 @@ package config

type Internal struct {
// All marked as omitempty since we are expecting to make changes to all subcomponents of Internal
Bitswap *InternalBitswap `json:",omitempty"`
UnixFSShardingSizeThreshold *OptionalString `json:",omitempty"`
Libp2pForceReachability *OptionalString `json:",omitempty"`
+ BackupBootstrapInterval *OptionalDuration `json:",omitempty"`
}

type InternalBitswap struct {
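For context, a minimal sketch (not part of this commit) of setting the new Internal.BackupBootstrapInterval option programmatically; it reuses config.NewOptionalDuration exactly as the regression test below does, and the package and function names here are illustrative only:

package example

import (
	"time"

	"github.com/ipfs/kubo/config"
)

// setBackupInterval stores a custom interval in the Internal section of a
// kubo config. If the option is left unset, the node falls back to the
// one-hour default wired up in core.go below.
func setBackupInterval(cfg *config.Config) {
	cfg.Internal.BackupBootstrapInterval = config.NewOptionalDuration(2 * time.Hour)
}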
56 changes: 24 additions & 32 deletions core/bootstrap/bootstrap.go
@@ -51,30 +51,25 @@ type BootstrapConfig struct {
// to control the peers the process uses at any moment.
BootstrapPeers func() []peer.AddrInfo

- // SavePeersPeriod governs the periodic interval at which the node will
+ // BackupBootstrapInterval governs the periodic interval at which the node will
// attempt to save connected nodes to use as temporary bootstrap peers.
- SavePeersPeriod time.Duration
-
- // SaveConnectedPeersRatio controls the number peers we're saving compared
- // to the target MinPeerThreshold. For example, if MinPeerThreshold is 4,
- // and we have a ratio of 5 we will save 20 connected peers.
- //
- // Note: one peer can have many addresses under its ID, so saving a peer
- // might translate to more than one line in the config (following the above
- // example that means TempBootstrapPeers may have more than 20 lines, but
- // all those lines will be addresses of at most 20 peers).
- SaveConnectedPeersRatio int
- SaveTempPeersForBootstrap func(context.Context, []peer.AddrInfo)
- LoadTempPeersForBootstrap func(context.Context) []peer.AddrInfo
+ BackupBootstrapInterval time.Duration
+
+ // MaxBackupBootstrapSize controls the maximum number of peers we're saving
+ // as backup bootstrap peers.
+ MaxBackupBootstrapSize int
+
+ SaveBackupBootstrapPeers func(context.Context, []peer.AddrInfo)
+ LoadBackupBootstrapPeers func(context.Context) []peer.AddrInfo
}

// DefaultBootstrapConfig specifies default sane parameters for bootstrapping.
var DefaultBootstrapConfig = BootstrapConfig{
MinPeerThreshold: 4,
Period: 30 * time.Second,
ConnectionTimeout: (30 * time.Second) / 3, // Period / 3
- SavePeersPeriod: 1 * time.Hour,
- SaveConnectedPeersRatio: 2,
+ BackupBootstrapInterval: 1 * time.Hour,
+ MaxBackupBootstrapSize: 20,
}

func BootstrapConfigWithPeers(pis []peer.AddrInfo) BootstrapConfig {
@@ -146,7 +141,7 @@ func startSavePeersAsTemporaryBootstrapProc(cfg BootstrapConfig, host host.Host,
log.Debugf("saveConnectedPeersAsTemporaryBootstrap error: %s", err)
}
}
- savePeersProc := periodicproc.Tick(cfg.SavePeersPeriod, savePeersFn)
+ savePeersProc := periodicproc.Tick(cfg.BackupBootstrapInterval, savePeersFn)

// When the main bootstrap process ends also terminate the 'save connected
// peers' ones. Coupling the two seems the easiest way to handle this backup
@@ -165,11 +160,8 @@ func saveConnectedPeersAsTemporaryBootstrap(ctx context.Context, host host.Host,
// Randomize the list of connected peers, we don't prioritize anyone.
connectedPeers := randomizeList(host.Network().Peers())

- // Save peers from the connected list that aren't bootstrap ones.
bootstrapPeers := cfg.BootstrapPeers()
-
- saveNumber := cfg.SaveConnectedPeersRatio * cfg.MinPeerThreshold
- savedPeers := make([]peer.AddrInfo, 0, saveNumber)
+ backupPeers := make([]peer.AddrInfo, 0, cfg.MaxBackupBootstrapSize)

// Choose peers to save and filter out the ones that are already bootstrap nodes.
for _, p := range connectedPeers {
@@ -181,45 +173,45 @@ }
}
}
if !found {
- savedPeers = append(savedPeers, peer.AddrInfo{
+ backupPeers = append(backupPeers, peer.AddrInfo{
ID: p,
Addrs: host.Network().Peerstore().Addrs(p),
})
}

- if len(savedPeers) >= saveNumber {
+ if len(backupPeers) >= cfg.MaxBackupBootstrapSize {
break
}
}

// If we didn't reach the target number use previously stored connected peers.
- if len(savedPeers) < saveNumber {
- oldSavedPeers := cfg.LoadTempPeersForBootstrap(ctx)
+ if len(backupPeers) < cfg.MaxBackupBootstrapSize {
+ oldSavedPeers := cfg.LoadBackupBootstrapPeers(ctx)
log.Debugf("missing %d peers to reach backup bootstrap target of %d, trying from previous list of %d saved peers",
- saveNumber-len(savedPeers), saveNumber, len(oldSavedPeers))
+ cfg.MaxBackupBootstrapSize-len(backupPeers), cfg.MaxBackupBootstrapSize, len(oldSavedPeers))

// Add some of the old saved peers. Ensure we don't duplicate them.
for _, p := range oldSavedPeers {
found := false
- for _, sp := range savedPeers {
+ for _, sp := range backupPeers {
if p.ID == sp.ID {
found = true
break
}
}

if !found {
- savedPeers = append(savedPeers, p)
+ backupPeers = append(backupPeers, p)
}

- if len(savedPeers) >= saveNumber {
+ if len(backupPeers) >= cfg.MaxBackupBootstrapSize {
break
}
}
}

- cfg.SaveTempPeersForBootstrap(ctx, savedPeers)
- log.Debugf("saved %d connected peers (of %d target) as bootstrap backup in the config", len(savedPeers), saveNumber)
+ cfg.SaveBackupBootstrapPeers(ctx, backupPeers)
+ log.Debugf("saved %d peers (of %d target) as bootstrap backup in the config", len(backupPeers), cfg.MaxBackupBootstrapSize)
return nil
}

@@ -253,7 +245,7 @@ func bootstrapRound(ctx context.Context, host host.Host, cfg BootstrapConfig) er

log.Debugf("not enough bootstrap peers to fill the remaining target of %d connections, trying backup list", numToDial)

- tempBootstrapPeers := cfg.LoadTempPeersForBootstrap(ctx)
+ tempBootstrapPeers := cfg.LoadBackupBootstrapPeers(ctx)
if len(tempBootstrapPeers) > 0 {
numToDial -= int(peersConnect(ctx, host, tempBootstrapPeers, numToDial, false))
if numToDial <= 0 {
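To make the renamed API easier to follow, here is a small sketch (not from the commit) of how a caller might populate the new BootstrapConfig fields; it assumes the exported identifiers shown in this diff plus the usual go-libp2p peer import path, and uses an in-memory slice where kubo's core.go persists peers to the repo:

package example

import (
	"context"

	"github.com/ipfs/kubo/core/bootstrap"
	"github.com/libp2p/go-libp2p/core/peer"
)

// exampleBootstrapConfig wires the renamed pieces together. The default
// config already carries BackupBootstrapInterval (1h) and
// MaxBackupBootstrapSize (20); the two callbacks decide where the backup
// peer list is saved and loaded.
func exampleBootstrapConfig() bootstrap.BootstrapConfig {
	cfg := bootstrap.DefaultBootstrapConfig

	var backup []peer.AddrInfo // stand-in for kubo's repo-backed storage
	cfg.SaveBackupBootstrapPeers = func(_ context.Context, peers []peer.AddrInfo) {
		backup = peers
	}
	cfg.LoadBackupBootstrapPeers = func(_ context.Context) []peer.AddrInfo {
		return backup
	}
	return cfg
}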
18 changes: 13 additions & 5 deletions core/core.go
@@ -13,6 +13,7 @@ import (
"context"
"encoding/json"
"io"
"time"

"github.com/ipfs/boxo/filestore"
pin "github.com/ipfs/boxo/pinning/pinner"
@@ -168,17 +169,17 @@ func (n *IpfsNode) Bootstrap(cfg bootstrap.BootstrapConfig) error {
return ps
}
}
- if cfg.SaveTempPeersForBootstrap == nil {
- cfg.SaveTempPeersForBootstrap = func(ctx context.Context, peerList []peer.AddrInfo) {
+ if cfg.SaveBackupBootstrapPeers == nil {
+ cfg.SaveBackupBootstrapPeers = func(ctx context.Context, peerList []peer.AddrInfo) {
err := n.saveTempBootstrapPeers(ctx, peerList)
if err != nil {
log.Warnf("saveTempBootstrapPeers failed: %s", err)
return
}
}
}
- if cfg.LoadTempPeersForBootstrap == nil {
- cfg.LoadTempPeersForBootstrap = func(ctx context.Context) []peer.AddrInfo {
+ if cfg.LoadBackupBootstrapPeers == nil {
+ cfg.LoadBackupBootstrapPeers = func(ctx context.Context) []peer.AddrInfo {
peerList, err := n.loadTempBootstrapPeers(ctx)
if err != nil {
log.Warnf("loadTempBootstrapPeers failed: %s", err)
@@ -188,7 +189,14 @@ func (n *IpfsNode) Bootstrap(cfg bootstrap.BootstrapConfig) error {
}
}

- var err error
+ repoConf, err := n.Repo.Config()
+ if err != nil {
+ return err
+ }
+ if repoConf.Internal.BackupBootstrapInterval != nil {
+ cfg.BackupBootstrapInterval = repoConf.Internal.BackupBootstrapInterval.WithDefault(time.Hour)
+ }

n.Bootstrapper, err = bootstrap.Bootstrap(n.Identity, n.PeerHost, n.Routing, cfg)
return err
}
60 changes: 60 additions & 0 deletions test/cli/backup_bootstrap_test.go
@@ -0,0 +1,60 @@
package cli

import (
"fmt"
"testing"
"time"

"github.com/ipfs/kubo/config"
"github.com/ipfs/kubo/test/cli/harness"
"github.com/stretchr/testify/assert"
)

func TestBackupBootstrapPeers(t *testing.T) {
nodes := harness.NewT(t).NewNodes(3).Init()
nodes.ForEachPar(func(n *harness.Node) {
n.UpdateConfig(func(cfg *config.Config) {
cfg.Bootstrap = []string{}
cfg.Addresses.Swarm = []string{fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", harness.NewRandPort())}
cfg.Discovery.MDNS.Enabled = false
cfg.Internal.BackupBootstrapInterval = config.NewOptionalDuration(250 * time.Millisecond)
})
})

// Start all nodes and ensure they all have no peers.
nodes.StartDaemons()
nodes.ForEachPar(func(n *harness.Node) {
assert.Len(t, n.Peers(), 0)
})

// Connect nodes 0 and 1, ensure they know each other.
nodes[0].Connect(nodes[1])
assert.Len(t, nodes[0].Peers(), 1)
assert.Len(t, nodes[1].Peers(), 1)
assert.Len(t, nodes[2].Peers(), 0)

// Wait a bit to ensure that 0 and 1 saved their temporary bootstrap backups.
time.Sleep(time.Millisecond * 500)
nodes.StopDaemons()

// Start 1 and 2. 2 does not know anyone yet.
nodes[1].StartDaemon()
nodes[2].StartDaemon()
assert.Len(t, nodes[1].Peers(), 0)
assert.Len(t, nodes[2].Peers(), 0)

// Connect 1 and 2, ensure they know each other.
nodes[1].Connect(nodes[2])
assert.Len(t, nodes[1].Peers(), 1)
assert.Len(t, nodes[2].Peers(), 1)

// Start 0, wait a bit. Should connect to 1, and then discover 2 via the
// backup bootstrap peers.
nodes[0].StartDaemon()
time.Sleep(time.Millisecond * 500)

// Check if they're all connected.
assert.Len(t, nodes[0].Peers(), 2)
assert.Len(t, nodes[1].Peers(), 2)
assert.Len(t, nodes[2].Peers(), 2)
}
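Assuming the CLI test harness can find a built ipfs binary, as with the other tests under test/cli, the new regression test can presumably be run on its own with: go test ./test/cli -run TestBackupBootstrapPeers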
