diff --git a/dht.go b/dht.go
index 9312df86253..3da0018f05b 100644
--- a/dht.go
+++ b/dht.go
@@ -330,7 +330,7 @@ func (dht *IpfsDHT) betterPeersToQuery(pmes *pb.Message, p peer.ID, count int) [
 		return nil
 	}
 
-	var filtered []peer.ID
+	filtered := make([]peer.ID, 0, len(closer))
 	for _, clp := range closer {
 
 		// == to self? thats bad
diff --git a/handlers.go b/handlers.go
index 3426aa8abd2..317da210b35 100644
--- a/handlers.go
+++ b/handlers.go
@@ -197,8 +197,9 @@ func (dht *IpfsDHT) handleFindPeer(ctx context.Context, p peer.ID, pmes *pb.Mess
 		return resp, nil
 	}
 
-	var withAddresses []pstore.PeerInfo
 	closestinfos := pstore.PeerInfos(dht.peerstore, closest)
+	// possibly an over-allocation but this array is temporary anyways.
+	withAddresses := make([]pstore.PeerInfo, 0, len(closestinfos))
 	for _, pi := range closestinfos {
 		if len(pi.Addrs) > 0 {
 			withAddresses = append(withAddresses, pi)
diff --git a/lookup.go b/lookup.go
index ae27874e897..508ceda0a01 100644
--- a/lookup.go
+++ b/lookup.go
@@ -110,8 +110,9 @@ func (dht *IpfsDHT) closerPeersSingle(ctx context.Context, key string, p peer.ID
 		return nil, err
 	}
 
-	var out []peer.ID
-	for _, pbp := range pmes.GetCloserPeers() {
+	closer := pmes.GetCloserPeers()
+	out := make([]peer.ID, 0, len(closer))
+	for _, pbp := range closer {
 		pid := peer.ID(pbp.GetId())
 		if pid != dht.self { // dont add self
 			dht.peerstore.AddAddrs(pid, pbp.Addresses(), pstore.TempAddrTTL)
diff --git a/providers/providers.go b/providers/providers.go
index 77572e18ddf..421fb98e102 100644
--- a/providers/providers.go
+++ b/providers/providers.go
@@ -270,6 +270,7 @@ func (pm *ProviderManager) run() {
 				log.Error("Error loading provider keys: ", err)
 				continue
 			}
+			now := time.Now()
 			for {
 				k, ok := keys()
 				if !ok {
@@ -281,21 +282,24 @@ func (pm *ProviderManager) run() {
 					log.Error("error loading known provset: ", err)
 					continue
 				}
-				var filtered []peer.ID
 				for p, t := range provs.set {
-					if time.Now().Sub(t) > ProvideValidity {
+					if now.Sub(t) > ProvideValidity {
 						delete(provs.set, p)
-					} else {
-						filtered = append(filtered, p)
 					}
 				}
-
-				provs.providers = filtered
-				if len(filtered) == 0 {
+				// have we run out of providers?
+				if len(provs.set) == 0 {
+					provs.providers = nil
 					err := pm.deleteProvSet(k)
 					if err != nil {
 						log.Error("error deleting provider set: ", err)
 					}
+				} else if len(provs.set) < len(provs.providers) {
+					// We must have modified the providers set, recompute.
+					provs.providers = make([]peer.ID, 0, len(provs.set))
+					for p := range provs.set {
+						provs.providers = append(provs.providers, p)
+					}
 				}
 			}
 		case <-pm.proc.Closing():
diff --git a/routing.go b/routing.go
index 93ba29ea5f9..d2dfcd0728f 100644
--- a/routing.go
+++ b/routing.go
@@ -93,7 +93,7 @@ func (dht *IpfsDHT) GetValue(ctx context.Context, key string) ([]byte, error) {
 		return nil, err
 	}
 
-	var recs [][]byte
+	recs := make([][]byte, 0, len(vals))
 	for _, v := range vals {
 		if v.Val != nil {
 			recs = append(recs, v.Val)
@@ -144,7 +144,7 @@ func (dht *IpfsDHT) GetValue(ctx context.Context, key string) ([]byte, error) {
 }
 
 func (dht *IpfsDHT) GetValues(ctx context.Context, key string, nvals int) ([]routing.RecvdVal, error) {
-	var vals []routing.RecvdVal
+	vals := make([]routing.RecvdVal, 0, nvals)
 	var valslock sync.Mutex
 
 	// If we have it local, dont bother doing an RPC!