Merge pull request ipfs#106 from libp2p/feat/optimize-allocations
optimize allocations
Stebalien authored Dec 10, 2017
2 parents 1677049 + 87251f9 commit a68a53a
Showing 5 changed files with 19 additions and 13 deletions.
2 changes: 1 addition & 1 deletion dht.go
@@ -330,7 +330,7 @@ func (dht *IpfsDHT) betterPeersToQuery(pmes *pb.Message, p peer.ID, count int) [
return nil
}

var filtered []peer.ID
filtered := make([]peer.ID, 0, len(closer))
for _, clp := range closer {

// == to self? thats bad
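The pattern this commit applies throughout: when the result length is known, or at least bounded, before the loop, `make([]T, 0, n)` reserves the backing array once, so the `append` calls never have to grow and copy it mid-loop. A minimal, self-contained sketch of the before/after shapes, using plain strings rather than the repository's `peer.ID` type:

```go
package main

import "fmt"

// filterGrowing appends into a nil slice: append has to allocate and
// then repeatedly grow/copy the backing array as the slice fills up.
func filterGrowing(closer []string, self string) []string {
	var filtered []string
	for _, p := range closer {
		if p != self {
			filtered = append(filtered, p)
		}
	}
	return filtered
}

// filterPreallocated reserves capacity for the worst case up front, so
// every append writes into memory allocated exactly once.
func filterPreallocated(closer []string, self string) []string {
	filtered := make([]string, 0, len(closer))
	for _, p := range closer {
		if p != self {
			filtered = append(filtered, p)
		}
	}
	return filtered
}

func main() {
	closer := []string{"peerA", "peerB", "self", "peerC"}
	fmt.Println(filterGrowing(closer, "self"))
	fmt.Println(filterPreallocated(closer, "self"))
}
```

The capacity is only an upper bound; the length still starts at zero, so code that ranges over the result behaves exactly as before.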
3 changes: 2 additions & 1 deletion handlers.go
@@ -197,8 +197,9 @@ func (dht *IpfsDHT) handleFindPeer(ctx context.Context, p peer.ID, pmes *pb.Mess
return resp, nil
}

var withAddresses []pstore.PeerInfo
closestinfos := pstore.PeerInfos(dht.peerstore, closest)
// possibly an over-allocation but this array is temporary anyways.
withAddresses := make([]pstore.PeerInfo, 0, len(closestinfos))
for _, pi := range closestinfos {
if len(pi.Addrs) > 0 {
withAddresses = append(withAddresses, pi)
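As the added comment notes, `len(closestinfos)` may over-allocate when some peers have no addresses, but the slice is short-lived, so the goal is fewer allocations per call rather than a perfectly sized one. One hedged way to verify this kind of change is an allocation-reporting benchmark; the sketch below uses illustrative names and plain ints, and would live in a `_test.go` file:

```go
package main

import "testing"

var sink []int // package-level sink so the compiler keeps the result

// buildGrowing mirrors the old shape: nil slice grown by append.
func buildGrowing(n int) []int {
	var out []int
	for i := 0; i < n; i++ {
		if i%2 == 0 { // keep roughly half the items
			out = append(out, i)
		}
	}
	return out
}

// buildPreallocated mirrors the new shape: capacity n is an upper bound.
func buildPreallocated(n int) []int {
	out := make([]int, 0, n)
	for i := 0; i < n; i++ {
		if i%2 == 0 {
			out = append(out, i)
		}
	}
	return out
}

func BenchmarkGrowing(b *testing.B) {
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		sink = buildGrowing(64)
	}
}

func BenchmarkPreallocated(b *testing.B) {
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		sink = buildPreallocated(64)
	}
}
```

Running `go test -bench . -benchmem` should show allocs/op dropping from several growth steps to a single up-front allocation.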
5 changes: 3 additions & 2 deletions lookup.go
@@ -110,8 +110,9 @@ func (dht *IpfsDHT) closerPeersSingle(ctx context.Context, key string, p peer.ID
return nil, err
}

var out []peer.ID
for _, pbp := range pmes.GetCloserPeers() {
closer := pmes.GetCloserPeers()
out := make([]peer.ID, 0, len(closer))
for _, pbp := range closer {
pid := peer.ID(pbp.GetId())
if pid != dht.self { // dont add self
dht.peerstore.AddAddrs(pid, pbp.Addresses(), pstore.TempAddrTTL)
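Besides pre-sizing `out`, this hunk binds `pmes.GetCloserPeers()` to `closer` so the getter runs once and its length can size the slice. A standalone sketch of that shape, with a hypothetical `message` type standing in for the protobuf message:

```go
package main

import "fmt"

// message is a hypothetical stand-in for the protobuf response type.
type message struct {
	closerPeers []string
}

// GetCloserPeers mimics a protobuf-style getter: safe to call on nil.
func (m *message) GetCloserPeers() []string {
	if m == nil {
		return nil
	}
	return m.closerPeers
}

func closerPeerIDs(pmes *message, self string) []string {
	closer := pmes.GetCloserPeers() // call the getter once
	out := make([]string, 0, len(closer))
	for _, p := range closer {
		if p != self { // skip self, as in the original loop
			out = append(out, p)
		}
	}
	return out
}

func main() {
	m := &message{closerPeers: []string{"peerA", "me", "peerB"}}
	fmt.Println(closerPeerIDs(m, "me"))
}
```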
18 changes: 11 additions & 7 deletions providers/providers.go
@@ -270,6 +270,7 @@ func (pm *ProviderManager) run() {
log.Error("Error loading provider keys: ", err)
continue
}
now := time.Now()
for {
k, ok := keys()
if !ok {
@@ -281,21 +282,24 @@
log.Error("error loading known provset: ", err)
continue
}
var filtered []peer.ID
for p, t := range provs.set {
if time.Now().Sub(t) > ProvideValidity {
if now.Sub(t) > ProvideValidity {
delete(provs.set, p)
} else {
filtered = append(filtered, p)
}
}

provs.providers = filtered
if len(filtered) == 0 {
// have we run out of providers?
if len(provs.set) == 0 {
provs.providers = nil
err := pm.deleteProvSet(k)
if err != nil {
log.Error("error deleting provider set: ", err)
}
} else if len(provs.set) < len(provs.providers) {
// We must have modified the providers set, recompute.
provs.providers = make([]peer.ID, 0, len(provs.set))
for p := range provs.set {
provs.providers = append(provs.providers, p)
}
}
}
case <-pm.proc.Closing():
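providers.go is the larger change: `time.Now()` is captured once per sweep instead of once per provider, expired entries are deleted from the set in place, and the cached `providers` slice is rebuilt only when the set actually shrank (or cleared when it is empty). A self-contained sketch of that expiry logic, with a plain map standing in for the repository's provider set:

```go
package main

import (
	"fmt"
	"time"
)

const provideValidity = 24 * time.Hour // stand-in for ProvideValidity

// provSet is a simplified stand-in for the manager's provider set:
// a last-seen time per provider plus a cached list of providers.
type provSet struct {
	set       map[string]time.Time
	providers []string
}

// expire prunes providers older than provideValidity against a single
// timestamp and rebuilds the cached slice only if something was removed.
func (ps *provSet) expire(now time.Time) {
	before := len(ps.providers)
	for p, t := range ps.set {
		if now.Sub(t) > provideValidity {
			delete(ps.set, p) // deleting while ranging over a map is safe in Go
		}
	}
	if len(ps.set) == 0 {
		// have we run out of providers? drop the cache entirely
		ps.providers = nil
	} else if len(ps.set) < before {
		// the set shrank, so recompute the cached slice
		ps.providers = make([]string, 0, len(ps.set))
		for p := range ps.set {
			ps.providers = append(ps.providers, p)
		}
	}
}

func main() {
	now := time.Now()
	ps := &provSet{
		set: map[string]time.Time{
			"fresh": now,
			"stale": now.Add(-48 * time.Hour),
		},
		providers: []string{"fresh", "stale"},
	}
	ps.expire(now)            // one timestamp for the whole sweep, as in the commit
	fmt.Println(ps.providers) // [fresh]
}
```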
4 changes: 2 additions & 2 deletions routing.go
@@ -93,7 +93,7 @@ func (dht *IpfsDHT) GetValue(ctx context.Context, key string) ([]byte, error) {
return nil, err
}

var recs [][]byte
recs := make([][]byte, 0, len(vals))
for _, v := range vals {
if v.Val != nil {
recs = append(recs, v.Val)
@@ -144,7 +144,7 @@ func (dht *IpfsDHT) GetValue(ctx context.Context, key string) ([]byte, error) {
}

func (dht *IpfsDHT) GetValues(ctx context.Context, key string, nvals int) ([]routing.RecvdVal, error) {
var vals []routing.RecvdVal
vals := make([]routing.RecvdVal, 0, nvals)
var valslock sync.Mutex

// If we have it local, dont bother doing an RPC!
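Here `nvals`, the number of records the caller asked for, doubles as a capacity hint for `vals`; the mutex stays because the query workers append concurrently. A minimal sketch of that shape, with hypothetical names:

```go
package main

import (
	"fmt"
	"sync"
)

// collectValues gathers up to nvals records from concurrent workers,
// using nvals as the capacity hint for the shared slice.
func collectValues(sources []string, nvals int) []string {
	vals := make([]string, 0, nvals)
	var valslock sync.Mutex
	var wg sync.WaitGroup

	for _, src := range sources {
		wg.Add(1)
		go func(src string) {
			defer wg.Done()
			record := "record-from-" + src // stand-in for a network fetch
			valslock.Lock()
			if len(vals) < nvals {
				vals = append(vals, record)
			}
			valslock.Unlock()
		}(src)
	}
	wg.Wait()
	return vals
}

func main() {
	fmt.Println(collectValues([]string{"a", "b", "c", "d"}, 2))
}
```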
