pusher: retry shallow receipts
acud committed Jun 7, 2021
1 parent ebedbfe commit 24121ff
Showing 2 changed files with 30 additions and 3 deletions.
27 changes: 25 additions & 2 deletions pkg/pusher/pusher.go
@@ -34,6 +34,7 @@ type Service struct {
 	networkID  uint64
 	storer     storage.Storer
 	pushSyncer pushsync.PushSyncer
+	depther    topology.NeighborhoodDepther
 	logger     logging.Logger
 	tag        *tags.Tags
 	tracer     *tracing.Tracer
@@ -45,15 +46,20 @@ type Service struct {
 var (
 	retryInterval  = 5 * time.Second // time interval between retries
 	concurrentJobs = 10              // how many chunks to push simultaneously
+	retryCount     = 3
 )
 
-var ErrInvalidAddress = errors.New("invalid address")
+var (
+	ErrInvalidAddress = errors.New("invalid address")
+	ErrShallowReceipt = errors.New("shallow recipt")

[@Eknir, Contributor, Jun 7, 2021] typo

+)

-func New(networkID uint64, storer storage.Storer, peerSuggester topology.ClosestPeerer, pushSyncer pushsync.PushSyncer, tagger *tags.Tags, logger logging.Logger, tracer *tracing.Tracer) *Service {
+func New(networkID uint64, storer storage.Storer, depther topology.NeighborhoodDepther, pushSyncer pushsync.PushSyncer, tagger *tags.Tags, logger logging.Logger, tracer *tracing.Tracer) *Service {
 	service := &Service{
 		networkID:  networkID,
 		storer:     storer,
 		pushSyncer: pushSyncer,
+		depther:    depther,
 		tag:        tagger,
 		logger:     logger,
 		tracer:     tracer,
Expand All @@ -80,6 +86,7 @@ func (s *Service) chunksWorker() {
 		mtx          sync.Mutex
 		span         opentracing.Span
 		logger       *logrus.Entry
+		retryCounter = make(map[string]int)
 	)
 	defer timer.Stop()
 	defer close(s.chunksWorkerQuitC)
@@ -151,6 +158,7 @@ LOOP:
 					logger.Tracef("pusher: pushed chunk %s to node %s", ch.Address().String(), storerPeer.String())
 					po := swarm.Proximity(ch.Address().Bytes(), storerPeer.Bytes())
 					s.metrics.ReceiptDepth.WithLabelValues(strconv.Itoa(int(po))).Inc()
+					delete(retryCounter, ch.Address().ByteString())
 				} else {
 					s.metrics.TotalErrors.Inc()
 					s.metrics.ErrorTime.Observe(time.Since(startTime).Seconds())
@@ -190,6 +198,21 @@ LOOP:
 						err = fmt.Errorf("pusher: receipt storer address: %w", err)
 						return
 					}
+
+					po := swarm.Proximity(ch.Address().Bytes(), storerPeer.Bytes())
+					d := s.depther.NeighborhoodDepth()
+					if po < d {

[@Eknir, Contributor, Jun 7, 2021] Not smaller than or equal to?
[@acud, Author/Member, Jun 8, 2021] No. If the depth is equal then the receipt is fine.

+						mtx.Lock()
+						retryCounter[ch.Address().ByteString()]++
+						if retryCounter[ch.Address().ByteString()] < retryCount {
+							mtx.Unlock()
+							err = fmt.Errorf("pusher: shallow receipt depth %d, want at least %d", po, d)
+							po := swarm.Proximity(ch.Address().Bytes(), storerPeer.Bytes())
+							s.metrics.ReceiptDepth.WithLabelValues(strconv.Itoa(int(po))).Inc()

[@Eknir, Contributor, Jun 7, 2021] You should differentiate between the receiptDepth of successful pushes (when the receipt has a depth above our depth, or when it was retried a sufficient number of times) and the receiptDepth of unsuccessful pushes (each attempt).

+							return
+						}
+						mtx.Unlock()
+					}
 				}
 
 				if err = s.storer.Set(ctx, storage.ModeSetSync, ch.Address()); err != nil {
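For readers skimming the diff, the new control flow can be distilled into a minimal, self-contained sketch. The names below (proximity, checkReceipt) and the constants are illustrative stand-ins, not the bee code itself: the real pusher uses swarm.Proximity and keeps retryCounter behind the worker's mutex. Note the comparison is po < depth, because (as discussed above) a receipt from exactly our neighborhood depth is fine.

package main

import (
	"errors"
	"fmt"
)

const (
	maxPO      = 31 // cap on the proximity order, as in swarm-style addressing
	retryCount = 3  // matches the new pusher constant: give up after 3 shallow receipts
)

var errShallowReceipt = errors.New("shallow receipt")

// proximity returns the number of leading bits two addresses share (their
// Kademlia proximity order), capped at maxPO.
func proximity(a, b []byte) uint8 {
	var po uint8
	for i := 0; i < len(a) && i < len(b) && po < maxPO; i++ {
		xor := a[i] ^ b[i]
		for bit := 7; bit >= 0 && po < maxPO; bit-- {
			if xor&(1<<uint(bit)) != 0 {
				return po
			}
			po++
		}
	}
	return po
}

// checkReceipt mirrors the added logic: a receipt whose storer is shallower
// than our neighborhood depth is rejected, but only retryCount times.
func checkReceipt(chunkAddr, storerPeer []byte, depth uint8, retries map[string]int) error {
	po := proximity(chunkAddr, storerPeer)
	if po < depth {
		retries[string(chunkAddr)]++
		if retries[string(chunkAddr)] < retryCount {
			return fmt.Errorf("depth %d, want at least %d: %w", po, depth, errShallowReceipt)
		}
		// retries exhausted: accept the shallow receipt instead of looping forever
	}
	delete(retries, string(chunkAddr)) // synced (or gave up): reset the counter
	return nil
}

func main() {
	retries := make(map[string]int)
	chunk := []byte{0b1010_0000}
	peer := []byte{0b1011_0000} // shares 3 leading bits with the chunk address
	for i := 0; i < retryCount; i++ {
		fmt.Println(checkReceipt(chunk, peer, 4, retries)) // two errors, then <nil>
	}
}

Note that after retryCount shallow receipts the chunk is marked as synced anyway, so a region of the network that keeps answering shallowly cannot stall the pusher indefinitely.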
6 changes: 5 additions & 1 deletion pkg/topology/topology.go
@@ -25,7 +25,7 @@ type Driver interface {
 	ClosestPeerer
 	EachPeerer
 	EachNeighbor
-	NeighborhoodDepth() uint8
+	NeighborhoodDepther
 	SubscribePeersChange() (c <-chan struct{}, unsubscribe func())
 	io.Closer
 	Halter
@@ -137,3 +137,7 @@ type Halter interface {
 	// while allowing it to still run.
 	Halt()
 }
+
+type NeighborhoodDepther interface {
+	NeighborhoodDepth() uint8
+}
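This one-method interface is what the pusher's constructor now accepts in place of topology.ClosestPeerer. A minimal sketch of why the narrowing helps (mockDepther and reportDepth are hypothetical names, not part of the bee codebase):

package main

import "fmt"

// NeighborhoodDepther mirrors the interface added to pkg/topology.
type NeighborhoodDepther interface {
	NeighborhoodDepth() uint8
}

// mockDepther is the kind of stub a pusher unit test can pass in instead of
// wiring up a full kademlia driver.
type mockDepther struct{ depth uint8 }

func (m mockDepther) NeighborhoodDepth() uint8 { return m.depth }

// reportDepth stands in for any consumer, like the pusher, that needs the
// depth and nothing else from the topology driver.
func reportDepth(d NeighborhoodDepther) {
	fmt.Printf("current neighborhood depth: %d\n", d.NeighborhoodDepth())
}

func main() {
	reportDepth(mockDepther{depth: 5})
}

Because Go interfaces are satisfied implicitly, and Driver now embeds NeighborhoodDepther, the existing kademlia implementation satisfies the new interface without any code changes.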

3 comments on commit 24121ff

@Eknir (Contributor) commented on 24121ff, Jun 7, 2021:

Is it guaranteed, or at least likely, that a retry goes via another route rather than hitting a similar peer that might give us trouble?

@Eknir (Contributor) commented on 24121ff, Jun 7, 2021:

When a chunk is retried, is it retried immediately, or does it happen later?

@Eknir (Contributor) commented on 24121ff, Jun 7, 2021:

I am asking these questions because, ultimately, retrying only helps if we know the situation in the network is different: either the retry goes via a different route, or the peer with a shallow depth has increased its own depth.
