Restart zero worker if there is still work to do #18658

Merged: 5 commits, Feb 8, 2022
14 changes: 14 additions & 0 deletions modules/queue/workerpool.go
@@ -115,6 +115,9 @@ func (p *WorkerPool) hasNoWorkerScaling() bool {
return p.numberOfWorkers == 0 && (p.boostTimeout == 0 || p.boostWorkers == 0 || p.maxNumberOfWorkers == 0)
}

// zeroBoost will add a temporary boost worker for a no worker queue
// p.lock must be locked at the start of this function BUT it will be unlocked by the end of this function
// (This is because addWorkers has to be called whilst unlocked)
func (p *WorkerPool) zeroBoost() {
ctx, cancel := context.WithTimeout(p.baseCtx, p.boostTimeout)
mq := GetManager().GetManagedQueue(p.qid)
@@ -316,6 +319,17 @@ func (p *WorkerPool) addWorkers(ctx context.Context, cancel context.CancelFunc,
}
p.pause()
}
select {
case <-p.baseCtx.Done():
// this worker queue is shut-down don't reboost
default:
if p.numberOfWorkers == 0 && atomic.LoadInt64(&p.numInQueue) > 0 {
// OK there are no workers but... there's still work to be done -> Reboost
p.zeroBoost()
// p.lock will be unlocked by zeroBoost
return
}
}
p.lock.Unlock()
}()
}
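The new deferred block in addWorkers hands p.lock off to zeroBoost instead of unlocking it itself, because starting workers has to happen while the lock is not held. A rough self-contained sketch of that pattern follows; the pool type, its fields, and onWorkerExit are invented for illustration and are not Gitea's actual WorkerPool API.

package main

import (
	"context"
	"sync"
	"sync/atomic"
)

// pool is a hypothetical stand-in for WorkerPool, reduced to the fields
// needed to show the "reboost when the last worker exits with work still
// queued" pattern added by this PR.
type pool struct {
	lock            sync.Mutex
	baseCtx         context.Context
	numberOfWorkers int
	numInQueue      int64
}

// zeroBoost is entered with p.lock held and is responsible for unlocking
// it, mirroring the comment added to workerpool.go: worker start-up must
// run while unlocked.
func (p *pool) zeroBoost() {
	// ... decide on a boost size, register it with a manager, etc. ...
	p.lock.Unlock()
	// ... start the temporary boost worker while unlocked ...
}

// onWorkerExit mirrors the deferred check in addWorkers: when the last
// worker exits but items remain queued, reboost instead of leaving the
// queue with zero workers; during shutdown, just unlock and return.
func (p *pool) onWorkerExit() {
	p.lock.Lock()
	p.numberOfWorkers--

	select {
	case <-p.baseCtx.Done():
		// the pool is shutting down - do not reboost
	default:
		if p.numberOfWorkers == 0 && atomic.LoadInt64(&p.numInQueue) > 0 {
			p.zeroBoost() // unlocks p.lock before returning
			return
		}
	}
	p.lock.Unlock()
}

func main() {
	p := &pool{baseCtx: context.Background(), numberOfWorkers: 1, numInQueue: 3}
	p.onWorkerExit() // last worker gone, work pending: takes the zeroBoost path
}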
28 changes: 18 additions & 10 deletions services/mirror/mirror.go
@@ -59,11 +59,13 @@ func Update(ctx context.Context, pullLimit, pushLimit int) error {

handler := func(idx int, bean interface{}, limit int) error {
var item SyncRequest
var repo *repo_model.Repository
if m, ok := bean.(*repo_model.Mirror); ok {
if m.Repo == nil {
log.Error("Disconnected mirror found: %d", m.ID)
return nil
}
repo = m.Repo
item = SyncRequest{
Type: PullMirrorType,
RepoID: m.RepoID,
@@ -73,6 +75,7 @@ func Update(ctx context.Context, pullLimit, pushLimit int) error {
log.Error("Disconnected push-mirror found: %d", m.ID)
return nil
}
repo = m.Repo
item = SyncRequest{
Type: PushMirrorType,
RepoID: m.RepoID,
@@ -89,17 +92,16 @@ func Update(ctx context.Context, pullLimit, pushLimit int) error {
default:
}

// Check if this request is already in the queue
has, err := mirrorQueue.Has(&item)
if err != nil {
return err
}
if has {
return nil
}

// Push to the Queue
if err := mirrorQueue.Push(&item); err != nil {
if err == queue.ErrAlreadyInQueue {
if item.Type == PushMirrorType {
log.Trace("PushMirrors for %-v already queued for sync", repo)
} else {
log.Trace("PullMirrors for %-v already queued for sync", repo)
}
return nil
}
return err
}

@@ -110,23 +112,29 @@ func Update(ctx context.Context, pullLimit, pushLimit int) error {
return nil
}

pullMirrorsRequested := 0
if pullLimit != 0 {
requested = 0
if err := repo_model.MirrorsIterate(func(idx int, bean interface{}) error {
return handler(idx, bean, pullLimit)
}); err != nil && err != errLimit {
log.Error("MirrorsIterate: %v", err)
return err
}
pullMirrorsRequested, requested = requested, 0
}
pushMirrorsRequested := 0
if pushLimit != 0 {
requested = 0
if err := repo_model.PushMirrorsIterate(func(idx int, bean interface{}) error {
return handler(idx, bean, pushLimit)
}); err != nil && err != errLimit {
log.Error("PushMirrorsIterate: %v", err)
return err
}
pushMirrorsRequested, requested = requested, 0
}
log.Trace("Finished: Update")
log.Trace("Finished: Update: %d pull mirrors and %d push mirrors queued", pullMirrorsRequested, pushMirrorsRequested)
return nil
}

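In services/mirror/mirror.go the PR drops the separate Has() check and instead pushes unconditionally, treating queue.ErrAlreadyInQueue as "nothing to do". That removes the race window between checking and pushing and saves one queue round-trip per mirror. A minimal sketch of the pattern, assuming a hypothetical uniqueQueue with a sentinel duplicate error (names invented here, not Gitea's queue package):

package main

import (
	"errors"
	"fmt"
)

// errAlreadyInQueue plays the role of queue.ErrAlreadyInQueue for this sketch.
var errAlreadyInQueue = errors.New("already in queue")

// uniqueQueue is a toy unique queue: pushing a key that is already present
// returns errAlreadyInQueue instead of enqueueing it twice.
type uniqueQueue struct{ seen map[string]bool }

func (q *uniqueQueue) Push(key string) error {
	if q.seen[key] {
		return errAlreadyInQueue
	}
	q.seen[key] = true
	return nil
}

// enqueue follows the pattern the PR switches to: push first and treat
// "already queued" as success, rather than calling a Has()-style check
// beforehand, which leaves a window for another pusher to slip in.
func enqueue(q *uniqueQueue, key string) error {
	if err := q.Push(key); err != nil {
		if errors.Is(err, errAlreadyInQueue) {
			fmt.Printf("%s already queued for sync\n", key)
			return nil
		}
		return err
	}
	return nil
}

func main() {
	q := &uniqueQueue{seen: map[string]bool{}}
	_ = enqueue(q, "repo/1") // queued
	_ = enqueue(q, "repo/1") // duplicate: logged and ignored, not an error
}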