executor: reduce overhead of copystack in idxlookup workers #58705

Merged 3 commits on Jan 13, 2025
Changes from 2 commits
7 changes: 5 additions & 2 deletions pkg/executor/distsql.go
@@ -643,8 +643,9 @@ func (e *IndexLookUpExecutor) startWorkers(ctx context.Context, initBatchSize in
// so fetching index and getting table data can run concurrently.
e.workerCtx, e.cancelFunc = context.WithCancel(ctx)
e.pool = &workerPool{
TolerablePendingTasks: 1,
MaxWorkers: int32(max(1, e.indexLookupConcurrency)),
spawn: func(workers, tasks uint32) bool {
return workers < uint32(e.indexLookupConcurrency) && tasks > 1
},
}
if err := e.startIndexWorker(ctx, initBatchSize); err != nil {
return err
@@ -732,6 +733,7 @@ func (e *IndexLookUpExecutor) startIndexWorker(ctx context.Context, initBatchSiz
e.idxWorkerWg.Add(1)
e.pool.submit(func() {
defer trace.StartRegion(ctx, "IndexLookUpIndexTask").End()
growWorkerStack16K()
worker := &indexWorker{
idxLookup: e,
finished: e.finished,
@@ -1090,6 +1092,7 @@ func (w *indexWorker) fetchHandles(ctx context.Context, results []distsql.Select
case <-e.finished:
return
default:
growWorkerStack16K()
execTableTask(e, task)
}
})
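The startWorkers hunk above replaces the fixed TolerablePendingTasks/MaxWorkers settings with a caller-supplied spawn predicate. Below is a small, self-contained sketch (not code from this PR; "concurrency" stands in for e.indexLookupConcurrency) of the decision that closure encodes:

package main

import "fmt"

// shouldSpawn mirrors the closure passed to workerPool in startWorkers:
// start another worker only while below the concurrency cap and when more
// than one task is already queued, so a lone task reuses an existing worker.
func shouldSpawn(workers, tasks uint32, concurrency int) bool {
	return workers < uint32(concurrency) && tasks > 1
}

func main() {
	fmt.Println(shouldSpawn(1, 1, 4)) // false: a single pending task waits for the current worker
	fmt.Println(shouldSpawn(1, 2, 4)) // true: a backlog is building, add a worker
	fmt.Println(shouldSpawn(4, 8, 4)) // false: already at the concurrency cap
}

Note that the pool also spawns unconditionally when it has no workers at all (the p.workers == 0 check in submit below), regardless of this predicate.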
53 changes: 27 additions & 26 deletions pkg/executor/utils.go
@@ -15,9 +15,9 @@
package executor

import (
"runtime"
"strings"
"sync"
"sync/atomic"

"github.com/pingcap/tidb/pkg/extension"
"github.com/pingcap/tidb/pkg/parser/ast"
Expand Down Expand Up @@ -122,7 +122,7 @@ func encodePassword(u *ast.UserSpec, authPlugin *extension.AuthPlugin) (string,
return u.EncodedPassword()
}

var taskPool = sync.Pool{
var globalTaskPool = sync.Pool{
New: func() any { return &workerTask{} },
}

@@ -136,31 +136,31 @@ type workerPool struct {
head *workerTask
tail *workerTask

tasks atomic.Int32
workers atomic.Int32

// TolerablePendingTasks is the number of tasks that can be tolerated in the queue, that is, the pool won't spawn a
// new goroutine if the number of tasks is less than this number.
TolerablePendingTasks int32
// MaxWorkers is the maximum number of workers that the pool can spawn.
MaxWorkers int32
tasks uint32
workers uint32
spawn func(workers, tasks uint32) bool
}

func (p *workerPool) submit(f func()) {
task := taskPool.Get().(*workerTask)
task := globalTaskPool.Get().(*workerTask)
task.f, task.next = f, nil

spawn := false
p.lock.Lock()
if p.head == nil {
p.head = task
} else {
p.tail.next = task
}
p.tail = task
p.tasks++
if p.workers == 0 || p.spawn == nil || p.spawn(p.workers, p.tasks) {
p.workers++
spawn = true
}
p.lock.Unlock()
tasks := p.tasks.Add(1)

if workers := p.workers.Load(); workers == 0 || (workers < p.MaxWorkers && tasks > p.TolerablePendingTasks) {
p.workers.Add(1)
if spawn {
go p.run()
}
}
@@ -170,21 +170,22 @@ func (p *workerPool) run() {
var task *workerTask

p.lock.Lock()
if p.head != nil {
task, p.head = p.head, p.head.next
if p.head == nil {
p.tail = nil
}
}
p.lock.Unlock()

if task == nil {
p.workers.Add(-1)
if p.head == nil {
p.workers--
p.lock.Unlock()
return
}
p.tasks.Add(-1)
task, p.head = p.head, p.head.next
p.tasks--
p.lock.Unlock()

task.f()
taskPool.Put(task)
globalTaskPool.Put(task)
}
}
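With the struct fields, submit, and run shown above, the spawn decision and the task queue are now guarded by a single mutex instead of separate atomics. The following condensed, self-contained approximation of that pattern is for illustration only, not the exact TiDB code:

package main

import (
	"fmt"
	"sync"
)

type task struct {
	f    func()
	next *task
}

type pool struct {
	mu      sync.Mutex
	head    *task
	tail    *task
	tasks   uint32
	workers uint32
	spawn   func(workers, tasks uint32) bool
}

func (p *pool) submit(f func()) {
	t := &task{f: f}
	start := false

	p.mu.Lock()
	if p.head == nil {
		p.head = t
	} else {
		p.tail.next = t
	}
	p.tail = t
	p.tasks++
	// Decide whether to start a worker while still holding the lock, so the
	// counters the predicate sees are consistent with the queue contents.
	if p.workers == 0 || p.spawn == nil || p.spawn(p.workers, p.tasks) {
		p.workers++
		start = true
	}
	p.mu.Unlock()

	if start {
		go p.run()
	}
}

func (p *pool) run() {
	for {
		p.mu.Lock()
		if p.head == nil {
			p.workers-- // queue drained: this worker retires
			p.mu.Unlock()
			return
		}
		t := p.head
		p.head = t.next
		p.tasks--
		p.mu.Unlock()

		t.f()
	}
}

func main() {
	var wg sync.WaitGroup
	p := &pool{
		// cap at two workers; only add the second when a task is already waiting
		spawn: func(workers, tasks uint32) bool { return workers < 2 && tasks > 1 },
	}
	for i := 0; i < 5; i++ {
		wg.Add(1)
		i := i
		p.submit(func() { defer wg.Done(); fmt.Println("ran task", i) })
	}
	wg.Wait()
}

The real pool additionally recycles workerTask objects through the globalTaskPool sync.Pool instead of allocating a new task on every submit.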

//go:noinline
func growWorkerStack16K() {
	var data [8192]byte
	runtime.KeepAlive(&data)
}

Review thread on growWorkerStack16K:

Contributor: This feels like 8K. Is that a mistake or something else?

Contributor Author: Not a mistake; allocating an 8K var on the stack requires a 16K stack size.
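The reply above matches how Go grows goroutine stacks: a frame that does not fit triggers a copystack to a larger stack, and per the discussion an 8 KiB local plus frame overhead ends up on a 16 KiB stack. Here is a standalone, hedged sketch (not from this PR; the worker count and messages are illustrative) of how such a helper is used at the top of a pooled worker:

package main

import (
	"fmt"
	"runtime"
	"sync"
)

//go:noinline
func growWorkerStack16K() {
	// The 8 KiB local cannot fit in the goroutine's current stack, so the
	// runtime grows (copies) the stack before the body runs; KeepAlive keeps
	// the compiler from eliding the allocation.
	var data [8192]byte
	runtime.KeepAlive(&data)
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			growWorkerStack16K() // pay the stack growth cost once, up front
			fmt.Println("worker", id, "now runs on a pre-grown stack")
		}(i)
	}
	wg.Wait()
}

Doing this once at the start of each worker task front-loads the stack growth instead of paying it repeatedly inside deep executor call chains, which is the copystack overhead the PR title refers to.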
17 changes: 10 additions & 7 deletions pkg/executor/utils_test.go
@@ -180,7 +180,7 @@ func TestEncodePasswordWithPlugin(t *testing.T) {
require.Equal(t, "", pwd)
}

func TestGoPool(t *testing.T) {
func TestWorkerPool(t *testing.T) {
var (
list []int
lock sync.Mutex
@@ -199,8 +199,9 @@ func TestGoPool(t *testing.T) {
t.Run("SingleWorker", func(t *testing.T) {
clean()
pool := &workerPool{
TolerablePendingTasks: 0,
MaxWorkers: 1,
spawn: func(workers, tasks uint32) bool {
return workers < 1 && tasks > 0
},
}
wg := sync.WaitGroup{}
wg.Add(1)
@@ -224,8 +225,9 @@ func TestGoPool(t *testing.T) {
t.Run("TwoWorkers", func(t *testing.T) {
clean()
pool := &workerPool{
TolerablePendingTasks: 0,
MaxWorkers: 2,
spawn: func(workers, tasks uint32) bool {
return workers < 2 && tasks > 0
},
}
wg := sync.WaitGroup{}
wg.Add(1)
@@ -249,8 +251,9 @@ func TestGoPool(t *testing.T) {
t.Run("TolerateOnePendingTask", func(t *testing.T) {
clean()
pool := &workerPool{
TolerablePendingTasks: 1,
MaxWorkers: 2,
spawn: func(workers, tasks uint32) bool {
return workers < 2 && tasks > 1
},
}
wg := sync.WaitGroup{}
wg.Add(1)