Revert "runtime: double-link list of waiting Ms"
This reverts commit d881ed6 (CL 585637).

Reason for revert: This is part of a patch series that changed the
handling of contended lock2/unlock2 calls, reducing the maximum
throughput of contended runtime.mutex values, and causing a performance
regression on applications where that is (or became) the bottleneck.

Updates #66999
Updates #67585

Change-Id: I70d8d0b74f73be95c43d664f584e8d98519aba26
Reviewed-on: https://go-review.googlesource.com/c/go/+/589116
Auto-Submit: Rhys Hiltner <rhys.hiltner@gmail.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Than McIntosh <thanm@google.com>
Reviewed-by: Michael Pratt <mpratt@google.com>
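
The workload class at issue is many threads repeatedly handing one lock back and forth. runtime.mutex itself is not reachable from user code, so as a rough, hypothetical stand-in, a contended-lock benchmark in ordinary Go could look like the sketch below (sync.Mutex is only an analogy for the internal mutex; the package and benchmark names are illustrative):

package lockbench

import (
	"sync"
	"testing"
)

// BenchmarkContendedLock (hypothetical name) funnels every worker through a
// single critical section, so throughput is dominated by lock hand-off cost
// rather than by the work done while holding the lock.
func BenchmarkContendedLock(b *testing.B) {
	var (
		mu     sync.Mutex
		shared int
	)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			mu.Lock()
			shared++
			mu.Unlock()
		}
	})
	_ = shared
}
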
rhysh authored and gopherbot committed May 30, 2024
1 parent 5dead59 commit afbbc28
Showing 3 changed files with 25 additions and 324 deletions.
63 changes: 7 additions & 56 deletions src/runtime/lock_futex.go
@@ -37,13 +37,6 @@ const (
// independently: a thread can enter lock2, observe that another thread is
// already asleep, and immediately try to grab the lock anyway without waiting
// for its "fair" turn.
//
// The rest of mutex.key holds a pointer to the head of a linked list of the Ms
// that are waiting for the mutex. The pointer portion is set if and only if the
// mutex_sleeping flag is set. Because the futex syscall operates on 32 bits but
// a uintptr may be larger, the flag lets us be sure the futexsleep call will
// only commit if the pointer portion is unset. Otherwise an M allocated at an
// address like 0x123_0000_0000 might miss its wakeups.

// We use the uintptr mutex.key and note.key as a uint32.
//
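
As a rough illustration of the key layout that comment describes (the constant names and the example address below are assumptions mirroring the text, not the runtime's declarations), packing and unpacking could be sketched like this, assuming a 64-bit platform:

package main

import "fmt"

const (
	mutexLocked   uintptr = 1 << 0 // lock is held
	mutexSleeping uintptr = 1 << 1 // at least one M may be asleep in futexsleep
	flagMask              = mutexLocked | mutexSleeping
)

func main() {
	// An M allocated at an address like 0x123_0000_0000 (the example from the
	// comment above): all of its significant bits sit above the low 32 bits
	// that the futex syscall actually compares.
	head := uintptr(0x123_0000_0000)

	key := head | mutexLocked | mutexSleeping // pointer and flags share one word
	fmt.Printf("key      = %#x\n", key)
	fmt.Printf("head     = %#x\n", key&^flagMask) // recover the list head
	fmt.Printf("locked   = %v\n", key&mutexLocked != 0)
	fmt.Printf("sleeping = %v\n", key&mutexSleeping != 0)
	fmt.Printf("low 32   = %#x\n", uint32(key)) // the value futexsleep can see
}
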
@@ -74,53 +67,18 @@ func lock2(l *mutex) {

timer := &lockTimer{lock: l}
timer.begin()

// If a goroutine's stack needed to grow during a lock2 call, the M could
// end up with two active lock2 calls (one each on curg and g0). If both are
// contended, the call on g0 will corrupt mWaitList. Disable stack growth.
stackguard0, throwsplit := gp.stackguard0, gp.throwsplit
if gp == gp.m.curg {
gp.stackguard0, gp.throwsplit = stackPreempt, true
}

// On uniprocessors, no point spinning.
// On multiprocessors, spin for ACTIVE_SPIN attempts.
spin := 0
if ncpu > 1 {
spin = active_spin
}
var enqueued bool
Loop:
for i := 0; ; i++ {
v := atomic.Loaduintptr(&l.key)
if v&mutex_locked == 0 {
// Unlocked. Try to lock.
if atomic.Casuintptr(&l.key, v, v|mutex_locked) {
// We now own the mutex
v = v | mutex_locked
for {
old := v

head := muintptr(v &^ (mutex_sleeping | mutex_locked))
fixMutexWaitList(head)
if enqueued {
head = removeMutexWaitList(head, gp.m)
}

v = mutex_locked
if head != 0 {
v = v | uintptr(head) | mutex_sleeping
}

if v == old || atomic.Casuintptr(&l.key, old, v) {
gp.m.mWaitList.clearLinks()
break
}
v = atomic.Loaduintptr(&l.key)
}
if gp == gp.m.curg {
gp.stackguard0, gp.throwsplit = stackguard0, throwsplit
}
timer.end()
return
}
@@ -132,28 +90,21 @@ Loop:
osyield()
} else {
// Someone else has it.
// l->key points to a linked list of M's waiting
// for this lock, chained through m->mWaitList.next.
// Queue this M.
for {
head := v &^ (mutex_locked | mutex_sleeping)
if !enqueued {
gp.m.mWaitList.next = muintptr(head)
head = uintptr(unsafe.Pointer(gp.m))
if atomic.Casuintptr(&l.key, v, head|mutex_locked|mutex_sleeping) {
enqueued = true
break
}
gp.m.mWaitList.next = 0
if atomic.Casuintptr(&l.key, v, head|mutex_locked|mutex_sleeping) {
break
}
v = atomic.Loaduintptr(&l.key)
if v&mutex_locked == 0 {
continue Loop
}
}
// Queued. Wait.
futexsleep(key32(&l.key), uint32(v), -1)
i = 0
if v&mutex_locked != 0 {
// Queued. Wait.
futexsleep(key32(&l.key), uint32(v), -1)
i = 0
}
}
}
}
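
Taken together, the restored lock2 above is the classic futex-word state machine: CAS from unlocked, spin a little, then mark the word as having sleepers and park on it. A toy, runnable analogue (with runtime.Gosched standing in for futexsleep/futexwakeup, which only exist inside the runtime, and constant names chosen to echo the diff) might look like:

package main

import (
	"runtime"
	"sync"
	"sync/atomic"
)

const (
	unlocked uint32 = 0
	locked   uint32 = 1 // mutex_locked
	sleeping uint32 = 3 // mutex_locked | mutex_sleeping: held, with waiters
)

type toyMutex struct{ key atomic.Uint32 }

func (m *toyMutex) lock() {
	for i := 0; ; i++ {
		v := m.key.Load()
		if v&locked == 0 {
			if m.key.CompareAndSwap(v, v|locked) {
				return // we own the mutex
			}
			continue
		}
		if i < 30 {
			continue // active spinning; only worthwhile on multiprocessors
		}
		// Publish that a waiter exists, then "sleep". The real code calls
		// futexsleep(key32(&l.key), uint32(v), -1) here; Gosched only yields.
		m.key.CompareAndSwap(locked, sleeping)
		runtime.Gosched()
		i = 0
	}
}

func (m *toyMutex) unlock() {
	if m.key.Swap(unlocked) == sleeping {
		// The real unlock2 would futexwakeup one sleeper here.
	}
}

func main() {
	var m toyMutex
	var wg sync.WaitGroup
	total := 0
	for g := 0; g < 4; g++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for i := 0; i < 1000; i++ {
				m.lock()
				total++
				m.unlock()
			}
		}()
	}
	wg.Wait()
	println(total) // 4000
}
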
56 changes: 8 additions & 48 deletions src/runtime/lock_sema.go
@@ -54,49 +54,18 @@ func lock2(l *mutex) {

timer := &lockTimer{lock: l}
timer.begin()

// If a goroutine's stack needed to grow during a lock2 call, the M could
// end up with two active lock2 calls (one each on curg and g0). If both are
// contended, the call on g0 will corrupt mWaitList. Disable stack growth.
stackguard0, throwsplit := gp.stackguard0, gp.throwsplit
if gp == gp.m.curg {
gp.stackguard0, gp.throwsplit = stackPreempt, true
}

// On uniprocessors, no point spinning.
// On multiprocessors, spin for ACTIVE_SPIN attempts.
spin := 0
if ncpu > 1 {
spin = active_spin
}
var enqueued bool
Loop:
for i := 0; ; i++ {
v := atomic.Loaduintptr(&l.key)
if v&locked == 0 {
// Unlocked. Try to lock.
if atomic.Casuintptr(&l.key, v, v|locked) {
// We now own the mutex
v = v | locked
for {
old := v

head := muintptr(v &^ locked)
fixMutexWaitList(head)
if enqueued {
head = removeMutexWaitList(head, gp.m)
}
v = locked | uintptr(head)

if v == old || atomic.Casuintptr(&l.key, old, v) {
gp.m.mWaitList.clearLinks()
break
}
v = atomic.Loaduintptr(&l.key)
}
if gp == gp.m.curg {
gp.stackguard0, gp.throwsplit = stackguard0, throwsplit
}
timer.end()
return
}
@@ -112,29 +81,20 @@ Loop:
// for this lock, chained through m.mWaitList.next.
// Queue this M.
for {
if !enqueued {
gp.m.mWaitList.next = muintptr(v &^ locked)
if atomic.Casuintptr(&l.key, v, uintptr(unsafe.Pointer(gp.m))|locked) {
enqueued = true
break
}
gp.m.mWaitList.next = 0
gp.m.mWaitList.next = muintptr(v &^ locked)
if atomic.Casuintptr(&l.key, v, uintptr(unsafe.Pointer(gp.m))|locked) {
break
}

v = atomic.Loaduintptr(&l.key)
if v&locked == 0 {
continue Loop
}
}
// Queued. Wait.
semasleep(-1)
i = 0
enqueued = false
// unlock2 removed this M from the list (it was at the head). We
// need to erase the metadata about its former position in the
// list -- and since it's no longer a published member we can do
// so without races.
gp.m.mWaitList.clearLinks()
if v&locked != 0 {
// Queued. Wait.
semasleep(-1)
i = 0
}
}
}
}
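
The queueing path restored above is an intrusive, CAS-published singly-linked list: the waiter points itself at the current head and tries to install itself as the new head, and unlock2 later pops the head to wake it. A minimal sketch of that push/pop pattern in plain Go (using atomic.Pointer where the runtime instead packs the head into l.key next to the flag bits):

package main

import (
	"fmt"
	"sync/atomic"
)

// waiter plays the role of an M; next plays the role of m.mWaitList.next.
type waiter struct {
	id   int
	next *waiter
}

type waitList struct{ head atomic.Pointer[waiter] }

// push is what lock2 does when it loses the race for the lock: point the
// waiter at the current head and CAS it in, retrying if the head moved.
func (l *waitList) push(w *waiter) {
	for {
		old := l.head.Load()
		w.next = old
		if l.head.CompareAndSwap(old, w) {
			return
		}
	}
}

// pop is what unlock2 does before waking a waiter: take the head off. In the
// runtime only the lock holder pops, so the ABA concern of a general
// Treiber stack does not arise there.
func (l *waitList) pop() *waiter {
	for {
		old := l.head.Load()
		if old == nil {
			return nil
		}
		if l.head.CompareAndSwap(old, old.next) {
			old.next = nil // no longer published; safe to clear
			return old
		}
	}
}

func main() {
	var l waitList
	for i := 1; i <= 3; i++ {
		l.push(&waiter{id: i})
	}
	for w := l.pop(); w != nil; w = l.pop() {
		fmt.Println("wake", w.id) // LIFO: 3, 2, 1
	}
}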
