diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Lock.NativeAot.cs b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Lock.NativeAot.cs
index 492e186b95bfc..95ddf847c5bf7 100644
--- a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Lock.NativeAot.cs
+++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Lock.NativeAot.cs
@@ -3,33 +3,27 @@
using System.Diagnostics;
using System.Diagnostics.Tracing;
+using System.Runtime;
using System.Runtime.CompilerServices;
namespace System.Threading
{
public sealed partial class Lock
{
- private const short SpinCountNotInitialized = short.MinValue;
-
// NOTE: Lock must not have a static (class) constructor, as Lock itself is used to synchronize
// class construction. If Lock has its own class constructor, this can lead to infinite recursion.
// All static data in Lock must be lazy-initialized.
private static int s_staticsInitializationStage;
- private static bool s_isSingleProcessor;
+ private static int s_processorCount;
private static short s_maxSpinCount;
private static short s_minSpinCount;
- ///
- /// Initializes a new instance of the class.
- ///
- public Lock() => _spinCount = SpinCountNotInitialized;
-
[MethodImpl(MethodImplOptions.AggressiveInlining)]
internal bool TryEnterOneShot(int currentManagedThreadId)
{
Debug.Assert(currentManagedThreadId != 0);
- if (State.TryLock(this))
+ if (this.TryLock())
{
Debug.Assert(_owningThreadId == 0);
Debug.Assert(_recursionCount == 0);
@@ -55,7 +49,7 @@ internal void Exit(int currentManagedThreadId)
[MethodImpl(MethodImplOptions.AggressiveInlining)]
internal bool TryEnterSlow(int timeoutMs, int currentManagedThreadId) =>
- TryEnterSlow(timeoutMs, new ThreadId((uint)currentManagedThreadId)).IsInitialized;
+ TryEnterSlow(timeoutMs, new ThreadId((uint)currentManagedThreadId));
[MethodImpl(MethodImplOptions.AggressiveInlining)]
internal bool GetIsHeldByCurrentThread(int currentManagedThreadId)
@@ -63,7 +57,7 @@ internal bool GetIsHeldByCurrentThread(int currentManagedThreadId)
Debug.Assert(currentManagedThreadId != 0);
bool isHeld = _owningThreadId == (uint)currentManagedThreadId;
- Debug.Assert(!isHeld || new State(this).IsLocked);
+ Debug.Assert(!isHeld || this.IsLocked);
return isHeld;
}
@@ -72,14 +66,9 @@ internal uint ExitAll()
Debug.Assert(IsHeldByCurrentThread);
uint recursionCount = _recursionCount;
- _owningThreadId = 0;
_recursionCount = 0;
- State state = State.Unlock(this);
- if (state.HasAnyWaiters)
- {
- SignalWaiterIfNecessary(state);
- }
+ ReleaseCore();
return recursionCount;
}
@@ -92,108 +81,83 @@ internal void Reenter(uint previousRecursionCount)
_recursionCount = previousRecursionCount;
}
- [MethodImpl(MethodImplOptions.AggressiveInlining)]
- private TryLockResult LazyInitializeOrEnter()
+ // Returns false until the static variable is lazy-initialized
+ internal static bool IsSingleProcessor => s_processorCount == 1;
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ internal static void LazyInit()
{
- StaticsInitializationStage stage = (StaticsInitializationStage)Volatile.Read(ref s_staticsInitializationStage);
- switch (stage)
+ while (Volatile.Read(ref s_staticsInitializationStage) < (int)StaticsInitializationStage.Usable)
{
- case StaticsInitializationStage.Complete:
- if (_spinCount == SpinCountNotInitialized)
- {
- _spinCount = s_maxSpinCount;
- }
- return TryLockResult.Spin;
-
- case StaticsInitializationStage.Started:
- // Spin-wait until initialization is complete or the lock is acquired to prevent class construction cycles
- // later during a full wait
- bool sleep = true;
- while (true)
- {
- if (sleep)
- {
- Thread.UninterruptibleSleep0();
- }
- else
- {
- Thread.SpinWait(1);
- }
-
- stage = (StaticsInitializationStage)Volatile.Read(ref s_staticsInitializationStage);
- if (stage == StaticsInitializationStage.Complete)
- {
- goto case StaticsInitializationStage.Complete;
- }
- else if (stage == StaticsInitializationStage.NotStarted)
- {
- goto default;
- }
-
- if (State.TryLock(this))
- {
- return TryLockResult.Locked;
- }
-
- sleep = !sleep;
- }
-
- default:
- Debug.Assert(stage == StaticsInitializationStage.NotStarted);
- if (TryInitializeStatics())
- {
- goto case StaticsInitializationStage.Complete;
- }
- goto case StaticsInitializationStage.Started;
+ if (s_staticsInitializationStage == (int)StaticsInitializationStage.NotStarted &&
+ Interlocked.CompareExchange(
+ ref s_staticsInitializationStage,
+ (int)StaticsInitializationStage.Started,
+ (int)StaticsInitializationStage.NotStarted) == (int)StaticsInitializationStage.NotStarted)
+ {
+ ScheduleStaticsInit();
+ }
}
}
- [MethodImpl(MethodImplOptions.NoInlining)]
- private static bool TryInitializeStatics()
+ internal static void ScheduleStaticsInit()
{
- // Since Lock is used to synchronize class construction, and some of the statics initialization may involve class
- // construction, update the stage first to avoid infinite recursion
- switch (
- (StaticsInitializationStage)
- Interlocked.CompareExchange(
- ref s_staticsInitializationStage,
- (int)StaticsInitializationStage.Started,
- (int)StaticsInitializationStage.NotStarted))
+ // initialize essentials
+ // this is safe to do as these do not need to take locks
+ s_maxSpinCount = DefaultMaxSpinCount << SpinCountScaleShift;
+ s_minSpinCount = DefaultMinSpinCount << SpinCountScaleShift;
+
+ // we can now use the slow path of the lock.
+ Volatile.Write(ref s_staticsInitializationStage, (int)StaticsInitializationStage.Usable);
+
+ // other static initialization is optional (but may take locks)
+ // schedule initialization on finalizer thread to avoid reentrancies.
+ StaticsInitializer.Schedule();
+
+ // trigger an ephemeral GC, in case the app is not allocating anything.
+ // this will be once per lifetime of the runtime, so it is ok.
+ GC.Collect(0);
+ }
+
+ private class StaticsInitializer
+ {
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ public static void Schedule()
{
- case StaticsInitializationStage.Started:
- return false;
- case StaticsInitializationStage.Complete:
- return true;
+ new StaticsInitializer();
}
- try
+ ~StaticsInitializer()
{
- s_isSingleProcessor = Environment.IsSingleProcessor;
- s_maxSpinCount = DetermineMaxSpinCount();
- s_minSpinCount = DetermineMinSpinCount();
+ s_processorCount = RuntimeImports.RhGetProcessCpuCount();
+ if (s_processorCount > 1)
+ {
+ s_minSpinCount = (short)(DetermineMinSpinCount() << SpinCountScaleShift);
+ s_maxSpinCount = (short)(DetermineMaxSpinCount() << SpinCountScaleShift);
+ }
+ else
+ {
+ s_minSpinCount = 0;
+ s_maxSpinCount = 0;
+ }
- // Also initialize some types that are used later to prevent potential class construction cycles
NativeRuntimeEventSource.Log.IsEnabled();
+ Stopwatch.GetTimestamp();
+ Volatile.Write(ref s_staticsInitializationStage, (int)StaticsInitializationStage.Complete);
}
- catch
- {
- s_staticsInitializationStage = (int)StaticsInitializationStage.NotStarted;
- throw;
- }
-
- Volatile.Write(ref s_staticsInitializationStage, (int)StaticsInitializationStage.Complete);
- return true;
}
- // Returns false until the static variable is lazy-initialized
- internal static bool IsSingleProcessor => s_isSingleProcessor;
+ internal static bool StaticsInitComplete()
+ {
+ return Volatile.Read(ref s_staticsInitializationStage) == (int)StaticsInitializationStage.Complete;
+ }
// Used to transfer the state when inflating thin locks
internal void InitializeLocked(int managedThreadId, uint recursionCount)
{
Debug.Assert(recursionCount == 0 || managedThreadId != 0);
- _state = managedThreadId == 0 ? State.InitialStateValue : State.LockedStateValue;
+ _state = managedThreadId == 0 ? Unlocked : Locked;
_owningThreadId = (uint)managedThreadId;
_recursionCount = recursionCount;
}
@@ -219,6 +183,7 @@ private enum StaticsInitializationStage
{
NotStarted,
Started,
+ Usable,
Complete
}
}
diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/ObjectHeader.cs b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/ObjectHeader.cs
index 44ffec51a7e25..d7737ed8e744a 100644
--- a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/ObjectHeader.cs
+++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/ObjectHeader.cs
@@ -406,7 +406,7 @@ private static unsafe int TryAcquireUncommon(object obj, int currentThreadID, bo
return -1;
}
- // rare contention on owned lock,
+ // rare contention on a lock that we own,
// perhaps hashcode was installed or finalization bits were touched.
// we still own the lock though and may be able to increment, try again
continue;
@@ -423,12 +423,9 @@ private static unsafe int TryAcquireUncommon(object obj, int currentThreadID, bo
}
}
- if (retries != 0)
- {
- // spin a bit before retrying (1 spinwait is roughly 35 nsec)
- // the object is not pinned here
- Thread.SpinWaitInternal(i);
- }
+ // spin a bit before retrying (1 spinwait is roughly 35 nsec)
+ // the object is not pinned here
+ Thread.SpinWaitInternal(i);
}
// owned by somebody else
diff --git a/src/libraries/System.Private.CoreLib/src/System/Threading/Lock.NonNativeAot.cs b/src/libraries/System.Private.CoreLib/src/System/Threading/Lock.NonNativeAot.cs
index 9386b7ed17460..3006e8802bf97 100644
--- a/src/libraries/System.Private.CoreLib/src/System/Threading/Lock.NonNativeAot.cs
+++ b/src/libraries/System.Private.CoreLib/src/System/Threading/Lock.NonNativeAot.cs
@@ -8,15 +8,11 @@ namespace System.Threading
{
public sealed partial class Lock
{
- private static readonly short s_maxSpinCount = DetermineMaxSpinCount();
- private static readonly short s_minSpinCount = DetermineMinSpinCount();
+ private static readonly short s_maxSpinCount = (short)(IsSingleProcessor ? 0 :DetermineMaxSpinCount() << SpinCountScaleShift);
+ private static readonly short s_minSpinCount = (short)(IsSingleProcessor ? 0 :DetermineMinSpinCount() << SpinCountScaleShift);
- ///
- /// Initializes a new instance of the class.
- ///
- public Lock() => _spinCount = s_maxSpinCount;
-
- private static TryLockResult LazyInitializeOrEnter() => TryLockResult.Spin;
+ private static void LazyInit() { }
+ private static bool StaticsInitComplete() => true;
private static bool IsSingleProcessor => Environment.IsSingleProcessor;
internal partial struct ThreadId
diff --git a/src/libraries/System.Private.CoreLib/src/System/Threading/Lock.cs b/src/libraries/System.Private.CoreLib/src/System/Threading/Lock.cs
index fe794fdf9426e..cc3f5e4a566f4 100644
--- a/src/libraries/System.Private.CoreLib/src/System/Threading/Lock.cs
+++ b/src/libraries/System.Private.CoreLib/src/System/Threading/Lock.cs
@@ -19,13 +19,38 @@ namespace System.Threading
[Runtime.Versioning.RequiresPreviewFeatures]
public sealed partial class Lock
{
+ private const short SpinCountNotInitialized = short.MinValue;
+
private const short DefaultMaxSpinCount = 22;
- private const short DefaultAdaptiveSpinPeriod = 100;
- private const short SpinSleep0Threshold = 10;
- private const ushort MaxDurationMsForPreemptingWaiters = 100;
+ private const short DefaultMinSpinCount = 1;
+
+ // While spinning is parameterized in terms of iterations,
+ // the internal tuning operates with spin count at a finer scale.
+ // One iteration is mapped to 64 spin count units.
+ private const short SpinCountScaleShift = 6;
private static long s_contentionCount;
+ //
+ // We will use exponential backoff in rare cases when we need to change state atomically and cannot
+ // make progress due to concurrent state changes by other threads.
+ // While we cannot know the ideal amount of wait needed before making a successful attempt,
+ // the exponential backoff will generally be not more than 2X worse than the perfect guess and
+ // will make far fewer attempts than a simple retry. On a multiprocessor machine fruitless attempts
+ // will cause unnecessary sharing of the contended state, which may make modifying the state more expensive.
+ // To protect against degenerate cases we will cap the per-iteration wait to 1024 spinwaits.
+ //
+ private const uint MaxExponentialBackoffBits = 10;
+
+ //
+ // This lock is unfair and permits acquiring a contended lock by a nonwaiter in the presence of waiters.
+ // It is possible for one thread to keep holding the lock long enough that waiters go to sleep and
+ // then release and reacquire fast enough that waiters have no chance to get the lock.
+ // In extreme cases one thread could keep retaking the lock, starving everybody else.
+ // If we see woken waiters unable to take the lock for too long, we will ask nonwaiters to wait.
+ //
+ private const uint WaiterWatchdogTicks = 100;
+
// The field's type is not ThreadId to try to retain the relative order of fields of intrinsic types. The type system
// appears to place struct fields after fields of other types, in which case there can be a greater chance that
// _owningThreadId is not in the same cache line as _state.
@@ -35,12 +60,35 @@ public sealed partial class Lock
private uint _owningThreadId;
#endif
- private uint _state; // see State for layout
+ //
+ // m_state layout:
+ //
+ // bit 0: True if the lock is held, false otherwise.
+ //
+ // bit 1: True if nonwaiters must not get ahead of waiters when acquiring a contended lock.
+ //
+ // sign bit: True if we've set the event to wake a waiting thread. The waiter resets this to false when it
+ // wakes up. This avoids the overhead of setting the event multiple times.
+ //
+ // everything else: A count of the number of threads waiting on the event.
+ //
+ private const uint Unlocked = 0;
+ private const uint Locked = 1;
+ private const uint YieldToWaiters = 2;
+ private const uint WaiterCountIncrement = 4;
+ private const uint WaiterWoken = 1u << 31;
+
+ private uint _state;
private uint _recursionCount;
private short _spinCount;
- private ushort _waiterStartTimeMs;
+ private short _wakeWatchDog;
private AutoResetEvent? _waitEvent;
+ ///
+ /// Initializes a new instance of the class.
+ ///
+ public Lock() => _spinCount = SpinCountNotInitialized;
+
///
/// Enters the lock. Once the method returns, the calling thread would be the only thread that holds the lock.
///
@@ -56,17 +104,8 @@ public sealed partial class Lock
[MethodImpl(MethodImplOptions.NoInlining)]
public void Enter()
{
- ThreadId currentThreadId = TryEnter_Inlined(timeoutMs: -1);
- Debug.Assert(currentThreadId.IsInitialized);
- }
-
- [MethodImpl(MethodImplOptions.NoInlining)]
- private ThreadId EnterAndGetCurrentThreadId()
- {
- ThreadId currentThreadId = TryEnter_Inlined(timeoutMs: -1);
- Debug.Assert(currentThreadId.IsInitialized);
- Debug.Assert(currentThreadId.Id == _owningThreadId);
- return currentThreadId;
+ bool success = TryEnter_Inlined(timeoutMs: -1);
+ Debug.Assert(success);
}
///
@@ -89,7 +128,11 @@ private ThreadId EnterAndGetCurrentThreadId()
/// enough that it would typically not be reached when the lock is used properly.
///
[MethodImpl(MethodImplOptions.AggressiveInlining)]
- public Scope EnterScope() => new Scope(this, EnterAndGetCurrentThreadId());
+ public Scope EnterScope()
+ {
+ Enter();
+ return new Scope(this, new ThreadId(this._owningThreadId));
+ }
///
/// A disposable structure that is returned by , which when disposed, exits the lock.
@@ -145,7 +188,7 @@ public void Dispose()
/// enough that it would typically not be reached when the lock is used properly.
///
[MethodImpl(MethodImplOptions.NoInlining)]
- public bool TryEnter() => TryEnter_Inlined(timeoutMs: 0).IsInitialized;
+ public bool TryEnter() => TryEnter_Inlined(timeoutMs: 0);
///
/// Tries to enter the lock, waiting for roughly the specified duration. If the lock is entered, the calling thread
@@ -209,25 +252,45 @@ public bool TryEnter(int millisecondsTimeout)
public bool TryEnter(TimeSpan timeout) => TryEnter_Outlined(WaitHandle.ToTimeoutMilliseconds(timeout));
[MethodImpl(MethodImplOptions.NoInlining)]
- private bool TryEnter_Outlined(int timeoutMs) => TryEnter_Inlined(timeoutMs).IsInitialized;
+ private bool TryEnter_Outlined(int timeoutMs) => TryEnter_Inlined(timeoutMs);
[MethodImpl(MethodImplOptions.AggressiveInlining)]
- private ThreadId TryEnter_Inlined(int timeoutMs)
+ private bool TryEnter_Inlined(int timeoutMs)
{
Debug.Assert(timeoutMs >= -1);
ThreadId currentThreadId = ThreadId.Current_NoInitialize;
- if (currentThreadId.IsInitialized && State.TryLock(this))
+ if (currentThreadId.IsInitialized && this.TryLock())
{
Debug.Assert(!new ThreadId(_owningThreadId).IsInitialized);
Debug.Assert(_recursionCount == 0);
_owningThreadId = currentThreadId.Id;
- return currentThreadId;
+ return true;
}
return TryEnterSlow(timeoutMs, currentThreadId);
}
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private bool TryLock()
+ {
+ uint origState = _state;
+ if ((origState & (YieldToWaiters | Locked)) == 0)
+ {
+ uint newState = origState + Locked;
+ if (Interlocked.CompareExchange(ref _state, newState, origState) == origState)
+ {
+ Debug.Assert(_owningThreadId == 0);
+ Debug.Assert(_recursionCount == 0);
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ private bool IsLocked => (_state & Locked) != 0;
+
///
/// Exits the lock.
///
@@ -269,28 +332,44 @@ private void ExitImpl()
{
Debug.Assert(new ThreadId(_owningThreadId).IsInitialized);
Debug.Assert(_owningThreadId == ThreadId.Current_NoInitialize.Id);
- Debug.Assert(new State(this).IsLocked);
+ Debug.Assert(this.IsLocked);
if (_recursionCount == 0)
{
- _owningThreadId = 0;
-
- State state = State.Unlock(this);
- if (state.HasAnyWaiters)
- {
- SignalWaiterIfNecessary(state);
- }
- }
- else
- {
- _recursionCount--;
+ ReleaseCore();
+ return;
}
+
+ _recursionCount--;
+ }
+
+ // we use this to de-synchronize threads if interlocked operations fail
+ // we will pick a random number in exponentially expanding range and spin that many times
+ private static unsafe void CollisionBackoff(int collisions)
+ {
+ Debug.Assert(collisions > 0);
+
+ // no need for much randomness here, we will just hash the stack address + s_contentionCount.
+ uint rand = ((uint)&collisions + (uint)s_contentionCount) * 2654435769u;
+ uint spins = rand >> (byte)(32 - Math.Min(collisions, MaxExponentialBackoffBits));
+ Thread.SpinWait((int)spins);
}
- private static bool IsAdaptiveSpinEnabled(short minSpinCount) => minSpinCount <= 0;
+ // same idea as in CollisionBackoff, but with guaranteed minimum wait
+ private static unsafe void IterationBackoff(int iteration)
+ {
+ Debug.Assert(iteration > 0 && iteration < MaxExponentialBackoffBits);
+
+ uint rand = ((uint)&iteration + (uint)s_contentionCount) * 2654435769u;
+ // set the highest bit to ensure the minimum number of spins is exponentially increasing
+ // it basically guarantees that we spin at least 1, 2, 4, 8, 16 times, and so on
+ rand |= (1u << 31);
+ uint spins = rand >> (byte)(32 - iteration);
+ Thread.SpinWait((int)spins);
+ }
[MethodImpl(MethodImplOptions.NoInlining)]
- private ThreadId TryEnterSlow(int timeoutMs, ThreadId currentThreadId)
+ internal bool TryEnterSlow(int timeoutMs, ThreadId currentThreadId)
{
Debug.Assert(timeoutMs >= -1);
@@ -300,20 +379,23 @@ private ThreadId TryEnterSlow(int timeoutMs, ThreadId currentThreadId)
// initializing the thread info, try the fast path first.
currentThreadId.InitializeForCurrentThread();
Debug.Assert(_owningThreadId != currentThreadId.Id);
- if (State.TryLock(this))
+ if (this.TryLock())
{
- goto Locked;
+ Debug.Assert(!new ThreadId(_owningThreadId).IsInitialized);
+ Debug.Assert(_recursionCount == 0);
+ _owningThreadId = currentThreadId.Id;
+ return true;
}
}
else if (_owningThreadId == currentThreadId.Id)
{
- Debug.Assert(new State(this).IsLocked);
+ Debug.Assert(this.IsLocked);
uint newRecursionCount = _recursionCount + 1;
if (newRecursionCount != 0)
{
_recursionCount = newRecursionCount;
- return currentThreadId;
+ return true;
}
throw new LockRecursionException(SR.Lock_Enter_LockRecursionException);
@@ -321,908 +403,420 @@ private ThreadId TryEnterSlow(int timeoutMs, ThreadId currentThreadId)
if (timeoutMs == 0)
{
- return new ThreadId(0);
- }
-
- if (LazyInitializeOrEnter() == TryLockResult.Locked)
- {
- goto Locked;
- }
-
- bool isSingleProcessor = IsSingleProcessor;
- short maxSpinCount = s_maxSpinCount;
- if (maxSpinCount == 0)
- {
- goto Wait;
- }
-
- short minSpinCount = s_minSpinCount;
- short spinCount = _spinCount;
- if (spinCount < 0)
- {
- // When negative, the spin count serves as a counter for contentions such that a spin-wait can be attempted
- // periodically to see if it would be beneficial. Increment the spin count and skip spin-waiting.
- Debug.Assert(IsAdaptiveSpinEnabled(minSpinCount));
- _spinCount = (short)(spinCount + 1);
- goto Wait;
- }
-
- // Try to acquire the lock, and check if non-waiters should stop preempting waiters. If this thread should not
- // preempt waiters, skip spin-waiting. Upon contention, register a spinner.
- TryLockResult tryLockResult = State.TryLockBeforeSpinLoop(this, spinCount, out bool isFirstSpinner);
- if (tryLockResult != TryLockResult.Spin)
- {
- goto LockedOrWait;
- }
-
- // Lock was not acquired and a spinner was registered
-
- if (isFirstSpinner)
- {
- // Whether a full-length spin-wait would be effective is determined by having the first spinner do a full-length
- // spin-wait to see if it is effective. Shorter spin-waits would more often be ineffective just because they are
- // shorter.
- spinCount = maxSpinCount;
- }
-
- for (short spinIndex = 0; ;)
- {
- LowLevelSpinWaiter.Wait(spinIndex, SpinSleep0Threshold, isSingleProcessor);
-
- if (++spinIndex >= spinCount)
- {
- // The last lock attempt for this spin will be done after the loop
- break;
- }
-
- // Try to acquire the lock and unregister the spinner
- tryLockResult = State.TryLockInsideSpinLoop(this);
- if (tryLockResult == TryLockResult.Spin)
- {
- continue;
- }
-
- if (tryLockResult == TryLockResult.Locked)
- {
- if (isFirstSpinner && IsAdaptiveSpinEnabled(minSpinCount))
- {
- // Since the first spinner does a full-length spin-wait, and to keep upward and downward changes to the
- // spin count more balanced, only the first spinner adjusts the spin count
- spinCount = _spinCount;
- if (spinCount < maxSpinCount)
- {
- _spinCount = (short)(spinCount + 1);
- }
- }
-
- goto Locked;
- }
-
- // The lock was not acquired and the spinner was not unregistered, stop spinning
- Debug.Assert(tryLockResult == TryLockResult.Wait);
- break;
- }
-
- // Unregister the spinner and try to acquire the lock
- tryLockResult = State.TryLockAfterSpinLoop(this);
- if (isFirstSpinner && IsAdaptiveSpinEnabled(minSpinCount))
- {
- // Since the first spinner does a full-length spin-wait, and to keep upward and downward changes to the
- // spin count more balanced, only the first spinner adjusts the spin count
- if (tryLockResult == TryLockResult.Locked)
- {
- spinCount = _spinCount;
- if (spinCount < maxSpinCount)
- {
- _spinCount = (short)(spinCount + 1);
- }
- }
- else
- {
- // If the spin count is already zero, skip spin-waiting for a while, even for the first spinners. After a
- // number of contentions, the first spinner will attempt a spin-wait again to see if it is effective.
- Debug.Assert(tryLockResult == TryLockResult.Wait);
- spinCount = _spinCount;
- _spinCount = spinCount > 0 ? (short)(spinCount - 1) : minSpinCount;
- }
+ return false;
}
- LockedOrWait:
- Debug.Assert(tryLockResult != TryLockResult.Spin);
- if (tryLockResult == TryLockResult.Wait)
+ if (_spinCount == SpinCountNotInitialized)
{
- goto Wait;
+ LazyInit();
+ _spinCount = s_minSpinCount;
}
- Debug.Assert(tryLockResult == TryLockResult.Locked);
-
- Locked:
- Debug.Assert(!new ThreadId(_owningThreadId).IsInitialized);
- Debug.Assert(_recursionCount == 0);
- _owningThreadId = currentThreadId.Id;
- return currentThreadId;
-
- Wait:
- bool areContentionEventsEnabled =
- NativeRuntimeEventSource.Log.IsEnabled(
- EventLevel.Informational,
- NativeRuntimeEventSource.Keywords.ContentionKeyword);
- AutoResetEvent waitEvent = _waitEvent ?? CreateWaitEvent(areContentionEventsEnabled);
- if (State.TryLockBeforeWait(this))
+ bool hasWaited = false;
+ long contentionTrackingStartedTicks = 0;
+ // we will retry after waking up
+ while (true)
{
- // Lock was acquired and a waiter was not registered
- goto Locked;
- }
+ int iteration = 1;
- // Lock was not acquired and a waiter was registered. All following paths need to unregister the waiter, including
- // exceptional paths.
- try
- {
- Interlocked.Increment(ref s_contentionCount);
+ // We will count when we failed to change the state of the lock and increase pauses
+ // so that bursts of activity are better tolerated. This should not happen often.
+ int collisions = 0;
- long waitStartTimeTicks = 0;
- if (areContentionEventsEnabled)
- {
- NativeRuntimeEventSource.Log.ContentionStart(this);
- waitStartTimeTicks = Stopwatch.GetTimestamp();
- }
+ // We will track the changes of ownership while we are trying to acquire the lock.
+ var oldOwner = _owningThreadId;
+ uint ownerChanged = 0;
- bool acquiredLock = false;
- int waitStartTimeMs = timeoutMs < 0 ? 0 : Environment.TickCount;
- int remainingTimeoutMs = timeoutMs;
+ int iterationLimit = _spinCount >> SpinCountScaleShift;
+ // inner loop where we try acquiring the lock or registering as a waiter
while (true)
{
- if (!waitEvent.WaitOne(remainingTimeoutMs))
+ //
+ // Try to grab the lock. We may take the lock here even if there are existing waiters. This creates the possibility
+ // of starvation of waiters, but it also prevents lock convoys and preempted waiters from destroying perf.
+ // However, if we do not see _wakeWatchDog cleared for long enough, we go into YieldToWaiters mode to ensure some
+ // waiter progress.
+ //
+ uint oldState = _state;
+ bool canAcquire = ((oldState & Locked) == Unlocked) &&
+ (hasWaited || ((oldState & YieldToWaiters) == 0));
+
+ if (canAcquire)
{
- break;
- }
+ uint newState = oldState | Locked;
+ if (hasWaited)
+ newState = (newState - WaiterCountIncrement) & ~(WaiterWoken | YieldToWaiters);
- // Spin a bit while trying to acquire the lock. This has a few benefits:
- // - Spinning helps to reduce waiter starvation. Since other non-waiter threads can take the lock while
- // there are waiters (see State.TryLock()), once a waiter wakes it will be able to better compete with
- // other spinners for the lock.
- // - If there is another thread that is repeatedly acquiring and releasing the lock, spinning before waiting
- // again helps to prevent a waiter from repeatedly context-switching in and out
- // - Further in the same situation above, waking up and waiting shortly thereafter deprioritizes this waiter
- // because events release waiters in FIFO order. Spinning a bit helps a waiter to retain its priority at
- // least for one spin duration before it gets deprioritized behind all other waiters.
- for (short spinIndex = 0; spinIndex < maxSpinCount; spinIndex++)
- {
- if (State.TryLockInsideWaiterSpinLoop(this))
+ if (Interlocked.CompareExchange(ref _state, newState, oldState) == oldState)
{
- acquiredLock = true;
- break;
- }
+ // GOT THE LOCK!!
+ Debug.Assert((_state | Locked) != 0);
+ Debug.Assert(_owningThreadId == 0);
+ Debug.Assert(_recursionCount == 0);
+ _owningThreadId = currentThreadId.Id;
+
+ if (hasWaited)
+ _wakeWatchDog = 0;
+
+ // now we can estimate how busy the lock is and adjust spinning accordingly
+ short spinLimit = _spinCount;
+ if (ownerChanged != 0)
+ {
+ // The lock has changed ownership while we were trying to acquire it.
+ // It is a signal that we might want to spin less next time.
+ // Pursuing a lock that is being "stolen" by other threads is inefficient
+ // due to cache misses and unnecessary sharing of state that keeps invalidating.
+ if (spinLimit > s_minSpinCount)
+ {
+ _spinCount = (short)(spinLimit - 1);
+ }
+ }
+ else if (spinLimit < s_maxSpinCount &&
+ iteration >= (spinLimit >> SpinCountScaleShift))
+ {
+ // we used all of allowed iterations, but the lock does not look very contested,
+ // we can allow a bit more spinning.
+ //
+ // NB: if we acquired the lock while registering a waiter, and the owner did not change, it still counts.
+ // (however iteration does not grow beyond the iterationLimit)
+ _spinCount = (short)(spinLimit + 1);
+ }
- LowLevelSpinWaiter.Wait(spinIndex, SpinSleep0Threshold, isSingleProcessor);
- }
+ if (contentionTrackingStartedTicks != 0)
+ LogContentionEnd(contentionTrackingStartedTicks);
- if (acquiredLock)
- {
- break;
+ return true;
+ }
}
- if (State.TryLockAfterWaiterSpinLoop(this))
+ var newOwner = _owningThreadId;
+ if (newOwner != 0 && newOwner != oldOwner)
{
- acquiredLock = true;
- break;
+ if (oldOwner != 0)
+ ownerChanged++;
+
+ oldOwner = newOwner;
}
- if (remainingTimeoutMs < 0)
+ if (iteration < iterationLimit)
{
+ // We failed to acquire the lock and want to retry after a pause.
+ // Ideally we will retry right when the lock becomes free, but we cannot know when that will happen.
+ // We will use a pause that doubles up on every iteration. It will not be more than 2x worse
+ // than the ideal guess, while minimizing the number of retries.
+ // We will allow pauses up to 64~128 spinwaits.
+ IterationBackoff(Math.Min(iteration, 6));
+ iteration++;
continue;
}
-
- uint waitDurationMs = (uint)(Environment.TickCount - waitStartTimeMs);
- if (waitDurationMs >= (uint)timeoutMs)
+ else if (!canAcquire)
{
- break;
+ // make sure we have the event before committing to wait on it
+ if (_waitEvent == null)
+ CreateWaitEvent();
+
+ //
+ // We reached our spin limit, and need to wait. Increment the waiter count.
+ // Note that we do not do any overflow checking on this increment. In order to overflow,
+ // we'd need to have about 1 billion waiting threads, which is inconceivable anytime in the
+ // foreseeable future.
+ //
+ uint newState = oldState + WaiterCountIncrement;
+ if (hasWaited)
+ newState = (newState - WaiterCountIncrement) & ~WaiterWoken;
+
+ if (Interlocked.CompareExchange(ref _state, newState, oldState) == oldState)
+ break;
}
- remainingTimeoutMs = timeoutMs - (int)waitDurationMs;
+ CollisionBackoff(++collisions);
}
- if (acquiredLock)
- {
- // In NativeAOT, ensure that class construction cycles do not occur after the lock is acquired but before
- // the state is fully updated. Update the state to fully reflect that this thread owns the lock before doing
- // other things.
- Debug.Assert(!new ThreadId(_owningThreadId).IsInitialized);
- Debug.Assert(_recursionCount == 0);
- _owningThreadId = currentThreadId.Id;
-
- if (areContentionEventsEnabled)
- {
- double waitDurationNs =
- (Stopwatch.GetTimestamp() - waitStartTimeTicks) * 1_000_000_000.0 / Stopwatch.Frequency;
- NativeRuntimeEventSource.Log.ContentionStop(waitDurationNs);
- }
-
- return currentThreadId;
- }
- }
- catch // run this code before exception filters in callers
- {
- State.UnregisterWaiter(this);
- throw;
- }
+ //
+ // Now we wait.
+ //
- State.UnregisterWaiter(this);
- return new ThreadId(0);
- }
+ TimeoutTracker timeoutTracker = TimeoutTracker.Start(timeoutMs);
- private void ResetWaiterStartTime() => _waiterStartTimeMs = 0;
+ // Lock was not acquired and a waiter was registered. All following paths need to unregister the waiter, including
+ // exceptional paths.
+ try
+ {
+ Interlocked.Increment(ref s_contentionCount);
- [MethodImpl(MethodImplOptions.AggressiveInlining)]
- private void RecordWaiterStartTime()
- {
- ushort currentTimeMs = (ushort)Environment.TickCount;
- if (currentTimeMs == 0)
- {
- // Don't record zero, that value is reserved for indicating that a time is not recorded
- currentTimeMs--;
- }
- _waiterStartTimeMs = currentTimeMs;
- }
+ if (contentionTrackingStartedTicks == 0)
+ contentionTrackingStartedTicks = LogContentionStart();
- private bool ShouldStopPreemptingWaiters
- {
- [MethodImpl(MethodImplOptions.AggressiveInlining)]
- get
- {
- // If the recorded time is zero, a time has not been recorded yet
- ushort waiterStartTimeMs = _waiterStartTimeMs;
- return
- waiterStartTimeMs != 0 &&
- (ushort)Environment.TickCount - waiterStartTimeMs >= MaxDurationMsForPreemptingWaiters;
- }
- }
+ Debug.Assert(_state >= WaiterCountIncrement);
+ bool waitSucceeded = _waitEvent!.WaitOne(timeoutMs);
+ Debug.Assert(_state >= WaiterCountIncrement);
- [MethodImpl(MethodImplOptions.NoInlining)]
- private unsafe AutoResetEvent CreateWaitEvent(bool areContentionEventsEnabled)
- {
- var newWaitEvent = new AutoResetEvent(false);
- AutoResetEvent? waitEventBeforeUpdate = Interlocked.CompareExchange(ref _waitEvent, newWaitEvent, null);
- if (waitEventBeforeUpdate == null)
- {
- // Also check NativeRuntimeEventSource.Log.IsEnabled() to enable trimming
- if (areContentionEventsEnabled && NativeRuntimeEventSource.Log.IsEnabled())
+ if (!waitSucceeded)
+ break;
+ }
+ catch
{
- NativeRuntimeEventSource.Log.ContentionLockCreated(this);
+ // waiting failed
+ UnregisterWaiter(contentionTrackingStartedTicks);
+ throw;
}
- return newWaitEvent;
+ // we did not time out and will try acquiring the lock again.
+ timeoutMs = timeoutTracker.Remaining;
+ hasWaited = true;
}
- newWaitEvent.Dispose();
- return waitEventBeforeUpdate;
+ // We timed out. We're not going to wait again.
+ UnregisterWaiter(contentionTrackingStartedTicks);
+ return false;
}
[MethodImpl(MethodImplOptions.NoInlining)]
- private void SignalWaiterIfNecessary(State state)
+ private void UnregisterWaiter(long contentionTrackingStartedTicks)
{
- if (State.TrySetIsWaiterSignaledToWake(this, state))
+ int collisions = 0;
+ while (true)
{
- // Signal a waiter to wake
- Debug.Assert(_waitEvent != null);
- bool signaled = _waitEvent.Set();
- Debug.Assert(signaled);
- }
- }
-
- /// <summary>
- /// <see langword="true"/> if the lock is held by the calling thread, <see langword="false"/> otherwise.
- /// </summary>
- public bool IsHeldByCurrentThread
- {
- get
- {
- var owningThreadId = new ThreadId(_owningThreadId);
- bool isHeld = owningThreadId.IsInitialized && owningThreadId.Id == ThreadId.Current_NoInitialize.Id;
- Debug.Assert(!isHeld || new State(this).IsLocked);
- return isHeld;
- }
- }
+ uint oldState = _state;
+ Debug.Assert(oldState >= WaiterCountIncrement);
- internal static long ContentionCount => s_contentionCount;
- internal void Dispose() => _waitEvent?.Dispose();
+ uint newState = oldState - WaiterCountIncrement;
- internal nint LockIdForEvents
- {
- get
- {
- Debug.Assert(_waitEvent != null);
- return _waitEvent.SafeWaitHandle.DangerousGetHandle();
- }
- }
+ // We could not have consumed a wake, or the wait would've succeeded.
+ // If we are the last waiter though, we will clear WaiterWoken and YieldToWaiters
+ // just so that lock would not look like contended.
+ if (newState < WaiterCountIncrement)
+ newState &= ~(WaiterWoken | YieldToWaiters);
- internal unsafe nint ObjectIdForEvents
- {
- get
- {
- Lock lockObj = this;
- return *(nint*)Unsafe.AsPointer(ref lockObj);
- }
- }
-
- internal ulong OwningThreadId => _owningThreadId;
+ if (Interlocked.CompareExchange(ref _state, newState, oldState) == oldState)
+ {
+ if (contentionTrackingStartedTicks != 0)
+ LogContentionEnd(contentionTrackingStartedTicks);
- private static short DetermineMaxSpinCount() =>
- AppContextConfigHelper.GetInt16Config(
- "System.Threading.Lock.SpinCount",
- "DOTNET_Lock_SpinCount",
- DefaultMaxSpinCount,
- allowNegative: false);
+ return;
+ }
- private static short DetermineMinSpinCount()
- {
- // The config var can be set to -1 to disable adaptive spin
- short adaptiveSpinPeriod =
- AppContextConfigHelper.GetInt16Config(
- "System.Threading.Lock.AdaptiveSpinPeriod",
- "DOTNET_Lock_AdaptiveSpinPeriod",
- DefaultAdaptiveSpinPeriod,
- allowNegative: true);
- if (adaptiveSpinPeriod < -1)
- {
- adaptiveSpinPeriod = DefaultAdaptiveSpinPeriod;
+ CollisionBackoff(++collisions);
}
-
- return (short)-adaptiveSpinPeriod;
}
- private struct State : IEquatable<State>
+ internal struct TimeoutTracker
{
- // Layout constants for Lock._state
- private const uint IsLockedMask = (uint)1 << 0; // bit 0
- private const uint ShouldNotPreemptWaitersMask = (uint)1 << 1; // bit 1
- private const uint SpinnerCountIncrement = (uint)1 << 2; // bits 2-4
- private const uint SpinnerCountMask = (uint)0x7 << 2;
- private const uint IsWaiterSignaledToWakeMask = (uint)1 << 5; // bit 5
- private const byte WaiterCountShift = 6;
- private const uint WaiterCountIncrement = (uint)1 << WaiterCountShift; // bits 6-31
-
- private uint _state;
-
- public State(Lock lockObj) : this(lockObj._state) { }
- private State(uint state) => _state = state;
-
- public static uint InitialStateValue => 0;
- public static uint LockedStateValue => IsLockedMask;
- private static uint Neg(uint state) => (uint)-(int)state;
- public bool IsInitialState => this == default;
- public bool IsLocked => (_state & IsLockedMask) != 0;
-
- private void SetIsLocked()
- {
- Debug.Assert(!IsLocked);
- _state += IsLockedMask;
- }
+ private int _start;
+ private int _timeout;
- private bool ShouldNotPreemptWaiters => (_state & ShouldNotPreemptWaitersMask) != 0;
-
- private void SetShouldNotPreemptWaiters()
+ public static TimeoutTracker Start(int timeout)
{
- Debug.Assert(!ShouldNotPreemptWaiters);
- Debug.Assert(HasAnyWaiters);
-
- _state += ShouldNotPreemptWaitersMask;
+ TimeoutTracker tracker = default;
+ tracker._timeout = timeout;
+ if (timeout != Timeout.Infinite)
+ tracker._start = Environment.TickCount;
+ return tracker;
}
- private void ClearShouldNotPreemptWaiters()
- {
- Debug.Assert(ShouldNotPreemptWaiters);
- _state -= ShouldNotPreemptWaitersMask;
- }
-
- private bool ShouldNonWaiterAttemptToAcquireLock
+ public int Remaining
{
get
{
- Debug.Assert(HasAnyWaiters || !ShouldNotPreemptWaiters);
- return (_state & (IsLockedMask | ShouldNotPreemptWaitersMask)) == 0;
+ if (_timeout == Timeout.Infinite)
+ return Timeout.Infinite;
+ int elapsed = Environment.TickCount - _start;
+ if (elapsed > _timeout)
+ return 0;
+ return _timeout - elapsed;
}
}
+ }
- private bool HasAnySpinners => (_state & SpinnerCountMask) != 0;
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ private unsafe void CreateWaitEvent()
+ {
+ Debug.Assert(!IsHeldByCurrentThread);
- private bool TryIncrementSpinnerCount()
+ var newWaitEvent = new AutoResetEvent(false);
+ if (Interlocked.CompareExchange(ref _waitEvent, newWaitEvent, null) == null)
{
- uint newState = _state + SpinnerCountIncrement;
- if (new State(newState).HasAnySpinners) // overflow check
+ // Also check NativeRuntimeEventSource.Log.IsEnabled() to enable trimming
+ if (StaticsInitComplete() && NativeRuntimeEventSource.Log.IsEnabled())
{
- _state = newState;
- return true;
+ if (NativeRuntimeEventSource.Log.IsEnabled(
+ EventLevel.Informational,
+ NativeRuntimeEventSource.Keywords.ContentionKeyword))
+ {
+ NativeRuntimeEventSource.Log.ContentionLockCreated(this);
+ }
}
- return false;
}
-
- private void DecrementSpinnerCount()
- {
- Debug.Assert(HasAnySpinners);
- _state -= SpinnerCountIncrement;
- }
-
- private bool IsWaiterSignaledToWake => (_state & IsWaiterSignaledToWakeMask) != 0;
-
- private void SetIsWaiterSignaledToWake()
- {
- Debug.Assert(HasAnyWaiters);
- Debug.Assert(NeedToSignalWaiter);
-
- _state += IsWaiterSignaledToWakeMask;
- }
-
- private void ClearIsWaiterSignaledToWake()
+ else
{
- Debug.Assert(IsWaiterSignaledToWake);
- _state -= IsWaiterSignaledToWakeMask;
+ newWaitEvent.Dispose();
}
+ }
- public bool HasAnyWaiters => _state >= WaiterCountIncrement;
+ private long LogContentionStart()
+ {
+ Debug.Assert(!IsHeldByCurrentThread);
- private bool TryIncrementWaiterCount()
+ // Also check NativeRuntimeEventSource.Log.IsEnabled() to enable trimming
+ if (StaticsInitComplete() && NativeRuntimeEventSource.Log.IsEnabled())
{
- uint newState = _state + WaiterCountIncrement;
- if (new State(newState).HasAnyWaiters) // overflow check
+ if (NativeRuntimeEventSource.Log.IsEnabled(
+ EventLevel.Informational,
+ NativeRuntimeEventSource.Keywords.ContentionKeyword))
{
- _state = newState;
- return true;
- }
- return false;
- }
-
- private void DecrementWaiterCount()
- {
- Debug.Assert(HasAnyWaiters);
- _state -= WaiterCountIncrement;
- }
+ NativeRuntimeEventSource.Log.ContentionStart(this);
- public bool NeedToSignalWaiter
- {
- get
- {
- Debug.Assert(HasAnyWaiters);
- return (_state & (SpinnerCountMask | IsWaiterSignaledToWakeMask)) == 0;
+ return Stopwatch.GetTimestamp();
}
}
- public static bool operator ==(State state1, State state2) => state1._state == state2._state;
- public static bool operator !=(State state1, State state2) => !(state1 == state2);
+ return 0;
+ }
- bool IEquatable<State>.Equals(State other) => this == other;
- public override bool Equals(object? obj) => obj is State other && this == other;
- public override int GetHashCode() => (int)_state;
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ private void LogContentionEnd(long contentionTrackingStartedTicks)
+ {
+ Debug.Assert(IsHeldByCurrentThread);
- private static State CompareExchange(Lock lockObj, State toState, State fromState) =>
- new State(Interlocked.CompareExchange(ref lockObj._state, toState._state, fromState._state));
+ double waitDurationNs =
+ (Stopwatch.GetTimestamp() - contentionTrackingStartedTicks) * 1_000_000_000.0 / Stopwatch.Frequency;
- [MethodImpl(MethodImplOptions.AggressiveInlining)]
- public static bool TryLock(Lock lockObj)
+ try
{
- // The lock is mostly fair to release waiters in a typically FIFO order (though the order is not guaranteed).
- // However, it allows non-waiters to acquire the lock if it's available to avoid lock convoys.
- //
- // Lock convoys can be detrimental to performance in scenarios where work is being done on multiple threads and
- // the work involves periodically taking a particular lock for a short time to access shared resources. With a
- // lock convoy, once there is a waiter for the lock (which is not uncommon in such scenarios), a worker thread
- // would be forced to context-switch on the subsequent attempt to acquire the lock, often long before the worker
- // thread exhausts its time slice. This process repeats as long as the lock has a waiter, forcing every worker
- // to context-switch on each attempt to acquire the lock, killing performance and creating a positive feedback
- // loop that makes it more likely for the lock to have waiters. To avoid the lock convoy, each worker needs to
- // be allowed to acquire the lock multiple times in sequence despite there being a waiter for the lock in order
- // to have the worker continue working efficiently during its time slice as long as the lock is not contended.
- //
- // This scheme has the possibility to starve waiters. Waiter starvation is mitigated by other means, see
- // TryLockBeforeSpinLoop() and references to ShouldNotPreemptWaiters.
-
- var state = new State(lockObj);
- if (!state.ShouldNonWaiterAttemptToAcquireLock)
+ // Also check NativeRuntimeEventSource.Log.IsEnabled() to enable trimming
+ if (NativeRuntimeEventSource.Log.IsEnabled())
{
- return false;
+ NativeRuntimeEventSource.Log.ContentionStop(waitDurationNs);
}
-
- State newState = state;
- newState.SetIsLocked();
-
- return CompareExchange(lockObj, newState, state) == state;
}
-
- [MethodImpl(MethodImplOptions.AggressiveInlining)]
- public static State Unlock(Lock lockObj)
+ catch
{
- Debug.Assert(IsLockedMask == 1);
-
- var state = new State(Interlocked.Decrement(ref lockObj._state));
- Debug.Assert(!state.IsLocked);
- return state;
+ // We are throwing. The acquire failed and we should not leave the lock locked.
+ this.Exit();
+ throw;
}
+ }
- [MethodImpl(MethodImplOptions.AggressiveInlining)]
- public static TryLockResult TryLockBeforeSpinLoop(Lock lockObj, short spinCount, out bool isFirstSpinner)
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private void ReleaseCore()
+ {
+ Debug.Assert(_recursionCount == 0);
+ _owningThreadId = 0;
+ uint origState = Interlocked.Decrement(ref _state);
+ if ((int)origState < (int)WaiterCountIncrement) // true if have no waiters or WaiterWoken is set
{
- // Normally, threads are allowed to preempt waiters to acquire the lock in order to avoid creating lock convoys,
- // see TryLock(). There can be cases where waiters can be easily starved as a result. For example, a thread that
- // holds a lock for a significant amount of time (much longer than the time it takes to do a context switch),
- // then releases and reacquires the lock in quick succession, and repeats. Though a waiter would be woken upon
- // lock release, usually it will not have enough time to context-switch-in and take the lock, and can be starved
- // for an unreasonably long duration.
- //
- // In order to prevent such starvation and force a bit of fair forward progress, it is sometimes necessary to
- // change the normal policy and disallow threads from preempting waiters. ShouldNotPreemptWaiters() indicates
- // the current state of the policy and this method determines whether the policy should be changed to disallow
- // non-waiters from preempting waiters.
- // - When the first waiter begins waiting, it records the current time as a "waiter starvation start time".
- // That is a point in time after which no forward progress has occurred for waiters. When a waiter acquires
- // the lock, the time is updated to the current time.
- // - This method checks whether the starvation duration has crossed a threshold and if so, sets
- // ShouldNotPreemptWaitersMask
- //
- // When unreasonable starvation is occurring, the lock will be released occasionally and if caused by spinners,
- // those threads may start to spin again.
- // - Before starting to spin this method is called. If ShouldNotPreemptWaitersMask is set, the spinner will
- // skip spinning and wait instead. Spinners that are already registered at the time
- // ShouldNotPreemptWaitersMask is set will stop spinning as necessary. Eventually, all spinners will drain
- // and no new ones will be registered.
- // - Upon releasing a lock, if there are no spinners, a waiter will be signaled to wake. On that path,
- // TrySetIsWaiterSignaledToWake() is called.
- // - Eventually, after spinners have drained, only a waiter will be able to acquire the lock. When a waiter
- // acquires the lock, or when the last waiter unregisters itself, ShouldNotPreemptWaitersMask is cleared to
- // restore the normal policy.
-
- Debug.Assert(spinCount >= 0);
-
- isFirstSpinner = false;
- var state = new State(lockObj);
- while (true)
- {
- State newState = state;
- TryLockResult result = TryLockResult.Spin;
- if (newState.HasAnyWaiters)
- {
- if (newState.ShouldNotPreemptWaiters)
- {
- return TryLockResult.Wait;
- }
- if (lockObj.ShouldStopPreemptingWaiters)
- {
- newState.SetShouldNotPreemptWaiters();
- result = TryLockResult.Wait;
- }
- }
- if (result == TryLockResult.Spin)
- {
- Debug.Assert(!newState.ShouldNotPreemptWaiters);
- if (!newState.IsLocked)
- {
- newState.SetIsLocked();
- result = TryLockResult.Locked;
- }
- else if ((newState.HasAnySpinners && spinCount == 0) || !newState.TryIncrementSpinnerCount())
- {
- return TryLockResult.Wait;
- }
- }
-
- State stateBeforeUpdate = CompareExchange(lockObj, newState, state);
- if (stateBeforeUpdate == state)
- {
- if (result == TryLockResult.Spin && !state.HasAnySpinners)
- {
- isFirstSpinner = true;
- }
- return result;
- }
-
- state = stateBeforeUpdate;
- }
+ return;
}
- [MethodImpl(MethodImplOptions.AggressiveInlining)]
- public static TryLockResult TryLockInsideSpinLoop(Lock lockObj)
- {
- // This method is called from inside a spin loop, it must unregister the spinner if the lock is acquired
-
- var state = new State(lockObj);
- while (true)
- {
- Debug.Assert(state.HasAnySpinners);
- if (!state.ShouldNonWaiterAttemptToAcquireLock)
- {
- return state.ShouldNotPreemptWaiters ? TryLockResult.Wait : TryLockResult.Spin;
- }
-
- State newState = state;
- newState.SetIsLocked();
- newState.DecrementSpinnerCount();
-
- State stateBeforeUpdate = CompareExchange(lockObj, newState, state);
- if (stateBeforeUpdate == state)
- {
- return TryLockResult.Locked;
- }
-
- state = stateBeforeUpdate;
- }
- }
+ //
+ // We have waiters; take the slow path.
+ //
+ AwakeWaiterIfNeeded();
+ }
- [MethodImpl(MethodImplOptions.AggressiveInlining)]
- public static TryLockResult TryLockAfterSpinLoop(Lock lockObj)
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ private void AwakeWaiterIfNeeded()
+ {
+ int collisions = 0;
+ while (true)
{
- // This method is called at the end of a spin loop, it must unregister the spinner always and acquire the lock
- // if it's available. If the lock is available, a spinner must acquire the lock along with unregistering itself,
- // because a lock releaser does not wake a waiter when there is a spinner registered.
-
- var state = new State(Interlocked.Add(ref lockObj._state, Neg(SpinnerCountIncrement)));
- Debug.Assert(new State(state._state + SpinnerCountIncrement).HasAnySpinners);
-
- while (true)
+ uint oldState = _state;
+ if ((int)oldState >= (int)WaiterCountIncrement) // false if WaiterWoken is set
{
- Debug.Assert(state.HasAnyWaiters || !state.ShouldNotPreemptWaiters);
- if (state.IsLocked)
- {
- return TryLockResult.Wait;
- }
+ // there are waiters, and nobody has woken one.
+ uint newState = oldState | WaiterWoken;
- State newState = state;
- newState.SetIsLocked();
-
- State stateBeforeUpdate = CompareExchange(lockObj, newState, state);
- if (stateBeforeUpdate == state)
+ short lastWakeTicks = _wakeWatchDog;
+ if (lastWakeTicks != 0 && (short)Environment.TickCount - lastWakeTicks > WaiterWatchdogTicks)
{
- return TryLockResult.Locked;
+ newState |= YieldToWaiters;
}
- state = stateBeforeUpdate;
- }
- }
-
- [MethodImpl(MethodImplOptions.AggressiveInlining)]
- public static bool TryLockBeforeWait(Lock lockObj)
- {
- // This method is called before waiting. It must either acquire the lock or register a waiter. It also keeps
- // track of the waiter starvation start time.
-
- var state = new State(lockObj);
- bool waiterStartTimeWasReset = false;
- while (true)
- {
- State newState = state;
- if (newState.ShouldNonWaiterAttemptToAcquireLock)
+ if (Interlocked.CompareExchange(ref _state, newState, oldState) == oldState)
{
- newState.SetIsLocked();
- }
- else
- {
- if (!newState.TryIncrementWaiterCount())
+ _waitEvent!.Set();
+ if (lastWakeTicks == 0)
{
- ThrowHelper.ThrowOutOfMemoryException_LockEnter_WaiterCountOverflow();
+ // nonzero timestamp of the last wake
+ _wakeWatchDog = (short)(Environment.TickCount | 1);
}
- if (!state.HasAnyWaiters && !waiterStartTimeWasReset)
- {
- // This would be the first waiter. Once the waiter is registered, another thread may check the
- // waiter starvation start time and the previously recorded value may be stale, causing
- // ShouldNotPreemptWaitersMask to be set unnecessarily. Reset the start time before registering the
- // waiter.
- waiterStartTimeWasReset = true;
- lockObj.ResetWaiterStartTime();
- }
- }
-
- State stateBeforeUpdate = CompareExchange(lockObj, newState, state);
- if (stateBeforeUpdate == state)
- {
- if (state.ShouldNonWaiterAttemptToAcquireLock)
- {
- return true;
- }
-
- Debug.Assert(state.HasAnyWaiters || waiterStartTimeWasReset);
- if (!state.HasAnyWaiters || waiterStartTimeWasReset)
- {
- // This was the first waiter or the waiter start time was reset, record the waiter start time
- lockObj.RecordWaiterStartTime();
- }
- return false;
+ return;
}
-
- state = stateBeforeUpdate;
}
- }
-
- [MethodImpl(MethodImplOptions.AggressiveInlining)]
- public static bool TryLockInsideWaiterSpinLoop(Lock lockObj)
- {
- // This method is called from inside the waiter's spin loop and should observe the wake signal only if the lock
- // is taken, to prevent a lock releaser from waking another waiter while one is already spinning to acquire the
- // lock
-
- bool waiterStartTimeWasRecorded = false;
- var state = new State(lockObj);
- while (true)
+ else
{
- Debug.Assert(state.HasAnyWaiters);
- Debug.Assert(state.IsWaiterSignaledToWake);
-
- if (state.IsLocked)
- {
- return false;
- }
-
- State newState = state;
- newState.SetIsLocked();
- newState.ClearIsWaiterSignaledToWake();
- newState.DecrementWaiterCount();
- if (newState.ShouldNotPreemptWaiters)
- {
- newState.ClearShouldNotPreemptWaiters();
-
- if (newState.HasAnyWaiters && !waiterStartTimeWasRecorded)
- {
- // Update the waiter starvation start time. The time must be recorded before
- // ShouldNotPreemptWaitersMask is cleared, as once that is cleared, another thread may check the
- // waiter starvation start time and the previously recorded value may be stale, causing
- // ShouldNotPreemptWaitersMask to be set again unnecessarily.
- waiterStartTimeWasRecorded = true;
- lockObj.RecordWaiterStartTime();
- }
- }
-
- State stateBeforeUpdate = CompareExchange(lockObj, newState, state);
- if (stateBeforeUpdate == state)
- {
- if (newState.HasAnyWaiters)
- {
- Debug.Assert(!state.ShouldNotPreemptWaiters || waiterStartTimeWasRecorded);
- if (!waiterStartTimeWasRecorded)
- {
- // Since the lock was acquired successfully by a waiter, update the waiter starvation start time
- lockObj.RecordWaiterStartTime();
- }
- }
- return true;
- }
-
- state = stateBeforeUpdate;
+ // no need to wake a waiter.
+ return;
}
- }
-
- [MethodImpl(MethodImplOptions.AggressiveInlining)]
- public static bool TryLockAfterWaiterSpinLoop(Lock lockObj)
- {
- // This method is called at the end of the waiter's spin loop. It must observe the wake signal always, and if
- // the lock is available, it must acquire the lock and unregister the waiter. If the lock is available, a waiter
- // must acquire the lock along with observing the wake signal, because a lock releaser does not wake a waiter
- // when a waiter was signaled but the wake signal has not been observed. If the lock is acquired, the waiter
- // starvation start time is also updated.
- var state = new State(Interlocked.Add(ref lockObj._state, Neg(IsWaiterSignaledToWakeMask)));
- Debug.Assert(new State(state._state + IsWaiterSignaledToWakeMask).IsWaiterSignaledToWake);
-
- bool waiterStartTimeWasRecorded = false;
- while (true)
- {
- Debug.Assert(state.HasAnyWaiters);
-
- if (state.IsLocked)
- {
- return false;
- }
-
- State newState = state;
- newState.SetIsLocked();
- newState.DecrementWaiterCount();
- if (newState.ShouldNotPreemptWaiters)
- {
- newState.ClearShouldNotPreemptWaiters();
-
- if (newState.HasAnyWaiters && !waiterStartTimeWasRecorded)
- {
- // Update the waiter starvation start time. The time must be recorded before
- // ShouldNotPreemptWaitersMask is cleared, as once that is cleared, another thread may check the
- // waiter starvation start time and the previously recorded value may be stale, causing
- // ShouldNotPreemptWaitersMask to be set again unnecessarily.
- waiterStartTimeWasRecorded = true;
- lockObj.RecordWaiterStartTime();
- }
- }
-
- State stateBeforeUpdate = CompareExchange(lockObj, newState, state);
- if (stateBeforeUpdate == state)
- {
- if (newState.HasAnyWaiters)
- {
- Debug.Assert(!state.ShouldNotPreemptWaiters || waiterStartTimeWasRecorded);
- if (!waiterStartTimeWasRecorded)
- {
- // Since the lock was acquired successfully by a waiter, update the waiter starvation start time
- lockObj.RecordWaiterStartTime();
- }
- }
- return true;
- }
-
- state = stateBeforeUpdate;
- }
+ CollisionBackoff(++collisions);
}
+ }
- [MethodImpl(MethodImplOptions.NoInlining)]
- public static void UnregisterWaiter(Lock lockObj)
+ /// <summary>
+ /// <see langword="true"/> if the lock is held by the calling thread, <see langword="false"/> otherwise.
+ /// </summary>
+ public bool IsHeldByCurrentThread
+ {
+ get
{
- // This method is called upon an exception while waiting, or when a wait has timed out. It must unregister the
- // waiter, and if it's the last waiter, clear ShouldNotPreemptWaitersMask to allow other threads to acquire the
- // lock.
-
- var state = new State(lockObj);
- while (true)
- {
- Debug.Assert(state.HasAnyWaiters);
-
- State newState = state;
- newState.DecrementWaiterCount();
- if (newState.ShouldNotPreemptWaiters && !newState.HasAnyWaiters)
- {
- newState.ClearShouldNotPreemptWaiters();
- }
+ var owningThreadId = new ThreadId(_owningThreadId);
+ bool isHeld = owningThreadId.IsInitialized && owningThreadId.Id == ThreadId.Current_NoInitialize.Id;
+ Debug.Assert(!isHeld || this.IsLocked);
+ return isHeld;
+ }
+ }
- State stateBeforeUpdate = CompareExchange(lockObj, newState, state);
- if (stateBeforeUpdate == state)
- {
- return;
- }
+ internal static long ContentionCount => s_contentionCount;
+ internal void Dispose() => _waitEvent?.Dispose();
- state = stateBeforeUpdate;
- }
+ internal nint LockIdForEvents
+ {
+ get
+ {
+ Debug.Assert(_waitEvent != null);
+ return _waitEvent.SafeWaitHandle.DangerousGetHandle();
}
+ }
- [MethodImpl(MethodImplOptions.AggressiveInlining)]
- public static bool TrySetIsWaiterSignaledToWake(Lock lockObj, State state)
+ internal unsafe nint ObjectIdForEvents
+ {
+ get
{
- // Determine whether we must signal a waiter to wake. Keep track of whether a thread has been signaled to wake
- // but has not yet woken from the wait. IsWaiterSignaledToWakeMask is cleared when a signaled thread wakes up by
- // observing a signal. Since threads can preempt waiting threads and acquire the lock (see TryLock()), it allows
- // for example, one thread to acquire and release the lock multiple times while there are multiple waiting
- // threads. In such a case, we don't want that thread to signal a waiter every time it releases the lock, as
- // that will cause unnecessary context switches with more and more signaled threads waking up, finding that the
- // lock is still locked, and going back into a wait state. So, signal only one waiting thread at a time.
-
- Debug.Assert(state.HasAnyWaiters);
-
- while (true)
- {
- if (!state.NeedToSignalWaiter)
- {
- return false;
- }
+ Lock lockObj = this;
+ return *(nint*)Unsafe.AsPointer(ref lockObj);
+ }
+ }
- State newState = state;
- newState.SetIsWaiterSignaledToWake();
- if (!newState.ShouldNotPreemptWaiters && lockObj.ShouldStopPreemptingWaiters)
- {
- newState.SetShouldNotPreemptWaiters();
- }
+ internal ulong OwningThreadId => _owningThreadId;
- State stateBeforeUpdate = CompareExchange(lockObj, newState, state);
- if (stateBeforeUpdate == state)
- {
- return true;
- }
- if (!stateBeforeUpdate.HasAnyWaiters)
- {
- return false;
- }
+ // Lock starts with MinSpinCount and may self-adjust up to the MaxSpinCount
+ // Setting MaxSpinCount <= MinSpinCount will effectively disable adaptive spin adjustment
+ private static short DetermineMaxSpinCount()
+ {
+ var count = AppContextConfigHelper.GetInt16Config(
+ "System.Threading.Lock.MaxSpinCount",
+ "DOTNET_Lock_MaxSpinCount",
+ DefaultMaxSpinCount,
+ allowNegative: false);
- state = stateBeforeUpdate;
- }
- }
+ return count >= short.MaxValue >> SpinCountScaleShift ?
+ DefaultMaxSpinCount :
+ count;
}
- private enum TryLockResult
+ private static short DetermineMinSpinCount()
{
- Locked,
- Spin,
- Wait
+ var count = AppContextConfigHelper.GetInt16Config(
+ "System.Threading.Lock.MinSpinCount",
+ "DOTNET_Lock_MinSpinCount",
+ DefaultMinSpinCount,
+ allowNegative: false);
+
+ return count >= short.MaxValue >> SpinCountScaleShift ?
+ DefaultMinSpinCount :
+ count;
}
}
}