@@ -32,6 +32,7 @@ pub struct RawSpinlock {
 impl RawSpinlock {
     // Can fail to lock even if the spinlock is not locked. May be more efficient than `try_lock`
     // when called in a loop.
+    #[inline]
     fn try_lock_weak(&self) -> bool {
         // The Orderings are the same as try_lock, and are still correct here.
         self.locked
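The context window above ends mid-expression. Presumably the body completes with a weak compare-exchange, which may fail spuriously even when the lock is free but maps to a cheaper instruction sequence on LL/SC architectures such as ARM; a sketch, assuming the same Acquire/Relaxed orderings as `try_lock`:

```rust
// Sketch of the assumed full method; the diff itself only shows its first lines.
fn try_lock_weak(&self) -> bool {
    // The Orderings are the same as try_lock, and are still correct here.
    self.locked
        .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
        .is_ok()
}
```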
@@ -48,6 +49,7 @@ unsafe impl RawMutex for RawSpinlock {
     // A spinlock guard can be sent to another thread and unlocked there
     type GuardMarker = GuardSend;
 
+    #[inline]
     fn lock(&self) {
         while !self.try_lock_weak() {
             // Wait until the lock looks unlocked before retrying
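The retry loop is the classic test-and-test-and-set pattern: after a failed acquisition attempt, spin on a cheap read (`is_locked`, a Relaxed load) until the lock looks free, and only then retry the read-modify-write. A sketch of the plausible full method, with `core::hint::spin_loop()` assumed as the backoff hint:

```rust
// Sketch of the assumed full method; only its first lines appear in the diff.
fn lock(&self) {
    while !self.try_lock_weak() {
        // Wait until the lock looks unlocked before retrying
        while self.is_locked() {
            core::hint::spin_loop(); // hint to the CPU that we are busy-waiting
        }
    }
}
```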
@@ -59,6 +61,7 @@ unsafe impl RawMutex for RawSpinlock {
         }
     }
 
+    #[inline]
     fn try_lock(&self) -> bool {
         // Code taken from:
         // https://github.com/Amanieu/parking_lot/blob/fa294cd677936bf365afa0497039953b10c722f5/lock_api/src/lib.rs#L49-L53
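Following the parking_lot lines linked above, the elided body is presumably a single strong compare-exchange: Acquire on success so the critical section cannot be reordered above the acquisition, Relaxed on failure because nothing was acquired:

```rust
// Sketch of the assumed body, per the linked parking_lot source.
fn try_lock(&self) -> bool {
    self.locked
        .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
        .is_ok()
}
```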
@@ -74,10 +77,12 @@ unsafe impl RawMutex for RawSpinlock {
             .is_ok()
     }
 
+    #[inline]
     unsafe fn unlock(&self) {
         self.locked.store(false, Ordering::Release);
     }
 
+    #[inline]
     fn is_locked(&self) -> bool {
         // Relaxed is sufficient because this operation does not provide synchronization, only atomicity.
         self.locked.load(Ordering::Relaxed)
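The Release store in `unlock` pairs with the Acquire ordering on the successful lock compare-exchange: every write performed while holding the lock happens-before the Release store, and the next thread that acquires the lock synchronizes-with that store, so the protected data is published correctly. A minimal usage sketch (the crate name `spinning_top` is an assumption; the diff only shows that `Spinlock<T>` wraps `RawSpinlock` via `lock_api`):

```rust
// Usage sketch; the crate name is assumed, not stated in the diff.
use spinning_top::Spinlock;

fn main() {
    let lock = Spinlock::new(0u32);
    {
        let mut guard = lock.lock(); // Acquire on the winning compare-exchange
        *guard += 1;                 // write while the lock is held
    } // guard drops here -> unlock() -> Release store publishes the write
    assert_eq!(*lock.lock(), 1);
}
```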
@@ -197,6 +202,7 @@ pub type MappedSpinlockGuard<'a, T> = lock_api::MappedMutexGuard<'a, RawSpinlock
 ///
 /// static SPINLOCK: Spinlock<i32> = const_spinlock(42);
 /// ```
+#[inline]
 pub const fn const_spinlock<T>(val: T) -> Spinlock<T> {
     Spinlock::const_new(<RawSpinlock as lock_api::RawMutex>::INIT, val)
 }
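A note on why the commit adds `#[inline]` throughout: rustc does not consider non-generic functions for inlining across crate boundaries unless LTO is enabled, so without the attribute these tiny lock operations could compile to real function calls in downstream crates. The generic `const_spinlock` is instantiated in the caller's crate anyway, but the attribute makes the intent explicit and consistent.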