From 3663aa90002dcde1c4836726008ae1456b0709aa Mon Sep 17 00:00:00 2001 From: Juventer Date: Sat, 13 Apr 2024 00:34:35 +0200 Subject: [PATCH 1/5] Implemented IrqMutex --- Cargo.toml | 3 + src/interrupt.rs | 45 ++++ src/lib.rs | 3 + src/mutex.rs | 23 +- src/mutex/interrupt.rs | 565 +++++++++++++++++++++++++++++++++++++++++ 5 files changed, 633 insertions(+), 6 deletions(-) create mode 100644 src/interrupt.rs create mode 100644 src/mutex/interrupt.rs diff --git a/Cargo.toml b/Cargo.toml index 1a7879c..3af4d55 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -32,6 +32,9 @@ ticket_mutex = ["mutex"] # Enables `FairMutex`. fair_mutex = ["mutex"] +# Enables `IrqMutex`. +irq_mutex = ["mutex"] + # Enables the non-default ticket mutex implementation for `Mutex`. use_ticket_mutex = ["mutex", "ticket_mutex"] diff --git a/src/interrupt.rs b/src/interrupt.rs new file mode 100644 index 0000000..119c9e1 --- /dev/null +++ b/src/interrupt.rs @@ -0,0 +1,45 @@ +use core::arch::asm; +///Contains architecture specific interrupt mask and restore code +/// +/// + + + +/// +/// Masks all maskable interrupts and returns the previous Interrupt State +#[cfg(target_arch = "x86_64")] +#[inline(always)] +pub(crate) fn mask_interrupts() -> bool { + + let mut flags: u64; + unsafe{ + asm!{ + "pushfw", + "popw {}", + "cli", + out(reg) flags + } + } + + //Masks of all Bits except the Interrupt Flag + if flags & 0x200 > 0 { + return true; + } + + false + +} + +/// Restores the Interrupt State to its previous value +#[cfg(target_arch = "x86_64")] +#[inline(always)] +pub(crate) fn restore_interrupts(interrupts: bool) { + if interrupts { + unsafe{ + asm!{ + "sti", + "nop" //on x86_64 sti creates a Interrupt Shadow, the NOP contains this Sideeffect to the inline ASM + } + } + } +} \ No newline at end of file diff --git a/src/lib.rs b/src/lib.rs index 50768bc..d70ad74 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -99,6 +99,9 @@ pub use relax::{RelaxStrategy, Spin}; #[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))] pub use rwlock::RwLockReadGuard; +#[cfg(feature = "irq_mutex")] +mod interrupt; + // Avoid confusing inference errors by aliasing away the relax strategy parameter. Users that need to use a different // relax strategy can do so by accessing the types through their fully-qualified path. This is a little bit horrible // but sadly adding a default type parameter is *still* a breaking change in Rust (for understandable reasons). diff --git a/src/mutex.rs b/src/mutex.rs index 40b7d0f..11f4dd7 100644 --- a/src/mutex.rs +++ b/src/mutex.rs @@ -34,25 +34,36 @@ pub mod fair; #[cfg_attr(docsrs, doc(cfg(feature = "fair_mutex")))] pub use self::fair::{FairMutex, FairMutexGuard, Starvation}; +#[cfg(feature = "irq_mutex")] +#[cfg_attr(docsrs, doc(cfg(feature = "irq_mutex")))] +pub mod interrupt; +#[cfg(feature = "irq_mutex")] +#[cfg_attr(docsrs, doc(cfg(feature = "spin_mutex")))] +pub use self::interrupt::{IrqMutex, IrqMutexGuard}; + use crate::{RelaxStrategy, Spin}; use core::{ fmt, ops::{Deref, DerefMut}, }; -#[cfg(all(not(feature = "spin_mutex"), not(feature = "use_ticket_mutex")))] -compile_error!("The `mutex` feature flag was used (perhaps through another feature?) without either `spin_mutex` or `use_ticket_mutex`. One of these is required."); +#[cfg(all(not(feature = "spin_mutex"), not(feature = "use_ticket_mutex"), not(feature = "irq_mutex") ))] +compile_error!("The `mutex` feature flag was used (perhaps through another feature?) without either `spin_mutex`, `use_ticket_mutex` or `irq_mutex`. 
One of these is required."); -#[cfg(all(not(feature = "use_ticket_mutex"), feature = "spin_mutex"))] +#[cfg(all(not(any(feature = "use_ticket_mutex", feature = "irq_mutex")), feature = "spin_mutex"))] type InnerMutex = self::spin::SpinMutex; -#[cfg(all(not(feature = "use_ticket_mutex"), feature = "spin_mutex"))] +#[cfg(all(not(any(feature = "use_ticket_mutex", feature = "irq_mutex")), feature = "spin_mutex"))] type InnerMutexGuard<'a, T> = self::spin::SpinMutexGuard<'a, T>; -#[cfg(feature = "use_ticket_mutex")] +#[cfg(all(not(any(feature = "irq_mutex", feature = "spin_mutex")), feature = "use_ticket_mutex"))] type InnerMutex = self::ticket::TicketMutex; -#[cfg(feature = "use_ticket_mutex")] +#[cfg(all(not(any(feature = "irq_mutex", feature = "spin_mutex")), feature = "use_ticket_mutex"))] type InnerMutexGuard<'a, T> = self::ticket::TicketMutexGuard<'a, T>; +#[cfg(all(not(any(feature = "use_ticket_mutex", feature = "spin_mutex")), feature = "irq_mutex"))] +type InnerMutex = self::interrupt::IrqMutex; +#[cfg(all(not(any(feature = "use_ticket_mutex", feature = "spin_mutex")), feature = "irq_mutex"))] +type InnerMutexGuard<'a, T> = self::interrupt::IrqMutexGuard<'a, T>; /// A spin-based lock providing mutually exclusive access to data. /// /// The implementation uses either a ticket mutex or a regular spin mutex depending on whether the `spin_mutex` or diff --git a/src/mutex/interrupt.rs b/src/mutex/interrupt.rs new file mode 100644 index 0000000..e2f1006 --- /dev/null +++ b/src/mutex/interrupt.rs @@ -0,0 +1,565 @@ +//! A naïve spinning mutex. +//! +//! Waiting threads hammer an atomic variable until it becomes available. Best-case latency is low, but worst-case +//! latency is theoretically infinite. +//! Mask Interrupts to prevent deadlocks + +use crate::{ + atomic::{AtomicBool, Ordering}, interrupt, RelaxStrategy, Spin +}; +use core::{ + cell::UnsafeCell, + fmt, + marker::PhantomData, + mem::ManuallyDrop, + ops::{Deref, DerefMut}, +}; + +/// A [spin lock](https://en.m.wikipedia.org/wiki/Spinlock) providing mutually exclusive access to data. +/// +/// # Example +/// +/// ``` +/// use spin; +/// +/// let lock = spin::mutex::IrqMutex::<_>::new(0); +/// +/// // Modify the data +/// *lock.lock() = 2; +/// +/// // Read the data +/// let answer = *lock.lock(); +/// assert_eq!(answer, 2); +/// ``` +/// +/// # Thread safety example +/// +/// ``` +/// use spin; +/// use std::sync::{Arc, Barrier}; +/// +/// let thread_count = 1000; +/// let spin_mutex = Arc::new(spin::mutex::IrqMutex::<_>::new(0)); +/// +/// // We use a barrier to ensure the readout happens after all writing +/// let barrier = Arc::new(Barrier::new(thread_count + 1)); +/// +/// # let mut ts = Vec::new(); +/// for _ in (0..thread_count) { +/// let my_barrier = barrier.clone(); +/// let my_lock = spin_mutex.clone(); +/// # let t = +/// std::thread::spawn(move || { +/// let mut guard = my_lock.lock(); +/// *guard += 1; +/// +/// // Release the lock to prevent a deadlock +/// drop(guard); +/// my_barrier.wait(); +/// }); +/// # ts.push(t); +/// } +/// +/// barrier.wait(); +/// +/// let answer = { *spin_mutex.lock() }; +/// assert_eq!(answer, thread_count); +/// +/// # for t in ts { +/// # t.join().unwrap(); +/// # } +/// ``` +pub struct IrqMutex { + phantom: PhantomData, + pub(crate) lock: AtomicBool, + data: UnsafeCell, +} + +/// A guard that provides mutable data access. +/// +/// When the guard falls out of scope it will release the lock. 
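+/// Dropping the guard also restores the interrupt state that was saved when
+/// the lock was acquired, so nested guards must be dropped in LIFO order.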
+pub struct IrqMutexGuard<'a, T: ?Sized + 'a> { + lock: &'a AtomicBool, + data: *mut T, + interrupt: bool, +} + +// Same unsafe impls as `std::sync::Mutex` +unsafe impl Sync for IrqMutex {} +unsafe impl Send for IrqMutex {} + +unsafe impl Sync for IrqMutexGuard<'_, T> {} +unsafe impl Send for IrqMutexGuard<'_, T> {} + +impl IrqMutex { + /// Creates a new [`IrqMutex`] wrapping the supplied data. + /// + /// # Example + /// + /// ``` + /// use spin::mutex::IrqMutex; + /// + /// static MUTEX: IrqMutex<()> = IrqMutex::<_>::new(()); + /// + /// fn demo() { + /// let lock = MUTEX.lock(); + /// // do something with lock + /// drop(lock); + /// } + /// ``` + #[inline(always)] + pub const fn new(data: T) -> Self { + IrqMutex { + lock: AtomicBool::new(false), + data: UnsafeCell::new(data), + phantom: PhantomData, + } + } + + /// Consumes this [`IrqMutex`] and unwraps the underlying data. + /// + /// # Example + /// + /// ``` + /// let lock = spin::mutex::IrqMutex::<_>::new(42); + /// assert_eq!(42, lock.into_inner()); + /// ``` + #[inline(always)] + pub fn into_inner(self) -> T { + // We know statically that there are no outstanding references to + // `self` so there's no need to lock. + let IrqMutex { data, .. } = self; + data.into_inner() + } + + /// Returns a mutable pointer to the underlying data. + /// + /// This is mostly meant to be used for applications which require manual unlocking, but where + /// storing both the lock and the pointer to the inner data gets inefficient. + /// + /// # Example + /// ``` + /// let lock = spin::mutex::IrqMutex::<_>::new(42); + /// + /// unsafe { + /// core::mem::forget(lock.lock()); + /// + /// assert_eq!(lock.as_mut_ptr().read(), 42); + /// lock.as_mut_ptr().write(58); + /// + /// lock.force_unlock(); + /// } + /// + /// assert_eq!(*lock.lock(), 58); + /// + /// ``` + #[inline(always)] + pub fn as_mut_ptr(&self) -> *mut T { + self.data.get() + } +} + +impl IrqMutex { + /// Locks the [`IrqMutex`] and returns a guard that permits access to the inner data. + /// + /// The returned value may be dereferenced for data access + /// and the lock will be dropped when the guard falls out of scope. + /// + /// ``` + /// let lock = spin::mutex::IrqMutex::<_>::new(0); + /// { + /// let mut data = lock.lock(); + /// // The lock is now locked and the data can be accessed + /// *data += 1; + /// // The lock is implicitly dropped at the end of the scope + /// } + /// ``` + #[inline(always)] + pub fn lock(&self) -> IrqMutexGuard { + // Can fail to lock even if the spinlock is not locked. May be more efficient than `try_lock` + // when called in a loop. + loop { + if let Some(guard) = self.try_lock_weak() { + break guard; + } + + while self.is_locked() { + R::relax(); + } + } + } +} + +impl IrqMutex { + /// Returns `true` if the lock is currently held. + /// + /// # Safety + /// + /// This function provides no synchronization guarantees and so its result should be considered 'out of date' + /// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic. + #[inline(always)] + pub fn is_locked(&self) -> bool { + self.lock.load(Ordering::Relaxed) + } + + /// Force unlock this [`IrqMutex`]. + /// + /// # Safety + /// + /// This is *extremely* unsafe if the lock is not held by the current + /// thread. However, this can be useful in some instances for exposing the + /// lock to FFI that doesn't know how to deal with RAII. 
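+    /// Note that this does not restore any saved interrupt state: the state
+    /// saved at lock time is only restored when the corresponding guard is
+    /// dropped, so a leaked or forgotten guard leaves interrupts masked.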
+ #[inline(always)] + pub unsafe fn force_unlock(&self) { + self.lock.store(false, Ordering::Release); + } + + /// Try to lock this [`IrqMutex`], returning a lock guard if successful. + /// + /// # Example + /// + /// ``` + /// let lock = spin::mutex::IrqMutex::<_>::new(42); + /// + /// let maybe_guard = lock.try_lock(); + /// assert!(maybe_guard.is_some()); + /// + /// // `maybe_guard` is still held, so the second call fails + /// let maybe_guard2 = lock.try_lock(); + /// assert!(maybe_guard2.is_none()); + /// ``` + #[inline(always)] + pub fn try_lock(&self) -> Option> { + // The reason for using a strong compare_exchange is explained here: + // https://github.com/Amanieu/parking_lot/pull/207#issuecomment-575869107 + let interrupt_state = interrupt::mask_interrupts(); + if self + .lock + .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed) + .is_ok() + { + Some(IrqMutexGuard { + lock: &self.lock, + data: unsafe { &mut *self.data.get() }, + interrupt: interrupt_state + }) + } else { + interrupt::restore_interrupts(interrupt_state); + None + } + } + + /// Try to lock this [`IrqMutex`], returning a lock guard if succesful. + /// + /// Unlike [`IrqMutex::try_lock`], this function is allowed to spuriously fail even when the mutex is unlocked, + /// which can result in more efficient code on some platforms. + #[inline(always)] + pub fn try_lock_weak(&self) -> Option> { + let interrupt_state = interrupt::mask_interrupts(); + if self + .lock + .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed) + .is_ok() + { + Some(IrqMutexGuard { + lock: &self.lock, + data: unsafe { &mut *self.data.get() }, + interrupt: interrupt_state + }) + } else { + interrupt::restore_interrupts(interrupt_state); + None + } + } + + /// Returns a mutable reference to the underlying data. + /// + /// Since this call borrows the [`IrqMutex`] mutably, and a mutable reference is guaranteed to be exclusive in + /// Rust, no actual locking needs to take place -- the mutable borrow statically guarantees no locks exist. As + /// such, this is a 'zero-cost' operation. + /// + /// # Example + /// + /// ``` + /// let mut lock = spin::mutex::IrqMutex::<_>::new(0); + /// *lock.get_mut() = 10; + /// assert_eq!(*lock.lock(), 10); + /// ``` + #[inline(always)] + pub fn get_mut(&mut self) -> &mut T { + // We know statically that there are no other references to `self`, so + // there's no need to lock the inner mutex. + unsafe { &mut *self.data.get() } + } +} + +impl fmt::Debug for IrqMutex { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self.try_lock() { + Some(guard) => write!(f, "Mutex {{ data: ") + .and_then(|()| (&*guard).fmt(f)) + .and_then(|()| write!(f, "}}")), + None => write!(f, "Mutex {{ }}"), + } + } +} + +impl Default for IrqMutex { + fn default() -> Self { + Self::new(Default::default()) + } +} + +impl From for IrqMutex { + fn from(data: T) -> Self { + Self::new(data) + } +} + +impl<'a, T: ?Sized> IrqMutexGuard<'a, T> { + /// Leak the lock guard, yielding a mutable reference to the underlying data. + /// + /// Note that this function will permanently lock the original [`IrqMutex`]. 
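+    /// Because the guard's destructor never runs, the saved interrupt state
+    /// is likewise never restored, so interrupts stay masked permanently.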
+ /// + /// ``` + /// let mylock = spin::mutex::IrqMutex::<_>::new(0); + /// + /// let data: &mut i32 = spin::mutex::IrqMutexGuard::leak(mylock.lock()); + /// + /// *data = 1; + /// assert_eq!(*data, 1); + /// ``` + #[inline(always)] + pub fn leak(this: Self) -> &'a mut T { + // Use ManuallyDrop to avoid stacked-borrow invalidation + let mut this = ManuallyDrop::new(this); + // We know statically that only we are referencing data + unsafe { &mut *this.data } + } +} + +impl<'a, T: ?Sized + fmt::Debug> fmt::Debug for IrqMutexGuard<'a, T> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(&**self, f) + } +} + +impl<'a, T: ?Sized + fmt::Display> fmt::Display for IrqMutexGuard<'a, T> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Display::fmt(&**self, f) + } +} + +impl<'a, T: ?Sized> Deref for IrqMutexGuard<'a, T> { + type Target = T; + fn deref(&self) -> &T { + // We know statically that only we are referencing data + unsafe { &*self.data } + } +} + +impl<'a, T: ?Sized> DerefMut for IrqMutexGuard<'a, T> { + fn deref_mut(&mut self) -> &mut T { + // We know statically that only we are referencing data + unsafe { &mut *self.data } + } +} + +impl<'a, T: ?Sized> Drop for IrqMutexGuard<'a, T> { + /// The dropping of the MutexGuard will release the lock it was created from. + fn drop(&mut self) { + self.lock.store(false, Ordering::Release); + interrupt::restore_interrupts(self.interrupt); + } +} + +#[cfg(feature = "lock_api")] +unsafe impl lock_api_crate::RawMutex for IrqMutex<(), R> { + type GuardMarker = lock_api_crate::GuardSend; + + const INIT: Self = Self::new(()); + + fn lock(&self) { + // Prevent guard destructor running + core::mem::forget(Self::lock(self)); + } + + fn try_lock(&self) -> bool { + // Prevent guard destructor running + Self::try_lock(self).map(core::mem::forget).is_some() + } + + unsafe fn unlock(&self) { + self.force_unlock(); + } + + fn is_locked(&self) -> bool { + Self::is_locked(self) + } +} + +#[cfg(test)] +mod tests { + use std::prelude::v1::*; + + use std::sync::atomic::{AtomicUsize, Ordering}; + use std::sync::mpsc::channel; + use std::sync::Arc; + use std::thread; + + type IrqMutex = super::IrqMutex; + + #[derive(Eq, PartialEq, Debug)] + struct NonCopy(i32); + + #[test] + fn smoke() { + let m = IrqMutex::<_>::new(()); + drop(m.lock()); + drop(m.lock()); + } + + #[test] + fn lots_and_lots() { + static M: IrqMutex<()> = IrqMutex::<_>::new(()); + static mut CNT: u32 = 0; + const J: u32 = 1000; + const K: u32 = 3; + + fn inc() { + for _ in 0..J { + unsafe { + let _g = M.lock(); + CNT += 1; + } + } + } + + let (tx, rx) = channel(); + let mut ts = Vec::new(); + for _ in 0..K { + let tx2 = tx.clone(); + ts.push(thread::spawn(move || { + inc(); + tx2.send(()).unwrap(); + })); + let tx2 = tx.clone(); + ts.push(thread::spawn(move || { + inc(); + tx2.send(()).unwrap(); + })); + } + + drop(tx); + for _ in 0..2 * K { + rx.recv().unwrap(); + } + assert_eq!(unsafe { CNT }, J * K * 2); + + for t in ts { + t.join().unwrap(); + } + } + + #[test] + fn try_lock() { + let mutex = IrqMutex::<_>::new(42); + + // First lock succeeds + let a = mutex.try_lock(); + assert_eq!(a.as_ref().map(|r| **r), Some(42)); + + // Additional lock fails + let b = mutex.try_lock(); + assert!(b.is_none()); + + // After dropping lock, it succeeds again + ::core::mem::drop(a); + let c = mutex.try_lock(); + assert_eq!(c.as_ref().map(|r| **r), Some(42)); + } + + #[test] + fn test_into_inner() { + let m = IrqMutex::<_>::new(NonCopy(10)); + assert_eq!(m.into_inner(), 
NonCopy(10)); + } + + #[test] + fn test_into_inner_drop() { + struct Foo(Arc); + impl Drop for Foo { + fn drop(&mut self) { + self.0.fetch_add(1, Ordering::SeqCst); + } + } + let num_drops = Arc::new(AtomicUsize::new(0)); + let m = IrqMutex::<_>::new(Foo(num_drops.clone())); + assert_eq!(num_drops.load(Ordering::SeqCst), 0); + { + let _inner = m.into_inner(); + assert_eq!(num_drops.load(Ordering::SeqCst), 0); + } + assert_eq!(num_drops.load(Ordering::SeqCst), 1); + } + + #[test] + fn test_mutex_arc_nested() { + // Tests nested mutexes and access + // to underlying data. + let arc = Arc::new(IrqMutex::<_>::new(1)); + let arc2 = Arc::new(IrqMutex::<_>::new(arc)); + let (tx, rx) = channel(); + let t = thread::spawn(move || { + let lock = arc2.lock(); + let lock2 = lock.lock(); + assert_eq!(*lock2, 1); + tx.send(()).unwrap(); + }); + rx.recv().unwrap(); + t.join().unwrap(); + } + + #[test] + fn test_mutex_arc_access_in_unwind() { + let arc = Arc::new(IrqMutex::<_>::new(1)); + let arc2 = arc.clone(); + let _ = thread::spawn(move || -> () { + struct Unwinder { + i: Arc>, + } + impl Drop for Unwinder { + fn drop(&mut self) { + *self.i.lock() += 1; + } + } + let _u = Unwinder { i: arc2 }; + panic!(); + }) + .join(); + let lock = arc.lock(); + assert_eq!(*lock, 2); + } + + #[test] + fn test_mutex_unsized() { + let mutex: &IrqMutex<[i32]> = &IrqMutex::<_>::new([1, 2, 3]); + { + let b = &mut *mutex.lock(); + b[0] = 4; + b[2] = 5; + } + let comp: &[i32] = &[4, 2, 5]; + assert_eq!(&*mutex.lock(), comp); + } + + #[test] + fn test_mutex_force_lock() { + let lock = IrqMutex::<_>::new(()); + ::std::mem::forget(lock.lock()); + unsafe { + lock.force_unlock(); + } + assert!(lock.try_lock().is_some()); + } +} From d58d055470cbee3aeb7231bea44a4326254993f1 Mon Sep 17 00:00:00 2001 From: Juventer Date: Sat, 13 Apr 2024 00:46:12 +0200 Subject: [PATCH 2/5] Added irq_mutex feature flag to readme --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 7fd3780..200bbd1 100644 --- a/README.md +++ b/README.md @@ -74,6 +74,8 @@ The crate comes with a few feature flags that you may wish to use. - `spin_mutex` enables the `SpinMutex` type. +- `irq_mutex` enables the `IrqMutex` type. + - `ticket_mutex` enables the `TicketMutex` type. - `use_ticket_mutex` switches to a ticket lock for the implementation of `Mutex`. This From 5562c8bf875d211ab5a43340746b4104ddaba29f Mon Sep 17 00:00:00 2001 From: Shinribo <73356576+Shinribo@users.noreply.github.com> Date: Wed, 1 May 2024 15:07:15 +0200 Subject: [PATCH 3/5] Changed IqrMutex implementation to be a wrapper around Mutex Note: doesnt build, need to find a solution for leak() --- Cargo.toml | 7 +- README.md | 5 +- src/interrupt.rs | 45 ---- src/irqmutex.rs | 284 +++++++++++++++++++++ src/lib.rs | 7 +- src/mutex.rs | 23 +- src/mutex/interrupt.rs | 565 ----------------------------------------- 7 files changed, 303 insertions(+), 633 deletions(-) delete mode 100644 src/interrupt.rs create mode 100644 src/irqmutex.rs delete mode 100644 src/mutex/interrupt.rs diff --git a/Cargo.toml b/Cargo.toml index 3af4d55..39b1cec 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,6 +16,7 @@ rust-version = "1.38" lock_api_crate = { package = "lock_api", version = "0.4", optional = true } # Enable require-cas feature to provide a better error message if the end user forgets to use the cfg or feature. 
 portable-atomic = { version = "1.3", optional = true, default-features = false, features = ["require-cas"] }
+critical-section = { version = "1.1.2", optional = true }
 
 [features]
 default = ["lock_api", "mutex", "spin_mutex", "rwlock", "once", "lazy", "barrier"]
@@ -32,9 +33,6 @@ ticket_mutex = ["mutex"]
 # Enables `FairMutex`.
 fair_mutex = ["mutex"]
 
-# Enables `IrqMutex`.
-irq_mutex = ["mutex"]
-
 # Enables the non-default ticket mutex implementation for `Mutex`.
 use_ticket_mutex = ["mutex", "ticket_mutex"]
 
@@ -56,6 +54,9 @@ lock_api = ["lock_api_crate"]
 # Enables std-only features such as yield-relaxing.
 std = []
 
+# Enables `IrqMutex`, a platform-specific interrupt-masking variant of `Mutex`.
+irq_mutex = ["critical-section", "mutex"]
+
 # Use the portable_atomic crate to support platforms without native atomic operations.
 # The `portable_atomic_unsafe_assume_single_core` cfg or `critical-section` feature
 # of `portable-atomic` crate must also be set by the final binary crate.
diff --git a/README.md b/README.md
index 200bbd1..0e36b2e 100644
--- a/README.md
+++ b/README.md
@@ -74,8 +74,6 @@ The crate comes with a few feature flags that you may wish to use.
 
 - `spin_mutex` enables the `SpinMutex` type.
 
-- `irq_mutex` enables the `IrqMutex` type.
-
 - `ticket_mutex` enables the `TicketMutex` type.
 
 - `use_ticket_mutex` switches to a ticket lock for the implementation of `Mutex`. This
@@ -99,6 +97,9 @@ The crate comes with a few feature flags that you may wish to use.
   The `portable_atomic_unsafe_assume_single_core` cfg or `critical-section` feature
   of `portable-atomic` crate must also be set by the final binary crate.
 
+- `irq_mutex` enables the `IrqMutex` type and the use of the `critical-section` crate.
+  The user has to supply an implementation of the `critical-section` `acquire()`/`release()` pair; see the `critical-section` documentation for details.
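+  For example, a minimal single-core x86_64 implementation might look like the
+  following sketch (illustrative only, not part of this crate; it assumes the
+  `restore-state-bool` feature of `critical-section`, so that `RawRestoreState` is a `bool`):
+
+  ```rust
+  use core::arch::asm;
+  use critical_section::RawRestoreState;
+
+  struct SingleCoreCriticalSection;
+  critical_section::set_impl!(SingleCoreCriticalSection);
+
+  unsafe impl critical_section::Impl for SingleCoreCriticalSection {
+      unsafe fn acquire() -> RawRestoreState {
+          // Save RFLAGS, then mask interrupts; bit 9 is the Interrupt Flag.
+          let flags: u64;
+          asm!("pushfq", "pop {}", "cli", out(reg) flags);
+          flags & 0x200 != 0
+      }
+
+      unsafe fn release(was_enabled: RawRestoreState) {
+          // Re-enable interrupts only if they were enabled before `acquire`.
+          if was_enabled {
+              asm!("sti");
+          }
+      }
+  }
+  ```
+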
 When using the cfg, this can be done by adapting the following snippet to the `.cargo/config` file:
 ```
 [target.]
diff --git a/src/interrupt.rs b/src/interrupt.rs
deleted file mode 100644
index 119c9e1..0000000
--- a/src/interrupt.rs
+++ /dev/null
@@ -1,45 +0,0 @@
-use core::arch::asm;
-///Contains architecture specific interrupt mask and restore code
-///
-///
-
-
-
-///
-/// Masks all maskable interrupts and returns the previous Interrupt State
-#[cfg(target_arch = "x86_64")]
-#[inline(always)]
-pub(crate) fn mask_interrupts() -> bool {
-
-    let mut flags: u64;
-    unsafe{
-        asm!{
-            "pushfw",
-            "popw {}",
-            "cli",
-            out(reg) flags
-        }
-    }
-
-    //Masks of all Bits except the Interrupt Flag
-    if flags & 0x200 > 0 {
-        return true;
-    }
-
-    false
-
-}
-
-/// Restores the Interrupt State to its previous value
-#[cfg(target_arch = "x86_64")]
-#[inline(always)]
-pub(crate) fn restore_interrupts(interrupts: bool) {
-    if interrupts {
-        unsafe{
-            asm!{
-                "sti",
-                "nop" //on x86_64 sti creates a Interrupt Shadow, the NOP contains this Sideeffect to the inline ASM
-            }
-        }
-    }
-}
\ No newline at end of file
diff --git a/src/irqmutex.rs b/src/irqmutex.rs
new file mode 100644
index 0000000..336d236
--- /dev/null
+++ b/src/irqmutex.rs
@@ -0,0 +1,284 @@
+use critical_section::RestoreState;
+use critical_section::acquire;
+use critical_section::release;
+
+use core::{
+    fmt,
+    ops::{Deref, DerefMut},
+};
+
+type InnerMutex<T> = crate::Mutex<T>;
+type InnerMutexGuard<'a, T> = crate::MutexGuard<'a, T>;
+
+/// A spin-based lock that masks interrupts while it is held, providing
+/// mutually exclusive access to data.
+///
+/// The implementation wraps the crate's [`Mutex`] and uses the
+/// `critical-section` crate to keep interrupts masked for as long as a guard
+/// is alive, preventing deadlocks against interrupt handlers on the same core.
+///
+/// # Example
+///
+/// ```
+/// use spin;
+///
+/// let lock = spin::Mutex::new(0);
+///
+/// // Modify the data
+/// *lock.lock() = 2;
+///
+/// // Read the data
+/// let answer = *lock.lock();
+/// assert_eq!(answer, 2);
+/// ```
+///
+/// # Thread safety example
+///
+/// ```
+/// use spin;
+/// use std::sync::{Arc, Barrier};
+///
+/// let thread_count = 1000;
+/// let spin_mutex = Arc::new(spin::Mutex::new(0));
+///
+/// // We use a barrier to ensure the readout happens after all writing
+/// let barrier = Arc::new(Barrier::new(thread_count + 1));
+///
+/// # let mut ts = Vec::new();
+/// for _ in 0..thread_count {
+///     let my_barrier = barrier.clone();
+///     let my_lock = spin_mutex.clone();
+/// #   let t =
+///     std::thread::spawn(move || {
+///         let mut guard = my_lock.lock();
+///         *guard += 1;
+///
+///         // Release the lock to prevent a deadlock
+///         drop(guard);
+///         my_barrier.wait();
+///     });
+/// #   ts.push(t);
+/// }
+///
+/// barrier.wait();
+///
+/// let answer = { *spin_mutex.lock() };
+/// assert_eq!(answer, thread_count);
+///
+/// # for t in ts {
+/// #   t.join().unwrap();
+/// # }
+/// ```
+pub struct IrqMutex<T: ?Sized> {
+    inner: InnerMutex<T>
+}
+
+/// A guard that provides mutable access to the locked data and keeps
+/// interrupts masked until it is dropped.
+///
+/// For more info see the underlying [`MutexGuard`](crate::MutexGuard).
+pub struct IrqMutexGuard<'a, T: 'a + ?Sized> {
+    inner: InnerMutexGuard<'a, T>,
+    irq_state: RestoreState
+}
+
+impl<T> IrqMutex<T> {
+    /// Creates a new [`IrqMutex`] wrapping the supplied data.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// use spin::Mutex;
+    ///
+    /// static MUTEX: Mutex<()> = Mutex::new(());
+    ///
+    /// fn demo() {
+    ///     let lock = MUTEX.lock();
+    ///     // do something with lock
+    ///     drop(lock);
+    /// }
+    /// ```
+    #[inline(always)]
+    pub const fn new(value: T) -> Self {
+        Self {
+            inner: InnerMutex::new(value),
+        }
+    }
+
+    /// Consumes this [`IrqMutex`] and unwraps the underlying data.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// let lock = spin::Mutex::new(42);
+    /// assert_eq!(42, lock.into_inner());
+    /// ```
+    #[inline(always)]
+    pub fn into_inner(self) -> T {
+        self.inner.into_inner()
+    }
+}
+
+impl<T: ?Sized> IrqMutex<T> {
+    /// Locks the [`IrqMutex`] and returns a guard that permits access to the inner data.
+    ///
+    /// The returned value may be dereferenced for data access
+    /// and the lock will be dropped when the guard falls out of scope.
+    ///
+    /// ```
+    /// let lock = spin::Mutex::new(0);
+    /// {
+    ///     let mut data = lock.lock();
+    ///     // The lock is now locked and the data can be accessed
+    ///     *data += 1;
+    ///     // The lock is implicitly dropped at the end of the scope
+    /// }
+    /// ```
+    #[inline(always)]
+    pub fn lock(&self) -> IrqMutexGuard<T> {
+        let state = unsafe{acquire()};
+        IrqMutexGuard {
+            inner: self.inner.lock(),
+            irq_state: state
+        }
+    }
+}
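+
+// Interrupts must be masked (`acquire`) *before* taking the inner lock: if an
+// interrupt handler on the same core tried to take this lock while the
+// current thread already held it, the handler would spin forever and the
+// interrupted thread could never resume to release the lock. Acquiring the
+// critical section first closes that window.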
+
+impl<T: ?Sized> IrqMutex<T> {
+    /// Returns `true` if the lock is currently held.
+    ///
+    /// # Safety
+    ///
+    /// This function provides no synchronization guarantees and so its result should be considered 'out of date'
+    /// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic.
+    #[inline(always)]
+    pub fn is_locked(&self) -> bool {
+        self.inner.is_locked()
+    }
+
+    /// Force unlock this [`IrqMutex`].
+    ///
+    /// # Safety
+    ///
+    /// This is *extremely* unsafe if the lock is not held by the current
+    /// thread. However, this can be useful in some instances for exposing the
+    /// lock to FFI that doesn't know how to deal with RAII.
+    #[inline(always)]
+    pub unsafe fn force_unlock(&self) {
+        self.inner.force_unlock()
+    }
+
+    /// Try to lock this [`IrqMutex`], returning a lock guard if successful.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// let lock = spin::Mutex::new(42);
+    ///
+    /// let maybe_guard = lock.try_lock();
+    /// assert!(maybe_guard.is_some());
+    ///
+    /// // `maybe_guard` is still held, so the second call fails
+    /// let maybe_guard2 = lock.try_lock();
+    /// assert!(maybe_guard2.is_none());
+    /// ```
+    #[inline(always)]
+    pub fn try_lock(&self) -> Option<IrqMutexGuard<T>> {
+        let state = unsafe{acquire()};
+        let maybe_guard = self.inner
+            .try_lock()
+            .map(|guard| IrqMutexGuard { inner: guard, irq_state: state });
+        if maybe_guard.is_none() {
+            // The lock was not taken, so the saved interrupt state is
+            // restored immediately rather than by a guard.
+            unsafe{release(state)};
+        }
+        maybe_guard
+    }
+
+    /// Returns a mutable reference to the underlying data.
+    ///
+    /// Since this call borrows the [`IrqMutex`] mutably, and a mutable reference is guaranteed to be exclusive in Rust,
+    /// no actual locking needs to take place -- the mutable borrow statically guarantees no locks exist. As such,
+    /// this is a 'zero-cost' operation.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// let mut lock = spin::Mutex::new(0);
+    /// *lock.get_mut() = 10;
+    /// assert_eq!(*lock.lock(), 10);
+    /// ```
+    #[inline(always)]
+    pub fn get_mut(&mut self) -> &mut T {
+        self.inner.get_mut()
+    }
+}
+
+impl<T: ?Sized + fmt::Debug> fmt::Debug for IrqMutex<T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Debug::fmt(&self.inner, f)
+    }
+}
+
+impl<T: Default> Default for IrqMutex<T> {
+    fn default() -> Self {
+        Self::new(Default::default())
+    }
+}
+
+impl<T> From<T> for IrqMutex<T> {
+    fn from(data: T) -> Self {
+        Self::new(data)
+    }
+}
+
+impl<'a, T: ?Sized> IrqMutexGuard<'a, T> {
+    /// Leak the lock guard, yielding a mutable reference to the underlying data.
+    ///
+    /// Note that this function will permanently lock the original [`IrqMutex`].
+    ///
+    /// ```
+    /// let mylock = spin::Mutex::new(0);
+    ///
+    /// let data: &mut i32 = spin::MutexGuard::leak(mylock.lock());
+    ///
+    /// *data = 1;
+    /// assert_eq!(*data, 1);
+    /// ```
+    #[inline(always)]
+    pub fn leak(this: Self) -> &'a mut T {
+        InnerMutexGuard::leak(this.inner)
+    }
+}
+
+impl<'a, T: ?Sized + fmt::Debug> fmt::Debug for IrqMutexGuard<'a, T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+impl<'a, T: ?Sized + fmt::Display> fmt::Display for IrqMutexGuard<'a, T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Display::fmt(&**self, f)
+    }
+}
+
+impl<'a, T: ?Sized> Deref for IrqMutexGuard<'a, T> {
+    type Target = T;
+    fn deref(&self) -> &T {
+        &*self.inner
+    }
+}
+
+impl<'a, T: ?Sized> DerefMut for IrqMutexGuard<'a, T> {
+    fn deref_mut(&mut self) -> &mut T {
+        &mut *self.inner
+    }
+}
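+
+// Note: `release` runs in `Drop::drop` *before* the `inner` guard field is
+// dropped (field drops happen only after `drop` returns), so interrupts are
+// briefly re-enabled while the inner lock is still held. Restoring the
+// interrupt state only after the inner guard has been released would avoid
+// that window.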
+
+impl<'a, T: ?Sized> Drop for IrqMutexGuard<'a, T> {
+    /// Dropping the `IrqMutexGuard` will release the lock it was created from and restore the interrupt state to its former value.
+    fn drop(&mut self) {
+        unsafe{release(self.irq_state)};
+    }
+}
diff --git a/src/lib.rs b/src/lib.rs
index d70ad74..174ba29 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -99,8 +99,13 @@ pub use relax::{RelaxStrategy, Spin};
 #[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))]
 pub use rwlock::RwLockReadGuard;
 
+
+#[cfg(feature = "critical-section")]
+extern crate critical_section;
+
 #[cfg(feature = "irq_mutex")]
-mod interrupt;
+#[cfg_attr(docsrs, doc(cfg(feature = "irq_mutex")))]
+pub mod irqmutex;
 
 // Avoid confusing inference errors by aliasing away the relax strategy parameter. Users that need to use a different
 // relax strategy can do so by accessing the types through their fully-qualified path. This is a little bit horrible
diff --git a/src/mutex.rs b/src/mutex.rs
index 11f4dd7..40b7d0f 100644
--- a/src/mutex.rs
+++ b/src/mutex.rs
@@ -34,36 +34,25 @@ pub mod fair;
 #[cfg_attr(docsrs, doc(cfg(feature = "fair_mutex")))]
 pub use self::fair::{FairMutex, FairMutexGuard, Starvation};
 
-#[cfg(feature = "irq_mutex")]
-#[cfg_attr(docsrs, doc(cfg(feature = "irq_mutex")))]
-pub mod interrupt;
-#[cfg(feature = "irq_mutex")]
-#[cfg_attr(docsrs, doc(cfg(feature = "spin_mutex")))]
-pub use self::interrupt::{IrqMutex, IrqMutexGuard};
-
 use crate::{RelaxStrategy, Spin};
 use core::{
     fmt,
     ops::{Deref, DerefMut},
 };
 
-#[cfg(all(not(feature = "spin_mutex"), not(feature = "use_ticket_mutex"), not(feature = "irq_mutex") ))]
-compile_error!("The `mutex` feature flag was used (perhaps through another feature?) without either `spin_mutex`, `use_ticket_mutex` or `irq_mutex`. One of these is required.");
+#[cfg(all(not(feature = "spin_mutex"), not(feature = "use_ticket_mutex")))]
+compile_error!("The `mutex` feature flag was used (perhaps through another feature?) without either `spin_mutex` or `use_ticket_mutex`. 
One of these is required."); -#[cfg(all(not(any(feature = "use_ticket_mutex", feature = "irq_mutex")), feature = "spin_mutex"))] +#[cfg(all(not(feature = "use_ticket_mutex"), feature = "spin_mutex"))] type InnerMutex = self::spin::SpinMutex; -#[cfg(all(not(any(feature = "use_ticket_mutex", feature = "irq_mutex")), feature = "spin_mutex"))] +#[cfg(all(not(feature = "use_ticket_mutex"), feature = "spin_mutex"))] type InnerMutexGuard<'a, T> = self::spin::SpinMutexGuard<'a, T>; -#[cfg(all(not(any(feature = "irq_mutex", feature = "spin_mutex")), feature = "use_ticket_mutex"))] +#[cfg(feature = "use_ticket_mutex")] type InnerMutex = self::ticket::TicketMutex; -#[cfg(all(not(any(feature = "irq_mutex", feature = "spin_mutex")), feature = "use_ticket_mutex"))] +#[cfg(feature = "use_ticket_mutex")] type InnerMutexGuard<'a, T> = self::ticket::TicketMutexGuard<'a, T>; -#[cfg(all(not(any(feature = "use_ticket_mutex", feature = "spin_mutex")), feature = "irq_mutex"))] -type InnerMutex = self::interrupt::IrqMutex; -#[cfg(all(not(any(feature = "use_ticket_mutex", feature = "spin_mutex")), feature = "irq_mutex"))] -type InnerMutexGuard<'a, T> = self::interrupt::IrqMutexGuard<'a, T>; /// A spin-based lock providing mutually exclusive access to data. /// /// The implementation uses either a ticket mutex or a regular spin mutex depending on whether the `spin_mutex` or diff --git a/src/mutex/interrupt.rs b/src/mutex/interrupt.rs deleted file mode 100644 index e2f1006..0000000 --- a/src/mutex/interrupt.rs +++ /dev/null @@ -1,565 +0,0 @@ -//! A naïve spinning mutex. -//! -//! Waiting threads hammer an atomic variable until it becomes available. Best-case latency is low, but worst-case -//! latency is theoretically infinite. -//! Mask Interrupts to prevent deadlocks - -use crate::{ - atomic::{AtomicBool, Ordering}, interrupt, RelaxStrategy, Spin -}; -use core::{ - cell::UnsafeCell, - fmt, - marker::PhantomData, - mem::ManuallyDrop, - ops::{Deref, DerefMut}, -}; - -/// A [spin lock](https://en.m.wikipedia.org/wiki/Spinlock) providing mutually exclusive access to data. -/// -/// # Example -/// -/// ``` -/// use spin; -/// -/// let lock = spin::mutex::IrqMutex::<_>::new(0); -/// -/// // Modify the data -/// *lock.lock() = 2; -/// -/// // Read the data -/// let answer = *lock.lock(); -/// assert_eq!(answer, 2); -/// ``` -/// -/// # Thread safety example -/// -/// ``` -/// use spin; -/// use std::sync::{Arc, Barrier}; -/// -/// let thread_count = 1000; -/// let spin_mutex = Arc::new(spin::mutex::IrqMutex::<_>::new(0)); -/// -/// // We use a barrier to ensure the readout happens after all writing -/// let barrier = Arc::new(Barrier::new(thread_count + 1)); -/// -/// # let mut ts = Vec::new(); -/// for _ in (0..thread_count) { -/// let my_barrier = barrier.clone(); -/// let my_lock = spin_mutex.clone(); -/// # let t = -/// std::thread::spawn(move || { -/// let mut guard = my_lock.lock(); -/// *guard += 1; -/// -/// // Release the lock to prevent a deadlock -/// drop(guard); -/// my_barrier.wait(); -/// }); -/// # ts.push(t); -/// } -/// -/// barrier.wait(); -/// -/// let answer = { *spin_mutex.lock() }; -/// assert_eq!(answer, thread_count); -/// -/// # for t in ts { -/// # t.join().unwrap(); -/// # } -/// ``` -pub struct IrqMutex { - phantom: PhantomData, - pub(crate) lock: AtomicBool, - data: UnsafeCell, -} - -/// A guard that provides mutable data access. -/// -/// When the guard falls out of scope it will release the lock. 
-pub struct IrqMutexGuard<'a, T: ?Sized + 'a> { - lock: &'a AtomicBool, - data: *mut T, - interrupt: bool, -} - -// Same unsafe impls as `std::sync::Mutex` -unsafe impl Sync for IrqMutex {} -unsafe impl Send for IrqMutex {} - -unsafe impl Sync for IrqMutexGuard<'_, T> {} -unsafe impl Send for IrqMutexGuard<'_, T> {} - -impl IrqMutex { - /// Creates a new [`IrqMutex`] wrapping the supplied data. - /// - /// # Example - /// - /// ``` - /// use spin::mutex::IrqMutex; - /// - /// static MUTEX: IrqMutex<()> = IrqMutex::<_>::new(()); - /// - /// fn demo() { - /// let lock = MUTEX.lock(); - /// // do something with lock - /// drop(lock); - /// } - /// ``` - #[inline(always)] - pub const fn new(data: T) -> Self { - IrqMutex { - lock: AtomicBool::new(false), - data: UnsafeCell::new(data), - phantom: PhantomData, - } - } - - /// Consumes this [`IrqMutex`] and unwraps the underlying data. - /// - /// # Example - /// - /// ``` - /// let lock = spin::mutex::IrqMutex::<_>::new(42); - /// assert_eq!(42, lock.into_inner()); - /// ``` - #[inline(always)] - pub fn into_inner(self) -> T { - // We know statically that there are no outstanding references to - // `self` so there's no need to lock. - let IrqMutex { data, .. } = self; - data.into_inner() - } - - /// Returns a mutable pointer to the underlying data. - /// - /// This is mostly meant to be used for applications which require manual unlocking, but where - /// storing both the lock and the pointer to the inner data gets inefficient. - /// - /// # Example - /// ``` - /// let lock = spin::mutex::IrqMutex::<_>::new(42); - /// - /// unsafe { - /// core::mem::forget(lock.lock()); - /// - /// assert_eq!(lock.as_mut_ptr().read(), 42); - /// lock.as_mut_ptr().write(58); - /// - /// lock.force_unlock(); - /// } - /// - /// assert_eq!(*lock.lock(), 58); - /// - /// ``` - #[inline(always)] - pub fn as_mut_ptr(&self) -> *mut T { - self.data.get() - } -} - -impl IrqMutex { - /// Locks the [`IrqMutex`] and returns a guard that permits access to the inner data. - /// - /// The returned value may be dereferenced for data access - /// and the lock will be dropped when the guard falls out of scope. - /// - /// ``` - /// let lock = spin::mutex::IrqMutex::<_>::new(0); - /// { - /// let mut data = lock.lock(); - /// // The lock is now locked and the data can be accessed - /// *data += 1; - /// // The lock is implicitly dropped at the end of the scope - /// } - /// ``` - #[inline(always)] - pub fn lock(&self) -> IrqMutexGuard { - // Can fail to lock even if the spinlock is not locked. May be more efficient than `try_lock` - // when called in a loop. - loop { - if let Some(guard) = self.try_lock_weak() { - break guard; - } - - while self.is_locked() { - R::relax(); - } - } - } -} - -impl IrqMutex { - /// Returns `true` if the lock is currently held. - /// - /// # Safety - /// - /// This function provides no synchronization guarantees and so its result should be considered 'out of date' - /// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic. - #[inline(always)] - pub fn is_locked(&self) -> bool { - self.lock.load(Ordering::Relaxed) - } - - /// Force unlock this [`IrqMutex`]. - /// - /// # Safety - /// - /// This is *extremely* unsafe if the lock is not held by the current - /// thread. However, this can be useful in some instances for exposing the - /// lock to FFI that doesn't know how to deal with RAII. 
- #[inline(always)] - pub unsafe fn force_unlock(&self) { - self.lock.store(false, Ordering::Release); - } - - /// Try to lock this [`IrqMutex`], returning a lock guard if successful. - /// - /// # Example - /// - /// ``` - /// let lock = spin::mutex::IrqMutex::<_>::new(42); - /// - /// let maybe_guard = lock.try_lock(); - /// assert!(maybe_guard.is_some()); - /// - /// // `maybe_guard` is still held, so the second call fails - /// let maybe_guard2 = lock.try_lock(); - /// assert!(maybe_guard2.is_none()); - /// ``` - #[inline(always)] - pub fn try_lock(&self) -> Option> { - // The reason for using a strong compare_exchange is explained here: - // https://github.com/Amanieu/parking_lot/pull/207#issuecomment-575869107 - let interrupt_state = interrupt::mask_interrupts(); - if self - .lock - .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed) - .is_ok() - { - Some(IrqMutexGuard { - lock: &self.lock, - data: unsafe { &mut *self.data.get() }, - interrupt: interrupt_state - }) - } else { - interrupt::restore_interrupts(interrupt_state); - None - } - } - - /// Try to lock this [`IrqMutex`], returning a lock guard if succesful. - /// - /// Unlike [`IrqMutex::try_lock`], this function is allowed to spuriously fail even when the mutex is unlocked, - /// which can result in more efficient code on some platforms. - #[inline(always)] - pub fn try_lock_weak(&self) -> Option> { - let interrupt_state = interrupt::mask_interrupts(); - if self - .lock - .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed) - .is_ok() - { - Some(IrqMutexGuard { - lock: &self.lock, - data: unsafe { &mut *self.data.get() }, - interrupt: interrupt_state - }) - } else { - interrupt::restore_interrupts(interrupt_state); - None - } - } - - /// Returns a mutable reference to the underlying data. - /// - /// Since this call borrows the [`IrqMutex`] mutably, and a mutable reference is guaranteed to be exclusive in - /// Rust, no actual locking needs to take place -- the mutable borrow statically guarantees no locks exist. As - /// such, this is a 'zero-cost' operation. - /// - /// # Example - /// - /// ``` - /// let mut lock = spin::mutex::IrqMutex::<_>::new(0); - /// *lock.get_mut() = 10; - /// assert_eq!(*lock.lock(), 10); - /// ``` - #[inline(always)] - pub fn get_mut(&mut self) -> &mut T { - // We know statically that there are no other references to `self`, so - // there's no need to lock the inner mutex. - unsafe { &mut *self.data.get() } - } -} - -impl fmt::Debug for IrqMutex { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self.try_lock() { - Some(guard) => write!(f, "Mutex {{ data: ") - .and_then(|()| (&*guard).fmt(f)) - .and_then(|()| write!(f, "}}")), - None => write!(f, "Mutex {{ }}"), - } - } -} - -impl Default for IrqMutex { - fn default() -> Self { - Self::new(Default::default()) - } -} - -impl From for IrqMutex { - fn from(data: T) -> Self { - Self::new(data) - } -} - -impl<'a, T: ?Sized> IrqMutexGuard<'a, T> { - /// Leak the lock guard, yielding a mutable reference to the underlying data. - /// - /// Note that this function will permanently lock the original [`IrqMutex`]. 
- /// - /// ``` - /// let mylock = spin::mutex::IrqMutex::<_>::new(0); - /// - /// let data: &mut i32 = spin::mutex::IrqMutexGuard::leak(mylock.lock()); - /// - /// *data = 1; - /// assert_eq!(*data, 1); - /// ``` - #[inline(always)] - pub fn leak(this: Self) -> &'a mut T { - // Use ManuallyDrop to avoid stacked-borrow invalidation - let mut this = ManuallyDrop::new(this); - // We know statically that only we are referencing data - unsafe { &mut *this.data } - } -} - -impl<'a, T: ?Sized + fmt::Debug> fmt::Debug for IrqMutexGuard<'a, T> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Debug::fmt(&**self, f) - } -} - -impl<'a, T: ?Sized + fmt::Display> fmt::Display for IrqMutexGuard<'a, T> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Display::fmt(&**self, f) - } -} - -impl<'a, T: ?Sized> Deref for IrqMutexGuard<'a, T> { - type Target = T; - fn deref(&self) -> &T { - // We know statically that only we are referencing data - unsafe { &*self.data } - } -} - -impl<'a, T: ?Sized> DerefMut for IrqMutexGuard<'a, T> { - fn deref_mut(&mut self) -> &mut T { - // We know statically that only we are referencing data - unsafe { &mut *self.data } - } -} - -impl<'a, T: ?Sized> Drop for IrqMutexGuard<'a, T> { - /// The dropping of the MutexGuard will release the lock it was created from. - fn drop(&mut self) { - self.lock.store(false, Ordering::Release); - interrupt::restore_interrupts(self.interrupt); - } -} - -#[cfg(feature = "lock_api")] -unsafe impl lock_api_crate::RawMutex for IrqMutex<(), R> { - type GuardMarker = lock_api_crate::GuardSend; - - const INIT: Self = Self::new(()); - - fn lock(&self) { - // Prevent guard destructor running - core::mem::forget(Self::lock(self)); - } - - fn try_lock(&self) -> bool { - // Prevent guard destructor running - Self::try_lock(self).map(core::mem::forget).is_some() - } - - unsafe fn unlock(&self) { - self.force_unlock(); - } - - fn is_locked(&self) -> bool { - Self::is_locked(self) - } -} - -#[cfg(test)] -mod tests { - use std::prelude::v1::*; - - use std::sync::atomic::{AtomicUsize, Ordering}; - use std::sync::mpsc::channel; - use std::sync::Arc; - use std::thread; - - type IrqMutex = super::IrqMutex; - - #[derive(Eq, PartialEq, Debug)] - struct NonCopy(i32); - - #[test] - fn smoke() { - let m = IrqMutex::<_>::new(()); - drop(m.lock()); - drop(m.lock()); - } - - #[test] - fn lots_and_lots() { - static M: IrqMutex<()> = IrqMutex::<_>::new(()); - static mut CNT: u32 = 0; - const J: u32 = 1000; - const K: u32 = 3; - - fn inc() { - for _ in 0..J { - unsafe { - let _g = M.lock(); - CNT += 1; - } - } - } - - let (tx, rx) = channel(); - let mut ts = Vec::new(); - for _ in 0..K { - let tx2 = tx.clone(); - ts.push(thread::spawn(move || { - inc(); - tx2.send(()).unwrap(); - })); - let tx2 = tx.clone(); - ts.push(thread::spawn(move || { - inc(); - tx2.send(()).unwrap(); - })); - } - - drop(tx); - for _ in 0..2 * K { - rx.recv().unwrap(); - } - assert_eq!(unsafe { CNT }, J * K * 2); - - for t in ts { - t.join().unwrap(); - } - } - - #[test] - fn try_lock() { - let mutex = IrqMutex::<_>::new(42); - - // First lock succeeds - let a = mutex.try_lock(); - assert_eq!(a.as_ref().map(|r| **r), Some(42)); - - // Additional lock fails - let b = mutex.try_lock(); - assert!(b.is_none()); - - // After dropping lock, it succeeds again - ::core::mem::drop(a); - let c = mutex.try_lock(); - assert_eq!(c.as_ref().map(|r| **r), Some(42)); - } - - #[test] - fn test_into_inner() { - let m = IrqMutex::<_>::new(NonCopy(10)); - assert_eq!(m.into_inner(), 
NonCopy(10));
-    }
-
-    #[test]
-    fn test_into_inner_drop() {
-        struct Foo(Arc);
-        impl Drop for Foo {
-            fn drop(&mut self) {
-                self.0.fetch_add(1, Ordering::SeqCst);
-            }
-        }
-        let num_drops = Arc::new(AtomicUsize::new(0));
-        let m = IrqMutex::<_>::new(Foo(num_drops.clone()));
-        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
-        {
-            let _inner = m.into_inner();
-            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
-        }
-        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
-    }
-
-    #[test]
-    fn test_mutex_arc_nested() {
-        // Tests nested mutexes and access
-        // to underlying data.
-        let arc = Arc::new(IrqMutex::<_>::new(1));
-        let arc2 = Arc::new(IrqMutex::<_>::new(arc));
-        let (tx, rx) = channel();
-        let t = thread::spawn(move || {
-            let lock = arc2.lock();
-            let lock2 = lock.lock();
-            assert_eq!(*lock2, 1);
-            tx.send(()).unwrap();
-        });
-        rx.recv().unwrap();
-        t.join().unwrap();
-    }
-
-    #[test]
-    fn test_mutex_arc_access_in_unwind() {
-        let arc = Arc::new(IrqMutex::<_>::new(1));
-        let arc2 = arc.clone();
-        let _ = thread::spawn(move || -> () {
-            struct Unwinder {
-                i: Arc>,
-            }
-            impl Drop for Unwinder {
-                fn drop(&mut self) {
-                    *self.i.lock() += 1;
-                }
-            }
-            let _u = Unwinder { i: arc2 };
-            panic!();
-        })
-        .join();
-        let lock = arc.lock();
-        assert_eq!(*lock, 2);
-    }
-
-    #[test]
-    fn test_mutex_unsized() {
-        let mutex: &IrqMutex<[i32]> = &IrqMutex::<_>::new([1, 2, 3]);
-        {
-            let b = &mut *mutex.lock();
-            b[0] = 4;
-            b[2] = 5;
-        }
-        let comp: &[i32] = &[4, 2, 5];
-        assert_eq!(&*mutex.lock(), comp);
-    }
-
-    #[test]
-    fn test_mutex_force_lock() {
-        let lock = IrqMutex::<_>::new(());
-        ::std::mem::forget(lock.lock());
-        unsafe {
-            lock.force_unlock();
-        }
-        assert!(lock.try_lock().is_some());
-    }
-}

From 6c3871bf6fc09d030f885a8bb5e7f3219a90a9da Mon Sep 17 00:00:00 2001
From: Shinribo <73356576+Shinribo@users.noreply.github.com>
Date: Thu, 30 May 2024 22:10:41 +0200
Subject: [PATCH 4/5] Updated comments, added a missing interrupt restore and
 made IrqMutex unsafe

---
 README.md       | 1 +
 src/irqmutex.rs | 8 +++++++-
 2 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 0e36b2e..8126812 100644
--- a/README.md
+++ b/README.md
@@ -99,6 +99,7 @@ The crate comes with a few feature flags that you may wish to use.
 
 - `irq_mutex` enables the `IrqMutex` type and the use of the `critical-section` crate.
   The user has to supply an implementation of the `critical-section` `acquire()`/`release()` pair; see the `critical-section` documentation for details.
+  Unsafe feature: when `IrqMutex`es are nested, the inner `IrqMutexGuard` must not outlive the outer `IrqMutexGuard`; a violation leads to undefined interrupt behavior.
 
 When using the cfg, this can be done by adapting the following snippet to the `.cargo/config` file:
 ```
diff --git a/src/irqmutex.rs b/src/irqmutex.rs
index 336d236..3cc24a1 100644
--- a/src/irqmutex.rs
+++ b/src/irqmutex.rs
@@ -98,9 +98,13 @@ impl<T> IrqMutex<T> {
     /// // do something with lock
     /// drop(lock);
     /// }
     /// ```
+    ///
+    /// # Safety
+    ///
+    /// When `IrqMutex`es are nested, the inner `IrqMutexGuard` must not outlive the outer `IrqMutexGuard`; a violation leads to undefined interrupt behavior.
     #[inline(always)]
-    pub const fn new(value: T) -> Self {
+    pub const unsafe fn new(value: T) -> Self {
         Self {
             inner: InnerMutex::new(value),
         }
@@ -236,6 +240,7 @@ impl<'a, T: ?Sized> IrqMutexGuard<'a, T> {
     /// Leak the lock guard, yielding a mutable reference to the underlying data.
     ///
     /// Note that this function will permanently lock the original [`IrqMutex`].
+    /// It also restores the saved interrupt state, since the guard's destructor will never run.
     ///
     /// ```
     /// let mylock = spin::Mutex::new(0);
@@ -247,6 +252,7 @@ impl<'a, T: ?Sized> IrqMutexGuard<'a, T> {
     /// ```
     #[inline(always)]
     pub fn leak(this: Self) -> &'a mut T {
+        unsafe { release(this.irq_state) }
         InnerMutexGuard::leak(this.inner)
     }
 }
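The nesting rule that patch 4 documents can be illustrated with a short sketch
(illustrative only, not part of the patch; `a_lock`/`b_lock` are invented names,
and `new` is still `unsafe` at this point in the series):

```rust
use spin::irqmutex::IrqMutex;

fn nested_guards() {
    let a_lock = unsafe { IrqMutex::new(1u32) };
    let b_lock = unsafe { IrqMutex::new(2u32) };

    let a = a_lock.lock(); // saves "interrupts enabled", then masks interrupts
    let b = b_lock.lock(); // saves "interrupts disabled"

    // Correct: LIFO order restores "disabled", then "enabled".
    drop(b);
    drop(a);

    // Dropping `a` first instead would restore "enabled" while `b` was still
    // held, re-enabling interrupts in the middle of the critical section.
}
```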
From a3b5daf2e12262e9d73b01db5f6dc902fe338c6d Mon Sep 17 00:00:00 2001
From: Shinribo <73356576+Shinribo@users.noreply.github.com>
Date: Fri, 31 May 2024 17:58:28 +0200
Subject: [PATCH 5/5] Moved the unsafe keyword and corresponding comment from
 new() to lock()

---
 src/irqmutex.rs | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/src/irqmutex.rs b/src/irqmutex.rs
index 3cc24a1..37efaa0 100644
--- a/src/irqmutex.rs
+++ b/src/irqmutex.rs
@@ -99,12 +99,11 @@ impl<T> IrqMutex<T> {
     /// drop(lock);
     /// }
     /// ```
-    ///
-    /// # Safety
-    ///
-    /// When `IrqMutex`es are nested, the inner `IrqMutexGuard` must not outlive the outer `IrqMutexGuard`; a violation leads to undefined interrupt behavior.
     #[inline(always)]
-    pub const unsafe fn new(value: T) -> Self {
+    pub const fn new(value: T) -> Self {
         Self {
             inner: InnerMutex::new(value),
         }
@@ -139,8 +138,10 @@ impl<T: ?Sized> IrqMutex<T> {
     /// // The lock is implicitly dropped at the end of the scope
     /// }
     /// ```
+    ///
+    /// # Safety
+    ///
+    /// When `IrqMutex`es are nested, the inner `IrqMutexGuard` must not outlive the outer `IrqMutexGuard`; a violation leads to undefined interrupt behavior.
     #[inline(always)]
-    pub fn lock(&self) -> IrqMutexGuard<T> {
+    pub unsafe fn lock(&self) -> IrqMutexGuard<T> {
        let state = unsafe{acquire()};
        IrqMutexGuard {
            inner: self.inner.lock(),
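With the full series applied, locking is `unsafe` and the caller upholds the
nesting rule. A minimal end-to-end usage sketch (illustrative only; `COUNTER`
and `increment` are invented names, and it assumes the `irq_mutex` feature is
enabled and a `critical-section` implementation is supplied by the final binary):

```rust
use spin::irqmutex::IrqMutex;

static COUNTER: IrqMutex<u32> = IrqMutex::new(0);

fn increment() -> u32 {
    // SAFETY: the guard is dropped at the end of this scope, so no guard from
    // a nested IrqMutex outlives it and the saved interrupt state unwinds in
    // LIFO order.
    let mut guard = unsafe { COUNTER.lock() };
    *guard += 1;
    *guard
}
```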