diff --git a/rust/kernel/sync/condvar.rs b/rust/kernel/sync/condvar.rs
index 6e670854336ea5..bb9a34516957c3 100644
--- a/rust/kernel/sync/condvar.rs
+++ b/rust/kernel/sync/condvar.rs
@@ -64,8 +64,8 @@ impl CondVar {
     /// [`CondVar::notify_all`], or when the thread receives a signal.
     ///
     /// Returns whether there is a signal pending.
-    pub fn wait<L: Lock>(&self, g: &Guard<'_, L>) -> bool {
-        let l = g.lock;
+    pub fn wait<L: Lock>(&self, guard: &Guard<'_, L>) -> bool {
+        let lock = guard.lock;
         let mut wait = MaybeUninit::<bindings::wait_queue_entry>::uninit();
 
         // SAFETY: `wait` points to valid memory.
@@ -81,12 +81,12 @@
         }
 
         // SAFETY: The guard is evidence that the caller owns the lock.
-        unsafe { l.unlock() };
+        unsafe { lock.unlock() };
 
         // SAFETY: No arguments, switches to another thread.
         unsafe { bindings::schedule() };
 
-        l.lock_noguard();
+        lock.lock_noguard();
 
         // SAFETY: Both `wait` and `wait_list` point to valid memory.
         unsafe { bindings::finish_wait(self.wait_list.get(), wait.as_mut_ptr()) };
diff --git a/rust/kernel/sync/guard.rs b/rust/kernel/sync/guard.rs
index dac1fe2516f79f..84e5d319a5fd82 100644
--- a/rust/kernel/sync/guard.rs
+++ b/rust/kernel/sync/guard.rs
@@ -78,9 +78,5 @@ pub trait Lock {
     unsafe fn unlock(&self);
 
     /// Returns the data protected by the lock.
-    ///
-    /// # Safety
-    ///
-    /// It must only be called by the current owner of the lock.
-    unsafe fn locked_data(&self) -> &core::cell::UnsafeCell<Self::Inner>;
+    fn locked_data(&self) -> &core::cell::UnsafeCell<Self::Inner>;
 }
diff --git a/rust/kernel/sync/locked_by.rs b/rust/kernel/sync/locked_by.rs
new file mode 100644
index 00000000000000..fc540b35c53ae1
--- /dev/null
+++ b/rust/kernel/sync/locked_by.rs
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! A wrapper for data protected by a lock that does not wrap it.
+
+use super::{Guard, Lock};
+use core::{cell::UnsafeCell, ops::Deref, ptr};
+
+/// Allows access to some data to be serialised by a lock that does not wrap it.
+///
+/// In most cases, data protected by a lock is wrapped by the appropriate lock type, e.g.,
+/// [`super::Mutex`] or [`super::SpinLock`]. [`LockedBy`] is meant for cases when this is not
+/// possible. For example, if a container has a lock and some data in the contained elements needs
+/// to be protected by the same lock.
+///
+/// [`LockedBy`] wraps the data in lieu of another locking primitive, and only allows access to it
+/// when the caller shows evidence that the 'external' lock is locked.
+///
+/// # Example
+///
+/// The following is an example for illustrative purposes: `InnerDirectory::bytes_used` is an
+/// aggregate of all `InnerFile::bytes_used` and must be kept consistent; so we wrap `InnerFile` in
+/// a `LockedBy` so that it shares a lock with `InnerDirectory`. This allows us to enforce at
+/// compile-time that access to `InnerFile` is only granted when an `InnerDirectory` is also
+/// locked; we enforce at run time that the right `InnerDirectory` is locked.
+///
+/// ```
+/// use super::Mutex;
+/// use alloc::{string::String, vec::Vec};
+///
+/// struct InnerFile {
+///     bytes_used: u64,
+/// }
+///
+/// struct File {
+///     name: String,
+///     inner: LockedBy<InnerFile, Mutex<InnerDirectory>>,
+/// }
+///
+/// struct InnerDirectory {
+///     /// The sum of the bytes used by all files.
+///     bytes_used: u64,
+///     files: Vec<File>,
+/// }
+///
+/// struct Directory {
+///     name: String,
+///     inner: Mutex<InnerDirectory>,
+/// }
+/// ```
+pub struct LockedBy<T: ?Sized, L: Lock + ?Sized> {
+    owner: *const L::Inner,
+    data: UnsafeCell<T>,
+}
+
+// SAFETY: `LockedBy` can be transferred across thread boundaries iff the data it protects can.
+unsafe impl<T: ?Sized + Send, L: Lock + ?Sized> Send for LockedBy<T, L> {}
+
+// SAFETY: `LockedBy` serialises the interior mutability it provides, so it is `Sync` as long as the
+// data it protects is `Send`.
+unsafe impl<T: ?Sized + Send, L: Lock + ?Sized> Sync for LockedBy<T, L> {}
+
+impl<T, L: Lock + ?Sized> LockedBy<T, L> {
+    /// Constructs a new instance of [`LockedBy`].
+    ///
+    /// It stores a raw pointer to the owner that is never dereferenced. It is only used to ensure
+    /// that the right owner is being used to access the protected data. If the owner is freed, the
+    /// data becomes inaccessible; if another instance of the owner is allocated *on the same
+    /// memory location*, the data becomes accessible again: none of this affects memory safety
+    /// because in any case at most one thread (or CPU) can access the protected data at a time.
+    pub fn new(owner: &L, data: T) -> Self {
+        Self {
+            owner: owner.locked_data().get(),
+            data: UnsafeCell::new(data),
+        }
+    }
+}
+
+impl<T: ?Sized, L: Lock + ?Sized> LockedBy<T, L> {
+    /// Returns a reference to the protected data when the caller provides evidence (via a
+    /// [`Guard`]) that the owner is locked.
+    pub fn access<'a>(&'a self, guard: &'a Guard<'_, L>) -> &'a T {
+        if !ptr::eq(guard.deref(), self.owner) {
+            panic!("guard does not match owner");
+        }
+
+        // SAFETY: `guard` is evidence that the owner is locked.
+        unsafe { &*self.data.get() }
+    }
+
+    /// Returns a mutable reference to the protected data when the caller provides evidence (via a
+    /// mutable [`Guard`]) that the owner is locked mutably.
+    pub fn access_mut<'a>(&'a self, guard: &'a mut Guard<'_, L>) -> &'a mut T {
+        if !ptr::eq(guard.deref().deref(), self.owner) {
+            panic!("guard does not match owner");
+        }
+
+        // SAFETY: `guard` is evidence that the owner is locked.
+        unsafe { &mut *self.data.get() }
+    }
+
+    /// Returns a mutable reference to the protected data when the caller provides evidence (via a
+    /// mutable owner) that the owner is locked mutably. Showing a mutable reference to the owner
+    /// is sufficient because we know no other references can exist to it.
+    pub fn access_from_mut<'a>(&'a self, owner: &'a mut L::Inner) -> &'a mut T {
+        if !ptr::eq(owner, self.owner) {
+            panic!("mismatched owners");
+        }
+
+        // SAFETY: `owner` is evidence that there is only one reference to the owner.
+        unsafe { &mut *self.data.get() }
+    }
+}
diff --git a/rust/kernel/sync/mod.rs b/rust/kernel/sync/mod.rs
index 9f533ed025ea87..7c24e05ec92767 100644
--- a/rust/kernel/sync/mod.rs
+++ b/rust/kernel/sync/mod.rs
@@ -22,11 +22,13 @@ use core::pin::Pin;
 
 mod condvar;
 mod guard;
+mod locked_by;
 mod mutex;
 mod spinlock;
 
 pub use condvar::CondVar;
 pub use guard::{Guard, Lock};
+pub use locked_by::LockedBy;
 pub use mutex::Mutex;
 pub use spinlock::SpinLock;
 
diff --git a/rust/kernel/sync/mutex.rs b/rust/kernel/sync/mutex.rs
index a040861da0a024..e528228d16c101 100644
--- a/rust/kernel/sync/mutex.rs
+++ b/rust/kernel/sync/mutex.rs
@@ -95,7 +95,7 @@ impl<T: ?Sized> Lock for Mutex<T> {
         bindings::mutex_unlock(self.mutex.get());
     }
 
-    unsafe fn locked_data(&self) -> &UnsafeCell<T> {
+    fn locked_data(&self) -> &UnsafeCell<T> {
         &self.data
     }
 }
diff --git a/rust/kernel/sync/spinlock.rs b/rust/kernel/sync/spinlock.rs
index 3f3aacbcb8f081..49a7d5fd837b2f 100644
--- a/rust/kernel/sync/spinlock.rs
+++ b/rust/kernel/sync/spinlock.rs
@@ -102,7 +102,7 @@ impl<T: ?Sized> Lock for SpinLock<T> {
         rust_helper_spin_unlock(self.spin_lock.get());
     }
 
-    unsafe fn locked_data(&self) -> &UnsafeCell<T> {
+    fn locked_data(&self) -> &UnsafeCell<T> {
        &self.data
    }
 }
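As a usage sketch (illustrative only, not part of the patch): the snippet below continues the `Directory`/`File` example from the `LockedBy` documentation above. It assumes the kernel `Mutex` exposes a `lock()` method returning a `Guard` for that mutex (not shown in this diff), and the `new_file`/`first_file_bytes` helper names are hypothetical; it is not compilable outside the kernel crate.

```
// Hypothetical helpers built on the `Directory`, `InnerDirectory`, `File` and
// `InnerFile` types from the `LockedBy` doc example. Assumes `Mutex::lock()`
// returns a `Guard` for the mutex (not shown in this diff).
fn new_file(dir: &Directory, name: String) -> File {
    File {
        name,
        // `LockedBy::new` records a pointer to the data protected by `dir.inner`;
        // later accesses must present evidence that this same mutex is held.
        inner: LockedBy::new(&dir.inner, InnerFile { bytes_used: 0 }),
    }
}

fn first_file_bytes(dir: &Directory) -> u64 {
    // Locking the directory yields a `Guard`, which is the evidence `access` needs.
    let guard = dir.inner.lock();
    // `access` checks at run time (via `ptr::eq`) that `guard` belongs to the mutex
    // this `LockedBy` was constructed with, and panics otherwise.
    guard.files[0].inner.access(&guard).bytes_used
}
```

The compile-time half of the protection is that `access` cannot be called without some `Guard` for a `Mutex<InnerDirectory>`; the run-time `ptr::eq` check in `access`/`access_mut`/`access_from_mut` then catches the case where the guard (or owner) belongs to a different directory than the one the `LockedBy` was created with.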