+use crate::cell::UnsafeCell;
+use crate::collections::VecDeque;
 use crate::ffi::c_void;
+use crate::ops::{Deref, DerefMut, Drop};
 use crate::ptr;
+use crate::sync::atomic::{spin_loop_hint, AtomicUsize, Ordering};
 use crate::sys::hermit::abi;
 
+/// This type provides a lock based on busy waiting to realize mutual exclusion.
+///
+/// # Description
+///
+/// This structure behaves a lot like a common mutex, with some differences:
+///
+/// - Because it relies on busy waiting, it can be used outside the runtime.
+/// - It is a so-called ticket lock, and is therefore completely fair: waiters
+///   draw numbered tickets and are served strictly in arrival order.
+#[cfg_attr(target_arch = "x86_64", repr(align(128)))]
+#[cfg_attr(not(target_arch = "x86_64"), repr(align(64)))]
+struct Spinlock<T: ?Sized> {
+    queue: AtomicUsize,
+    dequeue: AtomicUsize,
+    data: UnsafeCell<T>,
+}
+
+unsafe impl<T: ?Sized + Send> Sync for Spinlock<T> {}
+unsafe impl<T: ?Sized + Send> Send for Spinlock<T> {}
+
+/// A guard through which the protected data can be accessed.
+///
+/// When the guard falls out of scope, it releases the lock.
+struct SpinlockGuard<'a, T: ?Sized + 'a> {
+    dequeue: &'a AtomicUsize,
+    data: &'a mut T,
+}
+
+impl<T> Spinlock<T> {
+    pub const fn new(user_data: T) -> Spinlock<T> {
+        Spinlock {
+            queue: AtomicUsize::new(0),
+            dequeue: AtomicUsize::new(1),
+            data: UnsafeCell::new(user_data),
+        }
+    }
+
+    #[inline]
+    fn obtain_lock(&self) {
+        // Draw the next ticket, then spin until `dequeue` reaches it.
+        let ticket = self.queue.fetch_add(1, Ordering::SeqCst) + 1;
+        while self.dequeue.load(Ordering::SeqCst) != ticket {
+            spin_loop_hint();
+        }
+    }
+
+    #[inline]
+    pub unsafe fn lock(&self) -> SpinlockGuard<'_, T> {
+        self.obtain_lock();
+        SpinlockGuard { dequeue: &self.dequeue, data: &mut *self.data.get() }
+    }
+}
+
+impl<T: ?Sized + Default> Default for Spinlock<T> {
+    fn default() -> Spinlock<T> {
+        Spinlock::new(Default::default())
+    }
+}
+
+impl<'a, T: ?Sized> Deref for SpinlockGuard<'a, T> {
+    type Target = T;
+    fn deref(&self) -> &T {
+        &*self.data
+    }
+}
+
+impl<'a, T: ?Sized> DerefMut for SpinlockGuard<'a, T> {
+    fn deref_mut(&mut self) -> &mut T {
+        &mut *self.data
+    }
+}
+
+impl<'a, T: ?Sized> Drop for SpinlockGuard<'a, T> {
+    /// Dropping the SpinlockGuard releases the lock it was created from.
+    fn drop(&mut self) {
+        self.dequeue.fetch_add(1, Ordering::SeqCst);
+    }
+}
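
To make the ticket discipline concrete, here is a minimal usage sketch (illustrative only, not part of the commit; it assumes a `Spinlock<u32>` and a single thread):

    let counter = Spinlock::new(0u32);

    // `queue` starts at 0 and `dequeue` at 1, so the first `lock()` draws
    // ticket 1 and succeeds immediately.
    let mut guard = unsafe { counter.lock() };
    *guard += 1;

    // Dropping the guard bumps `dequeue` to 2, admitting the next ticket.
    drop(guard);
    assert_eq!(unsafe { *counter.lock() }, 1);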
+
+/// Realizes a priority queue for task IDs, with one FIFO queue per priority.
+struct PriorityQueue {
+    queues: [Option<VecDeque<abi::Tid>>; abi::NO_PRIORITIES],
+    prio_bitmap: u64,
+}
+
+impl PriorityQueue {
+    pub const fn new() -> PriorityQueue {
+        PriorityQueue {
+            queues: [
+                None, None, None, None, None, None, None, None, None, None, None, None, None, None,
+                None, None, None, None, None, None, None, None, None, None, None, None, None, None,
+                None, None, None,
+            ],
+            prio_bitmap: 0,
+        }
+    }
+
+    /// Add a task ID to the queue, keyed by its priority
+    pub fn push(&mut self, prio: abi::Priority, id: abi::Tid) {
+        // Convert the priority to a queue index and mark that queue non-empty.
+        let i: usize = prio.into().into();
+        self.prio_bitmap |= (1 << i) as u64;
+        if let Some(queue) = &mut self.queues[i] {
+            queue.push_back(id);
+        } else {
+            let mut queue = VecDeque::new();
+            queue.push_back(id);
+            self.queues[i] = Some(queue);
+        }
+    }
+
+    fn pop_from_queue(&mut self, queue_index: usize) -> Option<abi::Tid> {
+        if let Some(queue) = &mut self.queues[queue_index] {
+            let id = queue.pop_front();
+
+            // Clear the bitmap bit once this priority level drains.
+            if queue.is_empty() {
+                self.prio_bitmap &= !(1 << queue_index as u64);
+            }
+
+            id
+        } else {
+            None
+        }
+    }
+
+    /// Pop the task handle with the highest priority from the queue
+    pub fn pop(&mut self) -> Option<abi::Tid> {
+        // Scan the bitmap for the first non-empty queue.
+        for i in 0..abi::NO_PRIORITIES {
+            if self.prio_bitmap & (1 << i) != 0 {
+                return self.pop_from_queue(i);
+            }
+        }
+
+        None
+    }
+}
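
A short worked example of the bitmap bookkeeping (illustrative values; `pop` scans from index 0 upward, so the lowest set bit, i.e. the highest priority, wins):

    // push(prio = 3, tid = A)  ->  prio_bitmap = 0b1000
    // push(prio = 1, tid = B)  ->  prio_bitmap = 0b1010
    // pop() finds bit 1 first and returns B; queue 1 drains,
    // so its bit is cleared    ->  prio_bitmap = 0b1000
    // pop() then returns A     ->  prio_bitmap = 0b0000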
+
+struct MutexInner {
+    locked: bool,
+    blocked_task: PriorityQueue,
+}
+
+impl MutexInner {
+    pub const fn new() -> MutexInner {
+        MutexInner { locked: false, blocked_task: PriorityQueue::new() }
+    }
+}
+
 pub struct Mutex {
-    inner: *const c_void,
+    inner: Spinlock<MutexInner>,
 }
 
+pub type MovableMutex = Box<Mutex>;
+
 unsafe impl Send for Mutex {}
 unsafe impl Sync for Mutex {}
 
 impl Mutex {
     pub const fn new() -> Mutex {
-        Mutex { inner: ptr::null() }
+        Mutex { inner: Spinlock::new(MutexInner::new()) }
     }
 
     #[inline]
     pub unsafe fn init(&mut self) {
-        let _ = abi::sem_init(&mut self.inner as *mut *const c_void, 1);
+        self.inner = Spinlock::new(MutexInner::new());
     }
 
     #[inline]
     pub unsafe fn lock(&self) {
-        let _ = abi::sem_timedwait(self.inner, 0);
+        loop {
+            let mut guard = self.inner.lock();
+            if !guard.locked {
+                guard.locked = true;
+                return;
+            } else {
+                // Enqueue this task and mark it blocked while still holding
+                // the spinlock, then release the lock before yielding so a
+                // concurrent `unlock` cannot miss the wakeup.
+                let prio = abi::get_priority();
+                let id = abi::getpid();
+
+                guard.blocked_task.push(prio, id);
+                abi::block_current_task();
+                drop(guard);
+                abi::yield_now();
+            }
+        }
     }
 
     #[inline]
     pub unsafe fn unlock(&self) {
-        let _ = abi::sem_post(self.inner);
+        let mut guard = self.inner.lock();
+        guard.locked = false;
+        // Hand the mutex to the highest-priority blocked task, if any.
+        if let Some(tid) = guard.blocked_task.pop() {
+            abi::wakeup_task(tid);
+        }
     }
 
     #[inline]
     pub unsafe fn try_lock(&self) -> bool {
-        let result = abi::sem_trywait(self.inner);
-        result == 0
+        let mut guard = self.inner.lock();
+        if guard.locked {
+            // Already held by another task: report failure rather than
+            // returning the `locked` flag, which would always be true here.
+            return false;
+        }
+        guard.locked = true;
+        true
     }
 
     #[inline]
-    pub unsafe fn destroy(&self) {
-        let _ = abi::sem_destroy(self.inner);
-    }
+    pub unsafe fn destroy(&self) {}
 }
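
Taken together, the rewritten `Mutex` spins only to guard its own bookkeeping and blocks the calling task for the actual wait. A minimal sketch of how a (hypothetical) caller would drive this sys-level API, assuming the usual contract that `lock`/`unlock` are correctly paired:

    let mut mutex = Mutex::new();
    unsafe {
        mutex.init();
        if mutex.try_lock() {
            // Fast path: acquired without blocking.
            mutex.unlock();
        }
        mutex.lock(); // may block until another task calls `unlock`
        // ... critical section ...
        mutex.unlock();
        mutex.destroy();
    }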
 
 pub struct ReentrantMutex {