
Commit e34263d

Rollup merge of #77610 - hermitcore:dtors, r=m-ou-se
revise Hermit's mutex interface to support the behaviour of StaticMutex

#77147 simplifies things by splitting this Mutex type into two types matching the two use cases: StaticMutex and MovableMutex. To support the new behaviour of StaticMutex, we move part of the mutex implementation into libstd. The interface to the OS changed; consequently, I removed a few functions that are no longer needed.
2 parents a547055 + bf268fe commit e34263d
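The heart of what moves into libstd here is the fair, busy-waiting ticket lock (Spinlock) added in library/std/src/sys/hermit/mutex.rs below. As a reading aid, the following standalone sketch restates the ticket-lock idea using only stable std APIs and illustrative names (TicketLock, TicketGuard, the demo in main); it is a minimal model of the same two-counter scheme, not the committed code.

// A minimal, standalone sketch of the ticket-lock idea behind the new hermit
// Spinlock; the names TicketLock/TicketGuard and the demo in main() are
// illustrative only and not part of the commit.
use std::cell::UnsafeCell;
use std::hint::spin_loop;
use std::ops::{Deref, DerefMut};
use std::sync::atomic::{AtomicUsize, Ordering};

pub struct TicketLock<T> {
    queue: AtomicUsize,   // next ticket to hand out
    dequeue: AtomicUsize, // ticket that is currently allowed in
    data: UnsafeCell<T>,
}

// The ticket protocol guarantees exclusive access, so sharing is sound when T is Send.
unsafe impl<T: Send> Sync for TicketLock<T> {}

impl<T> TicketLock<T> {
    pub const fn new(data: T) -> Self {
        Self {
            queue: AtomicUsize::new(0),
            dequeue: AtomicUsize::new(1),
            data: UnsafeCell::new(data),
        }
    }

    pub fn lock(&self) -> TicketGuard<'_, T> {
        // Draw a ticket, then spin until it is called: strictly FIFO, hence fair.
        let ticket = self.queue.fetch_add(1, Ordering::SeqCst) + 1;
        while self.dequeue.load(Ordering::SeqCst) != ticket {
            spin_loop();
        }
        TicketGuard { lock: self }
    }
}

pub struct TicketGuard<'a, T> {
    lock: &'a TicketLock<T>,
}

impl<T> Deref for TicketGuard<'_, T> {
    type Target = T;
    fn deref(&self) -> &T {
        unsafe { &*self.lock.data.get() }
    }
}

impl<T> DerefMut for TicketGuard<'_, T> {
    fn deref_mut(&mut self) -> &mut T {
        unsafe { &mut *self.lock.data.get() }
    }
}

impl<T> Drop for TicketGuard<'_, T> {
    fn drop(&mut self) {
        // Releasing the lock calls the next ticket.
        self.lock.dequeue.fetch_add(1, Ordering::SeqCst);
    }
}

fn main() {
    static COUNTER: TicketLock<u64> = TicketLock::new(0);
    let handles: Vec<_> = (0..4)
        .map(|_| {
            std::thread::spawn(|| {
                for _ in 0..1_000 {
                    *COUNTER.lock() += 1;
                }
            })
        })
        .collect();
    for handle in handles {
        handle.join().unwrap();
    }
    assert_eq!(*COUNTER.lock(), 4_000);
}

Each lock() takes the next ticket from queue and spins until dequeue reaches it; dropping the guard bumps dequeue, so waiters are admitted strictly in arrival order, which is what makes the lock fair.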

8 files changed: +186 -167 lines changed


Cargo.lock

+2 -2

@@ -1366,9 +1366,9 @@ dependencies = [
 
 [[package]]
 name = "hermit-abi"
-version = "0.1.15"
+version = "0.1.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3deed196b6e7f9e44a2ae8d94225d80302d81208b1bb673fd21fe634645c85a9"
+checksum = "5aca5565f760fb5b220e499d72710ed156fdb74e631659e99377d9ebfbd13ae8"
 dependencies = [
  "compiler_builtins",
  "libc",

library/alloc/src/alloc.rs

+1 -1

@@ -372,7 +372,7 @@ pub fn handle_alloc_error(layout: Layout) -> ! {
     unsafe { oom_impl(layout) }
 }
 
-#[cfg(not(any(test, bootstrap)))]
+#[cfg(not(any(target_os = "hermit", test, bootstrap)))]
 #[doc(hidden)]
 #[allow(unused_attributes)]
 #[unstable(feature = "alloc_internals", issue = "none")]

library/std/Cargo.toml

+1 -1

@@ -42,7 +42,7 @@ dlmalloc = { version = "0.1", features = ['rustc-dep-of-std'] }
 fortanix-sgx-abi = { version = "0.3.2", features = ['rustc-dep-of-std'] }
 
 [target.'cfg(all(any(target_arch = "x86_64", target_arch = "aarch64"), target_os = "hermit"))'.dependencies]
-hermit-abi = { version = "0.1.15", features = ['rustc-dep-of-std'] }
+hermit-abi = { version = "0.1.17", features = ['rustc-dep-of-std'] }
 
 [target.wasm32-wasi.dependencies]
 wasi = { version = "0.9.0", features = ['rustc-dep-of-std'], default-features = false }

library/std/src/sys/hermit/fs.rs

-4

@@ -334,10 +334,6 @@ impl File {
     pub fn set_permissions(&self, _perm: FilePermissions) -> io::Result<()> {
         Err(Error::from_raw_os_error(22))
     }
-
-    pub fn diverge(&self) -> ! {
-        loop {}
-    }
 }
 
 impl DirBuilder {

library/std/src/sys/hermit/mod.rs

+1

@@ -31,6 +31,7 @@ pub mod net;
 pub mod os;
 pub mod path;
 pub mod pipe;
+#[path = "../unsupported/process.rs"]
 pub mod process;
 pub mod rwlock;
 pub mod stack_overflow;

library/std/src/sys/hermit/mutex.rs

+180 -10

@@ -1,44 +1,214 @@
+use crate::cell::UnsafeCell;
+use crate::collections::VecDeque;
 use crate::ffi::c_void;
+use crate::ops::{Deref, DerefMut, Drop};
 use crate::ptr;
+use crate::sync::atomic::{spin_loop_hint, AtomicUsize, Ordering};
 use crate::sys::hermit::abi;
 
+/// This type provides a lock based on busy waiting to realize mutual exclusion
+///
+/// # Description
+///
+/// This structure behaves a lot like a common mutex. There are some differences:
+///
+/// - By using busy waiting, it can be used outside the runtime.
+/// - It is a so called ticket lock and is completly fair.
+#[cfg_attr(target_arch = "x86_64", repr(align(128)))]
+#[cfg_attr(not(target_arch = "x86_64"), repr(align(64)))]
+struct Spinlock<T: ?Sized> {
+    queue: AtomicUsize,
+    dequeue: AtomicUsize,
+    data: UnsafeCell<T>,
+}
+
+unsafe impl<T: ?Sized + Send> Sync for Spinlock<T> {}
+unsafe impl<T: ?Sized + Send> Send for Spinlock<T> {}
+
+/// A guard to which the protected data can be accessed
+///
+/// When the guard falls out of scope it will release the lock.
+struct SpinlockGuard<'a, T: ?Sized + 'a> {
+    dequeue: &'a AtomicUsize,
+    data: &'a mut T,
+}
+
+impl<T> Spinlock<T> {
+    pub const fn new(user_data: T) -> Spinlock<T> {
+        Spinlock {
+            queue: AtomicUsize::new(0),
+            dequeue: AtomicUsize::new(1),
+            data: UnsafeCell::new(user_data),
+        }
+    }
+
+    #[inline]
+    fn obtain_lock(&self) {
+        let ticket = self.queue.fetch_add(1, Ordering::SeqCst) + 1;
+        while self.dequeue.load(Ordering::SeqCst) != ticket {
+            spin_loop_hint();
+        }
+    }
+
+    #[inline]
+    pub unsafe fn lock(&self) -> SpinlockGuard<'_, T> {
+        self.obtain_lock();
+        SpinlockGuard { dequeue: &self.dequeue, data: &mut *self.data.get() }
+    }
+}
+
+impl<T: ?Sized + Default> Default for Spinlock<T> {
+    fn default() -> Spinlock<T> {
+        Spinlock::new(Default::default())
+    }
+}
+
+impl<'a, T: ?Sized> Deref for SpinlockGuard<'a, T> {
+    type Target = T;
+    fn deref(&self) -> &T {
+        &*self.data
+    }
+}
+
+impl<'a, T: ?Sized> DerefMut for SpinlockGuard<'a, T> {
+    fn deref_mut(&mut self) -> &mut T {
+        &mut *self.data
+    }
+}
+
+impl<'a, T: ?Sized> Drop for SpinlockGuard<'a, T> {
+    /// The dropping of the SpinlockGuard will release the lock it was created from.
+    fn drop(&mut self) {
+        self.dequeue.fetch_add(1, Ordering::SeqCst);
+    }
+}
+
+/// Realize a priority queue for tasks
+struct PriorityQueue {
+    queues: [Option<VecDeque<abi::Tid>>; abi::NO_PRIORITIES],
+    prio_bitmap: u64,
+}
+
+impl PriorityQueue {
+    pub const fn new() -> PriorityQueue {
+        PriorityQueue {
+            queues: [
+                None, None, None, None, None, None, None, None, None, None, None, None, None, None,
+                None, None, None, None, None, None, None, None, None, None, None, None, None, None,
+                None, None, None,
+            ],
+            prio_bitmap: 0,
+        }
+    }
+
+    /// Add a task id by its priority to the queue
+    pub fn push(&mut self, prio: abi::Priority, id: abi::Tid) {
+        let i: usize = prio.into().into();
+        self.prio_bitmap |= (1 << i) as u64;
+        if let Some(queue) = &mut self.queues[i] {
+            queue.push_back(id);
+        } else {
+            let mut queue = VecDeque::new();
+            queue.push_back(id);
+            self.queues[i] = Some(queue);
+        }
+    }
+
+    fn pop_from_queue(&mut self, queue_index: usize) -> Option<abi::Tid> {
+        if let Some(queue) = &mut self.queues[queue_index] {
+            let id = queue.pop_front();
+
+            if queue.is_empty() {
+                self.prio_bitmap &= !(1 << queue_index as u64);
+            }
+
+            id
+        } else {
+            None
+        }
+    }
+
+    /// Pop the task handle with the highest priority from the queue
+    pub fn pop(&mut self) -> Option<abi::Tid> {
+        for i in 0..abi::NO_PRIORITIES {
+            if self.prio_bitmap & (1 << i) != 0 {
+                return self.pop_from_queue(i);
+            }
+        }
+
+        None
+    }
+}
+
+struct MutexInner {
+    locked: bool,
+    blocked_task: PriorityQueue,
+}
+
+impl MutexInner {
+    pub const fn new() -> MutexInner {
+        MutexInner { locked: false, blocked_task: PriorityQueue::new() }
+    }
+}
+
 pub struct Mutex {
-    inner: *const c_void,
+    inner: Spinlock<MutexInner>,
 }
 
+pub type MovableMutex = Box<Mutex>;
+
 unsafe impl Send for Mutex {}
 unsafe impl Sync for Mutex {}
 
 impl Mutex {
     pub const fn new() -> Mutex {
-        Mutex { inner: ptr::null() }
+        Mutex { inner: Spinlock::new(MutexInner::new()) }
     }
 
     #[inline]
     pub unsafe fn init(&mut self) {
-        let _ = abi::sem_init(&mut self.inner as *mut *const c_void, 1);
+        self.inner = Spinlock::new(MutexInner::new());
     }
 
     #[inline]
     pub unsafe fn lock(&self) {
-        let _ = abi::sem_timedwait(self.inner, 0);
+        loop {
+            let mut guard = self.inner.lock();
+            if guard.locked == false {
+                guard.locked = true;
+                return;
+            } else {
+                let prio = abi::get_priority();
+                let id = abi::getpid();
+
+                guard.blocked_task.push(prio, id);
+                abi::block_current_task();
+                drop(guard);
+                abi::yield_now();
+            }
+        }
     }
 
     #[inline]
     pub unsafe fn unlock(&self) {
-        let _ = abi::sem_post(self.inner);
+        let mut guard = self.inner.lock();
+        guard.locked = false;
+        if let Some(tid) = guard.blocked_task.pop() {
+            abi::wakeup_task(tid);
+        }
     }
 
     #[inline]
     pub unsafe fn try_lock(&self) -> bool {
-        let result = abi::sem_trywait(self.inner);
-        result == 0
+        let mut guard = self.inner.lock();
+        if guard.locked == false {
+            guard.locked = true;
+        }
+        guard.locked
     }
 
     #[inline]
-    pub unsafe fn destroy(&self) {
-        let _ = abi::sem_destroy(self.inner);
-    }
+    pub unsafe fn destroy(&self) {}
 }
 
 pub struct ReentrantMutex {
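As a reading aid for the blocking path in Mutex::lock/unlock above, here is a standalone sketch of the same protocol built only from ordinary std primitives: a std::sync::Mutex stands in for the ticket spinlock, a plain FIFO VecDeque<Thread> for the priority queue, and thread::park/unpark for abi::block_current_task/abi::wakeup_task. The names BlockingMutex and Inner are illustrative, not part of the commit.

// Standalone sketch of the blocking protocol used by the new hermit Mutex,
// with std primitives replacing the kernel ABI; not the committed code.
use std::collections::VecDeque;
use std::sync::{Arc, Mutex as StdMutex};
use std::thread::{self, Thread};
use std::time::Duration;

// State protected by the inner lock, mirroring MutexInner in the diff.
struct Inner {
    locked: bool,
    blocked: VecDeque<Thread>, // parked waiters, FIFO here instead of by priority
}

pub struct BlockingMutex {
    inner: StdMutex<Inner>,
}

impl BlockingMutex {
    pub fn new() -> Self {
        BlockingMutex { inner: StdMutex::new(Inner { locked: false, blocked: VecDeque::new() }) }
    }

    pub fn lock(&self) {
        loop {
            let mut inner = self.inner.lock().unwrap();
            if !inner.locked {
                inner.locked = true;
                return;
            }
            // Queue ourselves, release the inner lock, then sleep until unlock() wakes us.
            inner.blocked.push_back(thread::current());
            drop(inner);
            thread::park();
            // Loop and re-check the flag, exactly as the committed lock() does.
        }
    }

    pub fn unlock(&self) {
        let mut inner = self.inner.lock().unwrap();
        inner.locked = false;
        if let Some(waiter) = inner.blocked.pop_front() {
            waiter.unpark(); // counterpart of abi::wakeup_task(tid)
        }
    }
}

fn main() {
    let m = Arc::new(BlockingMutex::new());
    m.lock();
    let m2 = Arc::clone(&m);
    let handle = thread::spawn(move || {
        m2.lock(); // blocks until the main thread calls unlock()
        m2.unlock();
    });
    thread::sleep(Duration::from_millis(50));
    m.unlock();
    handle.join().unwrap();
}

One ordering difference is worth noting: because an unpark issued before the matching park leaves a token that makes park return immediately, the sketch can release the inner lock before sleeping. The committed code achieves the same lost-wakeup protection differently, by calling abi::block_current_task() while still holding the spinlock guard and only yielding after dropping it.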
