Commit 8c37fdf

std: make ReentrantMutex movable and const; simplify Stdout initialization

1 parent 75b7e52 commit 8c37fdf

12 files changed: +39 -165 lines

library/std/src/io/stdio.rs (+16 -25)

@@ -8,7 +8,6 @@ use crate::io::prelude::*;
 use crate::cell::{Cell, RefCell};
 use crate::fmt;
 use crate::io::{self, BufReader, IoSlice, IoSliceMut, LineWriter, Lines};
-use crate::pin::Pin;
 use crate::sync::atomic::{AtomicBool, Ordering};
 use crate::sync::{Arc, Mutex, MutexGuard, OnceLock};
 use crate::sys::stdio;
@@ -526,7 +525,7 @@ pub struct Stdout {
     // FIXME: this should be LineWriter or BufWriter depending on the state of
     // stdout (tty or not). Note that if this is not line buffered it
     // should also flush-on-panic or some form of flush-on-abort.
-    inner: Pin<&'static ReentrantMutex<RefCell<LineWriter<StdoutRaw>>>>,
+    inner: &'static ReentrantMutex<RefCell<LineWriter<StdoutRaw>>>,
 }

 /// A locked reference to the [`Stdout`] handle.
@@ -603,24 +602,20 @@ static STDOUT: OnceLock<ReentrantMutex<RefCell<LineWriter<StdoutRaw>>>> = OnceLo
 #[stable(feature = "rust1", since = "1.0.0")]
 pub fn stdout() -> Stdout {
     Stdout {
-        inner: Pin::static_ref(&STDOUT).get_or_init_pin(
-            || unsafe { ReentrantMutex::new(RefCell::new(LineWriter::new(stdout_raw()))) },
-            |mutex| unsafe { mutex.init() },
-        ),
+        inner: STDOUT
+            .get_or_init(|| ReentrantMutex::new(RefCell::new(LineWriter::new(stdout_raw())))),
     }
 }

 pub fn cleanup() {
-    if let Some(instance) = STDOUT.get() {
-        // Flush the data and disable buffering during shutdown
-        // by replacing the line writer by one with zero
-        // buffering capacity.
-        // We use try_lock() instead of lock(), because someone
-        // might have leaked a StdoutLock, which would
-        // otherwise cause a deadlock here.
-        if let Some(lock) = Pin::static_ref(instance).try_lock() {
-            *lock.borrow_mut() = LineWriter::with_capacity(0, stdout_raw());
-        }
+    // Flush the data and disable buffering during shutdown
+    // by replacing the line writer by one with zero
+    // buffering capacity.
+    // We use try_lock() instead of lock(), because someone
+    // might have leaked a StdoutLock, which would
+    // otherwise cause a deadlock here.
+    if let Some(lock) = STDOUT.get().and_then(ReentrantMutex::try_lock) {
+        *lock.borrow_mut() = LineWriter::with_capacity(0, stdout_raw());
     }
 }
@@ -761,7 +756,7 @@ impl fmt::Debug for StdoutLock<'_> {
 /// standard library or via raw Windows API calls, will fail.
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct Stderr {
-    inner: Pin<&'static ReentrantMutex<RefCell<StderrRaw>>>,
+    inner: &'static ReentrantMutex<RefCell<StderrRaw>>,
 }

 /// A locked reference to the [`Stderr`] handle.
@@ -834,16 +829,12 @@ pub struct StderrLock<'a> {
 #[stable(feature = "rust1", since = "1.0.0")]
 pub fn stderr() -> Stderr {
     // Note that unlike `stdout()` we don't use `at_exit` here to register a
-    // destructor. Stderr is not buffered , so there's no need to run a
+    // destructor. Stderr is not buffered, so there's no need to run a
    // destructor for flushing the buffer
-    static INSTANCE: OnceLock<ReentrantMutex<RefCell<StderrRaw>>> = OnceLock::new();
+    static INSTANCE: ReentrantMutex<RefCell<StderrRaw>> =
+        ReentrantMutex::new(RefCell::new(stderr_raw()));

-    Stderr {
-        inner: Pin::static_ref(&INSTANCE).get_or_init_pin(
-            || unsafe { ReentrantMutex::new(RefCell::new(stderr_raw())) },
-            |mutex| unsafe { mutex.init() },
-        ),
-    }
+    Stderr { inner: &INSTANCE }
 }

 impl Stderr {
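The two handles now use the two patterns this commit enables: `stdout()` keeps its lazily-created global but goes through the ordinary `OnceLock::get_or_init`, while `stderr()` drops `OnceLock` entirely because a const, movable `ReentrantMutex::new` can live in a plain `static`. A minimal sketch of both patterns using only public types follows; the internal `ReentrantMutex` is not exported, so a plain `std::sync::Mutex` stands in for it, and the point is the shape of the initialization, not std's actual types.

```rust
use std::io::{LineWriter, Stdout, Write, stdout};
use std::sync::{Mutex, OnceLock};

// stdout-style: a lazily-initialized global. `get_or_init` runs the closure at
// most once and then hands out `&'static` references; no pinning and no
// separate `init` call are needed once the mutex type is movable.
static GLOBAL: OnceLock<Mutex<LineWriter<Stdout>>> = OnceLock::new();

fn global() -> &'static Mutex<LineWriter<Stdout>> {
    GLOBAL.get_or_init(|| Mutex::new(LineWriter::new(stdout())))
}

// stderr-style: with a const constructor the OnceLock disappears entirely and
// the value lives in a plain `static` (a unit value stands in for StderrRaw).
static EAGER: Mutex<()> = Mutex::new(());

fn main() {
    let mut writer = global().lock().unwrap();
    writeln!(writer, "written through the lazily-created global").unwrap();
    drop(writer);

    let _guard = EAGER.lock().unwrap();
}
```

Neither path needs `Pin`, an `unsafe` constructor, or a second initialization step any more, which is what lets the diff above delete so much code.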

library/std/src/sync/once_lock.rs (-55)

@@ -3,7 +3,6 @@ use crate::fmt;
 use crate::marker::PhantomData;
 use crate::mem::MaybeUninit;
 use crate::panic::{RefUnwindSafe, UnwindSafe};
-use crate::pin::Pin;
 use crate::sync::Once;

 /// A synchronization primitive which can be written to only once.
@@ -223,60 +222,6 @@ impl<T> OnceLock<T> {
         Ok(unsafe { self.get_unchecked() })
     }

-    /// Internal-only API that gets the contents of the cell, initializing it
-    /// in two steps with `f` and `g` if the cell was empty.
-    ///
-    /// `f` is called to construct the value, which is then moved into the cell
-    /// and given as a (pinned) mutable reference to `g` to finish
-    /// initialization.
-    ///
-    /// This allows `g` to inspect an manipulate the value after it has been
-    /// moved into its final place in the cell, but before the cell is
-    /// considered initialized.
-    ///
-    /// # Panics
-    ///
-    /// If `f` or `g` panics, the panic is propagated to the caller, and the
-    /// cell remains uninitialized.
-    ///
-    /// With the current implementation, if `g` panics, the value from `f` will
-    /// not be dropped. This should probably be fixed if this is ever used for
-    /// a type where this matters.
-    ///
-    /// It is an error to reentrantly initialize the cell from `f`. The exact
-    /// outcome is unspecified. Current implementation deadlocks, but this may
-    /// be changed to a panic in the future.
-    pub(crate) fn get_or_init_pin<F, G>(self: Pin<&Self>, f: F, g: G) -> Pin<&T>
-    where
-        F: FnOnce() -> T,
-        G: FnOnce(Pin<&mut T>),
-    {
-        if let Some(value) = self.get_ref().get() {
-            // SAFETY: The inner value was already initialized, and will not be
-            // moved anymore.
-            return unsafe { Pin::new_unchecked(value) };
-        }
-
-        let slot = &self.value;
-
-        // Ignore poisoning from other threads
-        // If another thread panics, then we'll be able to run our closure
-        self.once.call_once_force(|_| {
-            let value = f();
-            // SAFETY: We use the Once (self.once) to guarantee unique access
-            // to the UnsafeCell (slot).
-            let value: &mut T = unsafe { (&mut *slot.get()).write(value) };
-            // SAFETY: The value has been written to its final place in
-            // self.value. We do not to move it anymore, which we promise here
-            // with a Pin<&mut T>.
-            g(unsafe { Pin::new_unchecked(value) });
-        });
-
-        // SAFETY: The inner value has been initialized, and will not be moved
-        // anymore.
-        unsafe { Pin::new_unchecked(self.get_ref().get_unchecked()) }
-    }
-
     /// Consumes the `OnceLock`, returning the wrapped value. Returns
     /// `None` if the cell was empty.
     ///
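With `get_or_init_pin` gone, the only initialization path left here is the public `get_or_init`, whose documented panic behavior matches the removed comment above: a panicking initializer propagates the panic and leaves the cell empty, so a later call can try again. A small sketch of that behavior, using an arbitrary `u32` cell rather than anything from this commit:

```rust
use std::panic::{catch_unwind, AssertUnwindSafe};
use std::sync::OnceLock;

static CELL: OnceLock<u32> = OnceLock::new();

fn main() {
    // An initializer that panics leaves the cell uninitialized...
    let attempt = catch_unwind(AssertUnwindSafe(|| {
        CELL.get_or_init(|| panic!("first initialization fails"))
    }));
    assert!(attempt.is_err());
    assert!(CELL.get().is_none());

    // ...so a later call can run a different initializer successfully.
    assert_eq!(*CELL.get_or_init(|| 42), 42);
}
```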

library/std/src/sys/hermit/mutex.rs (-3)

@@ -174,9 +174,6 @@ impl Mutex {
         Mutex { inner: Spinlock::new(MutexInner::new()) }
     }

-    #[inline]
-    pub unsafe fn init(&mut self) {}
-
     #[inline]
     pub unsafe fn lock(&self) {
         loop {

library/std/src/sys/itron/mutex.rs (-6)

@@ -31,12 +31,6 @@ impl Mutex {
         Mutex { mtx: SpinIdOnceCell::new() }
     }

-    pub unsafe fn init(&mut self) {
-        // Initialize `self.mtx` eagerly
-        let id = new_mtx().unwrap_or_else(|e| fail(e, &"acre_mtx"));
-        unsafe { self.mtx.set_unchecked((id, ())) };
-    }
-
     /// Get the inner mutex's ID, which is lazily created.
     fn raw(&self) -> abi::ID {
         match self.mtx.get_or_try_init(|| new_mtx().map(|id| (id, ()))) {

library/std/src/sys/sgx/mutex.rs (-3)

@@ -20,9 +20,6 @@ impl Mutex {
         Mutex { inner: SpinMutex::new(WaitVariable::new(false)) }
     }

-    #[inline]
-    pub unsafe fn init(&mut self) {}
-
     #[inline]
     pub unsafe fn lock(&self) {
         let mut guard = self.inner.lock();

library/std/src/sys/unix/locks/fuchsia_mutex.rs (-3)

@@ -85,9 +85,6 @@ impl Mutex {
         Mutex { futex: AtomicU32::new(UNLOCKED) }
     }

-    #[inline]
-    pub unsafe fn init(&mut self) {}
-
     #[inline]
     pub unsafe fn try_lock(&self) -> bool {
         let thread_self = zx_thread_self();

library/std/src/sys/unix/locks/futex_mutex.rs (-3)

@@ -19,9 +19,6 @@ impl Mutex {
         Self { futex: AtomicU32::new(0) }
     }

-    #[inline]
-    pub unsafe fn init(&mut self) {}
-
     #[inline]
     pub unsafe fn try_lock(&self) -> bool {
         self.futex.compare_exchange(0, 1, Acquire, Relaxed).is_ok()

library/std/src/sys/unix/locks/pthread_mutex.rs (+1 -1)

@@ -52,7 +52,7 @@ impl Mutex {
         Mutex { inner: UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER) }
     }
     #[inline]
-    pub unsafe fn init(&mut self) {
+    unsafe fn init(&mut self) {
         // Issue #33770
         //
         // A pthread mutex initialized with PTHREAD_MUTEX_INITIALIZER will have
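The pthread `init` that merely loses its `pub` here is the one platform hook that was never a no-op. The comment it guards (truncated above, see Issue #33770) concerns `PTHREAD_MUTEX_INITIALIZER`: POSIX leaves re-locking a default-type mutex from the thread that already holds it undefined, whereas an explicit `PTHREAD_MUTEX_NORMAL` type makes that case a predictable deadlock. The sketch below shows that kind of setup through the `libc` crate; it assumes a Unix target with `libc` as a dependency and is an illustration of the technique, not std's exact code.

```rust
// Assumes a Unix target and the `libc` crate; an illustration of the
// PTHREAD_MUTEX_NORMAL setup, not std's exact code.
use std::mem::MaybeUninit;

fn main() {
    unsafe {
        let mut attr = MaybeUninit::<libc::pthread_mutexattr_t>::uninit();
        assert_eq!(libc::pthread_mutexattr_init(attr.as_mut_ptr()), 0);
        // NORMAL (as opposed to the default type) makes re-locking from the
        // owning thread a defined, reliable deadlock instead of undefined behavior.
        assert_eq!(
            libc::pthread_mutexattr_settype(attr.as_mut_ptr(), libc::PTHREAD_MUTEX_NORMAL),
            0
        );

        let mut mutex = MaybeUninit::<libc::pthread_mutex_t>::uninit();
        assert_eq!(libc::pthread_mutex_init(mutex.as_mut_ptr(), attr.as_ptr()), 0);
        assert_eq!(libc::pthread_mutexattr_destroy(attr.as_mut_ptr()), 0);

        assert_eq!(libc::pthread_mutex_lock(mutex.as_mut_ptr()), 0);
        assert_eq!(libc::pthread_mutex_unlock(mutex.as_mut_ptr()), 0);
        assert_eq!(libc::pthread_mutex_destroy(mutex.as_mut_ptr()), 0);
    }
}
```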

library/std/src/sys/unsupported/locks/mutex.rs (-3)

@@ -16,9 +16,6 @@ impl Mutex {
         Mutex { locked: Cell::new(false) }
     }

-    #[inline]
-    pub unsafe fn init(&mut self) {}
-
     #[inline]
     pub unsafe fn lock(&self) {
         assert_eq!(self.locked.replace(true), false, "cannot recursively acquire mutex");

library/std/src/sys/windows/locks/mutex.rs (-2)

@@ -37,8 +37,6 @@ impl Mutex {
     pub const fn new() -> Mutex {
         Mutex { srwlock: UnsafeCell::new(c::SRWLOCK_INIT) }
     }
-    #[inline]
-    pub unsafe fn init(&mut self) {}

     #[inline]
     pub unsafe fn lock(&self) {

library/std/src/sys_common/remutex.rs (+12 -34)

@@ -1,13 +1,11 @@
 #[cfg(all(test, not(target_os = "emscripten")))]
 mod tests;

+use super::mutex as sys;
 use crate::cell::UnsafeCell;
-use crate::marker::PhantomPinned;
 use crate::ops::Deref;
 use crate::panic::{RefUnwindSafe, UnwindSafe};
-use crate::pin::Pin;
 use crate::sync::atomic::{AtomicUsize, Ordering::Relaxed};
-use crate::sys::locks as sys;

 /// A re-entrant mutual exclusion
 ///
@@ -41,11 +39,10 @@ use crate::sys::locks as sys;
 /// synchronization is left to the mutex, making relaxed memory ordering for
 /// the `owner` field fine in all cases.
 pub struct ReentrantMutex<T> {
-    mutex: sys::Mutex,
+    mutex: sys::MovableMutex,
     owner: AtomicUsize,
     lock_count: UnsafeCell<u32>,
     data: T,
-    _pinned: PhantomPinned,
 }

 unsafe impl<T: Send> Send for ReentrantMutex<T> {}
@@ -68,39 +65,22 @@ impl<T> RefUnwindSafe for ReentrantMutex<T> {}
 /// guarded data.
 #[must_use = "if unused the ReentrantMutex will immediately unlock"]
 pub struct ReentrantMutexGuard<'a, T: 'a> {
-    lock: Pin<&'a ReentrantMutex<T>>,
+    lock: &'a ReentrantMutex<T>,
 }

 impl<T> !Send for ReentrantMutexGuard<'_, T> {}

 impl<T> ReentrantMutex<T> {
     /// Creates a new reentrant mutex in an unlocked state.
-    ///
-    /// # Unsafety
-    ///
-    /// This function is unsafe because it is required that `init` is called
-    /// once this mutex is in its final resting place, and only then are the
-    /// lock/unlock methods safe.
-    pub const unsafe fn new(t: T) -> ReentrantMutex<T> {
+    pub const fn new(t: T) -> ReentrantMutex<T> {
         ReentrantMutex {
-            mutex: sys::Mutex::new(),
+            mutex: sys::MovableMutex::new(),
             owner: AtomicUsize::new(0),
             lock_count: UnsafeCell::new(0),
             data: t,
-            _pinned: PhantomPinned,
         }
     }

-    /// Initializes this mutex so it's ready for use.
-    ///
-    /// # Unsafety
-    ///
-    /// Unsafe to call more than once, and must be called after this will no
-    /// longer move in memory.
-    pub unsafe fn init(self: Pin<&mut Self>) {
-        self.get_unchecked_mut().mutex.init()
-    }
-
     /// Acquires a mutex, blocking the current thread until it is able to do so.
     ///
     /// This function will block the caller until it is available to acquire the mutex.
@@ -113,15 +93,14 @@ impl<T> ReentrantMutex<T> {
     /// If another user of this mutex panicked while holding the mutex, then
     /// this call will return failure if the mutex would otherwise be
     /// acquired.
-    pub fn lock(self: Pin<&Self>) -> ReentrantMutexGuard<'_, T> {
+    pub fn lock(&self) -> ReentrantMutexGuard<'_, T> {
         let this_thread = current_thread_unique_ptr();
-        // Safety: We only touch lock_count when we own the lock,
-        // and since self is pinned we can safely call the lock() on the mutex.
+        // Safety: We only touch lock_count when we own the lock.
         unsafe {
             if self.owner.load(Relaxed) == this_thread {
                 self.increment_lock_count();
             } else {
-                self.mutex.lock();
+                self.mutex.raw_lock();
                 self.owner.store(this_thread, Relaxed);
                 debug_assert_eq!(*self.lock_count.get(), 0);
                 *self.lock_count.get() = 1;
@@ -142,10 +121,9 @@ impl<T> ReentrantMutex<T> {
     /// If another user of this mutex panicked while holding the mutex, then
     /// this call will return failure if the mutex would otherwise be
     /// acquired.
-    pub fn try_lock(self: Pin<&Self>) -> Option<ReentrantMutexGuard<'_, T>> {
+    pub fn try_lock(&self) -> Option<ReentrantMutexGuard<'_, T>> {
         let this_thread = current_thread_unique_ptr();
-        // Safety: We only touch lock_count when we own the lock,
-        // and since self is pinned we can safely call the try_lock on the mutex.
+        // Safety: We only touch lock_count when we own the lock.
        unsafe {
             if self.owner.load(Relaxed) == this_thread {
                 self.increment_lock_count();
@@ -179,12 +157,12 @@ impl<T> Deref for ReentrantMutexGuard<'_, T> {
 impl<T> Drop for ReentrantMutexGuard<'_, T> {
     #[inline]
     fn drop(&mut self) {
-        // Safety: We own the lock, and the lock is pinned.
+        // Safety: We own the lock.
         unsafe {
             *self.lock.lock_count.get() -= 1;
             if *self.lock.lock_count.get() == 0 {
                 self.lock.owner.store(0, Relaxed);
-                self.lock.mutex.unlock();
+                self.lock.mutex.raw_unlock();
             }
         }
     }
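The locking scheme itself is unchanged by this commit: an `owner` word holding a thread id, a `lock_count` touched only by the owner, and an underlying non-reentrant lock, now reachable through an ordinary `&self` because nothing has to stay pinned. The standalone sketch below mirrors that structure with a toy spinlock and a counter-based thread id; it illustrates the scheme, not std's implementation, which uses `sys::MovableMutex` and `current_thread_unique_ptr()` as shown in the diff.

```rust
use std::cell::UnsafeCell;
use std::hint;
use std::sync::atomic::{
    AtomicBool, AtomicU64,
    Ordering::{Acquire, Relaxed, Release},
};

// Hands out a small unique id per thread; std instead uses the address of a
// thread-local as the id, but any per-thread unique value works for the sketch.
static NEXT_THREAD_ID: AtomicU64 = AtomicU64::new(1);
thread_local! {
    static THREAD_ID: u64 = NEXT_THREAD_ID.fetch_add(1, Relaxed);
}

pub struct ToyReentrantLock {
    locked: AtomicBool,          // the underlying non-reentrant lock (a spinlock here)
    owner: AtomicU64,            // 0 = unowned, otherwise the owning thread's id
    lock_count: UnsafeCell<u32>, // only touched by the thread that owns the lock
}

// Needed because UnsafeCell is !Sync; the owner/count discipline above is what
// makes sharing sound, mirroring the manual Sync/Send impls in remutex.rs.
unsafe impl Sync for ToyReentrantLock {}

impl ToyReentrantLock {
    // A `const fn new` with no `unsafe` and no later `init` step is the point
    // of the commit: the lock can be built in a `static` or moved freely.
    pub const fn new() -> Self {
        Self {
            locked: AtomicBool::new(false),
            owner: AtomicU64::new(0),
            lock_count: UnsafeCell::new(0),
        }
    }

    pub fn lock(&self) {
        let me = THREAD_ID.with(|id| *id);
        if self.owner.load(Relaxed) == me {
            // Re-entry from the owning thread: just bump the count.
            unsafe { *self.lock_count.get() += 1 };
        } else {
            // First acquisition (or contention): take the underlying lock.
            while self.locked.swap(true, Acquire) {
                hint::spin_loop();
            }
            self.owner.store(me, Relaxed);
            unsafe { *self.lock_count.get() = 1 };
        }
    }

    pub fn unlock(&self) {
        // Must only be called by the owning thread (a real guard type enforces this).
        unsafe {
            *self.lock_count.get() -= 1;
            if *self.lock_count.get() == 0 {
                self.owner.store(0, Relaxed);
                self.locked.store(false, Release);
            }
        }
    }
}

// Because `new` is const, the lock can live directly in a `static`,
// just like the new `stderr()` does with its ReentrantMutex.
static LOCK: ToyReentrantLock = ToyReentrantLock::new();

fn main() {
    LOCK.lock();
    LOCK.lock(); // re-locking from the same thread does not deadlock
    LOCK.unlock();
    LOCK.unlock();
}
```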
