From dbb451cce66ad4a1db3678b2ca26a630405d2899 Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Sat, 9 Mar 2024 20:01:52 +0100 Subject: [PATCH] windows: remove support for slim rwlock Since https://github.com/rust-lang/rust/pull/121956 we don't need it any more, and we are generally short on Windows staff so reducing the amount of code we have to test and maintain sounds like a good idea. The InitOnce stuff is still used by `thread_local_key::StaticKey`. --- src/shims/windows/foreign_items.rs | 45 --- src/shims/windows/sync.rs | 271 ------------------ .../concurrency/windows_condvar_shared.rs | 226 --------------- .../concurrency/windows_condvar_shared.stdout | 60 ---- 4 files changed, 602 deletions(-) delete mode 100644 tests/pass-dep/concurrency/windows_condvar_shared.rs delete mode 100644 tests/pass-dep/concurrency/windows_condvar_shared.stdout diff --git a/src/shims/windows/foreign_items.rs b/src/shims/windows/foreign_items.rs index 9e0baf86cb..f942b72f0e 100644 --- a/src/shims/windows/foreign_items.rs +++ b/src/shims/windows/foreign_items.rs @@ -297,32 +297,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { } // Synchronization primitives - "AcquireSRWLockExclusive" => { - let [ptr] = this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?; - this.AcquireSRWLockExclusive(ptr)?; - } - "ReleaseSRWLockExclusive" => { - let [ptr] = this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?; - this.ReleaseSRWLockExclusive(ptr)?; - } - "TryAcquireSRWLockExclusive" => { - let [ptr] = this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?; - let ret = this.TryAcquireSRWLockExclusive(ptr)?; - this.write_scalar(ret, dest)?; - } - "AcquireSRWLockShared" => { - let [ptr] = this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?; - this.AcquireSRWLockShared(ptr)?; - } - "ReleaseSRWLockShared" => { - let [ptr] = this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?; - this.ReleaseSRWLockShared(ptr)?; - } - "TryAcquireSRWLockShared" => { - let [ptr] = this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?; - let ret = this.TryAcquireSRWLockShared(ptr)?; - this.write_scalar(ret, dest)?; - } "InitOnceBeginInitialize" => { let [ptr, flags, pending, context] = this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?; @@ -335,25 +309,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { let result = this.InitOnceComplete(ptr, flags, context)?; this.write_scalar(result, dest)?; } - "SleepConditionVariableSRW" => { - let [condvar, lock, timeout, flags] = - this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?; - - let result = this.SleepConditionVariableSRW(condvar, lock, timeout, flags, dest)?; - this.write_scalar(result, dest)?; - } - "WakeConditionVariable" => { - let [condvar] = - this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?; - - this.WakeConditionVariable(condvar)?; - } - "WakeAllConditionVariable" => { - let [condvar] = - this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?; - - this.WakeAllConditionVariable(condvar)?; - } "WaitOnAddress" => { let [ptr_op, compare_op, size_op, timeout_op] = this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?; diff --git a/src/shims/windows/sync.rs b/src/shims/windows/sync.rs index 5edc18af48..f02939f888 100644 --- a/src/shims/windows/sync.rs +++ b/src/shims/windows/sync.rs @@ -3,52 +3,14 @@ use std::time::Duration; use 
rustc_target::abi::Size; use crate::concurrency::init_once::InitOnceStatus; -use crate::concurrency::sync::{CondvarLock, RwLockMode}; use crate::concurrency::thread::MachineCallback; use crate::*; impl<'mir, 'tcx> EvalContextExtPriv<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {} trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { - /// Try to reacquire the lock associated with the condition variable after we - /// were signaled. - fn reacquire_cond_lock( - &mut self, - thread: ThreadId, - lock: RwLockId, - mode: RwLockMode, - ) -> InterpResult<'tcx> { - let this = self.eval_context_mut(); - this.unblock_thread(thread); - - match mode { - RwLockMode::Read => - if this.rwlock_is_write_locked(lock) { - this.rwlock_enqueue_and_block_reader(lock, thread); - } else { - this.rwlock_reader_lock(lock, thread); - }, - RwLockMode::Write => - if this.rwlock_is_locked(lock) { - this.rwlock_enqueue_and_block_writer(lock, thread); - } else { - this.rwlock_writer_lock(lock, thread); - }, - } - - Ok(()) - } - // Windows sync primitives are pointer sized. // We only use the first 4 bytes for the id. - fn srwlock_get_id( - &mut self, - rwlock_op: &OpTy<'tcx, Provenance>, - ) -> InterpResult<'tcx, RwLockId> { - let this = self.eval_context_mut(); - this.rwlock_get_or_create_id(rwlock_op, this.windows_ty_layout("SRWLOCK"), 0) - } - fn init_once_get_id( &mut self, init_once_op: &OpTy<'tcx, Provenance>, @@ -56,117 +18,11 @@ trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { let this = self.eval_context_mut(); this.init_once_get_or_create_id(init_once_op, this.windows_ty_layout("INIT_ONCE"), 0) } - - fn condvar_get_id( - &mut self, - condvar_op: &OpTy<'tcx, Provenance>, - ) -> InterpResult<'tcx, CondvarId> { - let this = self.eval_context_mut(); - this.condvar_get_or_create_id(condvar_op, this.windows_ty_layout("CONDITION_VARIABLE"), 0) - } } impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {} #[allow(non_snake_case)] pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { - fn AcquireSRWLockExclusive(&mut self, lock_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx> { - let this = self.eval_context_mut(); - let id = this.srwlock_get_id(lock_op)?; - let active_thread = this.get_active_thread(); - - if this.rwlock_is_locked(id) { - // Note: this will deadlock if the lock is already locked by this - // thread in any way. - // - // FIXME: Detect and report the deadlock proactively. (We currently - // report the deadlock only when no thread can continue execution, - // but we could detect that this lock is already locked and report - // an error.) - this.rwlock_enqueue_and_block_writer(id, active_thread); - } else { - this.rwlock_writer_lock(id, active_thread); - } - - Ok(()) - } - - fn TryAcquireSRWLockExclusive( - &mut self, - lock_op: &OpTy<'tcx, Provenance>, - ) -> InterpResult<'tcx, Scalar> { - let this = self.eval_context_mut(); - let id = this.srwlock_get_id(lock_op)?; - let active_thread = this.get_active_thread(); - - if this.rwlock_is_locked(id) { - // Lock is already held. 
- Ok(Scalar::from_u8(0)) - } else { - this.rwlock_writer_lock(id, active_thread); - Ok(Scalar::from_u8(1)) - } - } - - fn ReleaseSRWLockExclusive(&mut self, lock_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx> { - let this = self.eval_context_mut(); - let id = this.srwlock_get_id(lock_op)?; - let active_thread = this.get_active_thread(); - - if !this.rwlock_writer_unlock(id, active_thread) { - // The docs do not say anything about this case, but it seems better to not allow it. - throw_ub_format!( - "calling ReleaseSRWLockExclusive on an SRWLock that is not exclusively locked by the current thread" - ); - } - - Ok(()) - } - - fn AcquireSRWLockShared(&mut self, lock_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx> { - let this = self.eval_context_mut(); - let id = this.srwlock_get_id(lock_op)?; - let active_thread = this.get_active_thread(); - - if this.rwlock_is_write_locked(id) { - this.rwlock_enqueue_and_block_reader(id, active_thread); - } else { - this.rwlock_reader_lock(id, active_thread); - } - - Ok(()) - } - - fn TryAcquireSRWLockShared( - &mut self, - lock_op: &OpTy<'tcx, Provenance>, - ) -> InterpResult<'tcx, Scalar> { - let this = self.eval_context_mut(); - let id = this.srwlock_get_id(lock_op)?; - let active_thread = this.get_active_thread(); - - if this.rwlock_is_write_locked(id) { - Ok(Scalar::from_u8(0)) - } else { - this.rwlock_reader_lock(id, active_thread); - Ok(Scalar::from_u8(1)) - } - } - - fn ReleaseSRWLockShared(&mut self, lock_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx> { - let this = self.eval_context_mut(); - let id = this.srwlock_get_id(lock_op)?; - let active_thread = this.get_active_thread(); - - if !this.rwlock_reader_unlock(id, active_thread) { - // The docs do not say anything about this case, but it seems better to not allow it. 
- throw_ub_format!( - "calling ReleaseSRWLockShared on an SRWLock that is not locked by the current thread" - ); - } - - Ok(()) - } - fn InitOnceBeginInitialize( &mut self, init_once_op: &OpTy<'tcx, Provenance>, @@ -399,131 +255,4 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { Ok(()) } - - fn SleepConditionVariableSRW( - &mut self, - condvar_op: &OpTy<'tcx, Provenance>, - lock_op: &OpTy<'tcx, Provenance>, - timeout_op: &OpTy<'tcx, Provenance>, - flags_op: &OpTy<'tcx, Provenance>, - dest: &MPlaceTy<'tcx, Provenance>, - ) -> InterpResult<'tcx, Scalar> { - let this = self.eval_context_mut(); - - let condvar_id = this.condvar_get_id(condvar_op)?; - let lock_id = this.srwlock_get_id(lock_op)?; - let timeout_ms = this.read_scalar(timeout_op)?.to_u32()?; - let flags = this.read_scalar(flags_op)?.to_u32()?; - - let timeout_time = if timeout_ms == this.eval_windows_u32("c", "INFINITE") { - None - } else { - let duration = Duration::from_millis(timeout_ms.into()); - Some(this.machine.clock.now().checked_add(duration).unwrap()) - }; - - let shared_mode = 0x1; // CONDITION_VARIABLE_LOCKMODE_SHARED is not in std - let mode = if flags == 0 { - RwLockMode::Write - } else if flags == shared_mode { - RwLockMode::Read - } else { - throw_unsup_format!("unsupported `Flags` {flags} in `SleepConditionVariableSRW`"); - }; - - let active_thread = this.get_active_thread(); - - let was_locked = match mode { - RwLockMode::Read => this.rwlock_reader_unlock(lock_id, active_thread), - RwLockMode::Write => this.rwlock_writer_unlock(lock_id, active_thread), - }; - - if !was_locked { - throw_ub_format!( - "calling SleepConditionVariableSRW with an SRWLock that is not locked by the current thread" - ); - } - - this.block_thread(active_thread); - this.condvar_wait(condvar_id, active_thread, CondvarLock::RwLock { id: lock_id, mode }); - - if let Some(timeout_time) = timeout_time { - struct Callback<'tcx> { - thread: ThreadId, - condvar_id: CondvarId, - lock_id: RwLockId, - mode: RwLockMode, - dest: MPlaceTy<'tcx, Provenance>, - } - - impl<'tcx> VisitProvenance for Callback<'tcx> { - fn visit_provenance(&self, visit: &mut VisitWith<'_>) { - let Callback { thread: _, condvar_id: _, lock_id: _, mode: _, dest } = self; - dest.visit_provenance(visit); - } - } - - impl<'mir, 'tcx: 'mir> MachineCallback<'mir, 'tcx> for Callback<'tcx> { - fn call(&self, this: &mut MiriInterpCx<'mir, 'tcx>) -> InterpResult<'tcx> { - this.reacquire_cond_lock(self.thread, self.lock_id, self.mode)?; - - this.condvar_remove_waiter(self.condvar_id, self.thread); - - let error_timeout = this.eval_windows("c", "ERROR_TIMEOUT"); - this.set_last_error(error_timeout)?; - this.write_scalar(this.eval_windows("c", "FALSE"), &self.dest)?; - Ok(()) - } - } - - this.register_timeout_callback( - active_thread, - Time::Monotonic(timeout_time), - Box::new(Callback { - thread: active_thread, - condvar_id, - lock_id, - mode, - dest: dest.clone(), - }), - ); - } - - Ok(this.eval_windows("c", "TRUE")) - } - - fn WakeConditionVariable(&mut self, condvar_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx> { - let this = self.eval_context_mut(); - let condvar_id = this.condvar_get_id(condvar_op)?; - - if let Some((thread, lock)) = this.condvar_signal(condvar_id) { - if let CondvarLock::RwLock { id, mode } = lock { - this.reacquire_cond_lock(thread, id, mode)?; - this.unregister_timeout_callback_if_exists(thread); - } else { - panic!("mutexes should not exist on windows"); - } - } - - Ok(()) - } - - fn WakeAllConditionVariable( - &mut self, 
- condvar_op: &OpTy<'tcx, Provenance>, - ) -> InterpResult<'tcx> { - let this = self.eval_context_mut(); - let condvar_id = this.condvar_get_id(condvar_op)?; - - while let Some((thread, lock)) = this.condvar_signal(condvar_id) { - if let CondvarLock::RwLock { id, mode } = lock { - this.reacquire_cond_lock(thread, id, mode)?; - this.unregister_timeout_callback_if_exists(thread); - } else { - panic!("mutexes should not exist on windows"); - } - } - - Ok(()) - } } diff --git a/tests/pass-dep/concurrency/windows_condvar_shared.rs b/tests/pass-dep/concurrency/windows_condvar_shared.rs deleted file mode 100644 index 5bff9098a5..0000000000 --- a/tests/pass-dep/concurrency/windows_condvar_shared.rs +++ /dev/null @@ -1,226 +0,0 @@ -//@only-target-windows: Uses win32 api functions -// We are making scheduler assumptions here. -//@compile-flags: -Zmiri-preemption-rate=0 - -use std::ptr::null_mut; -use std::thread; - -use windows_sys::Win32::System::Threading::{ - AcquireSRWLockExclusive, AcquireSRWLockShared, ReleaseSRWLockExclusive, ReleaseSRWLockShared, - SleepConditionVariableSRW, WakeAllConditionVariable, CONDITION_VARIABLE, - CONDITION_VARIABLE_LOCKMODE_SHARED, INFINITE, SRWLOCK, -}; - -// not in windows-sys -const SRWLOCK_INIT: SRWLOCK = SRWLOCK { Ptr: null_mut() }; -const CONDITION_VARIABLE_INIT: CONDITION_VARIABLE = CONDITION_VARIABLE { Ptr: null_mut() }; - -#[derive(Copy, Clone)] -struct SendPtr(*mut T); - -unsafe impl Send for SendPtr {} - -/// threads should be able to reacquire the lock while it is locked by multiple other threads in shared mode -fn all_shared() { - println!("all_shared"); - - let mut lock = SRWLOCK_INIT; - let mut condvar = CONDITION_VARIABLE_INIT; - - let lock_ptr = SendPtr(&mut lock); - let condvar_ptr = SendPtr(&mut condvar); - - let mut handles = Vec::with_capacity(10); - - // waiters - for i in 0..5 { - handles.push(thread::spawn(move || { - let condvar_ptr = condvar_ptr; // avoid field capture - let lock_ptr = lock_ptr; // avoid field capture - unsafe { - AcquireSRWLockShared(lock_ptr.0); - } - println!("exclusive waiter {i} locked"); - - let r = unsafe { - SleepConditionVariableSRW( - condvar_ptr.0, - lock_ptr.0, - INFINITE, - CONDITION_VARIABLE_LOCKMODE_SHARED, - ) - }; - assert_ne!(r, 0); - - println!("exclusive waiter {i} reacquired lock"); - - // unlocking is unnecessary because the lock is never used again - })); - } - - // ensures each waiter is waiting by this point - thread::yield_now(); - - // readers - for i in 0..5 { - handles.push(thread::spawn(move || { - let lock_ptr = lock_ptr; // avoid field capture - unsafe { - AcquireSRWLockShared(lock_ptr.0); - } - println!("reader {i} locked"); - - // switch to next reader or main thread - thread::yield_now(); - - unsafe { - ReleaseSRWLockShared(lock_ptr.0); - } - println!("reader {i} unlocked"); - })); - } - - // ensures each reader has acquired the lock - thread::yield_now(); - - unsafe { - WakeAllConditionVariable(condvar_ptr.0); - } - - for handle in handles { - handle.join().unwrap(); - } -} - -// reacquiring a lock should wait until the lock is not exclusively locked -fn shared_sleep_and_exclusive_lock() { - println!("shared_sleep_and_exclusive_lock"); - - let mut lock = SRWLOCK_INIT; - let mut condvar = CONDITION_VARIABLE_INIT; - - let lock_ptr = SendPtr(&mut lock); - let condvar_ptr = SendPtr(&mut condvar); - - let mut waiters = Vec::with_capacity(5); - for i in 0..5 { - waiters.push(thread::spawn(move || { - let lock_ptr = lock_ptr; // avoid field capture - let condvar_ptr = condvar_ptr; // avoid field 
capture - unsafe { - AcquireSRWLockShared(lock_ptr.0); - } - println!("shared waiter {i} locked"); - - let r = unsafe { - SleepConditionVariableSRW( - condvar_ptr.0, - lock_ptr.0, - INFINITE, - CONDITION_VARIABLE_LOCKMODE_SHARED, - ) - }; - assert_ne!(r, 0); - - println!("shared waiter {i} reacquired lock"); - - // unlocking is unnecessary because the lock is never used again - })); - } - - // ensures each waiter is waiting by this point - thread::yield_now(); - - unsafe { - AcquireSRWLockExclusive(lock_ptr.0); - } - println!("main locked"); - - unsafe { - WakeAllConditionVariable(condvar_ptr.0); - } - - // waiters are now waiting for the lock to be unlocked - thread::yield_now(); - - unsafe { - ReleaseSRWLockExclusive(lock_ptr.0); - } - println!("main unlocked"); - - for handle in waiters { - handle.join().unwrap(); - } -} - -// threads reacquiring locks should wait for all locks to be released first -fn exclusive_sleep_and_shared_lock() { - println!("exclusive_sleep_and_shared_lock"); - - let mut lock = SRWLOCK_INIT; - let mut condvar = CONDITION_VARIABLE_INIT; - - let lock_ptr = SendPtr(&mut lock); - let condvar_ptr = SendPtr(&mut condvar); - - let mut handles = Vec::with_capacity(10); - for i in 0..5 { - handles.push(thread::spawn(move || { - let lock_ptr = lock_ptr; // avoid field capture - let condvar_ptr = condvar_ptr; // avoid field capture - unsafe { - AcquireSRWLockExclusive(lock_ptr.0); - } - - println!("exclusive waiter {i} locked"); - - let r = unsafe { SleepConditionVariableSRW(condvar_ptr.0, lock_ptr.0, INFINITE, 0) }; - assert_ne!(r, 0); - - println!("exclusive waiter {i} reacquired lock"); - - // switch to next waiter or main thread - thread::yield_now(); - - unsafe { - ReleaseSRWLockExclusive(lock_ptr.0); - } - println!("exclusive waiter {i} unlocked"); - })); - } - - for i in 0..5 { - handles.push(thread::spawn(move || { - let lock_ptr = lock_ptr; // avoid field capture - unsafe { - AcquireSRWLockShared(lock_ptr.0); - } - println!("reader {i} locked"); - - // switch to next reader or main thread - thread::yield_now(); - - unsafe { - ReleaseSRWLockShared(lock_ptr.0); - } - println!("reader {i} unlocked"); - })); - } - - // ensures each reader has acquired the lock - thread::yield_now(); - - unsafe { - WakeAllConditionVariable(condvar_ptr.0); - } - - for handle in handles { - handle.join().unwrap(); - } -} - -fn main() { - all_shared(); - shared_sleep_and_exclusive_lock(); - exclusive_sleep_and_shared_lock(); -} diff --git a/tests/pass-dep/concurrency/windows_condvar_shared.stdout b/tests/pass-dep/concurrency/windows_condvar_shared.stdout deleted file mode 100644 index 918b54668f..0000000000 --- a/tests/pass-dep/concurrency/windows_condvar_shared.stdout +++ /dev/null @@ -1,60 +0,0 @@ -all_shared -exclusive waiter 0 locked -exclusive waiter 1 locked -exclusive waiter 2 locked -exclusive waiter 3 locked -exclusive waiter 4 locked -reader 0 locked -reader 1 locked -reader 2 locked -reader 3 locked -reader 4 locked -exclusive waiter 0 reacquired lock -exclusive waiter 1 reacquired lock -exclusive waiter 2 reacquired lock -exclusive waiter 3 reacquired lock -exclusive waiter 4 reacquired lock -reader 0 unlocked -reader 1 unlocked -reader 2 unlocked -reader 3 unlocked -reader 4 unlocked -shared_sleep_and_exclusive_lock -shared waiter 0 locked -shared waiter 1 locked -shared waiter 2 locked -shared waiter 3 locked -shared waiter 4 locked -main locked -main unlocked -shared waiter 0 reacquired lock -shared waiter 1 reacquired lock -shared waiter 2 reacquired lock -shared waiter 3 
reacquired lock -shared waiter 4 reacquired lock -exclusive_sleep_and_shared_lock -exclusive waiter 0 locked -exclusive waiter 1 locked -exclusive waiter 2 locked -exclusive waiter 3 locked -exclusive waiter 4 locked -reader 0 locked -reader 1 locked -reader 2 locked -reader 3 locked -reader 4 locked -reader 0 unlocked -reader 1 unlocked -reader 2 unlocked -reader 3 unlocked -reader 4 unlocked -exclusive waiter 0 reacquired lock -exclusive waiter 0 unlocked -exclusive waiter 1 reacquired lock -exclusive waiter 1 unlocked -exclusive waiter 2 reacquired lock -exclusive waiter 2 unlocked -exclusive waiter 3 reacquired lock -exclusive waiter 3 unlocked -exclusive waiter 4 reacquired lock -exclusive waiter 4 unlocked
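
Illustrative sketch (not part of the diff above): with these shims gone, code that bypasses std and calls the SRWLock family directly through windows-sys, as the deleted test did, no longer has a corresponding shim in Miri. The snippet below is a hypothetical minimal reproducer for that situation; the SRWLOCK initializer mirrors the SRWLOCK_INIT constant from the deleted test, and the exact diagnostic Miri emits for an unsupported foreign function is an assumption, not something this patch specifies.

    // Hypothetical reproducer, not taken from the patch. After this change,
    // Miri is expected to reject these calls as unsupported foreign functions
    // rather than interpreting them.
    use std::ptr::null_mut;

    use windows_sys::Win32::System::Threading::{
        AcquireSRWLockExclusive, ReleaseSRWLockExclusive, SRWLOCK,
    };

    fn main() {
        // Same zero-initialized state the deleted test used for SRWLOCK_INIT.
        let mut lock = SRWLOCK { Ptr: null_mut() };
        unsafe {
            AcquireSRWLockExclusive(&mut lock); // no shim for this after the patch
            ReleaseSRWLockExclusive(&mut lock);
        }
    }

Code going through std::sync::RwLock is unaffected, since (per the referenced rust-lang/rust PR) std no longer lowers its locks to the SRWLock API, and the WaitOnAddress/WakeByAddress shims that std does rely on are kept by this patch.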