From 679245769b6984ec5a7edf70fb4744d8411468b8 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Tue, 21 Apr 2020 16:38:14 -0700 Subject: [PATCH 01/17] Implement support for synchronization primitives. --- src/eval.rs | 6 + src/lib.rs | 6 +- src/machine.rs | 8 +- src/shims/foreign_items/posix.rs | 48 +- src/shims/sync.rs | 650 +++++++++++------- src/sync.rs | 299 ++++++++ src/thread.rs | 147 +++- tests/run-pass/concurrency/barrier.rs | 27 + tests/run-pass/concurrency/barrier.stderr | 2 + tests/run-pass/concurrency/barrier.stdout | 20 + tests/run-pass/concurrency/condvar.rs | 28 + tests/run-pass/concurrency/condvar.stderr | 2 + .../run-pass/concurrency/libc_pthread_cond.rs | 199 ++++++ .../concurrency/libc_pthread_cond.stderr | 2 + tests/run-pass/concurrency/mpsc.rs | 56 ++ tests/run-pass/concurrency/mpsc.stderr | 2 + tests/run-pass/concurrency/once.rs | 44 ++ tests/run-pass/concurrency/once.stderr | 2 + 18 files changed, 1276 insertions(+), 272 deletions(-) create mode 100644 src/sync.rs create mode 100644 tests/run-pass/concurrency/barrier.rs create mode 100644 tests/run-pass/concurrency/barrier.stderr create mode 100644 tests/run-pass/concurrency/barrier.stdout create mode 100644 tests/run-pass/concurrency/condvar.rs create mode 100644 tests/run-pass/concurrency/condvar.stderr create mode 100644 tests/run-pass/concurrency/libc_pthread_cond.rs create mode 100644 tests/run-pass/concurrency/libc_pthread_cond.stderr create mode 100644 tests/run-pass/concurrency/mpsc.rs create mode 100644 tests/run-pass/concurrency/mpsc.stderr create mode 100644 tests/run-pass/concurrency/once.rs create mode 100644 tests/run-pass/concurrency/once.stderr diff --git a/src/eval.rs b/src/eval.rs index 5daad7cc06..30901a8f12 100644 --- a/src/eval.rs +++ b/src/eval.rs @@ -210,6 +210,12 @@ pub fn eval_main<'tcx>(tcx: TyCtxt<'tcx>, main_id: DefId, config: MiriConfig) -> SchedulingAction::ExecuteStep => { assert!(ecx.step()?, "a terminated thread was scheduled for execution"); } + 
SchedulingAction::ExecuteCallback => { + assert!(ecx.machine.communicate, + "scheduler callbacks require disabled isolation, but the code \ + that created the callback did not check it"); + ecx.run_scheduler_callback()?; + } SchedulingAction::ExecuteDtors => { // This will either enable the thread again (so we go back // to `ExecuteStep`), or determine that this thread is done diff --git a/src/lib.rs b/src/lib.rs index 0ea0d57cac..e79fc2add3 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -31,6 +31,7 @@ mod operator; mod range_map; mod shims; mod stacked_borrows; +mod sync; mod thread; // Make all those symbols available in the same place as our own. @@ -45,7 +46,7 @@ pub use crate::shims::fs::{DirHandler, EvalContextExt as FileEvalContextExt, Fil pub use crate::shims::intrinsics::EvalContextExt as IntrinsicsEvalContextExt; pub use crate::shims::os_str::EvalContextExt as OsStrEvalContextExt; pub use crate::shims::panic::{CatchUnwindData, EvalContextExt as PanicEvalContextExt}; -pub use crate::shims::sync::{EvalContextExt as SyncEvalContextExt}; +pub use crate::shims::sync::{EvalContextExt as SyncShimsEvalContextExt}; pub use crate::shims::thread::EvalContextExt as ThreadShimsEvalContextExt; pub use crate::shims::time::EvalContextExt as TimeEvalContextExt; pub use crate::shims::tls::{EvalContextExt as TlsEvalContextExt, TlsData}; @@ -70,6 +71,9 @@ pub use crate::stacked_borrows::{ pub use crate::thread::{ EvalContextExt as ThreadsEvalContextExt, SchedulingAction, ThreadId, ThreadManager, ThreadState, }; +pub use crate::sync::{ + EvalContextExt as SyncEvalContextExt, CondvarId, MutexId, RwLockId +}; /// Insert rustc arguments at the beginning of the argument list that Miri wants to be /// set per default, for maximal validation power. 
diff --git a/src/machine.rs b/src/machine.rs index 51aa7ae310..4fb08cd259 100644 --- a/src/machine.rs +++ b/src/machine.rs @@ -5,7 +5,7 @@ use std::borrow::Cow; use std::cell::RefCell; use std::num::NonZeroU64; use std::rc::Rc; -use std::time::Instant; +use std::time::{Instant, SystemTime}; use std::fmt; use log::trace; @@ -251,6 +251,11 @@ pub struct Evaluator<'mir, 'tcx> { /// The "time anchor" for this machine's monotone clock (for `Instant` simulation). pub(crate) time_anchor: Instant, + /// The approximate system time when "time anchor" was created. This is used + /// for converting system time to monotone time so that we can simplify the + /// thread scheduler to deal only with a single representation of time. + pub(crate) time_anchor_timestamp: SystemTime, + /// The set of threads. pub(crate) threads: ThreadManager<'mir, 'tcx>, @@ -281,6 +286,7 @@ impl<'mir, 'tcx> Evaluator<'mir, 'tcx> { dir_handler: Default::default(), panic_payload: None, time_anchor: Instant::now(), + time_anchor_timestamp: SystemTime::now(), layouts, threads: ThreadManager::default(), } diff --git a/src/shims/foreign_items/posix.rs b/src/shims/foreign_items/posix.rs index 09191011a4..352e38113a 100644 --- a/src/shims/foreign_items/posix.rs +++ b/src/shims/foreign_items/posix.rs @@ -330,6 +330,45 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let result = this.pthread_rwlock_destroy(rwlock)?; this.write_scalar(Scalar::from_i32(result), dest)?; } + "pthread_condattr_init" => { + let result = this.pthread_condattr_init(args[0])?; + this.write_scalar(Scalar::from_i32(result), dest)?; + } + "pthread_condattr_setclock" => { + let result = this.pthread_condattr_setclock(args[0], args[1])?; + this.write_scalar(Scalar::from_i32(result), dest)?; + } + "pthread_condattr_getclock" => { + let result = this.pthread_condattr_getclock(args[0], args[1])?; + this.write_scalar(Scalar::from_i32(result), dest)?; + } + "pthread_condattr_destroy" => { + let result = 
this.pthread_condattr_destroy(args[0])?; + this.write_scalar(Scalar::from_i32(result), dest)?; + } + "pthread_cond_init" => { + let result = this.pthread_cond_init(args[0], args[1])?; + this.write_scalar(Scalar::from_i32(result), dest)?; + } + "pthread_cond_signal" => { + let result = this.pthread_cond_signal(args[0])?; + this.write_scalar(Scalar::from_i32(result), dest)?; + } + "pthread_cond_broadcast" => { + let result = this.pthread_cond_broadcast(args[0])?; + this.write_scalar(Scalar::from_i32(result), dest)?; + } + "pthread_cond_wait" => { + let result = this.pthread_cond_wait(args[0], args[1])?; + this.write_scalar(Scalar::from_i32(result), dest)?; + } + "pthread_cond_timedwait" => { + this.pthread_cond_timedwait(args[0], args[1], args[2], dest)?; + } + "pthread_cond_destroy" => { + let result = this.pthread_cond_destroy(args[0])?; + this.write_scalar(Scalar::from_i32(result), dest)?; + } // Threading "pthread_create" => { @@ -391,16 +430,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx | "pthread_attr_init" | "pthread_attr_destroy" - | "pthread_condattr_init" - | "pthread_condattr_destroy" - | "pthread_cond_destroy" - if this.frame().instance.to_string().starts_with("std::sys::unix::") => { - let &[_] = check_arg_count(args)?; - this.write_null(dest)?; - } - | "pthread_cond_init" | "pthread_attr_setstacksize" - | "pthread_condattr_setclock" if this.frame().instance.to_string().starts_with("std::sys::unix::") => { let &[_, _] = check_arg_count(args)?; this.write_null(dest)?; diff --git a/src/shims/sync.rs b/src/shims/sync.rs index c205c5c8dd..dfd7999457 100644 --- a/src/shims/sync.rs +++ b/src/shims/sync.rs @@ -1,8 +1,10 @@ +use std::time::{Duration, SystemTime}; + use rustc_middle::ty::{layout::TyAndLayout, TyKind, TypeAndMut}; use rustc_target::abi::{LayoutOf, Size}; use crate::stacked_borrows::Tag; -use crate::thread::BlockSetId; + use crate::*; fn assert_ptr_target_min_size<'mir, 'tcx: 'mir>( @@ -76,45 +78,12 @@ fn 
mutexattr_set_kind<'mir, 'tcx: 'mir>( // Our chosen memory layout for the emulated mutex (does not have to match the platform layout!): // bytes 0-3: reserved for signature on macOS // (need to avoid this because it is set by static initializer macros) -// bytes 4-7: count of how many times this mutex has been locked, as a u32 -// bytes 8-11: when count > 0, id of the owner thread as a u32 +// bytes 4-7: mutex id as u32 or 0 if id is not assigned yet. // bytes 12-15 or 16-19 (depending on platform): mutex kind, as an i32 // (the kind has to be at its offset for compatibility with static initializer macros) -// bytes 20-23: when count > 0, id of the blockset in which the blocked threads -// are waiting or 0 if blockset is not yet assigned. const PTHREAD_MUTEX_T_MIN_SIZE: u64 = 24; -fn mutex_get_locked_count<'mir, 'tcx: 'mir>( - ecx: &MiriEvalContext<'mir, 'tcx>, - mutex_op: OpTy<'tcx, Tag>, -) -> InterpResult<'tcx, ScalarMaybeUninit> { - get_at_offset(ecx, mutex_op, 4, ecx.machine.layouts.u32, PTHREAD_MUTEX_T_MIN_SIZE) -} - -fn mutex_set_locked_count<'mir, 'tcx: 'mir>( - ecx: &mut MiriEvalContext<'mir, 'tcx>, - mutex_op: OpTy<'tcx, Tag>, - locked_count: impl Into>, -) -> InterpResult<'tcx, ()> { - set_at_offset(ecx, mutex_op, 4, locked_count, ecx.machine.layouts.u32, PTHREAD_MUTEX_T_MIN_SIZE) -} - -fn mutex_get_owner<'mir, 'tcx: 'mir>( - ecx: &MiriEvalContext<'mir, 'tcx>, - mutex_op: OpTy<'tcx, Tag>, -) -> InterpResult<'tcx, ScalarMaybeUninit> { - get_at_offset(ecx, mutex_op, 8, ecx.machine.layouts.u32, PTHREAD_MUTEX_T_MIN_SIZE) -} - -fn mutex_set_owner<'mir, 'tcx: 'mir>( - ecx: &mut MiriEvalContext<'mir, 'tcx>, - mutex_op: OpTy<'tcx, Tag>, - owner: impl Into>, -) -> InterpResult<'tcx, ()> { - set_at_offset(ecx, mutex_op, 8, owner, ecx.machine.layouts.u32, PTHREAD_MUTEX_T_MIN_SIZE) -} - fn mutex_get_kind<'mir, 'tcx: 'mir>( ecx: &mut MiriEvalContext<'mir, 'tcx>, mutex_op: OpTy<'tcx, Tag>, @@ -132,34 +101,34 @@ fn mutex_set_kind<'mir, 'tcx: 'mir>( set_at_offset(ecx, 
mutex_op, offset, kind, ecx.machine.layouts.i32, PTHREAD_MUTEX_T_MIN_SIZE) } -fn mutex_get_blockset<'mir, 'tcx: 'mir>( +fn mutex_get_id<'mir, 'tcx: 'mir>( ecx: &MiriEvalContext<'mir, 'tcx>, mutex_op: OpTy<'tcx, Tag>, -) -> InterpResult<'tcx, ScalarMaybeUninit> { - get_at_offset(ecx, mutex_op, 20, ecx.machine.layouts.u32, PTHREAD_MUTEX_T_MIN_SIZE) +) -> InterpResult<'tcx, ScalarMaybeUndef> { + get_at_offset(ecx, mutex_op, 4, ecx.machine.layouts.u32, PTHREAD_MUTEX_T_MIN_SIZE) } -fn mutex_set_blockset<'mir, 'tcx: 'mir>( +fn mutex_set_id<'mir, 'tcx: 'mir>( ecx: &mut MiriEvalContext<'mir, 'tcx>, mutex_op: OpTy<'tcx, Tag>, - blockset: impl Into>, + id: impl Into>, ) -> InterpResult<'tcx, ()> { - set_at_offset(ecx, mutex_op, 20, blockset, ecx.machine.layouts.u32, PTHREAD_MUTEX_T_MIN_SIZE) + set_at_offset(ecx, mutex_op, 4, id, ecx.machine.layouts.u32, PTHREAD_MUTEX_T_MIN_SIZE) } -fn mutex_get_or_create_blockset<'mir, 'tcx: 'mir>( +fn mutex_get_or_create_id<'mir, 'tcx: 'mir>( ecx: &mut MiriEvalContext<'mir, 'tcx>, mutex_op: OpTy<'tcx, Tag>, -) -> InterpResult<'tcx, BlockSetId> { - let blockset = mutex_get_blockset(ecx, mutex_op)?.to_u32()?; - if blockset == 0 { - // 0 is a default value and also not a valid blockset id. Need to - // allocate a new blockset. - let blockset = ecx.create_blockset()?; - mutex_set_blockset(ecx, mutex_op, blockset.to_u32_scalar())?; - Ok(blockset) +) -> InterpResult<'tcx, MutexId> { + let id = mutex_get_id(ecx, mutex_op)?.to_u32()?; + if id == 0 { + // 0 is a default value and also not a valid mutex id. Need to allocate + // a new mutex. 
+ let id = ecx.mutex_create(); + mutex_set_id(ecx, mutex_op, id.to_u32_scalar())?; + Ok(id) } else { - Ok(BlockSetId::new(blockset)) + Ok(id.into()) } } @@ -168,105 +137,160 @@ fn mutex_get_or_create_blockset<'mir, 'tcx: 'mir>( // Our chosen memory layout for the emulated rwlock (does not have to match the platform layout!): // bytes 0-3: reserved for signature on macOS // (need to avoid this because it is set by static initializer macros) -// bytes 4-7: reader count, as a u32 -// bytes 8-11: writer count, as a u32 -// bytes 12-15: when writer or reader count > 0, id of the blockset in which the -// blocked writers are waiting or 0 if blockset is not yet assigned. -// bytes 16-20: when writer count > 0, id of the blockset in which the blocked -// readers are waiting or 0 if blockset is not yet assigned. +// bytes 4-7: rwlock id as u32 or 0 if id is not assigned yet. -const PTHREAD_RWLOCK_T_MIN_SIZE: u64 = 20; +const PTHREAD_RWLOCK_T_MIN_SIZE: u64 = 32; -fn rwlock_get_readers<'mir, 'tcx: 'mir>( +fn rwlock_get_id<'mir, 'tcx: 'mir>( ecx: &MiriEvalContext<'mir, 'tcx>, rwlock_op: OpTy<'tcx, Tag>, ) -> InterpResult<'tcx, ScalarMaybeUninit> { get_at_offset(ecx, rwlock_op, 4, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE) } -fn rwlock_set_readers<'mir, 'tcx: 'mir>( +fn rwlock_set_id<'mir, 'tcx: 'mir>( ecx: &mut MiriEvalContext<'mir, 'tcx>, rwlock_op: OpTy<'tcx, Tag>, - readers: impl Into>, + id: impl Into>, ) -> InterpResult<'tcx, ()> { - set_at_offset(ecx, rwlock_op, 4, readers, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE) + set_at_offset(ecx, rwlock_op, 4, id, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE) } -fn rwlock_get_writers<'mir, 'tcx: 'mir>( - ecx: &MiriEvalContext<'mir, 'tcx>, +fn rwlock_get_or_create_id<'mir, 'tcx: 'mir>( + ecx: &mut MiriEvalContext<'mir, 'tcx>, rwlock_op: OpTy<'tcx, Tag>, -) -> InterpResult<'tcx, ScalarMaybeUninit> { - get_at_offset(ecx, rwlock_op, 8, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE) +) -> InterpResult<'tcx, 
RwLockId> { + let id = rwlock_get_id(ecx, rwlock_op)?.to_u32()?; + if id == 0 { + // 0 is a default value and also not a valid rwlock id. Need to allocate + // a new read-write lock. + let id = ecx.rwlock_create(); + rwlock_set_id(ecx, rwlock_op, id.to_u32_scalar())?; + Ok(id) + } else { + Ok(id.into()) + } +} + +// pthread_condattr_t + +// Our chosen memory layout for emulation (does not have to match the platform layout!): +// store an i32 in the first four bytes equal to the corresponding libc clock id constant +// (e.g. CLOCK_REALTIME). + +const PTHREAD_CONDATTR_T_MIN_SIZE: u64 = 4; + +fn condattr_get_clock_id<'mir, 'tcx: 'mir>( + ecx: &MiriEvalContext<'mir, 'tcx>, + attr_op: OpTy<'tcx, Tag>, +) -> InterpResult<'tcx, ScalarMaybeUndef> { + get_at_offset(ecx, attr_op, 0, ecx.machine.layouts.i32, PTHREAD_CONDATTR_T_MIN_SIZE) } -fn rwlock_set_writers<'mir, 'tcx: 'mir>( +fn condattr_set_clock_id<'mir, 'tcx: 'mir>( ecx: &mut MiriEvalContext<'mir, 'tcx>, - rwlock_op: OpTy<'tcx, Tag>, - writers: impl Into>, + attr_op: OpTy<'tcx, Tag>, + clock_id: impl Into>, ) -> InterpResult<'tcx, ()> { - set_at_offset(ecx, rwlock_op, 8, writers, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE) + set_at_offset(ecx, attr_op, 0, clock_id, ecx.machine.layouts.i32, PTHREAD_CONDATTR_T_MIN_SIZE) } -fn rwlock_get_writer_blockset<'mir, 'tcx: 'mir>( +// pthread_cond_t + +// Our chosen memory layout for the emulated conditional variable (does not have +// to match the platform layout!): + +// bytes 4-7: the conditional variable id as u32 or 0 if id is not assigned yet. 
+// bytes 8-11: the clock id constant as i32 + +const PTHREAD_COND_T_MIN_SIZE: u64 = 12; + +fn cond_get_id<'mir, 'tcx: 'mir>( ecx: &MiriEvalContext<'mir, 'tcx>, - rwlock_op: OpTy<'tcx, Tag>, -) -> InterpResult<'tcx, ScalarMaybeUninit> { - get_at_offset(ecx, rwlock_op, 12, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE) + cond_op: OpTy<'tcx, Tag>, +) -> InterpResult<'tcx, ScalarMaybeUndef> { + get_at_offset(ecx, cond_op, 4, ecx.machine.layouts.u32, PTHREAD_COND_T_MIN_SIZE) } -fn rwlock_set_writer_blockset<'mir, 'tcx: 'mir>( +fn cond_set_id<'mir, 'tcx: 'mir>( ecx: &mut MiriEvalContext<'mir, 'tcx>, - rwlock_op: OpTy<'tcx, Tag>, - blockset: impl Into>, + cond_op: OpTy<'tcx, Tag>, + id: impl Into>, ) -> InterpResult<'tcx, ()> { - set_at_offset(ecx, rwlock_op, 12, blockset, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE) + set_at_offset(ecx, cond_op, 4, id, ecx.machine.layouts.u32, PTHREAD_COND_T_MIN_SIZE) } -fn rwlock_get_or_create_writer_blockset<'mir, 'tcx: 'mir>( +fn cond_get_or_create_id<'mir, 'tcx: 'mir>( ecx: &mut MiriEvalContext<'mir, 'tcx>, - rwlock_op: OpTy<'tcx, Tag>, -) -> InterpResult<'tcx, BlockSetId> { - let blockset = rwlock_get_writer_blockset(ecx, rwlock_op)?.to_u32()?; - if blockset == 0 { - // 0 is a default value and also not a valid blockset id. Need to - // allocate a new blockset. - let blockset = ecx.create_blockset()?; - rwlock_set_writer_blockset(ecx, rwlock_op, blockset.to_u32_scalar())?; - Ok(blockset) + cond_op: OpTy<'tcx, Tag>, +) -> InterpResult<'tcx, CondvarId> { + let id = cond_get_id(ecx, cond_op)?.to_u32()?; + if id == 0 { + // 0 is a default value and also not a valid conditional variable id. + // Need to allocate a new id. 
+ let id = ecx.condvar_create(); + cond_set_id(ecx, cond_op, id.to_u32_scalar())?; + Ok(id) } else { - Ok(BlockSetId::new(blockset)) + Ok(id.into()) } } -fn rwlock_get_reader_blockset<'mir, 'tcx: 'mir>( +fn cond_get_clock_id<'mir, 'tcx: 'mir>( ecx: &MiriEvalContext<'mir, 'tcx>, - rwlock_op: OpTy<'tcx, Tag>, -) -> InterpResult<'tcx, ScalarMaybeUninit> { - get_at_offset(ecx, rwlock_op, 16, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE) + cond_op: OpTy<'tcx, Tag>, +) -> InterpResult<'tcx, ScalarMaybeUndef> { + get_at_offset(ecx, cond_op, 8, ecx.machine.layouts.i32, PTHREAD_COND_T_MIN_SIZE) } -fn rwlock_set_reader_blockset<'mir, 'tcx: 'mir>( +fn cond_set_clock_id<'mir, 'tcx: 'mir>( ecx: &mut MiriEvalContext<'mir, 'tcx>, - rwlock_op: OpTy<'tcx, Tag>, - blockset: impl Into>, + cond_op: OpTy<'tcx, Tag>, + clock_id: impl Into>, ) -> InterpResult<'tcx, ()> { - set_at_offset(ecx, rwlock_op, 16, blockset, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE) + set_at_offset(ecx, cond_op, 8, clock_id, ecx.machine.layouts.i32, PTHREAD_COND_T_MIN_SIZE) } -fn rwlock_get_or_create_reader_blockset<'mir, 'tcx: 'mir>( +/// Try to reacquire the mutex associated with the condition variable after we were signaled. +fn reacquire_cond_mutex<'mir, 'tcx: 'mir>( ecx: &mut MiriEvalContext<'mir, 'tcx>, - rwlock_op: OpTy<'tcx, Tag>, -) -> InterpResult<'tcx, BlockSetId> { - let blockset = rwlock_get_reader_blockset(ecx, rwlock_op)?.to_u32()?; - if blockset == 0 { - // 0 is a default value and also not a valid blockset id. Need to - // allocate a new blockset. 
- let blockset = ecx.create_blockset()?; - rwlock_set_reader_blockset(ecx, rwlock_op, blockset.to_u32_scalar())?; - Ok(blockset) + thread: ThreadId, + mutex: MutexId, +) -> InterpResult<'tcx> { + if ecx.mutex_is_locked(mutex) { + ecx.mutex_enqueue(mutex, thread); } else { - Ok(BlockSetId::new(blockset)) + ecx.mutex_lock(mutex, thread); + ecx.unblock_thread(thread)?; } + Ok(()) +} + +/// Release the mutex associated with the condition variable because we are +/// entering the waiting state. +fn release_cond_mutex<'mir, 'tcx: 'mir>( + ecx: &mut MiriEvalContext<'mir, 'tcx>, + active_thread: ThreadId, + mutex: MutexId, +) -> InterpResult<'tcx> { + if let Some((owner_thread, current_locked_count)) = ecx.mutex_unlock(mutex) { + if current_locked_count != 0 { + throw_unsup_format!("awaiting on multiple times acquired lock is not supported"); + } + if owner_thread != active_thread { + throw_ub_format!("awaiting on a mutex owned by a different thread"); + } + if let Some(thread) = ecx.mutex_dequeue(mutex) { + // We have at least one thread waiting on this mutex. Transfer + // ownership to it. + ecx.mutex_lock(mutex, thread); + ecx.unblock_thread(thread)?; + } + } else { + throw_ub_format!("awaiting on unlocked mutex"); + } + ecx.block_thread(active_thread)?; + Ok(()) } impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {} @@ -323,7 +347,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx mutexattr_get_kind(this, attr_op)?.not_undef()? 
}; - mutex_set_locked_count(this, mutex_op, Scalar::from_u32(0))?; + let _ = mutex_get_or_create_id(this, mutex_op)?; mutex_set_kind(this, mutex_op, kind)?; Ok(0) @@ -333,21 +357,15 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let this = self.eval_context_mut(); let kind = mutex_get_kind(this, mutex_op)?.not_undef()?; - let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?; + let id = mutex_get_or_create_id(this, mutex_op)?; let active_thread = this.get_active_thread()?; - if locked_count == 0 { - // The mutex is unlocked. Let's lock it. - mutex_set_locked_count(this, mutex_op, Scalar::from_u32(1))?; - mutex_set_owner(this, mutex_op, active_thread.to_u32_scalar())?; - Ok(0) - } else { - // The mutex is locked. Let's check by whom. - let owner_thread: ThreadId = mutex_get_owner(this, mutex_op)?.to_u32()?.into(); + if this.mutex_is_locked(id) { + let owner_thread = this.mutex_get_owner(id); if owner_thread != active_thread { // Block the active thread. - let blockset = mutex_get_or_create_blockset(this, mutex_op)?; - this.block_active_thread(blockset)?; + this.block_thread(active_thread)?; + this.mutex_enqueue(id, active_thread); Ok(0) } else { // Trying to acquire the same mutex again. @@ -356,17 +374,16 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? { this.eval_libc_i32("EDEADLK") } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? { - match locked_count.checked_add(1) { - Some(new_count) => { - mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?; - Ok(0) - } - None => this.eval_libc_i32("EAGAIN"), - } + this.mutex_lock(id, active_thread); + Ok(0) } else { throw_ub_format!("called pthread_mutex_lock on an unsupported type of mutex"); } } + } else { + // The mutex is unlocked. Let's lock it. 
+ this.mutex_lock(id, active_thread); + Ok(0) } } @@ -374,16 +391,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let this = self.eval_context_mut(); let kind = mutex_get_kind(this, mutex_op)?.not_undef()?; - let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?; + let id = mutex_get_or_create_id(this, mutex_op)?; let active_thread = this.get_active_thread()?; - if locked_count == 0 { - // The mutex is unlocked. Let's lock it. - mutex_set_locked_count(this, mutex_op, Scalar::from_u32(1))?; - mutex_set_owner(this, mutex_op, active_thread.to_u32_scalar())?; - Ok(0) - } else { - let owner_thread: ThreadId = mutex_get_owner(this, mutex_op)?.to_u32()?.into(); + if this.mutex_is_locked(id) { + let owner_thread = this.mutex_get_owner(id); if owner_thread != active_thread { this.eval_libc_i32("EBUSY") } else { @@ -392,19 +404,18 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx { this.eval_libc_i32("EBUSY") } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? { - match locked_count.checked_add(1) { - Some(new_count) => { - mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?; - Ok(0) - } - None => this.eval_libc_i32("EAGAIN"), - } + this.mutex_lock(id, active_thread); + Ok(0) } else { throw_ub_format!( "called pthread_mutex_trylock on an unsupported type of mutex" ); } } + } else { + // The mutex is unlocked. Let's lock it. + this.mutex_lock(id, active_thread); + Ok(0) } } @@ -412,21 +423,20 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let this = self.eval_context_mut(); let kind = mutex_get_kind(this, mutex_op)?.not_undef()?; - let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?; - let owner_thread: ThreadId = mutex_get_owner(this, mutex_op)?.to_u32()?.into(); - - if owner_thread != this.get_active_thread()? 
{ - throw_ub_format!("called pthread_mutex_unlock on a mutex owned by another thread"); - } else if locked_count == 1 { - let blockset = mutex_get_or_create_blockset(this, mutex_op)?; - if let Some(new_owner) = this.unblock_some_thread(blockset)? { - // We have at least one thread waiting on this mutex. Transfer - // ownership to it. - mutex_set_owner(this, mutex_op, new_owner.to_u32_scalar())?; - } else { - // No thread is waiting on this mutex. - mutex_set_owner(this, mutex_op, Scalar::from_u32(0))?; - mutex_set_locked_count(this, mutex_op, Scalar::from_u32(0))?; + let id = mutex_get_or_create_id(this, mutex_op)?; + + if let Some((owner_thread, current_locked_count)) = this.mutex_unlock(id) { + if owner_thread != this.get_active_thread()? { + throw_ub_format!("called pthread_mutex_unlock on a mutex owned by another thread"); + } + if current_locked_count == 0 { + // The mutex is unlocked. + if let Some(thread) = this.mutex_dequeue(id) { + // We have at least one thread waiting on this mutex. Transfer + // ownership to it. + this.mutex_lock(id, thread); + this.unblock_thread(thread)?; + } } Ok(0) } else { @@ -435,16 +445,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? { this.eval_libc_i32("EPERM") } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? 
{ - match locked_count.checked_sub(1) { - Some(new_count) => { - mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?; - Ok(0) - } - None => { - // locked_count was already zero - this.eval_libc_i32("EPERM") - } - } + this.eval_libc_i32("EPERM") } else { throw_ub_format!("called pthread_mutex_unlock on an unsupported type of mutex"); } @@ -454,13 +455,14 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_mutex_destroy(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - if mutex_get_locked_count(this, mutex_op)?.to_u32()? != 0 { + let id = mutex_get_or_create_id(this, mutex_op)?; + + if this.mutex_is_locked(id) { throw_ub_format!("destroyed a locked mutex"); } - mutex_set_kind(this, mutex_op, ScalarMaybeUninit::Uninit)?; - mutex_set_locked_count(this, mutex_op, ScalarMaybeUninit::Uninit)?; - mutex_set_blockset(this, mutex_op, ScalarMaybeUninit::Uninit)?; + mutex_set_kind(this, mutex_op, ScalarMaybeUndef::Undef)?; + mutex_set_id(this, mutex_op, ScalarMaybeUndef::Undef)?; Ok(0) } @@ -468,121 +470,305 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_rwlock_rdlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?; - let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?; + let id = rwlock_get_or_create_id(this, rwlock_op)?; + let active_thread = this.get_active_thread()?; - if writers != 0 { - // The lock is locked by a writer. 
- assert_eq!(writers, 1); - let reader_blockset = rwlock_get_or_create_reader_blockset(this, rwlock_op)?; - this.block_active_thread(reader_blockset)?; + if this.rwlock_is_write_locked(id) { + this.rwlock_enqueue_reader(id, active_thread); + this.block_thread(active_thread)?; Ok(0) } else { - match readers.checked_add(1) { - Some(new_readers) => { - rwlock_set_readers(this, rwlock_op, Scalar::from_u32(new_readers))?; - Ok(0) - } - None => this.eval_libc_i32("EAGAIN"), - } + this.rwlock_reader_add(id, active_thread); + Ok(0) } } fn pthread_rwlock_tryrdlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?; - let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?; - if writers != 0 { + let id = rwlock_get_or_create_id(this, rwlock_op)?; + let active_thread = this.get_active_thread()?; + + if this.rwlock_is_write_locked(id) { this.eval_libc_i32("EBUSY") } else { - match readers.checked_add(1) { - Some(new_readers) => { - rwlock_set_readers(this, rwlock_op, Scalar::from_u32(new_readers))?; - Ok(0) - } - None => this.eval_libc_i32("EAGAIN"), - } + this.rwlock_reader_add(id, active_thread); + Ok(0) } } fn pthread_rwlock_wrlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?; - let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?; - let writer_blockset = rwlock_get_or_create_writer_blockset(this, rwlock_op)?; - if readers != 0 || writers != 0 { - this.block_active_thread(writer_blockset)?; + let id = rwlock_get_or_create_id(this, rwlock_op)?; + let active_thread = this.get_active_thread()?; + + if this.rwlock_is_locked(id) { + this.block_thread(active_thread)?; + this.rwlock_enqueue_writer(id, active_thread); } else { - rwlock_set_writers(this, rwlock_op, Scalar::from_u32(1))?; + this.rwlock_writer_set(id, active_thread); } + 
Ok(0) } fn pthread_rwlock_trywrlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?; - let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?; - if readers != 0 || writers != 0 { + let id = rwlock_get_or_create_id(this, rwlock_op)?; + let active_thread = this.get_active_thread()?; + + if this.rwlock_is_locked(id) { this.eval_libc_i32("EBUSY") } else { - rwlock_set_writers(this, rwlock_op, Scalar::from_u32(1))?; + this.rwlock_writer_set(id, active_thread); Ok(0) } } - // FIXME: We should check that this lock was locked by the active thread. fn pthread_rwlock_unlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?; - let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?; - let writer_blockset = rwlock_get_or_create_writer_blockset(this, rwlock_op)?; - if let Some(new_readers) = readers.checked_sub(1) { - assert_eq!(writers, 0); - rwlock_set_readers(this, rwlock_op, Scalar::from_u32(new_readers))?; - if new_readers == 0 { - if let Some(_writer) = this.unblock_some_thread(writer_blockset)? { - rwlock_set_writers(this, rwlock_op, Scalar::from_u32(1))?; + let id = rwlock_get_or_create_id(this, rwlock_op)?; + let active_thread = this.get_active_thread()?; + + if this.rwlock_reader_remove(id, active_thread) { + // The thread was a reader. + if this.rwlock_is_locked(id) { + // No more readers owning the lock. Give it to a writer if there + // is any. + if let Some(writer) = this.rwlock_dequeue_writer(id) { + this.unblock_thread(writer)?; + this.rwlock_writer_set(id, writer); } } Ok(0) - } else if writers != 0 { - let reader_blockset = rwlock_get_or_create_reader_blockset(this, rwlock_op)?; + } else if Some(active_thread) == this.rwlock_writer_remove(id) { + // The thread was a writer. 
+ // // We are prioritizing writers here against the readers. As a // result, not only readers can starve writers, but also writers can // starve readers. - if let Some(_writer) = this.unblock_some_thread(writer_blockset)? { - assert_eq!(writers, 1); + if let Some(writer) = this.rwlock_dequeue_writer(id) { + // Give the lock to another writer. + this.unblock_thread(writer)?; + this.rwlock_writer_set(id, writer); } else { - rwlock_set_writers(this, rwlock_op, Scalar::from_u32(0))?; - let mut readers = 0; - while let Some(_reader) = this.unblock_some_thread(reader_blockset)? { - readers += 1; + // Give the lock to all readers. + while let Some(reader) = this.rwlock_dequeue_reader(id) { + this.unblock_thread(reader)?; + this.rwlock_reader_add(id, reader); } - rwlock_set_readers(this, rwlock_op, Scalar::from_u32(readers))? } Ok(0) } else { - throw_ub_format!("unlocked an rwlock that was not locked"); + throw_ub_format!("unlocked an rwlock that was not locked by the active thread"); } } fn pthread_rwlock_destroy(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - if rwlock_get_readers(this, rwlock_op)?.to_u32()? != 0 - || rwlock_get_writers(this, rwlock_op)?.to_u32()? 
!= 0 - { + let id = rwlock_get_or_create_id(this, rwlock_op)?; + + if this.rwlock_is_locked(id) { throw_ub_format!("destroyed a locked rwlock"); } - rwlock_set_readers(this, rwlock_op, ScalarMaybeUninit::Uninit)?; - rwlock_set_writers(this, rwlock_op, ScalarMaybeUninit::Uninit)?; - rwlock_set_reader_blockset(this, rwlock_op, ScalarMaybeUninit::Uninit)?; - rwlock_set_writer_blockset(this, rwlock_op, ScalarMaybeUninit::Uninit)?; + rwlock_set_id(this, rwlock_op, ScalarMaybeUndef::Undef)?; + + Ok(0) + } + + fn pthread_condattr_init(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { + let this = self.eval_context_mut(); + + let default_clock_id = this.eval_libc("CLOCK_REALTIME")?; + condattr_set_clock_id(this, attr_op, default_clock_id)?; + + Ok(0) + } + + fn pthread_condattr_setclock( + &mut self, + attr_op: OpTy<'tcx, Tag>, + clock_id_op: OpTy<'tcx, Tag>, + ) -> InterpResult<'tcx, i32> { + let this = self.eval_context_mut(); + + let clock_id = this.read_scalar(clock_id_op)?.not_undef()?; + if clock_id == this.eval_libc("CLOCK_REALTIME")? + || clock_id == this.eval_libc("CLOCK_MONOTONIC")? 
+ { + condattr_set_clock_id(this, attr_op, clock_id)?; + } else { + let einval = this.eval_libc_i32("EINVAL")?; + return Ok(einval); + } + + Ok(0) + } + + fn pthread_condattr_getclock( + &mut self, + attr_op: OpTy<'tcx, Tag>, + clk_id_op: OpTy<'tcx, Tag>, + ) -> InterpResult<'tcx, i32> { + let this = self.eval_context_mut(); + + let clock_id = condattr_get_clock_id(this, attr_op)?; + this.write_scalar(clock_id, this.deref_operand(clk_id_op)?.into())?; + + Ok(0) + } + + fn pthread_condattr_destroy(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { + let this = self.eval_context_mut(); + + condattr_set_clock_id(this, attr_op, ScalarMaybeUndef::Undef)?; + + Ok(0) + } + + fn pthread_cond_init( + &mut self, + cond_op: OpTy<'tcx, Tag>, + attr_op: OpTy<'tcx, Tag>, + ) -> InterpResult<'tcx, i32> { + let this = self.eval_context_mut(); + + let attr = this.read_scalar(attr_op)?.not_undef()?; + let clock_id = if this.is_null(attr)? { + this.eval_libc("CLOCK_REALTIME")? + } else { + condattr_get_clock_id(this, attr_op)?.not_undef()? 
+ }; + + let _ = cond_get_or_create_id(this, cond_op)?; + cond_set_clock_id(this, cond_op, clock_id)?; + + Ok(0) + } + + fn pthread_cond_signal(&mut self, cond_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { + let this = self.eval_context_mut(); + let id = cond_get_or_create_id(this, cond_op)?; + if let Some((thread, mutex)) = this.condvar_signal(id) { + reacquire_cond_mutex(this, thread, mutex)?; + this.unregister_callback_if_exists(thread)?; + } + + Ok(0) + } + + fn pthread_cond_broadcast(&mut self, cond_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { + let this = self.eval_context_mut(); + let id = cond_get_or_create_id(this, cond_op)?; + + while let Some((thread, mutex)) = this.condvar_signal(id) { + reacquire_cond_mutex(this, thread, mutex)?; + this.unregister_callback_if_exists(thread)?; + } + + Ok(0) + } + + fn pthread_cond_wait( + &mut self, + cond_op: OpTy<'tcx, Tag>, + mutex_op: OpTy<'tcx, Tag>, + ) -> InterpResult<'tcx, i32> { + let this = self.eval_context_mut(); + + let id = cond_get_or_create_id(this, cond_op)?; + let mutex_id = mutex_get_or_create_id(this, mutex_op)?; + let active_thread = this.get_active_thread()?; + + release_cond_mutex(this, active_thread, mutex_id)?; + this.condvar_wait(id, active_thread, mutex_id); + + Ok(0) + } + + fn pthread_cond_timedwait( + &mut self, + cond_op: OpTy<'tcx, Tag>, + mutex_op: OpTy<'tcx, Tag>, + abstime_op: OpTy<'tcx, Tag>, + dest: PlaceTy<'tcx, Tag>, + ) -> InterpResult<'tcx> { + let this = self.eval_context_mut(); + + this.check_no_isolation("pthread_cond_timedwait")?; + + let id = cond_get_or_create_id(this, cond_op)?; + let mutex_id = mutex_get_or_create_id(this, mutex_op)?; + let active_thread = this.get_active_thread()?; + + release_cond_mutex(this, active_thread, mutex_id)?; + this.condvar_wait(id, active_thread, mutex_id); + + // We return success for now and override it in the timeout callback. + this.write_scalar(Scalar::from_i32(0), dest)?; + + // Extract the timeout. 
+ let clock_id = cond_get_clock_id(this, cond_op)?.to_i32()?; + let duration = { + let tp = this.deref_operand(abstime_op)?; + let mut offset = Size::from_bytes(0); + let layout = this.libc_ty_layout("time_t")?; + let seconds_place = tp.offset(offset, MemPlaceMeta::None, layout, this)?; + let seconds = this.read_scalar(seconds_place.into())?.to_u64()?; + offset += layout.size; + let layout = this.libc_ty_layout("c_long")?; + let nanoseconds_place = tp.offset(offset, MemPlaceMeta::None, layout, this)?; + let nanoseconds = this.read_scalar(nanoseconds_place.into())?.to_u64()?; + Duration::new(seconds, nanoseconds as u32) + }; + + let timeout_time = if clock_id == this.eval_libc_i32("CLOCK_REALTIME")? { + let time_anchor_since_epoch = + this.machine.time_anchor_timestamp.duration_since(SystemTime::UNIX_EPOCH).unwrap(); + let duration_since_time_anchor = duration.checked_sub(time_anchor_since_epoch).unwrap(); + this.machine.time_anchor.checked_add(duration_since_time_anchor).unwrap() + } else if clock_id == this.eval_libc_i32("CLOCK_MONOTONIC")? { + this.machine.time_anchor.checked_add(duration).unwrap() + } else { + throw_ub_format!("Unsupported clock id."); + }; + + // Register the timeout callback. + this.register_callback( + active_thread, + timeout_time, + Box::new(move |ecx| { + // Try to reacquire the mutex. + reacquire_cond_mutex(ecx, active_thread, mutex_id)?; + + // Remove the thread from the conditional variable. + ecx.condvar_remove_waiter(id, active_thread); + + // Set the timeout value. 
+                let timeout = ecx.eval_libc_i32("ETIMEDOUT")?;
+                ecx.write_scalar(Scalar::from_i32(timeout), dest)?;
+
+                Ok(())
+            }),
+        )?;
+
+        Ok(())
+    }
+
+    fn pthread_cond_destroy(&mut self, cond_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
+        let this = self.eval_context_mut();
+
+        let id = cond_get_or_create_id(this, cond_op)?;
+        if this.condvar_is_awaited(id) {
+            throw_ub_format!("destroyed an awaited conditional variable");
+        }
+        cond_set_id(this, cond_op, ScalarMaybeUndef::Undef)?;
+        cond_set_clock_id(this, cond_op, ScalarMaybeUndef::Undef)?;
 
         Ok(0)
     }
diff --git a/src/sync.rs b/src/sync.rs
new file mode 100644
index 0000000000..5d181692fb
--- /dev/null
+++ b/src/sync.rs
@@ -0,0 +1,299 @@
+use std::collections::{hash_map::Entry, HashMap, VecDeque};
+use std::convert::TryFrom;
+use std::num::NonZeroU32;
+use std::time::Instant;
+
+use rustc_index::vec::{Idx, IndexVec};
+
+use crate::*;
+
+macro_rules! declare_id {
+    ($name: ident) => {
+        #[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq, Hash)]
+        pub struct $name(NonZeroU32);
+
+        impl Idx for $name {
+            fn new(idx: usize) -> Self {
+                $name(NonZeroU32::new(u32::try_from(idx).unwrap() + 1).unwrap())
+            }
+            fn index(self) -> usize {
+                usize::try_from(self.0.get() - 1).unwrap()
+            }
+        }
+
+        impl From<u32> for $name {
+            fn from(id: u32) -> Self {
+                Self(NonZeroU32::new(id).unwrap())
+            }
+        }
+
+        impl $name {
+            pub fn to_u32_scalar<'tcx>(&self) -> Scalar<Tag> {
+                Scalar::from_u32(self.0.get())
+            }
+        }
+    };
+}
+
+declare_id!(MutexId);
+
+/// The mutex state.
+#[derive(Default, Debug)]
+struct Mutex {
+    /// The thread that currently owns the lock.
+    owner: Option<ThreadId>,
+    /// How many times the mutex was locked by the owner.
+    lock_count: usize,
+    /// The queue of threads waiting for this mutex.
+    queue: VecDeque<ThreadId>,
+}
+
+declare_id!(RwLockId);
+
+/// The read-write lock state.
+#[derive(Default, Debug)]
+struct RwLock {
+    /// The writer thread that currently owns the lock.
+    writer: Option<ThreadId>,
+    /// The readers that currently own the lock and how many times they acquired
+    /// the lock.
+    readers: HashMap<ThreadId, usize>,
+    /// The queue of writer threads waiting for this lock.
+    writer_queue: VecDeque<ThreadId>,
+    /// The queue of reader threads waiting for this lock.
+    reader_queue: VecDeque<ThreadId>,
+}
+
+declare_id!(CondvarId);
+
+/// A thread waiting on a conditional variable.
+#[derive(Debug)]
+struct CondvarWaiter {
+    /// The thread that is waiting on this variable.
+    thread: ThreadId,
+    /// The mutex on which the thread is waiting.
+    mutex: MutexId,
+    /// The moment in time when the waiter should time out.
+    timeout: Option<Instant>,
+}
+
+/// The conditional variable state.
+#[derive(Default, Debug)]
+struct Condvar {
+    waiters: VecDeque<CondvarWaiter>,
+}
+
+/// The state of all synchronization variables.
+#[derive(Default, Debug)]
+pub(super) struct SynchronizationState {
+    mutexes: IndexVec<MutexId, Mutex>,
+    rwlocks: IndexVec<RwLockId, RwLock>,
+    condvars: IndexVec<CondvarId, Condvar>,
+}
+
+// Public interface to synchronization primitives. Please note that in most
+// cases, the function calls are infallible and it is the client's (shim
+// implementation's) responsibility to detect and deal with erroneous
+// situations.
+impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
+pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
+    #[inline]
+    /// Create state for a new mutex.
+    fn mutex_create(&mut self) -> MutexId {
+        let this = self.eval_context_mut();
+        this.machine.threads.sync.mutexes.push(Default::default())
+    }
+
+    #[inline]
+    /// Get the id of the thread that currently owns this lock.
+    fn mutex_get_owner(&mut self, id: MutexId) -> ThreadId {
+        let this = self.eval_context_ref();
+        this.machine.threads.sync.mutexes[id].owner.unwrap()
+    }
+
+    #[inline]
+    /// Check if locked.
+    fn mutex_is_locked(&mut self, id: MutexId) -> bool {
+        let this = self.eval_context_mut();
+        this.machine.threads.sync.mutexes[id].owner.is_some()
+    }
+
+    /// Lock by setting the mutex owner and increasing the lock count.
+    fn mutex_lock(&mut self, id: MutexId, thread: ThreadId) {
+        let this = self.eval_context_mut();
+        let mutex = &mut this.machine.threads.sync.mutexes[id];
+        if let Some(current_owner) = mutex.owner {
+            assert_eq!(thread, current_owner, "mutex already locked by another thread");
+            assert!(
+                mutex.lock_count > 0,
+                "invariant violation: lock_count == 0 iff the thread is unlocked"
+            );
+        } else {
+            mutex.owner = Some(thread);
+        }
+        mutex.lock_count = mutex.lock_count.checked_add(1).unwrap();
+    }
+
+    /// Unlock by decreasing the lock count. If the lock count reaches 0, unset
+    /// the owner.
+    fn mutex_unlock(&mut self, id: MutexId) -> Option<(ThreadId, usize)> {
+        let this = self.eval_context_mut();
+        let mutex = &mut this.machine.threads.sync.mutexes[id];
+        if let Some(current_owner) = mutex.owner {
+            mutex.lock_count = mutex
+                .lock_count
+                .checked_sub(1)
+                .expect("invariant violation: lock_count == 0 iff the thread is unlocked");
+            if mutex.lock_count == 0 {
+                mutex.owner = None;
+            }
+            Some((current_owner, mutex.lock_count))
+        } else {
+            None
+        }
+    }
+
+    #[inline]
+    /// Put the thread into the queue waiting for the lock.
+    fn mutex_enqueue(&mut self, id: MutexId, thread: ThreadId) {
+        let this = self.eval_context_mut();
+        this.machine.threads.sync.mutexes[id].queue.push_back(thread);
+    }
+
+    #[inline]
+    /// Take a thread out of the queue waiting for the lock.
+    fn mutex_dequeue(&mut self, id: MutexId) -> Option<ThreadId> {
+        let this = self.eval_context_mut();
+        this.machine.threads.sync.mutexes[id].queue.pop_front()
+    }
+
+    #[inline]
+    /// Create state for a new read write lock.
+ fn rwlock_create(&mut self) -> RwLockId { + let this = self.eval_context_mut(); + this.machine.threads.sync.rwlocks.push(Default::default()) + } + + #[inline] + /// Check if locked. + fn rwlock_is_locked(&mut self, id: RwLockId) -> bool { + let this = self.eval_context_mut(); + this.machine.threads.sync.rwlocks[id].writer.is_some() + || !this.machine.threads.sync.rwlocks[id].readers.is_empty() + } + + #[inline] + /// Check if write locked. + fn rwlock_is_write_locked(&mut self, id: RwLockId) -> bool { + let this = self.eval_context_mut(); + this.machine.threads.sync.rwlocks[id].writer.is_some() + } + + /// Add a reader that collectively with other readers owns the lock. + fn rwlock_reader_add(&mut self, id: RwLockId, reader: ThreadId) { + let this = self.eval_context_mut(); + assert!(!this.rwlock_is_write_locked(id), "the lock is write locked"); + let count = this.machine.threads.sync.rwlocks[id].readers.entry(reader).or_insert(0); + *count += 1; + } + + /// Try removing the reader. Returns `true` if succeeded. + fn rwlock_reader_remove(&mut self, id: RwLockId, reader: ThreadId) -> bool { + let this = self.eval_context_mut(); + match this.machine.threads.sync.rwlocks[id].readers.entry(reader) { + Entry::Occupied(mut entry) => { + let count = entry.get_mut(); + *count -= 1; + if *count == 0 { + entry.remove(); + } + true + } + Entry::Vacant(_) => false, + } + } + + #[inline] + /// Put the reader in the queue waiting for the lock. + fn rwlock_enqueue_reader(&mut self, id: RwLockId, reader: ThreadId) { + let this = self.eval_context_mut(); + assert!(this.rwlock_is_write_locked(id), "queueing on not write locked lock"); + this.machine.threads.sync.rwlocks[id].reader_queue.push_back(reader); + } + + #[inline] + /// Take the reader out the queue waiting for the lock. 
+    fn rwlock_dequeue_reader(&mut self, id: RwLockId) -> Option<ThreadId> {
+        let this = self.eval_context_mut();
+        this.machine.threads.sync.rwlocks[id].reader_queue.pop_front()
+    }
+
+    #[inline]
+    /// Lock by setting the writer that owns the lock.
+    fn rwlock_writer_set(&mut self, id: RwLockId, writer: ThreadId) {
+        let this = self.eval_context_mut();
+        assert!(!this.rwlock_is_locked(id), "the lock is already locked");
+        this.machine.threads.sync.rwlocks[id].writer = Some(writer);
+    }
+
+    #[inline]
+    /// Try removing the writer.
+    fn rwlock_writer_remove(&mut self, id: RwLockId) -> Option<ThreadId> {
+        let this = self.eval_context_mut();
+        this.machine.threads.sync.rwlocks[id].writer.take()
+    }
+
+    #[inline]
+    /// Put the writer in the queue waiting for the lock.
+    fn rwlock_enqueue_writer(&mut self, id: RwLockId, writer: ThreadId) {
+        let this = self.eval_context_mut();
+        assert!(this.rwlock_is_locked(id), "queueing on unlocked lock");
+        this.machine.threads.sync.rwlocks[id].writer_queue.push_back(writer);
+    }
+
+    #[inline]
+    /// Take the writer out of the queue waiting for the lock.
+    fn rwlock_dequeue_writer(&mut self, id: RwLockId) -> Option<ThreadId> {
+        let this = self.eval_context_mut();
+        this.machine.threads.sync.rwlocks[id].writer_queue.pop_front()
+    }
+
+    #[inline]
+    /// Create state for a new conditional variable.
+    fn condvar_create(&mut self) -> CondvarId {
+        let this = self.eval_context_mut();
+        this.machine.threads.sync.condvars.push(Default::default())
+    }
+
+    #[inline]
+    /// Is the conditional variable awaited?
+    fn condvar_is_awaited(&mut self, id: CondvarId) -> bool {
+        let this = self.eval_context_mut();
+        !this.machine.threads.sync.condvars[id].waiters.is_empty()
+    }
+
+    /// Mark that the thread is waiting on the conditional variable.
+    fn condvar_wait(&mut self, id: CondvarId, thread: ThreadId, mutex: MutexId) {
+        let this = self.eval_context_mut();
+        let waiters = &mut this.machine.threads.sync.condvars[id].waiters;
+        assert!(waiters.iter().all(|waiter| waiter.thread != thread), "thread is already waiting");
+        waiters.push_back(CondvarWaiter { thread, mutex, timeout: None });
+    }
+
+    /// Wake up some thread (if there is any) sleeping on the conditional
+    /// variable.
+    fn condvar_signal(&mut self, id: CondvarId) -> Option<(ThreadId, MutexId)> {
+        let this = self.eval_context_mut();
+        this.machine.threads.sync.condvars[id]
+            .waiters
+            .pop_front()
+            .map(|waiter| (waiter.thread, waiter.mutex))
+    }
+
+    #[inline]
+    /// Remove the thread from the queue of threads waiting on this conditional variable.
+    fn condvar_remove_waiter(&mut self, id: CondvarId, thread: ThreadId) {
+        let this = self.eval_context_mut();
+        this.machine.threads.sync.condvars[id].waiters.retain(|waiter| waiter.thread != thread);
+    }
+}
diff --git a/src/thread.rs b/src/thread.rs
index d78beed28c..6ebf35a652 100644
--- a/src/thread.rs
+++ b/src/thread.rs
@@ -1,8 +1,10 @@
 //! Implements threads.
 
 use std::cell::RefCell;
+use std::collections::hash_map::Entry;
 use std::convert::TryFrom;
 use std::num::{NonZeroU32, TryFromIntError};
+use std::time::Instant;
 
 use log::trace;
 
@@ -15,18 +17,24 @@ use rustc_middle::{
     ty::{self, Instance},
 };
 
+use crate::sync::SynchronizationState;
 use crate::*;
 
 #[derive(Clone, Copy, Debug, PartialEq, Eq)]
 pub enum SchedulingAction {
     /// Execute step on the active thread.
     ExecuteStep,
+    /// Execute a scheduler's callback.
+    ExecuteCallback,
     /// Execute destructors of the active thread.
     ExecuteDtors,
     /// Stop the program.
     Stop,
 }
 
+type EventCallback<'mir, 'tcx> =
+    Box<dyn FnOnce(&mut InterpCx<'mir, 'tcx, Evaluator<'mir, 'tcx>>) -> InterpResult<'tcx> + 'tcx>;
+
 /// A thread identifier.
 #[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq, Hash)]
 pub struct ThreadId(u32);
 
@@ -94,6 +102,7 @@ pub enum ThreadState {
     BlockedOnJoin(ThreadId),
     /// The thread is blocked and belongs to the given blockset.
     Blocked(BlockSetId),
+    BlockedThread,
     /// The thread has terminated its execution (we do not delete terminated
     /// threads).
     Terminated,
@@ -162,6 +171,23 @@ impl<'mir, 'tcx> Default for Thread<'mir, 'tcx> {
     }
 }
 
+/// Callbacks are used to implement timeouts. For example, waiting on a
+/// conditional variable with a timeout creates a callback that is called after
+/// the specified time and unblocks the thread. If another thread signals on the
+/// conditional variable, the signal handler deletes the callback.
+struct CallBackInfo<'mir, 'tcx> {
+    /// The callback should be called no earlier than this time.
+    call_time: Instant,
+    /// The called function.
+    callback: EventCallback<'mir, 'tcx>,
+}
+
+impl<'mir, 'tcx> std::fmt::Debug for CallBackInfo<'mir, 'tcx> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "CallBack({:?})", self.call_time)
+    }
+}
+
 /// A set of threads.
 #[derive(Debug)]
 pub struct ThreadManager<'mir, 'tcx> {
@@ -171,6 +197,8 @@ pub struct ThreadManager<'mir, 'tcx> {
     ///
     /// Note that this vector also contains terminated threads.
     threads: IndexVec<ThreadId, Thread<'mir, 'tcx>>,
+    /// FIXME: make private.
+    pub(crate) sync: SynchronizationState,
     /// A counter used to generate unique identifiers for blocksets.
     blockset_counter: u32,
     /// A mapping from a thread-local static to an allocation id of a thread
     /// local static.
     thread_local_alloc_ids: RefCell<FxHashMap<DefId, AllocId>>,
     /// A flag that indicates that we should change the active thread.
     yield_active_thread: bool,
+    /// Callbacks that are called once the specified time passes.
+    callbacks: FxHashMap<ThreadId, CallBackInfo<'mir, 'tcx>>,
 }
 
 impl<'mir, 'tcx> Default for ThreadManager<'mir, 'tcx> {
@@ -191,9 +221,11 @@ impl<'mir, 'tcx> Default for ThreadManager<'mir, 'tcx> {
         Self {
             active_thread: ThreadId::new(0),
             threads: threads,
+            sync: SynchronizationState::default(),
             blockset_counter: 0,
             thread_local_alloc_ids: Default::default(),
             yield_active_thread: false,
+            callbacks: FxHashMap::default(),
         }
     }
 }
@@ -321,30 +353,18 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
         self.active_thread_ref().thread_name()
     }
 
-    /// Allocate a new blockset id.
-    fn create_blockset(&mut self) -> BlockSetId {
-        self.blockset_counter = self.blockset_counter.checked_add(1).unwrap();
-        BlockSetId::new(self.blockset_counter)
-    }
-
-    /// Block the currently active thread and put it into the given blockset.
-    fn block_active_thread(&mut self, set: BlockSetId) {
-        let state = &mut self.active_thread_mut().state;
+    /// Put the thread into the blocked state.
+    fn block_thread(&mut self, thread: ThreadId) {
+        let state = &mut self.threads[thread].state;
         assert_eq!(*state, ThreadState::Enabled);
-        *state = ThreadState::Blocked(set);
+        *state = ThreadState::BlockedThread;
     }
 
-    /// Unblock any one thread from the given blockset if it contains at least
-    /// one. Return the id of the unblocked thread.
-    fn unblock_some_thread(&mut self, set: BlockSetId) -> Option<ThreadId> {
-        for (id, thread) in self.threads.iter_enumerated_mut() {
-            if thread.state == ThreadState::Blocked(set) {
-                trace!("unblocking {:?} in blockset {:?}", id, set);
-                thread.state = ThreadState::Enabled;
-                return Some(id);
-            }
-        }
-        None
+    /// Put the blocked thread into the enabled state.
+    fn unblock_thread(&mut self, thread: ThreadId) {
+        let state = &mut self.threads[thread].state;
+        assert_eq!(*state, ThreadState::BlockedThread);
+        *state = ThreadState::Enabled;
     }
 
     /// Change the active thread to some enabled thread.
@@ -352,6 +372,39 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { self.yield_active_thread = true; } + /// Register the given `callback` to be called once the `call_time` passes. + fn register_callback( + &mut self, + thread: ThreadId, + call_time: Instant, + callback: EventCallback<'mir, 'tcx>, + ) { + self.callbacks + .insert(thread, CallBackInfo { call_time: call_time, callback: callback }) + .unwrap_none(); + } + + /// Unregister the callback for the `thread`. + fn unregister_callback_if_exists(&mut self, thread: ThreadId) { + self.callbacks.remove(&thread); + } + + /// Get a callback that is ready to be called. + fn get_callback(&mut self) -> Option<(ThreadId, EventCallback<'mir, 'tcx>)> { + let current_time = Instant::now(); + // We use a for loop here to make the scheduler more deterministic. + for thread in self.threads.indices() { + match self.callbacks.entry(thread) { + Entry::Occupied(entry) => + if current_time >= entry.get().call_time { + return Some((thread, entry.remove().callback)); + }, + Entry::Vacant(_) => {} + } + } + None + } + /// Decide which action to take next and on which thread. /// /// The currently implemented scheduling policy is the one that is commonly @@ -407,6 +460,18 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { // We have not found a thread to execute. if self.threads.iter().all(|thread| thread.state == ThreadState::Terminated) { unreachable!(); + } else if let Some(next_call_time) = + self.callbacks.values().min_by_key(|info| info.call_time) + { + // All threads are currently blocked, but we have unexecuted + // callbacks, which may unblock some of the threads. Hence, + // sleep until the first callback. 
+ if let Some(sleep_time) = + next_call_time.call_time.checked_duration_since(Instant::now()) + { + std::thread::sleep(sleep_time); + } + Ok(SchedulingAction::ExecuteCallback) } else { throw_machine_stop!(TerminationInfo::Deadlock); } @@ -577,27 +642,51 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx } #[inline] - fn create_blockset(&mut self) -> InterpResult<'tcx, BlockSetId> { + fn block_thread(&mut self, thread: ThreadId) -> InterpResult<'tcx> { let this = self.eval_context_mut(); - Ok(this.machine.threads.create_blockset()) + Ok(this.machine.threads.block_thread(thread)) } #[inline] - fn block_active_thread(&mut self, set: BlockSetId) -> InterpResult<'tcx> { + fn unblock_thread(&mut self, thread: ThreadId) -> InterpResult<'tcx> { let this = self.eval_context_mut(); - Ok(this.machine.threads.block_active_thread(set)) + Ok(this.machine.threads.unblock_thread(thread)) } #[inline] - fn unblock_some_thread(&mut self, set: BlockSetId) -> InterpResult<'tcx, Option> { + fn yield_active_thread(&mut self) -> InterpResult<'tcx> { let this = self.eval_context_mut(); - Ok(this.machine.threads.unblock_some_thread(set)) + this.machine.threads.yield_active_thread(); + Ok(()) } #[inline] - fn yield_active_thread(&mut self) -> InterpResult<'tcx> { + fn register_callback( + &mut self, + thread: ThreadId, + call_time: Instant, + callback: EventCallback<'mir, 'tcx>, + ) -> InterpResult<'tcx> { let this = self.eval_context_mut(); - this.machine.threads.yield_active_thread(); + this.machine.threads.register_callback(thread, call_time, callback); + Ok(()) + } + + #[inline] + fn unregister_callback_if_exists(&mut self, thread: ThreadId) -> InterpResult<'tcx> { + let this = self.eval_context_mut(); + this.machine.threads.unregister_callback_if_exists(thread); + Ok(()) + } + + /// Execute the callback on the callback's thread. 
+ #[inline] + fn run_scheduler_callback(&mut self) -> InterpResult<'tcx> { + let this = self.eval_context_mut(); + let (thread, callback) = this.machine.threads.get_callback().expect("no callback found"); + let old_thread = this.set_active_thread(thread)?; + callback(this)?; + this.set_active_thread(old_thread)?; Ok(()) } diff --git a/tests/run-pass/concurrency/barrier.rs b/tests/run-pass/concurrency/barrier.rs new file mode 100644 index 0000000000..1e976a6345 --- /dev/null +++ b/tests/run-pass/concurrency/barrier.rs @@ -0,0 +1,27 @@ +// ignore-windows: Concurrency on Windows is not supported yet. + +//! Check if Rust barriers are working. + +use std::sync::{Arc, Barrier}; +use std::thread; + + +/// This test is taken from the Rust documentation. +fn main() { + let mut handles = Vec::with_capacity(10); + let barrier = Arc::new(Barrier::new(10)); + for _ in 0..10 { + let c = barrier.clone(); + // The same messages will be printed together. + // You will NOT see any interleaving. + handles.push(thread::spawn(move|| { + println!("before wait"); + c.wait(); + println!("after wait"); + })); + } + // Wait for other threads to finish. + for handle in handles { + handle.join().unwrap(); + } +} \ No newline at end of file diff --git a/tests/run-pass/concurrency/barrier.stderr b/tests/run-pass/concurrency/barrier.stderr new file mode 100644 index 0000000000..2dbfb7721d --- /dev/null +++ b/tests/run-pass/concurrency/barrier.stderr @@ -0,0 +1,2 @@ +warning: thread support is experimental. For example, Miri does not detect data races yet. 
+ diff --git a/tests/run-pass/concurrency/barrier.stdout b/tests/run-pass/concurrency/barrier.stdout new file mode 100644 index 0000000000..f2c036a173 --- /dev/null +++ b/tests/run-pass/concurrency/barrier.stdout @@ -0,0 +1,20 @@ +before wait +before wait +before wait +before wait +before wait +before wait +before wait +before wait +before wait +before wait +after wait +after wait +after wait +after wait +after wait +after wait +after wait +after wait +after wait +after wait diff --git a/tests/run-pass/concurrency/condvar.rs b/tests/run-pass/concurrency/condvar.rs new file mode 100644 index 0000000000..ab971ee6e8 --- /dev/null +++ b/tests/run-pass/concurrency/condvar.rs @@ -0,0 +1,28 @@ +// ignore-windows: Concurrency on Windows is not supported yet. + +//! Check if Rust conditional variables are working. + +use std::sync::{Arc, Condvar, Mutex}; +use std::thread; + +/// The test taken from the Rust documentation. +fn main() { + let pair = Arc::new((Mutex::new(false), Condvar::new())); + let pair2 = pair.clone(); + + // Inside of our lock, spawn a new thread, and then wait for it to start. + thread::spawn(move || { + let (lock, cvar) = &*pair2; + let mut started = lock.lock().unwrap(); + *started = true; + // We notify the condvar that the value has changed. + cvar.notify_one(); + }); + + // Wait for the thread to start up. + let (lock, cvar) = &*pair; + let mut started = lock.lock().unwrap(); + while !*started { + started = cvar.wait(started).unwrap(); + } +} diff --git a/tests/run-pass/concurrency/condvar.stderr b/tests/run-pass/concurrency/condvar.stderr new file mode 100644 index 0000000000..2dbfb7721d --- /dev/null +++ b/tests/run-pass/concurrency/condvar.stderr @@ -0,0 +1,2 @@ +warning: thread support is experimental. For example, Miri does not detect data races yet. 
+ diff --git a/tests/run-pass/concurrency/libc_pthread_cond.rs b/tests/run-pass/concurrency/libc_pthread_cond.rs new file mode 100644 index 0000000000..83a651e6f0 --- /dev/null +++ b/tests/run-pass/concurrency/libc_pthread_cond.rs @@ -0,0 +1,199 @@ +// ignore-windows: No libc on Windows +// compile-flags: -Zmiri-disable-isolation + +#![feature(rustc_private)] + +extern crate libc; + +use std::cell::UnsafeCell; +use std::mem::{self, MaybeUninit}; +use std::sync::Arc; +use std::thread; + +struct Mutex { + inner: UnsafeCell, +} + +unsafe impl Sync for Mutex {} + +impl std::fmt::Debug for Mutex { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "Mutex") + } +} + +struct Cond { + inner: UnsafeCell, +} + +unsafe impl Sync for Cond {} + +impl std::fmt::Debug for Cond { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "Cond") + } +} + +unsafe fn create_cond_attr_monotonic() -> libc::pthread_condattr_t { + let mut attr = MaybeUninit::::uninit(); + assert_eq!(libc::pthread_condattr_init(attr.as_mut_ptr()), 0); + assert_eq!(libc::pthread_condattr_setclock(attr.as_mut_ptr(), libc::CLOCK_MONOTONIC), 0); + attr.assume_init() +} + +unsafe fn create_cond(attr: Option) -> Cond { + let cond: Cond = mem::zeroed(); + if let Some(mut attr) = attr { + assert_eq!(libc::pthread_cond_init(cond.inner.get() as *mut _, &attr as *const _), 0); + assert_eq!(libc::pthread_condattr_destroy(&mut attr as *mut _), 0); + } else { + assert_eq!(libc::pthread_cond_init(cond.inner.get() as *mut _, 0 as *const _), 0); + } + cond +} + +unsafe fn create_mutex() -> Mutex { + mem::zeroed() +} + +unsafe fn create_timeout(seconds: i64) -> libc::timespec { + let mut now: libc::timespec = mem::zeroed(); + assert_eq!(libc::clock_gettime(libc::CLOCK_MONOTONIC, &mut now), 0); + libc::timespec { tv_sec: now.tv_sec + seconds, tv_nsec: now.tv_nsec } +} + +fn test_pthread_condattr_t() { + unsafe { + let mut attr = create_cond_attr_monotonic(); + let mut 
clock_id = MaybeUninit::::uninit(); + assert_eq!(libc::pthread_condattr_getclock(&attr as *const _, clock_id.as_mut_ptr()), 0); + assert_eq!(clock_id.assume_init(), libc::CLOCK_MONOTONIC); + assert_eq!(libc::pthread_condattr_destroy(&mut attr as *mut _), 0); + } +} + +fn test_signal() { + unsafe { + let cond = Arc::new(create_cond(None)); + let mutex = Arc::new(create_mutex()); + + assert_eq!(libc::pthread_mutex_lock(mutex.inner.get() as *mut _), 0); + + let spawn_mutex = Arc::clone(&mutex); + let spawn_cond = Arc::clone(&cond); + let handle = thread::spawn(move || { + assert_eq!(libc::pthread_mutex_lock(spawn_mutex.inner.get() as *mut _), 0); + assert_eq!(libc::pthread_cond_signal(spawn_cond.inner.get() as *mut _), 0); + assert_eq!(libc::pthread_mutex_unlock(spawn_mutex.inner.get() as *mut _), 0); + }); + + assert_eq!( + libc::pthread_cond_wait(cond.inner.get() as *mut _, mutex.inner.get() as *mut _), + 0 + ); + assert_eq!(libc::pthread_mutex_unlock(mutex.inner.get() as *mut _), 0); + + handle.join().unwrap(); + + let mutex = Arc::try_unwrap(mutex).unwrap(); + assert_eq!(libc::pthread_mutex_destroy(mutex.inner.get() as *mut _), 0); + let cond = Arc::try_unwrap(cond).unwrap(); + assert_eq!(libc::pthread_cond_destroy(cond.inner.get() as *mut _), 0); + } +} + +fn test_broadcast() { + unsafe { + let cond = Arc::new(create_cond(None)); + let mutex = Arc::new(create_mutex()); + + assert_eq!(libc::pthread_mutex_lock(mutex.inner.get() as *mut _), 0); + + let spawn_mutex = Arc::clone(&mutex); + let spawn_cond = Arc::clone(&cond); + let handle = thread::spawn(move || { + assert_eq!(libc::pthread_mutex_lock(spawn_mutex.inner.get() as *mut _), 0); + assert_eq!(libc::pthread_cond_broadcast(spawn_cond.inner.get() as *mut _), 0); + assert_eq!(libc::pthread_mutex_unlock(spawn_mutex.inner.get() as *mut _), 0); + }); + + assert_eq!( + libc::pthread_cond_wait(cond.inner.get() as *mut _, mutex.inner.get() as *mut _), + 0 + ); + assert_eq!(libc::pthread_mutex_unlock(mutex.inner.get() 
as *mut _), 0); + + handle.join().unwrap(); + + let mutex = Arc::try_unwrap(mutex).unwrap(); + assert_eq!(libc::pthread_mutex_destroy(mutex.inner.get() as *mut _), 0); + let cond = Arc::try_unwrap(cond).unwrap(); + assert_eq!(libc::pthread_cond_destroy(cond.inner.get() as *mut _), 0); + } +} + +fn test_timed_wait_timeout() { + unsafe { + let attr = create_cond_attr_monotonic(); + let cond = create_cond(Some(attr)); + let mutex = create_mutex(); + let timeout = create_timeout(1); + + assert_eq!(libc::pthread_mutex_lock(mutex.inner.get() as *mut _), 0); + assert_eq!( + libc::pthread_cond_timedwait( + cond.inner.get() as *mut _, + mutex.inner.get() as *mut _, + &timeout + ), + libc::ETIMEDOUT + ); + assert_eq!(libc::pthread_mutex_unlock(mutex.inner.get() as *mut _), 0); + assert_eq!(libc::pthread_mutex_destroy(mutex.inner.get() as *mut _), 0); + assert_eq!(libc::pthread_cond_destroy(cond.inner.get() as *mut _), 0); + } +} + +fn test_timed_wait_notimeout() { + unsafe { + let attr = create_cond_attr_monotonic(); + let cond = Arc::new(create_cond(Some(attr))); + let mutex = Arc::new(create_mutex()); + let timeout = create_timeout(100); + + assert_eq!(libc::pthread_mutex_lock(mutex.inner.get() as *mut _), 0); + + let spawn_mutex = Arc::clone(&mutex); + let spawn_cond = Arc::clone(&cond); + let handle = thread::spawn(move || { + assert_eq!(libc::pthread_mutex_lock(spawn_mutex.inner.get() as *mut _), 0); + assert_eq!(libc::pthread_cond_signal(spawn_cond.inner.get() as *mut _), 0); + assert_eq!(libc::pthread_mutex_unlock(spawn_mutex.inner.get() as *mut _), 0); + }); + + assert_eq!( + libc::pthread_cond_timedwait( + cond.inner.get() as *mut _, + mutex.inner.get() as *mut _, + &timeout + ), + 0 + ); + assert_eq!(libc::pthread_mutex_unlock(mutex.inner.get() as *mut _), 0); + + handle.join().unwrap(); + + let mutex = Arc::try_unwrap(mutex).unwrap(); + assert_eq!(libc::pthread_mutex_destroy(mutex.inner.get() as *mut _), 0); + let cond = Arc::try_unwrap(cond).unwrap(); + 
assert_eq!(libc::pthread_cond_destroy(cond.inner.get() as *mut _), 0); + } +} + +fn main() { + test_pthread_condattr_t(); + test_signal(); + test_broadcast(); + test_timed_wait_timeout(); + test_timed_wait_notimeout(); +} diff --git a/tests/run-pass/concurrency/libc_pthread_cond.stderr b/tests/run-pass/concurrency/libc_pthread_cond.stderr new file mode 100644 index 0000000000..2dbfb7721d --- /dev/null +++ b/tests/run-pass/concurrency/libc_pthread_cond.stderr @@ -0,0 +1,2 @@ +warning: thread support is experimental. For example, Miri does not detect data races yet. + diff --git a/tests/run-pass/concurrency/mpsc.rs b/tests/run-pass/concurrency/mpsc.rs new file mode 100644 index 0000000000..3558f5415d --- /dev/null +++ b/tests/run-pass/concurrency/mpsc.rs @@ -0,0 +1,56 @@ +// ignore-windows: Concurrency on Windows is not supported yet. + +//! Check if Rust channels are working. + +use std::sync::mpsc::{channel, sync_channel}; +use std::thread; + +/// The test taken from the Rust documentation. +fn simple_send() { + let (tx, rx) = channel(); + thread::spawn(move || { + tx.send(10).unwrap(); + }); + assert_eq!(rx.recv().unwrap(), 10); +} + +/// The test taken from the Rust documentation. +fn multiple_send() { + let (tx, rx) = channel(); + for i in 0..10 { + let tx = tx.clone(); + thread::spawn(move || { + tx.send(i).unwrap(); + }); + } + + let mut sum = 0; + for _ in 0..10 { + let j = rx.recv().unwrap(); + assert!(0 <= j && j < 10); + sum += j; + } + assert_eq!(sum, 45); +} + +/// The test taken from the Rust documentation. 
+fn send_on_sync() { + let (sender, receiver) = sync_channel(1); + + // this returns immediately + sender.send(1).unwrap(); + + thread::spawn(move || { + // this will block until the previous message has been received + sender.send(2).unwrap(); + }); + + assert_eq!(receiver.recv().unwrap(), 1); + assert_eq!(receiver.recv().unwrap(), 2); +} + +fn main() { + simple_send(); + multiple_send(); + send_on_sync(); +} diff --git a/tests/run-pass/concurrency/mpsc.stderr b/tests/run-pass/concurrency/mpsc.stderr new file mode 100644 index 0000000000..2dbfb7721d --- /dev/null +++ b/tests/run-pass/concurrency/mpsc.stderr @@ -0,0 +1,2 @@ +warning: thread support is experimental. For example, Miri does not detect data races yet. + diff --git a/tests/run-pass/concurrency/once.rs b/tests/run-pass/concurrency/once.rs new file mode 100644 index 0000000000..499ceacfa8 --- /dev/null +++ b/tests/run-pass/concurrency/once.rs @@ -0,0 +1,44 @@ +// ignore-windows: Concurrency on Windows is not supported yet. + +//! Check if Rust once statics are working. The test taken from the Rust +//! documentation. + +use std::sync::Once; +use std::thread; + +static mut VAL: usize = 0; +static INIT: Once = Once::new(); + +fn get_cached_val() -> usize { + unsafe { + INIT.call_once(|| { + VAL = expensive_computation(); + }); + VAL + } +} + +fn expensive_computation() -> usize { + let mut i = 1; + let mut c = 1; + while i < 10000 { + i *= c; + c += 1; + } + i +} + +fn main() { + let handles: Vec<_> = (0..10) + .map(|_| { + thread::spawn(|| { + thread::yield_now(); + let val = get_cached_val(); + assert_eq!(val, 40320); + }) + }) + .collect(); + for handle in handles { + handle.join().unwrap(); + } +} diff --git a/tests/run-pass/concurrency/once.stderr b/tests/run-pass/concurrency/once.stderr new file mode 100644 index 0000000000..2dbfb7721d --- /dev/null +++ b/tests/run-pass/concurrency/once.stderr @@ -0,0 +1,2 @@ +warning: thread support is experimental. For example, Miri does not detect data races yet. 
+ From d0de439ac8366afce491250a64b78702fa5d7dd6 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Thu, 30 Apr 2020 13:47:12 -0700 Subject: [PATCH 02/17] Cleanup. --- src/thread.rs | 28 +++++++--------------------- 1 file changed, 7 insertions(+), 21 deletions(-) diff --git a/src/thread.rs b/src/thread.rs index 6ebf35a652..856468705d 100644 --- a/src/thread.rs +++ b/src/thread.rs @@ -3,7 +3,7 @@ use std::cell::RefCell; use std::collections::hash_map::Entry; use std::convert::TryFrom; -use std::num::{NonZeroU32, TryFromIntError}; +use std::num::TryFromIntError; use std::time::Instant; use log::trace; @@ -77,21 +77,6 @@ impl ThreadId { } } -/// An identifier of a set of blocked threads. 0 is used to indicate the absence -/// of a blockset identifier and, therefore, is not a valid identifier. -#[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq, Hash)] -pub struct BlockSetId(NonZeroU32); - -impl BlockSetId { - /// Panics if `id` is 0. - pub fn new(id: u32) -> Self { - Self(NonZeroU32::new(id).expect("0 is not a valid blockset id")) - } - pub fn to_u32_scalar<'tcx>(&self) -> Scalar { - Scalar::from_u32(self.0.get()) - } -} - /// The state of a thread. #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum ThreadState { @@ -100,9 +85,10 @@ pub enum ThreadState { /// The thread tried to join the specified thread and is blocked until that /// thread terminates. BlockedOnJoin(ThreadId), - /// The thread is blocked and belongs to the given blockset. - Blocked(BlockSetId), - BlockedThread, + /// The thread is blocked on some synchronization primitive. It is the + /// responsibility of the synchronization primitives to track threads that + /// are blocked by them. + BlockedOnSync, /// The thread has terminated its execution (we do not delete terminated /// threads). 
Terminated, @@ -357,13 +343,13 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { fn block_thread(&mut self, thread: ThreadId) { let state = &mut self.threads[thread].state; assert_eq!(*state, ThreadState::Enabled); - *state = ThreadState::BlockedThread; + *state = ThreadState::BlockedOnSync; } /// Put the blocked thread into the enabled state. fn unblock_thread(&mut self, thread: ThreadId) { let state = &mut self.threads[thread].state; - assert_eq!(*state, ThreadState::BlockedThread); + assert_eq!(*state, ThreadState::BlockedOnSync); *state = ThreadState::Enabled; } From 044a068c672cf8edae2cd9d5032995f37f1c3718 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Thu, 30 Apr 2020 14:07:07 -0700 Subject: [PATCH 03/17] Improve code readability and comments. --- src/eval.rs | 4 ++-- src/shims/sync.rs | 12 +++++----- src/sync.rs | 15 +++++++----- src/thread.rs | 60 +++++++++++++++++++++++------------------------ 4 files changed, 47 insertions(+), 44 deletions(-) diff --git a/src/eval.rs b/src/eval.rs index 30901a8f12..7a6c562e7c 100644 --- a/src/eval.rs +++ b/src/eval.rs @@ -210,11 +210,11 @@ pub fn eval_main<'tcx>(tcx: TyCtxt<'tcx>, main_id: DefId, config: MiriConfig) -> SchedulingAction::ExecuteStep => { assert!(ecx.step()?, "a terminated thread was scheduled for execution"); } - SchedulingAction::ExecuteCallback => { + SchedulingAction::ExecuteTimeoutCallback => { assert!(ecx.machine.communicate, "scheduler callbacks require disabled isolation, but the code \ that created the callback did not check it"); - ecx.run_scheduler_callback()?; + ecx.run_timeout_callback()?; } SchedulingAction::ExecuteDtors => { // This will either enable the thread again (so we go back diff --git a/src/shims/sync.rs b/src/shims/sync.rs index dfd7999457..f31efe18e1 100644 --- a/src/shims/sync.rs +++ b/src/shims/sync.rs @@ -128,7 +128,7 @@ fn mutex_get_or_create_id<'mir, 'tcx: 'mir>( mutex_set_id(ecx, mutex_op, id.to_u32_scalar())?; Ok(id) } else { - Ok(id.into()) + 
Ok(MutexId::from_u32(id)) } } @@ -168,7 +168,7 @@ fn rwlock_get_or_create_id<'mir, 'tcx: 'mir>( rwlock_set_id(ecx, rwlock_op, id.to_u32_scalar())?; Ok(id) } else { - Ok(id.into()) + Ok(RwLockId::from_u32(id)) } } @@ -232,7 +232,7 @@ fn cond_get_or_create_id<'mir, 'tcx: 'mir>( cond_set_id(ecx, cond_op, id.to_u32_scalar())?; Ok(id) } else { - Ok(id.into()) + Ok(CondvarId::from_u32(id)) } } @@ -656,7 +656,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let id = cond_get_or_create_id(this, cond_op)?; if let Some((thread, mutex)) = this.condvar_signal(id) { reacquire_cond_mutex(this, thread, mutex)?; - this.unregister_callback_if_exists(thread)?; + this.unregister_timeout_callback_if_exists(thread)?; } Ok(0) @@ -668,7 +668,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx while let Some((thread, mutex)) = this.condvar_signal(id) { reacquire_cond_mutex(this, thread, mutex)?; - this.unregister_callback_if_exists(thread)?; + this.unregister_timeout_callback_if_exists(thread)?; } Ok(0) @@ -739,7 +739,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx }; // Register the timeout callback. - this.register_callback( + this.register_timeout_callback( active_thread, timeout_time, Box::new(move |ecx| { diff --git a/src/sync.rs b/src/sync.rs index 5d181692fb..88b5d6c060 100644 --- a/src/sync.rs +++ b/src/sync.rs @@ -9,9 +9,18 @@ use crate::*; macro_rules! declare_id { ($name: ident) => { + /// 0 is used to indicate that the id was not yet assigned and, + /// therefore, is not a valid identifier. #[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq, Hash)] pub struct $name(NonZeroU32); + impl $name { + // Panics if `id == 0`. + pub fn from_u32(id: u32) -> Self { + Self(NonZeroU32::new(id).unwrap()) + } + } + impl Idx for $name { fn new(idx: usize) -> Self { $name(NonZeroU32::new(u32::try_from(idx).unwrap() + 1).unwrap()) @@ -21,12 +30,6 @@ macro_rules! 
declare_id { } } - impl From for $name { - fn from(id: u32) -> Self { - Self(NonZeroU32::new(id).unwrap()) - } - } - impl $name { pub fn to_u32_scalar<'tcx>(&self) -> Scalar { Scalar::from_u32(self.0.get()) diff --git a/src/thread.rs b/src/thread.rs index 856468705d..f67de48b71 100644 --- a/src/thread.rs +++ b/src/thread.rs @@ -24,15 +24,17 @@ use crate::*; pub enum SchedulingAction { /// Execute step on the active thread. ExecuteStep, - /// Execute a scheduler's callback. - ExecuteCallback, + /// Execute a timeout callback. + ExecuteTimeoutCallback, /// Execute destructors of the active thread. ExecuteDtors, /// Stop the program. Stop, } -type EventCallback<'mir, 'tcx> = +/// Timeout timeout_callbacks can be created by synchronization primitives to tell the +/// scheduler that they should be called once some period of time passes. +type TimeoutCallback<'mir, 'tcx> = Box>) -> InterpResult<'tcx> + 'tcx>; /// A thread identifier. @@ -161,14 +163,14 @@ impl<'mir, 'tcx> Default for Thread<'mir, 'tcx> { /// conditional variable with a timeout creates a callback that is called after /// the specified time and unblocks the thread. If another thread signals on the /// conditional variable, the signal handler deletes the callback. -struct CallBackInfo<'mir, 'tcx> { +struct TimeoutCallbackInfo<'mir, 'tcx> { /// The callback should be called no earlier than this time. call_time: Instant, /// The called function. - callback: EventCallback<'mir, 'tcx>, + callback: TimeoutCallback<'mir, 'tcx>, } -impl<'mir, 'tcx> std::fmt::Debug for CallBackInfo<'mir, 'tcx> { +impl<'mir, 'tcx> std::fmt::Debug for TimeoutCallbackInfo<'mir, 'tcx> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "CallBack({:?})", self.call_time) } @@ -183,17 +185,16 @@ pub struct ThreadManager<'mir, 'tcx> { /// /// Note that this vector also contains terminated threads. threads: IndexVec>, - /// FIXME: make private. 
+ /// This field is pub(crate) because the synchronization primitives + /// (`crate::sync`) need a way to access it. pub(crate) sync: SynchronizationState, - /// A counter used to generate unique identifiers for blocksets. - blockset_counter: u32, /// A mapping from a thread-local static to an allocation id of a thread /// specific allocation. thread_local_alloc_ids: RefCell>, /// A flag that indicates that we should change the active thread. yield_active_thread: bool, /// Callbacks that are called once the specified time passes. - callbacks: FxHashMap>, + timeout_callbacks: FxHashMap>, } impl<'mir, 'tcx> Default for ThreadManager<'mir, 'tcx> { @@ -208,10 +209,9 @@ impl<'mir, 'tcx> Default for ThreadManager<'mir, 'tcx> { active_thread: ThreadId::new(0), threads: threads, sync: SynchronizationState::default(), - blockset_counter: 0, thread_local_alloc_ids: Default::default(), yield_active_thread: false, - callbacks: FxHashMap::default(), + timeout_callbacks: FxHashMap::default(), } } } @@ -359,28 +359,28 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { } /// Register the given `callback` to be called once the `call_time` passes. - fn register_callback( + fn register_timeout_callback( &mut self, thread: ThreadId, call_time: Instant, - callback: EventCallback<'mir, 'tcx>, + callback: TimeoutCallback<'mir, 'tcx>, ) { - self.callbacks - .insert(thread, CallBackInfo { call_time: call_time, callback: callback }) + self.timeout_callbacks + .insert(thread, TimeoutCallbackInfo { call_time: call_time, callback: callback }) .unwrap_none(); } /// Unregister the callback for the `thread`. - fn unregister_callback_if_exists(&mut self, thread: ThreadId) { - self.callbacks.remove(&thread); + fn unregister_timeout_callback_if_exists(&mut self, thread: ThreadId) { + self.timeout_callbacks.remove(&thread); } /// Get a callback that is ready to be called. 
- fn get_callback(&mut self) -> Option<(ThreadId, EventCallback<'mir, 'tcx>)> { + fn get_callback(&mut self) -> Option<(ThreadId, TimeoutCallback<'mir, 'tcx>)> { let current_time = Instant::now(); // We use a for loop here to make the scheduler more deterministic. for thread in self.threads.indices() { - match self.callbacks.entry(thread) { + match self.timeout_callbacks.entry(thread) { Entry::Occupied(entry) => if current_time >= entry.get().call_time { return Some((thread, entry.remove().callback)); @@ -447,17 +447,17 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { if self.threads.iter().all(|thread| thread.state == ThreadState::Terminated) { unreachable!(); } else if let Some(next_call_time) = - self.callbacks.values().min_by_key(|info| info.call_time) + self.timeout_callbacks.values().min_by_key(|info| info.call_time) { // All threads are currently blocked, but we have unexecuted - // callbacks, which may unblock some of the threads. Hence, + // timeout_callbacks, which may unblock some of the threads. Hence, // sleep until the first callback. 
if let Some(sleep_time) = next_call_time.call_time.checked_duration_since(Instant::now()) { std::thread::sleep(sleep_time); } - Ok(SchedulingAction::ExecuteCallback) + Ok(SchedulingAction::ExecuteTimeoutCallback) } else { throw_machine_stop!(TerminationInfo::Deadlock); } @@ -647,27 +647,27 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx } #[inline] - fn register_callback( + fn register_timeout_callback( &mut self, thread: ThreadId, call_time: Instant, - callback: EventCallback<'mir, 'tcx>, + callback: TimeoutCallback<'mir, 'tcx>, ) -> InterpResult<'tcx> { let this = self.eval_context_mut(); - this.machine.threads.register_callback(thread, call_time, callback); + this.machine.threads.register_timeout_callback(thread, call_time, callback); Ok(()) } #[inline] - fn unregister_callback_if_exists(&mut self, thread: ThreadId) -> InterpResult<'tcx> { + fn unregister_timeout_callback_if_exists(&mut self, thread: ThreadId) -> InterpResult<'tcx> { let this = self.eval_context_mut(); - this.machine.threads.unregister_callback_if_exists(thread); + this.machine.threads.unregister_timeout_callback_if_exists(thread); Ok(()) } - /// Execute the callback on the callback's thread. + /// Execute a timeout callback on the callback's thread. #[inline] - fn run_scheduler_callback(&mut self) -> InterpResult<'tcx> { + fn run_timeout_callback(&mut self) -> InterpResult<'tcx> { let this = self.eval_context_mut(); let (thread, callback) = this.machine.threads.get_callback().expect("no callback found"); let old_thread = this.set_active_thread(thread)?; From 6e774dec86daeda8aa6ae2fa20c2334cd8841db6 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Thu, 30 Apr 2020 14:48:09 -0700 Subject: [PATCH 04/17] Move all run-pass synchronization primitive tests to sync.rs. 
--- tests/run-pass/concurrency/barrier.rs | 27 --- tests/run-pass/concurrency/condvar.rs | 28 --- tests/run-pass/concurrency/condvar.stderr | 2 - tests/run-pass/concurrency/locks.rs | 75 ------ tests/run-pass/concurrency/locks.stderr | 2 - tests/run-pass/concurrency/mpsc.rs | 56 ----- tests/run-pass/concurrency/mpsc.stderr | 2 - tests/run-pass/concurrency/once.rs | 44 ---- tests/run-pass/concurrency/once.stderr | 2 - tests/run-pass/concurrency/sync.rs | 216 ++++++++++++++++++ .../{barrier.stderr => sync.stderr} | 0 .../{barrier.stdout => sync.stdout} | 0 12 files changed, 216 insertions(+), 238 deletions(-) delete mode 100644 tests/run-pass/concurrency/barrier.rs delete mode 100644 tests/run-pass/concurrency/condvar.rs delete mode 100644 tests/run-pass/concurrency/condvar.stderr delete mode 100644 tests/run-pass/concurrency/locks.rs delete mode 100644 tests/run-pass/concurrency/locks.stderr delete mode 100644 tests/run-pass/concurrency/mpsc.rs delete mode 100644 tests/run-pass/concurrency/mpsc.stderr delete mode 100644 tests/run-pass/concurrency/once.rs delete mode 100644 tests/run-pass/concurrency/once.stderr create mode 100644 tests/run-pass/concurrency/sync.rs rename tests/run-pass/concurrency/{barrier.stderr => sync.stderr} (100%) rename tests/run-pass/concurrency/{barrier.stdout => sync.stdout} (100%) diff --git a/tests/run-pass/concurrency/barrier.rs b/tests/run-pass/concurrency/barrier.rs deleted file mode 100644 index 1e976a6345..0000000000 --- a/tests/run-pass/concurrency/barrier.rs +++ /dev/null @@ -1,27 +0,0 @@ -// ignore-windows: Concurrency on Windows is not supported yet. - -//! Check if Rust barriers are working. - -use std::sync::{Arc, Barrier}; -use std::thread; - - -/// This test is taken from the Rust documentation. -fn main() { - let mut handles = Vec::with_capacity(10); - let barrier = Arc::new(Barrier::new(10)); - for _ in 0..10 { - let c = barrier.clone(); - // The same messages will be printed together. 
- // You will NOT see any interleaving. - handles.push(thread::spawn(move|| { - println!("before wait"); - c.wait(); - println!("after wait"); - })); - } - // Wait for other threads to finish. - for handle in handles { - handle.join().unwrap(); - } -} \ No newline at end of file diff --git a/tests/run-pass/concurrency/condvar.rs b/tests/run-pass/concurrency/condvar.rs deleted file mode 100644 index ab971ee6e8..0000000000 --- a/tests/run-pass/concurrency/condvar.rs +++ /dev/null @@ -1,28 +0,0 @@ -// ignore-windows: Concurrency on Windows is not supported yet. - -//! Check if Rust conditional variables are working. - -use std::sync::{Arc, Condvar, Mutex}; -use std::thread; - -/// The test taken from the Rust documentation. -fn main() { - let pair = Arc::new((Mutex::new(false), Condvar::new())); - let pair2 = pair.clone(); - - // Inside of our lock, spawn a new thread, and then wait for it to start. - thread::spawn(move || { - let (lock, cvar) = &*pair2; - let mut started = lock.lock().unwrap(); - *started = true; - // We notify the condvar that the value has changed. - cvar.notify_one(); - }); - - // Wait for the thread to start up. - let (lock, cvar) = &*pair; - let mut started = lock.lock().unwrap(); - while !*started { - started = cvar.wait(started).unwrap(); - } -} diff --git a/tests/run-pass/concurrency/condvar.stderr b/tests/run-pass/concurrency/condvar.stderr deleted file mode 100644 index 2dbfb7721d..0000000000 --- a/tests/run-pass/concurrency/condvar.stderr +++ /dev/null @@ -1,2 +0,0 @@ -warning: thread support is experimental. For example, Miri does not detect data races yet. - diff --git a/tests/run-pass/concurrency/locks.rs b/tests/run-pass/concurrency/locks.rs deleted file mode 100644 index f5469712c5..0000000000 --- a/tests/run-pass/concurrency/locks.rs +++ /dev/null @@ -1,75 +0,0 @@ -// ignore-windows: Concurrency on Windows is not supported yet. 
- -use std::sync::{Arc, Mutex, RwLock}; -use std::thread; - -fn check_mutex() { - let data = Arc::new(Mutex::new(0)); - let mut threads = Vec::new(); - - for _ in 0..3 { - let data = Arc::clone(&data); - let thread = thread::spawn(move || { - let mut data = data.lock().unwrap(); - thread::yield_now(); - *data += 1; - }); - threads.push(thread); - } - - for thread in threads { - thread.join().unwrap(); - } - - assert!(data.try_lock().is_ok()); - - let data = Arc::try_unwrap(data).unwrap().into_inner().unwrap(); - assert_eq!(data, 3); -} - -fn check_rwlock_write() { - let data = Arc::new(RwLock::new(0)); - let mut threads = Vec::new(); - - for _ in 0..3 { - let data = Arc::clone(&data); - let thread = thread::spawn(move || { - let mut data = data.write().unwrap(); - thread::yield_now(); - *data += 1; - }); - threads.push(thread); - } - - for thread in threads { - thread.join().unwrap(); - } - - assert!(data.try_write().is_ok()); - - let data = Arc::try_unwrap(data).unwrap().into_inner().unwrap(); - assert_eq!(data, 3); -} - -fn check_rwlock_read_no_deadlock() { - let l1 = Arc::new(RwLock::new(0)); - let l2 = Arc::new(RwLock::new(0)); - - let l1_copy = Arc::clone(&l1); - let l2_copy = Arc::clone(&l2); - let _guard1 = l1.read().unwrap(); - let handle = thread::spawn(move || { - let _guard2 = l2_copy.read().unwrap(); - thread::yield_now(); - let _guard1 = l1_copy.read().unwrap(); - }); - thread::yield_now(); - let _guard2 = l2.read().unwrap(); - handle.join().unwrap(); -} - -fn main() { - check_mutex(); - check_rwlock_write(); - check_rwlock_read_no_deadlock(); -} diff --git a/tests/run-pass/concurrency/locks.stderr b/tests/run-pass/concurrency/locks.stderr deleted file mode 100644 index 2dbfb7721d..0000000000 --- a/tests/run-pass/concurrency/locks.stderr +++ /dev/null @@ -1,2 +0,0 @@ -warning: thread support is experimental. For example, Miri does not detect data races yet. 
- diff --git a/tests/run-pass/concurrency/mpsc.rs b/tests/run-pass/concurrency/mpsc.rs deleted file mode 100644 index 3558f5415d..0000000000 --- a/tests/run-pass/concurrency/mpsc.rs +++ /dev/null @@ -1,56 +0,0 @@ -// ignore-windows: Concurrency on Windows is not supported yet. - -//! Check if Rust channels are working. - -use std::sync::mpsc::{channel, sync_channel}; -use std::thread; - -/// The test taken from the Rust documentation. -fn simple_send() { - let (tx, rx) = channel(); - thread::spawn(move || { - tx.send(10).unwrap(); - }); - assert_eq!(rx.recv().unwrap(), 10); -} - -/// The test taken from the Rust documentation. -fn multiple_send() { - let (tx, rx) = channel(); - for i in 0..10 { - let tx = tx.clone(); - thread::spawn(move || { - tx.send(i).unwrap(); - }); - } - - let mut sum = 0; - for _ in 0..10 { - let j = rx.recv().unwrap(); - assert!(0 <= j && j < 10); - sum += j; - } - assert_eq!(sum, 45); -} - -/// The test taken from the Rust documentation. -fn send_on_sync() { - let (sender, receiver) = sync_channel(1); - - // this returns immediately - sender.send(1).unwrap(); - - thread::spawn(move || { - // this will block until the previous message has been received - sender.send(2).unwrap(); - }); - - assert_eq!(receiver.recv().unwrap(), 1); - assert_eq!(receiver.recv().unwrap(), 2); -} - -fn main() { - simple_send(); - multiple_send(); - send_on_sync(); -} diff --git a/tests/run-pass/concurrency/mpsc.stderr b/tests/run-pass/concurrency/mpsc.stderr deleted file mode 100644 index 2dbfb7721d..0000000000 --- a/tests/run-pass/concurrency/mpsc.stderr +++ /dev/null @@ -1,2 +0,0 @@ -warning: thread support is experimental. For example, Miri does not detect data races yet. - diff --git a/tests/run-pass/concurrency/once.rs b/tests/run-pass/concurrency/once.rs deleted file mode 100644 index 499ceacfa8..0000000000 --- a/tests/run-pass/concurrency/once.rs +++ /dev/null @@ -1,44 +0,0 @@ -// ignore-windows: Concurrency on Windows is not supported yet. - -//! 
Check if Rust once statics are working. The test taken from the Rust -//! documentation. - -use std::sync::Once; -use std::thread; - -static mut VAL: usize = 0; -static INIT: Once = Once::new(); - -fn get_cached_val() -> usize { - unsafe { - INIT.call_once(|| { - VAL = expensive_computation(); - }); - VAL - } -} - -fn expensive_computation() -> usize { - let mut i = 1; - let mut c = 1; - while i < 10000 { - i *= c; - c += 1; - } - i -} - -fn main() { - let handles: Vec<_> = (0..10) - .map(|_| { - thread::spawn(|| { - thread::yield_now(); - let val = get_cached_val(); - assert_eq!(val, 40320); - }) - }) - .collect(); - for handle in handles { - handle.join().unwrap(); - } -} diff --git a/tests/run-pass/concurrency/once.stderr b/tests/run-pass/concurrency/once.stderr deleted file mode 100644 index 2dbfb7721d..0000000000 --- a/tests/run-pass/concurrency/once.stderr +++ /dev/null @@ -1,2 +0,0 @@ -warning: thread support is experimental. For example, Miri does not detect data races yet. - diff --git a/tests/run-pass/concurrency/sync.rs b/tests/run-pass/concurrency/sync.rs new file mode 100644 index 0000000000..b09bfe9e0c --- /dev/null +++ b/tests/run-pass/concurrency/sync.rs @@ -0,0 +1,216 @@ +// ignore-windows: Concurrency on Windows is not supported yet. + +use std::sync::mpsc::{channel, sync_channel}; +use std::sync::{Arc, Barrier, Condvar, Mutex, Once, RwLock}; +use std::thread; + +// Check if Rust barriers are working. + +/// This test is taken from the Rust documentation. +fn check_barriers() { + let mut handles = Vec::with_capacity(10); + let barrier = Arc::new(Barrier::new(10)); + for _ in 0..10 { + let c = barrier.clone(); + // The same messages will be printed together. + // You will NOT see any interleaving. + handles.push(thread::spawn(move || { + println!("before wait"); + c.wait(); + println!("after wait"); + })); + } + // Wait for other threads to finish. 
+ for handle in handles { + handle.join().unwrap(); + } +} + +// Check if Rust conditional variables are working. + +/// The test taken from the Rust documentation. +fn check_conditional_variables() { + let pair = Arc::new((Mutex::new(false), Condvar::new())); + let pair2 = pair.clone(); + + // Inside of our lock, spawn a new thread, and then wait for it to start. + thread::spawn(move || { + let (lock, cvar) = &*pair2; + let mut started = lock.lock().unwrap(); + *started = true; + // We notify the condvar that the value has changed. + cvar.notify_one(); + }); + + // Wait for the thread to start up. + let (lock, cvar) = &*pair; + let mut started = lock.lock().unwrap(); + while !*started { + started = cvar.wait(started).unwrap(); + } +} + +// Check if locks are working. + +fn check_mutex() { + let data = Arc::new(Mutex::new(0)); + let mut threads = Vec::new(); + + for _ in 0..3 { + let data = Arc::clone(&data); + let thread = thread::spawn(move || { + let mut data = data.lock().unwrap(); + thread::yield_now(); + *data += 1; + }); + threads.push(thread); + } + + for thread in threads { + thread.join().unwrap(); + } + + assert!(data.try_lock().is_ok()); + + let data = Arc::try_unwrap(data).unwrap().into_inner().unwrap(); + assert_eq!(data, 3); +} + +fn check_rwlock_write() { + let data = Arc::new(RwLock::new(0)); + let mut threads = Vec::new(); + + for _ in 0..3 { + let data = Arc::clone(&data); + let thread = thread::spawn(move || { + let mut data = data.write().unwrap(); + thread::yield_now(); + *data += 1; + }); + threads.push(thread); + } + + for thread in threads { + thread.join().unwrap(); + } + + assert!(data.try_write().is_ok()); + + let data = Arc::try_unwrap(data).unwrap().into_inner().unwrap(); + assert_eq!(data, 3); +} + +fn check_rwlock_read_no_deadlock() { + let l1 = Arc::new(RwLock::new(0)); + let l2 = Arc::new(RwLock::new(0)); + + let l1_copy = Arc::clone(&l1); + let l2_copy = Arc::clone(&l2); + let _guard1 = l1.read().unwrap(); + let handle = 
thread::spawn(move || { + let _guard2 = l2_copy.read().unwrap(); + thread::yield_now(); + let _guard1 = l1_copy.read().unwrap(); + }); + thread::yield_now(); + let _guard2 = l2.read().unwrap(); + handle.join().unwrap(); +} + +// Check if channels are working. + +/// The test taken from the Rust documentation. +fn simple_send() { + let (tx, rx) = channel(); + thread::spawn(move || { + tx.send(10).unwrap(); + }); + assert_eq!(rx.recv().unwrap(), 10); +} + +/// The test taken from the Rust documentation. +fn multiple_send() { + let (tx, rx) = channel(); + for i in 0..10 { + let tx = tx.clone(); + thread::spawn(move || { + tx.send(i).unwrap(); + }); + } + + let mut sum = 0; + for _ in 0..10 { + let j = rx.recv().unwrap(); + assert!(0 <= j && j < 10); + sum += j; + } + assert_eq!(sum, 45); +} + +/// The test taken from the Rust documentation. +fn send_on_sync() { + let (sender, receiver) = sync_channel(1); + + // this returns immediately + sender.send(1).unwrap(); + + thread::spawn(move || { + // this will block until the previous message has been received + sender.send(2).unwrap(); + }); + + assert_eq!(receiver.recv().unwrap(), 1); + assert_eq!(receiver.recv().unwrap(), 2); +} + +// Check if Rust once statics are working. + +static mut VAL: usize = 0; +static INIT: Once = Once::new(); + +fn get_cached_val() -> usize { + unsafe { + INIT.call_once(|| { + VAL = expensive_computation(); + }); + VAL + } +} + +fn expensive_computation() -> usize { + let mut i = 1; + let mut c = 1; + while i < 10000 { + i *= c; + c += 1; + } + i +} + +/// The test taken from the Rust documentation. 
+fn check_once() { + let handles: Vec<_> = (0..10) + .map(|_| { + thread::spawn(|| { + thread::yield_now(); + let val = get_cached_val(); + assert_eq!(val, 40320); + }) + }) + .collect(); + for handle in handles { + handle.join().unwrap(); + } +} + +fn main() { + check_barriers(); + check_conditional_variables(); + check_mutex(); + check_rwlock_write(); + check_rwlock_read_no_deadlock(); + simple_send(); + multiple_send(); + send_on_sync(); + check_once(); +} diff --git a/tests/run-pass/concurrency/barrier.stderr b/tests/run-pass/concurrency/sync.stderr similarity index 100% rename from tests/run-pass/concurrency/barrier.stderr rename to tests/run-pass/concurrency/sync.stderr diff --git a/tests/run-pass/concurrency/barrier.stdout b/tests/run-pass/concurrency/sync.stdout similarity index 100% rename from tests/run-pass/concurrency/barrier.stdout rename to tests/run-pass/concurrency/sync.stdout From 4a303b13095122c2007bf8751bdbc0e89b8708d3 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Thu, 30 Apr 2020 14:59:35 -0700 Subject: [PATCH 05/17] Add a timeout test for conditional variables. --- tests/run-pass/concurrency/sync.rs | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/tests/run-pass/concurrency/sync.rs b/tests/run-pass/concurrency/sync.rs index b09bfe9e0c..e422a7fbdb 100644 --- a/tests/run-pass/concurrency/sync.rs +++ b/tests/run-pass/concurrency/sync.rs @@ -1,8 +1,10 @@ // ignore-windows: Concurrency on Windows is not supported yet. +// compile-flags: -Zmiri-disable-isolation use std::sync::mpsc::{channel, sync_channel}; use std::sync::{Arc, Barrier, Condvar, Mutex, Once, RwLock}; use std::thread; +use std::time::{Duration, Instant}; // Check if Rust barriers are working. @@ -50,6 +52,17 @@ fn check_conditional_variables() { } } +/// Test that waiting on a conditional variable with a timeout does not +/// deadlock. 
+fn check_conditional_variables_timeout() { + let lock = Mutex::new(()); + let cvar = Condvar::new(); + let guard = lock.lock().unwrap(); + let now = Instant::now(); + let _guard = cvar.wait_timeout(guard, Duration::from_millis(100)).unwrap().0; + assert!(now.elapsed().as_millis() >= 100); +} + // Check if locks are working. fn check_mutex() { @@ -206,6 +219,7 @@ fn check_once() { fn main() { check_barriers(); check_conditional_variables(); + check_conditional_variables_timeout(); check_mutex(); check_rwlock_write(); check_rwlock_read_no_deadlock(); From 86eb262e8a2d270cc8195185b217710f815761b3 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Thu, 30 Apr 2020 15:37:27 -0700 Subject: [PATCH 06/17] Cleanup Condvar tests. --- src/shims/sync.rs | 12 +- .../run-pass/concurrency/libc_pthread_cond.rs | 212 ++++-------------- .../concurrency/libc_pthread_cond.stderr | 2 - tests/run-pass/concurrency/sync.rs | 54 ++++- 4 files changed, 102 insertions(+), 178 deletions(-) delete mode 100644 tests/run-pass/concurrency/libc_pthread_cond.stderr diff --git a/src/shims/sync.rs b/src/shims/sync.rs index f31efe18e1..a586be8139 100644 --- a/src/shims/sync.rs +++ b/src/shims/sync.rs @@ -1,4 +1,5 @@ use std::time::{Duration, SystemTime}; +use std::convert::TryInto; use rustc_middle::ty::{layout::TyAndLayout, TyKind, TypeAndMut}; use rustc_target::abi::{LayoutOf, Size}; @@ -719,12 +720,17 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let mut offset = Size::from_bytes(0); let layout = this.libc_ty_layout("time_t")?; let seconds_place = tp.offset(offset, MemPlaceMeta::None, layout, this)?; - let seconds = this.read_scalar(seconds_place.into())?.to_u64()?; + let seconds = this.read_scalar(seconds_place.into())?; offset += layout.size; let layout = this.libc_ty_layout("c_long")?; let nanoseconds_place = tp.offset(offset, MemPlaceMeta::None, layout, this)?; - let nanoseconds = this.read_scalar(nanoseconds_place.into())?.to_u64()?; - 
Duration::new(seconds, nanoseconds as u32) + let nanoseconds = this.read_scalar(nanoseconds_place.into())?; + let (seconds, nanoseconds) = if this.pointer_size().bytes() == 8 { + (seconds.to_u64()?, nanoseconds.to_u64()?.try_into().unwrap()) + } else { + (seconds.to_u32()?.into(), nanoseconds.to_u32()?) + }; + Duration::new(seconds, nanoseconds) }; let timeout_time = if clock_id == this.eval_libc_i32("CLOCK_REALTIME")? { diff --git a/tests/run-pass/concurrency/libc_pthread_cond.rs b/tests/run-pass/concurrency/libc_pthread_cond.rs index 83a651e6f0..9b7a06b431 100644 --- a/tests/run-pass/concurrency/libc_pthread_cond.rs +++ b/tests/run-pass/concurrency/libc_pthread_cond.rs @@ -1,199 +1,75 @@ // ignore-windows: No libc on Windows +// ignore-macos: pthread_condattr_setclock is not supported on MacOS. // compile-flags: -Zmiri-disable-isolation #![feature(rustc_private)] +/// Test that conditional variable timeouts are working properly with both +/// monotonic and system clocks. extern crate libc; -use std::cell::UnsafeCell; -use std::mem::{self, MaybeUninit}; -use std::sync::Arc; -use std::thread; +use std::mem; +use std::time::Instant; -struct Mutex { - inner: UnsafeCell, -} - -unsafe impl Sync for Mutex {} - -impl std::fmt::Debug for Mutex { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "Mutex") - } -} - -struct Cond { - inner: UnsafeCell, -} - -unsafe impl Sync for Cond {} - -impl std::fmt::Debug for Cond { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "Cond") - } -} - -unsafe fn create_cond_attr_monotonic() -> libc::pthread_condattr_t { - let mut attr = MaybeUninit::::uninit(); - assert_eq!(libc::pthread_condattr_init(attr.as_mut_ptr()), 0); - assert_eq!(libc::pthread_condattr_setclock(attr.as_mut_ptr(), libc::CLOCK_MONOTONIC), 0); - attr.assume_init() -} - -unsafe fn create_cond(attr: Option) -> Cond { - let cond: Cond = mem::zeroed(); - if let Some(mut attr) = attr { - 
assert_eq!(libc::pthread_cond_init(cond.inner.get() as *mut _, &attr as *const _), 0); - assert_eq!(libc::pthread_condattr_destroy(&mut attr as *mut _), 0); - } else { - assert_eq!(libc::pthread_cond_init(cond.inner.get() as *mut _, 0 as *const _), 0); - } - cond -} - -unsafe fn create_mutex() -> Mutex { - mem::zeroed() -} - -unsafe fn create_timeout(seconds: i64) -> libc::timespec { - let mut now: libc::timespec = mem::zeroed(); - assert_eq!(libc::clock_gettime(libc::CLOCK_MONOTONIC, &mut now), 0); - libc::timespec { tv_sec: now.tv_sec + seconds, tv_nsec: now.tv_nsec } -} - -fn test_pthread_condattr_t() { +fn test_timed_wait_timeout_monotonic() { unsafe { - let mut attr = create_cond_attr_monotonic(); - let mut clock_id = MaybeUninit::::uninit(); - assert_eq!(libc::pthread_condattr_getclock(&attr as *const _, clock_id.as_mut_ptr()), 0); - assert_eq!(clock_id.assume_init(), libc::CLOCK_MONOTONIC); - assert_eq!(libc::pthread_condattr_destroy(&mut attr as *mut _), 0); - } -} + let mut attr: libc::pthread_condattr_t = mem::zeroed(); + assert_eq!(libc::pthread_condattr_init(&mut attr as *mut _), 0); + assert_eq!(libc::pthread_condattr_setclock(&mut attr as *mut _, libc::CLOCK_MONOTONIC), 0); -fn test_signal() { - unsafe { - let cond = Arc::new(create_cond(None)); - let mutex = Arc::new(create_mutex()); + let mut cond: libc::pthread_cond_t = mem::zeroed(); + assert_eq!(libc::pthread_cond_init(&mut cond as *mut _, &attr as *const _), 0); + assert_eq!(libc::pthread_condattr_destroy(&mut attr as *mut _), 0); - assert_eq!(libc::pthread_mutex_lock(mutex.inner.get() as *mut _), 0); + let mut mutex: libc::pthread_mutex_t = mem::zeroed(); - let spawn_mutex = Arc::clone(&mutex); - let spawn_cond = Arc::clone(&cond); - let handle = thread::spawn(move || { - assert_eq!(libc::pthread_mutex_lock(spawn_mutex.inner.get() as *mut _), 0); - assert_eq!(libc::pthread_cond_signal(spawn_cond.inner.get() as *mut _), 0); - assert_eq!(libc::pthread_mutex_unlock(spawn_mutex.inner.get() as *mut 
_), 0); - }); + let mut now: libc::timespec = mem::zeroed(); + assert_eq!(libc::clock_gettime(libc::CLOCK_MONOTONIC, &mut now), 0); + let timeout = libc::timespec { tv_sec: now.tv_sec + 1, tv_nsec: now.tv_nsec }; + assert_eq!(libc::pthread_mutex_lock(&mut mutex as *mut _), 0); + let current_time = Instant::now(); assert_eq!( - libc::pthread_cond_wait(cond.inner.get() as *mut _, mutex.inner.get() as *mut _), - 0 + libc::pthread_cond_timedwait(&mut cond as *mut _, &mut mutex as *mut _, &timeout), + libc::ETIMEDOUT ); - assert_eq!(libc::pthread_mutex_unlock(mutex.inner.get() as *mut _), 0); - - handle.join().unwrap(); - - let mutex = Arc::try_unwrap(mutex).unwrap(); - assert_eq!(libc::pthread_mutex_destroy(mutex.inner.get() as *mut _), 0); - let cond = Arc::try_unwrap(cond).unwrap(); - assert_eq!(libc::pthread_cond_destroy(cond.inner.get() as *mut _), 0); + assert!(current_time.elapsed().as_millis() >= 900); + assert_eq!(libc::pthread_mutex_unlock(&mut mutex as *mut _), 0); + assert_eq!(libc::pthread_mutex_destroy(&mut mutex as *mut _), 0); + assert_eq!(libc::pthread_cond_destroy(&mut cond as *mut _), 0); } } -fn test_broadcast() { +fn test_timed_wait_timeout_realtime() { unsafe { - let cond = Arc::new(create_cond(None)); - let mutex = Arc::new(create_mutex()); - - assert_eq!(libc::pthread_mutex_lock(mutex.inner.get() as *mut _), 0); - - let spawn_mutex = Arc::clone(&mutex); - let spawn_cond = Arc::clone(&cond); - let handle = thread::spawn(move || { - assert_eq!(libc::pthread_mutex_lock(spawn_mutex.inner.get() as *mut _), 0); - assert_eq!(libc::pthread_cond_broadcast(spawn_cond.inner.get() as *mut _), 0); - assert_eq!(libc::pthread_mutex_unlock(spawn_mutex.inner.get() as *mut _), 0); - }); + let mut attr: libc::pthread_condattr_t = mem::zeroed(); + assert_eq!(libc::pthread_condattr_init(&mut attr as *mut _), 0); + assert_eq!(libc::pthread_condattr_setclock(&mut attr as *mut _, libc::CLOCK_REALTIME), 0); - assert_eq!( - libc::pthread_cond_wait(cond.inner.get() as *mut 
_, mutex.inner.get() as *mut _), - 0 - ); - assert_eq!(libc::pthread_mutex_unlock(mutex.inner.get() as *mut _), 0); + let mut cond: libc::pthread_cond_t = mem::zeroed(); + assert_eq!(libc::pthread_cond_init(&mut cond as *mut _, &attr as *const _), 0); + assert_eq!(libc::pthread_condattr_destroy(&mut attr as *mut _), 0); - handle.join().unwrap(); + let mut mutex: libc::pthread_mutex_t = mem::zeroed(); - let mutex = Arc::try_unwrap(mutex).unwrap(); - assert_eq!(libc::pthread_mutex_destroy(mutex.inner.get() as *mut _), 0); - let cond = Arc::try_unwrap(cond).unwrap(); - assert_eq!(libc::pthread_cond_destroy(cond.inner.get() as *mut _), 0); - } -} + let mut now: libc::timespec = mem::zeroed(); + assert_eq!(libc::clock_gettime(libc::CLOCK_REALTIME, &mut now), 0); + let timeout = libc::timespec { tv_sec: now.tv_sec + 1, tv_nsec: now.tv_nsec }; -fn test_timed_wait_timeout() { - unsafe { - let attr = create_cond_attr_monotonic(); - let cond = create_cond(Some(attr)); - let mutex = create_mutex(); - let timeout = create_timeout(1); - - assert_eq!(libc::pthread_mutex_lock(mutex.inner.get() as *mut _), 0); + assert_eq!(libc::pthread_mutex_lock(&mut mutex as *mut _), 0); + let current_time = Instant::now(); assert_eq!( - libc::pthread_cond_timedwait( - cond.inner.get() as *mut _, - mutex.inner.get() as *mut _, - &timeout - ), + libc::pthread_cond_timedwait(&mut cond as *mut _, &mut mutex as *mut _, &timeout), libc::ETIMEDOUT ); - assert_eq!(libc::pthread_mutex_unlock(mutex.inner.get() as *mut _), 0); - assert_eq!(libc::pthread_mutex_destroy(mutex.inner.get() as *mut _), 0); - assert_eq!(libc::pthread_cond_destroy(cond.inner.get() as *mut _), 0); - } -} - -fn test_timed_wait_notimeout() { - unsafe { - let attr = create_cond_attr_monotonic(); - let cond = Arc::new(create_cond(Some(attr))); - let mutex = Arc::new(create_mutex()); - let timeout = create_timeout(100); - - assert_eq!(libc::pthread_mutex_lock(mutex.inner.get() as *mut _), 0); - - let spawn_mutex = Arc::clone(&mutex); 
- let spawn_cond = Arc::clone(&cond); - let handle = thread::spawn(move || { - assert_eq!(libc::pthread_mutex_lock(spawn_mutex.inner.get() as *mut _), 0); - assert_eq!(libc::pthread_cond_signal(spawn_cond.inner.get() as *mut _), 0); - assert_eq!(libc::pthread_mutex_unlock(spawn_mutex.inner.get() as *mut _), 0); - }); - - assert_eq!( - libc::pthread_cond_timedwait( - cond.inner.get() as *mut _, - mutex.inner.get() as *mut _, - &timeout - ), - 0 - ); - assert_eq!(libc::pthread_mutex_unlock(mutex.inner.get() as *mut _), 0); - - handle.join().unwrap(); - - let mutex = Arc::try_unwrap(mutex).unwrap(); - assert_eq!(libc::pthread_mutex_destroy(mutex.inner.get() as *mut _), 0); - let cond = Arc::try_unwrap(cond).unwrap(); - assert_eq!(libc::pthread_cond_destroy(cond.inner.get() as *mut _), 0); + assert!(current_time.elapsed().as_millis() >= 900); + assert_eq!(libc::pthread_mutex_unlock(&mut mutex as *mut _), 0); + assert_eq!(libc::pthread_mutex_destroy(&mut mutex as *mut _), 0); + assert_eq!(libc::pthread_cond_destroy(&mut cond as *mut _), 0); } } fn main() { - test_pthread_condattr_t(); - test_signal(); - test_broadcast(); - test_timed_wait_timeout(); - test_timed_wait_notimeout(); + test_timed_wait_timeout_monotonic(); + test_timed_wait_timeout_realtime(); } diff --git a/tests/run-pass/concurrency/libc_pthread_cond.stderr b/tests/run-pass/concurrency/libc_pthread_cond.stderr deleted file mode 100644 index 2dbfb7721d..0000000000 --- a/tests/run-pass/concurrency/libc_pthread_cond.stderr +++ /dev/null @@ -1,2 +0,0 @@ -warning: thread support is experimental. For example, Miri does not detect data races yet. - diff --git a/tests/run-pass/concurrency/sync.rs b/tests/run-pass/concurrency/sync.rs index e422a7fbdb..e3f3a03b11 100644 --- a/tests/run-pass/concurrency/sync.rs +++ b/tests/run-pass/concurrency/sync.rs @@ -31,7 +31,7 @@ fn check_barriers() { // Check if Rust conditional variables are working. /// The test taken from the Rust documentation. 
-fn check_conditional_variables() { +fn check_conditional_variables_notify_one() { let pair = Arc::new((Mutex::new(false), Condvar::new())); let pair2 = pair.clone(); @@ -52,17 +52,59 @@ fn check_conditional_variables() { } } +/// The test taken from the Rust documentation. +fn check_conditional_variables_notify_all() { + let pair = Arc::new((Mutex::new(false), Condvar::new())); + let pair2 = pair.clone(); + + thread::spawn(move || { + let (lock, cvar) = &*pair2; + let mut started = lock.lock().unwrap(); + *started = true; + // We notify the condvar that the value has changed. + cvar.notify_all(); + }); + + // Wait for the thread to start up. + let (lock, cvar) = &*pair; + let mut started = lock.lock().unwrap(); + // As long as the value inside the `Mutex` is `false`, we wait. + while !*started { + started = cvar.wait(started).unwrap(); + } +} + /// Test that waiting on a conditional variable with a timeout does not /// deadlock. -fn check_conditional_variables_timeout() { +fn check_conditional_variables_timed_wait_timeout() { let lock = Mutex::new(()); let cvar = Condvar::new(); let guard = lock.lock().unwrap(); let now = Instant::now(); - let _guard = cvar.wait_timeout(guard, Duration::from_millis(100)).unwrap().0; + let (_guard, timeout) = cvar.wait_timeout(guard, Duration::from_millis(100)).unwrap(); + assert!(timeout.timed_out()); assert!(now.elapsed().as_millis() >= 100); } +/// Test that signaling a conditional variable when waiting with a timeout works +/// as expected. +fn check_conditional_variables_timed_wait_notimeout() { + let pair = Arc::new((Mutex::new(()), Condvar::new())); + let pair2 = pair.clone(); + + let (lock, cvar) = &*pair; + let guard = lock.lock().unwrap(); + + let handle = thread::spawn(move || { + let (_lock, cvar) = &*pair2; + cvar.notify_one(); + }); + + let (_guard, timeout) = cvar.wait_timeout(guard, Duration::from_millis(100)).unwrap(); + assert!(!timeout.timed_out()); + handle.join().unwrap(); +} + // Check if locks are working. 
fn check_mutex() { @@ -218,8 +260,10 @@ fn check_once() { fn main() { check_barriers(); - check_conditional_variables(); - check_conditional_variables_timeout(); + check_conditional_variables_notify_one(); + check_conditional_variables_notify_all(); + check_conditional_variables_timed_wait_timeout(); + check_conditional_variables_timed_wait_notimeout(); check_mutex(); check_rwlock_write(); check_rwlock_read_no_deadlock(); From 0bbac1275177176c6bb1a8640b5ef34dbb5c5074 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Mon, 18 May 2020 16:28:19 +0200 Subject: [PATCH 07/17] Change how the time is handled. --- src/machine.rs | 8 +------- src/shims/sync.rs | 10 ++++------ src/sync.rs | 5 +---- src/thread.rs | 47 ++++++++++++++++++++++++++++++----------------- 4 files changed, 36 insertions(+), 34 deletions(-) diff --git a/src/machine.rs b/src/machine.rs index 4fb08cd259..51aa7ae310 100644 --- a/src/machine.rs +++ b/src/machine.rs @@ -5,7 +5,7 @@ use std::borrow::Cow; use std::cell::RefCell; use std::num::NonZeroU64; use std::rc::Rc; -use std::time::{Instant, SystemTime}; +use std::time::Instant; use std::fmt; use log::trace; @@ -251,11 +251,6 @@ pub struct Evaluator<'mir, 'tcx> { /// The "time anchor" for this machine's monotone clock (for `Instant` simulation). pub(crate) time_anchor: Instant, - /// The approximate system time when "time anchor" was created. This is used - /// for converting system time to monotone time so that we can simplify the - /// thread scheduler to deal only with a single representation of time. - pub(crate) time_anchor_timestamp: SystemTime, - /// The set of threads. 
pub(crate) threads: ThreadManager<'mir, 'tcx>, @@ -286,7 +281,6 @@ impl<'mir, 'tcx> Evaluator<'mir, 'tcx> { dir_handler: Default::default(), panic_payload: None, time_anchor: Instant::now(), - time_anchor_timestamp: SystemTime::now(), layouts, threads: ThreadManager::default(), } diff --git a/src/shims/sync.rs b/src/shims/sync.rs index a586be8139..5432c76dfe 100644 --- a/src/shims/sync.rs +++ b/src/shims/sync.rs @@ -1,10 +1,11 @@ -use std::time::{Duration, SystemTime}; use std::convert::TryInto; +use std::time::{Duration, SystemTime}; use rustc_middle::ty::{layout::TyAndLayout, TyKind, TypeAndMut}; use rustc_target::abi::{LayoutOf, Size}; use crate::stacked_borrows::Tag; +use crate::thread::Time; use crate::*; @@ -734,12 +735,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx }; let timeout_time = if clock_id == this.eval_libc_i32("CLOCK_REALTIME")? { - let time_anchor_since_epoch = - this.machine.time_anchor_timestamp.duration_since(SystemTime::UNIX_EPOCH).unwrap(); - let duration_since_time_anchor = duration.checked_sub(time_anchor_since_epoch).unwrap(); - this.machine.time_anchor.checked_add(duration_since_time_anchor).unwrap() + Time::RealTime(SystemTime::UNIX_EPOCH.checked_add(duration).unwrap()) } else if clock_id == this.eval_libc_i32("CLOCK_MONOTONIC")? { - this.machine.time_anchor.checked_add(duration).unwrap() + Time::Monotonic(this.machine.time_anchor.checked_add(duration).unwrap()) } else { throw_ub_format!("Unsupported clock id."); }; diff --git a/src/sync.rs b/src/sync.rs index 88b5d6c060..e05d111cb2 100644 --- a/src/sync.rs +++ b/src/sync.rs @@ -1,7 +1,6 @@ use std::collections::{hash_map::Entry, HashMap, VecDeque}; use std::convert::TryFrom; use std::num::NonZeroU32; -use std::time::Instant; use rustc_index::vec::{Idx, IndexVec}; @@ -76,8 +75,6 @@ struct CondvarWaiter { thread: ThreadId, /// The mutex on which the thread is waiting. mutex: MutexId, - /// The moment in time when the waiter should time out. 
- timeout: Option, } /// The conditional variable state. @@ -280,7 +277,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let this = self.eval_context_mut(); let waiters = &mut this.machine.threads.sync.condvars[id].waiters; assert!(waiters.iter().all(|waiter| waiter.thread != thread), "thread is already waiting"); - waiters.push_back(CondvarWaiter { thread, mutex, timeout: None }); + waiters.push_back(CondvarWaiter { thread, mutex }); } /// Wake up some thread (if there is any) sleeping on the conditional diff --git a/src/thread.rs b/src/thread.rs index f67de48b71..69b31b541a 100644 --- a/src/thread.rs +++ b/src/thread.rs @@ -4,7 +4,7 @@ use std::cell::RefCell; use std::collections::hash_map::Entry; use std::convert::TryFrom; use std::num::TryFromIntError; -use std::time::Instant; +use std::time::{Duration, Instant, SystemTime}; use log::trace; @@ -159,13 +159,30 @@ impl<'mir, 'tcx> Default for Thread<'mir, 'tcx> { } } +#[derive(Debug)] +pub enum Time { + Monotonic(Instant), + RealTime(SystemTime), +} + +impl Time { + /// How long do we have to wait from now until the specified time? + fn get_wait_time(&self) -> Duration { + match self { + Time::Monotonic(instant) => instant.saturating_duration_since(Instant::now()), + Time::RealTime(time) => + time.duration_since(SystemTime::now()).unwrap_or(Duration::new(0, 0)), + } + } +} + /// Callbacks are used to implement timeouts. For example, waiting on a /// conditional variable with a timeout creates a callback that is called after /// the specified time and unblocks the thread. If another thread signals on the /// conditional variable, the signal handler deletes the callback. struct TimeoutCallbackInfo<'mir, 'tcx> { /// The callback should be called no earlier than this time. - call_time: Instant, + call_time: Time, /// The called function. 
callback: TimeoutCallback<'mir, 'tcx>, } @@ -362,11 +379,11 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { fn register_timeout_callback( &mut self, thread: ThreadId, - call_time: Instant, + call_time: Time, callback: TimeoutCallback<'mir, 'tcx>, ) { self.timeout_callbacks - .insert(thread, TimeoutCallbackInfo { call_time: call_time, callback: callback }) + .insert(thread, TimeoutCallbackInfo { call_time, callback }) .unwrap_none(); } @@ -376,13 +393,12 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { } /// Get a callback that is ready to be called. - fn get_callback(&mut self) -> Option<(ThreadId, TimeoutCallback<'mir, 'tcx>)> { - let current_time = Instant::now(); + fn get_ready_callback(&mut self) -> Option<(ThreadId, TimeoutCallback<'mir, 'tcx>)> { // We use a for loop here to make the scheduler more deterministic. for thread in self.threads.indices() { match self.timeout_callbacks.entry(thread) { Entry::Occupied(entry) => - if current_time >= entry.get().call_time { + if entry.get().call_time.get_wait_time() == Duration::new(0, 0) { return Some((thread, entry.remove().callback)); }, Entry::Vacant(_) => {} @@ -445,18 +461,14 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { } // We have not found a thread to execute. if self.threads.iter().all(|thread| thread.state == ThreadState::Terminated) { - unreachable!(); - } else if let Some(next_call_time) = - self.timeout_callbacks.values().min_by_key(|info| info.call_time) + unreachable!("all threads terminated without the main thread terminating?!"); + } else if let Some(sleep_time) = + self.timeout_callbacks.values().map(|info| info.call_time.get_wait_time()).min() { // All threads are currently blocked, but we have unexecuted // timeout_callbacks, which may unblock some of the threads. Hence, // sleep until the first callback. 
- if let Some(sleep_time) = - next_call_time.call_time.checked_duration_since(Instant::now()) - { - std::thread::sleep(sleep_time); - } + std::thread::sleep(sleep_time); Ok(SchedulingAction::ExecuteTimeoutCallback) } else { throw_machine_stop!(TerminationInfo::Deadlock); @@ -650,7 +662,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn register_timeout_callback( &mut self, thread: ThreadId, - call_time: Instant, + call_time: Time, callback: TimeoutCallback<'mir, 'tcx>, ) -> InterpResult<'tcx> { let this = self.eval_context_mut(); @@ -669,7 +681,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx #[inline] fn run_timeout_callback(&mut self) -> InterpResult<'tcx> { let this = self.eval_context_mut(); - let (thread, callback) = this.machine.threads.get_callback().expect("no callback found"); + let (thread, callback) = + this.machine.threads.get_ready_callback().expect("no callback found"); let old_thread = this.set_active_thread(thread)?; callback(this)?; this.set_active_thread(old_thread)?; From 3da61fa4274b370dc2c72ce8b7bdbbfeb836110a Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Mon, 18 May 2020 16:39:19 +0200 Subject: [PATCH 08/17] Add comments explaining the declare_id macro. --- src/sync.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/sync.rs b/src/sync.rs index e05d111cb2..7957faeb7e 100644 --- a/src/sync.rs +++ b/src/sync.rs @@ -6,6 +6,10 @@ use rustc_index::vec::{Idx, IndexVec}; use crate::*; +/// We cannot use the `newtype_index!` macro because we have to use 0 as a +/// sentinel value meaning that the identifier is not assigned. This is because +/// the pthreads static initializers initialize memory with zeros (see the +/// `src/shims/sync.rs` file). macro_rules! declare_id { ($name: ident) => { /// 0 is used to indicate that the id was not yet assigned and, @@ -22,9 +26,13 @@ macro_rules! 
declare_id { impl Idx for $name { fn new(idx: usize) -> Self { + // We use 0 as a sentinel value (see the comment above) and, + // therefore, need to shift by one when converting from an index + // into a vector. $name(NonZeroU32::new(u32::try_from(idx).unwrap() + 1).unwrap()) } fn index(self) -> usize { + // See the comment in `Self::new`. usize::try_from(self.0.get() - 1).unwrap() } } From fdfd56b75b2aefefe6545eed704550ff5de3bdd7 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Mon, 18 May 2020 17:18:15 +0200 Subject: [PATCH 09/17] Small changes. --- src/shims/sync.rs | 63 +++++++---------- src/sync.rs | 70 +++++++++++++------ src/thread.rs | 7 +- .../libc_pthread_rwlock_read_wrong_owner.rs | 32 +++++++++ .../libc_pthread_rwlock_write_wrong_owner.rs | 32 +++++++++ 5 files changed, 143 insertions(+), 61 deletions(-) create mode 100644 tests/compile-fail/sync/libc_pthread_rwlock_read_wrong_owner.rs create mode 100644 tests/compile-fail/sync/libc_pthread_rwlock_write_wrong_owner.rs diff --git a/src/shims/sync.rs b/src/shims/sync.rs index 5432c76dfe..ee2579c22f 100644 --- a/src/shims/sync.rs +++ b/src/shims/sync.rs @@ -202,6 +202,7 @@ fn condattr_set_clock_id<'mir, 'tcx: 'mir>( // Our chosen memory layout for the emulated conditional variable (does not have // to match the platform layout!): +// bytes 0-3: reserved for signature on macOS // bytes 4-7: the conditional variable id as u32 or 0 if id is not assigned yet. // bytes 8-11: the clock id constant as i32 @@ -275,19 +276,13 @@ fn release_cond_mutex<'mir, 'tcx: 'mir>( active_thread: ThreadId, mutex: MutexId, ) -> InterpResult<'tcx> { - if let Some((owner_thread, current_locked_count)) = ecx.mutex_unlock(mutex) { - if current_locked_count != 0 { - throw_unsup_format!("awaiting on multiple times acquired lock is not supported"); + if let Some((old_owner_thread, old_locked_count)) = ecx.mutex_unlock(mutex)? 
{ + if old_locked_count != 1 { + throw_unsup_format!("awaiting on a lock acquired multiple times is not supported"); } - if owner_thread != active_thread { + if old_owner_thread != active_thread { throw_ub_format!("awaiting on a mutex owned by a different thread"); } - if let Some(thread) = ecx.mutex_dequeue(mutex) { - // We have at least one thread waiting on this mutex. Transfer - // ownership to it. - ecx.mutex_lock(mutex, thread); - ecx.unblock_thread(thread)?; - } } else { throw_ub_format!("awaiting on unlocked mutex"); } @@ -349,7 +344,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx mutexattr_get_kind(this, attr_op)?.not_undef()? }; - let _ = mutex_get_or_create_id(this, mutex_op)?; + // Write 0 to use the same code path as the static initializers. + mutex_set_id(this, mutex_op, Scalar::from_i32(0))?; + mutex_set_kind(this, mutex_op, kind)?; Ok(0) @@ -427,19 +424,10 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let kind = mutex_get_kind(this, mutex_op)?.not_undef()?; let id = mutex_get_or_create_id(this, mutex_op)?; - if let Some((owner_thread, current_locked_count)) = this.mutex_unlock(id) { - if owner_thread != this.get_active_thread()? { + if let Some((old_owner_thread, _old_locked_count)) = this.mutex_unlock(id)? { + if old_owner_thread != this.get_active_thread()? { throw_ub_format!("called pthread_mutex_unlock on a mutex owned by another thread"); } - if current_locked_count == 0 { - // The mutex is unlocked. - if let Some(thread) = this.mutex_dequeue(id) { - // We have at least one thread waiting on this mutex. Transfer - // ownership to it. - this.mutex_lock(id, thread); - this.unblock_thread(thread)?; - } - } Ok(0) } else { if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? 
{ @@ -476,11 +464,10 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let active_thread = this.get_active_thread()?; if this.rwlock_is_write_locked(id) { - this.rwlock_enqueue_reader(id, active_thread); - this.block_thread(active_thread)?; + this.rwlock_enqueue_and_block_reader(id, active_thread)?; Ok(0) } else { - this.rwlock_reader_add(id, active_thread); + this.rwlock_reader_lock(id, active_thread); Ok(0) } } @@ -494,7 +481,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx if this.rwlock_is_write_locked(id) { this.eval_libc_i32("EBUSY") } else { - this.rwlock_reader_add(id, active_thread); + this.rwlock_reader_lock(id, active_thread); Ok(0) } } @@ -506,10 +493,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let active_thread = this.get_active_thread()?; if this.rwlock_is_locked(id) { - this.block_thread(active_thread)?; - this.rwlock_enqueue_writer(id, active_thread); + this.rwlock_enqueue_and_block_writer(id, active_thread)?; } else { - this.rwlock_writer_set(id, active_thread); + this.rwlock_writer_lock(id, active_thread); } Ok(0) @@ -524,7 +510,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx if this.rwlock_is_locked(id) { this.eval_libc_i32("EBUSY") } else { - this.rwlock_writer_set(id, active_thread); + this.rwlock_writer_lock(id, active_thread); Ok(0) } } @@ -535,18 +521,18 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let id = rwlock_get_or_create_id(this, rwlock_op)?; let active_thread = this.get_active_thread()?; - if this.rwlock_reader_remove(id, active_thread) { + if this.rwlock_reader_unlock(id, active_thread) { // The thread was a reader. if this.rwlock_is_locked(id) { // No more readers owning the lock. Give it to a writer if there // is any. 
if let Some(writer) = this.rwlock_dequeue_writer(id) { this.unblock_thread(writer)?; - this.rwlock_writer_set(id, writer); + this.rwlock_writer_lock(id, writer); } } Ok(0) - } else if Some(active_thread) == this.rwlock_writer_remove(id) { + } else if Some(active_thread) == this.rwlock_writer_unlock(id) { // The thread was a writer. // // We are prioritizing writers here against the readers. As a @@ -555,12 +541,12 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx if let Some(writer) = this.rwlock_dequeue_writer(id) { // Give the lock to another writer. this.unblock_thread(writer)?; - this.rwlock_writer_set(id, writer); + this.rwlock_writer_lock(id, writer); } else { // Give the lock to all readers. while let Some(reader) = this.rwlock_dequeue_reader(id) { this.unblock_thread(reader)?; - this.rwlock_reader_add(id, reader); + this.rwlock_reader_lock(id, reader); } } Ok(0) @@ -586,6 +572,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_condattr_init(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); + // The default value of the clock attribute shall refer to the system + // clock. + // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_condattr_setclock.html let default_clock_id = this.eval_libc("CLOCK_REALTIME")?; condattr_set_clock_id(this, attr_op, default_clock_id)?; @@ -647,7 +636,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx condattr_get_clock_id(this, attr_op)?.not_undef()? }; - let _ = cond_get_or_create_id(this, cond_op)?; + // Write 0 to use the same code path as the static initializers. 
+ cond_set_id(this, cond_op, Scalar::from_i32(0))?; + cond_set_clock_id(this, cond_op, clock_id)?; Ok(0) diff --git a/src/sync.rs b/src/sync.rs index 7957faeb7e..a71d4597c6 100644 --- a/src/sync.rs +++ b/src/sync.rs @@ -1,6 +1,7 @@ use std::collections::{hash_map::Entry, HashMap, VecDeque}; use std::convert::TryFrom; use std::num::NonZeroU32; +use std::ops::Not; use rustc_index::vec::{Idx, IndexVec}; @@ -142,34 +143,47 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx mutex.lock_count = mutex.lock_count.checked_add(1).unwrap(); } - /// Unlock by decreasing the lock count. If the lock count reaches 0, unset - /// the owner. - fn mutex_unlock(&mut self, id: MutexId) -> Option<(ThreadId, usize)> { + /// Try unlocking by decreasing the lock count and returning the old owner + /// and the old lock count. If the lock count reaches 0, release the lock + /// and potentially give to a new owner. If the lock was not locked, return + /// `None`. + /// + /// Note: It is the caller's responsibility to check that the thread that + /// unlocked the lock actually is the same one, which owned it. + fn mutex_unlock(&mut self, id: MutexId) -> InterpResult<'tcx, Option<(ThreadId, usize)>> { let this = self.eval_context_mut(); let mutex = &mut this.machine.threads.sync.mutexes[id]; if let Some(current_owner) = mutex.owner { - mutex.lock_count = mutex - .lock_count + // Mutex is locked. + let old_lock_count = mutex.lock_count; + mutex.lock_count = old_lock_count .checked_sub(1) .expect("invariant violation: lock_count == 0 iff the thread is unlocked"); if mutex.lock_count == 0 { mutex.owner = None; + // The mutex is completely unlocked. Try transfering ownership + // to another thread. + if let Some(new_owner) = this.mutex_dequeue(id) { + this.mutex_lock(id, new_owner); + this.unblock_thread(new_owner)?; + } } - Some((current_owner, mutex.lock_count)) + Ok(Some((current_owner, old_lock_count))) } else { - None + // Mutex is unlocked. 
+ Ok(None) } } #[inline] - /// Take a thread out the queue waiting for the lock. + /// Put the thread into the queue waiting for the lock. fn mutex_enqueue(&mut self, id: MutexId, thread: ThreadId) { let this = self.eval_context_mut(); this.machine.threads.sync.mutexes[id].queue.push_back(thread); } #[inline] - /// Take a thread out the queue waiting for the lock. + /// Take a thread out of the queue waiting for the lock. fn mutex_dequeue(&mut self, id: MutexId) -> Option { let this = self.eval_context_mut(); this.machine.threads.sync.mutexes[id].queue.pop_front() @@ -187,7 +201,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn rwlock_is_locked(&mut self, id: RwLockId) -> bool { let this = self.eval_context_mut(); this.machine.threads.sync.rwlocks[id].writer.is_some() - || !this.machine.threads.sync.rwlocks[id].readers.is_empty() + || this.machine.threads.sync.rwlocks[id].readers.is_empty().not() } #[inline] @@ -197,16 +211,18 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx this.machine.threads.sync.rwlocks[id].writer.is_some() } - /// Add a reader that collectively with other readers owns the lock. - fn rwlock_reader_add(&mut self, id: RwLockId, reader: ThreadId) { + /// Read-lock the lock by adding the `reader` the list of threads that own + /// this lock. + fn rwlock_reader_lock(&mut self, id: RwLockId, reader: ThreadId) { let this = self.eval_context_mut(); assert!(!this.rwlock_is_write_locked(id), "the lock is write locked"); let count = this.machine.threads.sync.rwlocks[id].readers.entry(reader).or_insert(0); - *count += 1; + *count = count.checked_add(1).expect("the reader counter overflowed"); } - /// Try removing the reader. Returns `true` if succeeded. - fn rwlock_reader_remove(&mut self, id: RwLockId, reader: ThreadId) -> bool { + /// Try read-unlock the lock for `reader`. Returns `true` if succeeded, + /// `false` if this `reader` did not hold the lock. 
+ fn rwlock_reader_unlock(&mut self, id: RwLockId, reader: ThreadId) -> bool { let this = self.eval_context_mut(); match this.machine.threads.sync.rwlocks[id].readers.entry(reader) { Entry::Occupied(mut entry) => { @@ -222,15 +238,20 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx } #[inline] - /// Put the reader in the queue waiting for the lock. - fn rwlock_enqueue_reader(&mut self, id: RwLockId, reader: ThreadId) { + /// Put the reader in the queue waiting for the lock and block it. + fn rwlock_enqueue_and_block_reader( + &mut self, + id: RwLockId, + reader: ThreadId, + ) -> InterpResult<'tcx> { let this = self.eval_context_mut(); assert!(this.rwlock_is_write_locked(id), "queueing on not write locked lock"); this.machine.threads.sync.rwlocks[id].reader_queue.push_back(reader); + this.block_thread(reader) } #[inline] - /// Take the reader out the queue waiting for the lock. + /// Take a reader out the queue waiting for the lock. fn rwlock_dequeue_reader(&mut self, id: RwLockId) -> Option { let this = self.eval_context_mut(); this.machine.threads.sync.rwlocks[id].reader_queue.pop_front() @@ -238,25 +259,30 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx #[inline] /// Lock by setting the writer that owns the lock. - fn rwlock_writer_set(&mut self, id: RwLockId, writer: ThreadId) { + fn rwlock_writer_lock(&mut self, id: RwLockId, writer: ThreadId) { let this = self.eval_context_mut(); assert!(!this.rwlock_is_locked(id), "the lock is already locked"); this.machine.threads.sync.rwlocks[id].writer = Some(writer); } #[inline] - /// Try removing the writer. - fn rwlock_writer_remove(&mut self, id: RwLockId) -> Option { + /// Try to unlock by removing the writer. + fn rwlock_writer_unlock(&mut self, id: RwLockId) -> Option { let this = self.eval_context_mut(); this.machine.threads.sync.rwlocks[id].writer.take() } #[inline] /// Put the writer in the queue waiting for the lock. 
- fn rwlock_enqueue_writer(&mut self, id: RwLockId, writer: ThreadId) { + fn rwlock_enqueue_and_block_writer( + &mut self, + id: RwLockId, + writer: ThreadId, + ) -> InterpResult<'tcx> { let this = self.eval_context_mut(); assert!(this.rwlock_is_locked(id), "queueing on unlocked lock"); this.machine.threads.sync.rwlocks[id].writer_queue.push_back(writer); + this.block_thread(writer) } #[inline] diff --git a/src/thread.rs b/src/thread.rs index 69b31b541a..e61761e599 100644 --- a/src/thread.rs +++ b/src/thread.rs @@ -32,7 +32,7 @@ pub enum SchedulingAction { Stop, } -/// Timeout timeout_callbacks can be created by synchronization primitives to tell the +/// Timeout callbacks can be created by synchronization primitives to tell the /// scheduler that they should be called once some period of time passes. type TimeoutCallback<'mir, 'tcx> = Box>) -> InterpResult<'tcx> + 'tcx>; @@ -189,7 +189,7 @@ struct TimeoutCallbackInfo<'mir, 'tcx> { impl<'mir, 'tcx> std::fmt::Debug for TimeoutCallbackInfo<'mir, 'tcx> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "CallBack({:?})", self.call_time) + write!(f, "TimeoutCallback({:?})", self.call_time) } } @@ -394,7 +394,8 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { /// Get a callback that is ready to be called. fn get_ready_callback(&mut self) -> Option<(ThreadId, TimeoutCallback<'mir, 'tcx>)> { - // We use a for loop here to make the scheduler more deterministic. + // We iterate over all threads in the order of their indices because + // this allows us to have a deterministic scheduler. 
for thread in self.threads.indices() { match self.timeout_callbacks.entry(thread) { Entry::Occupied(entry) => diff --git a/tests/compile-fail/sync/libc_pthread_rwlock_read_wrong_owner.rs b/tests/compile-fail/sync/libc_pthread_rwlock_read_wrong_owner.rs new file mode 100644 index 0000000000..a73a8496a3 --- /dev/null +++ b/tests/compile-fail/sync/libc_pthread_rwlock_read_wrong_owner.rs @@ -0,0 +1,32 @@ +// ignore-windows: No libc on Windows + +#![feature(rustc_private)] + +extern crate libc; + +use std::cell::UnsafeCell; +use std::sync::Arc; +use std::thread; + +struct RwLock(UnsafeCell); + +unsafe impl Send for RwLock {} +unsafe impl Sync for RwLock {} + +fn new_lock() -> Arc { + Arc::new(RwLock(UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER))) +} + +fn main() { + unsafe { + let lock = new_lock(); + assert_eq!(libc::pthread_rwlock_rdlock(lock.0.get() as *mut _), 0); + + let lock_copy = lock.clone(); + thread::spawn(move || { + assert_eq!(libc::pthread_rwlock_unlock(lock_copy.0.get() as *mut _), 0); //~ ERROR: Undefined Behavior: unlocked an rwlock that was not locked by the active thread + }) + .join() + .unwrap(); + } +} diff --git a/tests/compile-fail/sync/libc_pthread_rwlock_write_wrong_owner.rs b/tests/compile-fail/sync/libc_pthread_rwlock_write_wrong_owner.rs new file mode 100644 index 0000000000..663dedb6f6 --- /dev/null +++ b/tests/compile-fail/sync/libc_pthread_rwlock_write_wrong_owner.rs @@ -0,0 +1,32 @@ +// ignore-windows: No libc on Windows + +#![feature(rustc_private)] + +extern crate libc; + +use std::cell::UnsafeCell; +use std::sync::Arc; +use std::thread; + +struct RwLock(UnsafeCell); + +unsafe impl Send for RwLock {} +unsafe impl Sync for RwLock {} + +fn new_lock() -> Arc { + Arc::new(RwLock(UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER))) +} + +fn main() { + unsafe { + let lock = new_lock(); + assert_eq!(libc::pthread_rwlock_wrlock(lock.0.get() as *mut _), 0); + + let lock_copy = lock.clone(); + thread::spawn(move || { + 
assert_eq!(libc::pthread_rwlock_unlock(lock_copy.0.get() as *mut _), 0); //~ ERROR: Undefined Behavior: unlocked an rwlock that was not locked by the active thread + }) + .join() + .unwrap(); + } +} From 0838347d8f77091ffb5a30606010d0bbedda22a4 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Tue, 19 May 2020 16:26:42 +0200 Subject: [PATCH 10/17] Change the scheduling to execute timeout callbacks first. --- src/thread.rs | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/src/thread.rs b/src/thread.rs index e61761e599..70c2419c4d 100644 --- a/src/thread.rs +++ b/src/thread.rs @@ -441,6 +441,22 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { } return Ok(SchedulingAction::Stop); } + // At least for `pthread_cond_timedwait` we need to report timeout when + // the function is called already after the specified time even if a + // signal is received before the thread gets scheduled. Therefore, we + // need to schedule all timeout callbacks before we continue regular + // execution. + // + // Documentation: + // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_cond_timedwait.html# + if let Some(sleep_time) = + self.timeout_callbacks.values().map(|info| info.call_time.get_wait_time()).min() + { + if sleep_time == Duration::new(0, 0) { + return Ok(SchedulingAction::ExecuteTimeoutCallback); + } + } + // No callbacks scheduled, pick a regular thread to execute. if self.threads[self.active_thread].state == ThreadState::Enabled && !self.yield_active_thread { From 8b5a9836be5b115f27f48406f68bf64d931ceabc Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Tue, 19 May 2020 16:47:25 +0200 Subject: [PATCH 11/17] Small changes. 
--- src/shims/sync.rs | 27 ++++++++---- src/sync.rs | 1 + .../run-pass/concurrency/libc_pthread_cond.rs | 42 ++++--------------- tests/run-pass/concurrency/sync.rs | 5 ++- 4 files changed, 30 insertions(+), 45 deletions(-) diff --git a/src/shims/sync.rs b/src/shims/sync.rs index ee2579c22f..f34799f742 100644 --- a/src/shims/sync.rs +++ b/src/shims/sync.rs @@ -254,7 +254,8 @@ fn cond_set_clock_id<'mir, 'tcx: 'mir>( set_at_offset(ecx, cond_op, 8, clock_id, ecx.machine.layouts.i32, PTHREAD_COND_T_MIN_SIZE) } -/// Try to reacquire the mutex associated with the condition variable after we were signaled. +/// Try to reacquire the mutex associated with the condition variable after we +/// were signaled. fn reacquire_cond_mutex<'mir, 'tcx: 'mir>( ecx: &mut MiriEvalContext<'mir, 'tcx>, thread: ThreadId, @@ -269,6 +270,17 @@ fn reacquire_cond_mutex<'mir, 'tcx: 'mir>( Ok(()) } +/// Reacquire the conditional variable and remove the timeout callback if any +/// was registered. +fn post_cond_signal<'mir, 'tcx: 'mir>( + ecx: &mut MiriEvalContext<'mir, 'tcx>, + thread: ThreadId, + mutex: MutexId, +) -> InterpResult<'tcx> { + reacquire_cond_mutex(ecx, thread, mutex)?; + ecx.unregister_timeout_callback_if_exists(thread) +} + /// Release the mutex associated with the condition variable because we are /// entering the waiting state. 
fn release_cond_mutex<'mir, 'tcx: 'mir>( @@ -648,8 +660,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let this = self.eval_context_mut(); let id = cond_get_or_create_id(this, cond_op)?; if let Some((thread, mutex)) = this.condvar_signal(id) { - reacquire_cond_mutex(this, thread, mutex)?; - this.unregister_timeout_callback_if_exists(thread)?; + post_cond_signal(this, thread, mutex)?; } Ok(0) @@ -660,8 +671,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let id = cond_get_or_create_id(this, cond_op)?; while let Some((thread, mutex)) = this.condvar_signal(id) { - reacquire_cond_mutex(this, thread, mutex)?; - this.unregister_timeout_callback_if_exists(thread)?; + post_cond_signal(this, thread, mutex)?; } Ok(0) @@ -730,7 +740,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx } else if clock_id == this.eval_libc_i32("CLOCK_MONOTONIC")? { Time::Monotonic(this.machine.time_anchor.checked_add(duration).unwrap()) } else { - throw_ub_format!("Unsupported clock id."); + throw_unsup_format!("Unsupported clock id."); }; // Register the timeout callback. @@ -738,13 +748,14 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx active_thread, timeout_time, Box::new(move |ecx| { - // Try to reacquire the mutex. + // We are not waiting for the condvar any more, wait for the + // mutex instead. reacquire_cond_mutex(ecx, active_thread, mutex_id)?; // Remove the thread from the conditional variable. ecx.condvar_remove_waiter(id, active_thread); - // Set the timeout value. + // Set the return value: we timed out. 
let timeout = ecx.eval_libc_i32("ETIMEDOUT")?; ecx.write_scalar(Scalar::from_i32(timeout), dest)?; diff --git a/src/sync.rs b/src/sync.rs index a71d4597c6..cbae29bdbb 100644 --- a/src/sync.rs +++ b/src/sync.rs @@ -179,6 +179,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx /// Put the thread into the queue waiting for the lock. fn mutex_enqueue(&mut self, id: MutexId, thread: ThreadId) { let this = self.eval_context_mut(); + assert!(this.mutex_is_locked(id), "queing on unlocked mutex"); this.machine.threads.sync.mutexes[id].queue.push_back(thread); } diff --git a/tests/run-pass/concurrency/libc_pthread_cond.rs b/tests/run-pass/concurrency/libc_pthread_cond.rs index 9b7a06b431..39b6a7e4ef 100644 --- a/tests/run-pass/concurrency/libc_pthread_cond.rs +++ b/tests/run-pass/concurrency/libc_pthread_cond.rs @@ -11,11 +11,11 @@ extern crate libc; use std::mem; use std::time::Instant; -fn test_timed_wait_timeout_monotonic() { +fn test_timed_wait_timeout(clock_id: i32) { unsafe { let mut attr: libc::pthread_condattr_t = mem::zeroed(); assert_eq!(libc::pthread_condattr_init(&mut attr as *mut _), 0); - assert_eq!(libc::pthread_condattr_setclock(&mut attr as *mut _, libc::CLOCK_MONOTONIC), 0); + assert_eq!(libc::pthread_condattr_setclock(&mut attr as *mut _, clock_id), 0); let mut cond: libc::pthread_cond_t = mem::zeroed(); assert_eq!(libc::pthread_cond_init(&mut cond as *mut _, &attr as *const _), 0); @@ -24,7 +24,7 @@ fn test_timed_wait_timeout_monotonic() { let mut mutex: libc::pthread_mutex_t = mem::zeroed(); let mut now: libc::timespec = mem::zeroed(); - assert_eq!(libc::clock_gettime(libc::CLOCK_MONOTONIC, &mut now), 0); + assert_eq!(libc::clock_gettime(clock_id, &mut now), 0); let timeout = libc::timespec { tv_sec: now.tv_sec + 1, tv_nsec: now.tv_nsec }; assert_eq!(libc::pthread_mutex_lock(&mut mutex as *mut _), 0); @@ -33,36 +33,8 @@ fn test_timed_wait_timeout_monotonic() { libc::pthread_cond_timedwait(&mut cond as *mut _, &mut mutex 
as *mut _, &timeout), libc::ETIMEDOUT ); - assert!(current_time.elapsed().as_millis() >= 900); - assert_eq!(libc::pthread_mutex_unlock(&mut mutex as *mut _), 0); - assert_eq!(libc::pthread_mutex_destroy(&mut mutex as *mut _), 0); - assert_eq!(libc::pthread_cond_destroy(&mut cond as *mut _), 0); - } -} - -fn test_timed_wait_timeout_realtime() { - unsafe { - let mut attr: libc::pthread_condattr_t = mem::zeroed(); - assert_eq!(libc::pthread_condattr_init(&mut attr as *mut _), 0); - assert_eq!(libc::pthread_condattr_setclock(&mut attr as *mut _, libc::CLOCK_REALTIME), 0); - - let mut cond: libc::pthread_cond_t = mem::zeroed(); - assert_eq!(libc::pthread_cond_init(&mut cond as *mut _, &attr as *const _), 0); - assert_eq!(libc::pthread_condattr_destroy(&mut attr as *mut _), 0); - - let mut mutex: libc::pthread_mutex_t = mem::zeroed(); - - let mut now: libc::timespec = mem::zeroed(); - assert_eq!(libc::clock_gettime(libc::CLOCK_REALTIME, &mut now), 0); - let timeout = libc::timespec { tv_sec: now.tv_sec + 1, tv_nsec: now.tv_nsec }; - - assert_eq!(libc::pthread_mutex_lock(&mut mutex as *mut _), 0); - let current_time = Instant::now(); - assert_eq!( - libc::pthread_cond_timedwait(&mut cond as *mut _, &mut mutex as *mut _, &timeout), - libc::ETIMEDOUT - ); - assert!(current_time.elapsed().as_millis() >= 900); + let elapsed_time = current_time.elapsed().as_millis(); + assert!(900 <= elapsed_time && elapsed_time <= 1100); assert_eq!(libc::pthread_mutex_unlock(&mut mutex as *mut _), 0); assert_eq!(libc::pthread_mutex_destroy(&mut mutex as *mut _), 0); assert_eq!(libc::pthread_cond_destroy(&mut cond as *mut _), 0); @@ -70,6 +42,6 @@ fn test_timed_wait_timeout_realtime() { } fn main() { - test_timed_wait_timeout_monotonic(); - test_timed_wait_timeout_realtime(); + test_timed_wait_timeout(libc::CLOCK_MONOTONIC); + test_timed_wait_timeout(libc::CLOCK_REALTIME); } diff --git a/tests/run-pass/concurrency/sync.rs b/tests/run-pass/concurrency/sync.rs index e3f3a03b11..5c19eee342 100644 
--- a/tests/run-pass/concurrency/sync.rs +++ b/tests/run-pass/concurrency/sync.rs @@ -35,8 +35,9 @@ fn check_conditional_variables_notify_one() { let pair = Arc::new((Mutex::new(false), Condvar::new())); let pair2 = pair.clone(); - // Inside of our lock, spawn a new thread, and then wait for it to start. + // Spawn a new thread. thread::spawn(move || { + thread::yield_now(); let (lock, cvar) = &*pair2; let mut started = lock.lock().unwrap(); *started = true; @@ -44,7 +45,7 @@ fn check_conditional_variables_notify_one() { cvar.notify_one(); }); - // Wait for the thread to start up. + // Wait for the thread to fully start up. let (lock, cvar) = &*pair; let mut started = lock.lock().unwrap(); while !*started { From babedc938e2c9a681737b3468b9124a58b2ec677 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Tue, 19 May 2020 18:33:26 +0200 Subject: [PATCH 12/17] Rewrite notify all test. --- tests/run-pass/concurrency/sync.rs | 39 ++++++++++++++++++------------ 1 file changed, 23 insertions(+), 16 deletions(-) diff --git a/tests/run-pass/concurrency/sync.rs b/tests/run-pass/concurrency/sync.rs index 5c19eee342..faf47851bd 100644 --- a/tests/run-pass/concurrency/sync.rs +++ b/tests/run-pass/concurrency/sync.rs @@ -53,25 +53,32 @@ fn check_conditional_variables_notify_one() { } } -/// The test taken from the Rust documentation. fn check_conditional_variables_notify_all() { - let pair = Arc::new((Mutex::new(false), Condvar::new())); - let pair2 = pair.clone(); + let pair = Arc::new(((Mutex::new(())), Condvar::new())); - thread::spawn(move || { - let (lock, cvar) = &*pair2; - let mut started = lock.lock().unwrap(); - *started = true; - // We notify the condvar that the value has changed. - cvar.notify_all(); - }); + // Spawn threads and block them on the conditional variable. 
+ let handles: Vec<_> = (0..5) + .map(|_| { + let pair2 = pair.clone(); + thread::spawn(move || { + let (lock, cvar) = &*pair2; + let guard = lock.lock().unwrap(); + // Block waiting on the conditional variable. + let _ = cvar.wait(guard).unwrap(); + }) + }) + .inspect(|_| { + thread::yield_now(); + thread::yield_now(); + }) + .collect(); - // Wait for the thread to start up. - let (lock, cvar) = &*pair; - let mut started = lock.lock().unwrap(); - // As long as the value inside the `Mutex` is `false`, we wait. - while !*started { - started = cvar.wait(started).unwrap(); + let (_, cvar) = &*pair; + // Unblock all threads. + cvar.notify_all(); + + for handle in handles { + handle.join().unwrap(); } } From bd97074517c6ba334247b70f33199e40374c223a Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Tue, 19 May 2020 18:44:32 +0200 Subject: [PATCH 13/17] Small changes. --- src/shims/sync.rs | 79 ++++++++++++------- src/sync.rs | 16 +++- src/thread.rs | 11 ++- .../libc_pthread_mutex_normal_deadlock.rs | 4 +- .../sync/libc_pthread_mutex_wrong_owner.rs | 2 +- tests/run-pass/concurrency/sync.rs | 7 +- 6 files changed, 77 insertions(+), 42 deletions(-) diff --git a/src/shims/sync.rs b/src/shims/sync.rs index f34799f742..4fe3534739 100644 --- a/src/shims/sync.rs +++ b/src/shims/sync.rs @@ -288,15 +288,12 @@ fn release_cond_mutex<'mir, 'tcx: 'mir>( active_thread: ThreadId, mutex: MutexId, ) -> InterpResult<'tcx> { - if let Some((old_owner_thread, old_locked_count)) = ecx.mutex_unlock(mutex)? { + if let Some(old_locked_count) = ecx.mutex_unlock(mutex, active_thread)? 
{ if old_locked_count != 1 { throw_unsup_format!("awaiting on a lock acquired multiple times is not supported"); } - if old_owner_thread != active_thread { - throw_ub_format!("awaiting on a mutex owned by a different thread"); - } } else { - throw_ub_format!("awaiting on unlocked mutex"); + throw_ub_format!("awaiting on unlocked or owned by a different thread mutex"); } ecx.block_thread(active_thread)?; Ok(()) @@ -321,7 +318,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let this = self.eval_context_mut(); let kind = this.read_scalar(kind_op)?.not_undef()?; - if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? + if kind == this.eval_libc("PTHREAD_MUTEX_DEFAULT")? + || kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? || kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? || kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? { @@ -380,6 +378,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx Ok(0) } else { // Trying to acquire the same mutex again. + if kind == this.eval_libc("PTHREAD_MUTEX_DEFAULT")? { + // FIXME: Sometimes this is actually a Deadlock. + // https://github.com/rust-lang/miri/issues/1419 + throw_ub_format!( + "trying to acquire already locked PTHREAD_MUTEX_DEFAULT (see #1419)" + ); + } if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? { throw_machine_stop!(TerminationInfo::Deadlock); } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? 
{ @@ -388,7 +393,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx this.mutex_lock(id, active_thread); Ok(0) } else { - throw_ub_format!("called pthread_mutex_lock on an unsupported type of mutex"); + throw_unsup_format!( + "called pthread_mutex_lock on an unsupported type of mutex" + ); } } } else { @@ -410,7 +417,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx if owner_thread != active_thread { this.eval_libc_i32("EBUSY") } else { - if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? + if kind == this.eval_libc("PTHREAD_MUTEX_DEFAULT")? + || kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? || kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? { this.eval_libc_i32("EBUSY") @@ -418,7 +426,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx this.mutex_lock(id, active_thread); Ok(0) } else { - throw_ub_format!( + throw_unsup_format!( "called pthread_mutex_trylock on an unsupported type of mutex" ); } @@ -435,21 +443,29 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let kind = mutex_get_kind(this, mutex_op)?.not_undef()?; let id = mutex_get_or_create_id(this, mutex_op)?; + let active_thread = this.get_active_thread()?; - if let Some((old_owner_thread, _old_locked_count)) = this.mutex_unlock(id)? { - if old_owner_thread != this.get_active_thread()? { - throw_ub_format!("called pthread_mutex_unlock on a mutex owned by another thread"); - } + if let Some(_old_locked_count) = this.mutex_unlock(id, active_thread)? { + // The mutex was locked by the current thread. Ok(0) } else { - if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? { - throw_ub_format!("unlocked a PTHREAD_MUTEX_NORMAL mutex that was not locked"); + // The mutex was locked by another thread or not locked at all. See + // the “Unlock When Not Owner” column in + // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_mutex_unlock.html. 
+ if kind == this.eval_libc("PTHREAD_MUTEX_DEFAULT")? { + throw_ub_format!( + "unlocked a PTHREAD_MUTEX_DEFAULT mutex that was not locked by the current thread" + ); + } else if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? { + throw_ub_format!( + "unlocked a PTHREAD_MUTEX_NORMAL mutex that was not locked by the current thread" + ); } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? { this.eval_libc_i32("EPERM") } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? { this.eval_libc_i32("EPERM") } else { - throw_ub_format!("called pthread_mutex_unlock on an unsupported type of mutex"); + throw_unsup_format!("called pthread_mutex_unlock on an unsupported type of mutex"); } } } @@ -505,6 +521,18 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let active_thread = this.get_active_thread()?; if this.rwlock_is_locked(id) { + // Note: this will deadlock if the lock is already locked by this + // thread in any way. + // + // Relevant documentation: + // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_rwlock_wrlock.html + // An in depth discussion on this topic: + // https://github.com/rust-lang/rust/issues/53127 + // + // FIXME: Detect and report the deadlock proactively. (We currently + // report the deadlock only when no thread can continue execution, + // but we could detect that this lock is already locked and report + // an error.) 
this.rwlock_enqueue_and_block_writer(id, active_thread)?; } else { this.rwlock_writer_lock(id, active_thread); @@ -719,19 +747,14 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let clock_id = cond_get_clock_id(this, cond_op)?.to_i32()?; let duration = { let tp = this.deref_operand(abstime_op)?; - let mut offset = Size::from_bytes(0); - let layout = this.libc_ty_layout("time_t")?; - let seconds_place = tp.offset(offset, MemPlaceMeta::None, layout, this)?; + let seconds_place = this.mplace_field(tp, 0)?; let seconds = this.read_scalar(seconds_place.into())?; - offset += layout.size; - let layout = this.libc_ty_layout("c_long")?; - let nanoseconds_place = tp.offset(offset, MemPlaceMeta::None, layout, this)?; + let nanoseconds_place = this.mplace_field(tp, 1)?; let nanoseconds = this.read_scalar(nanoseconds_place.into())?; - let (seconds, nanoseconds) = if this.pointer_size().bytes() == 8 { - (seconds.to_u64()?, nanoseconds.to_u64()?.try_into().unwrap()) - } else { - (seconds.to_u32()?.into(), nanoseconds.to_u32()?) - }; + let (seconds, nanoseconds) = ( + seconds.to_machine_usize(this)?, + nanoseconds.to_machine_usize(this)?.try_into().unwrap(), + ); Duration::new(seconds, nanoseconds) }; @@ -740,7 +763,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx } else if clock_id == this.eval_libc_i32("CLOCK_MONOTONIC")? { Time::Monotonic(this.machine.time_anchor.checked_add(duration).unwrap()) } else { - throw_unsup_format!("Unsupported clock id."); + throw_unsup_format!("unsupported clock id: {}", clock_id); }; // Register the timeout callback. diff --git a/src/sync.rs b/src/sync.rs index cbae29bdbb..026542926e 100644 --- a/src/sync.rs +++ b/src/sync.rs @@ -30,10 +30,12 @@ macro_rules! declare_id { // We use 0 as a sentinel value (see the comment above) and, // therefore, need to shift by one when converting from an index // into a vector. 
- $name(NonZeroU32::new(u32::try_from(idx).unwrap() + 1).unwrap()) + let shifted_idx = u32::try_from(idx).unwrap().checked_add(1).unwrap(); + $name(NonZeroU32::new(shifted_idx).unwrap()) } fn index(self) -> usize { // See the comment in `Self::new`. + // (This cannot underflow because self is NonZeroU32.) usize::try_from(self.0.get() - 1).unwrap() } } @@ -150,11 +152,19 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx /// /// Note: It is the caller's responsibility to check that the thread that /// unlocked the lock actually is the same one, which owned it. - fn mutex_unlock(&mut self, id: MutexId) -> InterpResult<'tcx, Option<(ThreadId, usize)>> { + fn mutex_unlock( + &mut self, + id: MutexId, + expected_owner: ThreadId, + ) -> InterpResult<'tcx, Option> { let this = self.eval_context_mut(); let mutex = &mut this.machine.threads.sync.mutexes[id]; if let Some(current_owner) = mutex.owner { // Mutex is locked. + if current_owner != expected_owner { + // Only the owner can unlock the mutex. + return Ok(None); + } let old_lock_count = mutex.lock_count; mutex.lock_count = old_lock_count .checked_sub(1) @@ -168,7 +178,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx this.unblock_thread(new_owner)?; } } - Ok(Some((current_owner, old_lock_count))) + Ok(Some(old_lock_count)) } else { // Mutex is unlocked. Ok(None) diff --git a/src/thread.rs b/src/thread.rs index 70c2419c4d..45b07477fa 100644 --- a/src/thread.rs +++ b/src/thread.rs @@ -159,6 +159,7 @@ impl<'mir, 'tcx> Default for Thread<'mir, 'tcx> { } } +/// A specific moment in time. 
#[derive(Debug)] pub enum Time { Monotonic(Instant), @@ -449,9 +450,9 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { // // Documentation: // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_cond_timedwait.html# - if let Some(sleep_time) = - self.timeout_callbacks.values().map(|info| info.call_time.get_wait_time()).min() - { + let potential_sleep_time = + self.timeout_callbacks.values().map(|info| info.call_time.get_wait_time()).min(); + if let Some(sleep_time) = potential_sleep_time { if sleep_time == Duration::new(0, 0) { return Ok(SchedulingAction::ExecuteTimeoutCallback); } @@ -479,9 +480,7 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { // We have not found a thread to execute. if self.threads.iter().all(|thread| thread.state == ThreadState::Terminated) { unreachable!("all threads terminated without the main thread terminating?!"); - } else if let Some(sleep_time) = - self.timeout_callbacks.values().map(|info| info.call_time.get_wait_time()).min() - { + } else if let Some(sleep_time) = potential_sleep_time { // All threads are currently blocked, but we have unexecuted // timeout_callbacks, which may unblock some of the threads. Hence, // sleep until the first callback. diff --git a/tests/compile-fail/sync/libc_pthread_mutex_normal_deadlock.rs b/tests/compile-fail/sync/libc_pthread_mutex_normal_deadlock.rs index 7034bf64ec..4af8ee5df4 100644 --- a/tests/compile-fail/sync/libc_pthread_mutex_normal_deadlock.rs +++ b/tests/compile-fail/sync/libc_pthread_mutex_normal_deadlock.rs @@ -11,6 +11,8 @@ fn main() { let mut mutex: libc::pthread_mutex_t = std::mem::zeroed(); assert_eq!(libc::pthread_mutex_init(&mut mutex as *mut _, &mutexattr as *const _), 0); assert_eq!(libc::pthread_mutex_lock(&mut mutex as *mut _), 0); - libc::pthread_mutex_lock(&mut mutex as *mut _); //~ ERROR deadlock + // FIXME: The error should be deadlock. See issue + // https://github.com/rust-lang/miri/issues/1419. 
+ libc::pthread_mutex_lock(&mut mutex as *mut _); //~ ERROR Undefined Behavior } } diff --git a/tests/compile-fail/sync/libc_pthread_mutex_wrong_owner.rs b/tests/compile-fail/sync/libc_pthread_mutex_wrong_owner.rs index 3009721abe..e67e8d366e 100644 --- a/tests/compile-fail/sync/libc_pthread_mutex_wrong_owner.rs +++ b/tests/compile-fail/sync/libc_pthread_mutex_wrong_owner.rs @@ -24,7 +24,7 @@ fn main() { let lock_copy = lock.clone(); thread::spawn(move || { - assert_eq!(libc::pthread_mutex_unlock(lock_copy.0.get() as *mut _), 0); //~ ERROR: Undefined Behavior: called pthread_mutex_unlock on a mutex owned by another thread + assert_eq!(libc::pthread_mutex_unlock(lock_copy.0.get() as *mut _), 0); //~ ERROR: Undefined Behavior: unlocked a PTHREAD_MUTEX_DEFAULT mutex that was not locked }) .join() .unwrap(); diff --git a/tests/run-pass/concurrency/sync.rs b/tests/run-pass/concurrency/sync.rs index faf47851bd..2009c01ce9 100644 --- a/tests/run-pass/concurrency/sync.rs +++ b/tests/run-pass/concurrency/sync.rs @@ -91,7 +91,8 @@ fn check_conditional_variables_timed_wait_timeout() { let now = Instant::now(); let (_guard, timeout) = cvar.wait_timeout(guard, Duration::from_millis(100)).unwrap(); assert!(timeout.timed_out()); - assert!(now.elapsed().as_millis() >= 100); + let elapsed_time = now.elapsed().as_millis(); + assert!(100 <= elapsed_time && elapsed_time <= 300); } /// Test that signaling a conditional variable when waiting with a timeout works @@ -243,7 +244,7 @@ fn get_cached_val() -> usize { fn expensive_computation() -> usize { let mut i = 1; let mut c = 1; - while i < 10000 { + while i < 1000 { i *= c; c += 1; } @@ -257,7 +258,7 @@ fn check_once() { thread::spawn(|| { thread::yield_now(); let val = get_cached_val(); - assert_eq!(val, 40320); + assert_eq!(val, 5040); }) }) .collect(); From 6ff0af3adf6aa9d1dac07d45cd40bdc8b123d229 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Sun, 24 May 2020 20:20:28 +0200 Subject: [PATCH 14/17] Fix #1419. 
--- src/shims/sync.rs | 58 ++++++++++++++----- .../libc_pthread_mutex_normal_deadlock.rs | 4 +- 2 files changed, 45 insertions(+), 17 deletions(-) diff --git a/src/shims/sync.rs b/src/shims/sync.rs index 4fe3534739..ee139b0579 100644 --- a/src/shims/sync.rs +++ b/src/shims/sync.rs @@ -58,8 +58,31 @@ fn set_at_offset<'mir, 'tcx: 'mir>( // store an i32 in the first four bytes equal to the corresponding libc mutex kind constant // (e.g. PTHREAD_MUTEX_NORMAL). +/// A flag that allows to distinguish `PTHREAD_MUTEX_NORMAL` from +/// `PTHREAD_MUTEX_DEFAULT`. Since in `glibc` they have the same numeric values, +/// but different behaviour, we need a way to distinguish them. We do this by +/// setting this bit flag to the `PTHREAD_MUTEX_NORMAL` mutexes. See the comment +/// in `pthread_mutexattr_settype` function. +const PTHREAD_MUTEX_NORMAL_FLAG: i32 = 0x8000000; + const PTHREAD_MUTEXATTR_T_MIN_SIZE: u64 = 4; +fn is_mutex_kind_default<'mir, 'tcx: 'mir>( + ecx: &mut MiriEvalContext<'mir, 'tcx>, + kind: Scalar, +) -> InterpResult<'tcx, bool> { + Ok(kind == ecx.eval_libc("PTHREAD_MUTEX_DEFAULT")?) +} + +fn is_mutex_kind_normal<'mir, 'tcx: 'mir>( + ecx: &mut MiriEvalContext<'mir, 'tcx>, + kind: Scalar, +) -> InterpResult<'tcx, bool> { + let kind = kind.to_i32()?; + let mutex_normal_kind = ecx.eval_libc("PTHREAD_MUTEX_NORMAL")?.to_i32()?; + Ok(kind == (mutex_normal_kind | PTHREAD_MUTEX_NORMAL_FLAG)) +} + fn mutexattr_get_kind<'mir, 'tcx: 'mir>( ecx: &MiriEvalContext<'mir, 'tcx>, attr_op: OpTy<'tcx, Tag>, @@ -318,8 +341,20 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let this = self.eval_context_mut(); let kind = this.read_scalar(kind_op)?.not_undef()?; - if kind == this.eval_libc("PTHREAD_MUTEX_DEFAULT")? - || kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? + if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? 
{ + // In `glibc` implementation, the numeric values of + // `PTHREAD_MUTEX_NORMAL` and `PTHREAD_MUTEX_DEFAULT` are equal, but + // they have different behaviour in some cases. Therefore, we add + // this flag to ensure that we can distinguish + // `PTHREAD_MUTEX_NORMAL` from `PTHREAD_MUTEX_DEFAULT`. + let normal_kind = kind.to_i32()? | PTHREAD_MUTEX_NORMAL_FLAG; + // Check that after setting the flag, the kind is distinguishable + // from all other kinds. + assert_ne!(normal_kind, this.eval_libc("PTHREAD_MUTEX_DEFAULT")?.to_i32()?); + assert_ne!(normal_kind, this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")?.to_i32()?); + assert_ne!(normal_kind, this.eval_libc("PTHREAD_MUTEX_RECURSIVE")?.to_i32()?); + mutexattr_set_kind(this, attr_op, Scalar::from_i32(normal_kind))?; + } else if kind == this.eval_libc("PTHREAD_MUTEX_DEFAULT")? || kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? || kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? { @@ -378,14 +413,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx Ok(0) } else { // Trying to acquire the same mutex again. - if kind == this.eval_libc("PTHREAD_MUTEX_DEFAULT")? { - // FIXME: Sometimes this is actually a Deadlock. - // https://github.com/rust-lang/miri/issues/1419 - throw_ub_format!( - "trying to acquire already locked PTHREAD_MUTEX_DEFAULT (see #1419)" - ); - } - if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? { + if is_mutex_kind_default(this, kind)? { + throw_ub_format!("trying to acquire already locked PTHREAD_MUTEX_DEFAULT"); + } else if is_mutex_kind_normal(this, kind)? { throw_machine_stop!(TerminationInfo::Deadlock); } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? { this.eval_libc_i32("EDEADLK") @@ -417,8 +447,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx if owner_thread != active_thread { this.eval_libc_i32("EBUSY") } else { - if kind == this.eval_libc("PTHREAD_MUTEX_DEFAULT")? 
- || kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? + if is_mutex_kind_default(this, kind)? + || is_mutex_kind_normal(this, kind)? || kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? { this.eval_libc_i32("EBUSY") @@ -452,11 +482,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx // The mutex was locked by another thread or not locked at all. See // the “Unlock When Not Owner” column in // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_mutex_unlock.html. - if kind == this.eval_libc("PTHREAD_MUTEX_DEFAULT")? { + if is_mutex_kind_default(this, kind)? { throw_ub_format!( "unlocked a PTHREAD_MUTEX_DEFAULT mutex that was not locked by the current thread" ); - } else if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? { + } else if is_mutex_kind_normal(this, kind)? { throw_ub_format!( "unlocked a PTHREAD_MUTEX_NORMAL mutex that was not locked by the current thread" ); diff --git a/tests/compile-fail/sync/libc_pthread_mutex_normal_deadlock.rs b/tests/compile-fail/sync/libc_pthread_mutex_normal_deadlock.rs index 4af8ee5df4..96e0ff3bff 100644 --- a/tests/compile-fail/sync/libc_pthread_mutex_normal_deadlock.rs +++ b/tests/compile-fail/sync/libc_pthread_mutex_normal_deadlock.rs @@ -11,8 +11,6 @@ fn main() { let mut mutex: libc::pthread_mutex_t = std::mem::zeroed(); assert_eq!(libc::pthread_mutex_init(&mut mutex as *mut _, &mutexattr as *const _), 0); assert_eq!(libc::pthread_mutex_lock(&mut mutex as *mut _), 0); - // FIXME: The error should be deadlock. See issue - // https://github.com/rust-lang/miri/issues/1419. - libc::pthread_mutex_lock(&mut mutex as *mut _); //~ ERROR Undefined Behavior + libc::pthread_mutex_lock(&mut mutex as *mut _); //~ ERROR deadlock: the evaluated program deadlocked } } From 90590a399d326641a9132bb4a33f4645c08b73d8 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Sun, 24 May 2020 20:29:56 +0200 Subject: [PATCH 15/17] Small fixes. 
--- src/shims/sync.rs | 21 +++++++++++++------ src/thread.rs | 15 +++++++++---- .../sync/libc_pthread_mutex_NULL_deadlock.rs | 16 ++++++++++++++ .../libc_pthread_mutex_default_deadlock.rs | 17 +++++++++++++++ .../sync/libc_pthread_mutex_wrong_owner.rs | 2 +- 5 files changed, 60 insertions(+), 11 deletions(-) create mode 100644 tests/compile-fail/sync/libc_pthread_mutex_NULL_deadlock.rs create mode 100644 tests/compile-fail/sync/libc_pthread_mutex_default_deadlock.rs diff --git a/src/shims/sync.rs b/src/shims/sync.rs index ee139b0579..95092a042d 100644 --- a/src/shims/sync.rs +++ b/src/shims/sync.rs @@ -301,6 +301,8 @@ fn post_cond_signal<'mir, 'tcx: 'mir>( mutex: MutexId, ) -> InterpResult<'tcx> { reacquire_cond_mutex(ecx, thread, mutex)?; + // Waiting for the mutex is not included in the waiting time because we need + // to acquire the mutex always even if we get a timeout. ecx.unregister_timeout_callback_if_exists(thread) } @@ -343,10 +345,17 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let kind = this.read_scalar(kind_op)?.not_undef()?; if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? { // In `glibc` implementation, the numeric values of - // `PTHREAD_MUTEX_NORMAL` and `PTHREAD_MUTEX_DEFAULT` are equal, but - // they have different behaviour in some cases. Therefore, we add - // this flag to ensure that we can distinguish - // `PTHREAD_MUTEX_NORMAL` from `PTHREAD_MUTEX_DEFAULT`. + // `PTHREAD_MUTEX_NORMAL` and `PTHREAD_MUTEX_DEFAULT` are equal. + // However, a mutex created by explicitly passing + // `PTHREAD_MUTEX_NORMAL` type has in some cases different behaviour + // from the default mutex for which the type was not explicitly + // specified. For a more detailed discussion, please see + // https://github.com/rust-lang/miri/issues/1419. 
+ // + // To distinguish these two cases in already constructed mutexes, we + // use the same trick as glibc: for the case when + // `pthread_mutexattr_settype` is called explicitly, we set the + // `PTHREAD_MUTEX_NORMAL_FLAG` flag. let normal_kind = kind.to_i32()? | PTHREAD_MUTEX_NORMAL_FLAG; // Check that after setting the flag, the kind is distinguishable // from all other kinds. @@ -414,7 +423,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx } else { // Trying to acquire the same mutex again. if is_mutex_kind_default(this, kind)? { - throw_ub_format!("trying to acquire already locked PTHREAD_MUTEX_DEFAULT"); + throw_ub_format!("trying to acquire already locked default mutex"); } else if is_mutex_kind_normal(this, kind)? { throw_machine_stop!(TerminationInfo::Deadlock); } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? { @@ -484,7 +493,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_mutex_unlock.html. if is_mutex_kind_default(this, kind)? { throw_ub_format!( - "unlocked a PTHREAD_MUTEX_DEFAULT mutex that was not locked by the current thread" + "unlocked a default mutex that was not locked by the current thread" ); } else if is_mutex_kind_normal(this, kind)? { throw_ub_format!( diff --git a/src/thread.rs b/src/thread.rs index 45b07477fa..59f08eec16 100644 --- a/src/thread.rs +++ b/src/thread.rs @@ -377,6 +377,9 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { } /// Register the given `callback` to be called once the `call_time` passes. + /// + /// The callback will be called with `thread` being the active thread, and + /// the callback may not change the active thread.
fn register_timeout_callback( &mut self, thread: ThreadId, @@ -452,10 +455,8 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_cond_timedwait.html# let potential_sleep_time = self.timeout_callbacks.values().map(|info| info.call_time.get_wait_time()).min(); - if let Some(sleep_time) = potential_sleep_time { - if sleep_time == Duration::new(0, 0) { - return Ok(SchedulingAction::ExecuteTimeoutCallback); - } + if potential_sleep_time == Some(Duration::new(0, 0)) { + return Ok(SchedulingAction::ExecuteTimeoutCallback); } // No callbacks scheduled, pick a regular thread to execute. if self.threads[self.active_thread].state == ThreadState::Enabled @@ -699,6 +700,12 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let this = self.eval_context_mut(); let (thread, callback) = this.machine.threads.get_ready_callback().expect("no callback found"); + // This back-and-forth with `set_active_thread` is here because of two + // design decisions: + // 1. Make the caller and not the callback responsible for changing + // thread. + // 2. Make the scheduler the only place that can change the active + // thread. let old_thread = this.set_active_thread(thread)?; callback(this)?; this.set_active_thread(old_thread)?; diff --git a/tests/compile-fail/sync/libc_pthread_mutex_NULL_deadlock.rs b/tests/compile-fail/sync/libc_pthread_mutex_NULL_deadlock.rs new file mode 100644 index 0000000000..3a737b2e3e --- /dev/null +++ b/tests/compile-fail/sync/libc_pthread_mutex_NULL_deadlock.rs @@ -0,0 +1,16 @@ +// ignore-windows: No libc on Windows +// +// Check that if we pass NULL attribute, then we get the default mutex type. 
+ +#![feature(rustc_private)] + +extern crate libc; + +fn main() { + unsafe { + let mut mutex: libc::pthread_mutex_t = std::mem::zeroed(); + assert_eq!(libc::pthread_mutex_init(&mut mutex as *mut _, std::ptr::null() as *const _), 0); + assert_eq!(libc::pthread_mutex_lock(&mut mutex as *mut _), 0); + libc::pthread_mutex_lock(&mut mutex as *mut _); //~ ERROR Undefined Behavior: trying to acquire already locked default mutex + } +} diff --git a/tests/compile-fail/sync/libc_pthread_mutex_default_deadlock.rs b/tests/compile-fail/sync/libc_pthread_mutex_default_deadlock.rs new file mode 100644 index 0000000000..0f6f570d70 --- /dev/null +++ b/tests/compile-fail/sync/libc_pthread_mutex_default_deadlock.rs @@ -0,0 +1,17 @@ +// ignore-windows: No libc on Windows +// +// Check that if we do not set the mutex type, it is the default. + +#![feature(rustc_private)] + +extern crate libc; + +fn main() { + unsafe { + let mutexattr: libc::pthread_mutexattr_t = std::mem::zeroed(); + let mut mutex: libc::pthread_mutex_t = std::mem::zeroed(); + assert_eq!(libc::pthread_mutex_init(&mut mutex as *mut _, &mutexattr as *const _), 0); + assert_eq!(libc::pthread_mutex_lock(&mut mutex as *mut _), 0); + libc::pthread_mutex_lock(&mut mutex as *mut _); //~ ERROR Undefined Behavior: trying to acquire already locked default mutex + } +} diff --git a/tests/compile-fail/sync/libc_pthread_mutex_wrong_owner.rs b/tests/compile-fail/sync/libc_pthread_mutex_wrong_owner.rs index e67e8d366e..d69929d4ed 100644 --- a/tests/compile-fail/sync/libc_pthread_mutex_wrong_owner.rs +++ b/tests/compile-fail/sync/libc_pthread_mutex_wrong_owner.rs @@ -24,7 +24,7 @@ fn main() { let lock_copy = lock.clone(); thread::spawn(move || { - assert_eq!(libc::pthread_mutex_unlock(lock_copy.0.get() as *mut _), 0); //~ ERROR: Undefined Behavior: unlocked a PTHREAD_MUTEX_DEFAULT mutex that was not locked + assert_eq!(libc::pthread_mutex_unlock(lock_copy.0.get() as *mut _), 0); //~ ERROR: Undefined Behavior: unlocked a default mutex 
that was not locked by the current thread }) .join() .unwrap(); From dec205757ae2e370e631729c96f1d8e4a0ab1936 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Mon, 25 May 2020 00:28:01 +0200 Subject: [PATCH 16/17] Fix compilation errors after rebase. --- src/shims/foreign_items/posix.rs | 34 ++++++++++++++++++++++---------- src/shims/sync.rs | 30 ++++++++++++++-------------- 2 files changed, 39 insertions(+), 25 deletions(-) diff --git a/src/shims/foreign_items/posix.rs b/src/shims/foreign_items/posix.rs index 352e38113a..db2fab526c 100644 --- a/src/shims/foreign_items/posix.rs +++ b/src/shims/foreign_items/posix.rs @@ -331,42 +331,52 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx this.write_scalar(Scalar::from_i32(result), dest)?; } "pthread_condattr_init" => { - let result = this.pthread_condattr_init(args[0])?; + let &[attr] = check_arg_count(args)?; + let result = this.pthread_condattr_init(attr)?; this.write_scalar(Scalar::from_i32(result), dest)?; } "pthread_condattr_setclock" => { - let result = this.pthread_condattr_setclock(args[0], args[1])?; + let &[attr, clock_id] = check_arg_count(args)?; + let result = this.pthread_condattr_setclock(attr, clock_id)?; this.write_scalar(Scalar::from_i32(result), dest)?; } "pthread_condattr_getclock" => { - let result = this.pthread_condattr_getclock(args[0], args[1])?; + let &[attr, clock_id] = check_arg_count(args)?; + let result = this.pthread_condattr_getclock(attr, clock_id)?; this.write_scalar(Scalar::from_i32(result), dest)?; } "pthread_condattr_destroy" => { - let result = this.pthread_condattr_destroy(args[0])?; + let &[attr] = check_arg_count(args)?; + let result = this.pthread_condattr_destroy(attr)?; this.write_scalar(Scalar::from_i32(result), dest)?; } "pthread_cond_init" => { - let result = this.pthread_cond_init(args[0], args[1])?; + let &[cond, attr] = check_arg_count(args)?; + let result = this.pthread_cond_init(cond, attr)?; 
this.write_scalar(Scalar::from_i32(result), dest)?; } "pthread_cond_signal" => { - let result = this.pthread_cond_signal(args[0])?; + let &[cond] = check_arg_count(args)?; + let result = this.pthread_cond_signal(cond)?; this.write_scalar(Scalar::from_i32(result), dest)?; } "pthread_cond_broadcast" => { - let result = this.pthread_cond_broadcast(args[0])?; + let &[cond] = check_arg_count(args)?; + let result = this.pthread_cond_broadcast(cond)?; this.write_scalar(Scalar::from_i32(result), dest)?; } "pthread_cond_wait" => { - let result = this.pthread_cond_wait(args[0], args[1])?; + let &[cond, mutex] = check_arg_count(args)?; + let result = this.pthread_cond_wait(cond, mutex)?; this.write_scalar(Scalar::from_i32(result), dest)?; } "pthread_cond_timedwait" => { - this.pthread_cond_timedwait(args[0], args[1], args[2], dest)?; + let &[cond, mutex, abstime] = check_arg_count(args)?; + this.pthread_cond_timedwait(cond, mutex, abstime, dest)?; } "pthread_cond_destroy" => { - let result = this.pthread_cond_destroy(args[0])?; + let &[cond] = check_arg_count(args)?; + let result = this.pthread_cond_destroy(cond)?; this.write_scalar(Scalar::from_i32(result), dest)?; } @@ -430,6 +440,10 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx | "pthread_attr_init" | "pthread_attr_destroy" + if this.frame().instance.to_string().starts_with("std::sys::unix::") => { + let &[_] = check_arg_count(args)?; + this.write_null(dest)?; + } | "pthread_attr_setstacksize" if this.frame().instance.to_string().starts_with("std::sys::unix::") => { let &[_, _] = check_arg_count(args)?; diff --git a/src/shims/sync.rs b/src/shims/sync.rs index 95092a042d..5b0de43e54 100644 --- a/src/shims/sync.rs +++ b/src/shims/sync.rs @@ -129,14 +129,14 @@ fn mutex_set_kind<'mir, 'tcx: 'mir>( fn mutex_get_id<'mir, 'tcx: 'mir>( ecx: &MiriEvalContext<'mir, 'tcx>, mutex_op: OpTy<'tcx, Tag>, -) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> { +) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
get_at_offset(ecx, mutex_op, 4, ecx.machine.layouts.u32, PTHREAD_MUTEX_T_MIN_SIZE) } fn mutex_set_id<'mir, 'tcx: 'mir>( ecx: &mut MiriEvalContext<'mir, 'tcx>, mutex_op: OpTy<'tcx, Tag>, - id: impl Into<ScalarMaybeUndef<Tag>>, + id: impl Into<ScalarMaybeUninit<Tag>>, ) -> InterpResult<'tcx, ()> { set_at_offset(ecx, mutex_op, 4, id, ecx.machine.layouts.u32, PTHREAD_MUTEX_T_MIN_SIZE) } @@ -176,7 +176,7 @@ fn rwlock_get_id<'mir, 'tcx: 'mir>( fn rwlock_set_id<'mir, 'tcx: 'mir>( ecx: &mut MiriEvalContext<'mir, 'tcx>, rwlock_op: OpTy<'tcx, Tag>, - id: impl Into<ScalarMaybeUndef<Tag>>, + id: impl Into<ScalarMaybeUninit<Tag>>, ) -> InterpResult<'tcx, ()> { set_at_offset(ecx, rwlock_op, 4, id, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE) } @@ -208,14 +208,14 @@ const PTHREAD_CONDATTR_T_MIN_SIZE: u64 = 4; fn condattr_get_clock_id<'mir, 'tcx: 'mir>( ecx: &MiriEvalContext<'mir, 'tcx>, attr_op: OpTy<'tcx, Tag>, -) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> { +) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> { get_at_offset(ecx, attr_op, 0, ecx.machine.layouts.i32, PTHREAD_CONDATTR_T_MIN_SIZE) } fn condattr_set_clock_id<'mir, 'tcx: 'mir>( ecx: &mut MiriEvalContext<'mir, 'tcx>, attr_op: OpTy<'tcx, Tag>, - clock_id: impl Into<ScalarMaybeUndef<Tag>>, + clock_id: impl Into<ScalarMaybeUninit<Tag>>, ) -> InterpResult<'tcx, ()> { set_at_offset(ecx, attr_op, 0, clock_id, ecx.machine.layouts.i32, PTHREAD_CONDATTR_T_MIN_SIZE) } @@ -234,14 +234,14 @@ const PTHREAD_COND_T_MIN_SIZE: u64 = 12; fn cond_get_id<'mir, 'tcx: 'mir>( ecx: &MiriEvalContext<'mir, 'tcx>, cond_op: OpTy<'tcx, Tag>, -) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> { +) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> { get_at_offset(ecx, cond_op, 4, ecx.machine.layouts.u32, PTHREAD_COND_T_MIN_SIZE) } fn cond_set_id<'mir, 'tcx: 'mir>( ecx: &mut MiriEvalContext<'mir, 'tcx>, cond_op: OpTy<'tcx, Tag>, - id: impl Into<ScalarMaybeUndef<Tag>>, + id: impl Into<ScalarMaybeUninit<Tag>>, ) -> InterpResult<'tcx, ()> { set_at_offset(ecx, cond_op, 4, id, ecx.machine.layouts.u32, PTHREAD_COND_T_MIN_SIZE) } @@ -265,14 +265,14 @@ fn cond_get_or_create_id<'mir, 'tcx: 'mir>( fn cond_get_clock_id<'mir, 'tcx: 'mir>( ecx:
&MiriEvalContext<'mir, 'tcx>, cond_op: OpTy<'tcx, Tag>, -) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> { +) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> { get_at_offset(ecx, cond_op, 8, ecx.machine.layouts.i32, PTHREAD_COND_T_MIN_SIZE) } fn cond_set_clock_id<'mir, 'tcx: 'mir>( ecx: &mut MiriEvalContext<'mir, 'tcx>, cond_op: OpTy<'tcx, Tag>, - clock_id: impl Into<ScalarMaybeUndef<Tag>>, + clock_id: impl Into<ScalarMaybeUninit<Tag>>, ) -> InterpResult<'tcx, ()> { set_at_offset(ecx, cond_op, 8, clock_id, ecx.machine.layouts.i32, PTHREAD_COND_T_MIN_SIZE) } @@ -518,8 +518,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx throw_ub_format!("destroyed a locked mutex"); } - mutex_set_kind(this, mutex_op, ScalarMaybeUndef::Undef)?; - mutex_set_id(this, mutex_op, ScalarMaybeUndef::Undef)?; + mutex_set_kind(this, mutex_op, ScalarMaybeUninit::Uninit)?; + mutex_set_id(this, mutex_op, ScalarMaybeUninit::Uninit)?; Ok(0) } @@ -643,7 +643,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx throw_ub_format!("destroyed a locked rwlock"); } - rwlock_set_id(this, rwlock_op, ScalarMaybeUndef::Undef)?; + rwlock_set_id(this, rwlock_op, ScalarMaybeUninit::Uninit)?; Ok(0) } @@ -696,7 +696,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_condattr_destroy(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - condattr_set_clock_id(this, attr_op, ScalarMaybeUndef::Undef)?; + condattr_set_clock_id(this, attr_op, ScalarMaybeUninit::Uninit)?; Ok(0) } @@ -835,8 +835,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx if this.condvar_is_awaited(id) { throw_ub_format!("destroyed an awaited conditional variable"); } - cond_set_id(this, cond_op, ScalarMaybeUndef::Undef)?; - cond_set_clock_id(this, cond_op, ScalarMaybeUndef::Undef)?; + cond_set_id(this, cond_op, ScalarMaybeUninit::Uninit)?; + cond_set_clock_id(this, cond_op, ScalarMaybeUninit::Uninit)?;
Ok(0) } From 34ddd775e8352c4905b56f183421902cdda9d100 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Mon, 25 May 2020 08:07:07 +0200 Subject: [PATCH 17/17] Increase the elapsed time window. --- tests/run-pass/concurrency/libc_pthread_cond.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/run-pass/concurrency/libc_pthread_cond.rs b/tests/run-pass/concurrency/libc_pthread_cond.rs index 39b6a7e4ef..27f5ead450 100644 --- a/tests/run-pass/concurrency/libc_pthread_cond.rs +++ b/tests/run-pass/concurrency/libc_pthread_cond.rs @@ -34,7 +34,7 @@ fn test_timed_wait_timeout(clock_id: i32) { libc::ETIMEDOUT ); let elapsed_time = current_time.elapsed().as_millis(); - assert!(900 <= elapsed_time && elapsed_time <= 1100); + assert!(900 <= elapsed_time && elapsed_time <= 1300); assert_eq!(libc::pthread_mutex_unlock(&mut mutex as *mut _), 0); assert_eq!(libc::pthread_mutex_destroy(&mut mutex as *mut _), 0); assert_eq!(libc::pthread_cond_destroy(&mut cond as *mut _), 0);