From c9064272a5051772f1a486b70868ea2b579de597 Mon Sep 17 00:00:00 2001 From: DrMeepster <19316085+DrMeepster@users.noreply.github.com> Date: Mon, 17 Oct 2022 22:50:30 -0700 Subject: [PATCH 1/8] add windows one time initialization --- src/concurrency/sync.rs | 198 ++++++++++++++++++++++++++++- src/concurrency/thread.rs | 8 +- src/lib.rs | 2 +- src/shims/windows/foreign_items.rs | 12 ++ src/shims/windows/sync.rs | 180 ++++++++++++++++++++++---- 5 files changed, 368 insertions(+), 32 deletions(-) diff --git a/src/concurrency/sync.rs b/src/concurrency/sync.rs index 464f452ca7..1892095104 100644 --- a/src/concurrency/sync.rs +++ b/src/concurrency/sync.rs @@ -7,6 +7,7 @@ use log::trace; use rustc_data_structures::fx::FxHashMap; use rustc_index::vec::{Idx, IndexVec}; +use super::thread::MachineCallback; use super::vector_clock::VClock; use crate::*; @@ -149,13 +150,68 @@ struct FutexWaiter { bitset: u32, } +declare_id!(InitOnceId); + +struct InitOnceWaiter<'mir, 'tcx> { + thread: ThreadId, + callback: Box + 'tcx>, +} + +impl<'mir, 'tcx> std::fmt::Debug for InitOnceWaiter<'mir, 'tcx> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("InitOnce") + .field("thread", &self.thread) + .field("callback", &"dyn MachineCallback") + .finish() + } +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +/// The current status of a one time initialization. +pub enum InitOnceStatus { + Uninitialized, + Begun, + Complete, +} + +impl Default for InitOnceStatus { + fn default() -> Self { + Self::Uninitialized + } +} + +/// The one time initialization state. +#[derive(Default, Debug)] +struct InitOnce<'mir, 'tcx> { + status: InitOnceStatus, + waiters: VecDeque>, + data_race: VClock, +} + +impl<'mir, 'tcx> VisitTags for InitOnce<'mir, 'tcx> { + fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) { + for waiter in self.waiters.iter() { + waiter.callback.visit_tags(visit); + } + } +} + /// The state of all synchronization variables. 
#[derive(Default, Debug)] -pub(crate) struct SynchronizationState { +pub(crate) struct SynchronizationState<'mir, 'tcx> { mutexes: IndexVec, rwlocks: IndexVec, condvars: IndexVec, futexes: FxHashMap, + init_onces: IndexVec>, +} + +impl<'mir, 'tcx> VisitTags for SynchronizationState<'mir, 'tcx> { + fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) { + for init_once in self.init_onces.iter() { + init_once.visit_tags(visit); + } + } } // Private extension trait for local helper methods @@ -581,4 +637,144 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { futex.waiters.retain(|waiter| waiter.thread != thread); } } + + #[inline] + /// Create state for a new one time initialization. + fn init_once_create(&mut self) -> InitOnceId { + let this = self.eval_context_mut(); + this.machine.threads.sync.init_onces.push(Default::default()) + } + + #[inline] + /// Provides the closure with the next InitOnceId. Creates that InitOnce if the closure returns None, + /// otherwise returns the value from the closure + fn init_once_get_or_create(&mut self, existing: F) -> InterpResult<'tcx, InitOnceId> + where + F: FnOnce( + &mut MiriInterpCx<'mir, 'tcx>, + InitOnceId, + ) -> InterpResult<'tcx, Option>, + { + let this = self.eval_context_mut(); + let next_index = this.machine.threads.sync.init_onces.next_index(); + if let Some(old) = existing(this, next_index)? { + Ok(old) + } else { + let new_index = this.machine.threads.sync.init_onces.push(Default::default()); + assert_eq!(next_index, new_index); + Ok(new_index) + } + } + + #[inline] + fn init_once_status(&mut self, id: InitOnceId) -> InitOnceStatus { + let this = self.eval_context_ref(); + this.machine.threads.sync.init_onces[id].status + } + + #[inline] + /// Put the thread into the queue waiting for the initialization. 
+ fn init_once_enqueue_and_block( + &mut self, + id: InitOnceId, + thread: ThreadId, + callback: Box + 'tcx>, + ) { + let this = self.eval_context_mut(); + let init_once = &mut this.machine.threads.sync.init_onces[id]; + assert_ne!(init_once.status, InitOnceStatus::Complete, "queueing on complete init once"); + init_once.waiters.push_back(InitOnceWaiter { thread, callback }); + this.block_thread(thread); + } + + #[inline] + fn init_once_begin(&mut self, id: InitOnceId) { + let this = self.eval_context_mut(); + let init_once = &mut this.machine.threads.sync.init_onces[id]; + assert_eq!( + init_once.status, + InitOnceStatus::Uninitialized, + "beginning already begun or complete init once" + ); + init_once.status = InitOnceStatus::Begun; + } + + #[inline] + fn init_once_complete(&mut self, id: InitOnceId) -> InterpResult<'tcx> { + let this = self.eval_context_mut(); + let current_thread = this.get_active_thread(); + let init_once = &mut this.machine.threads.sync.init_onces[id]; + + assert_eq!( + init_once.status, + InitOnceStatus::Begun, + "completing already complete or uninit init once" + ); + + init_once.status = InitOnceStatus::Complete; + + // Each complete happens-before the end of the wait + if let Some(data_race) = &this.machine.data_race { + data_race.validate_lock_release(&mut init_once.data_race, current_thread); + } + + // need to take the queue to avoid having `this` be borrowed multiple times + for waiter in std::mem::take(&mut init_once.waiters) { + this.unblock_thread(waiter.thread); + + this.set_active_thread(waiter.thread); + waiter.callback.call(this)?; + this.set_active_thread(current_thread); + + if let Some(data_race) = &this.machine.data_race { + data_race.validate_lock_acquire( + &this.machine.threads.sync.init_onces[id].data_race, + waiter.thread, + ); + } + } + + Ok(()) + } + + #[inline] + fn init_once_fail(&mut self, id: InitOnceId) -> InterpResult<'tcx> { + let this = self.eval_context_mut(); + let current_thread = this.get_active_thread(); 
+ let init_once = &mut this.machine.threads.sync.init_onces[id]; + assert_eq!( + init_once.status, + InitOnceStatus::Begun, + "failing already completed or uninit init once" + ); + + // Each complete happens-before the end of the wait + if let Some(data_race) = &this.machine.data_race { + data_race.validate_lock_release(&mut init_once.data_race, current_thread); + } + + // the behavior of failing the initialization is left vague by the docs + // it had to be determined experimentally + if let Some(waiter) = init_once.waiters.pop_front() { + // try initializing again on a different thread + init_once.status = InitOnceStatus::Begun; + + this.unblock_thread(waiter.thread); + + this.set_active_thread(waiter.thread); + waiter.callback.call(this)?; + this.set_active_thread(current_thread); + + if let Some(data_race) = &this.machine.data_race { + data_race.validate_lock_acquire( + &this.machine.threads.sync.init_onces[id].data_race, + waiter.thread, + ); + } + } else { + init_once.status = InitOnceStatus::Uninitialized; + } + + Ok(()) + } } diff --git a/src/concurrency/thread.rs b/src/concurrency/thread.rs index ec1da4138d..3432f10f7a 100644 --- a/src/concurrency/thread.rs +++ b/src/concurrency/thread.rs @@ -30,8 +30,7 @@ pub enum SchedulingAction { Stop, } -/// Timeout callbacks can be created by synchronization primitives to tell the -/// scheduler that they should be called once some period of time passes. +/// Trait for callbacks that can be executed when some event happens, such as after a timeout. pub trait MachineCallback<'mir, 'tcx>: VisitTags { fn call(&self, ecx: &mut InterpCx<'mir, 'tcx, MiriMachine<'mir, 'tcx>>) -> InterpResult<'tcx>; } @@ -269,7 +268,7 @@ pub struct ThreadManager<'mir, 'tcx> { threads: IndexVec>, /// This field is pub(crate) because the synchronization primitives /// (`crate::sync`) need a way to access it. 
- pub(crate) sync: SynchronizationState, + pub(crate) sync: SynchronizationState<'mir, 'tcx>, /// A mapping from a thread-local static to an allocation id of a thread /// specific allocation. thread_local_alloc_ids: RefCell>>, @@ -303,7 +302,7 @@ impl VisitTags for ThreadManager<'_, '_> { timeout_callbacks, active_thread: _, yield_active_thread: _, - sync: _, + sync, } = self; for thread in threads { @@ -315,6 +314,7 @@ impl VisitTags for ThreadManager<'_, '_> { for callback in timeout_callbacks.values() { callback.callback.visit_tags(visit); } + sync.visit_tags(visit); } } diff --git a/src/lib.rs b/src/lib.rs index 8de0b0413a..f7c22b76f4 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -87,7 +87,7 @@ pub use crate::concurrency::{ AtomicFenceOrd, AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd, EvalContextExt as DataRaceEvalContextExt, }, - sync::{CondvarId, EvalContextExt as SyncEvalContextExt, MutexId, RwLockId}, + sync::{CondvarId, EvalContextExt as SyncEvalContextExt, InitOnceId, MutexId, RwLockId}, thread::{ EvalContextExt as ThreadsEvalContextExt, SchedulingAction, ThreadId, ThreadManager, ThreadState, Time, diff --git a/src/shims/windows/foreign_items.rs b/src/shims/windows/foreign_items.rs index b0670358f9..d998bdf420 100644 --- a/src/shims/windows/foreign_items.rs +++ b/src/shims/windows/foreign_items.rs @@ -261,6 +261,18 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { let ret = this.TryAcquireSRWLockShared(ptr)?; this.write_scalar(ret, dest)?; } + "InitOnceBeginInitialize" => { + let [ptr, flags, pending, context] = + this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?; + let result = this.InitOnceBeginInitialize(ptr, flags, pending, context)?; + this.write_scalar(result, dest)?; + } + "InitOnceComplete" => { + let [ptr, flags, context] = + this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?; + let result = this.InitOnceComplete(ptr, flags, context)?; + this.write_scalar(result, dest)?; + } // 
Dynamic symbol loading "GetProcAddress" => { diff --git a/src/shims/windows/sync.rs b/src/shims/windows/sync.rs index 88b117c54b..feed90fad1 100644 --- a/src/shims/windows/sync.rs +++ b/src/shims/windows/sync.rs @@ -1,20 +1,24 @@ +use crate::concurrency::sync::InitOnceStatus; +use crate::concurrency::thread::MachineCallback; use crate::*; -// Locks are pointer-sized pieces of data, initialized to 0. -// We use the first 4 bytes to store the RwLockId. - -fn srwlock_get_or_create_id<'mir, 'tcx: 'mir>( - ecx: &mut MiriInterpCx<'mir, 'tcx>, - lock_op: &OpTy<'tcx, Provenance>, -) -> InterpResult<'tcx, RwLockId> { - let value_place = ecx.deref_operand_and_offset(lock_op, 0, ecx.machine.layouts.u32)?; +impl<'mir, 'tcx> EvalContextExtPriv<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {} +trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { + // These synchronization structures are pointer-sized pieces of data, initialized to 0. + // We use the first 4 bytes to store the id. 
+ fn get_or_create_id( + &mut self, + next_id: Scalar, + lock_op: &OpTy<'tcx, Provenance>, + ) -> InterpResult<'tcx, Option> { + let this = self.eval_context_mut(); + let value_place = this.deref_operand_and_offset(lock_op, 0, this.machine.layouts.u32)?; - ecx.rwlock_get_or_create(|ecx, next_id| { - let (old, success) = ecx + let (old, success) = this .atomic_compare_exchange_scalar( &value_place, - &ImmTy::from_uint(0u32, ecx.machine.layouts.u32), - next_id.to_u32_scalar(), + &ImmTy::from_uint(0u32, this.machine.layouts.u32), + next_id, AtomicRwOrd::Relaxed, AtomicReadOrd::Relaxed, false, @@ -25,17 +29,38 @@ fn srwlock_get_or_create_id<'mir, 'tcx: 'mir>( // Caller of the closure needs to allocate next_id None } else { - Some(RwLockId::from_u32(old.to_u32().expect("layout is u32"))) + Some(old.to_u32().expect("layout is u32")) + }) + } + + fn srwlock_get_or_create_id( + &mut self, + lock_op: &OpTy<'tcx, Provenance>, + ) -> InterpResult<'tcx, RwLockId> { + let this = self.eval_context_mut(); + this.rwlock_get_or_create(|ecx, next_id| { + Ok(ecx.get_or_create_id(next_id.to_u32_scalar(), lock_op)?.map(RwLockId::from_u32)) + }) + } + + fn init_once_get_or_create_id( + &mut self, + lock_op: &OpTy<'tcx, Provenance>, + ) -> InterpResult<'tcx, InitOnceId> { + let this = self.eval_context_mut(); + this.init_once_get_or_create(|ecx, next_id| { + Ok(ecx.get_or_create_id(next_id.to_u32_scalar(), lock_op)?.map(InitOnceId::from_u32)) }) - }) + } } impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {} + +#[allow(non_snake_case)] pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { - #[allow(non_snake_case)] fn AcquireSRWLockExclusive(&mut self, lock_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx> { let this = self.eval_context_mut(); - let id = srwlock_get_or_create_id(this, lock_op)?; + let id = this.srwlock_get_or_create_id(lock_op)?; let active_thread = this.get_active_thread(); if this.rwlock_is_locked(id) { @@ 
-54,13 +79,12 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { Ok(()) } - #[allow(non_snake_case)] fn TryAcquireSRWLockExclusive( &mut self, lock_op: &OpTy<'tcx, Provenance>, ) -> InterpResult<'tcx, Scalar> { let this = self.eval_context_mut(); - let id = srwlock_get_or_create_id(this, lock_op)?; + let id = this.srwlock_get_or_create_id(lock_op)?; let active_thread = this.get_active_thread(); if this.rwlock_is_locked(id) { @@ -72,10 +96,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { } } - #[allow(non_snake_case)] fn ReleaseSRWLockExclusive(&mut self, lock_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx> { let this = self.eval_context_mut(); - let id = srwlock_get_or_create_id(this, lock_op)?; + let id = this.srwlock_get_or_create_id(lock_op)?; let active_thread = this.get_active_thread(); if !this.rwlock_writer_unlock(id, active_thread) { @@ -88,10 +111,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { Ok(()) } - #[allow(non_snake_case)] fn AcquireSRWLockShared(&mut self, lock_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx> { let this = self.eval_context_mut(); - let id = srwlock_get_or_create_id(this, lock_op)?; + let id = this.srwlock_get_or_create_id(lock_op)?; let active_thread = this.get_active_thread(); if this.rwlock_is_write_locked(id) { @@ -103,13 +125,12 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { Ok(()) } - #[allow(non_snake_case)] fn TryAcquireSRWLockShared( &mut self, lock_op: &OpTy<'tcx, Provenance>, ) -> InterpResult<'tcx, Scalar> { let this = self.eval_context_mut(); - let id = srwlock_get_or_create_id(this, lock_op)?; + let id = this.srwlock_get_or_create_id(lock_op)?; let active_thread = this.get_active_thread(); if this.rwlock_is_write_locked(id) { @@ -120,10 +141,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { } } - #[allow(non_snake_case)] fn 
ReleaseSRWLockShared(&mut self, lock_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx> { let this = self.eval_context_mut(); - let id = srwlock_get_or_create_id(this, lock_op)?; + let id = this.srwlock_get_or_create_id(lock_op)?; let active_thread = this.get_active_thread(); if !this.rwlock_reader_unlock(id, active_thread) { @@ -135,4 +155,112 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { Ok(()) } + + fn InitOnceBeginInitialize( + &mut self, + init_once_op: &OpTy<'tcx, Provenance>, + flags_op: &OpTy<'tcx, Provenance>, + pending_op: &OpTy<'tcx, Provenance>, + context_op: &OpTy<'tcx, Provenance>, + ) -> InterpResult<'tcx, Scalar> { + let this = self.eval_context_mut(); + let active_thread = this.get_active_thread(); + + let id = this.init_once_get_or_create_id(init_once_op)?; + let flags = this.read_scalar(flags_op)?.to_u32()?; + let pending_place = this.deref_operand(pending_op)?.into(); + let context = this.read_pointer(context_op)?; + + if flags != 0 { + throw_unsup_format!("unsupported `dwFlags` {flags} in `InitOnceBeginInitialize`"); + } + + if !this.ptr_is_null(context)? 
{ + throw_unsup_format!("non-null `lpContext` in `InitOnceBeginInitialize`"); + } + + struct Callback<'tcx> { + init_once_id: InitOnceId, + pending_place: PlaceTy<'tcx, Provenance>, + } + + impl<'tcx> VisitTags for Callback<'tcx> { + fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) { + let Callback { init_once_id: _, pending_place } = self; + pending_place.visit_tags(visit); + } + } + + impl<'mir, 'tcx> MachineCallback<'mir, 'tcx> for Callback<'tcx> { + fn call(&self, this: &mut MiriInterpCx<'mir, 'tcx>) -> InterpResult<'tcx> { + let pending = match this.init_once_status(self.init_once_id) { + InitOnceStatus::Uninitialized => + unreachable!("status should have either been set to begun or complete"), + InitOnceStatus::Begun => this.eval_windows("c", "TRUE")?, + InitOnceStatus::Complete => this.eval_windows("c", "FALSE")?, + }; + + this.write_scalar(pending, &self.pending_place)?; + + Ok(()) + } + } + + match this.init_once_status(id) { + InitOnceStatus::Uninitialized => { + this.init_once_begin(id); + this.write_scalar(this.eval_windows("c", "TRUE")?, &pending_place)?; + } + InitOnceStatus::Begun => + this.init_once_enqueue_and_block( + id, + active_thread, + Box::new(Callback { init_once_id: id, pending_place }), + ), + InitOnceStatus::Complete => + this.write_scalar(this.eval_windows("c", "FALSE")?, &pending_place)?, + } + + Ok(Scalar::from_i32(1)) + } + + fn InitOnceComplete( + &mut self, + init_once_op: &OpTy<'tcx, Provenance>, + flags_op: &OpTy<'tcx, Provenance>, + context_op: &OpTy<'tcx, Provenance>, + ) -> InterpResult<'tcx, Scalar> { + let this = self.eval_context_mut(); + + let id = this.init_once_get_or_create_id(init_once_op)?; + let flags = this.read_scalar(flags_op)?.to_u32()?; + let context = this.read_pointer(context_op)?; + + let success = if flags == 0 { + true + } else if flags == this.eval_windows("c", "INIT_ONCE_INIT_FAILED")?.to_u32()? 
{ + false + } else { + throw_unsup_format!("unsupported `dwFlags` {flags} in `InitOnceComplete`"); + }; + + if !this.ptr_is_null(context)? { + throw_unsup_format!("non-null `lpContext` in `InitOnceComplete`"); + } + + if this.init_once_status(id) != InitOnceStatus::Begun { + // The docs do not say anything about this case, but it seems better to not allow it. + throw_ub_format!( + "calling InitOnceComplete on a one time initialization that has not begun or is already completed" + ); + } + + if success { + this.init_once_complete(id)?; + } else { + this.init_once_fail(id)?; + } + + Ok(Scalar::from_i32(1)) + } } From 2c012b7e1d5c529150bd8d748ffb245e599ed428 Mon Sep 17 00:00:00 2001 From: DrMeepster <19316085+DrMeepster@users.noreply.github.com> Date: Tue, 18 Oct 2022 00:48:40 -0700 Subject: [PATCH 2/8] add test for init once --- tests/pass/concurrency/windows_init_once.rs | 138 ++++++++++++++++++ 2 files changed, 144 insertions(+) create mode 100644 tests/pass/concurrency/windows_init_once.rs create mode 100644 tests/pass/concurrency/windows_init_once.stdout diff --git a/tests/pass/concurrency/windows_init_once.rs b/tests/pass/concurrency/windows_init_once.rs new file mode 100644 index 0000000000..d3c72c3d02 --- /dev/null +++ b/tests/pass/concurrency/windows_init_once.rs @@ -0,0 +1,138 @@ +//@only-target-windows: Uses win32 api functions +// We are making scheduler assumptions here. 
+//@compile-flags: -Zmiri-preemption-rate=0 + +use std::ffi::c_void; +use std::ptr::null_mut; +use std::thread; + +#[derive(Copy, Clone)] +struct SendPtr(*mut T); + +unsafe impl Send for SendPtr {} + +extern "system" { + fn InitOnceBeginInitialize( + init: *mut *mut c_void, + flags: u32, + pending: *mut i32, + context: *mut c_void, + ) -> i32; + + fn InitOnceComplete(init: *mut *mut c_void, flags: u32, context: *mut c_void) -> i32; +} + +const TRUE: i32 = 1; +const FALSE: i32 = 0; + +const INIT_ONCE_INIT_FAILED: u32 = 4; + +fn single_thread() { + let mut init_once = null_mut(); + let mut pending = 0; + + unsafe { + assert_eq!(InitOnceBeginInitialize(&mut init_once, 0, &mut pending, null_mut()), TRUE); + assert_eq!(pending, TRUE); + + assert_eq!(InitOnceComplete(&mut init_once, 0, null_mut()), TRUE); + + assert_eq!(InitOnceBeginInitialize(&mut init_once, 0, &mut pending, null_mut()), TRUE); + assert_eq!(pending, FALSE); + } + + let mut init_once = null_mut(); + + unsafe { + assert_eq!(InitOnceBeginInitialize(&mut init_once, 0, &mut pending, null_mut()), TRUE); + assert_eq!(pending, TRUE); + + assert_eq!(InitOnceComplete(&mut init_once, INIT_ONCE_INIT_FAILED, null_mut()), TRUE); + + assert_eq!(InitOnceBeginInitialize(&mut init_once, 0, &mut pending, null_mut()), TRUE); + assert_eq!(pending, TRUE); + } +} + +fn block_until_complete() { + let mut init_once = null_mut(); + let mut pending = 0; + + unsafe { + assert_eq!(InitOnceBeginInitialize(&mut init_once, 0, &mut pending, null_mut()), TRUE); + assert_eq!(pending, TRUE); + } + + let init_once_ptr = SendPtr(&mut init_once); + + let waiter = move || unsafe { + let mut pending = 0; + + assert_eq!(InitOnceBeginInitialize(init_once_ptr.0, 0, &mut pending, null_mut()), TRUE); + assert_eq!(pending, FALSE); + + println!("finished waiting for initialization"); + }; + + let waiter1 = thread::spawn(waiter); + let waiter2 = thread::spawn(waiter); + + // this yield ensures `waiter1` & `waiter2` are blocked on the main thread + 
thread::yield_now(); + + println!("completing initialization"); + + unsafe { + assert_eq!(InitOnceComplete(init_once_ptr.0, 0, null_mut()), TRUE); + } + + waiter1.join().unwrap(); + waiter2.join().unwrap(); +} + +fn retry_on_fail() { + let mut init_once = null_mut(); + let mut pending = 0; + + unsafe { + assert_eq!(InitOnceBeginInitialize(&mut init_once, 0, &mut pending, null_mut()), TRUE); + assert_eq!(pending, TRUE); + } + + let init_once_ptr = SendPtr(&mut init_once); + + let waiter = move || unsafe { + let mut pending = 0; + + assert_eq!(InitOnceBeginInitialize(init_once_ptr.0, 0, &mut pending, null_mut()), TRUE); + + if pending == 1 { + println!("retrying initialization"); + + assert_eq!(InitOnceComplete(init_once_ptr.0, 0, null_mut()), TRUE); + } else { + println!("finished waiting for initialization"); + } + }; + + let waiter1 = thread::spawn(waiter); + let waiter2 = thread::spawn(waiter); + + // this yield ensures `waiter1` & `waiter2` are blocked on the main thread + thread::yield_now(); + + println!("failing initialization"); + + unsafe { + assert_eq!(InitOnceComplete(init_once_ptr.0, INIT_ONCE_INIT_FAILED, null_mut()), TRUE); + } + + waiter1.join().unwrap(); + waiter2.join().unwrap(); +} + +fn main() { + single_thread(); + block_until_complete(); + retry_on_fail(); +} diff --git a/tests/pass/concurrency/windows_init_once.stdout b/tests/pass/concurrency/windows_init_once.stdout new file mode 100644 index 0000000000..f3d5aad8ed --- /dev/null +++ b/tests/pass/concurrency/windows_init_once.stdout @@ -0,0 +1,6 @@ +completing initialization +finished waiting for initialization +finished waiting for initialization +failing initialization +retrying initialization +finished waiting for initialization From 9327dffcec44d64e3c5ba308fbec5e7847e1d469 Mon Sep 17 00:00:00 2001 From: DrMeepster <19316085+DrMeepster@users.noreply.github.com> Date: Tue, 18 Oct 2022 16:37:30 -0700 Subject: [PATCH 3/8] update rust version --- rust-version | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/rust-version b/rust-version index 21e9d5a05d..f94611f0d4 100644 --- a/rust-version +++ b/rust-version @@ -1 +1 @@ -538f118da1409759ba198acc0ff62070bc6d2dce +a24a020e6d926dffe6b472fc647978f92269504e From 0532f1f2d5a8fbe6b52da48a92e128bbdd9028a0 Mon Sep 17 00:00:00 2001 From: DrMeepster <19316085+DrMeepster@users.noreply.github.com> Date: Tue, 18 Oct 2022 17:23:17 -0700 Subject: [PATCH 4/8] use Default derive for InitOnceStatus --- src/concurrency/sync.rs | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/src/concurrency/sync.rs b/src/concurrency/sync.rs index 1892095104..f514c30be4 100644 --- a/src/concurrency/sync.rs +++ b/src/concurrency/sync.rs @@ -166,20 +166,15 @@ impl<'mir, 'tcx> std::fmt::Debug for InitOnceWaiter<'mir, 'tcx> { } } -#[derive(Debug, Copy, Clone, PartialEq, Eq)] +#[derive(Default, Debug, Copy, Clone, PartialEq, Eq,)] /// The current status of a one time initialization. pub enum InitOnceStatus { + #[default] Uninitialized, Begun, Complete, } -impl Default for InitOnceStatus { - fn default() -> Self { - Self::Uninitialized - } -} - /// The one time initialization state. 
#[derive(Default, Debug)] struct InitOnce<'mir, 'tcx> { From a6460dfa50a513dbef27f7fafa3149ff6b6dbab0 Mon Sep 17 00:00:00 2001 From: DrMeepster <19316085+DrMeepster@users.noreply.github.com> Date: Tue, 18 Oct 2022 18:24:16 -0700 Subject: [PATCH 5/8] code reuse for sync ids --- src/concurrency/sync.rs | 87 +++++++++++++++++++++++++++- src/lib.rs | 2 +- src/shims/unix/sync.rs | 115 ++++++-------------------------------- src/shims/windows/sync.rs | 68 +++------------------- 4 files changed, 110 insertions(+), 162 deletions(-) diff --git a/src/concurrency/sync.rs b/src/concurrency/sync.rs index f514c30be4..2ec852b413 100644 --- a/src/concurrency/sync.rs +++ b/src/concurrency/sync.rs @@ -11,6 +11,11 @@ use super::thread::MachineCallback; use super::vector_clock::VClock; use crate::*; +pub trait SyncId { + fn from_u32(id: u32) -> Self; + fn to_u32_scalar(&self) -> Scalar; +} + /// We cannot use the `newtype_index!` macro because we have to use 0 as a /// sentinel value meaning that the identifier is not assigned. This is because /// the pthreads static initializers initialize memory with zeros (see the @@ -22,11 +27,14 @@ macro_rules! declare_id { #[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq, Hash)] pub struct $name(NonZeroU32); - impl $name { + impl SyncId for $name { // Panics if `id == 0`. - pub fn from_u32(id: u32) -> Self { + fn from_u32(id: u32) -> Self { Self(NonZeroU32::new(id).unwrap()) } + fn to_u32_scalar(&self) -> Scalar { + Scalar::from_u32(self.0.get()) + } } impl Idx for $name { @@ -166,7 +174,7 @@ impl<'mir, 'tcx> std::fmt::Debug for InitOnceWaiter<'mir, 'tcx> { } } -#[derive(Default, Debug, Copy, Clone, PartialEq, Eq,)] +#[derive(Default, Debug, Copy, Clone, PartialEq, Eq)] /// The current status of a one time initialization. 
pub enum InitOnceStatus { #[default] @@ -212,6 +220,37 @@ impl<'mir, 'tcx> VisitTags for SynchronizationState<'mir, 'tcx> { // Private extension trait for local helper methods impl<'mir, 'tcx: 'mir> EvalContextExtPriv<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {} trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { + #[inline] + // Miri sync structures contain zero-initialized ids stored at some offset behind a pointer + fn get_or_create_id( + &mut self, + next_id: Id, + lock_op: &OpTy<'tcx, Provenance>, + offset: u64, + ) -> InterpResult<'tcx, Option> { + let this = self.eval_context_mut(); + let value_place = + this.deref_operand_and_offset(lock_op, offset, this.machine.layouts.u32)?; + + let (old, success) = this + .atomic_compare_exchange_scalar( + &value_place, + &ImmTy::from_uint(0u32, this.machine.layouts.u32), + next_id.to_u32_scalar(), + AtomicRwOrd::Relaxed, + AtomicReadOrd::Relaxed, + false, + )? + .to_scalar_pair(); + + Ok(if success.to_bool().expect("compare_exchange's second return value is a bool") { + // Caller of the closure needs to allocate next_id + None + } else { + Some(Id::from_u32(old.to_u32().expect("layout is u32"))) + }) + } + /// Take a reader out of the queue waiting for the lock. /// Returns `true` if some thread got the rwlock. #[inline] @@ -261,6 +300,48 @@ trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { // situations. 
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {} pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { + fn mutex_get_or_create_id( + &mut self, + lock_op: &OpTy<'tcx, Provenance>, + offset: u64, + ) -> InterpResult<'tcx, MutexId> { + let this = self.eval_context_mut(); + this.mutex_get_or_create(|ecx, next_id| Ok(ecx.get_or_create_id(next_id, lock_op, offset)?)) + } + + fn rwlock_get_or_create_id( + &mut self, + lock_op: &OpTy<'tcx, Provenance>, + offset: u64, + ) -> InterpResult<'tcx, RwLockId> { + let this = self.eval_context_mut(); + this.rwlock_get_or_create( + |ecx, next_id| Ok(ecx.get_or_create_id(next_id, lock_op, offset)?), + ) + } + + fn condvar_get_or_create_id( + &mut self, + lock_op: &OpTy<'tcx, Provenance>, + offset: u64, + ) -> InterpResult<'tcx, CondvarId> { + let this = self.eval_context_mut(); + this.condvar_get_or_create(|ecx, next_id| { + Ok(ecx.get_or_create_id(next_id, lock_op, offset)?) + }) + } + + fn init_once_get_or_create_id( + &mut self, + lock_op: &OpTy<'tcx, Provenance>, + offset: u64, + ) -> InterpResult<'tcx, InitOnceId> { + let this = self.eval_context_mut(); + this.init_once_get_or_create(|ecx, next_id| { + Ok(ecx.get_or_create_id(next_id, lock_op, offset)?) + }) + } + #[inline] /// Create state for a new mutex. 
fn mutex_create(&mut self) -> MutexId { diff --git a/src/lib.rs b/src/lib.rs index f7c22b76f4..5c4094378a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -87,7 +87,7 @@ pub use crate::concurrency::{ AtomicFenceOrd, AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd, EvalContextExt as DataRaceEvalContextExt, }, - sync::{CondvarId, EvalContextExt as SyncEvalContextExt, InitOnceId, MutexId, RwLockId}, + sync::{CondvarId, EvalContextExt as SyncEvalContextExt, InitOnceId, MutexId, RwLockId, SyncId}, thread::{ EvalContextExt as ThreadsEvalContextExt, SchedulingAction, ThreadId, ThreadManager, ThreadState, Time, diff --git a/src/shims/unix/sync.rs b/src/shims/unix/sync.rs index 5aafe76ade..7857fa4bcc 100644 --- a/src/shims/unix/sync.rs +++ b/src/shims/unix/sync.rs @@ -108,33 +108,6 @@ fn mutex_set_id<'mir, 'tcx: 'mir>( ) } -fn mutex_get_or_create_id<'mir, 'tcx: 'mir>( - ecx: &mut MiriInterpCx<'mir, 'tcx>, - mutex_op: &OpTy<'tcx, Provenance>, -) -> InterpResult<'tcx, MutexId> { - let value_place = ecx.deref_operand_and_offset(mutex_op, 4, ecx.machine.layouts.u32)?; - - ecx.mutex_get_or_create(|ecx, next_id| { - let (old, success) = ecx - .atomic_compare_exchange_scalar( - &value_place, - &ImmTy::from_uint(0u32, ecx.machine.layouts.u32), - next_id.to_u32_scalar(), - AtomicRwOrd::Relaxed, - AtomicReadOrd::Relaxed, - false, - )? - .to_scalar_pair(); - - Ok(if success.to_bool().expect("compare_exchange's second return value is a bool") { - // Caller of the closure needs to allocate next_id - None - } else { - Some(MutexId::from_u32(old.to_u32().expect("layout is u32"))) - }) - }) -} - // pthread_rwlock_t is between 32 and 56 bytes, depending on the platform. 
// Our chosen memory layout for the emulated rwlock (does not have to match the platform layout!): @@ -149,33 +122,6 @@ fn rwlock_get_id<'mir, 'tcx: 'mir>( ecx.read_scalar_at_offset_atomic(rwlock_op, 4, ecx.machine.layouts.u32, AtomicReadOrd::Relaxed) } -fn rwlock_get_or_create_id<'mir, 'tcx: 'mir>( - ecx: &mut MiriInterpCx<'mir, 'tcx>, - rwlock_op: &OpTy<'tcx, Provenance>, -) -> InterpResult<'tcx, RwLockId> { - let value_place = ecx.deref_operand_and_offset(rwlock_op, 4, ecx.machine.layouts.u32)?; - - ecx.rwlock_get_or_create(|ecx, next_id| { - let (old, success) = ecx - .atomic_compare_exchange_scalar( - &value_place, - &ImmTy::from_uint(0u32, ecx.machine.layouts.u32), - next_id.to_u32_scalar(), - AtomicRwOrd::Relaxed, - AtomicReadOrd::Relaxed, - false, - )? - .to_scalar_pair(); - - Ok(if success.to_bool().expect("compare_exchange's second return value is a bool") { - // Caller of the closure needs to allocate next_id - None - } else { - Some(RwLockId::from_u32(old.to_u32().expect("layout is u32"))) - }) - }) -} - // pthread_condattr_t // Our chosen memory layout for emulation (does not have to match the platform layout!): @@ -232,33 +178,6 @@ fn cond_set_id<'mir, 'tcx: 'mir>( ) } -fn cond_get_or_create_id<'mir, 'tcx: 'mir>( - ecx: &mut MiriInterpCx<'mir, 'tcx>, - cond_op: &OpTy<'tcx, Provenance>, -) -> InterpResult<'tcx, CondvarId> { - let value_place = ecx.deref_operand_and_offset(cond_op, 4, ecx.machine.layouts.u32)?; - - ecx.condvar_get_or_create(|ecx, next_id| { - let (old, success) = ecx - .atomic_compare_exchange_scalar( - &value_place, - &ImmTy::from_uint(0u32, ecx.machine.layouts.u32), - next_id.to_u32_scalar(), - AtomicRwOrd::Relaxed, - AtomicReadOrd::Relaxed, - false, - )? 
- .to_scalar_pair(); - - Ok(if success.to_bool().expect("compare_exchange's second return value is a bool") { - // Caller of the closure needs to allocate next_id - None - } else { - Some(CondvarId::from_u32(old.to_u32().expect("layout is u32"))) - }) - }) -} - fn cond_get_clock_id<'mir, 'tcx: 'mir>( ecx: &MiriInterpCx<'mir, 'tcx>, cond_op: &OpTy<'tcx, Provenance>, @@ -435,7 +354,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { let this = self.eval_context_mut(); let kind = mutex_get_kind(this, mutex_op)?; - let id = mutex_get_or_create_id(this, mutex_op)?; + let id = this.mutex_get_or_create_id(mutex_op, 4)?; let active_thread = this.get_active_thread(); if this.mutex_is_locked(id) { @@ -475,7 +394,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { let this = self.eval_context_mut(); let kind = mutex_get_kind(this, mutex_op)?; - let id = mutex_get_or_create_id(this, mutex_op)?; + let id = this.mutex_get_or_create_id(mutex_op, 4)?; let active_thread = this.get_active_thread(); if this.mutex_is_locked(id) { @@ -511,7 +430,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { let this = self.eval_context_mut(); let kind = mutex_get_kind(this, mutex_op)?; - let id = mutex_get_or_create_id(this, mutex_op)?; + let id = this.mutex_get_or_create_id(mutex_op, 4)?; let active_thread = this.get_active_thread(); if let Some(_old_locked_count) = this.mutex_unlock(id, active_thread) { @@ -545,7 +464,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { ) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - let id = mutex_get_or_create_id(this, mutex_op)?; + let id = this.mutex_get_or_create_id(mutex_op, 4)?; if this.mutex_is_locked(id) { throw_ub_format!("destroyed a locked mutex"); @@ -568,7 +487,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { ) -> InterpResult<'tcx, i32> { let this = 
self.eval_context_mut(); - let id = rwlock_get_or_create_id(this, rwlock_op)?; + let id = this.rwlock_get_or_create_id(rwlock_op, 4)?; let active_thread = this.get_active_thread(); if this.rwlock_is_write_locked(id) { @@ -586,7 +505,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { ) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - let id = rwlock_get_or_create_id(this, rwlock_op)?; + let id = this.rwlock_get_or_create_id(rwlock_op, 4)?; let active_thread = this.get_active_thread(); if this.rwlock_is_write_locked(id) { @@ -603,7 +522,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { ) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - let id = rwlock_get_or_create_id(this, rwlock_op)?; + let id = this.rwlock_get_or_create_id(rwlock_op, 4)?; let active_thread = this.get_active_thread(); if this.rwlock_is_locked(id) { @@ -633,7 +552,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { ) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - let id = rwlock_get_or_create_id(this, rwlock_op)?; + let id = this.rwlock_get_or_create_id(rwlock_op, 4)?; let active_thread = this.get_active_thread(); if this.rwlock_is_locked(id) { @@ -650,7 +569,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { ) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - let id = rwlock_get_or_create_id(this, rwlock_op)?; + let id = this.rwlock_get_or_create_id(rwlock_op, 4)?; let active_thread = this.get_active_thread(); #[allow(clippy::if_same_then_else)] @@ -669,7 +588,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { ) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - let id = rwlock_get_or_create_id(this, rwlock_op)?; + let id = this.rwlock_get_or_create_id(rwlock_op, 4)?; if this.rwlock_is_locked(id) { throw_ub_format!("destroyed a locked 
rwlock"); @@ -772,7 +691,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { fn pthread_cond_signal(&mut self, cond_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - let id = cond_get_or_create_id(this, cond_op)?; + let id = this.condvar_get_or_create_id(cond_op, 4)?; if let Some((thread, mutex)) = this.condvar_signal(id) { post_cond_signal(this, thread, mutex)?; } @@ -785,7 +704,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { cond_op: &OpTy<'tcx, Provenance>, ) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - let id = cond_get_or_create_id(this, cond_op)?; + let id = this.condvar_get_or_create_id(cond_op, 4)?; while let Some((thread, mutex)) = this.condvar_signal(id) { post_cond_signal(this, thread, mutex)?; @@ -801,8 +720,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { ) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - let id = cond_get_or_create_id(this, cond_op)?; - let mutex_id = mutex_get_or_create_id(this, mutex_op)?; + let id = this.condvar_get_or_create_id(cond_op, 4)?; + let mutex_id = this.mutex_get_or_create_id(mutex_op, 4)?; let active_thread = this.get_active_thread(); release_cond_mutex_and_block(this, active_thread, mutex_id)?; @@ -822,8 +741,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { this.check_no_isolation("`pthread_cond_timedwait`")?; - let id = cond_get_or_create_id(this, cond_op)?; - let mutex_id = mutex_get_or_create_id(this, mutex_op)?; + let id = this.condvar_get_or_create_id(cond_op, 4)?; + let mutex_id = this.mutex_get_or_create_id(mutex_op, 4)?; let active_thread = this.get_active_thread(); // Extract the timeout. 
@@ -899,7 +818,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { ) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - let id = cond_get_or_create_id(this, cond_op)?; + let id = this.condvar_get_or_create_id(cond_op, 4)?; if this.condvar_is_awaited(id) { throw_ub_format!("destroying an awaited conditional variable"); } diff --git a/src/shims/windows/sync.rs b/src/shims/windows/sync.rs index feed90fad1..3eca86d386 100644 --- a/src/shims/windows/sync.rs +++ b/src/shims/windows/sync.rs @@ -2,65 +2,13 @@ use crate::concurrency::sync::InitOnceStatus; use crate::concurrency::thread::MachineCallback; use crate::*; -impl<'mir, 'tcx> EvalContextExtPriv<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {} -trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { - // These synchronization structures are pointer-sized pieces of data, initialized to 0. - // We use the first 4 bytes to store the id. - fn get_or_create_id( - &mut self, - next_id: Scalar, - lock_op: &OpTy<'tcx, Provenance>, - ) -> InterpResult<'tcx, Option> { - let this = self.eval_context_mut(); - let value_place = this.deref_operand_and_offset(lock_op, 0, this.machine.layouts.u32)?; - - let (old, success) = this - .atomic_compare_exchange_scalar( - &value_place, - &ImmTy::from_uint(0u32, this.machine.layouts.u32), - next_id, - AtomicRwOrd::Relaxed, - AtomicReadOrd::Relaxed, - false, - )? 
- .to_scalar_pair(); - - Ok(if success.to_bool().expect("compare_exchange's second return value is a bool") { - // Caller of the closure needs to allocate next_id - None - } else { - Some(old.to_u32().expect("layout is u32")) - }) - } - - fn srwlock_get_or_create_id( - &mut self, - lock_op: &OpTy<'tcx, Provenance>, - ) -> InterpResult<'tcx, RwLockId> { - let this = self.eval_context_mut(); - this.rwlock_get_or_create(|ecx, next_id| { - Ok(ecx.get_or_create_id(next_id.to_u32_scalar(), lock_op)?.map(RwLockId::from_u32)) - }) - } - - fn init_once_get_or_create_id( - &mut self, - lock_op: &OpTy<'tcx, Provenance>, - ) -> InterpResult<'tcx, InitOnceId> { - let this = self.eval_context_mut(); - this.init_once_get_or_create(|ecx, next_id| { - Ok(ecx.get_or_create_id(next_id.to_u32_scalar(), lock_op)?.map(InitOnceId::from_u32)) - }) - } -} - impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {} #[allow(non_snake_case)] pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { fn AcquireSRWLockExclusive(&mut self, lock_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx> { let this = self.eval_context_mut(); - let id = this.srwlock_get_or_create_id(lock_op)?; + let id = this.rwlock_get_or_create_id(lock_op, 0)?; let active_thread = this.get_active_thread(); if this.rwlock_is_locked(id) { @@ -84,7 +32,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { lock_op: &OpTy<'tcx, Provenance>, ) -> InterpResult<'tcx, Scalar> { let this = self.eval_context_mut(); - let id = this.srwlock_get_or_create_id(lock_op)?; + let id = this.rwlock_get_or_create_id(lock_op, 0)?; let active_thread = this.get_active_thread(); if this.rwlock_is_locked(id) { @@ -98,7 +46,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { fn ReleaseSRWLockExclusive(&mut self, lock_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx> { let this = self.eval_context_mut(); - let id = 
this.srwlock_get_or_create_id(lock_op)?; + let id = this.rwlock_get_or_create_id(lock_op, 0)?; let active_thread = this.get_active_thread(); if !this.rwlock_writer_unlock(id, active_thread) { @@ -113,7 +61,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { fn AcquireSRWLockShared(&mut self, lock_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx> { let this = self.eval_context_mut(); - let id = this.srwlock_get_or_create_id(lock_op)?; + let id = this.rwlock_get_or_create_id(lock_op, 0)?; let active_thread = this.get_active_thread(); if this.rwlock_is_write_locked(id) { @@ -130,7 +78,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { lock_op: &OpTy<'tcx, Provenance>, ) -> InterpResult<'tcx, Scalar> { let this = self.eval_context_mut(); - let id = this.srwlock_get_or_create_id(lock_op)?; + let id = this.rwlock_get_or_create_id(lock_op, 0)?; let active_thread = this.get_active_thread(); if this.rwlock_is_write_locked(id) { @@ -143,7 +91,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { fn ReleaseSRWLockShared(&mut self, lock_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx> { let this = self.eval_context_mut(); - let id = this.srwlock_get_or_create_id(lock_op)?; + let id = this.rwlock_get_or_create_id(lock_op, 0)?; let active_thread = this.get_active_thread(); if !this.rwlock_reader_unlock(id, active_thread) { @@ -166,7 +114,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { let this = self.eval_context_mut(); let active_thread = this.get_active_thread(); - let id = this.init_once_get_or_create_id(init_once_op)?; + let id = this.init_once_get_or_create_id(init_once_op, 0)?; let flags = this.read_scalar(flags_op)?.to_u32()?; let pending_place = this.deref_operand(pending_op)?.into(); let context = this.read_pointer(context_op)?; @@ -232,7 +180,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { ) 
-> InterpResult<'tcx, Scalar> { let this = self.eval_context_mut(); - let id = this.init_once_get_or_create_id(init_once_op)?; + let id = this.init_once_get_or_create_id(init_once_op, 0)?; let flags = this.read_scalar(flags_op)?.to_u32()?; let context = this.read_pointer(context_op)?; From 2c48b1b72e432e78cc3ab6bae3fd0455d1c49615 Mon Sep 17 00:00:00 2001 From: DrMeepster <19316085+DrMeepster@users.noreply.github.com> Date: Tue, 18 Oct 2022 23:50:39 -0700 Subject: [PATCH 6/8] remove redundant Ok(...?) --- src/concurrency/sync.rs | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/src/concurrency/sync.rs b/src/concurrency/sync.rs index 2ec852b413..02b6c09cd5 100644 --- a/src/concurrency/sync.rs +++ b/src/concurrency/sync.rs @@ -315,9 +315,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { offset: u64, ) -> InterpResult<'tcx, RwLockId> { let this = self.eval_context_mut(); - this.rwlock_get_or_create( - |ecx, next_id| Ok(ecx.get_or_create_id(next_id, lock_op, offset)?), - ) + this.rwlock_get_or_create(|ecx, next_id| ecx.get_or_create_id(next_id, lock_op, offset)) } fn condvar_get_or_create_id( @@ -326,9 +324,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { offset: u64, ) -> InterpResult<'tcx, CondvarId> { let this = self.eval_context_mut(); - this.condvar_get_or_create(|ecx, next_id| { - Ok(ecx.get_or_create_id(next_id, lock_op, offset)?) - }) + this.condvar_get_or_create(|ecx, next_id| ecx.get_or_create_id(next_id, lock_op, offset)) } fn init_once_get_or_create_id( @@ -337,9 +333,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { offset: u64, ) -> InterpResult<'tcx, InitOnceId> { let this = self.eval_context_mut(); - this.init_once_get_or_create(|ecx, next_id| { - Ok(ecx.get_or_create_id(next_id, lock_op, offset)?) 
- }) + this.init_once_get_or_create(|ecx, next_id| ecx.get_or_create_id(next_id, lock_op, offset)) } #[inline] From 96805444f5f7a2b912d0af43267cea22593c8b7b Mon Sep 17 00:00:00 2001 From: DrMeepster <19316085+DrMeepster@users.noreply.github.com> Date: Wed, 19 Oct 2022 23:06:01 -0700 Subject: [PATCH 7/8] change rust version to fix CI --- rust-version | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rust-version b/rust-version index f94611f0d4..0a3daa2d08 100644 --- a/rust-version +++ b/rust-version @@ -1 +1 @@ -a24a020e6d926dffe6b472fc647978f92269504e +edabf59ca4646b3fc1a961c26431215001043f6a From c9b32cca668cca76fe2b5a728861eac6e39238a6 Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Thu, 20 Oct 2022 22:18:59 +0200 Subject: [PATCH 8/8] slight refactoring --- src/concurrency/init_once.rs | 204 ++++++++++++++++++++++++++++ src/concurrency/mod.rs | 2 + src/concurrency/sync.rs | 249 +++-------------------------------- src/lib.rs | 22 ++-- src/shims/env.rs | 10 +- src/shims/unix/sync.rs | 38 +++--- src/shims/windows/sync.rs | 90 +++++++------ 7 files changed, 310 insertions(+), 305 deletions(-) create mode 100644 src/concurrency/init_once.rs diff --git a/src/concurrency/init_once.rs b/src/concurrency/init_once.rs new file mode 100644 index 0000000000..791931901e --- /dev/null +++ b/src/concurrency/init_once.rs @@ -0,0 +1,204 @@ +use std::collections::VecDeque; +use std::num::NonZeroU32; + +use rustc_index::vec::Idx; + +use super::sync::EvalContextExtPriv; +use super::thread::MachineCallback; +use super::vector_clock::VClock; +use crate::*; + +declare_id!(InitOnceId); + +/// A thread waiting on an InitOnce object. +struct InitOnceWaiter<'mir, 'tcx> { + /// The thread that is waiting. + thread: ThreadId, + /// The callback that should be executed, after the thread has been woken up. 
+ callback: Box + 'tcx>, +} + +impl<'mir, 'tcx> std::fmt::Debug for InitOnceWaiter<'mir, 'tcx> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("InitOnce") + .field("thread", &self.thread) + .field("callback", &"dyn MachineCallback") + .finish() + } +} + +#[derive(Default, Debug, Copy, Clone, PartialEq, Eq)] +/// The current status of a one time initialization. +pub enum InitOnceStatus { + #[default] + Uninitialized, + Begun, + Complete, +} + +/// The one time initialization state. +#[derive(Default, Debug)] +pub(super) struct InitOnce<'mir, 'tcx> { + status: InitOnceStatus, + waiters: VecDeque>, + data_race: VClock, +} + +impl<'mir, 'tcx> VisitTags for InitOnce<'mir, 'tcx> { + fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) { + for waiter in self.waiters.iter() { + waiter.callback.visit_tags(visit); + } + } +} + +impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {} +pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { + fn init_once_get_or_create_id( + &mut self, + lock_op: &OpTy<'tcx, Provenance>, + offset: u64, + ) -> InterpResult<'tcx, InitOnceId> { + let this = self.eval_context_mut(); + this.init_once_get_or_create(|ecx, next_id| ecx.get_or_create_id(next_id, lock_op, offset)) + } + + /// Provides the closure with the next InitOnceId. Creates that InitOnce if the closure returns None, + /// otherwise returns the value from the closure. + #[inline] + fn init_once_get_or_create(&mut self, existing: F) -> InterpResult<'tcx, InitOnceId> + where + F: FnOnce( + &mut MiriInterpCx<'mir, 'tcx>, + InitOnceId, + ) -> InterpResult<'tcx, Option>, + { + let this = self.eval_context_mut(); + let next_index = this.machine.threads.sync.init_onces.next_index(); + if let Some(old) = existing(this, next_index)? 
{ + Ok(old) + } else { + let new_index = this.machine.threads.sync.init_onces.push(Default::default()); + assert_eq!(next_index, new_index); + Ok(new_index) + } + } + + #[inline] + fn init_once_status(&mut self, id: InitOnceId) -> InitOnceStatus { + let this = self.eval_context_ref(); + this.machine.threads.sync.init_onces[id].status + } + + /// Put the thread into the queue waiting for the initialization. + #[inline] + fn init_once_enqueue_and_block( + &mut self, + id: InitOnceId, + thread: ThreadId, + callback: Box + 'tcx>, + ) { + let this = self.eval_context_mut(); + let init_once = &mut this.machine.threads.sync.init_onces[id]; + assert_ne!(init_once.status, InitOnceStatus::Complete, "queueing on complete init once"); + init_once.waiters.push_back(InitOnceWaiter { thread, callback }); + this.block_thread(thread); + } + + /// Begin initializing this InitOnce. Must only be called after checking that it is currently + /// uninitialized. + #[inline] + fn init_once_begin(&mut self, id: InitOnceId) { + let this = self.eval_context_mut(); + let init_once = &mut this.machine.threads.sync.init_onces[id]; + assert_eq!( + init_once.status, + InitOnceStatus::Uninitialized, + "begining already begun or complete init once" + ); + init_once.status = InitOnceStatus::Begun; + } + + #[inline] + fn init_once_complete(&mut self, id: InitOnceId) -> InterpResult<'tcx> { + let this = self.eval_context_mut(); + let current_thread = this.get_active_thread(); + let init_once = &mut this.machine.threads.sync.init_onces[id]; + + assert_eq!( + init_once.status, + InitOnceStatus::Begun, + "completing already complete or uninit init once" + ); + + init_once.status = InitOnceStatus::Complete; + + // Each complete happens-before the end of the wait + if let Some(data_race) = &this.machine.data_race { + data_race.validate_lock_release(&mut init_once.data_race, current_thread); + } + + // Wake up everyone. 
+ // need to take the queue to avoid having `this` be borrowed multiple times + for waiter in std::mem::take(&mut init_once.waiters) { + // End of the wait happens-before woken-up thread. + if let Some(data_race) = &this.machine.data_race { + data_race.validate_lock_acquire( + &this.machine.threads.sync.init_onces[id].data_race, + waiter.thread, + ); + } + + this.unblock_thread(waiter.thread); + + // Call callback, with the woken-up thread as `current`. + this.set_active_thread(waiter.thread); + waiter.callback.call(this)?; + this.set_active_thread(current_thread); + } + + Ok(()) + } + + #[inline] + fn init_once_fail(&mut self, id: InitOnceId) -> InterpResult<'tcx> { + let this = self.eval_context_mut(); + let current_thread = this.get_active_thread(); + let init_once = &mut this.machine.threads.sync.init_onces[id]; + assert_eq!( + init_once.status, + InitOnceStatus::Begun, + "failing already completed or uninit init once" + ); + + // Each complete happens-before the end of the wait + // FIXME: should this really induce synchronization? If we think of it as a lock, then yes, + // but the docs don't talk about such details. + if let Some(data_race) = &this.machine.data_race { + data_race.validate_lock_release(&mut init_once.data_race, current_thread); + } + + // Wake up one waiting thread, so they can go ahead and try to init this. + if let Some(waiter) = init_once.waiters.pop_front() { + // End of the wait happens-before woken-up thread. + if let Some(data_race) = &this.machine.data_race { + data_race.validate_lock_acquire( + &this.machine.threads.sync.init_onces[id].data_race, + waiter.thread, + ); + } + + this.unblock_thread(waiter.thread); + + // Call callback, with the woken-up thread as `current`. 
+ this.set_active_thread(waiter.thread); + waiter.callback.call(this)?; + this.set_active_thread(current_thread); + } else { + // Nobody there to take this, so go back to 'uninit' + init_once.status = InitOnceStatus::Uninitialized; + } + + Ok(()) + } +} diff --git a/src/concurrency/mod.rs b/src/concurrency/mod.rs index 61ef3d5640..45903107f1 100644 --- a/src/concurrency/mod.rs +++ b/src/concurrency/mod.rs @@ -1,6 +1,8 @@ pub mod data_race; mod range_object_map; +#[macro_use] pub mod sync; +pub mod init_once; pub mod thread; mod vector_clock; pub mod weak_memory; diff --git a/src/concurrency/sync.rs b/src/concurrency/sync.rs index 02b6c09cd5..e76610e730 100644 --- a/src/concurrency/sync.rs +++ b/src/concurrency/sync.rs @@ -7,13 +7,13 @@ use log::trace; use rustc_data_structures::fx::FxHashMap; use rustc_index::vec::{Idx, IndexVec}; -use super::thread::MachineCallback; +use super::init_once::InitOnce; use super::vector_clock::VClock; use crate::*; pub trait SyncId { fn from_u32(id: u32) -> Self; - fn to_u32_scalar(&self) -> Scalar; + fn to_u32(&self) -> u32; } /// We cannot use the `newtype_index!` macro because we have to use 0 as a @@ -32,8 +32,8 @@ macro_rules! declare_id { fn from_u32(id: u32) -> Self { Self(NonZeroU32::new(id).unwrap()) } - fn to_u32_scalar(&self) -> Scalar { - Scalar::from_u32(self.0.get()) + fn to_u32(&self) -> u32 { + self.0.get() } } @@ -158,47 +158,6 @@ struct FutexWaiter { bitset: u32, } -declare_id!(InitOnceId); - -struct InitOnceWaiter<'mir, 'tcx> { - thread: ThreadId, - callback: Box + 'tcx>, -} - -impl<'mir, 'tcx> std::fmt::Debug for InitOnceWaiter<'mir, 'tcx> { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("InitOnce") - .field("thread", &self.thread) - .field("callback", &"dyn MachineCallback") - .finish() - } -} - -#[derive(Default, Debug, Copy, Clone, PartialEq, Eq)] -/// The current status of a one time initialization. 
-pub enum InitOnceStatus { - #[default] - Uninitialized, - Begun, - Complete, -} - -/// The one time initialization state. -#[derive(Default, Debug)] -struct InitOnce<'mir, 'tcx> { - status: InitOnceStatus, - waiters: VecDeque>, - data_race: VClock, -} - -impl<'mir, 'tcx> VisitTags for InitOnce<'mir, 'tcx> { - fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) { - for waiter in self.waiters.iter() { - waiter.callback.visit_tags(visit); - } - } -} - /// The state of all synchronization variables. #[derive(Default, Debug)] pub(crate) struct SynchronizationState<'mir, 'tcx> { @@ -206,7 +165,7 @@ pub(crate) struct SynchronizationState<'mir, 'tcx> { rwlocks: IndexVec, condvars: IndexVec, futexes: FxHashMap, - init_onces: IndexVec>, + pub(super) init_onces: IndexVec>, } impl<'mir, 'tcx> VisitTags for SynchronizationState<'mir, 'tcx> { @@ -219,7 +178,9 @@ impl<'mir, 'tcx> VisitTags for SynchronizationState<'mir, 'tcx> { // Private extension trait for local helper methods impl<'mir, 'tcx: 'mir> EvalContextExtPriv<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {} -trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { +pub(super) trait EvalContextExtPriv<'mir, 'tcx: 'mir>: + crate::MiriInterpCxExt<'mir, 'tcx> +{ #[inline] // Miri sync structures contain zero-initialized ids stored at some offset behind a pointer fn get_or_create_id( @@ -236,8 +197,8 @@ trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { .atomic_compare_exchange_scalar( &value_place, &ImmTy::from_uint(0u32, this.machine.layouts.u32), - next_id.to_u32_scalar(), - AtomicRwOrd::Relaxed, + Scalar::from_u32(next_id.to_u32()), + AtomicRwOrd::Relaxed, // deliberately *no* synchronization AtomicReadOrd::Relaxed, false, )? 
@@ -306,7 +267,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { offset: u64, ) -> InterpResult<'tcx, MutexId> { let this = self.eval_context_mut(); - this.mutex_get_or_create(|ecx, next_id| Ok(ecx.get_or_create_id(next_id, lock_op, offset)?)) + this.mutex_get_or_create(|ecx, next_id| ecx.get_or_create_id(next_id, lock_op, offset)) } fn rwlock_get_or_create_id( @@ -327,22 +288,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { this.condvar_get_or_create(|ecx, next_id| ecx.get_or_create_id(next_id, lock_op, offset)) } - fn init_once_get_or_create_id( - &mut self, - lock_op: &OpTy<'tcx, Provenance>, - offset: u64, - ) -> InterpResult<'tcx, InitOnceId> { - let this = self.eval_context_mut(); - this.init_once_get_or_create(|ecx, next_id| ecx.get_or_create_id(next_id, lock_op, offset)) - } - - #[inline] - /// Create state for a new mutex. - fn mutex_create(&mut self) -> MutexId { - let this = self.eval_context_mut(); - this.machine.threads.sync.mutexes.push(Default::default()) - } - #[inline] /// Provides the closure with the next MutexId. Creates that mutex if the closure returns None, /// otherwise returns the value from the closure @@ -427,8 +372,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { } } - #[inline] /// Put the thread into the queue waiting for the mutex. + #[inline] fn mutex_enqueue_and_block(&mut self, id: MutexId, thread: ThreadId) { let this = self.eval_context_mut(); assert!(this.mutex_is_locked(id), "queing on unlocked mutex"); @@ -436,16 +381,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { this.block_thread(thread); } - #[inline] - /// Create state for a new read write lock. - fn rwlock_create(&mut self) -> RwLockId { - let this = self.eval_context_mut(); - this.machine.threads.sync.rwlocks.push(Default::default()) - } - - #[inline] /// Provides the closure with the next RwLockId. 
Creates that RwLock if the closure returns None, /// otherwise returns the value from the closure + #[inline] fn rwlock_get_or_create(&mut self, existing: F) -> InterpResult<'tcx, RwLockId> where F: FnOnce(&mut MiriInterpCx<'mir, 'tcx>, RwLockId) -> InterpResult<'tcx, Option>, @@ -475,8 +413,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { rwlock.writer.is_some() || rwlock.readers.is_empty().not() } - #[inline] /// Check if write locked. + #[inline] fn rwlock_is_write_locked(&self, id: RwLockId) -> bool { let this = self.eval_context_ref(); let rwlock = &this.machine.threads.sync.rwlocks[id]; @@ -533,8 +471,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { true } - #[inline] /// Put the reader in the queue waiting for the lock and block it. + #[inline] fn rwlock_enqueue_and_block_reader(&mut self, id: RwLockId, reader: ThreadId) { let this = self.eval_context_mut(); assert!(this.rwlock_is_write_locked(id), "read-queueing on not write locked rwlock"); @@ -542,8 +480,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { this.block_thread(reader); } - #[inline] /// Lock by setting the writer that owns the lock. + #[inline] fn rwlock_writer_lock(&mut self, id: RwLockId, writer: ThreadId) { let this = self.eval_context_mut(); assert!(!this.rwlock_is_locked(id), "the rwlock is already locked"); @@ -555,8 +493,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { } } - #[inline] /// Try to unlock by removing the writer. + #[inline] fn rwlock_writer_unlock(&mut self, id: RwLockId, expected_writer: ThreadId) -> bool { let this = self.eval_context_mut(); let rwlock = &mut this.machine.threads.sync.rwlocks[id]; @@ -593,8 +531,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { } } - #[inline] /// Put the writer in the queue waiting for the lock. 
+ #[inline] fn rwlock_enqueue_and_block_writer(&mut self, id: RwLockId, writer: ThreadId) { let this = self.eval_context_mut(); assert!(this.rwlock_is_locked(id), "write-queueing on unlocked rwlock"); @@ -602,16 +540,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { this.block_thread(writer); } - #[inline] - /// Create state for a new conditional variable. - fn condvar_create(&mut self) -> CondvarId { - let this = self.eval_context_mut(); - this.machine.threads.sync.condvars.push(Default::default()) - } - - #[inline] /// Provides the closure with the next CondvarId. Creates that Condvar if the closure returns None, /// otherwise returns the value from the closure + #[inline] fn condvar_get_or_create(&mut self, existing: F) -> InterpResult<'tcx, CondvarId> where F: FnOnce( @@ -630,8 +561,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { } } - #[inline] /// Is the conditional variable awaited? + #[inline] fn condvar_is_awaited(&mut self, id: CondvarId) -> bool { let this = self.eval_context_mut(); !this.machine.threads.sync.condvars[id].waiters.is_empty() @@ -707,144 +638,4 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { futex.waiters.retain(|waiter| waiter.thread != thread); } } - - #[inline] - /// Create state for a new one time initialization. - fn init_once_create(&mut self) -> InitOnceId { - let this = self.eval_context_mut(); - this.machine.threads.sync.init_onces.push(Default::default()) - } - - #[inline] - /// Provides the closure with the next InitOnceId. 
Creates that InitOnce if the closure returns None, - /// otherwise returns the value from the closure - fn init_once_get_or_create(&mut self, existing: F) -> InterpResult<'tcx, InitOnceId> - where - F: FnOnce( - &mut MiriInterpCx<'mir, 'tcx>, - InitOnceId, - ) -> InterpResult<'tcx, Option>, - { - let this = self.eval_context_mut(); - let next_index = this.machine.threads.sync.init_onces.next_index(); - if let Some(old) = existing(this, next_index)? { - Ok(old) - } else { - let new_index = this.machine.threads.sync.init_onces.push(Default::default()); - assert_eq!(next_index, new_index); - Ok(new_index) - } - } - - #[inline] - fn init_once_status(&mut self, id: InitOnceId) -> InitOnceStatus { - let this = self.eval_context_ref(); - this.machine.threads.sync.init_onces[id].status - } - - #[inline] - /// Put the thread into the queue waiting for the initialization. - fn init_once_enqueue_and_block( - &mut self, - id: InitOnceId, - thread: ThreadId, - callback: Box + 'tcx>, - ) { - let this = self.eval_context_mut(); - let init_once = &mut this.machine.threads.sync.init_onces[id]; - assert_ne!(init_once.status, InitOnceStatus::Complete, "queueing on complete init once"); - init_once.waiters.push_back(InitOnceWaiter { thread, callback }); - this.block_thread(thread); - } - - #[inline] - fn init_once_begin(&mut self, id: InitOnceId) { - let this = self.eval_context_mut(); - let init_once = &mut this.machine.threads.sync.init_onces[id]; - assert_eq!( - init_once.status, - InitOnceStatus::Uninitialized, - "begining already begun or complete init once" - ); - init_once.status = InitOnceStatus::Begun; - } - - #[inline] - fn init_once_complete(&mut self, id: InitOnceId) -> InterpResult<'tcx> { - let this = self.eval_context_mut(); - let current_thread = this.get_active_thread(); - let init_once = &mut this.machine.threads.sync.init_onces[id]; - - assert_eq!( - init_once.status, - InitOnceStatus::Begun, - "completing already complete or uninit init once" - ); - - 
init_once.status = InitOnceStatus::Complete; - - // Each complete happens-before the end of the wait - if let Some(data_race) = &this.machine.data_race { - data_race.validate_lock_release(&mut init_once.data_race, current_thread); - } - - // need to take the queue to avoid having `this` be borrowed multiple times - for waiter in std::mem::take(&mut init_once.waiters) { - this.unblock_thread(waiter.thread); - - this.set_active_thread(waiter.thread); - waiter.callback.call(this)?; - this.set_active_thread(current_thread); - - if let Some(data_race) = &this.machine.data_race { - data_race.validate_lock_acquire( - &this.machine.threads.sync.init_onces[id].data_race, - waiter.thread, - ); - } - } - - Ok(()) - } - - #[inline] - fn init_once_fail(&mut self, id: InitOnceId) -> InterpResult<'tcx> { - let this = self.eval_context_mut(); - let current_thread = this.get_active_thread(); - let init_once = &mut this.machine.threads.sync.init_onces[id]; - assert_eq!( - init_once.status, - InitOnceStatus::Begun, - "failing already completed or uninit init once" - ); - - // Each complete happens-before the end of the wait - if let Some(data_race) = &this.machine.data_race { - data_race.validate_lock_release(&mut init_once.data_race, current_thread); - } - - // the behavior of failing the initialization is left vague by the docs - // it had to be determined experimentally - if let Some(waiter) = init_once.waiters.pop_front() { - // try initializing again on a different thread - init_once.status = InitOnceStatus::Begun; - - this.unblock_thread(waiter.thread); - - this.set_active_thread(waiter.thread); - waiter.callback.call(this)?; - this.set_active_thread(current_thread); - - if let Some(data_race) = &this.machine.data_race { - data_race.validate_lock_acquire( - &this.machine.threads.sync.init_onces[id].data_race, - waiter.thread, - ); - } - } else { - init_once.status = InitOnceStatus::Uninitialized; - } - - Ok(()) - } } diff --git a/src/lib.rs b/src/lib.rs index 
5c4094378a..479353bb98 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -83,34 +83,28 @@ pub use crate::shims::EvalContextExt as _; pub use crate::clock::{Clock, Instant}; pub use crate::concurrency::{ - data_race::{ - AtomicFenceOrd, AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd, - EvalContextExt as DataRaceEvalContextExt, - }, - sync::{CondvarId, EvalContextExt as SyncEvalContextExt, InitOnceId, MutexId, RwLockId, SyncId}, - thread::{ - EvalContextExt as ThreadsEvalContextExt, SchedulingAction, ThreadId, ThreadManager, - ThreadState, Time, - }, + data_race::{AtomicFenceOrd, AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd, EvalContextExt as _}, + init_once::{EvalContextExt as _, InitOnceId}, + sync::{CondvarId, EvalContextExt as _, MutexId, RwLockId, SyncId}, + thread::{EvalContextExt as _, SchedulingAction, ThreadId, ThreadManager, ThreadState, Time}, }; pub use crate::diagnostics::{ - report_error, EvalContextExt as DiagnosticsEvalContextExt, NonHaltingDiagnostic, - TerminationInfo, + report_error, EvalContextExt as _, NonHaltingDiagnostic, TerminationInfo, }; pub use crate::eval::{ create_ecx, eval_entry, AlignmentCheck, BacktraceStyle, IsolatedOp, MiriConfig, RejectOpWith, }; -pub use crate::helpers::{CurrentSpan, EvalContextExt as HelpersEvalContextExt}; +pub use crate::helpers::{CurrentSpan, EvalContextExt as _}; pub use crate::intptrcast::ProvenanceMode; pub use crate::machine::{ AllocExtra, FrameData, MiriInterpCx, MiriInterpCxExt, MiriMachine, MiriMemoryKind, Provenance, ProvenanceExtra, PAGE_SIZE, STACK_ADDR, STACK_SIZE, }; pub use crate::mono_hash_map::MonoHashMap; -pub use crate::operator::EvalContextExt as OperatorEvalContextExt; +pub use crate::operator::EvalContextExt as _; pub use crate::range_map::RangeMap; pub use crate::stacked_borrows::{ - CallId, EvalContextExt as StackedBorEvalContextExt, Item, Permission, SbTag, Stack, Stacks, + CallId, EvalContextExt as _, Item, Permission, SbTag, Stack, Stacks, }; pub use crate::tag_gc::{EvalContextExt as _, 
VisitTags}; diff --git a/src/shims/env.rs b/src/shims/env.rs index 036f06bde0..bf6c1f8756 100644 --- a/src/shims/env.rs +++ b/src/shims/env.rs @@ -274,7 +274,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { this.deallocate_ptr(var, None, MiriMemoryKind::Runtime.into())?; this.update_environ()?; } - Ok(Scalar::from_i32(1)) // return non-zero on success + Ok(this.eval_windows("c", "TRUE")?) } else { let value = this.read_os_str_from_wide_str(value_ptr)?; let var_ptr = alloc_env_var_as_wide_str(&name, &value, this)?; @@ -282,7 +282,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { this.deallocate_ptr(var, None, MiriMemoryKind::Runtime.into())?; } this.update_environ()?; - Ok(Scalar::from_i32(1)) // return non-zero on success + Ok(this.eval_windows("c", "TRUE")?) } } @@ -411,14 +411,14 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { this.reject_in_isolation("`SetCurrentDirectoryW`", reject_with)?; this.set_last_error_from_io_error(ErrorKind::PermissionDenied)?; - return Ok(Scalar::from_i32(0)); + return this.eval_windows("c", "FALSE"); } match env::set_current_dir(path) { - Ok(()) => Ok(Scalar::from_i32(1)), + Ok(()) => this.eval_windows("c", "TRUE"), Err(e) => { this.set_last_error_from_io_error(e.kind())?; - Ok(Scalar::from_i32(0)) + this.eval_windows("c", "FALSE") } } } diff --git a/src/shims/unix/sync.rs b/src/shims/unix/sync.rs index 7857fa4bcc..3e1e34c5db 100644 --- a/src/shims/unix/sync.rs +++ b/src/shims/unix/sync.rs @@ -19,6 +19,10 @@ use crate::*; /// in `pthread_mutexattr_settype` function. 
const PTHREAD_MUTEX_NORMAL_FLAG: i32 = 0x8000000; +const MUTEX_ID_OFFSET: u64 = 4; +const RWLOCK_ID_OFFSET: u64 = 4; +const CONDVAR_ID_OFFSET: u64 = 4; + fn is_mutex_kind_default<'mir, 'tcx: 'mir>( ecx: &mut MiriInterpCx<'mir, 'tcx>, kind: Scalar, @@ -354,7 +358,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { let this = self.eval_context_mut(); let kind = mutex_get_kind(this, mutex_op)?; - let id = this.mutex_get_or_create_id(mutex_op, 4)?; + let id = this.mutex_get_or_create_id(mutex_op, MUTEX_ID_OFFSET)?; let active_thread = this.get_active_thread(); if this.mutex_is_locked(id) { @@ -394,7 +398,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { let this = self.eval_context_mut(); let kind = mutex_get_kind(this, mutex_op)?; - let id = this.mutex_get_or_create_id(mutex_op, 4)?; + let id = this.mutex_get_or_create_id(mutex_op, MUTEX_ID_OFFSET)?; let active_thread = this.get_active_thread(); if this.mutex_is_locked(id) { @@ -430,7 +434,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { let this = self.eval_context_mut(); let kind = mutex_get_kind(this, mutex_op)?; - let id = this.mutex_get_or_create_id(mutex_op, 4)?; + let id = this.mutex_get_or_create_id(mutex_op, MUTEX_ID_OFFSET)?; let active_thread = this.get_active_thread(); if let Some(_old_locked_count) = this.mutex_unlock(id, active_thread) { @@ -464,7 +468,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { ) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - let id = this.mutex_get_or_create_id(mutex_op, 4)?; + let id = this.mutex_get_or_create_id(mutex_op, MUTEX_ID_OFFSET)?; if this.mutex_is_locked(id) { throw_ub_format!("destroyed a locked mutex"); @@ -487,7 +491,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { ) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - let id = 
this.rwlock_get_or_create_id(rwlock_op, 4)?; + let id = this.rwlock_get_or_create_id(rwlock_op, RWLOCK_ID_OFFSET)?; let active_thread = this.get_active_thread(); if this.rwlock_is_write_locked(id) { @@ -505,7 +509,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { ) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - let id = this.rwlock_get_or_create_id(rwlock_op, 4)?; + let id = this.rwlock_get_or_create_id(rwlock_op, RWLOCK_ID_OFFSET)?; let active_thread = this.get_active_thread(); if this.rwlock_is_write_locked(id) { @@ -522,7 +526,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { ) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - let id = this.rwlock_get_or_create_id(rwlock_op, 4)?; + let id = this.rwlock_get_or_create_id(rwlock_op, RWLOCK_ID_OFFSET)?; let active_thread = this.get_active_thread(); if this.rwlock_is_locked(id) { @@ -552,7 +556,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { ) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - let id = this.rwlock_get_or_create_id(rwlock_op, 4)?; + let id = this.rwlock_get_or_create_id(rwlock_op, RWLOCK_ID_OFFSET)?; let active_thread = this.get_active_thread(); if this.rwlock_is_locked(id) { @@ -569,7 +573,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { ) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - let id = this.rwlock_get_or_create_id(rwlock_op, 4)?; + let id = this.rwlock_get_or_create_id(rwlock_op, RWLOCK_ID_OFFSET)?; let active_thread = this.get_active_thread(); #[allow(clippy::if_same_then_else)] @@ -588,7 +592,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { ) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - let id = this.rwlock_get_or_create_id(rwlock_op, 4)?; + let id = this.rwlock_get_or_create_id(rwlock_op, RWLOCK_ID_OFFSET)?; if 
this.rwlock_is_locked(id) { throw_ub_format!("destroyed a locked rwlock"); @@ -691,7 +695,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { fn pthread_cond_signal(&mut self, cond_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - let id = this.condvar_get_or_create_id(cond_op, 4)?; + let id = this.condvar_get_or_create_id(cond_op, CONDVAR_ID_OFFSET)?; if let Some((thread, mutex)) = this.condvar_signal(id) { post_cond_signal(this, thread, mutex)?; } @@ -704,7 +708,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { cond_op: &OpTy<'tcx, Provenance>, ) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - let id = this.condvar_get_or_create_id(cond_op, 4)?; + let id = this.condvar_get_or_create_id(cond_op, CONDVAR_ID_OFFSET)?; while let Some((thread, mutex)) = this.condvar_signal(id) { post_cond_signal(this, thread, mutex)?; @@ -720,8 +724,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { ) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - let id = this.condvar_get_or_create_id(cond_op, 4)?; - let mutex_id = this.mutex_get_or_create_id(mutex_op, 4)?; + let id = this.condvar_get_or_create_id(cond_op, CONDVAR_ID_OFFSET)?; + let mutex_id = this.mutex_get_or_create_id(mutex_op, MUTEX_ID_OFFSET)?; let active_thread = this.get_active_thread(); release_cond_mutex_and_block(this, active_thread, mutex_id)?; @@ -741,8 +745,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { this.check_no_isolation("`pthread_cond_timedwait`")?; - let id = this.condvar_get_or_create_id(cond_op, 4)?; - let mutex_id = this.mutex_get_or_create_id(mutex_op, 4)?; + let id = this.condvar_get_or_create_id(cond_op, CONDVAR_ID_OFFSET)?; + let mutex_id = this.mutex_get_or_create_id(mutex_op, MUTEX_ID_OFFSET)?; let active_thread = this.get_active_thread(); // Extract the timeout. 
@@ -818,7 +822,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { ) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - let id = this.condvar_get_or_create_id(cond_op, 4)?; + let id = this.condvar_get_or_create_id(cond_op, CONDVAR_ID_OFFSET)?; if this.condvar_is_awaited(id) { throw_ub_format!("destroying an awaited conditional variable"); } diff --git a/src/shims/windows/sync.rs b/src/shims/windows/sync.rs index 3eca86d386..8064ca5667 100644 --- a/src/shims/windows/sync.rs +++ b/src/shims/windows/sync.rs @@ -1,14 +1,17 @@ -use crate::concurrency::sync::InitOnceStatus; +use crate::concurrency::init_once::InitOnceStatus; use crate::concurrency::thread::MachineCallback; use crate::*; +const SRWLOCK_ID_OFFSET: u64 = 0; +const INIT_ONCE_ID_OFFSET: u64 = 0; + impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {} #[allow(non_snake_case)] pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { fn AcquireSRWLockExclusive(&mut self, lock_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx> { let this = self.eval_context_mut(); - let id = this.rwlock_get_or_create_id(lock_op, 0)?; + let id = this.rwlock_get_or_create_id(lock_op, SRWLOCK_ID_OFFSET)?; let active_thread = this.get_active_thread(); if this.rwlock_is_locked(id) { @@ -32,7 +35,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { lock_op: &OpTy<'tcx, Provenance>, ) -> InterpResult<'tcx, Scalar> { let this = self.eval_context_mut(); - let id = this.rwlock_get_or_create_id(lock_op, 0)?; + let id = this.rwlock_get_or_create_id(lock_op, SRWLOCK_ID_OFFSET)?; let active_thread = this.get_active_thread(); if this.rwlock_is_locked(id) { @@ -46,7 +49,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { fn ReleaseSRWLockExclusive(&mut self, lock_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx> { let this = self.eval_context_mut(); - let id = 
this.rwlock_get_or_create_id(lock_op, 0)?; + let id = this.rwlock_get_or_create_id(lock_op, SRWLOCK_ID_OFFSET)?; let active_thread = this.get_active_thread(); if !this.rwlock_writer_unlock(id, active_thread) { @@ -61,7 +64,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { fn AcquireSRWLockShared(&mut self, lock_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx> { let this = self.eval_context_mut(); - let id = this.rwlock_get_or_create_id(lock_op, 0)?; + let id = this.rwlock_get_or_create_id(lock_op, SRWLOCK_ID_OFFSET)?; let active_thread = this.get_active_thread(); if this.rwlock_is_write_locked(id) { @@ -78,7 +81,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { lock_op: &OpTy<'tcx, Provenance>, ) -> InterpResult<'tcx, Scalar> { let this = self.eval_context_mut(); - let id = this.rwlock_get_or_create_id(lock_op, 0)?; + let id = this.rwlock_get_or_create_id(lock_op, SRWLOCK_ID_OFFSET)?; let active_thread = this.get_active_thread(); if this.rwlock_is_write_locked(id) { @@ -91,7 +94,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { fn ReleaseSRWLockShared(&mut self, lock_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx> { let this = self.eval_context_mut(); - let id = this.rwlock_get_or_create_id(lock_op, 0)?; + let id = this.rwlock_get_or_create_id(lock_op, SRWLOCK_ID_OFFSET)?; let active_thread = this.get_active_thread(); if !this.rwlock_reader_unlock(id, active_thread) { @@ -114,7 +117,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { let this = self.eval_context_mut(); let active_thread = this.get_active_thread(); - let id = this.init_once_get_or_create_id(init_once_op, 0)?; + let id = this.init_once_get_or_create_id(init_once_op, INIT_ONCE_ID_OFFSET)?; let flags = this.read_scalar(flags_op)?.to_u32()?; let pending_place = this.deref_operand(pending_op)?.into(); let context = this.read_pointer(context_op)?; @@ -127,49 
+130,56 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { throw_unsup_format!("non-null `lpContext` in `InitOnceBeginInitialize`"); } - struct Callback<'tcx> { - init_once_id: InitOnceId, - pending_place: PlaceTy<'tcx, Provenance>, - } - - impl<'tcx> VisitTags for Callback<'tcx> { - fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) { - let Callback { init_once_id: _, pending_place } = self; - pending_place.visit_tags(visit); - } - } - - impl<'mir, 'tcx> MachineCallback<'mir, 'tcx> for Callback<'tcx> { - fn call(&self, this: &mut MiriInterpCx<'mir, 'tcx>) -> InterpResult<'tcx> { - let pending = match this.init_once_status(self.init_once_id) { - InitOnceStatus::Uninitialized => - unreachable!("status should have either been set to begun or complete"), - InitOnceStatus::Begun => this.eval_windows("c", "TRUE")?, - InitOnceStatus::Complete => this.eval_windows("c", "FALSE")?, - }; - - this.write_scalar(pending, &self.pending_place)?; - - Ok(()) - } - } - match this.init_once_status(id) { InitOnceStatus::Uninitialized => { this.init_once_begin(id); this.write_scalar(this.eval_windows("c", "TRUE")?, &pending_place)?; } - InitOnceStatus::Begun => + InitOnceStatus::Begun => { + // Someone else is already on it. + // Block this thread until they are done. + // When we are woken up, set the `pending` flag accordingly. 
+ struct Callback<'tcx> { + init_once_id: InitOnceId, + pending_place: PlaceTy<'tcx, Provenance>, + } + + impl<'tcx> VisitTags for Callback<'tcx> { + fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) { + let Callback { init_once_id: _, pending_place } = self; + pending_place.visit_tags(visit); + } + } + + impl<'mir, 'tcx> MachineCallback<'mir, 'tcx> for Callback<'tcx> { + fn call(&self, this: &mut MiriInterpCx<'mir, 'tcx>) -> InterpResult<'tcx> { + let pending = match this.init_once_status(self.init_once_id) { + InitOnceStatus::Uninitialized => + unreachable!( + "status should have either been set to begun or complete" + ), + InitOnceStatus::Begun => this.eval_windows("c", "TRUE")?, + InitOnceStatus::Complete => this.eval_windows("c", "FALSE")?, + }; + + this.write_scalar(pending, &self.pending_place)?; + + Ok(()) + } + } + this.init_once_enqueue_and_block( id, active_thread, Box::new(Callback { init_once_id: id, pending_place }), - ), + ) + } InitOnceStatus::Complete => this.write_scalar(this.eval_windows("c", "FALSE")?, &pending_place)?, } - Ok(Scalar::from_i32(1)) + // This always succeeds (even if the thread is blocked, we will succeed if we ever unblock). + this.eval_windows("c", "TRUE") } fn InitOnceComplete( @@ -180,7 +190,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { ) -> InterpResult<'tcx, Scalar> { let this = self.eval_context_mut(); - let id = this.init_once_get_or_create_id(init_once_op, 0)?; + let id = this.init_once_get_or_create_id(init_once_op, INIT_ONCE_ID_OFFSET)?; let flags = this.read_scalar(flags_op)?.to_u32()?; let context = this.read_pointer(context_op)?; @@ -209,6 +219,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { this.init_once_fail(id)?; } - Ok(Scalar::from_i32(1)) + this.eval_windows("c", "TRUE") } }