From 0b9c8b04349f7ffccc3f961e52a2c8bd46da92e6 Mon Sep 17 00:00:00 2001
From: Andy Wang
Date: Sat, 7 May 2022 21:30:15 +0100
Subject: [PATCH] Use proper atomic rmw for {mutex, rwlock, cond, srwlock}_get_or_create_id

The ids of these lazily initialized sync objects used to be set up with a
plain read followed by an independent write: two threads could both read 0
from the id field and each create a distinct Miri object for the same lock.
Replace the read-then-write with an atomic compare-exchange that installs
the next id, falling back to the id already stored in the field when the
exchange fails, and strengthen the remaining accesses from Relaxed to
Acquire/Release so that the initialization happens-before every later use
of the id.

---
 src/data_race.rs          | 29 ++++++++++------
 src/shims/posix/sync.rs   | 73 ++++++++++++++++++++++++++-------------
 src/shims/windows/sync.rs | 21 +++++++----
 src/sync.rs               | 21 +++++++++++
 4 files changed, 103 insertions(+), 41 deletions(-)

diff --git a/src/data_race.rs b/src/data_race.rs
index c4a9cacb96..902e33e7a3 100644
--- a/src/data_race.rs
+++ b/src/data_race.rs
@@ -431,21 +431,33 @@ impl MemoryCellClocks {
 /// Evaluation context extensions.
 impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriEvalContext<'mir, 'tcx> {}
 pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
-    /// Atomic variant of read_scalar_at_offset.
-    fn read_scalar_at_offset_atomic(
+    /// Calculates the MPlaceTy for an access at the given offset and layout on an operand.
+    fn offset_and_layout_to_place(
         &self,
         op: &OpTy<'tcx, Tag>,
         offset: u64,
         layout: TyAndLayout<'tcx>,
-        atomic: AtomicReadOp,
-    ) -> InterpResult<'tcx, ScalarMaybeUninit> {
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, Tag>> {
         let this = self.eval_context_ref();
         let op_place = this.deref_operand(op)?;
         let offset = Size::from_bytes(offset);
 
-        // Ensure that the following read at an offset is within bounds.
+        // Ensure that the access is within bounds.
         assert!(op_place.layout.size >= offset + layout.size);
         let value_place = op_place.offset(offset, MemPlaceMeta::None, layout, this)?;
+        Ok(value_place)
+    }
+
+    /// Atomic variant of read_scalar_at_offset.
+    fn read_scalar_at_offset_atomic(
+        &self,
+        op: &OpTy<'tcx, Tag>,
+        offset: u64,
+        layout: TyAndLayout<'tcx>,
+        atomic: AtomicReadOp,
+    ) -> InterpResult<'tcx, ScalarMaybeUninit> {
+        let this = self.eval_context_ref();
+        let value_place = this.offset_and_layout_to_place(op, offset, layout)?;
         this.read_scalar_atomic(&value_place, atomic)
     }
 
@@ -459,12 +471,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         atomic: AtomicWriteOp,
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
-        let op_place = this.deref_operand(op)?;
-        let offset = Size::from_bytes(offset);
-
-        // Ensure that the following read at an offset is within bounds.
-        assert!(op_place.layout.size >= offset + layout.size);
-        let value_place = op_place.offset(offset, MemPlaceMeta::None, layout, this)?;
+        let value_place = this.offset_and_layout_to_place(op, offset, layout)?;
         this.write_scalar_atomic(value.into(), &value_place, atomic)
     }
 
diff --git a/src/shims/posix/sync.rs b/src/shims/posix/sync.rs
index ea940df1c6..a10b6ab3a7 100644
--- a/src/shims/posix/sync.rs
+++ b/src/shims/posix/sync.rs
@@ -91,7 +91,7 @@ fn mutex_get_id<'mir, 'tcx: 'mir>(
     ecx: &MiriEvalContext<'mir, 'tcx>,
     mutex_op: &OpTy<'tcx, Tag>,
 ) -> InterpResult<'tcx, ScalarMaybeUninit> {
-    ecx.read_scalar_at_offset_atomic(mutex_op, 4, ecx.machine.layouts.u32, AtomicReadOp::Relaxed)
+    ecx.read_scalar_at_offset_atomic(mutex_op, 4, ecx.machine.layouts.u32, AtomicReadOp::Acquire)
 }
 
 fn mutex_set_id<'mir, 'tcx: 'mir>(
@@ -104,7 +104,7 @@
         4,
         id,
         layout_of_maybe_uninit(ecx.tcx, ecx.tcx.types.u32),
-        AtomicWriteOp::Relaxed,
+        AtomicWriteOp::Release,
     )
 }
 
@@ -112,15 +112,23 @@ fn mutex_get_or_create_id<'mir, 'tcx: 'mir>(
     ecx: &mut MiriEvalContext<'mir, 'tcx>,
     mutex_op: &OpTy<'tcx, Tag>,
 ) -> InterpResult<'tcx, MutexId> {
-    let id = mutex_get_id(ecx, mutex_op)?.to_u32()?;
-    if id == 0 {
-        // 0 is a default value and also not a valid mutex id. Need to allocate
-        // a new mutex.
+    let value_place = ecx.offset_and_layout_to_place(mutex_op, 4, ecx.machine.layouts.u32)?;
+    let (old, success) = ecx
+        .atomic_compare_exchange_scalar(
+            &value_place,
+            &ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
+            ecx.mutex_next_id().to_u32_scalar().into(),
+            AtomicRwOp::AcqRel,
+            AtomicReadOp::Acquire,
+            false,
+        )?
+        .to_scalar_pair()?;
+
+    if success.to_bool().expect("compare_exchange's second return value is a bool") {
         let id = ecx.mutex_create();
-        mutex_set_id(ecx, mutex_op, id.to_u32_scalar())?;
         Ok(id)
     } else {
-        Ok(MutexId::from_u32(id))
+        Ok(MutexId::from_u32(old.to_u32().expect("layout is u32")))
     }
 }
 
@@ -135,7 +143,7 @@ fn rwlock_get_id<'mir, 'tcx: 'mir>(
     ecx: &MiriEvalContext<'mir, 'tcx>,
     rwlock_op: &OpTy<'tcx, Tag>,
 ) -> InterpResult<'tcx, ScalarMaybeUninit> {
-    ecx.read_scalar_at_offset_atomic(rwlock_op, 4, ecx.machine.layouts.u32, AtomicReadOp::Relaxed)
+    ecx.read_scalar_at_offset_atomic(rwlock_op, 4, ecx.machine.layouts.u32, AtomicReadOp::Acquire)
 }
 
 fn rwlock_set_id<'mir, 'tcx: 'mir>(
@@ -148,7 +156,7 @@
         4,
         id,
         layout_of_maybe_uninit(ecx.tcx, ecx.tcx.types.u32),
-        AtomicWriteOp::Relaxed,
+        AtomicWriteOp::Release,
     )
 }
 
@@ -156,15 +164,23 @@ fn rwlock_get_or_create_id<'mir, 'tcx: 'mir>(
     ecx: &mut MiriEvalContext<'mir, 'tcx>,
     rwlock_op: &OpTy<'tcx, Tag>,
 ) -> InterpResult<'tcx, RwLockId> {
-    let id = rwlock_get_id(ecx, rwlock_op)?.to_u32()?;
-    if id == 0 {
-        // 0 is a default value and also not a valid rwlock id. Need to allocate
-        // a new read-write lock.
+    let value_place = ecx.offset_and_layout_to_place(rwlock_op, 4, ecx.machine.layouts.u32)?;
+    let (old, success) = ecx
+        .atomic_compare_exchange_scalar(
+            &value_place,
+            &ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
+            ecx.rwlock_next_id().to_u32_scalar().into(),
+            AtomicRwOp::AcqRel,
+            AtomicReadOp::Acquire,
+            false,
+        )?
+        .to_scalar_pair()?;
+
+    if success.to_bool().expect("compare_exchange's second return value is a bool") {
         let id = ecx.rwlock_create();
-        rwlock_set_id(ecx, rwlock_op, id.to_u32_scalar())?;
         Ok(id)
     } else {
-        Ok(RwLockId::from_u32(id))
+        Ok(RwLockId::from_u32(old.to_u32().expect("layout is u32")))
     }
 }
 
@@ -207,7 +223,7 @@ fn cond_get_id<'mir, 'tcx: 'mir>(
     ecx: &MiriEvalContext<'mir, 'tcx>,
     cond_op: &OpTy<'tcx, Tag>,
 ) -> InterpResult<'tcx, ScalarMaybeUninit> {
-    ecx.read_scalar_at_offset_atomic(cond_op, 4, ecx.machine.layouts.u32, AtomicReadOp::Relaxed)
+    ecx.read_scalar_at_offset_atomic(cond_op, 4, ecx.machine.layouts.u32, AtomicReadOp::Acquire)
 }
 
 fn cond_set_id<'mir, 'tcx: 'mir>(
@@ -220,7 +236,7 @@
         4,
         id,
         layout_of_maybe_uninit(ecx.tcx, ecx.tcx.types.u32),
-        AtomicWriteOp::Relaxed,
+        AtomicWriteOp::Release,
     )
 }
 
@@ -228,15 +244,24 @@ fn cond_get_or_create_id<'mir, 'tcx: 'mir>(
     ecx: &mut MiriEvalContext<'mir, 'tcx>,
     cond_op: &OpTy<'tcx, Tag>,
 ) -> InterpResult<'tcx, CondvarId> {
-    let id = cond_get_id(ecx, cond_op)?.to_u32()?;
-    if id == 0 {
-        // 0 is a default value and also not a valid conditional variable id.
-        // Need to allocate a new id.
+    let value_place = ecx.offset_and_layout_to_place(cond_op, 4, ecx.machine.layouts.u32)?;
+
+    let (old, success) = ecx
+        .atomic_compare_exchange_scalar(
+            &value_place,
+            &ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
+            ecx.condvar_next_id().to_u32_scalar().into(),
+            AtomicRwOp::AcqRel,
+            AtomicReadOp::Acquire,
+            false,
+        )?
+        .to_scalar_pair()?;
+
+    if success.to_bool().expect("compare_exchange's second return value is a bool") {
         let id = ecx.condvar_create();
-        cond_set_id(ecx, cond_op, id.to_u32_scalar())?;
         Ok(id)
     } else {
-        Ok(CondvarId::from_u32(id))
+        Ok(CondvarId::from_u32(old.to_u32().expect("layout is u32")))
     }
 }
 
diff --git a/src/shims/windows/sync.rs b/src/shims/windows/sync.rs
index 78458dc6c9..ccd65aac90 100644
--- a/src/shims/windows/sync.rs
+++ b/src/shims/windows/sync.rs
@@ -7,15 +7,24 @@ fn srwlock_get_or_create_id<'mir, 'tcx: 'mir>(
     ecx: &mut MiriEvalContext<'mir, 'tcx>,
     lock_op: &OpTy<'tcx, Tag>,
 ) -> InterpResult<'tcx, RwLockId> {
-    let id = ecx.read_scalar_at_offset(lock_op, 0, ecx.machine.layouts.u32)?.to_u32()?;
-    if id == 0 {
-        // 0 is a default value and also not a valid rwlock id. Need to allocate
-        // a new rwlock.
+    let value_place = ecx.offset_and_layout_to_place(lock_op, 0, ecx.machine.layouts.u32)?;
+
+    let (old, success) = ecx
+        .atomic_compare_exchange_scalar(
+            &value_place,
+            &ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
+            ecx.rwlock_next_id().to_u32_scalar().into(),
+            AtomicRwOp::AcqRel,
+            AtomicReadOp::Acquire,
+            false,
+        )?
+        .to_scalar_pair()?;
+
+    if success.to_bool().expect("compare_exchange's second return value is a bool") {
         let id = ecx.rwlock_create();
-        ecx.write_scalar_at_offset(lock_op, 0, id.to_u32_scalar(), ecx.machine.layouts.u32)?;
         Ok(id)
     } else {
-        Ok(RwLockId::from_u32(id))
+        Ok(RwLockId::from_u32(old.to_u32().expect("layout is u32")))
     }
 }
 
diff --git a/src/sync.rs b/src/sync.rs
index ac1687a22e..8c5b8ebfec 100644
--- a/src/sync.rs
+++ b/src/sync.rs
@@ -208,6 +208,13 @@ trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
 // situations.
 impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
 pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
+    #[inline]
+    /// Peek the id of the next mutex, without creating it.
+    fn mutex_next_id(&self) -> MutexId {
+        let this = self.eval_context_ref();
+        this.machine.threads.sync.mutexes.next_index()
+    }
+
     #[inline]
     /// Create state for a new mutex.
     fn mutex_create(&mut self) -> MutexId {
@@ -290,6 +297,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         this.block_thread(thread);
     }
 
+    #[inline]
+    /// Peek the id of the next read write lock, without creating it.
+    fn rwlock_next_id(&self) -> RwLockId {
+        let this = self.eval_context_ref();
+        this.machine.threads.sync.rwlocks.next_index()
+    }
+
     #[inline]
     /// Create state for a new read write lock.
     fn rwlock_create(&mut self) -> RwLockId {
@@ -438,6 +452,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         this.block_thread(writer);
     }
 
+    #[inline]
+    /// Peek the id of the next condvar, without creating it.
+    fn condvar_next_id(&self) -> CondvarId {
+        let this = self.eval_context_ref();
+        this.machine.threads.sync.condvars.next_index()
+    }
+
     #[inline]
     /// Create state for a new conditional variable.
     fn condvar_create(&mut self) -> CondvarId {
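
As a standalone illustration of the idiom the get_or_create functions now
share, here is a minimal sketch written against std::sync::atomic rather
than Miri's evaluation context. ID_FIELD, NEXT_ID, peek_next_id and create
are illustrative stand-ins for the id field at its offset and for the
*_next_id/*_create pairs; they are not Miri APIs.

use std::sync::atomic::{AtomicU32, Ordering};
use std::thread;

// Stand-in for the u32 id field embedded in the guest's lock object
// (offset 4 for the pthread types, 0 for SRWLOCK); 0 means "no id yet".
static ID_FIELD: AtomicU32 = AtomicU32::new(0);
// Stand-in for the interpreter-side id allocator behind next_index().
static NEXT_ID: AtomicU32 = AtomicU32::new(1);

/// Analogue of mutex_next_id: peek the id create() would hand out.
fn peek_next_id() -> u32 {
    NEXT_ID.load(Ordering::Relaxed)
}

/// Analogue of mutex_create: actually allocate that id.
fn create() -> u32 {
    NEXT_ID.fetch_add(1, Ordering::Relaxed)
}

/// Analogue of mutex_get_or_create_id: claim the field exactly once.
fn get_or_create_id() -> u32 {
    // Install the would-be-next id into the zeroed field; AcqRel on
    // success and Acquire on failure mirror the orderings in the patch.
    match ID_FIELD.compare_exchange(0, peek_next_id(), Ordering::AcqRel, Ordering::Acquire) {
        // We won the one-time initialization: allocate the id for real.
        Ok(_) => create(),
        // Another thread initialized first: reuse the id it installed.
        Err(existing) => existing,
    }
}

fn main() {
    let handles: Vec<_> = (0..4).map(|_| thread::spawn(get_or_create_id)).collect();
    let ids: Vec<u32> = handles.into_iter().map(|h| h.join().unwrap()).collect();
    // However the threads interleave, they all agree on a single id.
    assert!(ids.iter().all(|&id| id == ids[0]));
}

One caveat on the peek-then-create split: it is only sound if no other id
can be allocated between the compare-exchange and the create() call, since
that would leave a stale id in the field. The sketch satisfies this because
only the CAS winner ever calls create(); the patch satisfies it because
Miri evaluates a shim call to completion without switching guest threads in
the middle. Code initializing many such fields from truly concurrent
threads would instead have to allocate first and tolerate a wasted id when
the exchange fails.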