Skip to content

Commit

Permalink
Use proper atomic rmw for {mutex, rwlock, cond, srwlock}_get_or_create_id
Browse files Browse the repository at this point in the history
  • Loading branch information
cbeuw committed May 10, 2022
1 parent f04f3e8 commit 0b9c8b0
Show file tree
Hide file tree
Showing 4 changed files with 103 additions and 41 deletions.
29 changes: 18 additions & 11 deletions src/data_race.rs
Original file line number Diff line number Diff line change
Expand Up @@ -431,21 +431,33 @@ impl MemoryCellClocks {
/// Evaluation context extensions.
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriEvalContext<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
/// Atomic variant of read_scalar_at_offset.
fn read_scalar_at_offset_atomic(
/// Calculates the MPlaceTy given the offset and layout of an access on an operand
fn offset_and_layout_to_place(
&self,
op: &OpTy<'tcx, Tag>,
offset: u64,
layout: TyAndLayout<'tcx>,
atomic: AtomicReadOp,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
) -> InterpResult<'tcx, MPlaceTy<'tcx, Tag>> {
let this = self.eval_context_ref();
let op_place = this.deref_operand(op)?;
let offset = Size::from_bytes(offset);

// Ensure that the following read at an offset is within bounds.
// Ensure that the access is within bounds.
assert!(op_place.layout.size >= offset + layout.size);
let value_place = op_place.offset(offset, MemPlaceMeta::None, layout, this)?;
Ok(value_place)
}

/// Atomic variant of read_scalar_at_offset.
fn read_scalar_at_offset_atomic(
&self,
op: &OpTy<'tcx, Tag>,
offset: u64,
layout: TyAndLayout<'tcx>,
atomic: AtomicReadOp,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
let this = self.eval_context_ref();
let value_place = this.offset_and_layout_to_place(op, offset, layout)?;
this.read_scalar_atomic(&value_place, atomic)
}

Expand All @@ -459,12 +471,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
atomic: AtomicWriteOp,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let op_place = this.deref_operand(op)?;
let offset = Size::from_bytes(offset);

// Ensure that the following read at an offset is within bounds.
assert!(op_place.layout.size >= offset + layout.size);
let value_place = op_place.offset(offset, MemPlaceMeta::None, layout, this)?;
let value_place = this.offset_and_layout_to_place(op, offset, layout)?;
this.write_scalar_atomic(value.into(), &value_place, atomic)
}

Expand Down
73 changes: 49 additions & 24 deletions src/shims/posix/sync.rs
Original file line number Diff line number Diff line change
Expand Up @@ -91,7 +91,7 @@ fn mutex_get_id<'mir, 'tcx: 'mir>(
ecx: &MiriEvalContext<'mir, 'tcx>,
mutex_op: &OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
ecx.read_scalar_at_offset_atomic(mutex_op, 4, ecx.machine.layouts.u32, AtomicReadOp::Relaxed)
ecx.read_scalar_at_offset_atomic(mutex_op, 4, ecx.machine.layouts.u32, AtomicReadOp::Acquire)
}

fn mutex_set_id<'mir, 'tcx: 'mir>(
Expand All @@ -104,23 +104,31 @@ fn mutex_set_id<'mir, 'tcx: 'mir>(
4,
id,
layout_of_maybe_uninit(ecx.tcx, ecx.tcx.types.u32),
AtomicWriteOp::Relaxed,
AtomicWriteOp::Release,
)
}

fn mutex_get_or_create_id<'mir, 'tcx: 'mir>(
ecx: &mut MiriEvalContext<'mir, 'tcx>,
mutex_op: &OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, MutexId> {
let id = mutex_get_id(ecx, mutex_op)?.to_u32()?;
if id == 0 {
// 0 is a default value and also not a valid mutex id. Need to allocate
// a new mutex.
let value_place = ecx.offset_and_layout_to_place(mutex_op, 4, ecx.machine.layouts.u32)?;
let (old, success) = ecx
.atomic_compare_exchange_scalar(
&value_place,
&ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
ecx.mutex_next_id().to_u32_scalar().into(),
AtomicRwOp::AcqRel,
AtomicReadOp::Acquire,
false,
)?
.to_scalar_pair()?;

if success.to_bool().expect("compare_exchange's second return value is a bool") {
let id = ecx.mutex_create();
mutex_set_id(ecx, mutex_op, id.to_u32_scalar())?;
Ok(id)
} else {
Ok(MutexId::from_u32(id))
Ok(MutexId::from_u32(old.to_u32().expect("layout is u32")))
}
}

Expand All @@ -135,7 +143,7 @@ fn rwlock_get_id<'mir, 'tcx: 'mir>(
ecx: &MiriEvalContext<'mir, 'tcx>,
rwlock_op: &OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
ecx.read_scalar_at_offset_atomic(rwlock_op, 4, ecx.machine.layouts.u32, AtomicReadOp::Relaxed)
ecx.read_scalar_at_offset_atomic(rwlock_op, 4, ecx.machine.layouts.u32, AtomicReadOp::Acquire)
}

fn rwlock_set_id<'mir, 'tcx: 'mir>(
Expand All @@ -148,23 +156,31 @@ fn rwlock_set_id<'mir, 'tcx: 'mir>(
4,
id,
layout_of_maybe_uninit(ecx.tcx, ecx.tcx.types.u32),
AtomicWriteOp::Relaxed,
AtomicWriteOp::Release,
)
}

fn rwlock_get_or_create_id<'mir, 'tcx: 'mir>(
ecx: &mut MiriEvalContext<'mir, 'tcx>,
rwlock_op: &OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, RwLockId> {
let id = rwlock_get_id(ecx, rwlock_op)?.to_u32()?;
if id == 0 {
// 0 is a default value and also not a valid rwlock id. Need to allocate
// a new read-write lock.
let value_place = ecx.offset_and_layout_to_place(rwlock_op, 4, ecx.machine.layouts.u32)?;
let (old, success) = ecx
.atomic_compare_exchange_scalar(
&value_place,
&ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
ecx.rwlock_next_id().to_u32_scalar().into(),
AtomicRwOp::AcqRel,
AtomicReadOp::Acquire,
false,
)?
.to_scalar_pair()?;

if success.to_bool().expect("compare_exchange's second return value is a bool") {
let id = ecx.rwlock_create();
rwlock_set_id(ecx, rwlock_op, id.to_u32_scalar())?;
Ok(id)
} else {
Ok(RwLockId::from_u32(id))
Ok(RwLockId::from_u32(old.to_u32().expect("layout is u32")))
}
}

Expand Down Expand Up @@ -207,7 +223,7 @@ fn cond_get_id<'mir, 'tcx: 'mir>(
ecx: &MiriEvalContext<'mir, 'tcx>,
cond_op: &OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
ecx.read_scalar_at_offset_atomic(cond_op, 4, ecx.machine.layouts.u32, AtomicReadOp::Relaxed)
ecx.read_scalar_at_offset_atomic(cond_op, 4, ecx.machine.layouts.u32, AtomicReadOp::Acquire)
}

fn cond_set_id<'mir, 'tcx: 'mir>(
Expand All @@ -220,23 +236,32 @@ fn cond_set_id<'mir, 'tcx: 'mir>(
4,
id,
layout_of_maybe_uninit(ecx.tcx, ecx.tcx.types.u32),
AtomicWriteOp::Relaxed,
AtomicWriteOp::Release,
)
}

fn cond_get_or_create_id<'mir, 'tcx: 'mir>(
ecx: &mut MiriEvalContext<'mir, 'tcx>,
cond_op: &OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, CondvarId> {
let id = cond_get_id(ecx, cond_op)?.to_u32()?;
if id == 0 {
// 0 is a default value and also not a valid conditional variable id.
// Need to allocate a new id.
let value_place = ecx.offset_and_layout_to_place(cond_op, 4, ecx.machine.layouts.u32)?;

let (old, success) = ecx
.atomic_compare_exchange_scalar(
&value_place,
&ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
ecx.condvar_next_id().to_u32_scalar().into(),
AtomicRwOp::AcqRel,
AtomicReadOp::Acquire,
false,
)?
.to_scalar_pair()?;

if success.to_bool().expect("compare_exchange's second return value is a bool") {
let id = ecx.condvar_create();
cond_set_id(ecx, cond_op, id.to_u32_scalar())?;
Ok(id)
} else {
Ok(CondvarId::from_u32(id))
Ok(CondvarId::from_u32(old.to_u32().expect("layout is u32")))
}
}

Expand Down
21 changes: 15 additions & 6 deletions src/shims/windows/sync.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,15 +7,24 @@ fn srwlock_get_or_create_id<'mir, 'tcx: 'mir>(
ecx: &mut MiriEvalContext<'mir, 'tcx>,
lock_op: &OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, RwLockId> {
let id = ecx.read_scalar_at_offset(lock_op, 0, ecx.machine.layouts.u32)?.to_u32()?;
if id == 0 {
// 0 is a default value and also not a valid rwlock id. Need to allocate
// a new rwlock.
let value_place = ecx.offset_and_layout_to_place(lock_op, 0, ecx.machine.layouts.u32)?;

let (old, success) = ecx
.atomic_compare_exchange_scalar(
&value_place,
&ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
ecx.rwlock_next_id().to_u32_scalar().into(),
AtomicRwOp::AcqRel,
AtomicReadOp::Acquire,
false,
)?
.to_scalar_pair()?;

if success.to_bool().expect("compare_exchange's second return value is a bool") {
let id = ecx.rwlock_create();
ecx.write_scalar_at_offset(lock_op, 0, id.to_u32_scalar(), ecx.machine.layouts.u32)?;
Ok(id)
} else {
Ok(RwLockId::from_u32(id))
Ok(RwLockId::from_u32(old.to_u32().expect("layout is u32")))
}
}

Expand Down
21 changes: 21 additions & 0 deletions src/sync.rs
Original file line number Diff line number Diff line change
Expand Up @@ -208,6 +208,13 @@ trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
// situations.
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
/// Peek the id of the next mutex, i.e. the id that the next call to
/// `mutex_create` will allocate, without creating anything.
#[inline]
fn mutex_next_id(&self) -> MutexId {
    let this = self.eval_context_ref();
    this.machine.threads.sync.mutexes.next_index()
}

#[inline]
/// Create state for a new mutex.
fn mutex_create(&mut self) -> MutexId {
Expand Down Expand Up @@ -290,6 +297,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
this.block_thread(thread);
}

/// Peek the id of the next read-write lock, i.e. the id that the next call
/// to `rwlock_create` will allocate, without creating anything.
#[inline]
fn rwlock_next_id(&self) -> RwLockId {
    let this = self.eval_context_ref();
    this.machine.threads.sync.rwlocks.next_index()
}

#[inline]
/// Create state for a new read write lock.
fn rwlock_create(&mut self) -> RwLockId {
Expand Down Expand Up @@ -438,6 +452,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
this.block_thread(writer);
}

/// Peek the id of the next condition variable, i.e. the id that the next
/// call to `condvar_create` will allocate, without creating anything.
#[inline]
fn condvar_next_id(&self) -> CondvarId {
    let this = self.eval_context_ref();
    this.machine.threads.sync.condvars.next_index()
}

#[inline]
/// Create state for a new conditional variable.
fn condvar_create(&mut self) -> CondvarId {
Expand Down

0 comments on commit 0b9c8b0

Please sign in to comment.