
Commit edaf7d1

Committed May 7, 2022
Use proper atomic rmw for {mutex, rwlock, cond, srwlock}_get_or_create_id
1 parent 92fc4a5 commit edaf7d1
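
Previously each `*_get_or_create_id` shim read the id field and, if it was 0, allocated a fresh id and wrote it back as two separate accesses, so two threads entering the shim concurrently could both observe 0 and install different ids for the same lock object. This commit replaces that read-then-write with a single atomic compare-exchange and strengthens the remaining id loads and stores from Relaxed to Acquire/Release. The sketch below only illustrates the pattern with the standard library's AtomicU32; the names NEXT_ID, get_or_create_id_racy, and get_or_create_id are invented for the example and are not Miri's interpreter API, which performs the operation on interpreted memory via atomic_compare_exchange_scalar.

use std::sync::atomic::{AtomicU32, Ordering};

// Hypothetical id counter for this sketch. Miri instead peeks
// `mutex_next_id()` / `rwlock_next_id()` / `condvar_next_id()` and only
// calls the corresponding `*_create()` once the compare-exchange succeeds.
static NEXT_ID: AtomicU32 = AtomicU32::new(1); // 0 stays reserved for "not yet initialized"

// Old shape (racy): two threads can both observe 0 and install
// different ids for the same lock object.
fn get_or_create_id_racy(slot: &AtomicU32) -> u32 {
    let id = slot.load(Ordering::Relaxed);
    if id == 0 {
        let new_id = NEXT_ID.fetch_add(1, Ordering::Relaxed);
        slot.store(new_id, Ordering::Relaxed);
        new_id
    } else {
        id
    }
}

// New shape: a single atomic read-modify-write decides who initializes
// the slot; the loser reuses whatever id it observed.
fn get_or_create_id(slot: &AtomicU32) -> u32 {
    let candidate = NEXT_ID.fetch_add(1, Ordering::Relaxed);
    match slot.compare_exchange(0, candidate, Ordering::AcqRel, Ordering::Acquire) {
        Ok(_) => candidate,        // we won: the slot now holds our id
        Err(existing) => existing, // someone else initialized it first
    }
}

One simplification to note: the sketch burns an id on the losing path, while the Miri code avoids that by peeking `*_next_id()` first and only calling `*_create()` after the compare-exchange reports success.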

File tree

4 files changed, +103 -41 lines changed


src/data_race.rs

+18 -11
@@ -431,21 +431,33 @@ impl MemoryCellClocks {
 /// Evaluation context extensions.
 impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriEvalContext<'mir, 'tcx> {}
 pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
-    /// Atomic variant of read_scalar_at_offset.
-    fn read_scalar_at_offset_atomic(
+    /// Calculates the MPlaceTy given the offset and layout of an access on an operand
+    fn offset_and_layout_to_place(
         &self,
         op: &OpTy<'tcx, Tag>,
         offset: u64,
         layout: TyAndLayout<'tcx>,
-        atomic: AtomicReadOp,
-    ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, Tag>> {
         let this = self.eval_context_ref();
         let op_place = this.deref_operand(op)?;
         let offset = Size::from_bytes(offset);
 
-        // Ensure that the following read at an offset is within bounds.
+        // Ensure that the access is within bounds.
         assert!(op_place.layout.size >= offset + layout.size);
         let value_place = op_place.offset(offset, MemPlaceMeta::None, layout, this)?;
+        Ok(value_place)
+    }
+
+    /// Atomic variant of read_scalar_at_offset.
+    fn read_scalar_at_offset_atomic(
+        &self,
+        op: &OpTy<'tcx, Tag>,
+        offset: u64,
+        layout: TyAndLayout<'tcx>,
+        atomic: AtomicReadOp,
+    ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
+        let this = self.eval_context_ref();
+        let value_place = this.offset_and_layout_to_place(op, offset, layout)?;
         this.read_scalar_atomic(&value_place, atomic)
     }
 
@@ -459,12 +471,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         atomic: AtomicWriteOp,
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
-        let op_place = this.deref_operand(op)?;
-        let offset = Size::from_bytes(offset);
-
-        // Ensure that the following read at an offset is within bounds.
-        assert!(op_place.layout.size >= offset + layout.size);
-        let value_place = op_place.offset(offset, MemPlaceMeta::None, layout, this)?;
+        let value_place = this.offset_and_layout_to_place(op, offset, layout)?;
         this.write_scalar_atomic(value.into(), &value_place, atomic)
     }

src/shims/posix/sync.rs

+49 -24
@@ -91,7 +91,7 @@ fn mutex_get_id<'mir, 'tcx: 'mir>(
     ecx: &MiriEvalContext<'mir, 'tcx>,
     mutex_op: &OpTy<'tcx, Tag>,
 ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
-    ecx.read_scalar_at_offset_atomic(mutex_op, 4, ecx.machine.layouts.u32, AtomicReadOp::Relaxed)
+    ecx.read_scalar_at_offset_atomic(mutex_op, 4, ecx.machine.layouts.u32, AtomicReadOp::Acquire)
 }
 
 fn mutex_set_id<'mir, 'tcx: 'mir>(
@@ -104,23 +104,31 @@ fn mutex_set_id<'mir, 'tcx: 'mir>(
         4,
         id,
         layout_of_maybe_uninit(ecx.tcx, ecx.tcx.types.u32),
-        AtomicWriteOp::Relaxed,
+        AtomicWriteOp::Release,
     )
 }
 
 fn mutex_get_or_create_id<'mir, 'tcx: 'mir>(
     ecx: &mut MiriEvalContext<'mir, 'tcx>,
     mutex_op: &OpTy<'tcx, Tag>,
 ) -> InterpResult<'tcx, MutexId> {
-    let id = mutex_get_id(ecx, mutex_op)?.to_u32()?;
-    if id == 0 {
-        // 0 is a default value and also not a valid mutex id. Need to allocate
-        // a new mutex.
+    let value_place = ecx.offset_and_layout_to_place(mutex_op, 4, ecx.machine.layouts.u32)?;
+    let (old, success) = ecx
+        .atomic_compare_exchange_scalar(
+            &value_place,
+            &ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
+            ecx.mutex_next_id().to_u32_scalar().into(),
+            AtomicRwOp::AcqRel,
+            AtomicReadOp::Acquire,
+            false,
+        )?
+        .to_scalar_pair()?;
+
+    if success.to_bool().expect("compare_exchange's second return value is a bool") {
         let id = ecx.mutex_create();
-        mutex_set_id(ecx, mutex_op, id.to_u32_scalar())?;
         Ok(id)
     } else {
-        Ok(MutexId::from_u32(id))
+        Ok(MutexId::from_u32(old.to_u32().expect("layout is u32")))
     }
 }
 
@@ -135,7 +143,7 @@ fn rwlock_get_id<'mir, 'tcx: 'mir>(
     ecx: &MiriEvalContext<'mir, 'tcx>,
     rwlock_op: &OpTy<'tcx, Tag>,
 ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
-    ecx.read_scalar_at_offset_atomic(rwlock_op, 4, ecx.machine.layouts.u32, AtomicReadOp::Relaxed)
+    ecx.read_scalar_at_offset_atomic(rwlock_op, 4, ecx.machine.layouts.u32, AtomicReadOp::Acquire)
 }
 
 fn rwlock_set_id<'mir, 'tcx: 'mir>(
@@ -148,23 +156,31 @@ fn rwlock_set_id<'mir, 'tcx: 'mir>(
         4,
         id,
         layout_of_maybe_uninit(ecx.tcx, ecx.tcx.types.u32),
-        AtomicWriteOp::Relaxed,
+        AtomicWriteOp::Release,
     )
 }
 
 fn rwlock_get_or_create_id<'mir, 'tcx: 'mir>(
     ecx: &mut MiriEvalContext<'mir, 'tcx>,
     rwlock_op: &OpTy<'tcx, Tag>,
 ) -> InterpResult<'tcx, RwLockId> {
-    let id = rwlock_get_id(ecx, rwlock_op)?.to_u32()?;
-    if id == 0 {
-        // 0 is a default value and also not a valid rwlock id. Need to allocate
-        // a new read-write lock.
+    let value_place = ecx.offset_and_layout_to_place(rwlock_op, 4, ecx.machine.layouts.u32)?;
+    let (old, success) = ecx
+        .atomic_compare_exchange_scalar(
+            &value_place,
+            &ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
+            ecx.rwlock_next_id().to_u32_scalar().into(),
+            AtomicRwOp::AcqRel,
+            AtomicReadOp::Acquire,
+            false,
+        )?
+        .to_scalar_pair()?;
+
+    if success.to_bool().expect("compare_exchange's second return value is a bool") {
         let id = ecx.rwlock_create();
-        rwlock_set_id(ecx, rwlock_op, id.to_u32_scalar())?;
         Ok(id)
     } else {
-        Ok(RwLockId::from_u32(id))
+        Ok(RwLockId::from_u32(old.to_u32().expect("layout is u32")))
     }
 }
 
@@ -207,7 +223,7 @@ fn cond_get_id<'mir, 'tcx: 'mir>(
     ecx: &MiriEvalContext<'mir, 'tcx>,
     cond_op: &OpTy<'tcx, Tag>,
 ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
-    ecx.read_scalar_at_offset_atomic(cond_op, 4, ecx.machine.layouts.u32, AtomicReadOp::Relaxed)
+    ecx.read_scalar_at_offset_atomic(cond_op, 4, ecx.machine.layouts.u32, AtomicReadOp::Acquire)
 }
 
 fn cond_set_id<'mir, 'tcx: 'mir>(
@@ -220,23 +236,32 @@ fn cond_set_id<'mir, 'tcx: 'mir>(
         4,
         id,
         layout_of_maybe_uninit(ecx.tcx, ecx.tcx.types.u32),
-        AtomicWriteOp::Relaxed,
+        AtomicWriteOp::Release,
     )
 }
 
 fn cond_get_or_create_id<'mir, 'tcx: 'mir>(
     ecx: &mut MiriEvalContext<'mir, 'tcx>,
     cond_op: &OpTy<'tcx, Tag>,
 ) -> InterpResult<'tcx, CondvarId> {
-    let id = cond_get_id(ecx, cond_op)?.to_u32()?;
-    if id == 0 {
-        // 0 is a default value and also not a valid conditional variable id.
-        // Need to allocate a new id.
+    let value_place = ecx.offset_and_layout_to_place(cond_op, 4, ecx.machine.layouts.u32)?;
+
+    let (old, success) = ecx
+        .atomic_compare_exchange_scalar(
+            &value_place,
+            &ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
+            ecx.condvar_next_id().to_u32_scalar().into(),
+            AtomicRwOp::AcqRel,
+            AtomicReadOp::Acquire,
+            false,
+        )?
+        .to_scalar_pair()?;
+
+    if success.to_bool().expect("compare_exchange's second return value is a bool") {
         let id = ecx.condvar_create();
-        cond_set_id(ecx, cond_op, id.to_u32_scalar())?;
         Ok(id)
     } else {
-        Ok(CondvarId::from_u32(id))
+        Ok(CondvarId::from_u32(old.to_u32().expect("layout is u32")))
     }
 }

src/shims/windows/sync.rs

+15 -6
@@ -7,15 +7,24 @@ fn srwlock_get_or_create_id<'mir, 'tcx: 'mir>(
     ecx: &mut MiriEvalContext<'mir, 'tcx>,
     lock_op: &OpTy<'tcx, Tag>,
 ) -> InterpResult<'tcx, RwLockId> {
-    let id = ecx.read_scalar_at_offset(lock_op, 0, ecx.machine.layouts.u32)?.to_u32()?;
-    if id == 0 {
-        // 0 is a default value and also not a valid rwlock id. Need to allocate
-        // a new rwlock.
+    let value_place = ecx.offset_and_layout_to_place(lock_op, 0, ecx.machine.layouts.u32)?;
+
+    let (old, success) = ecx
+        .atomic_compare_exchange_scalar(
+            &value_place,
+            &ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
+            ecx.rwlock_next_id().to_u32_scalar().into(),
+            AtomicRwOp::AcqRel,
+            AtomicReadOp::Acquire,
+            false,
+        )?
+        .to_scalar_pair()?;
+
+    if success.to_bool().expect("compare_exchange's second return value is a bool") {
         let id = ecx.rwlock_create();
-        ecx.write_scalar_at_offset(lock_op, 0, id.to_u32_scalar(), ecx.machine.layouts.u32)?;
         Ok(id)
     } else {
-        Ok(RwLockId::from_u32(id))
+        Ok(RwLockId::from_u32(old.to_u32().expect("layout is u32")))
     }
 }

src/sync.rs

+21
@@ -208,6 +208,13 @@ trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
 // situations.
 impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
 pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
+    #[inline]
+    /// Peek the id of the next mutex
+    fn mutex_next_id(&self) -> MutexId {
+        let this = self.eval_context_ref();
+        this.machine.threads.sync.mutexes.next_index()
+    }
+
     #[inline]
     /// Create state for a new mutex.
     fn mutex_create(&mut self) -> MutexId {
@@ -290,6 +297,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         this.block_thread(thread);
     }
 
+    #[inline]
+    /// Peek the id of the next read write lock
+    fn rwlock_next_id(&self) -> RwLockId {
+        let this = self.eval_context_ref();
+        this.machine.threads.sync.rwlocks.next_index()
+    }
+
     #[inline]
     /// Create state for a new read write lock.
     fn rwlock_create(&mut self) -> RwLockId {
@@ -438,6 +452,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         this.block_thread(writer);
     }
 
+    #[inline]
+    /// Peek the id of the next Condvar
+    fn condvar_next_id(&self) -> CondvarId {
+        let this = self.eval_context_ref();
+        this.machine.threads.sync.condvars.next_index()
+    }
+
     #[inline]
     /// Create state for a new conditional variable.
     fn condvar_create(&mut self) -> CondvarId {
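
The new `*_next_id` helpers let the shims name the id that the next `*_create()` call will return before anything is allocated: that id is proposed as the new value of the compare-exchange above, and the actual create only runs once the exchange reports success. Below is a minimal sketch of that peek-then-create idea over a plain Vec-backed table; IdTable, next_index, and create are illustrative names under the assumption that ids are dense indices, not Miri's actual IndexVec-based sync storage.

// Illustrative only: the id returned by next_index() is exactly the id the
// following create() call will return, provided nothing else is created
// in between.
struct IdTable<T> {
    entries: Vec<T>,
}

impl<T: Default> IdTable<T> {
    // Peek the id of the next entry without allocating it.
    fn next_index(&self) -> usize {
        self.entries.len()
    }

    // Actually allocate the entry and return its id.
    fn create(&mut self) -> usize {
        let id = self.entries.len();
        self.entries.push(T::default());
        id
    }
}

This only works if nothing can be created between the peek and the create, which the shims appear to satisfy because the compare-exchange and the subsequent `*_create()` happen within a single shim invocation.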

0 commit comments
