Skip to content

Commit 0a32bfd

Browse files
committed
weak memory: fix non-atomic read clearing store buffer
1 parent: fa8b965 — commit: 0a32bfd

File tree

8 files changed

+84
-45
lines changed

8 files changed

+84
-45
lines changed

src/borrow_tracker/stacked_borrows/mod.rs

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -749,7 +749,7 @@ trait EvalContextPrivExt<'tcx, 'ecx>: crate::MiriInterpCxExt<'tcx> {
749749
// Make sure the data race model also knows about this.
750750
// FIXME(genmc): Ensure this is still done in GenMC mode. Check for other places where GenMC may need to be informed.
751751
if let Some(data_race) = alloc_extra.data_race.as_vclocks_mut() {
752-
data_race.write(
752+
data_race.write_non_atomic(
753753
alloc_id,
754754
range,
755755
NaWriteType::Retag,
@@ -798,7 +798,7 @@ trait EvalContextPrivExt<'tcx, 'ecx>: crate::MiriInterpCxExt<'tcx> {
798798
assert_eq!(access, AccessKind::Read);
799799
// Make sure the data race model also knows about this.
800800
if let Some(data_race) = alloc_extra.data_race.as_vclocks_ref() {
801-
data_race.read(
801+
data_race.read_non_atomic(
802802
alloc_id,
803803
range,
804804
NaReadType::Retag,

src/borrow_tracker/tree_borrows/mod.rs

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -366,7 +366,7 @@ trait EvalContextPrivExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
366366
// Also inform the data race model (but only if any bytes are actually affected).
367367
if range_in_alloc.size.bytes() > 0 {
368368
if let Some(data_race) = alloc_extra.data_race.as_vclocks_ref() {
369-
data_race.read(
369+
data_race.read_non_atomic(
370370
alloc_id,
371371
range_in_alloc,
372372
NaReadType::Retag,

src/concurrency/data_race.rs

Lines changed: 26 additions & 30 deletions
Original file line number | Diff line number | Diff line change
@@ -648,18 +648,17 @@ impl MemoryCellClocks {
648648
thread_clocks.clock.index_mut(index).span = current_span;
649649
}
650650
thread_clocks.clock.index_mut(index).set_read_type(read_type);
651-
if self.write_was_before(&thread_clocks.clock) {
652-
// We must be ordered-after all atomic writes.
653-
let race_free = if let Some(atomic) = self.atomic() {
654-
atomic.write_vector <= thread_clocks.clock
655-
} else {
656-
true
657-
};
658-
self.read.set_at_index(&thread_clocks.clock, index);
659-
if race_free { Ok(()) } else { Err(DataRace) }
660-
} else {
661-
Err(DataRace)
651+
// Check synchronization with non-atomic writes.
652+
if !self.write_was_before(&thread_clocks.clock) {
653+
return Err(DataRace);
662654
}
655+
// Check synchronization with atomic writes.
656+
if !self.atomic().is_none_or(|atomic| atomic.write_vector <= thread_clocks.clock) {
657+
return Err(DataRace);
658+
}
659+
// Record this access.
660+
self.read.set_at_index(&thread_clocks.clock, index);
661+
Ok(())
663662
}
664663

665664
/// Detect races for non-atomic write operations at the current memory cell
@@ -675,24 +674,21 @@ impl MemoryCellClocks {
675674
if !current_span.is_dummy() {
676675
thread_clocks.clock.index_mut(index).span = current_span;
677676
}
678-
if self.write_was_before(&thread_clocks.clock) && self.read <= thread_clocks.clock {
679-
let race_free = if let Some(atomic) = self.atomic() {
680-
atomic.write_vector <= thread_clocks.clock
681-
&& atomic.read_vector <= thread_clocks.clock
682-
} else {
683-
true
684-
};
685-
self.write = (index, thread_clocks.clock[index]);
686-
self.write_type = write_type;
687-
if race_free {
688-
self.read.set_zero_vector();
689-
Ok(())
690-
} else {
691-
Err(DataRace)
692-
}
693-
} else {
694-
Err(DataRace)
677+
// Check synchronization with non-atomic accesses.
678+
if !(self.write_was_before(&thread_clocks.clock) && self.read <= thread_clocks.clock) {
679+
return Err(DataRace);
695680
}
681+
// Check synchronization with atomic accesses.
682+
if !self.atomic().is_none_or(|atomic| {
683+
atomic.write_vector <= thread_clocks.clock && atomic.read_vector <= thread_clocks.clock
684+
}) {
685+
return Err(DataRace);
686+
}
687+
// Record this access.
688+
self.write = (index, thread_clocks.clock[index]);
689+
self.write_type = write_type;
690+
self.read.set_zero_vector();
691+
Ok(())
696692
}
697693
}
698694

@@ -1201,7 +1197,7 @@ impl VClockAlloc {
12011197
/// operation for which data-race detection is handled separately, for example
12021198
/// atomic read operations. The `ty` parameter is used for diagnostics, letting
12031199
/// the user know which type was read.
1204-
pub fn read<'tcx>(
1200+
pub fn read_non_atomic<'tcx>(
12051201
&self,
12061202
alloc_id: AllocId,
12071203
access_range: AllocRange,
@@ -1243,7 +1239,7 @@ impl VClockAlloc {
12431239
/// being created or if it is temporarily disabled during a racy read or write
12441240
/// operation. The `ty` parameter is used for diagnostics, letting
12451241
/// the user know which type was written.
1246-
pub fn write<'tcx>(
1242+
pub fn write_non_atomic<'tcx>(
12471243
&mut self,
12481244
alloc_id: AllocId,
12491245
access_range: AllocRange,

src/concurrency/weak_memory.rs

Lines changed: 4 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -177,11 +177,11 @@ impl StoreBufferAlloc {
177177
Self { store_buffers: RefCell::new(RangeObjectMap::new()) }
178178
}
179179

180-
/// When a non-atomic access happens on a location that has been atomically accessed
181-
/// before without data race, we can determine that the non-atomic access fully happens
180+
/// When a non-atomic write happens on a location that has been atomically accessed
181+
/// before without data race, we can determine that the non-atomic write fully happens
182182
/// after all the prior atomic writes so the location no longer needs to exhibit
183-
/// any weak memory behaviours until further atomic accesses.
184-
pub fn memory_accessed(&self, range: AllocRange, global: &DataRaceState) {
183+
/// any weak memory behaviours until further atomic writes.
184+
pub fn non_atomic_write(&self, range: AllocRange, global: &DataRaceState) {
185185
if !global.ongoing_action_data_race_free() {
186186
let mut buffers = self.store_buffers.borrow_mut();
187187
let access_type = buffers.access_type(range);

src/machine.rs

Lines changed: 6 additions & 8 deletions
Original file line number | Diff line number | Diff line change
@@ -1533,14 +1533,11 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
15331533
genmc_ctx.memory_load(machine, ptr.addr(), range.size)?,
15341534
GlobalDataRaceHandler::Vclocks(_data_race) => {
15351535
let _trace = enter_trace_span!(data_race::before_memory_read);
1536-
let AllocDataRaceHandler::Vclocks(data_race, weak_memory) = &alloc_extra.data_race
1536+
let AllocDataRaceHandler::Vclocks(data_race, _weak_memory) = &alloc_extra.data_race
15371537
else {
15381538
unreachable!();
15391539
};
1540-
data_race.read(alloc_id, range, NaReadType::Read, None, machine)?;
1541-
if let Some(weak_memory) = weak_memory {
1542-
weak_memory.memory_accessed(range, machine.data_race.as_vclocks_ref().unwrap());
1543-
}
1540+
data_race.read_non_atomic(alloc_id, range, NaReadType::Read, None, machine)?;
15441541
}
15451542
}
15461543
if let Some(borrow_tracker) = &alloc_extra.borrow_tracker {
@@ -1573,9 +1570,10 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
15731570
else {
15741571
unreachable!()
15751572
};
1576-
data_race.write(alloc_id, range, NaWriteType::Write, None, machine)?;
1573+
data_race.write_non_atomic(alloc_id, range, NaWriteType::Write, None, machine)?;
15771574
if let Some(weak_memory) = weak_memory {
1578-
weak_memory.memory_accessed(range, machine.data_race.as_vclocks_ref().unwrap());
1575+
weak_memory
1576+
.non_atomic_write(range, machine.data_race.as_vclocks_ref().unwrap());
15791577
}
15801578
}
15811579
}
@@ -1606,7 +1604,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
16061604
GlobalDataRaceHandler::Vclocks(_global_state) => {
16071605
let _trace = enter_trace_span!(data_race::before_memory_deallocation);
16081606
let data_race = alloc_extra.data_race.as_vclocks_mut().unwrap();
1609-
data_race.write(
1607+
data_race.write_non_atomic(
16101608
alloc_id,
16111609
alloc_range(Size::ZERO, size),
16121610
NaWriteType::Deallocate,

tests/pass/0weak_memory/weak.rs

Lines changed: 3 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -92,6 +92,9 @@ fn initialization_write(add_fence: bool) {
9292

9393
let j1 = spawn(move || {
9494
x.store(22, Relaxed);
95+
// Since nobody else writes to `x`, we can non-atomically read it.
96+
// (This tests that we do not delete the store buffer here.)
97+
unsafe { std::ptr::from_ref(x).read() };
9598
// Relaxed is intentional. We want to test if the thread 2 reads the initialisation write
9699
// after a relaxed write
97100
wait.store(1, Relaxed);
File renamed without changes.
Lines changed: 42 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -0,0 +1,42 @@
1+
//! This reproduces #4655 every single time
2+
//@ compile-flags: -Zmiri-fixed-schedule -Zmiri-disable-stacked-borrows
3+
use std::sync::atomic::{AtomicUsize, Ordering};
4+
use std::{ptr, thread};
5+
6+
const SIZE: usize = 256;
7+
8+
static mut ARRAY: [u8; SIZE] = [0; _];
9+
// Everything strictly less than this has been initialized by the sender.
10+
static POS: AtomicUsize = AtomicUsize::new(0);
11+
12+
fn main() {
13+
// Sender
14+
let th = std::thread::spawn(|| {
15+
for i in 0..SIZE {
16+
unsafe { ptr::write(&raw mut ARRAY[i], 1) };
17+
POS.store(i + 1, Ordering::Release);
18+
19+
thread::yield_now();
20+
21+
// We are the only writer, so we can do non-atomic reads as well.
22+
unsafe { (&raw const POS).cast::<usize>().read() };
23+
}
24+
});
25+
26+
// Receiver
27+
loop {
28+
let i = POS.load(Ordering::Acquire);
29+
30+
if i > 0 {
31+
unsafe { ptr::read(&raw const ARRAY[i - 1]) };
32+
}
33+
34+
if i == SIZE {
35+
break;
36+
}
37+
38+
thread::yield_now();
39+
}
40+
41+
th.join().unwrap();
42+
}

0 commit comments

Comments (0)