@@ -648,18 +648,17 @@ impl MemoryCellClocks {
648648 thread_clocks. clock . index_mut ( index) . span = current_span;
649649 }
650650 thread_clocks. clock . index_mut ( index) . set_read_type ( read_type) ;
651- if self . write_was_before ( & thread_clocks. clock ) {
652- // We must be ordered-after all atomic writes.
653- let race_free = if let Some ( atomic) = self . atomic ( ) {
654- atomic. write_vector <= thread_clocks. clock
655- } else {
656- true
657- } ;
658- self . read . set_at_index ( & thread_clocks. clock , index) ;
659- if race_free { Ok ( ( ) ) } else { Err ( DataRace ) }
660- } else {
661- Err ( DataRace )
651+ // Check synchronization with non-atomic writes.
652+ if !self . write_was_before ( & thread_clocks. clock ) {
653+ return Err ( DataRace ) ;
662654 }
655+ // Check synchronization with atomic writes.
656+ if !self . atomic ( ) . is_none_or ( |atomic| atomic. write_vector <= thread_clocks. clock ) {
657+ return Err ( DataRace ) ;
658+ }
659+ // Record this access.
660+ self . read . set_at_index ( & thread_clocks. clock , index) ;
661+ Ok ( ( ) )
663662 }
664663
665664 /// Detect races for non-atomic write operations at the current memory cell
@@ -675,24 +674,23 @@ impl MemoryCellClocks {
675674 if !current_span. is_dummy ( ) {
676675 thread_clocks. clock . index_mut ( index) . span = current_span;
677676 }
678- if self . write_was_before ( & thread_clocks. clock ) && self . read <= thread_clocks. clock {
679- let race_free = if let Some ( atomic) = self . atomic ( ) {
680- atomic. write_vector <= thread_clocks. clock
681- && atomic. read_vector <= thread_clocks. clock
682- } else {
683- true
684- } ;
685- self . write = ( index, thread_clocks. clock [ index] ) ;
686- self . write_type = write_type;
687- if race_free {
688- self . read . set_zero_vector ( ) ;
689- Ok ( ( ) )
690- } else {
691- Err ( DataRace )
692- }
693- } else {
694- Err ( DataRace )
677+ // Check synchronization with non-atomic accesses.
678+ if !( self . write_was_before ( & thread_clocks. clock ) && self . read <= thread_clocks. clock ) {
679+ return Err ( DataRace ) ;
695680 }
681+ // Check synchronization with atomic accesses.
682+ if !self . atomic ( ) . is_none_or ( |atomic| {
683+ atomic. write_vector <= thread_clocks. clock && atomic. read_vector <= thread_clocks. clock
684+ } ) {
685+ return Err ( DataRace ) ;
686+ }
687+ // Record this access.
688+ self . write = ( index, thread_clocks. clock [ index] ) ;
689+ self . write_type = write_type;
690+ self . read . set_zero_vector ( ) ;
691+ // This is not an atomic location any more.
692+ self . atomic_ops = None ;
693+ Ok ( ( ) )
696694 }
697695}
698696
@@ -758,14 +756,9 @@ pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> {
758756 let this = self . eval_context_mut ( ) ;
759757 this. atomic_access_check ( dest, AtomicAccessType :: Store ) ?;
760758
761- // Read the previous value so we can put it in the store buffer later.
762- // The program didn't actually do a read, so suppress the memory access hooks.
763- // This is also a very special exception where we just ignore an error -- if this read
764- // was UB e.g. because the memory is uninitialized, we don't want to know!
765- let old_val = this. run_for_validation_ref ( |this| this. read_scalar ( dest) ) . discard_err ( ) ;
766-
767759 // Inform GenMC about the atomic store.
768760 if let Some ( genmc_ctx) = this. machine . data_race . as_genmc_ref ( ) {
761+ let old_val = this. run_for_validation_ref ( |this| this. read_scalar ( dest) ) . discard_err ( ) ;
769762 if genmc_ctx. atomic_store (
770763 this,
771764 dest. ptr ( ) . addr ( ) ,
@@ -780,6 +773,9 @@ pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> {
780773 }
781774 return interp_ok ( ( ) ) ;
782775 }
776+
777+ // Read the previous value so we can put it in the store buffer later.
778+ let old_val = this. get_latest_nonatomic_val ( dest) ;
783779 this. allow_data_races_mut ( move |this| this. write_scalar ( val, dest) ) ?;
784780 this. validate_atomic_store ( dest, atomic) ?;
785781 this. buffered_atomic_write ( val, dest, atomic, old_val)
@@ -1201,7 +1197,7 @@ impl VClockAlloc {
12011197 /// operation for which data-race detection is handled separately, for example
12021198 /// atomic read operations. The `ty` parameter is used for diagnostics, letting
12031199 /// the user know which type was read.
1204- pub fn read < ' tcx > (
1200+ pub fn read_non_atomic < ' tcx > (
12051201 & self ,
12061202 alloc_id : AllocId ,
12071203 access_range : AllocRange ,
@@ -1243,7 +1239,7 @@ impl VClockAlloc {
12431239 /// being created or if it is temporarily disabled during a racy read or write
12441240 /// operation. The `ty` parameter is used for diagnostics, letting
12451241 /// the user know which type was written.
1246- pub fn write < ' tcx > (
1242+ pub fn write_non_atomic < ' tcx > (
12471243 & mut self ,
12481244 alloc_id : AllocId ,
12491245 access_range : AllocRange ,
@@ -1540,6 +1536,35 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
15401536 )
15411537 }
15421538
1539+ /// Returns the most recent *non-atomic* value stored in the given place.
1540+ /// Errors if we don't need that (because we don't do store buffering) or if
1541+ /// the most recent value is in fact atomic.
// NOTE(review): `Err(())` here is a control-flow signal ("caller must not use an
// old value"), not a failure — the caller is expected to discard it, not propagate.
1542+ fn get_latest_nonatomic_val ( & self , place : & MPlaceTy < ' tcx > ) -> Result < Option < Scalar > , ( ) > {
1543+ let this = self . eval_context_ref ( ) ;
1544+ // These cannot fail because `atomic_access_check` was done first.
1545+ let ( alloc_id, offset, _prov) = this. ptr_get_alloc_id ( place. ptr ( ) , 0 ) . unwrap ( ) ;
1546+ let alloc_meta = & this. get_alloc_extra ( alloc_id) . unwrap ( ) . data_race ;
1547+ if alloc_meta. as_weak_memory_ref ( ) . is_none ( ) {
1548+ // No reason to read old value if we don't track store buffers.
1549+ return Err ( ( ) ) ;
1550+ }
1551+ let data_race = alloc_meta. as_vclocks_ref ( ) . unwrap ( ) ;
1552+ // Only read old value if this is currently a non-atomic location.
// Every clock range overlapping `[offset, offset + size)` must pass the check below;
// a single range that fails makes the whole place ineligible.
1553+ for ( _range, clocks) in data_race. alloc_ranges . borrow_mut ( ) . iter ( offset, place. layout . size )
1554+ {
1555+ // If this had an atomic write that's not before the non-atomic write, that should
1556+ // already be in the store buffer. Initializing the store buffer now would use the
1557+ // wrong `sync_clock` so we better make sure that does not happen.
// NOTE(review): vector clocks are presumably only partially ordered, so
// `!(write_vector <= clocks.write())` is deliberately NOT the same as
// `write_vector > clocks.write()` — it also catches incomparable clocks.
// Confirm `VClock`'s `PartialOrd` semantics before simplifying.
1558+ if clocks. atomic ( ) . is_some_and ( |atomic| !( atomic. write_vector <= clocks. write ( ) ) ) {
1559+ return Err ( ( ) ) ;
1560+ }
1561+ }
1562+ // The program didn't actually do a read, so suppress the memory access hooks.
1563+ // This is also a very special exception where we just ignore an error -- if this read
1564+ // was UB e.g. because the memory is uninitialized, we don't want to know!
// `discard_err` maps an interpreter error to `None`, so `Ok(None)` means
// "no usable old value" while still permitting the store-buffer path.
1565+ Ok ( this. run_for_validation_ref ( |this| this. read_scalar ( place) ) . discard_err ( ) )
1566+ }
1567+
15431568 /// Generic atomic operation implementation
15441569 fn validate_atomic_op < A : Debug + Copy > (
15451570 & self ,
0 commit comments