Commit 5aef34c
Auto merge of #2464 - RalfJung:atomic-must-be-mutable, r=RalfJung
Atomics must be mutable. Fixes #2463. Needs rust-lang/rust#100181.
2 parents (df3c141 + a1f5a75) · commit 5aef34c
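
The user-visible effect: Miri now rejects atomic operations on read-only memory. A minimal sketch of the kind of program this commit starts flagging (a hedged illustration based on the error message added below, not a copy of the PR's test suite):

    use std::sync::atomic::{AtomicI32, Ordering};

    // Immutable statics live in read-only memory.
    static X: i32 = 0;

    fn main() {
        // Reinterpret the read-only data as an atomic, then load from it.
        // With this commit, Miri reports UB ("atomic operations cannot be
        // performed on read-only memory") instead of accepting the load,
        // since some targets lower even atomic loads to compare-exchange.
        let a = unsafe { &*(&X as *const i32 as *const AtomicI32) };
        let _v = a.load(Ordering::Relaxed);
    }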

File tree

9 files changed: +204 -144 lines changed

Diff for: rust-version (+1 -1)

@@ -1 +1 @@
-93ab13b4e894ab74258c40aaf29872db2b17b6b4
+6d3f1beae1720055e5a30f4dbe7a9e7fb810c65e

Diff for: src/concurrency/data_race.rs (+120 -74)
@@ -46,10 +46,11 @@ use std::{
     mem,
 };

+use rustc_ast::Mutability;
 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
 use rustc_index::vec::{Idx, IndexVec};
 use rustc_middle::{mir, ty::layout::TyAndLayout};
-use rustc_target::abi::Size;
+use rustc_target::abi::{Align, Size};

 use crate::*;

@@ -470,6 +471,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         atomic: AtomicReadOrd,
     ) -> InterpResult<'tcx, ScalarMaybeUninit<Provenance>> {
         let this = self.eval_context_ref();
+        this.atomic_access_check(place)?;
         // This will read from the last store in the modification order of this location. In case
         // weak memory emulation is enabled, this may not be the store we will pick to actually read from and return.
         // This is fine with StackedBorrow and race checks because they don't concern metadata on
@@ -490,6 +492,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         atomic: AtomicWriteOrd,
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
+        this.atomic_access_check(dest)?;
+
         this.validate_overlapping_atomic(dest)?;
         this.allow_data_races_mut(move |this| this.write_scalar(val, &dest.into()))?;
         this.validate_atomic_store(dest, atomic)?;
@@ -511,6 +515,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         atomic: AtomicRwOrd,
     ) -> InterpResult<'tcx, ImmTy<'tcx, Provenance>> {
         let this = self.eval_context_mut();
+        this.atomic_access_check(place)?;

         this.validate_overlapping_atomic(place)?;
         let old = this.allow_data_races_mut(|this| this.read_immediate(&place.into()))?;
@@ -540,6 +545,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         atomic: AtomicRwOrd,
     ) -> InterpResult<'tcx, ScalarMaybeUninit<Provenance>> {
         let this = self.eval_context_mut();
+        this.atomic_access_check(place)?;

         this.validate_overlapping_atomic(place)?;
         let old = this.allow_data_races_mut(|this| this.read_scalar(&place.into()))?;
@@ -561,6 +567,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         atomic: AtomicRwOrd,
     ) -> InterpResult<'tcx, ImmTy<'tcx, Provenance>> {
         let this = self.eval_context_mut();
+        this.atomic_access_check(place)?;

         this.validate_overlapping_atomic(place)?;
         let old = this.allow_data_races_mut(|this| this.read_immediate(&place.into()))?;
@@ -604,6 +611,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
     ) -> InterpResult<'tcx, Immediate<Provenance>> {
         use rand::Rng as _;
         let this = self.eval_context_mut();
+        this.atomic_access_check(place)?;

         this.validate_overlapping_atomic(place)?;
         // Failure ordering cannot be stronger than success ordering, therefore first attempt
@@ -647,80 +655,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         Ok(res)
     }

-    /// Update the data-race detector for an atomic read occurring at the
-    /// associated memory-place and on the current thread.
-    fn validate_atomic_load(
-        &self,
-        place: &MPlaceTy<'tcx, Provenance>,
-        atomic: AtomicReadOrd,
-    ) -> InterpResult<'tcx> {
-        let this = self.eval_context_ref();
-        this.validate_overlapping_atomic(place)?;
-        this.validate_atomic_op(
-            place,
-            atomic,
-            "Atomic Load",
-            move |memory, clocks, index, atomic| {
-                if atomic == AtomicReadOrd::Relaxed {
-                    memory.load_relaxed(&mut *clocks, index)
-                } else {
-                    memory.load_acquire(&mut *clocks, index)
-                }
-            },
-        )
-    }
-
-    /// Update the data-race detector for an atomic write occurring at the
-    /// associated memory-place and on the current thread.
-    fn validate_atomic_store(
-        &mut self,
-        place: &MPlaceTy<'tcx, Provenance>,
-        atomic: AtomicWriteOrd,
-    ) -> InterpResult<'tcx> {
-        let this = self.eval_context_mut();
-        this.validate_overlapping_atomic(place)?;
-        this.validate_atomic_op(
-            place,
-            atomic,
-            "Atomic Store",
-            move |memory, clocks, index, atomic| {
-                if atomic == AtomicWriteOrd::Relaxed {
-                    memory.store_relaxed(clocks, index)
-                } else {
-                    memory.store_release(clocks, index)
-                }
-            },
-        )
-    }
-
-    /// Update the data-race detector for an atomic read-modify-write occurring
-    /// at the associated memory place and on the current thread.
-    fn validate_atomic_rmw(
-        &mut self,
-        place: &MPlaceTy<'tcx, Provenance>,
-        atomic: AtomicRwOrd,
-    ) -> InterpResult<'tcx> {
-        use AtomicRwOrd::*;
-        let acquire = matches!(atomic, Acquire | AcqRel | SeqCst);
-        let release = matches!(atomic, Release | AcqRel | SeqCst);
-        let this = self.eval_context_mut();
-        this.validate_overlapping_atomic(place)?;
-        this.validate_atomic_op(place, atomic, "Atomic RMW", move |memory, clocks, index, _| {
-            if acquire {
-                memory.load_acquire(clocks, index)?;
-            } else {
-                memory.load_relaxed(clocks, index)?;
-            }
-            if release {
-                memory.rmw_release(clocks, index)
-            } else {
-                memory.rmw_relaxed(clocks, index)
-            }
-        })
-    }
-
     /// Update the data-race detector for an atomic fence on the current thread.
-    fn validate_atomic_fence(&mut self, atomic: AtomicFenceOrd) -> InterpResult<'tcx> {
+    fn atomic_fence(&mut self, atomic: AtomicFenceOrd) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
         if let Some(data_race) = &mut this.machine.data_race {
             data_race.maybe_perform_sync_operation(&this.machine.threads, |index, mut clocks| {
@@ -1016,6 +952,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
     fn allow_data_races_ref<R>(&self, op: impl FnOnce(&MiriEvalContext<'mir, 'tcx>) -> R) -> R {
         let this = self.eval_context_ref();
         if let Some(data_race) = &this.machine.data_race {
+            assert!(!data_race.ongoing_action_data_race_free.get(), "cannot nest allow_data_races");
             data_race.ongoing_action_data_race_free.set(true);
         }
         let result = op(this);
@@ -1035,6 +972,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
     ) -> R {
         let this = self.eval_context_mut();
         if let Some(data_race) = &this.machine.data_race {
+            assert!(!data_race.ongoing_action_data_race_free.get(), "cannot nest allow_data_races");
             data_race.ongoing_action_data_race_free.set(true);
         }
         let result = op(this);
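
The two hunks above add the same non-reentrancy assertion to both allow_data_races_ref and allow_data_races_mut. A standalone sketch of that guard pattern, with illustrative names standing in for Miri's types:

    use std::cell::Cell;

    // Stand-in for the data-race detector's "checks suppressed" flag.
    struct Detector {
        ongoing_action_data_race_free: Cell<bool>,
    }

    impl Detector {
        // Run `op` with race checking suppressed. A nested call would clear
        // the flag too early on the way out, so nesting is asserted against.
        fn allow_data_races<R>(&self, op: impl FnOnce() -> R) -> R {
            assert!(
                !self.ongoing_action_data_race_free.get(),
                "cannot nest allow_data_races"
            );
            self.ongoing_action_data_race_free.set(true);
            let result = op();
            self.ongoing_action_data_race_free.set(false);
            result
        }
    }

    fn main() {
        let d = Detector { ongoing_action_data_race_free: Cell::new(false) };
        assert_eq!(d.allow_data_races(|| 42), 42);
    }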
@@ -1044,6 +982,114 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         result
     }

+    /// Checks that an atomic access is legal at the given place.
+    fn atomic_access_check(&self, place: &MPlaceTy<'tcx, Provenance>) -> InterpResult<'tcx> {
+        let this = self.eval_context_ref();
+        // Check alignment requirements. Atomics must always be aligned to their size,
+        // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
+        // be 8-aligned).
+        let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
+        this.check_ptr_access_align(
+            place.ptr,
+            place.layout.size,
+            align,
+            CheckInAllocMsg::MemoryAccessTest,
+        )?;
+        // Ensure the allocation is mutable. Even a failing (read-only) compare_exchange needs
+        // mutable memory on many targets (i.e., they segfault if that memory is mapped
+        // read-only), and atomic loads can be implemented via compare_exchange on some targets.
+        // There could possibly be some very specific exceptions to this, see
+        // <https://github.com/rust-lang/miri/pull/2464#discussion_r939636130> for details.
+        // We avoid `get_ptr_alloc` since we do *not* want to run the access hooks -- the actual
+        // access will happen later.
+        let (alloc_id, _offset, _prov) =
+            this.ptr_try_get_alloc_id(place.ptr).expect("there are no zero-sized atomic accesses");
+        if this.get_alloc_mutability(alloc_id)? == Mutability::Not {
+            // FIXME: make this prettier, once these messages have separate title/span/help messages.
+            throw_ub_format!(
+                "atomic operations cannot be performed on read-only memory\n\
+                many platforms require atomic read-modify-write instructions to be performed on writeable memory, even if the operation fails \
+                (and is hence nominally read-only)\n\
+                some platforms implement (some) atomic loads via compare-exchange, which means they do not work on read-only memory; \
+                it is possible that we could have an exception permitting this for specific kinds of loads\n\
+                please report an issue at <https://github.com/rust-lang/miri/issues> if this is a problem for you"
+            );
+        }
+        Ok(())
+    }
+
+    /// Update the data-race detector for an atomic read occurring at the
+    /// associated memory-place and on the current thread.
+    fn validate_atomic_load(
+        &self,
+        place: &MPlaceTy<'tcx, Provenance>,
+        atomic: AtomicReadOrd,
+    ) -> InterpResult<'tcx> {
+        let this = self.eval_context_ref();
+        this.validate_overlapping_atomic(place)?;
+        this.validate_atomic_op(
+            place,
+            atomic,
+            "Atomic Load",
+            move |memory, clocks, index, atomic| {
+                if atomic == AtomicReadOrd::Relaxed {
+                    memory.load_relaxed(&mut *clocks, index)
+                } else {
+                    memory.load_acquire(&mut *clocks, index)
+                }
+            },
+        )
+    }
+
+    /// Update the data-race detector for an atomic write occurring at the
+    /// associated memory-place and on the current thread.
+    fn validate_atomic_store(
+        &mut self,
+        place: &MPlaceTy<'tcx, Provenance>,
+        atomic: AtomicWriteOrd,
+    ) -> InterpResult<'tcx> {
+        let this = self.eval_context_mut();
+        this.validate_overlapping_atomic(place)?;
+        this.validate_atomic_op(
+            place,
+            atomic,
+            "Atomic Store",
+            move |memory, clocks, index, atomic| {
+                if atomic == AtomicWriteOrd::Relaxed {
+                    memory.store_relaxed(clocks, index)
+                } else {
+                    memory.store_release(clocks, index)
+                }
+            },
+        )
+    }
+
+    /// Update the data-race detector for an atomic read-modify-write occurring
+    /// at the associated memory place and on the current thread.
+    fn validate_atomic_rmw(
+        &mut self,
+        place: &MPlaceTy<'tcx, Provenance>,
+        atomic: AtomicRwOrd,
+    ) -> InterpResult<'tcx> {
+        use AtomicRwOrd::*;
+        let acquire = matches!(atomic, Acquire | AcqRel | SeqCst);
+        let release = matches!(atomic, Release | AcqRel | SeqCst);
+        let this = self.eval_context_mut();
+        this.validate_overlapping_atomic(place)?;
+        this.validate_atomic_op(place, atomic, "Atomic RMW", move |memory, clocks, index, _| {
+            if acquire {
+                memory.load_acquire(clocks, index)?;
+            } else {
+                memory.load_relaxed(clocks, index)?;
+            }
+            if release {
+                memory.rmw_release(clocks, index)
+            } else {
+                memory.rmw_relaxed(clocks, index)
+            }
+        })
+    }
+
     /// Generic atomic operation implementation
     fn validate_atomic_op<A: Debug + Copy>(
         &self,
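
The alignment check in atomic_access_check derives the required alignment from the access size, not from the wrapped type's ABI alignment. A small illustration of that rule in plain Rust (required_atomic_align is an illustrative helper, not Miri's Align API):

    // Atomics must be aligned to their size: an 8-byte atomic needs 8-byte
    // alignment even on a 32-bit target where plain u64 may only be 4-aligned.
    fn required_atomic_align(size_bytes: u64) -> u64 {
        assert!(size_bytes.is_power_of_two(), "atomic access sizes are powers of two");
        size_bytes
    }

    fn main() {
        assert_eq!(required_atomic_align(4), 4); // e.g. AtomicU32
        assert_eq!(required_atomic_align(8), 8); // e.g. AtomicU64, even on 32-bit targets
    }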
