Auto merge of #2464 - RalfJung:atomic-must-be-mutable, r=RalfJung
Atomics must be mutable

Fixes #2463
Needs rust-lang/rust#100181
bors committed Aug 9, 2022
2 parents df3c141 + a1f5a75 commit 5aef34c
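
For context, the newly rejected pattern looks roughly like this — a minimal sketch modeled on the PR's regression tests (not a verbatim copy): an atomic operation performed on memory in a read-only allocation, such as an immutable `static`.

```rust
use std::sync::atomic::{AtomicI32, Ordering};

fn main() {
    // An immutable static is placed in read-only memory.
    static X: i32 = 0;
    // Reinterpret it as an atomic (unsound; exactly what this commit teaches Miri to catch).
    let x = unsafe { &*(&X as *const i32 as *const AtomicI32) };
    // Miri now reports: "atomic operations cannot be performed on read-only memory".
    let _ = x.compare_exchange(1, 2, Ordering::Relaxed, Ordering::Relaxed);
}
```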
Showing 9 changed files with 204 additions and 144 deletions.
2 changes: 1 addition & 1 deletion rust-version
@@ -1 +1 @@
-93ab13b4e894ab74258c40aaf29872db2b17b6b4
+6d3f1beae1720055e5a30f4dbe7a9e7fb810c65e
194 changes: 120 additions & 74 deletions src/concurrency/data_race.rs
@@ -46,10 +46,11 @@ use std::{
     mem,
 };

+use rustc_ast::Mutability;
 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
 use rustc_index::vec::{Idx, IndexVec};
 use rustc_middle::{mir, ty::layout::TyAndLayout};
-use rustc_target::abi::Size;
+use rustc_target::abi::{Align, Size};

 use crate::*;

@@ -470,6 +471,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         atomic: AtomicReadOrd,
     ) -> InterpResult<'tcx, ScalarMaybeUninit<Provenance>> {
         let this = self.eval_context_ref();
+        this.atomic_access_check(place)?;
         // This will read from the last store in the modification order of this location. In case
         // weak memory emulation is enabled, this may not be the store we will pick to actually read from and return.
         // This is fine with StackedBorrow and race checks because they don't concern metadata on
@@ -490,6 +492,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         atomic: AtomicWriteOrd,
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
+        this.atomic_access_check(dest)?;
+
         this.validate_overlapping_atomic(dest)?;
         this.allow_data_races_mut(move |this| this.write_scalar(val, &dest.into()))?;
         this.validate_atomic_store(dest, atomic)?;
@@ -511,6 +515,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         atomic: AtomicRwOrd,
     ) -> InterpResult<'tcx, ImmTy<'tcx, Provenance>> {
         let this = self.eval_context_mut();
+        this.atomic_access_check(place)?;

         this.validate_overlapping_atomic(place)?;
         let old = this.allow_data_races_mut(|this| this.read_immediate(&place.into()))?;
@@ -540,6 +545,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         atomic: AtomicRwOrd,
     ) -> InterpResult<'tcx, ScalarMaybeUninit<Provenance>> {
         let this = self.eval_context_mut();
+        this.atomic_access_check(place)?;

         this.validate_overlapping_atomic(place)?;
         let old = this.allow_data_races_mut(|this| this.read_scalar(&place.into()))?;
@@ -561,6 +567,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         atomic: AtomicRwOrd,
     ) -> InterpResult<'tcx, ImmTy<'tcx, Provenance>> {
         let this = self.eval_context_mut();
+        this.atomic_access_check(place)?;

         this.validate_overlapping_atomic(place)?;
         let old = this.allow_data_races_mut(|this| this.read_immediate(&place.into()))?;
@@ -604,6 +611,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
     ) -> InterpResult<'tcx, Immediate<Provenance>> {
         use rand::Rng as _;
         let this = self.eval_context_mut();
+        this.atomic_access_check(place)?;

         this.validate_overlapping_atomic(place)?;
         // Failure ordering cannot be stronger than success ordering, therefore first attempt
@@ -647,80 +655,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         Ok(res)
     }

-    /// Update the data-race detector for an atomic read occurring at the
-    /// associated memory-place and on the current thread.
-    fn validate_atomic_load(
-        &self,
-        place: &MPlaceTy<'tcx, Provenance>,
-        atomic: AtomicReadOrd,
-    ) -> InterpResult<'tcx> {
-        let this = self.eval_context_ref();
-        this.validate_overlapping_atomic(place)?;
-        this.validate_atomic_op(
-            place,
-            atomic,
-            "Atomic Load",
-            move |memory, clocks, index, atomic| {
-                if atomic == AtomicReadOrd::Relaxed {
-                    memory.load_relaxed(&mut *clocks, index)
-                } else {
-                    memory.load_acquire(&mut *clocks, index)
-                }
-            },
-        )
-    }
-
-    /// Update the data-race detector for an atomic write occurring at the
-    /// associated memory-place and on the current thread.
-    fn validate_atomic_store(
-        &mut self,
-        place: &MPlaceTy<'tcx, Provenance>,
-        atomic: AtomicWriteOrd,
-    ) -> InterpResult<'tcx> {
-        let this = self.eval_context_mut();
-        this.validate_overlapping_atomic(place)?;
-        this.validate_atomic_op(
-            place,
-            atomic,
-            "Atomic Store",
-            move |memory, clocks, index, atomic| {
-                if atomic == AtomicWriteOrd::Relaxed {
-                    memory.store_relaxed(clocks, index)
-                } else {
-                    memory.store_release(clocks, index)
-                }
-            },
-        )
-    }
-
-    /// Update the data-race detector for an atomic read-modify-write occurring
-    /// at the associated memory place and on the current thread.
-    fn validate_atomic_rmw(
-        &mut self,
-        place: &MPlaceTy<'tcx, Provenance>,
-        atomic: AtomicRwOrd,
-    ) -> InterpResult<'tcx> {
-        use AtomicRwOrd::*;
-        let acquire = matches!(atomic, Acquire | AcqRel | SeqCst);
-        let release = matches!(atomic, Release | AcqRel | SeqCst);
-        let this = self.eval_context_mut();
-        this.validate_overlapping_atomic(place)?;
-        this.validate_atomic_op(place, atomic, "Atomic RMW", move |memory, clocks, index, _| {
-            if acquire {
-                memory.load_acquire(clocks, index)?;
-            } else {
-                memory.load_relaxed(clocks, index)?;
-            }
-            if release {
-                memory.rmw_release(clocks, index)
-            } else {
-                memory.rmw_relaxed(clocks, index)
-            }
-        })
-    }

     /// Update the data-race detector for an atomic fence on the current thread.
-    fn validate_atomic_fence(&mut self, atomic: AtomicFenceOrd) -> InterpResult<'tcx> {
+    fn atomic_fence(&mut self, atomic: AtomicFenceOrd) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
         if let Some(data_race) = &mut this.machine.data_race {
             data_race.maybe_perform_sync_operation(&this.machine.threads, |index, mut clocks| {
@@ -1016,6 +952,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
     fn allow_data_races_ref<R>(&self, op: impl FnOnce(&MiriEvalContext<'mir, 'tcx>) -> R) -> R {
         let this = self.eval_context_ref();
         if let Some(data_race) = &this.machine.data_race {
+            assert!(!data_race.ongoing_action_data_race_free.get(), "cannot nest allow_data_races");
             data_race.ongoing_action_data_race_free.set(true);
         }
         let result = op(this);
Expand All @@ -1035,6 +972,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
) -> R {
let this = self.eval_context_mut();
if let Some(data_race) = &this.machine.data_race {
assert!(!data_race.ongoing_action_data_race_free.get(), "cannot nest allow_data_races");
data_race.ongoing_action_data_race_free.set(true);
}
let result = op(this);
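
Why nesting is asserted against: the suppression flag is a plain `Cell<bool>` that is set on entry and unconditionally cleared on exit, so a nested call's cleanup would clear the outer call's flag early. A minimal sketch of the hazard, with hypothetical names (not Miri's actual types):

```rust
use std::cell::Cell;

struct Detector {
    suppressed: Cell<bool>,
}

impl Detector {
    // Sketch of the allow_data_races pattern: set a suppression flag, run the
    // closure, then clear the flag. If this re-entered itself, the inner call's
    // cleanup would clear the flag while the outer call still relied on it --
    // hence the new `assert!` against nesting.
    fn allow<R>(&self, op: impl FnOnce() -> R) -> R {
        assert!(!self.suppressed.get(), "cannot nest allow_data_races");
        self.suppressed.set(true);
        let result = op();
        self.suppressed.set(false);
        result
    }
}

fn main() {
    let d = Detector { suppressed: Cell::new(false) };
    assert_eq!(d.allow(|| 42), 42);
}
```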
@@ -1044,6 +982,114 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         result
     }

+    /// Checks that an atomic access is legal at the given place.
+    fn atomic_access_check(&self, place: &MPlaceTy<'tcx, Provenance>) -> InterpResult<'tcx> {
+        let this = self.eval_context_ref();
+        // Check alignment requirements. Atomics must always be aligned to their size,
+        // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
+        // be 8-aligned).
+        let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
+        this.check_ptr_access_align(
+            place.ptr,
+            place.layout.size,
+            align,
+            CheckInAllocMsg::MemoryAccessTest,
+        )?;
+        // Ensure the allocation is mutable. Even a failing (read-only) compare_exchange needs
+        // mutable memory on many targets (i.e., they segfault if that memory is mapped read-only),
+        // and atomic loads can be implemented via compare_exchange on some targets. There could
+        // possibly be some very specific exceptions to this, see
+        // <https://github.com/rust-lang/miri/pull/2464#discussion_r939636130> for details.
+        // We avoid `get_ptr_alloc` since we do *not* want to run the access hooks -- the actual
+        // access will happen later.
+        let (alloc_id, _offset, _prov) =
+            this.ptr_try_get_alloc_id(place.ptr).expect("there are no zero-sized atomic accesses");
+        if this.get_alloc_mutability(alloc_id)? == Mutability::Not {
+            // FIXME: make this prettier, once these messages have separate title/span/help messages.
+            throw_ub_format!(
+                "atomic operations cannot be performed on read-only memory\n\
+                many platforms require atomic read-modify-write instructions to be performed on writeable memory, even if the operation fails \
+                (and is hence nominally read-only)\n\
+                some platforms implement (some) atomic loads via compare-exchange, which means they do not work on read-only memory; \
+                it is possible that we could have an exception permitting this for specific kinds of loads\n\
+                please report an issue at <https://github.com/rust-lang/miri/issues> if this is a problem for you"
+            );
+        }
+        Ok(())
+    }
+
+    /// Update the data-race detector for an atomic read occurring at the
+    /// associated memory-place and on the current thread.
+    fn validate_atomic_load(
+        &self,
+        place: &MPlaceTy<'tcx, Provenance>,
+        atomic: AtomicReadOrd,
+    ) -> InterpResult<'tcx> {
+        let this = self.eval_context_ref();
+        this.validate_overlapping_atomic(place)?;
+        this.validate_atomic_op(
+            place,
+            atomic,
+            "Atomic Load",
+            move |memory, clocks, index, atomic| {
+                if atomic == AtomicReadOrd::Relaxed {
+                    memory.load_relaxed(&mut *clocks, index)
+                } else {
+                    memory.load_acquire(&mut *clocks, index)
+                }
+            },
+        )
+    }
+
+    /// Update the data-race detector for an atomic write occurring at the
+    /// associated memory-place and on the current thread.
+    fn validate_atomic_store(
+        &mut self,
+        place: &MPlaceTy<'tcx, Provenance>,
+        atomic: AtomicWriteOrd,
+    ) -> InterpResult<'tcx> {
+        let this = self.eval_context_mut();
+        this.validate_overlapping_atomic(place)?;
+        this.validate_atomic_op(
+            place,
+            atomic,
+            "Atomic Store",
+            move |memory, clocks, index, atomic| {
+                if atomic == AtomicWriteOrd::Relaxed {
+                    memory.store_relaxed(clocks, index)
+                } else {
+                    memory.store_release(clocks, index)
+                }
+            },
+        )
+    }
+
+    /// Update the data-race detector for an atomic read-modify-write occurring
+    /// at the associated memory place and on the current thread.
+    fn validate_atomic_rmw(
+        &mut self,
+        place: &MPlaceTy<'tcx, Provenance>,
+        atomic: AtomicRwOrd,
+    ) -> InterpResult<'tcx> {
+        use AtomicRwOrd::*;
+        let acquire = matches!(atomic, Acquire | AcqRel | SeqCst);
+        let release = matches!(atomic, Release | AcqRel | SeqCst);
+        let this = self.eval_context_mut();
+        this.validate_overlapping_atomic(place)?;
+        this.validate_atomic_op(place, atomic, "Atomic RMW", move |memory, clocks, index, _| {
+            if acquire {
+                memory.load_acquire(clocks, index)?;
+            } else {
+                memory.load_relaxed(clocks, index)?;
+            }
+            if release {
+                memory.rmw_release(clocks, index)
+            } else {
+                memory.rmw_relaxed(clocks, index)
+            }
+        })
+    }
+
     /// Generic atomic operation implementation
     fn validate_atomic_op<A: Debug + Copy>(
         &self,
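
The alignment half of `atomic_access_check` encodes the rule that atomics are aligned to their *size* rather than to their inner type. A quick, target-dependent illustration in plain Rust (not Miri internals); on typical 32-bit targets the two values below differ:

```rust
use std::mem::align_of;
use std::sync::atomic::AtomicU64;

fn main() {
    // On e.g. i686-unknown-linux-gnu, align_of::<u64>() is 4, while AtomicU64
    // must be 8-aligned. That is why the check derives the required alignment
    // from the place's size (`Align::from_bytes(place.layout.size.bytes())`)
    // rather than from the wrapped type's alignment.
    println!("u64 alignment:       {}", align_of::<u64>());
    println!("AtomicU64 alignment: {}", align_of::<AtomicU64>());
}
```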
