Commit f3b31c3

do not reuse stack addresses; make reuse rate configurable
1 parent 0fa941d

File tree: 8 files changed (+49, -19)

  README.md (+5)
  src/alloc_addresses/mod.rs (+9, -5)
  src/alloc_addresses/reuse_pool.rs (+12, -10)
  src/bin/miri.rs (+14)
  src/eval.rs (+3)
  src/machine.rs (+2, -2)
  tests/fail/weak_memory/racing_mixed_size.rs (+2, -1)
  tests/fail/weak_memory/racing_mixed_size_read.rs (+2, -1)

README.md (+5)

@@ -295,6 +295,11 @@ up the sysroot. If you are using `miri` (the Miri driver) directly, see the
 Miri adds its own set of `-Z` flags, which are usually set via the `MIRIFLAGS`
 environment variable. We first document the most relevant and most commonly used flags:
 
+* `-Zmiri-address-reuse-rate=<rate>` changes the probability that a freed *non-stack* allocation
+  will be added to the pool for address reuse, and the probability that a new *non-stack* allocation
+  will be taken from the pool. Stack allocations never get added to or taken from the pool. The
+  default is `0.5`. Note that a very high reuse rate can mask concurrency bugs as address
+  reuse induces synchronization between threads.
 * `-Zmiri-compare-exchange-weak-failure-rate=<rate>` changes the failure rate of
   `compare_exchange_weak` operations. The default is `0.8` (so 4 out of 5 weak ops will fail).
   You can change it to any value between `0.0` and `1.0`, where `1.0` means it
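
As a rough illustration of what the new flag controls (this program is not part of the commit, and the outcome is probabilistic), the following can be run with, say, `MIRIFLAGS="-Zmiri-address-reuse-rate=1.0" cargo miri run`: with a rate of `0.0` the two heap allocations always get distinct addresses, while with a non-zero rate the second allocation may land on the address the first one was freed at.

```rust
fn main() {
    // First heap allocation; record its address, then free it so the address
    // becomes eligible for the reuse pool.
    let first = Box::new(0u64);
    let first_addr = &*first as *const u64 as usize;
    drop(first);

    // Second allocation with the same size and alignment.
    let second = Box::new(0u64);
    let second_addr = &*second as *const u64 as usize;

    // `true` is only possible when the reuse rate is non-zero.
    println!("address reused: {}", first_addr == second_addr);
}
```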

src/alloc_addresses/mod.rs (+9, -5)

@@ -78,7 +78,7 @@ impl GlobalStateInner {
         GlobalStateInner {
             int_to_ptr_map: Vec::default(),
             base_addr: FxHashMap::default(),
-            reuse: ReusePool::new(),
+            reuse: ReusePool::new(config.address_reuse_rate),
             exposed: FxHashSet::default(),
             next_base_addr: stack_addr,
             provenance_mode: config.provenance_mode,
@@ -142,7 +142,11 @@ trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         }
     }
 
-    fn addr_from_alloc_id(&self, alloc_id: AllocId, _kind: MemoryKind) -> InterpResult<'tcx, u64> {
+    fn addr_from_alloc_id(
+        &self,
+        alloc_id: AllocId,
+        memory_kind: MemoryKind,
+    ) -> InterpResult<'tcx, u64> {
         let ecx = self.eval_context_ref();
         let mut global_state = ecx.machine.alloc_addresses.borrow_mut();
         let global_state = &mut *global_state;
@@ -161,7 +165,7 @@ trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
 
         // This allocation does not have a base address yet, pick or reuse one.
         let base_addr = if let Some((reuse_addr, clock)) =
-            global_state.reuse.take_addr(&mut *rng, size, align)
+            global_state.reuse.take_addr(&mut *rng, size, align, memory_kind)
         {
             if let Some(data_race) = &ecx.machine.data_race {
                 data_race.validate_lock_acquire(&clock, ecx.get_active_thread());
@@ -334,7 +338,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
 }
 
 impl<'mir, 'tcx> MiriMachine<'mir, 'tcx> {
-    pub fn free_alloc_id(&mut self, dead_id: AllocId, size: Size, align: Align) {
+    pub fn free_alloc_id(&mut self, dead_id: AllocId, size: Size, align: Align, kind: MemoryKind) {
         let global_state = self.alloc_addresses.get_mut();
         let rng = self.rng.get_mut();
 
@@ -359,7 +363,7 @@ impl<'mir, 'tcx> MiriMachine<'mir, 'tcx> {
         // `alloc_id_from_addr` any more.
         global_state.exposed.remove(&dead_id);
         // Also remember this address for future reuse.
-        global_state.reuse.add_addr(rng, addr, size, align, kind, || {
+        global_state.reuse.add_addr(rng, addr, size, align, kind, || {
            let mut clock = concurrency::VClock::default();
             if let Some(data_race) = &self.data_race {
                 data_race.validate_lock_release(
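
The `clock` stored in the pool and the `validate_lock_acquire` call above are what the README means by "address reuse induces synchronization": the freeing thread releases its vector clock into the pool entry, and the thread that later reuses the address acquires it. A standalone toy sketch of that join (two hard-coded threads, not Miri's `VClock` type):

```rust
// Toy two-thread vector clock; the real `VClock` in Miri is more general.
#[derive(Clone, Debug, PartialEq)]
struct ToyClock([u64; 2]);

impl ToyClock {
    /// Pointwise maximum, i.e. a vector-clock join.
    fn join(&mut self, other: &ToyClock) {
        for (mine, theirs) in self.0.iter_mut().zip(&other.0) {
            *mine = (*mine).max(*theirs);
        }
    }
}

fn main() {
    // Thread 0 frees an allocation at logical time 3; its clock is stored
    // alongside the address (the role of the `clock` closure in `add_addr`).
    let released = ToyClock([3, 0]);

    // Thread 1, currently at time 5, later takes that address from the pool
    // and acquires the stored clock (the role of `validate_lock_acquire`).
    let mut acquiring = ToyClock([0, 5]);
    acquiring.join(&released);

    // The free now happens-before the new allocation on thread 1.
    assert_eq!(acquiring, ToyClock([3, 5]));
}
```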

src/alloc_addresses/reuse_pool.rs (+12, -10)

@@ -4,20 +4,17 @@ use rand::Rng;
 
 use rustc_target::abi::{Align, Size};
 
-use crate::concurrency::VClock;
+use crate::{concurrency::VClock, MemoryKind};
 
 const MAX_POOL_SIZE: usize = 64;
 
-// Just use fair coins, until we have evidence that other numbers are better.
-const ADDR_REMEMBER_CHANCE: f64 = 0.5;
-const ADDR_TAKE_CHANCE: f64 = 0.5;
-
 /// The pool strikes a balance between exploring more possible executions and making it more likely
 /// to find bugs. The hypothesis is that bugs are more likely to occur when reuse happens for
 /// allocations with the same layout, since that can trigger e.g. ABA issues in a concurrent data
 /// structure. Therefore we only reuse allocations when size and alignment match exactly.
 #[derive(Debug)]
 pub struct ReusePool {
+    address_reuse_rate: f64,
     /// The i-th element in `pool` stores allocations of alignment `2^i`. We store these reusable
     /// allocations as address-size pairs, the list must be sorted by the size.
     ///
@@ -30,8 +27,8 @@ pub struct ReusePool {
 }
 
 impl ReusePool {
-    pub fn new() -> Self {
-        ReusePool { pool: vec![] }
+    pub fn new(address_reuse_rate: f64) -> Self {
+        ReusePool { address_reuse_rate, pool: vec![] }
     }
 
     fn subpool(&mut self, align: Align) -> &mut Vec<(u64, Size, VClock)> {
@@ -48,10 +45,14 @@ impl ReusePool {
         addr: u64,
         size: Size,
         align: Align,
+        kind: MemoryKind,
         clock: impl FnOnce() -> VClock,
     ) {
         // Let's see if we even want to remember this address.
-        if !rng.gen_bool(ADDR_REMEMBER_CHANCE) {
+        // We don't remember stack addresses: there's a lot of them (so the perf impact is big),
+        // and we only want to reuse stack slots within the same thread or else we'll add a lot of
+        // undesired synchronization.
+        if kind == MemoryKind::Stack || !rng.gen_bool(self.address_reuse_rate) {
             return;
         }
         // Determine the pool to add this to, and where in the pool to put it.
@@ -73,9 +74,10 @@ impl ReusePool {
         rng: &mut impl Rng,
         size: Size,
         align: Align,
+        kind: MemoryKind,
     ) -> Option<(u64, VClock)> {
-        // Determine whether we'll even attempt a reuse.
-        if !rng.gen_bool(ADDR_TAKE_CHANCE) {
+        // Determine whether we'll even attempt a reuse. As above, we don't do reuse for stack addresses.
+        if kind == MemoryKind::Stack || !rng.gen_bool(self.address_reuse_rate) {
             return None;
         }
         // Determine the pool to take this from.
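
To make the pool's shape concrete, here is a rough standalone model of the structure the doc comment describes (names, the plain `u64` sizes, and the eviction step are assumptions for illustration; this is not Miri's implementation): subpool `i` holds addresses of alignment `2^i`, entries stay sorted by size, stack allocations are never remembered, and reuse requires an exact size match.

```rust
use rand::Rng;

const MAX_POOL_SIZE: usize = 64;

#[derive(Clone, Copy, PartialEq)]
enum Kind { Stack, Heap }

struct ToyPool {
    reuse_rate: f64,
    /// pool[i] holds (addr, size) entries of alignment 2^i, kept sorted by size.
    pool: Vec<Vec<(u64, u64)>>,
}

impl ToyPool {
    fn subpool(&mut self, align: u64) -> &mut Vec<(u64, u64)> {
        let idx = align.trailing_zeros() as usize;
        if self.pool.len() <= idx {
            self.pool.resize(idx + 1, Vec::new());
        }
        &mut self.pool[idx]
    }

    fn add_addr(&mut self, rng: &mut impl Rng, addr: u64, size: u64, align: u64, kind: Kind) {
        // Never remember stack addresses; remember others with probability `reuse_rate`.
        if kind == Kind::Stack || !rng.gen_bool(self.reuse_rate) {
            return;
        }
        let subpool = self.subpool(align);
        let pos = subpool.partition_point(|&(_, s)| s < size);
        subpool.insert(pos, (addr, size));
        if subpool.len() > MAX_POOL_SIZE {
            subpool.pop(); // keep the pool bounded (the eviction choice here is a guess)
        }
    }

    fn take_addr(&mut self, rng: &mut impl Rng, size: u64, align: u64, kind: Kind) -> Option<u64> {
        // Same gate as above: no reuse for stack allocations, and only with probability `reuse_rate`.
        if kind == Kind::Stack || !rng.gen_bool(self.reuse_rate) {
            return None;
        }
        // Only reuse an entry whose size matches exactly.
        let subpool = self.subpool(align);
        let idx = subpool.iter().position(|&(_, s)| s == size)?;
        Some(subpool.remove(idx).0)
    }
}

fn main() {
    let mut rng = rand::thread_rng();
    let mut pool = ToyPool { reuse_rate: 1.0, pool: vec![] };
    pool.add_addr(&mut rng, 0x1000, 32, 16, Kind::Heap);
    assert_eq!(pool.take_addr(&mut rng, 32, 16, Kind::Heap), Some(0x1000));
    assert_eq!(pool.take_addr(&mut rng, 32, 16, Kind::Stack), None);
}
```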

src/bin/miri.rs (+14)

@@ -542,6 +542,20 @@ fn main() {
             miri_config.tracked_alloc_ids.extend(ids);
         } else if arg == "-Zmiri-track-alloc-accesses" {
             miri_config.track_alloc_accesses = true;
+        } else if let Some(param) = arg.strip_prefix("-Zmiri-address-reuse-rate=") {
+            let rate = match param.parse::<f64>() {
+                Ok(rate) if rate >= 0.0 && rate <= 1.0 => rate,
+                Ok(_) =>
+                    show_error!(
+                        "-Zmiri-address-reuse-rate must be between `0.0` and `1.0`"
+                    ),
+                Err(err) =>
+                    show_error!(
+                        "-Zmiri-address-reuse-rate requires a `f64` between `0.0` and `1.0`: {}",
+                        err
+                    ),
+            };
+            miri_config.address_reuse_rate = rate;
         } else if let Some(param) = arg.strip_prefix("-Zmiri-compare-exchange-weak-failure-rate=") {
             let rate = match param.parse::<f64>() {
                 Ok(rate) if rate >= 0.0 && rate <= 1.0 => rate,
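
The bounds check and error handling here mirror the existing `-Zmiri-compare-exchange-weak-failure-rate` branch below. If one wanted to deduplicate the two, a small shared helper could look roughly like this (hypothetical sketch; `parse_rate` is not part of this commit, and callers would still feed the `Err` case into `show_error!`):

```rust
/// Hypothetical shared parser for probability-style flags (not in this commit).
fn parse_rate(param: &str, flag: &str) -> Result<f64, String> {
    match param.parse::<f64>() {
        Ok(rate) if (0.0..=1.0).contains(&rate) => Ok(rate),
        Ok(_) => Err(format!("{flag} must be between `0.0` and `1.0`")),
        Err(err) => Err(format!("{flag} requires a `f64` between `0.0` and `1.0`: {err}")),
    }
}
```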

src/eval.rs (+3)

@@ -150,6 +150,8 @@ pub struct MiriConfig {
     pub page_size: Option<u64>,
     /// Whether to collect a backtrace when each allocation is created, just in case it leaks.
     pub collect_leak_backtraces: bool,
+    /// Probability for address reuse.
+    pub address_reuse_rate: f64,
 }
 
 impl Default for MiriConfig {
@@ -186,6 +188,7 @@
             num_cpus: 1,
             page_size: None,
             collect_leak_backtraces: true,
+            address_reuse_rate: 0.5,
         }
     }
 }
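
For tools that embed Miri as a library rather than going through the CLI, the new field can be set directly on `MiriConfig`; a minimal sketch, assuming the defaults for every other field:

```rust
use miri::MiriConfig;

/// Build a configuration that disables address reuse entirely;
/// every other field keeps its default value.
fn config_without_reuse() -> MiriConfig {
    MiriConfig { address_reuse_rate: 0.0, ..MiriConfig::default() }
}
```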

src/machine.rs (+2, -2)

@@ -1282,7 +1282,7 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> {
         (alloc_id, prove_extra): (AllocId, Self::ProvenanceExtra),
         size: Size,
         align: Align,
-        _kind: MemoryKind,
+        kind: MemoryKind,
     ) -> InterpResult<'tcx> {
         if machine.tracked_alloc_ids.contains(&alloc_id) {
             machine.emit_diagnostic(NonHaltingDiagnostic::FreedAlloc(alloc_id));
@@ -1303,7 +1303,7 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> {
         {
             *deallocated_at = Some(machine.current_span());
         }
-        machine.free_alloc_id(alloc_id, size, align);
+        machine.free_alloc_id(alloc_id, size, align, kind);
         Ok(())
     }
 
tests/fail/weak_memory/racing_mixed_size.rs (+2, -1)

@@ -1,5 +1,6 @@
 // We want to control preemption here.
-//@compile-flags: -Zmiri-preemption-rate=0
+// Avoid accidental synchronization via address reuse.
+//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-address-reuse-rate=0
 
 #![feature(core_intrinsics)]
 
tests/fail/weak_memory/racing_mixed_size_read.rs (+2, -1)

@@ -1,5 +1,6 @@
 // We want to control preemption here.
-//@compile-flags: -Zmiri-preemption-rate=0
+// Avoid accidental synchronization via address reuse.
+//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-address-reuse-rate=0
 
 use std::sync::atomic::Ordering::*;
 use std::sync::atomic::{AtomicU16, AtomicU32};
