diff --git a/src/intptrcast.rs b/src/intptrcast.rs
index 665a134184..6f4169e950 100644
--- a/src/intptrcast.rs
+++ b/src/intptrcast.rs
@@ -1,5 +1,4 @@
 use std::cell::RefCell;
-use std::cmp::max;
 use std::collections::hash_map::Entry;
 
 use log::trace;
@@ -107,9 +106,11 @@ impl<'mir, 'tcx> GlobalState {
                     slack,
                 );
 
-                // Remember next base address. If this allocation is zero-sized, leave a gap
-                // of at least 1 to avoid two allocations having the same base address.
-                global_state.next_base_addr = base_addr.checked_add(max(size.bytes(), 1)).unwrap();
+                // Remember next base address. Leave a gap of at least 1 to avoid two zero-sized allocations
+                // having the same base address, and to avoid ambiguous provenance for the address between two
+                // allocations (also see https://github.com/rust-lang/unsafe-code-guidelines/issues/313).
+                let size_plus_1 = size.bytes().checked_add(1).unwrap();
+                global_state.next_base_addr = base_addr.checked_add(size_plus_1).unwrap();
                 // Given that `next_base_addr` increases in each allocation, pushing the
                 // corresponding tuple keeps `int_to_ptr_map` sorted
                 global_state.int_to_ptr_map.push((base_addr, alloc_id));
diff --git a/tests/run-pass/adjacent-allocs.rs b/tests/run-pass/adjacent-allocs.rs
new file mode 100644
index 0000000000..509965fe4f
--- /dev/null
+++ b/tests/run-pass/adjacent-allocs.rs
@@ -0,0 +1,22 @@
+fn main() {
+    // The slack between allocations is random.
+    // Loop a few times to hit the zero-slack case.
+    for _ in 0..1024 {
+        let n = 0u64;
+        let ptr: *const u64 = &n;
+
+        // Allocate a new stack variable whose lifetime quickly ends.
+        // If there's a chance that &m == ptr.add(1), then an int-to-ptr cast of
+        // that value will have ambiguous provenance between n and m.
+        // See https://github.com/rust-lang/miri/issues/1866#issuecomment-985770125
+        {
+            let m = 0u64;
+            let _ = &m as *const u64;
+        }
+
+        let iptr = ptr as usize;
+        let zst = (iptr + 8) as *const ();
+        // This is a ZST ptr just at the end of `n`, so it should be valid to deref.
+        unsafe { *zst }
+    }
+}
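For reference, a minimal, self-contained sketch of the base-address arithmetic this patch changes. This is not Miri's actual `GlobalState`: the random slack is omitted, and `align_up`, `assign_base_addrs`, and the `(size, align)` tuples are made-up names used only for illustration.

```rust
// Round `addr` up to the next multiple of `align` (assumed non-zero here);
// a stand-in for Miri's alignment handling.
fn align_up(addr: u64, align: u64) -> u64 {
    addr.checked_add(align - 1).unwrap() / align * align
}

// Assign base addresses to a sequence of hypothetical allocations,
// described as `(size, align)` pairs, the way the patched code does.
fn assign_base_addrs(allocs: &[(u64, u64)]) -> Vec<u64> {
    let mut next_base_addr: u64 = 1; // never hand out address 0
    let mut bases = Vec::new();
    for &(size, align) in allocs {
        let base_addr = align_up(next_base_addr, align);
        bases.push(base_addr);
        // The changed line: bump by `size + 1` instead of `max(size, 1)`, so there is
        // always at least a one-byte gap and the one-past-the-end address
        // `base_addr + size` can never also be the base of the next allocation.
        let size_plus_1 = size.checked_add(1).unwrap();
        next_base_addr = base_addr.checked_add(size_plus_1).unwrap();
    }
    bases
}

fn main() {
    // Two u64-sized, u64-aligned allocations: with the +1 gap (plus alignment),
    // the end of the first is strictly below the start of the second.
    let bases = assign_base_addrs(&[(8, 8), (8, 8)]);
    assert!(bases[0] + 8 < bases[1]);
}
```

With the old `max(size.bytes(), 1)` bump and zero slack, the second allocation could start exactly at `bases[0] + 8`, so an integer equal to that address could not be given unambiguous provenance; the test added in this patch loops precisely to hit that zero-slack case.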