
Commit

Include a Span in VClock
saethlin committed Dec 16, 2022
1 parent e9e11de commit b06737e
Showing 35 changed files with 532 additions and 89 deletions.
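
The gist of the change, before the per-file diffs: every vector-clock timestamp now carries the Span of the statement that last incremented it, comparisons ignore the span so race detection behaves exactly as before, and the synchronization and thread-lifecycle hooks fetch machine.current_span() so the span is available at each release point. Below is a minimal, self-contained sketch of that idea, not Miri's code: Span is stubbed as a plain wrapper (the real type is rustc_span::Span), and the struct is a simplified stand-in for the VTimestamp defined in src/concurrency/vector_clock.rs further down.

    use std::cmp::Ordering;

    // Stub for rustc_span::Span so the sketch compiles on its own.
    #[derive(Clone, Copy, Debug, PartialEq, Eq)]
    struct Span(Option<&'static str>);
    const DUMMY_SP: Span = Span(None);

    // A timestamp is a logical time plus the span that produced it.
    #[derive(Clone, Copy, Debug, Eq)]
    struct VTimestamp {
        time: u32,
        span: Span,
    }

    impl VTimestamp {
        const NONE: VTimestamp = VTimestamp { time: 0, span: DUMMY_SP };

        // Bump the logical time; remember the span unless it is a dummy.
        fn increment(&mut self, current_span: Span) {
            self.time = self.time.checked_add(1).expect("Vector clock overflow");
            if current_span != DUMMY_SP {
                self.span = current_span;
            }
        }
    }

    // Equality and ordering look only at the time, never at the span,
    // so attaching spans cannot change which executions count as racy.
    impl PartialEq for VTimestamp {
        fn eq(&self, other: &Self) -> bool {
            self.time == other.time
        }
    }
    impl PartialOrd for VTimestamp {
        fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
            Some(self.cmp(other))
        }
    }
    impl Ord for VTimestamp {
        fn cmp(&self, other: &Self) -> Ordering {
            self.time.cmp(&other.time)
        }
    }

    fn main() {
        let mut ts = VTimestamp::NONE;
        ts.increment(Span(Some("src/main.rs:10:5")));
        // Same time, different span: still equal, because the span is
        // diagnostic metadata only.
        let other = VTimestamp { time: 1, span: DUMMY_SP };
        assert_eq!(ts, other);
        println!("time = {}, last bumped at {:?}", ts.time, ts.span);
    }
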
171 changes: 108 additions & 63 deletions src/concurrency/data_race.rs

Large diffs are not rendered by default.

6 changes: 4 additions & 2 deletions src/concurrency/init_once.rs
@@ -160,6 +160,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
fn init_once_complete(&mut self, id: InitOnceId) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let current_thread = this.get_active_thread();
let current_span = this.machine.current_span();
let init_once = &mut this.machine.threads.sync.init_onces[id];

assert_eq!(
@@ -172,7 +173,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {

// Each complete happens-before the end of the wait
if let Some(data_race) = &this.machine.data_race {
data_race.validate_lock_release(&mut init_once.data_race, current_thread);
data_race.validate_lock_release(&mut init_once.data_race, current_thread, current_span);
}

// Wake up everyone.
@@ -188,6 +189,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
fn init_once_fail(&mut self, id: InitOnceId) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let current_thread = this.get_active_thread();
let current_span = this.machine.current_span();
let init_once = &mut this.machine.threads.sync.init_onces[id];
assert_eq!(
init_once.status,
@@ -197,7 +199,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {

// Each complete happens-before the end of the wait
if let Some(data_race) = &this.machine.data_race {
data_race.validate_lock_release(&mut init_once.data_race, current_thread);
data_race.validate_lock_release(&mut init_once.data_race, current_thread, current_span);
}

// Wake up one waiting thread, so they can go ahead and try to init this.
33 changes: 27 additions & 6 deletions src/concurrency/sync.rs
@@ -359,6 +359,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
/// return `None`.
fn mutex_unlock(&mut self, id: MutexId, expected_owner: ThreadId) -> Option<usize> {
let this = self.eval_context_mut();
let current_span = this.machine.current_span();
let mutex = &mut this.machine.threads.sync.mutexes[id];
if let Some(current_owner) = mutex.owner {
// Mutex is locked.
@@ -375,7 +376,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
// The mutex is completely unlocked. Try transfering ownership
// to another thread.
if let Some(data_race) = &this.machine.data_race {
data_race.validate_lock_release(&mut mutex.data_race, current_owner);
data_race.validate_lock_release(
&mut mutex.data_race,
current_owner,
current_span,
);
}
this.mutex_dequeue_and_lock(id);
}
@@ -454,6 +459,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
/// Returns `true` if succeeded, `false` if this `reader` did not hold the lock.
fn rwlock_reader_unlock(&mut self, id: RwLockId, reader: ThreadId) -> bool {
let this = self.eval_context_mut();
let current_span = this.machine.current_span();
let rwlock = &mut this.machine.threads.sync.rwlocks[id];
match rwlock.readers.entry(reader) {
Entry::Occupied(mut entry) => {
@@ -470,7 +476,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
Entry::Vacant(_) => return false, // we did not even own this lock
}
if let Some(data_race) = &this.machine.data_race {
data_race.validate_lock_release_shared(&mut rwlock.data_race_reader, reader);
data_race.validate_lock_release_shared(
&mut rwlock.data_race_reader,
reader,
current_span,
);
}

// The thread was a reader. If the lock is not held any more, give it to a writer.
@@ -511,6 +521,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
#[inline]
fn rwlock_writer_unlock(&mut self, id: RwLockId, expected_writer: ThreadId) -> bool {
let this = self.eval_context_mut();
let current_span = this.machine.current_span();
let rwlock = &mut this.machine.threads.sync.rwlocks[id];
if let Some(current_writer) = rwlock.writer {
if current_writer != expected_writer {
@@ -523,8 +534,16 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
// since this writer happens-before both the union of readers once they are finished
// and the next writer
if let Some(data_race) = &this.machine.data_race {
data_race.validate_lock_release(&mut rwlock.data_race, current_writer);
data_race.validate_lock_release(&mut rwlock.data_race_reader, current_writer);
data_race.validate_lock_release(
&mut rwlock.data_race,
current_writer,
current_span,
);
data_race.validate_lock_release(
&mut rwlock.data_race_reader,
current_writer,
current_span,
);
}
// The thread was a writer.
//
@@ -595,12 +614,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
fn condvar_signal(&mut self, id: CondvarId) -> Option<(ThreadId, CondvarLock)> {
let this = self.eval_context_mut();
let current_thread = this.get_active_thread();
let current_span = this.machine.current_span();
let condvar = &mut this.machine.threads.sync.condvars[id];
let data_race = &this.machine.data_race;

// Each condvar signal happens-before the end of the condvar wake
if let Some(data_race) = data_race {
data_race.validate_lock_release(&mut condvar.data_race, current_thread);
data_race.validate_lock_release(&mut condvar.data_race, current_thread, current_span);
}
condvar.waiters.pop_front().map(|waiter| {
if let Some(data_race) = data_race {
@@ -628,12 +648,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
fn futex_wake(&mut self, addr: u64, bitset: u32) -> Option<ThreadId> {
let this = self.eval_context_mut();
let current_thread = this.get_active_thread();
let current_span = this.machine.current_span();
let futex = &mut this.machine.threads.sync.futexes.get_mut(&addr)?;
let data_race = &this.machine.data_race;

// Each futex-wake happens-before the end of the futex wait
if let Some(data_race) = data_race {
data_race.validate_lock_release(&mut futex.data_race, current_thread);
data_race.validate_lock_release(&mut futex.data_race, current_thread, current_span);
}

// Wake up the first thread in the queue that matches any of the bits in the bitset.
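
One detail repeated across the src/concurrency/sync.rs functions above: current_span is read into a local before the &mut borrow of the mutex/rwlock/condvar/futex is taken, since both come out of this.machine and the two borrows must not overlap. A minimal sketch of that ordering constraint, using hypothetical stand-in types (Machine, Mutex, and their fields here are illustrative, not Miri's):

    #[derive(Clone, Copy, Debug)]
    struct Span(u32);

    struct Mutex {
        release_clock: u32, // placeholder for the per-mutex release clock
    }

    struct Machine {
        span: Span,
        mutexes: Vec<Mutex>,
    }

    impl Machine {
        // Borrows the whole machine immutably, like MiriMachine::current_span.
        fn current_span(&self) -> Span {
            self.span
        }
    }

    fn mutex_unlock(machine: &mut Machine, id: usize) {
        // Capture the span first; this immutable borrow of `machine` ends here,
        // so the mutable borrow of one of its mutexes below is allowed.
        let current_span = machine.current_span();
        let mutex = &mut machine.mutexes[id];
        // The real code would hand `current_span` to the data-race detector
        // when releasing; here we just touch the placeholder clock.
        mutex.release_clock += 1;
        let _ = current_span;
    }

    fn main() {
        let mut machine = Machine { span: Span(42), mutexes: vec![Mutex { release_clock: 0 }] };
        mutex_unlock(&mut machine, 0);
        println!("release clock after unlock: {}", machine.mutexes[0].release_clock);
    }
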
12 changes: 9 additions & 3 deletions src/concurrency/thread.rs
@@ -13,6 +13,7 @@ use rustc_hir::def_id::DefId;
use rustc_index::vec::{Idx, IndexVec};
use rustc_middle::mir::Mutability;
use rustc_middle::ty::layout::TyAndLayout;
use rustc_span::Span;
use rustc_target::spec::abi::Abi;

use crate::concurrency::data_race;
@@ -617,6 +618,7 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
fn thread_terminated(
&mut self,
mut data_race: Option<&mut data_race::GlobalState>,
current_span: Span,
) -> Vec<Pointer<Provenance>> {
let mut free_tls_statics = Vec::new();
{
@@ -634,7 +636,7 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
}
// Set the thread into a terminated state in the data-race detector.
if let Some(ref mut data_race) = data_race {
data_race.thread_terminated(self);
data_race.thread_terminated(self, current_span);
}
// Check if we need to unblock any threads.
let mut joined_threads = vec![]; // store which threads joined, we'll need it
@@ -813,8 +815,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
let mut state = tls::TlsDtorsState::default();
Box::new(move |m| state.on_stack_empty(m))
});
let current_span = this.machine.current_span();
if let Some(data_race) = &mut this.machine.data_race {
data_race.thread_created(&this.machine.threads, new_thread_id);
data_race.thread_created(&this.machine.threads, new_thread_id, current_span);
}

// Write the current thread-id, switch to the next thread later
@@ -1041,7 +1044,10 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
assert!(thread.stack.is_empty(), "only threads with an empty stack can be terminated");
thread.state = ThreadState::Terminated;

for ptr in this.machine.threads.thread_terminated(this.machine.data_race.as_mut()) {
let current_span = this.machine.current_span();
for ptr in
this.machine.threads.thread_terminated(this.machine.data_race.as_mut(), current_span)
{
this.deallocate_ptr(ptr.into(), None, MiriMemoryKind::Tls.into())?;
}
Ok(())
90 changes: 77 additions & 13 deletions src/concurrency/vector_clock.rs
@@ -1,6 +1,11 @@
use rustc_index::vec::Idx;
use rustc_span::{Span, SpanData, DUMMY_SP};
use smallvec::SmallVec;
use std::{cmp::Ordering, fmt::Debug, ops::Index};
use std::{
cmp::Ordering,
fmt::Debug,
ops::{Index, IndexMut},
};

/// A vector clock index, this is associated with a thread id
/// but in some cases one vector index may be shared with
@@ -42,7 +47,37 @@ const SMALL_VECTOR: usize = 4;

/// The type of the time-stamps recorded in the data-race detector
/// set to a type of unsigned integer
pub type VTimestamp = u32;
#[derive(Clone, Copy, Debug, Eq)]
pub struct VTimestamp {
time: u32,
pub span: Span,
}

impl VTimestamp {
pub const NONE: VTimestamp = VTimestamp { time: 0, span: DUMMY_SP };

pub fn span_data(&self) -> SpanData {
self.span.data()
}
}

impl PartialEq for VTimestamp {
fn eq(&self, other: &Self) -> bool {
self.time == other.time
}
}

impl PartialOrd for VTimestamp {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}

impl Ord for VTimestamp {
fn cmp(&self, other: &Self) -> Ordering {
self.time.cmp(&other.time)
}
}

/// A vector clock for detecting data-races, this is conceptually
/// a map from a vector index (and thus a thread id) to a timestamp.
Expand All @@ -62,7 +97,7 @@ impl VClock {
/// for a value at the given index
pub fn new_with_index(index: VectorIdx, timestamp: VTimestamp) -> VClock {
let len = index.index() + 1;
let mut vec = smallvec::smallvec![0; len];
let mut vec = smallvec::smallvec![VTimestamp::NONE; len];
vec[index.index()] = timestamp;
VClock(vec)
}
@@ -79,7 +114,7 @@ impl VClock {
#[inline]
fn get_mut_with_min_len(&mut self, min_len: usize) -> &mut [VTimestamp] {
if self.0.len() < min_len {
self.0.resize(min_len, 0);
self.0.resize(min_len, VTimestamp::NONE);
}
assert!(self.0.len() >= min_len);
self.0.as_mut_slice()
@@ -88,11 +123,14 @@ impl VClock {
/// Increment the vector clock at a known index
/// this will panic if the vector index overflows
#[inline]
pub fn increment_index(&mut self, idx: VectorIdx) {
pub fn increment_index(&mut self, idx: VectorIdx, current_span: Span) {
let idx = idx.index();
let mut_slice = self.get_mut_with_min_len(idx + 1);
let idx_ref = &mut mut_slice[idx];
*idx_ref = idx_ref.checked_add(1).expect("Vector clock overflow")
idx_ref.time = idx_ref.time.checked_add(1).expect("Vector clock overflow");
if current_span != DUMMY_SP {
idx_ref.span = current_span;
}
}

// Join the two vector-clocks together, this
Expand All @@ -102,14 +140,31 @@ impl VClock {
let rhs_slice = other.as_slice();
let lhs_slice = self.get_mut_with_min_len(rhs_slice.len());
for (l, &r) in lhs_slice.iter_mut().zip(rhs_slice.iter()) {
let l_span = l.span;
let r_span = r.span;
*l = r.max(*l);
if l.span == DUMMY_SP {
if r_span != DUMMY_SP {
l.span = r_span;
}
if l_span != DUMMY_SP {
l.span = l_span;
}
}
}
}

/// Set the element at the current index of the vector
pub fn set_at_index(&mut self, other: &Self, idx: VectorIdx) {
let mut_slice = self.get_mut_with_min_len(idx.index() + 1);

let prev_span = mut_slice[idx.index()].span;

mut_slice[idx.index()] = other[idx];

if other[idx].span == DUMMY_SP {
mut_slice[idx.index()].span = prev_span;
}
}

/// Set the vector to the all-zero vector
@@ -313,7 +368,14 @@ impl Index<VectorIdx> for VClock {

#[inline]
fn index(&self, index: VectorIdx) -> &VTimestamp {
self.as_slice().get(index.to_u32() as usize).unwrap_or(&0)
self.as_slice().get(index.to_u32() as usize).unwrap_or(&VTimestamp::NONE)
}
}

impl IndexMut<VectorIdx> for VClock {
#[inline]
fn index_mut(&mut self, index: VectorIdx) -> &mut VTimestamp {
self.0.as_mut_slice().get_mut(index.to_u32() as usize).unwrap()
}
}

@@ -323,24 +385,25 @@ impl Index<VectorIdx> for VClock {
#[cfg(test)]
mod tests {

use super::{VClock, VTimestamp, VectorIdx};
use std::cmp::Ordering;
use super::{VClock, VectorIdx};
use rustc_span::DUMMY_SP;

#[test]
fn test_equal() {
let mut c1 = VClock::default();
let mut c2 = VClock::default();
assert_eq!(c1, c2);
c1.increment_index(VectorIdx(5));
c1.increment_index(VectorIdx(5), DUMMY_SP);
assert_ne!(c1, c2);
c2.increment_index(VectorIdx(53));
c2.increment_index(VectorIdx(53), DUMMY_SP);
assert_ne!(c1, c2);
c1.increment_index(VectorIdx(53));
c1.increment_index(VectorIdx(53), DUMMY_SP);
assert_ne!(c1, c2);
c2.increment_index(VectorIdx(5));
c2.increment_index(VectorIdx(5), DUMMY_SP);
assert_eq!(c1, c2);
}

/*
#[test]
fn test_partial_order() {
// Small test
@@ -449,4 +512,5 @@ mod tests {
"Invalid alt (>=):\n l: {l:?}\n r: {r:?}"
);
}
*/
}
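
Not part of the commit, but a sketch of one more test in the style of test_equal above, exercising the new span parameter through items this module already imports; it checks that incrementing with a dummy span leaves the recorded span untouched and that spans play no part in timestamp comparison:

    #[test]
    fn test_span_is_metadata_only() {
        let mut c1 = VClock::default();
        let mut c2 = VClock::default();
        c1.increment_index(VectorIdx(3), DUMMY_SP);
        c2.increment_index(VectorIdx(3), DUMMY_SP);
        // A dummy span is never stored over the existing one.
        assert_eq!(c1[VectorIdx(3)].span, DUMMY_SP);
        // Equal logical times compare equal; the span is ignored.
        assert_eq!(c1[VectorIdx(3)], c2[VectorIdx(3)]);
        assert_eq!(c1, c2);
    }
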
2 changes: 1 addition & 1 deletion src/concurrency/weak_memory.rs
@@ -258,7 +258,7 @@ impl<'mir, 'tcx: 'mir> StoreBuffer {
// The thread index and timestamp of the initialisation write
// are never meaningfully used, so it's fine to leave them as 0
store_index: VectorIdx::from(0),
timestamp: 0,
timestamp: VTimestamp::NONE,
val: init,
is_seqcst: false,
load_info: RefCell::new(LoadInfo::default()),