diff --git a/src/concurrency/data_race.rs b/src/concurrency/data_race.rs
index d0fc349f1a..69514395bc 100644
--- a/src/concurrency/data_race.rs
+++ b/src/concurrency/data_race.rs
@@ -49,7 +49,7 @@ use std::{
 use rustc_ast::Mutability;
 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
 use rustc_index::vec::{Idx, IndexVec};
-use rustc_middle::{mir, ty::layout::TyAndLayout};
+use rustc_middle::mir;
 use rustc_target::abi::{Align, Size};
 
 use crate::*;
@@ -440,33 +440,6 @@ impl MemoryCellClocks {
 /// Evaluation context extensions.
 impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx> {}
 pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriInterpCxExt<'mir, 'tcx> {
-    /// Atomic variant of read_scalar_at_offset.
-    fn read_scalar_at_offset_atomic(
-        &self,
-        op: &OpTy<'tcx, Provenance>,
-        offset: u64,
-        layout: TyAndLayout<'tcx>,
-        atomic: AtomicReadOrd,
-    ) -> InterpResult<'tcx, Scalar<Provenance>> {
-        let this = self.eval_context_ref();
-        let value_place = this.deref_operand_and_offset(op, offset, layout)?;
-        this.read_scalar_atomic(&value_place, atomic)
-    }
-
-    /// Atomic variant of write_scalar_at_offset.
-    fn write_scalar_at_offset_atomic(
-        &mut self,
-        op: &OpTy<'tcx, Provenance>,
-        offset: u64,
-        value: impl Into<Scalar<Provenance>>,
-        layout: TyAndLayout<'tcx>,
-        atomic: AtomicWriteOrd,
-    ) -> InterpResult<'tcx> {
-        let this = self.eval_context_mut();
-        let value_place = this.deref_operand_and_offset(op, offset, layout)?;
-        this.write_scalar_atomic(value.into(), &value_place, atomic)
-    }
-
     /// Perform an atomic read operation at the memory location.
     fn read_scalar_atomic(
         &self,
diff --git a/src/concurrency/sync.rs b/src/concurrency/sync.rs
index ba5ae852c5..dc4b435b71 100644
--- a/src/concurrency/sync.rs
+++ b/src/concurrency/sync.rs
@@ -193,8 +193,9 @@ impl<'mir, 'tcx: 'mir> EvalContextExtPriv<'mir, 'tcx> for crate::MiriInterpCx<'m
 pub(super) trait EvalContextExtPriv<'mir, 'tcx: 'mir>:
     crate::MiriInterpCxExt<'mir, 'tcx>
 {
+    /// Lazily initialize the ID of this Miri sync structure.
+    /// ('0' indicates uninit.)
     #[inline]
-    // Miri sync structures contain zero-initialized ids stored at some offset behind a pointer
     fn get_or_create_id<Id: SyncId>(
         &mut self,
         next_id: Id,
@@ -205,6 +206,7 @@ pub(super) trait EvalContextExtPriv<'mir, 'tcx: 'mir>:
         let value_place =
             this.deref_operand_and_offset(lock_op, offset, this.machine.layouts.u32)?;
 
+        // Since we are lazy, this update has to be atomic.
         let (old, success) = this
             .atomic_compare_exchange_scalar(
                 &value_place,
diff --git a/src/helpers.rs b/src/helpers.rs
index f98727186c..fb4d91a26d 100644
--- a/src/helpers.rs
+++ b/src/helpers.rs
@@ -667,7 +667,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         layout: TyAndLayout<'tcx>,
     ) -> InterpResult<'tcx, MPlaceTy<'tcx, Provenance>> {
         let this = self.eval_context_ref();
-        let op_place = this.deref_operand(op)?;
+        let op_place = this.deref_operand(op)?; // FIXME: we still deref with the original type!
         let offset = Size::from_bytes(offset);
 
         // Ensure that the access is within bounds.
@@ -687,17 +687,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         this.read_scalar(&value_place.into())
     }
 
-    fn write_immediate_at_offset(
-        &mut self,
-        op: &OpTy<'tcx, Provenance>,
-        offset: u64,
-        value: &ImmTy<'tcx, Provenance>,
-    ) -> InterpResult<'tcx, ()> {
-        let this = self.eval_context_mut();
-        let value_place = this.deref_operand_and_offset(op, offset, value.layout)?;
-        this.write_immediate(**value, &value_place.into())
-    }
-
     fn write_scalar_at_offset(
         &mut self,
         op: &OpTy<'tcx, Provenance>,
@@ -705,7 +694,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         value: impl Into<Scalar<Provenance>>,
         layout: TyAndLayout<'tcx>,
     ) -> InterpResult<'tcx, ()> {
-        self.write_immediate_at_offset(op, offset, &ImmTy::from_scalar(value.into(), layout))
+        let this = self.eval_context_mut();
+        let value_place = this.deref_operand_and_offset(op, offset, layout)?;
+        this.write_scalar(value, &value_place.into())
     }
 
     /// Parse a `timespec` struct and return it as a `std::time::Duration`. It returns `None`
diff --git a/src/shims/unix/sync.rs b/src/shims/unix/sync.rs
index a727564684..e0afb500cb 100644
--- a/src/shims/unix/sync.rs
+++ b/src/shims/unix/sync.rs
@@ -1,8 +1,5 @@
 use std::time::SystemTime;
 
-use rustc_hir::LangItem;
-use rustc_middle::ty::{layout::TyAndLayout, query::TyCtxtAt, Ty};
-
 use crate::concurrency::sync::CondvarLock;
 use crate::concurrency::thread::{MachineCallback, Time};
 use crate::*;
@@ -20,39 +17,34 @@ use crate::*;
 /// in `pthread_mutexattr_settype` function.
 const PTHREAD_MUTEX_NORMAL_FLAG: i32 = 0x8000000;
 
-const MUTEX_ID_OFFSET: u64 = 4;
-const RWLOCK_ID_OFFSET: u64 = 4;
-const CONDVAR_ID_OFFSET: u64 = 4;
-
 fn is_mutex_kind_default<'mir, 'tcx: 'mir>(
     ecx: &mut MiriInterpCx<'mir, 'tcx>,
-    kind: Scalar<Provenance>,
+    kind: i32,
 ) -> InterpResult<'tcx, bool> {
-    Ok(kind == ecx.eval_libc("PTHREAD_MUTEX_DEFAULT")?)
+    Ok(kind == ecx.eval_libc_i32("PTHREAD_MUTEX_DEFAULT")?)
 }
 
 fn is_mutex_kind_normal<'mir, 'tcx: 'mir>(
     ecx: &mut MiriInterpCx<'mir, 'tcx>,
-    kind: Scalar<Provenance>,
+    kind: i32,
 ) -> InterpResult<'tcx, bool> {
-    let kind = kind.to_i32()?;
-    let mutex_normal_kind = ecx.eval_libc("PTHREAD_MUTEX_NORMAL")?.to_i32()?;
+    let mutex_normal_kind = ecx.eval_libc_i32("PTHREAD_MUTEX_NORMAL")?;
     Ok(kind == (mutex_normal_kind | PTHREAD_MUTEX_NORMAL_FLAG))
 }
 
 fn mutexattr_get_kind<'mir, 'tcx: 'mir>(
     ecx: &MiriInterpCx<'mir, 'tcx>,
     attr_op: &OpTy<'tcx, Provenance>,
-) -> InterpResult<'tcx, Scalar<Provenance>> {
-    ecx.read_scalar_at_offset(attr_op, 0, ecx.machine.layouts.i32)
+) -> InterpResult<'tcx, i32> {
+    ecx.read_scalar_at_offset(attr_op, 0, ecx.machine.layouts.i32)?.to_i32()
 }
 
 fn mutexattr_set_kind<'mir, 'tcx: 'mir>(
     ecx: &mut MiriInterpCx<'mir, 'tcx>,
     attr_op: &OpTy<'tcx, Provenance>,
-    kind: impl Into<Scalar<Provenance>>,
+    kind: i32,
 ) -> InterpResult<'tcx, ()> {
-    ecx.write_scalar_at_offset(attr_op, 0, kind, layout_of_maybe_uninit(ecx.tcx, ecx.tcx.types.i32))
+    ecx.write_scalar_at_offset(attr_op, 0, Scalar::from_i32(kind), ecx.machine.layouts.i32)
 }
 
 // pthread_mutex_t is between 24 and 48 bytes, depending on the platform.
@@ -64,53 +56,35 @@ fn mutexattr_set_kind<'mir, 'tcx: 'mir>(
 // bytes 12-15 or 16-19 (depending on platform): mutex kind, as an i32
 // (the kind has to be at its offset for compatibility with static initializer macros)
 
-fn mutex_get_kind<'mir, 'tcx: 'mir>(
-    ecx: &MiriInterpCx<'mir, 'tcx>,
+fn mutex_get_id<'mir, 'tcx: 'mir>(
+    ecx: &mut MiriInterpCx<'mir, 'tcx>,
     mutex_op: &OpTy<'tcx, Provenance>,
-) -> InterpResult<'tcx, Scalar<Provenance>> {
-    let offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
-    ecx.read_scalar_at_offset_atomic(
-        mutex_op,
-        offset,
-        ecx.machine.layouts.i32,
-        AtomicReadOrd::Relaxed,
-    )
+) -> InterpResult<'tcx, MutexId> {
+    ecx.mutex_get_or_create_id(mutex_op, 4)
 }
 
-fn mutex_set_kind<'mir, 'tcx: 'mir>(
+fn mutex_reset_id<'mir, 'tcx: 'mir>(
     ecx: &mut MiriInterpCx<'mir, 'tcx>,
     mutex_op: &OpTy<'tcx, Provenance>,
-    kind: impl Into<Scalar<Provenance>>,
 ) -> InterpResult<'tcx, ()> {
-    let offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
-    ecx.write_scalar_at_offset_atomic(
-        mutex_op,
-        offset,
-        kind,
-        layout_of_maybe_uninit(ecx.tcx, ecx.tcx.types.i32),
-        AtomicWriteOrd::Relaxed,
-    )
+    ecx.write_scalar_at_offset(mutex_op, 4, Scalar::from_i32(0), ecx.machine.layouts.u32)
 }
 
-fn mutex_get_id<'mir, 'tcx: 'mir>(
+fn mutex_get_kind<'mir, 'tcx: 'mir>(
     ecx: &MiriInterpCx<'mir, 'tcx>,
     mutex_op: &OpTy<'tcx, Provenance>,
-) -> InterpResult<'tcx, Scalar<Provenance>> {
-    ecx.read_scalar_at_offset_atomic(mutex_op, 4, ecx.machine.layouts.u32, AtomicReadOrd::Relaxed)
+) -> InterpResult<'tcx, i32> {
+    let offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
+    ecx.read_scalar_at_offset(mutex_op, offset, ecx.machine.layouts.i32)?.to_i32()
 }
 
-fn mutex_set_id<'mir, 'tcx: 'mir>(
+fn mutex_set_kind<'mir, 'tcx: 'mir>(
     ecx: &mut MiriInterpCx<'mir, 'tcx>,
     mutex_op: &OpTy<'tcx, Provenance>,
-    id: impl Into<Scalar<Provenance>>,
+    kind: i32,
 ) -> InterpResult<'tcx, ()> {
-    ecx.write_scalar_at_offset_atomic(
-        mutex_op,
-        4,
-        id,
-        layout_of_maybe_uninit(ecx.tcx, ecx.tcx.types.u32),
-        AtomicWriteOrd::Relaxed,
-    )
+    let offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
+    ecx.write_scalar_at_offset(mutex_op, offset, Scalar::from_i32(kind), ecx.machine.layouts.i32)
 }
 
 // pthread_rwlock_t is between 32 and 56 bytes, depending on the platform.
@@ -121,10 +95,10 @@ fn mutex_set_id<'mir, 'tcx: 'mir>(
 // bytes 4-7: rwlock id as u32 or 0 if id is not assigned yet.
 
 fn rwlock_get_id<'mir, 'tcx: 'mir>(
-    ecx: &MiriInterpCx<'mir, 'tcx>,
+    ecx: &mut MiriInterpCx<'mir, 'tcx>,
     rwlock_op: &OpTy<'tcx, Provenance>,
-) -> InterpResult<'tcx, Scalar<Provenance>> {
-    ecx.read_scalar_at_offset_atomic(rwlock_op, 4, ecx.machine.layouts.u32, AtomicReadOrd::Relaxed)
+) -> InterpResult<'tcx, RwLockId> {
+    ecx.rwlock_get_or_create_id(rwlock_op, 4)
 }
 
 // pthread_condattr_t
@@ -136,21 +110,16 @@ fn rwlock_get_id<'mir, 'tcx: 'mir>(
 fn condattr_get_clock_id<'mir, 'tcx: 'mir>(
     ecx: &MiriInterpCx<'mir, 'tcx>,
     attr_op: &OpTy<'tcx, Provenance>,
-) -> InterpResult<'tcx, Scalar<Provenance>> {
-    ecx.read_scalar_at_offset(attr_op, 0, ecx.machine.layouts.i32)
+) -> InterpResult<'tcx, i32> {
+    ecx.read_scalar_at_offset(attr_op, 0, ecx.machine.layouts.i32)?.to_i32()
 }
 
 fn condattr_set_clock_id<'mir, 'tcx: 'mir>(
     ecx: &mut MiriInterpCx<'mir, 'tcx>,
     attr_op: &OpTy<'tcx, Provenance>,
-    clock_id: impl Into<Scalar<Provenance>>,
+    clock_id: i32,
 ) -> InterpResult<'tcx, ()> {
-    ecx.write_scalar_at_offset(
-        attr_op,
-        0,
-        clock_id,
-        layout_of_maybe_uninit(ecx.tcx, ecx.machine.layouts.i32.ty),
-    )
+    ecx.write_scalar_at_offset(attr_op, 0, Scalar::from_i32(clock_id), ecx.machine.layouts.i32)
 }
 
 // pthread_cond_t
@@ -163,44 +132,32 @@ fn condattr_set_clock_id<'mir, 'tcx: 'mir>(
 // bytes 8-11: the clock id constant as i32
 
 fn cond_get_id<'mir, 'tcx: 'mir>(
-    ecx: &MiriInterpCx<'mir, 'tcx>,
+    ecx: &mut MiriInterpCx<'mir, 'tcx>,
     cond_op: &OpTy<'tcx, Provenance>,
-) -> InterpResult<'tcx, Scalar<Provenance>> {
-    ecx.read_scalar_at_offset_atomic(cond_op, 4, ecx.machine.layouts.u32, AtomicReadOrd::Relaxed)
+) -> InterpResult<'tcx, CondvarId> {
+    ecx.condvar_get_or_create_id(cond_op, 4)
 }
 
-fn cond_set_id<'mir, 'tcx: 'mir>(
+fn cond_reset_id<'mir, 'tcx: 'mir>(
     ecx: &mut MiriInterpCx<'mir, 'tcx>,
     cond_op: &OpTy<'tcx, Provenance>,
-    id: impl Into<Scalar<Provenance>>,
 ) -> InterpResult<'tcx, ()> {
-    ecx.write_scalar_at_offset_atomic(
-        cond_op,
-        4,
-        id,
-        layout_of_maybe_uninit(ecx.tcx, ecx.tcx.types.u32),
-        AtomicWriteOrd::Relaxed,
-    )
+    ecx.write_scalar_at_offset(cond_op, 4, Scalar::from_i32(0), ecx.machine.layouts.u32)
 }
 
 fn cond_get_clock_id<'mir, 'tcx: 'mir>(
     ecx: &MiriInterpCx<'mir, 'tcx>,
     cond_op: &OpTy<'tcx, Provenance>,
-) -> InterpResult<'tcx, Scalar<Provenance>> {
-    ecx.read_scalar_at_offset(cond_op, 8, ecx.machine.layouts.i32)
+) -> InterpResult<'tcx, i32> {
+    ecx.read_scalar_at_offset(cond_op, 8, ecx.machine.layouts.i32)?.to_i32()
 }
 
 fn cond_set_clock_id<'mir, 'tcx: 'mir>(
     ecx: &mut MiriInterpCx<'mir, 'tcx>,
     cond_op: &OpTy<'tcx, Provenance>,
-    clock_id: impl Into<Scalar<Provenance>>,
+    clock_id: i32,
 ) -> InterpResult<'tcx, ()> {
-    ecx.write_scalar_at_offset(
-        cond_op,
-        8,
-        clock_id,
-        layout_of_maybe_uninit(ecx.tcx, ecx.tcx.types.i32),
-    )
+    ecx.write_scalar_at_offset(cond_op, 8, Scalar::from_i32(clock_id), ecx.machine.layouts.i32)
 }
 
 /// Try to reacquire the mutex associated with the condition variable after we
@@ -260,7 +217,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
     ) -> InterpResult<'tcx, i32> {
         let this = self.eval_context_mut();
 
-        let default_kind = this.eval_libc("PTHREAD_MUTEX_DEFAULT")?;
+        let default_kind = this.eval_libc_i32("PTHREAD_MUTEX_DEFAULT")?;
         mutexattr_set_kind(this, attr_op, default_kind)?;
 
         Ok(0)
@@ -273,8 +230,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
     ) -> InterpResult<'tcx, i32> {
         let this = self.eval_context_mut();
 
-        let kind = this.read_scalar(kind_op)?;
-        if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? {
+        let kind = this.read_scalar(kind_op)?.to_i32()?;
+        if kind == this.eval_libc_i32("PTHREAD_MUTEX_NORMAL")? {
             // In `glibc` implementation, the numeric values of
             // `PTHREAD_MUTEX_NORMAL` and `PTHREAD_MUTEX_DEFAULT` are equal.
             // However, a mutex created by explicitly passing
@@ -287,16 +244,16 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
             // use the same trick as glibc: for the case when
             // `pthread_mutexattr_settype` is caled explicitly, we set the
             // `PTHREAD_MUTEX_NORMAL_FLAG` flag.
-            let normal_kind = kind.to_i32()? | PTHREAD_MUTEX_NORMAL_FLAG;
+            let normal_kind = kind | PTHREAD_MUTEX_NORMAL_FLAG;
             // Check that after setting the flag, the kind is distinguishable
             // from all other kinds.
-            assert_ne!(normal_kind, this.eval_libc("PTHREAD_MUTEX_DEFAULT")?.to_i32()?);
-            assert_ne!(normal_kind, this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")?.to_i32()?);
-            assert_ne!(normal_kind, this.eval_libc("PTHREAD_MUTEX_RECURSIVE")?.to_i32()?);
-            mutexattr_set_kind(this, attr_op, Scalar::from_i32(normal_kind))?;
-        } else if kind == this.eval_libc("PTHREAD_MUTEX_DEFAULT")?
-            || kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")?
-            || kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")?
+            assert_ne!(normal_kind, this.eval_libc_i32("PTHREAD_MUTEX_DEFAULT")?);
+            assert_ne!(normal_kind, this.eval_libc_i32("PTHREAD_MUTEX_ERRORCHECK")?);
+            assert_ne!(normal_kind, this.eval_libc_i32("PTHREAD_MUTEX_RECURSIVE")?);
+            mutexattr_set_kind(this, attr_op, normal_kind)?;
+        } else if kind == this.eval_libc_i32("PTHREAD_MUTEX_DEFAULT")?
+            || kind == this.eval_libc_i32("PTHREAD_MUTEX_ERRORCHECK")?
+            || kind == this.eval_libc_i32("PTHREAD_MUTEX_RECURSIVE")?
         {
             mutexattr_set_kind(this, attr_op, kind)?;
         } else {
@@ -342,13 +299,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
 
         let attr = this.read_pointer(attr_op)?;
         let kind = if this.ptr_is_null(attr)? {
-            this.eval_libc("PTHREAD_MUTEX_DEFAULT")?
+            this.eval_libc_i32("PTHREAD_MUTEX_DEFAULT")?
         } else {
            mutexattr_get_kind(this, attr_op)?
        };
 
         // Write 0 to use the same code path as the static initializers.
-        mutex_set_id(this, mutex_op, Scalar::from_i32(0))?;
+        mutex_reset_id(this, mutex_op)?;
 
         mutex_set_kind(this, mutex_op, kind)?;
 
@@ -359,7 +316,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         let this = self.eval_context_mut();
 
         let kind = mutex_get_kind(this, mutex_op)?;
-        let id = this.mutex_get_or_create_id(mutex_op, MUTEX_ID_OFFSET)?;
+        let id = mutex_get_id(this, mutex_op)?;
         let active_thread = this.get_active_thread();
 
         if this.mutex_is_locked(id) {
@@ -374,9 +331,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
                     throw_ub_format!("trying to acquire already locked default mutex");
                 } else if is_mutex_kind_normal(this, kind)? {
                     throw_machine_stop!(TerminationInfo::Deadlock);
-                } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? {
+                } else if kind == this.eval_libc_i32("PTHREAD_MUTEX_ERRORCHECK")? {
                     this.eval_libc_i32("EDEADLK")
-                } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
+                } else if kind == this.eval_libc_i32("PTHREAD_MUTEX_RECURSIVE")? {
                     this.mutex_lock(id, active_thread);
                     Ok(0)
                 } else {
@@ -399,7 +356,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         let this = self.eval_context_mut();
 
         let kind = mutex_get_kind(this, mutex_op)?;
-        let id = this.mutex_get_or_create_id(mutex_op, MUTEX_ID_OFFSET)?;
+        let id = mutex_get_id(this, mutex_op)?;
         let active_thread = this.get_active_thread();
 
         if this.mutex_is_locked(id) {
@@ -409,10 +366,10 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
             } else {
                 if is_mutex_kind_default(this, kind)?
                     || is_mutex_kind_normal(this, kind)?
-                    || kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")?
+                    || kind == this.eval_libc_i32("PTHREAD_MUTEX_ERRORCHECK")?
                 {
                     this.eval_libc_i32("EBUSY")
-                } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
+                } else if kind == this.eval_libc_i32("PTHREAD_MUTEX_RECURSIVE")? {
                     this.mutex_lock(id, active_thread);
                     Ok(0)
                 } else {
@@ -435,7 +392,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         let this = self.eval_context_mut();
 
         let kind = mutex_get_kind(this, mutex_op)?;
-        let id = this.mutex_get_or_create_id(mutex_op, MUTEX_ID_OFFSET)?;
+        let id = mutex_get_id(this, mutex_op)?;
         let active_thread = this.get_active_thread();
 
         if let Some(_old_locked_count) = this.mutex_unlock(id, active_thread) {
@@ -453,8 +410,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
                 throw_ub_format!(
                     "unlocked a PTHREAD_MUTEX_NORMAL mutex that was not locked by the current thread"
                 );
-            } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")?
-                || kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")?
+            } else if kind == this.eval_libc_i32("PTHREAD_MUTEX_ERRORCHECK")?
+                || kind == this.eval_libc_i32("PTHREAD_MUTEX_RECURSIVE")?
             {
                 this.eval_libc_i32("EPERM")
             } else {
@@ -469,7 +426,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
     ) -> InterpResult<'tcx, i32> {
         let this = self.eval_context_mut();
 
-        let id = this.mutex_get_or_create_id(mutex_op, MUTEX_ID_OFFSET)?;
+        let id = mutex_get_id(this, mutex_op)?;
 
         if this.mutex_is_locked(id) {
             throw_ub_format!("destroyed a locked mutex");
@@ -492,7 +449,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
     ) -> InterpResult<'tcx, i32> {
         let this = self.eval_context_mut();
 
-        let id = this.rwlock_get_or_create_id(rwlock_op, RWLOCK_ID_OFFSET)?;
+        let id = rwlock_get_id(this, rwlock_op)?;
         let active_thread = this.get_active_thread();
 
         if this.rwlock_is_write_locked(id) {
@@ -510,7 +467,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
     ) -> InterpResult<'tcx, i32> {
         let this = self.eval_context_mut();
 
-        let id = this.rwlock_get_or_create_id(rwlock_op, RWLOCK_ID_OFFSET)?;
+        let id = rwlock_get_id(this, rwlock_op)?;
         let active_thread = this.get_active_thread();
 
         if this.rwlock_is_write_locked(id) {
@@ -527,7 +484,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
     ) -> InterpResult<'tcx, i32> {
         let this = self.eval_context_mut();
 
-        let id = this.rwlock_get_or_create_id(rwlock_op, RWLOCK_ID_OFFSET)?;
+        let id = rwlock_get_id(this, rwlock_op)?;
         let active_thread = this.get_active_thread();
 
         if this.rwlock_is_locked(id) {
@@ -557,7 +514,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
     ) -> InterpResult<'tcx, i32> {
         let this = self.eval_context_mut();
 
-        let id = this.rwlock_get_or_create_id(rwlock_op, RWLOCK_ID_OFFSET)?;
+        let id = rwlock_get_id(this, rwlock_op)?;
         let active_thread = this.get_active_thread();
 
         if this.rwlock_is_locked(id) {
@@ -574,7 +531,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
     ) -> InterpResult<'tcx, i32> {
         let this = self.eval_context_mut();
 
-        let id = this.rwlock_get_or_create_id(rwlock_op, RWLOCK_ID_OFFSET)?;
+        let id = rwlock_get_id(this, rwlock_op)?;
         let active_thread = this.get_active_thread();
         #[allow(clippy::if_same_then_else)]
         if this.rwlock_is_write_locked(id) {
@@ -593,7 +550,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
     ) -> InterpResult<'tcx, i32> {
         let this = self.eval_context_mut();
 
-        let id = this.rwlock_get_or_create_id(rwlock_op, RWLOCK_ID_OFFSET)?;
+        let id = rwlock_get_id(this, rwlock_op)?;
 
         if this.rwlock_is_locked(id) {
             throw_ub_format!("destroyed a locked rwlock");
@@ -618,7 +575,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         // The default value of the clock attribute shall refer to the system
         // clock.
         // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_condattr_setclock.html
-        let default_clock_id = this.eval_libc("CLOCK_REALTIME")?;
+        let default_clock_id = this.eval_libc_i32("CLOCK_REALTIME")?;
         condattr_set_clock_id(this, attr_op, default_clock_id)?;
 
         Ok(0)
@@ -631,9 +588,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
     ) -> InterpResult<'tcx, Scalar<Provenance>> {
         let this = self.eval_context_mut();
 
-        let clock_id = this.read_scalar(clock_id_op)?;
-        if clock_id == this.eval_libc("CLOCK_REALTIME")?
-            || clock_id == this.eval_libc("CLOCK_MONOTONIC")?
+        let clock_id = this.read_scalar(clock_id_op)?.to_i32()?;
+        if clock_id == this.eval_libc_i32("CLOCK_REALTIME")?
+            || clock_id == this.eval_libc_i32("CLOCK_MONOTONIC")?
         {
             condattr_set_clock_id(this, attr_op, clock_id)?;
         } else {
@@ -652,7 +609,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         let this = self.eval_context_mut();
 
         let clock_id = condattr_get_clock_id(this, attr_op)?;
-        this.write_scalar(clock_id, &this.deref_operand(clk_id_op)?.into())?;
+        this.write_scalar(Scalar::from_i32(clock_id), &this.deref_operand(clk_id_op)?.into())?;
 
         Ok(Scalar::from_i32(0))
     }
@@ -681,13 +638,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
 
         let attr = this.read_pointer(attr_op)?;
         let clock_id = if this.ptr_is_null(attr)? {
-            this.eval_libc("CLOCK_REALTIME")?
+            this.eval_libc_i32("CLOCK_REALTIME")?
         } else {
             condattr_get_clock_id(this, attr_op)?
         };
 
         // Write 0 to use the same code path as the static initializers.
-        cond_set_id(this, cond_op, Scalar::from_i32(0))?;
+        cond_reset_id(this, cond_op)?;
 
         cond_set_clock_id(this, cond_op, clock_id)?;
 
@@ -696,7 +653,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
 
     fn pthread_cond_signal(&mut self, cond_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx, i32> {
         let this = self.eval_context_mut();
-        let id = this.condvar_get_or_create_id(cond_op, CONDVAR_ID_OFFSET)?;
+        let id = cond_get_id(this, cond_op)?;
         if let Some((thread, lock)) = this.condvar_signal(id) {
             if let CondvarLock::Mutex(mutex) = lock {
                 post_cond_signal(this, thread, mutex)?;
@@ -713,7 +670,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         cond_op: &OpTy<'tcx, Provenance>,
     ) -> InterpResult<'tcx, i32> {
         let this = self.eval_context_mut();
-        let id = this.condvar_get_or_create_id(cond_op, CONDVAR_ID_OFFSET)?;
+        let id = cond_get_id(this, cond_op)?;
 
         while let Some((thread, lock)) = this.condvar_signal(id) {
             if let CondvarLock::Mutex(mutex) = lock {
@@ -733,8 +690,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
     ) -> InterpResult<'tcx, i32> {
         let this = self.eval_context_mut();
 
-        let id = this.condvar_get_or_create_id(cond_op, CONDVAR_ID_OFFSET)?;
-        let mutex_id = this.mutex_get_or_create_id(mutex_op, MUTEX_ID_OFFSET)?;
+        let id = cond_get_id(this, cond_op)?;
+        let mutex_id = mutex_get_id(this, mutex_op)?;
         let active_thread = this.get_active_thread();
 
         release_cond_mutex_and_block(this, active_thread, mutex_id)?;
@@ -752,12 +709,12 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
 
-        let id = this.condvar_get_or_create_id(cond_op, CONDVAR_ID_OFFSET)?;
-        let mutex_id = this.mutex_get_or_create_id(mutex_op, MUTEX_ID_OFFSET)?;
+        let id = cond_get_id(this, cond_op)?;
+        let mutex_id = mutex_get_id(this, mutex_op)?;
         let active_thread = this.get_active_thread();
 
         // Extract the timeout.
-        let clock_id = cond_get_clock_id(this, cond_op)?.to_i32()?;
+        let clock_id = cond_get_clock_id(this, cond_op)?;
         let duration = match this.read_timespec(&this.deref_operand(abstime_op)?)? {
             Some(duration) => duration,
             None => {
@@ -830,7 +787,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
     ) -> InterpResult<'tcx, i32> {
         let this = self.eval_context_mut();
 
-        let id = this.condvar_get_or_create_id(cond_op, CONDVAR_ID_OFFSET)?;
+        let id = cond_get_id(this, cond_op)?;
         if this.condvar_is_awaited(id) {
             throw_ub_format!("destroying an awaited conditional variable");
         }
@@ -846,11 +803,3 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         Ok(0)
     }
 }
-
-fn layout_of_maybe_uninit<'tcx>(tcx: TyCtxtAt<'tcx>, param: Ty<'tcx>) -> TyAndLayout<'tcx> {
-    let def_id = tcx.require_lang_item(LangItem::MaybeUninit, None);
-    let ty = tcx.bound_type_of(def_id).subst(*tcx, &[param.into()]);
-
-    let param_env = tcx.param_env(def_id);
-    tcx.layout_of(param_env.and(ty)).unwrap()
-}
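
Aside, for readers unfamiliar with the lazy-ID scheme the new `get_or_create_id` comments describe: the sketch below is not part of the patch and does not use Miri's API. It models the same idea with plain std atomics and invented names — the sync object carries a u32 field that is 0 while uninitialized, and the first thread to compare-exchange it from 0 to a fresh non-zero ID wins, while later callers reuse the stored value. Miri performs this CAS on the emulated memory via `atomic_compare_exchange_scalar` rather than on a host atomic.

// Standalone illustration only (assumed names, not Miri code).
use std::sync::atomic::{AtomicU32, Ordering};

// Global counter handing out fresh IDs; 0 is reserved to mean "uninitialized".
static NEXT_ID: AtomicU32 = AtomicU32::new(1);

fn get_or_create_id(slot: &AtomicU32) -> u32 {
    let existing = slot.load(Ordering::Relaxed);
    if existing != 0 {
        return existing; // already initialized by some earlier call
    }
    // Since initialization is lazy, this update has to be atomic.
    let candidate = NEXT_ID.fetch_add(1, Ordering::Relaxed);
    match slot.compare_exchange(0, candidate, Ordering::Relaxed, Ordering::Relaxed) {
        Ok(_) => candidate,    // we won the race and installed the ID
        Err(winner) => winner, // another thread initialized the slot first
    }
}

fn main() {
    // Mimics the zero-initialized id bytes inside a pthread_mutex_t / rwlock / cond.
    let lock_id_field = AtomicU32::new(0);
    assert_eq!(get_or_create_id(&lock_id_field), get_or_create_id(&lock_id_field));
}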