Commit 3e610c2

adjust for earlier init checking in the core engine
1 parent 7865255 commit 3e610c2

33 files changed, +163 −200 lines
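
The common thread across these files: the core interpreter now rejects uninitialized scalars before Miri's own code runs, so helpers that used to traffic in `ScalarMaybeUninit<Provenance>` and call `check_init()` themselves can take and return plain `Scalar<Provenance>`. A minimal, self-contained sketch of that before/after pattern (stand-in types only, not Miri's real `Scalar`/`ScalarMaybeUninit`):

// Standalone model: `MaybeUninitScalar` plays the role of `ScalarMaybeUninit`,
// and `check_init` is the step callers used to perform themselves.
#[derive(Clone, Copy, Debug, PartialEq)]
enum Scalar {
    Int(u128),
}

#[derive(Clone, Copy, Debug)]
enum MaybeUninitScalar {
    Init(Scalar),
    Uninit,
}

impl MaybeUninitScalar {
    // Before this commit, every caller had to do this explicitly.
    fn check_init(self) -> Result<Scalar, String> {
        match self {
            MaybeUninitScalar::Init(s) => Ok(s),
            MaybeUninitScalar::Uninit => Err("using uninitialized data".into()),
        }
    }
}

// "Old" shape: the read itself may hand back possibly-uninit data.
fn read_scalar_old(raw: MaybeUninitScalar) -> Result<Scalar, String> {
    raw.check_init()
}

// "New" shape: the engine has already rejected uninit data by the time the
// shim runs, so the helper can return a plain Scalar directly.
fn read_scalar_new(already_checked: Scalar) -> Scalar {
    already_checked
}

fn main() {
    let v = MaybeUninitScalar::Init(Scalar::Int(42));
    assert_eq!(read_scalar_old(v).unwrap(), Scalar::Int(42));
    assert_eq!(read_scalar_new(Scalar::Int(42)), Scalar::Int(42));
    println!("both paths yield the same initialized value");
}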

src/concurrency/data_race.rs

+16 −16
@@ -443,7 +443,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         offset: u64,
         layout: TyAndLayout<'tcx>,
         atomic: AtomicReadOrd,
-    ) -> InterpResult<'tcx, ScalarMaybeUninit<Provenance>> {
+    ) -> InterpResult<'tcx, Scalar<Provenance>> {
         let this = self.eval_context_ref();
         let value_place = this.deref_operand_and_offset(op, offset, layout)?;
         this.read_scalar_atomic(&value_place, atomic)
@@ -454,7 +454,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         &mut self,
         op: &OpTy<'tcx, Provenance>,
         offset: u64,
-        value: impl Into<ScalarMaybeUninit<Provenance>>,
+        value: impl Into<Scalar<Provenance>>,
         layout: TyAndLayout<'tcx>,
         atomic: AtomicWriteOrd,
     ) -> InterpResult<'tcx> {
@@ -468,7 +468,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         &self,
         place: &MPlaceTy<'tcx, Provenance>,
         atomic: AtomicReadOrd,
-    ) -> InterpResult<'tcx, ScalarMaybeUninit<Provenance>> {
+    ) -> InterpResult<'tcx, Scalar<Provenance>> {
         let this = self.eval_context_ref();
         // This will read from the last store in the modification order of this location. In case
         // weak memory emulation is enabled, this may not be the store we will pick to actually read from and return.
@@ -485,7 +485,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
     /// Perform an atomic write operation at the memory location.
     fn write_scalar_atomic(
         &mut self,
-        val: ScalarMaybeUninit<Provenance>,
+        val: Scalar<Provenance>,
         dest: &MPlaceTy<'tcx, Provenance>,
         atomic: AtomicWriteOrd,
     ) -> InterpResult<'tcx> {
@@ -523,10 +523,10 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         this.validate_atomic_rmw(place, atomic)?;

         this.buffered_atomic_rmw(
-            val.to_scalar_or_uninit(),
+            val.to_scalar(),
             place,
             atomic,
-            old.to_scalar_or_uninit(),
+            old.to_scalar(),
         )?;
         Ok(old)
     }
@@ -536,9 +536,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
     fn atomic_exchange_scalar(
         &mut self,
         place: &MPlaceTy<'tcx, Provenance>,
-        new: ScalarMaybeUninit<Provenance>,
+        new: Scalar<Provenance>,
         atomic: AtomicRwOrd,
-    ) -> InterpResult<'tcx, ScalarMaybeUninit<Provenance>> {
+    ) -> InterpResult<'tcx, Scalar<Provenance>> {
         let this = self.eval_context_mut();

         this.validate_overlapping_atomic(place)?;
@@ -564,7 +564,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {

         this.validate_overlapping_atomic(place)?;
         let old = this.allow_data_races_mut(|this| this.read_immediate(&place.into()))?;
-        let lt = this.binary_op(mir::BinOp::Lt, &old, &rhs)?.to_scalar()?.to_bool()?;
+        let lt = this.binary_op(mir::BinOp::Lt, &old, &rhs)?.to_scalar().to_bool()?;

         let new_val = if min {
             if lt { &old } else { &rhs }
@@ -577,10 +577,10 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         this.validate_atomic_rmw(place, atomic)?;

         this.buffered_atomic_rmw(
-            new_val.to_scalar_or_uninit(),
+            new_val.to_scalar(),
             place,
             atomic,
-            old.to_scalar_or_uninit(),
+            old.to_scalar(),
         )?;

         // Return the old value.
@@ -597,7 +597,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         &mut self,
         place: &MPlaceTy<'tcx, Provenance>,
         expect_old: &ImmTy<'tcx, Provenance>,
-        new: ScalarMaybeUninit<Provenance>,
+        new: Scalar<Provenance>,
         success: AtomicRwOrd,
         fail: AtomicReadOrd,
         can_fail_spuriously: bool,
@@ -616,14 +616,14 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         // If the operation would succeed, but is "weak", fail some portion
         // of the time, based on `success_rate`.
         let success_rate = 1.0 - this.machine.cmpxchg_weak_failure_rate;
-        let cmpxchg_success = eq.to_scalar()?.to_bool()?
+        let cmpxchg_success = eq.to_scalar().to_bool()?
             && if can_fail_spuriously {
                 this.machine.rng.get_mut().gen_bool(success_rate)
             } else {
                 true
             };
         let res = Immediate::ScalarPair(
-            old.to_scalar_or_uninit(),
+            old.to_scalar(),
             Scalar::from_bool(cmpxchg_success).into(),
         );

@@ -633,14 +633,14 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         if cmpxchg_success {
             this.allow_data_races_mut(|this| this.write_scalar(new, &place.into()))?;
             this.validate_atomic_rmw(place, success)?;
-            this.buffered_atomic_rmw(new, place, success, old.to_scalar_or_uninit())?;
+            this.buffered_atomic_rmw(new, place, success, old.to_scalar())?;
         } else {
             this.validate_atomic_load(place, fail)?;
             // A failed compare exchange is equivalent to a load, reading from the latest store
             // in the modification order.
             // Since `old` is only a value and not the store element, we need to separately
             // find it in our store buffer and perform load_impl on it.
-            this.perform_read_on_buffered_latest(place, fail, old.to_scalar_or_uninit())?;
+            this.perform_read_on_buffered_latest(place, fail, old.to_scalar())?;
         }

         // Return the old value.
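
The compare-exchange hunks above keep the same decision logic while dropping the extra `?` on `to_scalar()`. A self-contained toy model of that logic, with a plain `u64`-backed scalar and the weak-failure coin flip passed in explicitly instead of drawn from `this.machine.rng` (names are illustrative, not Miri's):

#[derive(Clone, Copy, Debug, PartialEq)]
struct Scalar(u64);

/// Returns the pair Miri builds as `Immediate::ScalarPair(old, success)`:
/// the previous value plus a flag saying whether the write happened.
fn compare_exchange(
    slot: &mut Scalar,
    expect_old: Scalar,
    new: Scalar,
    can_fail_spuriously: bool,
    coin_says_succeed: bool, // stands in for rng.gen_bool(success_rate)
) -> (Scalar, bool) {
    let old = *slot;
    let eq = old == expect_old;
    // Same shape as the diff: a weak cmpxchg may fail even when the values match.
    let success = eq && if can_fail_spuriously { coin_says_succeed } else { true };
    if success {
        *slot = new;
    }
    (old, success)
}

fn main() {
    let mut slot = Scalar(1);
    // Strong cmpxchg with a matching expectation: must succeed.
    assert_eq!(compare_exchange(&mut slot, Scalar(1), Scalar(2), false, false), (Scalar(1), true));
    // Weak cmpxchg with a matching expectation can still fail spuriously.
    assert_eq!(compare_exchange(&mut slot, Scalar(2), Scalar(3), true, false), (Scalar(2), false));
    assert_eq!(slot, Scalar(2));
}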

src/concurrency/weak_memory.rs

+18 −18
@@ -78,7 +78,7 @@ use std::{
 };

 use rustc_const_eval::interpret::{
-    alloc_range, AllocRange, InterpResult, MPlaceTy, ScalarMaybeUninit,
+    alloc_range, AllocRange, InterpResult, MPlaceTy, Scalar,
 };
 use rustc_data_structures::fx::FxHashMap;

@@ -129,10 +129,10 @@ struct StoreElement {
     /// The timestamp of the storing thread when it performed the store
     timestamp: VTimestamp,
     /// The value of this store
-    // FIXME: this means the store is either fully initialized or fully uninitialized;
+    // FIXME: this means the store must be fully initialized;
     // we will have to change this if we want to support atomics on
-    // partially initialized data.
-    val: ScalarMaybeUninit<Provenance>,
+    // (partially) uninitialized data.
+    val: Scalar<Provenance>,

     /// Timestamp of first loads from this store element by each thread
     /// Behind a RefCell to keep load op take &self
@@ -179,7 +179,7 @@ impl StoreBufferAlloc {
     fn get_or_create_store_buffer<'tcx>(
         &self,
         range: AllocRange,
-        init: ScalarMaybeUninit<Provenance>,
+        init: Scalar<Provenance>,
     ) -> InterpResult<'tcx, Ref<'_, StoreBuffer>> {
         let access_type = self.store_buffers.borrow().access_type(range);
         let pos = match access_type {
@@ -204,7 +204,7 @@ impl StoreBufferAlloc {
     fn get_or_create_store_buffer_mut<'tcx>(
         &mut self,
         range: AllocRange,
-        init: ScalarMaybeUninit<Provenance>,
+        init: Scalar<Provenance>,
     ) -> InterpResult<'tcx, &mut StoreBuffer> {
         let buffers = self.store_buffers.get_mut();
         let access_type = buffers.access_type(range);
@@ -225,7 +225,7 @@ impl StoreBufferAlloc {
 }

 impl<'mir, 'tcx: 'mir> StoreBuffer {
-    fn new(init: ScalarMaybeUninit<Provenance>) -> Self {
+    fn new(init: Scalar<Provenance>) -> Self {
         let mut buffer = VecDeque::new();
         buffer.reserve(STORE_BUFFER_LIMIT);
         let mut ret = Self { buffer };
@@ -258,7 +258,7 @@ impl<'mir, 'tcx: 'mir> StoreBuffer {
         is_seqcst: bool,
         rng: &mut (impl rand::Rng + ?Sized),
         validate: impl FnOnce() -> InterpResult<'tcx>,
-    ) -> InterpResult<'tcx, (ScalarMaybeUninit<Provenance>, LoadRecency)> {
+    ) -> InterpResult<'tcx, (Scalar<Provenance>, LoadRecency)> {
         // Having a live borrow to store_buffer while calling validate_atomic_load is fine
         // because the race detector doesn't touch store_buffer

@@ -283,7 +283,7 @@ impl<'mir, 'tcx: 'mir> StoreBuffer {

     fn buffered_write(
         &mut self,
-        val: ScalarMaybeUninit<Provenance>,
+        val: Scalar<Provenance>,
         global: &DataRaceState,
         thread_mgr: &ThreadManager<'_, '_>,
         is_seqcst: bool,
@@ -374,7 +374,7 @@ impl<'mir, 'tcx: 'mir> StoreBuffer {
     /// ATOMIC STORE IMPL in the paper (except we don't need the location's vector clock)
     fn store_impl(
         &mut self,
-        val: ScalarMaybeUninit<Provenance>,
+        val: Scalar<Provenance>,
         index: VectorIdx,
         thread_clock: &VClock,
         is_seqcst: bool,
@@ -420,7 +420,7 @@ impl StoreElement {
         &self,
         index: VectorIdx,
         clocks: &ThreadClockSet,
-    ) -> ScalarMaybeUninit<Provenance> {
+    ) -> Scalar<Provenance> {
         let _ = self.loads.borrow_mut().try_insert(index, clocks.clock[index]);
         self.val
     }
@@ -463,10 +463,10 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:

     fn buffered_atomic_rmw(
         &mut self,
-        new_val: ScalarMaybeUninit<Provenance>,
+        new_val: Scalar<Provenance>,
         place: &MPlaceTy<'tcx, Provenance>,
         atomic: AtomicRwOrd,
-        init: ScalarMaybeUninit<Provenance>,
+        init: Scalar<Provenance>,
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
         let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(place.ptr)?;
@@ -491,9 +491,9 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:
         &self,
         place: &MPlaceTy<'tcx, Provenance>,
         atomic: AtomicReadOrd,
-        latest_in_mo: ScalarMaybeUninit<Provenance>,
+        latest_in_mo: Scalar<Provenance>,
         validate: impl FnOnce() -> InterpResult<'tcx>,
-    ) -> InterpResult<'tcx, ScalarMaybeUninit<Provenance>> {
+    ) -> InterpResult<'tcx, Scalar<Provenance>> {
         let this = self.eval_context_ref();
         if let Some(global) = &this.machine.data_race {
             let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(place.ptr)?;
@@ -528,10 +528,10 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:

     fn buffered_atomic_write(
         &mut self,
-        val: ScalarMaybeUninit<Provenance>,
+        val: Scalar<Provenance>,
         dest: &MPlaceTy<'tcx, Provenance>,
         atomic: AtomicWriteOrd,
-        init: ScalarMaybeUninit<Provenance>,
+        init: Scalar<Provenance>,
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
         let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(dest.ptr)?;
@@ -575,7 +575,7 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:
         &self,
         place: &MPlaceTy<'tcx, Provenance>,
         atomic: AtomicReadOrd,
-        init: ScalarMaybeUninit<Provenance>,
+        init: Scalar<Provenance>,
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_ref();
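
For orientation, a toy model of the store-buffer shape this file works with: each buffered store keeps a (now always-initialized) value plus bookkeeping about who has loaded it. Field types are stand-ins for `Scalar<Provenance>`, `VTimestamp`, and the per-thread load clocks, and the buffer limit value is an assumption, not the real constant:

use std::collections::VecDeque;

const STORE_BUFFER_LIMIT: usize = 128; // assumed value; mirrors the constant name above

#[derive(Debug)]
struct StoreElement {
    val: u64,          // plays the role of `Scalar<Provenance>`
    timestamp: u64,    // plays the role of `VTimestamp`
    loads: Vec<usize>, // thread indices that observed this store
}

#[derive(Default, Debug)]
struct StoreBuffer {
    buffer: VecDeque<StoreElement>,
}

impl StoreBuffer {
    fn store(&mut self, val: u64, timestamp: u64) {
        if self.buffer.len() >= STORE_BUFFER_LIMIT {
            self.buffer.pop_front(); // forget the oldest store
        }
        self.buffer.push_back(StoreElement { val, timestamp, loads: Vec::new() });
    }

    /// Read a store from the buffer; here simply the latest one. The real
    /// `buffered_read` may pick an older store, which is what makes
    /// weak-memory behaviours observable.
    fn load_latest(&mut self, thread: usize) -> Option<u64> {
        let elem = self.buffer.back_mut()?;
        elem.loads.push(thread);
        Some(elem.val)
    }
}

fn main() {
    let mut sb = StoreBuffer::default();
    sb.store(1, 10);
    sb.store(2, 11);
    assert_eq!(sb.load_latest(0), Some(2));
    println!("{:?}", sb);
}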

src/helpers.rs

+16 −8
@@ -89,8 +89,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         let instance = this.resolve_path(path);
         let cid = GlobalId { instance, promoted: None };
         let const_val = this.eval_to_allocation(cid)?;
-        let const_val = this.read_scalar(&const_val.into())?;
-        const_val.check_init()
+        this.read_scalar(&const_val.into())
     }

     /// Helper function to get a `libc` constant as a `Scalar`.
@@ -540,7 +539,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
     fn get_last_error(&mut self) -> InterpResult<'tcx, Scalar<Provenance>> {
         let this = self.eval_context_mut();
         let errno_place = this.last_error_place()?;
-        this.read_scalar(&errno_place.into())?.check_init()
+        this.read_scalar(&errno_place.into())
     }

     /// This function tries to produce the most similar OS error from the `std::io::ErrorKind`
@@ -650,22 +649,31 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         op: &OpTy<'tcx, Provenance>,
         offset: u64,
         layout: TyAndLayout<'tcx>,
-    ) -> InterpResult<'tcx, ScalarMaybeUninit<Provenance>> {
+    ) -> InterpResult<'tcx, Scalar<Provenance>> {
         let this = self.eval_context_ref();
         let value_place = this.deref_operand_and_offset(op, offset, layout)?;
         this.read_scalar(&value_place.into())
     }

+    fn write_immediate_at_offset(
+        &mut self,
+        op: &OpTy<'tcx, Provenance>,
+        offset: u64,
+        value: &ImmTy<'tcx, Provenance>,
+    ) -> InterpResult<'tcx, ()> {
+        let this = self.eval_context_mut();
+        let value_place = this.deref_operand_and_offset(op, offset, value.layout)?;
+        this.write_immediate(**value, &value_place.into())
+    }
+
     fn write_scalar_at_offset(
         &mut self,
         op: &OpTy<'tcx, Provenance>,
         offset: u64,
-        value: impl Into<ScalarMaybeUninit<Provenance>>,
+        value: impl Into<Scalar<Provenance>>,
         layout: TyAndLayout<'tcx>,
     ) -> InterpResult<'tcx, ()> {
-        let this = self.eval_context_mut();
-        let value_place = this.deref_operand_and_offset(op, offset, layout)?;
-        this.write_scalar(value, &value_place.into())
+        self.write_immediate_at_offset(op, offset, &ImmTy::from_scalar(value.into(), layout))
     }

     /// Parse a `timespec` struct and return it as a `std::time::Duration`. It returns `None`
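
The refactor above turns `write_scalar_at_offset` into a thin wrapper over the new `write_immediate_at_offset`. A self-contained sketch of that delegation pattern with stand-in types (not Miri's `OpTy`/`ImmTy`):

#[derive(Clone, Copy, Debug, PartialEq)]
struct Scalar(u64);

#[derive(Clone, Copy, Debug, PartialEq)]
enum Immediate {
    Scalar(Scalar),
    ScalarPair(Scalar, Scalar),
}

struct Memory {
    // one cell per offset, for illustration only
    cells: Vec<Option<Immediate>>,
}

impl Memory {
    // The general helper does the actual place computation and write.
    fn write_immediate_at_offset(&mut self, offset: usize, value: Immediate) {
        self.cells[offset] = Some(value);
    }

    // The scalar variant no longer duplicates that work; it wraps the scalar
    // into an immediate and forwards, like the diff does via `ImmTy::from_scalar`.
    fn write_scalar_at_offset(&mut self, offset: usize, value: impl Into<Scalar>) {
        self.write_immediate_at_offset(offset, Immediate::Scalar(value.into()));
    }
}

impl From<u64> for Scalar {
    fn from(v: u64) -> Self {
        Scalar(v)
    }
}

fn main() {
    let mut mem = Memory { cells: vec![None; 4] };
    mem.write_scalar_at_offset(2, 7u64);
    assert_eq!(mem.cells[2], Some(Immediate::Scalar(Scalar(7))));
}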

src/machine.rs

+2 −7
@@ -150,7 +150,7 @@ static_assert_size!(Pointer<Provenance>, 24);
 // #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
 //static_assert_size!(Pointer<Option<Provenance>>, 24);
 #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
-static_assert_size!(ScalarMaybeUninit<Provenance>, 32);
+static_assert_size!(Scalar<Provenance>, 32);

 impl interpret::Provenance for Provenance {
     /// We use absolute addresses in the `offset` of a `Pointer<Provenance>`.
@@ -531,7 +531,7 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> {
     }

     #[inline(always)]
-    fn force_int_for_alignment_check(ecx: &MiriEvalContext<'mir, 'tcx>) -> bool {
+    fn use_addr_for_alignment_check(ecx: &MiriEvalContext<'mir, 'tcx>) -> bool {
         ecx.machine.check_alignment == AlignmentCheck::Int
     }

@@ -540,11 +540,6 @@
         ecx.machine.validate
     }

-    #[inline(always)]
-    fn enforce_number_init(_ecx: &MiriEvalContext<'mir, 'tcx>) -> bool {
-        true
-    }
-
     #[inline(always)]
     fn enforce_abi(ecx: &MiriEvalContext<'mir, 'tcx>) -> bool {
         ecx.machine.enforce_abi
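
One way to read the removal of `enforce_number_init`: once the engine performs this init check itself, a machine override that merely returns `true` adds nothing. A hypothetical sketch of that hook-with-default shape; the names are illustrative and this is not rustc's actual `Machine` trait, and whether the upstream check is unconditional or a changed default is an assumption here:

// Toy trait standing in for the engine's machine hooks.
trait MachineHooks {
    // Suppose the engine now checks integer/float initialization by default,
    // so this hook defaults to `true` (or disappears entirely upstream).
    fn enforce_number_init(&self) -> bool {
        true
    }

    fn enforce_abi(&self) -> bool;
}

struct Evaluator {
    enforce_abi: bool,
}

impl MachineHooks for Evaluator {
    // No `enforce_number_init` override needed anymore: the default suffices.
    fn enforce_abi(&self) -> bool {
        self.enforce_abi
    }
}

fn main() {
    let m = Evaluator { enforce_abi: true };
    assert!(m.enforce_number_init());
    assert!(m.enforce_abi());
}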

src/operator.rs

+10 −12
@@ -32,16 +32,14 @@ impl<'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'mir, 'tcx> {
             // Just compare the bits. ScalarPairs are compared lexicographically.
             // We thus always compare pairs and simply fill scalars up with 0.
             let left = match **left {
-                Immediate::Scalar(l) => (l.check_init()?.to_bits(size)?, 0),
-                Immediate::ScalarPair(l1, l2) =>
-                    (l1.check_init()?.to_bits(size)?, l2.check_init()?.to_bits(size)?),
-                Immediate::Uninit => throw_ub!(InvalidUninitBytes(None)),
+                Immediate::Scalar(l) => (l.to_bits(size)?, 0),
+                Immediate::ScalarPair(l1, l2) => (l1.to_bits(size)?, l2.to_bits(size)?),
+                Immediate::Uninit => panic!("we should never see uninit data here"),
             };
             let right = match **right {
-                Immediate::Scalar(r) => (r.check_init()?.to_bits(size)?, 0),
-                Immediate::ScalarPair(r1, r2) =>
-                    (r1.check_init()?.to_bits(size)?, r2.check_init()?.to_bits(size)?),
-                Immediate::Uninit => throw_ub!(InvalidUninitBytes(None)),
+                Immediate::Scalar(r) => (r.to_bits(size)?, 0),
+                Immediate::ScalarPair(r1, r2) => (r1.to_bits(size)?, r2.to_bits(size)?),
+                Immediate::Uninit => panic!("we should never see uninit data here"),
             };
             let res = match bin_op {
                 Eq => left == right,
@@ -57,8 +55,8 @@ impl<'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'mir, 'tcx> {

             Offset => {
                 assert!(left.layout.ty.is_unsafe_ptr());
-                let ptr = left.to_scalar()?.to_pointer(self)?;
-                let offset = right.to_scalar()?.to_machine_isize(self)?;
+                let ptr = left.to_scalar().to_pointer(self)?;
+                let offset = right.to_scalar().to_machine_isize(self)?;

                 let pointee_ty =
                     left.layout.ty.builtin_deref(true).expect("Offset called on non-ptr type").ty;
@@ -71,11 +69,11 @@ impl<'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'mir, 'tcx> {
             Add | Sub | BitOr | BitAnd | BitXor => {
                 assert!(left.layout.ty.is_unsafe_ptr());
                 assert!(right.layout.ty.is_unsafe_ptr());
-                let ptr = left.to_scalar()?.to_pointer(self)?;
+                let ptr = left.to_scalar().to_pointer(self)?;
                 // We do the actual operation with usize-typed scalars.
                 let left = ImmTy::from_uint(ptr.addr().bytes(), self.machine.layouts.usize);
                 let right = ImmTy::from_uint(
-                    right.to_scalar()?.to_machine_usize(self)?,
+                    right.to_scalar().to_machine_usize(self)?,
                     self.machine.layouts.usize,
                 );
                 let (result, overflowing, _ty) =
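
The pointer comparisons above pad a lone scalar to a pair with 0 and then compare pairs lexicographically. A tiny self-contained model of that strategy, using `u128` for the raw bits of a scalar (stand-in types, not Miri's `Immediate`):

#[derive(Clone, Copy)]
enum Immediate {
    Scalar(u128),
    ScalarPair(u128, u128),
}

// Widen a single scalar into a pair padded with 0, as the comment above describes.
fn as_pair(imm: Immediate) -> (u128, u128) {
    match imm {
        Immediate::Scalar(l) => (l, 0),
        Immediate::ScalarPair(l1, l2) => (l1, l2),
    }
}

fn ptr_eq(left: Immediate, right: Immediate) -> bool {
    // Rust tuples already compare lexicographically, matching
    // "ScalarPairs are compared lexicographically".
    as_pair(left) == as_pair(right)
}

fn main() {
    assert!(ptr_eq(Immediate::Scalar(8), Immediate::ScalarPair(8, 0)));
    assert!(!ptr_eq(Immediate::ScalarPair(8, 1), Immediate::ScalarPair(8, 0)));
}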
