diff --git a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
index b18976302b4ff..d7610ea82748c 100644
--- a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
+++ b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
@@ -116,9 +116,9 @@ pub(super) fn op_to_const<'tcx>(
     // instead allow `ConstValue::Scalar` to store `ScalarMaybeUninit`, but that would affect all
     // the usual cases of extracting e.g. a `usize`, without there being a real use case for the
     // `Undef` situation.
-    let try_as_immediate = match op.layout.abi {
+    let try_as_immediate = match op.layout().abi {
         Abi::Scalar(abi::Scalar::Initialized { .. }) => true,
-        Abi::ScalarPair(..) => match op.layout.ty.kind() {
+        Abi::ScalarPair(..) => match op.layout().ty.kind() {
             ty::Ref(_, inner, _) => match *inner.kind() {
                 ty::Slice(elem) => elem == ecx.tcx.types.u8,
                 ty::Str => true,
diff --git a/compiler/rustc_const_eval/src/const_eval/mod.rs b/compiler/rustc_const_eval/src/const_eval/mod.rs
index bf65fdc54ca48..ac2fc6845b80d 100644
--- a/compiler/rustc_const_eval/src/const_eval/mod.rs
+++ b/compiler/rustc_const_eval/src/const_eval/mod.rs
@@ -119,7 +119,7 @@ pub(crate) fn try_destructure_mir_constant<'tcx>(
         .map(|i| {
             let field_op = ecx.operand_field(&down, i)?;
             let val = op_to_const(&ecx, &field_op);
-            Ok(mir::ConstantKind::Val(val, field_op.layout.ty))
+            Ok(mir::ConstantKind::Val(val, field_op.layout().ty))
         })
         .collect::<InterpResult<'tcx, Vec<_>>>()?;
     let fields = tcx.arena.alloc_from_iter(fields_iter);
diff --git a/compiler/rustc_const_eval/src/interpret/cast.rs b/compiler/rustc_const_eval/src/interpret/cast.rs
index 5d598b65c7224..61c3c229f3ad5 100644
--- a/compiler/rustc_const_eval/src/interpret/cast.rs
+++ b/compiler/rustc_const_eval/src/interpret/cast.rs
@@ -57,10 +57,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 
             Pointer(PointerCast::ReifyFnPointer) => {
                 // The src operand does not matter, just its type
-                match *src.layout.ty.kind() {
+                match *src.layout().ty.kind() {
                     ty::FnDef(def_id, substs) => {
                         // All reifications must be monomorphic, bail out otherwise.
-                        ensure_monomorphic_enough(*self.tcx, src.layout.ty)?;
+                        ensure_monomorphic_enough(*self.tcx, src.layout().ty)?;
 
                         let instance = ty::Instance::resolve_for_fn_ptr(
                             *self.tcx,
@@ -73,7 +73,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                         let fn_ptr = self.create_fn_alloc_ptr(FnVal::Instance(instance));
                         self.write_pointer(fn_ptr, dest)?;
                     }
-                    _ => span_bug!(self.cur_span(), "reify fn pointer on {:?}", src.layout.ty),
+                    _ => span_bug!(self.cur_span(), "reify fn pointer on {:?}", src.layout().ty),
                 }
             }
 
@@ -90,10 +90,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 
             Pointer(PointerCast::ClosureFnPointer(_)) => {
                 // The src operand does not matter, just its type
-                match *src.layout.ty.kind() {
+                match *src.layout().ty.kind() {
                     ty::Closure(def_id, substs) => {
                         // All reifications must be monomorphic, bail out otherwise.
-                        ensure_monomorphic_enough(*self.tcx, src.layout.ty)?;
+                        ensure_monomorphic_enough(*self.tcx, src.layout().ty)?;
 
                         let instance = ty::Instance::resolve_closure(
                             *self.tcx,
@@ -105,7 +105,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                         let fn_ptr = self.create_fn_alloc_ptr(FnVal::Instance(instance));
                         self.write_pointer(fn_ptr, dest)?;
                     }
-                    _ => span_bug!(self.cur_span(), "closure fn pointer on {:?}", src.layout.ty),
+                    _ => span_bug!(self.cur_span(), "closure fn pointer on {:?}", src.layout().ty),
                 }
             }
         }
@@ -328,7 +328,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             }
 
             _ => {
-                span_bug!(self.cur_span(), "invalid unsizing {:?} -> {:?}", src.layout.ty, cast_ty)
+                span_bug!(
+                    self.cur_span(),
+                    "invalid unsizing {:?} -> {:?}",
+                    src.layout().ty,
+                    cast_ty
+                )
             }
         }
     }
@@ -339,8 +344,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         cast_ty: TyAndLayout<'tcx>,
         dest: &PlaceTy<'tcx, M::PointerTag>,
     ) -> InterpResult<'tcx> {
-        trace!("Unsizing {:?} of type {} into {:?}", *src, src.layout.ty, cast_ty.ty);
-        match (&src.layout.ty.kind(), &cast_ty.ty.kind()) {
+        trace!("Unsizing {:?} of type {} into {:?}", *src, src.layout().ty, cast_ty.ty);
+        match (&src.layout().ty.kind(), &cast_ty.ty.kind()) {
             (&ty::Ref(_, s, _), &ty::Ref(_, c, _) | &ty::RawPtr(TypeAndMut { ty: c, .. }))
             | (&ty::RawPtr(TypeAndMut { ty: s, .. }), &ty::RawPtr(TypeAndMut { ty: c, .. })) => {
                 self.unsize_into_ptr(src, dest, *s, *c)
@@ -351,14 +356,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 // unsizing of generic struct with pointer fields
                 // Example: `Arc<T>` -> `Arc<Trait>`
                 // here we need to increase the size of every &T thin ptr field to a fat ptr
-                for i in 0..src.layout.fields.count() {
+                for i in 0..src.layout().fields.count() {
                     let cast_ty_field = cast_ty.field(self, i);
                     if cast_ty_field.is_zst() {
                         continue;
                     }
                     let src_field = self.operand_field(src, i)?;
                     let dst_field = self.place_field(dest, i)?;
-                    if src_field.layout.ty == cast_ty_field.ty {
+                    if src_field.layout().ty == cast_ty_field.ty {
                         self.copy_op(&src_field, &dst_field, /*allow_transmute*/ false)?;
                     } else {
                         self.unsize_into(&src_field, cast_ty_field, &dst_field)?;
@@ -369,7 +374,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             _ => span_bug!(
                 self.cur_span(),
                 "unsize_into: invalid conversion: {:?} -> {:?}",
-                src.layout,
+                src.layout(),
                 dest.layout
             ),
         }
diff --git a/compiler/rustc_const_eval/src/interpret/eval_context.rs b/compiler/rustc_const_eval/src/interpret/eval_context.rs
index 2e47cf8921073..4c617221d0b0b 100644
--- a/compiler/rustc_const_eval/src/interpret/eval_context.rs
+++ b/compiler/rustc_const_eval/src/interpret/eval_context.rs
@@ -2,10 +2,8 @@ use std::cell::Cell;
 use std::fmt;
 use std::mem;
 
-use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
 use rustc_hir::{self as hir, def_id::DefId, definitions::DefPathData};
 use rustc_index::vec::IndexVec;
-use rustc_macros::HashStable;
 use rustc_middle::mir;
 use rustc_middle::mir::interpret::{InterpError, InvalidProgramInfo};
 use rustc_middle::ty::layout::{
@@ -16,7 +14,6 @@ use rustc_middle::ty::{
     self, query::TyCtxtAt, subst::SubstsRef, ParamEnv, Ty, TyCtxt, TypeFoldable,
 };
 use rustc_mir_dataflow::storage::always_storage_live_locals;
-use rustc_query_system::ich::StableHashingContext;
 use rustc_session::Limit;
 use rustc_span::{Pos, Span};
 use rustc_target::abi::{call::FnAbi, Align, HasDataLayout, Size, TargetDataLayout};
@@ -142,7 +139,7 @@ pub struct FrameInfo<'tcx> {
 }
 
 /// Unwind information.
-#[derive(Clone, Copy, Eq, PartialEq, Debug, HashStable)]
+#[derive(Clone, Copy, Eq, PartialEq, Debug)]
 pub enum StackPopUnwind {
     /// The cleanup block.
     Cleanup(mir::BasicBlock),
@@ -152,7 +149,7 @@ pub enum StackPopUnwind {
     NotAllowed,
 }
 
-#[derive(Clone, Copy, Eq, PartialEq, Debug, HashStable)] // Miri debug-prints these
+#[derive(Clone, Copy, Eq, PartialEq, Debug)] // Miri debug-prints these
 pub enum StackPopCleanup {
     /// Jump to the next block in the caller, or cause UB if None (that's a function
     /// that may never return). Also store layout of return place so
@@ -168,16 +165,15 @@ pub enum StackPopCleanup {
 }
 
 /// State of a local variable including a memoized layout
-#[derive(Clone, Debug, PartialEq, Eq, HashStable)]
+#[derive(Clone, Debug)]
 pub struct LocalState<'tcx, Tag: Provenance = AllocId> {
     pub value: LocalValue<Tag>,
     /// Don't modify if `Some`, this is only used to prevent computing the layout twice
-    #[stable_hasher(ignore)]
     pub layout: Cell<Option<TyAndLayout<'tcx>>>,
 }
 
 /// Current value of a local variable
-#[derive(Copy, Clone, PartialEq, Eq, HashStable, Debug)] // Miri debug-prints these
+#[derive(Copy, Clone, Debug)] // Miri debug-prints these
 pub enum LocalValue<Tag: Provenance = AllocId> {
     /// This local is not currently alive, and cannot be used at all.
     Dead,
@@ -1021,31 +1017,3 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> std::fmt::Debug
         }
     }
 }
-
-impl<'ctx, 'mir, 'tcx, Tag: Provenance, Extra> HashStable<StableHashingContext<'ctx>>
-    for Frame<'mir, 'tcx, Tag, Extra>
-where
-    Extra: HashStable<StableHashingContext<'ctx>>,
-    Tag: HashStable<StableHashingContext<'ctx>>,
-{
-    fn hash_stable(&self, hcx: &mut StableHashingContext<'ctx>, hasher: &mut StableHasher) {
-        // Exhaustive match on fields to make sure we forget no field.
-        let Frame {
-            body,
-            instance,
-            return_to_block,
-            return_place,
-            locals,
-            loc,
-            extra,
-            tracing_span: _,
-        } = self;
-        body.hash_stable(hcx, hasher);
-        instance.hash_stable(hcx, hasher);
-        return_to_block.hash_stable(hcx, hasher);
-        return_place.hash_stable(hcx, hasher);
-        locals.hash_stable(hcx, hasher);
-        loc.hash_stable(hcx, hasher);
-        extra.hash_stable(hcx, hasher);
-    }
-}
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
index 93b64d9d37a49..04e87846885ca 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
@@ -604,7 +604,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         nonoverlapping: bool,
     ) -> InterpResult<'tcx> {
         let count = self.read_scalar(&count)?.to_machine_usize(self)?;
-        let layout = self.layout_of(src.layout.ty.builtin_deref(true).unwrap().ty)?;
+        let layout = self.layout_of(src.layout().ty.builtin_deref(true).unwrap().ty)?;
         let (size, align) = (layout.size, layout.align.abi);
         // `checked_mul` enforces a too small bound (the correct one would probably be machine_isize_max),
         // but no actual allocation can be big enough for the difference to be noticeable.
@@ -627,7 +627,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         byte: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
         count: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
     ) -> InterpResult<'tcx> {
-        let layout = self.layout_of(dst.layout.ty.builtin_deref(true).unwrap().ty)?;
+        let layout = self.layout_of(dst.layout().ty.builtin_deref(true).unwrap().ty)?;
 
         let dst = self.read_pointer(&dst)?;
         let byte = self.read_scalar(&byte)?.to_u8()?;
@@ -649,7 +649,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         lhs: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
         rhs: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
     ) -> InterpResult<'tcx, Scalar<M::PointerTag>> {
-        let layout = self.layout_of(lhs.layout.ty.builtin_deref(true).unwrap().ty)?;
+        let layout = self.layout_of(lhs.layout().ty.builtin_deref(true).unwrap().ty)?;
         assert!(!layout.is_unsized());
 
         let lhs = self.read_pointer(lhs)?;
diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs
index 1465b98629345..c797b59050e25 100644
--- a/compiler/rustc_const_eval/src/interpret/operand.rs
+++ b/compiler/rustc_const_eval/src/interpret/operand.rs
@@ -4,7 +4,6 @@
 use std::fmt::Write;
 
 use rustc_hir::def::Namespace;
-use rustc_macros::HashStable;
 use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt, TyAndLayout};
 use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter, Printer};
 use rustc_middle::ty::{ConstInt, DelaySpanBugEmitted, Ty};
@@ -25,7 +24,7 @@ use super::{
 /// operations and wide pointers. This idea was taken from rustc's codegen.
 /// In particular, thanks to `ScalarPair`, arithmetic operations and casts can be entirely
 /// defined on `Immediate`, and do not have to work with a `Place`.
-#[derive(Copy, Clone, PartialEq, Eq, HashStable, Hash, Debug)]
+#[derive(Copy, Clone, Debug)]
 pub enum Immediate<Tag: Provenance = AllocId> {
     /// A single scalar value (must have *initialized* `Scalar` ABI).
     /// FIXME: we also currently often use this for ZST.
@@ -179,68 +178,6 @@ impl<'tcx, Tag: Provenance> std::ops::Deref for ImmTy<'tcx, Tag> {
     }
 }
 
-/// An `Operand` is the result of computing a `mir::Operand`. It can be immediate,
-/// or still in memory. The latter is an optimization, to delay reading that chunk of
-/// memory and to avoid having to store arbitrary-sized data here.
-#[derive(Copy, Clone, PartialEq, Eq, HashStable, Hash, Debug)]
-pub enum Operand<Tag: Provenance = AllocId> {
-    Immediate(Immediate<Tag>),
-    Indirect(MemPlace<Tag>),
-}
-
-#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
-pub struct OpTy<'tcx, Tag: Provenance = AllocId> {
-    op: Operand<Tag>, // Keep this private; it helps enforce invariants.
-    pub layout: TyAndLayout<'tcx>,
-    /// rustc does not have a proper way to represent the type of a field of a `repr(packed)` struct:
-    /// it needs to have a different alignment than the field type would usually have.
-    /// So we represent this here with a separate field that "overwrites" `layout.align`.
-    /// This means `layout.align` should never be used for an `OpTy`!
-    /// `None` means "alignment does not matter since this is a by-value operand"
-    /// (`Operand::Immediate`); this field is only relevant for `Operand::Indirect`.
-    /// Also CTFE ignores alignment anyway, so this is for Miri only.
-    pub align: Option<Align>,
-}
-
-#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
-rustc_data_structures::static_assert_size!(OpTy<'_>, 88);
-
-impl<'tcx, Tag: Provenance> std::ops::Deref for OpTy<'tcx, Tag> {
-    type Target = Operand<Tag>;
-    #[inline(always)]
-    fn deref(&self) -> &Operand<Tag> {
-        &self.op
-    }
-}
-
-impl<'tcx, Tag: Provenance> From<MPlaceTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
-    #[inline(always)]
-    fn from(mplace: MPlaceTy<'tcx, Tag>) -> Self {
-        OpTy { op: Operand::Indirect(*mplace), layout: mplace.layout, align: Some(mplace.align) }
-    }
-}
-
-impl<'tcx, Tag: Provenance> From<&'_ MPlaceTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
-    #[inline(always)]
-    fn from(mplace: &MPlaceTy<'tcx, Tag>) -> Self {
-        OpTy { op: Operand::Indirect(**mplace), layout: mplace.layout, align: Some(mplace.align) }
-    }
-}
-
-impl<'tcx, Tag: Provenance> From<&'_ mut MPlaceTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
-    #[inline(always)]
-    fn from(mplace: &mut MPlaceTy<'tcx, Tag>) -> Self {
-        OpTy { op: Operand::Indirect(**mplace), layout: mplace.layout, align: Some(mplace.align) }
-    }
-}
-
-impl<'tcx, Tag: Provenance> From<ImmTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
-    #[inline(always)]
-    fn from(val: ImmTy<'tcx, Tag>) -> Self {
-        OpTy { op: Operand::Immediate(val.imm), layout: val.layout, align: None }
-    }
-}
-
 impl<'tcx, Tag: Provenance> ImmTy<'tcx, Tag> {
     #[inline]
     pub fn from_scalar(val: Scalar<Tag>, layout: TyAndLayout<'tcx>) -> Self {
@@ -284,15 +221,91 @@ impl<'tcx, Tag: Provenance> ImmTy<'tcx, Tag> {
     }
 }
 
+/// An `Operand` is the result of computing a `mir::Operand`. It can be immediate,
+/// or still in memory. The latter is an optimization, to delay reading that chunk of
+/// memory and to avoid having to store arbitrary-sized data here.
+#[derive(Copy, Clone, Debug)]
+pub enum Operand<Tag: Provenance = AllocId> {
+    Immediate(Immediate<Tag>),
+    Indirect(MemPlace<Tag>),
+}
+
+/// An `OpTy` is basically an `Operand` + layout.
+/// However, that representation would waste space since *only* the `Indirect` variant needs an `align` as well.
+/// So we instead copy the variants of `Operand`.
+#[derive(Copy, Clone, Debug)]
+pub enum OpTy<'tcx, Tag: Provenance = AllocId> {
+    Immediate(ImmTy<'tcx, Tag>),
+    Indirect(MPlaceTy<'tcx, Tag>),
+}
+
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+rustc_data_structures::static_assert_size!(OpTy<'_>, 80);
+
+impl<'tcx, Tag: Provenance> From<MPlaceTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
+    #[inline(always)]
+    fn from(mplace: MPlaceTy<'tcx, Tag>) -> Self {
+        OpTy::Indirect(mplace)
+    }
+}
+
+impl<'tcx, Tag: Provenance> From<&'_ MPlaceTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
+    #[inline(always)]
+    fn from(mplace: &MPlaceTy<'tcx, Tag>) -> Self {
+        OpTy::Indirect(*mplace)
+    }
+}
+
+impl<'tcx, Tag: Provenance> From<&'_ mut MPlaceTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
+    #[inline(always)]
+    fn from(mplace: &mut MPlaceTy<'tcx, Tag>) -> Self {
+        OpTy::Indirect(*mplace)
+    }
+}
+
+impl<'tcx, Tag: Provenance> From<ImmTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
+    #[inline(always)]
+    fn from(val: ImmTy<'tcx, Tag>) -> Self {
+        OpTy::Immediate(val)
+    }
+}
+
+impl<'tcx, Tag: Provenance> OpTy<'tcx, Tag> {
+    #[inline(always)]
+    pub fn op(&self) -> Operand<Tag> {
+        match self {
+            OpTy::Immediate(imm) => Operand::Immediate(**imm),
+            OpTy::Indirect(mplace) => Operand::Indirect(**mplace),
+        }
+    }
+
+    #[inline(always)]
+    pub fn layout(&self) -> &TyAndLayout<'tcx> {
+        match self {
+            OpTy::Immediate(imm) => &imm.layout,
+            OpTy::Indirect(mplace) => &mplace.layout,
+        }
+    }
+
+    #[inline(always)]
+    pub fn align(&self) -> Option<Align> {
+        match self {
+            OpTy::Immediate(_) => None,
+            OpTy::Indirect(mplace) => Some(mplace.align),
+        }
+    }
+}
+
 impl<'tcx, Tag: Provenance> OpTy<'tcx, Tag> {
     pub fn len(&self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> {
-        if self.layout.is_unsized() {
-            // There are no unsized immediates.
-            self.assert_mem_place().len(cx)
-        } else {
-            match self.layout.fields {
-                abi::FieldsShape::Array { count, .. } => Ok(count),
-                _ => bug!("len not supported on sized type {:?}", self.layout.ty),
+        match self.try_as_mplace() {
+            Ok(mplace) => mplace.len(cx),
+            Err(imm) => {
+                assert!(!imm.layout.is_unsized());
+                match imm.layout.fields {
+                    abi::FieldsShape::Array { count, .. } => Ok(count),
+                    _ => bug!("len not supported on immediate of type {:?}", imm.layout.ty),
+                }
             }
         }
     }
@@ -430,7 +443,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         if let Ok(imm) = self.read_immediate_raw(op, /*force*/ false)? {
             Ok(imm)
         } else {
-            span_bug!(self.cur_span(), "primitive read failed for type: {:?}", op.layout.ty);
+            span_bug!(self.cur_span(), "primitive read failed for type: {:?}", op.layout().ty);
         }
     }
 
@@ -468,7 +481,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::PointerTag>, u64)> {
         // Basically we just transmute this place into an array following simd_size_and_type.
         // This only works in memory, but repr(simd) types should never be immediates anyway.
-        assert!(op.layout.ty.is_simd());
+        assert!(op.layout().ty.is_simd());
         match op.try_as_mplace() {
             Ok(mplace) => self.mplace_to_simd(&mplace),
             Err(imm) => match *imm {
@@ -496,11 +509,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         let layout = self.layout_of_local(frame, local, layout)?;
         let op = if layout.is_zst() {
             // Bypass `access_local` (helps in ConstProp)
-            Operand::Immediate(Immediate::Uninit)
+            ImmTy::uninit(layout).into()
         } else {
-            *M::access_local(frame, local)?
+            OpTy::from_op(M::access_local(frame, local)?, layout, Some(layout.align.abi))
         };
-        Ok(OpTy { op, layout, align: Some(layout.align.abi) })
+        Ok(op)
     }
 
     /// Every place can be read from, so we can turn them into an operand.
@@ -514,10 +527,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         let op = match **place {
             Place::Ptr(mplace) => Operand::Indirect(mplace),
             Place::Local { frame, local } => {
-                *self.local_to_op(&self.stack()[frame], local, None)?
+                self.local_to_op(&self.stack()[frame], local, None)?.op()
             }
         };
-        Ok(OpTy { op, layout: place.layout, align: Some(place.align) })
+        Ok(OpTy::from_op(&op, place.layout, Some(place.align)))
     }
 
     /// Evaluate a place with the goal of reading from it. This lets us sometimes
@@ -537,7 +550,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             op = self.operand_projection(&op, elem)?
         }
 
-        trace!("eval_place_to_op: got {:?}", *op);
+        trace!("eval_place_to_op: got {:?}", op.op());
         // Sanity-check the type we ended up with.
         debug_assert!(
             mir_assign_valid_types(
@@ -546,11 +559,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 self.layout_of(self.subst_from_current_frame_and_normalize_erasing_regions(
                     mir_place.ty(&self.frame().body.local_decls, *self.tcx).ty
                 )?)?,
-                op.layout,
+                *op.layout(),
             ),
             "eval_place of a MIR place with type {:?} produced an interpreter operand with type {:?}",
             mir_place.ty(&self.frame().body.local_decls, *self.tcx).ty,
-            op.layout.ty,
+            op.layout().ty,
         );
         Ok(op)
     }
@@ -580,7 +593,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 self.mir_const_to_op(&val, layout)?
             }
         };
-        trace!("{:?}: {:?}", mir_op, *op);
+        trace!("{:?}: {:?}", mir_op, op.op());
         Ok(op)
     }
 
@@ -670,7 +683,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 ))
             }
         };
-        Ok(OpTy { op, layout, align: Some(layout.align.abi) })
+        Ok(OpTy::from_op(&op, layout, Some(layout.align.abi)))
     }
 
     /// Read discriminant, return the runtime value as well as the variant index.
@@ -679,9 +692,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         &self,
         op: &OpTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, (Scalar<M::PointerTag>, VariantIdx)> {
-        trace!("read_discriminant_value {:#?}", op.layout);
+        trace!("read_discriminant_value {:#?}", op.layout());
         // Get type and layout of the discriminant.
-        let discr_layout = self.layout_of(op.layout.ty.discriminant_ty(*self.tcx))?;
+        let discr_layout = self.layout_of(op.layout().ty.discriminant_ty(*self.tcx))?;
         trace!("discriminant type: {:?}", discr_layout.ty);
 
         // We use "discriminant" to refer to the value associated with a particular enum variant.
@@ -689,9 +702,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // declared list of variants -- they can differ with explicitly assigned discriminants.
         // We use "tag" to refer to how the discriminant is encoded in memory, which can be either
         // straight-forward (`TagEncoding::Direct`) or with a niche (`TagEncoding::Niche`).
-        let (tag_scalar_layout, tag_encoding, tag_field) = match op.layout.variants {
+        let (tag_scalar_layout, tag_encoding, tag_field) = match op.layout().variants {
             Variants::Single { index } => {
-                let discr = match op.layout.ty.discriminant_for_variant(*self.tcx, index) {
+                let discr = match op.layout().ty.discriminant_for_variant(*self.tcx, index) {
                     Some(discr) => {
                         // This type actually has discriminants.
                         assert_eq!(discr.ty, discr_layout.ty);
@@ -745,7 +758,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             self.cast_from_int_like(scalar, tag_val.layout, discr_layout.ty).unwrap();
         let discr_bits = discr_val.assert_bits(discr_layout.size);
         // Convert discriminant to variant index, and catch invalid discriminants.
-        let index = match *op.layout.ty.kind() {
+        let index = match *op.layout().ty.kind() {
             ty::Adt(adt, _) => {
                 adt.discriminants(*self.tcx).find(|(_, var)| var.val == discr_bits)
             }
@@ -801,7 +814,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     .checked_add(variant_index_relative)
                     .expect("overflow computing absolute variant idx");
                 let variants_len = op
-                    .layout
+                    .layout()
                     .ty
                     .ty_adt_def()
                     .expect("tagged layout for non adt")
diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs
index 2001359d199cf..d6522ed033f72 100644
--- a/compiler/rustc_const_eval/src/interpret/place.rs
+++ b/compiler/rustc_const_eval/src/interpret/place.rs
@@ -5,7 +5,6 @@ use std::hash::Hash;
 
 use rustc_ast::Mutability;
-use rustc_macros::HashStable;
 use rustc_middle::mir;
 use rustc_middle::ty;
 use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt, TyAndLayout};
@@ -17,7 +16,7 @@ use super::{
     Pointer, Provenance, Scalar, ScalarMaybeUninit,
 };
 
-#[derive(Copy, Clone, Hash, PartialEq, Eq, HashStable, Debug)]
+#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
 /// Information required for the sound usage of a `MemPlace`.
 pub enum MemPlaceMeta<Tag: Provenance = AllocId> {
     /// The unsized payload (e.g. length for slices or vtable pointer for trait objects).
@@ -51,7 +50,7 @@ impl<Tag: Provenance> MemPlaceMeta<Tag> {
     }
 }
 
-#[derive(Copy, Clone, Hash, PartialEq, Eq, HashStable, Debug)]
+#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
 pub struct MemPlace<Tag: Provenance = AllocId> {
     /// The pointer can be a pure integer, with the `None` tag.
     pub ptr: Pointer<Option<Tag>>,
@@ -64,7 +63,7 @@ pub struct MemPlace<Tag: Provenance = AllocId> {
 #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
 rustc_data_structures::static_assert_size!(MemPlace, 40);
 
-#[derive(Copy, Clone, Hash, PartialEq, Eq, HashStable, Debug)]
+#[derive(Copy, Clone, Debug)]
 pub enum Place<Tag: Provenance = AllocId> {
     /// A place referring to a value allocated in the `Memory` system.
     Ptr(MemPlace<Tag>),
@@ -265,16 +264,31 @@ impl<'tcx, Tag: Provenance> MPlaceTy<'tcx, Tag> {
 
 // These are defined here because they produce a place.
 impl<'tcx, Tag: Provenance> OpTy<'tcx, Tag> {
+    /// If `Op` is indirect, `align` *must* be `Some`.
+    ///
+    /// Be very careful with this function, a lot of code relies in `op` and `layout` matching
+    /// properly.
+    pub(super) fn from_op(
+        op: &Operand<Tag>,
+        layout: TyAndLayout<'tcx>,
+        align: Option<Align>,
+    ) -> Self {
+        match op {
+            Operand::Immediate(imm) => ImmTy::from_immediate(*imm, layout).into(),
+            Operand::Indirect(mplace) => {
+                OpTy::Indirect(MPlaceTy { mplace: *mplace, layout, align: align.unwrap() })
+            }
+        }
+    }
+
     #[inline(always)]
     /// Note: do not call `as_ref` on the resulting place. This function should only be used to
     /// read from the resulting mplace, not to get its address back.
     pub fn try_as_mplace(&self) -> Result<MPlaceTy<'tcx, Tag>, ImmTy<'tcx, Tag>> {
-        match **self {
-            Operand::Indirect(mplace) => {
-                Ok(MPlaceTy { mplace, layout: self.layout, align: self.align.unwrap() })
-            }
-            Operand::Immediate(_) if self.layout.is_zst() => Ok(MPlaceTy::dangling(self.layout)),
-            Operand::Immediate(imm) => Err(ImmTy::from_immediate(imm, self.layout)),
+        match self {
+            OpTy::Indirect(mplace) => Ok(*mplace),
+            OpTy::Immediate(imm) if imm.layout.is_zst() => Ok(MPlaceTy::dangling(imm.layout)),
+            OpTy::Immediate(imm) => Err(*imm),
         }
     }
@@ -645,12 +659,12 @@ where
         // We do NOT compare the types for equality, because well-typed code can
         // actually "transmute" `&mut T` to `&T` in an assignment without a cast.
         let layout_compat =
-            mir_assign_valid_types(*self.tcx, self.param_env, src.layout, dest.layout);
+            mir_assign_valid_types(*self.tcx, self.param_env, *src.layout(), dest.layout);
         if !allow_transmute && !layout_compat {
             span_bug!(
                 self.cur_span(),
                 "type mismatch when copying!\nsrc: {:?},\ndest: {:?}",
-                src.layout.ty,
+                src.layout().ty,
                 dest.layout.ty,
             );
         }
@@ -659,12 +673,12 @@ where
         // avoid force_allocation.
         let src = match self.read_immediate_raw(src, /*force*/ false)? {
             Ok(src_val) => {
-                assert!(!src.layout.is_unsized(), "cannot have unsized immediates");
+                assert!(!src.layout().is_unsized(), "cannot have unsized immediates");
                 assert!(
                     !dest.layout.is_unsized(),
                     "the src is sized, so the dest must also be sized"
                 );
-                assert_eq!(src.layout.size, dest.layout.size);
+                assert_eq!(src.layout().size, dest.layout.size);
                 // Yay, we got a value that we can write directly.
                 return if layout_compat {
                     self.write_immediate_no_validate(*src_val, dest)
@@ -676,7 +690,7 @@ where
         let dest_mem = self.force_allocation(dest)?;
         self.write_immediate_to_mplace_no_validate(
             *src_val,
-            src.layout,
+            *src.layout(),
             dest_mem.align,
             *dest_mem,
         )
diff --git a/compiler/rustc_const_eval/src/interpret/projection.rs b/compiler/rustc_const_eval/src/interpret/projection.rs
index 31fb6a8944df6..6f868a8714766 100644
--- a/compiler/rustc_const_eval/src/interpret/projection.rs
+++ b/compiler/rustc_const_eval/src/interpret/projection.rs
@@ -168,9 +168,9 @@ where
         variant: VariantIdx,
     ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
         // Downcast just changes the layout
-        let mut base = *base;
-        base.layout = base.layout.for_variant(self, variant);
-        Ok(base)
+        let layout = base.layout().for_variant(self, variant);
+        let align = base.align();
+        Ok(OpTy::from_op(&base.op(), layout, align))
     }
 
     //# Slice indexing
@@ -182,7 +182,7 @@ where
         index: u64,
     ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
         // Not using the layout method because we want to compute on u64
-        match base.layout.fields {
+        match base.layout().fields {
             abi::FieldsShape::Array { stride, count: _ } => {
                 // `count` is nonsense for slices, use the dynamic length instead.
                 let len = base.len(self)?;
@@ -192,7 +192,7 @@ where
                 }
                 let offset = stride * index; // `Size` multiplication
                 // All fields have the same layout.
-                let field_layout = base.layout.field(self, 0);
+                let field_layout = base.layout().field(self, 0);
                 assert!(!field_layout.is_unsized());
 
                 base.offset(offset, MemPlaceMeta::None, field_layout, self)
@@ -200,7 +200,7 @@ where
             _ => span_bug!(
                 self.cur_span(),
                 "`mplace_index` called on non-array type {:?}",
-                base.layout.ty
+                base.layout().ty
             ),
         }
     }
@@ -212,10 +212,10 @@ where
         base: &'a OpTy<'tcx, Tag>,
     ) -> InterpResult<'tcx, impl Iterator<Item = InterpResult<'tcx, OpTy<'tcx, Tag>>> + 'a> {
         let len = base.len(self)?; // also asserts that we have a type where this makes sense
-        let abi::FieldsShape::Array { stride, .. } = base.layout.fields else {
+        let abi::FieldsShape::Array { stride, .. } = base.layout().fields else {
             span_bug!(self.cur_span(), "operand_array_fields: expected an array layout");
         };
-        let layout = base.layout.field(self, 0);
+        let layout = base.layout().field(self, 0);
         let dl = &self.tcx.data_layout;
         // `Size` multiplication
         Ok((0..len).map(move |i| base.offset(stride * i, MemPlaceMeta::None, layout, dl)))
@@ -304,25 +304,33 @@ where
 
         // Not using layout method because that works with usize, and does not work with slices
         // (that have count 0 in their layout).
-        let from_offset = match base.layout.fields {
+        let from_offset = match base.layout().fields {
             abi::FieldsShape::Array { stride, .. } => stride * from, // `Size` multiplication is checked
             _ => {
-                span_bug!(self.cur_span(), "unexpected layout of index access: {:#?}", base.layout)
+                span_bug!(
+                    self.cur_span(),
+                    "unexpected layout of index access: {:#?}",
+                    base.layout()
+                )
             }
         };
 
         // Compute meta and new layout
         let inner_len = actual_to.checked_sub(from).unwrap();
-        let (meta, ty) = match base.layout.ty.kind() {
+        let (meta, ty) = match base.layout().ty.kind() {
             // It is not nice to match on the type, but that seems to be the only way to
             // implement this.
             ty::Array(inner, _) => (MemPlaceMeta::None, self.tcx.mk_array(*inner, inner_len)),
             ty::Slice(..) => {
                 let len = Scalar::from_machine_usize(inner_len, self);
-                (MemPlaceMeta::Meta(len), base.layout.ty)
+                (MemPlaceMeta::Meta(len), base.layout().ty)
             }
             _ => {
-                span_bug!(self.cur_span(), "cannot subslice non-array type: `{:?}`", base.layout.ty)
+                span_bug!(
+                    self.cur_span(),
+                    "cannot subslice non-array type: `{:?}`",
+                    base.layout().ty
+                )
             }
         };
         let layout = self.layout_of(ty)?;
diff --git a/compiler/rustc_const_eval/src/interpret/step.rs b/compiler/rustc_const_eval/src/interpret/step.rs
index 240910c08b2ed..5afdb4d699623 100644
--- a/compiler/rustc_const_eval/src/interpret/step.rs
+++ b/compiler/rustc_const_eval/src/interpret/step.rs
@@ -210,7 +210,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 
             Repeat(ref operand, _) => {
                 let src = self.eval_operand(operand, None)?;
-                assert!(!src.layout.is_unsized());
+                assert!(!src.layout().is_unsized());
 
                 let dest = self.force_allocation(&dest)?;
                 let length = dest.len(self)?;
diff --git a/compiler/rustc_const_eval/src/interpret/terminator.rs b/compiler/rustc_const_eval/src/interpret/terminator.rs
index 9e74b99ecd73b..0d949c9de5083 100644
--- a/compiler/rustc_const_eval/src/interpret/terminator.rs
+++ b/compiler/rustc_const_eval/src/interpret/terminator.rs
@@ -71,13 +71,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let func = self.eval_operand(func, None)?;
                 let args = self.eval_operands(args)?;
 
-                let fn_sig_binder = func.layout.ty.fn_sig(*self.tcx);
+                let fn_sig_binder = func.layout().ty.fn_sig(*self.tcx);
                 let fn_sig =
                     self.tcx.normalize_erasing_late_bound_regions(self.param_env, fn_sig_binder);
                 let extra_args = &args[fn_sig.inputs().len()..];
-                let extra_args = self.tcx.mk_type_list(extra_args.iter().map(|arg| arg.layout.ty));
+                let extra_args =
+                    self.tcx.mk_type_list(extra_args.iter().map(|arg| arg.layout().ty));
 
-                let (fn_val, fn_abi, with_caller_location) = match *func.layout.ty.kind() {
+                let (fn_val, fn_abi, with_caller_location) = match *func.layout().ty.kind() {
                     ty::FnPtr(_sig) => {
                         let fn_ptr = self.read_pointer(&func)?;
                         let fn_val = self.get_ptr_fn(fn_ptr)?;
@@ -95,7 +96,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     _ => span_bug!(
                         terminator.source_info.span,
                         "invalid callee of type {:?}",
-                        func.layout.ty
+                        func.layout().ty
                     ),
                 };
 
@@ -289,13 +290,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             throw_ub_format!(
                 "calling a function with argument of type {:?} passing data of type {:?}",
                 callee_arg.layout.ty,
-                caller_arg.layout.ty
+                caller_arg.layout().ty
             )
         }
         // Special handling for unsized parameters.
-        if caller_arg.layout.is_unsized() {
+        if caller_arg.layout().is_unsized() {
             // `check_argument_compat` ensures that both have the same type, so we know they will use the metadata the same way.
-            assert_eq!(caller_arg.layout.ty, callee_arg.layout.ty);
+            assert_eq!(caller_arg.layout().ty, callee_arg.layout.ty);
             // We have to properly pre-allocate the memory for the callee.
             // So let's tear down some wrappers.
             // This all has to be in memory, there are no immediate unsized values.
@@ -304,7 +305,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             let (dest_frame, dest_local) = callee_arg.assert_local();
             // We are just initializing things, so there can't be anything here yet.
             assert!(matches!(
-                *self.local_to_op(&self.stack()[dest_frame], dest_local, None)?,
+                self.local_to_op(&self.stack()[dest_frame], dest_local, None)?.op(),
                 Operand::Immediate(Immediate::Uninit)
             ));
             // Allocate enough memory to hold `src`.
@@ -416,7 +417,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             "caller ABI: {:?}, args: {:#?}",
             caller_abi,
             args.iter()
-                .map(|arg| (arg.layout.ty, format!("{:?}", **arg)))
+                .map(|arg| (arg.layout().ty, format!("{:?}", arg.op())))
                 .collect::<Vec<_>>()
         );
         trace!(
@@ -446,7 +447,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     args.iter()
                         .map(|&a| Ok(a))
                         .chain(
-                            (0..untuple_arg.layout.fields.count())
+                            (0..untuple_arg.layout().fields.count())
                                 .map(|i| self.operand_field(untuple_arg, i)),
                         )
                         .collect::<InterpResult<'_, Vec<OpTy<'tcx, M::PointerTag>>>>(
                         )
@@ -527,19 +528,19 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 // unwrap those newtypes until we are there.
                 let mut receiver = args[0];
                 let receiver_place = loop {
-                    match receiver.layout.ty.kind() {
+                    match receiver.layout().ty.kind() {
                         ty::Ref(..) | ty::RawPtr(..) => break self.deref_operand(&receiver)?,
                         ty::Dynamic(..) => break receiver.assert_mem_place(), // no immediate unsized values
                         _ => {
                             // Not there yet, search for the only non-ZST field.
                             let mut non_zst_field = None;
-                            for i in 0..receiver.layout.fields.count() {
+                            for i in 0..receiver.layout().fields.count() {
                                 let field = self.operand_field(&receiver, i)?;
-                                if !field.layout.is_zst() {
+                                if !field.layout().is_zst() {
                                     assert!(
                                         non_zst_field.is_none(),
                                         "multiple non-ZST fields in dyn receiver type {}",
-                                        receiver.layout.ty
+                                        receiver.layout().ty
                                     );
                                     non_zst_field = Some(field);
                                 }
@@ -547,7 +548,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                             receiver = non_zst_field.unwrap_or_else(|| {
                                 panic!(
                                     "no non-ZST fields in dyn receiver type {}",
-                                    receiver.layout.ty
+                                    receiver.layout().ty
                                 )
                             });
                         }
diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs
index 5114ce5d452b3..d67a2fabef9ed 100644
--- a/compiler/rustc_const_eval/src/interpret/validity.rs
+++ b/compiler/rustc_const_eval/src/interpret/validity.rs
@@ -518,7 +518,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
         value: &OpTy<'tcx, M::PointerTag>,
     ) -> InterpResult<'tcx, bool> {
         // Go over all the primitive types
-        let ty = value.layout.ty;
+        let ty = value.layout().ty;
         match ty.kind() {
             ty::Bool => {
                 let value = self.read_scalar(value)?;
@@ -768,7 +768,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
         field: usize,
         new_op: &OpTy<'tcx, M::PointerTag>,
     ) -> InterpResult<'tcx> {
-        let elem = self.aggregate_field_path_elem(old_op.layout, field);
+        let elem = self.aggregate_field_path_elem(*old_op.layout(), field);
         self.with_elem(elem, move |this| this.visit_value(new_op))
     }
 
@@ -779,11 +779,11 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
         variant_id: VariantIdx,
         new_op: &OpTy<'tcx, M::PointerTag>,
     ) -> InterpResult<'tcx> {
-        let name = match old_op.layout.ty.kind() {
+        let name = match old_op.layout().ty.kind() {
             ty::Adt(adt, _) => PathElem::Variant(adt.variant(variant_id).name),
             // Generators also have variants
             ty::Generator(..) => PathElem::GeneratorState(variant_id),
-            _ => bug!("Unexpected type with variant: {:?}", old_op.layout.ty),
+            _ => bug!("Unexpected type with variant: {:?}", old_op.layout().ty),
         };
         self.with_elem(name, move |this| this.visit_value(new_op))
     }
 
@@ -796,7 +796,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
     ) -> InterpResult<'tcx> {
         // Special check preventing `UnsafeCell` inside unions in the inner part of constants.
         if matches!(self.ctfe_mode, Some(CtfeValidationMode::Const { inner: true, .. })) {
-            if !op.layout.ty.is_freeze(self.ecx.tcx.at(DUMMY_SP), self.ecx.param_env) {
+            if !op.layout().ty.is_freeze(self.ecx.tcx.at(DUMMY_SP), self.ecx.param_env) {
                 throw_validation_failure!(self.path, { "`UnsafeCell` in a `const`" });
             }
         }
@@ -811,7 +811,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
 
     #[inline]
     fn visit_value(&mut self, op: &OpTy<'tcx, M::PointerTag>) -> InterpResult<'tcx> {
-        trace!("visit_value: {:?}, {:?}", *op, op.layout);
+        trace!("visit_value: {:?}, {:?}", *op, op.layout());
 
         // Check primitive types -- the leaves of our recursive descent.
         if self.try_visit_primitive(op)? {
@@ -819,7 +819,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
         }
 
         // Special check preventing `UnsafeCell` in the inner part of constants
-        if let Some(def) = op.layout.ty.ty_adt_def() {
+        if let Some(def) = op.layout().ty.ty_adt_def() {
             if matches!(self.ctfe_mode, Some(CtfeValidationMode::Const { inner: true, .. }))
                 && Some(def.did()) == self.ecx.tcx.lang_items().unsafe_cell_type()
             {
@@ -840,10 +840,10 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
         // FIXME: We could avoid some redundant checks here. For newtypes wrapping
        // scalars, we do the same check on every "level" (e.g., first we check
        // MyNewtype and then the scalar in there).
-        match op.layout.abi {
+        match op.layout().abi {
             Abi::Uninhabited => {
                 throw_validation_failure!(self.path,
-                    { "a value of uninhabited type {:?}", op.layout.ty }
+                    { "a value of uninhabited type {:?}", op.layout().ty }
                 );
             }
             Abi::Scalar(scalar_layout) => {
@@ -882,7 +882,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
         op: &OpTy<'tcx, M::PointerTag>,
         fields: impl Iterator<Item = InterpResult<'tcx, Self::V>>,
     ) -> InterpResult<'tcx> {
-        match op.layout.ty.kind() {
+        match op.layout().ty.kind() {
             ty::Str => {
                 let mplace = op.assert_mem_place(); // strings are unsized and hence never immediate
                 let len = mplace.len(self.ecx)?;
@@ -995,7 +995,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         ref_tracking: Option<&mut RefTracking<MPlaceTy<'tcx, M::PointerTag>, Vec<PathElem>>>,
         ctfe_mode: Option<CtfeValidationMode>,
     ) -> InterpResult<'tcx> {
-        trace!("validate_operand_internal: {:?}, {:?}", *op, op.layout.ty);
+        trace!("validate_operand_internal: {:?}, {:?}", *op, op.layout().ty);
 
         // Construct a visitor
         let mut visitor = ValidityVisitor { path, ref_tracking, ctfe_mode, ecx: self };
diff --git a/compiler/rustc_const_eval/src/interpret/visitor.rs b/compiler/rustc_const_eval/src/interpret/visitor.rs
index c262bca9bb4ee..dc0a83d004e0d 100644
--- a/compiler/rustc_const_eval/src/interpret/visitor.rs
+++ b/compiler/rustc_const_eval/src/interpret/visitor.rs
@@ -46,7 +46,7 @@ pub trait Value<'mir, 'tcx, M: Machine<'mir, 'tcx>>: Copy {
 impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> Value<'mir, 'tcx, M> for OpTy<'tcx, M::PointerTag> {
     #[inline(always)]
     fn layout(&self) -> TyAndLayout<'tcx> {
-        self.layout
+        *self.layout()
     }
 
     #[inline(always)]
diff --git a/compiler/rustc_mir_transform/src/const_prop.rs b/compiler/rustc_mir_transform/src/const_prop.rs
index fb5423dd157c2..c84abadff9b3f 100644
--- a/compiler/rustc_mir_transform/src/const_prop.rs
+++ b/compiler/rustc_mir_transform/src/const_prop.rs
@@ -553,11 +553,11 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
                 // where it makes sense.
                 if let interpret::Operand::Immediate(interpret::Immediate::Scalar(
                     ScalarMaybeUninit::Scalar(scalar),
-                )) = *value
+                )) = value.op()
                 {
                     *operand = self.operand_from_scalar(
                         scalar,
-                        value.layout.ty,
+                        value.layout().ty,
                         self.source_info.unwrap().span,
                     );
                 }
@@ -739,7 +739,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
                 interpret::Immediate::Scalar(ScalarMaybeUninit::Scalar(scalar)) => {
                     *rval = Rvalue::Use(self.operand_from_scalar(
                         scalar,
-                        value.layout.ty,
+                        value.layout().ty,
                         source_info.span,
                     ));
                 }
@@ -750,7 +750,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
                     // Found a value represented as a pair. For now only do const-prop if the type
                    // of `rvalue` is also a tuple with two scalars.
                    // FIXME: enable the general case stated above ^.
-                    let ty = value.layout.ty;
+                    let ty = value.layout().ty;
                     // Only do it for tuples
                     if let ty::Tuple(types) = ty.kind() {
                         // Only do it if tuple is also a pair with two scalars
@@ -763,7 +763,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
                             if ty_is_scalar(ty1) && ty_is_scalar(ty2) {
                                 let alloc = this
                                     .ecx
-                                    .intern_with_temp_alloc(value.layout, |ecx, dest| {
+                                    .intern_with_temp_alloc(*value.layout(), |ecx, dest| {
                                         ecx.write_immediate(*imm, dest)
                                     })
                                     .unwrap();
@@ -800,7 +800,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
             return false;
         }
 
-        match **op {
+        match op.op() {
             interpret::Operand::Immediate(Immediate::Scalar(ScalarMaybeUninit::Scalar(s))) => {
                 s.try_to_int().is_ok()
             }
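
The core change in this patch is representational: `OpTy` stops being a struct carrying an `Operand` plus a `layout` and an `Option<Align>` (where the align was only meaningful for the `Indirect` case), and becomes an enum whose two variants each carry exactly the data they need, with `layout()` and `align()` accessors replacing the old public fields. A minimal self-contained sketch of that pattern, using illustrative stand-in types rather than the compiler's real ones:

// Illustrative stand-ins, not rustc's actual definitions.
#[derive(Copy, Clone, Debug)]
struct Layout { size: u64, align: u64 }

#[derive(Copy, Clone, Debug)]
struct Immediate(u128);       // by-value payload; alignment is irrelevant

#[derive(Copy, Clone, Debug)]
struct MemPlace { addr: u64 } // in-memory payload; alignment matters

// Each variant stores exactly what it needs; the shared `layout` is
// reached through an accessor instead of a public struct field.
#[derive(Copy, Clone, Debug)]
enum OpTy {
    Immediate(Immediate, Layout),
    Indirect(MemPlace, Layout, u64), // only the indirect case carries an align
}

impl OpTy {
    fn layout(&self) -> &Layout {
        match self {
            OpTy::Immediate(_, layout) => layout,
            OpTy::Indirect(_, layout, _) => layout,
        }
    }

    // `align` is `Some` only for the indirect case, mirroring the old
    // `Option<Align>` field without every immediate paying for it.
    fn align(&self) -> Option<u64> {
        match self {
            OpTy::Immediate(..) => None,
            OpTy::Indirect(_, _, align) => Some(*align),
        }
    }
}

fn main() {
    let op = OpTy::Immediate(Immediate(42), Layout { size: 16, align: 8 });
    // Call sites change mechanically from `op.layout.size` to `op.layout().size`,
    // which is exactly the rewrite repeated throughout the diff above.
    println!("size = {}, align = {:?}", op.layout().size, op.align());
}

Making the immediate/indirect distinction part of the type also removes the invalid state the old struct allowed (an `Indirect` operand with `align: None`), which is why `try_as_mplace` and `len` simplify in the patch.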