From 2bc59c7ae2631863e947ccdba42b989feeec6b36 Mon Sep 17 00:00:00 2001 From: b-naber Date: Tue, 12 Apr 2022 13:08:47 +0200 Subject: [PATCH 1/9] add some helper methods to ScalarInt --- compiler/rustc_middle/src/ty/consts/int.rs | 92 ++++++++++++++++++++++ 1 file changed, 92 insertions(+) diff --git a/compiler/rustc_middle/src/ty/consts/int.rs b/compiler/rustc_middle/src/ty/consts/int.rs index 72623ba54eeb4..a3ce674c11524 100644 --- a/compiler/rustc_middle/src/ty/consts/int.rs +++ b/compiler/rustc_middle/src/ty/consts/int.rs @@ -237,6 +237,98 @@ impl ScalarInt { pub fn try_to_machine_usize<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Result { Ok(self.to_bits(tcx.data_layout.pointer_size)? as u64) } + + /// Tries to convert the `ScalarInt` to an unsigned integer of the given size. + /// Fails if the size of the `ScalarInt` is unequal to `size` and returns the + /// `ScalarInt`s size in that case. + #[inline] + pub fn try_to_uint(self, size: Size) -> Result { + self.to_bits(size) + } + + /// Tries to convert the `ScalarInt` to `u8`. Fails if the `size` of the `ScalarInt` + /// is not equal to `Size { raw: 1 }` and returns the `size` value of the `ScalarInt` in + /// that case. + #[inline] + pub fn try_to_u8(self) -> Result { + self.to_bits(Size::from_bits(8)).map(|v| u8::try_from(v).unwrap()) + } + + /// Tries to convert the `ScalarInt` to `u16`. Fails if the size of the `ScalarInt` + /// is not equal to `Size { raw: 2 }` and returns the `size` value of the `ScalarInt` in + /// that case. + #[inline] + pub fn try_to_u16(self) -> Result { + self.to_bits(Size::from_bits(16)).map(|v| u16::try_from(v).unwrap()) + } + + /// Tries to convert the `ScalarInt` to `u32`. Fails if the `size` of the `ScalarInt` + /// is not equal to `Size { raw: 4 }` and returns the `size` value of the `ScalarInt` in + /// that case. + #[inline] + pub fn try_to_u32(self) -> Result { + self.to_bits(Size::from_bits(32)).map(|v| u32::try_from(v).unwrap()) + } + + /// Tries to convert the `ScalarInt` to `u64`. 
Fails if the `size` of the `ScalarInt` + /// is not equal to `Size { raw: 8 }` and returns the `size` value of the `ScalarInt` in + /// that case. + #[inline] + pub fn try_to_u64(self) -> Result { + self.to_bits(Size::from_bits(64)).map(|v| u64::try_from(v).unwrap()) + } + + /// Tries to convert the `ScalarInt` to `u128`. Fails if the `size` of the `ScalarInt` + /// is not equal to `Size { raw: 16 }` and returns the `size` value of the `ScalarInt` in + /// that case. + #[inline] + pub fn try_to_u128(self) -> Result { + self.to_bits(Size::from_bits(128)) + } + + /// Tries to convert the `ScalarInt` to a signed integer of the given size. + /// Fails if the size of the `ScalarInt` is unequal to `size` and returns the + /// `ScalarInt`s size in that case. + #[inline] + pub fn try_to_int(self, size: Size) -> Result { + let b = self.to_bits(size)?; + Ok(size.sign_extend(b) as i128) + } + + /// Tries to convert the `ScalarInt` to i8. + /// Fails if the size of the `ScalarInt` is unequal to `Size { raw: 1 }` + /// and returns the `ScalarInt`s size in that case. + pub fn try_to_i8(self) -> Result { + self.try_to_int(Size::from_bits(8)).map(|v| i8::try_from(v).unwrap()) + } + + /// Tries to convert the `ScalarInt` to i16. + /// Fails if the size of the `ScalarInt` is unequal to `Size { raw: 2 }` + /// and returns the `ScalarInt`s size in that case. + pub fn try_to_i16(self) -> Result { + self.try_to_int(Size::from_bits(16)).map(|v| i16::try_from(v).unwrap()) + } + + /// Tries to convert the `ScalarInt` to i32. + /// Fails if the size of the `ScalarInt` is unequal to `Size { raw: 4 }` + /// and returns the `ScalarInt`s size in that case. + pub fn try_to_i32(self) -> Result { + self.try_to_int(Size::from_bits(32)).map(|v| i32::try_from(v).unwrap()) + } + + /// Tries to convert the `ScalarInt` to i64. + /// Fails if the size of the `ScalarInt` is unequal to `Size { raw: 8 }` + /// and returns the `ScalarInt`s size in that case. 
+ pub fn try_to_i64(self) -> Result { + self.try_to_int(Size::from_bits(64)).map(|v| i64::try_from(v).unwrap()) + } + + /// Tries to convert the `ScalarInt` to i128. + /// Fails if the size of the `ScalarInt` is unequal to `Size { raw: 16 }` + /// and returns the `ScalarInt`s size in that case. + pub fn try_to_i128(self) -> Result { + self.try_to_int(Size::from_bits(128)).map(|v| i128::try_from(v).unwrap()) + } } macro_rules! from { From eaf8cdaa0bcf7bc188da8d8d0a35126cf37b0580 Mon Sep 17 00:00:00 2001 From: b-naber Date: Tue, 12 Apr 2022 15:07:35 +0200 Subject: [PATCH 2/9] add helper methods on ValTree --- compiler/rustc_middle/src/ty/consts/valtree.rs | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/compiler/rustc_middle/src/ty/consts/valtree.rs b/compiler/rustc_middle/src/ty/consts/valtree.rs index 195760c059081..e17c69cb3537e 100644 --- a/compiler/rustc_middle/src/ty/consts/valtree.rs +++ b/compiler/rustc_middle/src/ty/consts/valtree.rs @@ -31,4 +31,20 @@ impl<'tcx> ValTree<'tcx> { pub fn zst() -> Self { Self::Branch(&[]) } + + #[inline] + pub fn unwrap_leaf(self) -> ScalarInt { + match self { + Self::Leaf(s) => s, + _ => bug!("expected leaf, got {:?}", self), + } + } + + #[inline] + pub fn unwrap_branch(self) -> &'tcx [Self] { + match self { + Self::Branch(branch) => branch, + _ => bug!("expected branch, got {:?}", self), + } + } } From 1157dc7167d13a9bdcafd30b8ad0ecf5ae5faa7f Mon Sep 17 00:00:00 2001 From: b-naber Date: Tue, 5 Apr 2022 16:33:42 +0200 Subject: [PATCH 3/9] implement valtree -> constvalue conversion --- .../src/const_eval/eval_queries.rs | 38 +- .../rustc_const_eval/src/const_eval/mod.rs | 136 +---- .../src/const_eval/valtrees.rs | 479 ++++++++++++++++++ .../rustc_const_eval/src/interpret/mod.rs | 2 +- .../rustc_const_eval/src/interpret/operand.rs | 5 +- .../rustc_const_eval/src/interpret/place.rs | 27 +- .../src/mir/interpret/allocation.rs | 5 + .../rustc_middle/src/mir/interpret/pointer.rs | 2 +- 
.../rustc_middle/src/ty/consts/valtree.rs | 3 + 9 files changed, 546 insertions(+), 151 deletions(-) create mode 100644 compiler/rustc_const_eval/src/const_eval/valtrees.rs diff --git a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs index 7cca6178ab257..38fecf7232ebc 100644 --- a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs +++ b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs @@ -106,6 +106,7 @@ pub(super) fn mk_eval_cx<'mir, 'tcx>( /// This function converts an interpreter value into a constant that is meant for use in the /// type system. +#[instrument(skip(ecx), level = "debug")] pub(super) fn op_to_const<'tcx>( ecx: &CompileTimeEvalContext<'_, 'tcx>, op: &OpTy<'tcx>, @@ -140,21 +141,26 @@ pub(super) fn op_to_const<'tcx>( op.try_as_mplace() }; + debug!(?immediate); + // We know `offset` is relative to the allocation, so we can use `into_parts`. - let to_const_value = |mplace: &MPlaceTy<'_>| match mplace.ptr.into_parts() { - (Some(alloc_id), offset) => { - let alloc = ecx.tcx.global_alloc(alloc_id).unwrap_memory(); - ConstValue::ByRef { alloc, offset } - } - (None, offset) => { - assert!(mplace.layout.is_zst()); - assert_eq!( - offset.bytes() % mplace.layout.align.abi.bytes(), - 0, - "this MPlaceTy must come from a validated constant, thus we can assume the \ + let to_const_value = |mplace: &MPlaceTy<'_>| { + debug!("to_const_value(mplace: {:?})", mplace); + match mplace.ptr.into_parts() { + (Some(alloc_id), offset) => { + let alloc = ecx.tcx.global_alloc(alloc_id).unwrap_memory(); + ConstValue::ByRef { alloc, offset } + } + (None, offset) => { + assert!(mplace.layout.is_zst()); + assert_eq!( + offset.bytes() % mplace.layout.align.abi.bytes(), + 0, + "this MPlaceTy must come from a validated constant, thus we can assume the \ alignment is correct", - ); - ConstValue::Scalar(Scalar::ZST) + ); + ConstValue::Scalar(Scalar::ZST) + } } }; match immediate { @@ -166,6 +172,7 @@ 
pub(super) fn op_to_const<'tcx>( ScalarMaybeUninit::Uninit => to_const_value(&op.assert_mem_place()), }, Immediate::ScalarPair(a, b) => { + debug!("ScalarPair(a: {:?}, b: {:?})", a, b); // We know `offset` is relative to the allocation, so we can use `into_parts`. let (data, start) = match ecx.scalar_to_ptr(a.check_init().unwrap()).unwrap().into_parts() { @@ -209,7 +216,10 @@ fn turn_into_const_value<'tcx>( ); // Turn this into a proper constant. - op_to_const(&ecx, &mplace.into()) + let const_val = op_to_const(&ecx, &mplace.into()); + debug!(?const_val); + + const_val } pub fn eval_to_const_value_raw_provider<'tcx>( diff --git a/compiler/rustc_const_eval/src/const_eval/mod.rs b/compiler/rustc_const_eval/src/const_eval/mod.rs index 68f9bee593f65..96c18d488ee8c 100644 --- a/compiler/rustc_const_eval/src/const_eval/mod.rs +++ b/compiler/rustc_const_eval/src/const_eval/mod.rs @@ -3,29 +3,26 @@ use std::convert::TryFrom; use rustc_hir::Mutability; -use rustc_middle::ty::layout::HasTyCtxt; +use rustc_middle::mir; use rustc_middle::ty::{self, TyCtxt}; -use rustc_middle::{ - mir::{self, interpret::ConstAlloc}, - ty::ScalarInt, -}; use rustc_span::{source_map::DUMMY_SP, symbol::Symbol}; -use rustc_target::abi::VariantIdx; use crate::interpret::{ - intern_const_alloc_recursive, ConstValue, InternKind, InterpCx, InterpResult, MPlaceTy, - MemPlaceMeta, Scalar, + intern_const_alloc_recursive, ConstValue, InternKind, InterpCx, InterpResult, MemPlaceMeta, + Scalar, }; mod error; mod eval_queries; mod fn_queries; mod machine; +mod valtrees; pub use error::*; pub use eval_queries::*; pub use fn_queries::*; pub use machine::*; +pub(crate) use valtrees::{const_to_valtree, valtree_to_const_value}; pub(crate) fn const_caller_location( tcx: TyCtxt<'_>, @@ -41,128 +38,6 @@ pub(crate) fn const_caller_location( ConstValue::Scalar(Scalar::from_maybe_pointer(loc_place.ptr, &tcx)) } -/// Convert an evaluated constant to a type level constant -pub(crate) fn const_to_valtree<'tcx>( - tcx: 
TyCtxt<'tcx>, - param_env: ty::ParamEnv<'tcx>, - raw: ConstAlloc<'tcx>, -) -> Option> { - let ecx = mk_eval_cx( - tcx, DUMMY_SP, param_env, - // It is absolutely crucial for soundness that - // we do not read from static items or other mutable memory. - false, - ); - let place = ecx.raw_const_to_mplace(raw).unwrap(); - const_to_valtree_inner(&ecx, &place) -} - -#[instrument(skip(ecx), level = "debug")] -fn branches<'tcx>( - ecx: &CompileTimeEvalContext<'tcx, 'tcx>, - place: &MPlaceTy<'tcx>, - n: usize, - variant: Option, -) -> Option> { - let place = match variant { - Some(variant) => ecx.mplace_downcast(&place, variant).unwrap(), - None => *place, - }; - let variant = variant.map(|variant| Some(ty::ValTree::Leaf(ScalarInt::from(variant.as_u32())))); - debug!(?place, ?variant); - - let fields = (0..n).map(|i| { - let field = ecx.mplace_field(&place, i).unwrap(); - const_to_valtree_inner(ecx, &field) - }); - // For enums, we prepend their variant index before the variant's fields so we can figure out - // the variant again when just seeing a valtree. - let branches = variant.into_iter().chain(fields); - Some(ty::ValTree::Branch(ecx.tcx.arena.alloc_from_iter(branches.collect::>>()?))) -} - -fn slice_branches<'tcx>( - ecx: &CompileTimeEvalContext<'tcx, 'tcx>, - place: &MPlaceTy<'tcx>, -) -> Option> { - let n = place.len(&ecx.tcx()).expect(&format!("expected to use len of place {:?}", place)); - let branches = (0..n).map(|i| { - let place_elem = ecx.mplace_index(place, i).unwrap(); - const_to_valtree_inner(ecx, &place_elem) - }); - - Some(ty::ValTree::Branch(ecx.tcx.arena.alloc_from_iter(branches.collect::>>()?))) -} - -#[instrument(skip(ecx), level = "debug")] -fn const_to_valtree_inner<'tcx>( - ecx: &CompileTimeEvalContext<'tcx, 'tcx>, - place: &MPlaceTy<'tcx>, -) -> Option> { - match place.layout.ty.kind() { - ty::FnDef(..) 
=> Some(ty::ValTree::zst()), - ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => { - let val = ecx.read_immediate(&place.into()).unwrap(); - let val = val.to_scalar().unwrap(); - Some(ty::ValTree::Leaf(val.assert_int())) - } - - // Raw pointers are not allowed in type level constants, as we cannot properly test them for - // equality at compile-time (see `ptr_guaranteed_eq`/`_ne`). - // Technically we could allow function pointers (represented as `ty::Instance`), but this is not guaranteed to - // agree with runtime equality tests. - ty::FnPtr(_) | ty::RawPtr(_) => None, - - ty::Ref(_, _, _) => { - let derefd_place = ecx.deref_operand(&place.into()).unwrap_or_else(|e| bug!("couldn't deref {:?}, error: {:?}", place, e)); - debug!(?derefd_place); - - const_to_valtree_inner(ecx, &derefd_place) - } - - ty::Str | ty::Slice(_) | ty::Array(_, _) => { - let valtree = slice_branches(ecx, place); - debug!(?valtree); - - valtree - } - // Trait objects are not allowed in type level constants, as we have no concept for - // resolving their backing type, even if we can do that at const eval time. We may - // hypothetically be able to allow `dyn StructuralEq` trait objects in the future, - // but it is unclear if this is useful. - ty::Dynamic(..) => None, - - ty::Tuple(substs) => branches(ecx, place, substs.len(), None), - - ty::Adt(def, _) => { - if def.variants().is_empty() { - bug!("uninhabited types should have errored and never gotten converted to valtree") - } - - let variant = ecx.read_discriminant(&place.into()).unwrap().1; - - branches(ecx, place, def.variant(variant).fields.len(), def.is_enum().then_some(variant)) - } - - ty::Never - | ty::Error(_) - | ty::Foreign(..) - | ty::Infer(ty::FreshIntTy(_)) - | ty::Infer(ty::FreshFloatTy(_)) - | ty::Projection(..) - | ty::Param(_) - | ty::Bound(..) - | ty::Placeholder(..) - // FIXME(oli-obk): we could look behind opaque types - | ty::Opaque(..) 
- | ty::Infer(_) - // FIXME(oli-obk): we can probably encode closures just like structs - | ty::Closure(..) - | ty::Generator(..) - | ty::GeneratorWitness(..) => None, - } -} - /// This function should never fail for validated constants. However, it is also invoked from the /// pretty printer which might attempt to format invalid constants and in that case it might fail. pub(crate) fn try_destructure_const<'tcx>( @@ -202,6 +77,7 @@ pub(crate) fn try_destructure_const<'tcx>( Ok(mir::DestructuredConst { variant, fields }) } +#[instrument(skip(tcx), level = "debug")] pub(crate) fn deref_const<'tcx>( tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>, diff --git a/compiler/rustc_const_eval/src/const_eval/valtrees.rs b/compiler/rustc_const_eval/src/const_eval/valtrees.rs new file mode 100644 index 0000000000000..ded539fd3408b --- /dev/null +++ b/compiler/rustc_const_eval/src/const_eval/valtrees.rs @@ -0,0 +1,479 @@ +use super::eval_queries::{mk_eval_cx, op_to_const}; +use super::machine::CompileTimeEvalContext; +use crate::interpret::{ + intern_const_alloc_recursive, ConstValue, ImmTy, Immediate, InternKind, MemoryKind, PlaceTy, + Pointer, Scalar, ScalarMaybeUninit, +}; +use rustc_middle::mir::interpret::{ConstAlloc, GlobalAlloc}; +use rustc_middle::mir::{Field, ProjectionElem}; +use rustc_middle::ty::{self, ScalarInt, Ty, TyCtxt}; +use rustc_span::source_map::DUMMY_SP; +use rustc_target::abi::VariantIdx; + +use crate::interpret::visitor::Value; +use crate::interpret::MPlaceTy; + +/// Convert an evaluated constant to a type level constant +#[instrument(skip(tcx), level = "debug")] +pub(crate) fn const_to_valtree<'tcx>( + tcx: TyCtxt<'tcx>, + param_env: ty::ParamEnv<'tcx>, + raw: ConstAlloc<'tcx>, +) -> Option> { + let ecx = mk_eval_cx( + tcx, DUMMY_SP, param_env, + // It is absolutely crucial for soundness that + // we do not read from static items or other mutable memory. 
+ false, + ); + let place = ecx.raw_const_to_mplace(raw).unwrap(); + const_to_valtree_inner(&ecx, &place) +} + +#[instrument(skip(ecx), level = "debug")] +fn branches<'tcx>( + ecx: &CompileTimeEvalContext<'tcx, 'tcx>, + place: &MPlaceTy<'tcx>, + n: usize, + variant: Option, +) -> Option> { + let place = match variant { + Some(variant) => ecx.mplace_downcast(&place, variant).unwrap(), + None => *place, + }; + let variant = variant.map(|variant| Some(ty::ValTree::Leaf(ScalarInt::from(variant.as_u32())))); + debug!(?place, ?variant); + + let fields = (0..n).map(|i| { + let field = ecx.mplace_field(&place, i).unwrap(); + const_to_valtree_inner(ecx, &field) + }); + // For enums, we prepend their variant index before the variant's fields so we can figure out + // the variant again when just seeing a valtree. + let branches = variant.into_iter().chain(fields); + Some(ty::ValTree::Branch(ecx.tcx.arena.alloc_from_iter(branches.collect::>>()?))) +} + +fn slice_branches<'tcx>( + ecx: &CompileTimeEvalContext<'tcx, 'tcx>, + place: &MPlaceTy<'tcx>, +) -> Option> { + let n = place.len(&ecx.tcx.tcx).expect(&format!("expected to use len of place {:?}", place)); + let branches = (0..n).map(|i| { + let place_elem = ecx.mplace_index(place, i).unwrap(); + const_to_valtree_inner(ecx, &place_elem) + }); + + Some(ty::ValTree::Branch(ecx.tcx.arena.alloc_from_iter(branches.collect::>>()?))) +} + +#[instrument(skip(ecx), level = "debug")] +fn const_to_valtree_inner<'tcx>( + ecx: &CompileTimeEvalContext<'tcx, 'tcx>, + place: &MPlaceTy<'tcx>, +) -> Option> { + match place.layout.ty.kind() { + ty::FnDef(..) 
=> Some(ty::ValTree::zst()), + ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => { + let val = ecx.read_immediate(&place.into()).unwrap(); + let val = val.to_scalar().unwrap(); + Some(ty::ValTree::Leaf(val.assert_int())) + } + + // Raw pointers are not allowed in type level constants, as we cannot properly test them for + // equality at compile-time (see `ptr_guaranteed_eq`/`_ne`). + // Technically we could allow function pointers (represented as `ty::Instance`), but this is not guaranteed to + // agree with runtime equality tests. + ty::FnPtr(_) | ty::RawPtr(_) => None, + + ty::Ref(_, _, _) => { + let derefd_place = ecx.deref_operand(&place.into()).unwrap_or_else(|e| bug!("couldn't deref {:?}, error: {:?}", place, e)); + debug!(?derefd_place); + + const_to_valtree_inner(ecx, &derefd_place) + } + + ty::Str | ty::Slice(_) | ty::Array(_, _) => { + let valtree = slice_branches(ecx, place); + debug!(?valtree); + + valtree + } + // Trait objects are not allowed in type level constants, as we have no concept for + // resolving their backing type, even if we can do that at const eval time. We may + // hypothetically be able to allow `dyn StructuralEq` trait objects in the future, + // but it is unclear if this is useful. + ty::Dynamic(..) => None, + + ty::Tuple(substs) => branches(ecx, place, substs.len(), None), + + ty::Adt(def, _) => { + if def.variants().is_empty() { + bug!("uninhabited types should have errored and never gotten converted to valtree") + } + + let variant = ecx.read_discriminant(&place.into()).unwrap().1; + + branches(ecx, place, def.variant(variant).fields.len(), def.is_enum().then_some(variant)) + } + + ty::Never + | ty::Error(_) + | ty::Foreign(..) + | ty::Infer(ty::FreshIntTy(_)) + | ty::Infer(ty::FreshFloatTy(_)) + | ty::Projection(..) + | ty::Param(_) + | ty::Bound(..) + | ty::Placeholder(..) + // FIXME(oli-obk): we could look behind opaque types + | ty::Opaque(..) 
+ | ty::Infer(_) + // FIXME(oli-obk): we can probably encode closures just like structs + | ty::Closure(..) + | ty::Generator(..) + | ty::GeneratorWitness(..) => None, + } +} + +#[instrument(skip(ecx), level = "debug")] +fn create_mplace_from_layout<'tcx>( + ecx: &mut CompileTimeEvalContext<'tcx, 'tcx>, + param_env_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>, +) -> MPlaceTy<'tcx> { + let tcx = ecx.tcx; + let layout = tcx.layout_of(param_env_ty).unwrap(); + debug!(?layout); + + ecx.allocate(layout, MemoryKind::Stack).unwrap() +} + +/// Converts a `ValTree` to a `ConstValue`, which is needed after mir +/// construction has finished. +#[instrument(skip(tcx), level = "debug")] +pub fn valtree_to_const_value<'tcx>( + tcx: TyCtxt<'tcx>, + param_env_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>, + valtree: ty::ValTree<'tcx>, +) -> ConstValue<'tcx> { + // Basic idea: We directly construct `Scalar` values from trivial `ValTree`s + // (those for constants with type bool, int, uint, float or char). + // For all other types we create an `MPlace` and fill that by walking + // the `ValTree` and using `place_projection` and `place_field` to + // create inner `MPlace`s which are filled recursively. + // FIXME Does this need an example? 
+ + let (param_env, ty) = param_env_ty.into_parts(); + let mut ecx = mk_eval_cx(tcx, DUMMY_SP, param_env, false); + + match ty.kind() { + ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => match valtree { + ty::ValTree::Leaf(scalar_int) => ConstValue::Scalar(Scalar::Int(scalar_int)), + ty::ValTree::Branch(_) => bug!( + "ValTrees for Bool, Int, Uint, Float or Char should have the form ValTree::Leaf" + ), + }, + ty::Ref(_, inner_ty, _) => { + match inner_ty.kind() { + ty::Slice(_) | ty::Str => { + let slice_ty = match inner_ty.kind() { + ty::Slice(slice_ty) => *slice_ty, + ty::Str => tcx.mk_ty(ty::Uint(ty::UintTy::U8)), + _ => bug!("expected ty::Slice | ty::Str"), + }; + debug!(?slice_ty); + + let valtrees = valtree.unwrap_branch(); + + // Create a place for the underlying array + let len = valtrees.len(); + let arr_ty = tcx.mk_array(slice_ty, len as u64); + let mut place = + create_mplace_from_layout(&mut ecx, ty::ParamEnv::empty().and(arr_ty)); + debug!(?place); + + // Insert elements of `arr_valtree` into `place` + fill_place_recursively(&mut ecx, &mut place, valtree, arr_ty); + dump_place(&ecx, place.into()); + + // The allocation behind `place` is local, we need to intern it + intern_const_alloc_recursive(&mut ecx, InternKind::Constant, &place).unwrap(); + + // Now we need to get the Allocation + let alloc_id = place.mplace.ptr.provenance.unwrap(); + debug!(?alloc_id); + + let data = match tcx.get_global_alloc(alloc_id) { + Some(GlobalAlloc::Memory(const_alloc)) => const_alloc, + _ => bug!("expected memory allocation"), + }; + debug!(?data); + + return ConstValue::Slice { data, start: 0, end: len as usize }; + } + _ => { + match valtree { + ty::ValTree::Branch(_) => { + // create a place for the pointee + let mut place = create_mplace_from_layout( + &mut ecx, + ty::ParamEnv::empty().and(*inner_ty), + ); + debug!(?place); + + // insert elements of valtree into `place` + fill_place_recursively(&mut ecx, &mut place, valtree, *inner_ty); + 
dump_place(&ecx, place.into()); + intern_const_alloc_recursive(&mut ecx, InternKind::Constant, &place) + .unwrap(); + + let ref_place = place.mplace.to_ref(&tcx); + let imm = ImmTy::from_immediate( + ref_place, + tcx.layout_of(param_env_ty).unwrap(), + ); + + let const_val = op_to_const(&ecx, &imm.into()); + debug!(?const_val); + + const_val + } + ty::ValTree::Leaf(_) => { + let mut place = create_mplace_from_layout( + &mut ecx, + ty::ParamEnv::empty().and(*inner_ty), + ); + + fill_place_recursively(&mut ecx, &mut place, valtree, *inner_ty); + dump_place(&ecx, place.into()); + + let ref_place = place.mplace.to_ref(&tcx); + let imm = ImmTy::from_immediate( + ref_place, + tcx.layout_of(param_env_ty).unwrap(), + ); + + op_to_const(&ecx, &imm.into()) + } + } + } + } + } + ty::Tuple(_) | ty::Array(_, _) | ty::Adt(..) => { + let mut place = create_mplace_from_layout(&mut ecx, param_env_ty); + debug!(?place); + + fill_place_recursively(&mut ecx, &mut place, valtree, ty); + dump_place(&ecx, place.into()); + intern_const_alloc_recursive(&mut ecx, InternKind::Constant, &place).unwrap(); + + let const_val = op_to_const(&ecx, &place.into()); + debug!(?const_val); + + const_val + } + ty::Never + | ty::FnDef(..) + | ty::Error(_) + | ty::Foreign(..) + | ty::Infer(ty::FreshIntTy(_)) + | ty::Infer(ty::FreshFloatTy(_)) + | ty::Projection(..) + | ty::Param(_) + | ty::Bound(..) + | ty::Placeholder(..) + | ty::Opaque(..) + | ty::Infer(_) + | ty::Closure(..) + | ty::Generator(..) + | ty::GeneratorWitness(..) + | ty::FnPtr(_) + | ty::RawPtr(_) + | ty::Str + | ty::Slice(_) + | ty::Dynamic(..) 
=> bug!("no ValTree should have been created for type {:?}", ty.kind()), + } +} + +// FIXME Needs a better/correct name +#[instrument(skip(ecx), level = "debug")] +fn fill_place_recursively<'tcx>( + ecx: &mut CompileTimeEvalContext<'tcx, 'tcx>, + place: &mut MPlaceTy<'tcx>, + valtree: ty::ValTree<'tcx>, + ty: Ty<'tcx>, +) { + // This will match on valtree and write the value(s) corresponding to the ValTree + // inside the place recursively. + + let tcx = ecx.tcx.tcx; + + match ty.kind() { + ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => { + let scalar_int = valtree.unwrap_leaf(); + debug!("writing trivial valtree {:?} to place {:?}", scalar_int, place); + ecx.write_immediate( + Immediate::Scalar(ScalarMaybeUninit::Scalar(scalar_int.into())), + &(*place).into(), + ) + .unwrap(); + } + ty::Ref(_, inner_ty, _) => { + match inner_ty.kind() { + ty::Slice(_) | ty::Str => { + let slice_ty = match inner_ty.kind() { + ty::Slice(slice_ty) => *slice_ty, + ty::Str => tcx.mk_ty(ty::Uint(ty::UintTy::U8)), + _ => bug!("expected ty::Slice | ty::Str"), + }; + debug!(?slice_ty); + + let valtrees = valtree.unwrap_branch(); + debug!(?valtrees); + let len = valtrees.len(); + debug!(?len); + + // create a place for the underlying array + let arr_ty = tcx.mk_array(slice_ty, len as u64); + let mut arr_place = + create_mplace_from_layout(ecx, ty::ParamEnv::empty().and(arr_ty)); + debug!(?arr_place); + + // Insert elements of `arr_valtree` into `place` + fill_place_recursively(ecx, &mut arr_place, valtree, arr_ty); + dump_place(&ecx, arr_place.into()); + + // Now we need to create a `ScalarPair` from the filled `place` + // and write that into `place` + let (alloc_id, offset) = arr_place.mplace.ptr.into_parts(); + debug!(?alloc_id, ?offset); + let unwrapped_ptr = Pointer { offset, provenance: alloc_id.unwrap() }; + let len_scalar = ScalarMaybeUninit::Scalar(Scalar::from_u64(len as u64)); + + let imm = Immediate::ScalarPair( + ScalarMaybeUninit::from_pointer(unwrapped_ptr, 
&tcx), + len_scalar, + ); + debug!(?imm); + + // Now write the ScalarPair into the original place we wanted to fill + // in this call + let _ = ecx.write_immediate(imm, &(*place).into()).unwrap(); + + dump_place(&ecx, (*place).into()); + } + _ => { + let mut pointee_place = + create_mplace_from_layout(ecx, ty::ParamEnv::empty().and(*inner_ty)); + debug!(?pointee_place); + fill_place_recursively(ecx, &mut pointee_place, valtree, *inner_ty); + + dump_place(ecx, pointee_place.into()); + intern_const_alloc_recursive(ecx, InternKind::Constant, &pointee_place) + .unwrap(); + + let imm = pointee_place.mplace.to_ref(&tcx); + debug!(?imm); + + ecx.write_immediate(imm, &(*place).into()).unwrap(); + } + } + } + ty::Tuple(tuple_types) => { + let branches = valtree.unwrap_branch(); + assert_eq!(tuple_types.len(), branches.len()); + + for (i, inner_valtree) in branches.iter().enumerate() { + debug!(?i, ?inner_valtree); + let inner_ty = tuple_types.get(i).expect(&format!( + "expected to be able to index at position {} into {:?}", + i, tuple_types + )); + debug!(?inner_ty); + + // Create the mplace for the tuple element + let mut place_inner = ecx.mplace_field(place, i).unwrap(); + debug!(?place_inner); + + // insert valtree corresponding to tuple element into place + fill_place_recursively(ecx, &mut place_inner, *inner_valtree, *inner_ty); + } + } + ty::Array(inner_ty, _) => { + let inner_valtrees = valtree.unwrap_branch(); + for (i, inner_valtree) in inner_valtrees.iter().enumerate() { + debug!(?i, ?inner_valtree); + + let mut place_inner = ecx.mplace_field(place, i).unwrap(); + debug!(?place_inner); + + fill_place_recursively(ecx, &mut place_inner, *inner_valtree, *inner_ty) + } + } + ty::Adt(def, substs) if def.is_enum() => { + debug!("enum, substs: {:?}", substs); + let inner_valtrees = valtree.unwrap_branch(); + + // First element of valtree corresponds to variant + let scalar_int = inner_valtrees[0].unwrap_leaf(); + let variant_idx = 
VariantIdx::from_u32(scalar_int.try_to_u32().unwrap()); + let variant = def.variant(variant_idx); + debug!(?variant); + + // Need to downcast place + let place_downcast = place.project_downcast(ecx, variant_idx).unwrap(); + debug!(?place_downcast); + + // fill `place_downcast` with the valtree elements corresponding to + // the fields of the enum + let fields = &variant.fields; + let inner_valtrees = &inner_valtrees[1..]; + for (i, field) in fields.iter().enumerate() { + debug!(?i, ?field); + + let field_ty = field.ty(tcx, substs); + debug!(?field_ty); + + let mut field_mplace = ecx.mplace_field(&place_downcast, i).unwrap(); + debug!(?field_mplace); + let inner_valtree = inner_valtrees[i]; + + fill_place_recursively(ecx, &mut field_mplace, inner_valtree, field_ty); + dump_place(&ecx, field_mplace.into()); + } + + debug!("dump of place_downcast"); + dump_place(ecx, place_downcast.into()); + + // don't forget filling the place with the discriminant of the enum + ecx.write_discriminant(variant_idx, &(*place).into()).unwrap(); + dump_place(ecx, (*place).into()); + } + ty::Adt(def, substs) => { + debug!("Adt def: {:?} with substs: {:?}", def, substs); + let inner_valtrees = valtree.unwrap_branch(); + debug!(?inner_valtrees); + let (fields, inner_valtrees) = + (&def.variant(VariantIdx::from_usize(0)).fields[..], inner_valtrees); + + debug!("fields: {:?}", fields); + + for (i, field) in fields.iter().enumerate() { + let field_ty = field.ty(tcx, substs); + debug!(?field_ty); + let old_field_ty = tcx.type_of(field.did); + debug!(?old_field_ty); + let projection_elem = ProjectionElem::Field(Field::from_usize(i), field_ty); + let mut field_place = ecx.mplace_projection(place, projection_elem).unwrap(); + let inner_valtree = inner_valtrees[i]; + + fill_place_recursively(ecx, &mut field_place, inner_valtree, field_ty); + } + } + _ => {} + } +} + +fn dump_place<'tcx>(ecx: &CompileTimeEvalContext<'tcx, 'tcx>, place: PlaceTy<'tcx>) { + trace!("{:?}", ecx.dump_place(place.place)); 
+} diff --git a/compiler/rustc_const_eval/src/interpret/mod.rs b/compiler/rustc_const_eval/src/interpret/mod.rs index 2b9fe56599715..dba746e72e245 100644 --- a/compiler/rustc_const_eval/src/interpret/mod.rs +++ b/compiler/rustc_const_eval/src/interpret/mod.rs @@ -14,7 +14,7 @@ mod terminator; mod traits; mod util; mod validity; -mod visitor; +pub(crate) mod visitor; pub use rustc_middle::mir::interpret::*; // have all the `interpret` symbols in one place: here diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs index dfc0028e87fcc..170fbab2cce04 100644 --- a/compiler/rustc_const_eval/src/interpret/operand.rs +++ b/compiler/rustc_const_eval/src/interpret/operand.rs @@ -98,7 +98,7 @@ impl<'tcx, Tag: Provenance> Immediate { // as input for binary and cast operations. #[derive(Copy, Clone, Debug)] pub struct ImmTy<'tcx, Tag: Provenance = AllocId> { - imm: Immediate, + pub imm: Immediate, pub layout: TyAndLayout<'tcx>, } @@ -248,7 +248,7 @@ impl<'tcx, Tag: Provenance> ImmTy<'tcx, Tag> { impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { /// Try reading an immediate in memory; this is interesting particularly for `ScalarPair`. /// Returns `None` if the layout does not permit loading this as a value. 
- fn try_read_immediate_from_mplace( + pub(crate) fn try_read_immediate_from_mplace( &self, mplace: &MPlaceTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx, Option>> { @@ -424,6 +424,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { }) } + #[instrument(skip(self), level = "debug")] pub fn operand_projection( &self, base: &OpTy<'tcx, M::PointerTag>, diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs index e4660fe090ce5..8caf9eee2d95f 100644 --- a/compiler/rustc_const_eval/src/interpret/place.rs +++ b/compiler/rustc_const_eval/src/interpret/place.rs @@ -82,7 +82,7 @@ rustc_data_structures::static_assert_size!(Place, 56); #[derive(Copy, Clone, Debug)] pub struct PlaceTy<'tcx, Tag: Provenance = AllocId> { - place: Place, // Keep this private; it helps enforce invariants. + pub(crate) place: Place, // Keep this private; it helps enforce invariants. pub layout: TyAndLayout<'tcx>, } @@ -100,7 +100,7 @@ impl<'tcx, Tag: Provenance> std::ops::Deref for PlaceTy<'tcx, Tag> { /// A MemPlace with its layout. Constructing it is only possible in this module. #[derive(Copy, Clone, Hash, Eq, PartialEq, Debug)] pub struct MPlaceTy<'tcx, Tag: Provenance = AllocId> { - mplace: MemPlace, + pub(crate) mplace: MemPlace, pub layout: TyAndLayout<'tcx>, } @@ -294,6 +294,7 @@ where /// Take an operand, representing a pointer, and dereference it to a place -- that /// will always be a MemPlace. Lives in `place.rs` because it creates a place. + #[instrument(skip(self), level = "debug")] pub fn deref_operand( &self, src: &OpTy<'tcx, M::PointerTag>, @@ -487,7 +488,8 @@ where } /// Project into an mplace - pub(super) fn mplace_projection( + #[instrument(skip(self), level = "debug")] + pub(crate) fn mplace_projection( &self, base: &MPlaceTy<'tcx, M::PointerTag>, proj_elem: mir::PlaceElem<'tcx>, @@ -548,6 +550,7 @@ where /// Just a convenience function, but used quite a bit. 
/// This is the only projection that might have a side-effect: We cannot project /// into the field of a local `ScalarPair`, we have to first allocate it. + #[instrument(skip(self), level = "debug")] pub fn place_field( &mut self, base: &PlaceTy<'tcx, M::PointerTag>, @@ -586,6 +589,7 @@ where } /// Projects into a place. + #[instrument(skip(self), level = "debug")] pub fn place_projection( &mut self, base: &PlaceTy<'tcx, M::PointerTag>, @@ -617,19 +621,23 @@ where /// Computes a place. You should only use this if you intend to write into this /// place; for reading, a more efficient alternative is `eval_place_for_read`. + #[instrument(skip(self), level = "debug")] pub fn eval_place( &mut self, place: mir::Place<'tcx>, ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> { + debug!("projection: {:?}", place.projection); let mut place_ty = PlaceTy { // This works even for dead/uninitialized locals; we check further when writing place: Place::Local { frame: self.frame_idx(), local: place.local }, layout: self.layout_of_local(self.frame(), place.local, None)?, }; + debug!(?place_ty); for elem in place.projection.iter() { place_ty = self.place_projection(&place_ty, &elem)? } + debug!("place after projections: {:?}", place_ty); trace!("{:?}", self.dump_place(place_ty.place)); // Sanity-check the type we ended up with. @@ -646,6 +654,7 @@ where /// Write an immediate to a place #[inline(always)] + #[instrument(skip(self), level = "debug")] pub fn write_immediate( &mut self, src: Immediate, @@ -684,6 +693,7 @@ where /// Write an immediate to a place. /// If you use this you are responsible for validating that things got copied at the /// right type. + #[instrument(skip(self), level = "debug")] fn write_immediate_no_validate( &mut self, src: Immediate, @@ -736,6 +746,7 @@ where /// Write an immediate to memory. /// If you use this you are responsible for validating that things got copied at the /// right type. 
+ #[instrument(skip(self), level = "debug")] fn write_immediate_to_mplace_no_validate( &mut self, value: Immediate, @@ -758,6 +769,7 @@ where // cover all the bytes! match value { Immediate::Scalar(scalar) => { + debug!(?scalar); match dest.layout.abi { Abi::Scalar(_) => {} // fine _ => span_bug!( @@ -830,6 +842,7 @@ where /// Copies the data from an operand to a place. This does not support transmuting! /// Use `copy_op_transmute` if the layouts could disagree. #[inline(always)] + #[instrument(skip(self), level = "debug")] pub fn copy_op( &mut self, src: &OpTy<'tcx, M::PointerTag>, @@ -849,6 +862,7 @@ where /// Use `copy_op_transmute` if the layouts could disagree. /// Also, if you use this you are responsible for validating that things get copied at the /// right type. + #[instrument(skip(self), level = "debug")] fn copy_op_no_validate( &mut self, src: &OpTy<'tcx, M::PointerTag>, @@ -868,6 +882,7 @@ where // Let us see if the layout is simple so we take a shortcut, avoid force_allocation. let src = match self.try_read_immediate(src)? { Ok(src_val) => { + debug!("immediate from src is {:?}", src_val); assert!(!src.layout.is_unsized(), "cannot have unsized immediates"); // Yay, we got a value that we can write directly. // FIXME: Add a check to make sure that if `src` is indirect, @@ -955,6 +970,7 @@ where /// This supports unsized types and returns the computed size to avoid some /// redundant computation when copying; use `force_allocation` for a simpler, sized-only /// version. + #[instrument(skip(self), level = "debug")] pub fn force_allocation_maybe_sized( &mut self, place: &PlaceTy<'tcx, M::PointerTag>, @@ -962,6 +978,7 @@ where ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::PointerTag>, Option)> { let (mplace, size) = match place.place { Place::Local { frame, local } => { + debug!("LocalPlace"); match M::access_local_mut(self, frame, local)? { Ok(&mut local_val) => { // We need to make an allocation. 
@@ -975,9 +992,12 @@ where let (size, align) = self .size_and_align_of(&meta, &local_layout)? .expect("Cannot allocate for non-dyn-sized type"); + debug!(?size, ?align); let ptr = self.allocate_ptr(size, align, MemoryKind::Stack)?; + debug!("allocated ptr: {:?}", ptr); let mplace = MemPlace { ptr: ptr.into(), align, meta }; if let LocalValue::Live(Operand::Immediate(value)) = local_val { + debug!("LocalValue::Live: immediate value {:?}", value); // Preserve old value. // We don't have to validate as we can assume the local // was already valid for its type. @@ -1037,6 +1057,7 @@ where } /// Writes the discriminant of the given variant. + #[instrument(skip(self), level = "debug")] pub fn write_discriminant( &mut self, variant_index: VariantIdx, diff --git a/compiler/rustc_middle/src/mir/interpret/allocation.rs b/compiler/rustc_middle/src/mir/interpret/allocation.rs index c628406064fb6..5e7e362eeef42 100644 --- a/compiler/rustc_middle/src/mir/interpret/allocation.rs +++ b/compiler/rustc_middle/src/mir/interpret/allocation.rs @@ -418,6 +418,7 @@ impl Allocation { /// /// It is the caller's responsibility to check bounds and alignment beforehand. /// Most likely, you want to call `InterpCx::write_scalar` instead of this method. + #[instrument(skip(self, cx), level = "debug")] pub fn write_scalar( &mut self, cx: &impl HasDataLayout, @@ -432,6 +433,7 @@ impl Allocation { return self.write_uninit(cx, range); } }; + debug!(?val); // `to_bits_or_ptr_internal` is the right method because we just want to store this data // as-is into memory. @@ -442,13 +444,16 @@ impl Allocation { } Ok(data) => (data, None), }; + debug!(?bytes, ?provenance); let endian = cx.data_layout().endian; let dst = self.get_bytes_mut(cx, range)?; + debug!(?dst); write_target_uint(endian, dst, bytes).unwrap(); // See if we have to also write a relocation. 
if let Some(provenance) = provenance { + debug!("insert relocation for {:?}", provenance); self.relocations.0.insert(range.start, provenance); } diff --git a/compiler/rustc_middle/src/mir/interpret/pointer.rs b/compiler/rustc_middle/src/mir/interpret/pointer.rs index c71aea417eca0..cb36e5409543d 100644 --- a/compiler/rustc_middle/src/mir/interpret/pointer.rs +++ b/compiler/rustc_middle/src/mir/interpret/pointer.rs @@ -158,7 +158,7 @@ impl Provenance for AllocId { #[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, TyEncodable, TyDecodable, Hash)] #[derive(HashStable)] pub struct Pointer { - pub(super) offset: Size, // kept private to avoid accidental misinterpretation (meaning depends on `Tag` type) + pub offset: Size, // FIXME This should probably be private pub provenance: Tag, } diff --git a/compiler/rustc_middle/src/ty/consts/valtree.rs b/compiler/rustc_middle/src/ty/consts/valtree.rs index e17c69cb3537e..418848f69d726 100644 --- a/compiler/rustc_middle/src/ty/consts/valtree.rs +++ b/compiler/rustc_middle/src/ty/consts/valtree.rs @@ -20,6 +20,9 @@ pub enum ValTree<'tcx> { /// See the `ScalarInt` documentation for how `ScalarInt` guarantees that equal values /// of these types have the same representation. Leaf(ScalarInt), + + //SliceOrStr(ValSlice<'tcx>), + // dont use SliceOrStr for now /// The fields of any kind of aggregate. Structs, tuples and arrays are represented by /// listing their fields' values in order. 
/// Enums are represented by storing their discriminant as a field, followed by all From 28af967bb9165999294250dc3f3a56c2193c35d9 Mon Sep 17 00:00:00 2001 From: b-naber Date: Thu, 21 Apr 2022 16:37:24 +0200 Subject: [PATCH 4/9] implement (as of now still unused) query for valtree -> constvalue conversion --- compiler/rustc_const_eval/src/lib.rs | 4 ++++ compiler/rustc_middle/src/query/mod.rs | 5 +++++ compiler/rustc_query_impl/src/keys.rs | 11 +++++++++++ 3 files changed, 20 insertions(+) diff --git a/compiler/rustc_const_eval/src/lib.rs b/compiler/rustc_const_eval/src/lib.rs index d688331ae0a56..34a004525196d 100644 --- a/compiler/rustc_const_eval/src/lib.rs +++ b/compiler/rustc_const_eval/src/lib.rs @@ -35,6 +35,7 @@ pub mod transform; pub mod util; use rustc_middle::ty::query::Providers; +use rustc_middle::ty::ParamEnv; pub fn provide(providers: &mut Providers) { const_eval::provide(providers); @@ -49,6 +50,9 @@ pub fn provide(providers: &mut Providers) { let (param_env, raw) = param_env_and_value.into_parts(); const_eval::const_to_valtree(tcx, param_env, raw) }; + providers.valtree_to_const_val = |tcx, (ty, valtree)| { + const_eval::valtree_to_const_value(tcx, ParamEnv::empty().and(ty), valtree) + }; providers.deref_const = |tcx, param_env_and_value| { let (param_env, value) = param_env_and_value.into_parts(); const_eval::deref_const(tcx, param_env, value) diff --git a/compiler/rustc_middle/src/query/mod.rs b/compiler/rustc_middle/src/query/mod.rs index 78a3383243306..2834db2613137 100644 --- a/compiler/rustc_middle/src/query/mod.rs +++ b/compiler/rustc_middle/src/query/mod.rs @@ -936,6 +936,11 @@ rustc_queries! 
{ remap_env_constness } + /// Converts a type level constant value into `ConstValue` + query valtree_to_const_val(key: (Ty<'tcx>, ty::ValTree<'tcx>)) -> ConstValue<'tcx> { + desc { "convert type-level constant value to mir constant value"} + } + /// Destructure a constant ADT or array into its variant index and its /// field values or return `None` if constant is invalid. /// diff --git a/compiler/rustc_query_impl/src/keys.rs b/compiler/rustc_query_impl/src/keys.rs index f1f83a7299c4d..3f0f856b5dd7c 100644 --- a/compiler/rustc_query_impl/src/keys.rs +++ b/compiler/rustc_query_impl/src/keys.rs @@ -502,3 +502,14 @@ impl<'tcx> Key for (ty::Instance<'tcx>, &'tcx ty::List>) { self.0.default_span(tcx) } } + +impl<'tcx> Key for (Ty<'tcx>, ty::ValTree<'tcx>) { + #[inline(always)] + fn query_crate_is_local(&self) -> bool { + true + } + + fn default_span(&self, _: TyCtxt<'_>) -> Span { + DUMMY_SP + } +} From bc698c73e90c253b0d37be8127b3fb542d9e95c2 Mon Sep 17 00:00:00 2001 From: b-naber Date: Thu, 21 Apr 2022 19:35:06 +0200 Subject: [PATCH 5/9] deduplicate a lot of code --- .../src/const_eval/valtrees.rs | 339 ++++++------------ .../rustc_const_eval/src/interpret/mod.rs | 4 +- .../rustc_const_eval/src/interpret/operand.rs | 4 +- .../rustc_const_eval/src/interpret/place.rs | 16 +- .../src/mir/interpret/allocation.rs | 4 - .../rustc_middle/src/mir/interpret/pointer.rs | 2 +- 6 files changed, 122 insertions(+), 247 deletions(-) diff --git a/compiler/rustc_const_eval/src/const_eval/valtrees.rs b/compiler/rustc_const_eval/src/const_eval/valtrees.rs index ded539fd3408b..39a3df79a28eb 100644 --- a/compiler/rustc_const_eval/src/const_eval/valtrees.rs +++ b/compiler/rustc_const_eval/src/const_eval/valtrees.rs @@ -2,16 +2,16 @@ use super::eval_queries::{mk_eval_cx, op_to_const}; use super::machine::CompileTimeEvalContext; use crate::interpret::{ intern_const_alloc_recursive, ConstValue, ImmTy, Immediate, InternKind, MemoryKind, PlaceTy, - Pointer, Scalar, ScalarMaybeUninit, + Scalar, 
ScalarMaybeUninit, }; -use rustc_middle::mir::interpret::{ConstAlloc, GlobalAlloc}; +use rustc_middle::mir::interpret::ConstAlloc; use rustc_middle::mir::{Field, ProjectionElem}; use rustc_middle::ty::{self, ScalarInt, Ty, TyCtxt}; use rustc_span::source_map::DUMMY_SP; use rustc_target::abi::VariantIdx; -use crate::interpret::visitor::Value; use crate::interpret::MPlaceTy; +use crate::interpret::Value; /// Convert an evaluated constant to a type level constant #[instrument(skip(tcx), level = "debug")] @@ -54,6 +54,7 @@ fn branches<'tcx>( Some(ty::ValTree::Branch(ecx.tcx.arena.alloc_from_iter(branches.collect::>>()?))) } +#[instrument(skip(ecx), level = "debug")] fn slice_branches<'tcx>( ecx: &CompileTimeEvalContext<'tcx, 'tcx>, place: &MPlaceTy<'tcx>, @@ -139,15 +140,44 @@ fn const_to_valtree_inner<'tcx>( #[instrument(skip(ecx), level = "debug")] fn create_mplace_from_layout<'tcx>( ecx: &mut CompileTimeEvalContext<'tcx, 'tcx>, - param_env_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>, + ty: Ty<'tcx>, ) -> MPlaceTy<'tcx> { let tcx = ecx.tcx; - let layout = tcx.layout_of(param_env_ty).unwrap(); + let param_env = ecx.param_env; + let layout = tcx.layout_of(param_env.and(ty)).unwrap(); debug!(?layout); ecx.allocate(layout, MemoryKind::Stack).unwrap() } +#[instrument(skip(ecx), level = "debug")] +fn create_pointee_place<'tcx>( + ecx: &mut CompileTimeEvalContext<'tcx, 'tcx>, + ty: Ty<'tcx>, + valtree: ty::ValTree<'tcx>, +) -> MPlaceTy<'tcx> { + let tcx = ecx.tcx.tcx; + + match ty.kind() { + ty::Slice(_) | ty::Str => { + let slice_ty = match ty.kind() { + ty::Slice(slice_ty) => *slice_ty, + ty::Str => tcx.mk_ty(ty::Uint(ty::UintTy::U8)), + _ => bug!("expected ty::Slice | ty::Str"), + }; + + // Create a place for the underlying array + let len = valtree.unwrap_branch().len() as u64; + let arr_ty = tcx.mk_array(slice_ty, len as u64); + let place = create_mplace_from_layout(ecx, arr_ty); + debug!(?place); + + place + } + _ => create_mplace_from_layout(ecx, ty), + } +} + /// Converts a 
`ValTree` to a `ConstValue`, which is needed after mir /// construction has finished. #[instrument(skip(tcx), level = "debug")] @@ -174,96 +204,28 @@ pub fn valtree_to_const_value<'tcx>( ), }, ty::Ref(_, inner_ty, _) => { - match inner_ty.kind() { - ty::Slice(_) | ty::Str => { - let slice_ty = match inner_ty.kind() { - ty::Slice(slice_ty) => *slice_ty, - ty::Str => tcx.mk_ty(ty::Uint(ty::UintTy::U8)), - _ => bug!("expected ty::Slice | ty::Str"), - }; - debug!(?slice_ty); - - let valtrees = valtree.unwrap_branch(); - - // Create a place for the underlying array - let len = valtrees.len(); - let arr_ty = tcx.mk_array(slice_ty, len as u64); - let mut place = - create_mplace_from_layout(&mut ecx, ty::ParamEnv::empty().and(arr_ty)); - debug!(?place); - - // Insert elements of `arr_valtree` into `place` - fill_place_recursively(&mut ecx, &mut place, valtree, arr_ty); - dump_place(&ecx, place.into()); - - // The allocation behind `place` is local, we need to intern it - intern_const_alloc_recursive(&mut ecx, InternKind::Constant, &place).unwrap(); - - // Now we need to get the Allocation - let alloc_id = place.mplace.ptr.provenance.unwrap(); - debug!(?alloc_id); - - let data = match tcx.get_global_alloc(alloc_id) { - Some(GlobalAlloc::Memory(const_alloc)) => const_alloc, - _ => bug!("expected memory allocation"), - }; - debug!(?data); - - return ConstValue::Slice { data, start: 0, end: len as usize }; - } - _ => { - match valtree { - ty::ValTree::Branch(_) => { - // create a place for the pointee - let mut place = create_mplace_from_layout( - &mut ecx, - ty::ParamEnv::empty().and(*inner_ty), - ); - debug!(?place); - - // insert elements of valtree into `place` - fill_place_recursively(&mut ecx, &mut place, valtree, *inner_ty); - dump_place(&ecx, place.into()); - intern_const_alloc_recursive(&mut ecx, InternKind::Constant, &place) - .unwrap(); - - let ref_place = place.mplace.to_ref(&tcx); - let imm = ImmTy::from_immediate( - ref_place, - 
tcx.layout_of(param_env_ty).unwrap(), - ); - - let const_val = op_to_const(&ecx, &imm.into()); - debug!(?const_val); - - const_val - } - ty::ValTree::Leaf(_) => { - let mut place = create_mplace_from_layout( - &mut ecx, - ty::ParamEnv::empty().and(*inner_ty), - ); - - fill_place_recursively(&mut ecx, &mut place, valtree, *inner_ty); - dump_place(&ecx, place.into()); - - let ref_place = place.mplace.to_ref(&tcx); - let imm = ImmTy::from_immediate( - ref_place, - tcx.layout_of(param_env_ty).unwrap(), - ); - - op_to_const(&ecx, &imm.into()) - } - } - } - } + // create a place for the pointee + let mut pointee_place = create_pointee_place(&mut ecx, *inner_ty, valtree); + debug!(?pointee_place); + + // insert elements of valtree into `place` + fill_place_recursively(&mut ecx, &mut pointee_place, valtree); + dump_place(&ecx, pointee_place.into()); + intern_const_alloc_recursive(&mut ecx, InternKind::Constant, &pointee_place).unwrap(); + + let ref_place = pointee_place.to_ref(&tcx); + let imm = ImmTy::from_immediate(ref_place, tcx.layout_of(param_env_ty).unwrap()); + + let const_val = op_to_const(&ecx, &imm.into()); + debug!(?const_val); + + const_val } ty::Tuple(_) | ty::Array(_, _) | ty::Adt(..) => { - let mut place = create_mplace_from_layout(&mut ecx, param_env_ty); + let mut place = create_mplace_from_layout(&mut ecx, ty); debug!(?place); - fill_place_recursively(&mut ecx, &mut place, valtree, ty); + fill_place_recursively(&mut ecx, &mut place, valtree); dump_place(&ecx, place.into()); intern_const_alloc_recursive(&mut ecx, InternKind::Constant, &place).unwrap(); @@ -301,12 +263,12 @@ fn fill_place_recursively<'tcx>( ecx: &mut CompileTimeEvalContext<'tcx, 'tcx>, place: &mut MPlaceTy<'tcx>, valtree: ty::ValTree<'tcx>, - ty: Ty<'tcx>, ) { // This will match on valtree and write the value(s) corresponding to the ValTree // inside the place recursively. 
let tcx = ecx.tcx.tcx; + let ty = place.layout.ty; match ty.kind() { ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => { @@ -319,161 +281,90 @@ fn fill_place_recursively<'tcx>( .unwrap(); } ty::Ref(_, inner_ty, _) => { - match inner_ty.kind() { + let mut pointee_place = create_pointee_place(ecx, *inner_ty, valtree); + debug!(?pointee_place); + + fill_place_recursively(ecx, &mut pointee_place, valtree); + dump_place(ecx, pointee_place.into()); + intern_const_alloc_recursive(ecx, InternKind::Constant, &pointee_place).unwrap(); + + let imm = match inner_ty.kind() { ty::Slice(_) | ty::Str => { - let slice_ty = match inner_ty.kind() { - ty::Slice(slice_ty) => *slice_ty, - ty::Str => tcx.mk_ty(ty::Uint(ty::UintTy::U8)), - _ => bug!("expected ty::Slice | ty::Str"), - }; - debug!(?slice_ty); - - let valtrees = valtree.unwrap_branch(); - debug!(?valtrees); - let len = valtrees.len(); - debug!(?len); - - // create a place for the underlying array - let arr_ty = tcx.mk_array(slice_ty, len as u64); - let mut arr_place = - create_mplace_from_layout(ecx, ty::ParamEnv::empty().and(arr_ty)); - debug!(?arr_place); - - // Insert elements of `arr_valtree` into `place` - fill_place_recursively(ecx, &mut arr_place, valtree, arr_ty); - dump_place(&ecx, arr_place.into()); - - // Now we need to create a `ScalarPair` from the filled `place` - // and write that into `place` - let (alloc_id, offset) = arr_place.mplace.ptr.into_parts(); - debug!(?alloc_id, ?offset); - let unwrapped_ptr = Pointer { offset, provenance: alloc_id.unwrap() }; + let len = valtree.unwrap_branch().len(); let len_scalar = ScalarMaybeUninit::Scalar(Scalar::from_u64(len as u64)); - let imm = Immediate::ScalarPair( - ScalarMaybeUninit::from_pointer(unwrapped_ptr, &tcx), + Immediate::ScalarPair( + ScalarMaybeUninit::from_maybe_pointer((*pointee_place).ptr, &tcx), len_scalar, - ); - debug!(?imm); - - // Now write the ScalarPair into the original place we wanted to fill - // in this call - let _ = 
ecx.write_immediate(imm, &(*place).into()).unwrap(); - - dump_place(&ecx, (*place).into()); + ) } - _ => { - let mut pointee_place = - create_mplace_from_layout(ecx, ty::ParamEnv::empty().and(*inner_ty)); - debug!(?pointee_place); - fill_place_recursively(ecx, &mut pointee_place, valtree, *inner_ty); + _ => pointee_place.to_ref(&tcx), + }; + debug!(?imm); - dump_place(ecx, pointee_place.into()); - intern_const_alloc_recursive(ecx, InternKind::Constant, &pointee_place) - .unwrap(); - - let imm = pointee_place.mplace.to_ref(&tcx); - debug!(?imm); - - ecx.write_immediate(imm, &(*place).into()).unwrap(); - } - } + ecx.write_immediate(imm, &(*place).into()).unwrap(); } - ty::Tuple(tuple_types) => { + ty::Adt(_, _) | ty::Tuple(_) | ty::Array(_, _) | ty::Str => { let branches = valtree.unwrap_branch(); - assert_eq!(tuple_types.len(), branches.len()); + // Need to downcast place for enums + let (place_adjusted, branches, variant_idx) = match ty.kind() { + ty::Adt(def, _) if def.is_enum() => { + // First element of valtree corresponds to variant + let scalar_int = branches[0].unwrap_leaf(); + let variant_idx = VariantIdx::from_u32(scalar_int.try_to_u32().unwrap()); + let variant = def.variant(variant_idx); + debug!(?variant); + + ( + place.project_downcast(ecx, variant_idx).unwrap(), + &branches[1..], + Some(variant_idx), + ) + } + _ => (*place, branches, None), + }; + debug!(?place_adjusted, ?branches); + + // Create the places for the fields and fill them recursively for (i, inner_valtree) in branches.iter().enumerate() { debug!(?i, ?inner_valtree); - let inner_ty = tuple_types.get(i).expect(&format!( - "expected to be able to index at position {} into {:?}", - i, tuple_types - )); - debug!(?inner_ty); - - // Create the mplace for the tuple element - let mut place_inner = ecx.mplace_field(place, i).unwrap(); + + let mut place_inner = match *ty.kind() { + ty::Adt(def, substs) if !def.is_enum() => { + let field = &def.variant(VariantIdx::from_usize(0)).fields[i]; + let 
field_ty = field.ty(tcx, substs); + let projection_elem = ProjectionElem::Field(Field::from_usize(i), field_ty); + + ecx.mplace_projection(&place_adjusted, projection_elem).unwrap() + } + ty::Adt(_, _) | ty::Tuple(_) => ecx.mplace_field(&place_adjusted, i).unwrap(), + ty::Array(_, _) | ty::Str => { + ecx.mplace_index(&place_adjusted, i as u64).unwrap() + } + _ => bug!(), + }; debug!(?place_inner); // insert valtree corresponding to tuple element into place - fill_place_recursively(ecx, &mut place_inner, *inner_valtree, *inner_ty); + fill_place_recursively(ecx, &mut place_inner, *inner_valtree); + dump_place(&ecx, place_inner.into()); } - } - ty::Array(inner_ty, _) => { - let inner_valtrees = valtree.unwrap_branch(); - for (i, inner_valtree) in inner_valtrees.iter().enumerate() { - debug!(?i, ?inner_valtree); - let mut place_inner = ecx.mplace_field(place, i).unwrap(); - debug!(?place_inner); + debug!("dump of place_adjusted:"); + dump_place(ecx, place_adjusted.into()); - fill_place_recursively(ecx, &mut place_inner, *inner_valtree, *inner_ty) - } - } - ty::Adt(def, substs) if def.is_enum() => { - debug!("enum, substs: {:?}", substs); - let inner_valtrees = valtree.unwrap_branch(); - - // First element of valtree corresponds to variant - let scalar_int = inner_valtrees[0].unwrap_leaf(); - let variant_idx = VariantIdx::from_u32(scalar_int.try_to_u32().unwrap()); - let variant = def.variant(variant_idx); - debug!(?variant); - - // Need to downcast place - let place_downcast = place.project_downcast(ecx, variant_idx).unwrap(); - debug!(?place_downcast); - - // fill `place_downcast` with the valtree elements corresponding to - // the fields of the enum - let fields = &variant.fields; - let inner_valtrees = &inner_valtrees[1..]; - for (i, field) in fields.iter().enumerate() { - debug!(?i, ?field); - - let field_ty = field.ty(tcx, substs); - debug!(?field_ty); - - let mut field_mplace = ecx.mplace_field(&place_downcast, i).unwrap(); - debug!(?field_mplace); - let 
inner_valtree = inner_valtrees[i]; - - fill_place_recursively(ecx, &mut field_mplace, inner_valtree, field_ty); - dump_place(&ecx, field_mplace.into()); + if let Some(variant_idx) = variant_idx { + // don't forget filling the place with the discriminant of the enum + ecx.write_discriminant(variant_idx, &(*place).into()).unwrap(); } - debug!("dump of place_downcast"); - dump_place(ecx, place_downcast.into()); - - // don't forget filling the place with the discriminant of the enum - ecx.write_discriminant(variant_idx, &(*place).into()).unwrap(); dump_place(ecx, (*place).into()); } - ty::Adt(def, substs) => { - debug!("Adt def: {:?} with substs: {:?}", def, substs); - let inner_valtrees = valtree.unwrap_branch(); - debug!(?inner_valtrees); - let (fields, inner_valtrees) = - (&def.variant(VariantIdx::from_usize(0)).fields[..], inner_valtrees); - - debug!("fields: {:?}", fields); - - for (i, field) in fields.iter().enumerate() { - let field_ty = field.ty(tcx, substs); - debug!(?field_ty); - let old_field_ty = tcx.type_of(field.did); - debug!(?old_field_ty); - let projection_elem = ProjectionElem::Field(Field::from_usize(i), field_ty); - let mut field_place = ecx.mplace_projection(place, projection_elem).unwrap(); - let inner_valtree = inner_valtrees[i]; - - fill_place_recursively(ecx, &mut field_place, inner_valtree, field_ty); - } - } - _ => {} + _ => bug!("shouldn't have created a ValTree for {:?}", ty), } } fn dump_place<'tcx>(ecx: &CompileTimeEvalContext<'tcx, 'tcx>, place: PlaceTy<'tcx>) { - trace!("{:?}", ecx.dump_place(place.place)); + trace!("{:?}", ecx.dump_place(*place)); } diff --git a/compiler/rustc_const_eval/src/interpret/mod.rs b/compiler/rustc_const_eval/src/interpret/mod.rs index dba746e72e245..69d6c8470a273 100644 --- a/compiler/rustc_const_eval/src/interpret/mod.rs +++ b/compiler/rustc_const_eval/src/interpret/mod.rs @@ -14,7 +14,7 @@ mod terminator; mod traits; mod util; mod validity; -pub(crate) mod visitor; +mod visitor; pub use 
rustc_middle::mir::interpret::*; // have all the `interpret` symbols in one place: here @@ -27,7 +27,7 @@ pub use self::memory::{AllocCheck, AllocRef, AllocRefMut, FnVal, Memory, MemoryK pub use self::operand::{ImmTy, Immediate, OpTy, Operand}; pub use self::place::{MPlaceTy, MemPlace, MemPlaceMeta, Place, PlaceTy}; pub use self::validity::{CtfeValidationMode, RefTracking}; -pub use self::visitor::{MutValueVisitor, ValueVisitor}; +pub use self::visitor::{MutValueVisitor, Value, ValueVisitor}; crate use self::intrinsics::eval_nullary_intrinsic; use eval_context::{from_known_layout, mir_assign_valid_types}; diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs index 170fbab2cce04..f2d833b320249 100644 --- a/compiler/rustc_const_eval/src/interpret/operand.rs +++ b/compiler/rustc_const_eval/src/interpret/operand.rs @@ -98,7 +98,7 @@ impl<'tcx, Tag: Provenance> Immediate { // as input for binary and cast operations. #[derive(Copy, Clone, Debug)] pub struct ImmTy<'tcx, Tag: Provenance = AllocId> { - pub imm: Immediate, + imm: Immediate, pub layout: TyAndLayout<'tcx>, } @@ -248,7 +248,7 @@ impl<'tcx, Tag: Provenance> ImmTy<'tcx, Tag> { impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { /// Try reading an immediate in memory; this is interesting particularly for `ScalarPair`. /// Returns `None` if the layout does not permit loading this as a value. 
- pub(crate) fn try_read_immediate_from_mplace( + fn try_read_immediate_from_mplace( &self, mplace: &MPlaceTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx, Option>> { diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs index 8caf9eee2d95f..3bc6494f0088e 100644 --- a/compiler/rustc_const_eval/src/interpret/place.rs +++ b/compiler/rustc_const_eval/src/interpret/place.rs @@ -82,7 +82,7 @@ rustc_data_structures::static_assert_size!(Place, 56); #[derive(Copy, Clone, Debug)] pub struct PlaceTy<'tcx, Tag: Provenance = AllocId> { - pub(crate) place: Place, // Keep this private; it helps enforce invariants. + place: Place, // Keep this private; it helps enforce invariants. pub layout: TyAndLayout<'tcx>, } @@ -100,7 +100,7 @@ impl<'tcx, Tag: Provenance> std::ops::Deref for PlaceTy<'tcx, Tag> { /// A MemPlace with its layout. Constructing it is only possible in this module. #[derive(Copy, Clone, Hash, Eq, PartialEq, Debug)] pub struct MPlaceTy<'tcx, Tag: Provenance = AllocId> { - pub(crate) mplace: MemPlace, + mplace: MemPlace, pub layout: TyAndLayout<'tcx>, } @@ -589,7 +589,6 @@ where } /// Projects into a place. - #[instrument(skip(self), level = "debug")] pub fn place_projection( &mut self, base: &PlaceTy<'tcx, M::PointerTag>, @@ -626,18 +625,15 @@ where &mut self, place: mir::Place<'tcx>, ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> { - debug!("projection: {:?}", place.projection); let mut place_ty = PlaceTy { // This works even for dead/uninitialized locals; we check further when writing place: Place::Local { frame: self.frame_idx(), local: place.local }, layout: self.layout_of_local(self.frame(), place.local, None)?, }; - debug!(?place_ty); for elem in place.projection.iter() { place_ty = self.place_projection(&place_ty, &elem)? } - debug!("place after projections: {:?}", place_ty); trace!("{:?}", self.dump_place(place_ty.place)); // Sanity-check the type we ended up with. 
@@ -693,7 +689,6 @@ where /// Write an immediate to a place. /// If you use this you are responsible for validating that things got copied at the /// right type. - #[instrument(skip(self), level = "debug")] fn write_immediate_no_validate( &mut self, src: Immediate, @@ -746,7 +741,6 @@ where /// Write an immediate to memory. /// If you use this you are responsible for validating that things got copied at the /// right type. - #[instrument(skip(self), level = "debug")] fn write_immediate_to_mplace_no_validate( &mut self, value: Immediate, @@ -769,7 +763,6 @@ where // cover all the bytes! match value { Immediate::Scalar(scalar) => { - debug!(?scalar); match dest.layout.abi { Abi::Scalar(_) => {} // fine _ => span_bug!( @@ -882,7 +875,6 @@ where // Let us see if the layout is simple so we take a shortcut, avoid force_allocation. let src = match self.try_read_immediate(src)? { Ok(src_val) => { - debug!("immediate from src is {:?}", src_val); assert!(!src.layout.is_unsized(), "cannot have unsized immediates"); // Yay, we got a value that we can write directly. // FIXME: Add a check to make sure that if `src` is indirect, @@ -978,7 +970,6 @@ where ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::PointerTag>, Option)> { let (mplace, size) = match place.place { Place::Local { frame, local } => { - debug!("LocalPlace"); match M::access_local_mut(self, frame, local)? { Ok(&mut local_val) => { // We need to make an allocation. @@ -992,12 +983,9 @@ where let (size, align) = self .size_and_align_of(&meta, &local_layout)? .expect("Cannot allocate for non-dyn-sized type"); - debug!(?size, ?align); let ptr = self.allocate_ptr(size, align, MemoryKind::Stack)?; - debug!("allocated ptr: {:?}", ptr); let mplace = MemPlace { ptr: ptr.into(), align, meta }; if let LocalValue::Live(Operand::Immediate(value)) = local_val { - debug!("LocalValue::Live: immediate value {:?}", value); // Preserve old value. // We don't have to validate as we can assume the local // was already valid for its type. 
diff --git a/compiler/rustc_middle/src/mir/interpret/allocation.rs b/compiler/rustc_middle/src/mir/interpret/allocation.rs index 5e7e362eeef42..8cfc5ed0a95d0 100644 --- a/compiler/rustc_middle/src/mir/interpret/allocation.rs +++ b/compiler/rustc_middle/src/mir/interpret/allocation.rs @@ -433,7 +433,6 @@ impl Allocation { return self.write_uninit(cx, range); } }; - debug!(?val); // `to_bits_or_ptr_internal` is the right method because we just want to store this data // as-is into memory. @@ -444,16 +443,13 @@ impl Allocation { } Ok(data) => (data, None), }; - debug!(?bytes, ?provenance); let endian = cx.data_layout().endian; let dst = self.get_bytes_mut(cx, range)?; - debug!(?dst); write_target_uint(endian, dst, bytes).unwrap(); // See if we have to also write a relocation. if let Some(provenance) = provenance { - debug!("insert relocation for {:?}", provenance); self.relocations.0.insert(range.start, provenance); } diff --git a/compiler/rustc_middle/src/mir/interpret/pointer.rs b/compiler/rustc_middle/src/mir/interpret/pointer.rs index cb36e5409543d..c71aea417eca0 100644 --- a/compiler/rustc_middle/src/mir/interpret/pointer.rs +++ b/compiler/rustc_middle/src/mir/interpret/pointer.rs @@ -158,7 +158,7 @@ impl Provenance for AllocId { #[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, TyEncodable, TyDecodable, Hash)] #[derive(HashStable)] pub struct Pointer { - pub offset: Size, // FIXME This should probably be private + pub(super) offset: Size, // kept private to avoid accidental misinterpretation (meaning depends on `Tag` type) pub provenance: Tag, } From f7eae4e580d1e4db03800732421288c2ab557f1a Mon Sep 17 00:00:00 2001 From: b-naber Date: Sun, 24 Apr 2022 10:59:21 +0200 Subject: [PATCH 6/9] include valtree creation and valtree -> constvalue conversion in debug assertions check --- .../src/const_eval/eval_queries.rs | 9 +++- .../src/const_eval/valtrees.rs | 52 ++++++++----------- 2 files changed, 30 insertions(+), 31 deletions(-) diff --git 
a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs index 38fecf7232ebc..52b65c41b4f13 100644 --- a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs +++ b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs @@ -1,4 +1,4 @@ -use super::{CompileTimeEvalContext, CompileTimeInterpreter, ConstEvalErr}; +use super::{const_to_valtree, CompileTimeEvalContext, CompileTimeInterpreter, ConstEvalErr}; use crate::interpret::eval_nullary_intrinsic; use crate::interpret::{ intern_const_alloc_recursive, Allocation, ConstAlloc, ConstValue, CtfeValidationMode, GlobalId, @@ -215,6 +215,13 @@ fn turn_into_const_value<'tcx>( "the `eval_to_const_value_raw` query should not be used for statics, use `eval_to_allocation` instead" ); + if cfg!(debug_assertions) { + if let Some(valtree) = const_to_valtree(tcx, key.param_env, constant) { + let const_val = tcx.valtree_to_const_val((constant.ty, valtree)); + debug!(?const_val); + } + } + // Turn this into a proper constant. let const_val = op_to_const(&ecx, &mplace.into()); debug!(?const_val); diff --git a/compiler/rustc_const_eval/src/const_eval/valtrees.rs b/compiler/rustc_const_eval/src/const_eval/valtrees.rs index 39a3df79a28eb..f35b28e187a65 100644 --- a/compiler/rustc_const_eval/src/const_eval/valtrees.rs +++ b/compiler/rustc_const_eval/src/const_eval/valtrees.rs @@ -5,7 +5,6 @@ use crate::interpret::{ Scalar, ScalarMaybeUninit, }; use rustc_middle::mir::interpret::ConstAlloc; -use rustc_middle::mir::{Field, ProjectionElem}; use rustc_middle::ty::{self, ScalarInt, Ty, TyCtxt}; use rustc_span::source_map::DUMMY_SP; use rustc_target::abi::VariantIdx; @@ -197,45 +196,45 @@ pub fn valtree_to_const_value<'tcx>( let mut ecx = mk_eval_cx(tcx, DUMMY_SP, param_env, false); match ty.kind() { + ty::FnDef(..) 
=> { + assert!(valtree.unwrap_branch().is_empty()); + ConstValue::Scalar(Scalar::ZST) + } ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => match valtree { ty::ValTree::Leaf(scalar_int) => ConstValue::Scalar(Scalar::Int(scalar_int)), ty::ValTree::Branch(_) => bug!( "ValTrees for Bool, Int, Uint, Float or Char should have the form ValTree::Leaf" ), }, - ty::Ref(_, inner_ty, _) => { - // create a place for the pointee - let mut pointee_place = create_pointee_place(&mut ecx, *inner_ty, valtree); - debug!(?pointee_place); - - // insert elements of valtree into `place` - fill_place_recursively(&mut ecx, &mut pointee_place, valtree); - dump_place(&ecx, pointee_place.into()); - intern_const_alloc_recursive(&mut ecx, InternKind::Constant, &pointee_place).unwrap(); - - let ref_place = pointee_place.to_ref(&tcx); - let imm = ImmTy::from_immediate(ref_place, tcx.layout_of(param_env_ty).unwrap()); - - let const_val = op_to_const(&ecx, &imm.into()); - debug!(?const_val); - - const_val - } - ty::Tuple(_) | ty::Array(_, _) | ty::Adt(..) => { - let mut place = create_mplace_from_layout(&mut ecx, ty); + ty::Ref(_, _, _) | ty::Tuple(_) | ty::Array(_, _) | ty::Adt(..) => { + let mut place = match ty.kind() { + ty::Ref(_, inner_ty, _) => { + // Need to create a place for the pointee to fill for Refs + create_pointee_place(&mut ecx, *inner_ty, valtree) + } + _ => create_mplace_from_layout(&mut ecx, ty), + }; debug!(?place); fill_place_recursively(&mut ecx, &mut place, valtree); dump_place(&ecx, place.into()); intern_const_alloc_recursive(&mut ecx, InternKind::Constant, &place).unwrap(); - let const_val = op_to_const(&ecx, &place.into()); + let const_val = match ty.kind() { + ty::Ref(_, _, _) => { + let ref_place = place.to_ref(&tcx); + let imm = + ImmTy::from_immediate(ref_place, tcx.layout_of(param_env_ty).unwrap()); + + op_to_const(&ecx, &imm.into()) + } + _ => op_to_const(&ecx, &place.into()), + }; debug!(?const_val); const_val } ty::Never - | ty::FnDef(..) 
| ty::Error(_) | ty::Foreign(..) | ty::Infer(ty::FreshIntTy(_)) @@ -331,13 +330,6 @@ fn fill_place_recursively<'tcx>( debug!(?i, ?inner_valtree); let mut place_inner = match *ty.kind() { - ty::Adt(def, substs) if !def.is_enum() => { - let field = &def.variant(VariantIdx::from_usize(0)).fields[i]; - let field_ty = field.ty(tcx, substs); - let projection_elem = ProjectionElem::Field(Field::from_usize(i), field_ty); - - ecx.mplace_projection(&place_adjusted, projection_elem).unwrap() - } ty::Adt(_, _) | ty::Tuple(_) => ecx.mplace_field(&place_adjusted, i).unwrap(), ty::Array(_, _) | ty::Str => { ecx.mplace_index(&place_adjusted, i as u64).unwrap() From 6fc3e630fb98cc6200f60ebbb7c7f826b2d48407 Mon Sep 17 00:00:00 2001 From: b-naber Date: Tue, 26 Apr 2022 10:56:04 +0200 Subject: [PATCH 7/9] add hacky closure to struct_tail_with_normalize in order to allow us to walk valtrees in lockstep with the type --- compiler/rustc_middle/src/ty/sty.rs | 2 +- compiler/rustc_middle/src/ty/util.rs | 14 ++++++++-- .../src/traits/project.rs | 28 +++++++++++-------- 3 files changed, 28 insertions(+), 16 deletions(-) diff --git a/compiler/rustc_middle/src/ty/sty.rs b/compiler/rustc_middle/src/ty/sty.rs index 5a13216846d54..1509de0e93070 100644 --- a/compiler/rustc_middle/src/ty/sty.rs +++ b/compiler/rustc_middle/src/ty/sty.rs @@ -2273,7 +2273,7 @@ impl<'tcx> Ty<'tcx> { tcx: TyCtxt<'tcx>, normalize: impl FnMut(Ty<'tcx>) -> Ty<'tcx>, ) -> (Ty<'tcx>, bool) { - let tail = tcx.struct_tail_with_normalize(self, normalize); + let tail = tcx.struct_tail_with_normalize(self, normalize, || {}); match tail.kind() { // Sized types ty::Infer(ty::IntVar(_) | ty::FloatVar(_)) diff --git a/compiler/rustc_middle/src/ty/util.rs b/compiler/rustc_middle/src/ty/util.rs index 39038e85b11a0..e7cc8b3427051 100644 --- a/compiler/rustc_middle/src/ty/util.rs +++ b/compiler/rustc_middle/src/ty/util.rs @@ -187,7 +187,7 @@ impl<'tcx> TyCtxt<'tcx> { /// if input `ty` is not a structure at all. 
pub fn struct_tail_without_normalization(self, ty: Ty<'tcx>) -> Ty<'tcx> { let tcx = self; - tcx.struct_tail_with_normalize(ty, |ty| ty) + tcx.struct_tail_with_normalize(ty, |ty| ty, || {}) } /// Returns the deeply last field of nested structures, or the same type if @@ -203,7 +203,7 @@ impl<'tcx> TyCtxt<'tcx> { param_env: ty::ParamEnv<'tcx>, ) -> Ty<'tcx> { let tcx = self; - tcx.struct_tail_with_normalize(ty, |ty| tcx.normalize_erasing_regions(param_env, ty)) + tcx.struct_tail_with_normalize(ty, |ty| tcx.normalize_erasing_regions(param_env, ty), || {}) } /// Returns the deeply last field of nested structures, or the same type if @@ -220,6 +220,10 @@ impl<'tcx> TyCtxt<'tcx> { self, mut ty: Ty<'tcx>, mut normalize: impl FnMut(Ty<'tcx>) -> Ty<'tcx>, + // This is a hack that is currently used to allow us to walk a ValTree + // in lockstep with the type in order to get the ValTree branch that + // corresponds to an unsized field. + mut f: impl FnMut() -> (), ) -> Ty<'tcx> { let recursion_limit = self.recursion_limit(); for iteration in 0.. { @@ -235,12 +239,16 @@ impl<'tcx> TyCtxt<'tcx> { break; } match def.non_enum_variant().fields.last() { - Some(f) => ty = f.ty(self, substs), + Some(field) => { + f(); + ty = field.ty(self, substs); + } None => break, } } ty::Tuple(tys) if let Some((&last_ty, _)) = tys.split_last() => { + f(); ty = last_ty; } diff --git a/compiler/rustc_trait_selection/src/traits/project.rs b/compiler/rustc_trait_selection/src/traits/project.rs index 8ba390c71db97..c7a61cbe25a1a 100644 --- a/compiler/rustc_trait_selection/src/traits/project.rs +++ b/compiler/rustc_trait_selection/src/traits/project.rs @@ -1519,18 +1519,22 @@ fn assemble_candidates_from_impls<'cx, 'tcx>( // Any type with multiple potential metadata types is therefore not eligible. 
let self_ty = selcx.infcx().shallow_resolve(obligation.predicate.self_ty()); - let tail = selcx.tcx().struct_tail_with_normalize(self_ty, |ty| { - // We throw away any obligations we get from this, since we normalize - // and confirm these obligations once again during confirmation - normalize_with_depth( - selcx, - obligation.param_env, - obligation.cause.clone(), - obligation.recursion_depth + 1, - ty, - ) - .value - }); + let tail = selcx.tcx().struct_tail_with_normalize( + self_ty, + |ty| { + // We throw away any obligations we get from this, since we normalize + // and confirm these obligations once again during confirmation + normalize_with_depth( + selcx, + obligation.param_env, + obligation.cause.clone(), + obligation.recursion_depth + 1, + ty, + ) + .value + }, + || {}, + ); match tail.kind() { ty::Bool From bfefb4d74cbd1520afaa97db1eba59c06283c696 Mon Sep 17 00:00:00 2001 From: b-naber Date: Tue, 26 Apr 2022 10:58:45 +0200 Subject: [PATCH 8/9] account for custom DSTs in valtree -> constvalue conversion --- .../src/const_eval/eval_queries.rs | 8 +- .../src/const_eval/valtrees.rs | 118 +++++++++++++++++- .../rustc_const_eval/src/interpret/place.rs | 6 + 3 files changed, 123 insertions(+), 9 deletions(-) diff --git a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs index 52b65c41b4f13..6112c4ffbfa6d 100644 --- a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs +++ b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs @@ -215,6 +215,10 @@ fn turn_into_const_value<'tcx>( "the `eval_to_const_value_raw` query should not be used for statics, use `eval_to_allocation` instead" ); + // Turn this into a proper constant. 
+ let const_val = op_to_const(&ecx, &mplace.into()); + debug!(?const_val); + if cfg!(debug_assertions) { if let Some(valtree) = const_to_valtree(tcx, key.param_env, constant) { let const_val = tcx.valtree_to_const_val((constant.ty, valtree)); @@ -222,10 +226,6 @@ fn turn_into_const_value<'tcx>( } } - // Turn this into a proper constant. - let const_val = op_to_const(&ecx, &mplace.into()); - debug!(?const_val); - const_val } diff --git a/compiler/rustc_const_eval/src/const_eval/valtrees.rs b/compiler/rustc_const_eval/src/const_eval/valtrees.rs index f35b28e187a65..389301300ea15 100644 --- a/compiler/rustc_const_eval/src/const_eval/valtrees.rs +++ b/compiler/rustc_const_eval/src/const_eval/valtrees.rs @@ -1,13 +1,13 @@ use super::eval_queries::{mk_eval_cx, op_to_const}; use super::machine::CompileTimeEvalContext; use crate::interpret::{ - intern_const_alloc_recursive, ConstValue, ImmTy, Immediate, InternKind, MemoryKind, PlaceTy, - Scalar, ScalarMaybeUninit, + intern_const_alloc_recursive, ConstValue, ImmTy, Immediate, InternKind, MemPlaceMeta, + MemoryKind, PlaceTy, Scalar, ScalarMaybeUninit, }; use rustc_middle::mir::interpret::ConstAlloc; use rustc_middle::ty::{self, ScalarInt, Ty, TyCtxt}; use rustc_span::source_map::DUMMY_SP; -use rustc_target::abi::VariantIdx; +use rustc_target::abi::{Align, VariantIdx}; use crate::interpret::MPlaceTy; use crate::interpret::Value; @@ -108,7 +108,9 @@ fn const_to_valtree_inner<'tcx>( ty::Tuple(substs) => branches(ecx, place, substs.len(), None), ty::Adt(def, _) => { - if def.variants().is_empty() { + if def.is_union() { + return None + } else if def.variants().is_empty() { bug!("uninhabited types should have errored and never gotten converted to valtree") } @@ -149,6 +151,41 @@ fn create_mplace_from_layout<'tcx>( ecx.allocate(layout, MemoryKind::Stack).unwrap() } +// Walks custom DSTs and gets the type of the unsized field and the number of elements +// in the unsized field. 
+fn get_info_on_unsized_field<'tcx>( + ty: Ty<'tcx>, + valtree: ty::ValTree<'tcx>, + tcx: TyCtxt<'tcx>, +) -> (Ty<'tcx>, usize) { + let mut last_valtree = valtree; + let tail = tcx.struct_tail_with_normalize( + ty, + |ty| ty, + || { + let branches = last_valtree.unwrap_branch(); + last_valtree = branches[branches.len() - 1]; + debug!(?branches, ?last_valtree); + }, + ); + let unsized_inner_ty = match tail.kind() { + ty::Slice(t) => *t, + ty::Str => tail, + _ => bug!("expected Slice or Str"), + }; + + // Have to adjust type for ty::Str + let unsized_inner_ty = match unsized_inner_ty.kind() { + ty::Str => tcx.mk_ty(ty::Uint(ty::UintTy::U8)), + _ => unsized_inner_ty, + }; + + // Get the number of elements in the unsized field + let num_elems = last_valtree.unwrap_branch().len(); + + (unsized_inner_ty, num_elems) +} + #[instrument(skip(ecx), level = "debug")] fn create_pointee_place<'tcx>( ecx: &mut CompileTimeEvalContext<'tcx, 'tcx>, @@ -173,6 +210,33 @@ fn create_pointee_place<'tcx>( place } + ty::Adt(_, _) if !ty.is_sized(ecx.tcx, ty::ParamEnv::empty()) => { + // We need to create `Allocation`s for custom DSTs + + let layout = tcx.layout_of(ty::ParamEnv::empty().and(ty)).unwrap(); + let sized_fields_size = layout.layout.size(); + let (unsized_inner_ty, num_elems) = get_info_on_unsized_field(ty, valtree, tcx); + let unsized_inner_ty_size = + tcx.layout_of(ty::ParamEnv::empty().and(unsized_inner_ty)).unwrap().layout.size(); + debug!(?unsized_inner_ty, ?unsized_inner_ty_size, ?num_elems); + + // Get the size of the array behind the DST + let dst_size = unsized_inner_ty_size.checked_mul(num_elems as u64, &tcx).unwrap(); + + let ptr = ecx + .allocate_ptr( + sized_fields_size.checked_add(dst_size, &tcx).unwrap(), + Align::from_bytes(1).unwrap(), + MemoryKind::Stack, + ) + .unwrap(); + debug!(?ptr); + + let place = MPlaceTy::from_aligned_ptr(ptr.into(), layout); + debug!(?place); + + place + } _ => create_mplace_from_layout(ecx, ty), } } @@ -270,6 +334,13 @@ fn 
fill_place_recursively<'tcx>( let ty = place.layout.ty; match ty.kind() { + ty::FnDef(_, _) => { + ecx.write_immediate( + Immediate::Scalar(ScalarMaybeUninit::Scalar(Scalar::ZST)), + &(*place).into(), + ) + .unwrap(); + } ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => { let scalar_int = valtree.unwrap_leaf(); debug!("writing trivial valtree {:?} to place {:?}", scalar_int, place); @@ -306,6 +377,9 @@ fn fill_place_recursively<'tcx>( ty::Adt(_, _) | ty::Tuple(_) | ty::Array(_, _) | ty::Str => { let branches = valtree.unwrap_branch(); + // Need to collect the length of the unsized field for meta info + let mut unsized_meta_info = None; + // Need to downcast place for enums let (place_adjusted, branches, variant_idx) = match ty.kind() { ty::Adt(def, _) if def.is_enum() => { @@ -329,6 +403,35 @@ fn fill_place_recursively<'tcx>( for (i, inner_valtree) in branches.iter().enumerate() { debug!(?i, ?inner_valtree); + if !ty.is_sized(ecx.tcx, ty::ParamEnv::empty()) && i == branches.len() - 1 { + // Note: For custom DSTs we need to manually process the last unsized field. + // We created a `Pointer` for the `Allocation` of the complete sized version of + // the Adt in `create_pointee_place` and now we fill that `Allocation` with the + // values in the ValTree. For the unsized field we have to additionally add the meta + // data. 
+ + let offset = place.layout.fields.offset(i); + let (unsized_inner_ty, num_elems) = get_info_on_unsized_field(ty, valtree, tcx); + unsized_meta_info = Some(num_elems); + + // We create an array type to allow the recursive call to fill the place + // corresponding to the array + let arr_ty = tcx.mk_array(unsized_inner_ty, num_elems as u64); + debug!(?arr_ty); + let arr_layout = tcx.layout_of(ty::ParamEnv::empty().and(arr_ty)).unwrap(); + let mut place_arr = + place.offset(offset, MemPlaceMeta::None, arr_layout, &tcx).unwrap(); + debug!(?place_arr); + + fill_place_recursively(ecx, &mut place_arr, *inner_valtree); + dump_place(&ecx, place_arr.into()); + + // Add the meta information for the unsized type + place_arr.meta = MemPlaceMeta::Meta(Scalar::from_u64(num_elems as u64)); + + break; + } + let mut place_inner = match *ty.kind() { ty::Adt(_, _) | ty::Tuple(_) => ecx.mplace_field(&place_adjusted, i).unwrap(), ty::Array(_, _) | ty::Str => { @@ -338,7 +441,6 @@ fn fill_place_recursively<'tcx>( }; debug!(?place_inner); - // insert valtree corresponding to tuple element into place fill_place_recursively(ecx, &mut place_inner, *inner_valtree); dump_place(&ecx, place_inner.into()); } @@ -351,6 +453,12 @@ fn fill_place_recursively<'tcx>( ecx.write_discriminant(variant_idx, &(*place).into()).unwrap(); } + // add meta information for unsized type + if !ty.is_sized(ecx.tcx, ty::ParamEnv::empty()) { + place.meta = + MemPlaceMeta::Meta(Scalar::from_u64(unsized_meta_info.unwrap() as u64)); + } + dump_place(ecx, (*place).into()); } _ => bug!("shouldn't have created a ValTree for {:?}", ty), diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs index 3bc6494f0088e..380eb5263618b 100644 --- a/compiler/rustc_const_eval/src/interpret/place.rs +++ b/compiler/rustc_const_eval/src/interpret/place.rs @@ -115,6 +115,12 @@ impl<'tcx, Tag: Provenance> std::ops::Deref for MPlaceTy<'tcx, Tag> { } } +impl<'tcx, Tag: Provenance> 
std::ops::DerefMut for MPlaceTy<'tcx, Tag> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.mplace + } +} + impl<'tcx, Tag: Provenance> From> for PlaceTy<'tcx, Tag> { #[inline(always)] fn from(mplace: MPlaceTy<'tcx, Tag>) -> Self { From ef5f07256cfa4e1f43a18acb45e0d8108f8471a4 Mon Sep 17 00:00:00 2001 From: b-naber Date: Wed, 27 Apr 2022 11:11:54 +0200 Subject: [PATCH 9/9] combine all unsized types and add another recursive call to process nested unsized types correctly --- .../src/const_eval/eval_queries.rs | 9 +- .../src/const_eval/valtrees.rs | 165 ++++++++---------- compiler/rustc_middle/src/ty/util.rs | 2 +- 3 files changed, 79 insertions(+), 97 deletions(-) diff --git a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs index 6112c4ffbfa6d..38fecf7232ebc 100644 --- a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs +++ b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs @@ -1,4 +1,4 @@ -use super::{const_to_valtree, CompileTimeEvalContext, CompileTimeInterpreter, ConstEvalErr}; +use super::{CompileTimeEvalContext, CompileTimeInterpreter, ConstEvalErr}; use crate::interpret::eval_nullary_intrinsic; use crate::interpret::{ intern_const_alloc_recursive, Allocation, ConstAlloc, ConstValue, CtfeValidationMode, GlobalId, @@ -219,13 +219,6 @@ fn turn_into_const_value<'tcx>( let const_val = op_to_const(&ecx, &mplace.into()); debug!(?const_val); - if cfg!(debug_assertions) { - if let Some(valtree) = const_to_valtree(tcx, key.param_env, constant) { - let const_val = tcx.valtree_to_const_val((constant.ty, valtree)); - debug!(?const_val); - } - } - const_val } diff --git a/compiler/rustc_const_eval/src/const_eval/valtrees.rs b/compiler/rustc_const_eval/src/const_eval/valtrees.rs index 389301300ea15..374179d0cc24d 100644 --- a/compiler/rustc_const_eval/src/const_eval/valtrees.rs +++ b/compiler/rustc_const_eval/src/const_eval/valtrees.rs @@ -194,55 +194,48 @@ fn 
create_pointee_place<'tcx>( ) -> MPlaceTy<'tcx> { let tcx = ecx.tcx.tcx; - match ty.kind() { - ty::Slice(_) | ty::Str => { - let slice_ty = match ty.kind() { - ty::Slice(slice_ty) => *slice_ty, - ty::Str => tcx.mk_ty(ty::Uint(ty::UintTy::U8)), - _ => bug!("expected ty::Slice | ty::Str"), - }; - - // Create a place for the underlying array - let len = valtree.unwrap_branch().len() as u64; - let arr_ty = tcx.mk_array(slice_ty, len as u64); - let place = create_mplace_from_layout(ecx, arr_ty); - debug!(?place); + if !ty.is_sized(ecx.tcx, ty::ParamEnv::empty()) { + // We need to create `Allocation`s for custom DSTs + + let (unsized_inner_ty, num_elems) = get_info_on_unsized_field(ty, valtree, tcx); + let unsized_inner_ty = match unsized_inner_ty.kind() { + ty::Str => tcx.mk_ty(ty::Uint(ty::UintTy::U8)), + _ => unsized_inner_ty, + }; + let unsized_inner_ty_size = + tcx.layout_of(ty::ParamEnv::empty().and(unsized_inner_ty)).unwrap().layout.size(); + debug!(?unsized_inner_ty, ?unsized_inner_ty_size, ?num_elems); + + // for custom DSTs only the last field/element is unsized, but we need to also allocate + // space for the other fields/elements + let layout = tcx.layout_of(ty::ParamEnv::empty().and(ty)).unwrap(); + let size_of_sized_part = layout.layout.size(); + + // Get the size of the memory behind the DST + let dst_size = unsized_inner_ty_size.checked_mul(num_elems as u64, &tcx).unwrap(); + + let ptr = ecx + .allocate_ptr( + size_of_sized_part.checked_add(dst_size, &tcx).unwrap(), + Align::from_bytes(1).unwrap(), + MemoryKind::Stack, + ) + .unwrap(); + debug!(?ptr); - place - } - ty::Adt(_, _) if !ty.is_sized(ecx.tcx, ty::ParamEnv::empty()) => { - // We need to create `Allocation`s for custom DSTs - - let layout = tcx.layout_of(ty::ParamEnv::empty().and(ty)).unwrap(); - let sized_fields_size = layout.layout.size(); - let (unsized_inner_ty, num_elems) = get_info_on_unsized_field(ty, valtree, tcx); - let unsized_inner_ty_size = - 
tcx.layout_of(ty::ParamEnv::empty().and(unsized_inner_ty)).unwrap().layout.size(); - debug!(?unsized_inner_ty, ?unsized_inner_ty_size, ?num_elems); - - // Get the size of the array behind the DST - let dst_size = unsized_inner_ty_size.checked_mul(num_elems as u64, &tcx).unwrap(); - - let ptr = ecx - .allocate_ptr( - sized_fields_size.checked_add(dst_size, &tcx).unwrap(), - Align::from_bytes(1).unwrap(), - MemoryKind::Stack, - ) - .unwrap(); - debug!(?ptr); - - let place = MPlaceTy::from_aligned_ptr(ptr.into(), layout); - debug!(?place); + let mut place = MPlaceTy::from_aligned_ptr(ptr.into(), layout); + place.meta = MemPlaceMeta::Meta(Scalar::from_u64(num_elems as u64)); + debug!(?place); - place - } - _ => create_mplace_from_layout(ecx, ty), + place + } else { + create_mplace_from_layout(ecx, ty) } } /// Converts a `ValTree` to a `ConstValue`, which is needed after mir /// construction has finished. +// FIXME Merge `valtree_to_const_value` and `fill_place_recursively` into one function #[instrument(skip(tcx), level = "debug")] pub fn valtree_to_const_value<'tcx>( tcx: TyCtxt<'tcx>, @@ -374,12 +367,9 @@ fn fill_place_recursively<'tcx>( ecx.write_immediate(imm, &(*place).into()).unwrap(); } - ty::Adt(_, _) | ty::Tuple(_) | ty::Array(_, _) | ty::Str => { + ty::Adt(_, _) | ty::Tuple(_) | ty::Array(_, _) | ty::Str | ty::Slice(_) => { let branches = valtree.unwrap_branch(); - // Need to collect the length of the unsized field for meta info - let mut unsized_meta_info = None; - // Need to downcast place for enums let (place_adjusted, branches, variant_idx) = match ty.kind() { ty::Adt(def, _) if def.is_enum() => { @@ -399,48 +389,52 @@ fn fill_place_recursively<'tcx>( }; debug!(?place_adjusted, ?branches); - // Create the places for the fields and fill them recursively + // Create the places (by indexing into `place`) for the fields and fill + // them recursively for (i, inner_valtree) in branches.iter().enumerate() { debug!(?i, ?inner_valtree); - if !ty.is_sized(ecx.tcx, 
ty::ParamEnv::empty()) && i == branches.len() - 1 { - // Note: For custom DSTs we need to manually process the last unsized field. - // We created a `Pointer` for the `Allocation` of the complete sized version of - // the Adt in `create_pointee_place` and now we fill that `Allocation` with the - // values in the ValTree. For the unsized field we have to additionally add the meta - // data. - - let offset = place.layout.fields.offset(i); - let (unsized_inner_ty, num_elems) = get_info_on_unsized_field(ty, valtree, tcx); - unsized_meta_info = Some(num_elems); - - // We create an array type to allow the recursive call to fill the place - // corresponding to the array - let arr_ty = tcx.mk_array(unsized_inner_ty, num_elems as u64); - debug!(?arr_ty); - let arr_layout = tcx.layout_of(ty::ParamEnv::empty().and(arr_ty)).unwrap(); - let mut place_arr = - place.offset(offset, MemPlaceMeta::None, arr_layout, &tcx).unwrap(); - debug!(?place_arr); - - fill_place_recursively(ecx, &mut place_arr, *inner_valtree); - dump_place(&ecx, place_arr.into()); - - // Add the meta information for the unsized type - place_arr.meta = MemPlaceMeta::Meta(Scalar::from_u64(num_elems as u64)); - - break; - } - - let mut place_inner = match *ty.kind() { - ty::Adt(_, _) | ty::Tuple(_) => ecx.mplace_field(&place_adjusted, i).unwrap(), - ty::Array(_, _) | ty::Str => { - ecx.mplace_index(&place_adjusted, i as u64).unwrap() + let mut place_inner = match ty.kind() { + ty::Str | ty::Slice(_) => ecx.mplace_index(&place, i as u64).unwrap(), + _ if !ty.is_sized(ecx.tcx, ty::ParamEnv::empty()) + && i == branches.len() - 1 => + { + // Note: For custom DSTs we need to manually process the last unsized field. + // We created a `Pointer` for the `Allocation` of the complete sized version of + // the Adt in `create_pointee_place` and now we fill that `Allocation` with the + // values in the ValTree. For the unsized field we have to additionally add the meta + // data. 
+ + let (unsized_inner_ty, num_elems) = + get_info_on_unsized_field(ty, valtree, tcx); + debug!(?unsized_inner_ty); + + let inner_ty = match ty.kind() { + ty::Adt(def, substs) => { + def.variant(VariantIdx::from_u32(0)).fields[i].ty(tcx, substs) + } + ty::Tuple(inner_tys) => inner_tys[i], + _ => bug!("unexpected unsized type {:?}", ty), + }; + + let inner_layout = + tcx.layout_of(ty::ParamEnv::empty().and(inner_ty)).unwrap(); + debug!(?inner_layout); + + let offset = place_adjusted.layout.fields.offset(i); + place + .offset( + offset, + MemPlaceMeta::Meta(Scalar::from_u64(num_elems as u64)), + inner_layout, + &tcx, + ) + .unwrap() } - _ => bug!(), + _ => ecx.mplace_field(&place_adjusted, i).unwrap(), }; - debug!(?place_inner); + debug!(?place_inner); fill_place_recursively(ecx, &mut place_inner, *inner_valtree); dump_place(&ecx, place_inner.into()); } @@ -453,12 +447,7 @@ fn fill_place_recursively<'tcx>( ecx.write_discriminant(variant_idx, &(*place).into()).unwrap(); } - // add meta information for unsized type - if !ty.is_sized(ecx.tcx, ty::ParamEnv::empty()) { - place.meta = - MemPlaceMeta::Meta(Scalar::from_u64(unsized_meta_info.unwrap() as u64)); - } - + debug!("dump of place after writing discriminant:"); dump_place(ecx, (*place).into()); } _ => bug!("shouldn't have created a ValTree for {:?}", ty), diff --git a/compiler/rustc_middle/src/ty/util.rs b/compiler/rustc_middle/src/ty/util.rs index e7cc8b3427051..918fe49e8e3fc 100644 --- a/compiler/rustc_middle/src/ty/util.rs +++ b/compiler/rustc_middle/src/ty/util.rs @@ -220,7 +220,7 @@ impl<'tcx> TyCtxt<'tcx> { self, mut ty: Ty<'tcx>, mut normalize: impl FnMut(Ty<'tcx>) -> Ty<'tcx>, - // This is a hack that is currently used to allow us to walk a ValTree + // This is currently used to allow us to walk a ValTree // in lockstep with the type in order to get the ValTree branch that // corresponds to an unsized field. mut f: impl FnMut() -> (),