diff --git a/compiler/rustc_abi/src/lib.rs b/compiler/rustc_abi/src/lib.rs index ef0c763ac2038..d396f18d59c23 100644 --- a/compiler/rustc_abi/src/lib.rs +++ b/compiler/rustc_abi/src/lib.rs @@ -49,6 +49,14 @@ bitflags! { } } +/// Which niches (beyond the `null` niche) are available on references. +#[derive(Default, Copy, Clone, Hash, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))] +pub struct ReferenceNichePolicy { + pub size: bool, + pub align: bool, +} + #[derive(Copy, Clone, Debug, Eq, PartialEq)] #[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))] pub enum IntegerType { @@ -346,6 +354,33 @@ impl TargetDataLayout { } } + #[inline] + pub fn target_usize_max(&self) -> u64 { + self.pointer_size.unsigned_int_max().try_into().unwrap() + } + + #[inline] + pub fn target_isize_min(&self) -> i64 { + self.pointer_size.signed_int_min().try_into().unwrap() + } + + #[inline] + pub fn target_isize_max(&self) -> i64 { + self.pointer_size.signed_int_max().try_into().unwrap() + } + + /// Returns the (inclusive) range of possible addresses for an allocation with + /// the given size and alignment. + /// + /// Note that this doesn't take into account target-specific limitations. + #[inline] + pub fn address_range_for(&self, size: Size, align: Align) -> (u64, u64) { + let end = Size::from_bytes(self.target_usize_max()); + let min = align.bytes(); + let max = (end - size).align_down_to(align).bytes(); + (min, max) + } + #[inline] pub fn vector_align(&self, vec_size: Size) -> AbiAndPrefAlign { for &(size, align) in &self.vector_align { @@ -473,6 +508,12 @@ impl Size { Size::from_bytes((self.bytes() + mask) & !mask) } + #[inline] + pub fn align_down_to(self, align: Align) -> Size { + let mask = align.bytes() - 1; + Size::from_bytes(self.bytes() & !mask) + } + #[inline] pub fn is_aligned(self, align: Align) -> bool { let mask = align.bytes() - 1; @@ -967,6 +1008,43 @@ impl WrappingRange { } } + /// Returns `true` if `range` is contained in `self`. + #[inline(always)] + pub fn contains_range + Ord>(&self, range: RangeInclusive) -> bool { + if range.is_empty() { + return true; + } + + let (vmin, vmax) = range.into_inner(); + let (vmin, vmax) = (vmin.into(), vmax.into()); + + if self.start <= self.end { + self.start <= vmin && vmax <= self.end + } else { + // The last check is needed to cover the following case: + // `vmin ... start, end ... vmax`. In this special case there is no gap + // between `start` and `end` so we must return true. + self.start <= vmin || vmax <= self.end || self.start == self.end + 1 + } + } + + /// Returns `true` if `range` has an overlap with `self`. + #[inline(always)] + pub fn overlaps_range + Ord>(&self, range: RangeInclusive) -> bool { + if range.is_empty() { + return false; + } + + let (vmin, vmax) = range.into_inner(); + let (vmin, vmax) = (vmin.into(), vmax.into()); + + if self.start <= self.end { + self.start <= vmax && vmin <= self.end + } else { + self.start <= vmax || vmin <= self.end + } + } + /// Returns `self` with replaced `start` #[inline(always)] pub fn with_start(mut self, start: u128) -> Self { @@ -984,9 +1062,15 @@ impl WrappingRange { /// Returns `true` if `size` completely fills the range. #[inline] pub fn is_full_for(&self, size: Size) -> bool { + debug_assert!(self.is_in_range_for(size)); + self.start == (self.end.wrapping_add(1) & size.unsigned_int_max()) + } + + /// Returns `true` if the range is valid for `size`. 
+ #[inline(always)] + pub fn is_in_range_for(&self, size: Size) -> bool { let max_value = size.unsigned_int_max(); - debug_assert!(self.start <= max_value && self.end <= max_value); - self.start == (self.end.wrapping_add(1) & max_value) + self.start <= max_value && self.end <= max_value } } @@ -1427,16 +1511,21 @@ impl Niche { pub fn reserve(&self, cx: &C, count: u128) -> Option<(u128, Scalar)> { assert!(count > 0); + if count > self.available(cx) { + return None; + } let Self { value, valid_range: v, .. } = *self; - let size = value.size(cx); - assert!(size.bits() <= 128); - let max_value = size.unsigned_int_max(); + let max_value = value.size(cx).unsigned_int_max(); + let distance_end_zero = max_value - v.end; - let niche = v.end.wrapping_add(1)..v.start; - let available = niche.end.wrapping_sub(niche.start) & max_value; - if count > available { - return None; + // Null-pointer optimization. This is guaranteed by Rust (at least for `Option<_>`), + // and offers better codegen opportunities. + if count == 1 && matches!(value, Pointer(_)) && !v.contains(0) { + // Select which bound to move to minimize the number of lost niches. + let valid_range = + if v.start - 1 > distance_end_zero { v.with_end(0) } else { v.with_start(0) }; + return Some((0, Scalar::Initialized { value, valid_range })); } // Extend the range of valid values being reserved by moving either `v.start` or `v.end` bound. @@ -1459,7 +1548,6 @@ impl Niche { let end = v.end.wrapping_add(count) & max_value; Some((start, Scalar::Initialized { value, valid_range: v.with_end(end) })) }; - let distance_end_zero = max_value - v.end; if v.start > v.end { // zero is unavailable because wrapping occurs move_end(v) diff --git a/compiler/rustc_codegen_gcc/src/type_of.rs b/compiler/rustc_codegen_gcc/src/type_of.rs index 84d5783851273..a30bce0a313cb 100644 --- a/compiler/rustc_codegen_gcc/src/type_of.rs +++ b/compiler/rustc_codegen_gcc/src/type_of.rs @@ -339,7 +339,8 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { return pointee; } - let result = Ty::ty_and_layout_pointee_info_at(*self, cx, offset); + let assume_valid_ptr = true; + let result = Ty::ty_and_layout_pointee_info_at(*self, cx, offset, assume_valid_ptr); cx.pointee_infos.borrow_mut().insert((self.ty, offset), result); result diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs index 2dbd467cc84c3..29dd53ff763a1 100644 --- a/compiler/rustc_codegen_llvm/src/type_of.rs +++ b/compiler/rustc_codegen_llvm/src/type_of.rs @@ -411,8 +411,8 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> { if let Some(&pointee) = cx.pointee_infos.borrow().get(&(self.ty, offset)) { return pointee; } - - let result = Ty::ty_and_layout_pointee_info_at(*self, cx, offset); + let assume_valid_ptr = true; + let result = Ty::ty_and_layout_pointee_info_at(*self, cx, offset, assume_valid_ptr); cx.pointee_infos.borrow_mut().insert((self.ty, offset), result); result diff --git a/compiler/rustc_const_eval/messages.ftl b/compiler/rustc_const_eval/messages.ftl index d8eade5bd2a0e..8833f55831cad 100644 --- a/compiler/rustc_const_eval/messages.ftl +++ b/compiler/rustc_const_eval/messages.ftl @@ -244,7 +244,6 @@ const_eval_not_enough_caller_args = const_eval_null_box = {$front_matter}: encountered a null box const_eval_null_fn_ptr = {$front_matter}: encountered a null function pointer const_eval_null_ref = {$front_matter}: encountered a null reference -const_eval_nullable_ptr_out_of_range = {$front_matter}: encountered a potentially null pointer, but expected 
something that cannot possibly fail to be {$in_range} const_eval_nullary_intrinsic_fail = could not evaluate nullary intrinsic diff --git a/compiler/rustc_const_eval/src/const_eval/machine.rs b/compiler/rustc_const_eval/src/const_eval/machine.rs index 267795a6cb4ab..0a9a47b283793 100644 --- a/compiler/rustc_const_eval/src/const_eval/machine.rs +++ b/compiler/rustc_const_eval/src/const_eval/machine.rs @@ -1,7 +1,6 @@ use rustc_hir::def::DefKind; use rustc_hir::{LangItem, CRATE_HIR_ID}; use rustc_middle::mir; -use rustc_middle::mir::interpret::PointerArithmetic; use rustc_middle::ty::layout::{FnAbiOf, TyAndLayout}; use rustc_middle::ty::{self, Ty, TyCtxt}; use rustc_session::lint::builtin::INVALID_ALIGNMENT; @@ -17,7 +16,7 @@ use rustc_ast::Mutability; use rustc_hir::def_id::DefId; use rustc_middle::mir::AssertMessage; use rustc_span::symbol::{sym, Symbol}; -use rustc_target::abi::{Align, Size}; +use rustc_target::abi::{Align, HasDataLayout as _, Size}; use rustc_target::spec::abi::Abi as CallAbi; use crate::errors::{LongRunning, LongRunningWarn}; @@ -304,8 +303,8 @@ impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> { Ok(ControlFlow::Break(())) } else { // Not alignable in const, return `usize::MAX`. - let usize_max = Scalar::from_target_usize(self.target_usize_max(), self); - self.write_scalar(usize_max, dest)?; + let usize_max = self.data_layout().target_usize_max(); + self.write_scalar(Scalar::from_target_usize(usize_max, self), dest)?; self.return_to_block(ret)?; Ok(ControlFlow::Break(())) } @@ -333,7 +332,7 @@ impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> { // Inequality with integers other than null can never be known for sure. (Scalar::Int(int), ptr @ Scalar::Ptr(..)) | (ptr @ Scalar::Ptr(..), Scalar::Int(int)) - if int.is_null() && !self.scalar_may_be_null(ptr)? => + if int.is_null() && !self.ptr_scalar_range(ptr)?.contains(&0) => { 0 } diff --git a/compiler/rustc_const_eval/src/errors.rs b/compiler/rustc_const_eval/src/errors.rs index ca38cce710e60..61ce695ccd296 100644 --- a/compiler/rustc_const_eval/src/errors.rs +++ b/compiler/rustc_const_eval/src/errors.rs @@ -617,7 +617,6 @@ impl<'tcx> ReportErrorExt for ValidationErrorInfo<'tcx> { MutableRefInConst => const_eval_mutable_ref_in_const, NullFnPtr => const_eval_null_fn_ptr, NeverVal => const_eval_never_val, - NullablePtrOutOfRange { .. } => const_eval_nullable_ptr_out_of_range, PtrOutOfRange { .. } => const_eval_ptr_out_of_range, OutOfRange { .. 
} => const_eval_out_of_range, UnsafeCell => const_eval_unsafe_cell, @@ -732,9 +731,7 @@ impl<'tcx> ReportErrorExt for ValidationErrorInfo<'tcx> { | InvalidFnPtr { value } => { err.set_arg("value", value); } - NullablePtrOutOfRange { range, max_value } | PtrOutOfRange { range, max_value } => { - add_range_arg(range, max_value, handler, err) - } + PtrOutOfRange { range, max_value } => add_range_arg(range, max_value, handler, err), OutOfRange { range, max_value, value } => { err.set_arg("value", value); add_range_arg(range, max_value, handler, err); diff --git a/compiler/rustc_const_eval/src/interpret/discriminant.rs b/compiler/rustc_const_eval/src/interpret/discriminant.rs index f23a455c2ca30..99ea0ab18bc97 100644 --- a/compiler/rustc_const_eval/src/interpret/discriminant.rs +++ b/compiler/rustc_const_eval/src/interpret/discriminant.rs @@ -2,8 +2,7 @@ use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt}; use rustc_middle::{mir, ty}; -use rustc_target::abi::{self, TagEncoding}; -use rustc_target::abi::{VariantIdx, Variants}; +use rustc_target::abi::{self, TagEncoding, VariantIdx, Variants, WrappingRange}; use super::{ImmTy, InterpCx, InterpResult, Machine, OpTy, PlaceTy, Scalar}; @@ -180,19 +179,24 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { // discriminant (encoded in niche/tag) and variant index are the same. let variants_start = niche_variants.start().as_u32(); let variants_end = niche_variants.end().as_u32(); + let variants_len = u128::from(variants_end - variants_start); let variant = match tag_val.try_to_int() { Err(dbg_val) => { // So this is a pointer then, and casting to an int failed. // Can only happen during CTFE. - // The niche must be just 0, and the ptr not null, then we know this is - // okay. Everything else, we conservatively reject. + // If the pointer and niche ranges are disjoint, then we know + // this is the untagged variant (as the value is not in the niche). + // Everything else, we conservatively reject. let range = self.ptr_scalar_range(tag_val)?; let niches = WrappingRange { start: niche_start, end: niche_start.wrapping_add(variants_len), }; if niches.overlaps_range(range) { throw_ub!(InvalidTag(dbg_val)) + } else { + untagged_variant } - untagged_variant } Ok(tag_bits) => { let tag_bits = tag_bits.assert_bits(tag_layout.size); @@ -205,7 +209,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { let variant_index_relative = variant_index_relative_val.to_scalar().assert_bits(tag_val.layout.size); // Check if this is in the range that indicates an actual discriminant. - if variant_index_relative <= u128::from(variants_end - variants_start) { + if variant_index_relative <= variants_len { let variant_index_relative = u32::try_from(variant_index_relative) .expect("we checked that this fits into a u32"); // Then computing the absolute variant idx should not overflow any more.
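// A minimal standalone sketch of the wrap-around semantics behind the new
// `WrappingRange::overlaps_range` helper that the niche-discriminant check above
// relies on. The `Range` type here is a hypothetical stand-in that only mirrors the
// inclusive, possibly wrapping `start`/`end` representation of `WrappingRange`; it is
// not the rustc_abi type itself.
struct Range {
    start: u128,
    end: u128, // inclusive; the range wraps around when `start > end`
}

impl Range {
    // Mirrors `WrappingRange::overlaps_range` for a non-empty closed range
    // `min..=max` of possible pointer addresses.
    fn overlaps(&self, min: u128, max: u128) -> bool {
        if self.start <= self.end {
            self.start <= max && min <= self.end
        } else {
            self.start <= max || min <= self.end
        }
    }
}

fn main() {
    // A wrapped niche range such as 0xFE..=0x01 covers 0xFE, 0xFF, 0x00, 0x01.
    let niche = Range { start: 0xFE, end: 0x01 };
    // A pointer whose possible addresses are 0x10..=0x20 stays clear of the
    // niche, so the check above would pick the untagged variant.
    assert!(!niche.overlaps(0x10, 0x20));
    // An address range touching 0x00 does overlap, so the tag would be
    // rejected as invalid.
    assert!(niche.overlaps(0x00, 0x00));
}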
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs index 04cae23f852a1..8ec9a71bf3a96 100644 --- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs +++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs @@ -5,9 +5,7 @@ use rustc_hir::def_id::DefId; use rustc_middle::mir::{ self, - interpret::{ - Allocation, ConstAllocation, ConstValue, GlobalId, InterpResult, PointerArithmetic, Scalar, - }, + interpret::{Allocation, ConstAllocation, ConstValue, GlobalId, InterpResult, Scalar}, BinOp, NonDivergingIntrinsic, }; use rustc_middle::ty; @@ -15,7 +13,7 @@ use rustc_middle::ty::layout::{LayoutOf as _, ValidityRequirement}; use rustc_middle::ty::GenericArgsRef; use rustc_middle::ty::{Ty, TyCtxt}; use rustc_span::symbol::{sym, Symbol}; -use rustc_target::abi::{Abi, Align, Primitive, Size}; +use rustc_target::abi::{Abi, Align, HasDataLayout as _, Primitive, Size}; use super::{ util::ensure_monomorphic_enough, CheckInAllocMsg, ImmTy, InterpCx, Machine, OpTy, PlaceTy, @@ -361,11 +359,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { )?; // Perform division by size to compute return value. + let dl = self.data_layout(); let ret_layout = if intrinsic_name == sym::ptr_offset_from_unsigned { - assert!(0 <= dist && dist <= self.target_isize_max()); + assert!(0 <= dist && dist <= dl.target_isize_max()); usize_layout } else { - assert!(self.target_isize_min() <= dist && dist <= self.target_isize_max()); + assert!(dl.target_isize_min() <= dist && dist <= dl.target_isize_max()); isize_layout }; let pointee_layout = self.layout_of(instance_args.type_at(0))?; diff --git a/compiler/rustc_const_eval/src/interpret/memory.rs b/compiler/rustc_const_eval/src/interpret/memory.rs index 7b44a20ef03da..29fc5ffcfe7ca 100644 --- a/compiler/rustc_const_eval/src/interpret/memory.rs +++ b/compiler/rustc_const_eval/src/interpret/memory.rs @@ -10,6 +10,7 @@ use std::assert_matches::assert_matches; use std::borrow::Cow; use std::collections::VecDeque; use std::fmt; +use std::ops::RangeInclusive; use std::ptr; use rustc_ast::Mutability; @@ -1222,24 +1223,34 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { /// Machine pointer introspection. impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { - /// Test if this value might be null. + /// Turn a pointer-sized scalar into a (non-empty) range of possible values. /// If the machine does not support ptr-to-int casts, this is conservative. - pub fn scalar_may_be_null(&self, scalar: Scalar) -> InterpResult<'tcx, bool> { - Ok(match scalar.try_to_int() { - Ok(int) => int.is_null(), - Err(_) => { - // Can only happen during CTFE. - let ptr = scalar.to_pointer(self)?; - match self.ptr_try_get_alloc_id(ptr) { - Ok((alloc_id, offset, _)) => { - let (size, _align, _kind) = self.get_alloc_info(alloc_id); - // If the pointer is out-of-bounds, it may be null. - // Note that one-past-the-end (offset == size) is still inbounds, and never null. - offset > size - } - Err(_offset) => bug!("a non-int scalar is always a pointer"), + pub fn ptr_scalar_range( + &self, + scalar: Scalar, + ) -> InterpResult<'tcx, RangeInclusive> { + if let Ok(int) = scalar.to_target_usize(self) { + return Ok(int..=int); + } + + let ptr = scalar.to_pointer(self)?; + + // Can only happen during CTFE. 
+ Ok(match self.ptr_try_get_alloc_id(ptr) { + Ok((alloc_id, offset, _)) => { + let offset = offset.bytes(); + let (size, align, _) = self.get_alloc_info(alloc_id); + let dl = self.data_layout(); + if offset > size.bytes() { + // If the pointer is out-of-bounds, we do not have a + // meaningful range to return. + 0..=dl.target_usize_max() + } else { + let (min, max) = dl.address_range_for(size, align); + (min + offset)..=(max + offset) } } + Err(_offset) => bug!("a non-int scalar is always a pointer"), }) } diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs index 21c655988a0e1..108394d224bb5 100644 --- a/compiler/rustc_const_eval/src/interpret/validity.rs +++ b/compiler/rustc_const_eval/src/interpret/validity.rs @@ -19,9 +19,7 @@ use rustc_middle::mir::interpret::{ use rustc_middle::ty; use rustc_middle::ty::layout::{LayoutOf, TyAndLayout}; use rustc_span::symbol::{sym, Symbol}; -use rustc_target::abi::{ - Abi, FieldIdx, Scalar as ScalarAbi, Size, VariantIdx, Variants, WrappingRange, -}; +use rustc_target::abi::{Abi, FieldIdx, Scalar as ScalarAbi, Size, VariantIdx, Variants}; use std::hash::Hash; @@ -554,7 +552,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, ' // FIXME: Check if the signature matches } else { // Otherwise (for standalone Miri), we have to still check it to be non-null. - if self.ecx.scalar_may_be_null(value)? { + if self.ecx.ptr_scalar_range(value)?.contains(&0) { throw_validation_failure!(self.path, NullFnPtr); } } @@ -595,46 +593,36 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, ' ) -> InterpResult<'tcx> { let size = scalar_layout.size(self.ecx); let valid_range = scalar_layout.valid_range(self.ecx); - let WrappingRange { start, end } = valid_range; let max_value = size.unsigned_int_max(); - assert!(end <= max_value); - let bits = match scalar.try_to_int() { - Ok(int) => int.assert_bits(size), + assert!(valid_range.end <= max_value); + match scalar.try_to_int() { + Ok(int) => { + // We have an explicit int: check it against the valid range. + let bits = int.assert_bits(size); + if valid_range.contains(bits) { + Ok(()) + } else { + throw_validation_failure!( + self.path, + OutOfRange { value: format!("{bits}"), range: valid_range, max_value } + ) + } + } Err(_) => { // So this is a pointer then, and casting to an int failed. // Can only happen during CTFE. - // We support 2 kinds of ranges here: full range, and excluding zero. - if start == 1 && end == max_value { - // Only null is the niche. So make sure the ptr is NOT null. - if self.ecx.scalar_may_be_null(scalar)? { - throw_validation_failure!( - self.path, - NullablePtrOutOfRange { range: valid_range, max_value } - ) - } else { - return Ok(()); - } - } else if scalar_layout.is_always_valid(self.ecx) { - // Easy. (This is reachable if `enforce_number_validity` is set.) - return Ok(()); + // We check if the possible addresses are compatible with the valid range. + let range = self.ecx.ptr_scalar_range(scalar)?; + if valid_range.contains_range(range) { + Ok(()) } else { - // Conservatively, we reject, because the pointer *could* have a bad - // value. + // Reject conservatively, because the pointer *could* have a bad value. throw_validation_failure!( self.path, PtrOutOfRange { range: valid_range, max_value } ) } } - }; - // Now compare. 
- if valid_range.contains(bits) { - Ok(()) - } else { - throw_validation_failure!( - self.path, - OutOfRange { value: format!("{bits}"), range: valid_range, max_value } - ) } } } diff --git a/compiler/rustc_interface/src/tests.rs b/compiler/rustc_interface/src/tests.rs index 12124f14a821f..aedc662b06790 100644 --- a/compiler/rustc_interface/src/tests.rs +++ b/compiler/rustc_interface/src/tests.rs @@ -28,6 +28,7 @@ use rustc_span::edition::{Edition, DEFAULT_EDITION}; use rustc_span::symbol::sym; use rustc_span::FileName; use rustc_span::SourceFileHashAlgorithm; +use rustc_target::abi::ReferenceNichePolicy; use rustc_target::spec::{CodeModel, LinkerFlavorCli, MergeFunctions, PanicStrategy, RelocModel}; use rustc_target::spec::{RelroLevel, SanitizerSet, SplitDebuginfo, StackProtector, TlsModel}; @@ -820,6 +821,7 @@ fn test_unstable_options_tracking_hash() { tracked!(profile_emit, Some(PathBuf::from("abc"))); tracked!(profile_sample_use, Some(PathBuf::from("abc"))); tracked!(profiler_runtime, "abc".to_string()); + tracked!(reference_niches, Some(ReferenceNichePolicy { size: true, align: false })); tracked!(relax_elf_relocations, Some(true)); tracked!(relro_level, Some(RelroLevel::Full)); tracked!(remap_cwd_prefix, Some(PathBuf::from("abc"))); diff --git a/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs b/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs index a8815ee0908d6..77c33336dff63 100644 --- a/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs +++ b/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs @@ -301,6 +301,7 @@ provide! { tcx, def_id, other, cdata, is_profiler_runtime => { cdata.root.profiler_runtime } required_panic_strategy => { cdata.root.required_panic_strategy } panic_in_drop_strategy => { cdata.root.panic_in_drop_strategy } + reference_niches_policy => { cdata.root.reference_niches_policy } extern_crate => { let r = *cdata.extern_crate.lock(); r.map(|c| &*tcx.arena.alloc(c)) diff --git a/compiler/rustc_metadata/src/rmeta/encoder.rs b/compiler/rustc_metadata/src/rmeta/encoder.rs index ac86110f2bdb3..46571e7796d02 100644 --- a/compiler/rustc_metadata/src/rmeta/encoder.rs +++ b/compiler/rustc_metadata/src/rmeta/encoder.rs @@ -673,6 +673,7 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> { stable_crate_id: tcx.def_path_hash(LOCAL_CRATE.as_def_id()).stable_crate_id(), required_panic_strategy: tcx.required_panic_strategy(LOCAL_CRATE), panic_in_drop_strategy: tcx.sess.opts.unstable_opts.panic_in_drop, + reference_niches_policy: tcx.reference_niches_policy(LOCAL_CRATE), edition: tcx.sess.edition(), has_global_allocator: tcx.has_global_allocator(LOCAL_CRATE), has_alloc_error_handler: tcx.has_alloc_error_handler(LOCAL_CRATE), diff --git a/compiler/rustc_metadata/src/rmeta/mod.rs b/compiler/rustc_metadata/src/rmeta/mod.rs index 0bc16fc64ff78..8bc2e0aa5a9ed 100644 --- a/compiler/rustc_metadata/src/rmeta/mod.rs +++ b/compiler/rustc_metadata/src/rmeta/mod.rs @@ -32,7 +32,7 @@ use rustc_span::edition::Edition; use rustc_span::hygiene::{ExpnIndex, MacroKind}; use rustc_span::symbol::{Ident, Symbol}; use rustc_span::{self, ExpnData, ExpnHash, ExpnId, Span}; -use rustc_target::abi::{FieldIdx, VariantIdx}; +use rustc_target::abi::{FieldIdx, ReferenceNichePolicy, VariantIdx}; use rustc_target::spec::{PanicStrategy, TargetTriple}; use std::marker::PhantomData; @@ -251,6 +251,7 @@ pub(crate) struct CrateRoot { stable_crate_id: StableCrateId, required_panic_strategy: Option, panic_in_drop_strategy: PanicStrategy, + reference_niches_policy: ReferenceNichePolicy, edition: 
Edition, has_global_allocator: bool, has_alloc_error_handler: bool, diff --git a/compiler/rustc_middle/src/mir/interpret/error.rs b/compiler/rustc_middle/src/mir/interpret/error.rs index 372452ea29a8d..1bcef17d73b06 100644 --- a/compiler/rustc_middle/src/mir/interpret/error.rs +++ b/compiler/rustc_middle/src/mir/interpret/error.rs @@ -388,7 +388,6 @@ pub enum ValidationErrorKind<'tcx> { MutableRefInConst, NullFnPtr, NeverVal, - NullablePtrOutOfRange { range: WrappingRange, max_value: u128 }, PtrOutOfRange { range: WrappingRange, max_value: u128 }, OutOfRange { value: String, range: WrappingRange, max_value: u128 }, UnsafeCell, diff --git a/compiler/rustc_middle/src/mir/interpret/pointer.rs b/compiler/rustc_middle/src/mir/interpret/pointer.rs index 65d04919357f3..c8133bcc387a2 100644 --- a/compiler/rustc_middle/src/mir/interpret/pointer.rs +++ b/compiler/rustc_middle/src/mir/interpret/pointer.rs @@ -19,33 +19,19 @@ pub trait PointerArithmetic: HasDataLayout { #[inline(always)] fn max_size_of_val(&self) -> Size { - Size::from_bytes(self.target_isize_max()) - } - - #[inline] - fn target_usize_max(&self) -> u64 { - self.pointer_size().unsigned_int_max().try_into().unwrap() - } - - #[inline] - fn target_isize_min(&self) -> i64 { - self.pointer_size().signed_int_min().try_into().unwrap() - } - - #[inline] - fn target_isize_max(&self) -> i64 { - self.pointer_size().signed_int_max().try_into().unwrap() + Size::from_bytes(self.data_layout().target_isize_max()) } #[inline] fn target_usize_to_isize(&self, val: u64) -> i64 { + let dl = self.data_layout(); let val = val as i64; // Now wrap-around into the machine_isize range. - if val > self.target_isize_max() { + if val > dl.target_isize_max() { // This can only happen if the ptr size is < 64, so we know max_usize_plus_1 fits into // i64. - debug_assert!(self.pointer_size().bits() < 64); - let max_usize_plus_1 = 1u128 << self.pointer_size().bits(); + debug_assert!(dl.pointer_size.bits() < 64); + let max_usize_plus_1 = 1u128 << dl.pointer_size.bits(); val - i64::try_from(max_usize_plus_1).unwrap() } else { val @@ -58,7 +44,7 @@ pub trait PointerArithmetic: HasDataLayout { #[inline] fn truncate_to_ptr(&self, (val, over): (u64, bool)) -> (u64, bool) { let val = u128::from(val); - let max_ptr_plus_1 = 1u128 << self.pointer_size().bits(); + let max_ptr_plus_1 = 1u128 << self.data_layout().pointer_size.bits(); (u64::try_from(val % max_ptr_plus_1).unwrap(), over || val >= max_ptr_plus_1) } @@ -76,11 +62,11 @@ pub trait PointerArithmetic: HasDataLayout { let n = i.unsigned_abs(); if i >= 0 { let (val, over) = self.overflowing_offset(val, n); - (val, over || i > self.target_isize_max()) + (val, over || i > self.data_layout().target_isize_max()) } else { let res = val.overflowing_sub(n); let (val, over) = self.truncate_to_ptr(res); - (val, over || i < self.target_isize_min()) + (val, over || i < self.data_layout().target_isize_min()) } } diff --git a/compiler/rustc_middle/src/query/erase.rs b/compiler/rustc_middle/src/query/erase.rs index 2c481745d987a..9bf022670053f 100644 --- a/compiler/rustc_middle/src/query/erase.rs +++ b/compiler/rustc_middle/src/query/erase.rs @@ -111,6 +111,11 @@ impl EraseType >()]; } +impl EraseType for Result, &ty::layout::LayoutError<'_>> { + type Result = + [u8; size_of::, &ty::layout::LayoutError<'_>>>()]; +} + impl EraseType for Result, mir::interpret::LitToConstError> { type Result = [u8; size_of::, mir::interpret::LitToConstError>>()]; } @@ -291,6 +296,7 @@ trivial! 
{ rustc_span::Symbol, rustc_span::symbol::Ident, rustc_target::spec::PanicStrategy, + rustc_target::abi::ReferenceNichePolicy, rustc_type_ir::Variance, u32, usize, diff --git a/compiler/rustc_middle/src/query/mod.rs b/compiler/rustc_middle/src/query/mod.rs index b36f0df78f129..b5b00b7b640fc 100644 --- a/compiler/rustc_middle/src/query/mod.rs +++ b/compiler/rustc_middle/src/query/mod.rs @@ -1394,6 +1394,18 @@ rustc_queries! { desc { "computing layout of `{}`", key.value } } + /// Computes the naive layout approximation of a type. Note that this implicitly + /// executes in "reveal all" mode, and will normalize the input type. + /// + /// Unlike `layout_of`, this doesn't look past references (beyond the `Pointee::Metadata` + /// projection), and as such can be called on generic types like `Option<&T>`. + query naive_layout_of( + key: ty::ParamEnvAnd<'tcx, Ty<'tcx>> + ) -> Result, &'tcx ty::layout::LayoutError<'tcx>> { + depth_limit + desc { "computing layout (naive) of `{}`", key.value } + } + /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers. /// /// NB: this doesn't handle virtual calls - those should use `fn_abi_of_instance` @@ -1469,6 +1481,11 @@ rustc_queries! { desc { "getting a crate's configured panic-in-drop strategy" } separate_provide_extern } + query reference_niches_policy(_: CrateNum) -> abi::ReferenceNichePolicy { + fatal_cycle + desc { "getting a crate's policy for size and alignment niches of references" } + separate_provide_extern + } query is_no_builtins(_: CrateNum) -> bool { fatal_cycle desc { "getting whether a crate has `#![no_builtins]`" } diff --git a/compiler/rustc_middle/src/ty/layout.rs b/compiler/rustc_middle/src/ty/layout.rs index 62805d1e8b5c9..26137e86fa0df 100644 --- a/compiler/rustc_middle/src/ty/layout.rs +++ b/compiler/rustc_middle/src/ty/layout.rs @@ -313,7 +313,16 @@ impl<'tcx> SizeSkeleton<'tcx> { ) -> Result, &'tcx LayoutError<'tcx>> { debug_assert!(!ty.has_non_region_infer()); - // First try computing a static layout. + // First, try computing an exact naive layout (this covers simple types with generic + // references, where a full static layout would fail). + if let Ok(layout) = tcx.naive_layout_of(param_env.and(ty)) { + if layout.exact { + return Ok(SizeSkeleton::Known(layout.size)); + } + } + + // Second, try computing a full static layout (this covers cases when the naive layout + // wasn't smart enough, but cannot deal with generic references). let err = match tcx.layout_of(param_env.and(ty)) { Ok(layout) => { return Ok(SizeSkeleton::Known(layout.size)); @@ -327,6 +336,7 @@ impl<'tcx> SizeSkeleton<'tcx> { ) => return Err(e), }; + // Third, fall back to ad-hoc cases. match *ty.kind() { ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => { let non_zero = !ty.is_unsafe_ptr(); @@ -621,6 +631,219 @@ impl MaybeResult for Result { pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>; +#[derive(Copy, Clone, Debug, HashStable)] +pub struct TyAndNaiveLayout<'tcx> { + pub ty: Ty<'tcx>, + pub layout: NaiveLayout, +} + +impl std::ops::Deref for TyAndNaiveLayout<'_> { + type Target = NaiveLayout; + fn deref(&self) -> &Self::Target { + &self.layout + } +} + +impl std::ops::DerefMut for TyAndNaiveLayout<'_> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.layout + } +} + +/// Extremely simplified approximation of a type's layout returned by the +/// `naive_layout_of` query. 
+#[derive(Copy, Clone, Debug, HashStable)] +pub struct NaiveLayout { + pub abi: NaiveAbi, + /// Niche information, required for tracking non-null enum optimizations. + pub niches: NaiveNiches, + /// An underestimate of the layout's size. + pub size: Size, + /// An underestimate of the layout's required alignment. + pub align: Align, + /// If `true`, `size` and `align` must be exact values. + pub exact: bool, +} + +#[derive(Copy, Clone, Debug, Eq, PartialEq, HashStable)] +pub enum NaiveNiches { + None, + Some, + Maybe, +} + +#[derive(Copy, Clone, Debug, Eq, PartialEq, HashStable)] +pub enum NaiveAbi { + /// A scalar layout, always implies `exact` and a non-zero `size`. + Scalar(Primitive), + /// An uninhabited layout. (needed to properly track `Scalar` and niches) + Uninhabited, + /// An unsized aggregate. (needed to properly track `Scalar` and niches) + Unsized, + /// Any other sized layout. + Sized, +} + +impl NaiveAbi { + #[inline] + pub fn as_aggregate(self) -> Self { + match self { + NaiveAbi::Scalar(_) => NaiveAbi::Sized, + _ => self, + } + } +} + +impl NaiveLayout { + /// The layout of an empty aggregate, e.g. `()`. + pub const EMPTY: Self = Self { + size: Size::ZERO, + align: Align::ONE, + exact: true, + abi: NaiveAbi::Sized, + niches: NaiveNiches::None, + }; + + /// Returns whether `self` is a valid approximation of the given full `layout`. + /// + /// This should always return `true` when both layouts are computed from the same type. + pub fn is_refined_by(&self, layout: Layout<'_>) -> bool { + if self.size > layout.size() || self.align > layout.align().abi { + return false; + } + + if let NaiveAbi::Scalar(prim) = self.abi { + if !self.exact + || self.size == Size::ZERO + || !matches!(layout.abi(), Abi::Scalar(s) if s.primitive() == prim) + { + return false; + } + } + + match (self.niches, layout.largest_niche()) { + (NaiveNiches::None, Some(_)) => return false, + (NaiveNiches::Some, None) => return false, + _ => (), + } + + !self.exact || (self.size, self.align) == (layout.size(), layout.align().abi) + } + + /// Returns if this layout is known to be pointer-like (`None` if uncertain) + /// + /// See the corresponding `Layout::is_pointer_like` method. + pub fn is_pointer_like(&self, dl: &TargetDataLayout) -> Option { + match self.abi { + NaiveAbi::Scalar(_) => { + assert!(self.exact); + Some(self.size == dl.pointer_size && self.align == dl.pointer_align.abi) + } + NaiveAbi::Uninhabited | NaiveAbi::Unsized => Some(false), + NaiveAbi::Sized if self.exact => Some(false), + NaiveAbi::Sized => None, + } + } + + /// Artificially lowers the alignment of this layout. + #[must_use] + #[inline] + pub fn packed(mut self, align: Align) -> Self { + if self.align > align { + self.align = align; + self.abi = self.abi.as_aggregate(); + } + self + } + + /// Artificially raises the alignment of this layout. + #[must_use] + #[inline] + pub fn align_to(mut self, align: Align) -> Self { + if align > self.align { + self.align = align; + self.abi = self.abi.as_aggregate(); + } + self + } + + /// Artificially makes this layout inexact. + #[must_use] + #[inline] + pub fn inexact(mut self) -> Self { + self.abi = self.abi.as_aggregate(); + self.exact = false; + self + } + + /// Pads this layout so that its size is a multiple of `align`. 
+ #[must_use] + #[inline] + pub fn pad_to_align(mut self, align: Align) -> Self { + let new_size = self.size.align_to(align); + if new_size > self.size { + self.abi = self.abi.as_aggregate(); + self.size = new_size; + } + self + } + + /// Returns the layout of `self` immediately followed by `other`, without any + /// padding between them, as in a packed `struct` or tuple. + #[must_use] + #[inline] + pub fn concat(&self, other: &Self, dl: &TargetDataLayout) -> Option<Self> { + use NaiveAbi::*; + + let size = self.size.checked_add(other.size, dl)?; + let align = cmp::max(self.align, other.align); + let exact = self.exact && other.exact; + let abi = match (self.abi, other.abi) { + // The uninhabited and unsized ABIs override everything. + (Uninhabited, _) | (_, Uninhabited) => Uninhabited, + (Unsized, _) | (_, Unsized) => Unsized, + // A scalar struct must have a single non-ZST field. + (_, s @ Scalar(_)) if exact && self.size == Size::ZERO => s, + (s @ Scalar(_), _) if exact && other.size == Size::ZERO => s, + // Default case. + (_, _) => Sized, + }; + let niches = match (self.niches, other.niches) { + (NaiveNiches::Some, _) | (_, NaiveNiches::Some) => NaiveNiches::Some, + (NaiveNiches::None, NaiveNiches::None) => NaiveNiches::None, + (_, _) => NaiveNiches::Maybe, + }; + Some(Self { abi, size, align, exact, niches }) + } + + /// Returns the layout of `self` superposed with `other`, as in an `enum` + /// or a `union`. + /// + /// Note: This always ignores niche information from `other`. + #[must_use] + #[inline] + pub fn union(&self, other: &Self) -> Self { + use NaiveAbi::*; + + let size = cmp::max(self.size, other.size); + let align = cmp::max(self.align, other.align); + let exact = self.exact && other.exact; + let abi = match (self.abi, other.abi) { + // The unsized ABI overrides everything. + (Unsized, _) | (_, Unsized) => Unsized, + // A scalar union must have a single non-ZST field... + (_, s @ Scalar(_)) if exact && self.size == Size::ZERO => s, + (s @ Scalar(_), _) if exact && other.size == Size::ZERO => s, + // ...or identical scalar fields. + (Scalar(s1), Scalar(s2)) if s1 == s2 => Scalar(s1), + // Default cases. + (Uninhabited, Uninhabited) => Uninhabited, + (_, _) => Sized, + }; + Self { abi, size, align, exact, niches: self.niches } + } +} + /// Trait for contexts that want to be able to compute layouts of types. /// This automatically gives access to `LayoutOf`, through a blanket `impl`. pub trait LayoutOfHelpers<'tcx>: HasDataLayout + HasTyCtxt<'tcx> + HasParamEnv<'tcx> { @@ -673,6 +896,19 @@ pub trait LayoutOf<'tcx>: LayoutOfHelpers<'tcx> { .map_err(|err| self.handle_layout_err(*err, span, ty)), ) } + + /// Computes the naive layout estimate of a type. Note that this implicitly + /// executes in "reveal all" mode, and will normalize the input type. + /// + /// Unlike `layout_of`, this doesn't look past references (beyond the `Pointee::Metadata` + /// projection), and as such can be called on generic types like `Option<&T>`. + #[inline] + fn naive_layout_of( + &self, + ty: Ty<'tcx>, + ) -> Result<TyAndNaiveLayout<'tcx>, &'tcx LayoutError<'tcx>> { + self.tcx().naive_layout_of(self.param_env().and(ty)) + } } impl<'tcx, C: LayoutOfHelpers<'tcx>> LayoutOf<'tcx> for C {} @@ -969,6 +1205,9 @@ where this: TyAndLayout<'tcx>, cx: &C, offset: Size, + // If true, assume that pointers are either null or valid (according to their type), + // enabling extra optimizations.
+ mut assume_valid_ptr: bool, ) -> Option<PointeeInfo> { let tcx = cx.tcx(); let param_env = cx.param_env(); @@ -991,19 +1230,19 @@ where // Freeze/Unpin queries, and can save time in the codegen backend (noalias // attributes in LLVM have compile-time cost even in unoptimized builds). let optimize = tcx.sess.opts.optimize != OptLevel::No; - let kind = match mt { - hir::Mutability::Not => PointerKind::SharedRef { + let safe = match (assume_valid_ptr, mt) { + (true, hir::Mutability::Not) => Some(PointerKind::SharedRef { frozen: optimize && ty.is_freeze(tcx, cx.param_env()), - }, - hir::Mutability::Mut => PointerKind::MutableRef { + }), + (true, hir::Mutability::Mut) => Some(PointerKind::MutableRef { unpin: optimize && ty.is_unpin(tcx, cx.param_env()), - }, + }), + (false, _) => None, }; - tcx.layout_of(param_env.and(ty)).ok().map(|layout| PointeeInfo { size: layout.size, align: layout.align.abi, - safe: Some(kind), + safe, }) } @@ -1012,19 +1251,21 @@ where // Within the discriminant field, only the niche itself is // always initialized, so we only check for a pointer at its // offset. - // - // If the niche is a pointer, it's either valid (according - // to its type), or null (which the niche field's scalar - // validity range encodes). This allows using - // `dereferenceable_or_null` for e.g., `Option<&T>`, and - // this will continue to work as long as we don't start - // using more niches than just null (e.g., the first page of - // the address space, or unaligned pointers). Variants::Multiple { - tag_encoding: TagEncoding::Niche { untagged_variant, .. }, + tag_encoding: + TagEncoding::Niche { + untagged_variant, + niche_variants: ref variants, + niche_start, + }, tag_field, .. } if this.fields.offset(tag_field) == offset => { + // We can only continue assuming pointer validity if the only possible + // discriminant value is null. The null special-case is permitted by LLVM's + // `dereferenceable_or_null`, and allows types like `Option<&T>` to benefit + // from optimizations. + assume_valid_ptr &= niche_start == 0 && variants.start() == variants.end(); Some(this.for_variant(cx, untagged_variant)) } _ => Some(this), @@ -1050,9 +1291,12 @@ where result = field.to_result().ok().and_then(|field| { if ptr_end <= field_start + field.size { // We found the right field, look inside it. - let field_info = - field.pointee_info_at(cx, offset - field_start); - field_info + Self::ty_and_layout_pointee_info_at( + field, + cx, + offset - field_start, + assume_valid_ptr, + ) } else { None } @@ -1067,7 +1311,7 @@ where // FIXME(eddyb) This should be for `ptr::Unique`, not `Box`. if let Some(ref mut pointee) = result { if let ty::Adt(def, _) = this.ty.kind() { - if def.is_box() && offset.bytes() == 0 { + if assume_valid_ptr && def.is_box() && offset.bytes() == 0 { let optimize = tcx.sess.opts.optimize != OptLevel::No; pointee.safe = Some(PointerKind::Box { unpin: optimize && this.ty.boxed_ty().is_unpin(tcx, cx.param_env()), diff --git a/compiler/rustc_query_system/src/query/job.rs b/compiler/rustc_query_system/src/query/job.rs index d2140161f1d9f..a53d1fcc69ef8 100644 --- a/compiler/rustc_query_system/src/query/job.rs +++ b/compiler/rustc_query_system/src/query/job.rs @@ -176,7 +176,8 @@ impl QueryJobId { while let Some(id) = current_id { let info = query_map.get(&id).unwrap(); // FIXME: This string comparison should probably not be done.
- if format!("{:?}", info.query.dep_kind) == "layout_of" { + let query_name = format!("{:?}", info.query.dep_kind); + if query_name == "layout_of" || query_name == "naive_layout_of" { depth += 1; last_layout = Some((info.clone(), depth)); } diff --git a/compiler/rustc_session/src/config.rs b/compiler/rustc_session/src/config.rs index a8147ede970c3..1766e97b67d01 100644 --- a/compiler/rustc_session/src/config.rs +++ b/compiler/rustc_session/src/config.rs @@ -3117,6 +3117,7 @@ pub(crate) mod dep_tracking { use rustc_feature::UnstableFeatures; use rustc_span::edition::Edition; use rustc_span::RealFileName; + use rustc_target::abi::ReferenceNichePolicy; use rustc_target::spec::{CodeModel, MergeFunctions, PanicStrategy, RelocModel}; use rustc_target::spec::{ RelroLevel, SanitizerSet, SplitDebuginfo, StackProtector, TargetTriple, TlsModel, @@ -3212,6 +3213,7 @@ pub(crate) mod dep_tracking { OomStrategy, LanguageIdentifier, TraitSolver, + ReferenceNichePolicy, ); impl DepTrackingHash for (T1, T2) diff --git a/compiler/rustc_session/src/options.rs b/compiler/rustc_session/src/options.rs index 39efe9abeecdb..0c66121c72f89 100644 --- a/compiler/rustc_session/src/options.rs +++ b/compiler/rustc_session/src/options.rs @@ -6,6 +6,7 @@ use crate::{lint, EarlyErrorHandler}; use rustc_data_structures::profiling::TimePassesFormat; use rustc_errors::ColorConfig; use rustc_errors::{LanguageIdentifier, TerminalUrl}; +use rustc_target::abi::ReferenceNichePolicy; use rustc_target::spec::{CodeModel, LinkerFlavorCli, MergeFunctions, PanicStrategy, SanitizerSet}; use rustc_target::spec::{ RelocModel, RelroLevel, SplitDebuginfo, StackProtector, TargetTriple, TlsModel, @@ -421,6 +422,8 @@ mod desc { pub const parse_proc_macro_execution_strategy: &str = "one of supported execution strategies (`same-thread`, or `cross-thread`)"; pub const parse_dump_solver_proof_tree: &str = "one of: `always`, `on-request`, `on-error`"; + pub const parse_opt_reference_niches: &str = + "`null`, or a `,` separated combination of `size` or `align`"; } mod parse { @@ -1253,6 +1256,31 @@ mod parse { }; true } + + pub(crate) fn parse_opt_reference_niches( + slot: &mut Option, + v: Option<&str>, + ) -> bool { + let Some(s) = v else { + return false; + }; + + let slot = slot.get_or_insert_default(); + + if s == "null" { + return true; + } + + for opt in s.split(",") { + match opt { + "size" => slot.size = true, + "align" => slot.align = true, + _ => return false, + } + } + + true + } } options! { @@ -1701,6 +1729,8 @@ options! { "enable queries of the dependency graph for regression testing (default: no)"), randomize_layout: bool = (false, parse_bool, [TRACKED], "randomize the layout of types (default: no)"), + reference_niches: Option = (None, parse_opt_reference_niches, [TRACKED], + "override the set of discriminant niches that may be exposed by references"), relax_elf_relocations: Option = (None, parse_opt_bool, [TRACKED], "whether ELF relocations can be relaxed"), relro_level: Option = (None, parse_relro_level, [TRACKED], diff --git a/compiler/rustc_target/src/abi/mod.rs b/compiler/rustc_target/src/abi/mod.rs index 589cd3cf96b3e..752f10a74ac1c 100644 --- a/compiler/rustc_target/src/abi/mod.rs +++ b/compiler/rustc_target/src/abi/mod.rs @@ -50,6 +50,9 @@ pub trait TyAbiInterface<'a, C>: Sized { this: TyAndLayout<'a, Self>, cx: &C, offset: Size, + // If true, assume that pointers are either null or valid (according to their type), + // enabling extra optimizations. 
+ assume_valid_ptr: bool, ) -> Option; fn is_adt(this: TyAndLayout<'a, Self>) -> bool; fn is_never(this: TyAndLayout<'a, Self>) -> bool; @@ -76,7 +79,8 @@ impl<'a, Ty> TyAndLayout<'a, Ty> { where Ty: TyAbiInterface<'a, C>, { - Ty::ty_and_layout_pointee_info_at(self, cx, offset) + let assume_valid_ptr = true; + Ty::ty_and_layout_pointee_info_at(self, cx, offset, assume_valid_ptr) } pub fn is_single_fp_element(self, cx: &C) -> bool diff --git a/compiler/rustc_trait_selection/src/solve/trait_goals.rs b/compiler/rustc_trait_selection/src/solve/trait_goals.rs index 930e62d638831..761f5327f6db1 100644 --- a/compiler/rustc_trait_selection/src/solve/trait_goals.rs +++ b/compiler/rustc_trait_selection/src/solve/trait_goals.rs @@ -223,9 +223,20 @@ impl<'tcx> assembly::GoalKind<'tcx> for TraitPredicate<'tcx> { return ecx.evaluate_added_goals_and_make_canonical_response(Certainty::AMBIGUOUS); } - if let Ok(layout) = tcx.layout_of(key) - && layout.layout.is_pointer_like(&tcx.data_layout) - { + // First, try computing an exact naive layout in case the type is generic. + let is_pointer_like = if let Ok(layout) = tcx.naive_layout_of(key) { + layout.is_pointer_like(&tcx.data_layout).unwrap_or_else(|| { + // Second, we fall back to full layout computation. + tcx.layout_of(key) + .ok() + .filter(|l| l.layout.is_pointer_like(&tcx.data_layout)) + .is_some() + }) + } else { + false + }; + + if is_pointer_like { // FIXME: We could make this faster by making a no-constraints response ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes) } else { diff --git a/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs b/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs index aa195d70a9f6d..f1d870269a60a 100644 --- a/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs +++ b/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs @@ -979,9 +979,20 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { return; } - if let Ok(layout) = tcx.layout_of(key) - && layout.layout.is_pointer_like(&tcx.data_layout) - { + // First, try computing an exact naive layout in case the type is generic. + let is_pointer_like = if let Ok(layout) = tcx.naive_layout_of(key) { + layout.is_pointer_like(&tcx.data_layout).unwrap_or_else(|| { + // Second, we fall back to full layout computation. 
+ tcx.layout_of(key) + .ok() + .filter(|l| l.layout.is_pointer_like(&tcx.data_layout)) + .is_some() + }) + } else { + false + }; + + if is_pointer_like { candidates.vec.push(BuiltinCandidate { has_nested: false }); } } diff --git a/compiler/rustc_ty_utils/src/layout.rs b/compiler/rustc_ty_utils/src/layout.rs index b840ff184e0b7..da1eba68d53aa 100644 --- a/compiler/rustc_ty_utils/src/layout.rs +++ b/compiler/rustc_ty_utils/src/layout.rs @@ -3,7 +3,7 @@ use rustc_hir as hir; use rustc_index::bit_set::BitSet; use rustc_index::{IndexSlice, IndexVec}; use rustc_middle::mir::{GeneratorLayout, GeneratorSavedLocal}; -use rustc_middle::query::Providers; +use rustc_middle::query::{LocalCrate, Providers}; use rustc_middle::ty::layout::{ IntegerExt, LayoutCx, LayoutError, LayoutOf, TyAndLayout, MAX_SIMD_LANES, }; @@ -24,32 +24,28 @@ use crate::errors::{ use crate::layout_sanity_check::sanity_check_layout; pub fn provide(providers: &mut Providers) { - *providers = Providers { layout_of, ..*providers }; + *providers = Providers { layout_of, reference_niches_policy, ..*providers }; } +#[instrument(skip(tcx), level = "debug")] +fn reference_niches_policy<'tcx>(tcx: TyCtxt<'tcx>, _: LocalCrate) -> ReferenceNichePolicy { + tcx.sess.opts.unstable_opts.reference_niches.unwrap_or(DEFAULT_REF_NICHES) +} + +/// The reference niche policy for builtin types, and for types in +/// crates not specifying `-Z reference-niches`. +const DEFAULT_REF_NICHES: ReferenceNichePolicy = ReferenceNichePolicy { size: false, align: false }; + #[instrument(skip(tcx, query), level = "debug")] fn layout_of<'tcx>( tcx: TyCtxt<'tcx>, query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>, ) -> Result, &'tcx LayoutError<'tcx>> { - let (param_env, ty) = query.into_parts(); - debug!(?ty); - + let (param_env, unnormalized_ty) = query.into_parts(); let param_env = param_env.with_reveal_all_normalized(tcx); - let unnormalized_ty = ty; - - // FIXME: We might want to have two different versions of `layout_of`: - // One that can be called after typecheck has completed and can use - // `normalize_erasing_regions` here and another one that can be called - // before typecheck has completed and uses `try_normalize_erasing_regions`. - let ty = match tcx.try_normalize_erasing_regions(param_env, ty) { - Ok(t) => t, - Err(normalization_error) => { - return Err(tcx - .arena - .alloc(LayoutError::NormalizationFailure(ty, normalization_error))); - } - }; + // `naive_layout_of` takes care of normalizing the type. + let naive = tcx.naive_layout_of(query)?; + let ty = naive.ty; if ty != unnormalized_ty { // Ensure this layout is also cached for the normalized type. 
@@ -57,13 +53,11 @@ fn layout_of<'tcx>( } let cx = LayoutCx { tcx, param_env }; - let layout = layout_of_uncached(&cx, ty)?; - let layout = TyAndLayout { ty, layout }; + let layout = TyAndLayout { ty, layout }; record_layout_for_printing(&cx, layout); - - sanity_check_layout(&cx, &layout); + sanity_check_layout(&cx, &layout, &naive); Ok(layout) } @@ -83,12 +77,10 @@ fn univariant_uninterned<'tcx>( kind: StructKind, ) -> Result> { let dl = cx.data_layout(); - let pack = repr.pack; - if pack.is_some() && repr.align.is_some() { - cx.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned"); - return Err(cx.tcx.arena.alloc(LayoutError::Unknown(ty))); - } - + assert!( + !(repr.pack.is_some() && repr.align.is_some()), + "already rejected by `naive_layout_of`" + ); cx.univariant(dl, fields, repr, kind).ok_or_else(|| error(cx, LayoutError::SizeOverflow(ty))) } @@ -146,75 +138,35 @@ fn layout_of_uncached<'tcx>( ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => { let mut data_ptr = scalar_unit(Pointer(AddressSpace::DATA)); if !ty.is_unsafe_ptr() { - data_ptr.valid_range_mut().start = 1; - } - - let pointee = tcx.normalize_erasing_regions(param_env, pointee); - if pointee.is_sized(tcx, param_env) { - return Ok(tcx.mk_layout(LayoutS::scalar(cx, data_ptr))); - } - - let metadata = if let Some(metadata_def_id) = tcx.lang_items().metadata_type() - // Projection eagerly bails out when the pointee references errors, - // fall back to structurally deducing metadata. - && !pointee.references_error() - { - let pointee_metadata = Ty::new_projection(tcx,metadata_def_id, [pointee]); - let metadata_ty = match tcx.try_normalize_erasing_regions( - param_env, - pointee_metadata, - ) { - Ok(metadata_ty) => metadata_ty, - Err(mut err) => { - // Usually `::Metadata` can't be normalized because - // its struct tail cannot be normalized either, so try to get a - // more descriptive layout error here, which will lead to less confusing - // diagnostics. - match tcx.try_normalize_erasing_regions( - param_env, - tcx.struct_tail_without_normalization(pointee), - ) { - Ok(_) => {}, - Err(better_err) => { - err = better_err; - } - } - return Err(error(cx, LayoutError::NormalizationFailure(pointee, err))); - }, + // Calling `layout_of` here would cause a query cycle for recursive types; + // so use a conservative estimate that doesn't look past references. + let naive = cx.naive_layout_of(pointee)?.layout; + + let niches = match *pointee.kind() { + ty::FnDef(def, ..) + | ty::Foreign(def) + | ty::Generator(def, ..) + | ty::Closure(def, ..) => tcx.reference_niches_policy(def.krate), + ty::Adt(def, _) => tcx.reference_niches_policy(def.did().krate), + _ => DEFAULT_REF_NICHES, }; - let metadata_layout = cx.layout_of(metadata_ty)?; - // If the metadata is a 1-zst, then the pointer is thin. - if metadata_layout.is_zst() && metadata_layout.align.abi.bytes() == 1 { - return Ok(tcx.mk_layout(LayoutS::scalar(cx, data_ptr))); - } + let (min_addr, max_addr) = dl.address_range_for( + if niches.size { naive.size } else { Size::ZERO }, + if niches.align { naive.align } else { Align::ONE }, + ); - let Abi::Scalar(metadata) = metadata_layout.abi else { - return Err(error(cx, LayoutError::Unknown(pointee))); - }; + *data_ptr.valid_range_mut() = + WrappingRange { start: min_addr.into(), end: max_addr.into() }; + } - metadata + if let Some(metadata) = ptr_metadata_scalar(cx, pointee)? { + // Effectively a (ptr, meta) tuple. 
+ tcx.mk_layout(cx.scalar_pair(data_ptr, metadata)) } else { - let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env); - - match unsized_part.kind() { - ty::Foreign(..) => { - return Ok(tcx.mk_layout(LayoutS::scalar(cx, data_ptr))); - } - ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)), - ty::Dynamic(..) => { - let mut vtable = scalar_unit(Pointer(AddressSpace::DATA)); - vtable.valid_range_mut().start = 1; - vtable - } - _ => { - return Err(error(cx, LayoutError::Unknown(pointee))); - } - } - }; - - // Effectively a (ptr, meta) tuple. - tcx.mk_layout(cx.scalar_pair(data_ptr, metadata)) + // No metadata, this is a thin pointer. + tcx.mk_layout(LayoutS::scalar(cx, data_ptr)) + } } ty::Dynamic(_, _, ty::DynStar) => { @@ -226,16 +178,8 @@ fn layout_of_uncached<'tcx>( } // Arrays and slices. - ty::Array(element, mut count) => { - if count.has_projections() { - count = tcx.normalize_erasing_regions(param_env, count); - if count.has_projections() { - return Err(error(cx, LayoutError::Unknown(ty))); - } - } - - let count = count - .try_eval_target_usize(tcx, param_env) + ty::Array(element, count) => { + let count = compute_array_count(cx, count) .ok_or_else(|| error(cx, LayoutError::Unknown(ty)))?; let element = cx.layout_of(element)?; let size = element @@ -558,20 +502,104 @@ fn layout_of_uncached<'tcx>( } // Types with no meaningful known layout. - ty::Alias(..) => { - // NOTE(eddyb) `layout_of` query should've normalized these away, - // if that was possible, so there's no reason to try again here. - return Err(error(cx, LayoutError::Unknown(ty))); + ty::Alias(..) + | ty::Bound(..) + | ty::GeneratorWitness(..) + | ty::GeneratorWitnessMIR(..) + | ty::Infer(_) + | ty::Placeholder(..) + | ty::Param(_) + | ty::Error(_) => { + unreachable!("already rejected by `naive_layout_of`"); } + }) +} - ty::Bound(..) | ty::GeneratorWitness(..) | ty::GeneratorWitnessMIR(..) | ty::Infer(_) => { - bug!("Layout::compute: unexpected type `{}`", ty) +pub(crate) fn compute_array_count<'tcx>( + cx: &LayoutCx<'tcx, TyCtxt<'tcx>>, + mut count: ty::Const<'tcx>, +) -> Option { + let LayoutCx { tcx, param_env } = *cx; + if count.has_projections() { + count = tcx.normalize_erasing_regions(param_env, count); + if count.has_projections() { + return None; } + } + + count.try_eval_target_usize(tcx, param_env) +} + +pub(crate) fn ptr_metadata_scalar<'tcx>( + cx: &LayoutCx<'tcx, TyCtxt<'tcx>>, + pointee: Ty<'tcx>, +) -> Result, &'tcx LayoutError<'tcx>> { + let dl = cx.data_layout(); + let scalar_unit = |value: Primitive| { + let size = value.size(dl); + assert!(size.bits() <= 128); + Scalar::Initialized { value, valid_range: WrappingRange::full(size) } + }; - ty::Placeholder(..) | ty::Param(_) | ty::Error(_) => { - return Err(error(cx, LayoutError::Unknown(ty))); + let LayoutCx { tcx, param_env } = *cx; + + let pointee = tcx.normalize_erasing_regions(param_env, pointee); + if pointee.is_sized(tcx, param_env) { + return Ok(None); + } + + if let Some(metadata_def_id) = tcx.lang_items().metadata_type() + // Projection eagerly bails out when the pointee references errors, + // fall back to structurally deducing metadata. 
+ && !pointee.references_error() + { + let pointee_metadata = Ty::new_projection(tcx,metadata_def_id, [pointee]); + let metadata_ty = match tcx.try_normalize_erasing_regions( + param_env, + pointee_metadata, + ) { + Ok(metadata_ty) => metadata_ty, + Err(mut err) => { + // Usually `::Metadata` can't be normalized because + // its struct tail cannot be normalized either, so try to get a + // more descriptive layout error here, which will lead to less confusing + // diagnostics. + match tcx.try_normalize_erasing_regions( + param_env, + tcx.struct_tail_without_normalization(pointee), + ) { + Ok(_) => {}, + Err(better_err) => { + err = better_err; + } + } + return Err(error(cx, LayoutError::NormalizationFailure(pointee, err))); + }, + }; + + let metadata_layout = cx.layout_of(metadata_ty)?; + + if metadata_layout.is_zst() && metadata_layout.align.abi.bytes() == 1 { + Ok(None) // If the metadata is a 1-zst, then the pointer is thin. + } else if let Abi::Scalar(metadata) = metadata_layout.abi { + Ok(Some(metadata)) + } else { + Err(error(cx, LayoutError::Unknown(pointee))) } - }) + } else { + let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env); + + match unsized_part.kind() { + ty::Foreign(..) => Ok(None), + ty::Slice(_) | ty::Str => Ok(Some(scalar_unit(Int(dl.ptr_sized_integer(), false)))), + ty::Dynamic(..) => { + let mut vtable = scalar_unit(Pointer(AddressSpace::DATA)); + vtable.valid_range_mut().start = 1; + Ok(Some(vtable)) + } + _ => Err(error(cx, LayoutError::Unknown(pointee))), + } + } } /// Overlap eligibility and variant assignment for each GeneratorSavedLocal. diff --git a/compiler/rustc_ty_utils/src/layout_naive.rs b/compiler/rustc_ty_utils/src/layout_naive.rs new file mode 100644 index 0000000000000..3070ab59d531d --- /dev/null +++ b/compiler/rustc_ty_utils/src/layout_naive.rs @@ -0,0 +1,322 @@ +use rustc_middle::query::Providers; +use rustc_middle::ty::layout::{ + IntegerExt, LayoutCx, LayoutError, LayoutOf, NaiveAbi, NaiveLayout, NaiveNiches, + TyAndNaiveLayout, +}; +use rustc_middle::ty::{self, ReprOptions, Ty, TyCtxt, TypeVisitableExt}; +use rustc_span::DUMMY_SP; +use rustc_target::abi::*; + +use std::ops::Bound; + +use crate::layout::{compute_array_count, ptr_metadata_scalar}; + +pub fn provide(providers: &mut Providers) { + *providers = Providers { naive_layout_of, ..*providers }; +} + +#[instrument(skip(tcx, query), level = "debug")] +fn naive_layout_of<'tcx>( + tcx: TyCtxt<'tcx>, + query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>, +) -> Result, &'tcx LayoutError<'tcx>> { + let (param_env, ty) = query.into_parts(); + debug!(?ty); + + let param_env = param_env.with_reveal_all_normalized(tcx); + let unnormalized_ty = ty; + + // FIXME: We might want to have two different versions of `layout_of`: + // One that can be called after typecheck has completed and can use + // `normalize_erasing_regions` here and another one that can be called + // before typecheck has completed and uses `try_normalize_erasing_regions`. + let ty = match tcx.try_normalize_erasing_regions(param_env, ty) { + Ok(t) => t, + Err(normalization_error) => { + return Err(tcx + .arena + .alloc(LayoutError::NormalizationFailure(ty, normalization_error))); + } + }; + + if ty != unnormalized_ty { + // Ensure this layout is also cached for the normalized type. 
+ return tcx.naive_layout_of(param_env.and(ty));
+ }
+
+ let cx = LayoutCx { tcx, param_env };
+ let layout = naive_layout_of_uncached(&cx, ty)?;
+ Ok(TyAndNaiveLayout { ty, layout })
+}
+
+fn error<'tcx>(
+ cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
+ err: LayoutError<'tcx>,
+) -> &'tcx LayoutError<'tcx> {
+ cx.tcx.arena.alloc(err)
+}
+
+fn naive_layout_of_uncached<'tcx>(
+ cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
+ ty: Ty<'tcx>,
+) -> Result<NaiveLayout, &'tcx LayoutError<'tcx>> {
+ let tcx = cx.tcx;
+ let dl = cx.data_layout();
+
+ let scalar = |niched: bool, value: Primitive| NaiveLayout {
+ abi: NaiveAbi::Scalar(value),
+ niches: if niched { NaiveNiches::Some } else { NaiveNiches::None },
+ size: value.size(dl),
+ align: value.align(dl).abi,
+ exact: true,
+ };
+
+ let univariant = |fields: &mut dyn Iterator<Item = Ty<'tcx>>,
+ repr: &ReprOptions|
+ -> Result<NaiveLayout, &'tcx LayoutError<'tcx>> {
+ if repr.pack.is_some() && repr.align.is_some() {
+ cx.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
+ return Err(error(cx, LayoutError::Unknown(ty)));
+ }
+
+ let linear = repr.inhibit_struct_field_reordering_opt();
+ let pack = repr.pack.unwrap_or(Align::MAX);
+ let mut layout = NaiveLayout::EMPTY;
+
+ for field in fields {
+ let field = cx.naive_layout_of(field)?.packed(pack);
+ if linear {
+ layout = layout.pad_to_align(field.align);
+ }
+ layout = layout
+ .concat(&field, dl)
+ .ok_or_else(|| error(cx, LayoutError::SizeOverflow(ty)))?;
+ }
+
+ if let Some(align) = repr.align {
+ layout = layout.align_to(align);
+ }
+
+ if linear {
+ layout.abi = layout.abi.as_aggregate();
+ }
+
+ Ok(layout.pad_to_align(layout.align))
+ };
+
+ debug_assert!(!ty.has_non_region_infer());
+
+ Ok(match *ty.kind() {
+ // Basic scalars
+ ty::Bool => scalar(true, Int(I8, false)),
+ ty::Char => scalar(true, Int(I32, false)),
+ ty::Int(ity) => scalar(false, Int(Integer::from_int_ty(dl, ity), true)),
+ ty::Uint(ity) => scalar(false, Int(Integer::from_uint_ty(dl, ity), false)),
+ ty::Float(fty) => scalar(
+ false,
+ match fty {
+ ty::FloatTy::F32 => F32,
+ ty::FloatTy::F64 => F64,
+ },
+ ),
+ ty::FnPtr(_) => scalar(true, Pointer(dl.instruction_address_space)),
+
+ // The never type.
+ ty::Never => NaiveLayout { abi: NaiveAbi::Uninhabited, ..NaiveLayout::EMPTY },
+
+ // Potentially-wide pointers.
+ ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
+ let data_ptr = scalar(!ty.is_unsafe_ptr(), Pointer(AddressSpace::DATA));
+ if let Some(metadata) = ptr_metadata_scalar(cx, pointee)? {
+ // Effectively a (ptr, meta) tuple.
+ let meta = scalar(!metadata.is_always_valid(dl), metadata.primitive());
+ let l = data_ptr
+ .concat(&meta, dl)
+ .ok_or_else(|| error(cx, LayoutError::SizeOverflow(ty)))?;
+ l.pad_to_align(l.align)
+ } else {
+ // No metadata, this is a thin pointer.
+ data_ptr
+ }
+ }
+
+ ty::Dynamic(_, _, ty::DynStar) => {
+ let ptr = scalar(false, Pointer(AddressSpace::DATA));
+ let vtable = scalar(true, Pointer(AddressSpace::DATA));
+ ptr.concat(&vtable, dl).ok_or_else(|| error(cx, LayoutError::SizeOverflow(ty)))?
+ }
+
+ // Arrays and slices.
+ ty::Array(element, count) => {
+ let count = compute_array_count(cx, count)
+ .ok_or_else(|| error(cx, LayoutError::Unknown(ty)))?;
+ let element = cx.naive_layout_of(element)?;
+ NaiveLayout {
+ abi: element.abi.as_aggregate(),
+ size: element
+ .size
+ .checked_mul(count, cx)
+ .ok_or_else(|| error(cx, LayoutError::SizeOverflow(ty)))?,
+ niches: if count == 0 { NaiveNiches::None } else { element.niches },
+ ..*element
+ }
+ }
+ ty::Slice(element) => NaiveLayout {
+ abi: NaiveAbi::Unsized,
+ size: Size::ZERO,
+ niches: NaiveNiches::None,
+ ..*cx.naive_layout_of(element)?
+ },
+
+ ty::FnDef(..) => NaiveLayout::EMPTY,
+
+ // Unsized types.
+ ty::Str | ty::Dynamic(_, _, ty::Dyn) | ty::Foreign(..) => {
+ NaiveLayout { abi: NaiveAbi::Unsized, ..NaiveLayout::EMPTY }
+ }
+
+ // FIXME(reference_niches): try to actually compute a reasonable layout estimate,
+ // without duplicating too much code from `generator_layout`.
+ ty::Generator(..) => {
+ NaiveLayout { exact: false, niches: NaiveNiches::Maybe, ..NaiveLayout::EMPTY }
+ }
+
+ ty::Closure(_, ref substs) => {
+ univariant(&mut substs.as_closure().upvar_tys(), &ReprOptions::default())?
+ }
+
+ ty::Tuple(tys) => univariant(&mut tys.iter(), &ReprOptions::default())?,
+
+ ty::Adt(def, substs) if def.is_union() => {
+ assert_eq!(def.variants().len(), 1, "union should have a single variant");
+ let repr = def.repr();
+ let pack = repr.pack.unwrap_or(Align::MAX);
+ if repr.pack.is_some() && repr.align.is_some() {
+ cx.tcx.sess.delay_span_bug(DUMMY_SP, "union cannot be packed and aligned");
+ return Err(error(cx, LayoutError::Unknown(ty)));
+ }
+
+ let mut layout = NaiveLayout {
+ // Unions never have niches.
+ niches: NaiveNiches::None,
+ ..NaiveLayout::EMPTY
+ };
+
+ for f in &def.variants()[FIRST_VARIANT].fields {
+ let field = cx.naive_layout_of(f.ty(tcx, substs))?;
+ layout = layout.union(&field.packed(pack));
+ }
+
+ // Unions are always inhabited, and never scalar if `repr(C)`.
+ if !matches!(layout.abi, NaiveAbi::Scalar(_)) || repr.inhibit_enum_layout_opt() {
+ layout.abi = NaiveAbi::Sized;
+ }
+
+ if let Some(align) = repr.align {
+ layout = layout.align_to(align);
+ }
+ layout.pad_to_align(layout.align)
+ }
+
+ ty::Adt(def, substs) => {
+ let repr = def.repr();
+ let mut layout = NaiveLayout {
+ // An ADT with no inhabited variants should have an uninhabited ABI.
+ abi: NaiveAbi::Uninhabited,
+ ..NaiveLayout::EMPTY
+ };
+
+ let mut empty_variants = 0;
+ for v in def.variants() {
+ let mut fields = v.fields.iter().map(|f| f.ty(tcx, substs));
+ let vlayout = univariant(&mut fields, &repr)?;
+
+ if vlayout.size == Size::ZERO && vlayout.exact {
+ empty_variants += 1;
+ } else {
+ // Remember the niches of the last seen variant.
+ layout.niches = vlayout.niches;
+ }
+
+ layout = layout.union(&vlayout);
+ }
+
+ if def.is_enum() {
+ let may_need_discr = match def.variants().len() {
+ 0 | 1 => false,
+ // Simple Option-like niche optimization.
+ // Handling this special case allows enums like `Option<&T>`
+ // to be recognized as `PointerLike` and to be transmutable
+ // in generic contexts.
+ 2 if empty_variants == 1 && layout.niches == NaiveNiches::Some => {
+ layout.niches = NaiveNiches::Maybe; // fill up the niche.
+ false
+ }
+ _ => true,
+ };
+
+ if may_need_discr || repr.inhibit_enum_layout_opt() {
+ // For simplicity, assume that the discriminant always gets niched.
+ // This will be wrong in many cases, which will cause the size (and
+ // sometimes the alignment) to be underestimated.
+ // FIXME(reference_niches): Be smarter here.
+ layout.niches = NaiveNiches::Maybe;
+ layout = layout.inexact();
+ }
+ } else {
+ assert_eq!(def.variants().len(), 1, "struct should have a single variant");
+
+ // We don't compute exact alignment for SIMD structs.
+ if repr.simd() {
+ layout = layout.inexact();
+ }
+
+ // `UnsafeCell` hides all niches.
+ if def.is_unsafe_cell() {
+ layout.niches = NaiveNiches::None;
+ }
+ }
+
+ let valid_range = tcx.layout_scalar_valid_range(def.did());
+ if valid_range != (Bound::Unbounded, Bound::Unbounded) {
+ let get = |bound, default| match bound {
+ Bound::Unbounded => default,
+ Bound::Included(v) => v,
+ Bound::Excluded(_) => bug!("exclusive `layout_scalar_valid_range` bound"),
+ };
+
+ let valid_range = WrappingRange {
+ start: get(valid_range.0, 0),
+ // FIXME: this is wrong for scalar-pair ABIs. Fortunately, the
+ // only type this could currently affect is `NonNull`,
+ // and the `NaiveNiches` result still ends up correct.
+ end: get(valid_range.1, layout.size.unsigned_int_max()),
+ };
+ assert!(
+ valid_range.is_in_range_for(layout.size),
+ "`layout_scalar_valid_range` values are out of bounds",
+ );
+ if !valid_range.is_full_for(layout.size) {
+ layout.niches = NaiveNiches::Some;
+ }
+ }
+
+ layout.pad_to_align(layout.align)
+ }
+
+ // Types with no meaningful known layout.
+ ty::Alias(..) => {
+ // NOTE(eddyb) `layout_of` query should've normalized these away,
+ // if that was possible, so there's no reason to try again here.
+ return Err(error(cx, LayoutError::Unknown(ty)));
+ }
+
+ ty::Bound(..) | ty::GeneratorWitness(..) | ty::GeneratorWitnessMIR(..) | ty::Infer(_) => {
+ bug!("Layout::compute: unexpected type `{}`", ty)
+ }
+
+ ty::Placeholder(..) | ty::Param(_) | ty::Error(_) => {
+ return Err(error(cx, LayoutError::Unknown(ty)));
+ }
+ })
+}
diff --git a/compiler/rustc_ty_utils/src/layout_sanity_check.rs b/compiler/rustc_ty_utils/src/layout_sanity_check.rs
index 8633334381ada..2e3fe4e7fb84b 100644
--- a/compiler/rustc_ty_utils/src/layout_sanity_check.rs
+++ b/compiler/rustc_ty_utils/src/layout_sanity_check.rs
@@ -1,5 +1,5 @@
use rustc_middle::ty::{
- layout::{LayoutCx, TyAndLayout},
+ layout::{LayoutCx, NaiveLayout, TyAndLayout},
TyCtxt,
};
use rustc_target::abi::*;
@@ -10,6 +10,7 @@ use std::assert_matches::assert_matches;
pub(super) fn sanity_check_layout<'tcx>(
cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
layout: &TyAndLayout<'tcx>,
+ naive: &NaiveLayout,
) {
// Type-level uninhabitedness should always imply ABI uninhabitedness.
if layout.ty.is_privately_uninhabited(cx.tcx, cx.param_env) {
@@ -20,6 +21,10 @@ pub(super) fn sanity_check_layout<'tcx>(
bug!("size is not a multiple of align, in the following layout:\n{layout:#?}");
}
+ if !naive.is_refined_by(layout.layout) {
+ bug!("the naive layout isn't refined by the actual layout:\n{:#?}\n{:#?}", naive, layout);
+ }
+
if !cfg!(debug_assertions) {
// Stop here, the rest is kind of expensive.
return;
diff --git a/compiler/rustc_ty_utils/src/lib.rs b/compiler/rustc_ty_utils/src/lib.rs
index 55b8857ed391f..e2db6a6993f93 100644
--- a/compiler/rustc_ty_utils/src/lib.rs
+++ b/compiler/rustc_ty_utils/src/lib.rs
@@ -31,6 +31,7 @@ mod errors;
mod implied_bounds;
pub mod instance;
mod layout;
+mod layout_naive;
mod layout_sanity_check;
mod needs_drop;
mod opaque_types;
@@ -47,6 +48,7 @@ pub fn provide(providers: &mut Providers) {
consts::provide(providers);
implied_bounds::provide(providers);
layout::provide(providers);
+ layout_naive::provide(providers);
needs_drop::provide(providers);
opaque_types::provide(providers);
representability::provide(providers);
diff --git a/src/tools/miri/src/intptrcast.rs b/src/tools/miri/src/intptrcast.rs
index 4fd0af35304ec..a43ac61da74ae 100644
--- a/src/tools/miri/src/intptrcast.rs
+++ b/src/tools/miri/src/intptrcast.rs
@@ -207,7 +207,7 @@ impl<'mir, 'tcx> GlobalStateInner {
.checked_add(max(size.bytes(), 1))
.ok_or_else(|| err_exhaust!(AddressSpaceFull))?;
// Even if `Size` didn't overflow, we might still have filled up the address space.
- if global_state.next_base_addr > ecx.target_usize_max() {
+ if global_state.next_base_addr > ecx.data_layout().target_usize_max() {
throw_exhaust!(AddressSpaceFull);
}
// Given that `next_base_addr` increases in each allocation, pushing the
diff --git a/src/tools/miri/src/shims/mod.rs b/src/tools/miri/src/shims/mod.rs
index 1027b24e30114..0caa9b522f941 100644
--- a/src/tools/miri/src/shims/mod.rs
+++ b/src/tools/miri/src/shims/mod.rs
@@ -21,6 +21,7 @@ use log::trace;
use rustc_middle::{mir, ty};
use rustc_target::spec::abi::Abi;
+use rustc_target::abi::HasDataLayout as _;
use crate::*;
use helpers::check_arg_count;
@@ -108,7 +109,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
}
// Return error result (usize::MAX), and jump to caller.
- this.write_scalar(Scalar::from_target_usize(this.target_usize_max(), this), dest)?;
+ let usize_max = this.data_layout().target_usize_max();
+ this.write_scalar(Scalar::from_target_usize(usize_max, this), dest)?;
this.go_to_block(ret);
Ok(true)
}
diff --git a/src/tools/miri/src/shims/unix/fs.rs b/src/tools/miri/src/shims/unix/fs.rs
index 0fdd55b407cd1..5da66801694bc 100644
--- a/src/tools/miri/src/shims/unix/fs.rs
+++ b/src/tools/miri/src/shims/unix/fs.rs
@@ -12,7 +12,7 @@ use log::trace;
use rustc_data_structures::fx::FxHashMap;
use rustc_middle::ty::TyCtxt;
-use rustc_target::abi::{Align, Size};
+use rustc_target::abi::{Align, Size, HasDataLayout as _};
use crate::shims::os_str::bytes_to_os_str;
use crate::*;
@@ -753,7 +753,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
// We cap the number of read bytes to the largest value that we are able to fit in both the
// host's and target's `isize`. This saves us from having to handle overflows later.
let count = count
- .min(u64::try_from(this.target_isize_max()).unwrap())
+ .min(u64::try_from(this.data_layout().target_isize_max()).unwrap())
.min(u64::try_from(isize::MAX).unwrap());
let communicate = this.machine.communicate();
@@ -807,7 +807,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
// We cap the number of written bytes to the largest value that we are able to fit in both the
// host's and target's `isize`. This saves us from having to handle overflows later.
let count = count
- .min(u64::try_from(this.target_isize_max()).unwrap())
+ .min(u64::try_from(this.data_layout().target_isize_max()).unwrap())
.min(u64::try_from(isize::MAX).unwrap());
let communicate = this.machine.communicate();
diff --git a/src/tools/miri/tests/fail/layout_cycle.rs b/src/tools/miri/tests/fail/layout_cycle.rs
index 3e0dd881db84e..d6a937de15cab 100644
--- a/src/tools/miri/tests/fail/layout_cycle.rs
+++ b/src/tools/miri/tests/fail/layout_cycle.rs
@@ -1,5 +1,5 @@
//@error-in-other-file: a cycle occurred during layout computation
-//~^ ERROR: cycle detected when computing layout of
+//~^ ERROR: cycle detected when computing layout (naive) of
use std::mem;
diff --git a/src/tools/miri/tests/fail/layout_cycle.stderr b/src/tools/miri/tests/fail/layout_cycle.stderr
index 38907a1c50cc5..ccf93a9def494 100644
--- a/src/tools/miri/tests/fail/layout_cycle.stderr
+++ b/src/tools/miri/tests/fail/layout_cycle.stderr
@@ -1,7 +1,8 @@
-error[E0391]: cycle detected when computing layout of `S<S<()>>`
+error[E0391]: cycle detected when computing layout (naive) of `S<S<()>>`
|
- = note: ...which requires computing layout of `<S<()> as Tr>::I`...
- = note: ...which again requires computing layout of `S<S<()>>`, completing the cycle
+ = note: ...which requires computing layout (naive) of `<S<()> as Tr>::I`...
+ = note: ...which again requires computing layout (naive) of `S<S<()>>`, completing the cycle
+ = note: cycle used when computing layout of `S<S<()>>`
= note: see https://rustc-dev-guide.rust-lang.org/overview.html#queries and https://rustc-dev-guide.rust-lang.org/query.html for more information
error: post-monomorphization error: a cycle occurred during layout computation
diff --git a/tests/ui/consts/const-size_of-cycle.stderr b/tests/ui/consts/const-size_of-cycle.stderr
index 46b432357aae6..08f0c1563ccb6 100644
--- a/tests/ui/consts/const-size_of-cycle.stderr
+++ b/tests/ui/consts/const-size_of-cycle.stderr
@@ -15,7 +15,8 @@ note: ...which requires const-evaluating + checking `Foo::bytes::{constant#0}`..
LL | bytes: [u8; std::mem::size_of::<Foo>()]
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
= note: ...which requires computing layout of `Foo`...
- = note: ...which requires computing layout of `[u8; std::mem::size_of::<Foo>()]`...
+ = note: ...which requires computing layout (naive) of `Foo`...
+ = note: ...which requires computing layout (naive) of `[u8; std::mem::size_of::<Foo>()]`...
= note: ...which requires normalizing `[u8; std::mem::size_of::<Foo>()]`...
= note: ...which again requires evaluating type-level constant, completing the cycle
note: cycle used when checking that `Foo` is well-formed
diff --git a/tests/ui/consts/issue-44415.stderr b/tests/ui/consts/issue-44415.stderr
index 01d24a6208144..7ff413def86d6 100644
--- a/tests/ui/consts/issue-44415.stderr
+++ b/tests/ui/consts/issue-44415.stderr
@@ -15,7 +15,8 @@ note: ...which requires const-evaluating + checking `Foo::bytes::{constant#0}`..
LL | bytes: [u8; unsafe { intrinsics::size_of::<Foo>() }],
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
= note: ...which requires computing layout of `Foo`...
- = note: ...which requires computing layout of `[u8; unsafe { intrinsics::size_of::<Foo>() }]`...
+ = note: ...which requires computing layout (naive) of `Foo`...
+ = note: ...which requires computing layout (naive) of `[u8; unsafe { intrinsics::size_of::<Foo>() }]`...
= note: ...which requires normalizing `[u8; unsafe { intrinsics::size_of::<Foo>() }]`...
= note: ...which again requires evaluating type-level constant, completing the cycle
note: cycle used when checking that `Foo` is well-formed
diff --git a/tests/ui/dyn-star/param-env-region-infer.next.stderr b/tests/ui/dyn-star/param-env-region-infer.next.stderr
index 28aec533a0067..51df71a373edf 100644
--- a/tests/ui/dyn-star/param-env-region-infer.next.stderr
+++ b/tests/ui/dyn-star/param-env-region-infer.next.stderr
@@ -9,7 +9,7 @@ note: ...which requires type-checking `make_dyn_star`...
|
LL | fn make_dyn_star<'a, T: PointerLike + Debug + 'a>(t: T) -> impl PointerLike + Debug + 'a {
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- = note: ...which requires computing layout of `make_dyn_star::{opaque#0}`...
+ = note: ...which requires computing layout (naive) of `make_dyn_star::{opaque#0}`...
= note: ...which requires normalizing `make_dyn_star::{opaque#0}`...
= note: ...which again requires computing type of `make_dyn_star::{opaque#0}`, completing the cycle
note: cycle used when checking item types in top-level module
diff --git a/tests/ui/generics/issue-32498.rs b/tests/ui/generics/issue-32498.rs
index 1b54401097ea9..0abd5b1a9b14e 100644
--- a/tests/ui/generics/issue-32498.rs
+++ b/tests/ui/generics/issue-32498.rs
@@ -1,5 +1,6 @@
// run-pass
#![allow(dead_code)]
+#![recursion_limit = "129"]
// Making sure that no overflow occurs.
diff --git a/tests/ui/layout/valid_range_oob.stderr b/tests/ui/layout/valid_range_oob.stderr
index a3a514fb83095..772113fa5fb29 100644
--- a/tests/ui/layout/valid_range_oob.stderr
+++ b/tests/ui/layout/valid_range_oob.stderr
@@ -1,6 +1,6 @@
error: the compiler unexpectedly panicked. this is a bug.
query stack during panic:
-#0 [layout_of] computing layout of `Foo`
-#1 [eval_to_allocation_raw] const-evaluating + checking `FOO`
+#0 [naive_layout_of] computing layout (naive) of `Foo`
+#1 [layout_of] computing layout of `Foo`
end of query stack
diff --git a/tests/ui/lint/invalid_value.stderr b/tests/ui/lint/invalid_value.stderr
index 57531b0968f1e..066fdccbaadfa 100644
--- a/tests/ui/lint/invalid_value.stderr
+++ b/tests/ui/lint/invalid_value.stderr
@@ -34,8 +34,7 @@ LL | let _val: Wrap<&'static T> = mem::zeroed();
| this code causes undefined behavior when executed
| help: use `MaybeUninit<T>` instead, and only call `assume_init` after initialization is done
|
- = note: `Wrap<&T>` must be non-null
-note: because references must be non-null (in this struct field)
+note: references must be non-null (in this struct field)
--> $DIR/invalid_value.rs:17:18
|
LL | struct Wrap<T> { wrapped: T }
@@ -50,8 +49,7 @@ LL | let _val: Wrap<&'static T> = mem::uninitialized();
| this code causes undefined behavior when executed
| help: use `MaybeUninit<T>` instead, and only call `assume_init` after initialization is done
|
- = note: `Wrap<&T>` must be non-null
-note: because references must be non-null (in this struct field)
+note: references must be non-null (in this struct field)
--> $DIR/invalid_value.rs:17:18
|
LL | struct Wrap<T> { wrapped: T }
diff --git a/tests/ui/recursion/issue-26548-recursion-via-normalize.rs b/tests/ui/recursion/issue-26548-recursion-via-normalize.rs
index 6c7fc4beb543d..14bc74f57f61d 100644
--- a/tests/ui/recursion/issue-26548-recursion-via-normalize.rs
+++ b/tests/ui/recursion/issue-26548-recursion-via-normalize.rs
@@ -1,9 +1,9 @@
-//~ ERROR cycle detected when computing layout of `core::option::Option<S>`
+//~ ERROR cycle detected when computing layout (naive) of `core::option::Option<S>`
//~| NOTE see https://rustc-dev-guide.rust-lang.org/overview.html#queries and https://rustc-dev-guide.rust-lang.org/query.html for more information
-//~| NOTE ...which requires computing layout of `S`...
-//~| NOTE ...which requires computing layout of `core::option::Option<<S as Mirror>::It>`...
-//~| NOTE ...which again requires computing layout of `core::option::Option<S>`, completing the cycle
-//~| NOTE cycle used when computing layout of `core::option::Option<<S as Mirror>::It>`
+//~| NOTE ...which requires computing layout (naive) of `S`...
+//~| NOTE ...which requires computing layout (naive) of `core::option::Option<<S as Mirror>::It>`...
+//~| NOTE ...which again requires computing layout (naive) of `core::option::Option<S>`, completing the cycle
+//~| NOTE cycle used when computing layout (naive) of `core::option::Option<<S as Mirror>::It>`
trait Mirror {
type It: ?Sized;
diff --git a/tests/ui/recursion/issue-26548-recursion-via-normalize.stderr b/tests/ui/recursion/issue-26548-recursion-via-normalize.stderr
index 514bed607003b..109ba278232a7 100644
--- a/tests/ui/recursion/issue-26548-recursion-via-normalize.stderr
+++ b/tests/ui/recursion/issue-26548-recursion-via-normalize.stderr
@@ -1,9 +1,9 @@
-error[E0391]: cycle detected when computing layout of `core::option::Option<S>`
+error[E0391]: cycle detected when computing layout (naive) of `core::option::Option<S>`
|
- = note: ...which requires computing layout of `S`...
- = note: ...which requires computing layout of `core::option::Option<<S as Mirror>::It>`...
- = note: ...which again requires computing layout of `core::option::Option<S>`, completing the cycle
- = note: cycle used when computing layout of `core::option::Option<<S as Mirror>::It>`
+ = note: ...which requires computing layout (naive) of `S`...
+ = note: ...which requires computing layout (naive) of `core::option::Option<<S as Mirror>::It>`...
+ = note: ...which again requires computing layout (naive) of `core::option::Option<S>`, completing the cycle
+ = note: cycle used when computing layout (naive) of `core::option::Option<<S as Mirror>::It>`
= note: see https://rustc-dev-guide.rust-lang.org/overview.html#queries and https://rustc-dev-guide.rust-lang.org/query.html for more information
error: aborting due to previous error
diff --git a/tests/ui/recursion_limit/zero-overflow.rs b/tests/ui/recursion_limit/zero-overflow.rs
index 77bd818567608..98b3da6513520 100644
--- a/tests/ui/recursion_limit/zero-overflow.rs
+++ b/tests/ui/recursion_limit/zero-overflow.rs
@@ -1,4 +1,4 @@
-//~ ERROR overflow evaluating the requirement `&mut Self: DispatchFromDyn<&mut RustaceansAreAwesome>
+//~ ERROR queries overflow the depth limit!
//~| HELP consider increasing the recursion limit
// build-fail
diff --git a/tests/ui/recursion_limit/zero-overflow.stderr b/tests/ui/recursion_limit/zero-overflow.stderr
index 9007ec0d78444..172c767d9f0c0 100644
--- a/tests/ui/recursion_limit/zero-overflow.stderr
+++ b/tests/ui/recursion_limit/zero-overflow.stderr
@@ -1,7 +1,7 @@
-error[E0275]: overflow evaluating the requirement `&mut Self: DispatchFromDyn<&mut RustaceansAreAwesome>`
+error: queries overflow the depth limit!
|
= help: consider increasing the recursion limit by adding a `#![recursion_limit = "2"]` attribute to your crate (`zero_overflow`)
+ = note: query depth increased by 2 when computing layout of `()`
error: aborting due to previous error
-For more information about this error, try `rustc --explain E0275`.
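Editor's note, not part of the patch: for readers who want to see the cycle diagnostics above in isolation, the following sketch restates the `issue-26548-recursion-via-normalize` test from this diff as a self-contained program. It intentionally fails to compile: computing the layout of `Option<S>` needs the layout of `S`, whose field is again `Option<<S as Mirror>::It>` (that is, `Option<S>`), which rustc now reports as a cycle in the `naive_layout_of` query.

// Not part of the diff: minimal reproduction of the layout cycle whose
// expected diagnostics are updated above.
trait Mirror {
    type It: ?Sized;
}

impl<T: ?Sized> Mirror for T {
    type It = Self;
}

// The field type normalizes back to `Option<S>`, so the layout query recurses into itself.
struct S(Option<<S as Mirror>::It>);

fn main() {
    let _s = S(None);
}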
diff --git a/tests/ui/sized/recursive-type-2.rs b/tests/ui/sized/recursive-type-2.rs
index 7d95417a6ffd9..7ee5ee854d4be 100644
--- a/tests/ui/sized/recursive-type-2.rs
+++ b/tests/ui/sized/recursive-type-2.rs
@@ -1,5 +1,5 @@
// build-fail
-//~^ ERROR cycle detected when computing layout of `Foo<()>`
+//~^ ERROR cycle detected when computing layout (naive) of `Foo<()>`
trait A { type Assoc: ?Sized; }
diff --git a/tests/ui/sized/recursive-type-2.stderr b/tests/ui/sized/recursive-type-2.stderr
index 0f72f74145e8d..502b0a4352c7a 100644
--- a/tests/ui/sized/recursive-type-2.stderr
+++ b/tests/ui/sized/recursive-type-2.stderr
@@ -1,12 +1,8 @@
-error[E0391]: cycle detected when computing layout of `Foo<()>`
+error[E0391]: cycle detected when computing layout (naive) of `Foo<()>`
|
- = note: ...which requires computing layout of `<() as A>::Assoc`...
- = note: ...which again requires computing layout of `Foo<()>`, completing the cycle
-note: cycle used when elaborating drops for `main`
- --> $DIR/recursive-type-2.rs:11:1
- |
-LL | fn main() {
- | ^^^^^^^^^
+ = note: ...which requires computing layout (naive) of `<() as A>::Assoc`...
+ = note: ...which again requires computing layout (naive) of `Foo<()>`, completing the cycle
+ = note: cycle used when computing layout of `Foo<()>`
= note: see https://rustc-dev-guide.rust-lang.org/overview.html#queries and https://rustc-dev-guide.rust-lang.org/query.html for more information
error: aborting due to previous error
diff --git a/tests/ui/transmute/transmute-fat-pointers.rs b/tests/ui/transmute/transmute-fat-pointers.rs
index 7c1beffd14ed4..d373ff5f24a53 100644
--- a/tests/ui/transmute/transmute-fat-pointers.rs
+++ b/tests/ui/transmute/transmute-fat-pointers.rs
@@ -30,4 +30,16 @@ fn f<T, U: ?Sized>(x: &T) -> &U {
unsafe { transmute(x) } //~ ERROR cannot transmute between types of different sizes
}
+fn g<T, U>(x: &T) -> Option<&U> {
+ unsafe { transmute(x) }
+}
+
+fn h<T>(x: &[T]) -> Option<&dyn Send> {
+ unsafe { transmute(x) }
+}
+
+fn i<T>(x: [usize; 1]) -> Option<&'static T> {
+ unsafe { transmute(x) }
+}
+
fn main() { }
diff --git a/tests/ui/type-alias-impl-trait/issue-53092-2.stderr b/tests/ui/type-alias-impl-trait/issue-53092-2.stderr
index 6148131b491f5..9d90c6fbc58ef 100644
--- a/tests/ui/type-alias-impl-trait/issue-53092-2.stderr
+++ b/tests/ui/type-alias-impl-trait/issue-53092-2.stderr
@@ -9,7 +9,7 @@ note: ...which requires type-checking `CONST_BUG`...
|
LL | const CONST_BUG: Bug<u8, ()> = unsafe { std::mem::transmute(|_: u8| ()) };
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- = note: ...which requires computing layout of `Bug<u8, ()>`...
+ = note: ...which requires computing layout (naive) of `Bug<u8, ()>`...
= note: ...which requires normalizing `Bug<u8, ()>`...
= note: ...which again requires computing type of `Bug::{opaque#0}`, completing the cycle
note: cycle used when checking item types in top-level module
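Editor's note, not part of the patch: the new `g`, `h`, and `i` cases in `transmute-fat-pointers.rs` compile because `Option<&T>` reuses the null pointer as its `None` value, so it occupies exactly the same space as `&T` (and, for wide references, the niche lives in the data pointer half). The standalone sketch below checks those size equalities; it is illustrative only and uses arbitrary concrete types (`u8`, `[u8]`, `dyn Send`) rather than anything from the test.

// Not part of the diff: size checks for the niche guarantees the new transmute tests rely on.
use std::mem::size_of;

fn main() {
    // Thin reference: one machine word, with or without the `Option`.
    assert_eq!(size_of::<Option<&u8>>(), size_of::<&u8>());
    // Wide references: data pointer + length, or data pointer + vtable pointer.
    assert_eq!(size_of::<Option<&[u8]>>(), size_of::<&[u8]>());
    assert_eq!(size_of::<Option<&dyn Send>>(), size_of::<&dyn Send>());
    // A single `usize` therefore matches a thin `Option<&T>`, which is what `fn i` transmutes.
    assert_eq!(size_of::<[usize; 1]>(), size_of::<Option<&'static u8>>());
    println!("all size checks passed");
}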