From 43f9467f190ccb879113906c7a5aab5240d9d9fa Mon Sep 17 00:00:00 2001 From: Zach Hamlin Date: Sun, 10 Oct 2021 14:32:50 -0500 Subject: [PATCH 1/2] partial enum variant type support --- compiler/rustc_borrowck/src/type_check/mod.rs | 28 + .../src/debuginfo/type_names.rs | 1 + .../rustc_const_eval/src/const_eval/mod.rs | 1 + .../src/interpret/intrinsics.rs | 1 + .../src/interpret/intrinsics/type_name.rs | 1 + .../rustc_const_eval/src/interpret/operand.rs | 4 + .../src/interpret/validity.rs | 33 + .../src/infer/canonical/canonicalizer.rs | 1 + compiler/rustc_infer/src/infer/combine.rs | 1 + compiler/rustc_infer/src/infer/freshen.rs | 1 + compiler/rustc_lint/src/types.rs | 1 + compiler/rustc_middle/src/ty/cast.rs | 10 + compiler/rustc_middle/src/ty/codec.rs | 2 +- compiler/rustc_middle/src/ty/context.rs | 1 + compiler/rustc_middle/src/ty/error.rs | 8 + compiler/rustc_middle/src/ty/fast_reject.rs | 4 + compiler/rustc_middle/src/ty/flags.rs | 5 + compiler/rustc_middle/src/ty/layout.rs | 1165 +++++++++-------- compiler/rustc_middle/src/ty/outlives.rs | 1 + compiler/rustc_middle/src/ty/print/mod.rs | 1 + compiler/rustc_middle/src/ty/print/pretty.rs | 8 + compiler/rustc_middle/src/ty/relate.rs | 30 + .../rustc_middle/src/ty/structural_impls.rs | 11 + compiler/rustc_middle/src/ty/sty.rs | 18 + compiler/rustc_middle/src/ty/util.rs | 8 + compiler/rustc_middle/src/ty/walk.rs | 4 + .../rustc_mir_build/src/build/expr/into.rs | 2 + compiler/rustc_mir_build/src/thir/cx/expr.rs | 11 + .../rustc_mir_build/src/thir/pattern/mod.rs | 4 + .../rustc_mir_dataflow/src/elaborate_drops.rs | 11 + compiler/rustc_mir_dataflow/src/impls/mod.rs | 5 +- compiler/rustc_passes/src/dead.rs | 9 + compiler/rustc_privacy/src/lib.rs | 14 + compiler/rustc_resolve/src/late.rs | 1 + compiler/rustc_symbol_mangling/src/v0.rs | 1 + .../src/traits/coherence.rs | 1 + .../src/traits/error_reporting/mod.rs | 1 + .../src/traits/project.rs | 3 + .../src/traits/query/dropck_outlives.rs | 16 + 
.../src/traits/select/candidate_assembly.rs | 3 + .../src/traits/select/mod.rs | 22 +- .../src/traits/structural_match.rs | 1 + .../rustc_trait_selection/src/traits/wf.rs | 8 + compiler/rustc_traits/src/chalk/lowering.rs | 1 + compiler/rustc_traits/src/dropck_outlives.rs | 13 + compiler/rustc_ty_utils/src/instance.rs | 1 + compiler/rustc_ty_utils/src/ty.rs | 1 + compiler/rustc_typeck/src/astconv/mod.rs | 32 +- compiler/rustc_typeck/src/check/cast.rs | 1 + compiler/rustc_typeck/src/check/expr.rs | 124 +- .../rustc_typeck/src/check/fn_ctxt/_impl.rs | 1 - .../rustc_typeck/src/variance/constraints.rs | 5 + src/test/ui/enum-variant-types/fn_call.rs | 15 + .../ui/enum-variant-types/invalid_variant.rs | 14 + .../enum-variant-types/invalid_variant.stderr | 12 + .../enum-variant-types/variant_projection.rs | 16 + 56 files changed, 1120 insertions(+), 578 deletions(-) create mode 100644 src/test/ui/enum-variant-types/fn_call.rs create mode 100644 src/test/ui/enum-variant-types/invalid_variant.rs create mode 100644 src/test/ui/enum-variant-types/invalid_variant.stderr create mode 100644 src/test/ui/enum-variant-types/variant_projection.rs diff --git a/compiler/rustc_borrowck/src/type_check/mod.rs b/compiler/rustc_borrowck/src/type_check/mod.rs index 55790bd2daa9b..b09d645d30b48 100644 --- a/compiler/rustc_borrowck/src/type_check/mod.rs +++ b/compiler/rustc_borrowck/src/type_check/mod.rs @@ -744,6 +744,22 @@ impl<'a, 'b, 'tcx> TypeVerifier<'a, 'b, 'tcx> { PlaceTy { ty: base_ty, variant_index: Some(index) } } } + ty::Variant(ty, _) => match ty.kind() { + ty::Adt(adt_def, _substs) if adt_def.is_enum() => { + if index.as_usize() >= adt_def.variants.len() { + PlaceTy::from_ty(span_mirbug_and_err!( + self, + place, + "cast to variant #{:?} but enum only has {:?}", + index, + adt_def.variants.len() + )) + } else { + PlaceTy { ty: *ty, variant_index: Some(index) } + } + } + _ => bug!("unexpected type: {:?}", ty.kind()), + }, // We do not need to handle generators here, because this 
runs // before the generator transform stage. _ => { @@ -812,6 +828,12 @@ impl<'a, 'b, 'tcx> TypeVerifier<'a, 'b, 'tcx> { let (variant, substs) = match base_ty { PlaceTy { ty, variant_index: Some(variant_index) } => match *ty.kind() { ty::Adt(adt_def, substs) => (&adt_def.variants[variant_index], substs), + ty::Variant(ty, _) => match ty.kind() { + ty::Adt(adt_def, substs) => { + (adt_def.variants.get(variant_index).expect(""), *substs) + } + _ => bug!("unexpected type: {:?}", ty.kind()), + }, ty::Generator(def_id, substs, _) => { let mut variants = substs.as_generator().state_tys(def_id, tcx); let mut variant = match variants.nth(variant_index.into()) { @@ -833,6 +855,12 @@ impl<'a, 'b, 'tcx> TypeVerifier<'a, 'b, 'tcx> { ty::Adt(adt_def, substs) if !adt_def.is_enum() => { (&adt_def.variants[VariantIdx::new(0)], substs) } + ty::Variant(ty, _) => match ty.kind() { + ty::Adt(adt_def, substs) if adt_def.is_enum() => { + (&adt_def.variants[VariantIdx::new(0)], *substs) + } + _ => bug!("unexpected type: {:?}", ty.kind()), + }, ty::Closure(_, substs) => { return match substs .as_closure() diff --git a/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs b/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs index 6e7b296859740..89ac5ccd3cbb4 100644 --- a/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs +++ b/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs @@ -373,6 +373,7 @@ fn push_debuginfo_type_name<'tcx>( t ); } + ty::Variant(..) => unimplemented!("TODO(zhamlin)"), } /// MSVC names enums differently than other platforms so that the debugging visualization diff --git a/compiler/rustc_const_eval/src/const_eval/mod.rs b/compiler/rustc_const_eval/src/const_eval/mod.rs index a334165df4cb1..61e3122396823 100644 --- a/compiler/rustc_const_eval/src/const_eval/mod.rs +++ b/compiler/rustc_const_eval/src/const_eval/mod.rs @@ -129,6 +129,7 @@ fn const_to_valtree_inner<'tcx>( | ty::Closure(..) | ty::Generator(..) | ty::GeneratorWitness(..) 
=> None, + ty::Variant(..) => unimplemented!("TODO(zhamlin)"), } } diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs index 698742fe98ceb..3b4b162c9a837 100644 --- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs +++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs @@ -100,6 +100,7 @@ crate fn eval_nullary_intrinsic<'tcx>( | ty::Never | ty::Tuple(_) | ty::Error(_) => ConstValue::from_machine_usize(0u64, &tcx), + ty::Variant(..) => unimplemented!("TODO(zhamlin)"), }, other => bug!("`{}` is not a zero arg intrinsic", other), }) diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics/type_name.rs b/compiler/rustc_const_eval/src/interpret/intrinsics/type_name.rs index a7012cd63f313..45aa38a824017 100644 --- a/compiler/rustc_const_eval/src/interpret/intrinsics/type_name.rs +++ b/compiler/rustc_const_eval/src/interpret/intrinsics/type_name.rs @@ -65,6 +65,7 @@ impl<'tcx> Printer<'tcx> for AbsolutePathPrinter<'tcx> { ty::Foreign(def_id) => self.print_def_path(def_id, &[]), ty::GeneratorWitness(_) => bug!("type_name: unexpected `GeneratorWitness`"), + ty::Variant(..) 
=> unimplemented!("TODO(zhamlin)"), } } diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs index de870bd5c6cf1..8a80482e6d106 100644 --- a/compiler/rustc_const_eval/src/interpret/operand.rs +++ b/compiler/rustc_const_eval/src/interpret/operand.rs @@ -687,6 +687,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { ty::Adt(adt, _) => { adt.discriminants(*self.tcx).find(|(_, var)| var.val == discr_bits) } + ty::Variant(ty, _) => match ty.kind() { + ty::Adt(adt, _) => adt.discriminants(*self.tcx).find(|(_, var)| var.val == discr_bits), + _ => bug!("unexpected type: {:?}", ty.kind()), + } ty::Generator(def_id, substs, _) => { let substs = substs.as_generator(); substs diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs index fc69770bf6a30..09925f26f18ce 100644 --- a/compiler/rustc_const_eval/src/interpret/validity.rs +++ b/compiler/rustc_const_eval/src/interpret/validity.rs @@ -219,6 +219,10 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, ' if tag_field == field { return match layout.ty.kind() { ty::Adt(def, ..) if def.is_enum() => PathElem::EnumTag, + ty::Variant(ty, ..) => match ty.kind() { + ty::Adt(def, ..) if def.is_enum() => PathElem::EnumTag, + _ => bug!("non-variant type {:?}", layout.ty), + }, ty::Generator(..) => PathElem::GeneratorTag, _ => bug!("non-variant type {:?}", layout.ty), }; @@ -272,6 +276,20 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, ' } } + ty::Variant(ty, ..) => match ty.kind() { + ty::Adt(def, ..) if def.is_enum() => { + // we might be projecting *to* a variant, or to a field *in* a variant. + match layout.variants { + Variants::Single { index } => { + // Inside a variant + PathElem::Field(def.variants[index].fields[field].ident.name) + } + Variants::Multiple { .. 
} => bug!("we handled variants above"), + } + } + _ => bug!("unexpected type: {:?}", ty.kind()), + }, + // other ADTs ty::Adt(def, _) => PathElem::Field(def.non_enum_variant().fields[field].ident.name), @@ -567,6 +585,17 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, ' self.check_safe_pointer(value, "box")?; Ok(true) } + ty::Variant(ty, _) => match ty.kind() { + ty::Adt(def, _) => { + if def.is_box() { + self.check_safe_pointer(value, "box")?; + Ok(true) + } else { + Ok(false) + } + } + _ => bug!("unexpected type: {:?}", ty.kind()), + }, ty::FnPtr(_sig) => { let value = try_validation!( self.ecx.read_immediate(value), @@ -729,6 +758,10 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M> ) -> InterpResult<'tcx> { let name = match old_op.layout.ty.kind() { ty::Adt(adt, _) => PathElem::Variant(adt.variants[variant_id].ident.name), + ty::Variant(ty, ..) => match ty.kind() { + ty::Adt(adt, ..) => PathElem::Variant(adt.variants[variant_id].ident.name), + _ => bug!("unexpected type {:?}", ty.kind()), + }, // Generators also have variants ty::Generator(..) => PathElem::GeneratorState(variant_id), _ => bug!("Unexpected type with variant: {:?}", old_op.layout.ty), diff --git a/compiler/rustc_infer/src/infer/canonical/canonicalizer.rs b/compiler/rustc_infer/src/infer/canonical/canonicalizer.rs index 934ada9932e71..1303c0fc5959d 100644 --- a/compiler/rustc_infer/src/infer/canonical/canonicalizer.rs +++ b/compiler/rustc_infer/src/infer/canonical/canonicalizer.rs @@ -385,6 +385,7 @@ impl<'cx, 'tcx> TypeFolder<'tcx> for Canonicalizer<'cx, 'tcx> { | ty::Uint(..) | ty::Float(..) | ty::Adt(..) + | ty::Variant(..) | ty::Str | ty::Error(_) | ty::Array(..) 
diff --git a/compiler/rustc_infer/src/infer/combine.rs b/compiler/rustc_infer/src/infer/combine.rs index 3f54247ecef21..58ef75b3ca8d8 100644 --- a/compiler/rustc_infer/src/infer/combine.rs +++ b/compiler/rustc_infer/src/infer/combine.rs @@ -72,6 +72,7 @@ impl<'infcx, 'tcx> InferCtxt<'infcx, 'tcx> { { let a_is_expected = relation.a_is_expected(); + debug!("super_combine_tys: {:?} | {:?}", a.kind(), b.kind()); match (a.kind(), b.kind()) { // Relate integral variables to other types (&ty::Infer(ty::IntVar(a_id)), &ty::Infer(ty::IntVar(b_id))) => { diff --git a/compiler/rustc_infer/src/infer/freshen.rs b/compiler/rustc_infer/src/infer/freshen.rs index c40e409891bc2..93418a7db13cc 100644 --- a/compiler/rustc_infer/src/infer/freshen.rs +++ b/compiler/rustc_infer/src/infer/freshen.rs @@ -199,6 +199,7 @@ impl<'a, 'tcx> TypeFolder<'tcx> for TypeFreshener<'a, 'tcx> { | ty::Uint(..) | ty::Float(..) | ty::Adt(..) + | ty::Variant(..) | ty::Str | ty::Error(_) | ty::Array(..) diff --git a/compiler/rustc_lint/src/types.rs b/compiler/rustc_lint/src/types.rs index 708cd56e068b5..5415eff8363ca 100644 --- a/compiler/rustc_lint/src/types.rs +++ b/compiler/rustc_lint/src/types.rs @@ -1130,6 +1130,7 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> { | ty::GeneratorWitness(..) | ty::Placeholder(..) | ty::FnDef(..) => bug!("unexpected type in foreign function: {:?}", ty), + ty::Variant(..) 
=> unimplemented!("TODO(zhamlin)"), } } diff --git a/compiler/rustc_middle/src/ty/cast.rs b/compiler/rustc_middle/src/ty/cast.rs index 20a6af5f6c13b..04c8956ac32c3 100644 --- a/compiler/rustc_middle/src/ty/cast.rs +++ b/compiler/rustc_middle/src/ty/cast.rs @@ -59,6 +59,16 @@ impl<'tcx> CastTy<'tcx> { ty::Uint(u) => Some(CastTy::Int(IntTy::U(u))), ty::Float(_) => Some(CastTy::Float), ty::Adt(d, _) if d.is_enum() && d.is_payloadfree() => Some(CastTy::Int(IntTy::CEnum)), + ty::Variant(ty, _) => match ty.kind() { + ty::Adt(d, _) => { + if d.is_enum() && d.is_payloadfree() { + Some(CastTy::Int(IntTy::CEnum)) + } else { + None + } + } + _ => bug!("unexpected type: {:?}", ty.kind()), + } ty::RawPtr(mt) => Some(CastTy::Ptr(mt)), ty::FnPtr(..) => Some(CastTy::FnPtr), _ => None, diff --git a/compiler/rustc_middle/src/ty/codec.rs b/compiler/rustc_middle/src/ty/codec.rs index 434008ecb1f4f..65b05e4753185 100644 --- a/compiler/rustc_middle/src/ty/codec.rs +++ b/compiler/rustc_middle/src/ty/codec.rs @@ -307,7 +307,7 @@ macro_rules! impl_decodable_via_ref { })* } } - +// TODO(zhamlin): enum variant here? 
impl<'tcx, D: TyDecoder<'tcx>> RefDecodable<'tcx, D> for ty::AdtDef { fn decode(decoder: &mut D) -> Result<&'tcx Self, D::Error> { let def_id = >::decode(decoder)?; diff --git a/compiler/rustc_middle/src/ty/context.rs b/compiler/rustc_middle/src/ty/context.rs index 6a6fb30dce837..d4d154552f551 100644 --- a/compiler/rustc_middle/src/ty/context.rs +++ b/compiler/rustc_middle/src/ty/context.rs @@ -1919,6 +1919,7 @@ impl<'tcx> TyCtxt<'tcx> { fmt, self.0, Adt, + Variant, Array, Slice, RawPtr, diff --git a/compiler/rustc_middle/src/ty/error.rs b/compiler/rustc_middle/src/ty/error.rs index 08b4d3aecda0a..4e57f5231c82f 100644 --- a/compiler/rustc_middle/src/ty/error.rs +++ b/compiler/rustc_middle/src/ty/error.rs @@ -244,6 +244,10 @@ impl<'tcx> ty::TyS<'tcx> { ty::Tuple(ref tys) if tys.is_empty() => format!("`{}`", self).into(), ty::Adt(def, _) => format!("{} `{}`", def.descr(), tcx.def_path_str(def.did)).into(), + ty::Variant(ty, _) => match ty.kind() { + ty::Adt(def, _) => format!("{} `{}`", def.descr(), tcx.def_path_str(def.did)).into(), + _ => bug!("unexpected type: {:?}", ty.kind()), + } ty::Foreign(def_id) => format!("extern type `{}`", tcx.def_path_str(def_id)).into(), ty::Array(t, n) => { if t.is_simple_ty() { @@ -315,6 +319,10 @@ impl<'tcx> ty::TyS<'tcx> { | ty::Never => "type".into(), ty::Tuple(ref tys) if tys.is_empty() => "unit type".into(), ty::Adt(def, _) => def.descr().into(), + ty::Variant(ty, _) => match ty.kind() { + ty::Adt(def, _) => format!("{} variant", def.descr()).into(), + _ => bug!("unexpected type: {:?}", ty.kind()), + } ty::Foreign(_) => "extern type".into(), ty::Array(..) 
=> "array".into(), ty::Slice(_) => "slice".into(), diff --git a/compiler/rustc_middle/src/ty/fast_reject.rs b/compiler/rustc_middle/src/ty/fast_reject.rs index 11ee942b83e77..8443b1725c5b7 100644 --- a/compiler/rustc_middle/src/ty/fast_reject.rs +++ b/compiler/rustc_middle/src/ty/fast_reject.rs @@ -66,6 +66,10 @@ pub fn simplify_type( ty::Uint(uint_type) => Some(UintSimplifiedType(uint_type)), ty::Float(float_type) => Some(FloatSimplifiedType(float_type)), ty::Adt(def, _) => Some(AdtSimplifiedType(def.did)), + ty::Variant(ref ty, _) => match ty.kind() { + ty::Adt(def, _) => Some(AdtSimplifiedType(def.did)), + _ => bug!("unexpected type: {:?}", ty.kind()), + } ty::Str => Some(StrSimplifiedType), ty::Array(..) | ty::Slice(_) => Some(ArraySimplifiedType), ty::RawPtr(_) => Some(PtrSimplifiedType), diff --git a/compiler/rustc_middle/src/ty/flags.rs b/compiler/rustc_middle/src/ty/flags.rs index a078b6fb742a7..4a978d5092d42 100644 --- a/compiler/rustc_middle/src/ty/flags.rs +++ b/compiler/rustc_middle/src/ty/flags.rs @@ -161,6 +161,11 @@ impl FlagComputation { self.add_substs(substs); } + &ty::Variant(ty, _) => match ty.kind() { + ty::Adt(_, substs) => self.add_substs(substs), + _ => bug!("unexpected type: {:?}", ty.kind()), + } + &ty::Projection(data) => { self.add_flags(TypeFlags::HAS_TY_PROJECTION); self.add_projection_ty(data); diff --git a/compiler/rustc_middle/src/ty/layout.rs b/compiler/rustc_middle/src/ty/layout.rs index 0bdf70b3ec488..50aaac62bb82b 100644 --- a/compiler/rustc_middle/src/ty/layout.rs +++ b/compiler/rustc_middle/src/ty/layout.rs @@ -526,6 +526,581 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> { }) } + fn layout_of_uncached_adt(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'tcx>> { + if let ty::Adt(def, substs) = ty.kind() { + let tcx = self.tcx; + let dl = self.data_layout(); + let scalar_unit = |value: Primitive| { + let size = value.size(dl); + assert!(size.bits() <= 128); + Scalar { + value, + valid_range: WrappingRange { start: 0, end: 
size.unsigned_int_max() }, + } + }; + + // Cache the field layouts. + let variants = def + .variants + .iter() + .map(|v| { + v.fields + .iter() + .map(|field| self.layout_of(field.ty(tcx, substs))) + .collect::, _>>() + }) + .collect::, _>>()?; + + if def.is_union() { + if def.repr.pack.is_some() && def.repr.align.is_some() { + self.tcx.sess.delay_span_bug( + tcx.def_span(def.did), + "union cannot be packed and aligned", + ); + return Err(LayoutError::Unknown(ty)); + } + + let mut align = + if def.repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align }; + + if let Some(repr_align) = def.repr.align { + align = align.max(AbiAndPrefAlign::new(repr_align)); + } + + let optimize = !def.repr.inhibit_union_abi_opt(); + let mut size = Size::ZERO; + let mut abi = Abi::Aggregate { sized: true }; + let index = VariantIdx::new(0); + for field in &variants[index] { + assert!(!field.is_unsized()); + align = align.max(field.align); + + // If all non-ZST fields have the same ABI, forward this ABI + if optimize && !field.is_zst() { + // Normalize scalar_unit to the maximal valid range + let field_abi = match field.abi { + Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)), + Abi::ScalarPair(x, y) => { + Abi::ScalarPair(scalar_unit(x.value), scalar_unit(y.value)) + } + Abi::Vector { element: x, count } => { + Abi::Vector { element: scalar_unit(x.value), count } + } + Abi::Uninhabited | Abi::Aggregate { .. 
} => { + Abi::Aggregate { sized: true } + } + }; + + if size == Size::ZERO { + // first non ZST: initialize 'abi' + abi = field_abi; + } else if abi != field_abi { + // different fields have different ABI: reset to Aggregate + abi = Abi::Aggregate { sized: true }; + } + } + + size = cmp::max(size, field.size); + } + + if let Some(pack) = def.repr.pack { + align = align.min(AbiAndPrefAlign::new(pack)); + } + + return Ok(tcx.intern_layout(Layout { + variants: Variants::Single { index }, + fields: FieldsShape::Union( + NonZeroUsize::new(variants[index].len()).ok_or(LayoutError::Unknown(ty))?, + ), + abi, + largest_niche: None, + align, + size: size.align_to(align.abi), + })); + } + + // A variant is absent if it's uninhabited and only has ZST fields. + // Present uninhabited variants only require space for their fields, + // but *not* an encoding of the discriminant (e.g., a tag value). + // See issue #49298 for more details on the need to leave space + // for non-ZST uninhabited data (mostly partial initialization). + let absent = |fields: &[TyAndLayout<'_>]| { + let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited()); + let is_zst = fields.iter().all(|f| f.is_zst()); + uninhabited && is_zst + }; + let (present_first, present_second) = { + let mut present_variants = variants + .iter_enumerated() + .filter_map(|(i, v)| if absent(v) { None } else { Some(i) }); + (present_variants.next(), present_variants.next()) + }; + let present_first = match present_first { + Some(present_first) => present_first, + // Uninhabited because it has no variants, or only absent ones. + None if def.is_enum() => { + return Ok(tcx.layout_of(self.param_env.and(tcx.types.never))?.layout); + } + // If it's a struct, still compute a layout so that we can still compute the + // field offsets. + None => VariantIdx::new(0), + }; + + let is_struct = !def.is_enum() || + // Only one variant is present. + (present_second.is_none() && + // Representation optimizations are allowed. 
+ !def.repr.inhibit_enum_layout_opt()); + if is_struct { + // Struct, or univariant enum equivalent to a struct. + // (Typechecking will reject discriminant-sizing attrs.) + + let v = present_first; + let kind = if def.is_enum() || variants[v].is_empty() { + StructKind::AlwaysSized + } else { + let param_env = tcx.param_env(def.did); + let last_field = def.variants[v].fields.last().unwrap(); + let always_sized = + tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env); + if !always_sized { StructKind::MaybeUnsized } else { StructKind::AlwaysSized } + }; + + let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr, kind)?; + st.variants = Variants::Single { index: v }; + let (start, end) = self.tcx.layout_scalar_valid_range(def.did); + match st.abi { + Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => { + // the asserts ensure that we are not using the + // `#[rustc_layout_scalar_valid_range(n)]` + // attribute to widen the range of anything as that would probably + // result in UB somewhere + // FIXME(eddyb) the asserts are probably not needed, + // as larger validity ranges would result in missed + // optimizations, *not* wrongly assuming the inner + // value is valid. e.g. unions enlarge validity ranges, + // because the values may be uninitialized. + if let Bound::Included(start) = start { + // FIXME(eddyb) this might be incorrect - it doesn't + // account for wrap-around (end < start) ranges. + assert!(scalar.valid_range.start <= start); + scalar.valid_range.start = start; + } + if let Bound::Included(end) = end { + // FIXME(eddyb) this might be incorrect - it doesn't + // account for wrap-around (end < start) ranges. + assert!(scalar.valid_range.end >= end); + scalar.valid_range.end = end; + } + + // Update `largest_niche` if we have introduced a larger niche. 
+ let niche = if def.repr.hide_niche() { + None + } else { + Niche::from_scalar(dl, Size::ZERO, *scalar) + }; + if let Some(niche) = niche { + match st.largest_niche { + Some(largest_niche) => { + // Replace the existing niche even if they're equal, + // because this one is at a lower offset. + if largest_niche.available(dl) <= niche.available(dl) { + st.largest_niche = Some(niche); + } + } + None => st.largest_niche = Some(niche), + } + } + } + _ => assert!( + start == Bound::Unbounded && end == Bound::Unbounded, + "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}", + def, + st, + ), + } + + return Ok(tcx.intern_layout(st)); + } + + // At this point, we have handled all unions and + // structs. (We have also handled univariant enums + // that allow representation optimization.) + assert!(def.is_enum()); + + // The current code for niche-filling relies on variant indices + // instead of actual discriminants, so dataful enums with + // explicit discriminants (RFC #2363) would misbehave. + let no_explicit_discriminants = def + .variants + .iter_enumerated() + .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32())); + + let mut niche_filling_layout = None; + + // Niche-filling enum optimization. + if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants { + let mut dataful_variant = None; + let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0); + + // Find one non-ZST variant. 
+ 'variants: for (v, fields) in variants.iter_enumerated() { + if absent(fields) { + continue 'variants; + } + for f in fields { + if !f.is_zst() { + if dataful_variant.is_none() { + dataful_variant = Some(v); + continue 'variants; + } else { + dataful_variant = None; + break 'variants; + } + } + } + niche_variants = *niche_variants.start().min(&v)..=v; + } + + if niche_variants.start() > niche_variants.end() { + dataful_variant = None; + } + + if let Some(i) = dataful_variant { + let count = (niche_variants.end().as_u32() - niche_variants.start().as_u32() + + 1) as u128; + + // Find the field with the largest niche + let niche_candidate = variants[i] + .iter() + .enumerate() + .filter_map(|(j, field)| Some((j, field.largest_niche?))) + .max_by_key(|(_, niche)| niche.available(dl)); + + if let Some((field_index, niche, (niche_start, niche_scalar))) = niche_candidate + .and_then(|(field_index, niche)| { + Some((field_index, niche, niche.reserve(self, count)?)) + }) + { + let mut align = dl.aggregate_align; + let st = variants + .iter_enumerated() + .map(|(j, v)| { + let mut st = self.univariant_uninterned( + ty, + v, + &def.repr, + StructKind::AlwaysSized, + )?; + st.variants = Variants::Single { index: j }; + + align = align.max(st.align); + + Ok(st) + }) + .collect::, _>>()?; + + let offset = st[i].fields.offset(field_index) + niche.offset; + let size = st[i].size; + + let abi = if st.iter().all(|v| v.abi.is_uninhabited()) { + Abi::Uninhabited + } else { + match st[i].abi { + Abi::Scalar(_) => Abi::Scalar(niche_scalar), + Abi::ScalarPair(first, second) => { + // We need to use scalar_unit to reset the + // valid range to the maximal one for that + // primitive, because only the niche is + // guaranteed to be initialised, not the + // other primitive. 
+ if offset.bytes() == 0 { + Abi::ScalarPair(niche_scalar, scalar_unit(second.value)) + } else { + Abi::ScalarPair(scalar_unit(first.value), niche_scalar) + } + } + _ => Abi::Aggregate { sized: true }, + } + }; + + let largest_niche = Niche::from_scalar(dl, offset, niche_scalar); + + niche_filling_layout = Some(Layout { + variants: Variants::Multiple { + tag: niche_scalar, + tag_encoding: TagEncoding::Niche { + dataful_variant: i, + niche_variants, + niche_start, + }, + tag_field: 0, + variants: st, + }, + fields: FieldsShape::Arbitrary { + offsets: vec![offset], + memory_index: vec![0], + }, + abi, + largest_niche, + size, + align, + }); + } + } + } + + let (mut min, mut max) = (i128::MAX, i128::MIN); + let discr_type = def.repr.discr_type(); + let bits = Integer::from_attr(self, discr_type).size().bits(); + for (i, discr) in def.discriminants(tcx) { + if variants[i].iter().any(|f| f.abi.is_uninhabited()) { + continue; + } + let mut x = discr.val as i128; + if discr_type.is_signed() { + // sign extend the raw representation to be an i128 + x = (x << (128 - bits)) >> (128 - bits); + } + if x < min { + min = x; + } + if x > max { + max = x; + } + } + // We might have no inhabited variants, so pretend there's at least one. + if (min, max) == (i128::MAX, i128::MIN) { + min = 0; + max = 0; + } + assert!(min <= max, "discriminant range is {}...{}", min, max); + let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max); + + let mut align = dl.aggregate_align; + let mut size = Size::ZERO; + + // We're interested in the smallest alignment, so start large. + let mut start_align = Align::from_bytes(256).unwrap(); + assert_eq!(Integer::for_align(dl, start_align), None); + + // repr(C) on an enum tells us to make a (tag, union) layout, + // so we need to grow the prefix alignment to be at least + // the alignment of the union. 
(This value is used both for + // determining the alignment of the overall enum, and the + // determining the alignment of the payload after the tag.) + let mut prefix_align = min_ity.align(dl).abi; + if def.repr.c() { + for fields in &variants { + for field in fields { + prefix_align = prefix_align.max(field.align.abi); + } + } + } + + // Create the set of structs that represent each variant. + let mut layout_variants = variants + .iter_enumerated() + .map(|(i, field_layouts)| { + let mut st = self.univariant_uninterned( + ty, + &field_layouts, + &def.repr, + StructKind::Prefixed(min_ity.size(), prefix_align), + )?; + st.variants = Variants::Single { index: i }; + // Find the first field we can't move later + // to make room for a larger discriminant. + for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) { + if !field.is_zst() || field.align.abi.bytes() != 1 { + start_align = start_align.min(field.align.abi); + break; + } + } + size = cmp::max(size, st.size); + align = align.max(st.align); + Ok(st) + }) + .collect::, _>>()?; + + // Align the maximum variant size to the largest alignment. + size = size.align_to(align.abi); + + if size.bytes() >= dl.obj_size_bound() { + return Err(LayoutError::SizeOverflow(ty)); + } + + let typeck_ity = Integer::from_attr(dl, def.repr.discr_type()); + if typeck_ity < min_ity { + // It is a bug if Layout decided on a greater discriminant size than typeck for + // some reason at this point (based on values discriminant can take on). Mostly + // because this discriminant will be loaded, and then stored into variable of + // type calculated by typeck. Consider such case (a bug): typeck decided on + // byte-sized discriminant, but layout thinks we need a 16-bit to store all + // discriminant values. 
That would be a bug, because then, in codegen, in order + // to store this 16-bit discriminant into 8-bit sized temporary some of the + // space necessary to represent would have to be discarded (or layout is wrong + // on thinking it needs 16 bits) + bug!( + "layout decided on a larger discriminant type ({:?}) than typeck ({:?})", + min_ity, + typeck_ity + ); + // However, it is fine to make discr type however large (as an optimisation) + // after this point – we’ll just truncate the value we load in codegen. + } + + // Check to see if we should use a different type for the + // discriminant. We can safely use a type with the same size + // as the alignment of the first field of each variant. + // We increase the size of the discriminant to avoid LLVM copying + // padding when it doesn't need to. This normally causes unaligned + // load/stores and excessive memcpy/memset operations. By using a + // bigger integer size, LLVM can be sure about its contents and + // won't be so conservative. + + // Use the initial field alignment + let mut ity = if def.repr.c() || def.repr.int.is_some() { + min_ity + } else { + Integer::for_align(dl, start_align).unwrap_or(min_ity) + }; + + // If the alignment is not larger than the chosen discriminant size, + // don't use the alignment as the final size. + if ity <= min_ity { + ity = min_ity; + } else { + // Patch up the variants' first few fields. + let old_ity_size = min_ity.size(); + let new_ity_size = ity.size(); + for variant in &mut layout_variants { + match variant.fields { + FieldsShape::Arbitrary { ref mut offsets, .. } => { + for i in offsets { + if *i <= old_ity_size { + assert_eq!(*i, old_ity_size); + *i = new_ity_size; + } + } + // We might be making the struct larger. 
+ if variant.size <= old_ity_size { + variant.size = new_ity_size; + } + } + _ => bug!(), + } + } + } + + let tag_mask = ity.size().unsigned_int_max(); + let tag = Scalar { + value: Int(ity, signed), + valid_range: WrappingRange { + start: (min as u128 & tag_mask), + end: (max as u128 & tag_mask), + }, + }; + let mut abi = Abi::Aggregate { sized: true }; + if tag.value.size(dl) == size { + abi = Abi::Scalar(tag); + } else { + // Try to use a ScalarPair for all tagged enums. + let mut common_prim = None; + for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) { + let offsets = match layout_variant.fields { + FieldsShape::Arbitrary { ref offsets, .. } => offsets, + _ => bug!(), + }; + let mut fields = iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst()); + let (field, offset) = match (fields.next(), fields.next()) { + (None, None) => continue, + (Some(pair), None) => pair, + _ => { + common_prim = None; + break; + } + }; + let prim = match field.abi { + Abi::Scalar(scalar) => scalar.value, + _ => { + common_prim = None; + break; + } + }; + if let Some(pair) = common_prim { + // This is pretty conservative. We could go fancier + // by conflating things like i32 and u32, or even + // realising that (u8, u8) could just cohabit with + // u16 or even u32. + if pair != (prim, offset) { + common_prim = None; + break; + } + } else { + common_prim = Some((prim, offset)); + } + } + if let Some((prim, offset)) = common_prim { + let pair = self.scalar_pair(tag, scalar_unit(prim)); + let pair_offsets = match pair.fields { + FieldsShape::Arbitrary { ref offsets, ref memory_index } => { + assert_eq!(memory_index, &[0, 1]); + offsets + } + _ => bug!(), + }; + if pair_offsets[0] == Size::ZERO + && pair_offsets[1] == *offset + && align == pair.align + && size == pair.size + { + // We can use `ScalarPair` only when it matches our + // already computed layout (including `#[repr(C)]`). 
+ abi = pair.abi; + } + } + } + + if layout_variants.iter().all(|v| v.abi.is_uninhabited()) { + abi = Abi::Uninhabited; + } + + let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag); + + let tagged_layout = Layout { + variants: Variants::Multiple { + tag, + tag_encoding: TagEncoding::Direct, + tag_field: 0, + variants: layout_variants, + }, + fields: FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }, + largest_niche, + abi, + align, + size, + }; + + let best_layout = match (tagged_layout, niche_filling_layout) { + (tagged_layout, Some(niche_filling_layout)) => { + // Pick the smaller layout; otherwise, + // pick the layout with the larger niche; otherwise, + // pick tagged as it has simpler codegen. + cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| { + let niche_size = layout.largest_niche.map_or(0, |n| n.available(dl)); + (layout.size, cmp::Reverse(niche_size)) + }) + } + (tagged_layout, None) => tagged_layout, + }; + + Ok(tcx.intern_layout(best_layout)) + } else { + bug!("unexpected type: {:?}", ty.kind()) + } + } + fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'tcx>> { let tcx = self.tcx; let param_env = self.param_env; @@ -822,577 +1397,9 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> { } // ADTs. - ty::Adt(def, substs) => { - // Cache the field layouts. - let variants = def - .variants - .iter() - .map(|v| { - v.fields - .iter() - .map(|field| self.layout_of(field.ty(tcx, substs))) - .collect::, _>>() - }) - .collect::, _>>()?; - - if def.is_union() { - if def.repr.pack.is_some() && def.repr.align.is_some() { - self.tcx.sess.delay_span_bug( - tcx.def_span(def.did), - "union cannot be packed and aligned", - ); - return Err(LayoutError::Unknown(ty)); - } - - let mut align = - if def.repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align }; - - if let Some(repr_align) = def.repr.align { - align = align.max(AbiAndPrefAlign::new(repr_align)); - } + ty::Adt(..) 
=> self.layout_of_uncached_adt(ty)?, - let optimize = !def.repr.inhibit_union_abi_opt(); - let mut size = Size::ZERO; - let mut abi = Abi::Aggregate { sized: true }; - let index = VariantIdx::new(0); - for field in &variants[index] { - assert!(!field.is_unsized()); - align = align.max(field.align); - - // If all non-ZST fields have the same ABI, forward this ABI - if optimize && !field.is_zst() { - // Normalize scalar_unit to the maximal valid range - let field_abi = match field.abi { - Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)), - Abi::ScalarPair(x, y) => { - Abi::ScalarPair(scalar_unit(x.value), scalar_unit(y.value)) - } - Abi::Vector { element: x, count } => { - Abi::Vector { element: scalar_unit(x.value), count } - } - Abi::Uninhabited | Abi::Aggregate { .. } => { - Abi::Aggregate { sized: true } - } - }; - - if size == Size::ZERO { - // first non ZST: initialize 'abi' - abi = field_abi; - } else if abi != field_abi { - // different fields have different ABI: reset to Aggregate - abi = Abi::Aggregate { sized: true }; - } - } - - size = cmp::max(size, field.size); - } - - if let Some(pack) = def.repr.pack { - align = align.min(AbiAndPrefAlign::new(pack)); - } - - return Ok(tcx.intern_layout(Layout { - variants: Variants::Single { index }, - fields: FieldsShape::Union( - NonZeroUsize::new(variants[index].len()) - .ok_or(LayoutError::Unknown(ty))?, - ), - abi, - largest_niche: None, - align, - size: size.align_to(align.abi), - })); - } - - // A variant is absent if it's uninhabited and only has ZST fields. - // Present uninhabited variants only require space for their fields, - // but *not* an encoding of the discriminant (e.g., a tag value). - // See issue #49298 for more details on the need to leave space - // for non-ZST uninhabited data (mostly partial initialization). 
- let absent = |fields: &[TyAndLayout<'_>]| { - let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited()); - let is_zst = fields.iter().all(|f| f.is_zst()); - uninhabited && is_zst - }; - let (present_first, present_second) = { - let mut present_variants = variants - .iter_enumerated() - .filter_map(|(i, v)| if absent(v) { None } else { Some(i) }); - (present_variants.next(), present_variants.next()) - }; - let present_first = match present_first { - Some(present_first) => present_first, - // Uninhabited because it has no variants, or only absent ones. - None if def.is_enum() => { - return Ok(tcx.layout_of(param_env.and(tcx.types.never))?.layout); - } - // If it's a struct, still compute a layout so that we can still compute the - // field offsets. - None => VariantIdx::new(0), - }; - - let is_struct = !def.is_enum() || - // Only one variant is present. - (present_second.is_none() && - // Representation optimizations are allowed. - !def.repr.inhibit_enum_layout_opt()); - if is_struct { - // Struct, or univariant enum equivalent to a struct. - // (Typechecking will reject discriminant-sizing attrs.) 
- - let v = present_first; - let kind = if def.is_enum() || variants[v].is_empty() { - StructKind::AlwaysSized - } else { - let param_env = tcx.param_env(def.did); - let last_field = def.variants[v].fields.last().unwrap(); - let always_sized = - tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env); - if !always_sized { - StructKind::MaybeUnsized - } else { - StructKind::AlwaysSized - } - }; - - let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr, kind)?; - st.variants = Variants::Single { index: v }; - let (start, end) = self.tcx.layout_scalar_valid_range(def.did); - match st.abi { - Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => { - // the asserts ensure that we are not using the - // `#[rustc_layout_scalar_valid_range(n)]` - // attribute to widen the range of anything as that would probably - // result in UB somewhere - // FIXME(eddyb) the asserts are probably not needed, - // as larger validity ranges would result in missed - // optimizations, *not* wrongly assuming the inner - // value is valid. e.g. unions enlarge validity ranges, - // because the values may be uninitialized. - if let Bound::Included(start) = start { - // FIXME(eddyb) this might be incorrect - it doesn't - // account for wrap-around (end < start) ranges. - assert!(scalar.valid_range.start <= start); - scalar.valid_range.start = start; - } - if let Bound::Included(end) = end { - // FIXME(eddyb) this might be incorrect - it doesn't - // account for wrap-around (end < start) ranges. - assert!(scalar.valid_range.end >= end); - scalar.valid_range.end = end; - } - - // Update `largest_niche` if we have introduced a larger niche. - let niche = if def.repr.hide_niche() { - None - } else { - Niche::from_scalar(dl, Size::ZERO, *scalar) - }; - if let Some(niche) = niche { - match st.largest_niche { - Some(largest_niche) => { - // Replace the existing niche even if they're equal, - // because this one is at a lower offset. 
- if largest_niche.available(dl) <= niche.available(dl) { - st.largest_niche = Some(niche); - } - } - None => st.largest_niche = Some(niche), - } - } - } - _ => assert!( - start == Bound::Unbounded && end == Bound::Unbounded, - "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}", - def, - st, - ), - } - - return Ok(tcx.intern_layout(st)); - } - - // At this point, we have handled all unions and - // structs. (We have also handled univariant enums - // that allow representation optimization.) - assert!(def.is_enum()); - - // The current code for niche-filling relies on variant indices - // instead of actual discriminants, so dataful enums with - // explicit discriminants (RFC #2363) would misbehave. - let no_explicit_discriminants = def - .variants - .iter_enumerated() - .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32())); - - let mut niche_filling_layout = None; - - // Niche-filling enum optimization. - if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants { - let mut dataful_variant = None; - let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0); - - // Find one non-ZST variant. 
- 'variants: for (v, fields) in variants.iter_enumerated() { - if absent(fields) { - continue 'variants; - } - for f in fields { - if !f.is_zst() { - if dataful_variant.is_none() { - dataful_variant = Some(v); - continue 'variants; - } else { - dataful_variant = None; - break 'variants; - } - } - } - niche_variants = *niche_variants.start().min(&v)..=v; - } - - if niche_variants.start() > niche_variants.end() { - dataful_variant = None; - } - - if let Some(i) = dataful_variant { - let count = (niche_variants.end().as_u32() - - niche_variants.start().as_u32() - + 1) as u128; - - // Find the field with the largest niche - let niche_candidate = variants[i] - .iter() - .enumerate() - .filter_map(|(j, field)| Some((j, field.largest_niche?))) - .max_by_key(|(_, niche)| niche.available(dl)); - - if let Some((field_index, niche, (niche_start, niche_scalar))) = - niche_candidate.and_then(|(field_index, niche)| { - Some((field_index, niche, niche.reserve(self, count)?)) - }) - { - let mut align = dl.aggregate_align; - let st = variants - .iter_enumerated() - .map(|(j, v)| { - let mut st = self.univariant_uninterned( - ty, - v, - &def.repr, - StructKind::AlwaysSized, - )?; - st.variants = Variants::Single { index: j }; - - align = align.max(st.align); - - Ok(st) - }) - .collect::, _>>()?; - - let offset = st[i].fields.offset(field_index) + niche.offset; - let size = st[i].size; - - let abi = if st.iter().all(|v| v.abi.is_uninhabited()) { - Abi::Uninhabited - } else { - match st[i].abi { - Abi::Scalar(_) => Abi::Scalar(niche_scalar), - Abi::ScalarPair(first, second) => { - // We need to use scalar_unit to reset the - // valid range to the maximal one for that - // primitive, because only the niche is - // guaranteed to be initialised, not the - // other primitive. 
- if offset.bytes() == 0 { - Abi::ScalarPair(niche_scalar, scalar_unit(second.value)) - } else { - Abi::ScalarPair(scalar_unit(first.value), niche_scalar) - } - } - _ => Abi::Aggregate { sized: true }, - } - }; - - let largest_niche = Niche::from_scalar(dl, offset, niche_scalar); - - niche_filling_layout = Some(Layout { - variants: Variants::Multiple { - tag: niche_scalar, - tag_encoding: TagEncoding::Niche { - dataful_variant: i, - niche_variants, - niche_start, - }, - tag_field: 0, - variants: st, - }, - fields: FieldsShape::Arbitrary { - offsets: vec![offset], - memory_index: vec![0], - }, - abi, - largest_niche, - size, - align, - }); - } - } - } - - let (mut min, mut max) = (i128::MAX, i128::MIN); - let discr_type = def.repr.discr_type(); - let bits = Integer::from_attr(self, discr_type).size().bits(); - for (i, discr) in def.discriminants(tcx) { - if variants[i].iter().any(|f| f.abi.is_uninhabited()) { - continue; - } - let mut x = discr.val as i128; - if discr_type.is_signed() { - // sign extend the raw representation to be an i128 - x = (x << (128 - bits)) >> (128 - bits); - } - if x < min { - min = x; - } - if x > max { - max = x; - } - } - // We might have no inhabited variants, so pretend there's at least one. - if (min, max) == (i128::MAX, i128::MIN) { - min = 0; - max = 0; - } - assert!(min <= max, "discriminant range is {}...{}", min, max); - let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max); - - let mut align = dl.aggregate_align; - let mut size = Size::ZERO; - - // We're interested in the smallest alignment, so start large. - let mut start_align = Align::from_bytes(256).unwrap(); - assert_eq!(Integer::for_align(dl, start_align), None); - - // repr(C) on an enum tells us to make a (tag, union) layout, - // so we need to grow the prefix alignment to be at least - // the alignment of the union. 
(This value is used both for - // determining the alignment of the overall enum, and the - // determining the alignment of the payload after the tag.) - let mut prefix_align = min_ity.align(dl).abi; - if def.repr.c() { - for fields in &variants { - for field in fields { - prefix_align = prefix_align.max(field.align.abi); - } - } - } - - // Create the set of structs that represent each variant. - let mut layout_variants = variants - .iter_enumerated() - .map(|(i, field_layouts)| { - let mut st = self.univariant_uninterned( - ty, - &field_layouts, - &def.repr, - StructKind::Prefixed(min_ity.size(), prefix_align), - )?; - st.variants = Variants::Single { index: i }; - // Find the first field we can't move later - // to make room for a larger discriminant. - for field in - st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) - { - if !field.is_zst() || field.align.abi.bytes() != 1 { - start_align = start_align.min(field.align.abi); - break; - } - } - size = cmp::max(size, st.size); - align = align.max(st.align); - Ok(st) - }) - .collect::, _>>()?; - - // Align the maximum variant size to the largest alignment. - size = size.align_to(align.abi); - - if size.bytes() >= dl.obj_size_bound() { - return Err(LayoutError::SizeOverflow(ty)); - } - - let typeck_ity = Integer::from_attr(dl, def.repr.discr_type()); - if typeck_ity < min_ity { - // It is a bug if Layout decided on a greater discriminant size than typeck for - // some reason at this point (based on values discriminant can take on). Mostly - // because this discriminant will be loaded, and then stored into variable of - // type calculated by typeck. Consider such case (a bug): typeck decided on - // byte-sized discriminant, but layout thinks we need a 16-bit to store all - // discriminant values. 
That would be a bug, because then, in codegen, in order - // to store this 16-bit discriminant into 8-bit sized temporary some of the - // space necessary to represent would have to be discarded (or layout is wrong - // on thinking it needs 16 bits) - bug!( - "layout decided on a larger discriminant type ({:?}) than typeck ({:?})", - min_ity, - typeck_ity - ); - // However, it is fine to make discr type however large (as an optimisation) - // after this point – we’ll just truncate the value we load in codegen. - } - - // Check to see if we should use a different type for the - // discriminant. We can safely use a type with the same size - // as the alignment of the first field of each variant. - // We increase the size of the discriminant to avoid LLVM copying - // padding when it doesn't need to. This normally causes unaligned - // load/stores and excessive memcpy/memset operations. By using a - // bigger integer size, LLVM can be sure about its contents and - // won't be so conservative. - - // Use the initial field alignment - let mut ity = if def.repr.c() || def.repr.int.is_some() { - min_ity - } else { - Integer::for_align(dl, start_align).unwrap_or(min_ity) - }; - - // If the alignment is not larger than the chosen discriminant size, - // don't use the alignment as the final size. - if ity <= min_ity { - ity = min_ity; - } else { - // Patch up the variants' first few fields. - let old_ity_size = min_ity.size(); - let new_ity_size = ity.size(); - for variant in &mut layout_variants { - match variant.fields { - FieldsShape::Arbitrary { ref mut offsets, .. } => { - for i in offsets { - if *i <= old_ity_size { - assert_eq!(*i, old_ity_size); - *i = new_ity_size; - } - } - // We might be making the struct larger. 
- if variant.size <= old_ity_size { - variant.size = new_ity_size; - } - } - _ => bug!(), - } - } - } - - let tag_mask = ity.size().unsigned_int_max(); - let tag = Scalar { - value: Int(ity, signed), - valid_range: WrappingRange { - start: (min as u128 & tag_mask), - end: (max as u128 & tag_mask), - }, - }; - let mut abi = Abi::Aggregate { sized: true }; - if tag.value.size(dl) == size { - abi = Abi::Scalar(tag); - } else { - // Try to use a ScalarPair for all tagged enums. - let mut common_prim = None; - for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) { - let offsets = match layout_variant.fields { - FieldsShape::Arbitrary { ref offsets, .. } => offsets, - _ => bug!(), - }; - let mut fields = - iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst()); - let (field, offset) = match (fields.next(), fields.next()) { - (None, None) => continue, - (Some(pair), None) => pair, - _ => { - common_prim = None; - break; - } - }; - let prim = match field.abi { - Abi::Scalar(scalar) => scalar.value, - _ => { - common_prim = None; - break; - } - }; - if let Some(pair) = common_prim { - // This is pretty conservative. We could go fancier - // by conflating things like i32 and u32, or even - // realising that (u8, u8) could just cohabit with - // u16 or even u32. - if pair != (prim, offset) { - common_prim = None; - break; - } - } else { - common_prim = Some((prim, offset)); - } - } - if let Some((prim, offset)) = common_prim { - let pair = self.scalar_pair(tag, scalar_unit(prim)); - let pair_offsets = match pair.fields { - FieldsShape::Arbitrary { ref offsets, ref memory_index } => { - assert_eq!(memory_index, &[0, 1]); - offsets - } - _ => bug!(), - }; - if pair_offsets[0] == Size::ZERO - && pair_offsets[1] == *offset - && align == pair.align - && size == pair.size - { - // We can use `ScalarPair` only when it matches our - // already computed layout (including `#[repr(C)]`). 
- abi = pair.abi; - } - } - } - - if layout_variants.iter().all(|v| v.abi.is_uninhabited()) { - abi = Abi::Uninhabited; - } - - let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag); - - let tagged_layout = Layout { - variants: Variants::Multiple { - tag, - tag_encoding: TagEncoding::Direct, - tag_field: 0, - variants: layout_variants, - }, - fields: FieldsShape::Arbitrary { - offsets: vec![Size::ZERO], - memory_index: vec![0], - }, - largest_niche, - abi, - align, - size, - }; - - let best_layout = match (tagged_layout, niche_filling_layout) { - (tagged_layout, Some(niche_filling_layout)) => { - // Pick the smaller layout; otherwise, - // pick the layout with the larger niche; otherwise, - // pick tagged as it has simpler codegen. - cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| { - let niche_size = layout.largest_niche.map_or(0, |n| n.available(dl)); - (layout.size, cmp::Reverse(niche_size)) - }) - } - (tagged_layout, None) => tagged_layout, - }; - - tcx.intern_layout(best_layout) - } + ty::Variant(ty, _) => self.layout_of_uncached_adt(ty)?, // Types with no meaningful known layout. ty::Projection(_) | ty::Opaque(..) => { @@ -2386,6 +2393,24 @@ where } } + ty::Variant(ty, _) => match ty.kind() { + ty::Adt(def, substs) => match this.variants { + Variants::Single { index } => { + return TyMaybeWithLayout::Ty( + def.variants[index].fields[i].ty(tcx, substs), + ); + } + + // Discriminant field for enums (where applicable). + Variants::Multiple { tag, .. } => { + // TODO(zhamlin): make this work + assert_eq!(i, 0); + return TyMaybeWithLayout::TyAndLayout(tag_layout(tag)); + } + }, + _ => bug!("unexpected type: {:?}", ty.kind()), + }, + ty::Projection(_) | ty::Bound(..) | ty::Placeholder(..) 
diff --git a/compiler/rustc_middle/src/ty/outlives.rs b/compiler/rustc_middle/src/ty/outlives.rs index ef4ad998f10c8..aa2835ba9c535 100644 --- a/compiler/rustc_middle/src/ty/outlives.rs +++ b/compiler/rustc_middle/src/ty/outlives.rs @@ -166,6 +166,7 @@ fn compute_components( ty::Float(..) | // OutlivesScalar ty::Never | // ... ty::Adt(..) | // OutlivesNominalType + ty::Variant(..) | // OutlivesNominalType ty::Opaque(..) | // OutlivesNominalType (ish) ty::Foreign(..) | // OutlivesNominalType ty::Str | // OutlivesScalar (ish) diff --git a/compiler/rustc_middle/src/ty/print/mod.rs b/compiler/rustc_middle/src/ty/print/mod.rs index 308b4d2fefc71..447d7e1eff393 100644 --- a/compiler/rustc_middle/src/ty/print/mod.rs +++ b/compiler/rustc_middle/src/ty/print/mod.rs @@ -315,6 +315,7 @@ fn characteristic_def_id_of_type_cached<'a>( | ty::GeneratorWitness(..) | ty::Never | ty::Float(_) => None, + ty::Variant(..) => unimplemented!("TODO(zhamlin)"), } } pub fn characteristic_def_id_of_type(ty: Ty<'_>) -> Option { diff --git a/compiler/rustc_middle/src/ty/print/pretty.rs b/compiler/rustc_middle/src/ty/print/pretty.rs index 0bf81ea01130f..dc2d61b70ea00 100644 --- a/compiler/rustc_middle/src/ty/print/pretty.rs +++ b/compiler/rustc_middle/src/ty/print/pretty.rs @@ -597,6 +597,14 @@ pub trait PrettyPrinter<'tcx>: ty::Adt(def, substs) => { p!(print_def_path(def.did, substs)); } + + ty::Variant(ty, idx) => match ty.kind() { + ty::Adt(def, substs) => { + p!(print_def_path(def.did, substs)); + p!(write("::{}", def.variants.get(idx).unwrap().ident.name)); + } + _ => bug!("unexpected type: {:?}", ty.kind()), + }, ty::Dynamic(data, r) => { let print_r = self.region_should_not_be_omitted(r); if print_r { diff --git a/compiler/rustc_middle/src/ty/relate.rs b/compiler/rustc_middle/src/ty/relate.rs index 2c786538014ff..3b85b199f89bc 100644 --- a/compiler/rustc_middle/src/ty/relate.rs +++ b/compiler/rustc_middle/src/ty/relate.rs @@ -402,6 +402,36 @@ pub fn super_relate_tys>( Ok(tcx.mk_adt(a_def, 
substs)) } + // TODO(zhamlin): handle this somewhere else? + // Enum <- Enum Variant + (&ty::Adt(a_def, a_substs), &ty::Variant(b_ty, _)) if relation.a_is_expected() => match b_ty.kind() { + ty::Adt(b_def, b_substs) if a_def == *b_def => { + let substs = relation.relate_item_substs(a_def.did, a_substs, b_substs)?; + Ok(tcx.mk_adt(a_def, substs)) + }, + _ => Err(TypeError::Sorts(expected_found(relation, a, b))), + } + + (&ty::Variant(a_ty, _), &ty::Adt(b_def, b_substs)) => match a_ty.kind() { + ty::Adt(a_def, a_substs) if *a_def == b_def => { + let substs = relation.relate_item_substs(a_def.did, a_substs, b_substs)?; + Ok(tcx.mk_adt(a_def, substs)) + } + _ => Err(TypeError::Sorts(expected_found(relation, a, b))), + } + + (&ty::Variant(a_ty, a_idx), &ty::Variant(b_ty, b_idx)) => match a_ty.kind() { + ty::Adt(a_def, a_substs) => match b_ty.kind() { + ty::Adt(b_def, b_substs) if a_def == b_def && a_idx == b_idx => { + let substs = relation.relate_item_substs(a_def.did, a_substs, b_substs)?; + let adt = tcx.mk_adt(a_def, substs); + Ok(tcx.mk_ty(ty::Variant(adt, a_idx))) + } + _ => Err(TypeError::Sorts(expected_found(relation, a, b))), + }, + _ => Err(TypeError::Sorts(expected_found(relation, a, b))), + } + (&ty::Foreign(a_id), &ty::Foreign(b_id)) if a_id == b_id => Ok(tcx.mk_foreign(a_id)), (&ty::Dynamic(a_obj, a_region), &ty::Dynamic(b_obj, b_region)) => { diff --git a/compiler/rustc_middle/src/ty/structural_impls.rs b/compiler/rustc_middle/src/ty/structural_impls.rs index 8f343ba9fec22..89df6d01ec547 100644 --- a/compiler/rustc_middle/src/ty/structural_impls.rs +++ b/compiler/rustc_middle/src/ty/structural_impls.rs @@ -871,6 +871,13 @@ impl<'tcx> TypeFoldable<'tcx> for Ty<'tcx> { ty::Array(typ, sz) => ty::Array(typ.fold_with(folder), sz.fold_with(folder)), ty::Slice(typ) => ty::Slice(typ.fold_with(folder)), ty::Adt(tid, substs) => ty::Adt(tid, substs.fold_with(folder)), + ty::Variant(ty, idx) => match ty.kind() { + ty::Adt(tid, substs) => { + let adt_ty = 
folder.tcx().mk_ty(ty::Adt(tid, substs.fold_with(folder))); + ty::Variant(adt_ty, idx) + }, + _ => bug!("unexpected ty: {:?}", ty.kind()), + } ty::Dynamic(trait_ty, region) => { ty::Dynamic(trait_ty.fold_with(folder), region.fold_with(folder)) } @@ -917,6 +924,10 @@ impl<'tcx> TypeFoldable<'tcx> for Ty<'tcx> { } ty::Slice(typ) => typ.visit_with(visitor), ty::Adt(_, substs) => substs.visit_with(visitor), + ty::Variant(ty, _idx) => match ty.kind() { + ty::Adt(_, substs) => substs.visit_with(visitor), + _ => bug!("unexpected type: {:?}", ty.kind()), + } ty::Dynamic(ref trait_ty, ref reg) => { trait_ty.visit_with(visitor)?; reg.visit_with(visitor) diff --git a/compiler/rustc_middle/src/ty/sty.rs b/compiler/rustc_middle/src/ty/sty.rs index d3094b3e6ff4d..d5afd8a877f6b 100644 --- a/compiler/rustc_middle/src/ty/sty.rs +++ b/compiler/rustc_middle/src/ty/sty.rs @@ -195,6 +195,9 @@ pub enum TyKind<'tcx> { /// A type variable used during type checking. Infer(InferTy), + /// An enum (TyKind::Adt) and its variant + Variant(Ty<'tcx>, VariantIdx), + /// A placeholder for a type which could not be computed; this is /// propagated to avoid useless error messages. 
Error(DelaySpanBugEmitted), @@ -2011,6 +2014,12 @@ impl<'tcx> TyS<'tcx> { TyKind::Adt(adt, _) if adt.is_enum() => { Some(adt.discriminant_for_variant(tcx, variant_index)) } + TyKind::Variant(ty, _) => match ty.kind() { + ty::Adt(adt, _) if adt.is_enum() => { + Some(adt.discriminant_for_variant(tcx, variant_index)) + } + _ => bug!("unexpected type: {:?}", ty.kind()), + }, TyKind::Generator(def_id, substs, _) => { Some(substs.as_generator().discriminant_for_variant(*def_id, tcx, variant_index)) } @@ -2022,6 +2031,10 @@ impl<'tcx> TyS<'tcx> { pub fn discriminant_ty(&'tcx self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> { match self.kind() { ty::Adt(adt, _) if adt.is_enum() => adt.repr.discr_type().to_ty(tcx), + ty::Variant(ty, _) => match ty.kind() { + ty::Adt(adt, _) if adt.is_enum() => adt.repr.discr_type().to_ty(tcx), + _ => bug!("unexpected type: {:?}", ty.kind()), + }, ty::Generator(_, substs, _) => substs.as_generator().discr_ty(tcx), ty::Param(_) | ty::Projection(_) | ty::Opaque(..) | ty::Infer(ty::TyVar(_)) => { @@ -2083,6 +2096,7 @@ impl<'tcx> TyS<'tcx> { | ty::Closure(..) | ty::Never | ty::Error(_) + | ty::Variant(..) | ty::Foreign(..) // If returned by `struct_tail_without_normalization` this is a unit struct // without any fields, or not a struct, and therefore is Sized. @@ -2174,6 +2188,10 @@ impl<'tcx> TyS<'tcx> { ty::Tuple(tys) => tys.iter().all(|ty| ty.expect_ty().is_trivially_sized(tcx)), ty::Adt(def, _substs) => def.sized_constraint(tcx).is_empty(), + ty::Variant(ty, _) => match ty.kind() { + ty::Adt(def, _) => def.sized_constraint(tcx).is_empty(), + _ => bug!("unexpected type: {:?}", ty.kind()), + }, ty::Projection(_) | ty::Param(_) | ty::Opaque(..)
=> false, diff --git a/compiler/rustc_middle/src/ty/util.rs b/compiler/rustc_middle/src/ty/util.rs index 2c884813d2318..2a5dca06e6963 100644 --- a/compiler/rustc_middle/src/ty/util.rs +++ b/compiler/rustc_middle/src/ty/util.rs @@ -144,6 +144,7 @@ impl<'tcx> TyCtxt<'tcx> { } pub fn has_error_field(self, ty: Ty<'tcx>) -> bool { + // TODO(zhamlin): enum variant here? if let ty::Adt(def, substs) = *ty.kind() { for field in def.all_fields() { let field_ty = field.ty(self, substs); @@ -203,6 +204,7 @@ impl<'tcx> TyCtxt<'tcx> { ); } match *ty.kind() { + // TODO(zhamlin): enum variant here? ty::Adt(def, substs) => { if !def.is_struct() { break; @@ -711,6 +713,7 @@ impl<'tcx> ty::TyS<'tcx> { | ty::Opaque(..) | ty::Param(_) | ty::Placeholder(_) + | ty::Variant(..) | ty::Projection(_) => false, } } @@ -751,6 +754,7 @@ impl<'tcx> ty::TyS<'tcx> { | ty::Opaque(..) | ty::Param(_) | ty::Placeholder(_) + | ty::Variant(..) | ty::Projection(_) => false, } } @@ -846,6 +850,7 @@ impl<'tcx> ty::TyS<'tcx> { match self.kind() { // Look for an impl of both `PartialStructuralEq` and `StructuralEq`. Adt(..) => tcx.has_structural_eq_impls(self), + Variant(..) => tcx.has_structural_eq_impls(self), // Primitive types that satisfy `Eq`. Bool | Char | Int(_) | Uint(_) | Str | Never => true, @@ -878,6 +883,7 @@ impl<'tcx> ty::TyS<'tcx> { } pub fn same_type(a: Ty<'tcx>, b: Ty<'tcx>) -> bool { + // TODO(zhamlin): enum variant here? match (&a.kind(), &b.kind()) { (&Adt(did_a, substs_a), &Adt(did_b, substs_b)) => { if did_a != did_b { @@ -957,6 +963,7 @@ impl<'tcx> ExplicitSelf<'tcx> { ty::Ref(region, ty, mutbl) if is_self_ty(ty) => ByReference(region, mutbl), ty::RawPtr(ty::TypeAndMut { ty, mutbl }) if is_self_ty(ty) => ByRawPointer(mutbl), ty::Adt(def, _) if def.is_box() && is_self_ty(self_arg_ty.boxed_ty()) => ByBox, + // TODO(zhamlin): enum variant here? _ => Other, } } @@ -1021,6 +1028,7 @@ pub fn needs_drop_components( | ty::Opaque(..) | ty::Infer(_) | ty::Closure(..) + | ty::Variant(..) 
| ty::Generator(..) => Ok(smallvec![ty]), } } diff --git a/compiler/rustc_middle/src/ty/walk.rs b/compiler/rustc_middle/src/ty/walk.rs index 73985cf31e0f9..a44889a92a62b 100644 --- a/compiler/rustc_middle/src/ty/walk.rs +++ b/compiler/rustc_middle/src/ty/walk.rs @@ -191,6 +191,10 @@ fn push_inner<'tcx>( | ty::FnDef(_, substs) => { stack.extend(substs.iter().rev()); } + ty::Variant(ty, _) => match ty.kind() { + ty::Adt(_, substs) => stack.extend(substs.iter().rev()), + _ => bug!("unexpected type: {:?}", ty.kind()), + }, ty::GeneratorWitness(ts) => { stack.extend(ts.skip_binder().iter().rev().map(|ty| ty.into())); } diff --git a/compiler/rustc_mir_build/src/build/expr/into.rs b/compiler/rustc_mir_build/src/build/expr/into.rs index 53868f2855763..e8585a58cf629 100644 --- a/compiler/rustc_mir_build/src/build/expr/into.rs +++ b/compiler/rustc_mir_build/src/build/expr/into.rs @@ -314,6 +314,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { ref fields, ref base, }) => { + debug!("zhamlin: adt expr into"); // See the notes for `ExprKind::Array` in `as_rvalue` and for // `ExprKind::Borrow` above. 
let is_union = adt_def.is_union(); @@ -376,6 +377,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { inferred_ty, }) }); + debug!("zhamlin: adt user type: {:?}", user_ty); let adt = Box::new(AggregateKind::Adt( adt_def, variant_index, diff --git a/compiler/rustc_mir_build/src/thir/cx/expr.rs b/compiler/rustc_mir_build/src/thir/cx/expr.rs index 17296a95bc17e..39ad789620fa9 100644 --- a/compiler/rustc_mir_build/src/thir/cx/expr.rs +++ b/compiler/rustc_mir_build/src/thir/cx/expr.rs @@ -934,6 +934,17 @@ impl<'tcx> Cx<'tcx> { fields: Box::new([]), base: None, })), + ty::Variant(ty, _) => match ty.kind() { + ty::Adt(adt_def, substs) => ExprKind::Adt(Box::new(Adt { + adt_def, + variant_index: adt_def.variant_index_with_ctor_id(def_id), + substs, + user_ty: user_provided_type, + fields: Box::new([]), + base: None, + })), + _ => bug!("unexpected ty: {:?}", ty), + }, _ => bug!("unexpected ty: {:?}", ty), } } diff --git a/compiler/rustc_mir_build/src/thir/pattern/mod.rs b/compiler/rustc_mir_build/src/thir/pattern/mod.rs index cb74ae4df2ef8..1953876686468 100644 --- a/compiler/rustc_mir_build/src/thir/pattern/mod.rs +++ b/compiler/rustc_mir_build/src/thir/pattern/mod.rs @@ -392,6 +392,10 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> { if adt_def.is_enum() { let substs = match ty.kind() { ty::Adt(_, substs) | ty::FnDef(_, substs) => substs, + ty::Variant(ty, _) => match ty.kind() { + ty::Adt(_, substs) => substs, + _ => bug!("unexpected type `{:?}`", ty.kind()) + } ty::Error(_) => { // Avoid ICE (#50585) return PatKind::Wild; diff --git a/compiler/rustc_mir_dataflow/src/elaborate_drops.rs b/compiler/rustc_mir_dataflow/src/elaborate_drops.rs index 7607ccc3aba83..229e930fae4fa 100644 --- a/compiler/rustc_mir_dataflow/src/elaborate_drops.rs +++ b/compiler/rustc_mir_dataflow/src/elaborate_drops.rs @@ -883,6 +883,17 @@ where self.open_drop_for_adt(def, substs) } } + + ty::Variant(ty, _) => match ty.kind() { + ty::Adt(def, substs) => { + if def.is_box() { + self.open_drop_for_box(def, substs) + } else { 
+ self.open_drop_for_adt(def, substs) + } + }, + _ => bug!("unexpected type `{:?}`", ty.kind()), + } ty::Dynamic(..) => self.complete_drop(self.succ, self.unwind), ty::Array(ety, size) => { let size = size.try_eval_usize(self.tcx(), self.elaborator.param_env()); diff --git a/compiler/rustc_mir_dataflow/src/impls/mod.rs b/compiler/rustc_mir_dataflow/src/impls/mod.rs index 474f4f2a79b2a..06edcc44c4430 100644 --- a/compiler/rustc_mir_dataflow/src/impls/mod.rs +++ b/compiler/rustc_mir_dataflow/src/impls/mod.rs @@ -691,7 +691,10 @@ fn switch_on_enum_discriminant( { match &discriminated.ty(body, tcx).ty.kind() { ty::Adt(def, _) => Some((*discriminated, def)), - + ty::Variant(ty, _) => match ty.kind() { + ty::Adt(def, _) => Some((*discriminated, def)), + _ => bug!("unexpected type: {:?}", ty.kind()), + } // `Rvalue::Discriminant` is also used to get the active yield point for a // generator, but we do not need edge-specific effects in that case. This may // change in the future. diff --git a/compiler/rustc_passes/src/dead.rs b/compiler/rustc_passes/src/dead.rs index 772791324019a..a788b77490858 100644 --- a/compiler/rustc_passes/src/dead.rs +++ b/compiler/rustc_passes/src/dead.rs @@ -126,6 +126,15 @@ impl<'tcx> MarkSymbolVisitor<'tcx> { let index = self.tcx.field_index(hir_id, self.typeck_results()); self.insert_def_id(def.non_enum_variant().fields[index].did); } + ty::Variant(ty, idx) => match ty.kind() { + ty::Adt(def, _) => { + let index = self.tcx.field_index(hir_id, self.typeck_results()); + self.insert_def_id( + def.variants.get(*idx).expect("invalid variant").fields[index].did, + ); + } + _ => bug!("unexpected type: {:?}", ty.kind()), + }, ty::Tuple(..) 
=> {} _ => span_bug!(lhs.span, "named field access on non-ADT"), } diff --git a/compiler/rustc_privacy/src/lib.rs b/compiler/rustc_privacy/src/lib.rs index ae3a9c71c5968..02d391bf8fbdb 100644 --- a/compiler/rustc_privacy/src/lib.rs +++ b/compiler/rustc_privacy/src/lib.rs @@ -213,6 +213,20 @@ where } } } + ty::Variant(ty, _) => match ty.kind() { + ty::Adt(&ty::AdtDef { did: def_id, .. }, _) => { + self.def_id_visitor.visit_def_id(def_id, "type", &ty)?; + if self.def_id_visitor.shallow() { + return ControlFlow::CONTINUE; + } + if let Some(assoc_item) = tcx.opt_associated_item(def_id) { + if let ty::ImplContainer(impl_def_id) = assoc_item.container { + tcx.type_of(impl_def_id).visit_with(self)?; + } + } + } + _ => bug!("unexpected type: {:?}", ty.kind()), + } ty::Projection(proj) => { if self.def_id_visitor.skip_assoc_tys() { // Visitors searching for minimal visibility/reachability want to diff --git a/compiler/rustc_resolve/src/late.rs b/compiler/rustc_resolve/src/late.rs index 9563325796538..93833665a1f9e 100644 --- a/compiler/rustc_resolve/src/late.rs +++ b/compiler/rustc_resolve/src/late.rs @@ -286,6 +286,7 @@ impl<'a> PathSource<'a> { | DefKind::AssocTy | DefKind::TyParam | DefKind::OpaqueTy + | DefKind::Variant | DefKind::ForeignTy, _, ) | Res::PrimTy(..) diff --git a/compiler/rustc_symbol_mangling/src/v0.rs b/compiler/rustc_symbol_mangling/src/v0.rs index 521730dfeb01c..68ec2784ba4f6 100644 --- a/compiler/rustc_symbol_mangling/src/v0.rs +++ b/compiler/rustc_symbol_mangling/src/v0.rs @@ -467,6 +467,7 @@ impl Printer<'tcx> for &mut SymbolMangler<'tcx> { } ty::GeneratorWitness(_) => bug!("symbol_names: unexpected `GeneratorWitness`"), + ty::Variant(..) 
=> unimplemented!("TODO(zhamlin)"), } // Only cache types that do not refer to an enclosing diff --git a/compiler/rustc_trait_selection/src/traits/coherence.rs b/compiler/rustc_trait_selection/src/traits/coherence.rs index 668a74bd69715..eebf9c5ced132 100644 --- a/compiler/rustc_trait_selection/src/traits/coherence.rs +++ b/compiler/rustc_trait_selection/src/traits/coherence.rs @@ -556,6 +556,7 @@ fn ty_is_local_constructor(ty: Ty<'_>, in_crate: InCrate) -> bool { }, ty::Adt(def, _) => def_id_is_local(def.did, in_crate), + ty::Variant(..) => unimplemented!("TODO(zhamlin)"), ty::Foreign(did) => def_id_is_local(did, in_crate), ty::Opaque(..) => { // This merits some explanation. diff --git a/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs b/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs index 88e8df81488e6..611375357e7d1 100644 --- a/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs +++ b/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs @@ -1357,6 +1357,7 @@ impl<'a, 'tcx> InferCtxtPrivExt<'tcx> for InferCtxt<'a, 'tcx> { ty::Foreign(..) => Some(19), ty::GeneratorWitness(..) => Some(20), ty::Placeholder(..) | ty::Bound(..) | ty::Infer(..) | ty::Error(_) => None, + ty::Variant(..) => unimplemented!("TODO(zhamlin)"), } } diff --git a/compiler/rustc_trait_selection/src/traits/project.rs b/compiler/rustc_trait_selection/src/traits/project.rs index 47b006985ec56..d430eb003ad05 100644 --- a/compiler/rustc_trait_selection/src/traits/project.rs +++ b/compiler/rustc_trait_selection/src/traits/project.rs @@ -1402,6 +1402,8 @@ fn assemble_candidates_from_impls<'cx, 'tcx>( | ty::Placeholder(..) | ty::Infer(..) | ty::Error(_) => false, + ty::Variant(..) => unimplemented!("TODO(zhamlin)"), + } } super::ImplSource::Pointee(..) => { @@ -1447,6 +1449,7 @@ fn assemble_candidates_from_impls<'cx, 'tcx>( | ty::Placeholder(..) | ty::Infer(..) | ty::Error(_) => false, + ty::Variant(..) 
=> unimplemented!("TODO(zhamlin)"), } } super::ImplSource::Param(..) => { diff --git a/compiler/rustc_trait_selection/src/traits/query/dropck_outlives.rs b/compiler/rustc_trait_selection/src/traits/query/dropck_outlives.rs index f05582f061429..147b67c2fa4df 100644 --- a/compiler/rustc_trait_selection/src/traits/query/dropck_outlives.rs +++ b/compiler/rustc_trait_selection/src/traits/query/dropck_outlives.rs @@ -126,6 +126,22 @@ pub fn trivial_dropck_outlives<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> bool { } } + ty::Variant(ty, _) => match ty.kind() { + ty::Adt(def, _) => { + if Some(def.did) == tcx.lang_items().manually_drop() { + // `ManuallyDrop` never has a dtor. + true + } else { + // Other types might. Moreover, PhantomData doesn't + // have a dtor, but it is considered to own its + // content, so it is non-trivial. Unions can have `impl Drop`, + // and hence are non-trivial as well. + false + } + }, + _ => bug!("unexpected type: {:?}", ty.kind()), + } + // The following *might* require a destructor: needs deeper inspection. ty::Dynamic(..) | ty::Projection(..) diff --git a/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs b/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs index 719412492f637..24c0a4a2be929 100644 --- a/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs +++ b/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs @@ -768,6 +768,8 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { } } + // TODO: zhamlin handle candidates here? + // `(.., T)` -> `(.., U)` (&ty::Tuple(tys_a), &ty::Tuple(tys_b)) => { if tys_a.len() == tys_b.len() { @@ -922,6 +924,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { } debug!("not returning"); } + ty::Variant(..) 
=> unimplemented!("TODO(zhamlin)"), } debug!(?stack, "assemble_const_drop_candidates - in loop"); } diff --git a/compiler/rustc_trait_selection/src/traits/select/mod.rs b/compiler/rustc_trait_selection/src/traits/select/mod.rs index e191654210a43..139b8383ec083 100644 --- a/compiler/rustc_trait_selection/src/traits/select/mod.rs +++ b/compiler/rustc_trait_selection/src/traits/select/mod.rs @@ -1761,6 +1761,16 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { }), ) } + ty::Variant(ty, _) => match ty.kind() { + ty::Adt(def, substs) => { + let sized_crit = def.sized_constraint(self.tcx()); + // (*) binder moved here + Where(obligation.predicate.rebind({ + sized_crit.iter().map(|ty| ty.subst(self.tcx(), substs)).collect() + })) + }, + _ => bug!("unexpected type: {:?}", ty.kind()), + } ty::Projection(_) | ty::Param(_) | ty::Opaque(..) => None, ty::Infer(ty::TyVar(_)) => Ambiguous, @@ -1830,7 +1840,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { } } - ty::Adt(..) | ty::Projection(..) | ty::Param(..) | ty::Opaque(..) => { + ty::Adt(..) | ty::Variant(..) | ty::Projection(..) | ty::Param(..) | ty::Opaque(..) => { // Fallback to whatever user-defined impls exist in this case. 
None } @@ -1921,6 +1931,16 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { ty::Adt(def, substs) => { t.rebind(def.all_fields().map(|f| f.ty(self.tcx(), substs)).collect()) } + ty::Variant(ty, _) => match ty.kind() { + ty::Adt(def, substs) => { + if def.is_phantom_data() { + t.rebind(substs.types().collect()) + } else { + t.rebind(def.all_fields().map(|f| f.ty(self.tcx(), substs)).collect()) + } + }, + _ => bug!("unexpected type: {:?}", ty.kind()), + } ty::Opaque(def_id, substs) => { // We can resolve the `impl Trait` to its concrete type, diff --git a/compiler/rustc_trait_selection/src/traits/structural_match.rs b/compiler/rustc_trait_selection/src/traits/structural_match.rs index ac8bab0cf36a7..8d29a9f815f05 100644 --- a/compiler/rustc_trait_selection/src/traits/structural_match.rs +++ b/compiler/rustc_trait_selection/src/traits/structural_match.rs @@ -206,6 +206,7 @@ impl<'a, 'tcx> TypeVisitor<'tcx> for Search<'a, 'tcx> { // as this may still emit relevant errors. return ControlFlow::CONTINUE; } + ty::Variant(..) 
=> unimplemented!("TODO(zhamlin)"), }; if !self.seen.insert(adt_def.did) { diff --git a/compiler/rustc_trait_selection/src/traits/wf.rs b/compiler/rustc_trait_selection/src/traits/wf.rs index cb47ba9c360da..1609031936f7a 100644 --- a/compiler/rustc_trait_selection/src/traits/wf.rs +++ b/compiler/rustc_trait_selection/src/traits/wf.rs @@ -545,6 +545,14 @@ impl<'a, 'tcx> WfPredicates<'a, 'tcx> { self.out.extend(obligations); } + ty::Variant(ty, _) => match ty.kind() { + ty::Adt(def, substs) => { + let obligations = self.nominal_obligations(def.did, substs); + self.out.extend(obligations); + }, + _ => bug!("unexpected type: {:?}", ty.kind()), + } + ty::FnDef(did, substs) => { let obligations = self.nominal_obligations(did, substs); self.out.extend(obligations); diff --git a/compiler/rustc_traits/src/chalk/lowering.rs b/compiler/rustc_traits/src/chalk/lowering.rs index e24f699adf6b3..c9ff3993e4808 100644 --- a/compiler/rustc_traits/src/chalk/lowering.rs +++ b/compiler/rustc_traits/src/chalk/lowering.rs @@ -330,6 +330,7 @@ impl<'tcx> LowerInto<'tcx, chalk_ir::Ty>> for Ty<'tcx> { } ty::Infer(_infer) => unimplemented!(), ty::Error(_) => chalk_ir::TyKind::Error, + ty::Variant(..) => unimplemented!("TODO(zhamlin)"), } .intern(interner) } diff --git a/compiler/rustc_traits/src/dropck_outlives.rs b/compiler/rustc_traits/src/dropck_outlives.rs index 672e149b5fc96..1b51e862bcc6b 100644 --- a/compiler/rustc_traits/src/dropck_outlives.rs +++ b/compiler/rustc_traits/src/dropck_outlives.rs @@ -283,6 +283,19 @@ fn dtorck_constraint_for_ty<'tcx>( constraints.overflows.extend(overflows.subst(tcx, substs)); } + ty::Variant(ty, _) => match ty.kind() { + ty::Adt(def, substs) => { + let DtorckConstraint { dtorck_types, outlives, overflows } = + tcx.at(span).adt_dtorck_constraint(def.did)?; + // FIXME: we can try to recursively `dtorck_constraint_on_ty` + // there, but that needs some way to handle cycles. 
+ constraints.dtorck_types.extend(dtorck_types.subst(tcx, substs)); + constraints.outlives.extend(outlives.subst(tcx, substs)); + constraints.overflows.extend(overflows.subst(tcx, substs)); + }, + _ => bug!("unexpected type: {:?}", ty.kind()), + } + // Objects must be alive in order for their destructor // to be called. ty::Dynamic(..) => { diff --git a/compiler/rustc_ty_utils/src/instance.rs b/compiler/rustc_ty_utils/src/instance.rs index 87b729faa54e0..941b50bcc837a 100644 --- a/compiler/rustc_ty_utils/src/instance.rs +++ b/compiler/rustc_ty_utils/src/instance.rs @@ -178,6 +178,7 @@ fn inner_resolve_instance<'tcx>( | ty::Generator(..) | ty::Tuple(..) | ty::Adt(..) + | ty::Variant(..) | ty::Dynamic(..) | ty::Array(..) | ty::Slice(..) => {} diff --git a/compiler/rustc_ty_utils/src/ty.rs b/compiler/rustc_ty_utils/src/ty.rs index 3d3b274370091..a6c30ed91c403 100644 --- a/compiler/rustc_ty_utils/src/ty.rs +++ b/compiler/rustc_ty_utils/src/ty.rs @@ -69,6 +69,7 @@ fn sized_constraint_for_ty<'tcx>( Placeholder(..) | Bound(..) | Infer(..) => { bug!("unexpected type `{:?}` in sized_constraint_for_ty", ty) } + ty::Variant(..) => unimplemented!("TODO(zhamlin)"), }; debug!("sized_constraint_for_ty({:?}) = {:?}", ty, result); result diff --git a/compiler/rustc_typeck/src/astconv/mod.rs b/compiler/rustc_typeck/src/astconv/mod.rs index 889b68773c27b..4700b896af0e0 100644 --- a/compiler/rustc_typeck/src/astconv/mod.rs +++ b/compiler/rustc_typeck/src/astconv/mod.rs @@ -2094,7 +2094,6 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { let last = segments.len() - 1; let mut path_segs = vec![]; - match kind { // Case 1. Reference to a struct constructor. DefKind::Ctor(CtorOf::Struct, ..) => { @@ -2216,6 +2215,37 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { let PathSeg(def_id, index) = path_segs.last().unwrap(); self.ast_path_to_ty(span, *def_id, &path.segments[*index]) } + Res::Def(kind @ DefKind::Variant, def_id) => { + // Convert to the "variant type" as if it were a real type. 
+ // The resulting `Ty` is the variant type. + assert_eq!(opt_self_ty, None); + + let path_segs = + self.def_ids_for_value_path_segments(&path.segments, None, kind, def_id); + let generic_segs: FxHashSet<_> = + path_segs.iter().map(|PathSeg(_, index)| index).collect(); + self.prohibit_generics(path.segments.iter().enumerate().filter_map( + |(index, seg)| { + if !generic_segs.contains(&index) { Some(seg) } else { None } + }, + )); + + let PathSeg(def_id, index) = path_segs.last().unwrap(); + let ty = self.ast_path_to_ty(span, *def_id, &path.segments[*index]); + + if let Some(segment) = path.segments.last() { + let enum_adt = self.tcx().adt_def(*def_id); + let index = enum_adt + .variants + .iter_enumerated() + .find(|(_, v)| segment.ident.eq(&v.ident)) + .expect("unknown variant") + .0; + self.tcx().mk_ty(ty::Variant(ty, index)) + } else { + ty + } + } Res::Def(DefKind::TyParam, def_id) => { assert_eq!(opt_self_ty, None); self.prohibit_generics(path.segments); diff --git a/compiler/rustc_typeck/src/check/cast.rs b/compiler/rustc_typeck/src/check/cast.rs index 51c766fe57c10..170bede8a4143 100644 --- a/compiler/rustc_typeck/src/check/cast.rs +++ b/compiler/rustc_typeck/src/check/cast.rs @@ -144,6 +144,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { .delay_span_bug(span, &format!("`{:?}` should be sized but is not?", t)); return Err(ErrorReported); } + ty::Variant(..) 
=> unimplemented!("TODO(zhamlin)"), }) } } diff --git a/compiler/rustc_typeck/src/check/expr.rs b/compiler/rustc_typeck/src/check/expr.rs index 676e751376a6c..71275ac1f654e 100644 --- a/compiler/rustc_typeck/src/check/expr.rs +++ b/compiler/rustc_typeck/src/check/expr.rs @@ -28,7 +28,7 @@ use rustc_data_structures::stack::ensure_sufficient_stack; use rustc_errors::ErrorReported; use rustc_errors::{pluralize, struct_span_err, Applicability, DiagnosticBuilder, DiagnosticId}; use rustc_hir as hir; -use rustc_hir::def::{CtorKind, DefKind, Res}; +use rustc_hir::def::{CtorKind, CtorOf, DefKind, Res}; use rustc_hir::def_id::DefId; use rustc_hir::{ExprKind, QPath}; use rustc_infer::infer; @@ -44,6 +44,7 @@ use rustc_span::hygiene::DesugaringKind; use rustc_span::lev_distance::find_best_match_for_name; use rustc_span::source_map::Span; use rustc_span::symbol::{kw, sym, Ident, Symbol}; +use rustc_target::abi::VariantIdx; use rustc_trait_selection::traits::{self, ObligationCauseCode}; impl<'a, 'tcx> FnCtxt<'a, 'tcx> { @@ -208,10 +209,47 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { let old_diverges = self.diverges.replace(Diverges::Maybe); let old_has_errors = self.has_errors.replace(false); + fn expecting_enum_variant(expected: &Expectation<'tcx>) -> Option<&'tcx ty::AdtDef> { + if let ExpectHasType(expected_ty) = expected { + if let ty::Variant(ty, _) = expected_ty.kind() { + if let ty::Adt(adt_def, _) = ty.kind() { + return Some(*adt_def); + } + } + } + None + } + + fn variant_index_from_qpath( + adt_def: &ty::AdtDef, + qpath: &QPath<'hir>, + ) -> Option { + match qpath { + QPath::Resolved(_, path) => match path.res { + Res::Def( + DefKind::Ctor(CtorOf::Variant, CtorKind::Const | CtorKind::Fn), + ctor_def_id, + ) => Some(adt_def.variant_index_with_ctor_id(ctor_def_id)), + _ => None, + }, + _ => None, + } + } + let ty = ensure_sufficient_stack(|| match &expr.kind { hir::ExprKind::Path( qpath @ hir::QPath::Resolved(..) 
| qpath @ hir::QPath::TypeRelative(..), - ) => self.check_expr_path(qpath, expr, args), + ) => { + let path_ty = self.check_expr_path(qpath, expr, args); + if let Some(adt_def) = expecting_enum_variant(&expected) { + variant_index_from_qpath(adt_def, qpath) + .and_then(|index| Some(self.tcx().mk_ty(ty::Variant(path_ty, index)))) + .or(Some(path_ty)) + .unwrap() + } else { + path_ty + } + } _ => self.check_expr_kind(expr, expected), }); @@ -261,6 +299,33 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { ) -> Ty<'tcx> { trace!("expr={:#?}", expr); + fn expecting_enum_variant(expected: &Expectation<'tcx>) -> Option<&'tcx ty::AdtDef> { + if let ExpectHasType(expected_ty) = expected { + if let ty::Variant(ty, _) = expected_ty.kind() { + if let ty::Adt(adt_def, _) = ty.kind() { + return Some(*adt_def); + } + } + } + None + } + + fn variant_index_from_qpath( + adt_def: &ty::AdtDef, + qpath: &QPath<'hir>, + ) -> Option { + match qpath { + QPath::Resolved(_, path) => match path.res { + Res::Def( + DefKind::Ctor(CtorOf::Variant, CtorKind::Const | CtorKind::Fn), + ctor_def_id, + ) => Some(adt_def.variant_index_with_ctor_id(ctor_def_id)), + _ => None, + }, + _ => None, + } + } + let tcx = self.tcx; match expr.kind { ExprKind::Box(subexpr) => self.check_expr_box(subexpr, expected), @@ -308,7 +373,20 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { self.check_expr_closure(expr, capture, &decl, body_id, gen, expected) } ExprKind::Block(body, _) => self.check_block_with_expected(&body, expected), - ExprKind::Call(callee, args) => self.check_call(expr, &callee, args, expected), + ExprKind::Call(callee, args) => { + let call_ty = self.check_call(expr, &callee, args, expected); + let adt_def = expecting_enum_variant(&expected); + match &callee.kind { + ExprKind::Path(qpath) if adt_def.is_some() => { + if let Some(index) = variant_index_from_qpath(adt_def.unwrap(), &qpath) { + self.tcx().mk_ty(ty::Variant(call_ty, index)) + } else { + call_ty + } + } + _ => call_ty, + } + } ExprKind::MethodCall(segment, span, 
args, _) => { self.check_method_call(expr, segment, span, args, expected) } @@ -329,6 +407,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { } ExprKind::Tup(elts) => self.check_expr_tuple(elts, expected, expr), ExprKind::Struct(qpath, fields, ref base_expr) => { + // TODO(zhamlin): handle enum structs self.check_expr_struct(expr, expected, qpath, fields, base_expr) } ExprKind::Field(base, field) => self.check_field(expr, &base, field), @@ -1685,6 +1764,45 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { while let Some((base_t, _)) = autoderef.next() { debug!("base_t: {:?}", base_t); match base_t.kind() { + ty::Variant(ty, idx) => match ty.kind() { + ty::Adt(base_def, substs) if base_def.is_enum() => { + let variant = base_def.variants.get(*idx).expect("variant doesn't exist"); + let (adjusted_ident, def_scope) = + self.tcx.adjust_ident_and_get_scope(field, base_def.did, self.body_id); + let fstr = field.as_str(); + // handle tuple like enum variant + if let Ok(index) = fstr.parse::() { + if fstr == index.to_string() { + if let Some(field_index) = variant.fields.iter().position(|f| { + f.ident.normalize_to_macros_2_0() == adjusted_ident + }) { + let field = &variant.fields[field_index]; + debug!("zhamlin: trying for field: {:?}", field.ident); + let field_ty = self.field_ty(expr.span, field, substs); + // Save the index of all fields regardless of their visibility in case + // of error recovery. 
+ self.write_field_index(expr.hir_id, field_index); + if field.vis.is_accessible_from(variant.def_id, self.tcx) { + debug!("zhamlin: is_accessible_from"); + let adjustments = self.adjust_steps(&autoderef); + self.apply_adjustments(base, adjustments); + self.register_predicates(autoderef.into_obligations()); + + self.tcx.check_stability( + field.did, + Some(expr.hir_id), + expr.span, + None, + ); + return field_ty; + } + private_candidate = Some((base_def.did, field_ty)); + } + } + } + } + _ => bug!("unexpected type: {:?}", ty.kind()), + }, ty::Adt(base_def, substs) if !base_def.is_enum() => { debug!("struct named {:?}", base_t); let (ident, def_scope) = diff --git a/compiler/rustc_typeck/src/check/fn_ctxt/_impl.rs b/compiler/rustc_typeck/src/check/fn_ctxt/_impl.rs index 7b9629e534bf9..0841c4425609c 100644 --- a/compiler/rustc_typeck/src/check/fn_ctxt/_impl.rs +++ b/compiler/rustc_typeck/src/check/fn_ctxt/_impl.rs @@ -1233,7 +1233,6 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { // segment belong to, let's sort out the parameters that the user // provided (if any) into their appropriate spaces. We'll also report // errors if type parameters are provided in an inappropriate place. 
- let generic_segs: FxHashSet<_> = path_segs.iter().map(|PathSeg(_, index)| index).collect(); let generics_has_err = >::prohibit_generics( self, diff --git a/compiler/rustc_typeck/src/variance/constraints.rs b/compiler/rustc_typeck/src/variance/constraints.rs index 1c8ac10818c03..f7805639fdf27 100644 --- a/compiler/rustc_typeck/src/variance/constraints.rs +++ b/compiler/rustc_typeck/src/variance/constraints.rs @@ -285,6 +285,11 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> { self.add_constraints_from_substs(current, def.did, substs, variance); } + ty::Variant(ty, _) => match ty.kind() { + ty::Adt(def, substs) => self.add_constraints_from_substs(current, def.did, substs, variance), + _ => bug!("unexpected type: {:?}", ty.kind()), + } + ty::Projection(ref data) => { self.add_constraints_from_invariant_substs(current, data.substs, variance); } diff --git a/src/test/ui/enum-variant-types/fn_call.rs b/src/test/ui/enum-variant-types/fn_call.rs new file mode 100644 index 0000000000000..a3f0f5588985c --- /dev/null +++ b/src/test/ui/enum-variant-types/fn_call.rs @@ -0,0 +1,15 @@ +// run-pass + +#![allow(warnings, unused)] + +enum Foo { + Variant1, + Variant2(u32), +} + +fn main() { + let f: Foo::Variant2 = Foo::Variant2(9); + bar(f); +} + +fn bar(f: Foo) {} diff --git a/src/test/ui/enum-variant-types/invalid_variant.rs b/src/test/ui/enum-variant-types/invalid_variant.rs new file mode 100644 index 0000000000000..6ab15edbf3fb1 --- /dev/null +++ b/src/test/ui/enum-variant-types/invalid_variant.rs @@ -0,0 +1,14 @@ +enum Foo { + Variant1, + Variant2(u32), +} + +pub fn main() { + let x: Foo::Variant2 = Foo::Variant2(9); + bar(x); + //~^ ERROR mismatched types [E0308] +} + +fn bar(x: Foo::Variant1) -> Foo { + x +} diff --git a/src/test/ui/enum-variant-types/invalid_variant.stderr b/src/test/ui/enum-variant-types/invalid_variant.stderr new file mode 100644 index 0000000000000..baf6aab19d6e1 --- /dev/null +++ b/src/test/ui/enum-variant-types/invalid_variant.stderr @@ -0,0 +1,12 @@ 
+error[E0308]: mismatched types + --> $DIR/invalid_variant.rs:8:9 + | +LL | bar(x); + | ^ expected enum `Foo`, found a different enum `Foo` + | + = note: expected enum variant `Foo::Variant1` + found enum variant `Foo::Variant2` + +error: aborting due to previous error + +For more information about this error, try `rustc --explain E0308`. diff --git a/src/test/ui/enum-variant-types/variant_projection.rs b/src/test/ui/enum-variant-types/variant_projection.rs new file mode 100644 index 0000000000000..f4d1411efaaa8 --- /dev/null +++ b/src/test/ui/enum-variant-types/variant_projection.rs @@ -0,0 +1,16 @@ +// run-pass + +#![allow(warnings, unused)] + +enum Foo { + Variant1(u32, String), + // TODO(zhamlin): fix issue with projection and multi variants + // Variant2(u32), +} + +fn main() { + let f: Foo::Variant1 = Foo::Variant1(3, "test".to_string()); + assert_eq!(f.0, 3); + assert_eq!(f.1, "test".to_string()); +} + From ccf569fbe8dad1ef9a27c25d4b05fda98228c475 Mon Sep 17 00:00:00 2001 From: Zach Hamlin Date: Tue, 12 Oct 2021 08:40:44 -0500 Subject: [PATCH 2/2] clean up handling of variant type --- .../rustc_const_eval/src/interpret/operand.rs | 7 +- .../src/interpret/validity.rs | 46 +- compiler/rustc_infer/src/infer/combine.rs | 1 - compiler/rustc_middle/src/ty/cast.rs | 11 +- compiler/rustc_middle/src/ty/error.rs | 7 +- compiler/rustc_middle/src/ty/fast_reject.rs | 5 +- compiler/rustc_middle/src/ty/flags.rs | 5 +- compiler/rustc_middle/src/ty/layout.rs | 1149 ++++++++--------- compiler/rustc_middle/src/ty/relate.rs | 28 +- .../rustc_middle/src/ty/structural_impls.rs | 13 +- compiler/rustc_middle/src/ty/sty.rs | 30 +- compiler/rustc_middle/src/ty/util.rs | 3 +- compiler/rustc_middle/src/ty/walk.rs | 7 +- .../rustc_mir_build/src/build/expr/into.rs | 2 - .../rustc_mir_build/src/thir/pattern/mod.rs | 6 +- .../rustc_mir_dataflow/src/elaborate_drops.rs | 14 +- compiler/rustc_mir_dataflow/src/impls/mod.rs | 7 +- compiler/rustc_privacy/src/lib.rs | 15 +- 
.../src/traits/query/dropck_outlives.rs | 16 +- .../src/traits/select/mod.rs | 28 +- .../rustc_trait_selection/src/traits/wf.rs | 11 +- compiler/rustc_traits/src/dropck_outlives.rs | 13 +- compiler/rustc_typeck/src/check/expr.rs | 2 +- .../rustc_typeck/src/variance/constraints.rs | 5 +- 24 files changed, 638 insertions(+), 793 deletions(-) diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs index 8a80482e6d106..2906f1a549af5 100644 --- a/compiler/rustc_const_eval/src/interpret/operand.rs +++ b/compiler/rustc_const_eval/src/interpret/operand.rs @@ -683,20 +683,17 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { let discr_val = self.cast_from_scalar(tag_bits, tag_layout, discr_layout.ty); let discr_bits = discr_val.assert_bits(discr_layout.size); // Convert discriminant to variant index, and catch invalid discriminants. - let index = match *op.layout.ty.kind() { + let index = match *op.layout.ty.strip_variant_type().kind() { ty::Adt(adt, _) => { adt.discriminants(*self.tcx).find(|(_, var)| var.val == discr_bits) } - ty::Variant(ty, _) => match ty.kind() { - ty::Adt(adt, _) => adt.discriminants(*self.tcx).find(|(_, var)| var.val == discr_bits), - _ => bug!("unexpected type: {:?}", ty.kind()), - } ty::Generator(def_id, substs, _) => { let substs = substs.as_generator(); substs .discriminants(def_id, *self.tcx) .find(|(_, var)| var.val == discr_bits) } + ty::Variant(..) 
=> unreachable!(), _ => span_bug!(self.cur_span(), "tagged layout for non-adt non-generator"), } .ok_or_else(|| err_ub!(InvalidTag(Scalar::from_uint(tag_bits, tag_layout.size))))?; diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs index 09925f26f18ce..91b76983d7970 100644 --- a/compiler/rustc_const_eval/src/interpret/validity.rs +++ b/compiler/rustc_const_eval/src/interpret/validity.rs @@ -217,12 +217,8 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, ' match layout.variants { Variants::Multiple { tag_field, .. } => { if tag_field == field { - return match layout.ty.kind() { + return match layout.ty.strip_variant_type().kind() { ty::Adt(def, ..) if def.is_enum() => PathElem::EnumTag, - ty::Variant(ty, ..) => match ty.kind() { - ty::Adt(def, ..) if def.is_enum() => PathElem::EnumTag, - _ => bug!("non-variant type {:?}", layout.ty), - }, ty::Generator(..) => PathElem::GeneratorTag, _ => bug!("non-variant type {:?}", layout.ty), }; @@ -232,7 +228,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, ' } // Now we know we are projecting to a field, so figure out which one. - match layout.ty.kind() { + match layout.ty.strip_variant_type().kind() { // generators and closures. ty::Closure(def_id, _) | ty::Generator(def_id, _, _) => { let mut name = None; @@ -276,20 +272,6 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, ' } } - ty::Variant(ty, ..) => match ty.kind() { - ty::Adt(def, ..) if def.is_enum() => { - // we might be projecting *to* a variant, or to a field *in* a variant. - match layout.variants { - Variants::Single { index } => { - // Inside a variant - PathElem::Field(def.variants[index].fields[field].ident.name) - } - Variants::Multiple { .. 
} => bug!("we handled variants above"), - } - } - _ => bug!("unexpected type: {:?}", ty.kind()), - }, - // other ADTs ty::Adt(def, _) => PathElem::Field(def.non_enum_variant().fields[field].ident.name), @@ -513,7 +495,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, ' ) -> InterpResult<'tcx, bool> { // Go over all the primitive types let ty = value.layout.ty; - match ty.kind() { + match ty.strip_variant_type().kind() { ty::Bool => { let value = self.read_scalar(value)?; try_validation!( @@ -585,17 +567,6 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, ' self.check_safe_pointer(value, "box")?; Ok(true) } - ty::Variant(ty, _) => match ty.kind() { - ty::Adt(def, _) => { - if def.is_box() { - self.check_safe_pointer(value, "box")?; - Ok(true) - } else { - Ok(false) - } - } - _ => bug!("unexpected type: {:?}", ty.kind()), - }, ty::FnPtr(_sig) => { let value = try_validation!( self.ecx.read_immediate(value), @@ -641,6 +612,8 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, ' | ty::Opaque(..) | ty::Projection(..) | ty::GeneratorWitness(..) => bug!("Encountered invalid type {:?}", ty), + + ty::Variant(..) => unreachable!(), } } @@ -756,15 +729,12 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M> variant_id: VariantIdx, new_op: &OpTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx> { - let name = match old_op.layout.ty.kind() { + let ty = old_op.layout.ty.strip_variant_type(); + let name = match ty.kind() { ty::Adt(adt, _) => PathElem::Variant(adt.variants[variant_id].ident.name), - ty::Variant(ty, ..) => match ty.kind() { - ty::Adt(adt, ..) => PathElem::Variant(adt.variants[variant_id].ident.name), - _ => bug!("unexpected type {:?}", ty.kind()), - }, // Generators also have variants ty::Generator(..) 
=> PathElem::GeneratorState(variant_id), - _ => bug!("Unexpected type with variant: {:?}", old_op.layout.ty), + _ => bug!("Unexpected type with variant: {:?}", ty), }; self.with_elem(name, move |this| this.visit_value(new_op)) } diff --git a/compiler/rustc_infer/src/infer/combine.rs b/compiler/rustc_infer/src/infer/combine.rs index 58ef75b3ca8d8..3f54247ecef21 100644 --- a/compiler/rustc_infer/src/infer/combine.rs +++ b/compiler/rustc_infer/src/infer/combine.rs @@ -72,7 +72,6 @@ impl<'infcx, 'tcx> InferCtxt<'infcx, 'tcx> { { let a_is_expected = relation.a_is_expected(); - debug!("super_combine_tys: {:?} | {:?}", a.kind(), b.kind()); match (a.kind(), b.kind()) { // Relate integral variables to other types (&ty::Infer(ty::IntVar(a_id)), &ty::Infer(ty::IntVar(b_id))) => { diff --git a/compiler/rustc_middle/src/ty/cast.rs b/compiler/rustc_middle/src/ty/cast.rs index 04c8956ac32c3..ecb74192fb326 100644 --- a/compiler/rustc_middle/src/ty/cast.rs +++ b/compiler/rustc_middle/src/ty/cast.rs @@ -59,16 +59,7 @@ impl<'tcx> CastTy<'tcx> { ty::Uint(u) => Some(CastTy::Int(IntTy::U(u))), ty::Float(_) => Some(CastTy::Float), ty::Adt(d, _) if d.is_enum() && d.is_payloadfree() => Some(CastTy::Int(IntTy::CEnum)), - ty::Variant(ty, _) => match ty.kind() { - ty::Adt(d, _) => { - if d.is_enum() && d.is_payloadfree() { - Some(CastTy::Int(IntTy::CEnum)) - } else { - None - } - } - _ => bug!("unexpected type: {:?}", ty.kind()), - } + ty::Variant(ty, _) => Self::from_ty(ty), ty::RawPtr(mt) => Some(CastTy::Ptr(mt)), ty::FnPtr(..) 
=> Some(CastTy::FnPtr), _ => None, diff --git a/compiler/rustc_middle/src/ty/error.rs b/compiler/rustc_middle/src/ty/error.rs index 4e57f5231c82f..21dc4e3d1505b 100644 --- a/compiler/rustc_middle/src/ty/error.rs +++ b/compiler/rustc_middle/src/ty/error.rs @@ -244,10 +244,7 @@ impl<'tcx> ty::TyS<'tcx> { ty::Tuple(ref tys) if tys.is_empty() => format!("`{}`", self).into(), ty::Adt(def, _) => format!("{} `{}`", def.descr(), tcx.def_path_str(def.did)).into(), - ty::Variant(ty, _) => match ty.kind() { - ty::Adt(def, _) => format!("{} `{}`", def.descr(), tcx.def_path_str(def.did)).into(), - _ => bug!("unexpected type: {:?}", ty.kind()), - } + ty::Variant(ty, _) => ty.sort_string(tcx), ty::Foreign(def_id) => format!("extern type `{}`", tcx.def_path_str(def_id)).into(), ty::Array(t, n) => { if t.is_simple_ty() { @@ -322,7 +319,7 @@ impl<'tcx> ty::TyS<'tcx> { ty::Variant(ty, _) => match ty.kind() { ty::Adt(def, _) => format!("{} variant", def.descr()).into(), _ => bug!("unexpected type: {:?}", ty.kind()), - } + }, ty::Foreign(_) => "extern type".into(), ty::Array(..) => "array".into(), ty::Slice(_) => "slice".into(), diff --git a/compiler/rustc_middle/src/ty/fast_reject.rs b/compiler/rustc_middle/src/ty/fast_reject.rs index 8443b1725c5b7..392160fa9e0f2 100644 --- a/compiler/rustc_middle/src/ty/fast_reject.rs +++ b/compiler/rustc_middle/src/ty/fast_reject.rs @@ -66,10 +66,7 @@ pub fn simplify_type( ty::Uint(uint_type) => Some(UintSimplifiedType(uint_type)), ty::Float(float_type) => Some(FloatSimplifiedType(float_type)), ty::Adt(def, _) => Some(AdtSimplifiedType(def.did)), - ty::Variant(ref ty, _) => match ty.kind() { - ty::Adt(def, _) => Some(AdtSimplifiedType(def.did)), - _ => bug!("unexpected type: {:?}", ty.kind()), - } + ty::Variant(ty, _) => simplify_type(tcx, ty, can_simplify_params), ty::Str => Some(StrSimplifiedType), ty::Array(..) 
| ty::Slice(_) => Some(ArraySimplifiedType), ty::RawPtr(_) => Some(PtrSimplifiedType), diff --git a/compiler/rustc_middle/src/ty/flags.rs b/compiler/rustc_middle/src/ty/flags.rs index 4a978d5092d42..b7669d0025cd5 100644 --- a/compiler/rustc_middle/src/ty/flags.rs +++ b/compiler/rustc_middle/src/ty/flags.rs @@ -161,10 +161,7 @@ impl FlagComputation { self.add_substs(substs); } - &ty::Variant(ty, _) => match ty.kind() { - ty::Adt(_, substs) => self.add_substs(substs), - _ => bug!("unexpected type: {:?}", ty.kind()), - } + &ty::Variant(ty, _) => self.add_kind(ty.kind()), &ty::Projection(data) => { self.add_flags(TypeFlags::HAS_TY_PROJECTION); diff --git a/compiler/rustc_middle/src/ty/layout.rs b/compiler/rustc_middle/src/ty/layout.rs index 50aaac62bb82b..f28dc1b1905f0 100644 --- a/compiler/rustc_middle/src/ty/layout.rs +++ b/compiler/rustc_middle/src/ty/layout.rs @@ -526,581 +526,6 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> { }) } - fn layout_of_uncached_adt(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'tcx>> { - if let ty::Adt(def, substs) = ty.kind() { - let tcx = self.tcx; - let dl = self.data_layout(); - let scalar_unit = |value: Primitive| { - let size = value.size(dl); - assert!(size.bits() <= 128); - Scalar { - value, - valid_range: WrappingRange { start: 0, end: size.unsigned_int_max() }, - } - }; - - // Cache the field layouts. 
- let variants = def - .variants - .iter() - .map(|v| { - v.fields - .iter() - .map(|field| self.layout_of(field.ty(tcx, substs))) - .collect::, _>>() - }) - .collect::, _>>()?; - - if def.is_union() { - if def.repr.pack.is_some() && def.repr.align.is_some() { - self.tcx.sess.delay_span_bug( - tcx.def_span(def.did), - "union cannot be packed and aligned", - ); - return Err(LayoutError::Unknown(ty)); - } - - let mut align = - if def.repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align }; - - if let Some(repr_align) = def.repr.align { - align = align.max(AbiAndPrefAlign::new(repr_align)); - } - - let optimize = !def.repr.inhibit_union_abi_opt(); - let mut size = Size::ZERO; - let mut abi = Abi::Aggregate { sized: true }; - let index = VariantIdx::new(0); - for field in &variants[index] { - assert!(!field.is_unsized()); - align = align.max(field.align); - - // If all non-ZST fields have the same ABI, forward this ABI - if optimize && !field.is_zst() { - // Normalize scalar_unit to the maximal valid range - let field_abi = match field.abi { - Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)), - Abi::ScalarPair(x, y) => { - Abi::ScalarPair(scalar_unit(x.value), scalar_unit(y.value)) - } - Abi::Vector { element: x, count } => { - Abi::Vector { element: scalar_unit(x.value), count } - } - Abi::Uninhabited | Abi::Aggregate { .. 
} => { - Abi::Aggregate { sized: true } - } - }; - - if size == Size::ZERO { - // first non ZST: initialize 'abi' - abi = field_abi; - } else if abi != field_abi { - // different fields have different ABI: reset to Aggregate - abi = Abi::Aggregate { sized: true }; - } - } - - size = cmp::max(size, field.size); - } - - if let Some(pack) = def.repr.pack { - align = align.min(AbiAndPrefAlign::new(pack)); - } - - return Ok(tcx.intern_layout(Layout { - variants: Variants::Single { index }, - fields: FieldsShape::Union( - NonZeroUsize::new(variants[index].len()).ok_or(LayoutError::Unknown(ty))?, - ), - abi, - largest_niche: None, - align, - size: size.align_to(align.abi), - })); - } - - // A variant is absent if it's uninhabited and only has ZST fields. - // Present uninhabited variants only require space for their fields, - // but *not* an encoding of the discriminant (e.g., a tag value). - // See issue #49298 for more details on the need to leave space - // for non-ZST uninhabited data (mostly partial initialization). - let absent = |fields: &[TyAndLayout<'_>]| { - let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited()); - let is_zst = fields.iter().all(|f| f.is_zst()); - uninhabited && is_zst - }; - let (present_first, present_second) = { - let mut present_variants = variants - .iter_enumerated() - .filter_map(|(i, v)| if absent(v) { None } else { Some(i) }); - (present_variants.next(), present_variants.next()) - }; - let present_first = match present_first { - Some(present_first) => present_first, - // Uninhabited because it has no variants, or only absent ones. - None if def.is_enum() => { - return Ok(tcx.layout_of(self.param_env.and(tcx.types.never))?.layout); - } - // If it's a struct, still compute a layout so that we can still compute the - // field offsets. - None => VariantIdx::new(0), - }; - - let is_struct = !def.is_enum() || - // Only one variant is present. - (present_second.is_none() && - // Representation optimizations are allowed. 
- !def.repr.inhibit_enum_layout_opt()); - if is_struct { - // Struct, or univariant enum equivalent to a struct. - // (Typechecking will reject discriminant-sizing attrs.) - - let v = present_first; - let kind = if def.is_enum() || variants[v].is_empty() { - StructKind::AlwaysSized - } else { - let param_env = tcx.param_env(def.did); - let last_field = def.variants[v].fields.last().unwrap(); - let always_sized = - tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env); - if !always_sized { StructKind::MaybeUnsized } else { StructKind::AlwaysSized } - }; - - let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr, kind)?; - st.variants = Variants::Single { index: v }; - let (start, end) = self.tcx.layout_scalar_valid_range(def.did); - match st.abi { - Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => { - // the asserts ensure that we are not using the - // `#[rustc_layout_scalar_valid_range(n)]` - // attribute to widen the range of anything as that would probably - // result in UB somewhere - // FIXME(eddyb) the asserts are probably not needed, - // as larger validity ranges would result in missed - // optimizations, *not* wrongly assuming the inner - // value is valid. e.g. unions enlarge validity ranges, - // because the values may be uninitialized. - if let Bound::Included(start) = start { - // FIXME(eddyb) this might be incorrect - it doesn't - // account for wrap-around (end < start) ranges. - assert!(scalar.valid_range.start <= start); - scalar.valid_range.start = start; - } - if let Bound::Included(end) = end { - // FIXME(eddyb) this might be incorrect - it doesn't - // account for wrap-around (end < start) ranges. - assert!(scalar.valid_range.end >= end); - scalar.valid_range.end = end; - } - - // Update `largest_niche` if we have introduced a larger niche. 
- let niche = if def.repr.hide_niche() { - None - } else { - Niche::from_scalar(dl, Size::ZERO, *scalar) - }; - if let Some(niche) = niche { - match st.largest_niche { - Some(largest_niche) => { - // Replace the existing niche even if they're equal, - // because this one is at a lower offset. - if largest_niche.available(dl) <= niche.available(dl) { - st.largest_niche = Some(niche); - } - } - None => st.largest_niche = Some(niche), - } - } - } - _ => assert!( - start == Bound::Unbounded && end == Bound::Unbounded, - "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}", - def, - st, - ), - } - - return Ok(tcx.intern_layout(st)); - } - - // At this point, we have handled all unions and - // structs. (We have also handled univariant enums - // that allow representation optimization.) - assert!(def.is_enum()); - - // The current code for niche-filling relies on variant indices - // instead of actual discriminants, so dataful enums with - // explicit discriminants (RFC #2363) would misbehave. - let no_explicit_discriminants = def - .variants - .iter_enumerated() - .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32())); - - let mut niche_filling_layout = None; - - // Niche-filling enum optimization. - if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants { - let mut dataful_variant = None; - let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0); - - // Find one non-ZST variant. 
- 'variants: for (v, fields) in variants.iter_enumerated() { - if absent(fields) { - continue 'variants; - } - for f in fields { - if !f.is_zst() { - if dataful_variant.is_none() { - dataful_variant = Some(v); - continue 'variants; - } else { - dataful_variant = None; - break 'variants; - } - } - } - niche_variants = *niche_variants.start().min(&v)..=v; - } - - if niche_variants.start() > niche_variants.end() { - dataful_variant = None; - } - - if let Some(i) = dataful_variant { - let count = (niche_variants.end().as_u32() - niche_variants.start().as_u32() - + 1) as u128; - - // Find the field with the largest niche - let niche_candidate = variants[i] - .iter() - .enumerate() - .filter_map(|(j, field)| Some((j, field.largest_niche?))) - .max_by_key(|(_, niche)| niche.available(dl)); - - if let Some((field_index, niche, (niche_start, niche_scalar))) = niche_candidate - .and_then(|(field_index, niche)| { - Some((field_index, niche, niche.reserve(self, count)?)) - }) - { - let mut align = dl.aggregate_align; - let st = variants - .iter_enumerated() - .map(|(j, v)| { - let mut st = self.univariant_uninterned( - ty, - v, - &def.repr, - StructKind::AlwaysSized, - )?; - st.variants = Variants::Single { index: j }; - - align = align.max(st.align); - - Ok(st) - }) - .collect::, _>>()?; - - let offset = st[i].fields.offset(field_index) + niche.offset; - let size = st[i].size; - - let abi = if st.iter().all(|v| v.abi.is_uninhabited()) { - Abi::Uninhabited - } else { - match st[i].abi { - Abi::Scalar(_) => Abi::Scalar(niche_scalar), - Abi::ScalarPair(first, second) => { - // We need to use scalar_unit to reset the - // valid range to the maximal one for that - // primitive, because only the niche is - // guaranteed to be initialised, not the - // other primitive. 
- if offset.bytes() == 0 { - Abi::ScalarPair(niche_scalar, scalar_unit(second.value)) - } else { - Abi::ScalarPair(scalar_unit(first.value), niche_scalar) - } - } - _ => Abi::Aggregate { sized: true }, - } - }; - - let largest_niche = Niche::from_scalar(dl, offset, niche_scalar); - - niche_filling_layout = Some(Layout { - variants: Variants::Multiple { - tag: niche_scalar, - tag_encoding: TagEncoding::Niche { - dataful_variant: i, - niche_variants, - niche_start, - }, - tag_field: 0, - variants: st, - }, - fields: FieldsShape::Arbitrary { - offsets: vec![offset], - memory_index: vec![0], - }, - abi, - largest_niche, - size, - align, - }); - } - } - } - - let (mut min, mut max) = (i128::MAX, i128::MIN); - let discr_type = def.repr.discr_type(); - let bits = Integer::from_attr(self, discr_type).size().bits(); - for (i, discr) in def.discriminants(tcx) { - if variants[i].iter().any(|f| f.abi.is_uninhabited()) { - continue; - } - let mut x = discr.val as i128; - if discr_type.is_signed() { - // sign extend the raw representation to be an i128 - x = (x << (128 - bits)) >> (128 - bits); - } - if x < min { - min = x; - } - if x > max { - max = x; - } - } - // We might have no inhabited variants, so pretend there's at least one. - if (min, max) == (i128::MAX, i128::MIN) { - min = 0; - max = 0; - } - assert!(min <= max, "discriminant range is {}...{}", min, max); - let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max); - - let mut align = dl.aggregate_align; - let mut size = Size::ZERO; - - // We're interested in the smallest alignment, so start large. - let mut start_align = Align::from_bytes(256).unwrap(); - assert_eq!(Integer::for_align(dl, start_align), None); - - // repr(C) on an enum tells us to make a (tag, union) layout, - // so we need to grow the prefix alignment to be at least - // the alignment of the union. 
(This value is used both for - // determining the alignment of the overall enum, and the - // determining the alignment of the payload after the tag.) - let mut prefix_align = min_ity.align(dl).abi; - if def.repr.c() { - for fields in &variants { - for field in fields { - prefix_align = prefix_align.max(field.align.abi); - } - } - } - - // Create the set of structs that represent each variant. - let mut layout_variants = variants - .iter_enumerated() - .map(|(i, field_layouts)| { - let mut st = self.univariant_uninterned( - ty, - &field_layouts, - &def.repr, - StructKind::Prefixed(min_ity.size(), prefix_align), - )?; - st.variants = Variants::Single { index: i }; - // Find the first field we can't move later - // to make room for a larger discriminant. - for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) { - if !field.is_zst() || field.align.abi.bytes() != 1 { - start_align = start_align.min(field.align.abi); - break; - } - } - size = cmp::max(size, st.size); - align = align.max(st.align); - Ok(st) - }) - .collect::, _>>()?; - - // Align the maximum variant size to the largest alignment. - size = size.align_to(align.abi); - - if size.bytes() >= dl.obj_size_bound() { - return Err(LayoutError::SizeOverflow(ty)); - } - - let typeck_ity = Integer::from_attr(dl, def.repr.discr_type()); - if typeck_ity < min_ity { - // It is a bug if Layout decided on a greater discriminant size than typeck for - // some reason at this point (based on values discriminant can take on). Mostly - // because this discriminant will be loaded, and then stored into variable of - // type calculated by typeck. Consider such case (a bug): typeck decided on - // byte-sized discriminant, but layout thinks we need a 16-bit to store all - // discriminant values. 
That would be a bug, because then, in codegen, in order - // to store this 16-bit discriminant into 8-bit sized temporary some of the - // space necessary to represent would have to be discarded (or layout is wrong - // on thinking it needs 16 bits) - bug!( - "layout decided on a larger discriminant type ({:?}) than typeck ({:?})", - min_ity, - typeck_ity - ); - // However, it is fine to make discr type however large (as an optimisation) - // after this point – we’ll just truncate the value we load in codegen. - } - - // Check to see if we should use a different type for the - // discriminant. We can safely use a type with the same size - // as the alignment of the first field of each variant. - // We increase the size of the discriminant to avoid LLVM copying - // padding when it doesn't need to. This normally causes unaligned - // load/stores and excessive memcpy/memset operations. By using a - // bigger integer size, LLVM can be sure about its contents and - // won't be so conservative. - - // Use the initial field alignment - let mut ity = if def.repr.c() || def.repr.int.is_some() { - min_ity - } else { - Integer::for_align(dl, start_align).unwrap_or(min_ity) - }; - - // If the alignment is not larger than the chosen discriminant size, - // don't use the alignment as the final size. - if ity <= min_ity { - ity = min_ity; - } else { - // Patch up the variants' first few fields. - let old_ity_size = min_ity.size(); - let new_ity_size = ity.size(); - for variant in &mut layout_variants { - match variant.fields { - FieldsShape::Arbitrary { ref mut offsets, .. } => { - for i in offsets { - if *i <= old_ity_size { - assert_eq!(*i, old_ity_size); - *i = new_ity_size; - } - } - // We might be making the struct larger. 
- if variant.size <= old_ity_size { - variant.size = new_ity_size; - } - } - _ => bug!(), - } - } - } - - let tag_mask = ity.size().unsigned_int_max(); - let tag = Scalar { - value: Int(ity, signed), - valid_range: WrappingRange { - start: (min as u128 & tag_mask), - end: (max as u128 & tag_mask), - }, - }; - let mut abi = Abi::Aggregate { sized: true }; - if tag.value.size(dl) == size { - abi = Abi::Scalar(tag); - } else { - // Try to use a ScalarPair for all tagged enums. - let mut common_prim = None; - for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) { - let offsets = match layout_variant.fields { - FieldsShape::Arbitrary { ref offsets, .. } => offsets, - _ => bug!(), - }; - let mut fields = iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst()); - let (field, offset) = match (fields.next(), fields.next()) { - (None, None) => continue, - (Some(pair), None) => pair, - _ => { - common_prim = None; - break; - } - }; - let prim = match field.abi { - Abi::Scalar(scalar) => scalar.value, - _ => { - common_prim = None; - break; - } - }; - if let Some(pair) = common_prim { - // This is pretty conservative. We could go fancier - // by conflating things like i32 and u32, or even - // realising that (u8, u8) could just cohabit with - // u16 or even u32. - if pair != (prim, offset) { - common_prim = None; - break; - } - } else { - common_prim = Some((prim, offset)); - } - } - if let Some((prim, offset)) = common_prim { - let pair = self.scalar_pair(tag, scalar_unit(prim)); - let pair_offsets = match pair.fields { - FieldsShape::Arbitrary { ref offsets, ref memory_index } => { - assert_eq!(memory_index, &[0, 1]); - offsets - } - _ => bug!(), - }; - if pair_offsets[0] == Size::ZERO - && pair_offsets[1] == *offset - && align == pair.align - && size == pair.size - { - // We can use `ScalarPair` only when it matches our - // already computed layout (including `#[repr(C)]`). 
- abi = pair.abi; - } - } - } - - if layout_variants.iter().all(|v| v.abi.is_uninhabited()) { - abi = Abi::Uninhabited; - } - - let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag); - - let tagged_layout = Layout { - variants: Variants::Multiple { - tag, - tag_encoding: TagEncoding::Direct, - tag_field: 0, - variants: layout_variants, - }, - fields: FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }, - largest_niche, - abi, - align, - size, - }; - - let best_layout = match (tagged_layout, niche_filling_layout) { - (tagged_layout, Some(niche_filling_layout)) => { - // Pick the smaller layout; otherwise, - // pick the layout with the larger niche; otherwise, - // pick tagged as it has simpler codegen. - cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| { - let niche_size = layout.largest_niche.map_or(0, |n| n.available(dl)); - (layout.size, cmp::Reverse(niche_size)) - }) - } - (tagged_layout, None) => tagged_layout, - }; - - Ok(tcx.intern_layout(best_layout)) - } else { - bug!("unexpected type: {:?}", ty.kind()) - } - } - fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'tcx>> { let tcx = self.tcx; let param_env = self.param_env; @@ -1397,9 +822,579 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> { } // ADTs. - ty::Adt(..) => self.layout_of_uncached_adt(ty)?, + ty::Adt(def, substs) => { + // Cache the field layouts. 
+ let variants = def + .variants + .iter() + .map(|v| { + v.fields + .iter() + .map(|field| self.layout_of(field.ty(tcx, substs))) + .collect::, _>>() + }) + .collect::, _>>()?; + + if def.is_union() { + if def.repr.pack.is_some() && def.repr.align.is_some() { + self.tcx.sess.delay_span_bug( + tcx.def_span(def.did), + "union cannot be packed and aligned", + ); + return Err(LayoutError::Unknown(ty)); + } + + let mut align = + if def.repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align }; + + if let Some(repr_align) = def.repr.align { + align = align.max(AbiAndPrefAlign::new(repr_align)); + } + + let optimize = !def.repr.inhibit_union_abi_opt(); + let mut size = Size::ZERO; + let mut abi = Abi::Aggregate { sized: true }; + let index = VariantIdx::new(0); + for field in &variants[index] { + assert!(!field.is_unsized()); + align = align.max(field.align); + + // If all non-ZST fields have the same ABI, forward this ABI + if optimize && !field.is_zst() { + // Normalize scalar_unit to the maximal valid range + let field_abi = match field.abi { + Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)), + Abi::ScalarPair(x, y) => { + Abi::ScalarPair(scalar_unit(x.value), scalar_unit(y.value)) + } + Abi::Vector { element: x, count } => { + Abi::Vector { element: scalar_unit(x.value), count } + } + Abi::Uninhabited | Abi::Aggregate { .. 
} => { + Abi::Aggregate { sized: true } + } + }; + + if size == Size::ZERO { + // first non ZST: initialize 'abi' + abi = field_abi; + } else if abi != field_abi { + // different fields have different ABI: reset to Aggregate + abi = Abi::Aggregate { sized: true }; + } + } + + size = cmp::max(size, field.size); + } + + if let Some(pack) = def.repr.pack { + align = align.min(AbiAndPrefAlign::new(pack)); + } + + return Ok(tcx.intern_layout(Layout { + variants: Variants::Single { index }, + fields: FieldsShape::Union( + NonZeroUsize::new(variants[index].len()) + .ok_or(LayoutError::Unknown(ty))?, + ), + abi, + largest_niche: None, + align, + size: size.align_to(align.abi), + })); + } + + // A variant is absent if it's uninhabited and only has ZST fields. + // Present uninhabited variants only require space for their fields, + // but *not* an encoding of the discriminant (e.g., a tag value). + // See issue #49298 for more details on the need to leave space + // for non-ZST uninhabited data (mostly partial initialization). + let absent = |fields: &[TyAndLayout<'_>]| { + let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited()); + let is_zst = fields.iter().all(|f| f.is_zst()); + uninhabited && is_zst + }; + let (present_first, present_second) = { + let mut present_variants = variants + .iter_enumerated() + .filter_map(|(i, v)| if absent(v) { None } else { Some(i) }); + (present_variants.next(), present_variants.next()) + }; + let present_first = match present_first { + Some(present_first) => present_first, + // Uninhabited because it has no variants, or only absent ones. + None if def.is_enum() => { + return Ok(tcx.layout_of(param_env.and(tcx.types.never))?.layout); + } + // If it's a struct, still compute a layout so that we can still compute the + // field offsets. + None => VariantIdx::new(0), + }; + + let is_struct = !def.is_enum() || + // Only one variant is present. + (present_second.is_none() && + // Representation optimizations are allowed. 
+ !def.repr.inhibit_enum_layout_opt()); + if is_struct { + // Struct, or univariant enum equivalent to a struct. + // (Typechecking will reject discriminant-sizing attrs.) + + let v = present_first; + let kind = if def.is_enum() || variants[v].is_empty() { + StructKind::AlwaysSized + } else { + let param_env = tcx.param_env(def.did); + let last_field = def.variants[v].fields.last().unwrap(); + let always_sized = + tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env); + if !always_sized { + StructKind::MaybeUnsized + } else { + StructKind::AlwaysSized + } + }; + + let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr, kind)?; + st.variants = Variants::Single { index: v }; + let (start, end) = self.tcx.layout_scalar_valid_range(def.did); + match st.abi { + Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => { + // the asserts ensure that we are not using the + // `#[rustc_layout_scalar_valid_range(n)]` + // attribute to widen the range of anything as that would probably + // result in UB somewhere + // FIXME(eddyb) the asserts are probably not needed, + // as larger validity ranges would result in missed + // optimizations, *not* wrongly assuming the inner + // value is valid. e.g. unions enlarge validity ranges, + // because the values may be uninitialized. + if let Bound::Included(start) = start { + // FIXME(eddyb) this might be incorrect - it doesn't + // account for wrap-around (end < start) ranges. + assert!(scalar.valid_range.start <= start); + scalar.valid_range.start = start; + } + if let Bound::Included(end) = end { + // FIXME(eddyb) this might be incorrect - it doesn't + // account for wrap-around (end < start) ranges. + assert!(scalar.valid_range.end >= end); + scalar.valid_range.end = end; + } + + // Update `largest_niche` if we have introduced a larger niche. 
+ let niche = if def.repr.hide_niche() { + None + } else { + Niche::from_scalar(dl, Size::ZERO, *scalar) + }; + if let Some(niche) = niche { + match st.largest_niche { + Some(largest_niche) => { + // Replace the existing niche even if they're equal, + // because this one is at a lower offset. + if largest_niche.available(dl) <= niche.available(dl) { + st.largest_niche = Some(niche); + } + } + None => st.largest_niche = Some(niche), + } + } + } + _ => assert!( + start == Bound::Unbounded && end == Bound::Unbounded, + "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}", + def, + st, + ), + } + + return Ok(tcx.intern_layout(st)); + } + + // At this point, we have handled all unions and + // structs. (We have also handled univariant enums + // that allow representation optimization.) + assert!(def.is_enum()); + + // The current code for niche-filling relies on variant indices + // instead of actual discriminants, so dataful enums with + // explicit discriminants (RFC #2363) would misbehave. + let no_explicit_discriminants = def + .variants + .iter_enumerated() + .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32())); + + let mut niche_filling_layout = None; + + // Niche-filling enum optimization. + if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants { + let mut dataful_variant = None; + let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0); + + // Find one non-ZST variant. 
+ 'variants: for (v, fields) in variants.iter_enumerated() { + if absent(fields) { + continue 'variants; + } + for f in fields { + if !f.is_zst() { + if dataful_variant.is_none() { + dataful_variant = Some(v); + continue 'variants; + } else { + dataful_variant = None; + break 'variants; + } + } + } + niche_variants = *niche_variants.start().min(&v)..=v; + } + + if niche_variants.start() > niche_variants.end() { + dataful_variant = None; + } + + if let Some(i) = dataful_variant { + let count = (niche_variants.end().as_u32() + - niche_variants.start().as_u32() + + 1) as u128; + + // Find the field with the largest niche + let niche_candidate = variants[i] + .iter() + .enumerate() + .filter_map(|(j, field)| Some((j, field.largest_niche?))) + .max_by_key(|(_, niche)| niche.available(dl)); + + if let Some((field_index, niche, (niche_start, niche_scalar))) = + niche_candidate.and_then(|(field_index, niche)| { + Some((field_index, niche, niche.reserve(self, count)?)) + }) + { + let mut align = dl.aggregate_align; + let st = variants + .iter_enumerated() + .map(|(j, v)| { + let mut st = self.univariant_uninterned( + ty, + v, + &def.repr, + StructKind::AlwaysSized, + )?; + st.variants = Variants::Single { index: j }; + + align = align.max(st.align); + + Ok(st) + }) + .collect::, _>>()?; + + let offset = st[i].fields.offset(field_index) + niche.offset; + let size = st[i].size; + + let abi = if st.iter().all(|v| v.abi.is_uninhabited()) { + Abi::Uninhabited + } else { + match st[i].abi { + Abi::Scalar(_) => Abi::Scalar(niche_scalar), + Abi::ScalarPair(first, second) => { + // We need to use scalar_unit to reset the + // valid range to the maximal one for that + // primitive, because only the niche is + // guaranteed to be initialised, not the + // other primitive. 
+ if offset.bytes() == 0 { + Abi::ScalarPair(niche_scalar, scalar_unit(second.value)) + } else { + Abi::ScalarPair(scalar_unit(first.value), niche_scalar) + } + } + _ => Abi::Aggregate { sized: true }, + } + }; + + let largest_niche = Niche::from_scalar(dl, offset, niche_scalar); + + niche_filling_layout = Some(Layout { + variants: Variants::Multiple { + tag: niche_scalar, + tag_encoding: TagEncoding::Niche { + dataful_variant: i, + niche_variants, + niche_start, + }, + tag_field: 0, + variants: st, + }, + fields: FieldsShape::Arbitrary { + offsets: vec![offset], + memory_index: vec![0], + }, + abi, + largest_niche, + size, + align, + }); + } + } + } + + let (mut min, mut max) = (i128::MAX, i128::MIN); + let discr_type = def.repr.discr_type(); + let bits = Integer::from_attr(self, discr_type).size().bits(); + for (i, discr) in def.discriminants(tcx) { + if variants[i].iter().any(|f| f.abi.is_uninhabited()) { + continue; + } + let mut x = discr.val as i128; + if discr_type.is_signed() { + // sign extend the raw representation to be an i128 + x = (x << (128 - bits)) >> (128 - bits); + } + if x < min { + min = x; + } + if x > max { + max = x; + } + } + // We might have no inhabited variants, so pretend there's at least one. + if (min, max) == (i128::MAX, i128::MIN) { + min = 0; + max = 0; + } + assert!(min <= max, "discriminant range is {}...{}", min, max); + let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max); + + let mut align = dl.aggregate_align; + let mut size = Size::ZERO; + + // We're interested in the smallest alignment, so start large. + let mut start_align = Align::from_bytes(256).unwrap(); + assert_eq!(Integer::for_align(dl, start_align), None); + + // repr(C) on an enum tells us to make a (tag, union) layout, + // so we need to grow the prefix alignment to be at least + // the alignment of the union. 
(This value is used both for + // determining the alignment of the overall enum, and the + // determining the alignment of the payload after the tag.) + let mut prefix_align = min_ity.align(dl).abi; + if def.repr.c() { + for fields in &variants { + for field in fields { + prefix_align = prefix_align.max(field.align.abi); + } + } + } + + // Create the set of structs that represent each variant. + let mut layout_variants = variants + .iter_enumerated() + .map(|(i, field_layouts)| { + let mut st = self.univariant_uninterned( + ty, + &field_layouts, + &def.repr, + StructKind::Prefixed(min_ity.size(), prefix_align), + )?; + st.variants = Variants::Single { index: i }; + // Find the first field we can't move later + // to make room for a larger discriminant. + for field in + st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) + { + if !field.is_zst() || field.align.abi.bytes() != 1 { + start_align = start_align.min(field.align.abi); + break; + } + } + size = cmp::max(size, st.size); + align = align.max(st.align); + Ok(st) + }) + .collect::, _>>()?; + + // Align the maximum variant size to the largest alignment. + size = size.align_to(align.abi); + + if size.bytes() >= dl.obj_size_bound() { + return Err(LayoutError::SizeOverflow(ty)); + } + + let typeck_ity = Integer::from_attr(dl, def.repr.discr_type()); + if typeck_ity < min_ity { + // It is a bug if Layout decided on a greater discriminant size than typeck for + // some reason at this point (based on values discriminant can take on). Mostly + // because this discriminant will be loaded, and then stored into variable of + // type calculated by typeck. Consider such case (a bug): typeck decided on + // byte-sized discriminant, but layout thinks we need a 16-bit to store all + // discriminant values. 
That would be a bug, because then, in codegen, in order + // to store this 16-bit discriminant into 8-bit sized temporary some of the + // space necessary to represent would have to be discarded (or layout is wrong + // on thinking it needs 16 bits) + bug!( + "layout decided on a larger discriminant type ({:?}) than typeck ({:?})", + min_ity, + typeck_ity + ); + // However, it is fine to make discr type however large (as an optimisation) + // after this point – we’ll just truncate the value we load in codegen. + } + + // Check to see if we should use a different type for the + // discriminant. We can safely use a type with the same size + // as the alignment of the first field of each variant. + // We increase the size of the discriminant to avoid LLVM copying + // padding when it doesn't need to. This normally causes unaligned + // load/stores and excessive memcpy/memset operations. By using a + // bigger integer size, LLVM can be sure about its contents and + // won't be so conservative. + + // Use the initial field alignment + let mut ity = if def.repr.c() || def.repr.int.is_some() { + min_ity + } else { + Integer::for_align(dl, start_align).unwrap_or(min_ity) + }; + + // If the alignment is not larger than the chosen discriminant size, + // don't use the alignment as the final size. + if ity <= min_ity { + ity = min_ity; + } else { + // Patch up the variants' first few fields. + let old_ity_size = min_ity.size(); + let new_ity_size = ity.size(); + for variant in &mut layout_variants { + match variant.fields { + FieldsShape::Arbitrary { ref mut offsets, .. } => { + for i in offsets { + if *i <= old_ity_size { + assert_eq!(*i, old_ity_size); + *i = new_ity_size; + } + } + // We might be making the struct larger. 
+ if variant.size <= old_ity_size { + variant.size = new_ity_size; + } + } + _ => bug!(), + } + } + } + + let tag_mask = ity.size().unsigned_int_max(); + let tag = Scalar { + value: Int(ity, signed), + valid_range: WrappingRange { + start: (min as u128 & tag_mask), + end: (max as u128 & tag_mask), + }, + }; + let mut abi = Abi::Aggregate { sized: true }; + if tag.value.size(dl) == size { + abi = Abi::Scalar(tag); + } else { + // Try to use a ScalarPair for all tagged enums. + let mut common_prim = None; + for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) { + let offsets = match layout_variant.fields { + FieldsShape::Arbitrary { ref offsets, .. } => offsets, + _ => bug!(), + }; + let mut fields = + iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst()); + let (field, offset) = match (fields.next(), fields.next()) { + (None, None) => continue, + (Some(pair), None) => pair, + _ => { + common_prim = None; + break; + } + }; + let prim = match field.abi { + Abi::Scalar(scalar) => scalar.value, + _ => { + common_prim = None; + break; + } + }; + if let Some(pair) = common_prim { + // This is pretty conservative. We could go fancier + // by conflating things like i32 and u32, or even + // realising that (u8, u8) could just cohabit with + // u16 or even u32. + if pair != (prim, offset) { + common_prim = None; + break; + } + } else { + common_prim = Some((prim, offset)); + } + } + if let Some((prim, offset)) = common_prim { + let pair = self.scalar_pair(tag, scalar_unit(prim)); + let pair_offsets = match pair.fields { + FieldsShape::Arbitrary { ref offsets, ref memory_index } => { + assert_eq!(memory_index, &[0, 1]); + offsets + } + _ => bug!(), + }; + if pair_offsets[0] == Size::ZERO + && pair_offsets[1] == *offset + && align == pair.align + && size == pair.size + { + // We can use `ScalarPair` only when it matches our + // already computed layout (including `#[repr(C)]`). 
+ abi = pair.abi; + } + } + } + + if layout_variants.iter().all(|v| v.abi.is_uninhabited()) { + abi = Abi::Uninhabited; + } + + let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag); + + let tagged_layout = Layout { + variants: Variants::Multiple { + tag, + tag_encoding: TagEncoding::Direct, + tag_field: 0, + variants: layout_variants, + }, + fields: FieldsShape::Arbitrary { + offsets: vec![Size::ZERO], + memory_index: vec![0], + }, + largest_niche, + abi, + align, + size, + }; + + let best_layout = match (tagged_layout, niche_filling_layout) { + (tagged_layout, Some(niche_filling_layout)) => { + // Pick the smaller layout; otherwise, + // pick the layout with the larger niche; otherwise, + // pick tagged as it has simpler codegen. + cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| { + let niche_size = layout.largest_niche.map_or(0, |n| n.available(dl)); + (layout.size, cmp::Reverse(niche_size)) + }) + } + (tagged_layout, None) => tagged_layout, + }; + + tcx.intern_layout(best_layout) + } - ty::Variant(ty, _) => self.layout_of_uncached_adt(ty)?, + ty::Variant(ty, _) => self.layout_of_uncached(ty)?, // Types with no meaningful known layout. ty::Projection(_) | ty::Opaque(..) => { diff --git a/compiler/rustc_middle/src/ty/relate.rs b/compiler/rustc_middle/src/ty/relate.rs index 3b85b199f89bc..b3ab20045a625 100644 --- a/compiler/rustc_middle/src/ty/relate.rs +++ b/compiler/rustc_middle/src/ty/relate.rs @@ -404,12 +404,14 @@ pub fn super_relate_tys>( // TODO(zhamlin): handle this somewhere else? 
// Enum <- Enum Variant - (&ty::Adt(a_def, a_substs), &ty::Variant(b_ty, _)) if relation.a_is_expected() => match b_ty.kind() { - ty::Adt(b_def, b_substs) if a_def == *b_def => { - let substs = relation.relate_item_substs(a_def.did, a_substs, b_substs)?; - Ok(tcx.mk_adt(a_def, substs)) - }, - _ => Err(TypeError::Sorts(expected_found(relation, a, b))), + (&ty::Adt(a_def, a_substs), &ty::Variant(b_ty, _)) if relation.a_is_expected() => { + match b_ty.kind() { + ty::Adt(b_def, b_substs) if a_def == *b_def => { + let substs = relation.relate_item_substs(a_def.did, a_substs, b_substs)?; + Ok(tcx.mk_adt(a_def, substs)) + } + _ => Err(TypeError::Sorts(expected_found(relation, a, b))), + } } (&ty::Variant(a_ty, _), &ty::Adt(b_def, b_substs)) => match a_ty.kind() { @@ -418,18 +420,10 @@ pub fn super_relate_tys>( Ok(tcx.mk_adt(a_def, substs)) } _ => Err(TypeError::Sorts(expected_found(relation, a, b))), - } + }, - (&ty::Variant(a_ty, a_idx), &ty::Variant(b_ty, b_idx)) => match a_ty.kind() { - ty::Adt(a_def, a_substs) => match b_ty.kind() { - ty::Adt(b_def, b_substs) if a_def == b_def && a_idx == b_idx => { - let substs = relation.relate_item_substs(a_def.did, a_substs, b_substs)?; - let adt = tcx.mk_adt(a_def, substs); - Ok(tcx.mk_ty(ty::Variant(adt, a_idx))) - } - _ => Err(TypeError::Sorts(expected_found(relation, a, b))), - }, - _ => Err(TypeError::Sorts(expected_found(relation, a, b))), + (&ty::Variant(a_ty, a_idx), &ty::Variant(b_ty, b_idx)) if a_idx == b_idx => { + relation.relate(a_ty, b_ty).map(|ty| tcx.mk_ty(ty::Variant(ty, a_idx))) } (&ty::Foreign(a_id), &ty::Foreign(b_id)) if a_id == b_id => Ok(tcx.mk_foreign(a_id)), diff --git a/compiler/rustc_middle/src/ty/structural_impls.rs b/compiler/rustc_middle/src/ty/structural_impls.rs index 89df6d01ec547..90416b97ea917 100644 --- a/compiler/rustc_middle/src/ty/structural_impls.rs +++ b/compiler/rustc_middle/src/ty/structural_impls.rs @@ -871,13 +871,7 @@ impl<'tcx> TypeFoldable<'tcx> for Ty<'tcx> { ty::Array(typ, sz) => 
ty::Array(typ.fold_with(folder), sz.fold_with(folder)), ty::Slice(typ) => ty::Slice(typ.fold_with(folder)), ty::Adt(tid, substs) => ty::Adt(tid, substs.fold_with(folder)), - ty::Variant(ty, idx) => match ty.kind() { - ty::Adt(tid, substs) => { - let adt_ty = folder.tcx().mk_ty(ty::Adt(tid, substs.fold_with(folder))); - ty::Variant(adt_ty, idx) - }, - _ => bug!("unexpected ty: {:?}", ty.kind()), - } + ty::Variant(ty, idx) => ty::Variant(ty.fold_with(folder), idx), ty::Dynamic(trait_ty, region) => { ty::Dynamic(trait_ty.fold_with(folder), region.fold_with(folder)) } @@ -924,10 +918,7 @@ impl<'tcx> TypeFoldable<'tcx> for Ty<'tcx> { } ty::Slice(typ) => typ.visit_with(visitor), ty::Adt(_, substs) => substs.visit_with(visitor), - ty::Variant(ty, _idx) => match ty.kind() { - ty::Adt(_, substs) => substs.visit_with(visitor), - _ => bug!("unexpected type: {:?}", ty.kind()), - } + ty::Variant(ty, _idx) => ty.visit_with(visitor), ty::Dynamic(ref trait_ty, ref reg) => { trait_ty.visit_with(visitor)?; reg.visit_with(visitor) diff --git a/compiler/rustc_middle/src/ty/sty.rs b/compiler/rustc_middle/src/ty/sty.rs index d5afd8a877f6b..7a00a48a8af0b 100644 --- a/compiler/rustc_middle/src/ty/sty.rs +++ b/compiler/rustc_middle/src/ty/sty.rs @@ -195,7 +195,7 @@ pub enum TyKind<'tcx> { /// A type variable used during type checking. 
Infer(InferTy), - /// An enum (TyKind::Adt) and its variant + /// Type of a variant of an enum Variant(Ty<'tcx>, VariantIdx), /// A placeholder for a type which could not be computed; this is @@ -2014,12 +2014,10 @@ impl<'tcx> TyS<'tcx> { TyKind::Adt(adt, _) if adt.is_enum() => { Some(adt.discriminant_for_variant(tcx, variant_index)) } - TyKind::Variant(ty, _) => match ty.kind() { - ty::Adt(adt, _) if adt.is_enum() => { - Some(adt.discriminant_for_variant(tcx, variant_index)) - } - _ => bug!("unexpected type: {:?}", ty.kind()), - }, + TyKind::Variant(ty, idx) => { + assert_eq!(*idx, variant_index); + ty.discriminant_for_variant(tcx, variant_index) + } TyKind::Generator(def_id, substs, _) => { Some(substs.as_generator().discriminant_for_variant(*def_id, tcx, variant_index)) } @@ -2031,10 +2029,7 @@ impl<'tcx> TyS<'tcx> { pub fn discriminant_ty(&'tcx self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> { match self.kind() { ty::Adt(adt, _) if adt.is_enum() => adt.repr.discr_type().to_ty(tcx), - ty::Variant(ty, _) => match ty.kind() { - ty::Adt(adt, _) if adt.is_enum() => adt.repr.discr_type().to_ty(tcx), - _ => bug!("unexpected type: {:?}", ty.kind()), - }, + ty::Variant(ty, _) => ty.discriminant_ty(tcx), ty::Generator(_, substs, _) => substs.as_generator().discr_ty(tcx), ty::Param(_) | ty::Projection(_) | ty::Opaque(..) | ty::Infer(ty::TyVar(_)) => { @@ -2188,10 +2183,8 @@ impl<'tcx> TyS<'tcx> { ty::Tuple(tys) => tys.iter().all(|ty| ty.expect_ty().is_trivially_sized(tcx)), ty::Adt(def, _substs) => def.sized_constraint(tcx).is_empty(), - ty::Variant(ty, _) => match ty.kind() { - ty::Adt(def, _) => def.sized_constraint(tcx).is_empty(), - _ => bug!("unxepcted type: {:?}", ty.kind()), - }, + + ty::Variant(ty, _) => ty.is_trivially_sized(tcx), ty::Projection(_) | ty::Param(_) | ty::Opaque(..) => false, @@ -2204,6 +2197,13 @@ impl<'tcx> TyS<'tcx> { } } } + + pub fn strip_variant_type(&self) -> &Self { + match self.kind() { + ty::Variant(ty, ..) 
=> *ty, + _ => self, + } + } } /// Extra information about why we ended up with a particular variance. diff --git a/compiler/rustc_middle/src/ty/util.rs b/compiler/rustc_middle/src/ty/util.rs index 2a5dca06e6963..04dd6ac4c89b7 100644 --- a/compiler/rustc_middle/src/ty/util.rs +++ b/compiler/rustc_middle/src/ty/util.rs @@ -850,7 +850,8 @@ impl<'tcx> ty::TyS<'tcx> { match self.kind() { // Look for an impl of both `PartialStructuralEq` and `StructuralEq`. Adt(..) => tcx.has_structural_eq_impls(self), - Variant(..) => tcx.has_structural_eq_impls(self), + + Variant(ty, _) => ty.is_structural_eq_shallow(tcx), // Primitive types that satisfy `Eq`. Bool | Char | Int(_) | Uint(_) | Str | Never => true, diff --git a/compiler/rustc_middle/src/ty/walk.rs b/compiler/rustc_middle/src/ty/walk.rs index a44889a92a62b..57152d1c553f7 100644 --- a/compiler/rustc_middle/src/ty/walk.rs +++ b/compiler/rustc_middle/src/ty/walk.rs @@ -135,7 +135,7 @@ fn push_inner<'tcx>( parent: GenericArg<'tcx>, ) { match parent.unpack() { - GenericArgKind::Type(parent_ty) => match *parent_ty.kind() { + GenericArgKind::Type(parent_ty) => match *parent_ty.strip_variant_type().kind() { ty::Bool | ty::Char | ty::Int(_) @@ -191,10 +191,6 @@ fn push_inner<'tcx>( | ty::FnDef(_, substs) => { stack.extend(substs.iter().rev()); } - ty::Variant(ty, _) => match ty.kind() { - ty::Adt(_, substs) => stack.extend(substs.iter().rev()), - _ => bug!("unexpected type: {:?}", ty.kind()), - }, ty::GeneratorWitness(ts) => { stack.extend(ts.skip_binder().iter().rev().map(|ty| ty.into())); } @@ -202,6 +198,7 @@ fn push_inner<'tcx>( stack.push(sig.skip_binder().output().into()); stack.extend(sig.skip_binder().inputs().iter().copied().rev().map(|ty| ty.into())); } + ty::Variant(..) 
=> unreachable!(), }, GenericArgKind::Lifetime(_) => {} GenericArgKind::Const(parent_ct) => { diff --git a/compiler/rustc_mir_build/src/build/expr/into.rs b/compiler/rustc_mir_build/src/build/expr/into.rs index e8585a58cf629..53868f2855763 100644 --- a/compiler/rustc_mir_build/src/build/expr/into.rs +++ b/compiler/rustc_mir_build/src/build/expr/into.rs @@ -314,7 +314,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { ref fields, ref base, }) => { - debug!("zhamlin: adt expr into"); // See the notes for `ExprKind::Array` in `as_rvalue` and for // `ExprKind::Borrow` above. let is_union = adt_def.is_union(); @@ -377,7 +376,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { inferred_ty, }) }); - debug!("zhamlin: adt user type: {:?}", user_ty); let adt = Box::new(AggregateKind::Adt( adt_def, variant_index, diff --git a/compiler/rustc_mir_build/src/thir/pattern/mod.rs b/compiler/rustc_mir_build/src/thir/pattern/mod.rs index 1953876686468..ee7d764adfa44 100644 --- a/compiler/rustc_mir_build/src/thir/pattern/mod.rs +++ b/compiler/rustc_mir_build/src/thir/pattern/mod.rs @@ -390,12 +390,8 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> { let enum_id = self.tcx.parent(variant_id).unwrap(); let adt_def = self.tcx.adt_def(enum_id); if adt_def.is_enum() { - let substs = match ty.kind() { + let substs = match ty.strip_variant_type().kind() { ty::Adt(_, substs) | ty::FnDef(_, substs) => substs, - ty::Variant(ty, _) => match ty.kind() { - ty::Adt(_, substs) => substs, - _ => bug!("unexpected type `{:?}`", ty.kind()) - } ty::Error(_) => { // Avoid ICE (#50585) return PatKind::Wild; diff --git a/compiler/rustc_mir_dataflow/src/elaborate_drops.rs b/compiler/rustc_mir_dataflow/src/elaborate_drops.rs index 229e930fae4fa..1b0e9c250849b 100644 --- a/compiler/rustc_mir_dataflow/src/elaborate_drops.rs +++ b/compiler/rustc_mir_dataflow/src/elaborate_drops.rs @@ -857,7 +857,7 @@ where /// ADT, both in the success case or if one of the destructors fail. 
fn open_drop(&mut self) -> BasicBlock { let ty = self.place_ty(self.place); - match ty.kind() { + match ty.strip_variant_type().kind() { ty::Closure(_, substs) => { let tys: Vec<_> = substs.as_closure().upvar_tys().collect(); self.open_drop_for_tuple(&tys) @@ -883,17 +883,6 @@ where self.open_drop_for_adt(def, substs) } } - - ty::Variant(ty, _) => match ty.kind() { - ty::Adt(def, substs) => { - if def.is_box() { - self.open_drop_for_box(def, substs) - } else { - self.open_drop_for_adt(def, substs) - } - }, - _ => bug!("unexpected type `{:?}`", ty.kind()), - } ty::Dynamic(..) => self.complete_drop(self.succ, self.unwind), ty::Array(ety, size) => { let size = size.try_eval_usize(self.tcx(), self.elaborator.param_env()); @@ -901,6 +890,7 @@ where } ty::Slice(ety) => self.open_drop_for_array(ety, None), + ty::Variant(..) => unreachable!(), _ => bug!("open drop from non-ADT `{:?}`", ty), } } diff --git a/compiler/rustc_mir_dataflow/src/impls/mod.rs b/compiler/rustc_mir_dataflow/src/impls/mod.rs index 06edcc44c4430..511d87b5123ec 100644 --- a/compiler/rustc_mir_dataflow/src/impls/mod.rs +++ b/compiler/rustc_mir_dataflow/src/impls/mod.rs @@ -689,17 +689,14 @@ fn switch_on_enum_discriminant( Some(mir::StatementKind::Assign(box (lhs, mir::Rvalue::Discriminant(discriminated)))) if *lhs == switch_on => { - match &discriminated.ty(body, tcx).ty.kind() { + match &discriminated.ty(body, tcx).ty.strip_variant_type().kind() { ty::Adt(def, _) => Some((*discriminated, def)), - ty::Variant(ty, _) => match ty.kind() { - ty::Adt(def, _) => Some((*discriminated, def)), - _ => bug!("unexpected type: {:?}", ty.kind()), - } // `Rvalue::Discriminant` is also used to get the active yield point for a // generator, but we do not need edge-specific effects in that case. This may // change in the future. ty::Generator(..) => None, + ty::Variant(..) 
=> unreachable!(), t => bug!("`discriminant` called on unexpected type {:?}", t), } } diff --git a/compiler/rustc_privacy/src/lib.rs b/compiler/rustc_privacy/src/lib.rs index 02d391bf8fbdb..faeab24fae9ea 100644 --- a/compiler/rustc_privacy/src/lib.rs +++ b/compiler/rustc_privacy/src/lib.rs @@ -213,20 +213,7 @@ where } } } - ty::Variant(ty, _) => match ty.kind() { - ty::Adt(&ty::AdtDef { did: def_id, .. }, _) => { - self.def_id_visitor.visit_def_id(def_id, "type", &ty)?; - if self.def_id_visitor.shallow() { - return ControlFlow::CONTINUE; - } - if let Some(assoc_item) = tcx.opt_associated_item(def_id) { - if let ty::ImplContainer(impl_def_id) = assoc_item.container { - tcx.type_of(impl_def_id).visit_with(self)?; - } - } - } - _ => bug!("unexpected type: {:?}", ty.kind()), - } + ty::Variant(ty, _) => self.visit_ty(ty)?, ty::Projection(proj) => { if self.def_id_visitor.skip_assoc_tys() { // Visitors searching for minimal visibility/reachability want to diff --git a/compiler/rustc_trait_selection/src/traits/query/dropck_outlives.rs b/compiler/rustc_trait_selection/src/traits/query/dropck_outlives.rs index 147b67c2fa4df..365cc48a94328 100644 --- a/compiler/rustc_trait_selection/src/traits/query/dropck_outlives.rs +++ b/compiler/rustc_trait_selection/src/traits/query/dropck_outlives.rs @@ -126,21 +126,7 @@ pub fn trivial_dropck_outlives<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> bool { } } - ty::Variant(ty, _) => match ty.kind() { - ty::Adt(def, _) => { - if Some(def.did) == tcx.lang_items().manually_drop() { - // `ManuallyDrop` never has a dtor. - true - } else { - // Other types might. Moreover, PhantomData doesn't - // have a dtor, but it is considered to own its - // content, so it is non-trivial. Unions can have `impl Drop`, - // and hence are non-trivial as well. - false - } - }, - _ => bug!("unexpected type: {:?}", ty.kind()), - } + ty::Variant(ty, _) => trivial_dropck_outlives(tcx, ty), // The following *might* require a destructor: needs deeper inspection. 
ty::Dynamic(..) diff --git a/compiler/rustc_trait_selection/src/traits/select/mod.rs b/compiler/rustc_trait_selection/src/traits/select/mod.rs index 139b8383ec083..c6095389ed152 100644 --- a/compiler/rustc_trait_selection/src/traits/select/mod.rs +++ b/compiler/rustc_trait_selection/src/traits/select/mod.rs @@ -1723,7 +1723,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { // NOTE: binder moved to (*) let self_ty = self.infcx.shallow_resolve(obligation.predicate.skip_binder().self_ty()); - match self_ty.kind() { + match self_ty.strip_variant_type().kind() { ty::Infer(ty::IntVar(_) | ty::FloatVar(_)) | ty::Uint(_) | ty::Int(_) @@ -1761,16 +1761,6 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { }), ) } - ty::Variant(ty, _) => match ty.kind() { - ty::Adt(def, substs) => { - let sized_crit = def.sized_constraint(self.tcx()); - // (*) binder moved here - Where(obligation.predicate.rebind({ - sized_crit.iter().map(|ty| ty.subst(self.tcx(), substs)).collect() - })) - }, - _ => bug!("unexpected type: {:?}", ty.kind()), - } ty::Projection(_) | ty::Param(_) | ty::Opaque(..) => None, ty::Infer(ty::TyVar(_)) => Ambiguous, @@ -1780,6 +1770,8 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { | ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => { bug!("asked to assemble builtin bounds of unexpected type: {:?}", self_ty); } + + ty::Variant(..) 
=> unreachable!(), } } @@ -1875,7 +1867,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { &self, t: ty::Binder<'tcx, Ty<'tcx>>, ) -> ty::Binder<'tcx, Vec>> { - match *t.skip_binder().kind() { + match *t.skip_binder().strip_variant_type().kind() { ty::Uint(_) | ty::Int(_) | ty::Bool @@ -1931,16 +1923,8 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { ty::Adt(def, substs) => { t.rebind(def.all_fields().map(|f| f.ty(self.tcx(), substs)).collect()) } - ty::Variant(ty, _) => match ty.kind() { - ty::Adt(def, substs) => { - if def.is_phantom_data() { - t.rebind(substs.types().collect()) - } else { - t.rebind(def.all_fields().map(|f| f.ty(self.tcx(), substs)).collect()) - } - }, - _ => bug!("unexpected type: {:?}", ty.kind()), - } + + ty::Variant(..) => unreachable!(), ty::Opaque(def_id, substs) => { // We can resolve the `impl Trait` to its concrete type, diff --git a/compiler/rustc_trait_selection/src/traits/wf.rs b/compiler/rustc_trait_selection/src/traits/wf.rs index 1609031936f7a..af8dca80d99d3 100644 --- a/compiler/rustc_trait_selection/src/traits/wf.rs +++ b/compiler/rustc_trait_selection/src/traits/wf.rs @@ -490,7 +490,7 @@ impl<'a, 'tcx> WfPredicates<'a, 'tcx> { } }; - match *ty.kind() { + match *ty.strip_variant_type().kind() { ty::Bool | ty::Char | ty::Int(..) @@ -545,14 +545,7 @@ impl<'a, 'tcx> WfPredicates<'a, 'tcx> { self.out.extend(obligations); } - ty::Variant(ty, _) => match ty.kind() { - ty::Adt(def, substs) => { - let obligations = self.nominal_obligations(def.did, substs); - self.out.extend(obligations); - }, - _ => bug!("unexpected type: {:?}", ty.kind()), - } - + ty::Variant(..) 
=> unreachable!(), ty::FnDef(did, substs) => { let obligations = self.nominal_obligations(did, substs); self.out.extend(obligations); diff --git a/compiler/rustc_traits/src/dropck_outlives.rs b/compiler/rustc_traits/src/dropck_outlives.rs index 1b51e862bcc6b..bff62526580c3 100644 --- a/compiler/rustc_traits/src/dropck_outlives.rs +++ b/compiler/rustc_traits/src/dropck_outlives.rs @@ -283,18 +283,7 @@ fn dtorck_constraint_for_ty<'tcx>( constraints.overflows.extend(overflows.subst(tcx, substs)); } - ty::Variant(ty, _) => match ty.kind() { - ty::Adt(def, substs) => { - let DtorckConstraint { dtorck_types, outlives, overflows } = - tcx.at(span).adt_dtorck_constraint(def.did)?; - // FIXME: we can try to recursively `dtorck_constraint_on_ty` - // there, but that needs some way to handle cycles. - constraints.dtorck_types.extend(dtorck_types.subst(tcx, substs)); - constraints.outlives.extend(outlives.subst(tcx, substs)); - constraints.overflows.extend(overflows.subst(tcx, substs)); - }, - _ => bug!("unexpected type: {:?}", ty.kind()), - } + ty::Variant(ty, _) => dtorck_constraint_for_ty(tcx, span, for_ty, depth, ty, constraints)?, // Objects must be alive in order for their destructor // to be called. 
diff --git a/compiler/rustc_typeck/src/check/expr.rs b/compiler/rustc_typeck/src/check/expr.rs index 71275ac1f654e..720ea02348905 100644 --- a/compiler/rustc_typeck/src/check/expr.rs +++ b/compiler/rustc_typeck/src/check/expr.rs @@ -1767,7 +1767,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { ty::Variant(ty, idx) => match ty.kind() { ty::Adt(base_def, substs) if base_def.is_enum() => { let variant = base_def.variants.get(*idx).expect("variant doesn't exist"); - let (adjusted_ident, def_scope) = + let (adjusted_ident, _) = self.tcx.adjust_ident_and_get_scope(field, base_def.did, self.body_id); let fstr = field.as_str(); // handle tuple like enum variant diff --git a/compiler/rustc_typeck/src/variance/constraints.rs b/compiler/rustc_typeck/src/variance/constraints.rs index f7805639fdf27..0bd9fbfc68567 100644 --- a/compiler/rustc_typeck/src/variance/constraints.rs +++ b/compiler/rustc_typeck/src/variance/constraints.rs @@ -285,9 +285,8 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> { self.add_constraints_from_substs(current, def.did, substs, variance); } - ty::Variant(ty, _) => match ty.kind() { - ty::Adt(def, substs) => self.add_constraints_from_substs(current, def.did, substs, variance), - _ => bug!("unexpected type: {:?}", ty.kind()), + ty::Variant(ty, _) => { + self.add_constraints_from_ty(current, ty, variance); } ty::Projection(ref data) => {