From 99171735753883fb8dfde343e9c0e8f0e509bbef Mon Sep 17 00:00:00 2001
From: Moulins <arthur.heuillard@orange.fr>
Date: Fri, 28 Feb 2025 04:57:17 +0100
Subject: [PATCH 1/5] Remove most manual LayoutData creations and move them to
 `rustc_abi`

...either as:
- methods on LayoutCalculator, for fallible operations;
- constructors on LayoutData, for infallible ones.
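
As a rough sketch of the resulting call shapes (illustrative only; `cx` is
assumed to implement HasDataLayout and `calc` to be a LayoutCalculator over
it, with names taken from the hunks below):

    // Infallible: plain constructors on LayoutData.
    let never = LayoutData::<FieldIdx, VariantIdx>::never_type(&cx);
    let pair = LayoutData::<FieldIdx, VariantIdx>::scalar_pair(&cx, a, b);

    // Fallible: methods on LayoutCalculator that return a Result.
    let array = calc.array_like::<FieldIdx, VariantIdx, ()>(&elt, Some(n))?; // may fail with SizeOverflow
    let slice = calc.array_like::<FieldIdx, VariantIdx, ()>(&elt, None)?; // None = unsized (slice)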
---
 compiler/rustc_abi/src/layout.rs              |  75 +++------
 compiler/rustc_abi/src/layout/simple.rs       | 148 ++++++++++++++++++
 compiler/rustc_abi/src/lib.rs                 |  42 -----
 compiler/rustc_middle/src/ty/layout.rs        |  40 ++---
 compiler/rustc_ty_utils/src/layout.rs         |  92 +++--------
 .../rust-analyzer/crates/hir-ty/src/layout.rs |  98 +++---------
 6 files changed, 228 insertions(+), 267 deletions(-)
 create mode 100644 compiler/rustc_abi/src/layout/simple.rs

diff --git a/compiler/rustc_abi/src/layout.rs b/compiler/rustc_abi/src/layout.rs
index d3ae6a29f10f4..42ecbce8117cb 100644
--- a/compiler/rustc_abi/src/layout.rs
+++ b/compiler/rustc_abi/src/layout.rs
@@ -12,6 +12,8 @@ use crate::{
     Variants, WrappingRange,
 };
 
+mod simple;
+
 #[cfg(feature = "nightly")]
 mod ty;
 
@@ -102,41 +104,27 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
         Self { cx }
     }
 
-    pub fn scalar_pair<FieldIdx: Idx, VariantIdx: Idx>(
+    pub fn array_like<FieldIdx: Idx, VariantIdx: Idx, F>(
         &self,
-        a: Scalar,
-        b: Scalar,
-    ) -> LayoutData<FieldIdx, VariantIdx> {
-        let dl = self.cx.data_layout();
-        let b_align = b.align(dl);
-        let align = a.align(dl).max(b_align).max(dl.aggregate_align);
-        let b_offset = a.size(dl).align_to(b_align.abi);
-        let size = (b_offset + b.size(dl)).align_to(align.abi);
-
-        // HACK(nox): We iter on `b` and then `a` because `max_by_key`
-        // returns the last maximum.
-        let largest_niche = Niche::from_scalar(dl, b_offset, b)
-            .into_iter()
-            .chain(Niche::from_scalar(dl, Size::ZERO, a))
-            .max_by_key(|niche| niche.available(dl));
-
-        let combined_seed = a.size(&self.cx).bytes().wrapping_add(b.size(&self.cx).bytes());
+        element: &LayoutData<FieldIdx, VariantIdx>,
+        count_if_sized: Option<u64>, // None for slices
+    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
+        let count = count_if_sized.unwrap_or(0);
+        let size =
+            element.size.checked_mul(count, &self.cx).ok_or(LayoutCalculatorError::SizeOverflow)?;
 
-        LayoutData {
+        Ok(LayoutData {
             variants: Variants::Single { index: VariantIdx::new(0) },
-            fields: FieldsShape::Arbitrary {
-                offsets: [Size::ZERO, b_offset].into(),
-                memory_index: [0, 1].into(),
-            },
-            backend_repr: BackendRepr::ScalarPair(a, b),
-            largest_niche,
-            uninhabited: false,
-            align,
+            fields: FieldsShape::Array { stride: element.size, count },
+            backend_repr: BackendRepr::Memory { sized: count_if_sized.is_some() },
+            largest_niche: element.largest_niche.filter(|_| count != 0),
+            uninhabited: element.uninhabited && count != 0,
+            align: element.align,
             size,
             max_repr_align: None,
-            unadjusted_abi_align: align.abi,
-            randomization_seed: Hash64::new(combined_seed),
-        }
+            unadjusted_abi_align: element.align.abi,
+            randomization_seed: element.randomization_seed.wrapping_add(Hash64::new(count)),
+        })
     }
 
     pub fn univariant<
@@ -214,25 +202,6 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
         layout
     }
 
-    pub fn layout_of_never_type<FieldIdx: Idx, VariantIdx: Idx>(
-        &self,
-    ) -> LayoutData<FieldIdx, VariantIdx> {
-        let dl = self.cx.data_layout();
-        // This is also used for uninhabited enums, so we use `Variants::Empty`.
-        LayoutData {
-            variants: Variants::Empty,
-            fields: FieldsShape::Primitive,
-            backend_repr: BackendRepr::Memory { sized: true },
-            largest_niche: None,
-            uninhabited: true,
-            align: dl.i8_align,
-            size: Size::ZERO,
-            max_repr_align: None,
-            unadjusted_abi_align: dl.i8_align.abi,
-            randomization_seed: Hash64::ZERO,
-        }
-    }
-
     pub fn layout_of_struct_or_enum<
         'a,
         FieldIdx: Idx,
@@ -260,7 +229,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
             Some(present_first) => present_first,
             // Uninhabited because it has no variants, or only absent ones.
             None if is_enum => {
-                return Ok(self.layout_of_never_type());
+                return Ok(LayoutData::never_type(&self.cx));
             }
             // If it's a struct, still compute a layout so that we can still compute the
             // field offsets.
@@ -949,7 +918,8 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                     // Common prim might be uninit.
                     Scalar::Union { value: prim }
                 };
-                let pair = self.scalar_pair::<FieldIdx, VariantIdx>(tag, prim_scalar);
+                let pair =
+                    LayoutData::<FieldIdx, VariantIdx>::scalar_pair(&self.cx, tag, prim_scalar);
                 let pair_offsets = match pair.fields {
                     FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                         assert_eq!(memory_index.raw, [0, 1]);
@@ -1341,7 +1311,8 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                             } else {
                                 ((j, b), (i, a))
                             };
-                            let pair = self.scalar_pair::<FieldIdx, VariantIdx>(a, b);
+                            let pair =
+                                LayoutData::<FieldIdx, VariantIdx>::scalar_pair(&self.cx, a, b);
                             let pair_offsets = match pair.fields {
                                 FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                                     assert_eq!(memory_index.raw, [0, 1]);
diff --git a/compiler/rustc_abi/src/layout/simple.rs b/compiler/rustc_abi/src/layout/simple.rs
new file mode 100644
index 0000000000000..0d0706defc2e5
--- /dev/null
+++ b/compiler/rustc_abi/src/layout/simple.rs
@@ -0,0 +1,148 @@
+use std::num::NonZero;
+
+use rustc_hashes::Hash64;
+use rustc_index::{Idx, IndexVec};
+
+use crate::{
+    BackendRepr, FieldsShape, HasDataLayout, LayoutData, Niche, Primitive, Scalar, Size, Variants,
+};
+
+/// "Simple" layout constructors that cannot fail.
+impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
+    pub fn unit<C: HasDataLayout>(cx: &C, sized: bool) -> Self {
+        let dl = cx.data_layout();
+        LayoutData {
+            variants: Variants::Single { index: VariantIdx::new(0) },
+            fields: FieldsShape::Arbitrary {
+                offsets: IndexVec::new(),
+                memory_index: IndexVec::new(),
+            },
+            backend_repr: BackendRepr::Memory { sized },
+            largest_niche: None,
+            uninhabited: false,
+            align: dl.i8_align,
+            size: Size::ZERO,
+            max_repr_align: None,
+            unadjusted_abi_align: dl.i8_align.abi,
+            randomization_seed: Hash64::new(0),
+        }
+    }
+
+    pub fn never_type<C: HasDataLayout>(cx: &C) -> Self {
+        let dl = cx.data_layout();
+        // This is also used for uninhabited enums, so we use `Variants::Empty`.
+        LayoutData {
+            variants: Variants::Empty,
+            fields: FieldsShape::Primitive,
+            backend_repr: BackendRepr::Memory { sized: true },
+            largest_niche: None,
+            uninhabited: true,
+            align: dl.i8_align,
+            size: Size::ZERO,
+            max_repr_align: None,
+            unadjusted_abi_align: dl.i8_align.abi,
+            randomization_seed: Hash64::ZERO,
+        }
+    }
+
+    pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self {
+        let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar);
+        let size = scalar.size(cx);
+        let align = scalar.align(cx);
+
+        let range = scalar.valid_range(cx);
+
+        // All primitive types for which we don't have subtype coercions should get a distinct seed,
+        // so that types wrapping them can use randomization to arrive at distinct layouts.
+        //
+        // Some type information is already lost at this point, so as an approximation we derive
+        // the seed from what remains. For example on 64-bit targets usize and u64 can no longer
+        // be distinguished.
+        let randomization_seed = size
+            .bytes()
+            .wrapping_add(
+                match scalar.primitive() {
+                    Primitive::Int(_, true) => 1,
+                    Primitive::Int(_, false) => 2,
+                    Primitive::Float(_) => 3,
+                    Primitive::Pointer(_) => 4,
+                } << 32,
+            )
+            // distinguishes references from pointers
+            .wrapping_add((range.start as u64).rotate_right(16))
+            // distinguishes char from u32 and bool from u8
+            .wrapping_add((range.end as u64).rotate_right(16));
+
+        LayoutData {
+            variants: Variants::Single { index: VariantIdx::new(0) },
+            fields: FieldsShape::Primitive,
+            backend_repr: BackendRepr::Scalar(scalar),
+            largest_niche,
+            uninhabited: false,
+            size,
+            align,
+            max_repr_align: None,
+            unadjusted_abi_align: align.abi,
+            randomization_seed: Hash64::new(randomization_seed),
+        }
+    }
+
+    pub fn scalar_pair<C: HasDataLayout>(cx: &C, a: Scalar, b: Scalar) -> Self {
+        let dl = cx.data_layout();
+        let b_align = b.align(dl);
+        let align = a.align(dl).max(b_align).max(dl.aggregate_align);
+        let b_offset = a.size(dl).align_to(b_align.abi);
+        let size = (b_offset + b.size(dl)).align_to(align.abi);
+
+        // HACK(nox): We iter on `b` and then `a` because `max_by_key`
+        // returns the last maximum.
+        let largest_niche = Niche::from_scalar(dl, b_offset, b)
+            .into_iter()
+            .chain(Niche::from_scalar(dl, Size::ZERO, a))
+            .max_by_key(|niche| niche.available(dl));
+
+        let combined_seed = a.size(dl).bytes().wrapping_add(b.size(dl).bytes());
+
+        LayoutData {
+            variants: Variants::Single { index: VariantIdx::new(0) },
+            fields: FieldsShape::Arbitrary {
+                offsets: [Size::ZERO, b_offset].into(),
+                memory_index: [0, 1].into(),
+            },
+            backend_repr: BackendRepr::ScalarPair(a, b),
+            largest_niche,
+            uninhabited: false,
+            align,
+            size,
+            max_repr_align: None,
+            unadjusted_abi_align: align.abi,
+            randomization_seed: Hash64::new(combined_seed),
+        }
+    }
+
+    /// Returns a dummy layout for an uninhabited variant.
+    ///
+    /// Uninhabited variants get pruned as part of the layout calculation,
+    /// so this can be used after the fact to reconstitute a layout.
+    pub fn uninhabited_variant<C: HasDataLayout>(cx: &C, index: VariantIdx, fields: usize) -> Self {
+        let dl = cx.data_layout();
+        LayoutData {
+            variants: Variants::Single { index },
+            fields: match NonZero::new(fields) {
+                Some(fields) => FieldsShape::Union(fields),
+                None => FieldsShape::Arbitrary {
+                    offsets: IndexVec::new(),
+                    memory_index: IndexVec::new(),
+                },
+            },
+            backend_repr: BackendRepr::Memory { sized: true },
+            largest_niche: None,
+            uninhabited: true,
+            align: dl.i8_align,
+            size: Size::ZERO,
+            max_repr_align: None,
+            unadjusted_abi_align: dl.i8_align.abi,
+            randomization_seed: Hash64::ZERO,
+        }
+    }
+}
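
For readers skimming the new constructors: the seed derivation in `scalar`
can be sanity-checked in isolation. A minimal sketch (the standalone `seed`
helper is hypothetical, merely restating the formula above) showing that
bool and u8, both one-byte unsigned ints differing only in valid range,
still get distinct seeds:

    // Hypothetical restatement of the seed formula from `scalar` above.
    fn seed(size_bytes: u64, prim_tag: u64, start: u64, end: u64) -> u64 {
        size_bytes
            .wrapping_add(prim_tag << 32)
            .wrapping_add(start.rotate_right(16))
            .wrapping_add(end.rotate_right(16))
    }

    // Both are 1-byte unsigned ints (tag 2); only the valid range differs.
    assert_ne!(seed(1, 2, 0, 255), seed(1, 2, 0, 1)); // u8 vs bool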
diff --git a/compiler/rustc_abi/src/lib.rs b/compiler/rustc_abi/src/lib.rs
index a59dc870aa33d..db9a26c3ef7f0 100644
--- a/compiler/rustc_abi/src/lib.rs
+++ b/compiler/rustc_abi/src/lib.rs
@@ -1744,48 +1744,6 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
     pub fn is_uninhabited(&self) -> bool {
         self.uninhabited
     }
-
-    pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self {
-        let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar);
-        let size = scalar.size(cx);
-        let align = scalar.align(cx);
-
-        let range = scalar.valid_range(cx);
-
-        // All primitive types for which we don't have subtype coercions should get a distinct seed,
-        // so that types wrapping them can use randomization to arrive at distinct layouts.
-        //
-        // Some type information is already lost at this point, so as an approximation we derive
-        // the seed from what remains. For example on 64-bit targets usize and u64 can no longer
-        // be distinguished.
-        let randomization_seed = size
-            .bytes()
-            .wrapping_add(
-                match scalar.primitive() {
-                    Primitive::Int(_, true) => 1,
-                    Primitive::Int(_, false) => 2,
-                    Primitive::Float(_) => 3,
-                    Primitive::Pointer(_) => 4,
-                } << 32,
-            )
-            // distinguishes references from pointers
-            .wrapping_add((range.start as u64).rotate_right(16))
-            // distinguishes char from u32 and bool from u8
-            .wrapping_add((range.end as u64).rotate_right(16));
-
-        LayoutData {
-            variants: Variants::Single { index: VariantIdx::new(0) },
-            fields: FieldsShape::Primitive,
-            backend_repr: BackendRepr::Scalar(scalar),
-            largest_niche,
-            uninhabited: false,
-            size,
-            align,
-            max_repr_align: None,
-            unadjusted_abi_align: align.abi,
-            randomization_seed: Hash64::new(randomization_seed),
-        }
-    }
 }
 
 impl<FieldIdx: Idx, VariantIdx: Idx> fmt::Debug for LayoutData<FieldIdx, VariantIdx>
diff --git a/compiler/rustc_middle/src/ty/layout.rs b/compiler/rustc_middle/src/ty/layout.rs
index 272bb0cc915f9..d32c524a427c0 100644
--- a/compiler/rustc_middle/src/ty/layout.rs
+++ b/compiler/rustc_middle/src/ty/layout.rs
@@ -1,20 +1,17 @@
-use std::num::NonZero;
 use std::ops::Bound;
 use std::{cmp, fmt};
 
 use rustc_abi::{
-    AddressSpace, Align, BackendRepr, ExternAbi, FieldIdx, FieldsShape, HasDataLayout, LayoutData,
-    PointeeInfo, PointerKind, Primitive, ReprOptions, Scalar, Size, TagEncoding, TargetDataLayout,
+    AddressSpace, Align, ExternAbi, FieldIdx, FieldsShape, HasDataLayout, LayoutData, PointeeInfo,
+    PointerKind, Primitive, ReprOptions, Scalar, Size, TagEncoding, TargetDataLayout,
     TyAbiInterface, VariantIdx, Variants,
 };
 use rustc_error_messages::DiagMessage;
 use rustc_errors::{
     Diag, DiagArgValue, DiagCtxtHandle, Diagnostic, EmissionGuarantee, IntoDiagArg, Level,
 };
-use rustc_hashes::Hash64;
 use rustc_hir::LangItem;
 use rustc_hir::def_id::DefId;
-use rustc_index::IndexVec;
 use rustc_macros::{HashStable, TyDecodable, TyEncodable, extension};
 use rustc_session::config::OptLevel;
 use rustc_span::{DUMMY_SP, ErrorGuaranteed, Span, Symbol, sym};
@@ -762,11 +759,9 @@ where
         variant_index: VariantIdx,
     ) -> TyAndLayout<'tcx> {
         let layout = match this.variants {
-            Variants::Single { index }
-                // If all variants but one are uninhabited, the variant layout is the enum layout.
-                if index == variant_index =>
-            {
-                this.layout
+            // If all variants but one are uninhabited, the variant layout is the enum layout.
+            Variants::Single { index } if index == variant_index => {
+                return this;
             }
 
             Variants::Single { .. } | Variants::Empty => {
@@ -783,29 +778,18 @@ where
                 }
 
                 let fields = match this.ty.kind() {
-                    ty::Adt(def, _) if def.variants().is_empty() =>
-                        bug!("for_variant called on zero-variant enum {}", this.ty),
+                    ty::Adt(def, _) if def.variants().is_empty() => {
+                        bug!("for_variant called on zero-variant enum {}", this.ty)
+                    }
                     ty::Adt(def, _) => def.variant(variant_index).fields.len(),
                     _ => bug!("`ty_and_layout_for_variant` on unexpected type {}", this.ty),
                 };
-                tcx.mk_layout(LayoutData {
-                    variants: Variants::Single { index: variant_index },
-                    fields: match NonZero::new(fields) {
-                        Some(fields) => FieldsShape::Union(fields),
-                        None => FieldsShape::Arbitrary { offsets: IndexVec::new(), memory_index: IndexVec::new() },
-                    },
-                    backend_repr: BackendRepr::Memory { sized: true },
-                    largest_niche: None,
-                    uninhabited: true,
-                    align: tcx.data_layout.i8_align,
-                    size: Size::ZERO,
-                    max_repr_align: None,
-                    unadjusted_abi_align: tcx.data_layout.i8_align.abi,
-                    randomization_seed: Hash64::ZERO,
-                })
+                tcx.mk_layout(LayoutData::uninhabited_variant(cx, variant_index, fields))
             }
 
-            Variants::Multiple { ref variants, .. } => cx.tcx().mk_layout(variants[variant_index].clone()),
+            Variants::Multiple { ref variants, .. } => {
+                cx.tcx().mk_layout(variants[variant_index].clone())
+            }
         };
 
         assert_eq!(*layout.variants(), Variants::Single { index: variant_index });
diff --git a/compiler/rustc_ty_utils/src/layout.rs b/compiler/rustc_ty_utils/src/layout.rs
index a53f0538c5816..45c2639280f36 100644
--- a/compiler/rustc_ty_utils/src/layout.rs
+++ b/compiler/rustc_ty_utils/src/layout.rs
@@ -188,6 +188,10 @@ fn layout_of_uncached<'tcx>(
 
     let tcx = cx.tcx();
     let dl = cx.data_layout();
+    let map_layout = |result: Result<_, _>| match result {
+        Ok(layout) => Ok(tcx.mk_layout(layout)),
+        Err(err) => Err(map_error(cx, ty, err)),
+    };
     let scalar_unit = |value: Primitive| {
         let size = value.size(dl);
         assert!(size.bits() <= 128);
@@ -258,7 +262,7 @@ fn layout_of_uncached<'tcx>(
         }
 
         // The never type.
-        ty::Never => tcx.mk_layout(cx.calc.layout_of_never_type()),
+        ty::Never => tcx.mk_layout(LayoutData::never_type(cx)),
 
         // Potentially-wide pointers.
         ty::Ref(_, pointee, _) | ty::RawPtr(pointee, _) => {
@@ -329,7 +333,7 @@ fn layout_of_uncached<'tcx>(
             };
 
             // Effectively a (ptr, meta) tuple.
-            tcx.mk_layout(cx.calc.scalar_pair(data_ptr, metadata))
+            tcx.mk_layout(LayoutData::scalar_pair(cx, data_ptr, metadata))
         }
 
         ty::Dynamic(_, _, ty::DynStar) => {
@@ -337,7 +341,7 @@ fn layout_of_uncached<'tcx>(
             data.valid_range_mut().start = 0;
             let mut vtable = scalar_unit(Pointer(AddressSpace::DATA));
             vtable.valid_range_mut().start = 1;
-            tcx.mk_layout(cx.calc.scalar_pair(data, vtable))
+            tcx.mk_layout(LayoutData::scalar_pair(cx, data, vtable))
         }
 
         // Arrays and slices.
@@ -347,71 +351,29 @@ fn layout_of_uncached<'tcx>(
                 .ok_or_else(|| error(cx, LayoutError::Unknown(ty)))?;
 
             let element = cx.layout_of(element)?;
-            let size = element
-                .size
-                .checked_mul(count, dl)
-                .ok_or_else(|| error(cx, LayoutError::SizeOverflow(ty)))?;
-
-            let abi = BackendRepr::Memory { sized: true };
-
-            let largest_niche = if count != 0 { element.largest_niche } else { None };
-            let uninhabited = if count != 0 { element.uninhabited } else { false };
-
-            tcx.mk_layout(LayoutData {
-                variants: Variants::Single { index: FIRST_VARIANT },
-                fields: FieldsShape::Array { stride: element.size, count },
-                backend_repr: abi,
-                largest_niche,
-                uninhabited,
-                align: element.align,
-                size,
-                max_repr_align: None,
-                unadjusted_abi_align: element.align.abi,
-                randomization_seed: element.randomization_seed.wrapping_add(Hash64::new(count)),
-            })
+            map_layout(cx.calc.array_like(&element, Some(count)))?
         }
         ty::Slice(element) => {
             let element = cx.layout_of(element)?;
-            tcx.mk_layout(LayoutData {
-                variants: Variants::Single { index: FIRST_VARIANT },
-                fields: FieldsShape::Array { stride: element.size, count: 0 },
-                backend_repr: BackendRepr::Memory { sized: false },
-                largest_niche: None,
-                uninhabited: false,
-                align: element.align,
-                size: Size::ZERO,
-                max_repr_align: None,
-                unadjusted_abi_align: element.align.abi,
-                // adding a randomly chosen value to distinguish slices
-                randomization_seed: element
-                    .randomization_seed
-                    .wrapping_add(Hash64::new(0x2dcba99c39784102)),
-            })
+            map_layout(cx.calc.array_like(&element, None).map(|mut layout| {
+                // a randomly chosen value to distinguish slices
+                layout.randomization_seed = Hash64::new(0x2dcba99c39784102);
+                layout
+            }))?
+        }
+        ty::Str => {
+            let element = scalar(Int(I8, false));
+            map_layout(cx.calc.array_like(&element, None).map(|mut layout| {
+                // another random value
+                layout.randomization_seed = Hash64::new(0xc1325f37d127be22);
+                layout
+            }))?
         }
-        ty::Str => tcx.mk_layout(LayoutData {
-            variants: Variants::Single { index: FIRST_VARIANT },
-            fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
-            backend_repr: BackendRepr::Memory { sized: false },
-            largest_niche: None,
-            uninhabited: false,
-            align: dl.i8_align,
-            size: Size::ZERO,
-            max_repr_align: None,
-            unadjusted_abi_align: dl.i8_align.abi,
-            // another random value
-            randomization_seed: Hash64::new(0xc1325f37d127be22),
-        }),
 
         // Odd unit types.
-        ty::FnDef(..) => univariant(IndexSlice::empty(), StructKind::AlwaysSized)?,
-        ty::Dynamic(_, _, ty::Dyn) | ty::Foreign(..) => {
-            let mut unit =
-                univariant_uninterned(cx, ty, IndexSlice::empty(), StructKind::AlwaysSized)?;
-            match unit.backend_repr {
-                BackendRepr::Memory { ref mut sized } => *sized = false,
-                _ => bug!(),
-            }
-            tcx.mk_layout(unit)
+        ty::FnDef(..) | ty::Dynamic(_, _, ty::Dyn) | ty::Foreign(..) => {
+            let sized = matches!(ty.kind(), ty::FnDef(..));
+            tcx.mk_layout(LayoutData::unit(cx, sized))
         }
 
         ty::Coroutine(def_id, args) => coroutine_layout(cx, ty, def_id, args)?,
@@ -545,11 +507,7 @@ fn layout_of_uncached<'tcx>(
                     return Err(error(cx, LayoutError::ReferencesError(guar)));
                 }
 
-                return Ok(tcx.mk_layout(
-                    cx.calc
-                        .layout_of_union(&def.repr(), &variants)
-                        .map_err(|err| map_error(cx, ty, err))?,
-                ));
+                return map_layout(cx.calc.layout_of_union(&def.repr(), &variants));
             }
 
             let get_discriminant_type =
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/layout.rs b/src/tools/rust-analyzer/crates/hir-ty/src/layout.rs
index 7af31dabe45b3..05f38cd09e224 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/layout.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/layout.rs
@@ -15,7 +15,7 @@ use hir_def::{
 use la_arena::{Idx, RawIdx};
 use rustc_abi::AddressSpace;
 use rustc_hashes::Hash64;
-use rustc_index::{IndexSlice, IndexVec};
+use rustc_index::IndexVec;
 
 use triomphe::Arc;
 
@@ -190,7 +190,8 @@ pub fn layout_of_ty_query(
     let dl = &*target;
     let cx = LayoutCx::new(dl);
     let ty = normalize(db, trait_env.clone(), ty);
-    let result = match ty.kind(Interner) {
+    let kind = ty.kind(Interner);
+    let result = match kind {
         TyKind::Adt(AdtId(def), subst) => {
             if let hir_def::AdtId::StructId(s) = def {
                 let data = db.struct_data(*s);
@@ -216,7 +217,7 @@ pub fn layout_of_ty_query(
                     valid_range: WrappingRange { start: 0, end: 0x10FFFF },
                 },
             ),
-            chalk_ir::Scalar::Int(i) => scalar(
+            chalk_ir::Scalar::Int(i) => Layout::scalar(dl, scalar_unit(
                 dl,
                 Primitive::Int(
                     match i {
@@ -229,8 +230,8 @@ pub fn layout_of_ty_query(
                     },
                     true,
                 ),
-            ),
-            chalk_ir::Scalar::Uint(i) => scalar(
+            )),
+            chalk_ir::Scalar::Uint(i) => Layout::scalar(dl, scalar_unit(
                 dl,
                 Primitive::Int(
                     match i {
@@ -243,8 +244,8 @@ pub fn layout_of_ty_query(
                     },
                     false,
                 ),
-            ),
-            chalk_ir::Scalar::Float(f) => scalar(
+            )),
+            chalk_ir::Scalar::Float(f) => Layout::scalar(dl, scalar_unit(
                 dl,
                 Primitive::Float(match f {
                     FloatTy::F16 => Float::F16,
@@ -252,7 +253,7 @@ pub fn layout_of_ty_query(
                     FloatTy::F64 => Float::F64,
                     FloatTy::F128 => Float::F128,
                 }),
-            ),
+            )),
         },
         TyKind::Tuple(len, tys) => {
             let kind = if *len == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };
@@ -268,56 +269,16 @@ pub fn layout_of_ty_query(
         TyKind::Array(element, count) => {
             let count = try_const_usize(db, count).ok_or(LayoutError::HasErrorConst)? as u64;
             let element = db.layout_of_ty(element.clone(), trait_env)?;
-            let size = element
-                .size
-                .checked_mul(count, dl)
-                .ok_or(LayoutError::BadCalc(LayoutCalculatorError::SizeOverflow))?;
-
-            let backend_repr = BackendRepr::Memory { sized: true };
-
-            let largest_niche = if count != 0 { element.largest_niche } else { None };
-            let uninhabited = if count != 0 { element.uninhabited } else { false };
-
-            Layout {
-                variants: Variants::Single { index: struct_variant_idx() },
-                fields: FieldsShape::Array { stride: element.size, count },
-                backend_repr,
-                largest_niche,
-                uninhabited,
-                align: element.align,
-                size,
-                max_repr_align: None,
-                unadjusted_abi_align: element.align.abi,
-                randomization_seed: Hash64::ZERO,
-            }
+            cx.calc.array_like::<_, _, ()>(&element, Some(count))?
         }
         TyKind::Slice(element) => {
             let element = db.layout_of_ty(element.clone(), trait_env)?;
-            Layout {
-                variants: Variants::Single { index: struct_variant_idx() },
-                fields: FieldsShape::Array { stride: element.size, count: 0 },
-                backend_repr: BackendRepr::Memory { sized: false },
-                largest_niche: None,
-                uninhabited: false,
-                align: element.align,
-                size: Size::ZERO,
-                max_repr_align: None,
-                unadjusted_abi_align: element.align.abi,
-                randomization_seed: Hash64::ZERO,
-            }
+            cx.calc.array_like::<_, _, ()>(&element, None)?
+        }
+        TyKind::Str => {
+            let element = scalar_unit(dl, Primitive::Int(Integer::I8, false));
+            cx.calc.array_like::<_, _, ()>(&Layout::scalar(dl, element), None)?
         }
-        TyKind::Str => Layout {
-            variants: Variants::Single { index: struct_variant_idx() },
-            fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
-            backend_repr: BackendRepr::Memory { sized: false },
-            largest_niche: None,
-            uninhabited: false,
-            align: dl.i8_align,
-            size: Size::ZERO,
-            max_repr_align: None,
-            unadjusted_abi_align: dl.i8_align.abi,
-            randomization_seed: Hash64::ZERO,
-        },
         // Potentially-wide pointers.
         TyKind::Ref(_, _, pointee) | TyKind::Raw(_, pointee) => {
             let mut data_ptr = scalar_unit(dl, Primitive::Pointer(AddressSpace::DATA));
@@ -355,17 +316,12 @@ pub fn layout_of_ty_query(
             };
 
             // Effectively a (ptr, meta) tuple.
-            cx.calc.scalar_pair(data_ptr, metadata)
+            LayoutData::scalar_pair(dl, data_ptr, metadata)
         }
-        TyKind::FnDef(_, _) => layout_of_unit(&cx)?,
-        TyKind::Never => cx.calc.layout_of_never_type(),
-        TyKind::Dyn(_) | TyKind::Foreign(_) => {
-            let mut unit = layout_of_unit(&cx)?;
-            match &mut unit.backend_repr {
-                BackendRepr::Memory { sized } => *sized = false,
-                _ => return Err(LayoutError::Unknown),
-            }
-            unit
+        TyKind::Never => LayoutData::never_type(dl),
+        TyKind::FnDef(..) | TyKind::Dyn(_) | TyKind::Foreign(_) => {
+            let sized = matches!(kind, TyKind::FnDef(..));
+            LayoutData::unit(dl, sized)
         }
         TyKind::Function(_) => {
             let mut ptr = scalar_unit(dl, Primitive::Pointer(dl.instruction_address_space));
@@ -434,16 +390,6 @@ pub fn layout_of_ty_recover(
     Err(LayoutError::RecursiveTypeWithoutIndirection)
 }
 
-fn layout_of_unit(cx: &LayoutCx<'_>) -> Result<Layout, LayoutError> {
-    cx.calc
-        .univariant::<RustcFieldIdx, RustcEnumVariantIdx, &&Layout>(
-            IndexSlice::empty(),
-            &ReprOptions::default(),
-            StructKind::AlwaysSized,
-        )
-        .map_err(Into::into)
-}
-
 fn struct_tail_erasing_lifetimes(db: &dyn HirDatabase, pointee: Ty) -> Ty {
     match pointee.kind(Interner) {
         TyKind::Adt(AdtId(hir_def::AdtId::StructId(i)), subst) => {
@@ -474,9 +420,5 @@ fn scalar_unit(dl: &TargetDataLayout, value: Primitive) -> Scalar {
     Scalar::Initialized { value, valid_range: WrappingRange::full(value.size(dl)) }
 }
 
-fn scalar(dl: &TargetDataLayout, value: Primitive) -> Layout {
-    Layout::scalar(dl, scalar_unit(dl, value))
-}
-
 #[cfg(test)]
 mod tests;

From e69491ac6021572d4ac9cde1c14a04673b4ec859 Mon Sep 17 00:00:00 2001
From: Moulins <arthur.heuillard@orange.fr>
Date: Fri, 7 Mar 2025 21:17:16 +0100
Subject: [PATCH 2/5] Move SIMD layout logic to `rustc_abi`

---
 compiler/rustc_abi/src/layout.rs       | 97 ++++++++++++++++++++++----
 compiler/rustc_abi/src/layout/ty.rs    |  6 ++
 compiler/rustc_abi/src/lib.rs          |  7 ++
 compiler/rustc_middle/src/ty/layout.rs |  7 +-
 compiler/rustc_ty_utils/src/layout.rs  | 79 +++++----------------
 5 files changed, 117 insertions(+), 79 deletions(-)

diff --git a/compiler/rustc_abi/src/layout.rs b/compiler/rustc_abi/src/layout.rs
index 42ecbce8117cb..d0d7cc68a77a8 100644
--- a/compiler/rustc_abi/src/layout.rs
+++ b/compiler/rustc_abi/src/layout.rs
@@ -62,17 +62,28 @@ pub enum LayoutCalculatorError<F> {
 
     /// The fields or variants have irreconcilable reprs
     ReprConflict,
+
+    /// The length of a SIMD type is zero
+    ZeroLengthSimdType,
+
+    /// The length of a SIMD type exceeds the maximum number of lanes
+    OversizedSimdType { max_lanes: u64 },
+
+    /// An element type of a SIMD type isn't a primitive
+    NonPrimitiveSimdType(F),
 }
 
 impl<F> LayoutCalculatorError<F> {
     pub fn without_payload(&self) -> LayoutCalculatorError<()> {
-        match self {
-            LayoutCalculatorError::UnexpectedUnsized(_) => {
-                LayoutCalculatorError::UnexpectedUnsized(())
-            }
-            LayoutCalculatorError::SizeOverflow => LayoutCalculatorError::SizeOverflow,
-            LayoutCalculatorError::EmptyUnion => LayoutCalculatorError::EmptyUnion,
-            LayoutCalculatorError::ReprConflict => LayoutCalculatorError::ReprConflict,
+        use LayoutCalculatorError::*;
+        match *self {
+            UnexpectedUnsized(_) => UnexpectedUnsized(()),
+            SizeOverflow => SizeOverflow,
+            EmptyUnion => EmptyUnion,
+            ReprConflict => ReprConflict,
+            ZeroLengthSimdType => ZeroLengthSimdType,
+            OversizedSimdType { max_lanes } => OversizedSimdType { max_lanes },
+            NonPrimitiveSimdType(_) => NonPrimitiveSimdType(()),
         }
     }
 
@@ -80,13 +91,15 @@ impl<F> LayoutCalculatorError<F> {
     ///
     /// Intended for use by rust-analyzer, as neither it nor `rustc_abi` depend on fluent infra.
     pub fn fallback_fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        use LayoutCalculatorError::*;
         f.write_str(match self {
-            LayoutCalculatorError::UnexpectedUnsized(_) => {
-                "an unsized type was found where a sized type was expected"
+            UnexpectedUnsized(_) => "an unsized type was found where a sized type was expected",
+            SizeOverflow => "size overflow",
+            EmptyUnion => "type is a union with no fields",
+            ReprConflict => "type has an invalid repr",
+            ZeroLengthSimdType | OversizedSimdType { .. } | NonPrimitiveSimdType(_) => {
+                "invalid simd type definition"
             }
-            LayoutCalculatorError::SizeOverflow => "size overflow",
-            LayoutCalculatorError::EmptyUnion => "type is a union with no fields",
-            LayoutCalculatorError::ReprConflict => "type has an invalid repr",
         })
     }
 }
@@ -127,6 +140,66 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
         })
     }
 
+    pub fn simd_type<
+        FieldIdx: Idx,
+        VariantIdx: Idx,
+        F: AsRef<LayoutData<FieldIdx, VariantIdx>> + fmt::Debug,
+    >(
+        &self,
+        element: F,
+        count: u64,
+        repr_packed: bool,
+    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
+        let elt = element.as_ref();
+        if count == 0 {
+            return Err(LayoutCalculatorError::ZeroLengthSimdType);
+        } else if count > crate::MAX_SIMD_LANES {
+            return Err(LayoutCalculatorError::OversizedSimdType {
+                max_lanes: crate::MAX_SIMD_LANES,
+            });
+        }
+
+        let BackendRepr::Scalar(e_repr) = elt.backend_repr else {
+            return Err(LayoutCalculatorError::NonPrimitiveSimdType(element));
+        };
+
+        // Compute the size and alignment of the vector
+        let dl = self.cx.data_layout();
+        let size =
+            elt.size.checked_mul(count, dl).ok_or_else(|| LayoutCalculatorError::SizeOverflow)?;
+        let (repr, align) = if repr_packed && !count.is_power_of_two() {
+            // Non-power-of-two vectors have padding up to the next power-of-two.
+            // If we're a packed repr, remove the padding while keeping the alignment as close
+            // to a vector as possible.
+            (
+                BackendRepr::Memory { sized: true },
+                AbiAndPrefAlign {
+                    abi: Align::max_aligned_factor(size),
+                    pref: dl.llvmlike_vector_align(size).pref,
+                },
+            )
+        } else {
+            (BackendRepr::SimdVector { element: e_repr, count }, dl.llvmlike_vector_align(size))
+        };
+        let size = size.align_to(align.abi);
+
+        Ok(LayoutData {
+            variants: Variants::Single { index: VariantIdx::new(0) },
+            fields: FieldsShape::Arbitrary {
+                offsets: [Size::ZERO].into(),
+                memory_index: [0].into(),
+            },
+            backend_repr: repr,
+            largest_niche: elt.largest_niche,
+            uninhabited: false,
+            size,
+            align,
+            max_repr_align: None,
+            unadjusted_abi_align: elt.align.abi,
+            randomization_seed: elt.randomization_seed.wrapping_add(Hash64::new(count)),
+        })
+    }
+
     pub fn univariant<
         'a,
         FieldIdx: Idx,
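
For illustration (not part of the patch): how `simd_type` treats a
non-power-of-two lane count, assuming a target whose LLVM-like vector
alignment rounds a 96-bit vector up to 128 bits, and with `u32_layout`
standing in for the scalar layout of u32:

    let v = calc.simd_type::<FieldIdx, VariantIdx, _>(u32_layout, 3, false)?;
    // v.size == 16, vector-aligned: padded to the next power of two, with
    // BackendRepr::SimdVector { element, count: 3 }.

    let v_packed = calc.simd_type::<FieldIdx, VariantIdx, _>(u32_layout, 3, true)?;
    // v_packed.size == 12, align == max_aligned_factor(12) == 4: the padding
    // is removed and the repr falls back to BackendRepr::Memory { sized: true }.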
diff --git a/compiler/rustc_abi/src/layout/ty.rs b/compiler/rustc_abi/src/layout/ty.rs
index 03f3f043c218e..4f43c0e6f8e96 100644
--- a/compiler/rustc_abi/src/layout/ty.rs
+++ b/compiler/rustc_abi/src/layout/ty.rs
@@ -150,6 +150,12 @@ impl<'a, Ty> Deref for TyAndLayout<'a, Ty> {
     }
 }
 
+impl<'a, Ty> AsRef<LayoutData<FieldIdx, VariantIdx>> for TyAndLayout<'a, Ty> {
+    fn as_ref(&self) -> &LayoutData<FieldIdx, VariantIdx> {
+        &*self.layout.0.0
+    }
+}
+
 /// Trait that needs to be implemented by the higher-level type representation
 /// (e.g. `rustc_middle::ty::Ty`), to provide `rustc_target::abi` functionality.
 pub trait TyAbiInterface<'a, C>: Sized + std::fmt::Debug {
diff --git a/compiler/rustc_abi/src/lib.rs b/compiler/rustc_abi/src/lib.rs
index db9a26c3ef7f0..1b73758200939 100644
--- a/compiler/rustc_abi/src/lib.rs
+++ b/compiler/rustc_abi/src/lib.rs
@@ -205,6 +205,13 @@ impl ReprOptions {
     }
 }
 
+/// The maximum supported number of lanes in a SIMD vector.
+///
+/// This value is selected based on backend support:
+/// * LLVM does not appear to have a vector width limit.
+/// * Cranelift stores the base-2 log of the lane count in a 4 bit integer.
+pub const MAX_SIMD_LANES: u64 = 1 << 0xF;
+
 /// Parsed [Data layout](https://llvm.org/docs/LangRef.html#data-layout)
 /// for a target, which contains everything needed to compute layouts.
 #[derive(Debug, PartialEq, Eq)]
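
As a quick sanity check of the arithmetic (illustrative, not in the patch):
1 << 0xF is 2^15 = 32768, and its base-2 log (15) is the largest value a
4-bit field can hold, matching the Cranelift constraint noted above.

    assert_eq!(rustc_abi::MAX_SIMD_LANES, 1 << 15); // log2 = 15 fits in 4 bits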
diff --git a/compiler/rustc_middle/src/ty/layout.rs b/compiler/rustc_middle/src/ty/layout.rs
index d32c524a427c0..ebb6a8c08a54c 100644
--- a/compiler/rustc_middle/src/ty/layout.rs
+++ b/compiler/rustc_middle/src/ty/layout.rs
@@ -182,12 +182,7 @@ pub const WIDE_PTR_ADDR: usize = 0;
 /// - For a slice, this is the length.
 pub const WIDE_PTR_EXTRA: usize = 1;
 
-/// The maximum supported number of lanes in a SIMD vector.
-///
-/// This value is selected based on backend support:
-/// * LLVM does not appear to have a vector width limit.
-/// * Cranelift stores the base-2 log of the lane count in a 4 bit integer.
-pub const MAX_SIMD_LANES: u64 = 1 << 0xF;
+pub const MAX_SIMD_LANES: u64 = rustc_abi::MAX_SIMD_LANES;
 
 /// Used in `check_validity_requirement` to indicate the kind of initialization
 /// that is checked to be valid
diff --git a/compiler/rustc_ty_utils/src/layout.rs b/compiler/rustc_ty_utils/src/layout.rs
index 45c2639280f36..fe30b3e109319 100644
--- a/compiler/rustc_ty_utils/src/layout.rs
+++ b/compiler/rustc_ty_utils/src/layout.rs
@@ -5,9 +5,9 @@ use hir::def_id::DefId;
 use rustc_abi::Integer::{I8, I32};
 use rustc_abi::Primitive::{self, Float, Int, Pointer};
 use rustc_abi::{
-    AbiAndPrefAlign, AddressSpace, Align, BackendRepr, FIRST_VARIANT, FieldIdx, FieldsShape,
-    HasDataLayout, Layout, LayoutCalculatorError, LayoutData, Niche, ReprOptions, Scalar, Size,
-    StructKind, TagEncoding, VariantIdx, Variants, WrappingRange,
+    AddressSpace, BackendRepr, FIRST_VARIANT, FieldIdx, FieldsShape, HasDataLayout, Layout,
+    LayoutCalculatorError, LayoutData, Niche, ReprOptions, Scalar, Size, StructKind, TagEncoding,
+    VariantIdx, Variants, WrappingRange,
 };
 use rustc_hashes::Hash64;
 use rustc_index::bit_set::DenseBitSet;
@@ -16,7 +16,7 @@ use rustc_middle::bug;
 use rustc_middle::mir::{CoroutineLayout, CoroutineSavedLocal};
 use rustc_middle::query::Providers;
 use rustc_middle::ty::layout::{
-    FloatExt, HasTyCtxt, IntegerExt, LayoutCx, LayoutError, LayoutOf, MAX_SIMD_LANES, TyAndLayout,
+    FloatExt, HasTyCtxt, IntegerExt, LayoutCx, LayoutError, LayoutOf, TyAndLayout,
 };
 use rustc_middle::ty::print::with_no_trimmed_paths;
 use rustc_middle::ty::{
@@ -124,6 +124,19 @@ fn map_error<'tcx>(
                 .delayed_bug(format!("computed impossible repr (packed enum?): {ty:?}"));
             LayoutError::ReferencesError(guar)
         }
+        LayoutCalculatorError::ZeroLengthSimdType => {
+            // Can't be caught in typeck if the array length is generic.
+            cx.tcx().dcx().emit_fatal(ZeroLengthSimdType { ty })
+        }
+        LayoutCalculatorError::OversizedSimdType { max_lanes } => {
+            // Can't be caught in typeck if the array length is generic.
+            cx.tcx().dcx().emit_fatal(OversizedSimdType { ty, max_lanes })
+        }
+        LayoutCalculatorError::NonPrimitiveSimdType(field) => {
+            // This error isn't caught in typeck, e.g., if
+            // the element type of the vector is generic.
+            cx.tcx().dcx().emit_fatal(NonPrimitiveSimdType { ty, e_ty: field.ty })
+        }
     };
     error(cx, err)
 }
@@ -423,65 +436,9 @@ fn layout_of_uncached<'tcx>(
                 .try_to_target_usize(tcx)
                 .ok_or_else(|| error(cx, LayoutError::Unknown(ty)))?;
 
-            // SIMD vectors of zero length are not supported.
-            // Additionally, lengths are capped at 2^16 as a fixed maximum backends must
-            // support.
-            //
-            // Can't be caught in typeck if the array length is generic.
-            if e_len == 0 {
-                tcx.dcx().emit_fatal(ZeroLengthSimdType { ty });
-            } else if e_len > MAX_SIMD_LANES {
-                tcx.dcx().emit_fatal(OversizedSimdType { ty, max_lanes: MAX_SIMD_LANES });
-            }
-
-            // Compute the ABI of the element type:
             let e_ly = cx.layout_of(e_ty)?;
-            let BackendRepr::Scalar(e_abi) = e_ly.backend_repr else {
-                // This error isn't caught in typeck, e.g., if
-                // the element type of the vector is generic.
-                tcx.dcx().emit_fatal(NonPrimitiveSimdType { ty, e_ty });
-            };
-
-            // Compute the size and alignment of the vector:
-            let size = e_ly
-                .size
-                .checked_mul(e_len, dl)
-                .ok_or_else(|| error(cx, LayoutError::SizeOverflow(ty)))?;
-
-            let (abi, align) = if def.repr().packed() && !e_len.is_power_of_two() {
-                // Non-power-of-two vectors have padding up to the next power-of-two.
-                // If we're a packed repr, remove the padding while keeping the alignment as close
-                // to a vector as possible.
-                (
-                    BackendRepr::Memory { sized: true },
-                    AbiAndPrefAlign {
-                        abi: Align::max_aligned_factor(size),
-                        pref: dl.llvmlike_vector_align(size).pref,
-                    },
-                )
-            } else {
-                (
-                    BackendRepr::SimdVector { element: e_abi, count: e_len },
-                    dl.llvmlike_vector_align(size),
-                )
-            };
-            let size = size.align_to(align.abi);
 
-            tcx.mk_layout(LayoutData {
-                variants: Variants::Single { index: FIRST_VARIANT },
-                fields: FieldsShape::Arbitrary {
-                    offsets: [Size::ZERO].into(),
-                    memory_index: [0].into(),
-                },
-                backend_repr: abi,
-                largest_niche: e_ly.largest_niche,
-                uninhabited: false,
-                size,
-                align,
-                max_repr_align: None,
-                unadjusted_abi_align: align.abi,
-                randomization_seed: e_ly.randomization_seed.wrapping_add(Hash64::new(e_len)),
-            })
+            map_layout(cx.calc.simd_type(e_ly, e_len, def.repr().packed()))?
         }
 
         // ADTs.

From f79f3d31a3cbf87e50531851f5079e7fcba7ce81 Mon Sep 17 00:00:00 2001
From: Moulins <arthur.heuillard@orange.fr>
Date: Fri, 7 Mar 2025 21:18:18 +0100
Subject: [PATCH 3/5] Use `rustc_abi` code for SIMD layout in rust-analyzer

---
 .../rust-analyzer/crates/hir-ty/src/layout.rs | 37 +++----------------
 .../crates/hir-ty/src/layout/adt.rs           |  6 +--
 .../rust-analyzer/crates/hir-ty/src/lib.rs    |  3 --
 3 files changed, 7 insertions(+), 39 deletions(-)

diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/layout.rs b/src/tools/rust-analyzer/crates/hir-ty/src/layout.rs
index 05f38cd09e224..2ac1792ba8684 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/layout.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/layout.rs
@@ -6,15 +6,14 @@ use base_db::ra_salsa::Cycle;
 use chalk_ir::{AdtId, FloatTy, IntTy, TyKind, UintTy};
 use hir_def::{
     layout::{
-        BackendRepr, FieldsShape, Float, Integer, LayoutCalculator, LayoutCalculatorError,
-        LayoutData, Primitive, ReprOptions, Scalar, Size, StructKind, TargetDataLayout,
+        Float, Integer, LayoutCalculator, LayoutCalculatorError, LayoutData,
+        Primitive, ReprOptions, Scalar, StructKind, TargetDataLayout,
         WrappingRange,
     },
     LocalFieldId, StructId,
 };
 use la_arena::{Idx, RawIdx};
 use rustc_abi::AddressSpace;
-use rustc_hashes::Hash64;
 use rustc_index::IndexVec;
 
 use triomphe::Arc;
@@ -23,7 +22,6 @@ use crate::{
     consteval::try_const_usize,
     db::{HirDatabase, InternedClosure},
     infer::normalize,
-    layout::adt::struct_variant_idx,
     utils::ClosureSubst,
     Interner, ProjectionTy, Substitution, TraitEnvironment, Ty,
 };
@@ -125,10 +123,10 @@ impl<'a> LayoutCx<'a> {
     }
 }
 
-// FIXME: move this to the `rustc_abi`.
 fn layout_of_simd_ty(
     db: &dyn HirDatabase,
     id: StructId,
+    repr_packed: bool,
     subst: &Substitution,
     env: Arc<TraitEnvironment>,
     dl: &TargetDataLayout,
@@ -149,33 +147,10 @@ fn layout_of_simd_ty(
     };
 
     let e_len = try_const_usize(db, &e_len).ok_or(LayoutError::HasErrorConst)? as u64;
-
-    // Compute the ABI of the element type:
     let e_ly = db.layout_of_ty(e_ty, env)?;
-    let BackendRepr::Scalar(e_abi) = e_ly.backend_repr else {
-        return Err(LayoutError::Unknown);
-    };
 
-    // Compute the size and alignment of the vector:
-    let size = e_ly
-        .size
-        .checked_mul(e_len, dl)
-        .ok_or(LayoutError::BadCalc(LayoutCalculatorError::SizeOverflow))?;
-    let align = dl.llvmlike_vector_align(size);
-    let size = size.align_to(align.abi);
-
-    Ok(Arc::new(Layout {
-        variants: Variants::Single { index: struct_variant_idx() },
-        fields: FieldsShape::Arbitrary { offsets: [Size::ZERO].into(), memory_index: [0].into() },
-        backend_repr: BackendRepr::SimdVector { element: e_abi, count: e_len },
-        largest_niche: e_ly.largest_niche,
-        uninhabited: false,
-        size,
-        align,
-        max_repr_align: None,
-        unadjusted_abi_align: align.abi,
-        randomization_seed: Hash64::ZERO,
-    }))
+    let cx = LayoutCx::new(dl);
+    Ok(Arc::new(cx.calc.simd_type(e_ly, e_len, repr_packed)?))
 }
 
 pub fn layout_of_ty_query(
@@ -197,7 +172,7 @@ pub fn layout_of_ty_query(
                 let data = db.struct_data(*s);
                 let repr = data.repr.unwrap_or_default();
                 if repr.simd() {
-                    return layout_of_simd_ty(db, *s, subst, trait_env, &target);
+                    return layout_of_simd_ty(db, *s, repr.packed(), subst, trait_env, &target);
                 }
             };
             return db.layout_of_adt(*def, subst.clone(), trait_env);
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/layout/adt.rs b/src/tools/rust-analyzer/crates/hir-ty/src/layout/adt.rs
index 0ba765bd75ef3..eb4729fab8426 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/layout/adt.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/layout/adt.rs
@@ -16,16 +16,12 @@ use triomphe::Arc;
 use crate::{
     db::HirDatabase,
     lang_items::is_unsafe_cell,
-    layout::{field_ty, Layout, LayoutError, RustcEnumVariantIdx},
+    layout::{field_ty, Layout, LayoutError},
     Substitution, TraitEnvironment,
 };
 
 use super::LayoutCx;
 
-pub(crate) fn struct_variant_idx() -> RustcEnumVariantIdx {
-    RustcEnumVariantIdx(0)
-}
-
 pub fn layout_of_adt_query(
     db: &dyn HirDatabase,
     def: AdtId,
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/lib.rs b/src/tools/rust-analyzer/crates/hir-ty/src/lib.rs
index 707c43777267e..e0dcc01821ec3 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/lib.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/lib.rs
@@ -12,9 +12,6 @@ extern crate ra_ap_rustc_index as rustc_index;
 #[cfg(feature = "in-rust-tree")]
 extern crate rustc_abi;
 
-#[cfg(feature = "in-rust-tree")]
-extern crate rustc_hashes;
-
 #[cfg(not(feature = "in-rust-tree"))]
 extern crate ra_ap_rustc_abi as rustc_abi;
 

From b8a217081d0d7aef0c89c9dacc21c020faa0949b Mon Sep 17 00:00:00 2001
From: Moulins <arthur.heuillard@orange.fr>
Date: Fri, 7 Mar 2025 22:09:56 +0100
Subject: [PATCH 4/5] Refactor coroutine layout logic to precompute all
 sublayouts

Also properly attaches spans to the layouts of non-promoted coroutine
locals, which slightly improves the error messages for some coroutine tests.
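
As a rough sketch of the reshaped flow (names as in the hunks below): the
caller now computes every sublayout up front, so coroutine_layout can work
purely on precomputed layouts plus the variant-field and storage-conflict
tables:

    // Each saved local is wrapped in MaybeUninit and laid out with its span
    // attached, so layout errors can point at the local's source location.
    let local_layouts = info
        .field_tys
        .iter()
        .map(|local| {
            let field_ty = EarlyBinder::bind(local.ty).instantiate(tcx, args);
            cx.spanned_layout_of(Ty::new_maybe_uninit(tcx, field_ty), local.source_info.span)
        })
        .try_collect::<IndexVec<_, _>>()?;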
---
 compiler/rustc_ty_utils/src/layout.rs         | 235 +++++++++---------
 .../indirect-recursion-issue-112047.stderr    |   3 +
 tests/ui/layout/post-mono-layout-cycle-2.rs   |   2 +-
 .../ui/layout/post-mono-layout-cycle-2.stderr |   6 -
 4 files changed, 122 insertions(+), 124 deletions(-)

diff --git a/compiler/rustc_ty_utils/src/layout.rs b/compiler/rustc_ty_utils/src/layout.rs
index fe30b3e109319..efca395fe4aeb 100644
--- a/compiler/rustc_ty_utils/src/layout.rs
+++ b/compiler/rustc_ty_utils/src/layout.rs
@@ -5,23 +5,21 @@ use hir::def_id::DefId;
 use rustc_abi::Integer::{I8, I32};
 use rustc_abi::Primitive::{self, Float, Int, Pointer};
 use rustc_abi::{
-    AddressSpace, BackendRepr, FIRST_VARIANT, FieldIdx, FieldsShape, HasDataLayout, Layout,
-    LayoutCalculatorError, LayoutData, Niche, ReprOptions, Scalar, Size, StructKind, TagEncoding,
-    VariantIdx, Variants, WrappingRange,
+    AddressSpace, BackendRepr, FIRST_VARIANT, FieldIdx, FieldsShape, HasDataLayout, Integer,
+    Layout, LayoutCalculator, LayoutCalculatorError, LayoutData, Niche, ReprOptions, Scalar, Size,
+    StructKind, TagEncoding, VariantIdx, Variants, WrappingRange,
 };
 use rustc_hashes::Hash64;
-use rustc_index::bit_set::DenseBitSet;
-use rustc_index::{IndexSlice, IndexVec};
+use rustc_index::bit_set::{BitMatrix, DenseBitSet};
+use rustc_index::{Idx, IndexSlice, IndexVec};
 use rustc_middle::bug;
-use rustc_middle::mir::{CoroutineLayout, CoroutineSavedLocal};
 use rustc_middle::query::Providers;
 use rustc_middle::ty::layout::{
     FloatExt, HasTyCtxt, IntegerExt, LayoutCx, LayoutError, LayoutOf, TyAndLayout,
 };
 use rustc_middle::ty::print::with_no_trimmed_paths;
 use rustc_middle::ty::{
-    self, AdtDef, CoroutineArgsExt, EarlyBinder, GenericArgsRef, PseudoCanonicalInput, Ty, TyCtxt,
-    TypeVisitableExt,
+    self, AdtDef, CoroutineArgsExt, EarlyBinder, PseudoCanonicalInput, Ty, TyCtxt, TypeVisitableExt,
 };
 use rustc_session::{DataTypeKind, FieldInfo, FieldKind, SizeKind, VariantInfo};
 use rustc_span::{Symbol, sym};
@@ -141,16 +139,6 @@ fn map_error<'tcx>(
     error(cx, err)
 }
 
-fn univariant_uninterned<'tcx>(
-    cx: &LayoutCx<'tcx>,
-    ty: Ty<'tcx>,
-    fields: &IndexSlice<FieldIdx, TyAndLayout<'tcx>>,
-    kind: StructKind,
-) -> Result<LayoutData<FieldIdx, VariantIdx>, &'tcx LayoutError<'tcx>> {
-    let repr = ReprOptions::default();
-    cx.calc.univariant(fields, &repr, kind).map_err(|err| map_error(cx, ty, err))
-}
-
 fn extract_const_value<'tcx>(
     cx: &LayoutCx<'tcx>,
     ty: Ty<'tcx>,
@@ -212,8 +200,10 @@ fn layout_of_uncached<'tcx>(
     };
     let scalar = |value: Primitive| tcx.mk_layout(LayoutData::scalar(cx, scalar_unit(value)));
 
-    let univariant = |fields: &IndexSlice<FieldIdx, TyAndLayout<'tcx>>, kind| {
-        Ok(tcx.mk_layout(univariant_uninterned(cx, ty, fields, kind)?))
+    let univariant = |tys: &[Ty<'tcx>], kind| {
+        let fields = tys.iter().map(|ty| cx.layout_of(*ty)).try_collect::<IndexVec<_, _>>()?;
+        let repr = ReprOptions::default();
+        map_layout(cx.calc.univariant(&fields, &repr, kind))
     };
     debug_assert!(!ty.has_non_region_infer());
 
@@ -389,29 +379,61 @@ fn layout_of_uncached<'tcx>(
             tcx.mk_layout(LayoutData::unit(cx, sized))
         }
 
-        ty::Coroutine(def_id, args) => coroutine_layout(cx, ty, def_id, args)?,
+        ty::Coroutine(def_id, args) => {
+            use rustc_middle::ty::layout::PrimitiveExt as _;
+
+            let Some(info) = tcx.coroutine_layout(def_id, args.as_coroutine().kind_ty()) else {
+                return Err(error(cx, LayoutError::Unknown(ty)));
+            };
+
+            let local_layouts = info
+                .field_tys
+                .iter()
+                .map(|local| {
+                    let field_ty = EarlyBinder::bind(local.ty);
+                    let uninit_ty = Ty::new_maybe_uninit(tcx, field_ty.instantiate(tcx, args));
+                    cx.spanned_layout_of(uninit_ty, local.source_info.span)
+                })
+                .try_collect::<IndexVec<_, _>>()?;
 
-        ty::Closure(_, args) => {
-            let tys = args.as_closure().upvar_tys();
-            univariant(
-                &tys.iter().map(|ty| cx.layout_of(ty)).try_collect::<IndexVec<_, _>>()?,
-                StructKind::AlwaysSized,
-            )?
+            let prefix_layouts = args
+                .as_coroutine()
+                .prefix_tys()
+                .iter()
+                .map(|ty| cx.layout_of(ty))
+                .try_collect::<IndexVec<_, _>>()?;
+
+            let layout = coroutine_layout(
+                &cx.calc,
+                &local_layouts,
+                prefix_layouts,
+                &info.variant_fields,
+                &info.storage_conflicts,
+                |tag| TyAndLayout {
+                    ty: tag.primitive().to_ty(tcx),
+                    layout: tcx.mk_layout(LayoutData::scalar(cx, tag)),
+                },
+            )
+            .map(|mut layout| {
+                // this is similar to how ReprOptions populates its field_shuffle_seed
+                layout.randomization_seed = tcx.def_path_hash(def_id).0.to_smaller_hash();
+                debug!("coroutine layout ({:?}): {:#?}", ty, layout);
+                layout
+            });
+            map_layout(layout)?
         }
 
+        ty::Closure(_, args) => univariant(args.as_closure().upvar_tys(), StructKind::AlwaysSized)?,
+
         ty::CoroutineClosure(_, args) => {
-            let tys = args.as_coroutine_closure().upvar_tys();
-            univariant(
-                &tys.iter().map(|ty| cx.layout_of(ty)).try_collect::<IndexVec<_, _>>()?,
-                StructKind::AlwaysSized,
-            )?
+            univariant(args.as_coroutine_closure().upvar_tys(), StructKind::AlwaysSized)?
         }
 
         ty::Tuple(tys) => {
             let kind =
                 if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };
 
-            univariant(&tys.iter().map(|k| cx.layout_of(k)).try_collect::<IndexVec<_, _>>()?, kind)?
+            univariant(tys, kind)?
         }
 
         // SIMD vector types.
@@ -594,7 +616,7 @@ fn layout_of_uncached<'tcx>(
 
 /// Overlap eligibility and variant assignment for each CoroutineSavedLocal.
 #[derive(Clone, Debug, PartialEq)]
-enum SavedLocalEligibility {
+enum SavedLocalEligibility<VariantIdx, FieldIdx> {
     Unassigned,
     Assigned(VariantIdx),
     Ineligible(Option<FieldIdx>),
@@ -620,21 +642,22 @@ enum SavedLocalEligibility {
 // of any variant.
 
 /// Compute the eligibility and assignment of each local.
-fn coroutine_saved_local_eligibility(
-    info: &CoroutineLayout<'_>,
-) -> (DenseBitSet<CoroutineSavedLocal>, IndexVec<CoroutineSavedLocal, SavedLocalEligibility>) {
+fn coroutine_saved_local_eligibility<VariantIdx: Idx, FieldIdx: Idx, LocalIdx: Idx>(
+    nb_locals: usize,
+    variant_fields: &IndexSlice<VariantIdx, IndexVec<FieldIdx, LocalIdx>>,
+    storage_conflicts: &BitMatrix<LocalIdx, LocalIdx>,
+) -> (DenseBitSet<LocalIdx>, IndexVec<LocalIdx, SavedLocalEligibility<VariantIdx, FieldIdx>>) {
     use SavedLocalEligibility::*;
 
-    let mut assignments: IndexVec<CoroutineSavedLocal, SavedLocalEligibility> =
-        IndexVec::from_elem(Unassigned, &info.field_tys);
+    let mut assignments: IndexVec<LocalIdx, _> = IndexVec::from_elem_n(Unassigned, nb_locals);
 
     // The saved locals not eligible for overlap. These will get
     // "promoted" to the prefix of our coroutine.
-    let mut ineligible_locals = DenseBitSet::new_empty(info.field_tys.len());
+    let mut ineligible_locals = DenseBitSet::new_empty(nb_locals);
 
     // Figure out which of our saved locals are fields in only
     // one variant. The rest are deemed ineligible for overlap.
-    for (variant_index, fields) in info.variant_fields.iter_enumerated() {
+    for (variant_index, fields) in variant_fields.iter_enumerated() {
         for local in fields {
             match assignments[*local] {
                 Unassigned => {
@@ -657,13 +680,13 @@ fn coroutine_saved_local_eligibility(
 
     // Next, check every pair of eligible locals to see if they
     // conflict.
-    for local_a in info.storage_conflicts.rows() {
-        let conflicts_a = info.storage_conflicts.count(local_a);
+    for local_a in storage_conflicts.rows() {
+        let conflicts_a = storage_conflicts.count(local_a);
         if ineligible_locals.contains(local_a) {
             continue;
         }
 
-        for local_b in info.storage_conflicts.iter(local_a) {
+        for local_b in storage_conflicts.iter(local_a) {
             // local_a and local_b are storage live at the same time, therefore they
             // cannot overlap in the coroutine layout. The only way to guarantee
             // this is if they are in the same variant, or one is ineligible
@@ -675,7 +698,7 @@ fn coroutine_saved_local_eligibility(
             // If they conflict, we will choose one to make ineligible.
             // This is not always optimal; it's just a greedy heuristic that
             // seems to produce good results most of the time.
-            let conflicts_b = info.storage_conflicts.count(local_b);
+            let conflicts_b = storage_conflicts.count(local_b);
             let (remove, other) =
                 if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
             ineligible_locals.insert(remove);
@@ -690,7 +713,7 @@ fn coroutine_saved_local_eligibility(
     // lay them out with the other locals in the prefix and eliminate
     // unnecessary padding bytes.
     {
-        let mut used_variants = DenseBitSet::new_empty(info.variant_fields.len());
+        let mut used_variants = DenseBitSet::new_empty(variant_fields.len());
         for assignment in &assignments {
             if let Assigned(idx) = assignment {
                 used_variants.insert(*idx);
@@ -707,7 +730,7 @@ fn coroutine_saved_local_eligibility(
     // Write down the order of our locals that will be promoted to the prefix.
     {
         for (idx, local) in ineligible_locals.iter().enumerate() {
-            assignments[local] = Ineligible(Some(FieldIdx::from_usize(idx)));
+            assignments[local] = Ineligible(Some(FieldIdx::new(idx)));
         }
     }
     debug!("coroutine saved local assignments: {:?}", assignments);
@@ -716,52 +739,43 @@ fn coroutine_saved_local_eligibility(
 }
 
 /// Compute the full coroutine layout.
-fn coroutine_layout<'tcx>(
-    cx: &LayoutCx<'tcx>,
-    ty: Ty<'tcx>,
-    def_id: hir::def_id::DefId,
-    args: GenericArgsRef<'tcx>,
-) -> Result<Layout<'tcx>, &'tcx LayoutError<'tcx>> {
+fn coroutine_layout<
+    'a,
+    F: core::ops::Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + core::fmt::Debug + Copy,
+    VariantIdx: Idx,
+    FieldIdx: Idx,
+    LocalIdx: Idx,
+>(
+    calc: &LayoutCalculator<impl HasDataLayout>,
+    local_layouts: &IndexSlice<LocalIdx, F>,
+    mut prefix_layouts: IndexVec<FieldIdx, F>,
+    variant_fields: &IndexSlice<VariantIdx, IndexVec<FieldIdx, LocalIdx>>,
+    storage_conflicts: &BitMatrix<LocalIdx, LocalIdx>,
+    tag_to_layout: impl Fn(Scalar) -> F,
+) -> Result<LayoutData<FieldIdx, VariantIdx>, LayoutCalculatorError<F>> {
     use SavedLocalEligibility::*;
-    let tcx = cx.tcx();
-    let instantiate_field = |ty: Ty<'tcx>| EarlyBinder::bind(ty).instantiate(tcx, args);
 
-    let Some(info) = tcx.coroutine_layout(def_id, args.as_coroutine().kind_ty()) else {
-        return Err(error(cx, LayoutError::Unknown(ty)));
-    };
-    let (ineligible_locals, assignments) = coroutine_saved_local_eligibility(info);
+    let (ineligible_locals, assignments) =
+        coroutine_saved_local_eligibility(local_layouts.len(), variant_fields, storage_conflicts);
 
     // Build a prefix layout, including "promoting" all ineligible
     // locals as part of the prefix. We compute the layout of all of
     // these fields at once to get optimal packing.
-    let tag_index = args.as_coroutine().prefix_tys().len();
+    let tag_index = prefix_layouts.len();
 
-    // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
-    let max_discr = (info.variant_fields.len() - 1) as u128;
-    let discr_int = abi::Integer::fit_unsigned(max_discr);
+    // `variant_fields` already accounts for the reserved variants, so no need to add them.
+    let max_discr = (variant_fields.len() - 1) as u128;
+    let discr_int = Integer::fit_unsigned(max_discr);
     let tag = Scalar::Initialized {
         value: Primitive::Int(discr_int, /* signed = */ false),
         valid_range: WrappingRange { start: 0, end: max_discr },
     };
-    let tag_layout = TyAndLayout {
-        ty: discr_int.to_ty(tcx, /* signed = */ false),
-        layout: tcx.mk_layout(LayoutData::scalar(cx, tag)),
-    };
 
-    let promoted_layouts = ineligible_locals.iter().map(|local| {
-        let field_ty = instantiate_field(info.field_tys[local].ty);
-        let uninit_ty = Ty::new_maybe_uninit(tcx, field_ty);
-        cx.spanned_layout_of(uninit_ty, info.field_tys[local].source_info.span)
-    });
-    let prefix_layouts = args
-        .as_coroutine()
-        .prefix_tys()
-        .iter()
-        .map(|ty| cx.layout_of(ty))
-        .chain(iter::once(Ok(tag_layout)))
-        .chain(promoted_layouts)
-        .try_collect::<IndexVec<_, _>>()?;
-    let prefix = univariant_uninterned(cx, ty, &prefix_layouts, StructKind::AlwaysSized)?;
+    let promoted_layouts = ineligible_locals.iter().map(|local| local_layouts[local]);
+    prefix_layouts.push(tag_to_layout(tag));
+    prefix_layouts.extend(promoted_layouts);
+    let prefix =
+        calc.univariant(&prefix_layouts, &ReprOptions::default(), StructKind::AlwaysSized)?;
 
     let (prefix_size, prefix_align) = (prefix.size, prefix.align);
 
@@ -776,8 +790,8 @@ fn coroutine_layout<'tcx>(
 
             // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
             // "outer" and "promoted" fields respectively.
-            let b_start = FieldIdx::from_usize(tag_index + 1);
-            let offsets_b = IndexVec::from_raw(offsets.raw.split_off(b_start.as_usize()));
+            let b_start = FieldIdx::new(tag_index + 1);
+            let offsets_b = IndexVec::from_raw(offsets.raw.split_off(b_start.index()));
             let offsets_a = offsets;
 
             // Disentangle the "a" and "b" components of `inverse_memory_index`
@@ -785,9 +799,9 @@ fn coroutine_layout<'tcx>(
             // FIXME(eddyb) build a better abstraction for permutations, if possible.
             let inverse_memory_index_b: IndexVec<u32, FieldIdx> = inverse_memory_index
                 .iter()
-                .filter_map(|&i| i.as_u32().checked_sub(b_start.as_u32()).map(FieldIdx::from_u32))
+                .filter_map(|&i| i.index().checked_sub(b_start.index()).map(FieldIdx::new))
                 .collect();
-            inverse_memory_index.raw.retain(|&i| i < b_start);
+            inverse_memory_index.raw.retain(|&i| i.index() < b_start.index());
             let inverse_memory_index_a = inverse_memory_index;
 
             // Since `inverse_memory_index_{a,b}` each only refer to their
@@ -799,39 +813,34 @@ fn coroutine_layout<'tcx>(
                 FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
             (outer_fields, offsets_b, memory_index_b)
         }
-        _ => bug!(),
+        _ => unreachable!(),
     };
 
     let mut size = prefix.size;
     let mut align = prefix.align;
-    let variants = info
-        .variant_fields
+    let variants = variant_fields
         .iter_enumerated()
         .map(|(index, variant_fields)| {
             // Only include overlap-eligible fields when we compute our variant layout.
             let variant_only_tys = variant_fields
                 .iter()
                 .filter(|local| match assignments[**local] {
-                    Unassigned => bug!(),
+                    Unassigned => unreachable!(),
                     Assigned(v) if v == index => true,
-                    Assigned(_) => bug!("assignment does not match variant"),
+                    Assigned(_) => unreachable!("assignment does not match variant"),
                     Ineligible(_) => false,
                 })
-                .map(|local| {
-                    let field_ty = instantiate_field(info.field_tys[*local].ty);
-                    Ty::new_maybe_uninit(tcx, field_ty)
-                });
+                .map(|local| local_layouts[*local]);
 
-            let mut variant = univariant_uninterned(
-                cx,
-                ty,
-                &variant_only_tys.map(|ty| cx.layout_of(ty)).try_collect::<IndexVec<_, _>>()?,
+            let mut variant = calc.univariant(
+                &variant_only_tys.collect::<IndexVec<_, _>>(),
+                &ReprOptions::default(),
                 StructKind::Prefixed(prefix_size, prefix_align.abi),
             )?;
             variant.variants = Variants::Single { index };
 
             let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
-                bug!();
+                unreachable!();
             };
 
             // Now, stitch the promoted and variant-only fields back together in
@@ -841,21 +850,18 @@ fn coroutine_layout<'tcx>(
             // `promoted_memory_index` (as we'd end up with gaps).
             // So instead, we build an "inverse memory_index", as if all of the
             // promoted fields were being used, but leave the elements not in the
-            // subset as `INVALID_FIELD_IDX`, which we can filter out later to
+            // subset as `invalid_field_idx`, which we can filter out later to
             // obtain a valid (bijective) mapping.
-            const INVALID_FIELD_IDX: FieldIdx = FieldIdx::MAX;
-            debug_assert!(variant_fields.next_index() <= INVALID_FIELD_IDX);
+            let invalid_field_idx = promoted_memory_index.len() + memory_index.len();
+            let mut combined_inverse_memory_index =
+                IndexVec::from_elem_n(FieldIdx::new(invalid_field_idx), invalid_field_idx);
 
-            let mut combined_inverse_memory_index = IndexVec::from_elem_n(
-                INVALID_FIELD_IDX,
-                promoted_memory_index.len() + memory_index.len(),
-            );
             let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
             let combined_offsets = variant_fields
                 .iter_enumerated()
                 .map(|(i, local)| {
                     let (offset, memory_index) = match assignments[*local] {
-                        Unassigned => bug!(),
+                        Unassigned => unreachable!(),
                         Assigned(_) => {
                             let (offset, memory_index) = offsets_and_memory_index.next().unwrap();
                             (offset, promoted_memory_index.len() as u32 + memory_index)
@@ -872,7 +878,7 @@ fn coroutine_layout<'tcx>(
 
             // Remove the unused slots and invert the mapping to obtain the
             // combined `memory_index` (also see previous comment).
-            combined_inverse_memory_index.raw.retain(|&i| i != INVALID_FIELD_IDX);
+            combined_inverse_memory_index.raw.retain(|&i| i.index() != invalid_field_idx);
             let combined_memory_index = combined_inverse_memory_index.invert_bijective_mapping();
 
             variant.fields = FieldsShape::Arbitrary {
@@ -884,17 +890,14 @@ fn coroutine_layout<'tcx>(
             align = align.max(variant.align);
             Ok(variant)
         })
-        .try_collect::<IndexVec<VariantIdx, _>>()?;
+        .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
 
     size = size.align_to(align.abi);
 
     let uninhabited = prefix.uninhabited || variants.iter().all(|v| v.is_uninhabited());
     let abi = BackendRepr::Memory { sized: true };
 
-    // this is similar to how ReprOptions populates its field_shuffle_seed
-    let def_hash = tcx.def_path_hash(def_id).0.to_smaller_hash();
-
-    let layout = tcx.mk_layout(LayoutData {
+    Ok(LayoutData {
         variants: Variants::Multiple {
             tag,
             tag_encoding: TagEncoding::Direct,
@@ -915,10 +918,8 @@ fn coroutine_layout<'tcx>(
         align,
         max_repr_align: None,
         unadjusted_abi_align: align.abi,
-        randomization_seed: def_hash,
-    });
-    debug!("coroutine layout ({:?}): {:#?}", ty, layout);
-    Ok(layout)
+        randomization_seed: Default::default(),
+    })
 }
 
 fn record_layout_for_printing<'tcx>(cx: &LayoutCx<'tcx>, layout: TyAndLayout<'tcx>) {
diff --git a/tests/ui/async-await/in-trait/indirect-recursion-issue-112047.stderr b/tests/ui/async-await/in-trait/indirect-recursion-issue-112047.stderr
index 4ca6ef8981984..aa22a453744c0 100644
--- a/tests/ui/async-await/in-trait/indirect-recursion-issue-112047.stderr
+++ b/tests/ui/async-await/in-trait/indirect-recursion-issue-112047.stderr
@@ -3,6 +3,9 @@ error[E0733]: recursion in an async fn requires boxing
    |
 LL |     async fn second(self) {
    |     ^^^^^^^^^^^^^^^^^^^^^
+LL |
+LL |         self.first().await.second().await;
+   |         --------------------------------- recursive call here
    |
    = note: a recursive `async fn` call must introduce indirection such as `Box::pin` to avoid an infinitely sized future
 
diff --git a/tests/ui/layout/post-mono-layout-cycle-2.rs b/tests/ui/layout/post-mono-layout-cycle-2.rs
index 2daac12d7ac93..c8a4a222cc68a 100644
--- a/tests/ui/layout/post-mono-layout-cycle-2.rs
+++ b/tests/ui/layout/post-mono-layout-cycle-2.rs
@@ -1,4 +1,4 @@
-//@ build-fail
+//@ check-fail
 //@ edition: 2021
 
 use std::future::Future;
diff --git a/tests/ui/layout/post-mono-layout-cycle-2.stderr b/tests/ui/layout/post-mono-layout-cycle-2.stderr
index d8c51deffe3b9..f04e01071d7a9 100644
--- a/tests/ui/layout/post-mono-layout-cycle-2.stderr
+++ b/tests/ui/layout/post-mono-layout-cycle-2.stderr
@@ -12,12 +12,6 @@ LL |           Blah::iter(self, iterator).await
    |
    = note: a recursive `async fn` call must introduce indirection such as `Box::pin` to avoid an infinitely sized future
 
-note: the above error was encountered while instantiating `fn Wrap::<()>::ice`
-  --> $DIR/post-mono-layout-cycle-2.rs:54:9
-   |
-LL |         t.ice();
-   |         ^^^^^^^
-
 error: aborting due to 1 previous error
 
 For more information about this error, try `rustc --explain E0733`.

From 08530d3e99dd84eb0a32a00168a11cab44d70118 Mon Sep 17 00:00:00 2001
From: Moulins <arthur.heuillard@orange.fr>
Date: Fri, 7 Mar 2025 22:13:22 +0100
Subject: [PATCH 5/5] Move coroutine layout logic to `rustc_abi`

---
 compiler/rustc_abi/src/layout.rs           |  30 ++
 compiler/rustc_abi/src/layout/coroutine.rs | 320 ++++++++++++++++++
 compiler/rustc_ty_utils/src/layout.rs      | 357 ++-------------------
 3 files changed, 373 insertions(+), 334 deletions(-)
 create mode 100644 compiler/rustc_abi/src/layout/coroutine.rs

diff --git a/compiler/rustc_abi/src/layout.rs b/compiler/rustc_abi/src/layout.rs
index d0d7cc68a77a8..7bffeaf4cc9e2 100644
--- a/compiler/rustc_abi/src/layout.rs
+++ b/compiler/rustc_abi/src/layout.rs
@@ -4,6 +4,7 @@ use std::{cmp, iter};
 
 use rustc_hashes::Hash64;
 use rustc_index::Idx;
+use rustc_index::bit_set::BitMatrix;
 use tracing::debug;
 
 use crate::{
@@ -12,6 +13,7 @@ use crate::{
     Variants, WrappingRange,
 };
 
+mod coroutine;
 mod simple;
 
 #[cfg(feature = "nightly")]
@@ -200,6 +202,34 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
         })
     }
 
+    /// Compute the layout for a coroutine.
+    ///
+    /// This uses dedicated code instead of [`Self::layout_of_struct_or_enum`], as coroutine
+    /// fields may be shared between multiple variants (see the [`coroutine`] module for details).
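+    ///
+    /// `local_layouts` gives one layout per saved local, `prefix_layouts`
+    /// the layouts of the always-present prefix fields (the coroutine's
+    /// upvars, in rustc's case), and `tag_to_layout` lets the caller turn
+    /// the computed discriminant scalar into a full layout reference.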
+    pub fn coroutine<
+        'a,
+        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
+        VariantIdx: Idx,
+        FieldIdx: Idx,
+        LocalIdx: Idx,
+    >(
+        &self,
+        local_layouts: &IndexSlice<LocalIdx, F>,
+        prefix_layouts: IndexVec<FieldIdx, F>,
+        variant_fields: &IndexSlice<VariantIdx, IndexVec<FieldIdx, LocalIdx>>,
+        storage_conflicts: &BitMatrix<LocalIdx, LocalIdx>,
+        tag_to_layout: impl Fn(Scalar) -> F,
+    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
+        coroutine::layout(
+            self,
+            local_layouts,
+            prefix_layouts,
+            variant_fields,
+            storage_conflicts,
+            tag_to_layout,
+        )
+    }
+
     pub fn univariant<
         'a,
         FieldIdx: Idx,
diff --git a/compiler/rustc_abi/src/layout/coroutine.rs b/compiler/rustc_abi/src/layout/coroutine.rs
new file mode 100644
index 0000000000000..27e704d538c83
--- /dev/null
+++ b/compiler/rustc_abi/src/layout/coroutine.rs
@@ -0,0 +1,320 @@
+//! Coroutine layout logic.
+//!
+//! When laying out coroutines, we divide our saved local fields into two
+//! categories: overlap-eligible and overlap-ineligible.
+//!
+//! Those fields which are ineligible for overlap go in a "prefix" at the
+//! beginning of the layout, and always have space reserved for them.
+//!
+//! Overlap-eligible fields are only assigned to one variant, so we lay
+//! those fields out for each variant and put them right after the
+//! prefix.
+//!
+//! Finally, in the layout details, we point to the fields from the
+//! variants they are assigned to. It is possible for some fields to be
+//! included in multiple variants. No field ever "moves around" in the
+//! layout; its offset is always the same.
+//!
+//! Also included in the layout are the upvars and the discriminant.
+//! These are included as fields on the "outer" layout; they are not part
+//! of any variant.
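+//!
+//! As an illustrative sketch (not a guaranteed layout), consider a
+//! coroutine with two suspension points whose saved locals `a` and `b`
+//! are never storage-live at the same time: `a` (assigned to one variant)
+//! and `b` (assigned to the other) may be given the same offset right
+//! after the prefix, since at most one of them exists in any state.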
+
+use std::iter;
+
+use rustc_index::bit_set::{BitMatrix, DenseBitSet};
+use rustc_index::{Idx, IndexSlice, IndexVec};
+use tracing::{debug, trace};
+
+use crate::{
+    BackendRepr, FieldsShape, HasDataLayout, Integer, LayoutData, Primitive, ReprOptions, Scalar,
+    StructKind, TagEncoding, Variants, WrappingRange,
+};
+
+/// Overlap eligibility and variant assignment for each CoroutineSavedLocal.
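+///
+/// Locals start out `Unassigned`; a local seen in exactly one variant
+/// becomes `Assigned(variant)`, while a local used by several variants
+/// (or evicted by the conflict heuristic) becomes `Ineligible`, gaining
+/// its prefix field index once the promotion order is fixed.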
+#[derive(Clone, Debug, PartialEq)]
+enum SavedLocalEligibility<VariantIdx, FieldIdx> {
+    Unassigned,
+    Assigned(VariantIdx),
+    Ineligible(Option<FieldIdx>),
+}
+
+/// Compute the eligibility and assignment of each local.
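+///
+/// Returns the set of locals that were deemed ineligible for overlap
+/// (and will therefore be promoted to the prefix), together with the
+/// final assignment of every saved local.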
+fn coroutine_saved_local_eligibility<VariantIdx: Idx, FieldIdx: Idx, LocalIdx: Idx>(
+    nb_locals: usize,
+    variant_fields: &IndexSlice<VariantIdx, IndexVec<FieldIdx, LocalIdx>>,
+    storage_conflicts: &BitMatrix<LocalIdx, LocalIdx>,
+) -> (DenseBitSet<LocalIdx>, IndexVec<LocalIdx, SavedLocalEligibility<VariantIdx, FieldIdx>>) {
+    use SavedLocalEligibility::*;
+
+    let mut assignments: IndexVec<LocalIdx, _> = IndexVec::from_elem_n(Unassigned, nb_locals);
+
+    // The saved locals not eligible for overlap. These will get
+    // "promoted" to the prefix of our coroutine.
+    let mut ineligible_locals = DenseBitSet::new_empty(nb_locals);
+
+    // Figure out which of our saved locals are fields in only
+    // one variant. The rest are deemed ineligible for overlap.
+    for (variant_index, fields) in variant_fields.iter_enumerated() {
+        for local in fields {
+            match assignments[*local] {
+                Unassigned => {
+                    assignments[*local] = Assigned(variant_index);
+                }
+                Assigned(idx) => {
+                    // We've already seen this local at another suspension
+                    // point, so it is no longer a candidate.
+                    trace!(
+                        "removing local {:?} in >1 variant ({:?}, {:?})",
+                        local, variant_index, idx
+                    );
+                    ineligible_locals.insert(*local);
+                    assignments[*local] = Ineligible(None);
+                }
+                Ineligible(_) => {}
+            }
+        }
+    }
+
+    // Next, check every pair of eligible locals to see if they
+    // conflict.
+    for local_a in storage_conflicts.rows() {
+        let conflicts_a = storage_conflicts.count(local_a);
+        if ineligible_locals.contains(local_a) {
+            continue;
+        }
+
+        for local_b in storage_conflicts.iter(local_a) {
+            // local_a and local_b are storage live at the same time, therefore they
+            // cannot overlap in the coroutine layout. The only way to guarantee
+            // this is if they are in the same variant, or one is ineligible
+            // (which means it is stored in every variant).
+            if ineligible_locals.contains(local_b) || assignments[local_a] == assignments[local_b] {
+                continue;
+            }
+
+            // If they conflict, we will choose one to make ineligible.
+            // This is not always optimal; it's just a greedy heuristic that
+            // seems to produce good results most of the time.
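+            // Evicting the local with the higher conflict count keeps more
+            // of its (many) conflict partners eligible for overlap.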
+            let conflicts_b = storage_conflicts.count(local_b);
+            let (remove, other) =
+                if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
+            ineligible_locals.insert(remove);
+            assignments[remove] = Ineligible(None);
+            trace!("removing local {:?} due to conflict with {:?}", remove, other);
+        }
+    }
+
+    // Count the number of variants in use. If only one of them, then it is
+    // impossible to overlap any locals in our layout. In this case it's
+    // always better to make the remaining locals ineligible, so we can
+    // lay them out with the other locals in the prefix and eliminate
+    // unnecessary padding bytes.
+    {
+        let mut used_variants = DenseBitSet::new_empty(variant_fields.len());
+        for assignment in &assignments {
+            if let Assigned(idx) = assignment {
+                used_variants.insert(*idx);
+            }
+        }
+        if used_variants.count() < 2 {
+            for assignment in assignments.iter_mut() {
+                *assignment = Ineligible(None);
+            }
+            ineligible_locals.insert_all();
+        }
+    }
+
+    // Write down the order of our locals that will be promoted to the prefix.
+    {
+        for (idx, local) in ineligible_locals.iter().enumerate() {
+            assignments[local] = Ineligible(Some(FieldIdx::new(idx)));
+        }
+    }
+    debug!("coroutine saved local assignments: {:?}", assignments);
+
+    (ineligible_locals, assignments)
+}
+
+/// Compute the full coroutine layout.
+pub(super) fn layout<
+    'a,
+    F: core::ops::Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + core::fmt::Debug + Copy,
+    VariantIdx: Idx,
+    FieldIdx: Idx,
+    LocalIdx: Idx,
+>(
+    calc: &super::LayoutCalculator<impl HasDataLayout>,
+    local_layouts: &IndexSlice<LocalIdx, F>,
+    mut prefix_layouts: IndexVec<FieldIdx, F>,
+    variant_fields: &IndexSlice<VariantIdx, IndexVec<FieldIdx, LocalIdx>>,
+    storage_conflicts: &BitMatrix<LocalIdx, LocalIdx>,
+    tag_to_layout: impl Fn(Scalar) -> F,
+) -> super::LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
+    use SavedLocalEligibility::*;
+
+    let (ineligible_locals, assignments) =
+        coroutine_saved_local_eligibility(local_layouts.len(), variant_fields, storage_conflicts);
+
+    // Build a prefix layout, including "promoting" all ineligible
+    // locals as part of the prefix. We compute the layout of all of
+    // these fields at once to get optimal packing.
+    let tag_index = prefix_layouts.len();
+
+    // `variant_fields` already accounts for the reserved variants, so no need to add them.
+    let max_discr = (variant_fields.len() - 1) as u128;
+    let discr_int = Integer::fit_unsigned(max_discr);
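+    // E.g. four variants give `max_discr == 3`, which fits in an `I8` tag
+    // with valid range `0..=3`.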
+    let tag = Scalar::Initialized {
+        value: Primitive::Int(discr_int, /* signed = */ false),
+        valid_range: WrappingRange { start: 0, end: max_discr },
+    };
+
+    let promoted_layouts = ineligible_locals.iter().map(|local| local_layouts[local]);
+    prefix_layouts.push(tag_to_layout(tag));
+    prefix_layouts.extend(promoted_layouts);
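+    // The prefix is now `[upvars..., tag, promoted locals...]`, with the tag
+    // at `tag_index` as recorded above.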
+    let prefix =
+        calc.univariant(&prefix_layouts, &ReprOptions::default(), StructKind::AlwaysSized)?;
+
+    let (prefix_size, prefix_align) = (prefix.size, prefix.align);
+
+    // Split the prefix layout into the "outer" fields (upvars and
+    // discriminant) and the "promoted" fields. Promoted fields will
+    // get included in each variant that requested them in
+    // CoroutineLayout.
+    debug!("prefix = {:#?}", prefix);
+    let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
+        FieldsShape::Arbitrary { mut offsets, memory_index } => {
+            let mut inverse_memory_index = memory_index.invert_bijective_mapping();
+
+            // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
+            // "outer" and "promoted" fields respectively.
+            let b_start = FieldIdx::new(tag_index + 1);
+            let offsets_b = IndexVec::from_raw(offsets.raw.split_off(b_start.index()));
+            let offsets_a = offsets;
+
+            // Disentangle the "a" and "b" components of `inverse_memory_index`
+            // by preserving the order but keeping only one disjoint "half" each.
+            // FIXME(eddyb) build a better abstraction for permutations, if possible.
+            let inverse_memory_index_b: IndexVec<u32, FieldIdx> = inverse_memory_index
+                .iter()
+                .filter_map(|&i| i.index().checked_sub(b_start.index()).map(FieldIdx::new))
+                .collect();
+            inverse_memory_index.raw.retain(|&i| i.index() < b_start.index());
+            let inverse_memory_index_a = inverse_memory_index;
+
+            // Since `inverse_memory_index_{a,b}` each only refer to their
+            // respective fields, they can be safely inverted
+            let memory_index_a = inverse_memory_index_a.invert_bijective_mapping();
+            let memory_index_b = inverse_memory_index_b.invert_bijective_mapping();
+
+            let outer_fields =
+                FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
+            (outer_fields, offsets_b, memory_index_b)
+        }
+        _ => unreachable!(),
+    };
+
+    let mut size = prefix.size;
+    let mut align = prefix.align;
+    let variants = variant_fields
+        .iter_enumerated()
+        .map(|(index, variant_fields)| {
+            // Only include overlap-eligible fields when we compute our variant layout.
+            let variant_only_tys = variant_fields
+                .iter()
+                .filter(|local| match assignments[**local] {
+                    Unassigned => unreachable!(),
+                    Assigned(v) if v == index => true,
+                    Assigned(_) => unreachable!("assignment does not match variant"),
+                    Ineligible(_) => false,
+                })
+                .map(|local| local_layouts[*local]);
+
+            let mut variant = calc.univariant(
+                &variant_only_tys.collect::<IndexVec<_, _>>(),
+                &ReprOptions::default(),
+                StructKind::Prefixed(prefix_size, prefix_align.abi),
+            )?;
+            variant.variants = Variants::Single { index };
+
+            let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
+                unreachable!();
+            };
+
+            // Now, stitch the promoted and variant-only fields back together in
+            // the order they are mentioned by our CoroutineLayout.
+            // Because we only use some subset (that can differ between variants)
+            // of the promoted fields, we can't just pick those elements of the
+            // `promoted_memory_index` (as we'd end up with gaps).
+            // So instead, we build an "inverse memory_index", as if all of the
+            // promoted fields were being used, but leave the elements not in the
+            // subset as `invalid_field_idx`, which we can filter out later to
+            // obtain a valid (bijective) mapping.
+            let invalid_field_idx = promoted_memory_index.len() + memory_index.len();
+            let mut combined_inverse_memory_index =
+                IndexVec::from_elem_n(FieldIdx::new(invalid_field_idx), invalid_field_idx);
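+            // (E.g. two promoted fields plus two variant-only fields start
+            // from `[4, 4, 4, 4]`; only the slots actually used by this
+            // variant are overwritten below, and the rest are filtered out.)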
+
+            let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
+            let combined_offsets = variant_fields
+                .iter_enumerated()
+                .map(|(i, local)| {
+                    let (offset, memory_index) = match assignments[*local] {
+                        Unassigned => unreachable!(),
+                        Assigned(_) => {
+                            let (offset, memory_index) = offsets_and_memory_index.next().unwrap();
+                            (offset, promoted_memory_index.len() as u32 + memory_index)
+                        }
+                        Ineligible(field_idx) => {
+                            let field_idx = field_idx.unwrap();
+                            (promoted_offsets[field_idx], promoted_memory_index[field_idx])
+                        }
+                    };
+                    combined_inverse_memory_index[memory_index] = i;
+                    offset
+                })
+                .collect();
+
+            // Remove the unused slots and invert the mapping to obtain the
+            // combined `memory_index` (also see previous comment).
+            combined_inverse_memory_index.raw.retain(|&i| i.index() != invalid_field_idx);
+            let combined_memory_index = combined_inverse_memory_index.invert_bijective_mapping();
+
+            variant.fields = FieldsShape::Arbitrary {
+                offsets: combined_offsets,
+                memory_index: combined_memory_index,
+            };
+
+            size = size.max(variant.size);
+            align = align.max(variant.align);
+            Ok(variant)
+        })
+        .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
+
+    size = size.align_to(align.abi);
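+    // All variants share one allocation, so the coroutine is as large as its
+    // largest variant (or prefix), rounded up to the overall alignment.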
+
+    let uninhabited = prefix.uninhabited || variants.iter().all(|v| v.is_uninhabited());
+    let abi = BackendRepr::Memory { sized: true };
+
+    Ok(LayoutData {
+        variants: Variants::Multiple {
+            tag,
+            tag_encoding: TagEncoding::Direct,
+            tag_field: tag_index,
+            variants,
+        },
+        fields: outer_fields,
+        backend_repr: abi,
+        // Suppress niches inside coroutines. If the niche is inside a field that is aliased (due to
+        // self-referentiality), getting the discriminant can cause aliasing violations.
+        // `UnsafeCell` blocks niches for the same reason, but we don't yet have `UnsafePinned` that
+        // would do the same for us here.
+        // See <https://github.com/rust-lang/rust/issues/63818>, <https://github.com/rust-lang/miri/issues/3780>.
+        // FIXME: Remove when <https://github.com/rust-lang/rust/issues/125735> is implemented and aliased coroutine fields are wrapped in `UnsafePinned`.
+        largest_niche: None,
+        uninhabited,
+        size,
+        align,
+        max_repr_align: None,
+        unadjusted_abi_align: align.abi,
+        randomization_seed: Default::default(),
+    })
+}
diff --git a/compiler/rustc_ty_utils/src/layout.rs b/compiler/rustc_ty_utils/src/layout.rs
index efca395fe4aeb..5a4bb2c95da6f 100644
--- a/compiler/rustc_ty_utils/src/layout.rs
+++ b/compiler/rustc_ty_utils/src/layout.rs
@@ -1,17 +1,13 @@
-use std::fmt::Debug;
-use std::iter;
-
 use hir::def_id::DefId;
 use rustc_abi::Integer::{I8, I32};
 use rustc_abi::Primitive::{self, Float, Int, Pointer};
 use rustc_abi::{
-    AddressSpace, BackendRepr, FIRST_VARIANT, FieldIdx, FieldsShape, HasDataLayout, Integer,
-    Layout, LayoutCalculator, LayoutCalculatorError, LayoutData, Niche, ReprOptions, Scalar, Size,
-    StructKind, TagEncoding, VariantIdx, Variants, WrappingRange,
+    AddressSpace, BackendRepr, FIRST_VARIANT, FieldIdx, FieldsShape, HasDataLayout, Layout,
+    LayoutCalculatorError, LayoutData, Niche, ReprOptions, Scalar, Size, StructKind, TagEncoding,
+    VariantIdx, Variants, WrappingRange,
 };
 use rustc_hashes::Hash64;
-use rustc_index::bit_set::{BitMatrix, DenseBitSet};
-use rustc_index::{Idx, IndexSlice, IndexVec};
+use rustc_index::IndexVec;
 use rustc_middle::bug;
 use rustc_middle::query::Providers;
 use rustc_middle::ty::layout::{
@@ -23,7 +19,7 @@ use rustc_middle::ty::{
 };
 use rustc_session::{DataTypeKind, FieldInfo, FieldKind, SizeKind, VariantInfo};
 use rustc_span::{Symbol, sym};
-use tracing::{debug, instrument, trace};
+use tracing::{debug, instrument};
 use {rustc_abi as abi, rustc_hir as hir};
 
 use crate::errors::{NonPrimitiveSimdType, OversizedSimdType, ZeroLengthSimdType};
@@ -403,23 +399,24 @@ fn layout_of_uncached<'tcx>(
                 .map(|ty| cx.layout_of(ty))
                 .try_collect::<IndexVec<_, _>>()?;
 
-            let layout = coroutine_layout(
-                &cx.calc,
-                &local_layouts,
-                prefix_layouts,
-                &info.variant_fields,
-                &info.storage_conflicts,
-                |tag| TyAndLayout {
-                    ty: tag.primitive().to_ty(tcx),
-                    layout: tcx.mk_layout(LayoutData::scalar(cx, tag)),
-                },
-            )
-            .map(|mut layout| {
-                // This is similar to how `ReprOptions` populates its `field_shuffle_seed`.
-                layout.randomization_seed = tcx.def_path_hash(def_id).0.to_smaller_hash();
-                debug!("coroutine layout ({:?}): {:#?}", ty, layout);
-                layout
-            });
+            let layout = cx
+                .calc
+                .coroutine(
+                    &local_layouts,
+                    prefix_layouts,
+                    &info.variant_fields,
+                    &info.storage_conflicts,
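+                    // `rustc_abi` cannot intern layouts itself, so it calls
+                    // back into the `tcx` to turn the tag scalar into a
+                    // `TyAndLayout`.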
+                    |tag| TyAndLayout {
+                        ty: tag.primitive().to_ty(tcx),
+                        layout: tcx.mk_layout(LayoutData::scalar(cx, tag)),
+                    },
+                )
+                .map(|mut layout| {
+                    // This is similar to how `ReprOptions` populates its `field_shuffle_seed`.
+                    layout.randomization_seed = tcx.def_path_hash(def_id).0.to_smaller_hash();
+                    debug!("coroutine layout ({:?}): {:#?}", ty, layout);
+                    layout
+                });
             map_layout(layout)?
         }
 
@@ -614,314 +611,6 @@ fn layout_of_uncached<'tcx>(
     })
 }
 
-/// Overlap eligibility and variant assignment for each CoroutineSavedLocal.
-#[derive(Clone, Debug, PartialEq)]
-enum SavedLocalEligibility<VariantIdx, FieldIdx> {
-    Unassigned,
-    Assigned(VariantIdx),
-    Ineligible(Option<FieldIdx>),
-}
-
-// When laying out coroutines, we divide our saved local fields into two
-// categories: overlap-eligible and overlap-ineligible.
-//
-// Those fields which are ineligible for overlap go in a "prefix" at the
-// beginning of the layout, and always have space reserved for them.
-//
-// Overlap-eligible fields are only assigned to one variant, so we lay
-// those fields out for each variant and put them right after the
-// prefix.
-//
-// Finally, in the layout details, we point to the fields from the
-// variants they are assigned to. It is possible for some fields to be
-// included in multiple variants. No field ever "moves around" in the
-// layout; its offset is always the same.
-//
-// Also included in the layout are the upvars and the discriminant.
-// These are included as fields on the "outer" layout; they are not part
-// of any variant.
-
-/// Compute the eligibility and assignment of each local.
-fn coroutine_saved_local_eligibility<VariantIdx: Idx, FieldIdx: Idx, LocalIdx: Idx>(
-    nb_locals: usize,
-    variant_fields: &IndexSlice<VariantIdx, IndexVec<FieldIdx, LocalIdx>>,
-    storage_conflicts: &BitMatrix<LocalIdx, LocalIdx>,
-) -> (DenseBitSet<LocalIdx>, IndexVec<LocalIdx, SavedLocalEligibility<VariantIdx, FieldIdx>>) {
-    use SavedLocalEligibility::*;
-
-    let mut assignments: IndexVec<LocalIdx, _> = IndexVec::from_elem_n(Unassigned, nb_locals);
-
-    // The saved locals not eligible for overlap. These will get
-    // "promoted" to the prefix of our coroutine.
-    let mut ineligible_locals = DenseBitSet::new_empty(nb_locals);
-
-    // Figure out which of our saved locals are fields in only
-    // one variant. The rest are deemed ineligible for overlap.
-    for (variant_index, fields) in variant_fields.iter_enumerated() {
-        for local in fields {
-            match assignments[*local] {
-                Unassigned => {
-                    assignments[*local] = Assigned(variant_index);
-                }
-                Assigned(idx) => {
-                    // We've already seen this local at another suspension
-                    // point, so it is no longer a candidate.
-                    trace!(
-                        "removing local {:?} in >1 variant ({:?}, {:?})",
-                        local, variant_index, idx
-                    );
-                    ineligible_locals.insert(*local);
-                    assignments[*local] = Ineligible(None);
-                }
-                Ineligible(_) => {}
-            }
-        }
-    }
-
-    // Next, check every pair of eligible locals to see if they
-    // conflict.
-    for local_a in storage_conflicts.rows() {
-        let conflicts_a = storage_conflicts.count(local_a);
-        if ineligible_locals.contains(local_a) {
-            continue;
-        }
-
-        for local_b in storage_conflicts.iter(local_a) {
-            // local_a and local_b are storage live at the same time, therefore they
-            // cannot overlap in the coroutine layout. The only way to guarantee
-            // this is if they are in the same variant, or one is ineligible
-            // (which means it is stored in every variant).
-            if ineligible_locals.contains(local_b) || assignments[local_a] == assignments[local_b] {
-                continue;
-            }
-
-            // If they conflict, we will choose one to make ineligible.
-            // This is not always optimal; it's just a greedy heuristic that
-            // seems to produce good results most of the time.
-            let conflicts_b = storage_conflicts.count(local_b);
-            let (remove, other) =
-                if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
-            ineligible_locals.insert(remove);
-            assignments[remove] = Ineligible(None);
-            trace!("removing local {:?} due to conflict with {:?}", remove, other);
-        }
-    }
-
-    // Count the number of variants in use. If only one of them, then it is
-    // impossible to overlap any locals in our layout. In this case it's
-    // always better to make the remaining locals ineligible, so we can
-    // lay them out with the other locals in the prefix and eliminate
-    // unnecessary padding bytes.
-    {
-        let mut used_variants = DenseBitSet::new_empty(variant_fields.len());
-        for assignment in &assignments {
-            if let Assigned(idx) = assignment {
-                used_variants.insert(*idx);
-            }
-        }
-        if used_variants.count() < 2 {
-            for assignment in assignments.iter_mut() {
-                *assignment = Ineligible(None);
-            }
-            ineligible_locals.insert_all();
-        }
-    }
-
-    // Write down the order of our locals that will be promoted to the prefix.
-    {
-        for (idx, local) in ineligible_locals.iter().enumerate() {
-            assignments[local] = Ineligible(Some(FieldIdx::new(idx)));
-        }
-    }
-    debug!("coroutine saved local assignments: {:?}", assignments);
-
-    (ineligible_locals, assignments)
-}
-
-/// Compute the full coroutine layout.
-fn coroutine_layout<
-    'a,
-    F: core::ops::Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + core::fmt::Debug + Copy,
-    VariantIdx: Idx,
-    FieldIdx: Idx,
-    LocalIdx: Idx,
->(
-    calc: &LayoutCalculator<impl HasDataLayout>,
-    local_layouts: &IndexSlice<LocalIdx, F>,
-    mut prefix_layouts: IndexVec<FieldIdx, F>,
-    variant_fields: &IndexSlice<VariantIdx, IndexVec<FieldIdx, LocalIdx>>,
-    storage_conflicts: &BitMatrix<LocalIdx, LocalIdx>,
-    tag_to_layout: impl Fn(Scalar) -> F,
-) -> Result<LayoutData<FieldIdx, VariantIdx>, LayoutCalculatorError<F>> {
-    use SavedLocalEligibility::*;
-
-    let (ineligible_locals, assignments) =
-        coroutine_saved_local_eligibility(local_layouts.len(), variant_fields, storage_conflicts);
-
-    // Build a prefix layout, including "promoting" all ineligible
-    // locals as part of the prefix. We compute the layout of all of
-    // these fields at once to get optimal packing.
-    let tag_index = prefix_layouts.len();
-
-    // `variant_fields` already accounts for the reserved variants, so no need to add them.
-    let max_discr = (variant_fields.len() - 1) as u128;
-    let discr_int = Integer::fit_unsigned(max_discr);
-    let tag = Scalar::Initialized {
-        value: Primitive::Int(discr_int, /* signed = */ false),
-        valid_range: WrappingRange { start: 0, end: max_discr },
-    };
-
-    let promoted_layouts = ineligible_locals.iter().map(|local| local_layouts[local]);
-    prefix_layouts.push(tag_to_layout(tag));
-    prefix_layouts.extend(promoted_layouts);
-    let prefix =
-        calc.univariant(&prefix_layouts, &ReprOptions::default(), StructKind::AlwaysSized)?;
-
-    let (prefix_size, prefix_align) = (prefix.size, prefix.align);
-
-    // Split the prefix layout into the "outer" fields (upvars and
-    // discriminant) and the "promoted" fields. Promoted fields will
-    // get included in each variant that requested them in
-    // CoroutineLayout.
-    debug!("prefix = {:#?}", prefix);
-    let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
-        FieldsShape::Arbitrary { mut offsets, memory_index } => {
-            let mut inverse_memory_index = memory_index.invert_bijective_mapping();
-
-            // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
-            // "outer" and "promoted" fields respectively.
-            let b_start = FieldIdx::new(tag_index + 1);
-            let offsets_b = IndexVec::from_raw(offsets.raw.split_off(b_start.index()));
-            let offsets_a = offsets;
-
-            // Disentangle the "a" and "b" components of `inverse_memory_index`
-            // by preserving the order but keeping only one disjoint "half" each.
-            // FIXME(eddyb) build a better abstraction for permutations, if possible.
-            let inverse_memory_index_b: IndexVec<u32, FieldIdx> = inverse_memory_index
-                .iter()
-                .filter_map(|&i| i.index().checked_sub(b_start.index()).map(FieldIdx::new))
-                .collect();
-            inverse_memory_index.raw.retain(|&i| i.index() < b_start.index());
-            let inverse_memory_index_a = inverse_memory_index;
-
-            // Since `inverse_memory_index_{a,b}` each only refer to their
-            // respective fields, they can be safely inverted
-            let memory_index_a = inverse_memory_index_a.invert_bijective_mapping();
-            let memory_index_b = inverse_memory_index_b.invert_bijective_mapping();
-
-            let outer_fields =
-                FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
-            (outer_fields, offsets_b, memory_index_b)
-        }
-        _ => unreachable!(),
-    };
-
-    let mut size = prefix.size;
-    let mut align = prefix.align;
-    let variants = variant_fields
-        .iter_enumerated()
-        .map(|(index, variant_fields)| {
-            // Only include overlap-eligible fields when we compute our variant layout.
-            let variant_only_tys = variant_fields
-                .iter()
-                .filter(|local| match assignments[**local] {
-                    Unassigned => unreachable!(),
-                    Assigned(v) if v == index => true,
-                    Assigned(_) => unreachable!("assignment does not match variant"),
-                    Ineligible(_) => false,
-                })
-                .map(|local| local_layouts[*local]);
-
-            let mut variant = calc.univariant(
-                &variant_only_tys.collect::<IndexVec<_, _>>(),
-                &ReprOptions::default(),
-                StructKind::Prefixed(prefix_size, prefix_align.abi),
-            )?;
-            variant.variants = Variants::Single { index };
-
-            let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
-                unreachable!();
-            };
-
-            // Now, stitch the promoted and variant-only fields back together in
-            // the order they are mentioned by our CoroutineLayout.
-            // Because we only use some subset (that can differ between variants)
-            // of the promoted fields, we can't just pick those elements of the
-            // `promoted_memory_index` (as we'd end up with gaps).
-            // So instead, we build an "inverse memory_index", as if all of the
-            // promoted fields were being used, but leave the elements not in the
-            // subset as `invalid_field_idx`, which we can filter out later to
-            // obtain a valid (bijective) mapping.
-            let invalid_field_idx = promoted_memory_index.len() + memory_index.len();
-            let mut combined_inverse_memory_index =
-                IndexVec::from_elem_n(FieldIdx::new(invalid_field_idx), invalid_field_idx);
-
-            let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
-            let combined_offsets = variant_fields
-                .iter_enumerated()
-                .map(|(i, local)| {
-                    let (offset, memory_index) = match assignments[*local] {
-                        Unassigned => unreachable!(),
-                        Assigned(_) => {
-                            let (offset, memory_index) = offsets_and_memory_index.next().unwrap();
-                            (offset, promoted_memory_index.len() as u32 + memory_index)
-                        }
-                        Ineligible(field_idx) => {
-                            let field_idx = field_idx.unwrap();
-                            (promoted_offsets[field_idx], promoted_memory_index[field_idx])
-                        }
-                    };
-                    combined_inverse_memory_index[memory_index] = i;
-                    offset
-                })
-                .collect();
-
-            // Remove the unused slots and invert the mapping to obtain the
-            // combined `memory_index` (also see previous comment).
-            combined_inverse_memory_index.raw.retain(|&i| i.index() != invalid_field_idx);
-            let combined_memory_index = combined_inverse_memory_index.invert_bijective_mapping();
-
-            variant.fields = FieldsShape::Arbitrary {
-                offsets: combined_offsets,
-                memory_index: combined_memory_index,
-            };
-
-            size = size.max(variant.size);
-            align = align.max(variant.align);
-            Ok(variant)
-        })
-        .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
-
-    size = size.align_to(align.abi);
-
-    let uninhabited = prefix.uninhabited || variants.iter().all(|v| v.is_uninhabited());
-    let abi = BackendRepr::Memory { sized: true };
-
-    Ok(LayoutData {
-        variants: Variants::Multiple {
-            tag,
-            tag_encoding: TagEncoding::Direct,
-            tag_field: tag_index,
-            variants,
-        },
-        fields: outer_fields,
-        backend_repr: abi,
-        // Suppress niches inside coroutines. If the niche is inside a field that is aliased (due to
-        // self-referentiality), getting the discriminant can cause aliasing violations.
-        // `UnsafeCell` blocks niches for the same reason, but we don't yet have `UnsafePinned` that
-        // would do the same for us here.
-        // See <https://github.com/rust-lang/rust/issues/63818>, <https://github.com/rust-lang/miri/issues/3780>.
-        // FIXME: Remove when <https://github.com/rust-lang/rust/issues/125735> is implemented and aliased coroutine fields are wrapped in `UnsafePinned`.
-        largest_niche: None,
-        uninhabited,
-        size,
-        align,
-        max_repr_align: None,
-        unadjusted_abi_align: align.abi,
-        randomization_seed: Default::default(),
-    })
-}
-
 fn record_layout_for_printing<'tcx>(cx: &LayoutCx<'tcx>, layout: TyAndLayout<'tcx>) {
     // Ignore layouts that are done with non-empty environments or
     // non-monomorphic layouts, as the user only wants to see the stuff