rename BackendRepr::Vector → SimdVector #137804

Merged · 1 commit · Mar 2, 2025
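The change is mechanical: `BackendRepr::Vector` becomes `BackendRepr::SimdVector` at its definition and at every construction and match site, with a few arms reflowed where the longer name no longer fits on one line. The old name was easy to misread next to `Vec<T>`; the new one says what the layout actually describes. A motivating sketch of the kind of type this variant represents (nightly-only `repr(simd)`, shown for illustration only, not part of this diff):

```rust
// Nightly-only illustration (requires the repr_simd feature): a 4-lane
// f32 SIMD type. Its layout is the kind this PR renames, i.e. roughly
// BackendRepr::SimdVector { element: <f32 scalar>, count: 4 }.
#![feature(repr_simd)]

#[repr(simd)]
struct F32x4([f32; 4]);
```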
2 changes: 1 addition & 1 deletion compiler/rustc_abi/src/callconv.rs
```diff
@@ -74,7 +74,7 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
                 Ok(HomogeneousAggregate::Homogeneous(Reg { kind, size: self.size }))
             }

-            BackendRepr::Vector { .. } => {
+            BackendRepr::SimdVector { .. } => {
                 assert!(!self.is_zst());
                 Ok(HomogeneousAggregate::Homogeneous(Reg {
                     kind: RegKind::Vector,
```
12 changes: 8 additions & 4 deletions compiler/rustc_abi/src/layout.rs
```diff
@@ -386,13 +386,15 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                     BackendRepr::Memory { sized: true }
                 }
                 // Vectors require at least element alignment, else disable the opt
-                BackendRepr::Vector { element, count: _ } if element.align(dl).abi > align.abi => {
+                BackendRepr::SimdVector { element, count: _ }
+                    if element.align(dl).abi > align.abi =>
+                {
                     BackendRepr::Memory { sized: true }
                 }
                 // the alignment tests passed and we can use this
                 BackendRepr::Scalar(..)
                 | BackendRepr::ScalarPair(..)
-                | BackendRepr::Vector { .. }
+                | BackendRepr::SimdVector { .. }
                 | BackendRepr::Memory { .. } => repr,
             },
         };
@@ -464,7 +466,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                 hide_niches(a);
                 hide_niches(b);
             }
-            BackendRepr::Vector { element, count: _ } => hide_niches(element),
+            BackendRepr::SimdVector { element, count: _ } => hide_niches(element),
             BackendRepr::Memory { sized: _ } => {}
         }
         st.largest_niche = None;
@@ -1314,7 +1316,9 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                 match field.backend_repr {
                     // For plain scalars, or vectors of them, we can't unpack
                     // newtypes for `#[repr(C)]`, as that affects C ABIs.
-                    BackendRepr::Scalar(_) | BackendRepr::Vector { .. } if optimize_abi => {
+                    BackendRepr::Scalar(_) | BackendRepr::SimdVector { .. }
+                        if optimize_abi =>
+                    {
                         abi = field.backend_repr;
                     }
                     // But scalar pairs are Rust-specific and get
```
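Note on the first hunk: the guard disables the layout optimization whenever the SIMD element needs stricter alignment than the containing layout provides. A reduced model of that decision (hypothetical `AbiAlign`/`Repr` stand-ins, not rustc_abi's types):

```rust
// Reduced sketch of the alignment guard: keep the field's SIMD repr only
// if the element's ABI alignment is still satisfied by the struct;
// otherwise fall back to the Memory representation.
#[derive(Clone, Copy, PartialEq, PartialOrd)]
struct AbiAlign(u64); // alignment in bytes, simplified

enum Repr {
    SimdVector { element_align: AbiAlign },
    Memory { sized: bool },
}

fn clamp_repr(repr: Repr, struct_align: AbiAlign) -> Repr {
    match repr {
        // Element needs more alignment than the struct can give: disable the opt.
        Repr::SimdVector { element_align } if element_align > struct_align => {
            Repr::Memory { sized: true }
        }
        other => other,
    }
}
```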
2 changes: 1 addition & 1 deletion compiler/rustc_abi/src/layout/ty.rs
```diff
@@ -219,7 +219,7 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
         C: HasDataLayout,
     {
         match self.backend_repr {
-            BackendRepr::Vector { .. } => self.size == expected_size,
+            BackendRepr::SimdVector { .. } => self.size == expected_size,
             BackendRepr::Memory { .. } => {
                 if self.fields.count() == 1 && self.fields.offset(0).bytes() == 0 {
                     self.field(cx, 0).is_single_vector_element(cx, expected_size)
```
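The `Memory` arm above recurses through newtype-style wrappers: a layout counts as a single vector element if its only field sits at offset 0 and itself passes the check. A toy model of that recursion (hypothetical stand-in types, not rustc's):

```rust
// Toy model of is_single_vector_element: either the layout is itself a
// SIMD vector of the expected size, or it is a memory-repr wrapper whose
// sole field at offset 0 recursively satisfies the same predicate.
enum Layout {
    SimdVector { size: u64 },
    Memory { fields: Vec<(u64, Layout)> }, // (offset, field layout)
}

fn is_single_vector_element(l: &Layout, expected_size: u64) -> bool {
    match l {
        Layout::SimdVector { size } => *size == expected_size,
        Layout::Memory { fields } => matches!(
            fields.as_slice(),
            [(0, inner)] if is_single_vector_element(inner, expected_size)
        ),
    }
}

fn main() {
    // A newtype wrapping a 16-byte vector still qualifies.
    let newtype = Layout::Memory { fields: vec![(0, Layout::SimdVector { size: 16 })] };
    assert!(is_single_vector_element(&newtype, 16));
}
```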
28 changes: 14 additions & 14 deletions compiler/rustc_abi/src/lib.rs
```diff
@@ -1410,7 +1410,7 @@ impl AddressSpace {
 pub enum BackendRepr {
     Scalar(Scalar),
     ScalarPair(Scalar, Scalar),
-    Vector {
+    SimdVector {
         element: Scalar,
         count: u64,
     },
@@ -1426,9 +1426,9 @@ impl BackendRepr {
     #[inline]
     pub fn is_unsized(&self) -> bool {
         match *self {
-            BackendRepr::Scalar(_) | BackendRepr::ScalarPair(..) | BackendRepr::Vector { .. } => {
-                false
-            }
+            BackendRepr::Scalar(_)
+            | BackendRepr::ScalarPair(..)
+            | BackendRepr::SimdVector { .. } => false,
             BackendRepr::Memory { sized } => !sized,
         }
     }
@@ -1467,7 +1467,7 @@ impl BackendRepr {
             BackendRepr::Scalar(s) => Some(s.align(cx).abi),
             BackendRepr::ScalarPair(s1, s2) => Some(s1.align(cx).max(s2.align(cx)).abi),
             // The align of a Vector can vary in surprising ways
-            BackendRepr::Vector { .. } | BackendRepr::Memory { .. } => None,
+            BackendRepr::SimdVector { .. } | BackendRepr::Memory { .. } => None,
         }
     }

@@ -1489,7 +1489,7 @@ impl BackendRepr {
                 Some(size)
             }
             // The size of a Vector can vary in surprising ways
-            BackendRepr::Vector { .. } | BackendRepr::Memory { .. } => None,
+            BackendRepr::SimdVector { .. } | BackendRepr::Memory { .. } => None,
         }
     }

@@ -1500,8 +1500,8 @@ impl BackendRepr {
             BackendRepr::ScalarPair(s1, s2) => {
                 BackendRepr::ScalarPair(s1.to_union(), s2.to_union())
             }
-            BackendRepr::Vector { element, count } => {
-                BackendRepr::Vector { element: element.to_union(), count }
+            BackendRepr::SimdVector { element, count } => {
+                BackendRepr::SimdVector { element: element.to_union(), count }
             }
             BackendRepr::Memory { .. } => BackendRepr::Memory { sized: true },
         }
@@ -1513,8 +1513,8 @@ impl BackendRepr {
             // We do *not* ignore the sign since it matters for some ABIs (e.g. s390x).
             (BackendRepr::Scalar(l), BackendRepr::Scalar(r)) => l.primitive() == r.primitive(),
             (
-                BackendRepr::Vector { element: element_l, count: count_l },
-                BackendRepr::Vector { element: element_r, count: count_r },
+                BackendRepr::SimdVector { element: element_l, count: count_l },
+                BackendRepr::SimdVector { element: element_r, count: count_r },
             ) => element_l.primitive() == element_r.primitive() && count_l == count_r,
             (BackendRepr::ScalarPair(l1, l2), BackendRepr::ScalarPair(r1, r2)) => {
                 l1.primitive() == r1.primitive() && l2.primitive() == r2.primitive()
@@ -1735,7 +1735,7 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
     /// Returns `true` if this is an aggregate type (including a ScalarPair!)
     pub fn is_aggregate(&self) -> bool {
         match self.backend_repr {
-            BackendRepr::Scalar(_) | BackendRepr::Vector { .. } => false,
+            BackendRepr::Scalar(_) | BackendRepr::SimdVector { .. } => false,
             BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => true,
         }
     }
@@ -1877,9 +1877,9 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
     /// non-trivial alignment constraints. You probably want to use `is_1zst` instead.
     pub fn is_zst(&self) -> bool {
         match self.backend_repr {
-            BackendRepr::Scalar(_) | BackendRepr::ScalarPair(..) | BackendRepr::Vector { .. } => {
-                false
-            }
+            BackendRepr::Scalar(_)
+            | BackendRepr::ScalarPair(..)
+            | BackendRepr::SimdVector { .. } => false,
             BackendRepr::Memory { sized } => sized && self.size.bytes() == 0,
         }
     }
```
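For reference, a self-contained sketch of the renamed enum and one of the predicate shapes touched above; `Scalar` is reduced to a unit struct so the match shapes stay visible (simplified stand-ins, not the real rustc_abi definitions):

```rust
// Simplified stand-in: the real Scalar carries a primitive and a valid range.
#[derive(Clone, Copy)]
struct Scalar;

enum BackendRepr {
    Scalar(Scalar),
    ScalarPair(Scalar, Scalar),
    SimdVector { element: Scalar, count: u64 }, // renamed from `Vector`
    Memory { sized: bool },
}

impl BackendRepr {
    // Mirrors the is_gcc_immediate/is_llvm_immediate shape below: only
    // scalars and SIMD vectors are single backend "immediate" values.
    fn is_immediate(&self) -> bool {
        match self {
            BackendRepr::Scalar(_) | BackendRepr::SimdVector { .. } => true,
            BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => false,
        }
    }
}
```

The exhaustive matches are what make the rename safe: every consumer in this diff fails to compile until it is updated.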
4 changes: 2 additions & 2 deletions compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs
```diff
@@ -84,7 +84,7 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
                 AbiParam::new(scalar_to_clif_type(tcx, scalar)),
                 attrs
             )],
-            BackendRepr::Vector { .. } => {
+            BackendRepr::SimdVector { .. } => {
                 let vector_ty = crate::intrinsics::clif_vector_type(tcx, self.layout);
                 smallvec![AbiParam::new(vector_ty)]
             }
@@ -135,7 +135,7 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
             BackendRepr::Scalar(scalar) => {
                 (None, vec![AbiParam::new(scalar_to_clif_type(tcx, scalar))])
             }
-            BackendRepr::Vector { .. } => {
+            BackendRepr::SimdVector { .. } => {
                 let vector_ty = crate::intrinsics::clif_vector_type(tcx, self.layout);
                 (None, vec![AbiParam::new(vector_ty)])
             }
```
2 changes: 1 addition & 1 deletion compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
```diff
@@ -53,7 +53,7 @@ fn report_atomic_type_validation_error<'tcx>(

 pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Type {
     let (element, count) = match layout.backend_repr {
-        BackendRepr::Vector { element, count } => (element, count),
+        BackendRepr::SimdVector { element, count } => (element, count),
         _ => unreachable!(),
     };
```
8 changes: 5 additions & 3 deletions compiler/rustc_codegen_cranelift/src/value_and_place.rs
```diff
@@ -173,9 +173,11 @@ impl<'tcx> CValue<'tcx> {
             CValueInner::ByRef(ptr, None) => {
                 let clif_ty = match layout.backend_repr {
                     BackendRepr::Scalar(scalar) => scalar_to_clif_type(fx.tcx, scalar),
-                    BackendRepr::Vector { element, count } => scalar_to_clif_type(fx.tcx, element)
-                        .by(u32::try_from(count).unwrap())
-                        .unwrap(),
+                    BackendRepr::SimdVector { element, count } => {
+                        scalar_to_clif_type(fx.tcx, element)
+                            .by(u32::try_from(count).unwrap())
+                            .unwrap()
+                    }
                     _ => unreachable!("{:?}", layout.ty),
                 };
                 let mut flags = MemFlags::new();
```
2 changes: 1 addition & 1 deletion compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
```diff
@@ -312,7 +312,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc
                 let layout = self.layout_of(tp_ty).layout;
                 let _use_integer_compare = match layout.backend_repr() {
                     Scalar(_) | ScalarPair(_, _) => true,
-                    Vector { .. } => false,
+                    SimdVector { .. } => false,
                     Memory { .. } => {
                         // For rusty ABIs, small aggregates are actually passed
                         // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
```
10 changes: 5 additions & 5 deletions compiler/rustc_codegen_gcc/src/type_of.rs
```diff
@@ -63,7 +63,7 @@ fn uncached_gcc_type<'gcc, 'tcx>(
 ) -> Type<'gcc> {
     match layout.backend_repr {
         BackendRepr::Scalar(_) => bug!("handled elsewhere"),
-        BackendRepr::Vector { ref element, count } => {
+        BackendRepr::SimdVector { ref element, count } => {
             let element = layout.scalar_gcc_type_at(cx, element, Size::ZERO);
             let element =
                 // NOTE: gcc doesn't allow pointer types in vectors.
@@ -178,17 +178,17 @@ pub trait LayoutGccExt<'tcx> {
 impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
     fn is_gcc_immediate(&self) -> bool {
         match self.backend_repr {
-            BackendRepr::Scalar(_) | BackendRepr::Vector { .. } => true,
+            BackendRepr::Scalar(_) | BackendRepr::SimdVector { .. } => true,
             BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => false,
         }
     }

     fn is_gcc_scalar_pair(&self) -> bool {
         match self.backend_repr {
             BackendRepr::ScalarPair(..) => true,
-            BackendRepr::Scalar(_) | BackendRepr::Vector { .. } | BackendRepr::Memory { .. } => {
-                false
-            }
+            BackendRepr::Scalar(_)
+            | BackendRepr::SimdVector { .. }
+            | BackendRepr::Memory { .. } => false,
         }
     }
```
39 changes: 21 additions & 18 deletions compiler/rustc_codegen_llvm/src/asm.rs
```diff
@@ -939,9 +939,10 @@ fn llvm_fixup_input<'ll, 'tcx>(
             }
             bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
         }
-        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count })
-            if layout.size.bytes() == 8 =>
-        {
+        (
+            AArch64(AArch64InlineAsmRegClass::vreg_low16),
+            BackendRepr::SimdVector { element, count },
+        ) if layout.size.bytes() == 8 => {
             let elem_ty = llvm_asm_scalar_type(bx.cx, element);
             let vec_ty = bx.cx.type_vector(elem_ty, count);
             let indices: Vec<_> = (0..count * 2).map(|x| bx.const_i32(x as i32)).collect();
@@ -954,7 +955,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
         }
         (
             X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
-            BackendRepr::Vector { .. },
+            BackendRepr::SimdVector { .. },
         ) if layout.size.bytes() == 64 => bx.bitcast(value, bx.cx.type_vector(bx.cx.type_f64(), 8)),
         (
             X86(
@@ -989,7 +990,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            BackendRepr::Vector { element, count: count @ (8 | 16) },
+            BackendRepr::SimdVector { element, count: count @ (8 | 16) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             bx.bitcast(value, bx.type_vector(bx.type_i16(), count))
         }
@@ -1026,7 +1027,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 | ArmInlineAsmRegClass::qreg_low4
                 | ArmInlineAsmRegClass::qreg_low8,
             ),
-            BackendRepr::Vector { element, count: count @ (4 | 8) },
+            BackendRepr::SimdVector { element, count: count @ (4 | 8) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             bx.bitcast(value, bx.type_vector(bx.type_i16(), count))
         }
@@ -1099,9 +1100,10 @@ fn llvm_fixup_output<'ll, 'tcx>(
             }
             value
         }
-        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count })
-            if layout.size.bytes() == 8 =>
-        {
+        (
+            AArch64(AArch64InlineAsmRegClass::vreg_low16),
+            BackendRepr::SimdVector { element, count },
+        ) if layout.size.bytes() == 8 => {
             let elem_ty = llvm_asm_scalar_type(bx.cx, element);
             let vec_ty = bx.cx.type_vector(elem_ty, count * 2);
             let indices: Vec<_> = (0..count).map(|x| bx.const_i32(x as i32)).collect();
@@ -1114,7 +1116,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
         }
         (
             X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
-            BackendRepr::Vector { .. },
+            BackendRepr::SimdVector { .. },
         ) if layout.size.bytes() == 64 => bx.bitcast(value, layout.llvm_type(bx.cx)),
         (
             X86(
@@ -1145,7 +1147,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            BackendRepr::Vector { element, count: count @ (8 | 16) },
+            BackendRepr::SimdVector { element, count: count @ (8 | 16) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             bx.bitcast(value, bx.type_vector(bx.type_f16(), count))
         }
@@ -1182,7 +1184,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
                 | ArmInlineAsmRegClass::qreg_low4
                 | ArmInlineAsmRegClass::qreg_low8,
             ),
-            BackendRepr::Vector { element, count: count @ (4 | 8) },
+            BackendRepr::SimdVector { element, count: count @ (4 | 8) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             bx.bitcast(value, bx.type_vector(bx.type_f16(), count))
         }
@@ -1243,9 +1245,10 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
             let count = 16 / layout.size.bytes();
             cx.type_vector(elem_ty, count)
         }
-        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count })
-            if layout.size.bytes() == 8 =>
-        {
+        (
+            AArch64(AArch64InlineAsmRegClass::vreg_low16),
+            BackendRepr::SimdVector { element, count },
+        ) if layout.size.bytes() == 8 => {
             let elem_ty = llvm_asm_scalar_type(cx, element);
             cx.type_vector(elem_ty, count * 2)
         }
@@ -1256,7 +1259,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
         }
         (
             X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
-            BackendRepr::Vector { .. },
+            BackendRepr::SimdVector { .. },
         ) if layout.size.bytes() == 64 => cx.type_vector(cx.type_f64(), 8),
         (
             X86(
@@ -1284,7 +1287,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            BackendRepr::Vector { element, count: count @ (8 | 16) },
+            BackendRepr::SimdVector { element, count: count @ (8 | 16) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             cx.type_vector(cx.type_i16(), count)
         }
@@ -1321,7 +1324,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
                 | ArmInlineAsmRegClass::qreg_low4
                 | ArmInlineAsmRegClass::qreg_low8,
             ),
-            BackendRepr::Vector { element, count: count @ (4 | 8) },
+            BackendRepr::SimdVector { element, count: count @ (4 | 8) },
        ) if element.primitive() == Primitive::Float(Float::F16) => {
             cx.type_vector(cx.type_i16(), count)
         }
```
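The `Float(F16)` arms above use an `@`-binding over an or-pattern: `count: count @ (8 | 16)` both restricts the lane count and captures it for the bitcast. A tiny standalone illustration of that pattern shape:

```rust
// `n @ (8 | 16)` matches only 8 or 16 and binds the matched value to `n`,
// the same pattern shape used in the asm.rs hunks above.
fn f16_lane_count(count: u64) -> Option<u64> {
    match count {
        n @ (8 | 16) => Some(n),
        _ => None,
    }
}

fn main() {
    assert_eq!(f16_lane_count(16), Some(16));
    assert_eq!(f16_lane_count(4), None);
}
```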
2 changes: 1 addition & 1 deletion compiler/rustc_codegen_llvm/src/intrinsic.rs
```diff
@@ -470,7 +470,7 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                 let layout = self.layout_of(tp_ty).layout;
                 let use_integer_compare = match layout.backend_repr() {
                     Scalar(_) | ScalarPair(_, _) => true,
-                    Vector { .. } => false,
+                    SimdVector { .. } => false,
                     Memory { .. } => {
                         // For rusty ABIs, small aggregates are actually passed
                         // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
```
10 changes: 5 additions & 5 deletions compiler/rustc_codegen_llvm/src/type_of.rs
```diff
@@ -19,7 +19,7 @@ fn uncached_llvm_type<'a, 'tcx>(
 ) -> &'a Type {
     match layout.backend_repr {
         BackendRepr::Scalar(_) => bug!("handled elsewhere"),
-        BackendRepr::Vector { element, count } => {
+        BackendRepr::SimdVector { element, count } => {
             let element = layout.scalar_llvm_type_at(cx, element);
             return cx.type_vector(element, count);
         }
@@ -171,17 +171,17 @@ pub(crate) trait LayoutLlvmExt<'tcx> {
 impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
     fn is_llvm_immediate(&self) -> bool {
         match self.backend_repr {
-            BackendRepr::Scalar(_) | BackendRepr::Vector { .. } => true,
+            BackendRepr::Scalar(_) | BackendRepr::SimdVector { .. } => true,
             BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => false,
         }
     }

     fn is_llvm_scalar_pair(&self) -> bool {
         match self.backend_repr {
             BackendRepr::ScalarPair(..) => true,
-            BackendRepr::Scalar(_) | BackendRepr::Vector { .. } | BackendRepr::Memory { .. } => {
-                false
-            }
+            BackendRepr::Scalar(_)
+            | BackendRepr::SimdVector { .. }
+            | BackendRepr::Memory { .. } => false,
         }
     }
```
2 changes: 1 addition & 1 deletion compiler/rustc_codegen_ssa/src/mir/naked_asm.rs
```diff
@@ -349,7 +349,7 @@ fn wasm_type<'tcx>(
         PassMode::Direct(_) => {
             let direct_type = match arg_abi.layout.backend_repr {
                 BackendRepr::Scalar(scalar) => wasm_primitive(scalar.primitive(), ptr_type),
-                BackendRepr::Vector { .. } => "v128",
+                BackendRepr::SimdVector { .. } => "v128",
                 BackendRepr::Memory { .. } => {
                     // FIXME: remove this branch once the wasm32-unknown-unknown ABI is fixed
                     let _ = WasmCAbi::Legacy;
```
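The wasm arm maps any directly passed SIMD vector to wasm's single 128-bit value type, `v128`. A reduced sketch of that mapping (hypothetical stand-in enum; the real `wasm_type` also handles pointers, scalar widths, and the legacy ABI fallback):

```rust
// Reduced sketch of the wasm_type match above: direct SIMD arguments
// lower to "v128"; scalars pick a primitive by kind (widths elided).
enum Repr {
    Scalar { is_float: bool },
    SimdVector,
}

fn wasm_value_type(repr: Repr) -> &'static str {
    match repr {
        Repr::Scalar { is_float: false } => "i32",
        Repr::Scalar { is_float: true } => "f32",
        Repr::SimdVector => "v128",
    }
}
```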