Don't store vector types in ssa variables
They have been causing a lot of trouble. For example, current MIR
building thinks it is fine to dynamically index into them, and there are
different code paths depending on whether the repr(simd) struct uses
individual fields or a single array as its interior. There is also trouble
with moving the inner array out of a repr(simd) type that is an array
wrapper.

If performance becomes a concern, I will implement this in a more
principled way.
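
For context, here is a minimal sketch of the shapes the message refers to. It is illustrative only, not code from the repository or its test suite; it assumes a nightly toolchain with feature(repr_simd), and the multiple-field form shown for FieldSimd was still accepted by the nightly compilers of that era.

// Illustrative sketch only; not code from the repository or its tests.
// Assumes a nightly toolchain, since repr(simd) is feature gated.
#![feature(repr_simd)]

// A repr(simd) struct whose interior is a list of individual fields.
#[repr(simd)]
#[derive(Copy, Clone)]
struct FieldSimd(f32, f32, f32, f32);

// A repr(simd) struct whose interior is a single array ("array wrapper").
#[repr(simd)]
#[derive(Copy, Clone)]
struct ArraySimd([i32; 4]);

fn main() {
    let v = ArraySimd([1, 2, 3, 4]);

    // Moving (here: copying) the inner array out of the array wrapper.
    let inner: [i32; 4] = v.0;

    // Dynamic index into the inner array. MIR building emits an ordinary
    // indexing projection for this even when the backend would like to
    // keep `v` in a single SSA variable, so the lane index need not be a
    // compile-time constant.
    let i = std::hint::black_box(2);
    let lane = v.0[i];

    let _fields = FieldSimd(0.0, 1.0, 2.0, 3.0);
    println!("{} {}", inner[0], lane);
}

With the vector kept behind a stack slot instead of an SSA variable, both the array move and the dynamic index lower to plain memory accesses, which matches the fallback this commit switches to.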
bjorn3 committed Mar 25, 2023
1 parent 6bced6e commit eed1f75
Showing 7 changed files with 15 additions and 167 deletions.
1 change: 0 additions & 1 deletion scripts/test_rustc_tests.sh
@@ -112,7 +112,6 @@ rm tests/incremental/spike-neg2.rs # same

rm tests/ui/simd/intrinsic/generic-reduction-pass.rs # simd_reduce_add_unordered doesn't accept an accumulator for integer vectors

rm tests/ui/simd/intrinsic/generic-as.rs # crash when accessing vector type field (#1318)
rm tests/ui/simd/simd-bitmask.rs # crash

# bugs in the test suite
1 change: 0 additions & 1 deletion src/abi/comments.rs
@@ -100,7 +100,6 @@ pub(super) fn add_local_place_comments<'tcx>(
assert_eq!(local, place_local);
("ssa", Cow::Owned(format!("var=({}, {})", var1.index(), var2.index())))
}
CPlaceInner::VarLane(_local, _var, _lane) => unreachable!(),
CPlaceInner::Addr(ptr, meta) => {
let meta = if let Some(meta) = meta {
Cow::Owned(format!("meta={}", meta))
4 changes: 2 additions & 2 deletions src/abi/pass_mode.rs
@@ -84,7 +84,7 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
attrs
)],
Abi::Vector { .. } => {
let vector_ty = crate::intrinsics::clif_vector_type(tcx, self.layout).unwrap();
let vector_ty = crate::intrinsics::clif_vector_type(tcx, self.layout);
smallvec![AbiParam::new(vector_ty)]
}
_ => unreachable!("{:?}", self.layout.abi),
@@ -135,7 +135,7 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
(None, vec![AbiParam::new(scalar_to_clif_type(tcx, scalar))])
}
Abi::Vector { .. } => {
let vector_ty = crate::intrinsics::clif_vector_type(tcx, self.layout).unwrap();
let vector_ty = crate::intrinsics::clif_vector_type(tcx, self.layout);
(None, vec![AbiParam::new(vector_ty)])
}
_ => unreachable!("{:?}", self.layout.abi),
20 changes: 1 addition & 19 deletions src/common.rs
@@ -72,19 +72,6 @@ fn clif_type_from_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Option<types::Typ
pointer_ty(tcx)
}
}
ty::Adt(adt_def, _) if adt_def.repr().simd() => {
let (element, count) = match &tcx.layout_of(ParamEnv::reveal_all().and(ty)).unwrap().abi
{
Abi::Vector { element, count } => (*element, *count),
_ => unreachable!(),
};

match scalar_to_clif_type(tcx, element).by(u32::try_from(count).unwrap()) {
// Cranelift currently only implements icmp for 128bit vectors.
Some(vector_ty) if vector_ty.bits() == 128 => vector_ty,
_ => return None,
}
}
ty::Param(_) => bug!("ty param {:?}", ty),
_ => return None,
})
@@ -96,12 +83,7 @@ fn clif_pair_type_from_ty<'tcx>(
) -> Option<(types::Type, types::Type)> {
Some(match ty.kind() {
ty::Tuple(types) if types.len() == 2 => {
let a = clif_type_from_ty(tcx, types[0])?;
let b = clif_type_from_ty(tcx, types[1])?;
if a.is_vector() || b.is_vector() {
return None;
}
(a, b)
(clif_type_from_ty(tcx, types[0])?, clif_type_from_ty(tcx, types[1])?)
}
ty::RawPtr(TypeAndMut { ty: pointee_ty, mutbl: _ }) | ty::Ref(_, pointee_ty, _) => {
if has_ptr_meta(tcx, *pointee_ty) {
8 changes: 2 additions & 6 deletions src/intrinsics/mod.rs
@@ -51,17 +51,13 @@ fn report_atomic_type_validation_error<'tcx>(
fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
}

pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Type {
let (element, count) = match layout.abi {
Abi::Vector { element, count } => (element, count),
_ => unreachable!(),
};

match scalar_to_clif_type(tcx, element).by(u32::try_from(count).unwrap()) {
// Cranelift currently only implements icmp for 128bit vectors.
Some(vector_ty) if vector_ty.bits() == 128 => Some(vector_ty),
_ => None,
}
scalar_to_clif_type(tcx, element).by(u32::try_from(count).unwrap()).unwrap()
}

fn simd_for_each_lane<'tcx>(
2 changes: 1 addition & 1 deletion src/intrinsics/simd.rs
@@ -253,7 +253,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
}

ret.write_cvalue(fx, base);
let ret_lane = ret.place_field(fx, mir::Field::new(idx.try_into().unwrap()));
let ret_lane = ret.place_lane(fx, idx.try_into().unwrap());
ret_lane.write_cvalue(fx, val);
}

146 changes: 9 additions & 137 deletions src/value_and_place.rs
@@ -3,7 +3,6 @@
use crate::prelude::*;

use cranelift_codegen::ir::immediates::Offset32;
use cranelift_codegen::ir::{InstructionData, Opcode};

fn codegen_field<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
@@ -166,9 +165,6 @@ impl<'tcx> CValue<'tcx> {
CValueInner::ByRef(ptr, None) => {
let clif_ty = match layout.abi {
Abi::Scalar(scalar) => scalar_to_clif_type(fx.tcx, scalar),
Abi::Vector { element, count } => scalar_to_clif_type(fx.tcx, element)
.by(u32::try_from(count).unwrap())
.unwrap(),
_ => unreachable!("{:?}", layout.ty),
};
let mut flags = MemFlags::new();
@@ -214,17 +210,7 @@
) -> CValue<'tcx> {
let layout = self.1;
match self.0 {
CValueInner::ByVal(val) => match layout.abi {
Abi::Vector { element: _, count } => {
let count = u8::try_from(count).expect("SIMD type with more than 255 lanes???");
let field = u8::try_from(field.index()).unwrap();
assert!(field < count);
let lane = fx.bcx.ins().extractlane(val, field);
let field_layout = layout.field(&*fx, usize::from(field));
CValue::by_val(lane, field_layout)
}
_ => unreachable!("value_field for ByVal with abi {:?}", layout.abi),
},
CValueInner::ByVal(_) => unreachable!(),
CValueInner::ByValPair(val1, val2) => match layout.abi {
Abi::ScalarPair(_, _) => {
let val = match field.as_u32() {
@@ -258,16 +244,7 @@ impl<'tcx> CValue<'tcx> {
let lane_layout = fx.layout_of(lane_ty);
assert!(lane_idx < lane_count);
match self.0 {
CValueInner::ByVal(val) => match layout.abi {
Abi::Vector { element: _, count: _ } => {
assert!(lane_count <= u8::MAX.into(), "SIMD type with more than 255 lanes???");
let lane_idx = u8::try_from(lane_idx).unwrap();
let lane = fx.bcx.ins().extractlane(val, lane_idx);
CValue::by_val(lane, lane_layout)
}
_ => unreachable!("value_lane for ByVal with abi {:?}", layout.abi),
},
CValueInner::ByValPair(_, _) => unreachable!(),
CValueInner::ByVal(_) | CValueInner::ByValPair(_, _) => unreachable!(),
CValueInner::ByRef(ptr, None) => {
let field_offset = lane_layout.size * lane_idx;
let field_ptr = ptr.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap());
@@ -348,7 +325,6 @@ pub(crate) struct CPlace<'tcx> {
pub(crate) enum CPlaceInner {
Var(Local, Variable),
VarPair(Local, Variable, Variable),
VarLane(Local, Variable, u8),
Addr(Pointer, Option<Value>),
}

@@ -442,12 +418,6 @@
//fx.bcx.set_val_label(val2, cranelift_codegen::ir::ValueLabel::new(var2.index()));
CValue::by_val_pair(val1, val2, layout)
}
CPlaceInner::VarLane(_local, var, lane) => {
let val = fx.bcx.use_var(var);
//fx.bcx.set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
let val = fx.bcx.ins().extractlane(val, lane);
CValue::by_val(val, layout)
}
CPlaceInner::Addr(ptr, extra) => {
if let Some(extra) = extra {
CValue::by_ref_unsized(ptr, extra, layout)
@@ -470,9 +440,9 @@ impl<'tcx> CPlace<'tcx> {
pub(crate) fn to_ptr_maybe_unsized(self) -> (Pointer, Option<Value>) {
match self.inner {
CPlaceInner::Addr(ptr, extra) => (ptr, extra),
CPlaceInner::Var(_, _)
| CPlaceInner::VarPair(_, _, _)
| CPlaceInner::VarLane(_, _, _) => bug!("Expected CPlace::Addr, found {:?}", self),
CPlaceInner::Var(_, _) | CPlaceInner::VarPair(_, _, _) => {
bug!("Expected CPlace::Addr, found {:?}", self)
}
}
}

@@ -565,26 +535,6 @@ impl<'tcx> CPlace<'tcx> {
let dst_layout = self.layout();
let to_ptr = match self.inner {
CPlaceInner::Var(_local, var) => {
if let ty::Array(element, len) = dst_layout.ty.kind() {
// Can only happen for vector types
let len = u32::try_from(len.eval_target_usize(fx.tcx, ParamEnv::reveal_all()))
.unwrap();
let vector_ty = fx.clif_type(*element).unwrap().by(len).unwrap();

let data = match from.0 {
CValueInner::ByRef(ptr, None) => {
let mut flags = MemFlags::new();
flags.set_notrap();
ptr.load(fx, vector_ty, flags)
}
CValueInner::ByVal(_)
| CValueInner::ByValPair(_, _)
| CValueInner::ByRef(_, Some(_)) => bug!("array should be ByRef"),
};

fx.bcx.def_var(var, data);
return;
}
let data = CValue(from.0, dst_layout).load_scalar(fx);
let dst_ty = fx.clif_type(self.layout().ty).unwrap();
transmute_value(fx, var, data, dst_ty);
@@ -603,22 +553,6 @@ impl<'tcx> CPlace<'tcx> {
transmute_value(fx, var2, data2, dst_ty2);
return;
}
CPlaceInner::VarLane(_local, var, lane) => {
let data = from.load_scalar(fx);

// First get the old vector
let vector = fx.bcx.use_var(var);
//fx.bcx.set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));

// Next insert the written lane into the vector
let vector = fx.bcx.ins().insertlane(vector, data, lane);

// Finally write the new vector
//fx.bcx.set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));
fx.bcx.def_var(var, vector);

return;
}
CPlaceInner::Addr(ptr, None) => {
if dst_layout.size == Size::ZERO || dst_layout.abi == Abi::Uninhabited {
return;
@@ -631,7 +565,6 @@ impl<'tcx> CPlace<'tcx> {
let mut flags = MemFlags::new();
flags.set_notrap();
match from.layout().abi {
// FIXME make Abi::Vector work too
Abi::Scalar(_) => {
let val = from.load_scalar(fx);
to_ptr.store(fx, val, flags);
@@ -692,39 +625,6 @@ impl<'tcx> CPlace<'tcx> {
let layout = self.layout();

match self.inner {
CPlaceInner::Var(local, var) => match layout.ty.kind() {
ty::Array(_, _) => {
// Can only happen for vector types
return CPlace {
inner: CPlaceInner::VarLane(local, var, field.as_u32().try_into().unwrap()),
layout: layout.field(fx, field.as_u32().try_into().unwrap()),
};
}
ty::Adt(adt_def, substs) if layout.ty.is_simd() => {
let f0_ty = adt_def.non_enum_variant().fields[0].ty(fx.tcx, substs);

match f0_ty.kind() {
ty::Array(_, _) => {
assert_eq!(field.as_u32(), 0);
return CPlace {
inner: CPlaceInner::Var(local, var),
layout: layout.field(fx, field.as_u32().try_into().unwrap()),
};
}
_ => {
return CPlace {
inner: CPlaceInner::VarLane(
local,
var,
field.as_u32().try_into().unwrap(),
),
layout: layout.field(fx, field.as_u32().try_into().unwrap()),
};
}
}
}
_ => {}
},
CPlaceInner::VarPair(local, var1, var2) => {
let layout = layout.field(&*fx, field.index());

@@ -766,15 +666,8 @@ impl<'tcx> CPlace<'tcx> {
assert!(lane_idx < lane_count);

match self.inner {
CPlaceInner::Var(local, var) => {
assert!(matches!(layout.abi, Abi::Vector { .. }));
CPlace {
inner: CPlaceInner::VarLane(local, var, lane_idx.try_into().unwrap()),
layout: lane_layout,
}
}
CPlaceInner::Var(_, _) => unreachable!(),
CPlaceInner::VarPair(_, _, _) => unreachable!(),
CPlaceInner::VarLane(_, _, _) => unreachable!(),
CPlaceInner::Addr(ptr, None) => {
let field_offset = lane_layout.size * lane_idx;
let field_ptr = ptr.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap());
@@ -793,32 +686,11 @@ impl<'tcx> CPlace<'tcx> {
ty::Array(elem_ty, _) => {
let elem_layout = fx.layout_of(*elem_ty);
match self.inner {
CPlaceInner::Var(local, var) => {
// This is a hack to handle `vector_val.0[1]`. It doesn't allow dynamic
// indexing.
let lane_idx = match fx.bcx.func.dfg.insts
[fx.bcx.func.dfg.value_def(index).unwrap_inst()]
{
InstructionData::UnaryImm { opcode: Opcode::Iconst, imm } => imm,
_ => bug!(
"Dynamic indexing into a vector type is not supported: {self:?}[{index}]"
),
};
return CPlace {
inner: CPlaceInner::VarLane(
local,
var,
lane_idx.bits().try_into().unwrap(),
),
layout: elem_layout,
};
}
CPlaceInner::Addr(addr, None) => (elem_layout, addr),
CPlaceInner::Addr(_, Some(_))
| CPlaceInner::VarPair(_, _, _)
| CPlaceInner::VarLane(_, _, _) => bug!("Can't index into {self:?}"),
CPlaceInner::Var(_, _)
| CPlaceInner::Addr(_, Some(_))
| CPlaceInner::VarPair(_, _, _) => bug!("Can't index into {self:?}"),
}
// FIXME use VarLane in case of Var with simd type
}
ty::Slice(elem_ty) => (fx.layout_of(*elem_ty), self.to_ptr_maybe_unsized().0),
_ => bug!("place_index({:?})", self.layout().ty),
