Codegen tweaks #103511

Merged Oct 25, 2022 (5 commits)

Changes from all commits
46 changes: 16 additions & 30 deletions compiler/rustc_codegen_ssa/src/base.rs
@@ -337,40 +337,26 @@ pub fn coerce_unsized_into<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
 
 pub fn cast_shift_expr_rhs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     bx: &mut Bx,
-    op: hir::BinOpKind,
     lhs: Bx::Value,
     rhs: Bx::Value,
 ) -> Bx::Value {
-    cast_shift_rhs(bx, op, lhs, rhs)
-}
-
-fn cast_shift_rhs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
-    bx: &mut Bx,
-    op: hir::BinOpKind,
-    lhs: Bx::Value,
-    rhs: Bx::Value,
-) -> Bx::Value {
     // Shifts may have any size int on the rhs
-    if op.is_shift() {
-        let mut rhs_llty = bx.cx().val_ty(rhs);
-        let mut lhs_llty = bx.cx().val_ty(lhs);
-        if bx.cx().type_kind(rhs_llty) == TypeKind::Vector {
-            rhs_llty = bx.cx().element_type(rhs_llty)
-        }
-        if bx.cx().type_kind(lhs_llty) == TypeKind::Vector {
-            lhs_llty = bx.cx().element_type(lhs_llty)
-        }
-        let rhs_sz = bx.cx().int_width(rhs_llty);
-        let lhs_sz = bx.cx().int_width(lhs_llty);
-        if lhs_sz < rhs_sz {
-            bx.trunc(rhs, lhs_llty)
-        } else if lhs_sz > rhs_sz {
-            // FIXME (#1877: If in the future shifting by negative
-            // values is no longer undefined then this is wrong.
-            bx.zext(rhs, lhs_llty)
-        } else {
-            rhs
-        }
+    let mut rhs_llty = bx.cx().val_ty(rhs);
+    let mut lhs_llty = bx.cx().val_ty(lhs);
+    if bx.cx().type_kind(rhs_llty) == TypeKind::Vector {
+        rhs_llty = bx.cx().element_type(rhs_llty)
+    }
+    if bx.cx().type_kind(lhs_llty) == TypeKind::Vector {
+        lhs_llty = bx.cx().element_type(lhs_llty)
+    }
+    let rhs_sz = bx.cx().int_width(rhs_llty);
+    let lhs_sz = bx.cx().int_width(lhs_llty);
+    if lhs_sz < rhs_sz {
+        bx.trunc(rhs, lhs_llty)
+    } else if lhs_sz > rhs_sz {
+        // FIXME (#1877: If in the future shifting by negative
+        // values is no longer undefined then this is wrong.
+        bx.zext(rhs, lhs_llty)
     } else {
         rhs
     }
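The hunk above folds `cast_shift_rhs` into its only caller and drops the `op` parameter, since the two callers in common.rs (shown next) are both shifts. The rule being preserved: LLVM shift instructions require both operands to have the same width, so the shift amount is truncated or zero-extended to the width of the value being shifted; zext rather than sext is correct because a shift amount is never treated as negative (see the FIXME). A minimal, backend-free sketch of just that width rule, with hypothetical names standing in for `Bx::Value` handles:

```rust
// Which cast cast_shift_expr_rhs would apply, given only the two bit
// widths. Hypothetical types; the real code emits bx.trunc / bx.zext.
#[derive(Debug, PartialEq)]
enum RhsCast {
    Trunc, // amount is wider than the shifted value: truncate it
    Zext,  // amount is narrower: zero-extend (never sign-extend)
    Keep,  // widths already match
}

fn shift_rhs_cast(lhs_bits: u32, rhs_bits: u32) -> RhsCast {
    if lhs_bits < rhs_bits {
        RhsCast::Trunc
    } else if lhs_bits > rhs_bits {
        RhsCast::Zext
    } else {
        RhsCast::Keep
    }
}

fn main() {
    // u64 << u8: the 8-bit amount is zero-extended to 64 bits.
    assert_eq!(shift_rhs_cast(64, 8), RhsCast::Zext);
    // u8 << u32: the 32-bit amount is truncated to 8 bits.
    assert_eq!(shift_rhs_cast(8, 32), RhsCast::Trunc);
}
```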
5 changes: 2 additions & 3 deletions compiler/rustc_codegen_ssa/src/common.rs
@@ -1,7 +1,6 @@
 #![allow(non_camel_case_types)]
 
 use rustc_errors::struct_span_err;
-use rustc_hir as hir;
 use rustc_hir::LangItem;
 use rustc_middle::mir::interpret::ConstValue;
 use rustc_middle::ty::{self, layout::TyAndLayout, Ty, TyCtxt};
@@ -140,7 +139,7 @@ pub fn build_unchecked_lshift<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     lhs: Bx::Value,
     rhs: Bx::Value,
 ) -> Bx::Value {
-    let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shl, lhs, rhs);
+    let rhs = base::cast_shift_expr_rhs(bx, lhs, rhs);
     // #1877, #10183: Ensure that input is always valid
     let rhs = shift_mask_rhs(bx, rhs);
     bx.shl(lhs, rhs)
@@ -152,7 +151,7 @@ pub fn build_unchecked_rshift<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     lhs: Bx::Value,
     rhs: Bx::Value,
 ) -> Bx::Value {
-    let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shr, lhs, rhs);
+    let rhs = base::cast_shift_expr_rhs(bx, lhs, rhs);
     // #1877, #10183: Ensure that input is always valid
     let rhs = shift_mask_rhs(bx, rhs);
     let is_signed = lhs_t.is_signed();
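In both builders the cast is followed by `shift_mask_rhs`, which is what the `#1877, #10183` comment refers to: LLVM's `shl`/`lshr`/`ashr` produce poison when the amount is greater than or equal to the bit width, so the amount is masked into range first. A minimal sketch of the masking idea (not the real helper, which emits an `and` on IR values), assuming a power-of-two width:

```rust
// Clamp a shift amount into [0, bits); exact when `bits` is a power of two.
fn mask_shift_amount(amount: u32, bits: u32) -> u32 {
    assert!(bits.is_power_of_two());
    amount & (bits - 1)
}

fn main() {
    // An out-of-range amount of 40 on a 32-bit value wraps to 8, the same
    // masking behavior Rust exposes as wrapping_shl/wrapping_shr.
    assert_eq!(mask_shift_amount(40, 32), 8);
    assert_eq!(1u32.wrapping_shl(40), 1u32 << mask_shift_amount(40, 32));
}
```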
86 changes: 47 additions & 39 deletions compiler/rustc_codegen_ssa/src/mir/block.rs
@@ -63,7 +63,9 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
         }
     }
 
-    fn lltarget<Bx: BuilderMethods<'a, 'tcx>>(
+    /// Get a basic block (creating it if necessary), possibly with a landing
+    /// pad next to it.
+    fn llbb_with_landing_pad<Bx: BuilderMethods<'a, 'tcx>>(
         &self,
         fx: &mut FunctionCx<'a, 'tcx, Bx>,
         target: mir::BasicBlock,
@@ -73,32 +75,36 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
         let target_funclet = fx.cleanup_kinds[target].funclet_bb(target);
         match (self.funclet_bb, target_funclet) {
             (None, None) => (lltarget, false),
-            (Some(f), Some(t_f)) if f == t_f || !base::wants_msvc_seh(fx.cx.tcx().sess) => {
-                (lltarget, false)
-            }
             // jump *into* cleanup - need a landing pad if GNU, cleanup pad if MSVC
             (None, Some(_)) => (fx.landing_pad_for(target), false),
             (Some(_), None) => span_bug!(span, "{:?} - jump out of cleanup?", self.terminator),
-            (Some(_), Some(_)) => (fx.landing_pad_for(target), true),
+            (Some(f), Some(t_f)) => {
+                if f == t_f || !base::wants_msvc_seh(fx.cx.tcx().sess) {
+                    (lltarget, false)
+                } else {
+                    (fx.landing_pad_for(target), true)
+                }
+            }
         }
     }
 
-    /// Create a basic block.
-    fn llblock<Bx: BuilderMethods<'a, 'tcx>>(
+    /// Get a basic block (creating it if necessary), possibly with cleanup
+    /// stuff in it or next to it.
+    fn llbb_with_cleanup<Bx: BuilderMethods<'a, 'tcx>>(
        &self,
        fx: &mut FunctionCx<'a, 'tcx, Bx>,
        target: mir::BasicBlock,
     ) -> Bx::BasicBlock {
-        let (lltarget, is_cleanupret) = self.lltarget(fx, target);
+        let (lltarget, is_cleanupret) = self.llbb_with_landing_pad(fx, target);
         if is_cleanupret {
             // MSVC cross-funclet jump - need a trampoline
-
-            debug!("llblock: creating cleanup trampoline for {:?}", target);
+            debug_assert!(base::wants_msvc_seh(fx.cx.tcx().sess));
+            debug!("llbb_with_cleanup: creating cleanup trampoline for {:?}", target);
             let name = &format!("{:?}_cleanup_trampoline_{:?}", self.bb, target);
-            let trampoline = Bx::append_block(fx.cx, fx.llfn, name);
-            let mut trampoline_bx = Bx::build(fx.cx, trampoline);
+            let trampoline_llbb = Bx::append_block(fx.cx, fx.llfn, name);
+            let mut trampoline_bx = Bx::build(fx.cx, trampoline_llbb);
             trampoline_bx.cleanup_ret(self.funclet(fx).unwrap(), Some(lltarget));
-            trampoline
+            trampoline_llbb
         } else {
             lltarget
         }
@@ -110,10 +116,11 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
         bx: &mut Bx,
         target: mir::BasicBlock,
     ) {
-        let (lltarget, is_cleanupret) = self.lltarget(fx, target);
+        let (lltarget, is_cleanupret) = self.llbb_with_landing_pad(fx, target);
         if is_cleanupret {
-            // micro-optimization: generate a `ret` rather than a jump
+            // MSVC micro-optimization: generate a `ret` rather than a jump
             // to a trampoline.
+            debug_assert!(base::wants_msvc_seh(fx.cx.tcx().sess));
             bx.cleanup_ret(self.funclet(fx).unwrap(), Some(lltarget));
         } else {
             bx.br(lltarget);
@@ -138,7 +145,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
         let fn_ty = bx.fn_decl_backend_type(&fn_abi);
 
         let unwind_block = if let Some(cleanup) = cleanup.filter(|_| fn_abi.can_unwind) {
-            Some(self.llblock(fx, cleanup))
+            Some(self.llbb_with_cleanup(fx, cleanup))
         } else if fx.mir[self.bb].is_cleanup
             && fn_abi.can_unwind
             && !base::wants_msvc_seh(fx.cx.tcx().sess)
@@ -231,7 +238,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
                     options,
                     line_spans,
                     instance,
-                    Some((ret_llbb, self.llblock(fx, cleanup), self.funclet(fx))),
+                    Some((ret_llbb, self.llbb_with_cleanup(fx, cleanup), self.funclet(fx))),
                 );
             } else {
                 bx.codegen_inline_asm(template, &operands, options, line_spans, instance, None);
@@ -281,8 +288,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         if target_iter.len() == 1 {
             // If there are two targets (one conditional, one fallback), emit br instead of switch
             let (test_value, target) = target_iter.next().unwrap();
-            let lltrue = helper.llblock(self, target);
-            let llfalse = helper.llblock(self, targets.otherwise());
+            let lltrue = helper.llbb_with_cleanup(self, target);
+            let llfalse = helper.llbb_with_cleanup(self, targets.otherwise());
             if switch_ty == bx.tcx().types.bool {
                 // Don't generate trivial icmps when switching on bool
                 match test_value {
@@ -299,8 +306,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         } else {
             bx.switch(
                 discr.immediate(),
-                helper.llblock(self, targets.otherwise()),
-                target_iter.map(|(value, target)| (value, helper.llblock(self, target))),
+                helper.llbb_with_cleanup(self, targets.otherwise()),
+                target_iter.map(|(value, target)| (value, helper.llbb_with_cleanup(self, target))),
             );
         }
     }
@@ -530,7 +537,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         let cond = bx.expect(cond, expected);
 
         // Create the failure block and the conditional branch to it.
-        let lltarget = helper.llblock(self, target);
+        let lltarget = helper.llbb_with_cleanup(self, target);
         let panic_block = bx.append_sibling_block("panic");
         if expected {
             bx.cond_br(cond, lltarget, panic_block);
@@ -1459,20 +1466,20 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 //          bar();
                 //      }
                 Some(&mir::TerminatorKind::Abort) => {
-                    let cs_bb =
+                    let cs_llbb =
                         Bx::append_block(self.cx, self.llfn, &format!("cs_funclet{:?}", bb));
-                    let cp_bb =
+                    let cp_llbb =
                         Bx::append_block(self.cx, self.llfn, &format!("cp_funclet{:?}", bb));
-                    ret_llbb = cs_bb;
+                    ret_llbb = cs_llbb;
 
-                    let mut cs_bx = Bx::build(self.cx, cs_bb);
-                    let cs = cs_bx.catch_switch(None, None, &[cp_bb]);
+                    let mut cs_bx = Bx::build(self.cx, cs_llbb);
+                    let cs = cs_bx.catch_switch(None, None, &[cp_llbb]);
 
                     // The "null" here is actually a RTTI type descriptor for the
                     // C++ personality function, but `catch (...)` has no type so
                     // it's null. The 64 here is actually a bitfield which
                     // represents that this is a catch-all block.
-                    let mut cp_bx = Bx::build(self.cx, cp_bb);
+                    let mut cp_bx = Bx::build(self.cx, cp_llbb);
                     let null = cp_bx.const_null(
                         cp_bx.type_i8p_ext(cp_bx.cx().data_layout().instruction_address_space),
                     );
@@ -1481,30 +1488,31 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     cp_bx.br(llbb);
                 }
                 _ => {
-                    let cleanup_bb =
+                    let cleanup_llbb =
                         Bx::append_block(self.cx, self.llfn, &format!("funclet_{:?}", bb));
-                    ret_llbb = cleanup_bb;
-                    let mut cleanup_bx = Bx::build(self.cx, cleanup_bb);
+                    ret_llbb = cleanup_llbb;
+                    let mut cleanup_bx = Bx::build(self.cx, cleanup_llbb);
                     funclet = cleanup_bx.cleanup_pad(None, &[]);
                     cleanup_bx.br(llbb);
                 }
             }
             self.funclets[bb] = Some(funclet);
             ret_llbb
         } else {
-            let bb = Bx::append_block(self.cx, self.llfn, "cleanup");
-            let mut bx = Bx::build(self.cx, bb);
+            let cleanup_llbb = Bx::append_block(self.cx, self.llfn, "cleanup");
+            let mut cleanup_bx = Bx::build(self.cx, cleanup_llbb);
 
             let llpersonality = self.cx.eh_personality();
             let llretty = self.landing_pad_type();
-            let lp = bx.cleanup_landing_pad(llretty, llpersonality);
+            let lp = cleanup_bx.cleanup_landing_pad(llretty, llpersonality);
 
-            let slot = self.get_personality_slot(&mut bx);
-            slot.storage_live(&mut bx);
-            Pair(bx.extract_value(lp, 0), bx.extract_value(lp, 1)).store(&mut bx, slot);
+            let slot = self.get_personality_slot(&mut cleanup_bx);
+            slot.storage_live(&mut cleanup_bx);
+            Pair(cleanup_bx.extract_value(lp, 0), cleanup_bx.extract_value(lp, 1))
+                .store(&mut cleanup_bx, slot);
 
-            bx.br(llbb);
-            bx.llbb()
+            cleanup_bx.br(llbb);
+            cleanup_llbb
         }
     }

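Two things happen throughout this file: basic-block handles are renamed so that `*_llbb` always means a raw `Bx::BasicBlock` and `*_bx` always means a builder positioned inside one (`lltarget` becomes `llbb_with_landing_pad`, `llblock` becomes `llbb_with_cleanup`, `trampoline` becomes `trampoline_llbb`, and so on), and the two `(Some, Some)` match arms in `llbb_with_landing_pad` are merged, moving the guard into an `if`/`else` so both outcomes of a cleanup-to-cleanup jump read in one place. A toy sketch of just that arm-merging pattern, reduced to a bool with hypothetical names:

```rust
// Returns whether a jump from `source` funclet to `target` funclet needs
// a landing pad. A hypothetical reduction of llbb_with_landing_pad's match.
fn needs_landing_pad(source: Option<u32>, target: Option<u32>, msvc_seh: bool) -> bool {
    match (source, target) {
        (None, None) => false,
        // jump *into* cleanup always needs a pad
        (None, Some(_)) => true,
        (Some(_), None) => panic!("jump out of cleanup?"),
        // Before: a guarded `(Some(f), Some(t_f)) if ... => false` arm plus
        // a catch-all `(Some(_), Some(_)) => true` arm. After: one arm.
        (Some(f), Some(t_f)) => !(f == t_f || !msvc_seh),
    }
}

fn main() {
    assert!(!needs_landing_pad(Some(1), Some(1), true)); // same funclet
    assert!(needs_landing_pad(Some(1), Some(2), true)); // MSVC cross-funclet
    assert!(!needs_landing_pad(Some(1), Some(2), false)); // non-MSVC: plain jump
}
```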
20 changes: 10 additions & 10 deletions compiler/rustc_codegen_ssa/src/mir/mod.rs
@@ -148,10 +148,10 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     let debug_context = cx.create_function_debug_context(instance, &fn_abi, llfn, &mir);
 
     let start_llbb = Bx::append_block(cx, llfn, "start");
-    let mut bx = Bx::build(cx, start_llbb);
+    let mut start_bx = Bx::build(cx, start_llbb);
 
     if mir.basic_blocks.iter().any(|bb| bb.is_cleanup) {
-        bx.set_personality_fn(cx.eh_personality());
+        start_bx.set_personality_fn(cx.eh_personality());
     }
 
     let cleanup_kinds = analyze::cleanup_kinds(&mir);
@@ -180,7 +180,7 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
         caller_location: None,
     };
 
-    fx.per_local_var_debug_info = fx.compute_per_local_var_debug_info(&mut bx);
+    fx.per_local_var_debug_info = fx.compute_per_local_var_debug_info(&mut start_bx);
 
     // Evaluate all required consts; codegen later assumes that CTFE will never fail.
     let mut all_consts_ok = true;
@@ -206,29 +206,29 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
 
     // Allocate variable and temp allocas
     fx.locals = {
-        let args = arg_local_refs(&mut bx, &mut fx, &memory_locals);
+        let args = arg_local_refs(&mut start_bx, &mut fx, &memory_locals);
 
         let mut allocate_local = |local| {
             let decl = &mir.local_decls[local];
-            let layout = bx.layout_of(fx.monomorphize(decl.ty));
+            let layout = start_bx.layout_of(fx.monomorphize(decl.ty));
             assert!(!layout.ty.has_erasable_regions());
 
             if local == mir::RETURN_PLACE && fx.fn_abi.ret.is_indirect() {
                 debug!("alloc: {:?} (return place) -> place", local);
-                let llretptr = bx.get_param(0);
+                let llretptr = start_bx.get_param(0);
                 return LocalRef::Place(PlaceRef::new_sized(llretptr, layout));
             }
 
             if memory_locals.contains(local) {
                 debug!("alloc: {:?} -> place", local);
                 if layout.is_unsized() {
-                    LocalRef::UnsizedPlace(PlaceRef::alloca_unsized_indirect(&mut bx, layout))
+                    LocalRef::UnsizedPlace(PlaceRef::alloca_unsized_indirect(&mut start_bx, layout))
                 } else {
-                    LocalRef::Place(PlaceRef::alloca(&mut bx, layout))
+                    LocalRef::Place(PlaceRef::alloca(&mut start_bx, layout))
                 }
             } else {
                 debug!("alloc: {:?} -> operand", local);
-                LocalRef::new_operand(&mut bx, layout)
+                LocalRef::new_operand(&mut start_bx, layout)
             }
         };
 
@@ -240,7 +240,7 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     };
 
     // Apply debuginfo to the newly allocated locals.
-    fx.debug_introduce_locals(&mut bx);
+    fx.debug_introduce_locals(&mut start_bx);
 
     // Codegen the body of each block using reverse postorder
     for (bb, _) in traversal::reverse_postorder(&mir) {
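Here the start-block builder is renamed from `bx` to `start_bx`, making it explicit that everything in this stretch (the personality function, argument handling, allocas, debuginfo) is emitted into the function's `start` block before any MIR basic block is codegenned. The `allocate_local` closure it threads through encodes a three-way decision; a condensed sketch with a hypothetical enum (the real `LocalRef` carries backend places and operands, not labels):

```rust
// How codegen_mir's allocate_local decides where a MIR local lives.
enum LocalKind {
    IndirectReturnPlace, // _0 with an indirect ABI: reuse the caller's pointer
    Place,               // lives in memory: emit an alloca (sized or unsized)
    Operand,             // everything else: stays an immediate SSA-style value
}

fn classify_local(is_return_place: bool, ret_is_indirect: bool, in_memory: bool) -> LocalKind {
    if is_return_place && ret_is_indirect {
        LocalKind::IndirectReturnPlace
    } else if in_memory {
        LocalKind::Place
    } else {
        LocalKind::Operand
    }
}

fn main() {
    // The return place under an sret-style ABI reuses the caller's buffer.
    assert!(matches!(classify_local(true, true, true), LocalKind::IndirectReturnPlace));
    // A local whose address is never taken can stay as an operand.
    assert!(matches!(classify_local(false, false, false), LocalKind::Operand));
}
```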