diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs index 854e3ccc21b4f..0cebf8838eb04 100644 --- a/compiler/rustc_codegen_llvm/src/abi.rs +++ b/compiler/rustc_codegen_llvm/src/abi.rs @@ -601,11 +601,7 @@ impl<'tcx> FnAbiLlvmExt<'tcx> for FnAbi<'tcx, Ty<'tcx>> { } } -impl AbiBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { - fn apply_attrs_callsite(&mut self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, callsite: Self::Value) { - fn_abi.apply_attrs_callsite(self, callsite) - } - +impl AbiBuilderMethods for Builder<'_, '_, '_> { fn get_param(&self, index: usize) -> Self::Value { llvm::get_param(self.llfn(), index as c_uint) } diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs index 84b091d8d4d79..3dd0f0894f3e3 100644 --- a/compiler/rustc_codegen_llvm/src/asm.rs +++ b/compiler/rustc_codegen_llvm/src/asm.rs @@ -402,7 +402,7 @@ fn inline_asm_call( alignstack, llvm::AsmDialect::from_generic(dia), ); - let call = bx.call(v, inputs, None); + let call = bx.call(v, inputs, None, None); // Store mark in a metadata node so we can map LLVM errors // back to source locations. See #17552. diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs index 053cda1e7cc9c..57b5701edf54b 100644 --- a/compiler/rustc_codegen_llvm/src/builder.rs +++ b/compiler/rustc_codegen_llvm/src/builder.rs @@ -1,3 +1,4 @@ +use crate::abi::FnAbiLlvmExt; use crate::common::Funclet; use crate::context::CodegenCx; use crate::llvm::{self, BasicBlock, False}; @@ -18,6 +19,7 @@ use rustc_hir::def_id::DefId; use rustc_middle::ty::layout::TyAndLayout; use rustc_middle::ty::{self, Ty, TyCtxt}; use rustc_span::Span; +use rustc_target::abi::call::FnAbi; use rustc_target::abi::{self, Align, Size}; use rustc_target::spec::{HasTargetSpec, Target}; use std::borrow::Cow; @@ -117,21 +119,36 @@ macro_rules! builder_methods_for_value_instructions { } } +// HACK(eddyb) this is an easy way to avoid a complex relationship between +// `Builder` and `UnpositionedBuilder`, even if it seems lopsided. +pub struct UnpositionedBuilder<'a, 'll, 'tcx>(Builder<'a, 'll, 'tcx>); + impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { + type Unpositioned = UnpositionedBuilder<'a, 'll, 'tcx>; + + fn unpositioned(cx: &'a CodegenCx<'ll, 'tcx>) -> Self::Unpositioned { + // Create a fresh builder from the crate context. + let llbuilder = unsafe { llvm::LLVMCreateBuilderInContext(cx.llcx) }; + UnpositionedBuilder(Builder { llbuilder, cx }) + } + + fn position_at_end(bx: Self::Unpositioned, llbb: &'ll BasicBlock) -> Self { + unsafe { + llvm::LLVMPositionBuilderAtEnd(bx.0.llbuilder, llbb); + } + bx.0 + } + + fn into_unpositioned(self) -> Self::Unpositioned { + UnpositionedBuilder(self) + } + fn new_block<'b>(cx: &'a CodegenCx<'ll, 'tcx>, llfn: &'ll Value, name: &'b str) -> Self { - let mut bx = Builder::with_cx(cx); let llbb = unsafe { let name = SmallCStr::new(name); llvm::LLVMAppendBasicBlockInContext(cx.llcx, llfn, name.as_ptr()) }; - bx.position_at_end(llbb); - bx - } - - fn with_cx(cx: &'a CodegenCx<'ll, 'tcx>) -> Self { - // Create a fresh builder from the crate context. 
- let llbuilder = unsafe { llvm::LLVMCreateBuilderInContext(cx.llcx) }; - Builder { llbuilder, cx } + Self::position_at_end(Self::unpositioned(cx), llbb) } fn build_sibling_block(&self, name: &str) -> Self { @@ -144,70 +161,70 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { fn set_span(&mut self, _span: Span) {} - fn position_at_end(&mut self, llbb: &'ll BasicBlock) { - unsafe { - llvm::LLVMPositionBuilderAtEnd(self.llbuilder, llbb); - } - } - - fn ret_void(&mut self) { + fn ret_void(self) -> Self::Unpositioned { unsafe { llvm::LLVMBuildRetVoid(self.llbuilder); } + self.into_unpositioned() } - fn ret(&mut self, v: &'ll Value) { + fn ret(self, v: &'ll Value) -> Self::Unpositioned { unsafe { llvm::LLVMBuildRet(self.llbuilder, v); } + self.into_unpositioned() } - fn br(&mut self, dest: &'ll BasicBlock) { + fn br(self, dest: &'ll BasicBlock) -> Self::Unpositioned { unsafe { llvm::LLVMBuildBr(self.llbuilder, dest); } + self.into_unpositioned() } fn cond_br( - &mut self, + self, cond: &'ll Value, then_llbb: &'ll BasicBlock, else_llbb: &'ll BasicBlock, - ) { + ) -> Self::Unpositioned { unsafe { llvm::LLVMBuildCondBr(self.llbuilder, cond, then_llbb, else_llbb); } + self.into_unpositioned() } fn switch( - &mut self, + self, v: &'ll Value, else_llbb: &'ll BasicBlock, cases: impl ExactSizeIterator, - ) { + ) -> Self::Unpositioned { let switch = unsafe { llvm::LLVMBuildSwitch(self.llbuilder, v, else_llbb, cases.len() as c_uint) }; for (on_val, dest) in cases { let on_val = self.const_uint_big(self.val_ty(v), on_val); unsafe { llvm::LLVMAddCase(switch, on_val, dest) } } + self.into_unpositioned() } fn invoke( - &mut self, + mut self, llfn: &'ll Value, args: &[&'ll Value], then: &'ll BasicBlock, catch: &'ll BasicBlock, funclet: Option<&Funclet<'ll>>, - ) -> &'ll Value { + fn_abi_for_attrs: Option<&FnAbi<'tcx, Ty<'tcx>>>, + ) -> (Self::Unpositioned, &'ll Value) { debug!("invoke {:?} with args ({:?})", llfn, args); let args = self.check_call("invoke", llfn, args); let bundle = funclet.map(|funclet| funclet.bundle()); let bundle = bundle.as_ref().map(|b| &*b.raw); - unsafe { + let invoke = unsafe { llvm::LLVMRustBuildInvoke( self.llbuilder, llfn, @@ -218,13 +235,18 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { bundle, UNNAMED, ) + }; + if let Some(fn_abi) = fn_abi_for_attrs { + fn_abi.apply_attrs_callsite(&mut self, invoke); } + (self.into_unpositioned(), invoke) } - fn unreachable(&mut self) { + fn unreachable(self) -> Self::Unpositioned { unsafe { llvm::LLVMBuildUnreachable(self.llbuilder); } + self.into_unpositioned() } builder_methods_for_value_instructions! 
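The hunks above change every block terminator (`ret_void`, `ret`, `br`, `cond_br`, `switch`, `invoke`, `unreachable`) to take `self` by value and hand back a `Self::Unpositioned`, so a builder that has already emitted a terminator can no longer append instructions to that block. A minimal self-contained sketch of that typestate idea, using hypothetical `Block`/`Value`/`Positioned`/`Unpositioned` stand-ins rather than the real rustc/LLVM types:

// Hypothetical stand-ins for the real LLVM wrappers.
#[derive(Clone, Copy, Debug)]
struct Block(u32);
#[derive(Clone, Copy, Debug)]
struct Value(u32);

// A builder that is not pointing at any block yet; it can only be positioned.
struct Unpositioned;

// A builder positioned at the end of `block`; it can emit instructions.
struct Positioned {
    block: Block,
}

impl Unpositioned {
    fn position_at_end(self, block: Block) -> Positioned {
        Positioned { block }
    }
}

impl Positioned {
    // Non-terminator instructions keep the builder positioned.
    fn add(&mut self, a: Value, b: Value) -> Value {
        println!("bb{}: add {:?}, {:?}", self.block.0, a, b);
        Value(a.0 + b.0)
    }

    // Terminators consume the builder and return the unpositioned state,
    // so nothing can be appended after them by mistake.
    fn ret(self, v: Value) -> Unpositioned {
        println!("bb{}: ret {:?}", self.block.0, v);
        Unpositioned
    }
}

fn main() {
    let mut bx = Unpositioned.position_at_end(Block(0));
    let sum = bx.add(Value(1), Value(2));
    let _unpositioned = bx.ret(sum);
    // `bx` has been moved; emitting into the terminated block is now a compile error.
}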
{ @@ -365,7 +387,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { }; let intrinsic = self.get_intrinsic(&name); - let res = self.call(intrinsic, &[lhs, rhs], None); + let res = self.call(intrinsic, &[lhs, rhs], None, None); (self.extract_value(res, 0), self.extract_value(res, 1)) } @@ -384,9 +406,8 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } fn alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value { - let mut bx = Builder::with_cx(self.cx); - bx.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) }); - bx.dynamic_alloca(ty, align) + let entry_llbb = unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) }; + Self::position_at_start(Self::unpositioned(self.cx), entry_llbb).dynamic_alloca(ty, align) } fn dynamic_alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value { @@ -515,29 +536,33 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { count: u64, dest: PlaceRef<'tcx, &'ll Value>, ) -> Self { - let zero = self.const_usize(0); - let count = self.const_usize(count); - let start = dest.project_index(&mut self, zero).llval; - let end = dest.project_index(&mut self, count).llval; + let cx = self.cx; + let original_llbb = self.llbb(); + + let start = dest.project_index(&mut self, cx.const_usize(0)).llval; + let end = dest.project_index(&mut self, cx.const_usize(count)).llval; let mut header_bx = self.build_sibling_block("repeat_loop_header"); + let header_llbb = header_bx.llbb(); let mut body_bx = self.build_sibling_block("repeat_loop_body"); + let body_llbb = body_bx.llbb(); let next_bx = self.build_sibling_block("repeat_loop_next"); + let current_llty = cx.val_ty(start); self.br(header_bx.llbb()); - let current = header_bx.phi(self.val_ty(start), &[start], &[self.llbb()]); + let current = header_bx.phi(current_llty, &[start], &[original_llbb]); let keep_going = header_bx.icmp(IntPredicate::IntNE, current, end); header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb()); - let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size); + let align = dest.align.restrict_for_offset(dest.layout.field(cx, 0).size); cg_elem .val .store(&mut body_bx, PlaceRef::new_sized_aligned(current, cg_elem.layout, align)); - let next = body_bx.inbounds_gep(current, &[self.const_usize(1)]); - body_bx.br(header_bx.llbb()); - header_bx.add_incoming_to_phi(current, next, body_bx.llbb()); + let next = body_bx.inbounds_gep(current, &[cx.const_usize(1)]); + body_bx.br(header_llbb); + Self::add_incoming_to_phi(current, &[next], &[body_llbb]); next_bx } @@ -676,7 +701,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { let int_width = self.cx.int_width(dest_ty); let name = format!("llvm.fptoui.sat.i{}.f{}", int_width, float_width); let intrinsic = self.get_intrinsic(&name); - return Some(self.call(intrinsic, &[val], None)); + return Some(self.call(intrinsic, &[val], None, None)); } None @@ -689,7 +714,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { let int_width = self.cx.int_width(dest_ty); let name = format!("llvm.fptosi.sat.i{}.f{}", int_width, float_width); let intrinsic = self.get_intrinsic(&name); - return Some(self.call(intrinsic, &[val], None)); + return Some(self.call(intrinsic, &[val], None, None)); } None @@ -724,7 +749,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { }; if let Some(name) = name { let intrinsic = self.get_intrinsic(name); - return self.call(intrinsic, &[val], None); + return self.call(intrinsic, &[val], None, None); } } } @@ -747,7 +772,7 @@ impl 
BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { }; if let Some(name) = name { let intrinsic = self.get_intrinsic(name); - return self.call(intrinsic, &[val], None); + return self.call(intrinsic, &[val], None, None); } } } @@ -943,8 +968,9 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } } - fn resume(&mut self, exn: &'ll Value) -> &'ll Value { - unsafe { llvm::LLVMBuildResume(self.llbuilder, exn) } + fn resume(self, exn: &'ll Value) -> (Self::Unpositioned, &'ll Value) { + let resume = unsafe { llvm::LLVMBuildResume(self.llbuilder, exn) }; + (self.into_unpositioned(), resume) } fn cleanup_pad(&mut self, parent: Option<&'ll Value>, args: &[&'ll Value]) -> Funclet<'ll> { @@ -962,13 +988,13 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } fn cleanup_ret( - &mut self, + self, funclet: &Funclet<'ll>, unwind: Option<&'ll BasicBlock>, - ) -> &'ll Value { + ) -> (Self::Unpositioned, &'ll Value) { let ret = unsafe { llvm::LLVMRustBuildCleanupRet(self.llbuilder, funclet.cleanuppad(), unwind) }; - ret.expect("LLVM does not have support for cleanupret") + (self.into_unpositioned(), ret.expect("LLVM does not have support for cleanupret")) } fn catch_pad(&mut self, parent: &'ll Value, args: &[&'ll Value]) -> Funclet<'ll> { @@ -986,28 +1012,28 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } fn catch_switch( - &mut self, + self, parent: Option<&'ll Value>, unwind: Option<&'ll BasicBlock>, - num_handlers: usize, - ) -> &'ll Value { + handlers: &[&'ll BasicBlock], + ) -> (Self::Unpositioned, &'ll Value) { let name = cstr!("catchswitch"); let ret = unsafe { llvm::LLVMRustBuildCatchSwitch( self.llbuilder, parent, unwind, - num_handlers as c_uint, + handlers.len() as c_uint, name.as_ptr(), ) }; - ret.expect("LLVM does not have support for catchswitch") - } - - fn add_handler(&mut self, catch_switch: &'ll Value, handler: &'ll BasicBlock) { - unsafe { - llvm::LLVMRustAddHandler(catch_switch, handler); + let catch_switch = ret.expect("LLVM does not have support for catchswitch"); + for &handler in handlers { + unsafe { + llvm::LLVMRustAddHandler(catch_switch, handler); + } } + (self.into_unpositioned(), catch_switch) } fn set_personality_fn(&mut self, personality: &'ll Value) { @@ -1122,6 +1148,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { llfn: &'ll Value, args: &[&'ll Value], funclet: Option<&Funclet<'ll>>, + fn_abi_for_attrs: Option<&FnAbi<'tcx, Ty<'tcx>>>, ) -> &'ll Value { debug!("call {:?} with args ({:?})", llfn, args); @@ -1129,7 +1156,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { let bundle = funclet.map(|funclet| funclet.bundle()); let bundle = bundle.as_ref().map(|b| &*b.raw); - unsafe { + let call = unsafe { llvm::LLVMRustBuildCall( self.llbuilder, llfn, @@ -1137,7 +1164,11 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { args.len() as c_uint, bundle, ) + }; + if let Some(fn_abi) = fn_abi_for_attrs { + fn_abi.apply_attrs_callsite(self, call); } + call } fn zext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { @@ -1169,10 +1200,11 @@ impl Builder<'a, 'll, 'tcx> { unsafe { llvm::LLVMGetBasicBlockParent(self.llbb()) } } - fn position_at_start(&mut self, llbb: &'ll BasicBlock) { + fn position_at_start(bx: UnpositionedBuilder<'a, 'll, 'tcx>, llbb: &'ll BasicBlock) -> Self { unsafe { - llvm::LLVMRustPositionBuilderAtStart(self.llbuilder, llbb); + llvm::LLVMRustPositionBuilderAtStart(bx.0.llbuilder, llbb); } + bx.0 } pub fn minnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { @@ 
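Across these hunks, `call` and `invoke` gain a trailing `fn_abi_for_attrs: Option<&FnAbi<'tcx, Ty<'tcx>>>` parameter and apply callsite attributes themselves, replacing the separate `apply_attrs_callsite` step removed from `AbiBuilderMethods`; plain intrinsic calls simply pass `None`. A rough sketch of that shape, with hypothetical `CallSite`/`FnAbiStub` types standing in for the real `Value` and `FnAbi`:

// Hypothetical stand-ins: a callsite value and an ABI descriptor that knows
// how to decorate it.
#[derive(Debug)]
struct CallSite {
    attrs: Vec<&'static str>,
}
struct FnAbiStub {
    attrs: &'static [&'static str],
}

impl FnAbiStub {
    fn apply_attrs_callsite(&self, call: &mut CallSite) {
        call.attrs.extend_from_slice(self.attrs);
    }
}

// Sketch of the new shape: the builder's `call` applies callsite attributes
// itself when an ABI is supplied, instead of a separate apply step afterwards.
fn call(fn_abi_for_attrs: Option<&FnAbiStub>) -> CallSite {
    let mut call = CallSite { attrs: Vec::new() };
    if let Some(fn_abi) = fn_abi_for_attrs {
        fn_abi.apply_attrs_callsite(&mut call);
    }
    call
}

fn main() {
    // Intrinsic-style call: no ABI attributes needed.
    println!("{:?}", call(None));
    // Call-terminator style: pass the ABI so attributes land on the callsite.
    println!("{:?}", call(Some(&FnAbiStub { attrs: &["noalias", "nonnull"] })));
}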
-1365,7 +1397,7 @@ impl Builder<'a, 'll, 'tcx> { let lifetime_intrinsic = self.cx.get_intrinsic(intrinsic); let ptr = self.pointercast(ptr, self.cx.type_i8p()); - self.call(lifetime_intrinsic, &[self.cx.const_u64(size), ptr], None); + self.call(lifetime_intrinsic, &[self.cx.const_u64(size), ptr], None, None); } pub(crate) fn phi( @@ -1374,17 +1406,15 @@ impl Builder<'a, 'll, 'tcx> { vals: &[&'ll Value], bbs: &[&'ll BasicBlock], ) -> &'ll Value { - assert_eq!(vals.len(), bbs.len()); let phi = unsafe { llvm::LLVMBuildPhi(self.llbuilder, ty, UNNAMED) }; - unsafe { - llvm::LLVMAddIncoming(phi, vals.as_ptr(), bbs.as_ptr(), vals.len() as c_uint); - phi - } + Self::add_incoming_to_phi(phi, vals, bbs); + phi } - fn add_incoming_to_phi(&mut self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock) { + fn add_incoming_to_phi(phi: &'ll Value, vals: &[&'ll Value], bbs: &[&'ll BasicBlock]) { + assert_eq!(vals.len(), bbs.len()); unsafe { - llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint); + llvm::LLVMAddIncoming(phi, vals.as_ptr(), bbs.as_ptr(), vals.len() as c_uint); } } diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs index fc6c1abf4af56..20e90e3051510 100644 --- a/compiler/rustc_codegen_llvm/src/intrinsic.rs +++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs @@ -75,13 +75,13 @@ fn get_simple_intrinsic(cx: &CodegenCx<'ll, '_>, name: Symbol) -> Option<&'ll Va impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { fn codegen_intrinsic_call( - &mut self, + mut self, instance: ty::Instance<'tcx>, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OperandRef<'tcx, &'ll Value>], llresult: &'ll Value, span: Span, - ) { + ) -> Self { let tcx = self.tcx; let callee_ty = instance.ty(tcx, ty::ParamEnv::reveal_all()); @@ -97,44 +97,45 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { let name = tcx.item_name(def_id); let name_str = &*name.as_str(); - let llret_ty = self.layout_of(ret_ty).llvm_type(self); + let llret_ty = self.layout_of(ret_ty).llvm_type(&self); let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout); - let simple = get_simple_intrinsic(self, name); + let simple = get_simple_intrinsic(&self, name); let llval = match name { _ if simple.is_some() => self.call( simple.unwrap(), &args.iter().map(|arg| arg.immediate()).collect::>(), None, + None, ), sym::likely => { let expect = self.get_intrinsic(&("llvm.expect.i1")); - self.call(expect, &[args[0].immediate(), self.const_bool(true)], None) + self.call(expect, &[args[0].immediate(), self.const_bool(true)], None, None) } sym::unlikely => { let expect = self.get_intrinsic(&("llvm.expect.i1")); - self.call(expect, &[args[0].immediate(), self.const_bool(false)], None) + self.call(expect, &[args[0].immediate(), self.const_bool(false)], None, None) } kw::Try => { try_intrinsic( - self, + &mut self, args[0].immediate(), args[1].immediate(), args[2].immediate(), llresult, ); - return; + return self; } sym::breakpoint => { let llfn = self.get_intrinsic(&("llvm.debugtrap")); - self.call(llfn, &[], None) + self.call(llfn, &[], None, None) } sym::va_copy => { let intrinsic = self.cx().get_intrinsic(&("llvm.va_copy")); - self.call(intrinsic, &[args[0].immediate(), args[1].immediate()], None) + self.call(intrinsic, &[args[0].immediate(), args[1].immediate()], None, None) } sym::va_arg => { - match fn_abi.ret.layout.abi { + let (new_bx, val) = match fn_abi.ret.layout.abi { abi::Abi::Scalar(ref scalar) => { match scalar.value { Primitive::Int(..) 
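The `phi`/`add_incoming_to_phi` rework above moves the length check into a single slice-based helper that no longer needs `&mut self`, so callers such as the repeat-loop codegen can register a batch of incoming edges per call. A small self-contained sketch, assuming hypothetical `Value`/`Block`/`Phi` types in place of the LLVM handles:

// Hypothetical stand-ins for LLVM values and basic blocks.
#[derive(Clone, Copy, Debug)]
struct Value(i64);
#[derive(Clone, Copy, Debug)]
struct Block(u32);

#[derive(Debug, Default)]
struct Phi {
    incoming: Vec<(Value, Block)>,
}

// Mirrors the reworked helper: one call adds a whole batch of incoming edges,
// and the length check lives in a single place instead of at every call site.
fn add_incoming_to_phi(phi: &mut Phi, vals: &[Value], bbs: &[Block]) {
    assert_eq!(vals.len(), bbs.len());
    phi.incoming.extend(vals.iter().copied().zip(bbs.iter().copied()));
}

fn main() {
    let mut current = Phi::default();
    // The loop header first sees the start value coming from the entry block...
    add_incoming_to_phi(&mut current, &[Value(0)], &[Block(0)]);
    // ...and later the incremented value coming from the loop body.
    add_incoming_to_phi(&mut current, &[Value(1)], &[Block(2)]);
    println!("{:?}", current);
}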
=> { @@ -143,8 +144,10 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { // less than 4 bytes in length. If it is, promote // the integer to a `i32` and truncate the result // back to the smaller type. - let promoted_result = emit_va_arg(self, args[0], tcx.types.i32); - self.trunc(promoted_result, llret_ty) + let (mut new_bx, promoted_result) = + emit_va_arg(self, args[0], tcx.types.i32); + let val = new_bx.trunc(promoted_result, llret_ty); + (new_bx, val) } else { emit_va_arg(self, args[0], ret_ty) } @@ -157,14 +160,16 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { } } _ => bug!("the va_arg intrinsic does not work with non-scalar types"), - } + }; + self = new_bx; + val } sym::volatile_load | sym::unaligned_volatile_load => { let tp_ty = substs.type_at(0); let mut ptr = args[0].immediate(); if let PassMode::Cast(ty) = fn_abi.ret.mode { - ptr = self.pointercast(ptr, self.type_ptr_to(ty.llvm_type(self))); + ptr = self.pointercast(ptr, self.type_ptr_to(ty.llvm_type(&self))); } let load = self.volatile_load(ptr); let align = if name == sym::unaligned_volatile_load { @@ -179,13 +184,13 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { } sym::volatile_store => { let dst = args[0].deref(self.cx()); - args[1].val.volatile_store(self, dst); - return; + args[1].val.volatile_store(&mut self, dst); + return self; } sym::unaligned_volatile_store => { let dst = args[0].deref(self.cx()); - args[1].val.unaligned_volatile_store(self, dst); - return; + args[1].val.unaligned_volatile_store(&mut self, dst); + return self; } sym::prefetch_read_data | sym::prefetch_write_data @@ -208,6 +213,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { self.const_i32(cache_type), ], None, + None, ) } sym::ctlz @@ -222,23 +228,24 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { | sym::saturating_add | sym::saturating_sub => { let ty = arg_tys[0]; - match int_type_width_signed(ty, self) { + match int_type_width_signed(ty, &self) { Some((width, signed)) => match name { sym::ctlz | sym::cttz => { let y = self.const_bool(false); let llfn = self.get_intrinsic(&format!("llvm.{}.i{}", name, width)); - self.call(llfn, &[args[0].immediate(), y], None) + self.call(llfn, &[args[0].immediate(), y], None, None) } sym::ctlz_nonzero | sym::cttz_nonzero => { let y = self.const_bool(true); let llvm_name = &format!("llvm.{}.i{}", &name_str[..4], width); let llfn = self.get_intrinsic(llvm_name); - self.call(llfn, &[args[0].immediate(), y], None) + self.call(llfn, &[args[0].immediate(), y], None, None) } sym::ctpop => self.call( self.get_intrinsic(&format!("llvm.ctpop.i{}", width)), &[args[0].immediate()], None, + None, ), sym::bswap => { if width == 8 { @@ -248,6 +255,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { self.get_intrinsic(&format!("llvm.bswap.i{}", width)), &[args[0].immediate()], None, + None, ) } } @@ -255,6 +263,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { self.get_intrinsic(&format!("llvm.bitreverse.i{}", width)), &[args[0].immediate()], None, + None, ), sym::rotate_left | sym::rotate_right => { let is_left = name == sym::rotate_left; @@ -264,7 +273,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { let llvm_name = &format!("llvm.fsh{}.i{}", if is_left { 'l' } else { 'r' }, width); let llfn = self.get_intrinsic(llvm_name); - self.call(llfn, &[val, val, raw_shift], None) + self.call(llfn, &[val, val, raw_shift], None, None) } sym::saturating_add | sym::saturating_sub => { let is_add = name == 
sym::saturating_add; @@ -277,7 +286,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { width ); let llfn = self.get_intrinsic(llvm_name); - self.call(llfn, &[lhs, rhs], None) + self.call(llfn, &[lhs, rhs], None, None) } _ => bug!(), }, @@ -291,15 +300,17 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { name, ty ), ); - return; + return self; } } } _ if name_str.starts_with("simd_") => { - match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) { + match generic_simd_intrinsic( + &mut self, name, callee_ty, args, ret_ty, llret_ty, span, + ) { Ok(llval) => llval, - Err(()) => return, + Err(()) => return self, } } @@ -308,30 +319,32 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { if !fn_abi.ret.is_ignore() { if let PassMode::Cast(ty) = fn_abi.ret.mode { - let ptr_llty = self.type_ptr_to(ty.llvm_type(self)); + let ptr_llty = self.type_ptr_to(ty.llvm_type(&self)); let ptr = self.pointercast(result.llval, ptr_llty); self.store(llval, ptr, result.align); } else { - OperandRef::from_immediate_or_packed_pair(self, llval, result.layout) + OperandRef::from_immediate_or_packed_pair(&mut self, llval, result.layout) .val - .store(self, result); + .store(&mut self, result); } } + + self } fn abort(&mut self) { let fnname = self.get_intrinsic(&("llvm.trap")); - self.call(fnname, &[], None); + self.call(fnname, &[], None, None); } fn assume(&mut self, val: Self::Value) { let assume_intrinsic = self.get_intrinsic("llvm.assume"); - self.call(assume_intrinsic, &[val], None); + self.call(assume_intrinsic, &[val], None, None); } fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value { let expect = self.get_intrinsic(&"llvm.expect.i1"); - self.call(expect, &[cond, self.const_bool(expected)], None) + self.call(expect, &[cond, self.const_bool(expected)], None, None) } fn sideeffect(&mut self) { @@ -340,18 +353,18 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { // codegen backend being used, and so is unable to check the LLVM version. if unsafe { llvm::LLVMRustVersionMajor() } < 12 { let fnname = self.get_intrinsic(&("llvm.sideeffect")); - self.call(fnname, &[], None); + self.call(fnname, &[], None, None); } } fn va_start(&mut self, va_list: &'ll Value) -> &'ll Value { let intrinsic = self.cx().get_intrinsic("llvm.va_start"); - self.call(intrinsic, &[va_list], None) + self.call(intrinsic, &[va_list], None, None) } fn va_end(&mut self, va_list: &'ll Value) -> &'ll Value { let intrinsic = self.cx().get_intrinsic("llvm.va_end"); - self.call(intrinsic, &[va_list], None) + self.call(intrinsic, &[va_list], None, None) } } @@ -363,7 +376,7 @@ fn try_intrinsic( dest: &'ll Value, ) { if bx.sess().panic_strategy() == PanicStrategy::Abort { - bx.call(try_func, &[data], None); + bx.call(try_func, &[data], None, None); // Return 0 unconditionally from the intrinsic call; // we can never unwind. 
let ret_align = bx.tcx().data_layout.i32_align.abi; @@ -392,13 +405,15 @@ fn codegen_msvc_try( dest: &'ll Value, ) { let llfn = get_rust_try_fn(bx, &mut |mut bx| { + let cx = bx.cx; + bx.set_personality_fn(bx.eh_personality()); - let mut normal = bx.build_sibling_block("normal"); - let mut catchswitch = bx.build_sibling_block("catchswitch"); + let normal = bx.build_sibling_block("normal"); + let catchswitch = bx.build_sibling_block("catchswitch"); let mut catchpad_rust = bx.build_sibling_block("catchpad_rust"); let mut catchpad_foreign = bx.build_sibling_block("catchpad_foreign"); - let mut caught = bx.build_sibling_block("caught"); + let caught = bx.build_sibling_block("caught"); let try_func = llvm::get_param(bx.llfn(), 0); let data = llvm::get_param(bx.llfn(), 1); @@ -461,13 +476,12 @@ fn codegen_msvc_try( // More information can be found in libstd's seh.rs implementation. let ptr_align = bx.tcx().data_layout.pointer_align.abi; let slot = bx.alloca(bx.type_i8p(), ptr_align); - bx.invoke(try_func, &[data], normal.llbb(), catchswitch.llbb(), None); + bx.invoke(try_func, &[data], normal.llbb(), catchswitch.llbb(), None, None); - normal.ret(bx.const_i32(0)); + normal.ret(cx.const_i32(0)); - let cs = catchswitch.catch_switch(None, None, 2); - catchswitch.add_handler(cs, catchpad_rust.llbb()); - catchswitch.add_handler(cs, catchpad_foreign.llbb()); + let (_, cs) = + catchswitch.catch_switch(None, None, &[catchpad_rust.llbb(), catchpad_foreign.llbb()]); // We can't use the TypeDescriptor defined in libpanic_unwind because it // might be in another DLL and the SEH encoding only supports specifying @@ -483,14 +497,14 @@ fn codegen_msvc_try( // // When modifying, make sure that the type_name string exactly matches // the one used in src/libpanic_unwind/seh.rs. - let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_i8p()); - let type_name = bx.const_bytes(b"rust_panic\0"); + let type_info_vtable = cx.declare_global("??_7type_info@@6B@", cx.type_i8p()); + let type_name = cx.const_bytes(b"rust_panic\0"); let type_info = - bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_i8p()), type_name], false); - let tydesc = bx.declare_global("__rust_panic_type_info", bx.val_ty(type_info)); + cx.const_struct(&[type_info_vtable, cx.const_null(cx.type_i8p()), type_name], false); + let tydesc = cx.declare_global("__rust_panic_type_info", cx.val_ty(type_info)); unsafe { llvm::LLVMRustSetLinkage(tydesc, llvm::Linkage::LinkOnceODRLinkage); - llvm::SetUniqueComdat(bx.llmod, tydesc); + llvm::SetUniqueComdat(cx.llmod, tydesc); llvm::LLVMSetInitializer(tydesc, type_info); } @@ -500,25 +514,25 @@ fn codegen_msvc_try( // since our exception object effectively contains a Box. // // Source: MicrosoftCXXABI::getAddrOfCXXCatchHandlerType in clang - let flags = bx.const_i32(8); + let flags = cx.const_i32(8); let funclet = catchpad_rust.catch_pad(cs, &[tydesc, flags, slot]); let ptr = catchpad_rust.load(slot, ptr_align); - catchpad_rust.call(catch_func, &[data, ptr], Some(&funclet)); + catchpad_rust.call(catch_func, &[data, ptr], Some(&funclet), None); catchpad_rust.catch_ret(&funclet, caught.llbb()); // The flag value of 64 indicates a "catch-all". 
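`catch_switch` now takes the handler blocks as a slice up front and registers them itself, which is why the separate `add_handler` method disappears and `codegen_msvc_try` passes `&[catchpad_rust.llbb(), catchpad_foreign.llbb()]`. A tiny sketch of the same API shape with a hypothetical `CatchSwitch` type:

// Hypothetical stand-in for an LLVM basic block handle.
#[derive(Clone, Copy, Debug)]
struct Block(u32);

#[derive(Debug)]
struct CatchSwitch {
    handlers: Vec<Block>,
}

// Instead of creating the switch with a handler count and calling a separate
// add_handler repeatedly, the handlers are passed as a slice and registered here.
fn catch_switch(handlers: &[Block]) -> CatchSwitch {
    let mut cs = CatchSwitch { handlers: Vec::with_capacity(handlers.len()) };
    for &handler in handlers {
        cs.handlers.push(handler);
    }
    cs
}

fn main() {
    let catchpad_rust = Block(1);
    let catchpad_foreign = Block(2);
    println!("{:?}", catch_switch(&[catchpad_rust, catchpad_foreign]));
}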
- let flags = bx.const_i32(64); - let null = bx.const_null(bx.type_i8p()); + let flags = cx.const_i32(64); + let null = cx.const_null(cx.type_i8p()); let funclet = catchpad_foreign.catch_pad(cs, &[null, flags, null]); - catchpad_foreign.call(catch_func, &[data, null], Some(&funclet)); + catchpad_foreign.call(catch_func, &[data, null], Some(&funclet), None); catchpad_foreign.catch_ret(&funclet, caught.llbb()); - caught.ret(bx.const_i32(1)); + caught.ret(cx.const_i32(1)); }); // Note that no invoke is used here because by definition this function // can't panic (that's what it's catching). - let ret = bx.call(llfn, &[try_func, data, catch_func], None); + let ret = bx.call(llfn, &[try_func, data, catch_func], None, None); let i32_align = bx.tcx().data_layout.i32_align.abi; bx.store(ret, dest, i32_align); } @@ -541,7 +555,9 @@ fn codegen_gnu_try( catch_func: &'ll Value, dest: &'ll Value, ) { - let llfn = get_rust_try_fn(bx, &mut |mut bx| { + let llfn = get_rust_try_fn(bx, &mut |bx| { + let cx = bx.cx; + // Codegens the shims described above: // // bx: @@ -554,14 +570,14 @@ fn codegen_gnu_try( // (%ptr, _) = landingpad // call %catch_func(%data, %ptr) // ret 1 - let mut then = bx.build_sibling_block("then"); + let then = bx.build_sibling_block("then"); let mut catch = bx.build_sibling_block("catch"); let try_func = llvm::get_param(bx.llfn(), 0); let data = llvm::get_param(bx.llfn(), 1); let catch_func = llvm::get_param(bx.llfn(), 2); - bx.invoke(try_func, &[data], then.llbb(), catch.llbb(), None); - then.ret(bx.const_i32(0)); + bx.invoke(try_func, &[data], then.llbb(), catch.llbb(), None, None); + then.ret(cx.const_i32(0)); // Type indicator for the exception being thrown. // @@ -569,18 +585,18 @@ fn codegen_gnu_try( // being thrown. The second value is a "selector" indicating which of // the landing pad clauses the exception's type had been matched to. // rust_try ignores the selector. - let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false); - let vals = catch.landing_pad(lpad_ty, bx.eh_personality(), 1); - let tydesc = bx.const_null(bx.type_i8p()); + let lpad_ty = cx.type_struct(&[cx.type_i8p(), cx.type_i32()], false); + let vals = catch.landing_pad(lpad_ty, cx.eh_personality(), 1); + let tydesc = cx.const_null(cx.type_i8p()); catch.add_clause(vals, tydesc); let ptr = catch.extract_value(vals, 0); - catch.call(catch_func, &[data, ptr], None); - catch.ret(bx.const_i32(1)); + catch.call(catch_func, &[data, ptr], None, None); + catch.ret(cx.const_i32(1)); }); // Note that no invoke is used here because by definition this function // can't panic (that's what it's catching). 
- let ret = bx.call(llfn, &[try_func, data, catch_func], None); + let ret = bx.call(llfn, &[try_func, data, catch_func], None, None); let i32_align = bx.tcx().data_layout.i32_align.abi; bx.store(ret, dest, i32_align); } @@ -595,7 +611,9 @@ fn codegen_emcc_try( catch_func: &'ll Value, dest: &'ll Value, ) { - let llfn = get_rust_try_fn(bx, &mut |mut bx| { + let llfn = get_rust_try_fn(bx, &mut |bx| { + let cx = bx.cx; + // Codegens the shims described above: // // bx: @@ -613,53 +631,53 @@ fn codegen_emcc_try( // %catch_data[1] = %is_rust_panic // call %catch_func(%data, %catch_data) // ret 1 - let mut then = bx.build_sibling_block("then"); + let then = bx.build_sibling_block("then"); let mut catch = bx.build_sibling_block("catch"); let try_func = llvm::get_param(bx.llfn(), 0); let data = llvm::get_param(bx.llfn(), 1); let catch_func = llvm::get_param(bx.llfn(), 2); - bx.invoke(try_func, &[data], then.llbb(), catch.llbb(), None); - then.ret(bx.const_i32(0)); + bx.invoke(try_func, &[data], then.llbb(), catch.llbb(), None, None); + then.ret(cx.const_i32(0)); // Type indicator for the exception being thrown. // // The first value in this tuple is a pointer to the exception object // being thrown. The second value is a "selector" indicating which of // the landing pad clauses the exception's type had been matched to. - let tydesc = bx.eh_catch_typeinfo(); - let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false); - let vals = catch.landing_pad(lpad_ty, bx.eh_personality(), 2); + let tydesc = cx.eh_catch_typeinfo(); + let lpad_ty = cx.type_struct(&[cx.type_i8p(), cx.type_i32()], false); + let vals = catch.landing_pad(lpad_ty, cx.eh_personality(), 2); catch.add_clause(vals, tydesc); - catch.add_clause(vals, bx.const_null(bx.type_i8p())); + catch.add_clause(vals, cx.const_null(cx.type_i8p())); let ptr = catch.extract_value(vals, 0); let selector = catch.extract_value(vals, 1); // Check if the typeid we got is the one for a Rust panic. - let llvm_eh_typeid_for = bx.get_intrinsic("llvm.eh.typeid.for"); - let rust_typeid = catch.call(llvm_eh_typeid_for, &[tydesc], None); + let llvm_eh_typeid_for = cx.get_intrinsic("llvm.eh.typeid.for"); + let rust_typeid = catch.call(llvm_eh_typeid_for, &[tydesc], None, None); let is_rust_panic = catch.icmp(IntPredicate::IntEQ, selector, rust_typeid); - let is_rust_panic = catch.zext(is_rust_panic, bx.type_bool()); + let is_rust_panic = catch.zext(is_rust_panic, cx.type_bool()); // We need to pass two values to catch_func (ptr and is_rust_panic), so // create an alloca and pass a pointer to that. 
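In the `codegen_msvc_try`/`codegen_gnu_try`/`codegen_emcc_try` closures, constants, globals, and type queries now go through `cx` (the long-lived `CodegenCx`) instead of the builder, because the builder is consumed once its block is terminated while the context stays borrowable. A minimal sketch of that split, assuming toy `Cx`/`Bx` types:

// Hypothetical stand-ins: a long-lived context shared by reference, and a
// builder that is consumed when its block is terminated (as in the diff).
struct Cx;
impl Cx {
    fn const_i32(&self, v: i32) -> i32 {
        v
    }
}

struct Bx<'a> {
    cx: &'a Cx,
}
impl<'a> Bx<'a> {
    fn ret(self, v: i32) {
        println!("ret {v}");
    }
}

fn main() {
    let cx = Cx;
    let bx = Bx { cx: &cx };
    // Grab the context before the builder is moved by a terminator...
    let cx_ref = bx.cx;
    bx.ret(cx_ref.const_i32(0));
    // ...constants for other blocks can still be created from `cx_ref`.
    let _one = cx_ref.const_i32(1);
}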
- let ptr_align = bx.tcx().data_layout.pointer_align.abi; - let i8_align = bx.tcx().data_layout.i8_align.abi; + let ptr_align = cx.tcx.data_layout.pointer_align.abi; + let i8_align = cx.tcx.data_layout.i8_align.abi; let catch_data = - catch.alloca(bx.type_struct(&[bx.type_i8p(), bx.type_bool()], false), ptr_align); - let catch_data_0 = catch.inbounds_gep(catch_data, &[bx.const_usize(0), bx.const_usize(0)]); + catch.alloca(cx.type_struct(&[cx.type_i8p(), cx.type_bool()], false), ptr_align); + let catch_data_0 = catch.inbounds_gep(catch_data, &[cx.const_usize(0), cx.const_usize(0)]); catch.store(ptr, catch_data_0, ptr_align); - let catch_data_1 = catch.inbounds_gep(catch_data, &[bx.const_usize(0), bx.const_usize(1)]); + let catch_data_1 = catch.inbounds_gep(catch_data, &[cx.const_usize(0), cx.const_usize(1)]); catch.store(is_rust_panic, catch_data_1, i8_align); - let catch_data = catch.bitcast(catch_data, bx.type_i8p()); + let catch_data = catch.bitcast(catch_data, cx.type_i8p()); - catch.call(catch_func, &[data, catch_data], None); - catch.ret(bx.const_i32(1)); + catch.call(catch_func, &[data, catch_data], None, None); + catch.ret(cx.const_i32(1)); }); // Note that no invoke is used here because by definition this function // can't panic (that's what it's catching). - let ret = bx.call(llfn, &[try_func, data, catch_func], None); + let ret = bx.call(llfn, &[try_func, data, catch_func], None, None); let i32_align = bx.tcx().data_layout.i32_align.abi; bx.store(ret, dest, i32_align); } @@ -1073,7 +1091,7 @@ fn generic_simd_intrinsic( }; let llvm_name = &format!("llvm.{0}.v{1}{2}", intr_name, in_len, elem_ty_str); let f = bx.declare_cfn(&llvm_name, llvm::UnnamedAddr::No, fn_ty); - let c = bx.call(f, &args.iter().map(|arg| arg.immediate()).collect::>(), None); + let c = bx.call(f, &args.iter().map(|arg| arg.immediate()).collect::>(), None, None); Ok(c) } @@ -1258,7 +1276,8 @@ fn generic_simd_intrinsic( llvm_elem_vec_ty, ), ); - let v = bx.call(f, &[args[1].immediate(), alignment, mask, args[0].immediate()], None); + let v = + bx.call(f, &[args[1].immediate(), alignment, mask, args[0].immediate()], None, None); return Ok(v); } @@ -1385,7 +1404,8 @@ fn generic_simd_intrinsic( llvm::UnnamedAddr::No, bx.type_func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], ret_t), ); - let v = bx.call(f, &[args[0].immediate(), args[1].immediate(), alignment, mask], None); + let v = + bx.call(f, &[args[0].immediate(), args[1].immediate(), alignment, mask], None, None); return Ok(v); } @@ -1712,7 +1732,7 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#, llvm::UnnamedAddr::No, bx.type_func(&[vec_ty, vec_ty], vec_ty), ); - let v = bx.call(f, &[lhs, rhs], None); + let v = bx.call(f, &[lhs, rhs], None, None); return Ok(v); } diff --git a/compiler/rustc_codegen_llvm/src/va_arg.rs b/compiler/rustc_codegen_llvm/src/va_arg.rs index 39d08fbee3b7f..efcfabad0c267 100644 --- a/compiler/rustc_codegen_llvm/src/va_arg.rs +++ b/compiler/rustc_codegen_llvm/src/va_arg.rs @@ -90,10 +90,12 @@ fn emit_ptr_va_arg( } fn emit_aapcs_va_arg( - bx: &mut Builder<'a, 'll, 'tcx>, + mut bx: Builder<'a, 'll, 'tcx>, list: OperandRef<'tcx, &'ll Value>, target_ty: Ty<'tcx>, -) -> &'ll Value { +) -> (Builder<'a, 'll, 'tcx>, &'ll Value) { + let cx = bx.cx; + // Implementation of the AAPCS64 calling convention for va_args see // https://github.com/ARM-software/abi-aa/blob/master/aapcs64/aapcs64.rst let va_list_addr = list.immediate(); @@ -101,7 +103,9 @@ fn emit_aapcs_va_arg( let mut maybe_reg = 
bx.build_sibling_block("va_arg.maybe_reg"); let mut in_reg = bx.build_sibling_block("va_arg.in_reg"); + let in_reg_llbb = in_reg.llbb(); let mut on_stack = bx.build_sibling_block("va_arg.on_stack"); + let on_stack_llbb = on_stack.llbb(); let mut end = bx.build_sibling_block("va_arg.end"); let zero = bx.const_i32(0); let offset_align = Align::from_bytes(4).unwrap(); @@ -127,10 +131,10 @@ fn emit_aapcs_va_arg( // the offset again. if gr_type && layout.align.abi.bytes() > 8 { - reg_off_v = maybe_reg.add(reg_off_v, bx.const_i32(15)); - reg_off_v = maybe_reg.and(reg_off_v, bx.const_i32(-16)); + reg_off_v = maybe_reg.add(reg_off_v, cx.const_i32(15)); + reg_off_v = maybe_reg.and(reg_off_v, cx.const_i32(-16)); } - let new_reg_off_v = maybe_reg.add(reg_off_v, bx.const_i32(slot_size as i32)); + let new_reg_off_v = maybe_reg.add(reg_off_v, cx.const_i32(slot_size as i32)); maybe_reg.store(new_reg_off_v, reg_off, offset_align); @@ -140,16 +144,16 @@ fn emit_aapcs_va_arg( maybe_reg.cond_br(use_stack, &on_stack.llbb(), &in_reg.llbb()); let top = in_reg.struct_gep(va_list_addr, reg_top_index); - let top = in_reg.load(top, bx.tcx().data_layout.pointer_align.abi); + let top = in_reg.load(top, cx.tcx().data_layout.pointer_align.abi); // reg_value = *(@top + reg_off_v); let mut reg_addr = in_reg.gep(top, &[reg_off_v]); - if bx.tcx().sess.target.endian == Endian::Big && layout.size.bytes() != slot_size { + if cx.tcx().sess.target.endian == Endian::Big && layout.size.bytes() != slot_size { // On big-endian systems the value is right-aligned in its slot. - let offset = bx.const_i32((slot_size - layout.size.bytes()) as i32); + let offset = cx.const_i32((slot_size - layout.size.bytes()) as i32); reg_addr = in_reg.gep(reg_addr, &[offset]); } - let reg_addr = in_reg.bitcast(reg_addr, bx.cx.type_ptr_to(layout.llvm_type(bx))); + let reg_addr = in_reg.bitcast(reg_addr, cx.type_ptr_to(layout.llvm_type(cx))); let reg_value = in_reg.load(reg_addr, layout.align.abi); in_reg.br(&end.llbb()); @@ -159,49 +163,62 @@ fn emit_aapcs_va_arg( on_stack.br(&end.llbb()); let val = end.phi( - layout.immediate_llvm_type(bx), + layout.immediate_llvm_type(cx), &[reg_value, stack_value], - &[&in_reg.llbb(), &on_stack.llbb()], + &[&in_reg_llbb, &on_stack_llbb], ); - *bx = end; - val + (end, val) } pub(super) fn emit_va_arg( - bx: &mut Builder<'a, 'll, 'tcx>, + mut bx: Builder<'a, 'll, 'tcx>, addr: OperandRef<'tcx, &'ll Value>, target_ty: Ty<'tcx>, -) -> &'ll Value { +) -> (Builder<'a, 'll, 'tcx>, &'ll Value) { // Determine the va_arg implementation to use. The LLVM va_arg instruction // is lacking in some instances, so we should only use it as a fallback. 
let target = &bx.cx.tcx.sess.target; let arch = &bx.cx.tcx.sess.target.arch; - match &**arch { + let val = match &**arch { // Windows x86 "x86" if target.is_like_windows => { - emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(4).unwrap(), false) + emit_ptr_va_arg(&mut bx, addr, target_ty, false, Align::from_bytes(4).unwrap(), false) } // Generic x86 - "x86" => emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(4).unwrap(), true), + "x86" => { + emit_ptr_va_arg(&mut bx, addr, target_ty, false, Align::from_bytes(4).unwrap(), true) + } // Windows AArch64 "aarch64" if target.is_like_windows => { - emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), false) + emit_ptr_va_arg(&mut bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), false) } // macOS / iOS AArch64 "aarch64" if target.is_like_osx => { - emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), true) + emit_ptr_va_arg(&mut bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), true) + } + "aarch64" => { + let (new_bx, val) = emit_aapcs_va_arg(bx, addr, target_ty); + bx = new_bx; + val } - "aarch64" => emit_aapcs_va_arg(bx, addr, target_ty), // Windows x86_64 "x86_64" if target.is_like_windows => { let target_ty_size = bx.cx.size_of(target_ty).bytes(); let indirect: bool = target_ty_size > 8 || !target_ty_size.is_power_of_two(); - emit_ptr_va_arg(bx, addr, target_ty, indirect, Align::from_bytes(8).unwrap(), false) + emit_ptr_va_arg( + &mut bx, + addr, + target_ty, + indirect, + Align::from_bytes(8).unwrap(), + false, + ) } // For all other architecture/OS combinations fall back to using // the LLVM va_arg instruction. // https://llvm.org/docs/LangRef.html#va-arg-instruction _ => bx.va_arg(addr.immediate(), bx.cx.layout_of(target_ty).llvm_type(bx.cx)), - } + }; + (bx, val) } diff --git a/compiler/rustc_codegen_ssa/src/base.rs b/compiler/rustc_codegen_ssa/src/base.rs index e045a23eb0ce3..49f82b50ff8f0 100644 --- a/compiler/rustc_codegen_ssa/src/base.rs +++ b/compiler/rustc_codegen_ssa/src/base.rs @@ -447,7 +447,7 @@ pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( (rust_main, vec![arg_argc, arg_argv]) }; - let result = bx.call(start_fn, &args, None); + let result = bx.call(start_fn, &args, None, None); let cast = bx.intcast(result, cx.type_int(), true); bx.ret(cast); diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs index fd3f89a2aee96..16da2219f0234 100644 --- a/compiler/rustc_codegen_ssa/src/mir/block.rs +++ b/compiler/rustc_codegen_ssa/src/mir/block.rs @@ -74,9 +74,10 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> { debug!("llblock: creating cleanup trampoline for {:?}", target); let name = &format!("{:?}_cleanup_trampoline_{:?}", self.bb, target); - let mut trampoline = fx.new_block(name); + let trampoline = fx.new_block(name); + let trampoline_llbb = trampoline.llbb(); trampoline.cleanup_ret(self.funclet(fx).unwrap(), Some(lltarget)); - trampoline.llbb() + trampoline_llbb } else { lltarget } @@ -85,16 +86,16 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> { fn funclet_br>( &self, fx: &mut FunctionCx<'a, 'tcx, Bx>, - bx: &mut Bx, + bx: Bx, target: mir::BasicBlock, - ) { + ) -> Bx::Unpositioned { let (lltarget, is_cleanupret) = self.lltarget(fx, target); if is_cleanupret { // micro-optimization: generate a `ret` rather than a jump // to a trampoline. 
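`emit_va_arg` and `emit_aapcs_va_arg` now take the builder by value and return it alongside the result, since the AAPCS path finishes positioned in a different block (`va_arg.end`); callers such as the `aarch64` arm rebind `bx = new_bx` and keep going. A sketch of that threading pattern with hypothetical `Builder`/`Value` stand-ins:

// Hypothetical stand-ins modelling the by-value threading in emit_va_arg:
// helpers that may switch blocks take the builder and give it back.
struct Builder {
    current_block: &'static str,
}
struct Value(i64);

fn emit_in_new_block(mut bx: Builder) -> (Builder, Value) {
    // The helper may leave the builder positioned in a different block.
    bx.current_block = "va_arg.end";
    (bx, Value(42))
}

fn emit_va_arg(mut bx: Builder) -> (Builder, Value) {
    // The caller must keep using the builder it gets back, not the one it passed in.
    let (new_bx, val) = emit_in_new_block(bx);
    bx = new_bx;
    (bx, val)
}

fn main() {
    let bx = Builder { current_block: "entry" };
    let (bx, val) = emit_va_arg(bx);
    println!("{} -> {}", bx.current_block, val.0);
}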
- bx.cleanup_ret(self.funclet(fx).unwrap(), Some(lltarget)); + bx.cleanup_ret(self.funclet(fx).unwrap(), Some(lltarget)).0 } else { - bx.br(lltarget); + bx.br(lltarget) } } @@ -103,33 +104,39 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> { fn do_call>( &self, fx: &mut FunctionCx<'a, 'tcx, Bx>, - bx: &mut Bx, + mut bx: Bx, fn_abi: FnAbi<'tcx, Ty<'tcx>>, fn_ptr: Bx::Value, llargs: &[Bx::Value], destination: Option<(ReturnDest<'tcx, Bx::Value>, mir::BasicBlock)>, cleanup: Option, - ) { + ) -> Bx::Unpositioned { // If there is a cleanup block and the function we're calling can unwind, then // do an invoke, otherwise do a call. if let Some(cleanup) = cleanup.filter(|_| fn_abi.can_unwind) { - let ret_bx = if let Some((_, target)) = destination { + let ret_llbb = if let Some((_, target)) = destination { fx.blocks[target] } else { fx.unreachable_block() }; - let invokeret = - bx.invoke(fn_ptr, &llargs, ret_bx, self.llblock(fx, cleanup), self.funclet(fx)); - bx.apply_attrs_callsite(&fn_abi, invokeret); + let (unpositioned_bx, invokeret) = bx.invoke( + fn_ptr, + &llargs, + ret_llbb, + self.llblock(fx, cleanup), + self.funclet(fx), + Some(&fn_abi), + ); if let Some((ret_dest, target)) = destination { let mut ret_bx = fx.build_block(target); fx.set_debug_loc(&mut ret_bx, self.terminator.source_info); fx.store_return(&mut ret_bx, ret_dest, &fn_abi.ret, invokeret); } + + unpositioned_bx } else { - let llret = bx.call(fn_ptr, &llargs, self.funclet(fx)); - bx.apply_attrs_callsite(&fn_abi, llret); + let llret = bx.call(fn_ptr, &llargs, self.funclet(fx), Some(&fn_abi)); if fx.mir[self.bb].is_cleanup { // Cleanup is always the cold path. Don't inline // drop glue. Also, when there is a deeply-nested @@ -139,10 +146,10 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> { } if let Some((ret_dest, target)) = destination { - fx.store_return(bx, ret_dest, &fn_abi.ret, llret); - self.funclet_br(fx, bx, target); + fx.store_return(&mut bx, ret_dest, &fn_abi.ret, llret); + self.funclet_br(fx, bx, target) } else { - bx.unreachable(); + bx.unreachable() } } } @@ -151,9 +158,13 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> { /// Codegen implementations for some terminator variants. impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { /// Generates code for a `Resume` terminator. - fn codegen_resume_terminator(&mut self, helper: TerminatorCodegenHelper<'tcx>, mut bx: Bx) { + fn codegen_resume_terminator( + &mut self, + helper: TerminatorCodegenHelper<'tcx>, + mut bx: Bx, + ) -> Bx::Unpositioned { if let Some(funclet) = helper.funclet(self) { - bx.cleanup_ret(funclet, None); + bx.cleanup_ret(funclet, None).0 } else { let slot = self.get_personality_slot(&mut bx); let lp0 = slot.project_field(&mut bx, 0); @@ -165,7 +176,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let mut lp = bx.const_undef(self.landing_pad_type()); lp = bx.insert_value(lp, lp0, 0); lp = bx.insert_value(lp, lp1, 1); - bx.resume(lp); + bx.resume(lp).0 } } @@ -176,7 +187,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { discr: &mir::Operand<'tcx>, switch_ty: Ty<'tcx>, targets: &SwitchTargets, - ) { + ) -> Bx::Unpositioned { let discr = self.codegen_operand(&mut bx, &discr); // `switch_ty` is redundant, sanity-check that. 
assert_eq!(discr.layout.ty, switch_ty); @@ -197,18 +208,18 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let switch_llty = bx.immediate_backend_type(bx.layout_of(switch_ty)); let llval = bx.const_uint_big(switch_llty, test_value); let cmp = bx.icmp(IntPredicate::IntEQ, discr.immediate(), llval); - bx.cond_br(cmp, lltrue, llfalse); + bx.cond_br(cmp, lltrue, llfalse) } } else { bx.switch( discr.immediate(), helper.llblock(self, targets.otherwise()), target_iter.map(|(value, target)| (value, helper.llblock(self, target))), - ); + ) } } - fn codegen_return_terminator(&mut self, mut bx: Bx) { + fn codegen_return_terminator(&mut self, mut bx: Bx) -> Bx::Unpositioned { // Call `va_end` if this is the definition of a C-variadic function. if self.fn_abi.c_variadic { // The `VaList` "spoofed" argument is just after all the real arguments. @@ -228,13 +239,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { bx.abort(); // `abort` does not terminate the block, so we still need to generate // an `unreachable` terminator after it. - bx.unreachable(); - return; + return bx.unreachable(); } let llval = match self.fn_abi.ret.mode { PassMode::Ignore | PassMode::Indirect { .. } => { - bx.ret_void(); - return; + return bx.ret_void(); } PassMode::Direct(_) | PassMode::Pair(..) => { @@ -271,7 +280,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { bx.load(addr, self.fn_abi.ret.layout.align.abi) } }; - bx.ret(llval); + bx.ret(llval) } fn codegen_drop_terminator( @@ -281,15 +290,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { location: mir::Place<'tcx>, target: mir::BasicBlock, unwind: Option, - ) { + ) -> Bx::Unpositioned { let ty = location.ty(self.mir, bx.tcx()).ty; let ty = self.monomorphize(ty); let drop_fn = Instance::resolve_drop_in_place(bx.tcx(), ty); if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def { // we don't actually need to drop anything. - helper.funclet_br(self, &mut bx, target); - return; + return helper.funclet_br(self, bx, target); } let place = self.codegen_place(&mut bx, location.as_ref()); @@ -316,15 +324,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } _ => (bx.get_fn_addr(drop_fn), FnAbi::of_instance(&bx, drop_fn, &[])), }; - helper.do_call( - self, - &mut bx, - fn_abi, - drop_fn, - args, - Some((ReturnDest::Nothing, target)), - unwind, - ); + helper.do_call(self, bx, fn_abi, drop_fn, args, Some((ReturnDest::Nothing, target)), unwind) } fn codegen_assert_terminator( @@ -337,7 +337,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { msg: &mir::AssertMessage<'tcx>, target: mir::BasicBlock, cleanup: Option, - ) { + ) -> Bx::Unpositioned { let span = terminator.source_info.span; let cond = self.codegen_operand(&mut bx, cond).immediate(); let mut const_cond = bx.const_to_opt_u128(cond, false).map(|c| c == 1); @@ -357,8 +357,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // Don't codegen the panic block if success if known. if const_cond == Some(expected) { - helper.funclet_br(self, &mut bx, target); - return; + return helper.funclet_br(self, bx, target); } // Pass the condition through llvm.expect for branch hinting. @@ -405,20 +404,21 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let llfn = bx.get_fn_addr(instance); // Codegen the actual panic invoke/call. 
- helper.do_call(self, &mut bx, fn_abi, llfn, &args, None, cleanup); + helper.do_call(self, bx, fn_abi, llfn, &args, None, cleanup) } - /// Returns `true` if this is indeed a panic intrinsic and codegen is done. + /// Returns `Ok` if this is indeed a panic intrinsic and codegen is done, + /// otherwise returns `Err(bx)`, having not touched `bx`. fn codegen_panic_intrinsic( &mut self, helper: &TerminatorCodegenHelper<'tcx>, - bx: &mut Bx, + mut bx: Bx, intrinsic: Option, instance: Option>, source_info: mir::SourceInfo, destination: &Option<(mir::Place<'tcx>, mir::BasicBlock)>, cleanup: Option, - ) -> bool { + ) -> Result { // Emit a panic or a no-op for `assert_*` intrinsics. // These are intrinsics that compile to panics so that we can get a message // which mentions the offending type, even from a const context. @@ -441,11 +441,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let do_panic = match intrinsic { Inhabited => layout.abi.is_uninhabited(), // We unwrap as the error type is `!`. - ZeroValid => !layout.might_permit_raw_init(bx, /*zero:*/ true).unwrap(), + ZeroValid => !layout.might_permit_raw_init(&bx, /*zero:*/ true).unwrap(), // We unwrap as the error type is `!`. - UninitValid => !layout.might_permit_raw_init(bx, /*zero:*/ false).unwrap(), + UninitValid => !layout.might_permit_raw_init(&bx, /*zero:*/ false).unwrap(), }; - if do_panic { + Ok(if do_panic { let msg_str = with_no_trimmed_paths(|| { if layout.abi.is_uninhabited() { // Use this error even for the other intrinsics as it is more precise. @@ -457,14 +457,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } }); let msg = bx.const_str(Symbol::intern(&msg_str)); - let location = self.get_caller_location(bx, source_info).immediate(); + let location = self.get_caller_location(&mut bx, source_info).immediate(); // Obtain the panic entry point. // FIXME: dedup this with `codegen_assert_terminator` above. let def_id = common::langcall(bx.tcx(), Some(source_info.span), "", LangItem::Panic); let instance = ty::Instance::mono(bx.tcx(), def_id); - let fn_abi = FnAbi::of_instance(bx, instance, &[]); + let fn_abi = FnAbi::of_instance(&bx, instance, &[]); let llfn = bx.get_fn_addr(instance); // Codegen the actual panic invoke/call. @@ -476,15 +476,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { &[msg.0, msg.1, location], destination.as_ref().map(|(_, bb)| (ReturnDest::Nothing, *bb)), cleanup, - ); + ) } else { // a NOP let target = destination.as_ref().unwrap().1; helper.funclet_br(self, bx, target) - } - true + }) } else { - false + Err(bx) } } @@ -498,7 +497,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { destination: &Option<(mir::Place<'tcx>, mir::BasicBlock)>, cleanup: Option, fn_span: Span, - ) { + ) -> Bx::Unpositioned { let source_info = terminator.source_info; let span = source_info.span; @@ -523,8 +522,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { if let Some(ty::InstanceDef::DropGlue(_, None)) = def { // Empty drop glue; a no-op. 
let &(_, target) = destination.as_ref().unwrap(); - helper.funclet_br(self, &mut bx, target); - return; + return helper.funclet_br(self, bx, target); } // FIXME(eddyb) avoid computing this if possible, when `instance` is @@ -554,10 +552,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { }; if intrinsic == Some(sym::transmute) { - if let Some(destination_ref) = destination.as_ref() { + return if let Some(destination_ref) = destination.as_ref() { let &(dest, target) = destination_ref; self.codegen_transmute(&mut bx, &args[0], dest); - helper.funclet_br(self, &mut bx, target); + helper.funclet_br(self, bx, target) } else { // If we are trying to transmute to an uninhabited type, // it is likely there is no allotted destination. In fact, @@ -566,22 +564,22 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // into an uninhabited type is impossible, so anything following // it must be unreachable. assert_eq!(fn_abi.ret.layout.abi, abi::Abi::Uninhabited); - bx.unreachable(); - } - return; + bx.unreachable() + }; } - if self.codegen_panic_intrinsic( + bx = match self.codegen_panic_intrinsic( &helper, - &mut bx, + bx, intrinsic, instance, source_info, destination, cleanup, ) { - return; - } + Ok(unpositioned_bx) => return unpositioned_bx, + Err(bx) => bx, + }; // The arguments we'll be passing. Plus one to account for outptr, if used. let arg_count = fn_abi.args.len() + fn_abi.ret.is_indirect() as usize; @@ -596,17 +594,15 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { }; if intrinsic == Some(sym::caller_location) { - if let Some((_, target)) = destination.as_ref() { - let location = self - .get_caller_location(&mut bx, mir::SourceInfo { span: fn_span, ..source_info }); + let &(_, target) = destination.as_ref().unwrap(); + let location = + self.get_caller_location(&mut bx, mir::SourceInfo { span: fn_span, ..source_info }); - if let ReturnDest::IndirectOperand(tmp, _) = ret_dest { - location.val.store(&mut bx, tmp); - } - self.store_return(&mut bx, ret_dest, &fn_abi.ret, location.immediate()); - helper.funclet_br(self, &mut bx, *target); + if let ReturnDest::IndirectOperand(tmp, _) = ret_dest { + location.val.store(&mut bx, tmp); } - return; + self.store_return(&mut bx, ret_dest, &fn_abi.ret, location.immediate()); + return helper.funclet_br(self, bx, target); } match intrinsic { @@ -650,8 +646,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { }) .collect(); - Self::codegen_intrinsic_call( - &mut bx, + bx = Self::codegen_intrinsic_call( + bx, *instance.as_ref().unwrap(), &fn_abi, &args, @@ -663,13 +659,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { self.store_return(&mut bx, ret_dest, &fn_abi.ret, dst.llval); } - if let Some((_, target)) = *destination { - helper.funclet_br(self, &mut bx, target); + return if let Some((_, target)) = *destination { + helper.funclet_br(self, bx, target) } else { - bx.unreachable(); - } - - return; + bx.unreachable() + }; } } @@ -782,13 +776,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { helper.do_call( self, - &mut bx, + bx, fn_abi, fn_ptr, &llargs, destination.as_ref().map(|&(_, target)| (ret_dest, target)), cleanup, - ); + ) } fn codegen_asm_terminator( @@ -801,7 +795,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { options: ast::InlineAsmOptions, line_spans: &[Span], destination: Option, - ) { + ) -> Bx::Unpositioned { let span = terminator.source_info.span; let operands: 
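`codegen_panic_intrinsic` now returns a `Result` (presumably over `Bx::Unpositioned` and `Bx`, given its `Ok`/`Err` arms): `Ok` carries the terminated, unpositioned builder when the intrinsic was handled, and `Err` hands the untouched builder back so `codegen_call_terminator` can continue, which is what the `bx = match ... { Ok(unpositioned_bx) => return unpositioned_bx, Err(bx) => bx }` above expresses. A stripped-down sketch of that protocol with hypothetical types:

// Hypothetical sketch of the Ok/Err protocol: Ok carries proof that the block
// was terminated, Err hands the untouched builder back to the caller.
struct Builder;
struct Unpositioned;

impl Builder {
    fn unreachable(self) -> Unpositioned {
        Unpositioned
    }
}

fn maybe_codegen_panic(bx: Builder, is_panic_intrinsic: bool) -> Result<Unpositioned, Builder> {
    if is_panic_intrinsic {
        // Emit the panic call and terminate the block.
        Ok(bx.unreachable())
    } else {
        // Not a panic intrinsic: give the builder back untouched.
        Err(bx)
    }
}

fn codegen_call(bx: Builder) -> Unpositioned {
    let bx = match maybe_codegen_panic(bx, false) {
        Ok(unpositioned) => return unpositioned,
        Err(bx) => bx,
    };
    // Continue codegen with the builder that was handed back.
    bx.unreachable()
}

fn main() {
    let _ = codegen_call(Builder);
}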
Vec<_> = operands @@ -878,9 +872,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { bx.codegen_inline_asm(template, &operands, options, line_spans); if let Some(target) = destination { - helper.funclet_br(self, &mut bx, target); + helper.funclet_br(self, bx, target) } else { - bx.unreachable(); + bx.unreachable() } } } @@ -905,7 +899,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { mut bx: Bx, bb: mir::BasicBlock, terminator: &'tcx mir::Terminator<'tcx>, - ) { + ) -> Bx::Unpositioned { debug!("codegen_terminator: {:?}", terminator); // Create the cleanup bundle, if needed. @@ -920,7 +914,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { bx.abort(); // `abort` does not terminate the block, so we still need to generate // an `unreachable` terminator after it. - bx.unreachable(); + bx.unreachable() } mir::TerminatorKind::Goto { target } => { @@ -935,30 +929,25 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { bx.sideeffect(); } - helper.funclet_br(self, &mut bx, target); + helper.funclet_br(self, bx, target) } mir::TerminatorKind::SwitchInt { ref discr, switch_ty, ref targets } => { - self.codegen_switchint_terminator(helper, bx, discr, switch_ty, targets); + self.codegen_switchint_terminator(helper, bx, discr, switch_ty, targets) } - mir::TerminatorKind::Return => { - self.codegen_return_terminator(bx); - } + mir::TerminatorKind::Return => self.codegen_return_terminator(bx), - mir::TerminatorKind::Unreachable => { - bx.unreachable(); - } + mir::TerminatorKind::Unreachable => bx.unreachable(), mir::TerminatorKind::Drop { place, target, unwind } => { - self.codegen_drop_terminator(helper, bx, place, target, unwind); + self.codegen_drop_terminator(helper, bx, place, target, unwind) } - mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, cleanup } => { - self.codegen_assert_terminator( + mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, cleanup } => self + .codegen_assert_terminator( helper, bx, terminator, cond, expected, msg, target, cleanup, - ); - } + ), mir::TerminatorKind::DropAndReplace { .. } => { bug!("undesugared DropAndReplace in codegen: {:?}", terminator); @@ -971,18 +960,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { cleanup, from_hir_call: _, fn_span, - } => { - self.codegen_call_terminator( - helper, - bx, - terminator, - func, - args, - destination, - cleanup, - fn_span, - ); - } + } => self.codegen_call_terminator( + helper, + bx, + terminator, + func, + args, + destination, + cleanup, + fn_span, + ), mir::TerminatorKind::GeneratorDrop | mir::TerminatorKind::Yield { .. 
} => { bug!("generator ops in codegen") } @@ -996,18 +983,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { options, line_spans, destination, - } => { - self.codegen_asm_terminator( - helper, - bx, - terminator, - template, - operands, - options, - line_spans, - destination, - ); - } + } => self.codegen_asm_terminator( + helper, + bx, + terminator, + template, + operands, + options, + line_spans, + destination, + ), } } @@ -1212,6 +1197,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } let mut bx = self.new_block("cleanup"); + let landing_pad_llbb = bx.llbb(); let llpersonality = self.cx.eh_personality(); let llretty = self.landing_pad_type(); @@ -1223,7 +1209,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { Pair(bx.extract_value(lp, 0), bx.extract_value(lp, 1)).store(&mut bx, slot); bx.br(target_bb); - bx.llbb() + + landing_pad_llbb } fn landing_pad_type(&self) -> Bx::Type { @@ -1233,10 +1220,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { fn unreachable_block(&mut self) -> Bx::BasicBlock { self.unreachable_block.unwrap_or_else(|| { - let mut bx = self.new_block("unreachable"); + let bx = self.new_block("unreachable"); + let llbb = bx.llbb(); bx.unreachable(); - self.unreachable_block = Some(bx.llbb()); - bx.llbb() + self.unreachable_block = Some(llbb); + llbb }) } @@ -1245,9 +1233,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } pub fn build_block(&self, bb: mir::BasicBlock) -> Bx { - let mut bx = Bx::with_cx(self.cx); - bx.position_at_end(self.blocks[bb]); - bx + Bx::position_at_end(Bx::unpositioned(self.cx), self.blocks[bb]) } fn make_return_dest( diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs index 8502309b90e5a..286b39c08d3d4 100644 --- a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs +++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs @@ -49,13 +49,13 @@ fn memset_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { pub fn codegen_intrinsic_call( - bx: &mut Bx, + mut bx: Bx, instance: ty::Instance<'tcx>, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OperandRef<'tcx, Bx::Value>], llresult: Bx::Value, span: Span, - ) { + ) -> Bx { let callee_ty = instance.ty(bx.tcx(), ty::ParamEnv::reveal_all()); let (def_id, substs) = match *callee_ty.kind() { @@ -76,11 +76,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let llval = match name { sym::assume => { bx.assume(args[0].immediate()); - return; + return bx; } sym::abort => { bx.abort(); - return; + return bx; } sym::va_start => bx.va_start(args[0].immediate()), @@ -88,7 +88,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { sym::size_of_val => { let tp_ty = substs.type_at(0); if let OperandValue::Pair(_, meta) = args[0].val { - let (llsize, _) = glue::size_and_align_of_dst(bx, tp_ty, Some(meta)); + let (llsize, _) = glue::size_and_align_of_dst(&mut bx, tp_ty, Some(meta)); llsize } else { bx.const_usize(bx.layout_of(tp_ty).size.bytes()) @@ -97,7 +97,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { sym::min_align_of_val => { let tp_ty = substs.type_at(0); if let OperandValue::Pair(_, meta) = args[0].val { - let (_, llalign) = glue::size_and_align_of_dst(bx, tp_ty, Some(meta)); + let (_, llalign) = glue::size_and_align_of_dst(&mut bx, tp_ty, Some(meta)); llalign } else { 
bx.const_usize(bx.layout_of(tp_ty).align.abi.bytes()) @@ -113,7 +113,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { .tcx() .const_eval_instance(ty::ParamEnv::reveal_all(), instance, None) .unwrap(); - OperandRef::from_const(bx, value, ret_ty).immediate_or_packed_pair(bx) + OperandRef::from_const(&mut bx, value, ret_ty).immediate_or_packed_pair(&mut bx) } sym::offset => { let ptr = args[0].immediate(); @@ -127,7 +127,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } sym::copy => { copy_intrinsic( - bx, + &mut bx, true, false, substs.type_at(0), @@ -135,23 +135,23 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { args[0].immediate(), args[2].immediate(), ); - return; + return bx; } sym::write_bytes => { memset_intrinsic( - bx, + &mut bx, false, substs.type_at(0), args[0].immediate(), args[1].immediate(), args[2].immediate(), ); - return; + return bx; } sym::volatile_copy_nonoverlapping_memory => { copy_intrinsic( - bx, + &mut bx, false, true, substs.type_at(0), @@ -159,11 +159,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { args[1].immediate(), args[2].immediate(), ); - return; + return bx; } sym::volatile_copy_memory => { copy_intrinsic( - bx, + &mut bx, true, true, substs.type_at(0), @@ -171,28 +171,28 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { args[1].immediate(), args[2].immediate(), ); - return; + return bx; } sym::volatile_set_memory => { memset_intrinsic( - bx, + &mut bx, true, substs.type_at(0), args[0].immediate(), args[1].immediate(), args[2].immediate(), ); - return; + return bx; } sym::volatile_store => { let dst = args[0].deref(bx.cx()); - args[1].val.volatile_store(bx, dst); - return; + args[1].val.volatile_store(&mut bx, dst); + return bx; } sym::unaligned_volatile_store => { let dst = args[0].deref(bx.cx()); - args[1].val.unaligned_volatile_store(bx, dst); - return; + args[1].val.unaligned_volatile_store(&mut bx, dst); + return bx; } sym::add_with_overflow | sym::sub_with_overflow @@ -223,12 +223,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let val = bx.from_immediate(val); let overflow = bx.from_immediate(overflow); - let dest = result.project_field(bx, 0); + let dest = result.project_field(&mut bx, 0); bx.store(val, dest.llval, dest.align); - let dest = result.project_field(bx, 1); + let dest = result.project_field(&mut bx, 1); bx.store(overflow, dest.llval, dest.align); - return; + return bx; } sym::exact_div => { if signed { @@ -292,7 +292,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { name, ty ), ); - return; + return bx; } } } @@ -316,7 +316,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { name, arg_tys[0] ), ); - return; + return bx; } } } @@ -333,7 +333,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { arg_tys[0] ), ); - return; + return bx; } let (_width, signed) = match int_type_width_signed(ret_ty, bx.tcx()) { Some(pair) => pair, @@ -348,7 +348,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { ret_ty ), ); - return; + return bx; } }; if signed { @@ -360,7 +360,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { sym::discriminant_value => { if ret_ty.is_integral() { - args[0].deref(bx.cx()).codegen_get_discr(bx, ret_ty) + args[0].deref(bx.cx()).codegen_get_discr(&mut bx, ret_ty) } else { span_bug!(span, "Invalid discriminant type for `{:?}`", arg_tys[0]) } 
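The refactor above threads the IR builder by value through every terminator-emitting path: helpers such as `funclet_br` and `unreachable` now hand back `Bx::Unpositioned`, `codegen_panic_intrinsic` signals via `Ok`/`Err` whether it already terminated the block, and the arms of the terminator `match` become expressions. Below is a minimal standalone sketch of that shape; `Builder`, `Unpositioned`, `Terminator`, and the helper names are illustrative stand-ins, not the rustc-internal types.

```rust
struct Builder;      // positioned: may still emit instructions
struct Unpositioned; // returned once a terminator has been emitted

#[allow(dead_code)]
enum Terminator {
    Goto,
    Return,
    Abort,
}

impl Builder {
    fn abort(&mut self) {}
    fn br(self) -> Unpositioned { Unpositioned }
    fn ret_void(self) -> Unpositioned { Unpositioned }
    fn unreachable(self) -> Unpositioned { Unpositioned }
}

// A helper that only sometimes terminates the block: `Ok` carries the
// unpositioned builder, `Err` hands the still-live builder back to the caller.
fn maybe_codegen_panic(bx: Builder, is_panic: bool) -> Result<Unpositioned, Builder> {
    if is_panic { Ok(bx.unreachable()) } else { Err(bx) }
}

// Because the function returns `Unpositioned`, every arm must end in a
// terminator call; forgetting one is now a type error rather than a silently
// malformed basic block.
fn codegen_terminator(mut bx: Builder, term: Terminator) -> Unpositioned {
    bx = match maybe_codegen_panic(bx, false) {
        Ok(done) => return done, // block already terminated by the helper
        Err(bx) => bx,           // keep emitting into the same block
    };
    match term {
        Terminator::Goto => bx.br(),
        Terminator::Return => bx.ret_void(),
        Terminator::Abort => {
            bx.abort();
            // `abort` is an ordinary instruction, so the block still needs a terminator.
            bx.unreachable()
        }
    }
}

fn main() {
    let _ = codegen_terminator(Builder, Terminator::Return);
}
```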
@@ -429,13 +429,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let val = bx.from_immediate(val); let success = bx.from_immediate(success); - let dest = result.project_field(bx, 0); + let dest = result.project_field(&mut bx, 0); bx.store(val, dest.llval, dest.align); - let dest = result.project_field(bx, 1); + let dest = result.project_field(&mut bx, 1); bx.store(success, dest.llval, dest.align); - return; + return bx; } else { - return invalid_monomorphization(ty); + invalid_monomorphization(ty); + return bx; } } @@ -459,7 +460,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { result } } else { - return invalid_monomorphization(ty); + invalid_monomorphization(ty); + return bx; } } @@ -477,20 +479,21 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { val = bx.ptrtoint(val, bx.type_isize()); } bx.atomic_store(val, ptr, order, size); - return; + return bx; } else { - return invalid_monomorphization(ty); + invalid_monomorphization(ty); + return bx; } } "fence" => { bx.atomic_fence(order, SynchronizationScope::CrossThread); - return; + return bx; } "singlethreadfence" => { bx.atomic_fence(order, SynchronizationScope::SingleThread); - return; + return bx; } // These are all AtomicRMW ops @@ -525,7 +528,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } bx.atomic_rmw(atom_op, ptr, val, order) } else { - return invalid_monomorphization(ty); + invalid_monomorphization(ty); + return bx; } } } @@ -533,8 +537,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { sym::nontemporal_store => { let dst = args[0].deref(bx.cx()); - args[1].val.nontemporal_store(bx, dst); - return; + args[1].val.nontemporal_store(&mut bx, dst); + return bx; } sym::ptr_guaranteed_eq | sym::ptr_guaranteed_ne => { @@ -566,8 +570,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { _ => { // Need to use backend-specific things in the implementation. - bx.codegen_intrinsic_call(instance, fn_abi, args, llresult, span); - return; + return bx.codegen_intrinsic_call(instance, fn_abi, args, llresult, span); } }; @@ -577,11 +580,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let ptr = bx.pointercast(result.llval, ptr_llty); bx.store(llval, ptr, result.align); } else { - OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout) + OperandRef::from_immediate_or_packed_pair(&mut bx, llval, result.layout) .val - .store(bx, result); + .store(&mut bx, result); } } + + bx } } diff --git a/compiler/rustc_codegen_ssa/src/mir/mod.rs b/compiler/rustc_codegen_ssa/src/mir/mod.rs index 91df67b53d21f..3431e6695a101 100644 --- a/compiler/rustc_codegen_ssa/src/mir/mod.rs +++ b/compiler/rustc_codegen_ssa/src/mir/mod.rs @@ -262,6 +262,9 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( // Remove blocks that haven't been visited, or have no // predecessors. + // FIXME(eddyb) shouldn't need to create a positioned `Bx` just to + // call `delete_basic_block` on it. 
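In `intrinsic.rs` the same ownership change means `codegen_intrinsic_call` receives `mut bx: Bx` and must return it: helpers keep taking `&mut bx`, every early exit becomes `return bx;`, and diagnostic paths such as `invalid_monomorphization` can no longer be the return expression because the function no longer returns `()`. Roughly, with invented stand-in names:

```rust
struct Builder { emitted: Vec<String> }

// Helper still borrows the builder mutably, as before the refactor.
fn copy_intrinsic(bx: &mut Builder, volatile: bool) {
    bx.emitted.push(format!("memcpy(volatile = {})", volatile));
}

// Stand-in for the monomorphization-error diagnostic; it returns `()`.
fn invalid_monomorphization(ty: &str) {
    eprintln!("invalid monomorphization: unsupported type `{}`", ty);
}

fn codegen_intrinsic_call(mut bx: Builder, name: &str, ty_ok: bool) -> Builder {
    match name {
        "copy" => {
            copy_intrinsic(&mut bx, false);
            return bx; // was a bare `return;` when `bx` was a `&mut` parameter
        }
        "atomic_store" if !ty_ok => {
            // Previously `return invalid_monomorphization(ty);` type-checked
            // because the function returned `()`; now the builder is handed back.
            invalid_monomorphization("Foo");
            return bx;
        }
        _ => {}
    }
    // ... value-producing intrinsics would store their result here ...
    bx
}

fn main() {
    let bx = codegen_intrinsic_call(Builder { emitted: Vec::new() }, "copy", true);
    println!("{:?}", bx.emitted);
}
```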
+ let mut bx = fx.build_block(mir::START_BLOCK); for bb in mir.basic_blocks().indices() { // Unreachable block if !visited.contains(bb.index()) { @@ -313,12 +316,11 @@ fn create_funclets<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( // bar(); // } Some(&mir::TerminatorKind::Abort) => { - let mut cs_bx = bx.build_sibling_block(&format!("cs_funclet{:?}", bb)); + let cs_bx = bx.build_sibling_block(&format!("cs_funclet{:?}", bb)); let mut cp_bx = bx.build_sibling_block(&format!("cp_funclet{:?}", bb)); ret_llbb = cs_bx.llbb(); - let cs = cs_bx.catch_switch(None, None, 1); - cs_bx.add_handler(cs, cp_bx.llbb()); + let (_, cs) = cs_bx.catch_switch(None, None, &[cp_bx.llbb()]); // The "null" here is actually a RTTI type descriptor for the // C++ personality function, but `catch (...)` has no type so diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs index 9917c23f12150..5b0a4fa493ee4 100644 --- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs +++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs @@ -533,7 +533,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { }; let instance = ty::Instance::mono(bx.tcx(), def_id); let r = bx.cx().get_fn_addr(instance); - let call = bx.call(r, &[llsize, llalign], None); + let call = bx.call(r, &[llsize, llalign], None, None); let val = bx.pointercast(call, llty_ptr); let operand = OperandRef { val: OperandValue::Immediate(val), layout: box_layout }; diff --git a/compiler/rustc_codegen_ssa/src/traits/abi.rs b/compiler/rustc_codegen_ssa/src/traits/abi.rs index dd8495850bd54..179dea1f6d5bb 100644 --- a/compiler/rustc_codegen_ssa/src/traits/abi.rs +++ b/compiler/rustc_codegen_ssa/src/traits/abi.rs @@ -1,8 +1,7 @@ use super::BackendTypes; -use rustc_middle::ty::Ty; -use rustc_target::abi::call::FnAbi; -pub trait AbiBuilderMethods<'tcx>: BackendTypes { - fn apply_attrs_callsite(&mut self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, callsite: Self::Value); +// FIXME(eddyb) move this into either `BuilderMethods` or some other abstraction +// meant to handle whole-function aspects like params and stack slots (`alloca`s). +pub trait AbiBuilderMethods: BackendTypes { fn get_param(&self, index: usize) -> Self::Value; } diff --git a/compiler/rustc_codegen_ssa/src/traits/builder.rs b/compiler/rustc_codegen_ssa/src/traits/builder.rs index 1bc05f30e5c37..99f353dbe6d60 100644 --- a/compiler/rustc_codegen_ssa/src/traits/builder.rs +++ b/compiler/rustc_codegen_ssa/src/traits/builder.rs @@ -16,6 +16,7 @@ use crate::MemFlags; use rustc_middle::ty::layout::{HasParamEnv, TyAndLayout}; use rustc_middle::ty::Ty; use rustc_span::Span; +use rustc_target::abi::call::FnAbi; use rustc_target::abi::{Abi, Align, Scalar, Size}; use rustc_target::spec::HasTargetSpec; @@ -33,45 +34,78 @@ pub trait BuilderMethods<'a, 'tcx>: + CoverageInfoBuilderMethods<'tcx> + DebugInfoBuilderMethods + ArgAbiMethods<'tcx> - + AbiBuilderMethods<'tcx> + + AbiBuilderMethods + IntrinsicCallMethods<'tcx> + AsmBuilderMethods<'tcx> + StaticBuilderMethods + HasParamEnv<'tcx> + HasTargetSpec { + /// IR builder (like `Self`) that doesn't have a set (insert) position, and + /// cannot be used until positioned (which converted it to `Self`). + // FIXME(eddyb) maybe move this associated type to a different trait, and/or + // provide an `UnpositionedBuilderMethods` trait for operations involving it. 
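One consequence visible in `create_funclets` above: `catch_switch` now takes its handler blocks as a slice and returns the instruction value alongside the unpositioned builder, so the separate `add_handler` call disappears. A simplified sketch of that API shape follows; the real method is generic over the backend types and also takes `parent`/`unwind` arguments, which are omitted here.

```rust
#[derive(Clone, Copy)]
struct BasicBlock(u32);

#[derive(Debug)]
struct CatchSwitch { handler_blocks: Vec<u32> }

struct Builder;
struct Unpositioned;

impl Builder {
    // All handlers are supplied up front, so the terminator is built in a
    // single call that consumes the positioned builder; there is no window in
    // which a handler-less catchswitch exists.
    fn catch_switch(self, handlers: &[BasicBlock]) -> (Unpositioned, CatchSwitch) {
        let cs = CatchSwitch {
            handler_blocks: handlers.iter().map(|bb| bb.0).collect(),
        };
        (Unpositioned, cs)
    }
}

fn main() {
    let cp_llbb = BasicBlock(3); // the cleanup-pad block that handles the exception
    let (_, cs) = Builder.catch_switch(&[cp_llbb]);
    println!("{:?}", cs);
}
```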
+ type Unpositioned; + + fn unpositioned(cx: &'a Self::CodegenCx) -> Self::Unpositioned; + fn position_at_end(bx: Self::Unpositioned, llbb: Self::BasicBlock) -> Self; + fn into_unpositioned(self) -> Self::Unpositioned; + fn new_block<'b>(cx: &'a Self::CodegenCx, llfn: Self::Function, name: &'b str) -> Self; - fn with_cx(cx: &'a Self::CodegenCx) -> Self; fn build_sibling_block(&self, name: &str) -> Self; fn cx(&self) -> &Self::CodegenCx; fn llbb(&self) -> Self::BasicBlock; fn set_span(&mut self, span: Span); - fn position_at_end(&mut self, llbb: Self::BasicBlock); - fn ret_void(&mut self); - fn ret(&mut self, v: Self::Value); - fn br(&mut self, dest: Self::BasicBlock); + // Terminator instructions (the final instruction in a block). + // These methods take the IR builder by value and return an unpositioned one + // (in order to make it impossible to accidentally add more instructions). + + fn ret_void(self) -> Self::Unpositioned; + fn ret(self, v: Self::Value) -> Self::Unpositioned; + fn br(self, dest: Self::BasicBlock) -> Self::Unpositioned; fn cond_br( - &mut self, + self, cond: Self::Value, then_llbb: Self::BasicBlock, else_llbb: Self::BasicBlock, - ); + ) -> Self::Unpositioned; fn switch( - &mut self, + self, v: Self::Value, else_llbb: Self::BasicBlock, cases: impl ExactSizeIterator, - ); + ) -> Self::Unpositioned; + fn unreachable(self) -> Self::Unpositioned; + + // EH (exception handling) terminator instructions. + // Just like regular terminators, these methods transform the IR builder type, + // but they can also return values (for various reasons). + // FIXME(eddyb) a lot of these are LLVM-specific, redesign them. + fn invoke( - &mut self, + self, llfn: Self::Value, args: &[Self::Value], then: Self::BasicBlock, catch: Self::BasicBlock, funclet: Option<&Self::Funclet>, - ) -> Self::Value; - fn unreachable(&mut self); + fn_abi_for_attrs: Option<&FnAbi<'tcx, Ty<'tcx>>>, + ) -> (Self::Unpositioned, Self::Value); + fn resume(self, exn: Self::Value) -> (Self::Unpositioned, Self::Value); + fn cleanup_ret( + self, + funclet: &Self::Funclet, + unwind: Option, + ) -> (Self::Unpositioned, Self::Value); + fn catch_switch( + self, + parent: Option, + unwind: Option, + handlers: &[Self::BasicBlock], + ) -> (Self::Unpositioned, Self::Value); + + // Regular (intra-block) instructions. 
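The new `Unpositioned` associated type is a typestate: a builder exists in the positioned state only while its block still lacks a terminator, and the terminator methods consume it. The sketch below is a pared-down, hypothetical version of that protocol (the real `BuilderMethods` is generic over `'a`/`'tcx`, takes a codegen context in `unpositioned`, and has many more methods). Note how the block handle must be read off the builder before the consuming terminator call, which is exactly why `landing_pad_llbb` and the `unreachable_block` handle are captured early in the hunks above.

```rust
trait BuilderMethods: Sized {
    /// Builder without an insert position; unusable until repositioned.
    type Unpositioned;
    type BasicBlock: Copy;

    fn unpositioned() -> Self::Unpositioned;
    fn position_at_end(bx: Self::Unpositioned, llbb: Self::BasicBlock) -> Self;

    fn llbb(&self) -> Self::BasicBlock;

    // Terminators consume the positioned builder.
    fn ret_void(self) -> Self::Unpositioned;
}

struct LlBuilder { current: u32 }
struct LlUnpositioned;

impl BuilderMethods for LlBuilder {
    type Unpositioned = LlUnpositioned;
    type BasicBlock = u32;

    fn unpositioned() -> LlUnpositioned { LlUnpositioned }
    fn position_at_end(_bx: LlUnpositioned, llbb: u32) -> Self { LlBuilder { current: llbb } }
    fn llbb(&self) -> u32 { self.current }
    fn ret_void(self) -> LlUnpositioned {
        println!("ret void in bb{}", self.current);
        LlUnpositioned
    }
}

fn main() {
    let bx = LlBuilder::position_at_end(LlBuilder::unpositioned(), 0);
    let llbb = bx.llbb();      // read the handle first...
    let _done = bx.ret_void(); // ...because this moves `bx`
    println!("terminated bb{}", llbb);
    // Calling `bx.llbb()` here would not compile: `bx` has been moved.
}
```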
fn add(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; fn fadd(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; @@ -233,21 +267,8 @@ pub trait BuilderMethods<'a, 'tcx>: num_clauses: usize, ) -> Self::Value; fn set_cleanup(&mut self, landing_pad: Self::Value); - fn resume(&mut self, exn: Self::Value) -> Self::Value; fn cleanup_pad(&mut self, parent: Option, args: &[Self::Value]) -> Self::Funclet; - fn cleanup_ret( - &mut self, - funclet: &Self::Funclet, - unwind: Option, - ) -> Self::Value; fn catch_pad(&mut self, parent: Self::Value, args: &[Self::Value]) -> Self::Funclet; - fn catch_switch( - &mut self, - parent: Option, - unwind: Option, - num_handlers: usize, - ) -> Self::Value; - fn add_handler(&mut self, catch_switch: Self::Value, handler: Self::BasicBlock); fn set_personality_fn(&mut self, personality: Self::Value); fn atomic_cmpxchg( @@ -288,6 +309,7 @@ pub trait BuilderMethods<'a, 'tcx>: llfn: Self::Value, args: &[Self::Value], funclet: Option<&Self::Funclet>, + fn_abi_for_attrs: Option<&FnAbi<'tcx, Ty<'tcx>>>, ) -> Self::Value; fn zext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; diff --git a/compiler/rustc_codegen_ssa/src/traits/intrinsic.rs b/compiler/rustc_codegen_ssa/src/traits/intrinsic.rs index 777436ad2ae8f..eb8dda14236ea 100644 --- a/compiler/rustc_codegen_ssa/src/traits/intrinsic.rs +++ b/compiler/rustc_codegen_ssa/src/traits/intrinsic.rs @@ -9,13 +9,13 @@ pub trait IntrinsicCallMethods<'tcx>: BackendTypes { /// and in `library/core/src/intrinsics.rs`; if you need access to any LLVM intrinsics, /// add them to `compiler/rustc_codegen_llvm/src/context.rs`. fn codegen_intrinsic_call( - &mut self, + self, instance: ty::Instance<'tcx>, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OperandRef<'tcx, Self::Value>], llresult: Self::Value, span: Span, - ); + ) -> Self; fn abort(&mut self); fn assume(&mut self, val: Self::Value);
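Finally, `call` (like `invoke`) grows a `fn_abi_for_attrs: Option<&FnAbi<'tcx, Ty<'tcx>>>` parameter, which is why `apply_attrs_callsite` disappears from `AbiBuilderMethods` and why existing call sites (inline asm, the box allocation in `rvalue.rs`) now pass an extra `None`. Here is a rough sketch of the idea with invented types standing in for the ABI descriptor and callsite attributes; the `funclet` parameter is omitted.

```rust
// Invented stand-in for `FnAbi<'tcx, Ty<'tcx>>`; only the part relevant to
// callsite attributes is modelled.
struct FnAbiInfo { ret_is_noalias: bool }

#[derive(Clone, Copy, Debug)]
struct Value(u32);

struct Builder { noalias_callsites: Vec<u32> }

impl Builder {
    // The ABI descriptor travels with the call itself, so callers can no
    // longer emit a call and forget the former separate
    // `apply_attrs_callsite` step.
    fn call(
        &mut self,
        llfn: Value,
        args: &[Value],
        fn_abi_for_attrs: Option<&FnAbiInfo>,
    ) -> Value {
        let callsite = Value(llfn.0 + args.len() as u32);
        if let Some(abi) = fn_abi_for_attrs {
            if abi.ret_is_noalias {
                // apply the return-value attribute to this callsite
                self.noalias_callsites.push(callsite.0);
            }
        }
        callsite
    }
}

fn main() {
    let mut bx = Builder { noalias_callsites: Vec::new() };
    // Calls that need no attribute fix-up simply pass `None`.
    let _ = bx.call(Value(1), &[Value(2), Value(3)], None);
    // Ordinary Rust calls pass the ABI descriptor so attributes are applied in place.
    let cs = bx.call(Value(4), &[], Some(&FnAbiInfo { ret_is_noalias: true }));
    println!("noalias callsites: {:?}, last = {:?}", bx.noalias_callsites, cs);
}
```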