From c324b4e34612e792549b4ac040eafe6750ec20d5 Mon Sep 17 00:00:00 2001 From: bjorn3 Date: Sun, 2 Dec 2018 12:54:54 +0100 Subject: [PATCH 01/28] Don't use c_uint in cg_ssa --- src/librustc_codegen_llvm/context.rs | 4 ++-- src/librustc_codegen_ssa/mir/mod.rs | 13 ++++++------- src/librustc_codegen_ssa/traits/misc.rs | 3 +-- 3 files changed, 9 insertions(+), 11 deletions(-) diff --git a/src/librustc_codegen_llvm/context.rs b/src/librustc_codegen_llvm/context.rs index 23e3a8425d370..63dd5393dc462 100644 --- a/src/librustc_codegen_llvm/context.rs +++ b/src/librustc_codegen_llvm/context.rs @@ -326,8 +326,8 @@ impl MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> { get_fn(self, instance) } - fn get_param(&self, llfn: &'ll Value, index: c_uint) -> &'ll Value { - llvm::get_param(llfn, index) + fn get_param(&self, llfn: &'ll Value, index: usize) -> &'ll Value { + llvm::get_param(llfn, index as c_uint) } fn eh_personality(&self) -> &'ll Value { diff --git a/src/librustc_codegen_ssa/mir/mod.rs b/src/librustc_codegen_ssa/mir/mod.rs index dc77d4673cd2a..2ff5508ccc76f 100644 --- a/src/librustc_codegen_ssa/mir/mod.rs +++ b/src/librustc_codegen_ssa/mir/mod.rs @@ -1,4 +1,3 @@ -use libc::c_uint; use rustc::ty::{self, Ty, TypeFoldable, UpvarSubsts}; use rustc::ty::layout::{TyLayout, HasTyCtxt}; use rustc::mir::{self, Mir}; @@ -540,18 +539,18 @@ fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( }); } PassMode::Direct(_) => { - let llarg = bx.get_param(bx.llfn(), llarg_idx as c_uint); + let llarg = bx.get_param(bx.llfn(), llarg_idx); bx.set_value_name(llarg, &name); llarg_idx += 1; return local( OperandRef::from_immediate_or_packed_pair(bx, llarg, arg.layout)); } PassMode::Pair(..) => { - let a = bx.get_param(bx.llfn(), llarg_idx as c_uint); + let a = bx.get_param(bx.llfn(), llarg_idx); bx.set_value_name(a, &(name.clone() + ".0")); llarg_idx += 1; - let b = bx.get_param(bx.llfn(), llarg_idx as c_uint); + let b = bx.get_param(bx.llfn(), llarg_idx); bx.set_value_name(b, &(name + ".1")); llarg_idx += 1; @@ -568,16 +567,16 @@ fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( // Don't copy an indirect argument to an alloca, the caller // already put it in a temporary alloca and gave it up. // FIXME: lifetimes - let llarg = bx.get_param(bx.llfn(), llarg_idx as c_uint); + let llarg = bx.get_param(bx.llfn(), llarg_idx); bx.set_value_name(llarg, &name); llarg_idx += 1; PlaceRef::new_sized(llarg, arg.layout, arg.layout.align.abi) } else if arg.is_unsized_indirect() { // As the storage for the indirect argument lives during // the whole function call, we just copy the fat pointer. 
- let llarg = bx.get_param(bx.llfn(), llarg_idx as c_uint); + let llarg = bx.get_param(bx.llfn(), llarg_idx); llarg_idx += 1; - let llextra = bx.get_param(bx.llfn(), llarg_idx as c_uint); + let llextra = bx.get_param(bx.llfn(), llarg_idx); llarg_idx += 1; let indirect_operand = OperandValue::Pair(llarg, llextra); diff --git a/src/librustc_codegen_ssa/traits/misc.rs b/src/librustc_codegen_ssa/traits/misc.rs index b23155563665d..b3d458f3a3be8 100644 --- a/src/librustc_codegen_ssa/traits/misc.rs +++ b/src/librustc_codegen_ssa/traits/misc.rs @@ -1,5 +1,4 @@ use super::BackendTypes; -use libc::c_uint; use rustc::mir::mono::Stats; use rustc::session::Session; use rustc::ty::{self, Instance, Ty}; @@ -15,7 +14,7 @@ pub trait MiscMethods<'tcx>: BackendTypes { fn check_overflow(&self) -> bool; fn instances(&self) -> &RefCell, Self::Value>>; fn get_fn(&self, instance: Instance<'tcx>) -> Self::Value; - fn get_param(&self, llfn: Self::Value, index: c_uint) -> Self::Value; + fn get_param(&self, llfn: Self::Value, index: usize) -> Self::Value; fn eh_personality(&self) -> Self::Value; fn eh_unwind_resume(&self) -> Self::Value; fn sess(&self) -> &Session; From 86104b492edb94c7ff6cc488191e7d7dd7d1ebf9 Mon Sep 17 00:00:00 2001 From: bjorn3 Date: Sun, 2 Dec 2018 13:57:41 +0100 Subject: [PATCH 02/28] `eval_mir_constant` doesn't need a builder param --- src/librustc_codegen_ssa/mir/block.rs | 2 +- src/librustc_codegen_ssa/mir/constant.rs | 45 +++++++++++------------- src/librustc_codegen_ssa/mir/operand.rs | 2 +- 3 files changed, 23 insertions(+), 26 deletions(-) diff --git a/src/librustc_codegen_ssa/mir/block.rs b/src/librustc_codegen_ssa/mir/block.rs index 627380ee38ff1..c228124449b3d 100644 --- a/src/librustc_codegen_ssa/mir/block.rs +++ b/src/librustc_codegen_ssa/mir/block.rs @@ -640,7 +640,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { span_bug!(span, "shuffle indices must be constant"); } mir::Operand::Constant(ref constant) => { - let c = self.eval_mir_constant(&bx, constant); + let c = self.eval_mir_constant(constant); let (llval, ty) = self.simd_shuffle_indices( &bx, constant.span, diff --git a/src/librustc_codegen_ssa/mir/constant.rs b/src/librustc_codegen_ssa/mir/constant.rs index 6bc69efa4a7d5..7db0ca309f6b8 100644 --- a/src/librustc_codegen_ssa/mir/constant.rs +++ b/src/librustc_codegen_ssa/mir/constant.rs @@ -3,41 +3,38 @@ use rustc_mir::const_eval::const_field; use rustc::mir; use rustc_data_structures::indexed_vec::Idx; use rustc::mir::interpret::GlobalId; -use rustc::ty::{self, Ty}; -use rustc::ty::layout; +use rustc::ty::{self, Ty, TyCtxt}; +use rustc::ty::layout::{self, HasTyCtxt}; use syntax::source_map::Span; use crate::traits::*; use super::FunctionCx; -impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { - fn fully_evaluate( - &mut self, - bx: &Bx, - constant: &'tcx ty::LazyConst<'tcx>, - ) -> Result, ErrorHandled> { - match *constant { - ty::LazyConst::Unevaluated(def_id, ref substs) => { - let tcx = bx.tcx(); - let param_env = ty::ParamEnv::reveal_all(); - let instance = ty::Instance::resolve(tcx, param_env, def_id, substs).unwrap(); - let cid = GlobalId { - instance, - promoted: None, - }; - tcx.const_eval(param_env.and(cid)) - }, - ty::LazyConst::Evaluated(constant) => Ok(constant), - } +fn fully_evaluate<'a, 'tcx: 'a>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + constant: &'tcx ty::LazyConst<'tcx>, +) -> Result, ErrorHandled> { + match *constant { + ty::LazyConst::Unevaluated(def_id, ref substs) => { + let param_env = 
ty::ParamEnv::reveal_all(); + let instance = ty::Instance::resolve(tcx, param_env, def_id, substs).unwrap(); + let cid = GlobalId { + instance, + promoted: None, + }; + tcx.const_eval(param_env.and(cid)) + }, + ty::LazyConst::Evaluated(constant) => Ok(constant), } +} +impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { pub fn eval_mir_constant( - &mut self, - bx: &Bx, + &self, constant: &mir::Constant<'tcx>, ) -> Result, ErrorHandled> { let c = self.monomorphize(&constant.literal); - self.fully_evaluate(bx, c) + fully_evaluate(self.cx.tcx(), c) } /// process constant containing SIMD shuffle indices diff --git a/src/librustc_codegen_ssa/mir/operand.rs b/src/librustc_codegen_ssa/mir/operand.rs index 0e8cdc83b486e..89244061befac 100644 --- a/src/librustc_codegen_ssa/mir/operand.rs +++ b/src/librustc_codegen_ssa/mir/operand.rs @@ -457,7 +457,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { mir::Operand::Constant(ref constant) => { let ty = self.monomorphize(&constant.ty); - self.eval_mir_constant(bx, constant) + self.eval_mir_constant(constant) .and_then(|c| OperandRef::from_const(bx, c)) .unwrap_or_else(|err| { match err { From cec0cf5b69cc666f1559dabffa43024bcecfb3f2 Mon Sep 17 00:00:00 2001 From: bjorn3 Date: Sun, 2 Dec 2018 14:05:56 +0100 Subject: [PATCH 03/28] Use Builder instead of CodegenCx for OperandRef and LocalRef --- src/librustc_codegen_llvm/builder.rs | 2 +- src/librustc_codegen_ssa/mir/mod.rs | 14 ++++++------ src/librustc_codegen_ssa/mir/operand.rs | 30 ++++++++++++------------- src/librustc_codegen_ssa/mir/rvalue.rs | 7 ++++-- 4 files changed, 28 insertions(+), 25 deletions(-) diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index d4d38a464576d..d0a861171c17c 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -616,7 +616,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { assert_eq!(place.llextra.is_some(), place.layout.is_unsized()); if place.layout.is_zst() { - return OperandRef::new_zst(self.cx(), place.layout); + return OperandRef::new_zst(self, place.layout); } fn scalar_load_metadata<'a, 'll, 'tcx>( diff --git a/src/librustc_codegen_ssa/mir/mod.rs b/src/librustc_codegen_ssa/mir/mod.rs index 2ff5508ccc76f..db970f3c8b2e5 100644 --- a/src/librustc_codegen_ssa/mir/mod.rs +++ b/src/librustc_codegen_ssa/mir/mod.rs @@ -178,16 +178,16 @@ enum LocalRef<'tcx, V> { Operand(Option>), } -impl<'tcx, V: CodegenObject> LocalRef<'tcx, V> { - fn new_operand>( - cx: &Cx, +impl<'a, 'tcx: 'a, V: CodegenObject> LocalRef<'tcx, V> { + fn new_operand>( + bx: &mut Bx, layout: TyLayout<'tcx>, ) -> LocalRef<'tcx, V> { if layout.is_zst() { // Zero-size temporaries aren't always initialized, which // doesn't matter because they don't contain data, but // we need something in the operand. - LocalRef::Operand(Some(OperandRef::new_zst(cx, layout))) + LocalRef::Operand(Some(OperandRef::new_zst(bx, layout))) } else { LocalRef::Operand(None) } @@ -275,7 +275,7 @@ pub fn codegen_mir<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( if !memory_locals.contains(local) && !dbg { debug!("alloc: {:?} ({}) -> operand", local, name); - return LocalRef::new_operand(bx.cx(), layout); + return LocalRef::new_operand(&mut bx, layout); } debug!("alloc: {:?} ({}) -> place", local, name); @@ -320,7 +320,7 @@ pub fn codegen_mir<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( // alloca in advance. Instead we wait until we see the // definition and update the operand there. 
debug!("alloc: {:?} -> operand", local); - LocalRef::new_operand(bx.cx(), layout) + LocalRef::new_operand(&mut bx, layout) } } }; @@ -529,7 +529,7 @@ fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( let local = |op| LocalRef::Operand(Some(op)); match arg.mode { PassMode::Ignore(IgnoreMode::Zst) => { - return local(OperandRef::new_zst(bx.cx(), arg.layout)); + return local(OperandRef::new_zst(bx, arg.layout)); } PassMode::Ignore(IgnoreMode::CVarArgs) => { let backend_type = bx.cx().immediate_backend_type(arg.layout); diff --git a/src/librustc_codegen_ssa/mir/operand.rs b/src/librustc_codegen_ssa/mir/operand.rs index 89244061befac..05a027e5abc85 100644 --- a/src/librustc_codegen_ssa/mir/operand.rs +++ b/src/librustc_codegen_ssa/mir/operand.rs @@ -54,13 +54,13 @@ impl fmt::Debug for OperandRef<'tcx, V> { } impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> { - pub fn new_zst>( - cx: &Cx, + pub fn new_zst>( + bx: &mut Bx, layout: TyLayout<'tcx> ) -> OperandRef<'tcx, V> { assert!(layout.is_zst()); OperandRef { - val: OperandValue::Immediate(cx.const_undef(cx.immediate_backend_type(layout))), + val: OperandValue::Immediate(bx.const_undef(bx.immediate_backend_type(layout))), layout } } @@ -69,10 +69,10 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> { bx: &mut Bx, val: ty::Const<'tcx> ) -> Result { - let layout = bx.cx().layout_of(val.ty); + let layout = bx.layout_of(val.ty); if layout.is_zst() { - return Ok(OperandRef::new_zst(bx.cx(), layout)); + return Ok(OperandRef::new_zst(bx, layout)); } let val = match val.val { @@ -81,10 +81,10 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> { layout::Abi::Scalar(ref x) => x, _ => bug!("from_const: invalid ByVal layout: {:#?}", layout) }; - let llval = bx.cx().scalar_to_backend( + let llval = bx.scalar_to_backend( x, scalar, - bx.cx().immediate_backend_type(layout), + bx.immediate_backend_type(layout), ); OperandValue::Immediate(llval) }, @@ -93,16 +93,16 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> { layout::Abi::ScalarPair(ref a, _) => a, _ => bug!("from_const: invalid ScalarPair layout: {:#?}", layout) }; - let a_llval = bx.cx().scalar_to_backend( + let a_llval = bx.scalar_to_backend( a, a_scalar, - bx.cx().scalar_pair_element_backend_type(layout, 0, true), + bx.scalar_pair_element_backend_type(layout, 0, true), ); - let b_llval = bx.cx().const_usize(b); + let b_llval = bx.const_usize(b); OperandValue::Pair(a_llval, b_llval) }, ConstValue::ByRef(ptr, alloc) => { - return Ok(bx.load_operand(bx.cx().from_const_alloc(layout, alloc, ptr.offset))); + return Ok(bx.load_operand(bx.from_const_alloc(layout, alloc, ptr.offset))); }, }; @@ -121,7 +121,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> { } } - pub fn deref>( + pub fn deref>( self, cx: &Cx ) -> PlaceRef<'tcx, V> { @@ -196,7 +196,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> { let mut val = match (self.val, &self.layout.abi) { // If the field is ZST, it has no data. _ if field.is_zst() => { - return OperandRef::new_zst(bx.cx(), field); + return OperandRef::new_zst(bx, field); } // Newtype of a scalar, scalar pair or vector. @@ -406,7 +406,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // checks in `codegen_consume` and `extract_field`. 
let elem = o.layout.field(bx.cx(), 0); if elem.is_zst() { - return Some(OperandRef::new_zst(bx.cx(), elem)); + return Some(OperandRef::new_zst(bx, elem)); } } _ => {} @@ -429,7 +429,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // ZSTs don't require any actual memory access. if layout.is_zst() { - return OperandRef::new_zst(bx.cx(), layout); + return OperandRef::new_zst(bx, layout); } if let Some(o) = self.maybe_codegen_consume_direct(bx, place) { diff --git a/src/librustc_codegen_ssa/mir/rvalue.rs b/src/librustc_codegen_ssa/mir/rvalue.rs index 25a7754d118d7..539846bd9857d 100644 --- a/src/librustc_codegen_ssa/mir/rvalue.rs +++ b/src/librustc_codegen_ssa/mir/rvalue.rs @@ -521,8 +521,11 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // According to `rvalue_creates_operand`, only ZST // aggregate rvalues are allowed to be operands. let ty = rvalue.ty(self.mir, self.cx.tcx()); - (bx, OperandRef::new_zst(self.cx, - self.cx.layout_of(self.monomorphize(&ty)))) + let operand = OperandRef::new_zst( + &mut bx, + self.cx.layout_of(self.monomorphize(&ty)), + ); + (bx, operand) } } } From 17ef2495fce5cbc8737eb612da6ad21a8634bb63 Mon Sep 17 00:00:00 2001 From: bjorn3 Date: Sun, 2 Dec 2018 14:13:13 +0100 Subject: [PATCH 04/28] Use implicit deref instead of BuilderMethods::cx() at more places --- src/librustc_codegen_llvm/builder.rs | 1 + src/librustc_codegen_ssa/base.rs | 74 +++++++------- src/librustc_codegen_ssa/mir/block.rs | 9 +- src/librustc_codegen_ssa/mir/operand.rs | 36 +++---- src/librustc_codegen_ssa/mir/place.rs | 94 +++++++++--------- src/librustc_codegen_ssa/mir/rvalue.rs | 124 ++++++++++++------------ 6 files changed, 169 insertions(+), 169 deletions(-) diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index d0a861171c17c..048adb0fb80ff 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -1472,6 +1472,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { impl StaticBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { fn get_static(&self, def_id: DefId) -> &'ll Value { + // Forward to the `get_static` method of `CodegenCx` self.cx().get_static(def_id) } } diff --git a/src/librustc_codegen_ssa/base.rs b/src/librustc_codegen_ssa/base.rs index 39ce15e477296..0fd51c8bef022 100644 --- a/src/librustc_codegen_ssa/base.rs +++ b/src/librustc_codegen_ssa/base.rs @@ -21,7 +21,7 @@ use rustc::middle::lang_items::StartFnLangItem; use rustc::middle::weak_lang_items; use rustc::mir::mono::{Stats, CodegenUnitNameBuilder}; use rustc::ty::{self, Ty, TyCtxt}; -use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, VariantIdx, HasTyCtxt}; +use rustc::ty::layout::{self, Align, TyLayout, VariantIdx, HasTyCtxt}; use rustc::ty::query::Providers; use rustc::middle::cstore::{self, LinkagePreference}; use rustc::util::common::{time, print_time_passes_entry}; @@ -162,16 +162,16 @@ pub fn compare_simd_types<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( /// The `old_info` argument is a bit funny. It is intended for use /// in an upcast, where the new vtable for an object will be derived /// from the old one. 
-pub fn unsized_info<'tcx, Cx: CodegenMethods<'tcx>>( - cx: &Cx, +pub fn unsized_info<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + bx: &Bx, source: Ty<'tcx>, target: Ty<'tcx>, - old_info: Option, -) -> Cx::Value { - let (source, target) = cx.tcx().struct_lockstep_tails(source, target); + old_info: Option, +) -> Bx::Value { + let (source, target) = bx.tcx().struct_lockstep_tails(source, target); match (&source.sty, &target.sty) { (&ty::Array(_, len), &ty::Slice(_)) => { - cx.const_usize(len.unwrap_usize(cx.tcx())) + bx.const_usize(len.unwrap_usize(bx.tcx())) } (&ty::Dynamic(..), &ty::Dynamic(..)) => { // For now, upcasts are limited to changes in marker @@ -180,10 +180,10 @@ pub fn unsized_info<'tcx, Cx: CodegenMethods<'tcx>>( old_info.expect("unsized_info: missing old info for trait upcast") } (_, &ty::Dynamic(ref data, ..)) => { - let vtable_ptr = cx.layout_of(cx.tcx().mk_mut_ptr(target)) - .field(cx, FAT_PTR_EXTRA); - cx.const_ptrcast(meth::get_vtable(cx, source, data.principal()), - cx.backend_type(vtable_ptr)) + let vtable_ptr = bx.layout_of(bx.tcx().mk_mut_ptr(target)) + .field(bx, FAT_PTR_EXTRA); + bx.const_ptrcast(meth::get_vtable(bx.cx(), source, data.principal()), + bx.backend_type(vtable_ptr)) } _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", source, @@ -206,24 +206,24 @@ pub fn unsize_thin_ptr<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) | (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }), &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => { - assert!(bx.cx().type_is_sized(a)); - let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(bx.cx().layout_of(b))); - (bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None)) + assert!(bx.type_is_sized(a)); + let ptr_ty = bx.type_ptr_to(bx.backend_type(bx.layout_of(b))); + (bx.pointercast(src, ptr_ty), unsized_info(bx, a, b, None)) } (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => { let (a, b) = (src_ty.boxed_ty(), dst_ty.boxed_ty()); - assert!(bx.cx().type_is_sized(a)); - let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(bx.cx().layout_of(b))); - (bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None)) + assert!(bx.type_is_sized(a)); + let ptr_ty = bx.type_ptr_to(bx.backend_type(bx.layout_of(b))); + (bx.pointercast(src, ptr_ty), unsized_info(bx, a, b, None)) } (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => { assert_eq!(def_a, def_b); - let src_layout = bx.cx().layout_of(src_ty); - let dst_layout = bx.cx().layout_of(dst_ty); + let src_layout = bx.layout_of(src_ty); + let dst_layout = bx.layout_of(dst_ty); let mut result = None; for i in 0..src_layout.fields.count() { - let src_f = src_layout.field(bx.cx(), i); + let src_f = src_layout.field(bx, i); assert_eq!(src_layout.fields.offset(i).bytes(), 0); assert_eq!(dst_layout.fields.offset(i).bytes(), 0); if src_f.is_zst() { @@ -231,15 +231,15 @@ pub fn unsize_thin_ptr<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( } assert_eq!(src_layout.size, src_f.size); - let dst_f = dst_layout.field(bx.cx(), i); + let dst_f = dst_layout.field(bx, i); assert_ne!(src_f.ty, dst_f.ty); assert_eq!(result, None); result = Some(unsize_thin_ptr(bx, src, src_f.ty, dst_f.ty)); } let (lldata, llextra) = result.unwrap(); // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. 
- (bx.bitcast(lldata, bx.cx().scalar_pair_element_backend_type(dst_layout, 0, true)), - bx.bitcast(llextra, bx.cx().scalar_pair_element_backend_type(dst_layout, 1, true))) + (bx.bitcast(lldata, bx.scalar_pair_element_backend_type(dst_layout, 0, true)), + bx.bitcast(llextra, bx.scalar_pair_element_backend_type(dst_layout, 1, true))) } _ => bug!("unsize_thin_ptr: called on bad types"), } @@ -261,8 +261,8 @@ pub fn coerce_unsized_into<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( // i.e., &'a fmt::Debug+Send => &'a fmt::Debug // So we need to pointercast the base to ensure // the types match up. - let thin_ptr = dst.layout.field(bx.cx(), FAT_PTR_ADDR); - (bx.pointercast(base, bx.cx().backend_type(thin_ptr)), info) + let thin_ptr = dst.layout.field(bx, FAT_PTR_ADDR); + (bx.pointercast(base, bx.backend_type(thin_ptr)), info) } OperandValue::Immediate(base) => { unsize_thin_ptr(bx, base, src_ty, dst_ty) @@ -323,16 +323,16 @@ fn cast_shift_rhs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( ) -> Bx::Value { // Shifts may have any size int on the rhs if op.is_shift() { - let mut rhs_llty = bx.cx().val_ty(rhs); - let mut lhs_llty = bx.cx().val_ty(lhs); - if bx.cx().type_kind(rhs_llty) == TypeKind::Vector { - rhs_llty = bx.cx().element_type(rhs_llty) + let mut rhs_llty = bx.val_ty(rhs); + let mut lhs_llty = bx.val_ty(lhs); + if bx.type_kind(rhs_llty) == TypeKind::Vector { + rhs_llty = bx.element_type(rhs_llty) } - if bx.cx().type_kind(lhs_llty) == TypeKind::Vector { - lhs_llty = bx.cx().element_type(lhs_llty) + if bx.type_kind(lhs_llty) == TypeKind::Vector { + lhs_llty = bx.element_type(lhs_llty) } - let rhs_sz = bx.cx().int_width(rhs_llty); - let lhs_sz = bx.cx().int_width(lhs_llty); + let rhs_sz = bx.int_width(rhs_llty); + let lhs_sz = bx.int_width(lhs_llty); if lhs_sz < rhs_sz { bx.trunc(rhs, lhs_llty) } else if lhs_sz > rhs_sz { @@ -360,8 +360,8 @@ pub fn from_immediate<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( bx: &mut Bx, val: Bx::Value ) -> Bx::Value { - if bx.cx().val_ty(val) == bx.cx().type_i1() { - bx.zext(val, bx.cx().type_i8()) + if bx.val_ty(val) == bx.type_i1() { + bx.zext(val, bx.type_i8()) } else { val } @@ -384,7 +384,7 @@ pub fn to_immediate_scalar<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( scalar: &layout::Scalar, ) -> Bx::Value { if scalar.is_bool() { - return bx.trunc(val, bx.cx().type_i1()); + return bx.trunc(val, bx.type_i1()); } val } @@ -403,7 +403,7 @@ pub fn memcpy_ty<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( return; } - bx.memcpy(dst, dst_align, src, src_align, bx.cx().const_usize(size), flags); + bx.memcpy(dst, dst_align, src, src_align, bx.const_usize(size), flags); } pub fn codegen_instance<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( diff --git a/src/librustc_codegen_ssa/mir/block.rs b/src/librustc_codegen_ssa/mir/block.rs index c228124449b3d..6a374b23024be 100644 --- a/src/librustc_codegen_ssa/mir/block.rs +++ b/src/librustc_codegen_ssa/mir/block.rs @@ -1,6 +1,6 @@ use rustc::middle::lang_items; use rustc::ty::{self, Ty, TypeFoldable}; -use rustc::ty::layout::{self, LayoutOf, HasTyCtxt}; +use rustc::ty::layout::{self, HasTyCtxt}; use rustc::mir; use rustc::mir::interpret::EvalErrorKind; use rustc_target::abi::call::{ArgType, FnType, PassMode, IgnoreMode}; @@ -1012,13 +1012,12 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { &mut self, bx: &mut Bx ) -> PlaceRef<'tcx, Bx::Value> { - let cx = bx.cx(); if let Some(slot) = self.personality_slot { slot } else { - let layout = cx.layout_of(cx.tcx().intern_tup(&[ - 
cx.tcx().mk_mut_ptr(cx.tcx().types.u8), - cx.tcx().types.i32 + let layout = bx.layout_of(bx.tcx().intern_tup(&[ + bx.tcx().mk_mut_ptr(bx.tcx().types.u8), + bx.tcx().types.i32 ])); let slot = PlaceRef::alloca(bx, layout, "personalityslot"); self.personality_slot = Some(slot); diff --git a/src/librustc_codegen_ssa/mir/operand.rs b/src/librustc_codegen_ssa/mir/operand.rs index 05a027e5abc85..9ec60d822fc1e 100644 --- a/src/librustc_codegen_ssa/mir/operand.rs +++ b/src/librustc_codegen_ssa/mir/operand.rs @@ -1,7 +1,7 @@ use rustc::mir::interpret::{ConstValue, ErrorHandled}; use rustc::mir; use rustc::ty; -use rustc::ty::layout::{self, Align, LayoutOf, TyLayout}; +use rustc::ty::layout::{self, Align, TyLayout}; use crate::base; use crate::MemFlags; @@ -148,11 +148,11 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> { bx: &mut Bx ) -> V { if let OperandValue::Pair(a, b) = self.val { - let llty = bx.cx().backend_type(self.layout); + let llty = bx.backend_type(self.layout); debug!("Operand::immediate_or_packed_pair: packing {:?} into {:?}", self, llty); // Reconstruct the immediate aggregate. - let mut llpair = bx.cx().const_undef(llty); + let mut llpair = bx.const_undef(llty); let imm_a = base::from_immediate(bx, a); let imm_b = base::from_immediate(bx, b); llpair = bx.insert_value(llpair, imm_a, 0); @@ -190,7 +190,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> { bx: &mut Bx, i: usize ) -> Self { - let field = self.layout.field(bx.cx(), i); + let field = self.layout.field(bx, i); let offset = self.layout.fields.offset(i); let mut val = match (self.val, &self.layout.abi) { @@ -209,12 +209,12 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> { // Extract a scalar component from a pair. (OperandValue::Pair(a_llval, b_llval), &layout::Abi::ScalarPair(ref a, ref b)) => { if offset.bytes() == 0 { - assert_eq!(field.size, a.value.size(bx.cx())); + assert_eq!(field.size, a.value.size(bx)); OperandValue::Immediate(a_llval) } else { - assert_eq!(offset, a.value.size(bx.cx()) - .align_to(b.value.align(bx.cx()).abi)); - assert_eq!(field.size, b.value.size(bx.cx())); + assert_eq!(offset, a.value.size(bx) + .align_to(b.value.align(bx).abi)); + assert_eq!(field.size, b.value.size(bx)); OperandValue::Immediate(b_llval) } } @@ -222,7 +222,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> { // `#[repr(simd)]` types are also immediate. (OperandValue::Immediate(llval), &layout::Abi::Vector { .. }) => { OperandValue::Immediate( - bx.extract_element(llval, bx.cx().const_usize(i as u64))) + bx.extract_element(llval, bx.const_usize(i as u64))) } _ => bug!("OperandRef::extract_field({:?}): not applicable", self) @@ -231,7 +231,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> { // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. // Bools in union fields needs to be truncated. 
let to_immediate_or_cast = |bx: &mut Bx, val, ty| { - if ty == bx.cx().type_i1() { + if ty == bx.type_i1() { bx.trunc(val, ty) } else { bx.bitcast(val, ty) @@ -240,12 +240,12 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> { match val { OperandValue::Immediate(ref mut llval) => { - *llval = to_immediate_or_cast(bx, *llval, bx.cx().immediate_backend_type(field)); + *llval = to_immediate_or_cast(bx, *llval, bx.immediate_backend_type(field)); } OperandValue::Pair(ref mut a, ref mut b) => { - *a = to_immediate_or_cast(bx, *a, bx.cx() + *a = to_immediate_or_cast(bx, *a, bx .scalar_pair_element_backend_type(field, 0, true)); - *b = to_immediate_or_cast(bx, *b, bx.cx() + *b = to_immediate_or_cast(bx, *b, bx .scalar_pair_element_backend_type(field, 1, true)); } OperandValue::Ref(..) => bug!() @@ -359,7 +359,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandValue { // Allocate an appropriate region on the stack, and copy the value into it let (llsize, _) = glue::size_and_align_of_dst(bx, unsized_ty, Some(llextra)); - let lldst = bx.array_alloca(bx.cx().type_i8(), llsize, "unsized_tmp", max_align); + let lldst = bx.array_alloca(bx.type_i8(), llsize, "unsized_tmp", max_align); bx.memcpy(lldst, max_align, llptr, min_align, llsize, flags); // Store the allocated region and the extra to the indirect place. @@ -404,7 +404,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // ZSTs don't require any actual memory access. // FIXME(eddyb) deduplicate this with the identical // checks in `codegen_consume` and `extract_field`. - let elem = o.layout.field(bx.cx(), 0); + let elem = o.layout.field(bx, 0); if elem.is_zst() { return Some(OperandRef::new_zst(bx, elem)); } @@ -425,7 +425,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { debug!("codegen_consume(place={:?})", place); let ty = self.monomorphized_place_ty(place); - let layout = bx.cx().layout_of(ty); + let layout = bx.layout_of(ty); // ZSTs don't require any actual memory access. if layout.is_zst() { @@ -471,9 +471,9 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // the above error (or silence it under some conditions) will not cause UB bx.abort(); // We've errored, so we don't have to produce working code. 
- let layout = bx.cx().layout_of(ty); + let layout = bx.layout_of(ty); bx.load_operand(PlaceRef::new_sized( - bx.cx().const_undef(bx.cx().type_ptr_to(bx.cx().backend_type(layout))), + bx.const_undef(bx.type_ptr_to(bx.backend_type(layout))), layout, layout.align.abi, )) diff --git a/src/librustc_codegen_ssa/mir/place.rs b/src/librustc_codegen_ssa/mir/place.rs index 1edcbfead2c94..c960af6c0a133 100644 --- a/src/librustc_codegen_ssa/mir/place.rs +++ b/src/librustc_codegen_ssa/mir/place.rs @@ -63,7 +63,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { ) -> Self { debug!("alloca({:?}: {:?})", name, layout); assert!(!layout.is_unsized(), "tried to statically allocate unsized place"); - let tmp = bx.alloca(bx.cx().backend_type(layout), name, layout.align.abi); + let tmp = bx.alloca(bx.backend_type(layout), name, layout.align.abi); Self::new_sized(tmp, layout, layout.align.abi) } @@ -75,8 +75,8 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { ) -> Self { debug!("alloca_unsized_indirect({:?}: {:?})", name, layout); assert!(layout.is_unsized(), "tried to allocate indirect place for sized values"); - let ptr_ty = bx.cx().tcx().mk_mut_ptr(layout.ty); - let ptr_layout = bx.cx().layout_of(ptr_ty); + let ptr_ty = bx.tcx().mk_mut_ptr(layout.ty); + let ptr_layout = bx.layout_of(ptr_ty); Self::alloca(bx, ptr_layout, name) } @@ -104,7 +104,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { self, bx: &mut Bx, ix: usize, ) -> Self { - let field = self.layout.field(bx.cx(), ix); + let field = self.layout.field(bx, ix); let offset = self.layout.fields.offset(ix); let effective_field_align = self.align.restrict_for_offset(offset); @@ -114,15 +114,15 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { self.llval } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi { // Offsets have to match either first or second field. - assert_eq!(offset, a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi)); + assert_eq!(offset, a.value.size(bx).align_to(b.value.align(bx).abi)); bx.struct_gep(self.llval, 1) } else { - bx.struct_gep(self.llval, bx.cx().backend_field_index(self.layout, ix)) + bx.struct_gep(self.llval, bx.backend_field_index(self.layout, ix)) }; PlaceRef { // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. 
- llval: bx.pointercast(llval, bx.cx().type_ptr_to(bx.cx().backend_type(field))), - llextra: if bx.cx().type_has_metadata(field.ty) { + llval: bx.pointercast(llval, bx.type_ptr_to(bx.backend_type(field))), + llextra: if bx.type_has_metadata(field.ty) { self.llextra } else { None @@ -172,7 +172,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { let meta = self.llextra; - let unaligned_offset = bx.cx().const_usize(offset.bytes()); + let unaligned_offset = bx.const_usize(offset.bytes()); // Get the alignment of the field let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta); @@ -183,7 +183,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { // (unaligned offset + (align - 1)) & -align // Calculate offset - let align_sub_1 = bx.sub(unsized_align, bx.cx().const_usize(1u64)); + let align_sub_1 = bx.sub(unsized_align, bx.const_usize(1u64)); let and_lhs = bx.add(unaligned_offset, align_sub_1); let and_rhs = bx.neg(unsized_align); let offset = bx.and(and_lhs, and_rhs); @@ -191,15 +191,15 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { debug!("struct_field_ptr: DST field offset: {:?}", offset); // Cast and adjust pointer - let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p()); + let byte_ptr = bx.pointercast(self.llval, bx.type_i8p()); let byte_ptr = bx.gep(byte_ptr, &[offset]); // Finally, cast back to the type expected - let ll_fty = bx.cx().backend_type(field); + let ll_fty = bx.backend_type(field); debug!("struct_field_ptr: Field type is {:?}", ll_fty); PlaceRef { - llval: bx.pointercast(byte_ptr, bx.cx().type_ptr_to(ll_fty)), + llval: bx.pointercast(byte_ptr, bx.type_ptr_to(ll_fty)), llextra: self.llextra, layout: field, align: effective_field_align, @@ -212,16 +212,16 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { bx: &mut Bx, cast_to: Ty<'tcx> ) -> V { - let cast_to = bx.cx().immediate_backend_type(bx.cx().layout_of(cast_to)); + let cast_to = bx.immediate_backend_type(bx.layout_of(cast_to)); if self.layout.abi.is_uninhabited() { - return bx.cx().const_undef(cast_to); + return bx.const_undef(cast_to); } match self.layout.variants { layout::Variants::Single { index } => { let discr_val = self.layout.ty.ty_adt_def().map_or( index.as_u32() as u128, - |def| def.discriminant_for_variant(bx.cx().tcx(), index).val); - return bx.cx().const_uint_big(cast_to, discr_val); + |def| def.discriminant_for_variant(bx.tcx(), index).val); + return bx.const_uint_big(cast_to, discr_val); } layout::Variants::Tagged { .. } | layout::Variants::NicheFilling { .. } => {}, @@ -248,30 +248,30 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { niche_start, .. } => { - let niche_llty = bx.cx().immediate_backend_type(discr.layout); + let niche_llty = bx.immediate_backend_type(discr.layout); if niche_variants.start() == niche_variants.end() { // FIXME(eddyb) Check the actual primitive type here. let niche_llval = if niche_start == 0 { // HACK(eddyb) Using `c_null` as it works on all types. 
- bx.cx().const_null(niche_llty) + bx.const_null(niche_llty) } else { - bx.cx().const_uint_big(niche_llty, niche_start) + bx.const_uint_big(niche_llty, niche_start) }; let select_arg = bx.icmp(IntPredicate::IntEQ, lldiscr, niche_llval); bx.select(select_arg, - bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64), - bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64)) + bx.const_uint(cast_to, niche_variants.start().as_u32() as u64), + bx.const_uint(cast_to, dataful_variant.as_u32() as u64)) } else { // Rebase from niche values to discriminant values. let delta = niche_start.wrapping_sub(niche_variants.start().as_u32() as u128); - let lldiscr = bx.sub(lldiscr, bx.cx().const_uint_big(niche_llty, delta)); + let lldiscr = bx.sub(lldiscr, bx.const_uint_big(niche_llty, delta)); let lldiscr_max = - bx.cx().const_uint(niche_llty, niche_variants.end().as_u32() as u64); + bx.const_uint(niche_llty, niche_variants.end().as_u32() as u64); let select_arg = bx.icmp(IntPredicate::IntULE, lldiscr, lldiscr_max); let cast = bx.intcast(lldiscr, cast_to, false); bx.select(select_arg, cast, - bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64)) + bx.const_uint(cast_to, dataful_variant.as_u32() as u64)) } } } @@ -284,7 +284,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { bx: &mut Bx, variant_index: VariantIdx ) { - if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() { + if self.layout.for_variant(bx, variant_index).abi.is_uninhabited() { return; } match self.layout.variants { @@ -297,7 +297,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { .discriminant_for_variant(bx.tcx(), variant_index) .val; bx.store( - bx.cx().const_uint_big(bx.cx().backend_type(ptr.layout), to), + bx.const_uint_big(bx.backend_type(ptr.layout), to), ptr.llval, ptr.align); } @@ -308,26 +308,26 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { .. } => { if variant_index != dataful_variant { - if bx.cx().sess().target.target.arch == "arm" || - bx.cx().sess().target.target.arch == "aarch64" { + if bx.sess().target.target.arch == "arm" || + bx.sess().target.target.arch == "aarch64" { // Issue #34427: As workaround for LLVM bug on ARM, // use memset of 0 before assigning niche value. - let fill_byte = bx.cx().const_u8(0); - let size = bx.cx().const_usize(self.layout.size.bytes()); + let fill_byte = bx.const_u8(0); + let size = bx.const_usize(self.layout.size.bytes()); bx.memset(self.llval, fill_byte, size, self.align, MemFlags::empty()); } let niche = self.project_field(bx, 0); - let niche_llty = bx.cx().immediate_backend_type(niche.layout); + let niche_llty = bx.immediate_backend_type(niche.layout); let niche_value = variant_index.as_u32() - niche_variants.start().as_u32(); let niche_value = (niche_value as u128) .wrapping_add(niche_start); // FIXME(eddyb) Check the actual primitive type here. let niche_llval = if niche_value == 0 { // HACK(eddyb) Using `c_null` as it works on all types. 
- bx.cx().const_null(niche_llty) + bx.const_null(niche_llty) } else { - bx.cx().const_uint_big(niche_llty, niche_value) + bx.const_uint_big(niche_llty, niche_value) }; OperandValue::Immediate(niche_llval).store(bx, niche); } @@ -350,7 +350,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { }; PlaceRef { - llval: bx.inbounds_gep(self.llval, &[bx.cx().const_usize(0), llindex]), + llval: bx.inbounds_gep(self.llval, &[bx.const_usize(0), llindex]), llextra: None, layout, align: self.align.restrict_for_offset(offset), @@ -363,11 +363,11 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { variant_index: VariantIdx ) -> Self { let mut downcast = *self; - downcast.layout = self.layout.for_variant(bx.cx(), variant_index); + downcast.layout = self.layout.for_variant(bx, variant_index); // Cast to the appropriate variant struct type. - let variant_ty = bx.cx().backend_type(downcast.layout); - downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty)); + let variant_ty = bx.backend_type(downcast.layout); + downcast.llval = bx.pointercast(downcast.llval, bx.type_ptr_to(variant_ty)); downcast } @@ -418,7 +418,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { match bx.tcx().const_eval(param_env.and(cid)) { Ok(val) => match val.val { mir::interpret::ConstValue::ByRef(ptr, alloc) => { - bx.cx().from_const_alloc(layout, alloc, ptr.offset) + bx.from_const_alloc(layout, alloc, ptr.offset) } _ => bug!("promoteds should have an allocation: {:?}", val), }, @@ -428,8 +428,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // With floats that won't always be true // so we generate an abort bx.abort(); - let llval = bx.cx().const_undef( - bx.cx().type_ptr_to(bx.cx().backend_type(layout)) + let llval = bx.const_undef( + bx.type_ptr_to(bx.backend_type(layout)) ); PlaceRef::new_sized(llval, layout, layout.align.abi) } @@ -465,33 +465,33 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { mir::ProjectionElem::ConstantIndex { offset, from_end: false, min_length: _ } => { - let lloffset = bx.cx().const_usize(offset as u64); + let lloffset = bx.const_usize(offset as u64); cg_base.project_index(bx, lloffset) } mir::ProjectionElem::ConstantIndex { offset, from_end: true, min_length: _ } => { - let lloffset = bx.cx().const_usize(offset as u64); + let lloffset = bx.const_usize(offset as u64); let lllen = cg_base.len(bx.cx()); let llindex = bx.sub(lllen, lloffset); cg_base.project_index(bx, llindex) } mir::ProjectionElem::Subslice { from, to } => { let mut subslice = cg_base.project_index(bx, - bx.cx().const_usize(from as u64)); + bx.const_usize(from as u64)); let projected_ty = PlaceTy::Ty { ty: cg_base.layout.ty } .projection_ty(tcx, &projection.elem).to_ty(tcx); - subslice.layout = bx.cx().layout_of(self.monomorphize(&projected_ty)); + subslice.layout = bx.layout_of(self.monomorphize(&projected_ty)); if subslice.layout.is_unsized() { subslice.llextra = Some(bx.sub(cg_base.llextra.unwrap(), - bx.cx().const_usize((from as u64) + (to as u64)))); + bx.const_usize((from as u64) + (to as u64)))); } // Cast the place pointer type to the new // array or slice type (*[%_; new_len]). 
subslice.llval = bx.pointercast(subslice.llval, - bx.cx().type_ptr_to(bx.cx().backend_type(subslice.layout))); + bx.type_ptr_to(bx.backend_type(subslice.layout))); subslice } diff --git a/src/librustc_codegen_ssa/mir/rvalue.rs b/src/librustc_codegen_ssa/mir/rvalue.rs index 539846bd9857d..c856af2fc1161 100644 --- a/src/librustc_codegen_ssa/mir/rvalue.rs +++ b/src/librustc_codegen_ssa/mir/rvalue.rs @@ -40,7 +40,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { mir::Rvalue::Cast(mir::CastKind::Unsize, ref source, _) => { // The destination necessarily contains a fat pointer, so if // it's a scalar pair, it's a fat pointer or newtype thereof. - if bx.cx().is_backend_scalar_pair(dest.layout) { + if bx.is_backend_scalar_pair(dest.layout) { // into-coerce of a thin pointer to a fat pointer - just // use the operand path. let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue); @@ -87,28 +87,28 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { if dest.layout.is_zst() { return bx; } - let zero = bx.cx().const_usize(0); + let zero = bx.const_usize(0); let start = dest.project_index(&mut bx, zero).llval; if let OperandValue::Immediate(v) = cg_elem.val { - let size = bx.cx().const_usize(dest.layout.size.bytes()); + let size = bx.const_usize(dest.layout.size.bytes()); // Use llvm.memset.p0i8.* to initialize all zero arrays - if bx.cx().is_const_integral(v) && bx.cx().const_to_uint(v) == 0 { - let fill = bx.cx().const_u8(0); + if bx.is_const_integral(v) && bx.const_to_uint(v) == 0 { + let fill = bx.const_u8(0); bx.memset(start, fill, size, dest.align, MemFlags::empty()); return bx; } // Use llvm.memset.p0i8.* to initialize byte arrays let v = base::from_immediate(&mut bx, v); - if bx.cx().val_ty(v) == bx.cx().type_i8() { + if bx.val_ty(v) == bx.type_i8() { bx.memset(start, v, size, dest.align, MemFlags::empty()); return bx; } } - let count = bx.cx().const_usize(count); + let count = bx.const_usize(count); let end = dest.project_index(&mut bx, count).llval; let mut header_bx = bx.build_sibling_block("repeat_loop_header"); @@ -116,7 +116,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let next_bx = bx.build_sibling_block("repeat_loop_next"); bx.br(header_bx.llbb()); - let current = header_bx.phi(bx.cx().val_ty(start), &[start], &[bx.llbb()]); + let current = header_bx.phi(bx.val_ty(start), &[start], &[bx.llbb()]); let keep_going = header_bx.icmp(IntPredicate::IntNE, current, end); header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb()); @@ -125,7 +125,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { cg_elem.val.store(&mut body_bx, PlaceRef::new_sized(current, cg_elem.layout, align)); - let next = body_bx.inbounds_gep(current, &[bx.cx().const_usize(1)]); + let next = body_bx.inbounds_gep(current, &[bx.const_usize(1)]); body_bx.br(header_bx.llbb()); header_bx.add_incoming_to_phi(current, next, body_bx.llbb()); @@ -196,13 +196,13 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => { let operand = self.codegen_operand(&mut bx, source); debug!("cast operand is {:?}", operand); - let cast = bx.cx().layout_of(self.monomorphize(&mir_cast_ty)); + let cast = bx.layout_of(self.monomorphize(&mir_cast_ty)); let val = match *kind { mir::CastKind::ReifyFnPointer => { match operand.layout.ty.sty { ty::FnDef(def_id, substs) => { - if bx.cx().tcx().has_attr(def_id, "rustc_args_required_const") { + if 
bx.tcx().has_attr(def_id, "rustc_args_required_const") { bug!("reifying a fn ptr that requires \ const arguments"); } @@ -218,8 +218,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { match operand.layout.ty.sty { ty::Closure(def_id, substs) => { let instance = monomorphize::resolve_closure( - bx.cx().tcx(), def_id, substs, ty::ClosureKind::FnOnce); - OperandValue::Immediate(bx.cx().get_fn(instance)) + bx.tcx(), def_id, substs, ty::ClosureKind::FnOnce); + OperandValue::Immediate(bx.get_fn(instance)) } _ => { bug!("{} cannot be cast to a fn ptr", operand.layout.ty) @@ -231,7 +231,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { operand.val } mir::CastKind::Unsize => { - assert!(bx.cx().is_backend_scalar_pair(cast)); + assert!(bx.is_backend_scalar_pair(cast)); match operand.val { OperandValue::Pair(lldata, llextra) => { // unsize from a fat pointer - this is a @@ -242,7 +242,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // HACK(eddyb) have to bitcast pointers // until LLVM removes pointee types. let lldata = bx.pointercast(lldata, - bx.cx().scalar_pair_element_backend_type(cast, 0, true)); + bx.scalar_pair_element_backend_type(cast, 0, true)); OperandValue::Pair(lldata, llextra) } OperandValue::Immediate(lldata) => { @@ -257,16 +257,16 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } } } - mir::CastKind::Misc if bx.cx().is_backend_scalar_pair(operand.layout) => { + mir::CastKind::Misc if bx.is_backend_scalar_pair(operand.layout) => { if let OperandValue::Pair(data_ptr, meta) = operand.val { - if bx.cx().is_backend_scalar_pair(cast) { + if bx.is_backend_scalar_pair(cast) { let data_cast = bx.pointercast(data_ptr, - bx.cx().scalar_pair_element_backend_type(cast, 0, true)); + bx.scalar_pair_element_backend_type(cast, 0, true)); OperandValue::Pair(data_cast, meta) } else { // cast to thin-ptr // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and // pointer-cast of that pointer to desired pointer type. 
- let llcast_ty = bx.cx().immediate_backend_type(cast); + let llcast_ty = bx.immediate_backend_type(cast); let llval = bx.pointercast(data_ptr, llcast_ty); OperandValue::Immediate(llval) } @@ -275,10 +275,10 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } } mir::CastKind::Misc => { - assert!(bx.cx().is_backend_immediate(cast)); - let ll_t_out = bx.cx().immediate_backend_type(cast); + assert!(bx.is_backend_immediate(cast)); + let ll_t_out = bx.immediate_backend_type(cast); if operand.layout.abi.is_uninhabited() { - let val = OperandValue::Immediate(bx.cx().const_undef(ll_t_out)); + let val = OperandValue::Immediate(bx.const_undef(ll_t_out)); return (bx, OperandRef { val, layout: cast, @@ -287,14 +287,14 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let r_t_in = CastTy::from_ty(operand.layout.ty) .expect("bad input type for cast"); let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast"); - let ll_t_in = bx.cx().immediate_backend_type(operand.layout); + let ll_t_in = bx.immediate_backend_type(operand.layout); match operand.layout.variants { layout::Variants::Single { index } => { if let Some(def) = operand.layout.ty.ty_adt_def() { let discr_val = def - .discriminant_for_variant(bx.cx().tcx(), index) + .discriminant_for_variant(bx.tcx(), index) .val; - let discr = bx.cx().const_uint_big(ll_t_out, discr_val); + let discr = bx.const_uint_big(ll_t_out, discr_val); return (bx, OperandRef { val: OperandValue::Immediate(discr), layout: cast, @@ -322,7 +322,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // have bound checks, and this is the most // convenient place to put the `assume`. let ll_t_in_const = - bx.cx().const_uint_big(ll_t_in, *scalar.valid_range.end()); + bx.const_uint_big(ll_t_in, *scalar.valid_range.end()); let cmp = bx.icmp( IntPredicate::IntULE, llval, @@ -338,8 +338,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { bx.intcast(llval, ll_t_out, signed) } (CastTy::Float, CastTy::Float) => { - let srcsz = bx.cx().float_width(ll_t_in); - let dstsz = bx.cx().float_width(ll_t_out); + let srcsz = bx.float_width(ll_t_in); + let dstsz = bx.float_width(ll_t_out); if dstsz > srcsz { bx.fpext(llval, ll_t_out) } else if srcsz > dstsz { @@ -356,7 +356,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { (CastTy::FnPtr, CastTy::Int(_)) => bx.ptrtoint(llval, ll_t_out), (CastTy::Int(_), CastTy::Ptr(_)) => { - let usize_llval = bx.intcast(llval, bx.cx().type_isize(), signed); + let usize_llval = bx.intcast(llval, bx.type_isize(), signed); bx.inttoptr(usize_llval, ll_t_out) } (CastTy::Int(_), CastTy::Float) => @@ -383,7 +383,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // Note: places are indirect, so storing the `llval` into the // destination effectively creates a reference. 
- let val = if !bx.cx().type_has_metadata(ty) { + let val = if !bx.type_has_metadata(ty) { OperandValue::Immediate(cg_place.llval) } else { OperandValue::Pair(cg_place.llval, cg_place.llextra.unwrap()) @@ -401,7 +401,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let size = self.evaluate_array_len(&mut bx, place); let operand = OperandRef { val: OperandValue::Immediate(size), - layout: bx.cx().layout_of(bx.tcx().types.usize), + layout: bx.layout_of(bx.tcx().types.usize), }; (bx, operand) } @@ -427,7 +427,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { }; let operand = OperandRef { val: OperandValue::Immediate(llresult), - layout: bx.cx().layout_of( + layout: bx.layout_of( op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)), }; (bx, operand) @@ -442,7 +442,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let operand_ty = bx.tcx().intern_tup(&[val_ty, bx.tcx().types.bool]); let operand = OperandRef { val: result, - layout: bx.cx().layout_of(operand_ty) + layout: bx.layout_of(operand_ty) }; (bx, operand) @@ -477,8 +477,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => { - assert!(bx.cx().type_is_sized(ty)); - let val = bx.cx().const_usize(bx.cx().layout_of(ty).size.bytes()); + assert!(bx.type_is_sized(ty)); + let val = bx.const_usize(bx.layout_of(ty).size.bytes()); let tcx = self.cx.tcx(); (bx, OperandRef { val: OperandValue::Immediate(val), @@ -488,21 +488,21 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => { let content_ty = self.monomorphize(&content_ty); - let content_layout = bx.cx().layout_of(content_ty); - let llsize = bx.cx().const_usize(content_layout.size.bytes()); - let llalign = bx.cx().const_usize(content_layout.align.abi.bytes()); - let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty)); - let llty_ptr = bx.cx().backend_type(box_layout); + let content_layout = bx.layout_of(content_ty); + let llsize = bx.const_usize(content_layout.size.bytes()); + let llalign = bx.const_usize(content_layout.align.abi.bytes()); + let box_layout = bx.layout_of(bx.tcx().mk_box(content_ty)); + let llty_ptr = bx.backend_type(box_layout); // Allocate space: let def_id = match bx.tcx().lang_items().require(ExchangeMallocFnLangItem) { Ok(id) => id, Err(s) => { - bx.cx().sess().fatal(&format!("allocation of `{}` {}", box_layout.ty, s)); + bx.sess().fatal(&format!("allocation of `{}` {}", box_layout.ty, s)); } }; let instance = ty::Instance::mono(bx.tcx(), def_id); - let r = bx.cx().get_fn(instance); + let r = bx.get_fn(instance); let call = bx.call(r, &[llsize, llalign], None); let val = bx.pointercast(call, llty_ptr); @@ -540,8 +540,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { if let mir::Place::Local(index) = *place { if let LocalRef::Operand(Some(op)) = self.locals[index] { if let ty::Array(_, n) = op.layout.ty.sty { - let n = n.unwrap_usize(bx.cx().tcx()); - return bx.cx().const_usize(n); + let n = n.unwrap_usize(bx.tcx()); + return bx.const_usize(n); } } } @@ -599,7 +599,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { mir::BinOp::Shr => common::build_unchecked_rshift(bx, input_ty, lhs, rhs), mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt | mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => if is_unit { - bx.cx().const_bool(match op { + bx.const_bool(match op { 
mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt => false, mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => true, _ => unreachable!() @@ -673,9 +673,9 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // with #[rustc_inherit_overflow_checks] and inlined from // another crate (mostly core::num generic/#[inline] fns), // while the current crate doesn't use overflow checks. - if !bx.cx().check_overflow() { + if !bx.check_overflow() { let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty); - return OperandValue::Pair(val, bx.cx().const_bool(false)); + return OperandValue::Pair(val, bx.const_bool(false)); } let (val, of) = match op { @@ -690,12 +690,12 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { bx.checked_binop(oop, input_ty, lhs, rhs) } mir::BinOp::Shl | mir::BinOp::Shr => { - let lhs_llty = bx.cx().val_ty(lhs); - let rhs_llty = bx.cx().val_ty(rhs); + let lhs_llty = bx.val_ty(lhs); + let rhs_llty = bx.val_ty(rhs); let invert_mask = common::shift_mask_val(bx, lhs_llty, rhs_llty, true); let outer_bits = bx.and(rhs, invert_mask); - let of = bx.icmp(IntPredicate::IntNE, outer_bits, bx.cx().const_null(rhs_llty)); + let of = bx.icmp(IntPredicate::IntNE, outer_bits, bx.const_null(rhs_llty)); let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty); (val, of) @@ -745,8 +745,8 @@ fn cast_int_to_float<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( // It's only u128 -> f32 that can cause overflows (i.e., should yield infinity). // LLVM's uitofp produces undef in those cases, so we manually check for that case. let is_u128_to_f32 = !signed && - bx.cx().int_width(int_ty) == 128 && - bx.cx().float_width(float_ty) == 32; + bx.int_width(int_ty) == 128 && + bx.float_width(float_ty) == 32; if is_u128_to_f32 { // All inputs greater or equal to (f32::MAX + 0.5 ULP) are rounded to infinity, // and for everything else LLVM's uitofp works just fine. @@ -754,9 +754,9 @@ fn cast_int_to_float<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( use rustc_apfloat::Float; const MAX_F32_PLUS_HALF_ULP: u128 = ((1 << (Single::PRECISION + 1)) - 1) << (Single::MAX_EXP - Single::PRECISION as i16); - let max = bx.cx().const_uint_big(int_ty, MAX_F32_PLUS_HALF_ULP); + let max = bx.const_uint_big(int_ty, MAX_F32_PLUS_HALF_ULP); let overflow = bx.icmp(IntPredicate::IntUGE, x, max); - let infinity_bits = bx.cx().const_u32(ieee::Single::INFINITY.to_bits() as u32); + let infinity_bits = bx.const_u32(ieee::Single::INFINITY.to_bits() as u32); let infinity = bx.bitcast(infinity_bits, float_ty); let fp = bx.uitofp(x, float_ty); bx.select(overflow, infinity, fp) @@ -782,12 +782,12 @@ fn cast_float_to_int<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( bx.fptoui(x, int_ty) }; - if !bx.cx().sess().opts.debugging_opts.saturating_float_casts { + if !bx.sess().opts.debugging_opts.saturating_float_casts { return fptosui_result; } - let int_width = bx.cx().int_width(int_ty); - let float_width = bx.cx().float_width(float_ty); + let int_width = bx.int_width(int_ty); + let float_width = bx.float_width(float_ty); // LLVM's fpto[su]i returns undef when the input x is infinite, NaN, or does not fit into the // destination integer type after rounding towards zero. 
This `undef` value can cause UB in // safe code (see issue #10184), so we implement a saturating conversion on top of it: @@ -842,8 +842,8 @@ fn cast_float_to_int<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( let mut float_bits_to_llval = |bits| { let bits_llval = match float_width { - 32 => bx.cx().const_u32(bits as u32), - 64 => bx.cx().const_u64(bits as u64), + 32 => bx.const_u32(bits as u32), + 64 => bx.const_u64(bits as u64), n => bug!("unsupported float width {}", n), }; bx.bitcast(bits_llval, float_ty) @@ -898,8 +898,8 @@ fn cast_float_to_int<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( // performed is ultimately up to the backend, but at least x86 does perform them. let less_or_nan = bx.fcmp(RealPredicate::RealULT, x, f_min); let greater = bx.fcmp(RealPredicate::RealOGT, x, f_max); - let int_max = bx.cx().const_uint_big(int_ty, int_max(signed, int_width)); - let int_min = bx.cx().const_uint_big(int_ty, int_min(signed, int_width) as u128); + let int_max = bx.const_uint_big(int_ty, int_max(signed, int_width)); + let int_min = bx.const_uint_big(int_ty, int_min(signed, int_width) as u128); let s0 = bx.select(less_or_nan, int_min, fptosui_result); let s1 = bx.select(greater, int_max, s0); @@ -908,7 +908,7 @@ fn cast_float_to_int<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( // Therefore we only need to execute this step for signed integer types. if signed { // LLVM has no isNaN predicate, so we use (x == x) instead - let zero = bx.cx().const_uint(int_ty, 0); + let zero = bx.const_uint(int_ty, 0); let cmp = bx.fcmp(RealPredicate::RealOEQ, x, x); bx.select(cmp, s1, zero) } else { From 7d722697d4157b7d691b3c949531c338b2f82c3f Mon Sep 17 00:00:00 2001 From: bjorn3 Date: Sun, 2 Dec 2018 14:35:11 +0100 Subject: [PATCH 05/28] Misc --- src/librustc_codegen_llvm/builder.rs | 2 +- src/librustc_codegen_ssa/mir/place.rs | 2 +- src/librustc_codegen_ssa/traits/statics.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index 048adb0fb80ff..ebe3a81acc601 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -1471,7 +1471,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } impl StaticBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { - fn get_static(&self, def_id: DefId) -> &'ll Value { + fn get_static(&mut self, def_id: DefId) -> &'ll Value { // Forward to the `get_static` method of `CodegenCx` self.cx().get_static(def_id) } diff --git a/src/librustc_codegen_ssa/mir/place.rs b/src/librustc_codegen_ssa/mir/place.rs index c960af6c0a133..fccf3f4802ed2 100644 --- a/src/librustc_codegen_ssa/mir/place.rs +++ b/src/librustc_codegen_ssa/mir/place.rs @@ -80,7 +80,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { Self::alloca(bx, ptr_layout, name) } - pub fn len>( + pub fn len>( &self, cx: &Cx ) -> V { diff --git a/src/librustc_codegen_ssa/traits/statics.rs b/src/librustc_codegen_ssa/traits/statics.rs index c4e7fe703c219..55c1253f10673 100644 --- a/src/librustc_codegen_ssa/traits/statics.rs +++ b/src/librustc_codegen_ssa/traits/statics.rs @@ -8,5 +8,5 @@ pub trait StaticMethods: BackendTypes { } pub trait StaticBuilderMethods<'tcx>: BackendTypes { - fn get_static(&self, def_id: DefId) -> Self::Value; + fn get_static(&mut self, def_id: DefId) -> Self::Value; } From fbd969b8e4c2be6e68031d528f434cca4434debb Mon Sep 17 00:00:00 2001 From: bjorn3 Date: Sun, 2 Dec 2018 15:58:40 +0100 Subject: [PATCH 06/28] Remove const_{fat_ptr,array,vector,bytes} from cg_ssa 
--- src/librustc_codegen_llvm/common.rs | 54 ++++++++++++----------- src/librustc_codegen_ssa/traits/consts.rs | 4 -- 2 files changed, 28 insertions(+), 30 deletions(-) diff --git a/src/librustc_codegen_llvm/common.rs b/src/librustc_codegen_llvm/common.rs index 4bd036ea3b17a..aab0d8ac60271 100644 --- a/src/librustc_codegen_llvm/common.rs +++ b/src/librustc_codegen_llvm/common.rs @@ -93,6 +93,34 @@ impl BackendTypes for CodegenCx<'ll, 'tcx> { type DIScope = &'ll llvm::debuginfo::DIScope; } +impl CodegenCx<'ll, 'tcx> { + pub fn const_fat_ptr( + &self, + ptr: &'ll Value, + meta: &'ll Value + ) -> &'ll Value { + assert_eq!(abi::FAT_PTR_ADDR, 0); + assert_eq!(abi::FAT_PTR_EXTRA, 1); + self.const_struct(&[ptr, meta], false) + } + + pub fn const_array(&self, ty: &'ll Type, elts: &[&'ll Value]) -> &'ll Value { + unsafe { + return llvm::LLVMConstArray(ty, elts.as_ptr(), elts.len() as c_uint); + } + } + + pub fn const_vector(&self, elts: &[&'ll Value]) -> &'ll Value { + unsafe { + return llvm::LLVMConstVector(elts.as_ptr(), elts.len() as c_uint); + } + } + + pub fn const_bytes(&self, bytes: &[u8]) -> &'ll Value { + bytes_in_context(self.llcx, bytes) + } +} + impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { fn const_null(&self, t: &'ll Type) -> &'ll Value { unsafe { @@ -189,16 +217,6 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { self.const_fat_ptr(cs, self.const_usize(len as u64)) } - fn const_fat_ptr( - &self, - ptr: &'ll Value, - meta: &'ll Value - ) -> &'ll Value { - assert_eq!(abi::FAT_PTR_ADDR, 0); - assert_eq!(abi::FAT_PTR_EXTRA, 1); - self.const_struct(&[ptr, meta], false) - } - fn const_struct( &self, elts: &[&'ll Value], @@ -207,22 +225,6 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { struct_in_context(self.llcx, elts, packed) } - fn const_array(&self, ty: &'ll Type, elts: &[&'ll Value]) -> &'ll Value { - unsafe { - return llvm::LLVMConstArray(ty, elts.as_ptr(), elts.len() as c_uint); - } - } - - fn const_vector(&self, elts: &[&'ll Value]) -> &'ll Value { - unsafe { - return llvm::LLVMConstVector(elts.as_ptr(), elts.len() as c_uint); - } - } - - fn const_bytes(&self, bytes: &[u8]) -> &'ll Value { - bytes_in_context(self.llcx, bytes) - } - fn const_get_elt(&self, v: &'ll Value, idx: u64) -> &'ll Value { unsafe { assert_eq!(idx as c_uint as u64, idx); diff --git a/src/librustc_codegen_ssa/traits/consts.rs b/src/librustc_codegen_ssa/traits/consts.rs index 319f4b4e5e4b5..61db94d53d881 100644 --- a/src/librustc_codegen_ssa/traits/consts.rs +++ b/src/librustc_codegen_ssa/traits/consts.rs @@ -24,11 +24,7 @@ pub trait ConstMethods<'tcx>: BackendTypes { fn const_cstr(&self, s: LocalInternedString, null_terminated: bool) -> Self::Value; fn const_str_slice(&self, s: LocalInternedString) -> Self::Value; - fn const_fat_ptr(&self, ptr: Self::Value, meta: Self::Value) -> Self::Value; fn const_struct(&self, elts: &[Self::Value], packed: bool) -> Self::Value; - fn const_array(&self, ty: Self::Type, elts: &[Self::Value]) -> Self::Value; - fn const_vector(&self, elts: &[Self::Value]) -> Self::Value; - fn const_bytes(&self, bytes: &[u8]) -> Self::Value; fn const_get_elt(&self, v: Self::Value, idx: u64) -> Self::Value; fn const_get_real(&self, v: Self::Value) -> Option<(f64, bool)>; From 48c3baa47d80be0a26f7befc8fd9a8d2bf825e73 Mon Sep 17 00:00:00 2001 From: bjorn3 Date: Sun, 2 Dec 2018 16:53:39 +0100 Subject: [PATCH 07/28] Remove const_{cstr,str_slice,get_elt,get_real} and is_const_real methods from cg_ssa This introduces the static_panic_msg trait method to StaticBuilderMethods. 
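
With const_cstr/const_str_slice gone from the shared traits, the
backend-independent panic codegen can no longer assemble the
(msg, file, line, col) struct itself; static_panic_msg gives it a single
high-level hook and leaves the string/struct plumbing to each backend.
A standalone sketch of that interface change, with invented toy types
(not the real cg_ssa signatures):

    // Toy illustration only: Strings stand in for backend values.
    trait StaticBuilderMethods {
        // One coarse-grained hook replaces the const_str_slice +
        // const_struct + static_addr_of sequence in shared code.
        fn static_panic_msg(&mut self, msg: Option<&str>, file: &str,
                            line: u32, col: u32) -> String;
    }

    struct LlvmBuilder;

    impl LlvmBuilder {
        // Backend-private helper, no longer part of any shared trait.
        fn const_str_slice(&self, s: &str) -> String {
            format!("{:?}", s)
        }
    }

    impl StaticBuilderMethods for LlvmBuilder {
        fn static_panic_msg(&mut self, msg: Option<&str>, file: &str,
                            line: u32, col: u32) -> String {
            let file = self.const_str_slice(file);
            match msg {
                // With a message: the four-field panic_loc-style struct.
                Some(m) => format!("({}, {}, {}, {})",
                                   self.const_str_slice(m), file, line, col),
                // Without: the three-field bounds-check-style struct.
                None => format!("({}, {}, {})", file, line, col),
            }
        }
    }

    fn main() {
        let mut bx = LlvmBuilder;
        // Shared panic codegen now makes one call per panic message.
        println!("{}", bx.static_panic_msg(Some("index out of bounds"),
                                           "lib.rs", 42, 7));
    }
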
--- src/librustc_codegen_llvm/builder.rs | 31 +++++ src/librustc_codegen_llvm/common.rs | 132 ++++++++++----------- src/librustc_codegen_ssa/mir/block.rs | 55 ++++----- src/librustc_codegen_ssa/mir/place.rs | 3 +- src/librustc_codegen_ssa/traits/consts.rs | 9 -- src/librustc_codegen_ssa/traits/statics.rs | 9 ++ 6 files changed, 129 insertions(+), 110 deletions(-) diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index ebe3a81acc601..3b3b437007aeb 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -5,6 +5,7 @@ use crate::context::CodegenCx; use crate::type_::Type; use crate::type_of::LayoutLlvmExt; use crate::value::Value; +use syntax::symbol::LocalInternedString; use rustc_codegen_ssa::common::{IntPredicate, TypeKind, RealPredicate}; use rustc_codegen_ssa::MemFlags; use libc::{c_uint, c_char}; @@ -1475,6 +1476,36 @@ impl StaticBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { // Forward to the `get_static` method of `CodegenCx` self.cx().get_static(def_id) } + + fn static_panic_msg( + &mut self, + msg: Option, + filename: LocalInternedString, + line: Self::Value, + col: Self::Value, + kind: &str, + ) -> Self::Value { + let align = self.tcx.data_layout.aggregate_align.abi + .max(self.tcx.data_layout.i32_align.abi) + .max(self.tcx.data_layout.pointer_align.abi); + + let filename = self.const_str_slice(filename); + + let with_msg_components; + let without_msg_components; + + let components = if let Some(msg) = msg { + let msg = self.const_str_slice(msg); + with_msg_components = [msg, filename, line, col]; + &with_msg_components as &[_] + } else { + without_msg_components = [filename, line, col]; + &without_msg_components as &[_] + }; + + let struct_ = self.const_struct(&components, false); + self.static_addr_of(struct_, align, Some(kind)) + } } impl Builder<'a, 'll, 'tcx> { diff --git a/src/librustc_codegen_llvm/common.rs b/src/librustc_codegen_llvm/common.rs index aab0d8ac60271..9554e54e4142a 100644 --- a/src/librustc_codegen_llvm/common.rs +++ b/src/librustc_codegen_llvm/common.rs @@ -119,6 +119,72 @@ impl CodegenCx<'ll, 'tcx> { pub fn const_bytes(&self, bytes: &[u8]) -> &'ll Value { bytes_in_context(self.llcx, bytes) } + + fn const_cstr( + &self, + s: LocalInternedString, + null_terminated: bool, + ) -> &'ll Value { + unsafe { + if let Some(&llval) = self.const_cstr_cache.borrow().get(&s) { + return llval; + } + + let sc = llvm::LLVMConstStringInContext(self.llcx, + s.as_ptr() as *const c_char, + s.len() as c_uint, + !null_terminated as Bool); + let sym = self.generate_local_symbol_name("str"); + let g = self.define_global(&sym[..], self.val_ty(sc)).unwrap_or_else(||{ + bug!("symbol `{}` is already defined", sym); + }); + llvm::LLVMSetInitializer(g, sc); + llvm::LLVMSetGlobalConstant(g, True); + llvm::LLVMRustSetLinkage(g, llvm::Linkage::InternalLinkage); + + self.const_cstr_cache.borrow_mut().insert(s, g); + g + } + } + + pub fn const_str_slice(&self, s: LocalInternedString) -> &'ll Value { + let len = s.len(); + let cs = consts::ptrcast(self.const_cstr(s, false), + self.type_ptr_to(self.layout_of(self.tcx.mk_str()).llvm_type(self))); + self.const_fat_ptr(cs, self.const_usize(len as u64)) + } + + pub fn const_get_elt(&self, v: &'ll Value, idx: u64) -> &'ll Value { + unsafe { + assert_eq!(idx as c_uint as u64, idx); + let us = &[idx as c_uint]; + let r = llvm::LLVMConstExtractValue(v, us.as_ptr(), us.len() as c_uint); + + debug!("const_get_elt(v={:?}, idx={}, r={:?})", + v, idx, r); + + r + } + } + + pub fn 
const_get_real(&self, v: &'ll Value) -> Option<(f64, bool)> { + unsafe { + if self.is_const_real(v) { + let mut loses_info: llvm::Bool = ::std::mem::uninitialized(); + let r = llvm::LLVMConstRealGetDouble(v, &mut loses_info); + let loses_info = if loses_info == 1 { true } else { false }; + Some((r, loses_info)) + } else { + None + } + } + } + + fn is_const_real(&self, v: &'ll Value) -> bool { + unsafe { + llvm::LLVMIsAConstantFP(v).is_some() + } + } } impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { @@ -183,40 +249,6 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { self.const_uint(self.type_i8(), i as u64) } - fn const_cstr( - &self, - s: LocalInternedString, - null_terminated: bool, - ) -> &'ll Value { - unsafe { - if let Some(&llval) = self.const_cstr_cache.borrow().get(&s) { - return llval; - } - - let sc = llvm::LLVMConstStringInContext(self.llcx, - s.as_ptr() as *const c_char, - s.len() as c_uint, - !null_terminated as Bool); - let sym = self.generate_local_symbol_name("str"); - let g = self.define_global(&sym[..], self.val_ty(sc)).unwrap_or_else(||{ - bug!("symbol `{}` is already defined", sym); - }); - llvm::LLVMSetInitializer(g, sc); - llvm::LLVMSetGlobalConstant(g, True); - llvm::LLVMRustSetLinkage(g, llvm::Linkage::InternalLinkage); - - self.const_cstr_cache.borrow_mut().insert(s, g); - g - } - } - - fn const_str_slice(&self, s: LocalInternedString) -> &'ll Value { - let len = s.len(); - let cs = consts::ptrcast(self.const_cstr(s, false), - self.type_ptr_to(self.layout_of(self.tcx.mk_str()).llvm_type(self))); - self.const_fat_ptr(cs, self.const_usize(len as u64)) - } - fn const_struct( &self, elts: &[&'ll Value], @@ -225,32 +257,6 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { struct_in_context(self.llcx, elts, packed) } - fn const_get_elt(&self, v: &'ll Value, idx: u64) -> &'ll Value { - unsafe { - assert_eq!(idx as c_uint as u64, idx); - let us = &[idx as c_uint]; - let r = llvm::LLVMConstExtractValue(v, us.as_ptr(), us.len() as c_uint); - - debug!("const_get_elt(v={:?}, idx={}, r={:?})", - v, idx, r); - - r - } - } - - fn const_get_real(&self, v: &'ll Value) -> Option<(f64, bool)> { - unsafe { - if self.is_const_real(v) { - let mut loses_info: llvm::Bool = ::std::mem::uninitialized(); - let r = llvm::LLVMConstRealGetDouble(v, &mut loses_info); - let loses_info = if loses_info == 1 { true } else { false }; - Some((r, loses_info)) - } else { - None - } - } - } - fn const_to_uint(&self, v: &'ll Value) -> u64 { unsafe { llvm::LLVMConstIntGetZExtValue(v) @@ -263,12 +269,6 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { } } - fn is_const_real(&self, v: &'ll Value) -> bool { - unsafe { - llvm::LLVMIsAConstantFP(v).is_some() - } - } - fn const_to_opt_u128(&self, v: &'ll Value, sign_ext: bool) -> Option { unsafe { if self.is_const_integral(v) { diff --git a/src/librustc_codegen_ssa/mir/block.rs b/src/librustc_codegen_ssa/mir/block.rs index 6a374b23024be..f9e3d1ab50346 100644 --- a/src/librustc_codegen_ssa/mir/block.rs +++ b/src/librustc_codegen_ssa/mir/block.rs @@ -1,6 +1,6 @@ use rustc::middle::lang_items; use rustc::ty::{self, Ty, TypeFoldable}; -use rustc::ty::layout::{self, HasTyCtxt}; +use rustc::ty::layout::{self, HasTyCtxt, LayoutOf}; use rustc::mir; use rustc::mir::interpret::EvalErrorKind; use rustc_target::abi::call::{ArgType, FnType, PassMode, IgnoreMode}; @@ -394,12 +394,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // Get the location information. 
let loc = bx.sess().source_map().lookup_char_pos(span.lo()); let filename = Symbol::intern(&loc.file.name.to_string()).as_str(); - let filename = bx.const_str_slice(filename); let line = bx.const_u32(loc.line as u32); let col = bx.const_u32(loc.col.to_usize() as u32 + 1); - let align = self.cx.tcx().data_layout.aggregate_align.abi - .max(self.cx.tcx().data_layout.i32_align.abi) - .max(self.cx.tcx().data_layout.pointer_align.abi); // Put together the arguments to the panic entry point. let (lang_item, args) = match *msg { @@ -407,30 +403,28 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let len = self.codegen_operand(&mut bx, len).immediate(); let index = self.codegen_operand(&mut bx, index).immediate(); - let file_line_col = bx.const_struct(&[filename, line, col], false); - let file_line_col = bx.static_addr_of( - file_line_col, - align, - Some("panic_bounds_check_loc") + let file_line_col = bx.static_panic_msg( + None, + filename, + line, + col, + "panic_bounds_check_loc", ); (lang_items::PanicBoundsCheckFnLangItem, - vec![file_line_col, index, len]) + vec![file_line_col, index, len]) } _ => { let str = msg.description(); let msg_str = Symbol::intern(str).as_str(); - let msg_str = bx.const_str_slice(msg_str); - let msg_file_line_col = bx.const_struct( - &[msg_str, filename, line, col], - false - ); - let msg_file_line_col = bx.static_addr_of( - msg_file_line_col, - align, - Some("panic_loc") + let msg_file_line_col = bx.static_panic_msg( + Some(msg_str), + filename, + line, + col, + "panic_loc", ); (lang_items::PanicFnLangItem, - vec![msg_file_line_col]) + vec![msg_file_line_col]) } }; @@ -534,27 +528,20 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { if layout.abi.is_uninhabited() { let loc = bx.sess().source_map().lookup_char_pos(span.lo()); let filename = Symbol::intern(&loc.file.name.to_string()).as_str(); - let filename = bx.const_str_slice(filename); let line = bx.const_u32(loc.line as u32); let col = bx.const_u32(loc.col.to_usize() as u32 + 1); - let align = self.cx.tcx().data_layout.aggregate_align.abi - .max(self.cx.tcx().data_layout.i32_align.abi) - .max(self.cx.tcx().data_layout.pointer_align.abi); let str = format!( "Attempted to instantiate uninhabited type {}", ty ); let msg_str = Symbol::intern(&str).as_str(); - let msg_str = bx.const_str_slice(msg_str); - let msg_file_line_col = bx.const_struct( - &[msg_str, filename, line, col], - false, - ); - let msg_file_line_col = bx.static_addr_of( - msg_file_line_col, - align, - Some("panic_loc"), + let msg_file_line_col = bx.static_panic_msg( + Some(msg_str), + filename, + line, + col, + "panic_loc", ); // Obtain the panic entry point. diff --git a/src/librustc_codegen_ssa/mir/place.rs b/src/librustc_codegen_ssa/mir/place.rs index fccf3f4802ed2..56e9b36e83233 100644 --- a/src/librustc_codegen_ssa/mir/place.rs +++ b/src/librustc_codegen_ssa/mir/place.rs @@ -439,7 +439,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // NB: The layout of a static may be unsized as is the case when working // with a static that is an extern_type. 
let layout = cx.layout_of(self.monomorphize(&ty)); - PlaceRef::new_thin_place(bx, bx.get_static(def_id), layout, layout.align.abi) + let static_ = bx.get_static(def_id); + PlaceRef::new_thin_place(bx, static_, layout, layout.align.abi) }, mir::Place::Projection(box mir::Projection { ref base, diff --git a/src/librustc_codegen_ssa/traits/consts.rs b/src/librustc_codegen_ssa/traits/consts.rs index 61db94d53d881..32412f303c155 100644 --- a/src/librustc_codegen_ssa/traits/consts.rs +++ b/src/librustc_codegen_ssa/traits/consts.rs @@ -3,7 +3,6 @@ use crate::mir::place::PlaceRef; use rustc::mir::interpret::Allocation; use rustc::mir::interpret::Scalar; use rustc::ty::layout; -use syntax::symbol::LocalInternedString; pub trait ConstMethods<'tcx>: BackendTypes { // Constant constructors @@ -19,20 +18,12 @@ pub trait ConstMethods<'tcx>: BackendTypes { fn const_usize(&self, i: u64) -> Self::Value; fn const_u8(&self, i: u8) -> Self::Value; - // This is a 'c-like' raw string, which differs from - // our boxed-and-length-annotated strings. - fn const_cstr(&self, s: LocalInternedString, null_terminated: bool) -> Self::Value; - - fn const_str_slice(&self, s: LocalInternedString) -> Self::Value; fn const_struct(&self, elts: &[Self::Value], packed: bool) -> Self::Value; - fn const_get_elt(&self, v: Self::Value, idx: u64) -> Self::Value; - fn const_get_real(&self, v: Self::Value) -> Option<(f64, bool)>; fn const_to_uint(&self, v: Self::Value) -> u64; fn const_to_opt_u128(&self, v: Self::Value, sign_ext: bool) -> Option; fn is_const_integral(&self, v: Self::Value) -> bool; - fn is_const_real(&self, v: Self::Value) -> bool; fn scalar_to_backend( &self, diff --git a/src/librustc_codegen_ssa/traits/statics.rs b/src/librustc_codegen_ssa/traits/statics.rs index 55c1253f10673..d8992c159337d 100644 --- a/src/librustc_codegen_ssa/traits/statics.rs +++ b/src/librustc_codegen_ssa/traits/statics.rs @@ -1,4 +1,5 @@ use super::BackendTypes; +use syntax_pos::symbol::LocalInternedString; use rustc::hir::def_id::DefId; use rustc::ty::layout::Align; @@ -9,4 +10,12 @@ pub trait StaticMethods: BackendTypes { pub trait StaticBuilderMethods<'tcx>: BackendTypes { fn get_static(&mut self, def_id: DefId) -> Self::Value; + fn static_panic_msg( + &mut self, + msg: Option, + filename: LocalInternedString, + line: Self::Value, + col: Self::Value, + kind: &str, + ) -> Self::Value; } From 86f0603d8f518180e830e9c534e3018d9254a259 Mon Sep 17 00:00:00 2001 From: bjorn3 Date: Sun, 2 Dec 2018 18:04:39 +0100 Subject: [PATCH 08/28] Remove param_substs from FunctionCx --- src/librustc_codegen_ssa/mir/mod.rs | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/src/librustc_codegen_ssa/mir/mod.rs b/src/librustc_codegen_ssa/mir/mod.rs index db970f3c8b2e5..7a2fccfdc555a 100644 --- a/src/librustc_codegen_ssa/mir/mod.rs +++ b/src/librustc_codegen_ssa/mir/mod.rs @@ -1,7 +1,6 @@ use rustc::ty::{self, Ty, TypeFoldable, UpvarSubsts}; use rustc::ty::layout::{TyLayout, HasTyCtxt}; use rustc::mir::{self, Mir}; -use rustc::ty::subst::SubstsRef; use rustc::session::config::DebugInfo; use rustc_mir::monomorphize::Instance; use rustc_target::abi::call::{FnType, PassMode, IgnoreMode}; @@ -83,9 +82,6 @@ pub struct FunctionCx<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> { /// Debug information for MIR scopes. scopes: IndexVec>, - /// If this function is being monomorphized, this contains the type substitutions used. 
- param_substs: SubstsRef<'tcx>, - /// If this function is a C-variadic function, this contains the `PlaceRef` of the /// "spoofed" `VaList`. va_list_ref: Option>, @@ -96,7 +92,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { where T: TypeFoldable<'tcx> { self.cx.tcx().subst_and_normalize_erasing_regions( - self.param_substs, + self.instance.substs, ty::ParamEnv::reveal_all(), value, ) @@ -203,6 +199,8 @@ pub fn codegen_mir<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( instance: Instance<'tcx>, sig: ty::FnSig<'tcx>, ) { + assert!(!instance.substs.needs_infer()); + let fn_ty = cx.new_fn_type(sig, &[]); debug!("fn_ty: {:?}", fn_ty); let debug_context = @@ -245,10 +243,6 @@ pub fn codegen_mir<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( scopes, locals: IndexVec::new(), debug_context, - param_substs: { - assert!(!instance.substs.needs_infer()); - instance.substs - }, va_list_ref: None, }; From 84c5b52ca8b3cc50dfee57c94269035793c335f8 Mon Sep 17 00:00:00 2001 From: bjorn3 Date: Sun, 2 Dec 2018 18:09:26 +0100 Subject: [PATCH 09/28] Remove internal mutability from source_locations_enabled --- src/librustc_codegen_llvm/debuginfo/mod.rs | 6 +++--- src/librustc_codegen_llvm/debuginfo/source_loc.rs | 2 +- src/librustc_codegen_ssa/debuginfo.rs | 9 ++++----- src/librustc_codegen_ssa/mir/mod.rs | 2 +- 4 files changed, 9 insertions(+), 10 deletions(-) diff --git a/src/librustc_codegen_llvm/debuginfo/mod.rs b/src/librustc_codegen_llvm/debuginfo/mod.rs index c0869bb889afa..c262167d92092 100644 --- a/src/librustc_codegen_llvm/debuginfo/mod.rs +++ b/src/librustc_codegen_llvm/debuginfo/mod.rs @@ -32,7 +32,7 @@ use rustc_codegen_ssa::debuginfo::{FunctionDebugContext, MirDebugScope, Variable VariableKind, FunctionDebugContextData}; use libc::c_uint; -use std::cell::{Cell, RefCell}; +use std::cell::RefCell; use std::ffi::CString; use syntax_pos::{self, Span, Pos}; @@ -158,7 +158,7 @@ impl DebugInfoBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { variable_kind: VariableKind, span: Span, ) { - assert!(!dbg_context.get_ref(span).source_locations_enabled.get()); + assert!(!dbg_context.get_ref(span).source_locations_enabled); let cx = self.cx(); let file = span_start(cx, span).file; @@ -327,7 +327,7 @@ impl DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> { // Initialize fn debug context (including scope map and namespace map) let fn_debug_context = FunctionDebugContextData { fn_metadata, - source_locations_enabled: Cell::new(false), + source_locations_enabled: false, defining_crate: def_id.krate, }; diff --git a/src/librustc_codegen_llvm/debuginfo/source_loc.rs b/src/librustc_codegen_llvm/debuginfo/source_loc.rs index f7620e11c233d..dec93a65dbaf4 100644 --- a/src/librustc_codegen_llvm/debuginfo/source_loc.rs +++ b/src/librustc_codegen_llvm/debuginfo/source_loc.rs @@ -30,7 +30,7 @@ pub fn set_source_location( FunctionDebugContext::RegularContext(ref data) => data }; - let dbg_loc = if function_debug_context.source_locations_enabled.get() { + let dbg_loc = if function_debug_context.source_locations_enabled { debug!("set_source_location: {}", bx.sess().source_map().span_to_string(span)); let loc = span_start(bx.cx(), span); InternalDebugLocation::new(scope.unwrap(), loc.line, loc.col.to_usize()) diff --git a/src/librustc_codegen_ssa/debuginfo.rs b/src/librustc_codegen_ssa/debuginfo.rs index c4531ff90ae7c..aa7cdbed99446 100644 --- a/src/librustc_codegen_ssa/debuginfo.rs +++ b/src/librustc_codegen_ssa/debuginfo.rs @@ -1,6 +1,5 @@ use syntax_pos::{BytePos, Span}; use 
rustc::hir::def_id::CrateNum; -use std::cell::Cell; pub enum FunctionDebugContext { RegularContext(FunctionDebugContextData), @@ -36,10 +35,10 @@ impl FunctionDebugContext { /// they are disabled when beginning to codegen a new function. This functions /// switches source location emitting on and must therefore be called before the /// first real statement/expression of the function is codegened. -pub fn start_emitting_source_locations(dbg_context: &FunctionDebugContext) { +pub fn start_emitting_source_locations(dbg_context: &mut FunctionDebugContext) { match *dbg_context { - FunctionDebugContext::RegularContext(ref data) => { - data.source_locations_enabled.set(true) + FunctionDebugContext::RegularContext(ref mut data) => { + data.source_locations_enabled = true; }, _ => { /* safe to ignore */ } } @@ -47,7 +46,7 @@ pub fn start_emitting_source_locations(dbg_context: &FunctionDebugContext) pub struct FunctionDebugContextData { pub fn_metadata: D, - pub source_locations_enabled: Cell, + pub source_locations_enabled: bool, pub defining_crate: CrateNum, } diff --git a/src/librustc_codegen_ssa/mir/mod.rs b/src/librustc_codegen_ssa/mir/mod.rs index 7a2fccfdc555a..0fd20bebfe917 100644 --- a/src/librustc_codegen_ssa/mir/mod.rs +++ b/src/librustc_codegen_ssa/mir/mod.rs @@ -334,7 +334,7 @@ pub fn codegen_mir<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( // Up until here, IR instructions for this function have explicitly not been annotated with // source code location, so we don't step into call setup code. From here on, source location // emitting should be enabled. - debuginfo::start_emitting_source_locations(&fx.debug_context); + debuginfo::start_emitting_source_locations(&mut fx.debug_context); let rpo = traversal::reverse_postorder(&mir); let mut visited = BitSet::new_empty(mir.basic_blocks().len()); From 18e59b766e7d881685973a775e77fdca5989ba71 Mon Sep 17 00:00:00 2001 From: bjorn3 Date: Sun, 2 Dec 2018 18:25:42 +0100 Subject: [PATCH 10/28] [WIP] Make some debug info methods take &mut FunctionDebugContext declare_local still takes &FunctionDebugContext, because of borrowck errors --- src/librustc_codegen_llvm/debuginfo/mod.rs | 4 ++-- src/librustc_codegen_ssa/mir/mod.rs | 14 +++++--------- src/librustc_codegen_ssa/traits/debuginfo.rs | 4 ++-- 3 files changed, 9 insertions(+), 13 deletions(-) diff --git a/src/librustc_codegen_llvm/debuginfo/mod.rs b/src/librustc_codegen_llvm/debuginfo/mod.rs index c262167d92092..066ac8f019e00 100644 --- a/src/librustc_codegen_llvm/debuginfo/mod.rs +++ b/src/librustc_codegen_llvm/debuginfo/mod.rs @@ -216,7 +216,7 @@ impl DebugInfoBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { fn set_source_location( &mut self, - debug_context: &FunctionDebugContext<&'ll DISubprogram>, + debug_context: &mut FunctionDebugContext<&'ll DISubprogram>, scope: Option<&'ll DIScope>, span: Span, ) { @@ -519,7 +519,7 @@ impl DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> { fn create_mir_scopes( &self, mir: &mir::Mir<'_>, - debug_context: &FunctionDebugContext<&'ll DISubprogram>, + debug_context: &mut FunctionDebugContext<&'ll DISubprogram>, ) -> IndexVec> { create_scope_map::create_mir_scopes(self, mir, debug_context) } diff --git a/src/librustc_codegen_ssa/mir/mod.rs b/src/librustc_codegen_ssa/mir/mod.rs index 0fd20bebfe917..9bf2d6e6e3288 100644 --- a/src/librustc_codegen_ssa/mir/mod.rs +++ b/src/librustc_codegen_ssa/mir/mod.rs @@ -104,7 +104,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { source_info: mir::SourceInfo ) { let (scope, span) = 
self.debug_loc(source_info); - bx.set_source_location(&self.debug_context, scope, span); + bx.set_source_location(&mut self.debug_context, scope, span); } pub fn debug_loc(&self, source_info: mir::SourceInfo) -> (Option, Span) { @@ -203,7 +203,7 @@ pub fn codegen_mir<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( let fn_ty = cx.new_fn_type(sig, &[]); debug!("fn_ty: {:?}", fn_ty); - let debug_context = + let mut debug_context = cx.create_function_debug_context(instance, sig, llfn, mir); let mut bx = Bx::new_block(cx, llfn, "start"); @@ -225,7 +225,7 @@ pub fn codegen_mir<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( }).collect(); // Compute debuginfo scopes from MIR scopes. - let scopes = cx.create_mir_scopes(mir, &debug_context); + let scopes = cx.create_mir_scopes(mir, &mut debug_context); let (landing_pads, funclets) = create_funclets(mir, &mut bx, &cleanup_kinds, &block_bxs); let mut fx = FunctionCx { @@ -253,7 +253,7 @@ pub fn codegen_mir<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( // FIXME(dlrobertson): This is ugly. Find a better way of getting the `PlaceRef` or // `LocalRef` from `arg_local_refs` let mut va_list_ref = None; - let args = arg_local_refs(&mut bx, &fx, &fx.scopes, &memory_locals, &mut va_list_ref); + let args = arg_local_refs(&mut bx, &fx, &memory_locals, &mut va_list_ref); fx.va_list_ref = va_list_ref; let mut allocate_local = |local| { @@ -430,10 +430,6 @@ fn create_funclets<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( bx: &mut Bx, fx: &FunctionCx<'a, 'tcx, Bx>, - scopes: &IndexVec< - mir::SourceScope, - debuginfo::MirDebugScope - >, memory_locals: &BitSet, va_list_ref: &mut Option>, ) -> Vec> { @@ -443,7 +439,7 @@ fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( let mut llarg_idx = fx.fn_ty.ret.is_indirect() as usize; // Get the argument scope, if it exists and if we need it. 
- let arg_scope = scopes[mir::OUTERMOST_SOURCE_SCOPE]; + let arg_scope = fx.scopes[mir::OUTERMOST_SOURCE_SCOPE]; let arg_scope = if bx.sess().opts.debuginfo == DebugInfo::Full { arg_scope.scope_metadata } else { diff --git a/src/librustc_codegen_ssa/traits/debuginfo.rs b/src/librustc_codegen_ssa/traits/debuginfo.rs index 135188e98c71c..dcbb4e66cb385 100644 --- a/src/librustc_codegen_ssa/traits/debuginfo.rs +++ b/src/librustc_codegen_ssa/traits/debuginfo.rs @@ -28,7 +28,7 @@ pub trait DebugInfoMethods<'tcx>: BackendTypes { fn create_mir_scopes( &self, mir: &mir::Mir<'_>, - debug_context: &FunctionDebugContext, + debug_context: &mut FunctionDebugContext, ) -> IndexVec>; fn extend_scope_to_file( &self, @@ -53,7 +53,7 @@ pub trait DebugInfoBuilderMethods<'tcx>: BackendTypes { ); fn set_source_location( &mut self, - debug_context: &FunctionDebugContext, + debug_context: &mut FunctionDebugContext, scope: Option, span: Span, ); From 231ff37c93f50946c6050292337bbcf8fc662e23 Mon Sep 17 00:00:00 2001 From: bjorn3 Date: Sun, 2 Dec 2018 18:54:46 +0100 Subject: [PATCH 11/28] Remove a lot of methods from BuilderMethods --- src/librustc_codegen_llvm/builder.rs | 421 ++++++++++----------- src/librustc_codegen_ssa/traits/builder.rs | 47 --- 2 files changed, 208 insertions(+), 260 deletions(-) diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index 3b3b437007aeb..27248d9687d58 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -133,19 +133,6 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } } - fn count_insn(&self, category: &str) { - if self.sess().codegen_stats() { - self.stats.borrow_mut().n_llvm_insns += 1; - } - if self.sess().count_llvm_insns() { - *self.stats - .borrow_mut() - .llvm_insns - .entry(category.to_string()) - .or_insert(0) += 1; - } - } - fn set_value_name(&mut self, value: &'ll Value, name: &str) { let cname = SmallCStr::new(name); unsafe { @@ -159,12 +146,6 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } } - fn position_at_start(&mut self, llbb: &'ll BasicBlock) { - unsafe { - llvm::LLVMRustPositionBuilderAtStart(self.llbuilder, llbb); - } - } - fn ret_void(&mut self) { self.count_insn("retvoid"); unsafe { @@ -897,17 +878,12 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } /* Miscellaneous instructions */ - fn empty_phi(&mut self, ty: &'ll Type) -> &'ll Value { - self.count_insn("emptyphi"); - unsafe { - llvm::LLVMBuildPhi(self.llbuilder, ty, noname()) - } - } - fn phi(&mut self, ty: &'ll Type, vals: &[&'ll Value], bbs: &[&'ll BasicBlock]) -> &'ll Value { - assert_eq!(vals.len(), bbs.len()); - let phi = self.empty_phi(ty); self.count_insn("addincoming"); + assert_eq!(vals.len(), bbs.len()); + let phi = unsafe { + llvm::LLVMBuildPhi(self.llbuilder, ty, noname()) + }; unsafe { llvm::LLVMAddIncoming(phi, vals.as_ptr(), bbs.as_ptr(), @@ -1012,15 +988,6 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { self.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None); } - fn minnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("minnum"); - unsafe { llvm::LLVMRustBuildMinNum(self.llbuilder, lhs, rhs) } - } - fn maxnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("maxnum"); - unsafe { llvm::LLVMRustBuildMaxNum(self.llbuilder, lhs, rhs) } - } - fn select( &mut self, cond: &'ll Value, then_val: &'ll Value, @@ -1032,14 +999,6 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> 
{ } } - #[allow(dead_code)] - fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value { - self.count_insn("vaarg"); - unsafe { - llvm::LLVMBuildVAArg(self.llbuilder, list, ty, noname()) - } - } - fn extract_element(&mut self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value { self.count_insn("extractelement"); unsafe { @@ -1047,24 +1006,6 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } } - fn insert_element( - &mut self, vec: &'ll Value, - elt: &'ll Value, - idx: &'ll Value, - ) -> &'ll Value { - self.count_insn("insertelement"); - unsafe { - llvm::LLVMBuildInsertElement(self.llbuilder, vec, elt, idx, noname()) - } - } - - fn shuffle_vector(&mut self, v1: &'ll Value, v2: &'ll Value, mask: &'ll Value) -> &'ll Value { - self.count_insn("shufflevector"); - unsafe { - llvm::LLVMBuildShuffleVector(self.llbuilder, v1, v2, mask, noname()) - } - } - fn vector_splat(&mut self, num_elts: usize, elt: &'ll Value) -> &'ll Value { unsafe { let elt_ty = self.cx.val_ty(elt); @@ -1075,81 +1016,6 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } } - fn vector_reduce_fadd_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value { - self.count_insn("vector.reduce.fadd_fast"); - unsafe { - // FIXME: add a non-fast math version once - // https://bugs.llvm.org/show_bug.cgi?id=36732 - // is fixed. - let instr = llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src); - llvm::LLVMRustSetHasUnsafeAlgebra(instr); - instr - } - } - fn vector_reduce_fmul_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value { - self.count_insn("vector.reduce.fmul_fast"); - unsafe { - // FIXME: add a non-fast math version once - // https://bugs.llvm.org/show_bug.cgi?id=36732 - // is fixed. - let instr = llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src); - llvm::LLVMRustSetHasUnsafeAlgebra(instr); - instr - } - } - fn vector_reduce_add(&mut self, src: &'ll Value) -> &'ll Value { - self.count_insn("vector.reduce.add"); - unsafe { llvm::LLVMRustBuildVectorReduceAdd(self.llbuilder, src) } - } - fn vector_reduce_mul(&mut self, src: &'ll Value) -> &'ll Value { - self.count_insn("vector.reduce.mul"); - unsafe { llvm::LLVMRustBuildVectorReduceMul(self.llbuilder, src) } - } - fn vector_reduce_and(&mut self, src: &'ll Value) -> &'ll Value { - self.count_insn("vector.reduce.and"); - unsafe { llvm::LLVMRustBuildVectorReduceAnd(self.llbuilder, src) } - } - fn vector_reduce_or(&mut self, src: &'ll Value) -> &'ll Value { - self.count_insn("vector.reduce.or"); - unsafe { llvm::LLVMRustBuildVectorReduceOr(self.llbuilder, src) } - } - fn vector_reduce_xor(&mut self, src: &'ll Value) -> &'ll Value { - self.count_insn("vector.reduce.xor"); - unsafe { llvm::LLVMRustBuildVectorReduceXor(self.llbuilder, src) } - } - fn vector_reduce_fmin(&mut self, src: &'ll Value) -> &'ll Value { - self.count_insn("vector.reduce.fmin"); - unsafe { llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ false) } - } - fn vector_reduce_fmax(&mut self, src: &'ll Value) -> &'ll Value { - self.count_insn("vector.reduce.fmax"); - unsafe { llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ false) } - } - fn vector_reduce_fmin_fast(&mut self, src: &'ll Value) -> &'ll Value { - self.count_insn("vector.reduce.fmin_fast"); - unsafe { - let instr = llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ true); - llvm::LLVMRustSetHasUnsafeAlgebra(instr); - instr - } - } - fn vector_reduce_fmax_fast(&mut self, src: &'ll Value) -> &'ll Value { - 
self.count_insn("vector.reduce.fmax_fast"); - unsafe { - let instr = llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ true); - llvm::LLVMRustSetHasUnsafeAlgebra(instr); - instr - } - } - fn vector_reduce_min(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value { - self.count_insn("vector.reduce.min"); - unsafe { llvm::LLVMRustBuildVectorReduceMin(self.llbuilder, src, is_signed) } - } - fn vector_reduce_max(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value { - self.count_insn("vector.reduce.max"); - unsafe { llvm::LLVMRustBuildVectorReduceMax(self.llbuilder, src, is_signed) } - } - fn extract_value(&mut self, agg_val: &'ll Value, idx: u64) -> &'ll Value { self.count_insn("extractvalue"); assert_eq!(idx as c_uint as u64, idx); @@ -1177,12 +1043,6 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } } - fn add_clause(&mut self, landing_pad: &'ll Value, clause: &'ll Value) { - unsafe { - llvm::LLVMAddClause(landing_pad, clause); - } - } - fn set_cleanup(&mut self, landing_pad: &'ll Value) { self.count_insn("setcleanup"); unsafe { @@ -1236,14 +1096,6 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { Funclet::new(ret.expect("LLVM does not have support for catchpad")) } - fn catch_ret(&mut self, funclet: &Funclet<'ll>, unwind: &'ll BasicBlock) -> &'ll Value { - self.count_insn("catchret"); - let ret = unsafe { - llvm::LLVMRustBuildCatchRet(self.llbuilder, funclet.cleanuppad(), unwind) - }; - ret.expect("LLVM does not have support for catchret") - } - fn catch_switch( &mut self, parent: Option<&'ll Value>, @@ -1347,67 +1199,6 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } } - fn check_store<'b>(&mut self, - val: &'ll Value, - ptr: &'ll Value) -> &'ll Value { - let dest_ptr_ty = self.cx.val_ty(ptr); - let stored_ty = self.cx.val_ty(val); - let stored_ptr_ty = self.cx.type_ptr_to(stored_ty); - - assert_eq!(self.cx.type_kind(dest_ptr_ty), TypeKind::Pointer); - - if dest_ptr_ty == stored_ptr_ty { - ptr - } else { - debug!("Type mismatch in store. \ - Expected {:?}, got {:?}; inserting bitcast", - dest_ptr_ty, stored_ptr_ty); - self.bitcast(ptr, stored_ptr_ty) - } - } - - fn check_call<'b>(&mut self, - typ: &str, - llfn: &'ll Value, - args: &'b [&'ll Value]) -> Cow<'b, [&'ll Value]> { - let mut fn_ty = self.cx.val_ty(llfn); - // Strip off pointers - while self.cx.type_kind(fn_ty) == TypeKind::Pointer { - fn_ty = self.cx.element_type(fn_ty); - } - - assert!(self.cx.type_kind(fn_ty) == TypeKind::Function, - "builder::{} not passed a function, but {:?}", typ, fn_ty); - - let param_tys = self.cx.func_params_types(fn_ty); - - let all_args_match = param_tys.iter() - .zip(args.iter().map(|&v| self.val_ty(v))) - .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty); - - if all_args_match { - return Cow::Borrowed(args); - } - - let casted_args: Vec<_> = param_tys.into_iter() - .zip(args.iter()) - .enumerate() - .map(|(i, (expected_ty, &actual_val))| { - let actual_ty = self.val_ty(actual_val); - if expected_ty != actual_ty { - debug!("Type mismatch in function call of {:?}. 
\ - Expected {:?} for param {}, got {:?}; injecting bitcast", - llfn, expected_ty, i, actual_ty); - self.bitcast(actual_val, expected_ty) - } else { - actual_val - } - }) - .collect(); - - Cow::Owned(casted_args) - } - fn lifetime_start(&mut self, ptr: &'ll Value, size: Size) { self.call_lifetime_intrinsic("llvm.lifetime.start", ptr, size); } @@ -1509,6 +1300,210 @@ impl StaticBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { } impl Builder<'a, 'll, 'tcx> { + fn count_insn(&self, category: &str) { + if self.sess().codegen_stats() { + self.stats.borrow_mut().n_llvm_insns += 1; + } + if self.sess().count_llvm_insns() { + *self.stats + .borrow_mut() + .llvm_insns + .entry(category.to_string()) + .or_insert(0) += 1; + } + } + + fn position_at_start(&mut self, llbb: &'ll BasicBlock) { + unsafe { + llvm::LLVMRustPositionBuilderAtStart(self.llbuilder, llbb); + } + } + + pub fn minnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("minnum"); + unsafe { llvm::LLVMRustBuildMinNum(self.llbuilder, lhs, rhs) } + } + + pub fn maxnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("maxnum"); + unsafe { llvm::LLVMRustBuildMaxNum(self.llbuilder, lhs, rhs) } + } + + pub fn insert_element( + &mut self, vec: &'ll Value, + elt: &'ll Value, + idx: &'ll Value, + ) -> &'ll Value { + self.count_insn("insertelement"); + unsafe { + llvm::LLVMBuildInsertElement(self.llbuilder, vec, elt, idx, noname()) + } + } + + pub fn shuffle_vector(&mut self, v1: &'ll Value, v2: &'ll Value, mask: &'ll Value) -> &'ll Value { + self.count_insn("shufflevector"); + unsafe { + llvm::LLVMBuildShuffleVector(self.llbuilder, v1, v2, mask, noname()) + } + } + + pub fn vector_reduce_fadd_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value { + self.count_insn("vector.reduce.fadd_fast"); + unsafe { + // FIXME: add a non-fast math version once + // https://bugs.llvm.org/show_bug.cgi?id=36732 + // is fixed. + let instr = llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src); + llvm::LLVMRustSetHasUnsafeAlgebra(instr); + instr + } + } + pub fn vector_reduce_fmul_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value { + self.count_insn("vector.reduce.fmul_fast"); + unsafe { + // FIXME: add a non-fast math version once + // https://bugs.llvm.org/show_bug.cgi?id=36732 + // is fixed. 
+ let instr = llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src); + llvm::LLVMRustSetHasUnsafeAlgebra(instr); + instr + } + } + pub fn vector_reduce_add(&mut self, src: &'ll Value) -> &'ll Value { + self.count_insn("vector.reduce.add"); + unsafe { llvm::LLVMRustBuildVectorReduceAdd(self.llbuilder, src) } + } + pub fn vector_reduce_mul(&mut self, src: &'ll Value) -> &'ll Value { + self.count_insn("vector.reduce.mul"); + unsafe { llvm::LLVMRustBuildVectorReduceMul(self.llbuilder, src) } + } + pub fn vector_reduce_and(&mut self, src: &'ll Value) -> &'ll Value { + self.count_insn("vector.reduce.and"); + unsafe { llvm::LLVMRustBuildVectorReduceAnd(self.llbuilder, src) } + } + pub fn vector_reduce_or(&mut self, src: &'ll Value) -> &'ll Value { + self.count_insn("vector.reduce.or"); + unsafe { llvm::LLVMRustBuildVectorReduceOr(self.llbuilder, src) } + } + pub fn vector_reduce_xor(&mut self, src: &'ll Value) -> &'ll Value { + self.count_insn("vector.reduce.xor"); + unsafe { llvm::LLVMRustBuildVectorReduceXor(self.llbuilder, src) } + } + pub fn vector_reduce_fmin(&mut self, src: &'ll Value) -> &'ll Value { + self.count_insn("vector.reduce.fmin"); + unsafe { llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ false) } + } + pub fn vector_reduce_fmax(&mut self, src: &'ll Value) -> &'ll Value { + self.count_insn("vector.reduce.fmax"); + unsafe { llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ false) } + } + pub fn vector_reduce_fmin_fast(&mut self, src: &'ll Value) -> &'ll Value { + self.count_insn("vector.reduce.fmin_fast"); + unsafe { + let instr = llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ true); + llvm::LLVMRustSetHasUnsafeAlgebra(instr); + instr + } + } + pub fn vector_reduce_fmax_fast(&mut self, src: &'ll Value) -> &'ll Value { + self.count_insn("vector.reduce.fmax_fast"); + unsafe { + let instr = llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ true); + llvm::LLVMRustSetHasUnsafeAlgebra(instr); + instr + } + } + pub fn vector_reduce_min(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value { + self.count_insn("vector.reduce.min"); + unsafe { llvm::LLVMRustBuildVectorReduceMin(self.llbuilder, src, is_signed) } + } + pub fn vector_reduce_max(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value { + self.count_insn("vector.reduce.max"); + unsafe { llvm::LLVMRustBuildVectorReduceMax(self.llbuilder, src, is_signed) } + } + + pub fn add_clause(&mut self, landing_pad: &'ll Value, clause: &'ll Value) { + unsafe { + llvm::LLVMAddClause(landing_pad, clause); + } + } + + pub fn catch_ret(&mut self, funclet: &Funclet<'ll>, unwind: &'ll BasicBlock) -> &'ll Value { + self.count_insn("catchret"); + let ret = unsafe { + llvm::LLVMRustBuildCatchRet(self.llbuilder, funclet.cleanuppad(), unwind) + }; + ret.expect("LLVM does not have support for catchret") + } + + fn check_store<'b>(&mut self, + val: &'ll Value, + ptr: &'ll Value) -> &'ll Value { + let dest_ptr_ty = self.cx.val_ty(ptr); + let stored_ty = self.cx.val_ty(val); + let stored_ptr_ty = self.cx.type_ptr_to(stored_ty); + + assert_eq!(self.cx.type_kind(dest_ptr_ty), TypeKind::Pointer); + + if dest_ptr_ty == stored_ptr_ty { + ptr + } else { + debug!("Type mismatch in store. 
\ + Expected {:?}, got {:?}; inserting bitcast", + dest_ptr_ty, stored_ptr_ty); + self.bitcast(ptr, stored_ptr_ty) + } + } + + fn check_call<'b>(&mut self, + typ: &str, + llfn: &'ll Value, + args: &'b [&'ll Value]) -> Cow<'b, [&'ll Value]> { + let mut fn_ty = self.cx.val_ty(llfn); + // Strip off pointers + while self.cx.type_kind(fn_ty) == TypeKind::Pointer { + fn_ty = self.cx.element_type(fn_ty); + } + + assert!(self.cx.type_kind(fn_ty) == TypeKind::Function, + "builder::{} not passed a function, but {:?}", typ, fn_ty); + + let param_tys = self.cx.func_params_types(fn_ty); + + let all_args_match = param_tys.iter() + .zip(args.iter().map(|&v| self.val_ty(v))) + .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty); + + if all_args_match { + return Cow::Borrowed(args); + } + + let casted_args: Vec<_> = param_tys.into_iter() + .zip(args.iter()) + .enumerate() + .map(|(i, (expected_ty, &actual_val))| { + let actual_ty = self.val_ty(actual_val); + if expected_ty != actual_ty { + debug!("Type mismatch in function call of {:?}. \ + Expected {:?} for param {}, got {:?}; injecting bitcast", + llfn, expected_ty, i, actual_ty); + self.bitcast(actual_val, expected_ty) + } else { + actual_val + } + }) + .collect(); + + Cow::Owned(casted_args) + } + + pub fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value { + self.count_insn("vaarg"); + unsafe { + llvm::LLVMBuildVAArg(self.llbuilder, list, ty, noname()) + } + } + fn call_lifetime_intrinsic(&mut self, intrinsic: &str, ptr: &'ll Value, size: Size) { if self.cx.sess().opts.optimize == config::OptLevel::No { return; diff --git a/src/librustc_codegen_ssa/traits/builder.rs b/src/librustc_codegen_ssa/traits/builder.rs index 5099107a39303..6dd2a36bf2648 100644 --- a/src/librustc_codegen_ssa/traits/builder.rs +++ b/src/librustc_codegen_ssa/traits/builder.rs @@ -13,7 +13,6 @@ use rustc::ty::Ty; use rustc::ty::layout::{Align, Size}; use std::ffi::CStr; -use std::borrow::Cow; use std::ops::Range; use syntax::ast::AsmDialect; @@ -39,11 +38,9 @@ pub trait BuilderMethods<'a, 'tcx: 'a>: fn cx(&self) -> &Self::CodegenCx; fn llfn(&self) -> Self::Value; fn llbb(&self) -> Self::BasicBlock; - fn count_insn(&self, category: &str); fn set_value_name(&mut self, value: Self::Value, name: &str); fn position_at_end(&mut self, llbb: Self::BasicBlock); - fn position_at_start(&mut self, llbb: Self::BasicBlock); fn ret_void(&mut self); fn ret(&mut self, v: Self::Value); fn br(&mut self, dest: Self::BasicBlock); @@ -161,7 +158,6 @@ pub trait BuilderMethods<'a, 'tcx: 'a>: fn icmp(&mut self, op: IntPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value; fn fcmp(&mut self, op: RealPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn empty_phi(&mut self, ty: Self::Type) -> Self::Value; fn phi( &mut self, ty: Self::Type, @@ -206,8 +202,6 @@ pub trait BuilderMethods<'a, 'tcx: 'a>: flags: MemFlags, ); - fn minnum(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn maxnum(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; fn select( &mut self, cond: Self::Value, @@ -215,34 +209,8 @@ pub trait BuilderMethods<'a, 'tcx: 'a>: else_val: Self::Value, ) -> Self::Value; - fn va_arg(&mut self, list: Self::Value, ty: Self::Type) -> Self::Value; fn extract_element(&mut self, vec: Self::Value, idx: Self::Value) -> Self::Value; - fn insert_element( - &mut self, - vec: Self::Value, - elt: Self::Value, - idx: Self::Value, - ) -> Self::Value; - fn shuffle_vector( - &mut self, - v1: Self::Value, - v2: Self::Value, - mask: Self::Value, - 
) -> Self::Value; fn vector_splat(&mut self, num_elts: usize, elt: Self::Value) -> Self::Value; - fn vector_reduce_fadd_fast(&mut self, acc: Self::Value, src: Self::Value) -> Self::Value; - fn vector_reduce_fmul_fast(&mut self, acc: Self::Value, src: Self::Value) -> Self::Value; - fn vector_reduce_add(&mut self, src: Self::Value) -> Self::Value; - fn vector_reduce_mul(&mut self, src: Self::Value) -> Self::Value; - fn vector_reduce_and(&mut self, src: Self::Value) -> Self::Value; - fn vector_reduce_or(&mut self, src: Self::Value) -> Self::Value; - fn vector_reduce_xor(&mut self, src: Self::Value) -> Self::Value; - fn vector_reduce_fmin(&mut self, src: Self::Value) -> Self::Value; - fn vector_reduce_fmax(&mut self, src: Self::Value) -> Self::Value; - fn vector_reduce_fmin_fast(&mut self, src: Self::Value) -> Self::Value; - fn vector_reduce_fmax_fast(&mut self, src: Self::Value) -> Self::Value; - fn vector_reduce_min(&mut self, src: Self::Value, is_signed: bool) -> Self::Value; - fn vector_reduce_max(&mut self, src: Self::Value, is_signed: bool) -> Self::Value; fn extract_value(&mut self, agg_val: Self::Value, idx: u64) -> Self::Value; fn insert_value(&mut self, agg_val: Self::Value, elt: Self::Value, idx: u64) -> Self::Value; @@ -252,7 +220,6 @@ pub trait BuilderMethods<'a, 'tcx: 'a>: pers_fn: Self::Value, num_clauses: usize, ) -> Self::Value; - fn add_clause(&mut self, landing_pad: Self::Value, clause: Self::Value); fn set_cleanup(&mut self, landing_pad: Self::Value); fn resume(&mut self, exn: Self::Value) -> Self::Value; fn cleanup_pad(&mut self, parent: Option, args: &[Self::Value]) -> Self::Funclet; @@ -262,7 +229,6 @@ pub trait BuilderMethods<'a, 'tcx: 'a>: unwind: Option, ) -> Self::Value; fn catch_pad(&mut self, parent: Self::Value, args: &[Self::Value]) -> Self::Funclet; - fn catch_ret(&mut self, funclet: &Self::Funclet, unwind: Self::BasicBlock) -> Self::Value; fn catch_switch( &mut self, parent: Option, @@ -293,19 +259,6 @@ pub trait BuilderMethods<'a, 'tcx: 'a>: fn add_incoming_to_phi(&mut self, phi: Self::Value, val: Self::Value, bb: Self::BasicBlock); fn set_invariant_load(&mut self, load: Self::Value); - /// Returns the ptr value that should be used for storing `val`. - fn check_store(&mut self, val: Self::Value, ptr: Self::Value) -> Self::Value; - - /// Returns the args that should be used for a call to `llfn`. 
- fn check_call<'b>( - &mut self, - typ: &str, - llfn: Self::Value, - args: &'b [Self::Value], - ) -> Cow<'b, [Self::Value]> - where - [Self::Value]: ToOwned; - /// Called for `StorageLive` fn lifetime_start(&mut self, ptr: Self::Value, size: Size); From d24125e03574d6f83fd2df7dd8afa8e043976865 Mon Sep 17 00:00:00 2001 From: bjorn3 Date: Tue, 4 Dec 2018 20:20:45 +0100 Subject: [PATCH 12/28] Rebase fallout --- src/librustc_codegen_llvm/builder.rs | 10 +++++++++- src/librustc_codegen_ssa/traits/builder.rs | 1 + 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index 27248d9687d58..102b7c195638f 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -999,6 +999,14 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } } + #[allow(dead_code)] + fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value { + self.count_insn("vaarg"); + unsafe { + llvm::LLVMBuildVAArg(self.llbuilder, list, ty, noname()) + } + } + fn extract_element(&mut self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value { self.count_insn("extractelement"); unsafe { @@ -1263,7 +1271,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } impl StaticBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { - fn get_static(&mut self, def_id: DefId) -> &'ll Value { +fn get_static(&mut self, def_id: DefId) -> &'ll Value { // Forward to the `get_static` method of `CodegenCx` self.cx().get_static(def_id) } diff --git a/src/librustc_codegen_ssa/traits/builder.rs b/src/librustc_codegen_ssa/traits/builder.rs index 6dd2a36bf2648..e0795e3c7aa24 100644 --- a/src/librustc_codegen_ssa/traits/builder.rs +++ b/src/librustc_codegen_ssa/traits/builder.rs @@ -209,6 +209,7 @@ pub trait BuilderMethods<'a, 'tcx: 'a>: else_val: Self::Value, ) -> Self::Value; + fn va_arg(&mut self, list: Self::Value, ty: Self::Type) -> Self::Value; fn extract_element(&mut self, vec: Self::Value, idx: Self::Value) -> Self::Value; fn vector_splat(&mut self, num_elts: usize, elt: Self::Value) -> Self::Value; fn extract_value(&mut self, agg_val: Self::Value, idx: u64) -> Self::Value; From 13d5c030351d36223acb73b8bf316886e20c5957 Mon Sep 17 00:00:00 2001 From: bjorn3 Date: Thu, 6 Dec 2018 18:46:41 +0100 Subject: [PATCH 13/28] Split memory related methods out of BuilderMethods and move get_param from Cx to Bx --- src/librustc_codegen_llvm/abi.rs | 4 + src/librustc_codegen_llvm/builder.rs | 962 +++++++++---------- src/librustc_codegen_llvm/context.rs | 5 - src/librustc_codegen_llvm/debuginfo/mod.rs | 7 + src/librustc_codegen_llvm/va_arg.rs | 5 +- src/librustc_codegen_ssa/base.rs | 4 +- src/librustc_codegen_ssa/mir/mod.rs | 18 +- src/librustc_codegen_ssa/mir/place.rs | 4 +- src/librustc_codegen_ssa/traits/abi.rs | 1 + src/librustc_codegen_ssa/traits/builder.rs | 83 +- src/librustc_codegen_ssa/traits/debuginfo.rs | 1 + src/librustc_codegen_ssa/traits/misc.rs | 1 - src/librustc_codegen_ssa/traits/mod.rs | 2 +- 13 files changed, 551 insertions(+), 546 deletions(-) diff --git a/src/librustc_codegen_llvm/abi.rs b/src/librustc_codegen_llvm/abi.rs index 49c9555a2c682..3a0d9e1334cf6 100644 --- a/src/librustc_codegen_llvm/abi.rs +++ b/src/librustc_codegen_llvm/abi.rs @@ -859,4 +859,8 @@ impl AbiBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { ) { ty.apply_attrs_callsite(self, callsite) } + + fn get_param(&self, index: usize) -> Self::Value { + llvm::get_param(self.llfn(), index as c_uint) + } } diff --git 
a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index 102b7c195638f..7a80b4086035f 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -87,6 +87,275 @@ impl HasCodegen<'tcx> for Builder<'_, 'll, 'tcx> { type CodegenCx = CodegenCx<'ll, 'tcx>; } +impl MemoryBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { + fn alloca(&mut self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value { + let mut bx = Builder::with_cx(self.cx); + bx.position_at_start(unsafe { + llvm::LLVMGetFirstBasicBlock(self.llfn()) + }); + bx.dynamic_alloca(ty, name, align) + } + + fn dynamic_alloca(&mut self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value { + self.count_insn("alloca"); + unsafe { + let alloca = if name.is_empty() { + llvm::LLVMBuildAlloca(self.llbuilder, ty, noname()) + } else { + let name = SmallCStr::new(name); + llvm::LLVMBuildAlloca(self.llbuilder, ty, + name.as_ptr()) + }; + llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint); + alloca + } + } + + fn array_alloca(&mut self, + ty: &'ll Type, + len: &'ll Value, + name: &str, + align: Align) -> &'ll Value { + self.count_insn("alloca"); + unsafe { + let alloca = if name.is_empty() { + llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, noname()) + } else { + let name = SmallCStr::new(name); + llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, + name.as_ptr()) + }; + llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint); + alloca + } + } + + fn load(&mut self, ptr: &'ll Value, align: Align) -> &'ll Value { + self.count_insn("load"); + unsafe { + let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname()); + llvm::LLVMSetAlignment(load, align.bytes() as c_uint); + load + } + } + + fn volatile_load(&mut self, ptr: &'ll Value) -> &'ll Value { + self.count_insn("load.volatile"); + unsafe { + let insn = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname()); + llvm::LLVMSetVolatile(insn, llvm::True); + insn + } + } + + fn atomic_load( + &mut self, + ptr: &'ll Value, + order: rustc_codegen_ssa::common::AtomicOrdering, + size: Size, + ) -> &'ll Value { + self.count_insn("load.atomic"); + unsafe { + let load = llvm::LLVMRustBuildAtomicLoad( + self.llbuilder, + ptr, + noname(), + AtomicOrdering::from_generic(order), + ); + // LLVM requires the alignment of atomic loads to be at least the size of the type. + llvm::LLVMSetAlignment(load, size.bytes() as c_uint); + load + } + } + + fn load_operand( + &mut self, + place: PlaceRef<'tcx, &'ll Value> + ) -> OperandRef<'tcx, &'ll Value> { + debug!("PlaceRef::load: {:?}", place); + + assert_eq!(place.llextra.is_some(), place.layout.is_unsized()); + + if place.layout.is_zst() { + return OperandRef::new_zst(self, place.layout); + } + + fn scalar_load_metadata<'a, 'll, 'tcx>( + bx: &mut Builder<'a, 'll, 'tcx>, + load: &'ll Value, + scalar: &layout::Scalar + ) { + let vr = scalar.valid_range.clone(); + match scalar.value { + layout::Int(..) 
=> { + let range = scalar.valid_range_exclusive(bx); + if range.start != range.end { + bx.range_metadata(load, range); + } + } + layout::Pointer if vr.start() < vr.end() && !vr.contains(&0) => { + bx.nonnull_metadata(load); + } + _ => {} + } + } + + let val = if let Some(llextra) = place.llextra { + OperandValue::Ref(place.llval, Some(llextra), place.align) + } else if place.layout.is_llvm_immediate() { + let mut const_llval = None; + unsafe { + if let Some(global) = llvm::LLVMIsAGlobalVariable(place.llval) { + if llvm::LLVMIsGlobalConstant(global) == llvm::True { + const_llval = llvm::LLVMGetInitializer(global); + } + } + } + let llval = const_llval.unwrap_or_else(|| { + let load = self.load(place.llval, place.align); + if let layout::Abi::Scalar(ref scalar) = place.layout.abi { + scalar_load_metadata(self, load, scalar); + } + load + }); + OperandValue::Immediate(to_immediate(self, llval, place.layout)) + } else if let layout::Abi::ScalarPair(ref a, ref b) = place.layout.abi { + let b_offset = a.value.size(self).align_to(b.value.align(self).abi); + + let mut load = |i, scalar: &layout::Scalar, align| { + let llptr = self.struct_gep(place.llval, i as u64); + let load = self.load(llptr, align); + scalar_load_metadata(self, load, scalar); + if scalar.is_bool() { + self.trunc(load, self.type_i1()) + } else { + load + } + }; + + OperandValue::Pair( + load(0, a, place.align), + load(1, b, place.align.restrict_for_offset(b_offset)), + ) + } else { + OperandValue::Ref(place.llval, None, place.align) + }; + + OperandRef { val, layout: place.layout } + } + + + + fn range_metadata(&mut self, load: &'ll Value, range: Range) { + if self.sess().target.target.arch == "amdgpu" { + // amdgpu/LLVM does something weird and thinks a i64 value is + // split into a v2i32, halving the bitwidth LLVM expects, + // tripping an assertion. So, for now, just disable this + // optimization. + return; + } + + unsafe { + let llty = self.cx.val_ty(load); + let v = [ + self.cx.const_uint_big(llty, range.start), + self.cx.const_uint_big(llty, range.end) + ]; + + llvm::LLVMSetMetadata(load, llvm::MD_range as c_uint, + llvm::LLVMMDNodeInContext(self.cx.llcx, + v.as_ptr(), + v.len() as c_uint)); + } + } + + fn nonnull_metadata(&mut self, load: &'ll Value) { + unsafe { + llvm::LLVMSetMetadata(load, llvm::MD_nonnull as c_uint, + llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0)); + } + } + + fn store(&mut self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value { + self.store_with_flags(val, ptr, align, MemFlags::empty()) + } + + fn store_with_flags( + &mut self, + val: &'ll Value, + ptr: &'ll Value, + align: Align, + flags: MemFlags, + ) -> &'ll Value { + debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags); + self.count_insn("store"); + let ptr = self.check_store(val, ptr); + unsafe { + let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr); + let align = if flags.contains(MemFlags::UNALIGNED) { + 1 + } else { + align.bytes() as c_uint + }; + llvm::LLVMSetAlignment(store, align); + if flags.contains(MemFlags::VOLATILE) { + llvm::LLVMSetVolatile(store, llvm::True); + } + if flags.contains(MemFlags::NONTEMPORAL) { + // According to LLVM [1] building a nontemporal store must + // *always* point to a metadata value of the integer 1. 
+ // + // [1]: http://llvm.org/docs/LangRef.html#store-instruction + let one = self.cx.const_i32(1); + let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1); + llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node); + } + store + } + } + + fn atomic_store(&mut self, val: &'ll Value, ptr: &'ll Value, + order: rustc_codegen_ssa::common::AtomicOrdering, size: Size) { + debug!("Store {:?} -> {:?}", val, ptr); + self.count_insn("store.atomic"); + let ptr = self.check_store(val, ptr); + unsafe { + let store = llvm::LLVMRustBuildAtomicStore( + self.llbuilder, + val, + ptr, + AtomicOrdering::from_generic(order), + ); + // LLVM requires the alignment of atomic stores to be at least the size of the type. + llvm::LLVMSetAlignment(store, size.bytes() as c_uint); + } + } + + fn gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value { + self.count_insn("gep"); + unsafe { + llvm::LLVMBuildGEP(self.llbuilder, ptr, indices.as_ptr(), + indices.len() as c_uint, noname()) + } + } + + fn inbounds_gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value { + self.count_insn("inboundsgep"); + unsafe { + llvm::LLVMBuildInBoundsGEP( + self.llbuilder, ptr, indices.as_ptr(), indices.len() as c_uint, noname()) + } + } + + fn struct_gep(&mut self, ptr: &'ll Value, idx: u64) -> &'ll Value { + self.count_insn("structgep"); + assert_eq!(idx as c_uint as u64, idx); + unsafe { + llvm::LLVMBuildStructGEP(self.llbuilder, ptr, idx as c_uint, noname()) + } + } +} + impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { fn new_block<'b>( cx: &'a CodegenCx<'ll, 'tcx>, @@ -121,25 +390,12 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { Builder::new_block(self.cx, self.llfn(), name) } - fn llfn(&self) -> &'ll Value { - unsafe { - llvm::LLVMGetBasicBlockParent(self.llbb()) - } - } - fn llbb(&self) -> &'ll BasicBlock { unsafe { llvm::LLVMGetInsertBlock(self.llbuilder) } } - fn set_value_name(&mut self, value: &'ll Value, name: &str) { - let cname = SmallCStr::new(name); - unsafe { - llvm::LLVMSetValueName(value, cname.as_ptr()); - } - } - fn position_at_end(&mut self, llbb: &'ll BasicBlock) { unsafe { llvm::LLVMPositionBuilderAtEnd(self.llbuilder, llbb); @@ -216,558 +472,299 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { then, catch, bundle, - noname()) - } - } - - fn unreachable(&mut self) { - self.count_insn("unreachable"); - unsafe { - llvm::LLVMBuildUnreachable(self.llbuilder); - } - } - - /* Arithmetic */ - fn add(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("add"); - unsafe { - llvm::LLVMBuildAdd(self.llbuilder, lhs, rhs, noname()) - } - } - - fn fadd(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("fadd"); - unsafe { - llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname()) - } - } - - fn fadd_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("fadd"); - unsafe { - let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname()); - llvm::LLVMRustSetHasUnsafeAlgebra(instr); - instr - } - } - - fn sub(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("sub"); - unsafe { - llvm::LLVMBuildSub(self.llbuilder, lhs, rhs, noname()) - } - } - - fn fsub(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("fsub"); - unsafe { - llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname()) - } - } - - fn fsub_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("fsub"); - unsafe { - 
let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname()); - llvm::LLVMRustSetHasUnsafeAlgebra(instr); - instr - } - } - - fn mul(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("mul"); - unsafe { - llvm::LLVMBuildMul(self.llbuilder, lhs, rhs, noname()) - } - } - - fn fmul(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("fmul"); - unsafe { - llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname()) - } - } - - fn fmul_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("fmul"); - unsafe { - let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname()); - llvm::LLVMRustSetHasUnsafeAlgebra(instr); - instr - } - } - - - fn udiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("udiv"); - unsafe { - llvm::LLVMBuildUDiv(self.llbuilder, lhs, rhs, noname()) - } - } - - fn exactudiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("exactudiv"); - unsafe { - llvm::LLVMBuildExactUDiv(self.llbuilder, lhs, rhs, noname()) - } - } - - fn sdiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("sdiv"); - unsafe { - llvm::LLVMBuildSDiv(self.llbuilder, lhs, rhs, noname()) - } - } - - fn exactsdiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("exactsdiv"); - unsafe { - llvm::LLVMBuildExactSDiv(self.llbuilder, lhs, rhs, noname()) - } - } - - fn fdiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("fdiv"); - unsafe { - llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname()) - } - } - - fn fdiv_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("fdiv"); - unsafe { - let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname()); - llvm::LLVMRustSetHasUnsafeAlgebra(instr); - instr - } - } - - fn urem(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("urem"); - unsafe { - llvm::LLVMBuildURem(self.llbuilder, lhs, rhs, noname()) - } - } - - fn srem(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("srem"); - unsafe { - llvm::LLVMBuildSRem(self.llbuilder, lhs, rhs, noname()) - } - } - - fn frem(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("frem"); - unsafe { - llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname()) - } - } - - fn frem_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("frem"); - unsafe { - let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname()); - llvm::LLVMRustSetHasUnsafeAlgebra(instr); - instr - } - } - - fn shl(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("shl"); - unsafe { - llvm::LLVMBuildShl(self.llbuilder, lhs, rhs, noname()) - } - } - - fn lshr(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("lshr"); - unsafe { - llvm::LLVMBuildLShr(self.llbuilder, lhs, rhs, noname()) + noname()) } } - fn ashr(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("ashr"); + fn unreachable(&mut self) { + self.count_insn("unreachable"); unsafe { - llvm::LLVMBuildAShr(self.llbuilder, lhs, rhs, noname()) + llvm::LLVMBuildUnreachable(self.llbuilder); } } - fn and(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("and"); + /* Arithmetic */ + fn add(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("add"); unsafe { - 
llvm::LLVMBuildAnd(self.llbuilder, lhs, rhs, noname()) + llvm::LLVMBuildAdd(self.llbuilder, lhs, rhs, noname()) } } - fn or(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("or"); + fn fadd(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("fadd"); unsafe { - llvm::LLVMBuildOr(self.llbuilder, lhs, rhs, noname()) + llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname()) } } - fn xor(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("xor"); + fn fadd_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("fadd"); unsafe { - llvm::LLVMBuildXor(self.llbuilder, lhs, rhs, noname()) + let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname()); + llvm::LLVMRustSetHasUnsafeAlgebra(instr); + instr } } - fn neg(&mut self, v: &'ll Value) -> &'ll Value { - self.count_insn("neg"); + fn sub(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("sub"); unsafe { - llvm::LLVMBuildNeg(self.llbuilder, v, noname()) + llvm::LLVMBuildSub(self.llbuilder, lhs, rhs, noname()) } } - fn fneg(&mut self, v: &'ll Value) -> &'ll Value { - self.count_insn("fneg"); + fn fsub(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("fsub"); unsafe { - llvm::LLVMBuildFNeg(self.llbuilder, v, noname()) + llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname()) } } - fn not(&mut self, v: &'ll Value) -> &'ll Value { - self.count_insn("not"); + fn fsub_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("fsub"); unsafe { - llvm::LLVMBuildNot(self.llbuilder, v, noname()) + let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname()); + llvm::LLVMRustSetHasUnsafeAlgebra(instr); + instr } } - fn checked_binop( - &mut self, - oop: OverflowOp, - ty: Ty<'_>, - lhs: Self::Value, - rhs: Self::Value, - ) -> (Self::Value, Self::Value) { - use syntax::ast::IntTy::*; - use syntax::ast::UintTy::*; - use rustc::ty::{Int, Uint}; - - let new_sty = match ty.sty { - Int(Isize) => Int(self.tcx.sess.target.isize_ty), - Uint(Usize) => Uint(self.tcx.sess.target.usize_ty), - ref t @ Uint(_) | ref t @ Int(_) => t.clone(), - _ => panic!("tried to get overflow intrinsic for op applied to non-int type") - }; - - let name = match oop { - OverflowOp::Add => match new_sty { - Int(I8) => "llvm.sadd.with.overflow.i8", - Int(I16) => "llvm.sadd.with.overflow.i16", - Int(I32) => "llvm.sadd.with.overflow.i32", - Int(I64) => "llvm.sadd.with.overflow.i64", - Int(I128) => "llvm.sadd.with.overflow.i128", - - Uint(U8) => "llvm.uadd.with.overflow.i8", - Uint(U16) => "llvm.uadd.with.overflow.i16", - Uint(U32) => "llvm.uadd.with.overflow.i32", - Uint(U64) => "llvm.uadd.with.overflow.i64", - Uint(U128) => "llvm.uadd.with.overflow.i128", - - _ => unreachable!(), - }, - OverflowOp::Sub => match new_sty { - Int(I8) => "llvm.ssub.with.overflow.i8", - Int(I16) => "llvm.ssub.with.overflow.i16", - Int(I32) => "llvm.ssub.with.overflow.i32", - Int(I64) => "llvm.ssub.with.overflow.i64", - Int(I128) => "llvm.ssub.with.overflow.i128", - - Uint(U8) => "llvm.usub.with.overflow.i8", - Uint(U16) => "llvm.usub.with.overflow.i16", - Uint(U32) => "llvm.usub.with.overflow.i32", - Uint(U64) => "llvm.usub.with.overflow.i64", - Uint(U128) => "llvm.usub.with.overflow.i128", - - _ => unreachable!(), - }, - OverflowOp::Mul => match new_sty { - Int(I8) => "llvm.smul.with.overflow.i8", - Int(I16) => "llvm.smul.with.overflow.i16", - Int(I32) => "llvm.smul.with.overflow.i32", - Int(I64) => 
"llvm.smul.with.overflow.i64", - Int(I128) => "llvm.smul.with.overflow.i128", - - Uint(U8) => "llvm.umul.with.overflow.i8", - Uint(U16) => "llvm.umul.with.overflow.i16", - Uint(U32) => "llvm.umul.with.overflow.i32", - Uint(U64) => "llvm.umul.with.overflow.i64", - Uint(U128) => "llvm.umul.with.overflow.i128", - - _ => unreachable!(), - }, - }; - - let intrinsic = self.get_intrinsic(&name); - let res = self.call(intrinsic, &[lhs, rhs], None); - ( - self.extract_value(res, 0), - self.extract_value(res, 1), - ) - } - - fn alloca(&mut self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value { - let mut bx = Builder::with_cx(self.cx); - bx.position_at_start(unsafe { - llvm::LLVMGetFirstBasicBlock(self.llfn()) - }); - bx.dynamic_alloca(ty, name, align) - } - - fn dynamic_alloca(&mut self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value { - self.count_insn("alloca"); + fn mul(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("mul"); unsafe { - let alloca = if name.is_empty() { - llvm::LLVMBuildAlloca(self.llbuilder, ty, noname()) - } else { - let name = SmallCStr::new(name); - llvm::LLVMBuildAlloca(self.llbuilder, ty, - name.as_ptr()) - }; - llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint); - alloca + llvm::LLVMBuildMul(self.llbuilder, lhs, rhs, noname()) } } - fn array_alloca(&mut self, - ty: &'ll Type, - len: &'ll Value, - name: &str, - align: Align) -> &'ll Value { - self.count_insn("alloca"); + fn fmul(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("fmul"); unsafe { - let alloca = if name.is_empty() { - llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, noname()) - } else { - let name = SmallCStr::new(name); - llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, - name.as_ptr()) - }; - llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint); - alloca + llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname()) } } - fn load(&mut self, ptr: &'ll Value, align: Align) -> &'ll Value { - self.count_insn("load"); + fn fmul_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("fmul"); unsafe { - let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname()); - llvm::LLVMSetAlignment(load, align.bytes() as c_uint); - load + let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname()); + llvm::LLVMRustSetHasUnsafeAlgebra(instr); + instr } } - fn volatile_load(&mut self, ptr: &'ll Value) -> &'ll Value { - self.count_insn("load.volatile"); + + fn udiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("udiv"); unsafe { - let insn = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname()); - llvm::LLVMSetVolatile(insn, llvm::True); - insn + llvm::LLVMBuildUDiv(self.llbuilder, lhs, rhs, noname()) } } - fn atomic_load( - &mut self, - ptr: &'ll Value, - order: rustc_codegen_ssa::common::AtomicOrdering, - size: Size, - ) -> &'ll Value { - self.count_insn("load.atomic"); + fn exactudiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("exactudiv"); unsafe { - let load = llvm::LLVMRustBuildAtomicLoad( - self.llbuilder, - ptr, - noname(), - AtomicOrdering::from_generic(order), - ); - // LLVM requires the alignment of atomic loads to be at least the size of the type. 
- llvm::LLVMSetAlignment(load, size.bytes() as c_uint); - load + llvm::LLVMBuildExactUDiv(self.llbuilder, lhs, rhs, noname()) } } - fn load_operand( - &mut self, - place: PlaceRef<'tcx, &'ll Value> - ) -> OperandRef<'tcx, &'ll Value> { - debug!("PlaceRef::load: {:?}", place); - - assert_eq!(place.llextra.is_some(), place.layout.is_unsized()); - - if place.layout.is_zst() { - return OperandRef::new_zst(self, place.layout); - } - - fn scalar_load_metadata<'a, 'll, 'tcx>( - bx: &mut Builder<'a, 'll, 'tcx>, - load: &'ll Value, - scalar: &layout::Scalar - ) { - let vr = scalar.valid_range.clone(); - match scalar.value { - layout::Int(..) => { - let range = scalar.valid_range_exclusive(bx); - if range.start != range.end { - bx.range_metadata(load, range); - } - } - layout::Pointer if vr.start() < vr.end() && !vr.contains(&0) => { - bx.nonnull_metadata(load); - } - _ => {} - } + fn sdiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("sdiv"); + unsafe { + llvm::LLVMBuildSDiv(self.llbuilder, lhs, rhs, noname()) } + } - let val = if let Some(llextra) = place.llextra { - OperandValue::Ref(place.llval, Some(llextra), place.align) - } else if place.layout.is_llvm_immediate() { - let mut const_llval = None; - unsafe { - if let Some(global) = llvm::LLVMIsAGlobalVariable(place.llval) { - if llvm::LLVMIsGlobalConstant(global) == llvm::True { - const_llval = llvm::LLVMGetInitializer(global); - } - } - } - let llval = const_llval.unwrap_or_else(|| { - let load = self.load(place.llval, place.align); - if let layout::Abi::Scalar(ref scalar) = place.layout.abi { - scalar_load_metadata(self, load, scalar); - } - load - }); - OperandValue::Immediate(to_immediate(self, llval, place.layout)) - } else if let layout::Abi::ScalarPair(ref a, ref b) = place.layout.abi { - let b_offset = a.value.size(self).align_to(b.value.align(self).abi); + fn exactsdiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("exactsdiv"); + unsafe { + llvm::LLVMBuildExactSDiv(self.llbuilder, lhs, rhs, noname()) + } + } - let mut load = |i, scalar: &layout::Scalar, align| { - let llptr = self.struct_gep(place.llval, i as u64); - let load = self.load(llptr, align); - scalar_load_metadata(self, load, scalar); - if scalar.is_bool() { - self.trunc(load, self.type_i1()) - } else { - load - } - }; + fn fdiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("fdiv"); + unsafe { + llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname()) + } + } - OperandValue::Pair( - load(0, a, place.align), - load(1, b, place.align.restrict_for_offset(b_offset)), - ) - } else { - OperandValue::Ref(place.llval, None, place.align) - }; + fn fdiv_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("fdiv"); + unsafe { + let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname()); + llvm::LLVMRustSetHasUnsafeAlgebra(instr); + instr + } + } - OperandRef { val, layout: place.layout } + fn urem(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("urem"); + unsafe { + llvm::LLVMBuildURem(self.llbuilder, lhs, rhs, noname()) + } } + fn srem(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("srem"); + unsafe { + llvm::LLVMBuildSRem(self.llbuilder, lhs, rhs, noname()) + } + } + fn frem(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("frem"); + unsafe { + llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname()) + } + } - fn range_metadata(&mut self, load: &'ll Value, 
range: Range) { - if self.sess().target.target.arch == "amdgpu" { - // amdgpu/LLVM does something weird and thinks a i64 value is - // split into a v2i32, halving the bitwidth LLVM expects, - // tripping an assertion. So, for now, just disable this - // optimization. - return; + fn frem_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("frem"); + unsafe { + let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname()); + llvm::LLVMRustSetHasUnsafeAlgebra(instr); + instr } + } + fn shl(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("shl"); unsafe { - let llty = self.cx.val_ty(load); - let v = [ - self.cx.const_uint_big(llty, range.start), - self.cx.const_uint_big(llty, range.end) - ]; + llvm::LLVMBuildShl(self.llbuilder, lhs, rhs, noname()) + } + } - llvm::LLVMSetMetadata(load, llvm::MD_range as c_uint, - llvm::LLVMMDNodeInContext(self.cx.llcx, - v.as_ptr(), - v.len() as c_uint)); + fn lshr(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("lshr"); + unsafe { + llvm::LLVMBuildLShr(self.llbuilder, lhs, rhs, noname()) } } - fn nonnull_metadata(&mut self, load: &'ll Value) { + fn ashr(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("ashr"); unsafe { - llvm::LLVMSetMetadata(load, llvm::MD_nonnull as c_uint, - llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0)); + llvm::LLVMBuildAShr(self.llbuilder, lhs, rhs, noname()) } } - fn store(&mut self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value { - self.store_with_flags(val, ptr, align, MemFlags::empty()) + fn and(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("and"); + unsafe { + llvm::LLVMBuildAnd(self.llbuilder, lhs, rhs, noname()) + } } - fn store_with_flags( - &mut self, - val: &'ll Value, - ptr: &'ll Value, - align: Align, - flags: MemFlags, - ) -> &'ll Value { - debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags); - self.count_insn("store"); - let ptr = self.check_store(val, ptr); + fn or(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("or"); unsafe { - let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr); - let align = if flags.contains(MemFlags::UNALIGNED) { - 1 - } else { - align.bytes() as c_uint - }; - llvm::LLVMSetAlignment(store, align); - if flags.contains(MemFlags::VOLATILE) { - llvm::LLVMSetVolatile(store, llvm::True); - } - if flags.contains(MemFlags::NONTEMPORAL) { - // According to LLVM [1] building a nontemporal store must - // *always* point to a metadata value of the integer 1. - // - // [1]: http://llvm.org/docs/LangRef.html#store-instruction - let one = self.cx.const_i32(1); - let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1); - llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node); - } - store + llvm::LLVMBuildOr(self.llbuilder, lhs, rhs, noname()) } } - fn atomic_store(&mut self, val: &'ll Value, ptr: &'ll Value, - order: rustc_codegen_ssa::common::AtomicOrdering, size: Size) { - debug!("Store {:?} -> {:?}", val, ptr); - self.count_insn("store.atomic"); - let ptr = self.check_store(val, ptr); + fn xor(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("xor"); unsafe { - let store = llvm::LLVMRustBuildAtomicStore( - self.llbuilder, - val, - ptr, - AtomicOrdering::from_generic(order), - ); - // LLVM requires the alignment of atomic stores to be at least the size of the type. 
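The two alignment comments above are load-bearing: LLVM rejects atomic operations whose alignment is below the access size, so the builder passes `size.bytes()` to `LLVMSetAlignment` rather than the type's ABI alignment. Rust's own atomic types bake in the same invariant, which a quick standalone check makes visible:

    use std::mem::{align_of, size_of};
    use std::sync::atomic::{AtomicU32, AtomicU64};

    fn main() {
        // For atomics, alignment equals size -- the same property the
        // builder enforces when it emits atomic loads and stores.
        assert_eq!(align_of::<AtomicU32>(), size_of::<AtomicU32>());
        assert_eq!(align_of::<AtomicU64>(), size_of::<AtomicU64>());
    }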
- llvm::LLVMSetAlignment(store, size.bytes() as c_uint); + llvm::LLVMBuildXor(self.llbuilder, lhs, rhs, noname()) } } - fn gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value { - self.count_insn("gep"); + fn neg(&mut self, v: &'ll Value) -> &'ll Value { + self.count_insn("neg"); unsafe { - llvm::LLVMBuildGEP(self.llbuilder, ptr, indices.as_ptr(), - indices.len() as c_uint, noname()) + llvm::LLVMBuildNeg(self.llbuilder, v, noname()) } } - fn inbounds_gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value { - self.count_insn("inboundsgep"); + fn fneg(&mut self, v: &'ll Value) -> &'ll Value { + self.count_insn("fneg"); unsafe { - llvm::LLVMBuildInBoundsGEP( - self.llbuilder, ptr, indices.as_ptr(), indices.len() as c_uint, noname()) + llvm::LLVMBuildFNeg(self.llbuilder, v, noname()) + } + } + + fn not(&mut self, v: &'ll Value) -> &'ll Value { + self.count_insn("not"); + unsafe { + llvm::LLVMBuildNot(self.llbuilder, v, noname()) } } + fn checked_binop( + &mut self, + oop: OverflowOp, + ty: Ty<'_>, + lhs: Self::Value, + rhs: Self::Value, + ) -> (Self::Value, Self::Value) { + use syntax::ast::IntTy::*; + use syntax::ast::UintTy::*; + use rustc::ty::{Int, Uint}; + + let new_sty = match ty.sty { + Int(Isize) => Int(self.tcx.sess.target.isize_ty), + Uint(Usize) => Uint(self.tcx.sess.target.usize_ty), + ref t @ Uint(_) | ref t @ Int(_) => t.clone(), + _ => panic!("tried to get overflow intrinsic for op applied to non-int type") + }; + + let name = match oop { + OverflowOp::Add => match new_sty { + Int(I8) => "llvm.sadd.with.overflow.i8", + Int(I16) => "llvm.sadd.with.overflow.i16", + Int(I32) => "llvm.sadd.with.overflow.i32", + Int(I64) => "llvm.sadd.with.overflow.i64", + Int(I128) => "llvm.sadd.with.overflow.i128", + + Uint(U8) => "llvm.uadd.with.overflow.i8", + Uint(U16) => "llvm.uadd.with.overflow.i16", + Uint(U32) => "llvm.uadd.with.overflow.i32", + Uint(U64) => "llvm.uadd.with.overflow.i64", + Uint(U128) => "llvm.uadd.with.overflow.i128", + + _ => unreachable!(), + }, + OverflowOp::Sub => match new_sty { + Int(I8) => "llvm.ssub.with.overflow.i8", + Int(I16) => "llvm.ssub.with.overflow.i16", + Int(I32) => "llvm.ssub.with.overflow.i32", + Int(I64) => "llvm.ssub.with.overflow.i64", + Int(I128) => "llvm.ssub.with.overflow.i128", + + Uint(U8) => "llvm.usub.with.overflow.i8", + Uint(U16) => "llvm.usub.with.overflow.i16", + Uint(U32) => "llvm.usub.with.overflow.i32", + Uint(U64) => "llvm.usub.with.overflow.i64", + Uint(U128) => "llvm.usub.with.overflow.i128", + + _ => unreachable!(), + }, + OverflowOp::Mul => match new_sty { + Int(I8) => "llvm.smul.with.overflow.i8", + Int(I16) => "llvm.smul.with.overflow.i16", + Int(I32) => "llvm.smul.with.overflow.i32", + Int(I64) => "llvm.smul.with.overflow.i64", + Int(I128) => "llvm.smul.with.overflow.i128", + + Uint(U8) => "llvm.umul.with.overflow.i8", + Uint(U16) => "llvm.umul.with.overflow.i16", + Uint(U32) => "llvm.umul.with.overflow.i32", + Uint(U64) => "llvm.umul.with.overflow.i64", + Uint(U128) => "llvm.umul.with.overflow.i128", + + _ => unreachable!(), + }, + }; + + let intrinsic = self.get_intrinsic(&name); + let res = self.call(intrinsic, &[lhs, rhs], None); + ( + self.extract_value(res, 0), + self.extract_value(res, 1), + ) + } + /* Casts */ fn trunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("trunc"); @@ -1249,13 +1246,6 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } } - fn struct_gep(&mut self, ptr: &'ll Value, idx: u64) -> &'ll Value { - 
self.count_insn("structgep"); - assert_eq!(idx as c_uint as u64, idx); - unsafe { - llvm::LLVMBuildStructGEP(self.llbuilder, ptr, idx as c_uint, noname()) - } - } fn cx(&self) -> &CodegenCx<'ll, 'tcx> { self.cx @@ -1308,6 +1298,12 @@ fn get_static(&mut self, def_id: DefId) -> &'ll Value { } impl Builder<'a, 'll, 'tcx> { + pub fn llfn(&self) -> &'ll Value { + unsafe { + llvm::LLVMGetBasicBlockParent(self.llbb()) + } + } + fn count_insn(&self, category: &str) { if self.sess().codegen_stats() { self.stats.borrow_mut().n_llvm_insns += 1; diff --git a/src/librustc_codegen_llvm/context.rs b/src/librustc_codegen_llvm/context.rs index 63dd5393dc462..acdce095d4e52 100644 --- a/src/librustc_codegen_llvm/context.rs +++ b/src/librustc_codegen_llvm/context.rs @@ -10,7 +10,6 @@ use crate::monomorphize::partitioning::CodegenUnit; use crate::type_::Type; use crate::type_of::PointeeInfo; use rustc_codegen_ssa::traits::*; -use libc::c_uint; use rustc_data_structures::base_n; use rustc_data_structures::small_c_str::SmallCStr; @@ -326,10 +325,6 @@ impl MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> { get_fn(self, instance) } - fn get_param(&self, llfn: &'ll Value, index: usize) -> &'ll Value { - llvm::get_param(llfn, index as c_uint) - } - fn eh_personality(&self) -> &'ll Value { // The exception handling personality function. // diff --git a/src/librustc_codegen_llvm/debuginfo/mod.rs b/src/librustc_codegen_llvm/debuginfo/mod.rs index 066ac8f019e00..6abbcd9feba7a 100644 --- a/src/librustc_codegen_llvm/debuginfo/mod.rs +++ b/src/librustc_codegen_llvm/debuginfo/mod.rs @@ -225,6 +225,13 @@ impl DebugInfoBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { fn insert_reference_to_gdb_debug_scripts_section_global(&mut self) { gdb::insert_reference_to_gdb_debug_scripts_section_global(self) } + + fn set_value_name(&mut self, value: &'ll Value, name: &str) { + let cname = SmallCStr::new(name); + unsafe { + llvm::LLVMSetValueName(value, cname.as_ptr()); + } + } } impl DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> { diff --git a/src/librustc_codegen_llvm/va_arg.rs b/src/librustc_codegen_llvm/va_arg.rs index 7aceaea4510ce..024bcd27ef9f5 100644 --- a/src/librustc_codegen_llvm/va_arg.rs +++ b/src/librustc_codegen_llvm/va_arg.rs @@ -3,7 +3,9 @@ use crate::type_::Type; use crate::type_of::LayoutLlvmExt; use crate::value::Value; use rustc_codegen_ssa::mir::operand::OperandRef; -use rustc_codegen_ssa::traits::{BaseTypeMethods, BuilderMethods, ConstMethods, DerivedTypeMethods}; +use rustc_codegen_ssa::traits::{ + BaseTypeMethods, BuilderMethods, ConstMethods, DerivedTypeMethods, MemoryBuilderMethods, +}; use rustc::ty::layout::{Align, HasDataLayout, HasTyCtxt, LayoutOf, Size}; use rustc::ty::Ty; @@ -145,4 +147,3 @@ pub(super) fn emit_va_arg( } } } - diff --git a/src/librustc_codegen_ssa/base.rs b/src/librustc_codegen_ssa/base.rs index 0fd51c8bef022..50ff61da6850d 100644 --- a/src/librustc_codegen_ssa/base.rs +++ b/src/librustc_codegen_ssa/base.rs @@ -502,8 +502,8 @@ pub fn maybe_create_entry_wrapper<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( bx.insert_reference_to_gdb_debug_scripts_section_global(); // Params from native main() used as args for rust start function - let param_argc = cx.get_param(llfn, 0); - let param_argv = cx.get_param(llfn, 1); + let param_argc = bx.get_param(0); + let param_argv = bx.get_param(1); let arg_argc = bx.intcast(param_argc, cx.type_isize(), true); let arg_argv = param_argv; diff --git a/src/librustc_codegen_ssa/mir/mod.rs b/src/librustc_codegen_ssa/mir/mod.rs index 9bf2d6e6e3288..d9ed7d581b755 100644 
--- a/src/librustc_codegen_ssa/mir/mod.rs +++ b/src/librustc_codegen_ssa/mir/mod.rs @@ -23,7 +23,7 @@ use rustc::mir::traversal; use self::operand::{OperandRef, OperandValue}; /// Master context for codegenning from MIR. -pub struct FunctionCx<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> { +pub struct FunctionCx<'a, 'tcx: 'a, Bx: HasCodegen<'tcx>> { instance: Instance<'tcx>, mir: &'a mir::Mir<'tcx>, @@ -87,7 +87,7 @@ pub struct FunctionCx<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> { va_list_ref: Option>, } -impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { +impl<'a, 'tcx: 'a, Bx: HasCodegen<'tcx> + DebugInfoBuilderMethods<'tcx>> FunctionCx<'a, 'tcx, Bx> { pub fn monomorphize(&self, value: &T) -> T where T: TypeFoldable<'tcx> { @@ -295,7 +295,7 @@ pub fn codegen_mir<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( // Temporary or return place if local == mir::RETURN_PLACE && fx.fn_ty.ret.is_indirect() { debug!("alloc: {:?} (return place) -> place", local); - let llretptr = fx.cx.get_param(llfn, 0); + let llretptr = bx.get_param(0); LocalRef::Place(PlaceRef::new_sized(llretptr, layout, layout.align.abi)) } else if memory_locals.contains(local) { debug!("alloc: {:?} -> place", local); @@ -529,18 +529,18 @@ fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( }); } PassMode::Direct(_) => { - let llarg = bx.get_param(bx.llfn(), llarg_idx); + let llarg = bx.get_param(llarg_idx); bx.set_value_name(llarg, &name); llarg_idx += 1; return local( OperandRef::from_immediate_or_packed_pair(bx, llarg, arg.layout)); } PassMode::Pair(..) => { - let a = bx.get_param(bx.llfn(), llarg_idx); + let a = bx.get_param(llarg_idx); bx.set_value_name(a, &(name.clone() + ".0")); llarg_idx += 1; - let b = bx.get_param(bx.llfn(), llarg_idx); + let b = bx.get_param(llarg_idx); bx.set_value_name(b, &(name + ".1")); llarg_idx += 1; @@ -557,16 +557,16 @@ fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( // Don't copy an indirect argument to an alloca, the caller // already put it in a temporary alloca and gave it up. // FIXME: lifetimes - let llarg = bx.get_param(bx.llfn(), llarg_idx); + let llarg = bx.get_param(llarg_idx); bx.set_value_name(llarg, &name); llarg_idx += 1; PlaceRef::new_sized(llarg, arg.layout, arg.layout.align.abi) } else if arg.is_unsized_indirect() { // As the storage for the indirect argument lives during // the whole function call, we just copy the fat pointer. - let llarg = bx.get_param(bx.llfn(), llarg_idx); + let llarg = bx.get_param(llarg_idx); llarg_idx += 1; - let llextra = bx.get_param(bx.llfn(), llarg_idx); + let llextra = bx.get_param(llarg_idx); llarg_idx += 1; let indirect_operand = OperandValue::Pair(llarg, llextra); diff --git a/src/librustc_codegen_ssa/mir/place.rs b/src/librustc_codegen_ssa/mir/place.rs index 56e9b36e83233..df2aca417b75c 100644 --- a/src/librustc_codegen_ssa/mir/place.rs +++ b/src/librustc_codegen_ssa/mir/place.rs @@ -56,7 +56,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { } } - pub fn alloca>( + pub fn alloca>( bx: &mut Bx, layout: TyLayout<'tcx>, name: &str @@ -68,7 +68,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { } /// Returns a place for an indirect reference to an unsized place. 
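Before the place.rs hunks continue below, the `get_param` migration visible in base.rs and mir/mod.rs above is the clearest illustration of the series' direction: the parameter lookup moves off the codegen context (`cx.get_param(llfn, i)`) onto the builder (`bx.get_param(i)`), which already knows which function it is positioned in, and the index is a plain `usize` so `librustc_codegen_ssa` never names `libc::c_uint`. A toy sketch of the post-patch shape; `Value`, `Builder`, and the parameter storage are stand-ins, not the real types:

    #[derive(Clone, Copy, Debug, PartialEq)]
    struct Value(u32);

    struct Builder {
        // The builder is always positioned inside some function, so it can
        // answer parameter queries without callers threading `llfn` through.
        current_fn_params: Vec<Value>,
    }

    impl Builder {
        // Post-patch signature: a bare usize index. An LLVM-backed builder
        // would convert to c_uint internally, keeping libc out of cg_ssa.
        fn get_param(&self, index: usize) -> Value {
            self.current_fn_params[index]
        }
    }

    fn main() {
        let bx = Builder { current_fn_params: vec![Value(0), Value(1)] };
        // Mirrors maybe_create_entry_wrapper: argc and argv are params 0 and 1.
        assert_eq!(bx.get_param(0), Value(0));
        assert_eq!(bx.get_param(1), Value(1));
    }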
- pub fn alloca_unsized_indirect>( + pub fn alloca_unsized_indirect>( bx: &mut Bx, layout: TyLayout<'tcx>, name: &str, diff --git a/src/librustc_codegen_ssa/traits/abi.rs b/src/librustc_codegen_ssa/traits/abi.rs index 8f7fa199b057a..a8fd4e1d2c7c7 100644 --- a/src/librustc_codegen_ssa/traits/abi.rs +++ b/src/librustc_codegen_ssa/traits/abi.rs @@ -10,4 +10,5 @@ pub trait AbiMethods<'tcx> { pub trait AbiBuilderMethods<'tcx>: BackendTypes { fn apply_attrs_callsite(&mut self, ty: &FnType<'tcx, Ty<'tcx>>, callsite: Self::Value); + fn get_param(&self, index: usize) -> Self::Value; } diff --git a/src/librustc_codegen_ssa/traits/builder.rs b/src/librustc_codegen_ssa/traits/builder.rs index e0795e3c7aa24..d25bb3c362c82 100644 --- a/src/librustc_codegen_ssa/traits/builder.rs +++ b/src/librustc_codegen_ssa/traits/builder.rs @@ -23,6 +23,47 @@ pub enum OverflowOp { Mul, } +pub trait MemoryBuilderMethods<'tcx>: HasCodegen<'tcx> { + fn alloca(&mut self, ty: Self::Type, name: &str, align: Align) -> Self::Value; + fn dynamic_alloca(&mut self, ty: Self::Type, name: &str, align: Align) -> Self::Value; + fn array_alloca( + &mut self, + ty: Self::Type, + len: Self::Value, + name: &str, + align: Align, + ) -> Self::Value; + + fn load(&mut self, ptr: Self::Value, align: Align) -> Self::Value; + fn volatile_load(&mut self, ptr: Self::Value) -> Self::Value; + fn atomic_load(&mut self, ptr: Self::Value, order: AtomicOrdering, size: Size) -> Self::Value; + fn load_operand(&mut self, place: PlaceRef<'tcx, Self::Value>) + -> OperandRef<'tcx, Self::Value>; + + fn range_metadata(&mut self, load: Self::Value, range: Range); + fn nonnull_metadata(&mut self, load: Self::Value); + + fn store(&mut self, val: Self::Value, ptr: Self::Value, align: Align) -> Self::Value; + fn store_with_flags( + &mut self, + val: Self::Value, + ptr: Self::Value, + align: Align, + flags: MemFlags, + ) -> Self::Value; + fn atomic_store( + &mut self, + val: Self::Value, + ptr: Self::Value, + order: AtomicOrdering, + size: Size, + ); + + fn gep(&mut self, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value; + fn inbounds_gep(&mut self, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value; + fn struct_gep(&mut self, ptr: Self::Value, idx: u64) -> Self::Value; +} + pub trait BuilderMethods<'a, 'tcx: 'a>: HasCodegen<'tcx> + DebugInfoBuilderMethods<'tcx> @@ -31,15 +72,14 @@ pub trait BuilderMethods<'a, 'tcx: 'a>: + IntrinsicCallMethods<'tcx> + AsmBuilderMethods<'tcx> + StaticBuilderMethods<'tcx> + + MemoryBuilderMethods<'tcx> { fn new_block<'b>(cx: &'a Self::CodegenCx, llfn: Self::Value, name: &'b str) -> Self; fn with_cx(cx: &'a Self::CodegenCx) -> Self; fn build_sibling_block<'b>(&self, name: &'b str) -> Self; fn cx(&self) -> &Self::CodegenCx; - fn llfn(&self) -> Self::Value; fn llbb(&self) -> Self::BasicBlock; - fn set_value_name(&mut self, value: Self::Value, name: &str); fn position_at_end(&mut self, llbb: Self::BasicBlock); fn ret_void(&mut self); fn ret(&mut self, v: Self::Value); @@ -102,45 +142,6 @@ pub trait BuilderMethods<'a, 'tcx: 'a>: rhs: Self::Value, ) -> (Self::Value, Self::Value); - fn alloca(&mut self, ty: Self::Type, name: &str, align: Align) -> Self::Value; - fn dynamic_alloca(&mut self, ty: Self::Type, name: &str, align: Align) -> Self::Value; - fn array_alloca( - &mut self, - ty: Self::Type, - len: Self::Value, - name: &str, - align: Align, - ) -> Self::Value; - - fn load(&mut self, ptr: Self::Value, align: Align) -> Self::Value; - fn volatile_load(&mut self, ptr: Self::Value) -> Self::Value; - fn atomic_load(&mut self, 
ptr: Self::Value, order: AtomicOrdering, size: Size) -> Self::Value; - fn load_operand(&mut self, place: PlaceRef<'tcx, Self::Value>) - -> OperandRef<'tcx, Self::Value>; - - fn range_metadata(&mut self, load: Self::Value, range: Range); - fn nonnull_metadata(&mut self, load: Self::Value); - - fn store(&mut self, val: Self::Value, ptr: Self::Value, align: Align) -> Self::Value; - fn store_with_flags( - &mut self, - val: Self::Value, - ptr: Self::Value, - align: Align, - flags: MemFlags, - ) -> Self::Value; - fn atomic_store( - &mut self, - val: Self::Value, - ptr: Self::Value, - order: AtomicOrdering, - size: Size, - ); - - fn gep(&mut self, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value; - fn inbounds_gep(&mut self, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value; - fn struct_gep(&mut self, ptr: Self::Value, idx: u64) -> Self::Value; - fn trunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; fn sext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; fn fptoui(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; diff --git a/src/librustc_codegen_ssa/traits/debuginfo.rs b/src/librustc_codegen_ssa/traits/debuginfo.rs index dcbb4e66cb385..a0b53fde09c4d 100644 --- a/src/librustc_codegen_ssa/traits/debuginfo.rs +++ b/src/librustc_codegen_ssa/traits/debuginfo.rs @@ -58,4 +58,5 @@ pub trait DebugInfoBuilderMethods<'tcx>: BackendTypes { span: Span, ); fn insert_reference_to_gdb_debug_scripts_section_global(&mut self); + fn set_value_name(&mut self, value: Self::Value, name: &str); } diff --git a/src/librustc_codegen_ssa/traits/misc.rs b/src/librustc_codegen_ssa/traits/misc.rs index b3d458f3a3be8..2797dd89f5b15 100644 --- a/src/librustc_codegen_ssa/traits/misc.rs +++ b/src/librustc_codegen_ssa/traits/misc.rs @@ -14,7 +14,6 @@ pub trait MiscMethods<'tcx>: BackendTypes { fn check_overflow(&self) -> bool; fn instances(&self) -> &RefCell, Self::Value>>; fn get_fn(&self, instance: Instance<'tcx>) -> Self::Value; - fn get_param(&self, llfn: Self::Value, index: usize) -> Self::Value; fn eh_personality(&self) -> Self::Value; fn eh_unwind_resume(&self) -> Self::Value; fn sess(&self) -> &Session; diff --git a/src/librustc_codegen_ssa/traits/mod.rs b/src/librustc_codegen_ssa/traits/mod.rs index 8fe8b7ecd4709..34880acaabc13 100644 --- a/src/librustc_codegen_ssa/traits/mod.rs +++ b/src/librustc_codegen_ssa/traits/mod.rs @@ -30,7 +30,7 @@ mod write; pub use self::abi::{AbiBuilderMethods, AbiMethods}; pub use self::asm::{AsmBuilderMethods, AsmMethods}; pub use self::backend::{Backend, BackendTypes, ExtraBackendMethods}; -pub use self::builder::{BuilderMethods, OverflowOp}; +pub use self::builder::{BuilderMethods, MemoryBuilderMethods, OverflowOp}; pub use self::consts::ConstMethods; pub use self::debuginfo::{DebugInfoBuilderMethods, DebugInfoMethods}; pub use self::declare::{DeclareMethods, PreDefineMethods}; From 73192870a9df2b5062afbf831e6c83a0ac20425e Mon Sep 17 00:00:00 2001 From: bjorn3 Date: Thu, 6 Dec 2018 19:04:02 +0100 Subject: [PATCH 14/28] Move mem{cpy,move,set} to MemoryBuilderMethods --- src/librustc_codegen_llvm/builder.rs | 114 ++++++++++----------- src/librustc_codegen_ssa/mir/operand.rs | 6 +- src/librustc_codegen_ssa/traits/builder.rs | 54 +++++----- src/librustc_codegen_ssa/traits/mod.rs | 2 +- 4 files changed, 89 insertions(+), 87 deletions(-) diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index 7a80b4086035f..8254e40cd3925 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ 
b/src/librustc_codegen_llvm/builder.rs @@ -354,6 +354,63 @@ impl MemoryBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { llvm::LLVMBuildStructGEP(self.llbuilder, ptr, idx as c_uint, noname()) } } + + fn memcpy(&mut self, dst: &'ll Value, dst_align: Align, + src: &'ll Value, src_align: Align, + size: &'ll Value, flags: MemFlags) { + if flags.contains(MemFlags::NONTEMPORAL) { + // HACK(nox): This is inefficient but there is no nontemporal memcpy. + let val = self.load(src, src_align); + let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val))); + self.store_with_flags(val, ptr, dst_align, flags); + return; + } + let size = self.intcast(size, self.type_isize(), false); + let is_volatile = flags.contains(MemFlags::VOLATILE); + let dst = self.pointercast(dst, self.type_i8p()); + let src = self.pointercast(src, self.type_i8p()); + unsafe { + llvm::LLVMRustBuildMemCpy(self.llbuilder, dst, dst_align.bytes() as c_uint, + src, src_align.bytes() as c_uint, size, is_volatile); + } + } + + fn memmove(&mut self, dst: &'ll Value, dst_align: Align, + src: &'ll Value, src_align: Align, + size: &'ll Value, flags: MemFlags) { + if flags.contains(MemFlags::NONTEMPORAL) { + // HACK(nox): This is inefficient but there is no nontemporal memmove. + let val = self.load(src, src_align); + let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val))); + self.store_with_flags(val, ptr, dst_align, flags); + return; + } + let size = self.intcast(size, self.type_isize(), false); + let is_volatile = flags.contains(MemFlags::VOLATILE); + let dst = self.pointercast(dst, self.type_i8p()); + let src = self.pointercast(src, self.type_i8p()); + unsafe { + llvm::LLVMRustBuildMemMove(self.llbuilder, dst, dst_align.bytes() as c_uint, + src, src_align.bytes() as c_uint, size, is_volatile); + } + } + + fn memset( + &mut self, + ptr: &'ll Value, + fill_byte: &'ll Value, + size: &'ll Value, + align: Align, + flags: MemFlags, + ) { + let ptr_width = &self.sess().target.target.target_pointer_width; + let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width); + let llintrinsicfn = self.get_intrinsic(&intrinsic_key); + let ptr = self.pointercast(ptr, self.type_i8p()); + let align = self.const_u32(align.bytes() as u32); + let volatile = self.const_bool(flags.contains(MemFlags::VOLATILE)); + self.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None); + } } impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { @@ -928,63 +985,6 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } } - fn memcpy(&mut self, dst: &'ll Value, dst_align: Align, - src: &'ll Value, src_align: Align, - size: &'ll Value, flags: MemFlags) { - if flags.contains(MemFlags::NONTEMPORAL) { - // HACK(nox): This is inefficient but there is no nontemporal memcpy. 
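Two details in the moved bodies are easy to miss. First, since LLVM offers no nontemporal memcpy or memmove, the NONTEMPORAL flag falls back to a whole-value load followed by a nontemporal store (the HACK(nox) comments). Second, `memset` is not a builder primitive at all: it selects a target-specific intrinsic keyed on pointer width and emits an ordinary call. A runnable sketch of that key derivation, grounded in the `format!` call above (the helper itself is hypothetical):

    fn memset_intrinsic_key(target_pointer_width_bits: u32) -> String {
        // p0i8: an i8 pointer in address space 0; iN: the integer type of
        // the length argument, which matches the target's pointer width.
        format!("llvm.memset.p0i8.i{}", target_pointer_width_bits)
    }

    fn main() {
        assert_eq!(memset_intrinsic_key(64), "llvm.memset.p0i8.i64");
        assert_eq!(memset_intrinsic_key(32), "llvm.memset.p0i8.i32");
    }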
- let val = self.load(src, src_align); - let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val))); - self.store_with_flags(val, ptr, dst_align, flags); - return; - } - let size = self.intcast(size, self.type_isize(), false); - let is_volatile = flags.contains(MemFlags::VOLATILE); - let dst = self.pointercast(dst, self.type_i8p()); - let src = self.pointercast(src, self.type_i8p()); - unsafe { - llvm::LLVMRustBuildMemCpy(self.llbuilder, dst, dst_align.bytes() as c_uint, - src, src_align.bytes() as c_uint, size, is_volatile); - } - } - - fn memmove(&mut self, dst: &'ll Value, dst_align: Align, - src: &'ll Value, src_align: Align, - size: &'ll Value, flags: MemFlags) { - if flags.contains(MemFlags::NONTEMPORAL) { - // HACK(nox): This is inefficient but there is no nontemporal memmove. - let val = self.load(src, src_align); - let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val))); - self.store_with_flags(val, ptr, dst_align, flags); - return; - } - let size = self.intcast(size, self.type_isize(), false); - let is_volatile = flags.contains(MemFlags::VOLATILE); - let dst = self.pointercast(dst, self.type_i8p()); - let src = self.pointercast(src, self.type_i8p()); - unsafe { - llvm::LLVMRustBuildMemMove(self.llbuilder, dst, dst_align.bytes() as c_uint, - src, src_align.bytes() as c_uint, size, is_volatile); - } - } - - fn memset( - &mut self, - ptr: &'ll Value, - fill_byte: &'ll Value, - size: &'ll Value, - align: Align, - flags: MemFlags, - ) { - let ptr_width = &self.sess().target.target.target_pointer_width; - let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width); - let llintrinsicfn = self.get_intrinsic(&intrinsic_key); - let ptr = self.pointercast(ptr, self.type_i8p()); - let align = self.const_u32(align.bytes() as u32); - let volatile = self.const_bool(flags.contains(MemFlags::VOLATILE)); - self.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None); - } - fn select( &mut self, cond: &'ll Value, then_val: &'ll Value, diff --git a/src/librustc_codegen_ssa/mir/operand.rs b/src/librustc_codegen_ssa/mir/operand.rs index 9ec60d822fc1e..c9bfe9766cf75 100644 --- a/src/librustc_codegen_ssa/mir/operand.rs +++ b/src/librustc_codegen_ssa/mir/operand.rs @@ -54,10 +54,12 @@ impl fmt::Debug for OperandRef<'tcx, V> { } impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> { - pub fn new_zst>( + pub fn new_zst>( bx: &mut Bx, layout: TyLayout<'tcx> - ) -> OperandRef<'tcx, V> { + ) -> OperandRef<'tcx, V> + where Bx::CodegenCx: ConstMethods<'tcx> + { assert!(layout.is_zst()); OperandRef { val: OperandValue::Immediate(bx.const_undef(bx.immediate_backend_type(layout))), diff --git a/src/librustc_codegen_ssa/traits/builder.rs b/src/librustc_codegen_ssa/traits/builder.rs index d25bb3c362c82..401f2c74000e4 100644 --- a/src/librustc_codegen_ssa/traits/builder.rs +++ b/src/librustc_codegen_ssa/traits/builder.rs @@ -62,6 +62,33 @@ pub trait MemoryBuilderMethods<'tcx>: HasCodegen<'tcx> { fn gep(&mut self, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value; fn inbounds_gep(&mut self, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value; fn struct_gep(&mut self, ptr: Self::Value, idx: u64) -> Self::Value; + + fn memcpy( + &mut self, + dst: Self::Value, + dst_align: Align, + src: Self::Value, + src_align: Align, + size: Self::Value, + flags: MemFlags, + ); + fn memmove( + &mut self, + dst: Self::Value, + dst_align: Align, + src: Self::Value, + src_align: Align, + size: Self::Value, + flags: MemFlags, + ); + fn memset( + &mut self, + ptr: Self::Value, + 
fill_byte: Self::Value, + size: Self::Value, + align: Align, + flags: MemFlags, + ); } pub trait BuilderMethods<'a, 'tcx: 'a>: @@ -176,33 +203,6 @@ pub trait BuilderMethods<'a, 'tcx: 'a>: dia: AsmDialect, ) -> Option; - fn memcpy( - &mut self, - dst: Self::Value, - dst_align: Align, - src: Self::Value, - src_align: Align, - size: Self::Value, - flags: MemFlags, - ); - fn memmove( - &mut self, - dst: Self::Value, - dst_align: Align, - src: Self::Value, - src_align: Align, - size: Self::Value, - flags: MemFlags, - ); - fn memset( - &mut self, - ptr: Self::Value, - fill_byte: Self::Value, - size: Self::Value, - align: Align, - flags: MemFlags, - ); - fn select( &mut self, cond: Self::Value, diff --git a/src/librustc_codegen_ssa/traits/mod.rs b/src/librustc_codegen_ssa/traits/mod.rs index 34880acaabc13..58b4a299d16c4 100644 --- a/src/librustc_codegen_ssa/traits/mod.rs +++ b/src/librustc_codegen_ssa/traits/mod.rs @@ -30,7 +30,7 @@ mod write; pub use self::abi::{AbiBuilderMethods, AbiMethods}; pub use self::asm::{AsmBuilderMethods, AsmMethods}; pub use self::backend::{Backend, BackendTypes, ExtraBackendMethods}; -pub use self::builder::{BuilderMethods, MemoryBuilderMethods, OverflowOp}; +pub use self::builder::*; pub use self::consts::ConstMethods; pub use self::debuginfo::{DebugInfoBuilderMethods, DebugInfoMethods}; pub use self::declare::{DeclareMethods, PreDefineMethods}; From 1e0e3b0d0bf50337cda2724f9b583242c368eb84 Mon Sep 17 00:00:00 2001 From: bjorn3 Date: Fri, 7 Dec 2018 17:55:14 +0100 Subject: [PATCH 15/28] Introduce NumBuilderMethods and move some more methods to MemoryBuilderMethods --- src/librustc_codegen_llvm/builder.rs | 512 ++++++++++----------- src/librustc_codegen_llvm/va_arg.rs | 4 +- src/librustc_codegen_ssa/traits/builder.rs | 179 +++---- 3 files changed, 354 insertions(+), 341 deletions(-) diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index 8254e40cd3925..4b52bf4852413 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -148,25 +148,6 @@ impl MemoryBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { } } - fn atomic_load( - &mut self, - ptr: &'ll Value, - order: rustc_codegen_ssa::common::AtomicOrdering, - size: Size, - ) -> &'ll Value { - self.count_insn("load.atomic"); - unsafe { - let load = llvm::LLVMRustBuildAtomicLoad( - self.llbuilder, - ptr, - noname(), - AtomicOrdering::from_generic(order), - ); - // LLVM requires the alignment of atomic loads to be at least the size of the type. - llvm::LLVMSetAlignment(load, size.bytes() as c_uint); - load - } - } fn load_operand( &mut self, @@ -244,38 +225,6 @@ impl MemoryBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { OperandRef { val, layout: place.layout } } - - - fn range_metadata(&mut self, load: &'ll Value, range: Range) { - if self.sess().target.target.arch == "amdgpu" { - // amdgpu/LLVM does something weird and thinks a i64 value is - // split into a v2i32, halving the bitwidth LLVM expects, - // tripping an assertion. So, for now, just disable this - // optimization. 
- return; - } - - unsafe { - let llty = self.cx.val_ty(load); - let v = [ - self.cx.const_uint_big(llty, range.start), - self.cx.const_uint_big(llty, range.end) - ]; - - llvm::LLVMSetMetadata(load, llvm::MD_range as c_uint, - llvm::LLVMMDNodeInContext(self.cx.llcx, - v.as_ptr(), - v.len() as c_uint)); - } - } - - fn nonnull_metadata(&mut self, load: &'ll Value) { - unsafe { - llvm::LLVMSetMetadata(load, llvm::MD_nonnull as c_uint, - llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0)); - } - } - fn store(&mut self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value { self.store_with_flags(val, ptr, align, MemFlags::empty()) } @@ -314,23 +263,6 @@ impl MemoryBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { } } - fn atomic_store(&mut self, val: &'ll Value, ptr: &'ll Value, - order: rustc_codegen_ssa::common::AtomicOrdering, size: Size) { - debug!("Store {:?} -> {:?}", val, ptr); - self.count_insn("store.atomic"); - let ptr = self.check_store(val, ptr); - unsafe { - let store = llvm::LLVMRustBuildAtomicStore( - self.llbuilder, - val, - ptr, - AtomicOrdering::from_generic(order), - ); - // LLVM requires the alignment of atomic stores to be at least the size of the type. - llvm::LLVMSetAlignment(store, size.bytes() as c_uint); - } - } - fn gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value { self.count_insn("gep"); unsafe { @@ -355,6 +287,57 @@ impl MemoryBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { } } + fn pointercast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + self.count_insn("pointercast"); + unsafe { + llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty, noname()) + } + } + + fn ptrtoint(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + self.count_insn("ptrtoint"); + unsafe { + llvm::LLVMBuildPtrToInt(self.llbuilder, val, dest_ty, noname()) + } + } + + fn inttoptr(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + self.count_insn("inttoptr"); + unsafe { + llvm::LLVMBuildIntToPtr(self.llbuilder, val, dest_ty, noname()) + } + } + + fn range_metadata(&mut self, load: &'ll Value, range: Range) { + if self.sess().target.target.arch == "amdgpu" { + // amdgpu/LLVM does something weird and thinks a i64 value is + // split into a v2i32, halving the bitwidth LLVM expects, + // tripping an assertion. So, for now, just disable this + // optimization. 
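Aside from the amdgpu gate, the range computation feeding this metadata deserves one worked example. The scalar's `valid_range` is inclusive, but LLVM's `!range` metadata is half-open `[start, end)`, so `load_operand` converts through `valid_range_exclusive` and skips emission when the converted range wraps around to cover everything. A simplified, self-contained version of that conversion (the real one reads the bit width from the layout):

    // Converts an inclusive valid range to the half-open form used by
    // !range metadata, truncating modulo 2^bits like the scalar's domain.
    fn exclusive_range(start: u128, inclusive_end: u128, bits: u32) -> (u128, u128) {
        let mask = if bits >= 128 { u128::MAX } else { (1u128 << bits) - 1 };
        (start & mask, inclusive_end.wrapping_add(1) & mask)
    }

    fn main() {
        // bool: valid values 0..=1 loaded as an i8 -> metadata [0, 2)
        assert_eq!(exclusive_range(0, 1, 8), (0, 2));
        // Full u8 range 0..=255 wraps to [0, 0): no real restriction, and
        // the `range.start != range.end` check above skips the metadata.
        assert_eq!(exclusive_range(0, 255, 8), (0, 0));
    }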
+ return; + } + + unsafe { + let llty = self.cx.val_ty(load); + let v = [ + self.cx.const_uint_big(llty, range.start), + self.cx.const_uint_big(llty, range.end) + ]; + + llvm::LLVMSetMetadata(load, llvm::MD_range as c_uint, + llvm::LLVMMDNodeInContext(self.cx.llcx, + v.as_ptr(), + v.len() as c_uint)); + } + } + + fn nonnull_metadata(&mut self, load: &'ll Value) { + unsafe { + llvm::LLVMSetMetadata(load, llvm::MD_nonnull as c_uint, + llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0)); + } + } + fn memcpy(&mut self, dst: &'ll Value, dst_align: Align, src: &'ll Value, src_align: Align, size: &'ll Value, flags: MemFlags) { @@ -411,136 +394,101 @@ impl MemoryBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { let volatile = self.const_bool(flags.contains(MemFlags::VOLATILE)); self.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None); } -} - -impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { - fn new_block<'b>( - cx: &'a CodegenCx<'ll, 'tcx>, - llfn: &'ll Value, - name: &'b str - ) -> Self { - let mut bx = Builder::with_cx(cx); - let llbb = unsafe { - let name = SmallCStr::new(name); - llvm::LLVMAppendBasicBlockInContext( - cx.llcx, - llfn, - name.as_ptr() - ) - }; - bx.position_at_end(llbb); - bx - } - - fn with_cx(cx: &'a CodegenCx<'ll, 'tcx>) -> Self { - // Create a fresh builder from the crate context. - let llbuilder = unsafe { - llvm::LLVMCreateBuilderInContext(cx.llcx) - }; - Builder { - llbuilder, - cx, - } - } - - fn build_sibling_block<'b>(&self, name: &'b str) -> Self { - Builder::new_block(self.cx, self.llfn(), name) - } - - fn llbb(&self) -> &'ll BasicBlock { - unsafe { - llvm::LLVMGetInsertBlock(self.llbuilder) - } - } - fn position_at_end(&mut self, llbb: &'ll BasicBlock) { - unsafe { - llvm::LLVMPositionBuilderAtEnd(self.llbuilder, llbb); - } - } - - fn ret_void(&mut self) { - self.count_insn("retvoid"); - unsafe { - llvm::LLVMBuildRetVoid(self.llbuilder); - } - } - - fn ret(&mut self, v: &'ll Value) { - self.count_insn("ret"); - unsafe { - llvm::LLVMBuildRet(self.llbuilder, v); - } - } - - fn br(&mut self, dest: &'ll BasicBlock) { - self.count_insn("br"); + fn atomic_load( + &mut self, + ptr: &'ll Value, + order: rustc_codegen_ssa::common::AtomicOrdering, + size: Size, + ) -> &'ll Value { + self.count_insn("load.atomic"); unsafe { - llvm::LLVMBuildBr(self.llbuilder, dest); + let load = llvm::LLVMRustBuildAtomicLoad( + self.llbuilder, + ptr, + noname(), + AtomicOrdering::from_generic(order), + ); + // LLVM requires the alignment of atomic loads to be at least the size of the type. + llvm::LLVMSetAlignment(load, size.bytes() as c_uint); + load } } - fn cond_br( - &mut self, - cond: &'ll Value, - then_llbb: &'ll BasicBlock, - else_llbb: &'ll BasicBlock, - ) { - self.count_insn("condbr"); + fn atomic_store(&mut self, val: &'ll Value, ptr: &'ll Value, + order: rustc_codegen_ssa::common::AtomicOrdering, size: Size) { + debug!("Store {:?} -> {:?}", val, ptr); + self.count_insn("store.atomic"); + let ptr = self.check_store(val, ptr); unsafe { - llvm::LLVMBuildCondBr(self.llbuilder, cond, then_llbb, else_llbb); + let store = llvm::LLVMRustBuildAtomicStore( + self.llbuilder, + val, + ptr, + AtomicOrdering::from_generic(order), + ); + // LLVM requires the alignment of atomic stores to be at least the size of the type. 
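The atomic entry points collected into MemoryBuilderMethods here (`atomic_load` and `atomic_store` above, `atomic_cmpxchg`, `atomic_rmw`, and `atomic_fence` just below) share one pattern: they accept the backend-independent enums from `rustc_codegen_ssa::common` and translate them at the FFI boundary with `from_generic`. A minimal sketch of that shape; both enums and their variant names are illustrative stand-ins, not the real definitions:

    #[derive(Clone, Copy, Debug, PartialEq)]
    enum GenericOrdering { Relaxed, Acquire, Release, AcqRel, SeqCst }

    // Stand-in for the backend-side (LLVM FFI) ordering enum.
    #[derive(Clone, Copy, Debug, PartialEq)]
    enum BackendOrdering { Monotonic, Acquire, Release, AcquireRelease, SequentiallyConsistent }

    impl BackendOrdering {
        // The translation lives entirely in the backend; cg_ssa only ever
        // names its own enum, which is the point of the trait split.
        fn from_generic(order: GenericOrdering) -> Self {
            match order {
                GenericOrdering::Relaxed => BackendOrdering::Monotonic,
                GenericOrdering::Acquire => BackendOrdering::Acquire,
                GenericOrdering::Release => BackendOrdering::Release,
                GenericOrdering::AcqRel => BackendOrdering::AcquireRelease,
                GenericOrdering::SeqCst => BackendOrdering::SequentiallyConsistent,
            }
        }
    }

    fn main() {
        assert_eq!(
            BackendOrdering::from_generic(GenericOrdering::SeqCst),
            BackendOrdering::SequentiallyConsistent
        );
    }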
+ llvm::LLVMSetAlignment(store, size.bytes() as c_uint); } } - fn switch( + fn atomic_cmpxchg( &mut self, - v: &'ll Value, - else_llbb: &'ll BasicBlock, - num_cases: usize, + dst: &'ll Value, + cmp: &'ll Value, + src: &'ll Value, + order: rustc_codegen_ssa::common::AtomicOrdering, + failure_order: rustc_codegen_ssa::common::AtomicOrdering, + weak: bool, ) -> &'ll Value { + let weak = if weak { llvm::True } else { llvm::False }; unsafe { - llvm::LLVMBuildSwitch(self.llbuilder, v, else_llbb, num_cases as c_uint) + llvm::LLVMRustBuildAtomicCmpXchg( + self.llbuilder, + dst, + cmp, + src, + AtomicOrdering::from_generic(order), + AtomicOrdering::from_generic(failure_order), + weak + ) } } - fn invoke( + fn atomic_rmw( &mut self, - llfn: &'ll Value, - args: &[&'ll Value], - then: &'ll BasicBlock, - catch: &'ll BasicBlock, - funclet: Option<&Funclet<'ll>>, + op: rustc_codegen_ssa::common::AtomicRmwBinOp, + dst: &'ll Value, + src: &'ll Value, + order: rustc_codegen_ssa::common::AtomicOrdering, ) -> &'ll Value { - self.count_insn("invoke"); - - debug!("Invoke {:?} with args ({:?})", - llfn, - args); - - let args = self.check_call("invoke", llfn, args); - let bundle = funclet.map(|funclet| funclet.bundle()); - let bundle = bundle.as_ref().map(|b| &*b.raw); - unsafe { - llvm::LLVMRustBuildInvoke(self.llbuilder, - llfn, - args.as_ptr(), - args.len() as c_uint, - then, - catch, - bundle, - noname()) + llvm::LLVMBuildAtomicRMW( + self.llbuilder, + AtomicRmwBinOp::from_generic(op), + dst, + src, + AtomicOrdering::from_generic(order), + False) } } - fn unreachable(&mut self) { - self.count_insn("unreachable"); + fn atomic_fence( + &mut self, + order: rustc_codegen_ssa::common::AtomicOrdering, + scope: rustc_codegen_ssa::common::SynchronizationScope + ) { unsafe { - llvm::LLVMBuildUnreachable(self.llbuilder); + llvm::LLVMRustBuildAtomicFence( + self.llbuilder, + AtomicOrdering::from_generic(order), + SynchronizationScope::from_generic(scope) + ); } } +} - /* Arithmetic */ +impl NumBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { fn add(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("add"); unsafe { @@ -837,6 +785,13 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } } + fn zext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + self.count_insn("zext"); + unsafe { + llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, noname()) + } + } + fn fptoui(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("fptoui"); unsafe { @@ -879,20 +834,6 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } } - fn ptrtoint(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { - self.count_insn("ptrtoint"); - unsafe { - llvm::LLVMBuildPtrToInt(self.llbuilder, val, dest_ty, noname()) - } - } - - fn inttoptr(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { - self.count_insn("inttoptr"); - unsafe { - llvm::LLVMBuildIntToPtr(self.llbuilder, val, dest_ty, noname()) - } - } - fn bitcast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("bitcast"); unsafe { @@ -908,13 +849,6 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } } - fn pointercast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { - self.count_insn("pointercast"); - unsafe { - llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty, noname()) - } - } - /* Comparisons */ fn icmp(&mut self, op: IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("icmp"); @@ -930,6 +864,134 
@@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { llvm::LLVMBuildFCmp(self.llbuilder, op as c_uint, lhs, rhs, noname()) } } +} + +impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { + fn new_block<'b>( + cx: &'a CodegenCx<'ll, 'tcx>, + llfn: &'ll Value, + name: &'b str + ) -> Self { + let mut bx = Builder::with_cx(cx); + let llbb = unsafe { + let name = SmallCStr::new(name); + llvm::LLVMAppendBasicBlockInContext( + cx.llcx, + llfn, + name.as_ptr() + ) + }; + bx.position_at_end(llbb); + bx + } + + fn with_cx(cx: &'a CodegenCx<'ll, 'tcx>) -> Self { + // Create a fresh builder from the crate context. + let llbuilder = unsafe { + llvm::LLVMCreateBuilderInContext(cx.llcx) + }; + Builder { + llbuilder, + cx, + } + } + + fn build_sibling_block<'b>(&self, name: &'b str) -> Self { + Builder::new_block(self.cx, self.llfn(), name) + } + + fn llbb(&self) -> &'ll BasicBlock { + unsafe { + llvm::LLVMGetInsertBlock(self.llbuilder) + } + } + + fn position_at_end(&mut self, llbb: &'ll BasicBlock) { + unsafe { + llvm::LLVMPositionBuilderAtEnd(self.llbuilder, llbb); + } + } + + fn ret_void(&mut self) { + self.count_insn("retvoid"); + unsafe { + llvm::LLVMBuildRetVoid(self.llbuilder); + } + } + + fn ret(&mut self, v: &'ll Value) { + self.count_insn("ret"); + unsafe { + llvm::LLVMBuildRet(self.llbuilder, v); + } + } + + fn br(&mut self, dest: &'ll BasicBlock) { + self.count_insn("br"); + unsafe { + llvm::LLVMBuildBr(self.llbuilder, dest); + } + } + + fn cond_br( + &mut self, + cond: &'ll Value, + then_llbb: &'ll BasicBlock, + else_llbb: &'ll BasicBlock, + ) { + self.count_insn("condbr"); + unsafe { + llvm::LLVMBuildCondBr(self.llbuilder, cond, then_llbb, else_llbb); + } + } + + fn switch( + &mut self, + v: &'ll Value, + else_llbb: &'ll BasicBlock, + num_cases: usize, + ) -> &'ll Value { + unsafe { + llvm::LLVMBuildSwitch(self.llbuilder, v, else_llbb, num_cases as c_uint) + } + } + + fn invoke( + &mut self, + llfn: &'ll Value, + args: &[&'ll Value], + then: &'ll BasicBlock, + catch: &'ll BasicBlock, + funclet: Option<&Funclet<'ll>>, + ) -> &'ll Value { + self.count_insn("invoke"); + + debug!("Invoke {:?} with args ({:?})", + llfn, + args); + + let args = self.check_call("invoke", llfn, args); + let bundle = funclet.map(|funclet| funclet.bundle()); + let bundle = bundle.as_ref().map(|b| &*b.raw); + + unsafe { + llvm::LLVMRustBuildInvoke(self.llbuilder, + llfn, + args.as_ptr(), + args.len() as c_uint, + then, + catch, + bundle, + noname()) + } + } + + fn unreachable(&mut self) { + self.count_insn("unreachable"); + unsafe { + llvm::LLVMBuildUnreachable(self.llbuilder); + } + } /* Miscellaneous instructions */ fn phi(&mut self, ty: &'ll Type, vals: &[&'ll Value], bbs: &[&'ll BasicBlock]) -> &'ll Value { @@ -1129,61 +1191,6 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } } - // Atomic Operations - fn atomic_cmpxchg( - &mut self, - dst: &'ll Value, - cmp: &'ll Value, - src: &'ll Value, - order: rustc_codegen_ssa::common::AtomicOrdering, - failure_order: rustc_codegen_ssa::common::AtomicOrdering, - weak: bool, - ) -> &'ll Value { - let weak = if weak { llvm::True } else { llvm::False }; - unsafe { - llvm::LLVMRustBuildAtomicCmpXchg( - self.llbuilder, - dst, - cmp, - src, - AtomicOrdering::from_generic(order), - AtomicOrdering::from_generic(failure_order), - weak - ) - } - } - fn atomic_rmw( - &mut self, - op: rustc_codegen_ssa::common::AtomicRmwBinOp, - dst: &'ll Value, - src: &'ll Value, - order: rustc_codegen_ssa::common::AtomicOrdering, - ) -> &'ll Value { - unsafe { - 
llvm::LLVMBuildAtomicRMW( - self.llbuilder, - AtomicRmwBinOp::from_generic(op), - dst, - src, - AtomicOrdering::from_generic(order), - False) - } - } - - fn atomic_fence( - &mut self, - order: rustc_codegen_ssa::common::AtomicOrdering, - scope: rustc_codegen_ssa::common::SynchronizationScope - ) { - unsafe { - llvm::LLVMRustBuildAtomicFence( - self.llbuilder, - AtomicOrdering::from_generic(order), - SynchronizationScope::from_generic(scope) - ); - } - } - fn add_case(&mut self, s: &'ll Value, on_val: &'ll Value, dest: &'ll BasicBlock) { unsafe { llvm::LLVMAddCase(s, on_val, dest) @@ -1239,13 +1246,6 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } } - fn zext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { - self.count_insn("zext"); - unsafe { - llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, noname()) - } - } - fn cx(&self) -> &CodegenCx<'ll, 'tcx> { self.cx diff --git a/src/librustc_codegen_llvm/va_arg.rs b/src/librustc_codegen_llvm/va_arg.rs index 024bcd27ef9f5..1c42da94aa551 100644 --- a/src/librustc_codegen_llvm/va_arg.rs +++ b/src/librustc_codegen_llvm/va_arg.rs @@ -3,9 +3,7 @@ use crate::type_::Type; use crate::type_of::LayoutLlvmExt; use crate::value::Value; use rustc_codegen_ssa::mir::operand::OperandRef; -use rustc_codegen_ssa::traits::{ - BaseTypeMethods, BuilderMethods, ConstMethods, DerivedTypeMethods, MemoryBuilderMethods, -}; +use rustc_codegen_ssa::traits::*; use rustc::ty::layout::{Align, HasDataLayout, HasTyCtxt, LayoutOf, Size}; use rustc::ty::Ty; diff --git a/src/librustc_codegen_ssa/traits/builder.rs b/src/librustc_codegen_ssa/traits/builder.rs index 401f2c74000e4..a33ab00ae5763 100644 --- a/src/librustc_codegen_ssa/traits/builder.rs +++ b/src/librustc_codegen_ssa/traits/builder.rs @@ -24,6 +24,7 @@ pub enum OverflowOp { } pub trait MemoryBuilderMethods<'tcx>: HasCodegen<'tcx> { + // Stack allocations fn alloca(&mut self, ty: Self::Type, name: &str, align: Align) -> Self::Value; fn dynamic_alloca(&mut self, ty: Self::Type, name: &str, align: Align) -> Self::Value; fn array_alloca( @@ -36,13 +37,9 @@ pub trait MemoryBuilderMethods<'tcx>: HasCodegen<'tcx> { fn load(&mut self, ptr: Self::Value, align: Align) -> Self::Value; fn volatile_load(&mut self, ptr: Self::Value) -> Self::Value; - fn atomic_load(&mut self, ptr: Self::Value, order: AtomicOrdering, size: Size) -> Self::Value; fn load_operand(&mut self, place: PlaceRef<'tcx, Self::Value>) -> OperandRef<'tcx, Self::Value>; - fn range_metadata(&mut self, load: Self::Value, range: Range); - fn nonnull_metadata(&mut self, load: Self::Value); - fn store(&mut self, val: Self::Value, ptr: Self::Value, align: Align) -> Self::Value; fn store_with_flags( &mut self, @@ -51,18 +48,20 @@ pub trait MemoryBuilderMethods<'tcx>: HasCodegen<'tcx> { align: Align, flags: MemFlags, ) -> Self::Value; - fn atomic_store( - &mut self, - val: Self::Value, - ptr: Self::Value, - order: AtomicOrdering, - size: Size, - ); + // Pointer operations fn gep(&mut self, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value; fn inbounds_gep(&mut self, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value; fn struct_gep(&mut self, ptr: Self::Value, idx: u64) -> Self::Value; + fn pointercast(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn ptrtoint(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn inttoptr(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + + fn range_metadata(&mut self, load: Self::Value, range: Range); + fn nonnull_metadata(&mut self, load: 
Self::Value); + + // Bulk memory operations fn memcpy( &mut self, dst: Self::Value, @@ -89,68 +88,46 @@ pub trait MemoryBuilderMethods<'tcx>: HasCodegen<'tcx> { align: Align, flags: MemFlags, ); -} -pub trait BuilderMethods<'a, 'tcx: 'a>: - HasCodegen<'tcx> - + DebugInfoBuilderMethods<'tcx> - + ArgTypeMethods<'tcx> - + AbiBuilderMethods<'tcx> - + IntrinsicCallMethods<'tcx> - + AsmBuilderMethods<'tcx> - + StaticBuilderMethods<'tcx> - + MemoryBuilderMethods<'tcx> -{ - fn new_block<'b>(cx: &'a Self::CodegenCx, llfn: Self::Value, name: &'b str) -> Self; - fn with_cx(cx: &'a Self::CodegenCx) -> Self; - fn build_sibling_block<'b>(&self, name: &'b str) -> Self; - fn cx(&self) -> &Self::CodegenCx; - fn llbb(&self) -> Self::BasicBlock; - - fn position_at_end(&mut self, llbb: Self::BasicBlock); - fn ret_void(&mut self); - fn ret(&mut self, v: Self::Value); - fn br(&mut self, dest: Self::BasicBlock); - fn cond_br( + // Atomics + fn atomic_load(&mut self, ptr: Self::Value, order: AtomicOrdering, size: Size) -> Self::Value; + fn atomic_store( &mut self, - cond: Self::Value, - then_llbb: Self::BasicBlock, - else_llbb: Self::BasicBlock, + val: Self::Value, + ptr: Self::Value, + order: AtomicOrdering, + size: Size, ); - fn switch( + fn atomic_cmpxchg( &mut self, - v: Self::Value, - else_llbb: Self::BasicBlock, - num_cases: usize, + dst: Self::Value, + cmp: Self::Value, + src: Self::Value, + order: AtomicOrdering, + failure_order: AtomicOrdering, + weak: bool, ) -> Self::Value; - fn invoke( + fn atomic_rmw( &mut self, - llfn: Self::Value, - args: &[Self::Value], - then: Self::BasicBlock, - catch: Self::BasicBlock, - funclet: Option<&Self::Funclet>, + op: AtomicRmwBinOp, + dst: Self::Value, + src: Self::Value, + order: AtomicOrdering, ) -> Self::Value; - fn unreachable(&mut self); + fn atomic_fence(&mut self, order: AtomicOrdering, scope: SynchronizationScope); +} + +pub trait NumBuilderMethods<'tcx>: HasCodegen<'tcx> { + // Integers fn add(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn fadd(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn fadd_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; fn sub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn fsub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn fsub_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; fn mul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn fmul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn fmul_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; fn udiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; fn exactudiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; fn sdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; fn exactsdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn fdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn fdiv_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; fn urem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; fn srem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn frem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn frem_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; fn shl(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; fn lshr(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; fn ashr(&mut self, lhs: Self::Value, rhs: Self::Value) -> 
Self::Value; @@ -171,20 +148,76 @@ pub trait BuilderMethods<'a, 'tcx: 'a>: fn trunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; fn sext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn zext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + + fn bitcast(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn intcast(&mut self, val: Self::Value, dest_ty: Self::Type, is_signed: bool) -> Self::Value; + + fn icmp(&mut self, op: IntPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + + // Floats + fn fadd(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn fadd_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn fsub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn fsub_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn fmul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn fmul_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn fdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn fdiv_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn frem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn frem_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn fptoui(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; fn fptosi(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; fn uitofp(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; fn sitofp(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; fn fptrunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; fn fpext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; - fn ptrtoint(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; - fn inttoptr(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; - fn bitcast(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; - fn intcast(&mut self, val: Self::Value, dest_ty: Self::Type, is_signed: bool) -> Self::Value; - fn pointercast(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; - - fn icmp(&mut self, op: IntPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value; fn fcmp(&mut self, op: RealPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value; +} + +pub trait BuilderMethods<'a, 'tcx: 'a>: + HasCodegen<'tcx> + + DebugInfoBuilderMethods<'tcx> + + ArgTypeMethods<'tcx> + + AbiBuilderMethods<'tcx> + + IntrinsicCallMethods<'tcx> + + AsmBuilderMethods<'tcx> + + StaticBuilderMethods<'tcx> + + MemoryBuilderMethods<'tcx> + + NumBuilderMethods<'tcx> +{ + fn new_block<'b>(cx: &'a Self::CodegenCx, llfn: Self::Value, name: &'b str) -> Self; + fn with_cx(cx: &'a Self::CodegenCx) -> Self; + fn build_sibling_block<'b>(&self, name: &'b str) -> Self; + fn cx(&self) -> &Self::CodegenCx; + fn llbb(&self) -> Self::BasicBlock; + + fn position_at_end(&mut self, llbb: Self::BasicBlock); + fn ret_void(&mut self); + fn ret(&mut self, v: Self::Value); + fn br(&mut self, dest: Self::BasicBlock); + fn cond_br( + &mut self, + cond: Self::Value, + then_llbb: Self::BasicBlock, + else_llbb: Self::BasicBlock, + ); + fn switch( + &mut self, + v: Self::Value, + else_llbb: Self::BasicBlock, + num_cases: usize, + ) -> Self::Value; + fn invoke( + &mut self, + llfn: Self::Value, + args: &[Self::Value], + then: Self::BasicBlock, + catch: Self::BasicBlock, + funclet: Option<&Self::Funclet>, + ) -> Self::Value; + fn 
unreachable(&mut self); fn phi( &mut self, @@ -240,23 +273,6 @@ pub trait BuilderMethods<'a, 'tcx: 'a>: fn add_handler(&mut self, catch_switch: Self::Value, handler: Self::BasicBlock); fn set_personality_fn(&mut self, personality: Self::Value); - fn atomic_cmpxchg( - &mut self, - dst: Self::Value, - cmp: Self::Value, - src: Self::Value, - order: AtomicOrdering, - failure_order: AtomicOrdering, - weak: bool, - ) -> Self::Value; - fn atomic_rmw( - &mut self, - op: AtomicRmwBinOp, - dst: Self::Value, - src: Self::Value, - order: AtomicOrdering, - ) -> Self::Value; - fn atomic_fence(&mut self, order: AtomicOrdering, scope: SynchronizationScope); fn add_case(&mut self, s: Self::Value, on_val: Self::Value, dest: Self::BasicBlock); fn add_incoming_to_phi(&mut self, phi: Self::Value, val: Self::Value, bb: Self::BasicBlock); fn set_invariant_load(&mut self, load: Self::Value); @@ -273,7 +289,6 @@ pub trait BuilderMethods<'a, 'tcx: 'a>: args: &[Self::Value], funclet: Option<&Self::Funclet>, ) -> Self::Value; - fn zext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; unsafe fn delete_basic_block(&mut self, bb: Self::BasicBlock); fn do_not_inline(&mut self, llret: Self::Value); From 5f3346707af45d576cea40b5db79e6bc5aad7048 Mon Sep 17 00:00:00 2001 From: bjorn3 Date: Fri, 7 Dec 2018 18:04:34 +0100 Subject: [PATCH 16/28] Remove scalar_lltypes from cg_ssa --- src/librustc_codegen_llvm/type_.rs | 6 ------ src/librustc_codegen_ssa/traits/type_.rs | 3 --- 2 files changed, 9 deletions(-) diff --git a/src/librustc_codegen_llvm/type_.rs b/src/librustc_codegen_llvm/type_.rs index d5424fa459166..426c70fb2dc58 100644 --- a/src/librustc_codegen_llvm/type_.rs +++ b/src/librustc_codegen_llvm/type_.rs @@ -11,7 +11,6 @@ use rustc_codegen_ssa::traits::*; use crate::common; use crate::type_of::LayoutLlvmExt; use crate::abi::{LlvmType, FnTypeExt}; -use rustc::util::nodemap::FxHashMap; use rustc::ty::Ty; use rustc::ty::layout::TyLayout; use rustc_target::abi::call::{CastTarget, FnType, Reg}; @@ -19,7 +18,6 @@ use rustc_data_structures::small_c_str::SmallCStr; use rustc_codegen_ssa::common::TypeKind; use std::fmt; -use std::cell::RefCell; use std::ptr; use libc::c_uint; @@ -232,10 +230,6 @@ impl BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> { fn val_ty(&self, v: &'ll Value) -> &'ll Type { common::val_ty(v) } - - fn scalar_lltypes(&self) -> &RefCell, Self::Type>> { - &self.scalar_lltypes - } } impl Type { diff --git a/src/librustc_codegen_ssa/traits/type_.rs b/src/librustc_codegen_ssa/traits/type_.rs index 7c5e615f22452..7521df8b8dd4a 100644 --- a/src/librustc_codegen_ssa/traits/type_.rs +++ b/src/librustc_codegen_ssa/traits/type_.rs @@ -5,9 +5,7 @@ use crate::common::{self, TypeKind}; use crate::mir::place::PlaceRef; use rustc::ty::layout::{self, Align, Size, TyLayout}; use rustc::ty::{self, Ty}; -use rustc::util::nodemap::FxHashMap; use rustc_target::abi::call::{ArgType, CastTarget, FnType, Reg}; -use std::cell::RefCell; use syntax::ast; // This depends on `Backend` and not `BackendTypes`, because consumers will probably want to use @@ -49,7 +47,6 @@ pub trait BaseTypeMethods<'tcx>: Backend<'tcx> { fn int_width(&self, ty: Self::Type) -> u64; fn val_ty(&self, v: Self::Value) -> Self::Type; - fn scalar_lltypes(&self) -> &RefCell, Self::Type>>; } pub trait DerivedTypeMethods<'tcx>: BaseTypeMethods<'tcx> + MiscMethods<'tcx> { From d16358230a058750949dd2c52c3fc95cc37e367e Mon Sep 17 00:00:00 2001 From: bjorn3 Date: Fri, 7 Dec 2018 18:28:31 +0100 Subject: [PATCH 17/28] Remove a lot of methods from *TypeMethods 
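
The recurring move in this patch: helpers that only the LLVM backend calls
are demoted from the backend-agnostic `*TypeMethods` traits to inherent
`crate fn`s on `CodegenCx`, so other backends no longer have to implement
them. A minimal sketch of the shape of the change, abbreviated from the
diff that follows (not the complete impl):

    // Before: part of the shared trait, so every backend must provide it.
    pub trait BaseTypeMethods<'tcx>: Backend<'tcx> {
        fn type_x86_mmx(&self) -> Self::Type;
    }

    // After: an inherent, crate-visible helper on the LLVM-only context.
    impl CodegenCx<'ll, 'tcx> {
        crate fn type_x86_mmx(&self) -> &'ll Type {
            unsafe { llvm::LLVMX86MMXTypeInContext(self.llcx) }
        }
    }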
---
 src/librustc_codegen_llvm/type_.rs | 120 ++++++++++++++++-------
 src/librustc_codegen_ssa/common.rs | 16 +--
 src/librustc_codegen_ssa/traits/type_.rs | 70 ++-----------
 3 files changed, 92 insertions(+), 114 deletions(-)

diff --git a/src/librustc_codegen_llvm/type_.rs b/src/librustc_codegen_llvm/type_.rs
index 426c70fb2dc58..bcd90aeceb647 100644
--- a/src/librustc_codegen_llvm/type_.rs
+++ b/src/librustc_codegen_llvm/type_.rs
@@ -11,8 +11,9 @@ use rustc_codegen_ssa::traits::*;
 use crate::common;
 use crate::type_of::LayoutLlvmExt;
 use crate::abi::{LlvmType, FnTypeExt};
+use syntax::ast;
 use rustc::ty::Ty;
-use rustc::ty::layout::TyLayout;
+use rustc::ty::layout::{self, Align, Size, TyLayout};
 use rustc_target::abi::call::{CastTarget, FnType, Reg};
 use rustc_data_structures::small_c_str::SmallCStr;
 use rustc_codegen_ssa::common::TypeKind;
@@ -50,21 +51,99 @@ impl CodegenCx<'ll, 'tcx> {
 els.len() as c_uint, packed as Bool)
 }
 }
-}
-impl BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> {
- fn type_void(&self) -> &'ll Type {
+ crate fn type_void(&self) -> &'ll Type {
 unsafe {
 llvm::LLVMVoidTypeInContext(self.llcx)
 }
 }

- fn type_metadata(&self) -> &'ll Type {
+ crate fn type_metadata(&self) -> &'ll Type {
 unsafe {
 llvm::LLVMRustMetadataTypeInContext(self.llcx)
 }
 }

+ /// Creates an integer type with the given number of bits, e.g., i24
+ crate fn type_ix(&self, num_bits: u64) -> &'ll Type {
+ unsafe {
+ llvm::LLVMIntTypeInContext(self.llcx, num_bits as c_uint)
+ }
+ }
+
+ crate fn type_x86_mmx(&self) -> &'ll Type {
+ unsafe {
+ llvm::LLVMX86MMXTypeInContext(self.llcx)
+ }
+ }
+
+ crate fn type_vector(&self, ty: &'ll Type, len: u64) -> &'ll Type {
+ unsafe {
+ llvm::LLVMVectorType(ty, len as c_uint)
+ }
+ }
+
+ crate fn func_params_types(&self, ty: &'ll Type) -> Vec<&'ll Type> {
+ unsafe {
+ let n_args = llvm::LLVMCountParamTypes(ty) as usize;
+ let mut args = Vec::with_capacity(n_args);
+ llvm::LLVMGetParamTypes(ty, args.as_mut_ptr());
+ args.set_len(n_args);
+ args
+ }
+ }
+
+ crate fn type_bool(&self) -> &'ll Type {
+ self.type_i8()
+ }
+
+ crate fn type_int_from_ty(&self, t: ast::IntTy) -> &'ll Type {
+ match t {
+ ast::IntTy::Isize => self.type_isize(),
+ ast::IntTy::I8 => self.type_i8(),
+ ast::IntTy::I16 => self.type_i16(),
+ ast::IntTy::I32 => self.type_i32(),
+ ast::IntTy::I64 => self.type_i64(),
+ ast::IntTy::I128 => self.type_i128(),
+ }
+ }
+
+ crate fn type_uint_from_ty(&self, t: ast::UintTy) -> &'ll Type {
+ match t {
+ ast::UintTy::Usize => self.type_isize(),
+ ast::UintTy::U8 => self.type_i8(),
+ ast::UintTy::U16 => self.type_i16(),
+ ast::UintTy::U32 => self.type_i32(),
+ ast::UintTy::U64 => self.type_i64(),
+ ast::UintTy::U128 => self.type_i128(),
+ }
+ }
+
+ crate fn type_float_from_ty(&self, t: ast::FloatTy) -> &'ll Type {
+ match t {
+ ast::FloatTy::F32 => self.type_f32(),
+ ast::FloatTy::F64 => self.type_f64(),
+ }
+ }
+
+ crate fn type_pointee_for_align(&self, align: Align) -> &'ll Type {
+ // FIXME(eddyb) We could find a better approximation if ity.align < align.
+ let ity = layout::Integer::approximate_align(self, align);
+ self.type_from_integer(ity)
+ }
+
+ /// Return a LLVM type that has at most the required alignment,
+ /// and exactly the required size, as a best-effort padding array.
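+ ///
+ /// For example, with `align` = 8 (so the unit is `i64` on targets where
+ /// `i64` is 8-byte aligned) and `size` = 24 bytes, this produces
+ /// `[3 x i64]`; `size` must be a multiple of the unit size or the
+ /// `assert_eq!` below fails.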
+ crate fn type_padding_filler(&self, size: Size, align: Align) -> &'ll Type { + let unit = layout::Integer::approximate_align(self, align); + let size = size.bytes(); + let unit_size = unit.size().bytes(); + assert_eq!(size % unit_size, 0); + self.type_array(self.type_from_integer(unit), size / unit_size) + } +} + +impl BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> { fn type_i1(&self) -> &'ll Type { unsafe { llvm::LLVMInt1TypeInContext(self.llcx) @@ -102,12 +181,6 @@ impl BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> { } } - fn type_ix(&self, num_bits: u64) -> &'ll Type { - unsafe { - llvm::LLVMIntTypeInContext(self.llcx, num_bits as c_uint) - } - } - fn type_isize(&self) -> &'ll Type { self.isize_ty } @@ -124,12 +197,6 @@ impl BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> { } } - fn type_x86_mmx(&self) -> &'ll Type { - unsafe { - llvm::LLVMX86MMXTypeInContext(self.llcx) - } - } - fn type_func( &self, args: &[&'ll Type], @@ -171,12 +238,6 @@ impl BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> { } } - fn type_vector(&self, ty: &'ll Type, len: u64) -> &'ll Type { - unsafe { - llvm::LLVMVectorType(ty, len as c_uint) - } - } - fn type_kind(&self, ty: &'ll Type) -> TypeKind { unsafe { llvm::LLVMRustGetTypeKind(ty).to_generic() @@ -201,16 +262,6 @@ impl BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> { } } - fn func_params_types(&self, ty: &'ll Type) -> Vec<&'ll Type> { - unsafe { - let n_args = llvm::LLVMCountParamTypes(ty) as usize; - let mut args = Vec::with_capacity(n_args); - llvm::LLVMGetParamTypes(ty, args.as_mut_ptr()); - args.set_len(n_args); - args - } - } - fn float_width(&self, ty: &'ll Type) -> usize { match self.type_kind(ty) { TypeKind::Float => 32, @@ -288,9 +339,6 @@ impl LayoutTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> { fn cast_backend_type(&self, ty: &CastTarget) -> &'ll Type { ty.llvm_type(self) } - fn fn_backend_type(&self, ty: &FnType<'tcx, Ty<'tcx>>) -> &'ll Type { - ty.llvm_type(self) - } fn fn_ptr_backend_type(&self, ty: &FnType<'tcx, Ty<'tcx>>) -> &'ll Type { ty.ptr_to_llvm_type(self) } diff --git a/src/librustc_codegen_ssa/common.rs b/src/librustc_codegen_ssa/common.rs index db77074deef94..0e1885fe29ba6 100644 --- a/src/librustc_codegen_ssa/common.rs +++ b/src/librustc_codegen_ssa/common.rs @@ -1,7 +1,7 @@ #![allow(non_camel_case_types, non_snake_case)] -use rustc::ty::{self, Ty, TyCtxt}; -use syntax_pos::{DUMMY_SP, Span}; +use rustc::ty::{Ty, TyCtxt}; +use syntax_pos::Span; use rustc::hir::def_id::DefId; use rustc::middle::lang_items::LangItem; @@ -11,18 +11,6 @@ use crate::traits::*; use rustc::hir; use crate::traits::BuilderMethods; -pub fn type_needs_drop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { - ty.needs_drop(tcx, ty::ParamEnv::reveal_all()) -} - -pub fn type_is_sized<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { - ty.is_sized(tcx.at(DUMMY_SP), ty::ParamEnv::reveal_all()) -} - -pub fn type_is_freeze<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { - ty.is_freeze(tcx, ty::ParamEnv::reveal_all(), DUMMY_SP) -} - pub enum IntPredicate { IntEQ, IntNE, diff --git a/src/librustc_codegen_ssa/traits/type_.rs b/src/librustc_codegen_ssa/traits/type_.rs index 7521df8b8dd4a..b1b0a277d8d02 100644 --- a/src/librustc_codegen_ssa/traits/type_.rs +++ b/src/librustc_codegen_ssa/traits/type_.rs @@ -1,38 +1,31 @@ use super::misc::MiscMethods; use super::Backend; use super::HasCodegen; -use crate::common::{self, TypeKind}; +use crate::common::TypeKind; use crate::mir::place::PlaceRef; -use rustc::ty::layout::{self, Align, Size, 
TyLayout}; use rustc::ty::{self, Ty}; +use rustc::ty::layout::{self, TyLayout}; use rustc_target::abi::call::{ArgType, CastTarget, FnType, Reg}; -use syntax::ast; +use syntax_pos::DUMMY_SP; // This depends on `Backend` and not `BackendTypes`, because consumers will probably want to use // `LayoutOf` or `HasTyCtxt`. This way, they don't have to add a constraint on it themselves. pub trait BaseTypeMethods<'tcx>: Backend<'tcx> { - fn type_void(&self) -> Self::Type; - fn type_metadata(&self) -> Self::Type; fn type_i1(&self) -> Self::Type; fn type_i8(&self) -> Self::Type; fn type_i16(&self) -> Self::Type; fn type_i32(&self) -> Self::Type; fn type_i64(&self) -> Self::Type; fn type_i128(&self) -> Self::Type; - - // Creates an integer type with the given number of bits, e.g., i24 - fn type_ix(&self, num_bits: u64) -> Self::Type; fn type_isize(&self) -> Self::Type; fn type_f32(&self) -> Self::Type; fn type_f64(&self) -> Self::Type; - fn type_x86_mmx(&self) -> Self::Type; fn type_func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type; fn type_variadic_func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type; fn type_struct(&self, els: &[Self::Type], packed: bool) -> Self::Type; fn type_array(&self, ty: Self::Type, len: u64) -> Self::Type; - fn type_vector(&self, ty: Self::Type, len: u64) -> Self::Type; fn type_kind(&self, ty: Self::Type) -> TypeKind; fn type_ptr_to(&self, ty: Self::Type) -> Self::Type; fn element_type(&self, ty: Self::Type) -> Self::Type; @@ -40,7 +33,6 @@ pub trait BaseTypeMethods<'tcx>: Backend<'tcx> { /// Returns the number of elements in `self` if it is a LLVM vector type. fn vector_length(&self, ty: Self::Type) -> usize; - fn func_params_types(&self, ty: Self::Type) -> Vec; fn float_width(&self, ty: Self::Type) -> usize; /// Retrieves the bit width of the integer type `self`. @@ -50,10 +42,6 @@ pub trait BaseTypeMethods<'tcx>: Backend<'tcx> { } pub trait DerivedTypeMethods<'tcx>: BaseTypeMethods<'tcx> + MiscMethods<'tcx> { - fn type_bool(&self) -> Self::Type { - self.type_i8() - } - fn type_i8p(&self) -> Self::Type { self.type_ptr_to(self.type_i8()) } @@ -67,35 +55,6 @@ pub trait DerivedTypeMethods<'tcx>: BaseTypeMethods<'tcx> + MiscMethods<'tcx> { } } - fn type_int_from_ty(&self, t: ast::IntTy) -> Self::Type { - match t { - ast::IntTy::Isize => self.type_isize(), - ast::IntTy::I8 => self.type_i8(), - ast::IntTy::I16 => self.type_i16(), - ast::IntTy::I32 => self.type_i32(), - ast::IntTy::I64 => self.type_i64(), - ast::IntTy::I128 => self.type_i128(), - } - } - - fn type_uint_from_ty(&self, t: ast::UintTy) -> Self::Type { - match t { - ast::UintTy::Usize => self.type_isize(), - ast::UintTy::U8 => self.type_i8(), - ast::UintTy::U16 => self.type_i16(), - ast::UintTy::U32 => self.type_i32(), - ast::UintTy::U64 => self.type_i64(), - ast::UintTy::U128 => self.type_i128(), - } - } - - fn type_float_from_ty(&self, t: ast::FloatTy) -> Self::Type { - match t { - ast::FloatTy::F32 => self.type_f32(), - ast::FloatTy::F64 => self.type_f64(), - } - } - fn type_from_integer(&self, i: layout::Integer) -> Self::Type { use rustc::ty::layout::Integer::*; match i { @@ -107,32 +66,16 @@ pub trait DerivedTypeMethods<'tcx>: BaseTypeMethods<'tcx> + MiscMethods<'tcx> { } } - fn type_pointee_for_align(&self, align: Align) -> Self::Type { - // FIXME(eddyb) We could find a better approximation if ity.align < align. 
- let ity = layout::Integer::approximate_align(self, align);
- self.type_from_integer(ity)
- }
-
- /// Return a LLVM type that has at most the required alignment,
- /// and exactly the required size, as a best-effort padding array.
- fn type_padding_filler(&self, size: Size, align: Align) -> Self::Type {
- let unit = layout::Integer::approximate_align(self, align);
- let size = size.bytes();
- let unit_size = unit.size().bytes();
- assert_eq!(size % unit_size, 0);
- self.type_array(self.type_from_integer(unit), size / unit_size)
- }
-
 fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool {
- common::type_needs_drop(self.tcx(), ty)
+ ty.needs_drop(self.tcx(), ty::ParamEnv::reveal_all())
 }

 fn type_is_sized(&self, ty: Ty<'tcx>) -> bool {
- common::type_is_sized(self.tcx(), ty)
+ ty.is_sized(self.tcx().at(DUMMY_SP), ty::ParamEnv::reveal_all())
 }

 fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool {
- common::type_is_freeze(self.tcx(), ty)
+ ty.is_freeze(self.tcx(), ty::ParamEnv::reveal_all(), DUMMY_SP)
 }

 fn type_has_metadata(&self, ty: Ty<'tcx>) -> bool {
@@ -155,7 +98,6 @@ impl DerivedTypeMethods<'tcx> for T where Self: BaseTypeMethods<'tcx> + MiscM
pub trait LayoutTypeMethods<'tcx>: Backend<'tcx> {
 fn backend_type(&self, layout: TyLayout<'tcx>) -> Self::Type;
 fn cast_backend_type(&self, ty: &CastTarget) -> Self::Type;
- fn fn_backend_type(&self, ty: &FnType<'tcx, Ty<'tcx>>) -> Self::Type;
 fn fn_ptr_backend_type(&self, ty: &FnType<'tcx, Ty<'tcx>>) -> Self::Type;
 fn reg_backend_type(&self, ty: &Reg) -> Self::Type;
 fn immediate_backend_type(&self, layout: TyLayout<'tcx>) -> Self::Type;

From e19c4b93cfa197c3d7a744ce22b01fe5f778a62a Mon Sep 17 00:00:00 2001
From: bjorn3
Date: Sat, 8 Dec 2018 11:48:43 +0100
Subject: [PATCH 18/28] Remove type_variadic_func and type_array from cg_ssa

---
 src/librustc_codegen_llvm/type_.rs | 35 ++++++++++++------------
 src/librustc_codegen_ssa/traits/type_.rs | 2 --
 2 files changed, 17 insertions(+), 20 deletions(-)

diff --git a/src/librustc_codegen_llvm/type_.rs b/src/librustc_codegen_llvm/type_.rs
index bcd90aeceb647..a5ed64a66a39d 100644
--- a/src/librustc_codegen_llvm/type_.rs
+++ b/src/librustc_codegen_llvm/type_.rs
@@ -141,6 +141,23 @@ impl CodegenCx<'ll, 'tcx> {
 assert_eq!(size % unit_size, 0);
 self.type_array(self.type_from_integer(unit), size / unit_size)
 }
+
+ crate fn type_variadic_func(
+ &self,
+ args: &[&'ll Type],
+ ret: &'ll Type
+ ) -> &'ll Type {
+ unsafe {
+ llvm::LLVMFunctionType(ret, args.as_ptr(),
+ args.len() as c_uint, True)
+ }
+ }
+
+ crate fn type_array(&self, ty: &'ll Type, len: u64) -> &'ll Type {
+ unsafe {
+ llvm::LLVMRustArrayType(ty, len)
+ }
+ }
}

impl BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> {
@@ -208,17 +225,6 @@ impl BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> {
 }
 }

- fn type_variadic_func(
- &self,
- args: &[&'ll Type],
- ret: &'ll Type
- ) -> &'ll Type {
- unsafe {
- llvm::LLVMFunctionType(ret, args.as_ptr(),
- args.len() as c_uint, True)
- }
- }
-
 fn type_struct(
 &self,
 els: &[&'ll Type],
@@ -231,13 +237,6 @@ impl BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> {
 }
 }
-
- fn type_array(&self, ty: &'ll Type, len: u64) -> &'ll Type {
- unsafe {
- llvm::LLVMRustArrayType(ty, len)
- }
- }
-
 fn type_kind(&self, ty: &'ll Type) -> TypeKind {
 unsafe {
 llvm::LLVMRustGetTypeKind(ty).to_generic()

diff --git a/src/librustc_codegen_ssa/traits/type_.rs b/src/librustc_codegen_ssa/traits/type_.rs
index b1b0a277d8d02..8f690ca726386 100644
--- a/src/librustc_codegen_ssa/traits/type_.rs
+++ b/src/librustc_codegen_ssa/traits/type_.rs
@@ -23,9 +23,7 @@
pub trait BaseTypeMethods<'tcx>: Backend<'tcx> { fn type_f64(&self) -> Self::Type; fn type_func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type; - fn type_variadic_func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type; fn type_struct(&self, els: &[Self::Type], packed: bool) -> Self::Type; - fn type_array(&self, ty: Self::Type, len: u64) -> Self::Type; fn type_kind(&self, ty: Self::Type) -> TypeKind; fn type_ptr_to(&self, ty: Self::Type) -> Self::Type; fn element_type(&self, ty: Self::Type) -> Self::Type; From a0713ccd675a9ca2bba479be9fd1e5078d477683 Mon Sep 17 00:00:00 2001 From: bjorn3 Date: Sat, 8 Dec 2018 12:30:44 +0100 Subject: [PATCH 19/28] Relax some trait constraints --- src/librustc_codegen_llvm/builder.rs | 7 +------ src/librustc_codegen_ssa/base.rs | 12 +++++++----- src/librustc_codegen_ssa/traits/builder.rs | 1 - src/librustc_codegen_ssa/traits/mod.rs | 7 ++++++- 4 files changed, 14 insertions(+), 13 deletions(-) diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index 4b52bf4852413..1ddb6e8029168 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -1246,11 +1246,6 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } } - - fn cx(&self) -> &CodegenCx<'ll, 'tcx> { - self.cx - } - unsafe fn delete_basic_block(&mut self, bb: &'ll BasicBlock) { llvm::LLVMDeleteBasicBlock(bb); } @@ -1261,7 +1256,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } impl StaticBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { -fn get_static(&mut self, def_id: DefId) -> &'ll Value { + fn get_static(&mut self, def_id: DefId) -> &'ll Value { // Forward to the `get_static` method of `CodegenCx` self.cx().get_static(def_id) } diff --git a/src/librustc_codegen_ssa/base.rs b/src/librustc_codegen_ssa/base.rs index 50ff61da6850d..6c81428bd6426 100644 --- a/src/librustc_codegen_ssa/base.rs +++ b/src/librustc_codegen_ssa/base.rs @@ -356,7 +356,7 @@ pub fn wants_msvc_seh(sess: &Session) -> bool { sess.target.target.options.is_like_msvc } -pub fn from_immediate<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( +pub fn from_immediate<'a, 'tcx: 'a, Bx: NumBuilderMethods<'tcx>>( bx: &mut Bx, val: Bx::Value ) -> Bx::Value { @@ -367,7 +367,7 @@ pub fn from_immediate<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( } } -pub fn to_immediate<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( +pub fn to_immediate<'a, 'tcx: 'a, Bx: NumBuilderMethods<'tcx>>( bx: &mut Bx, val: Bx::Value, layout: layout::TyLayout<'_>, @@ -378,7 +378,7 @@ pub fn to_immediate<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( val } -pub fn to_immediate_scalar<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( +pub fn to_immediate_scalar<'a, 'tcx: 'a, Bx: NumBuilderMethods<'tcx>>( bx: &mut Bx, val: Bx::Value, scalar: &layout::Scalar, @@ -389,7 +389,7 @@ pub fn to_immediate_scalar<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( val } -pub fn memcpy_ty<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( +pub fn memcpy_ty<'a, 'tcx: 'a, Bx: MemoryBuilderMethods<'tcx>>( bx: &mut Bx, dst: Bx::Value, dst_align: Align, @@ -397,7 +397,9 @@ pub fn memcpy_ty<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( src_align: Align, layout: TyLayout<'tcx>, flags: MemFlags, -) { +) + where Bx::CodegenCx: ConstMethods<'tcx> +{ let size = layout.size.bytes(); if size == 0 { return; diff --git a/src/librustc_codegen_ssa/traits/builder.rs b/src/librustc_codegen_ssa/traits/builder.rs index a33ab00ae5763..7b6055369dfd8 100644 --- a/src/librustc_codegen_ssa/traits/builder.rs +++ 
b/src/librustc_codegen_ssa/traits/builder.rs
@@ -190,7 +190,6 @@ pub trait BuilderMethods<'a, 'tcx: 'a>:
 fn new_block<'b>(cx: &'a Self::CodegenCx, llfn: Self::Value, name: &'b str) -> Self;
 fn with_cx(cx: &'a Self::CodegenCx) -> Self;
 fn build_sibling_block<'b>(&self, name: &'b str) -> Self;
- fn cx(&self) -> &Self::CodegenCx;
 fn llbb(&self) -> Self::BasicBlock;

 fn position_at_end(&mut self, llbb: Self::BasicBlock);
diff --git a/src/librustc_codegen_ssa/traits/mod.rs b/src/librustc_codegen_ssa/traits/mod.rs
index 58b4a299d16c4..04eca25ef98f4 100644
--- a/src/librustc_codegen_ssa/traits/mod.rs
+++ b/src/librustc_codegen_ssa/traits/mod.rs
@@ -85,5 +85,10 @@ pub trait HasCodegen<'tcx>:
 Type = Self::Type,
 Funclet = Self::Funclet,
 DIScope = Self::DIScope,
- >;
+ >
+ + BaseTypeMethods<'tcx>;
+
+ fn cx(&self) -> &Self::CodegenCx {
+ &**self
+ }
 }

From 14c349235e2e70d689b789072b1d5c5ab50aea67 Mon Sep 17 00:00:00 2001
From: bjorn3
Date: Sat, 8 Dec 2018 15:22:45 +0100
Subject: [PATCH 20/28] Move the loop of Rvalue::Repeat codegen out of cg_ssa

This way a codegen backend doesn't need to provide a phi instruction,
which backends other than LLVM may not support easily: Cranelift, for
example, uses basic block parameters instead.

---
 src/librustc_codegen_llvm/builder.rs | 74 ++++++++++++++++------
 src/librustc_codegen_ssa/mir/place.rs | 2 +-
 src/librustc_codegen_ssa/mir/rvalue.rs | 27 +-------
 src/librustc_codegen_ssa/traits/builder.rs | 15 +++--
 4 files changed, 65 insertions(+), 53 deletions(-)

diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs
index 1ddb6e8029168..ee7fdd657294e 100644
--- a/src/librustc_codegen_llvm/builder.rs
+++ b/src/librustc_codegen_llvm/builder.rs
@@ -395,6 +395,38 @@ impl MemoryBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
 self.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None);
 }

+ fn write_operand_repeatedly(
+ mut self,
+ cg_elem: OperandRef<'tcx, &'ll Value>,
+ count: u64,
+ dest: PlaceRef<'tcx, &'ll Value>,
+ ) -> Self {
+ let zero = self.const_usize(0);
+ let count = self.const_usize(count);
+ let start = dest.project_index(&mut self, zero).llval;
+ let end = dest.project_index(&mut self, count).llval;
+
+ let mut header_bx = self.build_sibling_block("repeat_loop_header");
+ let mut body_bx = self.build_sibling_block("repeat_loop_body");
+ let next_bx = self.build_sibling_block("repeat_loop_next");
+
+ self.br(header_bx.llbb());
+ let current = header_bx.phi(self.val_ty(start), &[start], &[self.llbb()]);
+
+ let keep_going = header_bx.icmp(IntPredicate::IntNE, current, end);
+ header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb());
+
+ let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
+ cg_elem.val.store(&mut body_bx,
+ PlaceRef::new_sized(current, cg_elem.layout, align));
+
+ let next = body_bx.inbounds_gep(current, &[self.const_usize(1)]);
+ body_bx.br(header_bx.llbb());
+ header_bx.add_incoming_to_phi(current, next, body_bx.llbb());
+
+ next_bx
+ }
+
 fn atomic_load(
 &mut self,
 ptr: &'ll Value,
@@ -994,20 +1026,6 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
 }

 /* Miscellaneous instructions */
- fn phi(&mut self, ty: &'ll Type, vals: &[&'ll Value], bbs: &[&'ll BasicBlock]) -> &'ll Value {
- self.count_insn("addincoming");
- assert_eq!(vals.len(), bbs.len());
- let phi = unsafe {
- llvm::LLVMBuildPhi(self.llbuilder, ty, noname())
- };
- unsafe {
- llvm::LLVMAddIncoming(phi, vals.as_ptr(),
- bbs.as_ptr(),
- vals.len() as c_uint);
- phi
- }
- }
-
fn inline_asm_call(&mut self, asm: &CStr, cons: &CStr, inputs: &[&'ll Value], output: &'ll Type, volatile: bool, alignstack: bool, @@ -1197,13 +1215,6 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } } - fn add_incoming_to_phi(&mut self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock) { - self.count_insn("addincoming"); - unsafe { - llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint); - } - } - fn set_invariant_load(&mut self, load: &'ll Value) { unsafe { llvm::LLVMSetMetadata(load, llvm::MD_invariant_load as c_uint, @@ -1518,4 +1529,25 @@ impl Builder<'a, 'll, 'tcx> { let ptr = self.pointercast(ptr, self.cx.type_i8p()); self.call(lifetime_intrinsic, &[self.cx.const_u64(size), ptr], None); } + + fn phi(&mut self, ty: &'ll Type, vals: &[&'ll Value], bbs: &[&'ll BasicBlock]) -> &'ll Value { + self.count_insn("addincoming"); + assert_eq!(vals.len(), bbs.len()); + let phi = unsafe { + llvm::LLVMBuildPhi(self.llbuilder, ty, noname()) + }; + unsafe { + llvm::LLVMAddIncoming(phi, vals.as_ptr(), + bbs.as_ptr(), + vals.len() as c_uint); + phi + } + } + + fn add_incoming_to_phi(&mut self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock) { + self.count_insn("addincoming"); + unsafe { + llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint); + } + } } diff --git a/src/librustc_codegen_ssa/mir/place.rs b/src/librustc_codegen_ssa/mir/place.rs index df2aca417b75c..1951246b6fff6 100644 --- a/src/librustc_codegen_ssa/mir/place.rs +++ b/src/librustc_codegen_ssa/mir/place.rs @@ -335,7 +335,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { } } - pub fn project_index>( + pub fn project_index>( &self, bx: &mut Bx, llindex: V diff --git a/src/librustc_codegen_ssa/mir/rvalue.rs b/src/librustc_codegen_ssa/mir/rvalue.rs index c856af2fc1161..9d0d0b5259143 100644 --- a/src/librustc_codegen_ssa/mir/rvalue.rs +++ b/src/librustc_codegen_ssa/mir/rvalue.rs @@ -87,10 +87,10 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { if dest.layout.is_zst() { return bx; } - let zero = bx.const_usize(0); - let start = dest.project_index(&mut bx, zero).llval; if let OperandValue::Immediate(v) = cg_elem.val { + let zero = bx.const_usize(0); + let start = dest.project_index(&mut bx, zero).llval; let size = bx.const_usize(dest.layout.size.bytes()); // Use llvm.memset.p0i8.* to initialize all zero arrays @@ -108,28 +108,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } } - let count = bx.const_usize(count); - let end = dest.project_index(&mut bx, count).llval; - - let mut header_bx = bx.build_sibling_block("repeat_loop_header"); - let mut body_bx = bx.build_sibling_block("repeat_loop_body"); - let next_bx = bx.build_sibling_block("repeat_loop_next"); - - bx.br(header_bx.llbb()); - let current = header_bx.phi(bx.val_ty(start), &[start], &[bx.llbb()]); - - let keep_going = header_bx.icmp(IntPredicate::IntNE, current, end); - header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb()); - - let align = dest.align.restrict_for_offset(dest.layout.field(bx.cx(), 0).size); - cg_elem.val.store(&mut body_bx, - PlaceRef::new_sized(current, cg_elem.layout, align)); - - let next = body_bx.inbounds_gep(current, &[bx.const_usize(1)]); - body_bx.br(header_bx.llbb()); - header_bx.add_incoming_to_phi(current, next, body_bx.llbb()); - - next_bx + bx.write_operand_repeatedly(cg_elem, count, dest) } mir::Rvalue::Aggregate(ref kind, ref operands) => { diff --git a/src/librustc_codegen_ssa/traits/builder.rs b/src/librustc_codegen_ssa/traits/builder.rs index 
7b6055369dfd8..d430590d532ba 100644 --- a/src/librustc_codegen_ssa/traits/builder.rs +++ b/src/librustc_codegen_ssa/traits/builder.rs @@ -89,6 +89,14 @@ pub trait MemoryBuilderMethods<'tcx>: HasCodegen<'tcx> { flags: MemFlags, ); + /// Called for Rvalue::Repeat when the elem is neither a ZST nor optimizable using memset. + fn write_operand_repeatedly( + self, + elem: OperandRef<'tcx, Self::Value>, + count: u64, + dest: PlaceRef<'tcx, Self::Value>, + ) -> Self; + // Atomics fn atomic_load(&mut self, ptr: Self::Value, order: AtomicOrdering, size: Size) -> Self::Value; fn atomic_store( @@ -218,12 +226,6 @@ pub trait BuilderMethods<'a, 'tcx: 'a>: ) -> Self::Value; fn unreachable(&mut self); - fn phi( - &mut self, - ty: Self::Type, - vals: &[Self::Value], - bbs: &[Self::BasicBlock], - ) -> Self::Value; fn inline_asm_call( &mut self, asm: &CStr, @@ -273,7 +275,6 @@ pub trait BuilderMethods<'a, 'tcx: 'a>: fn set_personality_fn(&mut self, personality: Self::Value); fn add_case(&mut self, s: Self::Value, on_val: Self::Value, dest: Self::BasicBlock); - fn add_incoming_to_phi(&mut self, phi: Self::Value, val: Self::Value, bb: Self::BasicBlock); fn set_invariant_load(&mut self, load: Self::Value); /// Called for `StorageLive` From 97e43a43c8881087551bb687378abe48836ab369 Mon Sep 17 00:00:00 2001 From: bjorn3 Date: Sat, 8 Dec 2018 15:39:57 +0100 Subject: [PATCH 21/28] Move unwinding related methods to UnwindBuilderMethods --- src/librustc_codegen_llvm/builder.rs | 242 +++++++++++---------- src/librustc_codegen_ssa/traits/builder.rs | 67 +++--- 2 files changed, 157 insertions(+), 152 deletions(-) diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index ee7fdd657294e..2266bf462d7e7 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -898,6 +898,128 @@ impl NumBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { } } +impl UnwindBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { + fn invoke( + &mut self, + llfn: &'ll Value, + args: &[&'ll Value], + then: &'ll BasicBlock, + catch: &'ll BasicBlock, + funclet: Option<&Funclet<'ll>>, + ) -> &'ll Value { + self.count_insn("invoke"); + + debug!("Invoke {:?} with args ({:?})", + llfn, + args); + + let args = self.check_call("invoke", llfn, args); + let bundle = funclet.map(|funclet| funclet.bundle()); + let bundle = bundle.as_ref().map(|b| &*b.raw); + + unsafe { + llvm::LLVMRustBuildInvoke(self.llbuilder, + llfn, + args.as_ptr(), + args.len() as c_uint, + then, + catch, + bundle, + noname()) + } + } + + fn landing_pad(&mut self, ty: &'ll Type, pers_fn: &'ll Value, + num_clauses: usize) -> &'ll Value { + self.count_insn("landingpad"); + unsafe { + llvm::LLVMBuildLandingPad(self.llbuilder, ty, pers_fn, + num_clauses as c_uint, noname()) + } + } + + fn set_cleanup(&mut self, landing_pad: &'ll Value) { + self.count_insn("setcleanup"); + unsafe { + llvm::LLVMSetCleanup(landing_pad, llvm::True); + } + } + + fn resume(&mut self, exn: &'ll Value) -> &'ll Value { + self.count_insn("resume"); + unsafe { + llvm::LLVMBuildResume(self.llbuilder, exn) + } + } + + fn cleanup_pad(&mut self, + parent: Option<&'ll Value>, + args: &[&'ll Value]) -> Funclet<'ll> { + self.count_insn("cleanuppad"); + let name = const_cstr!("cleanuppad"); + let ret = unsafe { + llvm::LLVMRustBuildCleanupPad(self.llbuilder, + parent, + args.len() as c_uint, + args.as_ptr(), + name.as_ptr()) + }; + Funclet::new(ret.expect("LLVM does not have support for cleanuppad")) + } + + fn cleanup_ret( + &mut self, funclet: &Funclet<'ll>, 
+ unwind: Option<&'ll BasicBlock>, + ) -> &'ll Value { + self.count_insn("cleanupret"); + let ret = unsafe { + llvm::LLVMRustBuildCleanupRet(self.llbuilder, funclet.cleanuppad(), unwind) + }; + ret.expect("LLVM does not have support for cleanupret") + } + + fn catch_pad(&mut self, + parent: &'ll Value, + args: &[&'ll Value]) -> Funclet<'ll> { + self.count_insn("catchpad"); + let name = const_cstr!("catchpad"); + let ret = unsafe { + llvm::LLVMRustBuildCatchPad(self.llbuilder, parent, + args.len() as c_uint, args.as_ptr(), + name.as_ptr()) + }; + Funclet::new(ret.expect("LLVM does not have support for catchpad")) + } + + fn catch_switch( + &mut self, + parent: Option<&'ll Value>, + unwind: Option<&'ll BasicBlock>, + num_handlers: usize, + ) -> &'ll Value { + self.count_insn("catchswitch"); + let name = const_cstr!("catchswitch"); + let ret = unsafe { + llvm::LLVMRustBuildCatchSwitch(self.llbuilder, parent, unwind, + num_handlers as c_uint, + name.as_ptr()) + }; + ret.expect("LLVM does not have support for catchswitch") + } + + fn add_handler(&mut self, catch_switch: &'ll Value, handler: &'ll BasicBlock) { + unsafe { + llvm::LLVMRustAddHandler(catch_switch, handler); + } + } + + fn set_personality_fn(&mut self, personality: &'ll Value) { + unsafe { + llvm::LLVMSetPersonalityFn(self.llfn(), personality); + } + } +} + impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { fn new_block<'b>( cx: &'a CodegenCx<'ll, 'tcx>, @@ -988,36 +1110,6 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } } - fn invoke( - &mut self, - llfn: &'ll Value, - args: &[&'ll Value], - then: &'ll BasicBlock, - catch: &'ll BasicBlock, - funclet: Option<&Funclet<'ll>>, - ) -> &'ll Value { - self.count_insn("invoke"); - - debug!("Invoke {:?} with args ({:?})", - llfn, - args); - - let args = self.check_call("invoke", llfn, args); - let bundle = funclet.map(|funclet| funclet.bundle()); - let bundle = bundle.as_ref().map(|b| &*b.raw); - - unsafe { - llvm::LLVMRustBuildInvoke(self.llbuilder, - llfn, - args.as_ptr(), - args.len() as c_uint, - then, - catch, - bundle, - noname()) - } - } - fn unreachable(&mut self) { self.count_insn("unreachable"); unsafe { @@ -1119,96 +1211,6 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } } - fn landing_pad(&mut self, ty: &'ll Type, pers_fn: &'ll Value, - num_clauses: usize) -> &'ll Value { - self.count_insn("landingpad"); - unsafe { - llvm::LLVMBuildLandingPad(self.llbuilder, ty, pers_fn, - num_clauses as c_uint, noname()) - } - } - - fn set_cleanup(&mut self, landing_pad: &'ll Value) { - self.count_insn("setcleanup"); - unsafe { - llvm::LLVMSetCleanup(landing_pad, llvm::True); - } - } - - fn resume(&mut self, exn: &'ll Value) -> &'ll Value { - self.count_insn("resume"); - unsafe { - llvm::LLVMBuildResume(self.llbuilder, exn) - } - } - - fn cleanup_pad(&mut self, - parent: Option<&'ll Value>, - args: &[&'ll Value]) -> Funclet<'ll> { - self.count_insn("cleanuppad"); - let name = const_cstr!("cleanuppad"); - let ret = unsafe { - llvm::LLVMRustBuildCleanupPad(self.llbuilder, - parent, - args.len() as c_uint, - args.as_ptr(), - name.as_ptr()) - }; - Funclet::new(ret.expect("LLVM does not have support for cleanuppad")) - } - - fn cleanup_ret( - &mut self, funclet: &Funclet<'ll>, - unwind: Option<&'ll BasicBlock>, - ) -> &'ll Value { - self.count_insn("cleanupret"); - let ret = unsafe { - llvm::LLVMRustBuildCleanupRet(self.llbuilder, funclet.cleanuppad(), unwind) - }; - ret.expect("LLVM does not have support for cleanupret") - } - - fn catch_pad(&mut self, - 
parent: &'ll Value, - args: &[&'ll Value]) -> Funclet<'ll> { - self.count_insn("catchpad"); - let name = const_cstr!("catchpad"); - let ret = unsafe { - llvm::LLVMRustBuildCatchPad(self.llbuilder, parent, - args.len() as c_uint, args.as_ptr(), - name.as_ptr()) - }; - Funclet::new(ret.expect("LLVM does not have support for catchpad")) - } - - fn catch_switch( - &mut self, - parent: Option<&'ll Value>, - unwind: Option<&'ll BasicBlock>, - num_handlers: usize, - ) -> &'ll Value { - self.count_insn("catchswitch"); - let name = const_cstr!("catchswitch"); - let ret = unsafe { - llvm::LLVMRustBuildCatchSwitch(self.llbuilder, parent, unwind, - num_handlers as c_uint, - name.as_ptr()) - }; - ret.expect("LLVM does not have support for catchswitch") - } - - fn add_handler(&mut self, catch_switch: &'ll Value, handler: &'ll BasicBlock) { - unsafe { - llvm::LLVMRustAddHandler(catch_switch, handler); - } - } - - fn set_personality_fn(&mut self, personality: &'ll Value) { - unsafe { - llvm::LLVMSetPersonalityFn(self.llfn(), personality); - } - } - fn add_case(&mut self, s: &'ll Value, on_val: &'ll Value, dest: &'ll BasicBlock) { unsafe { llvm::LLVMAddCase(s, on_val, dest) diff --git a/src/librustc_codegen_ssa/traits/builder.rs b/src/librustc_codegen_ssa/traits/builder.rs index d430590d532ba..4fdccbb5966ec 100644 --- a/src/librustc_codegen_ssa/traits/builder.rs +++ b/src/librustc_codegen_ssa/traits/builder.rs @@ -184,6 +184,40 @@ pub trait NumBuilderMethods<'tcx>: HasCodegen<'tcx> { fn fcmp(&mut self, op: RealPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value; } +pub trait UnwindBuilderMethods<'tcx>: HasCodegen<'tcx> { + fn invoke( + &mut self, + llfn: Self::Value, + args: &[Self::Value], + then: Self::BasicBlock, + catch: Self::BasicBlock, + funclet: Option<&Self::Funclet>, + ) -> Self::Value; + fn landing_pad( + &mut self, + ty: Self::Type, + pers_fn: Self::Value, + num_clauses: usize, + ) -> Self::Value; + fn set_cleanup(&mut self, landing_pad: Self::Value); + fn resume(&mut self, exn: Self::Value) -> Self::Value; + fn cleanup_pad(&mut self, parent: Option, args: &[Self::Value]) -> Self::Funclet; + fn cleanup_ret( + &mut self, + funclet: &Self::Funclet, + unwind: Option, + ) -> Self::Value; + fn catch_pad(&mut self, parent: Self::Value, args: &[Self::Value]) -> Self::Funclet; + fn catch_switch( + &mut self, + parent: Option, + unwind: Option, + num_handlers: usize, + ) -> Self::Value; + fn add_handler(&mut self, catch_switch: Self::Value, handler: Self::BasicBlock); + fn set_personality_fn(&mut self, personality: Self::Value); +} + pub trait BuilderMethods<'a, 'tcx: 'a>: HasCodegen<'tcx> + DebugInfoBuilderMethods<'tcx> @@ -194,6 +228,7 @@ pub trait BuilderMethods<'a, 'tcx: 'a>: + StaticBuilderMethods<'tcx> + MemoryBuilderMethods<'tcx> + NumBuilderMethods<'tcx> + + UnwindBuilderMethods<'tcx> { fn new_block<'b>(cx: &'a Self::CodegenCx, llfn: Self::Value, name: &'b str) -> Self; fn with_cx(cx: &'a Self::CodegenCx) -> Self; @@ -216,14 +251,6 @@ pub trait BuilderMethods<'a, 'tcx: 'a>: else_llbb: Self::BasicBlock, num_cases: usize, ) -> Self::Value; - fn invoke( - &mut self, - llfn: Self::Value, - args: &[Self::Value], - then: Self::BasicBlock, - catch: Self::BasicBlock, - funclet: Option<&Self::Funclet>, - ) -> Self::Value; fn unreachable(&mut self); fn inline_asm_call( @@ -250,30 +277,6 @@ pub trait BuilderMethods<'a, 'tcx: 'a>: fn extract_value(&mut self, agg_val: Self::Value, idx: u64) -> Self::Value; fn insert_value(&mut self, agg_val: Self::Value, elt: Self::Value, idx: u64) -> Self::Value; 
- fn landing_pad( - &mut self, - ty: Self::Type, - pers_fn: Self::Value, - num_clauses: usize, - ) -> Self::Value; - fn set_cleanup(&mut self, landing_pad: Self::Value); - fn resume(&mut self, exn: Self::Value) -> Self::Value; - fn cleanup_pad(&mut self, parent: Option, args: &[Self::Value]) -> Self::Funclet; - fn cleanup_ret( - &mut self, - funclet: &Self::Funclet, - unwind: Option, - ) -> Self::Value; - fn catch_pad(&mut self, parent: Self::Value, args: &[Self::Value]) -> Self::Funclet; - fn catch_switch( - &mut self, - parent: Option, - unwind: Option, - num_handlers: usize, - ) -> Self::Value; - fn add_handler(&mut self, catch_switch: Self::Value, handler: Self::BasicBlock); - fn set_personality_fn(&mut self, personality: Self::Value); - fn add_case(&mut self, s: Self::Value, on_val: Self::Value, dest: Self::BasicBlock); fn set_invariant_load(&mut self, load: Self::Value); From dd509994b534b06bc6e54f577f789bf33ea7a575 Mon Sep 17 00:00:00 2001 From: bjorn3 Date: Sat, 8 Dec 2018 15:56:28 +0100 Subject: [PATCH 22/28] Split control flow related methods out to ControlFlowBuilderMethods --- src/librustc_codegen_llvm/builder.rs | 206 +++++++++++---------- src/librustc_codegen_ssa/traits/builder.rs | 52 +++--- 2 files changed, 132 insertions(+), 126 deletions(-) diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index 2266bf462d7e7..6790c8d20981d 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -87,6 +87,110 @@ impl HasCodegen<'tcx> for Builder<'_, 'll, 'tcx> { type CodegenCx = CodegenCx<'ll, 'tcx>; } +impl ControlFlowBuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { + fn new_block<'b>( + cx: &'a CodegenCx<'ll, 'tcx>, + llfn: &'ll Value, + name: &'b str + ) -> Self { + let mut bx = Builder::with_cx(cx); + let llbb = unsafe { + let name = SmallCStr::new(name); + llvm::LLVMAppendBasicBlockInContext( + cx.llcx, + llfn, + name.as_ptr() + ) + }; + bx.position_at_end(llbb); + bx + } + + fn with_cx(cx: &'a CodegenCx<'ll, 'tcx>) -> Self { + // Create a fresh builder from the crate context. 
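+ // `LLVMCreateBuilderInContext` returns a builder that is not yet
+ // positioned at any basic block; callers must position it (as
+ // `new_block` above does via `position_at_end`) before emitting
+ // any instruction.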
+ let llbuilder = unsafe { + llvm::LLVMCreateBuilderInContext(cx.llcx) + }; + Builder { + llbuilder, + cx, + } + } + + fn build_sibling_block<'b>(&self, name: &'b str) -> Self { + Builder::new_block(self.cx, self.llfn(), name) + } + + fn llbb(&self) -> &'ll BasicBlock { + unsafe { + llvm::LLVMGetInsertBlock(self.llbuilder) + } + } + + fn position_at_end(&mut self, llbb: &'ll BasicBlock) { + unsafe { + llvm::LLVMPositionBuilderAtEnd(self.llbuilder, llbb); + } + } + + fn ret_void(&mut self) { + self.count_insn("retvoid"); + unsafe { + llvm::LLVMBuildRetVoid(self.llbuilder); + } + } + + fn ret(&mut self, v: &'ll Value) { + self.count_insn("ret"); + unsafe { + llvm::LLVMBuildRet(self.llbuilder, v); + } + } + + fn br(&mut self, dest: &'ll BasicBlock) { + self.count_insn("br"); + unsafe { + llvm::LLVMBuildBr(self.llbuilder, dest); + } + } + + fn cond_br( + &mut self, + cond: &'ll Value, + then_llbb: &'ll BasicBlock, + else_llbb: &'ll BasicBlock, + ) { + self.count_insn("condbr"); + unsafe { + llvm::LLVMBuildCondBr(self.llbuilder, cond, then_llbb, else_llbb); + } + } + + fn switch( + &mut self, + v: &'ll Value, + else_llbb: &'ll BasicBlock, + num_cases: usize, + ) -> &'ll Value { + unsafe { + llvm::LLVMBuildSwitch(self.llbuilder, v, else_llbb, num_cases as c_uint) + } + } + + fn add_case(&mut self, s: &'ll Value, on_val: &'ll Value, dest: &'ll BasicBlock) { + unsafe { + llvm::LLVMAddCase(s, on_val, dest) + } + } + + fn unreachable(&mut self) { + self.count_insn("unreachable"); + unsafe { + llvm::LLVMBuildUnreachable(self.llbuilder); + } + } +} + impl MemoryBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { fn alloca(&mut self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value { let mut bx = Builder::with_cx(self.cx); @@ -1021,102 +1125,6 @@ impl UnwindBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { } impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { - fn new_block<'b>( - cx: &'a CodegenCx<'ll, 'tcx>, - llfn: &'ll Value, - name: &'b str - ) -> Self { - let mut bx = Builder::with_cx(cx); - let llbb = unsafe { - let name = SmallCStr::new(name); - llvm::LLVMAppendBasicBlockInContext( - cx.llcx, - llfn, - name.as_ptr() - ) - }; - bx.position_at_end(llbb); - bx - } - - fn with_cx(cx: &'a CodegenCx<'ll, 'tcx>) -> Self { - // Create a fresh builder from the crate context. 
-        let llbuilder = unsafe {
-            llvm::LLVMCreateBuilderInContext(cx.llcx)
-        };
-        Builder {
-            llbuilder,
-            cx,
-        }
-    }
-
-    fn build_sibling_block<'b>(&self, name: &'b str) -> Self {
-        Builder::new_block(self.cx, self.llfn(), name)
-    }
-
-    fn llbb(&self) -> &'ll BasicBlock {
-        unsafe {
-            llvm::LLVMGetInsertBlock(self.llbuilder)
-        }
-    }
-
-    fn position_at_end(&mut self, llbb: &'ll BasicBlock) {
-        unsafe {
-            llvm::LLVMPositionBuilderAtEnd(self.llbuilder, llbb);
-        }
-    }
-
-    fn ret_void(&mut self) {
-        self.count_insn("retvoid");
-        unsafe {
-            llvm::LLVMBuildRetVoid(self.llbuilder);
-        }
-    }
-
-    fn ret(&mut self, v: &'ll Value) {
-        self.count_insn("ret");
-        unsafe {
-            llvm::LLVMBuildRet(self.llbuilder, v);
-        }
-    }
-
-    fn br(&mut self, dest: &'ll BasicBlock) {
-        self.count_insn("br");
-        unsafe {
-            llvm::LLVMBuildBr(self.llbuilder, dest);
-        }
-    }
-
-    fn cond_br(
-        &mut self,
-        cond: &'ll Value,
-        then_llbb: &'ll BasicBlock,
-        else_llbb: &'ll BasicBlock,
-    ) {
-        self.count_insn("condbr");
-        unsafe {
-            llvm::LLVMBuildCondBr(self.llbuilder, cond, then_llbb, else_llbb);
-        }
-    }
-
-    fn switch(
-        &mut self,
-        v: &'ll Value,
-        else_llbb: &'ll BasicBlock,
-        num_cases: usize,
-    ) -> &'ll Value {
-        unsafe {
-            llvm::LLVMBuildSwitch(self.llbuilder, v, else_llbb, num_cases as c_uint)
-        }
-    }
-
-    fn unreachable(&mut self) {
-        self.count_insn("unreachable");
-        unsafe {
-            llvm::LLVMBuildUnreachable(self.llbuilder);
-        }
-    }
-
     /* Miscellaneous instructions */
     fn inline_asm_call(&mut self, asm: &CStr, cons: &CStr,
                        inputs: &[&'ll Value], output: &'ll Type,
@@ -1211,12 +1219,6 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         }
     }
 
-    fn add_case(&mut self, s: &'ll Value, on_val: &'ll Value, dest: &'ll BasicBlock) {
-        unsafe {
-            llvm::LLVMAddCase(s, on_val, dest)
-        }
-    }
-
     fn set_invariant_load(&mut self, load: &'ll Value) {
         unsafe {
             llvm::LLVMSetMetadata(load, llvm::MD_invariant_load as c_uint,
diff --git a/src/librustc_codegen_ssa/traits/builder.rs b/src/librustc_codegen_ssa/traits/builder.rs
index 4fdccbb5966ec..65fcdb7d82ff9 100644
--- a/src/librustc_codegen_ssa/traits/builder.rs
+++ b/src/librustc_codegen_ssa/traits/builder.rs
@@ -23,6 +23,32 @@ pub enum OverflowOp {
     Mul,
 }
 
+pub trait ControlFlowBuilderMethods<'a, 'tcx: 'a>: HasCodegen<'tcx> {
+    fn new_block<'b>(cx: &'a Self::CodegenCx, llfn: Self::Value, name: &'b str) -> Self;
+    fn with_cx(cx: &'a Self::CodegenCx) -> Self;
+    fn build_sibling_block<'b>(&self, name: &'b str) -> Self;
+    fn llbb(&self) -> Self::BasicBlock;
+
+    fn position_at_end(&mut self, llbb: Self::BasicBlock);
+    fn ret_void(&mut self);
+    fn ret(&mut self, v: Self::Value);
+    fn br(&mut self, dest: Self::BasicBlock);
+    fn cond_br(
+        &mut self,
+        cond: Self::Value,
+        then_llbb: Self::BasicBlock,
+        else_llbb: Self::BasicBlock,
+    );
+    fn switch(
+        &mut self,
+        v: Self::Value,
+        else_llbb: Self::BasicBlock,
+        num_cases: usize,
+    ) -> Self::Value;
+    fn add_case(&mut self, s: Self::Value, on_val: Self::Value, dest: Self::BasicBlock);
+    fn unreachable(&mut self);
+}
+
 pub trait MemoryBuilderMethods<'tcx>: HasCodegen<'tcx> {
     // Stack allocations
     fn alloca(&mut self, ty: Self::Type, name: &str, align: Align) -> Self::Value;
@@ -226,33 +252,12 @@ pub trait BuilderMethods<'a, 'tcx: 'a>:
     + IntrinsicCallMethods<'tcx>
     + AsmBuilderMethods<'tcx>
     + StaticBuilderMethods<'tcx>
+
+    + ControlFlowBuilderMethods<'a, 'tcx>
     + MemoryBuilderMethods<'tcx>
     + NumBuilderMethods<'tcx>
     + UnwindBuilderMethods<'tcx>
 {
-    fn new_block<'b>(cx: &'a Self::CodegenCx, llfn: Self::Value, name: &'b str) -> Self;
-    fn with_cx(cx: &'a Self::CodegenCx) -> Self;
-    fn build_sibling_block<'b>(&self, name: &'b str) -> Self;
-    fn llbb(&self) -> Self::BasicBlock;
-
-    fn position_at_end(&mut self, llbb: Self::BasicBlock);
-    fn ret_void(&mut self);
-    fn ret(&mut self, v: Self::Value);
-    fn br(&mut self, dest: Self::BasicBlock);
-    fn cond_br(
-        &mut self,
-        cond: Self::Value,
-        then_llbb: Self::BasicBlock,
-        else_llbb: Self::BasicBlock,
-    );
-    fn switch(
-        &mut self,
-        v: Self::Value,
-        else_llbb: Self::BasicBlock,
-        num_cases: usize,
-    ) -> Self::Value;
-    fn unreachable(&mut self);
-
     fn inline_asm_call(
         &mut self,
         asm: &CStr,
@@ -277,7 +282,6 @@
     fn extract_value(&mut self, agg_val: Self::Value, idx: u64) -> Self::Value;
     fn insert_value(&mut self, agg_val: Self::Value, elt: Self::Value, idx: u64) -> Self::Value;
 
-    fn add_case(&mut self, s: Self::Value, on_val: Self::Value, dest: Self::BasicBlock);
     fn set_invariant_load(&mut self, load: Self::Value);
 
     /// Called for `StorageLive`

From 6a92455988c366dc8f60beb63926215507ecb090 Mon Sep 17 00:00:00 2001
From: bjorn3
Date: Sat, 8 Dec 2018 16:07:27 +0100
Subject: [PATCH 23/28] Remove inline_asm_call from cg_ssa

`count_insn` is no longer called for inline asm, because it is private
to builder.rs
---
 src/librustc_codegen_llvm/asm.rs           | 48 +++++++++++++++++++++-
 src/librustc_codegen_llvm/builder.rs       | 42 +--------------------
 src/librustc_codegen_ssa/traits/builder.rs | 14 -------
 3 files changed, 47 insertions(+), 57 deletions(-)

diff --git a/src/librustc_codegen_llvm/asm.rs b/src/librustc_codegen_llvm/asm.rs
index 4427308f4155d..4862a77cb9425 100644
--- a/src/librustc_codegen_llvm/asm.rs
+++ b/src/librustc_codegen_llvm/asm.rs
@@ -10,7 +10,7 @@ use rustc_codegen_ssa::traits::*;
 use rustc_codegen_ssa::mir::place::PlaceRef;
 use rustc_codegen_ssa::mir::operand::OperandValue;
 
-use std::ffi::CString;
+use std::ffi::{CStr, CString};
 
 use libc::{c_uint, c_char};
 
@@ -73,7 +73,8 @@ impl AsmBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
         let asm = CString::new(ia.asm.as_str().as_bytes()).unwrap();
         let constraint_cstr = CString::new(all_constraints).unwrap();
 
-        let r = self.inline_asm_call(
+        let r = inline_asm_call(
+            self,
             &asm,
             &constraint_cstr,
             &inputs,
@@ -119,3 +120,46 @@ impl AsmMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         }
     }
 }
+
+fn inline_asm_call(
+    bx: &mut Builder<'a, 'll, 'tcx>,
+    asm: &CStr,
+    cons: &CStr,
+    inputs: &[&'ll Value],
+    output: &'ll llvm::Type,
+    volatile: bool,
+    alignstack: bool,
+    dia: ::syntax::ast::AsmDialect,
+) -> Option<&'ll Value> {
+    let volatile = if volatile { llvm::True }
+                   else { llvm::False };
+    let alignstack = if alignstack { llvm::True }
+                     else { llvm::False };
+
+    let argtys = inputs.iter().map(|v| {
+        debug!("Asm Input Type: {:?}", *v);
+        bx.cx.val_ty(*v)
+    }).collect::<Vec<_>>();
+
+    debug!("Asm Output Type: {:?}", output);
+    let fty = bx.type_func(&argtys[..], output);
+    unsafe {
+        // Ask LLVM to verify that the constraints are well-formed.
+        let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons.as_ptr());
+        debug!("Constraint verification result: {:?}", constraints_ok);
+        if constraints_ok {
+            let v = llvm::LLVMRustInlineAsm(
+                fty,
+                asm.as_ptr(),
+                cons.as_ptr(),
+                volatile,
+                alignstack,
+                llvm::AsmDialect::from_generic(dia),
+            );
+            Some(bx.call(v, inputs, None))
+        } else {
+            // LLVM has detected an issue with our constraints, bail out
+            None
+        }
+    }
+}
diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs
index 6790c8d20981d..d2a216e0c6130 100644
--- a/src/librustc_codegen_llvm/builder.rs
+++ b/src/librustc_codegen_llvm/builder.rs
@@ -1,4 +1,4 @@
-use crate::llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect};
+use crate::llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope};
 use crate::llvm::{self, False, BasicBlock};
 use crate::common::Funclet;
 use crate::context::CodegenCx;
@@ -19,7 +19,6 @@ use rustc_codegen_ssa::base::to_immediate;
 use rustc_codegen_ssa::mir::operand::{OperandValue, OperandRef};
 use rustc_codegen_ssa::mir::place::PlaceRef;
 use std::borrow::Cow;
-use std::ffi::CStr;
 use std::ops::{Deref, Range};
 use std::ptr;
 
@@ -1126,45 +1125,6 @@ impl UnwindBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
 }
 
 impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
     /* Miscellaneous instructions */
-    fn inline_asm_call(&mut self, asm: &CStr, cons: &CStr,
-                       inputs: &[&'ll Value], output: &'ll Type,
-                       volatile: bool, alignstack: bool,
-                       dia: syntax::ast::AsmDialect) -> Option<&'ll Value> {
-        self.count_insn("inlineasm");
-
-        let volatile = if volatile { llvm::True }
-                       else { llvm::False };
-        let alignstack = if alignstack { llvm::True }
-                         else { llvm::False };
-
-        let argtys = inputs.iter().map(|v| {
-            debug!("Asm Input Type: {:?}", *v);
-            self.cx.val_ty(*v)
-        }).collect::<Vec<_>>();
-
-        debug!("Asm Output Type: {:?}", output);
-        let fty = self.type_func(&argtys[..], output);
-        unsafe {
-            // Ask LLVM to verify that the constraints are well-formed.
-            let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons.as_ptr());
-            debug!("Constraint verification result: {:?}", constraints_ok);
-            if constraints_ok {
-                let v = llvm::LLVMRustInlineAsm(
-                    fty,
-                    asm.as_ptr(),
-                    cons.as_ptr(),
-                    volatile,
-                    alignstack,
-                    AsmDialect::from_generic(dia),
-                );
-                Some(self.call(v, inputs, None))
-            } else {
-                // LLVM has detected an issue with our constraints, bail out
-                None
-            }
-        }
-    }
-
     fn select(
         &mut self, cond: &'ll Value,
         then_val: &'ll Value,
diff --git a/src/librustc_codegen_ssa/traits/builder.rs b/src/librustc_codegen_ssa/traits/builder.rs
index 65fcdb7d82ff9..e370ab83938ff 100644
--- a/src/librustc_codegen_ssa/traits/builder.rs
+++ b/src/librustc_codegen_ssa/traits/builder.rs
@@ -11,10 +11,7 @@ use crate::mir::place::PlaceRef;
 use crate::MemFlags;
 use rustc::ty::Ty;
 use rustc::ty::layout::{Align, Size};
-use std::ffi::CStr;
-
 use std::ops::Range;
-use syntax::ast::AsmDialect;
 
 #[derive(Copy, Clone)]
 pub enum OverflowOp {
@@ -258,17 +258,6 @@ pub trait BuilderMethods<'a, 'tcx: 'a>:
     + NumBuilderMethods<'tcx>
     + UnwindBuilderMethods<'tcx>
 {
-    fn inline_asm_call(
-        &mut self,
-        asm: &CStr,
-        cons: &CStr,
-        inputs: &[Self::Value],
-        output: Self::Type,
-        volatile: bool,
-        alignstack: bool,
-        dia: AsmDialect,
-    ) -> Option<Self::Value>;
-
     fn select(
         &mut self,
         cond: Self::Value,

From c45a9b9638348ca86b6d3b9037e4ca0bfb7fff0c Mon Sep 17 00:00:00 2001
From: bjorn3
Date: Sat, 8 Dec 2018 16:21:23 +0100
Subject: [PATCH 24/28] Remove va_arg from cg_ssa
---
 src/librustc_codegen_llvm/builder.rs       | 15 +++++++--------
 src/librustc_codegen_ssa/traits/builder.rs |  1 -
 2 files changed, 7 insertions(+), 9 deletions(-)

diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs
index d2a216e0c6130..a9edb36f0ec61 100644
--- a/src/librustc_codegen_llvm/builder.rs
+++ b/src/librustc_codegen_llvm/builder.rs
@@ -1136,14 +1136,6 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         }
     }
 
-    #[allow(dead_code)]
-    fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value {
-        self.count_insn("vaarg");
-        unsafe {
-            llvm::LLVMBuildVAArg(self.llbuilder, list, ty, noname())
-        }
-    }
-
     fn extract_element(&mut self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value {
         self.count_insn("extractelement");
         unsafe {
@@ -1514,4 +1506,11 @@ impl Builder<'a, 'll, 'tcx> {
             llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint);
         }
     }
+
+    crate fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value {
+        self.count_insn("vaarg");
+        unsafe {
+            llvm::LLVMBuildVAArg(self.llbuilder, list, ty, noname())
+        }
+    }
 }
diff --git a/src/librustc_codegen_ssa/traits/builder.rs b/src/librustc_codegen_ssa/traits/builder.rs
index e370ab83938ff..032d1b9e6404a 100644
--- a/src/librustc_codegen_ssa/traits/builder.rs
+++ b/src/librustc_codegen_ssa/traits/builder.rs
@@ -262,7 +262,6 @@ pub trait BuilderMethods<'a, 'tcx: 'a>:
         else_val: Self::Value,
     ) -> Self::Value;
 
-    fn va_arg(&mut self, list: Self::Value, ty: Self::Type) -> Self::Value;
     fn extract_element(&mut self, vec: Self::Value, idx: Self::Value) -> Self::Value;
     fn vector_splat(&mut self, num_elts: usize, elt: Self::Value) -> Self::Value;
     fn extract_value(&mut self, agg_val: Self::Value, idx: u64) -> Self::Value;

From d7a528cb829dd047750d7b8c90758c593b963486 Mon Sep 17 00:00:00 2001
From: bjorn3
Date: Sat, 8 Dec 2018 17:50:57 +0100
Subject: [PATCH 25/28] Relax some more constraints

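Helpers in cg_ssa can now spell out only the traits whose methods they
call. A minimal sketch under the bounds relaxed here (`fmin_val` is a
made-up helper for illustration, not part of this patch, and it ignores
NaN semantics):

    fn fmin_val<'tcx, Bx: NumBuilderMethods<'tcx>>(
        bx: &mut Bx,
        a: Bx::Value,
        b: Bx::Value,
    ) -> Bx::Value {
        // `fcmp` and `select` both live on NumBuilderMethods after this
        // patch, so a full BuilderMethods bound is no longer required.
        let is_lt = bx.fcmp(RealPredicate::RealOLT, a, b);
        bx.select(is_lt, a, b)
    }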
---
 src/librustc_codegen_llvm/builder.rs       | 36 +++++++++++-----------
 src/librustc_codegen_ssa/glue.rs           |  8 +++--
 src/librustc_codegen_ssa/meth.rs           |  4 +--
 src/librustc_codegen_ssa/mir/operand.rs    | 10 +++---
 src/librustc_codegen_ssa/mir/place.rs      |  2 +-
 src/librustc_codegen_ssa/traits/builder.rs | 19 ++++++------
 6 files changed, 42 insertions(+), 37 deletions(-)

diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs
index a9edb36f0ec61..052e8621f2964 100644
--- a/src/librustc_codegen_llvm/builder.rs
+++ b/src/librustc_codegen_llvm/builder.rs
@@ -441,6 +441,13 @@ impl MemoryBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
         }
     }
 
+    fn set_invariant_load(&mut self, load: &'ll Value) {
+        unsafe {
+            llvm::LLVMSetMetadata(load, llvm::MD_invariant_load as c_uint,
+                                  llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0));
+        }
+    }
+
     fn memcpy(&mut self, dst: &'ll Value, dst_align: Align,
               src: &'ll Value, src_align: Align,
               size: &'ll Value, flags: MemFlags) {
@@ -999,6 +1006,17 @@ impl NumBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
             llvm::LLVMBuildFCmp(self.llbuilder, op as c_uint, lhs, rhs, noname())
         }
     }
+
+    fn select(
+        &mut self, cond: &'ll Value,
+        then_val: &'ll Value,
+        else_val: &'ll Value,
+    ) -> &'ll Value {
+        self.count_insn("select");
+        unsafe {
+            llvm::LLVMBuildSelect(self.llbuilder, cond, then_val, else_val, noname())
+        }
+    }
 }
 
 impl UnwindBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
@@ -1125,17 +1143,6 @@
 
 impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
     /* Miscellaneous instructions */
-    fn select(
-        &mut self, cond: &'ll Value,
-        then_val: &'ll Value,
-        else_val: &'ll Value,
-    ) -> &'ll Value {
-        self.count_insn("select");
-        unsafe {
-            llvm::LLVMBuildSelect(self.llbuilder, cond, then_val, else_val, noname())
-        }
-    }
-
     fn extract_element(&mut self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value {
         self.count_insn("extractelement");
         unsafe {
@@ -1171,13 +1178,6 @@
         }
     }
 
-    fn set_invariant_load(&mut self, load: &'ll Value) {
-        unsafe {
-            llvm::LLVMSetMetadata(load, llvm::MD_invariant_load as c_uint,
-                                  llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0));
-        }
-    }
-
     fn lifetime_start(&mut self, ptr: &'ll Value, size: Size) {
         self.call_lifetime_intrinsic("llvm.lifetime.start", ptr, size);
     }
diff --git a/src/librustc_codegen_ssa/glue.rs b/src/librustc_codegen_ssa/glue.rs
index e2b49de05bd11..dbe82e8621cb7 100644
--- a/src/librustc_codegen_ssa/glue.rs
+++ b/src/librustc_codegen_ssa/glue.rs
@@ -7,10 +7,14 @@ use crate::common::IntPredicate;
 use crate::meth;
 use crate::traits::*;
 
-pub fn size_and_align_of_dst<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
+pub fn size_and_align_of_dst<
+    'a,
+    'tcx: 'a,
+    Bx: MemoryBuilderMethods<'tcx> + NumBuilderMethods<'tcx>,
+>(
     bx: &mut Bx,
     t: Ty<'tcx>,
-    info: Option<Bx::Value>
+    info: Option<Bx::Value>,
 ) -> (Bx::Value, Bx::Value) {
     let layout = bx.layout_of(t);
     debug!("size_and_align_of_dst(ty={}, info={:?}): layout: {:?}",
diff --git a/src/librustc_codegen_ssa/meth.rs b/src/librustc_codegen_ssa/meth.rs
index 49f3c87ee2d9d..688e905202bdc 100644
--- a/src/librustc_codegen_ssa/meth.rs
+++ b/src/librustc_codegen_ssa/meth.rs
@@ -18,7 +18,7 @@ impl<'a, 'tcx: 'a> VirtualIndex {
         VirtualIndex(index as u64 + 3)
     }
 
-    pub fn get_fn<Bx: BuilderMethods<'a, 'tcx>>(
+    pub fn get_fn<Bx: MemoryBuilderMethods<'tcx>>(
         self,
         bx: &mut Bx,
         llvtable: Bx::Value,
@@ -40,7 +40,7 @@ impl<'a, 'tcx: 'a> VirtualIndex {
         ptr
     }
 
-    pub fn get_usize<Bx: BuilderMethods<'a, 'tcx>>(
+    pub fn get_usize<Bx: MemoryBuilderMethods<'tcx>>(
         self,
         bx: &mut Bx,
         llvtable: Bx::Value
diff --git a/src/librustc_codegen_ssa/mir/operand.rs b/src/librustc_codegen_ssa/mir/operand.rs
index c9bfe9766cf75..3a968a753e081 100644
--- a/src/librustc_codegen_ssa/mir/operand.rs
+++ b/src/librustc_codegen_ssa/mir/operand.rs
@@ -261,7 +261,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> {
     }
 
 impl<'a, 'tcx: 'a, V: CodegenObject> OperandValue<V> {
-    pub fn store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+    pub fn store<Bx: MemoryBuilderMethods<'tcx, Value = V> + NumBuilderMethods<'tcx>>(
         self,
         bx: &mut Bx,
         dest: PlaceRef<'tcx, V>
@@ -269,7 +269,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandValue<V> {
         self.store_with_flags(bx, dest, MemFlags::empty());
     }
 
-    pub fn volatile_store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+    pub fn volatile_store<Bx: MemoryBuilderMethods<'tcx, Value = V> + NumBuilderMethods<'tcx>>(
         self,
         bx: &mut Bx,
         dest: PlaceRef<'tcx, V>
@@ -277,7 +277,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandValue<V> {
         self.store_with_flags(bx, dest, MemFlags::VOLATILE);
     }
 
-    pub fn unaligned_volatile_store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+    pub fn unaligned_volatile_store<Bx: MemoryBuilderMethods<'tcx, Value = V> + NumBuilderMethods<'tcx>>(
         self,
         bx: &mut Bx,
         dest: PlaceRef<'tcx, V>,
@@ -285,7 +285,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandValue<V> {
         self.store_with_flags(bx, dest, MemFlags::VOLATILE | MemFlags::UNALIGNED);
     }
 
-    pub fn nontemporal_store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+    pub fn nontemporal_store<Bx: MemoryBuilderMethods<'tcx, Value = V> + NumBuilderMethods<'tcx>>(
         self,
         bx: &mut Bx,
         dest: PlaceRef<'tcx, V>
@@ -293,7 +293,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandValue<V> {
         self.store_with_flags(bx, dest, MemFlags::NONTEMPORAL);
     }
 
-    fn store_with_flags<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+    fn store_with_flags<Bx: MemoryBuilderMethods<'tcx, Value = V> + NumBuilderMethods<'tcx>>(
         self,
         bx: &mut Bx,
         dest: PlaceRef<'tcx, V>,
diff --git a/src/librustc_codegen_ssa/mir/place.rs b/src/librustc_codegen_ssa/mir/place.rs
index 1951246b6fff6..82a9a801f0e8a 100644
--- a/src/librustc_codegen_ssa/mir/place.rs
+++ b/src/librustc_codegen_ssa/mir/place.rs
@@ -100,7 +100,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
 impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
     /// Access a field, at a point when the value's case is known.
-    pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+    pub fn project_field<Bx: MemoryBuilderMethods<'tcx, Value = V> + NumBuilderMethods<'tcx>>(
         self,
         bx: &mut Bx,
         ix: usize,
     ) -> Self {
diff --git a/src/librustc_codegen_ssa/traits/builder.rs b/src/librustc_codegen_ssa/traits/builder.rs
index 032d1b9e6404a..fd7a7b38b93fb 100644
--- a/src/librustc_codegen_ssa/traits/builder.rs
+++ b/src/librustc_codegen_ssa/traits/builder.rs
@@ -81,8 +81,10 @@ pub trait MemoryBuilderMethods<'tcx>: HasCodegen<'tcx> {
     fn ptrtoint(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
     fn inttoptr(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
 
+    // Optimization metadata
     fn range_metadata(&mut self, load: Self::Value, range: Range<u128>);
     fn nonnull_metadata(&mut self, load: Self::Value);
+    fn set_invariant_load(&mut self, load: Self::Value);
 
     // Bulk memory operations
     fn memcpy(
@@ -205,6 +207,14 @@ pub trait NumBuilderMethods<'tcx>: HasCodegen<'tcx> {
     fn fptrunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
     fn fpext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
     fn fcmp(&mut self, op: RealPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+
+    /// This is not really only for numbers, but often used functions which also use numbers
+    fn select(
+        &mut self,
+        cond: Self::Value,
+        then_val: Self::Value,
+        else_val: Self::Value,
+    ) -> Self::Value;
 }
 
 pub trait UnwindBuilderMethods<'tcx>: HasCodegen<'tcx> {
@@ -255,20 +265,11 @@ pub trait BuilderMethods<'a, 'tcx: 'a>:
     + NumBuilderMethods<'tcx>
     + UnwindBuilderMethods<'tcx>
 {
-    fn select(
-        &mut self,
-        cond: Self::Value,
-        then_val: Self::Value,
-        else_val: Self::Value,
-    ) -> Self::Value;
-
     fn extract_element(&mut self, vec: Self::Value, idx: Self::Value) -> Self::Value;
     fn vector_splat(&mut self, num_elts: usize, elt: Self::Value) -> Self::Value;
     fn extract_value(&mut self, agg_val: Self::Value, idx: u64) -> Self::Value;
     fn insert_value(&mut self, agg_val: Self::Value, elt: Self::Value, idx: u64) -> Self::Value;
 
-    fn set_invariant_load(&mut self, load: Self::Value);
-
     /// Called for `StorageLive`

From 018747b30998962eff8ea9e6f7334a4f2813f653 Mon Sep 17 00:00:00 2001
From: bjorn3
Date: Sat, 8 Dec 2018 18:42:31 +0100
Subject: [PATCH 26/28] Add a method for emitting a switch.

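The three methods form a small protocol: create the switch, add each
case, then emit it. For the LLVM backend `switch_new` already builds the
instruction and `switch_emit` is a no-op, but a backend whose IR needs
every case up front can make `Self::Switch` a value that only produces
the real switch in `switch_emit`. The lowering in mir/block.rs (see the
diff below) follows exactly this pattern:

    let mut switch = bx.switch_new(
        discr.immediate(),
        helper.llblock(self, *otherwise),
        values.len(),
    );
    for (&value, target) in values.iter().zip(targets) {
        let llval = bx.const_uint_big(switch_llty, value);
        bx.switch_add_case(&mut switch, llval, helper.llblock(self, *target));
    }
    bx.switch_emit(switch);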
---
 src/librustc_codegen_llvm/builder.rs       | 15 +++++++++------
 src/librustc_codegen_llvm/common.rs        |  1 +
 src/librustc_codegen_ssa/mir/block.rs      | 11 +++++++----
 src/librustc_codegen_ssa/traits/backend.rs |  1 +
 src/librustc_codegen_ssa/traits/builder.rs | 15 +++++++++++----
 5 files changed, 29 insertions(+), 14 deletions(-)

diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs
index 052e8621f2964..640e3128119d8 100644
--- a/src/librustc_codegen_llvm/builder.rs
+++ b/src/librustc_codegen_llvm/builder.rs
@@ -46,6 +46,7 @@ fn noname() -> *const c_char {
 
 impl BackendTypes for Builder<'_, 'll, 'tcx> {
     type Value = <CodegenCx<'ll, 'tcx> as BackendTypes>::Value;
+    type Switch = <CodegenCx<'ll, 'tcx> as BackendTypes>::Switch;
     type BasicBlock = <CodegenCx<'ll, 'tcx> as BackendTypes>::BasicBlock;
     type Type = <CodegenCx<'ll, 'tcx> as BackendTypes>::Type;
     type Funclet = <CodegenCx<'ll, 'tcx> as BackendTypes>::Funclet;
@@ -165,7 +166,7 @@ impl ControlFlowBuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         }
     }
 
-    fn switch(
+    fn switch_new(
         &mut self,
         v: &'ll Value,
         else_llbb: &'ll BasicBlock,
@@ -176,12 +177,14 @@ impl ControlFlowBuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         }
     }
 
-    fn add_case(&mut self, s: &'ll Value, on_val: &'ll Value, dest: &'ll BasicBlock) {
+    fn switch_add_case(&mut self, s: &mut &'ll Value, on_val: &'ll Value, dest: &'ll BasicBlock) {
         unsafe {
-            llvm::LLVMAddCase(s, on_val, dest)
+            llvm::LLVMAddCase(*s, on_val, dest)
         }
     }
 
+    fn switch_emit(&mut self, _: &'ll Value) {}
+
     fn unreachable(&mut self) {
         self.count_insn("unreachable");
         unsafe {
@@ -506,15 +509,15 @@ impl MemoryBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
     }
 
     fn write_operand_repeatedly(
-        mut self,
+        &mut self,
         cg_elem: OperandRef<'tcx, &'ll Value>,
         count: u64,
         dest: PlaceRef<'tcx, &'ll Value>,
     ) -> Self {
         let zero = self.const_usize(0);
         let count = self.const_usize(count);
-        let start = dest.project_index(&mut self, zero).llval;
-        let end = dest.project_index(&mut self, count).llval;
+        let start = dest.project_index(self, zero).llval;
+        let end = dest.project_index(self, count).llval;
 
         let mut header_bx = self.build_sibling_block("repeat_loop_header");
         let mut body_bx = self.build_sibling_block("repeat_loop_body");
diff --git a/src/librustc_codegen_llvm/common.rs b/src/librustc_codegen_llvm/common.rs
index 9554e54e4142a..b0579d8c6b5a8 100644
--- a/src/librustc_codegen_llvm/common.rs
+++ b/src/librustc_codegen_llvm/common.rs
@@ -86,6 +86,7 @@ impl Funclet<'ll> {
 
 impl BackendTypes for CodegenCx<'ll, 'tcx> {
     type Value = &'ll Value;
+    type Switch = &'ll Value;
     type BasicBlock = &'ll BasicBlock;
     type Type = &'ll Type;
     type Funclet = Funclet<'ll>;
diff --git a/src/librustc_codegen_ssa/mir/block.rs b/src/librustc_codegen_ssa/mir/block.rs
index f9e3d1ab50346..00c261af49ed5 100644
--- a/src/librustc_codegen_ssa/mir/block.rs
+++ b/src/librustc_codegen_ssa/mir/block.rs
@@ -214,17 +214,20 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             }
         } else {
             let (otherwise, targets) = targets.split_last().unwrap();
-            let switch = bx.switch(discr.immediate(),
-                                   helper.llblock(self, *otherwise),
-                                   values.len());
+            let mut switch = bx.switch_new(
+                discr.immediate(),
+                helper.llblock(self, *otherwise),
+                values.len(),
+            );
             let switch_llty = bx.immediate_backend_type(
                 bx.layout_of(switch_ty)
             );
             for (&value, target) in values.iter().zip(targets) {
                 let llval = bx.const_uint_big(switch_llty, value);
                 let llbb = helper.llblock(self, *target);
-                bx.add_case(switch, llval, llbb)
+                bx.switch_add_case(&mut switch, llval, llbb)
             }
+            bx.switch_emit(switch);
         }
     }
 
diff --git a/src/librustc_codegen_ssa/traits/backend.rs b/src/librustc_codegen_ssa/traits/backend.rs
index 00eae9098e74f..98c6cf28422ec 100644
--- a/src/librustc_codegen_ssa/traits/backend.rs
+++ b/src/librustc_codegen_ssa/traits/backend.rs
@@ -14,6 +14,7 @@ use syntax_pos::symbol::InternedString;
 
 pub trait BackendTypes {
     type Value: CodegenObject;
+    type Switch;
     type BasicBlock: Copy;
     type Type: CodegenObject;
     type Funclet;
diff --git a/src/librustc_codegen_ssa/traits/builder.rs b/src/librustc_codegen_ssa/traits/builder.rs
index fd7a7b38b93fb..47ddeaef1a189 100644
--- a/src/librustc_codegen_ssa/traits/builder.rs
+++ b/src/librustc_codegen_ssa/traits/builder.rs
@@ -36,13 +36,20 @@ pub trait ControlFlowBuilderMethods<'a, 'tcx: 'a>: HasCodegen<'tcx> {
         then_llbb: Self::BasicBlock,
         else_llbb: Self::BasicBlock,
     );
-    fn switch(
+    fn switch_new(
        &mut self,
        v: Self::Value,
        else_llbb: Self::BasicBlock,
        num_cases: usize,
-    ) -> Self::Value;
-    fn add_case(&mut self, s: Self::Value, on_val: Self::Value, dest: Self::BasicBlock);
+    ) -> Self::Switch;
+    fn switch_add_case(
+        &mut self,
+        s: &mut Self::Switch,
+        on_val: Self::Value,
+        dest: Self::BasicBlock,
+    );
+    fn switch_emit(&mut self, s: Self::Switch);
+
     fn unreachable(&mut self);
 }
@@ -116,7 +123,7 @@ pub trait MemoryBuilderMethods<'tcx>: HasCodegen<'tcx> {
 
     /// Called for Rvalue::Repeat when the elem is neither a ZST nor optimizable using memset.
     fn write_operand_repeatedly(
-        self,
+        &mut self,
         elem: OperandRef<'tcx, Self::Value>,
         count: u64,
         dest: PlaceRef<'tcx, Self::Value>,

From ef3358babd943fcc40ac10ffe205fe86eb7cd9b5 Mon Sep 17 00:00:00 2001
From: bjorn3
Date: Sun, 9 Dec 2018 14:41:20 +0100
Subject: [PATCH 27/28] Fix tidy
---
 src/librustc_codegen_llvm/builder.rs    | 7 ++++++-
 src/librustc_codegen_ssa/mir/operand.rs | 4 +++-
 2 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs
index 640e3128119d8..3f34cc5f33fd9 100644
--- a/src/librustc_codegen_llvm/builder.rs
+++ b/src/librustc_codegen_llvm/builder.rs
@@ -1309,7 +1309,12 @@ impl Builder<'a, 'll, 'tcx> {
         }
     }
 
-    pub fn shuffle_vector(&mut self, v1: &'ll Value, v2: &'ll Value, mask: &'ll Value) -> &'ll Value {
+    pub fn shuffle_vector(
+        &mut self,
+        v1: &'ll Value,
+        v2: &'ll Value,
+        mask: &'ll Value,
+    ) -> &'ll Value {
         self.count_insn("shufflevector");
         unsafe {
             llvm::LLVMBuildShuffleVector(self.llbuilder, v1, v2, mask, noname())
diff --git a/src/librustc_codegen_ssa/mir/operand.rs b/src/librustc_codegen_ssa/mir/operand.rs
index 3a968a753e081..6a8ca3d69f16c 100644
--- a/src/librustc_codegen_ssa/mir/operand.rs
+++ b/src/librustc_codegen_ssa/mir/operand.rs
@@ -277,7 +277,9 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandValue<V> {
         self.store_with_flags(bx, dest, MemFlags::VOLATILE);
     }
 
-    pub fn unaligned_volatile_store<Bx: MemoryBuilderMethods<'tcx, Value = V> + NumBuilderMethods<'tcx>>(
+    pub fn unaligned_volatile_store<
+        Bx: MemoryBuilderMethods<'tcx, Value = V> + NumBuilderMethods<'tcx>
+    >(
         self,
         bx: &mut Bx,
         dest: PlaceRef<'tcx, V>,

From 270540ba50a8801efc31ea4f566ed526442e7f84 Mon Sep 17 00:00:00 2001
From: bjorn3
Date: Wed, 2 Jan 2019 16:08:23 +0100
Subject: [PATCH 28/28] Remove duplicate va_arg
---
 src/librustc_codegen_llvm/builder.rs | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs
index 3f34cc5f33fd9..dda8ce44311ac 100644
--- a/src/librustc_codegen_llvm/builder.rs
+++ b/src/librustc_codegen_llvm/builder.rs
@@ -1514,11 +1514,4 @@ impl Builder<'a, 'll, 'tcx> {
             llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint);
         }
     }
-
-    crate fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value {
-        self.count_insn("vaarg");
-        unsafe {
-            llvm::LLVMBuildVAArg(self.llbuilder, list, ty, noname())
-        }
-    }
 }