diff --git a/src/librustc_codegen_llvm/abi.rs b/src/librustc_codegen_llvm/abi.rs index 49c9555a2c682..3a0d9e1334cf6 100644 --- a/src/librustc_codegen_llvm/abi.rs +++ b/src/librustc_codegen_llvm/abi.rs @@ -859,4 +859,8 @@ impl AbiBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { ) { ty.apply_attrs_callsite(self, callsite) } + + fn get_param(&self, index: usize) -> Self::Value { + llvm::get_param(self.llfn(), index as c_uint) + } } diff --git a/src/librustc_codegen_llvm/asm.rs b/src/librustc_codegen_llvm/asm.rs index 4427308f4155d..4862a77cb9425 100644 --- a/src/librustc_codegen_llvm/asm.rs +++ b/src/librustc_codegen_llvm/asm.rs @@ -10,7 +10,7 @@ use rustc_codegen_ssa::traits::*; use rustc_codegen_ssa::mir::place::PlaceRef; use rustc_codegen_ssa::mir::operand::OperandValue; -use std::ffi::CString; +use std::ffi::{CStr, CString}; use libc::{c_uint, c_char}; @@ -73,7 +73,8 @@ impl AsmBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { let asm = CString::new(ia.asm.as_str().as_bytes()).unwrap(); let constraint_cstr = CString::new(all_constraints).unwrap(); - let r = self.inline_asm_call( + let r = inline_asm_call( + self, &asm, &constraint_cstr, &inputs, @@ -119,3 +120,46 @@ impl AsmMethods<'tcx> for CodegenCx<'ll, 'tcx> { } } } + +fn inline_asm_call( + bx: &mut Builder<'a, 'll, 'tcx>, + asm: &CStr, + cons: &CStr, + inputs: &[&'ll Value], + output: &'ll llvm::Type, + volatile: bool, + alignstack: bool, + dia: ::syntax::ast::AsmDialect, +) -> Option<&'ll Value> { + let volatile = if volatile { llvm::True } + else { llvm::False }; + let alignstack = if alignstack { llvm::True } + else { llvm::False }; + + let argtys = inputs.iter().map(|v| { + debug!("Asm Input Type: {:?}", *v); + bx.cx.val_ty(*v) + }).collect::>(); + + debug!("Asm Output Type: {:?}", output); + let fty = bx.type_func(&argtys[..], output); + unsafe { + // Ask LLVM to verify that the constraints are well-formed. 
+ let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons.as_ptr()); + debug!("Constraint verification result: {:?}", constraints_ok); + if constraints_ok { + let v = llvm::LLVMRustInlineAsm( + fty, + asm.as_ptr(), + cons.as_ptr(), + volatile, + alignstack, + llvm::AsmDialect::from_generic(dia), + ); + Some(bx.call(v, inputs, None)) + } else { + // LLVM has detected an issue with our constraints, bail out + None + } + } +} diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index d4d38a464576d..dda8ce44311ac 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -1,10 +1,11 @@ -use crate::llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect}; +use crate::llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope}; use crate::llvm::{self, False, BasicBlock}; use crate::common::Funclet; use crate::context::CodegenCx; use crate::type_::Type; use crate::type_of::LayoutLlvmExt; use crate::value::Value; +use syntax::symbol::LocalInternedString; use rustc_codegen_ssa::common::{IntPredicate, TypeKind, RealPredicate}; use rustc_codegen_ssa::MemFlags; use libc::{c_uint, c_char}; @@ -18,7 +19,6 @@ use rustc_codegen_ssa::base::to_immediate; use rustc_codegen_ssa::mir::operand::{OperandValue, OperandRef}; use rustc_codegen_ssa::mir::place::PlaceRef; use std::borrow::Cow; -use std::ffi::CStr; use std::ops::{Deref, Range}; use std::ptr; @@ -46,6 +46,7 @@ fn noname() -> *const c_char { impl BackendTypes for Builder<'_, 'll, 'tcx> { type Value = as BackendTypes>::Value; + type Switch = as BackendTypes>::Switch; type BasicBlock = as BackendTypes>::BasicBlock; type Type = as BackendTypes>::Type; type Funclet = as BackendTypes>::Funclet; @@ -86,7 +87,7 @@ impl HasCodegen<'tcx> for Builder<'_, 'll, 'tcx> { type CodegenCx = CodegenCx<'ll, 'tcx>; } -impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { +impl ControlFlowBuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { fn new_block<'b>( cx: &'a CodegenCx<'ll, 'tcx>, llfn: &'ll Value, @@ -120,50 +121,18 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { Builder::new_block(self.cx, self.llfn(), name) } - fn llfn(&self) -> &'ll Value { - unsafe { - llvm::LLVMGetBasicBlockParent(self.llbb()) - } - } - fn llbb(&self) -> &'ll BasicBlock { unsafe { llvm::LLVMGetInsertBlock(self.llbuilder) } } - fn count_insn(&self, category: &str) { - if self.sess().codegen_stats() { - self.stats.borrow_mut().n_llvm_insns += 1; - } - if self.sess().count_llvm_insns() { - *self.stats - .borrow_mut() - .llvm_insns - .entry(category.to_string()) - .or_insert(0) += 1; - } - } - - fn set_value_name(&mut self, value: &'ll Value, name: &str) { - let cname = SmallCStr::new(name); - unsafe { - llvm::LLVMSetValueName(value, cname.as_ptr()); - } - } - fn position_at_end(&mut self, llbb: &'ll BasicBlock) { unsafe { llvm::LLVMPositionBuilderAtEnd(self.llbuilder, llbb); } } - fn position_at_start(&mut self, llbb: &'ll BasicBlock) { - unsafe { - llvm::LLVMRustPositionBuilderAtStart(self.llbuilder, llbb); - } - } - fn ret_void(&mut self) { self.count_insn("retvoid"); unsafe { @@ -197,7 +166,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } } - fn switch( + fn switch_new( &mut self, v: &'ll Value, else_llbb: &'ll BasicBlock, @@ -208,962 +177,879 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } } - fn invoke( - &mut self, - llfn: &'ll Value, - args: &[&'ll Value], - then: &'ll BasicBlock, - catch: &'ll BasicBlock, - funclet: Option<&Funclet<'ll>>, - ) 
-> &'ll Value { - self.count_insn("invoke"); - - debug!("Invoke {:?} with args ({:?})", - llfn, - args); - - let args = self.check_call("invoke", llfn, args); - let bundle = funclet.map(|funclet| funclet.bundle()); - let bundle = bundle.as_ref().map(|b| &*b.raw); - + fn switch_add_case(&mut self, s: &mut &'ll Value, on_val: &'ll Value, dest: &'ll BasicBlock) { unsafe { - llvm::LLVMRustBuildInvoke(self.llbuilder, - llfn, - args.as_ptr(), - args.len() as c_uint, - then, - catch, - bundle, - noname()) + llvm::LLVMAddCase(*s, on_val, dest) } } + fn switch_emit(&mut self, _: &'ll Value) {} + fn unreachable(&mut self) { self.count_insn("unreachable"); unsafe { llvm::LLVMBuildUnreachable(self.llbuilder); } } +} - /* Arithmetic */ - fn add(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("add"); - unsafe { - llvm::LLVMBuildAdd(self.llbuilder, lhs, rhs, noname()) - } +impl MemoryBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { + fn alloca(&mut self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value { + let mut bx = Builder::with_cx(self.cx); + bx.position_at_start(unsafe { + llvm::LLVMGetFirstBasicBlock(self.llfn()) + }); + bx.dynamic_alloca(ty, name, align) } - fn fadd(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("fadd"); + fn dynamic_alloca(&mut self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value { + self.count_insn("alloca"); unsafe { - llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname()) + let alloca = if name.is_empty() { + llvm::LLVMBuildAlloca(self.llbuilder, ty, noname()) + } else { + let name = SmallCStr::new(name); + llvm::LLVMBuildAlloca(self.llbuilder, ty, + name.as_ptr()) + }; + llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint); + alloca } } - fn fadd_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("fadd"); + fn array_alloca(&mut self, + ty: &'ll Type, + len: &'ll Value, + name: &str, + align: Align) -> &'ll Value { + self.count_insn("alloca"); unsafe { - let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname()); - llvm::LLVMRustSetHasUnsafeAlgebra(instr); - instr + let alloca = if name.is_empty() { + llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, noname()) + } else { + let name = SmallCStr::new(name); + llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, + name.as_ptr()) + }; + llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint); + alloca } } - fn sub(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("sub"); + fn load(&mut self, ptr: &'ll Value, align: Align) -> &'ll Value { + self.count_insn("load"); unsafe { - llvm::LLVMBuildSub(self.llbuilder, lhs, rhs, noname()) + let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname()); + llvm::LLVMSetAlignment(load, align.bytes() as c_uint); + load } } - fn fsub(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("fsub"); + fn volatile_load(&mut self, ptr: &'ll Value) -> &'ll Value { + self.count_insn("load.volatile"); unsafe { - llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname()) + let insn = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname()); + llvm::LLVMSetVolatile(insn, llvm::True); + insn } } - fn fsub_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("fsub"); - unsafe { - let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname()); - llvm::LLVMRustSetHasUnsafeAlgebra(instr); - instr + + fn load_operand( + &mut self, + place: PlaceRef<'tcx, &'ll Value> + ) -> OperandRef<'tcx, &'ll Value> { + 
debug!("PlaceRef::load: {:?}", place); + + assert_eq!(place.llextra.is_some(), place.layout.is_unsized()); + + if place.layout.is_zst() { + return OperandRef::new_zst(self, place.layout); } - } - fn mul(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("mul"); - unsafe { - llvm::LLVMBuildMul(self.llbuilder, lhs, rhs, noname()) + fn scalar_load_metadata<'a, 'll, 'tcx>( + bx: &mut Builder<'a, 'll, 'tcx>, + load: &'ll Value, + scalar: &layout::Scalar + ) { + let vr = scalar.valid_range.clone(); + match scalar.value { + layout::Int(..) => { + let range = scalar.valid_range_exclusive(bx); + if range.start != range.end { + bx.range_metadata(load, range); + } + } + layout::Pointer if vr.start() < vr.end() && !vr.contains(&0) => { + bx.nonnull_metadata(load); + } + _ => {} + } } + + let val = if let Some(llextra) = place.llextra { + OperandValue::Ref(place.llval, Some(llextra), place.align) + } else if place.layout.is_llvm_immediate() { + let mut const_llval = None; + unsafe { + if let Some(global) = llvm::LLVMIsAGlobalVariable(place.llval) { + if llvm::LLVMIsGlobalConstant(global) == llvm::True { + const_llval = llvm::LLVMGetInitializer(global); + } + } + } + let llval = const_llval.unwrap_or_else(|| { + let load = self.load(place.llval, place.align); + if let layout::Abi::Scalar(ref scalar) = place.layout.abi { + scalar_load_metadata(self, load, scalar); + } + load + }); + OperandValue::Immediate(to_immediate(self, llval, place.layout)) + } else if let layout::Abi::ScalarPair(ref a, ref b) = place.layout.abi { + let b_offset = a.value.size(self).align_to(b.value.align(self).abi); + + let mut load = |i, scalar: &layout::Scalar, align| { + let llptr = self.struct_gep(place.llval, i as u64); + let load = self.load(llptr, align); + scalar_load_metadata(self, load, scalar); + if scalar.is_bool() { + self.trunc(load, self.type_i1()) + } else { + load + } + }; + + OperandValue::Pair( + load(0, a, place.align), + load(1, b, place.align.restrict_for_offset(b_offset)), + ) + } else { + OperandValue::Ref(place.llval, None, place.align) + }; + + OperandRef { val, layout: place.layout } } - fn fmul(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("fmul"); + fn store(&mut self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value { + self.store_with_flags(val, ptr, align, MemFlags::empty()) + } + + fn store_with_flags( + &mut self, + val: &'ll Value, + ptr: &'ll Value, + align: Align, + flags: MemFlags, + ) -> &'ll Value { + debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags); + self.count_insn("store"); + let ptr = self.check_store(val, ptr); unsafe { - llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname()) + let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr); + let align = if flags.contains(MemFlags::UNALIGNED) { + 1 + } else { + align.bytes() as c_uint + }; + llvm::LLVMSetAlignment(store, align); + if flags.contains(MemFlags::VOLATILE) { + llvm::LLVMSetVolatile(store, llvm::True); + } + if flags.contains(MemFlags::NONTEMPORAL) { + // According to LLVM [1] building a nontemporal store must + // *always* point to a metadata value of the integer 1. 
+ // + // [1]: http://llvm.org/docs/LangRef.html#store-instruction + let one = self.cx.const_i32(1); + let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1); + llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node); + } + store } } - fn fmul_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("fmul"); + fn gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value { + self.count_insn("gep"); unsafe { - let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname()); - llvm::LLVMRustSetHasUnsafeAlgebra(instr); - instr + llvm::LLVMBuildGEP(self.llbuilder, ptr, indices.as_ptr(), + indices.len() as c_uint, noname()) } } - - fn udiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("udiv"); + fn inbounds_gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value { + self.count_insn("inboundsgep"); unsafe { - llvm::LLVMBuildUDiv(self.llbuilder, lhs, rhs, noname()) + llvm::LLVMBuildInBoundsGEP( + self.llbuilder, ptr, indices.as_ptr(), indices.len() as c_uint, noname()) } } - fn exactudiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("exactudiv"); + fn struct_gep(&mut self, ptr: &'ll Value, idx: u64) -> &'ll Value { + self.count_insn("structgep"); + assert_eq!(idx as c_uint as u64, idx); unsafe { - llvm::LLVMBuildExactUDiv(self.llbuilder, lhs, rhs, noname()) + llvm::LLVMBuildStructGEP(self.llbuilder, ptr, idx as c_uint, noname()) } } - fn sdiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("sdiv"); + fn pointercast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + self.count_insn("pointercast"); unsafe { - llvm::LLVMBuildSDiv(self.llbuilder, lhs, rhs, noname()) + llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty, noname()) } } - fn exactsdiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("exactsdiv"); + fn ptrtoint(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + self.count_insn("ptrtoint"); unsafe { - llvm::LLVMBuildExactSDiv(self.llbuilder, lhs, rhs, noname()) + llvm::LLVMBuildPtrToInt(self.llbuilder, val, dest_ty, noname()) } } - fn fdiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("fdiv"); + fn inttoptr(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + self.count_insn("inttoptr"); unsafe { - llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname()) + llvm::LLVMBuildIntToPtr(self.llbuilder, val, dest_ty, noname()) } } - fn fdiv_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("fdiv"); + fn range_metadata(&mut self, load: &'ll Value, range: Range) { + if self.sess().target.target.arch == "amdgpu" { + // amdgpu/LLVM does something weird and thinks a i64 value is + // split into a v2i32, halving the bitwidth LLVM expects, + // tripping an assertion. So, for now, just disable this + // optimization. 
+ return; + } + unsafe { - let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname()); - llvm::LLVMRustSetHasUnsafeAlgebra(instr); - instr + let llty = self.cx.val_ty(load); + let v = [ + self.cx.const_uint_big(llty, range.start), + self.cx.const_uint_big(llty, range.end) + ]; + + llvm::LLVMSetMetadata(load, llvm::MD_range as c_uint, + llvm::LLVMMDNodeInContext(self.cx.llcx, + v.as_ptr(), + v.len() as c_uint)); } } - fn urem(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("urem"); + fn nonnull_metadata(&mut self, load: &'ll Value) { unsafe { - llvm::LLVMBuildURem(self.llbuilder, lhs, rhs, noname()) + llvm::LLVMSetMetadata(load, llvm::MD_nonnull as c_uint, + llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0)); } } - fn srem(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("srem"); + fn set_invariant_load(&mut self, load: &'ll Value) { unsafe { - llvm::LLVMBuildSRem(self.llbuilder, lhs, rhs, noname()) + llvm::LLVMSetMetadata(load, llvm::MD_invariant_load as c_uint, + llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0)); } } - fn frem(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("frem"); + fn memcpy(&mut self, dst: &'ll Value, dst_align: Align, + src: &'ll Value, src_align: Align, + size: &'ll Value, flags: MemFlags) { + if flags.contains(MemFlags::NONTEMPORAL) { + // HACK(nox): This is inefficient but there is no nontemporal memcpy. + let val = self.load(src, src_align); + let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val))); + self.store_with_flags(val, ptr, dst_align, flags); + return; + } + let size = self.intcast(size, self.type_isize(), false); + let is_volatile = flags.contains(MemFlags::VOLATILE); + let dst = self.pointercast(dst, self.type_i8p()); + let src = self.pointercast(src, self.type_i8p()); unsafe { - llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname()) + llvm::LLVMRustBuildMemCpy(self.llbuilder, dst, dst_align.bytes() as c_uint, + src, src_align.bytes() as c_uint, size, is_volatile); } } - fn frem_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("frem"); + fn memmove(&mut self, dst: &'ll Value, dst_align: Align, + src: &'ll Value, src_align: Align, + size: &'ll Value, flags: MemFlags) { + if flags.contains(MemFlags::NONTEMPORAL) { + // HACK(nox): This is inefficient but there is no nontemporal memmove. 
+ let val = self.load(src, src_align); + let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val))); + self.store_with_flags(val, ptr, dst_align, flags); + return; + } + let size = self.intcast(size, self.type_isize(), false); + let is_volatile = flags.contains(MemFlags::VOLATILE); + let dst = self.pointercast(dst, self.type_i8p()); + let src = self.pointercast(src, self.type_i8p()); unsafe { - let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname()); - llvm::LLVMRustSetHasUnsafeAlgebra(instr); - instr + llvm::LLVMRustBuildMemMove(self.llbuilder, dst, dst_align.bytes() as c_uint, + src, src_align.bytes() as c_uint, size, is_volatile); } } - fn shl(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("shl"); + fn memset( + &mut self, + ptr: &'ll Value, + fill_byte: &'ll Value, + size: &'ll Value, + align: Align, + flags: MemFlags, + ) { + let ptr_width = &self.sess().target.target.target_pointer_width; + let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width); + let llintrinsicfn = self.get_intrinsic(&intrinsic_key); + let ptr = self.pointercast(ptr, self.type_i8p()); + let align = self.const_u32(align.bytes() as u32); + let volatile = self.const_bool(flags.contains(MemFlags::VOLATILE)); + self.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None); + } + + fn write_operand_repeatedly( + &mut self, + cg_elem: OperandRef<'tcx, &'ll Value>, + count: u64, + dest: PlaceRef<'tcx, &'ll Value>, + ) -> Self { + let zero = self.const_usize(0); + let count = self.const_usize(count); + let start = dest.project_index(self, zero).llval; + let end = dest.project_index(self, count).llval; + + let mut header_bx = self.build_sibling_block("repeat_loop_header"); + let mut body_bx = self.build_sibling_block("repeat_loop_body"); + let next_bx = self.build_sibling_block("repeat_loop_next"); + + self.br(header_bx.llbb()); + let current = header_bx.phi(self.val_ty(start), &[start], &[self.llbb()]); + + let keep_going = header_bx.icmp(IntPredicate::IntNE, current, end); + header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb()); + + let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size); + cg_elem.val.store(&mut body_bx, + PlaceRef::new_sized(current, cg_elem.layout, align)); + + let next = body_bx.inbounds_gep(current, &[self.const_usize(1)]); + body_bx.br(header_bx.llbb()); + header_bx.add_incoming_to_phi(current, next, body_bx.llbb()); + + next_bx + } + + fn atomic_load( + &mut self, + ptr: &'ll Value, + order: rustc_codegen_ssa::common::AtomicOrdering, + size: Size, + ) -> &'ll Value { + self.count_insn("load.atomic"); unsafe { - llvm::LLVMBuildShl(self.llbuilder, lhs, rhs, noname()) + let load = llvm::LLVMRustBuildAtomicLoad( + self.llbuilder, + ptr, + noname(), + AtomicOrdering::from_generic(order), + ); + // LLVM requires the alignment of atomic loads to be at least the size of the type. 
+ llvm::LLVMSetAlignment(load, size.bytes() as c_uint); + load } } - fn lshr(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("lshr"); + fn atomic_store(&mut self, val: &'ll Value, ptr: &'ll Value, + order: rustc_codegen_ssa::common::AtomicOrdering, size: Size) { + debug!("Store {:?} -> {:?}", val, ptr); + self.count_insn("store.atomic"); + let ptr = self.check_store(val, ptr); unsafe { - llvm::LLVMBuildLShr(self.llbuilder, lhs, rhs, noname()) + let store = llvm::LLVMRustBuildAtomicStore( + self.llbuilder, + val, + ptr, + AtomicOrdering::from_generic(order), + ); + // LLVM requires the alignment of atomic stores to be at least the size of the type. + llvm::LLVMSetAlignment(store, size.bytes() as c_uint); } } - fn ashr(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("ashr"); + fn atomic_cmpxchg( + &mut self, + dst: &'ll Value, + cmp: &'ll Value, + src: &'ll Value, + order: rustc_codegen_ssa::common::AtomicOrdering, + failure_order: rustc_codegen_ssa::common::AtomicOrdering, + weak: bool, + ) -> &'ll Value { + let weak = if weak { llvm::True } else { llvm::False }; unsafe { - llvm::LLVMBuildAShr(self.llbuilder, lhs, rhs, noname()) + llvm::LLVMRustBuildAtomicCmpXchg( + self.llbuilder, + dst, + cmp, + src, + AtomicOrdering::from_generic(order), + AtomicOrdering::from_generic(failure_order), + weak + ) } } - fn and(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("and"); + fn atomic_rmw( + &mut self, + op: rustc_codegen_ssa::common::AtomicRmwBinOp, + dst: &'ll Value, + src: &'ll Value, + order: rustc_codegen_ssa::common::AtomicOrdering, + ) -> &'ll Value { unsafe { - llvm::LLVMBuildAnd(self.llbuilder, lhs, rhs, noname()) + llvm::LLVMBuildAtomicRMW( + self.llbuilder, + AtomicRmwBinOp::from_generic(op), + dst, + src, + AtomicOrdering::from_generic(order), + False) } } - fn or(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("or"); + fn atomic_fence( + &mut self, + order: rustc_codegen_ssa::common::AtomicOrdering, + scope: rustc_codegen_ssa::common::SynchronizationScope + ) { unsafe { - llvm::LLVMBuildOr(self.llbuilder, lhs, rhs, noname()) + llvm::LLVMRustBuildAtomicFence( + self.llbuilder, + AtomicOrdering::from_generic(order), + SynchronizationScope::from_generic(scope) + ); } } +} - fn xor(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("xor"); +impl NumBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { + fn add(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("add"); unsafe { - llvm::LLVMBuildXor(self.llbuilder, lhs, rhs, noname()) + llvm::LLVMBuildAdd(self.llbuilder, lhs, rhs, noname()) } } - fn neg(&mut self, v: &'ll Value) -> &'ll Value { - self.count_insn("neg"); + fn fadd(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("fadd"); unsafe { - llvm::LLVMBuildNeg(self.llbuilder, v, noname()) + llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname()) } } - fn fneg(&mut self, v: &'ll Value) -> &'ll Value { - self.count_insn("fneg"); + fn fadd_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("fadd"); unsafe { - llvm::LLVMBuildFNeg(self.llbuilder, v, noname()) + let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname()); + llvm::LLVMRustSetHasUnsafeAlgebra(instr); + instr } } - fn not(&mut self, v: &'ll Value) -> &'ll Value { - self.count_insn("not"); + fn sub(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("sub"); 
unsafe { - llvm::LLVMBuildNot(self.llbuilder, v, noname()) + llvm::LLVMBuildSub(self.llbuilder, lhs, rhs, noname()) } } - fn checked_binop( - &mut self, - oop: OverflowOp, - ty: Ty<'_>, - lhs: Self::Value, - rhs: Self::Value, - ) -> (Self::Value, Self::Value) { - use syntax::ast::IntTy::*; - use syntax::ast::UintTy::*; - use rustc::ty::{Int, Uint}; + fn fsub(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("fsub"); + unsafe { + llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname()) + } + } - let new_sty = match ty.sty { - Int(Isize) => Int(self.tcx.sess.target.isize_ty), - Uint(Usize) => Uint(self.tcx.sess.target.usize_ty), - ref t @ Uint(_) | ref t @ Int(_) => t.clone(), - _ => panic!("tried to get overflow intrinsic for op applied to non-int type") - }; - - let name = match oop { - OverflowOp::Add => match new_sty { - Int(I8) => "llvm.sadd.with.overflow.i8", - Int(I16) => "llvm.sadd.with.overflow.i16", - Int(I32) => "llvm.sadd.with.overflow.i32", - Int(I64) => "llvm.sadd.with.overflow.i64", - Int(I128) => "llvm.sadd.with.overflow.i128", - - Uint(U8) => "llvm.uadd.with.overflow.i8", - Uint(U16) => "llvm.uadd.with.overflow.i16", - Uint(U32) => "llvm.uadd.with.overflow.i32", - Uint(U64) => "llvm.uadd.with.overflow.i64", - Uint(U128) => "llvm.uadd.with.overflow.i128", - - _ => unreachable!(), - }, - OverflowOp::Sub => match new_sty { - Int(I8) => "llvm.ssub.with.overflow.i8", - Int(I16) => "llvm.ssub.with.overflow.i16", - Int(I32) => "llvm.ssub.with.overflow.i32", - Int(I64) => "llvm.ssub.with.overflow.i64", - Int(I128) => "llvm.ssub.with.overflow.i128", - - Uint(U8) => "llvm.usub.with.overflow.i8", - Uint(U16) => "llvm.usub.with.overflow.i16", - Uint(U32) => "llvm.usub.with.overflow.i32", - Uint(U64) => "llvm.usub.with.overflow.i64", - Uint(U128) => "llvm.usub.with.overflow.i128", - - _ => unreachable!(), - }, - OverflowOp::Mul => match new_sty { - Int(I8) => "llvm.smul.with.overflow.i8", - Int(I16) => "llvm.smul.with.overflow.i16", - Int(I32) => "llvm.smul.with.overflow.i32", - Int(I64) => "llvm.smul.with.overflow.i64", - Int(I128) => "llvm.smul.with.overflow.i128", - - Uint(U8) => "llvm.umul.with.overflow.i8", - Uint(U16) => "llvm.umul.with.overflow.i16", - Uint(U32) => "llvm.umul.with.overflow.i32", - Uint(U64) => "llvm.umul.with.overflow.i64", - Uint(U128) => "llvm.umul.with.overflow.i128", - - _ => unreachable!(), - }, - }; - - let intrinsic = self.get_intrinsic(&name); - let res = self.call(intrinsic, &[lhs, rhs], None); - ( - self.extract_value(res, 0), - self.extract_value(res, 1), - ) - } - - fn alloca(&mut self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value { - let mut bx = Builder::with_cx(self.cx); - bx.position_at_start(unsafe { - llvm::LLVMGetFirstBasicBlock(self.llfn()) - }); - bx.dynamic_alloca(ty, name, align) - } - - fn dynamic_alloca(&mut self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value { - self.count_insn("alloca"); - unsafe { - let alloca = if name.is_empty() { - llvm::LLVMBuildAlloca(self.llbuilder, ty, noname()) - } else { - let name = SmallCStr::new(name); - llvm::LLVMBuildAlloca(self.llbuilder, ty, - name.as_ptr()) - }; - llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint); - alloca - } - } - - fn array_alloca(&mut self, - ty: &'ll Type, - len: &'ll Value, - name: &str, - align: Align) -> &'ll Value { - self.count_insn("alloca"); - unsafe { - let alloca = if name.is_empty() { - llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, noname()) - } else { - let name = SmallCStr::new(name); - 
llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, - name.as_ptr()) - }; - llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint); - alloca - } - } - - fn load(&mut self, ptr: &'ll Value, align: Align) -> &'ll Value { - self.count_insn("load"); - unsafe { - let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname()); - llvm::LLVMSetAlignment(load, align.bytes() as c_uint); - load - } - } - - fn volatile_load(&mut self, ptr: &'ll Value) -> &'ll Value { - self.count_insn("load.volatile"); - unsafe { - let insn = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname()); - llvm::LLVMSetVolatile(insn, llvm::True); - insn - } - } - - fn atomic_load( - &mut self, - ptr: &'ll Value, - order: rustc_codegen_ssa::common::AtomicOrdering, - size: Size, - ) -> &'ll Value { - self.count_insn("load.atomic"); + fn fsub_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("fsub"); unsafe { - let load = llvm::LLVMRustBuildAtomicLoad( - self.llbuilder, - ptr, - noname(), - AtomicOrdering::from_generic(order), - ); - // LLVM requires the alignment of atomic loads to be at least the size of the type. - llvm::LLVMSetAlignment(load, size.bytes() as c_uint); - load - } - } - - fn load_operand( - &mut self, - place: PlaceRef<'tcx, &'ll Value> - ) -> OperandRef<'tcx, &'ll Value> { - debug!("PlaceRef::load: {:?}", place); - - assert_eq!(place.llextra.is_some(), place.layout.is_unsized()); - - if place.layout.is_zst() { - return OperandRef::new_zst(self.cx(), place.layout); - } - - fn scalar_load_metadata<'a, 'll, 'tcx>( - bx: &mut Builder<'a, 'll, 'tcx>, - load: &'ll Value, - scalar: &layout::Scalar - ) { - let vr = scalar.valid_range.clone(); - match scalar.value { - layout::Int(..) => { - let range = scalar.valid_range_exclusive(bx); - if range.start != range.end { - bx.range_metadata(load, range); - } - } - layout::Pointer if vr.start() < vr.end() && !vr.contains(&0) => { - bx.nonnull_metadata(load); - } - _ => {} - } + let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname()); + llvm::LLVMRustSetHasUnsafeAlgebra(instr); + instr } - - let val = if let Some(llextra) = place.llextra { - OperandValue::Ref(place.llval, Some(llextra), place.align) - } else if place.layout.is_llvm_immediate() { - let mut const_llval = None; - unsafe { - if let Some(global) = llvm::LLVMIsAGlobalVariable(place.llval) { - if llvm::LLVMIsGlobalConstant(global) == llvm::True { - const_llval = llvm::LLVMGetInitializer(global); - } - } - } - let llval = const_llval.unwrap_or_else(|| { - let load = self.load(place.llval, place.align); - if let layout::Abi::Scalar(ref scalar) = place.layout.abi { - scalar_load_metadata(self, load, scalar); - } - load - }); - OperandValue::Immediate(to_immediate(self, llval, place.layout)) - } else if let layout::Abi::ScalarPair(ref a, ref b) = place.layout.abi { - let b_offset = a.value.size(self).align_to(b.value.align(self).abi); - - let mut load = |i, scalar: &layout::Scalar, align| { - let llptr = self.struct_gep(place.llval, i as u64); - let load = self.load(llptr, align); - scalar_load_metadata(self, load, scalar); - if scalar.is_bool() { - self.trunc(load, self.type_i1()) - } else { - load - } - }; - - OperandValue::Pair( - load(0, a, place.align), - load(1, b, place.align.restrict_for_offset(b_offset)), - ) - } else { - OperandValue::Ref(place.llval, None, place.align) - }; - - OperandRef { val, layout: place.layout } } - - - fn range_metadata(&mut self, load: &'ll Value, range: Range) { - if self.sess().target.target.arch == "amdgpu" { - // amdgpu/LLVM does 
something weird and thinks a i64 value is - // split into a v2i32, halving the bitwidth LLVM expects, - // tripping an assertion. So, for now, just disable this - // optimization. - return; - } - + fn mul(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("mul"); unsafe { - let llty = self.cx.val_ty(load); - let v = [ - self.cx.const_uint_big(llty, range.start), - self.cx.const_uint_big(llty, range.end) - ]; - - llvm::LLVMSetMetadata(load, llvm::MD_range as c_uint, - llvm::LLVMMDNodeInContext(self.cx.llcx, - v.as_ptr(), - v.len() as c_uint)); + llvm::LLVMBuildMul(self.llbuilder, lhs, rhs, noname()) } } - fn nonnull_metadata(&mut self, load: &'ll Value) { + fn fmul(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("fmul"); unsafe { - llvm::LLVMSetMetadata(load, llvm::MD_nonnull as c_uint, - llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0)); + llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname()) } } - fn store(&mut self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value { - self.store_with_flags(val, ptr, align, MemFlags::empty()) - } - - fn store_with_flags( - &mut self, - val: &'ll Value, - ptr: &'ll Value, - align: Align, - flags: MemFlags, - ) -> &'ll Value { - debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags); - self.count_insn("store"); - let ptr = self.check_store(val, ptr); + fn fmul_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("fmul"); unsafe { - let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr); - let align = if flags.contains(MemFlags::UNALIGNED) { - 1 - } else { - align.bytes() as c_uint - }; - llvm::LLVMSetAlignment(store, align); - if flags.contains(MemFlags::VOLATILE) { - llvm::LLVMSetVolatile(store, llvm::True); - } - if flags.contains(MemFlags::NONTEMPORAL) { - // According to LLVM [1] building a nontemporal store must - // *always* point to a metadata value of the integer 1. - // - // [1]: http://llvm.org/docs/LangRef.html#store-instruction - let one = self.cx.const_i32(1); - let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1); - llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node); - } - store + let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname()); + llvm::LLVMRustSetHasUnsafeAlgebra(instr); + instr } } - fn atomic_store(&mut self, val: &'ll Value, ptr: &'ll Value, - order: rustc_codegen_ssa::common::AtomicOrdering, size: Size) { - debug!("Store {:?} -> {:?}", val, ptr); - self.count_insn("store.atomic"); - let ptr = self.check_store(val, ptr); - unsafe { - let store = llvm::LLVMRustBuildAtomicStore( - self.llbuilder, - val, - ptr, - AtomicOrdering::from_generic(order), - ); - // LLVM requires the alignment of atomic stores to be at least the size of the type. 
- llvm::LLVMSetAlignment(store, size.bytes() as c_uint); - } - } - fn gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value { - self.count_insn("gep"); + fn udiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("udiv"); unsafe { - llvm::LLVMBuildGEP(self.llbuilder, ptr, indices.as_ptr(), - indices.len() as c_uint, noname()) + llvm::LLVMBuildUDiv(self.llbuilder, lhs, rhs, noname()) } } - fn inbounds_gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value { - self.count_insn("inboundsgep"); + fn exactudiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("exactudiv"); unsafe { - llvm::LLVMBuildInBoundsGEP( - self.llbuilder, ptr, indices.as_ptr(), indices.len() as c_uint, noname()) + llvm::LLVMBuildExactUDiv(self.llbuilder, lhs, rhs, noname()) } } - /* Casts */ - fn trunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { - self.count_insn("trunc"); + fn sdiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("sdiv"); unsafe { - llvm::LLVMBuildTrunc(self.llbuilder, val, dest_ty, noname()) + llvm::LLVMBuildSDiv(self.llbuilder, lhs, rhs, noname()) } } - fn sext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { - self.count_insn("sext"); + fn exactsdiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("exactsdiv"); unsafe { - llvm::LLVMBuildSExt(self.llbuilder, val, dest_ty, noname()) + llvm::LLVMBuildExactSDiv(self.llbuilder, lhs, rhs, noname()) } } - fn fptoui(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { - self.count_insn("fptoui"); + fn fdiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("fdiv"); unsafe { - llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty, noname()) + llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname()) } } - fn fptosi(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { - self.count_insn("fptosi"); + fn fdiv_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("fdiv"); unsafe { - llvm::LLVMBuildFPToSI(self.llbuilder, val, dest_ty,noname()) + let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname()); + llvm::LLVMRustSetHasUnsafeAlgebra(instr); + instr } } - fn uitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { - self.count_insn("uitofp"); + fn urem(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("urem"); unsafe { - llvm::LLVMBuildUIToFP(self.llbuilder, val, dest_ty, noname()) + llvm::LLVMBuildURem(self.llbuilder, lhs, rhs, noname()) } } - fn sitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { - self.count_insn("sitofp"); + fn srem(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("srem"); unsafe { - llvm::LLVMBuildSIToFP(self.llbuilder, val, dest_ty, noname()) + llvm::LLVMBuildSRem(self.llbuilder, lhs, rhs, noname()) } } - fn fptrunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { - self.count_insn("fptrunc"); + fn frem(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("frem"); unsafe { - llvm::LLVMBuildFPTrunc(self.llbuilder, val, dest_ty, noname()) + llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname()) } } - fn fpext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { - self.count_insn("fpext"); + fn frem_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("frem"); unsafe { - 
llvm::LLVMBuildFPExt(self.llbuilder, val, dest_ty, noname()) + let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname()); + llvm::LLVMRustSetHasUnsafeAlgebra(instr); + instr } } - fn ptrtoint(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { - self.count_insn("ptrtoint"); + fn shl(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("shl"); unsafe { - llvm::LLVMBuildPtrToInt(self.llbuilder, val, dest_ty, noname()) + llvm::LLVMBuildShl(self.llbuilder, lhs, rhs, noname()) } } - fn inttoptr(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { - self.count_insn("inttoptr"); + fn lshr(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("lshr"); unsafe { - llvm::LLVMBuildIntToPtr(self.llbuilder, val, dest_ty, noname()) + llvm::LLVMBuildLShr(self.llbuilder, lhs, rhs, noname()) } } - fn bitcast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { - self.count_insn("bitcast"); + fn ashr(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("ashr"); unsafe { - llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, noname()) + llvm::LLVMBuildAShr(self.llbuilder, lhs, rhs, noname()) } } - - fn intcast(&mut self, val: &'ll Value, dest_ty: &'ll Type, is_signed: bool) -> &'ll Value { - self.count_insn("intcast"); + fn and(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("and"); unsafe { - llvm::LLVMRustBuildIntCast(self.llbuilder, val, dest_ty, is_signed) + llvm::LLVMBuildAnd(self.llbuilder, lhs, rhs, noname()) } } - fn pointercast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { - self.count_insn("pointercast"); + fn or(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("or"); unsafe { - llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty, noname()) + llvm::LLVMBuildOr(self.llbuilder, lhs, rhs, noname()) } } - /* Comparisons */ - fn icmp(&mut self, op: IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("icmp"); - let op = llvm::IntPredicate::from_generic(op); + fn xor(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("xor"); unsafe { - llvm::LLVMBuildICmp(self.llbuilder, op as c_uint, lhs, rhs, noname()) + llvm::LLVMBuildXor(self.llbuilder, lhs, rhs, noname()) } } - fn fcmp(&mut self, op: RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("fcmp"); + fn neg(&mut self, v: &'ll Value) -> &'ll Value { + self.count_insn("neg"); unsafe { - llvm::LLVMBuildFCmp(self.llbuilder, op as c_uint, lhs, rhs, noname()) + llvm::LLVMBuildNeg(self.llbuilder, v, noname()) } } - /* Miscellaneous instructions */ - fn empty_phi(&mut self, ty: &'ll Type) -> &'ll Value { - self.count_insn("emptyphi"); + fn fneg(&mut self, v: &'ll Value) -> &'ll Value { + self.count_insn("fneg"); unsafe { - llvm::LLVMBuildPhi(self.llbuilder, ty, noname()) + llvm::LLVMBuildFNeg(self.llbuilder, v, noname()) } } - fn phi(&mut self, ty: &'ll Type, vals: &[&'ll Value], bbs: &[&'ll BasicBlock]) -> &'ll Value { - assert_eq!(vals.len(), bbs.len()); - let phi = self.empty_phi(ty); - self.count_insn("addincoming"); + fn not(&mut self, v: &'ll Value) -> &'ll Value { + self.count_insn("not"); unsafe { - llvm::LLVMAddIncoming(phi, vals.as_ptr(), - bbs.as_ptr(), - vals.len() as c_uint); - phi + llvm::LLVMBuildNot(self.llbuilder, v, noname()) } } - fn inline_asm_call(&mut self, asm: &CStr, cons: &CStr, - inputs: &[&'ll Value], output: &'ll Type, - volatile: bool, alignstack: 
bool, - dia: syntax::ast::AsmDialect) -> Option<&'ll Value> { - self.count_insn("inlineasm"); - - let volatile = if volatile { llvm::True } - else { llvm::False }; - let alignstack = if alignstack { llvm::True } - else { llvm::False }; - - let argtys = inputs.iter().map(|v| { - debug!("Asm Input Type: {:?}", *v); - self.cx.val_ty(*v) - }).collect::>(); - - debug!("Asm Output Type: {:?}", output); - let fty = self.type_func(&argtys[..], output); - unsafe { - // Ask LLVM to verify that the constraints are well-formed. - let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons.as_ptr()); - debug!("Constraint verification result: {:?}", constraints_ok); - if constraints_ok { - let v = llvm::LLVMRustInlineAsm( - fty, - asm.as_ptr(), - cons.as_ptr(), - volatile, - alignstack, - AsmDialect::from_generic(dia), - ); - Some(self.call(v, inputs, None)) - } else { - // LLVM has detected an issue with our constraints, bail out - None - } - } + fn checked_binop( + &mut self, + oop: OverflowOp, + ty: Ty<'_>, + lhs: Self::Value, + rhs: Self::Value, + ) -> (Self::Value, Self::Value) { + use syntax::ast::IntTy::*; + use syntax::ast::UintTy::*; + use rustc::ty::{Int, Uint}; + + let new_sty = match ty.sty { + Int(Isize) => Int(self.tcx.sess.target.isize_ty), + Uint(Usize) => Uint(self.tcx.sess.target.usize_ty), + ref t @ Uint(_) | ref t @ Int(_) => t.clone(), + _ => panic!("tried to get overflow intrinsic for op applied to non-int type") + }; + + let name = match oop { + OverflowOp::Add => match new_sty { + Int(I8) => "llvm.sadd.with.overflow.i8", + Int(I16) => "llvm.sadd.with.overflow.i16", + Int(I32) => "llvm.sadd.with.overflow.i32", + Int(I64) => "llvm.sadd.with.overflow.i64", + Int(I128) => "llvm.sadd.with.overflow.i128", + + Uint(U8) => "llvm.uadd.with.overflow.i8", + Uint(U16) => "llvm.uadd.with.overflow.i16", + Uint(U32) => "llvm.uadd.with.overflow.i32", + Uint(U64) => "llvm.uadd.with.overflow.i64", + Uint(U128) => "llvm.uadd.with.overflow.i128", + + _ => unreachable!(), + }, + OverflowOp::Sub => match new_sty { + Int(I8) => "llvm.ssub.with.overflow.i8", + Int(I16) => "llvm.ssub.with.overflow.i16", + Int(I32) => "llvm.ssub.with.overflow.i32", + Int(I64) => "llvm.ssub.with.overflow.i64", + Int(I128) => "llvm.ssub.with.overflow.i128", + + Uint(U8) => "llvm.usub.with.overflow.i8", + Uint(U16) => "llvm.usub.with.overflow.i16", + Uint(U32) => "llvm.usub.with.overflow.i32", + Uint(U64) => "llvm.usub.with.overflow.i64", + Uint(U128) => "llvm.usub.with.overflow.i128", + + _ => unreachable!(), + }, + OverflowOp::Mul => match new_sty { + Int(I8) => "llvm.smul.with.overflow.i8", + Int(I16) => "llvm.smul.with.overflow.i16", + Int(I32) => "llvm.smul.with.overflow.i32", + Int(I64) => "llvm.smul.with.overflow.i64", + Int(I128) => "llvm.smul.with.overflow.i128", + + Uint(U8) => "llvm.umul.with.overflow.i8", + Uint(U16) => "llvm.umul.with.overflow.i16", + Uint(U32) => "llvm.umul.with.overflow.i32", + Uint(U64) => "llvm.umul.with.overflow.i64", + Uint(U128) => "llvm.umul.with.overflow.i128", + + _ => unreachable!(), + }, + }; + + let intrinsic = self.get_intrinsic(&name); + let res = self.call(intrinsic, &[lhs, rhs], None); + ( + self.extract_value(res, 0), + self.extract_value(res, 1), + ) } - fn memcpy(&mut self, dst: &'ll Value, dst_align: Align, - src: &'ll Value, src_align: Align, - size: &'ll Value, flags: MemFlags) { - if flags.contains(MemFlags::NONTEMPORAL) { - // HACK(nox): This is inefficient but there is no nontemporal memcpy. 
- let val = self.load(src, src_align); - let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val))); - self.store_with_flags(val, ptr, dst_align, flags); - return; - } - let size = self.intcast(size, self.type_isize(), false); - let is_volatile = flags.contains(MemFlags::VOLATILE); - let dst = self.pointercast(dst, self.type_i8p()); - let src = self.pointercast(src, self.type_i8p()); + /* Casts */ + fn trunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + self.count_insn("trunc"); unsafe { - llvm::LLVMRustBuildMemCpy(self.llbuilder, dst, dst_align.bytes() as c_uint, - src, src_align.bytes() as c_uint, size, is_volatile); + llvm::LLVMBuildTrunc(self.llbuilder, val, dest_ty, noname()) } } - fn memmove(&mut self, dst: &'ll Value, dst_align: Align, - src: &'ll Value, src_align: Align, - size: &'ll Value, flags: MemFlags) { - if flags.contains(MemFlags::NONTEMPORAL) { - // HACK(nox): This is inefficient but there is no nontemporal memmove. - let val = self.load(src, src_align); - let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val))); - self.store_with_flags(val, ptr, dst_align, flags); - return; - } - let size = self.intcast(size, self.type_isize(), false); - let is_volatile = flags.contains(MemFlags::VOLATILE); - let dst = self.pointercast(dst, self.type_i8p()); - let src = self.pointercast(src, self.type_i8p()); + fn sext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + self.count_insn("sext"); unsafe { - llvm::LLVMRustBuildMemMove(self.llbuilder, dst, dst_align.bytes() as c_uint, - src, src_align.bytes() as c_uint, size, is_volatile); + llvm::LLVMBuildSExt(self.llbuilder, val, dest_ty, noname()) } } - fn memset( - &mut self, - ptr: &'ll Value, - fill_byte: &'ll Value, - size: &'ll Value, - align: Align, - flags: MemFlags, - ) { - let ptr_width = &self.sess().target.target.target_pointer_width; - let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width); - let llintrinsicfn = self.get_intrinsic(&intrinsic_key); - let ptr = self.pointercast(ptr, self.type_i8p()); - let align = self.const_u32(align.bytes() as u32); - let volatile = self.const_bool(flags.contains(MemFlags::VOLATILE)); - self.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None); - } - - fn minnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("minnum"); - unsafe { llvm::LLVMRustBuildMinNum(self.llbuilder, lhs, rhs) } - } - fn maxnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("maxnum"); - unsafe { llvm::LLVMRustBuildMaxNum(self.llbuilder, lhs, rhs) } + fn zext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + self.count_insn("zext"); + unsafe { + llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, noname()) + } } - fn select( - &mut self, cond: &'ll Value, - then_val: &'ll Value, - else_val: &'ll Value, - ) -> &'ll Value { - self.count_insn("select"); + fn fptoui(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + self.count_insn("fptoui"); unsafe { - llvm::LLVMBuildSelect(self.llbuilder, cond, then_val, else_val, noname()) + llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty, noname()) } } - #[allow(dead_code)] - fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value { - self.count_insn("vaarg"); + fn fptosi(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + self.count_insn("fptosi"); unsafe { - llvm::LLVMBuildVAArg(self.llbuilder, list, ty, noname()) + llvm::LLVMBuildFPToSI(self.llbuilder, val, dest_ty,noname()) } } - fn 
extract_element(&mut self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value { - self.count_insn("extractelement"); + fn uitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + self.count_insn("uitofp"); unsafe { - llvm::LLVMBuildExtractElement(self.llbuilder, vec, idx, noname()) + llvm::LLVMBuildUIToFP(self.llbuilder, val, dest_ty, noname()) } } - fn insert_element( - &mut self, vec: &'ll Value, - elt: &'ll Value, - idx: &'ll Value, - ) -> &'ll Value { - self.count_insn("insertelement"); + fn sitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + self.count_insn("sitofp"); unsafe { - llvm::LLVMBuildInsertElement(self.llbuilder, vec, elt, idx, noname()) + llvm::LLVMBuildSIToFP(self.llbuilder, val, dest_ty, noname()) } } - fn shuffle_vector(&mut self, v1: &'ll Value, v2: &'ll Value, mask: &'ll Value) -> &'ll Value { - self.count_insn("shufflevector"); + fn fptrunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + self.count_insn("fptrunc"); unsafe { - llvm::LLVMBuildShuffleVector(self.llbuilder, v1, v2, mask, noname()) + llvm::LLVMBuildFPTrunc(self.llbuilder, val, dest_ty, noname()) } } - fn vector_splat(&mut self, num_elts: usize, elt: &'ll Value) -> &'ll Value { + fn fpext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + self.count_insn("fpext"); unsafe { - let elt_ty = self.cx.val_ty(elt); - let undef = llvm::LLVMGetUndef(self.type_vector(elt_ty, num_elts as u64)); - let vec = self.insert_element(undef, elt, self.cx.const_i32(0)); - let vec_i32_ty = self.type_vector(self.type_i32(), num_elts as u64); - self.shuffle_vector(vec, undef, self.const_null(vec_i32_ty)) + llvm::LLVMBuildFPExt(self.llbuilder, val, dest_ty, noname()) } } - fn vector_reduce_fadd_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value { - self.count_insn("vector.reduce.fadd_fast"); + fn bitcast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + self.count_insn("bitcast"); unsafe { - // FIXME: add a non-fast math version once - // https://bugs.llvm.org/show_bug.cgi?id=36732 - // is fixed. - let instr = llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src); - llvm::LLVMRustSetHasUnsafeAlgebra(instr); - instr + llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, noname()) } } - fn vector_reduce_fmul_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value { - self.count_insn("vector.reduce.fmul_fast"); + + + fn intcast(&mut self, val: &'ll Value, dest_ty: &'ll Type, is_signed: bool) -> &'ll Value { + self.count_insn("intcast"); unsafe { - // FIXME: add a non-fast math version once - // https://bugs.llvm.org/show_bug.cgi?id=36732 - // is fixed. 
- let instr = llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src); - llvm::LLVMRustSetHasUnsafeAlgebra(instr); - instr + llvm::LLVMRustBuildIntCast(self.llbuilder, val, dest_ty, is_signed) } } - fn vector_reduce_add(&mut self, src: &'ll Value) -> &'ll Value { - self.count_insn("vector.reduce.add"); - unsafe { llvm::LLVMRustBuildVectorReduceAdd(self.llbuilder, src) } - } - fn vector_reduce_mul(&mut self, src: &'ll Value) -> &'ll Value { - self.count_insn("vector.reduce.mul"); - unsafe { llvm::LLVMRustBuildVectorReduceMul(self.llbuilder, src) } - } - fn vector_reduce_and(&mut self, src: &'ll Value) -> &'ll Value { - self.count_insn("vector.reduce.and"); - unsafe { llvm::LLVMRustBuildVectorReduceAnd(self.llbuilder, src) } - } - fn vector_reduce_or(&mut self, src: &'ll Value) -> &'ll Value { - self.count_insn("vector.reduce.or"); - unsafe { llvm::LLVMRustBuildVectorReduceOr(self.llbuilder, src) } - } - fn vector_reduce_xor(&mut self, src: &'ll Value) -> &'ll Value { - self.count_insn("vector.reduce.xor"); - unsafe { llvm::LLVMRustBuildVectorReduceXor(self.llbuilder, src) } - } - fn vector_reduce_fmin(&mut self, src: &'ll Value) -> &'ll Value { - self.count_insn("vector.reduce.fmin"); - unsafe { llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ false) } - } - fn vector_reduce_fmax(&mut self, src: &'ll Value) -> &'ll Value { - self.count_insn("vector.reduce.fmax"); - unsafe { llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ false) } - } - fn vector_reduce_fmin_fast(&mut self, src: &'ll Value) -> &'ll Value { - self.count_insn("vector.reduce.fmin_fast"); + + /* Comparisons */ + fn icmp(&mut self, op: IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("icmp"); + let op = llvm::IntPredicate::from_generic(op); unsafe { - let instr = llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ true); - llvm::LLVMRustSetHasUnsafeAlgebra(instr); - instr + llvm::LLVMBuildICmp(self.llbuilder, op as c_uint, lhs, rhs, noname()) } } - fn vector_reduce_fmax_fast(&mut self, src: &'ll Value) -> &'ll Value { - self.count_insn("vector.reduce.fmax_fast"); + + fn fcmp(&mut self, op: RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("fcmp"); unsafe { - let instr = llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ true); - llvm::LLVMRustSetHasUnsafeAlgebra(instr); - instr + llvm::LLVMBuildFCmp(self.llbuilder, op as c_uint, lhs, rhs, noname()) } } - fn vector_reduce_min(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value { - self.count_insn("vector.reduce.min"); - unsafe { llvm::LLVMRustBuildVectorReduceMin(self.llbuilder, src, is_signed) } - } - fn vector_reduce_max(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value { - self.count_insn("vector.reduce.max"); - unsafe { llvm::LLVMRustBuildVectorReduceMax(self.llbuilder, src, is_signed) } - } - fn extract_value(&mut self, agg_val: &'ll Value, idx: u64) -> &'ll Value { - self.count_insn("extractvalue"); - assert_eq!(idx as c_uint as u64, idx); + fn select( + &mut self, cond: &'ll Value, + then_val: &'ll Value, + else_val: &'ll Value, + ) -> &'ll Value { + self.count_insn("select"); unsafe { - llvm::LLVMBuildExtractValue(self.llbuilder, agg_val, idx as c_uint, noname()) + llvm::LLVMBuildSelect(self.llbuilder, cond, then_val, else_val, noname()) } } +} + +impl UnwindBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { + fn invoke( + &mut self, + llfn: &'ll Value, + args: &[&'ll Value], + then: &'ll BasicBlock, + catch: 
&'ll BasicBlock, + funclet: Option<&Funclet<'ll>>, + ) -> &'ll Value { + self.count_insn("invoke"); + + debug!("Invoke {:?} with args ({:?})", + llfn, + args); + + let args = self.check_call("invoke", llfn, args); + let bundle = funclet.map(|funclet| funclet.bundle()); + let bundle = bundle.as_ref().map(|b| &*b.raw); - fn insert_value(&mut self, agg_val: &'ll Value, elt: &'ll Value, - idx: u64) -> &'ll Value { - self.count_insn("insertvalue"); - assert_eq!(idx as c_uint as u64, idx); unsafe { - llvm::LLVMBuildInsertValue(self.llbuilder, agg_val, elt, idx as c_uint, - noname()) + llvm::LLVMRustBuildInvoke(self.llbuilder, + llfn, + args.as_ptr(), + args.len() as c_uint, + then, + catch, + bundle, + noname()) } } @@ -1176,12 +1062,6 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } } - fn add_clause(&mut self, landing_pad: &'ll Value, clause: &'ll Value) { - unsafe { - llvm::LLVMAddClause(landing_pad, clause); - } - } - fn set_cleanup(&mut self, landing_pad: &'ll Value) { self.count_insn("setcleanup"); unsafe { @@ -1235,117 +1115,301 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { Funclet::new(ret.expect("LLVM does not have support for catchpad")) } - fn catch_ret(&mut self, funclet: &Funclet<'ll>, unwind: &'ll BasicBlock) -> &'ll Value { - self.count_insn("catchret"); - let ret = unsafe { - llvm::LLVMRustBuildCatchRet(self.llbuilder, funclet.cleanuppad(), unwind) - }; - ret.expect("LLVM does not have support for catchret") + fn catch_switch( + &mut self, + parent: Option<&'ll Value>, + unwind: Option<&'ll BasicBlock>, + num_handlers: usize, + ) -> &'ll Value { + self.count_insn("catchswitch"); + let name = const_cstr!("catchswitch"); + let ret = unsafe { + llvm::LLVMRustBuildCatchSwitch(self.llbuilder, parent, unwind, + num_handlers as c_uint, + name.as_ptr()) + }; + ret.expect("LLVM does not have support for catchswitch") + } + + fn add_handler(&mut self, catch_switch: &'ll Value, handler: &'ll BasicBlock) { + unsafe { + llvm::LLVMRustAddHandler(catch_switch, handler); + } + } + + fn set_personality_fn(&mut self, personality: &'ll Value) { + unsafe { + llvm::LLVMSetPersonalityFn(self.llfn(), personality); + } + } +} + +impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { + /* Miscellaneous instructions */ + fn extract_element(&mut self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value { + self.count_insn("extractelement"); + unsafe { + llvm::LLVMBuildExtractElement(self.llbuilder, vec, idx, noname()) + } + } + + fn vector_splat(&mut self, num_elts: usize, elt: &'ll Value) -> &'ll Value { + unsafe { + let elt_ty = self.cx.val_ty(elt); + let undef = llvm::LLVMGetUndef(self.type_vector(elt_ty, num_elts as u64)); + let vec = self.insert_element(undef, elt, self.cx.const_i32(0)); + let vec_i32_ty = self.type_vector(self.type_i32(), num_elts as u64); + self.shuffle_vector(vec, undef, self.const_null(vec_i32_ty)) + } + } + + fn extract_value(&mut self, agg_val: &'ll Value, idx: u64) -> &'ll Value { + self.count_insn("extractvalue"); + assert_eq!(idx as c_uint as u64, idx); + unsafe { + llvm::LLVMBuildExtractValue(self.llbuilder, agg_val, idx as c_uint, noname()) + } + } + + fn insert_value(&mut self, agg_val: &'ll Value, elt: &'ll Value, + idx: u64) -> &'ll Value { + self.count_insn("insertvalue"); + assert_eq!(idx as c_uint as u64, idx); + unsafe { + llvm::LLVMBuildInsertValue(self.llbuilder, agg_val, elt, idx as c_uint, + noname()) + } + } + + fn lifetime_start(&mut self, ptr: &'ll Value, size: Size) { + self.call_lifetime_intrinsic("llvm.lifetime.start", ptr, 
size); + } + + fn lifetime_end(&mut self, ptr: &'ll Value, size: Size) { + self.call_lifetime_intrinsic("llvm.lifetime.end", ptr, size); + } + + fn call( + &mut self, + llfn: &'ll Value, + args: &[&'ll Value], + funclet: Option<&Funclet<'ll>>, + ) -> &'ll Value { + self.count_insn("call"); + + debug!("Call {:?} with args ({:?})", + llfn, + args); + + let args = self.check_call("call", llfn, args); + let bundle = funclet.map(|funclet| funclet.bundle()); + let bundle = bundle.as_ref().map(|b| &*b.raw); + + unsafe { + llvm::LLVMRustBuildCall( + self.llbuilder, + llfn, + args.as_ptr() as *const &llvm::Value, + args.len() as c_uint, + bundle, noname() + ) + } + } + + unsafe fn delete_basic_block(&mut self, bb: &'ll BasicBlock) { + llvm::LLVMDeleteBasicBlock(bb); + } + + fn do_not_inline(&mut self, llret: &'ll Value) { + llvm::Attribute::NoInline.apply_callsite(llvm::AttributePlace::Function, llret); + } +} + +impl StaticBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { + fn get_static(&mut self, def_id: DefId) -> &'ll Value { + // Forward to the `get_static` method of `CodegenCx` + self.cx().get_static(def_id) } - fn catch_switch( + fn static_panic_msg( &mut self, - parent: Option<&'ll Value>, - unwind: Option<&'ll BasicBlock>, - num_handlers: usize, - ) -> &'ll Value { - self.count_insn("catchswitch"); - let name = const_cstr!("catchswitch"); - let ret = unsafe { - llvm::LLVMRustBuildCatchSwitch(self.llbuilder, parent, unwind, - num_handlers as c_uint, - name.as_ptr()) + msg: Option, + filename: LocalInternedString, + line: Self::Value, + col: Self::Value, + kind: &str, + ) -> Self::Value { + let align = self.tcx.data_layout.aggregate_align.abi + .max(self.tcx.data_layout.i32_align.abi) + .max(self.tcx.data_layout.pointer_align.abi); + + let filename = self.const_str_slice(filename); + + let with_msg_components; + let without_msg_components; + + let components = if let Some(msg) = msg { + let msg = self.const_str_slice(msg); + with_msg_components = [msg, filename, line, col]; + &with_msg_components as &[_] + } else { + without_msg_components = [filename, line, col]; + &without_msg_components as &[_] }; - ret.expect("LLVM does not have support for catchswitch") + + let struct_ = self.const_struct(&components, false); + self.static_addr_of(struct_, align, Some(kind)) } +} - fn add_handler(&mut self, catch_switch: &'ll Value, handler: &'ll BasicBlock) { +impl Builder<'a, 'll, 'tcx> { + pub fn llfn(&self) -> &'ll Value { unsafe { - llvm::LLVMRustAddHandler(catch_switch, handler); + llvm::LLVMGetBasicBlockParent(self.llbb()) } } - fn set_personality_fn(&mut self, personality: &'ll Value) { + fn count_insn(&self, category: &str) { + if self.sess().codegen_stats() { + self.stats.borrow_mut().n_llvm_insns += 1; + } + if self.sess().count_llvm_insns() { + *self.stats + .borrow_mut() + .llvm_insns + .entry(category.to_string()) + .or_insert(0) += 1; + } + } + + fn position_at_start(&mut self, llbb: &'ll BasicBlock) { unsafe { - llvm::LLVMSetPersonalityFn(self.llfn(), personality); + llvm::LLVMRustPositionBuilderAtStart(self.llbuilder, llbb); } } - // Atomic Operations - fn atomic_cmpxchg( - &mut self, - dst: &'ll Value, - cmp: &'ll Value, - src: &'ll Value, - order: rustc_codegen_ssa::common::AtomicOrdering, - failure_order: rustc_codegen_ssa::common::AtomicOrdering, - weak: bool, + pub fn minnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("minnum"); + unsafe { llvm::LLVMRustBuildMinNum(self.llbuilder, lhs, rhs) } + } + + pub fn maxnum(&mut self, lhs: &'ll Value, rhs: 
&'ll Value) -> &'ll Value { + self.count_insn("maxnum"); + unsafe { llvm::LLVMRustBuildMaxNum(self.llbuilder, lhs, rhs) } + } + + pub fn insert_element( + &mut self, vec: &'ll Value, + elt: &'ll Value, + idx: &'ll Value, ) -> &'ll Value { - let weak = if weak { llvm::True } else { llvm::False }; + self.count_insn("insertelement"); unsafe { - llvm::LLVMRustBuildAtomicCmpXchg( - self.llbuilder, - dst, - cmp, - src, - AtomicOrdering::from_generic(order), - AtomicOrdering::from_generic(failure_order), - weak - ) + llvm::LLVMBuildInsertElement(self.llbuilder, vec, elt, idx, noname()) } } - fn atomic_rmw( + + pub fn shuffle_vector( &mut self, - op: rustc_codegen_ssa::common::AtomicRmwBinOp, - dst: &'ll Value, - src: &'ll Value, - order: rustc_codegen_ssa::common::AtomicOrdering, + v1: &'ll Value, + v2: &'ll Value, + mask: &'ll Value, ) -> &'ll Value { + self.count_insn("shufflevector"); unsafe { - llvm::LLVMBuildAtomicRMW( - self.llbuilder, - AtomicRmwBinOp::from_generic(op), - dst, - src, - AtomicOrdering::from_generic(order), - False) + llvm::LLVMBuildShuffleVector(self.llbuilder, v1, v2, mask, noname()) } } - fn atomic_fence( - &mut self, - order: rustc_codegen_ssa::common::AtomicOrdering, - scope: rustc_codegen_ssa::common::SynchronizationScope - ) { + pub fn vector_reduce_fadd_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value { + self.count_insn("vector.reduce.fadd_fast"); unsafe { - llvm::LLVMRustBuildAtomicFence( - self.llbuilder, - AtomicOrdering::from_generic(order), - SynchronizationScope::from_generic(scope) - ); + // FIXME: add a non-fast math version once + // https://bugs.llvm.org/show_bug.cgi?id=36732 + // is fixed. + let instr = llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src); + llvm::LLVMRustSetHasUnsafeAlgebra(instr); + instr } } - - fn add_case(&mut self, s: &'ll Value, on_val: &'ll Value, dest: &'ll BasicBlock) { + pub fn vector_reduce_fmul_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value { + self.count_insn("vector.reduce.fmul_fast"); unsafe { - llvm::LLVMAddCase(s, on_val, dest) + // FIXME: add a non-fast math version once + // https://bugs.llvm.org/show_bug.cgi?id=36732 + // is fixed. 
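// [Review note, not part of the patch: `LLVMRustSetHasUnsafeAlgebra` below tags
// the reduction with fast-math flags. LLVM only lets the fadd/fmul vector
// reductions reassociate (treating `acc` as a plain start value) when those
// flags are present, which appears to be why only the `_fast` variants exist
// until the FIXME above is resolved.]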
+ let instr = llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src); + llvm::LLVMRustSetHasUnsafeAlgebra(instr); + instr } } - - fn add_incoming_to_phi(&mut self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock) { - self.count_insn("addincoming"); + pub fn vector_reduce_add(&mut self, src: &'ll Value) -> &'ll Value { + self.count_insn("vector.reduce.add"); + unsafe { llvm::LLVMRustBuildVectorReduceAdd(self.llbuilder, src) } + } + pub fn vector_reduce_mul(&mut self, src: &'ll Value) -> &'ll Value { + self.count_insn("vector.reduce.mul"); + unsafe { llvm::LLVMRustBuildVectorReduceMul(self.llbuilder, src) } + } + pub fn vector_reduce_and(&mut self, src: &'ll Value) -> &'ll Value { + self.count_insn("vector.reduce.and"); + unsafe { llvm::LLVMRustBuildVectorReduceAnd(self.llbuilder, src) } + } + pub fn vector_reduce_or(&mut self, src: &'ll Value) -> &'ll Value { + self.count_insn("vector.reduce.or"); + unsafe { llvm::LLVMRustBuildVectorReduceOr(self.llbuilder, src) } + } + pub fn vector_reduce_xor(&mut self, src: &'ll Value) -> &'ll Value { + self.count_insn("vector.reduce.xor"); + unsafe { llvm::LLVMRustBuildVectorReduceXor(self.llbuilder, src) } + } + pub fn vector_reduce_fmin(&mut self, src: &'ll Value) -> &'ll Value { + self.count_insn("vector.reduce.fmin"); + unsafe { llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ false) } + } + pub fn vector_reduce_fmax(&mut self, src: &'ll Value) -> &'ll Value { + self.count_insn("vector.reduce.fmax"); + unsafe { llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ false) } + } + pub fn vector_reduce_fmin_fast(&mut self, src: &'ll Value) -> &'ll Value { + self.count_insn("vector.reduce.fmin_fast"); unsafe { - llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint); + let instr = llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ true); + llvm::LLVMRustSetHasUnsafeAlgebra(instr); + instr + } + } + pub fn vector_reduce_fmax_fast(&mut self, src: &'ll Value) -> &'ll Value { + self.count_insn("vector.reduce.fmax_fast"); + unsafe { + let instr = llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ true); + llvm::LLVMRustSetHasUnsafeAlgebra(instr); + instr } } + pub fn vector_reduce_min(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value { + self.count_insn("vector.reduce.min"); + unsafe { llvm::LLVMRustBuildVectorReduceMin(self.llbuilder, src, is_signed) } + } + pub fn vector_reduce_max(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value { + self.count_insn("vector.reduce.max"); + unsafe { llvm::LLVMRustBuildVectorReduceMax(self.llbuilder, src, is_signed) } + } - fn set_invariant_load(&mut self, load: &'ll Value) { + pub fn add_clause(&mut self, landing_pad: &'ll Value, clause: &'ll Value) { unsafe { - llvm::LLVMSetMetadata(load, llvm::MD_invariant_load as c_uint, - llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0)); + llvm::LLVMAddClause(landing_pad, clause); } } + pub fn catch_ret(&mut self, funclet: &Funclet<'ll>, unwind: &'ll BasicBlock) -> &'ll Value { + self.count_insn("catchret"); + let ret = unsafe { + llvm::LLVMRustBuildCatchRet(self.llbuilder, funclet.cleanuppad(), unwind) + }; + ret.expect("LLVM does not have support for catchret") + } + fn check_store<'b>(&mut self, val: &'ll Value, ptr: &'ll Value) -> &'ll Value { @@ -1407,76 +1471,13 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { Cow::Owned(casted_args) } - fn lifetime_start(&mut self, ptr: &'ll Value, size: Size) { - self.call_lifetime_intrinsic("llvm.lifetime.start", ptr, size); - 
} - - fn lifetime_end(&mut self, ptr: &'ll Value, size: Size) { - self.call_lifetime_intrinsic("llvm.lifetime.end", ptr, size); - } - - fn call( - &mut self, - llfn: &'ll Value, - args: &[&'ll Value], - funclet: Option<&Funclet<'ll>>, - ) -> &'ll Value { - self.count_insn("call"); - - debug!("Call {:?} with args ({:?})", - llfn, - args); - - let args = self.check_call("call", llfn, args); - let bundle = funclet.map(|funclet| funclet.bundle()); - let bundle = bundle.as_ref().map(|b| &*b.raw); - - unsafe { - llvm::LLVMRustBuildCall( - self.llbuilder, - llfn, - args.as_ptr() as *const &llvm::Value, - args.len() as c_uint, - bundle, noname() - ) - } - } - - fn zext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { - self.count_insn("zext"); - unsafe { - llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, noname()) - } - } - - fn struct_gep(&mut self, ptr: &'ll Value, idx: u64) -> &'ll Value { - self.count_insn("structgep"); - assert_eq!(idx as c_uint as u64, idx); + pub fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value { + self.count_insn("vaarg"); unsafe { - llvm::LLVMBuildStructGEP(self.llbuilder, ptr, idx as c_uint, noname()) + llvm::LLVMBuildVAArg(self.llbuilder, list, ty, noname()) } } - fn cx(&self) -> &CodegenCx<'ll, 'tcx> { - self.cx - } - - unsafe fn delete_basic_block(&mut self, bb: &'ll BasicBlock) { - llvm::LLVMDeleteBasicBlock(bb); - } - - fn do_not_inline(&mut self, llret: &'ll Value) { - llvm::Attribute::NoInline.apply_callsite(llvm::AttributePlace::Function, llret); - } -} - -impl StaticBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { - fn get_static(&self, def_id: DefId) -> &'ll Value { - self.cx().get_static(def_id) - } -} - -impl Builder<'a, 'll, 'tcx> { fn call_lifetime_intrinsic(&mut self, intrinsic: &str, ptr: &'ll Value, size: Size) { if self.cx.sess().opts.optimize == config::OptLevel::No { return; @@ -1492,4 +1493,25 @@ impl Builder<'a, 'll, 'tcx> { let ptr = self.pointercast(ptr, self.cx.type_i8p()); self.call(lifetime_intrinsic, &[self.cx.const_u64(size), ptr], None); } + + fn phi(&mut self, ty: &'ll Type, vals: &[&'ll Value], bbs: &[&'ll BasicBlock]) -> &'ll Value { + self.count_insn("addincoming"); + assert_eq!(vals.len(), bbs.len()); + let phi = unsafe { + llvm::LLVMBuildPhi(self.llbuilder, ty, noname()) + }; + unsafe { + llvm::LLVMAddIncoming(phi, vals.as_ptr(), + bbs.as_ptr(), + vals.len() as c_uint); + phi + } + } + + fn add_incoming_to_phi(&mut self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock) { + self.count_insn("addincoming"); + unsafe { + llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint); + } + } } diff --git a/src/librustc_codegen_llvm/common.rs b/src/librustc_codegen_llvm/common.rs index 4bd036ea3b17a..b0579d8c6b5a8 100644 --- a/src/librustc_codegen_llvm/common.rs +++ b/src/librustc_codegen_llvm/common.rs @@ -86,6 +86,7 @@ impl Funclet<'ll> { impl BackendTypes for CodegenCx<'ll, 'tcx> { type Value = &'ll Value; + type Switch = &'ll Value; type BasicBlock = &'ll BasicBlock; type Type = &'ll Type; type Funclet = Funclet<'ll>; @@ -93,6 +94,100 @@ impl BackendTypes for CodegenCx<'ll, 'tcx> { type DIScope = &'ll llvm::debuginfo::DIScope; } +impl CodegenCx<'ll, 'tcx> { + pub fn const_fat_ptr( + &self, + ptr: &'ll Value, + meta: &'ll Value + ) -> &'ll Value { + assert_eq!(abi::FAT_PTR_ADDR, 0); + assert_eq!(abi::FAT_PTR_EXTRA, 1); + self.const_struct(&[ptr, meta], false) + } + + pub fn const_array(&self, ty: &'ll Type, elts: &[&'ll Value]) -> &'ll Value { + unsafe { + return llvm::LLVMConstArray(ty, elts.as_ptr(), 
elts.len() as c_uint); + } + } + + pub fn const_vector(&self, elts: &[&'ll Value]) -> &'ll Value { + unsafe { + return llvm::LLVMConstVector(elts.as_ptr(), elts.len() as c_uint); + } + } + + pub fn const_bytes(&self, bytes: &[u8]) -> &'ll Value { + bytes_in_context(self.llcx, bytes) + } + + fn const_cstr( + &self, + s: LocalInternedString, + null_terminated: bool, + ) -> &'ll Value { + unsafe { + if let Some(&llval) = self.const_cstr_cache.borrow().get(&s) { + return llval; + } + + let sc = llvm::LLVMConstStringInContext(self.llcx, + s.as_ptr() as *const c_char, + s.len() as c_uint, + !null_terminated as Bool); + let sym = self.generate_local_symbol_name("str"); + let g = self.define_global(&sym[..], self.val_ty(sc)).unwrap_or_else(||{ + bug!("symbol `{}` is already defined", sym); + }); + llvm::LLVMSetInitializer(g, sc); + llvm::LLVMSetGlobalConstant(g, True); + llvm::LLVMRustSetLinkage(g, llvm::Linkage::InternalLinkage); + + self.const_cstr_cache.borrow_mut().insert(s, g); + g + } + } + + pub fn const_str_slice(&self, s: LocalInternedString) -> &'ll Value { + let len = s.len(); + let cs = consts::ptrcast(self.const_cstr(s, false), + self.type_ptr_to(self.layout_of(self.tcx.mk_str()).llvm_type(self))); + self.const_fat_ptr(cs, self.const_usize(len as u64)) + } + + pub fn const_get_elt(&self, v: &'ll Value, idx: u64) -> &'ll Value { + unsafe { + assert_eq!(idx as c_uint as u64, idx); + let us = &[idx as c_uint]; + let r = llvm::LLVMConstExtractValue(v, us.as_ptr(), us.len() as c_uint); + + debug!("const_get_elt(v={:?}, idx={}, r={:?})", + v, idx, r); + + r + } + } + + pub fn const_get_real(&self, v: &'ll Value) -> Option<(f64, bool)> { + unsafe { + if self.is_const_real(v) { + let mut loses_info: llvm::Bool = ::std::mem::uninitialized(); + let r = llvm::LLVMConstRealGetDouble(v, &mut loses_info); + let loses_info = if loses_info == 1 { true } else { false }; + Some((r, loses_info)) + } else { + None + } + } + } + + fn is_const_real(&self, v: &'ll Value) -> bool { + unsafe { + llvm::LLVMIsAConstantFP(v).is_some() + } + } +} + impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { fn const_null(&self, t: &'ll Type) -> &'ll Value { unsafe { @@ -155,50 +250,6 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { self.const_uint(self.type_i8(), i as u64) } - fn const_cstr( - &self, - s: LocalInternedString, - null_terminated: bool, - ) -> &'ll Value { - unsafe { - if let Some(&llval) = self.const_cstr_cache.borrow().get(&s) { - return llval; - } - - let sc = llvm::LLVMConstStringInContext(self.llcx, - s.as_ptr() as *const c_char, - s.len() as c_uint, - !null_terminated as Bool); - let sym = self.generate_local_symbol_name("str"); - let g = self.define_global(&sym[..], self.val_ty(sc)).unwrap_or_else(||{ - bug!("symbol `{}` is already defined", sym); - }); - llvm::LLVMSetInitializer(g, sc); - llvm::LLVMSetGlobalConstant(g, True); - llvm::LLVMRustSetLinkage(g, llvm::Linkage::InternalLinkage); - - self.const_cstr_cache.borrow_mut().insert(s, g); - g - } - } - - fn const_str_slice(&self, s: LocalInternedString) -> &'ll Value { - let len = s.len(); - let cs = consts::ptrcast(self.const_cstr(s, false), - self.type_ptr_to(self.layout_of(self.tcx.mk_str()).llvm_type(self))); - self.const_fat_ptr(cs, self.const_usize(len as u64)) - } - - fn const_fat_ptr( - &self, - ptr: &'ll Value, - meta: &'ll Value - ) -> &'ll Value { - assert_eq!(abi::FAT_PTR_ADDR, 0); - assert_eq!(abi::FAT_PTR_EXTRA, 1); - self.const_struct(&[ptr, meta], false) - } - fn const_struct( &self, elts: &[&'ll Value], @@ -207,48 +258,6 @@ 
impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { struct_in_context(self.llcx, elts, packed) } - fn const_array(&self, ty: &'ll Type, elts: &[&'ll Value]) -> &'ll Value { - unsafe { - return llvm::LLVMConstArray(ty, elts.as_ptr(), elts.len() as c_uint); - } - } - - fn const_vector(&self, elts: &[&'ll Value]) -> &'ll Value { - unsafe { - return llvm::LLVMConstVector(elts.as_ptr(), elts.len() as c_uint); - } - } - - fn const_bytes(&self, bytes: &[u8]) -> &'ll Value { - bytes_in_context(self.llcx, bytes) - } - - fn const_get_elt(&self, v: &'ll Value, idx: u64) -> &'ll Value { - unsafe { - assert_eq!(idx as c_uint as u64, idx); - let us = &[idx as c_uint]; - let r = llvm::LLVMConstExtractValue(v, us.as_ptr(), us.len() as c_uint); - - debug!("const_get_elt(v={:?}, idx={}, r={:?})", - v, idx, r); - - r - } - } - - fn const_get_real(&self, v: &'ll Value) -> Option<(f64, bool)> { - unsafe { - if self.is_const_real(v) { - let mut loses_info: llvm::Bool = ::std::mem::uninitialized(); - let r = llvm::LLVMConstRealGetDouble(v, &mut loses_info); - let loses_info = if loses_info == 1 { true } else { false }; - Some((r, loses_info)) - } else { - None - } - } - } - fn const_to_uint(&self, v: &'ll Value) -> u64 { unsafe { llvm::LLVMConstIntGetZExtValue(v) @@ -261,12 +270,6 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { } } - fn is_const_real(&self, v: &'ll Value) -> bool { - unsafe { - llvm::LLVMIsAConstantFP(v).is_some() - } - } - fn const_to_opt_u128(&self, v: &'ll Value, sign_ext: bool) -> Option { unsafe { if self.is_const_integral(v) { diff --git a/src/librustc_codegen_llvm/context.rs b/src/librustc_codegen_llvm/context.rs index 23e3a8425d370..acdce095d4e52 100644 --- a/src/librustc_codegen_llvm/context.rs +++ b/src/librustc_codegen_llvm/context.rs @@ -10,7 +10,6 @@ use crate::monomorphize::partitioning::CodegenUnit; use crate::type_::Type; use crate::type_of::PointeeInfo; use rustc_codegen_ssa::traits::*; -use libc::c_uint; use rustc_data_structures::base_n; use rustc_data_structures::small_c_str::SmallCStr; @@ -326,10 +325,6 @@ impl MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> { get_fn(self, instance) } - fn get_param(&self, llfn: &'ll Value, index: c_uint) -> &'ll Value { - llvm::get_param(llfn, index) - } - fn eh_personality(&self) -> &'ll Value { // The exception handling personality function. 
// diff --git a/src/librustc_codegen_llvm/debuginfo/mod.rs b/src/librustc_codegen_llvm/debuginfo/mod.rs index c0869bb889afa..6abbcd9feba7a 100644 --- a/src/librustc_codegen_llvm/debuginfo/mod.rs +++ b/src/librustc_codegen_llvm/debuginfo/mod.rs @@ -32,7 +32,7 @@ use rustc_codegen_ssa::debuginfo::{FunctionDebugContext, MirDebugScope, Variable VariableKind, FunctionDebugContextData}; use libc::c_uint; -use std::cell::{Cell, RefCell}; +use std::cell::RefCell; use std::ffi::CString; use syntax_pos::{self, Span, Pos}; @@ -158,7 +158,7 @@ impl DebugInfoBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { variable_kind: VariableKind, span: Span, ) { - assert!(!dbg_context.get_ref(span).source_locations_enabled.get()); + assert!(!dbg_context.get_ref(span).source_locations_enabled); let cx = self.cx(); let file = span_start(cx, span).file; @@ -216,7 +216,7 @@ impl DebugInfoBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { fn set_source_location( &mut self, - debug_context: &FunctionDebugContext<&'ll DISubprogram>, + debug_context: &mut FunctionDebugContext<&'ll DISubprogram>, scope: Option<&'ll DIScope>, span: Span, ) { @@ -225,6 +225,13 @@ impl DebugInfoBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { fn insert_reference_to_gdb_debug_scripts_section_global(&mut self) { gdb::insert_reference_to_gdb_debug_scripts_section_global(self) } + + fn set_value_name(&mut self, value: &'ll Value, name: &str) { + let cname = SmallCStr::new(name); + unsafe { + llvm::LLVMSetValueName(value, cname.as_ptr()); + } + } } impl DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> { @@ -327,7 +334,7 @@ impl DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> { // Initialize fn debug context (including scope map and namespace map) let fn_debug_context = FunctionDebugContextData { fn_metadata, - source_locations_enabled: Cell::new(false), + source_locations_enabled: false, defining_crate: def_id.krate, }; @@ -519,7 +526,7 @@ impl DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> { fn create_mir_scopes( &self, mir: &mir::Mir<'_>, - debug_context: &FunctionDebugContext<&'ll DISubprogram>, + debug_context: &mut FunctionDebugContext<&'ll DISubprogram>, ) -> IndexVec> { create_scope_map::create_mir_scopes(self, mir, debug_context) } diff --git a/src/librustc_codegen_llvm/debuginfo/source_loc.rs b/src/librustc_codegen_llvm/debuginfo/source_loc.rs index f7620e11c233d..dec93a65dbaf4 100644 --- a/src/librustc_codegen_llvm/debuginfo/source_loc.rs +++ b/src/librustc_codegen_llvm/debuginfo/source_loc.rs @@ -30,7 +30,7 @@ pub fn set_source_location( FunctionDebugContext::RegularContext(ref data) => data }; - let dbg_loc = if function_debug_context.source_locations_enabled.get() { + let dbg_loc = if function_debug_context.source_locations_enabled { debug!("set_source_location: {}", bx.sess().source_map().span_to_string(span)); let loc = span_start(bx.cx(), span); InternalDebugLocation::new(scope.unwrap(), loc.line, loc.col.to_usize()) diff --git a/src/librustc_codegen_llvm/type_.rs b/src/librustc_codegen_llvm/type_.rs index d5424fa459166..a5ed64a66a39d 100644 --- a/src/librustc_codegen_llvm/type_.rs +++ b/src/librustc_codegen_llvm/type_.rs @@ -11,15 +11,14 @@ use rustc_codegen_ssa::traits::*; use crate::common; use crate::type_of::LayoutLlvmExt; use crate::abi::{LlvmType, FnTypeExt}; -use rustc::util::nodemap::FxHashMap; +use syntax::ast; use rustc::ty::Ty; -use rustc::ty::layout::TyLayout; +use rustc::ty::layout::{self, Align, Size, TyLayout}; use rustc_target::abi::call::{CastTarget, FnType, Reg}; use 
rustc_data_structures::small_c_str::SmallCStr; use rustc_codegen_ssa::common::TypeKind; use std::fmt; -use std::cell::RefCell; use std::ptr; use libc::c_uint; @@ -52,21 +51,116 @@ impl CodegenCx<'ll, 'tcx> { els.len() as c_uint, packed as Bool) } } -} -impl BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> { - fn type_void(&self) -> &'ll Type { + crate fn type_void(&self) -> &'ll Type { unsafe { llvm::LLVMVoidTypeInContext(self.llcx) } } - fn type_metadata(&self) -> &'ll Type { + crate fn type_metadata(&self) -> &'ll Type { unsafe { llvm::LLVMRustMetadataTypeInContext(self.llcx) } } + ///x Creates an integer type with the given number of bits, e.g., i24 + crate fn type_ix(&self, num_bits: u64) -> &'ll Type { + unsafe { + llvm::LLVMIntTypeInContext(self.llcx, num_bits as c_uint) + } + } + + crate fn type_x86_mmx(&self) -> &'ll Type { + unsafe { + llvm::LLVMX86MMXTypeInContext(self.llcx) + } + } + + crate fn type_vector(&self, ty: &'ll Type, len: u64) -> &'ll Type { + unsafe { + llvm::LLVMVectorType(ty, len as c_uint) + } + } + + crate fn func_params_types(&self, ty: &'ll Type) -> Vec<&'ll Type> { + unsafe { + let n_args = llvm::LLVMCountParamTypes(ty) as usize; + let mut args = Vec::with_capacity(n_args); + llvm::LLVMGetParamTypes(ty, args.as_mut_ptr()); + args.set_len(n_args); + args + } + } + + crate fn type_bool(&self) -> &'ll Type { + self.type_i8() + } + + crate fn type_int_from_ty(&self, t: ast::IntTy) -> &'ll Type { + match t { + ast::IntTy::Isize => self.type_isize(), + ast::IntTy::I8 => self.type_i8(), + ast::IntTy::I16 => self.type_i16(), + ast::IntTy::I32 => self.type_i32(), + ast::IntTy::I64 => self.type_i64(), + ast::IntTy::I128 => self.type_i128(), + } + } + + crate fn type_uint_from_ty(&self, t: ast::UintTy) -> &'ll Type { + match t { + ast::UintTy::Usize => self.type_isize(), + ast::UintTy::U8 => self.type_i8(), + ast::UintTy::U16 => self.type_i16(), + ast::UintTy::U32 => self.type_i32(), + ast::UintTy::U64 => self.type_i64(), + ast::UintTy::U128 => self.type_i128(), + } + } + + crate fn type_float_from_ty(&self, t: ast::FloatTy) -> &'ll Type { + match t { + ast::FloatTy::F32 => self.type_f32(), + ast::FloatTy::F64 => self.type_f64(), + } + } + + crate fn type_pointee_for_align(&self, align: Align) -> &'ll Type { + // FIXME(eddyb) We could find a better approximation if ity.align < align. + let ity = layout::Integer::approximate_align(self, align); + self.type_from_integer(ity) + } + + /// Return a LLVM type that has at most the required alignment, + /// and exactly the required size, as a best-effort padding array. 
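// [Review note, illustrative example: a `size` of 24 bytes with an 8-byte
// `align` picks `i64` as the unit on a typical 64-bit target, so the filler
// returned below is the LLVM type `[3 x i64]`.]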
+ crate fn type_padding_filler(&self, size: Size, align: Align) -> &'ll Type { + let unit = layout::Integer::approximate_align(self, align); + let size = size.bytes(); + let unit_size = unit.size().bytes(); + assert_eq!(size % unit_size, 0); + self.type_array(self.type_from_integer(unit), size / unit_size) + } + + crate fn type_variadic_func( + &self, + args: &[&'ll Type], + ret: &'ll Type + ) -> &'ll Type { + unsafe { + llvm::LLVMFunctionType(ret, args.as_ptr(), + args.len() as c_uint, True) + } + } + + crate fn type_array(&self, ty: &'ll Type, len: u64) -> &'ll Type { + unsafe { + llvm::LLVMRustArrayType(ty, len) + } + } +} + +impl BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> { fn type_i1(&self) -> &'ll Type { unsafe { llvm::LLVMInt1TypeInContext(self.llcx) @@ -104,12 +198,6 @@ impl BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> { } } - fn type_ix(&self, num_bits: u64) -> &'ll Type { - unsafe { - llvm::LLVMIntTypeInContext(self.llcx, num_bits as c_uint) - } - } - fn type_isize(&self) -> &'ll Type { self.isize_ty } @@ -126,12 +214,6 @@ impl BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> { } } - fn type_x86_mmx(&self) -> &'ll Type { - unsafe { - llvm::LLVMX86MMXTypeInContext(self.llcx) - } - } - fn type_func( &self, args: &[&'ll Type], @@ -143,17 +225,6 @@ impl BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> { } } - fn type_variadic_func( - &self, - args: &[&'ll Type], - ret: &'ll Type - ) -> &'ll Type { - unsafe { - llvm::LLVMFunctionType(ret, args.as_ptr(), - args.len() as c_uint, True) - } - } - fn type_struct( &self, els: &[&'ll Type], @@ -166,19 +237,6 @@ impl BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> { } } - - fn type_array(&self, ty: &'ll Type, len: u64) -> &'ll Type { - unsafe { - llvm::LLVMRustArrayType(ty, len) - } - } - - fn type_vector(&self, ty: &'ll Type, len: u64) -> &'ll Type { - unsafe { - llvm::LLVMVectorType(ty, len as c_uint) - } - } - fn type_kind(&self, ty: &'ll Type) -> TypeKind { unsafe { llvm::LLVMRustGetTypeKind(ty).to_generic() @@ -203,16 +261,6 @@ impl BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> { } } - fn func_params_types(&self, ty: &'ll Type) -> Vec<&'ll Type> { - unsafe { - let n_args = llvm::LLVMCountParamTypes(ty) as usize; - let mut args = Vec::with_capacity(n_args); - llvm::LLVMGetParamTypes(ty, args.as_mut_ptr()); - args.set_len(n_args); - args - } - } - fn float_width(&self, ty: &'ll Type) -> usize { match self.type_kind(ty) { TypeKind::Float => 32, @@ -232,10 +280,6 @@ impl BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> { fn val_ty(&self, v: &'ll Value) -> &'ll Type { common::val_ty(v) } - - fn scalar_lltypes(&self) -> &RefCell, Self::Type>> { - &self.scalar_lltypes - } } impl Type { @@ -294,9 +338,6 @@ impl LayoutTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> { fn cast_backend_type(&self, ty: &CastTarget) -> &'ll Type { ty.llvm_type(self) } - fn fn_backend_type(&self, ty: &FnType<'tcx, Ty<'tcx>>) -> &'ll Type { - ty.llvm_type(self) - } fn fn_ptr_backend_type(&self, ty: &FnType<'tcx, Ty<'tcx>>) -> &'ll Type { ty.ptr_to_llvm_type(self) } diff --git a/src/librustc_codegen_llvm/va_arg.rs b/src/librustc_codegen_llvm/va_arg.rs index 7aceaea4510ce..1c42da94aa551 100644 --- a/src/librustc_codegen_llvm/va_arg.rs +++ b/src/librustc_codegen_llvm/va_arg.rs @@ -3,7 +3,7 @@ use crate::type_::Type; use crate::type_of::LayoutLlvmExt; use crate::value::Value; use rustc_codegen_ssa::mir::operand::OperandRef; -use rustc_codegen_ssa::traits::{BaseTypeMethods, BuilderMethods, ConstMethods, DerivedTypeMethods}; +use rustc_codegen_ssa::traits::*; use 
rustc::ty::layout::{Align, HasDataLayout, HasTyCtxt, LayoutOf, Size}; use rustc::ty::Ty; @@ -145,4 +145,3 @@ pub(super) fn emit_va_arg( } } } - diff --git a/src/librustc_codegen_ssa/base.rs b/src/librustc_codegen_ssa/base.rs index 39ce15e477296..6c81428bd6426 100644 --- a/src/librustc_codegen_ssa/base.rs +++ b/src/librustc_codegen_ssa/base.rs @@ -21,7 +21,7 @@ use rustc::middle::lang_items::StartFnLangItem; use rustc::middle::weak_lang_items; use rustc::mir::mono::{Stats, CodegenUnitNameBuilder}; use rustc::ty::{self, Ty, TyCtxt}; -use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, VariantIdx, HasTyCtxt}; +use rustc::ty::layout::{self, Align, TyLayout, VariantIdx, HasTyCtxt}; use rustc::ty::query::Providers; use rustc::middle::cstore::{self, LinkagePreference}; use rustc::util::common::{time, print_time_passes_entry}; @@ -162,16 +162,16 @@ pub fn compare_simd_types<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( /// The `old_info` argument is a bit funny. It is intended for use /// in an upcast, where the new vtable for an object will be derived /// from the old one. -pub fn unsized_info<'tcx, Cx: CodegenMethods<'tcx>>( - cx: &Cx, +pub fn unsized_info<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + bx: &Bx, source: Ty<'tcx>, target: Ty<'tcx>, - old_info: Option, -) -> Cx::Value { - let (source, target) = cx.tcx().struct_lockstep_tails(source, target); + old_info: Option, +) -> Bx::Value { + let (source, target) = bx.tcx().struct_lockstep_tails(source, target); match (&source.sty, &target.sty) { (&ty::Array(_, len), &ty::Slice(_)) => { - cx.const_usize(len.unwrap_usize(cx.tcx())) + bx.const_usize(len.unwrap_usize(bx.tcx())) } (&ty::Dynamic(..), &ty::Dynamic(..)) => { // For now, upcasts are limited to changes in marker @@ -180,10 +180,10 @@ pub fn unsized_info<'tcx, Cx: CodegenMethods<'tcx>>( old_info.expect("unsized_info: missing old info for trait upcast") } (_, &ty::Dynamic(ref data, ..)) => { - let vtable_ptr = cx.layout_of(cx.tcx().mk_mut_ptr(target)) - .field(cx, FAT_PTR_EXTRA); - cx.const_ptrcast(meth::get_vtable(cx, source, data.principal()), - cx.backend_type(vtable_ptr)) + let vtable_ptr = bx.layout_of(bx.tcx().mk_mut_ptr(target)) + .field(bx, FAT_PTR_EXTRA); + bx.const_ptrcast(meth::get_vtable(bx.cx(), source, data.principal()), + bx.backend_type(vtable_ptr)) } _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", source, @@ -206,24 +206,24 @@ pub fn unsize_thin_ptr<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) | (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }), &ty::RawPtr(ty::TypeAndMut { ty: b, .. 
})) => { - assert!(bx.cx().type_is_sized(a)); - let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(bx.cx().layout_of(b))); - (bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None)) + assert!(bx.type_is_sized(a)); + let ptr_ty = bx.type_ptr_to(bx.backend_type(bx.layout_of(b))); + (bx.pointercast(src, ptr_ty), unsized_info(bx, a, b, None)) } (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => { let (a, b) = (src_ty.boxed_ty(), dst_ty.boxed_ty()); - assert!(bx.cx().type_is_sized(a)); - let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(bx.cx().layout_of(b))); - (bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None)) + assert!(bx.type_is_sized(a)); + let ptr_ty = bx.type_ptr_to(bx.backend_type(bx.layout_of(b))); + (bx.pointercast(src, ptr_ty), unsized_info(bx, a, b, None)) } (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => { assert_eq!(def_a, def_b); - let src_layout = bx.cx().layout_of(src_ty); - let dst_layout = bx.cx().layout_of(dst_ty); + let src_layout = bx.layout_of(src_ty); + let dst_layout = bx.layout_of(dst_ty); let mut result = None; for i in 0..src_layout.fields.count() { - let src_f = src_layout.field(bx.cx(), i); + let src_f = src_layout.field(bx, i); assert_eq!(src_layout.fields.offset(i).bytes(), 0); assert_eq!(dst_layout.fields.offset(i).bytes(), 0); if src_f.is_zst() { @@ -231,15 +231,15 @@ pub fn unsize_thin_ptr<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( } assert_eq!(src_layout.size, src_f.size); - let dst_f = dst_layout.field(bx.cx(), i); + let dst_f = dst_layout.field(bx, i); assert_ne!(src_f.ty, dst_f.ty); assert_eq!(result, None); result = Some(unsize_thin_ptr(bx, src, src_f.ty, dst_f.ty)); } let (lldata, llextra) = result.unwrap(); // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. - (bx.bitcast(lldata, bx.cx().scalar_pair_element_backend_type(dst_layout, 0, true)), - bx.bitcast(llextra, bx.cx().scalar_pair_element_backend_type(dst_layout, 1, true))) + (bx.bitcast(lldata, bx.scalar_pair_element_backend_type(dst_layout, 0, true)), + bx.bitcast(llextra, bx.scalar_pair_element_backend_type(dst_layout, 1, true))) } _ => bug!("unsize_thin_ptr: called on bad types"), } @@ -261,8 +261,8 @@ pub fn coerce_unsized_into<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( // i.e., &'a fmt::Debug+Send => &'a fmt::Debug // So we need to pointercast the base to ensure // the types match up. 
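// [Review note: as elsewhere in this patch, layout and type queries now go
// through `bx` directly instead of `bx.cx()`; the builder derefs to its
// `CodegenCx` (via `HasCodegen`), so this reads as a call-site cleanup rather
// than a behavioral change.]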
- let thin_ptr = dst.layout.field(bx.cx(), FAT_PTR_ADDR); - (bx.pointercast(base, bx.cx().backend_type(thin_ptr)), info) + let thin_ptr = dst.layout.field(bx, FAT_PTR_ADDR); + (bx.pointercast(base, bx.backend_type(thin_ptr)), info) } OperandValue::Immediate(base) => { unsize_thin_ptr(bx, base, src_ty, dst_ty) @@ -323,16 +323,16 @@ fn cast_shift_rhs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( ) -> Bx::Value { // Shifts may have any size int on the rhs if op.is_shift() { - let mut rhs_llty = bx.cx().val_ty(rhs); - let mut lhs_llty = bx.cx().val_ty(lhs); - if bx.cx().type_kind(rhs_llty) == TypeKind::Vector { - rhs_llty = bx.cx().element_type(rhs_llty) + let mut rhs_llty = bx.val_ty(rhs); + let mut lhs_llty = bx.val_ty(lhs); + if bx.type_kind(rhs_llty) == TypeKind::Vector { + rhs_llty = bx.element_type(rhs_llty) } - if bx.cx().type_kind(lhs_llty) == TypeKind::Vector { - lhs_llty = bx.cx().element_type(lhs_llty) + if bx.type_kind(lhs_llty) == TypeKind::Vector { + lhs_llty = bx.element_type(lhs_llty) } - let rhs_sz = bx.cx().int_width(rhs_llty); - let lhs_sz = bx.cx().int_width(lhs_llty); + let rhs_sz = bx.int_width(rhs_llty); + let lhs_sz = bx.int_width(lhs_llty); if lhs_sz < rhs_sz { bx.trunc(rhs, lhs_llty) } else if lhs_sz > rhs_sz { @@ -356,18 +356,18 @@ pub fn wants_msvc_seh(sess: &Session) -> bool { sess.target.target.options.is_like_msvc } -pub fn from_immediate<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( +pub fn from_immediate<'a, 'tcx: 'a, Bx: NumBuilderMethods<'tcx>>( bx: &mut Bx, val: Bx::Value ) -> Bx::Value { - if bx.cx().val_ty(val) == bx.cx().type_i1() { - bx.zext(val, bx.cx().type_i8()) + if bx.val_ty(val) == bx.type_i1() { + bx.zext(val, bx.type_i8()) } else { val } } -pub fn to_immediate<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( +pub fn to_immediate<'a, 'tcx: 'a, Bx: NumBuilderMethods<'tcx>>( bx: &mut Bx, val: Bx::Value, layout: layout::TyLayout<'_>, @@ -378,18 +378,18 @@ pub fn to_immediate<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( val } -pub fn to_immediate_scalar<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( +pub fn to_immediate_scalar<'a, 'tcx: 'a, Bx: NumBuilderMethods<'tcx>>( bx: &mut Bx, val: Bx::Value, scalar: &layout::Scalar, ) -> Bx::Value { if scalar.is_bool() { - return bx.trunc(val, bx.cx().type_i1()); + return bx.trunc(val, bx.type_i1()); } val } -pub fn memcpy_ty<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( +pub fn memcpy_ty<'a, 'tcx: 'a, Bx: MemoryBuilderMethods<'tcx>>( bx: &mut Bx, dst: Bx::Value, dst_align: Align, @@ -397,13 +397,15 @@ pub fn memcpy_ty<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( src_align: Align, layout: TyLayout<'tcx>, flags: MemFlags, -) { +) + where Bx::CodegenCx: ConstMethods<'tcx> +{ let size = layout.size.bytes(); if size == 0 { return; } - bx.memcpy(dst, dst_align, src, src_align, bx.cx().const_usize(size), flags); + bx.memcpy(dst, dst_align, src, src_align, bx.const_usize(size), flags); } pub fn codegen_instance<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( @@ -502,8 +504,8 @@ pub fn maybe_create_entry_wrapper<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( bx.insert_reference_to_gdb_debug_scripts_section_global(); // Params from native main() used as args for rust start function - let param_argc = cx.get_param(llfn, 0); - let param_argv = cx.get_param(llfn, 1); + let param_argc = bx.get_param(0); + let param_argv = bx.get_param(1); let arg_argc = bx.intcast(param_argc, cx.type_isize(), true); let arg_argv = param_argv; diff --git a/src/librustc_codegen_ssa/common.rs b/src/librustc_codegen_ssa/common.rs index 
db77074deef94..0e1885fe29ba6 100644 --- a/src/librustc_codegen_ssa/common.rs +++ b/src/librustc_codegen_ssa/common.rs @@ -1,7 +1,7 @@ #![allow(non_camel_case_types, non_snake_case)] -use rustc::ty::{self, Ty, TyCtxt}; -use syntax_pos::{DUMMY_SP, Span}; +use rustc::ty::{Ty, TyCtxt}; +use syntax_pos::Span; use rustc::hir::def_id::DefId; use rustc::middle::lang_items::LangItem; @@ -11,18 +11,6 @@ use crate::traits::*; use rustc::hir; use crate::traits::BuilderMethods; -pub fn type_needs_drop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { - ty.needs_drop(tcx, ty::ParamEnv::reveal_all()) -} - -pub fn type_is_sized<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { - ty.is_sized(tcx.at(DUMMY_SP), ty::ParamEnv::reveal_all()) -} - -pub fn type_is_freeze<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { - ty.is_freeze(tcx, ty::ParamEnv::reveal_all(), DUMMY_SP) -} - pub enum IntPredicate { IntEQ, IntNE, diff --git a/src/librustc_codegen_ssa/debuginfo.rs b/src/librustc_codegen_ssa/debuginfo.rs index c4531ff90ae7c..aa7cdbed99446 100644 --- a/src/librustc_codegen_ssa/debuginfo.rs +++ b/src/librustc_codegen_ssa/debuginfo.rs @@ -1,6 +1,5 @@ use syntax_pos::{BytePos, Span}; use rustc::hir::def_id::CrateNum; -use std::cell::Cell; pub enum FunctionDebugContext { RegularContext(FunctionDebugContextData), @@ -36,10 +35,10 @@ impl FunctionDebugContext { /// they are disabled when beginning to codegen a new function. This functions /// switches source location emitting on and must therefore be called before the /// first real statement/expression of the function is codegened. -pub fn start_emitting_source_locations(dbg_context: &FunctionDebugContext) { +pub fn start_emitting_source_locations(dbg_context: &mut FunctionDebugContext) { match *dbg_context { - FunctionDebugContext::RegularContext(ref data) => { - data.source_locations_enabled.set(true) + FunctionDebugContext::RegularContext(ref mut data) => { + data.source_locations_enabled = true; }, _ => { /* safe to ignore */ } } @@ -47,7 +46,7 @@ pub fn start_emitting_source_locations(dbg_context: &FunctionDebugContext) pub struct FunctionDebugContextData { pub fn_metadata: D, - pub source_locations_enabled: Cell, + pub source_locations_enabled: bool, pub defining_crate: CrateNum, } diff --git a/src/librustc_codegen_ssa/glue.rs b/src/librustc_codegen_ssa/glue.rs index e2b49de05bd11..dbe82e8621cb7 100644 --- a/src/librustc_codegen_ssa/glue.rs +++ b/src/librustc_codegen_ssa/glue.rs @@ -7,10 +7,14 @@ use crate::common::IntPredicate; use crate::meth; use crate::traits::*; -pub fn size_and_align_of_dst<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( +pub fn size_and_align_of_dst< + 'a, + 'tcx: 'a, + Bx: MemoryBuilderMethods<'tcx> + NumBuilderMethods<'tcx>, +>( bx: &mut Bx, t: Ty<'tcx>, - info: Option + info: Option, ) -> (Bx::Value, Bx::Value) { let layout = bx.layout_of(t); debug!("size_and_align_of_dst(ty={}, info={:?}): layout: {:?}", diff --git a/src/librustc_codegen_ssa/meth.rs b/src/librustc_codegen_ssa/meth.rs index 49f3c87ee2d9d..688e905202bdc 100644 --- a/src/librustc_codegen_ssa/meth.rs +++ b/src/librustc_codegen_ssa/meth.rs @@ -18,7 +18,7 @@ impl<'a, 'tcx: 'a> VirtualIndex { VirtualIndex(index as u64 + 3) } - pub fn get_fn>( + pub fn get_fn>( self, bx: &mut Bx, llvtable: Bx::Value, @@ -40,7 +40,7 @@ impl<'a, 'tcx: 'a> VirtualIndex { ptr } - pub fn get_usize>( + pub fn get_usize>( self, bx: &mut Bx, llvtable: Bx::Value diff --git a/src/librustc_codegen_ssa/mir/block.rs b/src/librustc_codegen_ssa/mir/block.rs index 
627380ee38ff1..00c261af49ed5 100644 --- a/src/librustc_codegen_ssa/mir/block.rs +++ b/src/librustc_codegen_ssa/mir/block.rs @@ -1,6 +1,6 @@ use rustc::middle::lang_items; use rustc::ty::{self, Ty, TypeFoldable}; -use rustc::ty::layout::{self, LayoutOf, HasTyCtxt}; +use rustc::ty::layout::{self, HasTyCtxt, LayoutOf}; use rustc::mir; use rustc::mir::interpret::EvalErrorKind; use rustc_target::abi::call::{ArgType, FnType, PassMode, IgnoreMode}; @@ -214,17 +214,20 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } } else { let (otherwise, targets) = targets.split_last().unwrap(); - let switch = bx.switch(discr.immediate(), - helper.llblock(self, *otherwise), - values.len()); + let mut switch = bx.switch_new( + discr.immediate(), + helper.llblock(self, *otherwise), + values.len(), + ); let switch_llty = bx.immediate_backend_type( bx.layout_of(switch_ty) ); for (&value, target) in values.iter().zip(targets) { let llval = bx.const_uint_big(switch_llty, value); let llbb = helper.llblock(self, *target); - bx.add_case(switch, llval, llbb) + bx.switch_add_case(&mut switch, llval, llbb) } + bx.switch_emit(switch); } } @@ -394,12 +397,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // Get the location information. let loc = bx.sess().source_map().lookup_char_pos(span.lo()); let filename = Symbol::intern(&loc.file.name.to_string()).as_str(); - let filename = bx.const_str_slice(filename); let line = bx.const_u32(loc.line as u32); let col = bx.const_u32(loc.col.to_usize() as u32 + 1); - let align = self.cx.tcx().data_layout.aggregate_align.abi - .max(self.cx.tcx().data_layout.i32_align.abi) - .max(self.cx.tcx().data_layout.pointer_align.abi); // Put together the arguments to the panic entry point. let (lang_item, args) = match *msg { @@ -407,30 +406,28 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let len = self.codegen_operand(&mut bx, len).immediate(); let index = self.codegen_operand(&mut bx, index).immediate(); - let file_line_col = bx.const_struct(&[filename, line, col], false); - let file_line_col = bx.static_addr_of( - file_line_col, - align, - Some("panic_bounds_check_loc") + let file_line_col = bx.static_panic_msg( + None, + filename, + line, + col, + "panic_bounds_check_loc", ); (lang_items::PanicBoundsCheckFnLangItem, - vec![file_line_col, index, len]) + vec![file_line_col, index, len]) } _ => { let str = msg.description(); let msg_str = Symbol::intern(str).as_str(); - let msg_str = bx.const_str_slice(msg_str); - let msg_file_line_col = bx.const_struct( - &[msg_str, filename, line, col], - false - ); - let msg_file_line_col = bx.static_addr_of( - msg_file_line_col, - align, - Some("panic_loc") + let msg_file_line_col = bx.static_panic_msg( + Some(msg_str), + filename, + line, + col, + "panic_loc", ); (lang_items::PanicFnLangItem, - vec![msg_file_line_col]) + vec![msg_file_line_col]) } }; @@ -534,27 +531,20 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { if layout.abi.is_uninhabited() { let loc = bx.sess().source_map().lookup_char_pos(span.lo()); let filename = Symbol::intern(&loc.file.name.to_string()).as_str(); - let filename = bx.const_str_slice(filename); let line = bx.const_u32(loc.line as u32); let col = bx.const_u32(loc.col.to_usize() as u32 + 1); - let align = self.cx.tcx().data_layout.aggregate_align.abi - .max(self.cx.tcx().data_layout.i32_align.abi) - .max(self.cx.tcx().data_layout.pointer_align.abi); let str = format!( "Attempted to instantiate uninhabited 
type {}", ty ); let msg_str = Symbol::intern(&str).as_str(); - let msg_str = bx.const_str_slice(msg_str); - let msg_file_line_col = bx.const_struct( - &[msg_str, filename, line, col], - false, - ); - let msg_file_line_col = bx.static_addr_of( - msg_file_line_col, - align, - Some("panic_loc"), + let msg_file_line_col = bx.static_panic_msg( + Some(msg_str), + filename, + line, + col, + "panic_loc", ); // Obtain the panic entry point. @@ -640,7 +630,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { span_bug!(span, "shuffle indices must be constant"); } mir::Operand::Constant(ref constant) => { - let c = self.eval_mir_constant(&bx, constant); + let c = self.eval_mir_constant(constant); let (llval, ty) = self.simd_shuffle_indices( &bx, constant.span, @@ -1012,13 +1002,12 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { &mut self, bx: &mut Bx ) -> PlaceRef<'tcx, Bx::Value> { - let cx = bx.cx(); if let Some(slot) = self.personality_slot { slot } else { - let layout = cx.layout_of(cx.tcx().intern_tup(&[ - cx.tcx().mk_mut_ptr(cx.tcx().types.u8), - cx.tcx().types.i32 + let layout = bx.layout_of(bx.tcx().intern_tup(&[ + bx.tcx().mk_mut_ptr(bx.tcx().types.u8), + bx.tcx().types.i32 ])); let slot = PlaceRef::alloca(bx, layout, "personalityslot"); self.personality_slot = Some(slot); diff --git a/src/librustc_codegen_ssa/mir/constant.rs b/src/librustc_codegen_ssa/mir/constant.rs index 6bc69efa4a7d5..7db0ca309f6b8 100644 --- a/src/librustc_codegen_ssa/mir/constant.rs +++ b/src/librustc_codegen_ssa/mir/constant.rs @@ -3,41 +3,38 @@ use rustc_mir::const_eval::const_field; use rustc::mir; use rustc_data_structures::indexed_vec::Idx; use rustc::mir::interpret::GlobalId; -use rustc::ty::{self, Ty}; -use rustc::ty::layout; +use rustc::ty::{self, Ty, TyCtxt}; +use rustc::ty::layout::{self, HasTyCtxt}; use syntax::source_map::Span; use crate::traits::*; use super::FunctionCx; -impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { - fn fully_evaluate( - &mut self, - bx: &Bx, - constant: &'tcx ty::LazyConst<'tcx>, - ) -> Result, ErrorHandled> { - match *constant { - ty::LazyConst::Unevaluated(def_id, ref substs) => { - let tcx = bx.tcx(); - let param_env = ty::ParamEnv::reveal_all(); - let instance = ty::Instance::resolve(tcx, param_env, def_id, substs).unwrap(); - let cid = GlobalId { - instance, - promoted: None, - }; - tcx.const_eval(param_env.and(cid)) - }, - ty::LazyConst::Evaluated(constant) => Ok(constant), - } +fn fully_evaluate<'a, 'tcx: 'a>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + constant: &'tcx ty::LazyConst<'tcx>, +) -> Result, ErrorHandled> { + match *constant { + ty::LazyConst::Unevaluated(def_id, ref substs) => { + let param_env = ty::ParamEnv::reveal_all(); + let instance = ty::Instance::resolve(tcx, param_env, def_id, substs).unwrap(); + let cid = GlobalId { + instance, + promoted: None, + }; + tcx.const_eval(param_env.and(cid)) + }, + ty::LazyConst::Evaluated(constant) => Ok(constant), } +} +impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { pub fn eval_mir_constant( - &mut self, - bx: &Bx, + &self, constant: &mir::Constant<'tcx>, ) -> Result, ErrorHandled> { let c = self.monomorphize(&constant.literal); - self.fully_evaluate(bx, c) + fully_evaluate(self.cx.tcx(), c) } /// process constant containing SIMD shuffle indices diff --git a/src/librustc_codegen_ssa/mir/mod.rs b/src/librustc_codegen_ssa/mir/mod.rs index dc77d4673cd2a..d9ed7d581b755 100644 --- a/src/librustc_codegen_ssa/mir/mod.rs +++ 
b/src/librustc_codegen_ssa/mir/mod.rs @@ -1,8 +1,6 @@ -use libc::c_uint; use rustc::ty::{self, Ty, TypeFoldable, UpvarSubsts}; use rustc::ty::layout::{TyLayout, HasTyCtxt}; use rustc::mir::{self, Mir}; -use rustc::ty::subst::SubstsRef; use rustc::session::config::DebugInfo; use rustc_mir::monomorphize::Instance; use rustc_target::abi::call::{FnType, PassMode, IgnoreMode}; @@ -25,7 +23,7 @@ use rustc::mir::traversal; use self::operand::{OperandRef, OperandValue}; /// Master context for codegenning from MIR. -pub struct FunctionCx<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> { +pub struct FunctionCx<'a, 'tcx: 'a, Bx: HasCodegen<'tcx>> { instance: Instance<'tcx>, mir: &'a mir::Mir<'tcx>, @@ -84,20 +82,17 @@ pub struct FunctionCx<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> { /// Debug information for MIR scopes. scopes: IndexVec>, - /// If this function is being monomorphized, this contains the type substitutions used. - param_substs: SubstsRef<'tcx>, - /// If this function is a C-variadic function, this contains the `PlaceRef` of the /// "spoofed" `VaList`. va_list_ref: Option>, } -impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { +impl<'a, 'tcx: 'a, Bx: HasCodegen<'tcx> + DebugInfoBuilderMethods<'tcx>> FunctionCx<'a, 'tcx, Bx> { pub fn monomorphize(&self, value: &T) -> T where T: TypeFoldable<'tcx> { self.cx.tcx().subst_and_normalize_erasing_regions( - self.param_substs, + self.instance.substs, ty::ParamEnv::reveal_all(), value, ) @@ -109,7 +104,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { source_info: mir::SourceInfo ) { let (scope, span) = self.debug_loc(source_info); - bx.set_source_location(&self.debug_context, scope, span); + bx.set_source_location(&mut self.debug_context, scope, span); } pub fn debug_loc(&self, source_info: mir::SourceInfo) -> (Option, Span) { @@ -179,16 +174,16 @@ enum LocalRef<'tcx, V> { Operand(Option>), } -impl<'tcx, V: CodegenObject> LocalRef<'tcx, V> { - fn new_operand>( - cx: &Cx, +impl<'a, 'tcx: 'a, V: CodegenObject> LocalRef<'tcx, V> { + fn new_operand>( + bx: &mut Bx, layout: TyLayout<'tcx>, ) -> LocalRef<'tcx, V> { if layout.is_zst() { // Zero-size temporaries aren't always initialized, which // doesn't matter because they don't contain data, but // we need something in the operand. - LocalRef::Operand(Some(OperandRef::new_zst(cx, layout))) + LocalRef::Operand(Some(OperandRef::new_zst(bx, layout))) } else { LocalRef::Operand(None) } @@ -204,9 +199,11 @@ pub fn codegen_mir<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( instance: Instance<'tcx>, sig: ty::FnSig<'tcx>, ) { + assert!(!instance.substs.needs_infer()); + let fn_ty = cx.new_fn_type(sig, &[]); debug!("fn_ty: {:?}", fn_ty); - let debug_context = + let mut debug_context = cx.create_function_debug_context(instance, sig, llfn, mir); let mut bx = Bx::new_block(cx, llfn, "start"); @@ -228,7 +225,7 @@ pub fn codegen_mir<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( }).collect(); // Compute debuginfo scopes from MIR scopes. 
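// [Review note: `create_mir_scopes` and `set_source_location` now take the
// debug context by `&mut` because `source_locations_enabled` changed from
// `Cell<bool>` to a plain `bool` in the debuginfo hunks above.]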
- let scopes = cx.create_mir_scopes(mir, &debug_context); + let scopes = cx.create_mir_scopes(mir, &mut debug_context); let (landing_pads, funclets) = create_funclets(mir, &mut bx, &cleanup_kinds, &block_bxs); let mut fx = FunctionCx { @@ -246,10 +243,6 @@ pub fn codegen_mir<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( scopes, locals: IndexVec::new(), debug_context, - param_substs: { - assert!(!instance.substs.needs_infer()); - instance.substs - }, va_list_ref: None, }; @@ -260,7 +253,7 @@ pub fn codegen_mir<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( // FIXME(dlrobertson): This is ugly. Find a better way of getting the `PlaceRef` or // `LocalRef` from `arg_local_refs` let mut va_list_ref = None; - let args = arg_local_refs(&mut bx, &fx, &fx.scopes, &memory_locals, &mut va_list_ref); + let args = arg_local_refs(&mut bx, &fx, &memory_locals, &mut va_list_ref); fx.va_list_ref = va_list_ref; let mut allocate_local = |local| { @@ -276,7 +269,7 @@ pub fn codegen_mir<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( if !memory_locals.contains(local) && !dbg { debug!("alloc: {:?} ({}) -> operand", local, name); - return LocalRef::new_operand(bx.cx(), layout); + return LocalRef::new_operand(&mut bx, layout); } debug!("alloc: {:?} ({}) -> place", local, name); @@ -302,7 +295,7 @@ pub fn codegen_mir<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( // Temporary or return place if local == mir::RETURN_PLACE && fx.fn_ty.ret.is_indirect() { debug!("alloc: {:?} (return place) -> place", local); - let llretptr = fx.cx.get_param(llfn, 0); + let llretptr = bx.get_param(0); LocalRef::Place(PlaceRef::new_sized(llretptr, layout, layout.align.abi)) } else if memory_locals.contains(local) { debug!("alloc: {:?} -> place", local); @@ -321,7 +314,7 @@ pub fn codegen_mir<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( // alloca in advance. Instead we wait until we see the // definition and update the operand there. debug!("alloc: {:?} -> operand", local); - LocalRef::new_operand(bx.cx(), layout) + LocalRef::new_operand(&mut bx, layout) } } }; @@ -341,7 +334,7 @@ pub fn codegen_mir<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( // Up until here, IR instructions for this function have explicitly not been annotated with // source code location, so we don't step into call setup code. From here on, source location // emitting should be enabled. - debuginfo::start_emitting_source_locations(&fx.debug_context); + debuginfo::start_emitting_source_locations(&mut fx.debug_context); let rpo = traversal::reverse_postorder(&mir); let mut visited = BitSet::new_empty(mir.basic_blocks().len()); @@ -437,10 +430,6 @@ fn create_funclets<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( bx: &mut Bx, fx: &FunctionCx<'a, 'tcx, Bx>, - scopes: &IndexVec< - mir::SourceScope, - debuginfo::MirDebugScope - >, memory_locals: &BitSet, va_list_ref: &mut Option>, ) -> Vec> { @@ -450,7 +439,7 @@ fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( let mut llarg_idx = fx.fn_ty.ret.is_indirect() as usize; // Get the argument scope, if it exists and if we need it. 
- let arg_scope = scopes[mir::OUTERMOST_SOURCE_SCOPE]; + let arg_scope = fx.scopes[mir::OUTERMOST_SOURCE_SCOPE]; let arg_scope = if bx.sess().opts.debuginfo == DebugInfo::Full { arg_scope.scope_metadata } else { @@ -530,7 +519,7 @@ fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( let local = |op| LocalRef::Operand(Some(op)); match arg.mode { PassMode::Ignore(IgnoreMode::Zst) => { - return local(OperandRef::new_zst(bx.cx(), arg.layout)); + return local(OperandRef::new_zst(bx, arg.layout)); } PassMode::Ignore(IgnoreMode::CVarArgs) => { let backend_type = bx.cx().immediate_backend_type(arg.layout); @@ -540,18 +529,18 @@ fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( }); } PassMode::Direct(_) => { - let llarg = bx.get_param(bx.llfn(), llarg_idx as c_uint); + let llarg = bx.get_param(llarg_idx); bx.set_value_name(llarg, &name); llarg_idx += 1; return local( OperandRef::from_immediate_or_packed_pair(bx, llarg, arg.layout)); } PassMode::Pair(..) => { - let a = bx.get_param(bx.llfn(), llarg_idx as c_uint); + let a = bx.get_param(llarg_idx); bx.set_value_name(a, &(name.clone() + ".0")); llarg_idx += 1; - let b = bx.get_param(bx.llfn(), llarg_idx as c_uint); + let b = bx.get_param(llarg_idx); bx.set_value_name(b, &(name + ".1")); llarg_idx += 1; @@ -568,16 +557,16 @@ fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( // Don't copy an indirect argument to an alloca, the caller // already put it in a temporary alloca and gave it up. // FIXME: lifetimes - let llarg = bx.get_param(bx.llfn(), llarg_idx as c_uint); + let llarg = bx.get_param(llarg_idx); bx.set_value_name(llarg, &name); llarg_idx += 1; PlaceRef::new_sized(llarg, arg.layout, arg.layout.align.abi) } else if arg.is_unsized_indirect() { // As the storage for the indirect argument lives during // the whole function call, we just copy the fat pointer. 
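// [Review note: with `get_param` dropped from `MiscMethods` on `CodegenCx`
// (context.rs hunk above) and re-exposed as a builder method taking a plain
// `usize` index, the `get_param` call sites in this function no longer pass
// the `llfn` or cast the index to `c_uint`.]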
- let llarg = bx.get_param(bx.llfn(), llarg_idx as c_uint); + let llarg = bx.get_param(llarg_idx); llarg_idx += 1; - let llextra = bx.get_param(bx.llfn(), llarg_idx as c_uint); + let llextra = bx.get_param(llarg_idx); llarg_idx += 1; let indirect_operand = OperandValue::Pair(llarg, llextra); diff --git a/src/librustc_codegen_ssa/mir/operand.rs b/src/librustc_codegen_ssa/mir/operand.rs index 0e8cdc83b486e..6a8ca3d69f16c 100644 --- a/src/librustc_codegen_ssa/mir/operand.rs +++ b/src/librustc_codegen_ssa/mir/operand.rs @@ -1,7 +1,7 @@ use rustc::mir::interpret::{ConstValue, ErrorHandled}; use rustc::mir; use rustc::ty; -use rustc::ty::layout::{self, Align, LayoutOf, TyLayout}; +use rustc::ty::layout::{self, Align, TyLayout}; use crate::base; use crate::MemFlags; @@ -54,13 +54,15 @@ impl fmt::Debug for OperandRef<'tcx, V> { } impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> { - pub fn new_zst>( - cx: &Cx, + pub fn new_zst>( + bx: &mut Bx, layout: TyLayout<'tcx> - ) -> OperandRef<'tcx, V> { + ) -> OperandRef<'tcx, V> + where Bx::CodegenCx: ConstMethods<'tcx> + { assert!(layout.is_zst()); OperandRef { - val: OperandValue::Immediate(cx.const_undef(cx.immediate_backend_type(layout))), + val: OperandValue::Immediate(bx.const_undef(bx.immediate_backend_type(layout))), layout } } @@ -69,10 +71,10 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> { bx: &mut Bx, val: ty::Const<'tcx> ) -> Result { - let layout = bx.cx().layout_of(val.ty); + let layout = bx.layout_of(val.ty); if layout.is_zst() { - return Ok(OperandRef::new_zst(bx.cx(), layout)); + return Ok(OperandRef::new_zst(bx, layout)); } let val = match val.val { @@ -81,10 +83,10 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> { layout::Abi::Scalar(ref x) => x, _ => bug!("from_const: invalid ByVal layout: {:#?}", layout) }; - let llval = bx.cx().scalar_to_backend( + let llval = bx.scalar_to_backend( x, scalar, - bx.cx().immediate_backend_type(layout), + bx.immediate_backend_type(layout), ); OperandValue::Immediate(llval) }, @@ -93,16 +95,16 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> { layout::Abi::ScalarPair(ref a, _) => a, _ => bug!("from_const: invalid ScalarPair layout: {:#?}", layout) }; - let a_llval = bx.cx().scalar_to_backend( + let a_llval = bx.scalar_to_backend( a, a_scalar, - bx.cx().scalar_pair_element_backend_type(layout, 0, true), + bx.scalar_pair_element_backend_type(layout, 0, true), ); - let b_llval = bx.cx().const_usize(b); + let b_llval = bx.const_usize(b); OperandValue::Pair(a_llval, b_llval) }, ConstValue::ByRef(ptr, alloc) => { - return Ok(bx.load_operand(bx.cx().from_const_alloc(layout, alloc, ptr.offset))); + return Ok(bx.load_operand(bx.from_const_alloc(layout, alloc, ptr.offset))); }, }; @@ -121,7 +123,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> { } } - pub fn deref>( + pub fn deref>( self, cx: &Cx ) -> PlaceRef<'tcx, V> { @@ -148,11 +150,11 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> { bx: &mut Bx ) -> V { if let OperandValue::Pair(a, b) = self.val { - let llty = bx.cx().backend_type(self.layout); + let llty = bx.backend_type(self.layout); debug!("Operand::immediate_or_packed_pair: packing {:?} into {:?}", self, llty); // Reconstruct the immediate aggregate. 
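// [Review note: `from_immediate` below widens an `i1` bool to `i8` before it
// is inserted, so the packed pair matches the in-memory representation that
// `backend_type(self.layout)` describes.]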
- let mut llpair = bx.cx().const_undef(llty); + let mut llpair = bx.const_undef(llty); let imm_a = base::from_immediate(bx, a); let imm_b = base::from_immediate(bx, b); llpair = bx.insert_value(llpair, imm_a, 0); @@ -190,13 +192,13 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> { bx: &mut Bx, i: usize ) -> Self { - let field = self.layout.field(bx.cx(), i); + let field = self.layout.field(bx, i); let offset = self.layout.fields.offset(i); let mut val = match (self.val, &self.layout.abi) { // If the field is ZST, it has no data. _ if field.is_zst() => { - return OperandRef::new_zst(bx.cx(), field); + return OperandRef::new_zst(bx, field); } // Newtype of a scalar, scalar pair or vector. @@ -209,12 +211,12 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> { // Extract a scalar component from a pair. (OperandValue::Pair(a_llval, b_llval), &layout::Abi::ScalarPair(ref a, ref b)) => { if offset.bytes() == 0 { - assert_eq!(field.size, a.value.size(bx.cx())); + assert_eq!(field.size, a.value.size(bx)); OperandValue::Immediate(a_llval) } else { - assert_eq!(offset, a.value.size(bx.cx()) - .align_to(b.value.align(bx.cx()).abi)); - assert_eq!(field.size, b.value.size(bx.cx())); + assert_eq!(offset, a.value.size(bx) + .align_to(b.value.align(bx).abi)); + assert_eq!(field.size, b.value.size(bx)); OperandValue::Immediate(b_llval) } } @@ -222,7 +224,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> { // `#[repr(simd)]` types are also immediate. (OperandValue::Immediate(llval), &layout::Abi::Vector { .. }) => { OperandValue::Immediate( - bx.extract_element(llval, bx.cx().const_usize(i as u64))) + bx.extract_element(llval, bx.const_usize(i as u64))) } _ => bug!("OperandRef::extract_field({:?}): not applicable", self) @@ -231,7 +233,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> { // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. // Bools in union fields needs to be truncated. let to_immediate_or_cast = |bx: &mut Bx, val, ty| { - if ty == bx.cx().type_i1() { + if ty == bx.type_i1() { bx.trunc(val, ty) } else { bx.bitcast(val, ty) @@ -240,12 +242,12 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> { match val { OperandValue::Immediate(ref mut llval) => { - *llval = to_immediate_or_cast(bx, *llval, bx.cx().immediate_backend_type(field)); + *llval = to_immediate_or_cast(bx, *llval, bx.immediate_backend_type(field)); } OperandValue::Pair(ref mut a, ref mut b) => { - *a = to_immediate_or_cast(bx, *a, bx.cx() + *a = to_immediate_or_cast(bx, *a, bx .scalar_pair_element_backend_type(field, 0, true)); - *b = to_immediate_or_cast(bx, *b, bx.cx() + *b = to_immediate_or_cast(bx, *b, bx .scalar_pair_element_backend_type(field, 1, true)); } OperandValue::Ref(..) 
=> bug!() @@ -259,7 +261,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> { } impl<'a, 'tcx: 'a, V: CodegenObject> OperandValue { - pub fn store>( + pub fn store + NumBuilderMethods<'tcx>>( self, bx: &mut Bx, dest: PlaceRef<'tcx, V> @@ -267,7 +269,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandValue { self.store_with_flags(bx, dest, MemFlags::empty()); } - pub fn volatile_store>( + pub fn volatile_store + NumBuilderMethods<'tcx>>( self, bx: &mut Bx, dest: PlaceRef<'tcx, V> @@ -275,7 +277,9 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandValue { self.store_with_flags(bx, dest, MemFlags::VOLATILE); } - pub fn unaligned_volatile_store>( + pub fn unaligned_volatile_store< + Bx: MemoryBuilderMethods<'tcx, Value = V> + NumBuilderMethods<'tcx> + >( self, bx: &mut Bx, dest: PlaceRef<'tcx, V>, @@ -283,7 +287,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandValue { self.store_with_flags(bx, dest, MemFlags::VOLATILE | MemFlags::UNALIGNED); } - pub fn nontemporal_store>( + pub fn nontemporal_store + NumBuilderMethods<'tcx>>( self, bx: &mut Bx, dest: PlaceRef<'tcx, V> @@ -291,7 +295,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandValue { self.store_with_flags(bx, dest, MemFlags::NONTEMPORAL); } - fn store_with_flags>( + fn store_with_flags + NumBuilderMethods<'tcx>>( self, bx: &mut Bx, dest: PlaceRef<'tcx, V>, @@ -359,7 +363,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandValue { // Allocate an appropriate region on the stack, and copy the value into it let (llsize, _) = glue::size_and_align_of_dst(bx, unsized_ty, Some(llextra)); - let lldst = bx.array_alloca(bx.cx().type_i8(), llsize, "unsized_tmp", max_align); + let lldst = bx.array_alloca(bx.type_i8(), llsize, "unsized_tmp", max_align); bx.memcpy(lldst, max_align, llptr, min_align, llsize, flags); // Store the allocated region and the extra to the indirect place. @@ -404,9 +408,9 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // ZSTs don't require any actual memory access. // FIXME(eddyb) deduplicate this with the identical // checks in `codegen_consume` and `extract_field`. - let elem = o.layout.field(bx.cx(), 0); + let elem = o.layout.field(bx, 0); if elem.is_zst() { - return Some(OperandRef::new_zst(bx.cx(), elem)); + return Some(OperandRef::new_zst(bx, elem)); } } _ => {} @@ -425,11 +429,11 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { debug!("codegen_consume(place={:?})", place); let ty = self.monomorphized_place_ty(place); - let layout = bx.cx().layout_of(ty); + let layout = bx.layout_of(ty); // ZSTs don't require any actual memory access. if layout.is_zst() { - return OperandRef::new_zst(bx.cx(), layout); + return OperandRef::new_zst(bx, layout); } if let Some(o) = self.maybe_codegen_consume_direct(bx, place) { @@ -457,7 +461,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { mir::Operand::Constant(ref constant) => { let ty = self.monomorphize(&constant.ty); - self.eval_mir_constant(bx, constant) + self.eval_mir_constant(constant) .and_then(|c| OperandRef::from_const(bx, c)) .unwrap_or_else(|err| { match err { @@ -471,9 +475,9 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // the above error (or silence it under some conditions) will not cause UB bx.abort(); // We've errored, so we don't have to produce working code. 
- let layout = bx.cx().layout_of(ty); + let layout = bx.layout_of(ty); bx.load_operand(PlaceRef::new_sized( - bx.cx().const_undef(bx.cx().type_ptr_to(bx.cx().backend_type(layout))), + bx.const_undef(bx.type_ptr_to(bx.backend_type(layout))), layout, layout.align.abi, )) diff --git a/src/librustc_codegen_ssa/mir/place.rs b/src/librustc_codegen_ssa/mir/place.rs index 1edcbfead2c94..82a9a801f0e8a 100644 --- a/src/librustc_codegen_ssa/mir/place.rs +++ b/src/librustc_codegen_ssa/mir/place.rs @@ -56,31 +56,31 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { } } - pub fn alloca>( + pub fn alloca>( bx: &mut Bx, layout: TyLayout<'tcx>, name: &str ) -> Self { debug!("alloca({:?}: {:?})", name, layout); assert!(!layout.is_unsized(), "tried to statically allocate unsized place"); - let tmp = bx.alloca(bx.cx().backend_type(layout), name, layout.align.abi); + let tmp = bx.alloca(bx.backend_type(layout), name, layout.align.abi); Self::new_sized(tmp, layout, layout.align.abi) } /// Returns a place for an indirect reference to an unsized place. - pub fn alloca_unsized_indirect>( + pub fn alloca_unsized_indirect>( bx: &mut Bx, layout: TyLayout<'tcx>, name: &str, ) -> Self { debug!("alloca_unsized_indirect({:?}: {:?})", name, layout); assert!(layout.is_unsized(), "tried to allocate indirect place for sized values"); - let ptr_ty = bx.cx().tcx().mk_mut_ptr(layout.ty); - let ptr_layout = bx.cx().layout_of(ptr_ty); + let ptr_ty = bx.tcx().mk_mut_ptr(layout.ty); + let ptr_layout = bx.layout_of(ptr_ty); Self::alloca(bx, ptr_layout, name) } - pub fn len>( + pub fn len>( &self, cx: &Cx ) -> V { @@ -100,11 +100,11 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { /// Access a field, at a point when the value's case is known. - pub fn project_field>( + pub fn project_field + NumBuilderMethods<'tcx>>( self, bx: &mut Bx, ix: usize, ) -> Self { - let field = self.layout.field(bx.cx(), ix); + let field = self.layout.field(bx, ix); let offset = self.layout.fields.offset(ix); let effective_field_align = self.align.restrict_for_offset(offset); @@ -114,15 +114,15 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { self.llval } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi { // Offsets have to match either first or second field. - assert_eq!(offset, a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi)); + assert_eq!(offset, a.value.size(bx).align_to(b.value.align(bx).abi)); bx.struct_gep(self.llval, 1) } else { - bx.struct_gep(self.llval, bx.cx().backend_field_index(self.layout, ix)) + bx.struct_gep(self.llval, bx.backend_field_index(self.layout, ix)) }; PlaceRef { // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. 
- llval: bx.pointercast(llval, bx.cx().type_ptr_to(bx.cx().backend_type(field))), - llextra: if bx.cx().type_has_metadata(field.ty) { + llval: bx.pointercast(llval, bx.type_ptr_to(bx.backend_type(field))), + llextra: if bx.type_has_metadata(field.ty) { self.llextra } else { None @@ -172,7 +172,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { let meta = self.llextra; - let unaligned_offset = bx.cx().const_usize(offset.bytes()); + let unaligned_offset = bx.const_usize(offset.bytes()); // Get the alignment of the field let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta); @@ -183,7 +183,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { // (unaligned offset + (align - 1)) & -align // Calculate offset - let align_sub_1 = bx.sub(unsized_align, bx.cx().const_usize(1u64)); + let align_sub_1 = bx.sub(unsized_align, bx.const_usize(1u64)); let and_lhs = bx.add(unaligned_offset, align_sub_1); let and_rhs = bx.neg(unsized_align); let offset = bx.and(and_lhs, and_rhs); @@ -191,15 +191,15 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { debug!("struct_field_ptr: DST field offset: {:?}", offset); // Cast and adjust pointer - let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p()); + let byte_ptr = bx.pointercast(self.llval, bx.type_i8p()); let byte_ptr = bx.gep(byte_ptr, &[offset]); // Finally, cast back to the type expected - let ll_fty = bx.cx().backend_type(field); + let ll_fty = bx.backend_type(field); debug!("struct_field_ptr: Field type is {:?}", ll_fty); PlaceRef { - llval: bx.pointercast(byte_ptr, bx.cx().type_ptr_to(ll_fty)), + llval: bx.pointercast(byte_ptr, bx.type_ptr_to(ll_fty)), llextra: self.llextra, layout: field, align: effective_field_align, @@ -212,16 +212,16 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { bx: &mut Bx, cast_to: Ty<'tcx> ) -> V { - let cast_to = bx.cx().immediate_backend_type(bx.cx().layout_of(cast_to)); + let cast_to = bx.immediate_backend_type(bx.layout_of(cast_to)); if self.layout.abi.is_uninhabited() { - return bx.cx().const_undef(cast_to); + return bx.const_undef(cast_to); } match self.layout.variants { layout::Variants::Single { index } => { let discr_val = self.layout.ty.ty_adt_def().map_or( index.as_u32() as u128, - |def| def.discriminant_for_variant(bx.cx().tcx(), index).val); - return bx.cx().const_uint_big(cast_to, discr_val); + |def| def.discriminant_for_variant(bx.tcx(), index).val); + return bx.const_uint_big(cast_to, discr_val); } layout::Variants::Tagged { .. } | layout::Variants::NicheFilling { .. } => {}, @@ -248,30 +248,30 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { niche_start, .. } => { - let niche_llty = bx.cx().immediate_backend_type(discr.layout); + let niche_llty = bx.immediate_backend_type(discr.layout); if niche_variants.start() == niche_variants.end() { // FIXME(eddyb) Check the actual primitive type here. let niche_llval = if niche_start == 0 { // HACK(eddyb) Using `c_null` as it works on all types. 
- bx.cx().const_null(niche_llty) + bx.const_null(niche_llty) } else { - bx.cx().const_uint_big(niche_llty, niche_start) + bx.const_uint_big(niche_llty, niche_start) }; let select_arg = bx.icmp(IntPredicate::IntEQ, lldiscr, niche_llval); bx.select(select_arg, - bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64), - bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64)) + bx.const_uint(cast_to, niche_variants.start().as_u32() as u64), + bx.const_uint(cast_to, dataful_variant.as_u32() as u64)) } else { // Rebase from niche values to discriminant values. let delta = niche_start.wrapping_sub(niche_variants.start().as_u32() as u128); - let lldiscr = bx.sub(lldiscr, bx.cx().const_uint_big(niche_llty, delta)); + let lldiscr = bx.sub(lldiscr, bx.const_uint_big(niche_llty, delta)); let lldiscr_max = - bx.cx().const_uint(niche_llty, niche_variants.end().as_u32() as u64); + bx.const_uint(niche_llty, niche_variants.end().as_u32() as u64); let select_arg = bx.icmp(IntPredicate::IntULE, lldiscr, lldiscr_max); let cast = bx.intcast(lldiscr, cast_to, false); bx.select(select_arg, cast, - bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64)) + bx.const_uint(cast_to, dataful_variant.as_u32() as u64)) } } } @@ -284,7 +284,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { bx: &mut Bx, variant_index: VariantIdx ) { - if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() { + if self.layout.for_variant(bx, variant_index).abi.is_uninhabited() { return; } match self.layout.variants { @@ -297,7 +297,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { .discriminant_for_variant(bx.tcx(), variant_index) .val; bx.store( - bx.cx().const_uint_big(bx.cx().backend_type(ptr.layout), to), + bx.const_uint_big(bx.backend_type(ptr.layout), to), ptr.llval, ptr.align); } @@ -308,26 +308,26 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { .. } => { if variant_index != dataful_variant { - if bx.cx().sess().target.target.arch == "arm" || - bx.cx().sess().target.target.arch == "aarch64" { + if bx.sess().target.target.arch == "arm" || + bx.sess().target.target.arch == "aarch64" { // Issue #34427: As workaround for LLVM bug on ARM, // use memset of 0 before assigning niche value. - let fill_byte = bx.cx().const_u8(0); - let size = bx.cx().const_usize(self.layout.size.bytes()); + let fill_byte = bx.const_u8(0); + let size = bx.const_usize(self.layout.size.bytes()); bx.memset(self.llval, fill_byte, size, self.align, MemFlags::empty()); } let niche = self.project_field(bx, 0); - let niche_llty = bx.cx().immediate_backend_type(niche.layout); + let niche_llty = bx.immediate_backend_type(niche.layout); let niche_value = variant_index.as_u32() - niche_variants.start().as_u32(); let niche_value = (niche_value as u128) .wrapping_add(niche_start); // FIXME(eddyb) Check the actual primitive type here. let niche_llval = if niche_value == 0 { // HACK(eddyb) Using `c_null` as it works on all types. 
- bx.cx().const_null(niche_llty) + bx.const_null(niche_llty) } else { - bx.cx().const_uint_big(niche_llty, niche_value) + bx.const_uint_big(niche_llty, niche_value) }; OperandValue::Immediate(niche_llval).store(bx, niche); } @@ -335,7 +335,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { } } - pub fn project_index>( + pub fn project_index>( &self, bx: &mut Bx, llindex: V @@ -350,7 +350,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { }; PlaceRef { - llval: bx.inbounds_gep(self.llval, &[bx.cx().const_usize(0), llindex]), + llval: bx.inbounds_gep(self.llval, &[bx.const_usize(0), llindex]), llextra: None, layout, align: self.align.restrict_for_offset(offset), @@ -363,11 +363,11 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { variant_index: VariantIdx ) -> Self { let mut downcast = *self; - downcast.layout = self.layout.for_variant(bx.cx(), variant_index); + downcast.layout = self.layout.for_variant(bx, variant_index); // Cast to the appropriate variant struct type. - let variant_ty = bx.cx().backend_type(downcast.layout); - downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty)); + let variant_ty = bx.backend_type(downcast.layout); + downcast.llval = bx.pointercast(downcast.llval, bx.type_ptr_to(variant_ty)); downcast } @@ -418,7 +418,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { match bx.tcx().const_eval(param_env.and(cid)) { Ok(val) => match val.val { mir::interpret::ConstValue::ByRef(ptr, alloc) => { - bx.cx().from_const_alloc(layout, alloc, ptr.offset) + bx.from_const_alloc(layout, alloc, ptr.offset) } _ => bug!("promoteds should have an allocation: {:?}", val), }, @@ -428,8 +428,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // With floats that won't always be true // so we generate an abort bx.abort(); - let llval = bx.cx().const_undef( - bx.cx().type_ptr_to(bx.cx().backend_type(layout)) + let llval = bx.const_undef( + bx.type_ptr_to(bx.backend_type(layout)) ); PlaceRef::new_sized(llval, layout, layout.align.abi) } @@ -439,7 +439,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // NB: The layout of a static may be unsized as is the case when working // with a static that is an extern_type. 
let layout = cx.layout_of(self.monomorphize(&ty)); - PlaceRef::new_thin_place(bx, bx.get_static(def_id), layout, layout.align.abi) + let static_ = bx.get_static(def_id); + PlaceRef::new_thin_place(bx, static_, layout, layout.align.abi) }, mir::Place::Projection(box mir::Projection { ref base, @@ -465,33 +466,33 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { mir::ProjectionElem::ConstantIndex { offset, from_end: false, min_length: _ } => { - let lloffset = bx.cx().const_usize(offset as u64); + let lloffset = bx.const_usize(offset as u64); cg_base.project_index(bx, lloffset) } mir::ProjectionElem::ConstantIndex { offset, from_end: true, min_length: _ } => { - let lloffset = bx.cx().const_usize(offset as u64); + let lloffset = bx.const_usize(offset as u64); let lllen = cg_base.len(bx.cx()); let llindex = bx.sub(lllen, lloffset); cg_base.project_index(bx, llindex) } mir::ProjectionElem::Subslice { from, to } => { let mut subslice = cg_base.project_index(bx, - bx.cx().const_usize(from as u64)); + bx.const_usize(from as u64)); let projected_ty = PlaceTy::Ty { ty: cg_base.layout.ty } .projection_ty(tcx, &projection.elem).to_ty(tcx); - subslice.layout = bx.cx().layout_of(self.monomorphize(&projected_ty)); + subslice.layout = bx.layout_of(self.monomorphize(&projected_ty)); if subslice.layout.is_unsized() { subslice.llextra = Some(bx.sub(cg_base.llextra.unwrap(), - bx.cx().const_usize((from as u64) + (to as u64)))); + bx.const_usize((from as u64) + (to as u64)))); } // Cast the place pointer type to the new // array or slice type (*[%_; new_len]). subslice.llval = bx.pointercast(subslice.llval, - bx.cx().type_ptr_to(bx.cx().backend_type(subslice.layout))); + bx.type_ptr_to(bx.backend_type(subslice.layout))); subslice } diff --git a/src/librustc_codegen_ssa/mir/rvalue.rs b/src/librustc_codegen_ssa/mir/rvalue.rs index 25a7754d118d7..9d0d0b5259143 100644 --- a/src/librustc_codegen_ssa/mir/rvalue.rs +++ b/src/librustc_codegen_ssa/mir/rvalue.rs @@ -40,7 +40,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { mir::Rvalue::Cast(mir::CastKind::Unsize, ref source, _) => { // The destination necessarily contains a fat pointer, so if // it's a scalar pair, it's a fat pointer or newtype thereof. - if bx.cx().is_backend_scalar_pair(dest.layout) { + if bx.is_backend_scalar_pair(dest.layout) { // into-coerce of a thin pointer to a fat pointer - just // use the operand path. 
let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue); @@ -87,49 +87,28 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { if dest.layout.is_zst() { return bx; } - let zero = bx.cx().const_usize(0); - let start = dest.project_index(&mut bx, zero).llval; if let OperandValue::Immediate(v) = cg_elem.val { - let size = bx.cx().const_usize(dest.layout.size.bytes()); + let zero = bx.const_usize(0); + let start = dest.project_index(&mut bx, zero).llval; + let size = bx.const_usize(dest.layout.size.bytes()); // Use llvm.memset.p0i8.* to initialize all zero arrays - if bx.cx().is_const_integral(v) && bx.cx().const_to_uint(v) == 0 { - let fill = bx.cx().const_u8(0); + if bx.is_const_integral(v) && bx.const_to_uint(v) == 0 { + let fill = bx.const_u8(0); bx.memset(start, fill, size, dest.align, MemFlags::empty()); return bx; } // Use llvm.memset.p0i8.* to initialize byte arrays let v = base::from_immediate(&mut bx, v); - if bx.cx().val_ty(v) == bx.cx().type_i8() { + if bx.val_ty(v) == bx.type_i8() { bx.memset(start, v, size, dest.align, MemFlags::empty()); return bx; } } - let count = bx.cx().const_usize(count); - let end = dest.project_index(&mut bx, count).llval; - - let mut header_bx = bx.build_sibling_block("repeat_loop_header"); - let mut body_bx = bx.build_sibling_block("repeat_loop_body"); - let next_bx = bx.build_sibling_block("repeat_loop_next"); - - bx.br(header_bx.llbb()); - let current = header_bx.phi(bx.cx().val_ty(start), &[start], &[bx.llbb()]); - - let keep_going = header_bx.icmp(IntPredicate::IntNE, current, end); - header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb()); - - let align = dest.align.restrict_for_offset(dest.layout.field(bx.cx(), 0).size); - cg_elem.val.store(&mut body_bx, - PlaceRef::new_sized(current, cg_elem.layout, align)); - - let next = body_bx.inbounds_gep(current, &[bx.cx().const_usize(1)]); - body_bx.br(header_bx.llbb()); - header_bx.add_incoming_to_phi(current, next, body_bx.llbb()); - - next_bx + bx.write_operand_repeatedly(cg_elem, count, dest) } mir::Rvalue::Aggregate(ref kind, ref operands) => { @@ -196,13 +175,13 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => { let operand = self.codegen_operand(&mut bx, source); debug!("cast operand is {:?}", operand); - let cast = bx.cx().layout_of(self.monomorphize(&mir_cast_ty)); + let cast = bx.layout_of(self.monomorphize(&mir_cast_ty)); let val = match *kind { mir::CastKind::ReifyFnPointer => { match operand.layout.ty.sty { ty::FnDef(def_id, substs) => { - if bx.cx().tcx().has_attr(def_id, "rustc_args_required_const") { + if bx.tcx().has_attr(def_id, "rustc_args_required_const") { bug!("reifying a fn ptr that requires \ const arguments"); } @@ -218,8 +197,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { match operand.layout.ty.sty { ty::Closure(def_id, substs) => { let instance = monomorphize::resolve_closure( - bx.cx().tcx(), def_id, substs, ty::ClosureKind::FnOnce); - OperandValue::Immediate(bx.cx().get_fn(instance)) + bx.tcx(), def_id, substs, ty::ClosureKind::FnOnce); + OperandValue::Immediate(bx.get_fn(instance)) } _ => { bug!("{} cannot be cast to a fn ptr", operand.layout.ty) @@ -231,7 +210,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { operand.val } mir::CastKind::Unsize => { - assert!(bx.cx().is_backend_scalar_pair(cast)); + assert!(bx.is_backend_scalar_pair(cast)); match operand.val { OperandValue::Pair(lldata, 
llextra) => { // unsize from a fat pointer - this is a @@ -242,7 +221,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // HACK(eddyb) have to bitcast pointers // until LLVM removes pointee types. let lldata = bx.pointercast(lldata, - bx.cx().scalar_pair_element_backend_type(cast, 0, true)); + bx.scalar_pair_element_backend_type(cast, 0, true)); OperandValue::Pair(lldata, llextra) } OperandValue::Immediate(lldata) => { @@ -257,16 +236,16 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } } } - mir::CastKind::Misc if bx.cx().is_backend_scalar_pair(operand.layout) => { + mir::CastKind::Misc if bx.is_backend_scalar_pair(operand.layout) => { if let OperandValue::Pair(data_ptr, meta) = operand.val { - if bx.cx().is_backend_scalar_pair(cast) { + if bx.is_backend_scalar_pair(cast) { let data_cast = bx.pointercast(data_ptr, - bx.cx().scalar_pair_element_backend_type(cast, 0, true)); + bx.scalar_pair_element_backend_type(cast, 0, true)); OperandValue::Pair(data_cast, meta) } else { // cast to thin-ptr // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and // pointer-cast of that pointer to desired pointer type. - let llcast_ty = bx.cx().immediate_backend_type(cast); + let llcast_ty = bx.immediate_backend_type(cast); let llval = bx.pointercast(data_ptr, llcast_ty); OperandValue::Immediate(llval) } @@ -275,10 +254,10 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } } mir::CastKind::Misc => { - assert!(bx.cx().is_backend_immediate(cast)); - let ll_t_out = bx.cx().immediate_backend_type(cast); + assert!(bx.is_backend_immediate(cast)); + let ll_t_out = bx.immediate_backend_type(cast); if operand.layout.abi.is_uninhabited() { - let val = OperandValue::Immediate(bx.cx().const_undef(ll_t_out)); + let val = OperandValue::Immediate(bx.const_undef(ll_t_out)); return (bx, OperandRef { val, layout: cast, @@ -287,14 +266,14 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let r_t_in = CastTy::from_ty(operand.layout.ty) .expect("bad input type for cast"); let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast"); - let ll_t_in = bx.cx().immediate_backend_type(operand.layout); + let ll_t_in = bx.immediate_backend_type(operand.layout); match operand.layout.variants { layout::Variants::Single { index } => { if let Some(def) = operand.layout.ty.ty_adt_def() { let discr_val = def - .discriminant_for_variant(bx.cx().tcx(), index) + .discriminant_for_variant(bx.tcx(), index) .val; - let discr = bx.cx().const_uint_big(ll_t_out, discr_val); + let discr = bx.const_uint_big(ll_t_out, discr_val); return (bx, OperandRef { val: OperandValue::Immediate(discr), layout: cast, @@ -322,7 +301,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // have bound checks, and this is the most // convenient place to put the `assume`. 
let ll_t_in_const = - bx.cx().const_uint_big(ll_t_in, *scalar.valid_range.end()); + bx.const_uint_big(ll_t_in, *scalar.valid_range.end()); let cmp = bx.icmp( IntPredicate::IntULE, llval, @@ -338,8 +317,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { bx.intcast(llval, ll_t_out, signed) } (CastTy::Float, CastTy::Float) => { - let srcsz = bx.cx().float_width(ll_t_in); - let dstsz = bx.cx().float_width(ll_t_out); + let srcsz = bx.float_width(ll_t_in); + let dstsz = bx.float_width(ll_t_out); if dstsz > srcsz { bx.fpext(llval, ll_t_out) } else if srcsz > dstsz { @@ -356,7 +335,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { (CastTy::FnPtr, CastTy::Int(_)) => bx.ptrtoint(llval, ll_t_out), (CastTy::Int(_), CastTy::Ptr(_)) => { - let usize_llval = bx.intcast(llval, bx.cx().type_isize(), signed); + let usize_llval = bx.intcast(llval, bx.type_isize(), signed); bx.inttoptr(usize_llval, ll_t_out) } (CastTy::Int(_), CastTy::Float) => @@ -383,7 +362,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // Note: places are indirect, so storing the `llval` into the // destination effectively creates a reference. - let val = if !bx.cx().type_has_metadata(ty) { + let val = if !bx.type_has_metadata(ty) { OperandValue::Immediate(cg_place.llval) } else { OperandValue::Pair(cg_place.llval, cg_place.llextra.unwrap()) @@ -401,7 +380,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let size = self.evaluate_array_len(&mut bx, place); let operand = OperandRef { val: OperandValue::Immediate(size), - layout: bx.cx().layout_of(bx.tcx().types.usize), + layout: bx.layout_of(bx.tcx().types.usize), }; (bx, operand) } @@ -427,7 +406,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { }; let operand = OperandRef { val: OperandValue::Immediate(llresult), - layout: bx.cx().layout_of( + layout: bx.layout_of( op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)), }; (bx, operand) @@ -442,7 +421,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let operand_ty = bx.tcx().intern_tup(&[val_ty, bx.tcx().types.bool]); let operand = OperandRef { val: result, - layout: bx.cx().layout_of(operand_ty) + layout: bx.layout_of(operand_ty) }; (bx, operand) @@ -477,8 +456,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => { - assert!(bx.cx().type_is_sized(ty)); - let val = bx.cx().const_usize(bx.cx().layout_of(ty).size.bytes()); + assert!(bx.type_is_sized(ty)); + let val = bx.const_usize(bx.layout_of(ty).size.bytes()); let tcx = self.cx.tcx(); (bx, OperandRef { val: OperandValue::Immediate(val), @@ -488,21 +467,21 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => { let content_ty = self.monomorphize(&content_ty); - let content_layout = bx.cx().layout_of(content_ty); - let llsize = bx.cx().const_usize(content_layout.size.bytes()); - let llalign = bx.cx().const_usize(content_layout.align.abi.bytes()); - let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty)); - let llty_ptr = bx.cx().backend_type(box_layout); + let content_layout = bx.layout_of(content_ty); + let llsize = bx.const_usize(content_layout.size.bytes()); + let llalign = bx.const_usize(content_layout.align.abi.bytes()); + let box_layout = bx.layout_of(bx.tcx().mk_box(content_ty)); + let llty_ptr = bx.backend_type(box_layout); // Allocate 
space: let def_id = match bx.tcx().lang_items().require(ExchangeMallocFnLangItem) { Ok(id) => id, Err(s) => { - bx.cx().sess().fatal(&format!("allocation of `{}` {}", box_layout.ty, s)); + bx.sess().fatal(&format!("allocation of `{}` {}", box_layout.ty, s)); } }; let instance = ty::Instance::mono(bx.tcx(), def_id); - let r = bx.cx().get_fn(instance); + let r = bx.get_fn(instance); let call = bx.call(r, &[llsize, llalign], None); let val = bx.pointercast(call, llty_ptr); @@ -521,8 +500,11 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // According to `rvalue_creates_operand`, only ZST // aggregate rvalues are allowed to be operands. let ty = rvalue.ty(self.mir, self.cx.tcx()); - (bx, OperandRef::new_zst(self.cx, - self.cx.layout_of(self.monomorphize(&ty)))) + let operand = OperandRef::new_zst( + &mut bx, + self.cx.layout_of(self.monomorphize(&ty)), + ); + (bx, operand) } } } @@ -537,8 +519,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { if let mir::Place::Local(index) = *place { if let LocalRef::Operand(Some(op)) = self.locals[index] { if let ty::Array(_, n) = op.layout.ty.sty { - let n = n.unwrap_usize(bx.cx().tcx()); - return bx.cx().const_usize(n); + let n = n.unwrap_usize(bx.tcx()); + return bx.const_usize(n); } } } @@ -596,7 +578,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { mir::BinOp::Shr => common::build_unchecked_rshift(bx, input_ty, lhs, rhs), mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt | mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => if is_unit { - bx.cx().const_bool(match op { + bx.const_bool(match op { mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt => false, mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => true, _ => unreachable!() @@ -670,9 +652,9 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // with #[rustc_inherit_overflow_checks] and inlined from // another crate (mostly core::num generic/#[inline] fns), // while the current crate doesn't use overflow checks. - if !bx.cx().check_overflow() { + if !bx.check_overflow() { let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty); - return OperandValue::Pair(val, bx.cx().const_bool(false)); + return OperandValue::Pair(val, bx.const_bool(false)); } let (val, of) = match op { @@ -687,12 +669,12 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { bx.checked_binop(oop, input_ty, lhs, rhs) } mir::BinOp::Shl | mir::BinOp::Shr => { - let lhs_llty = bx.cx().val_ty(lhs); - let rhs_llty = bx.cx().val_ty(rhs); + let lhs_llty = bx.val_ty(lhs); + let rhs_llty = bx.val_ty(rhs); let invert_mask = common::shift_mask_val(bx, lhs_llty, rhs_llty, true); let outer_bits = bx.and(rhs, invert_mask); - let of = bx.icmp(IntPredicate::IntNE, outer_bits, bx.cx().const_null(rhs_llty)); + let of = bx.icmp(IntPredicate::IntNE, outer_bits, bx.const_null(rhs_llty)); let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty); (val, of) @@ -742,8 +724,8 @@ fn cast_int_to_float<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( // It's only u128 -> f32 that can cause overflows (i.e., should yield infinity). // LLVM's uitofp produces undef in those cases, so we manually check for that case. 
let is_u128_to_f32 = !signed && - bx.cx().int_width(int_ty) == 128 && - bx.cx().float_width(float_ty) == 32; + bx.int_width(int_ty) == 128 && + bx.float_width(float_ty) == 32; if is_u128_to_f32 { // All inputs greater or equal to (f32::MAX + 0.5 ULP) are rounded to infinity, // and for everything else LLVM's uitofp works just fine. @@ -751,9 +733,9 @@ fn cast_int_to_float<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( use rustc_apfloat::Float; const MAX_F32_PLUS_HALF_ULP: u128 = ((1 << (Single::PRECISION + 1)) - 1) << (Single::MAX_EXP - Single::PRECISION as i16); - let max = bx.cx().const_uint_big(int_ty, MAX_F32_PLUS_HALF_ULP); + let max = bx.const_uint_big(int_ty, MAX_F32_PLUS_HALF_ULP); let overflow = bx.icmp(IntPredicate::IntUGE, x, max); - let infinity_bits = bx.cx().const_u32(ieee::Single::INFINITY.to_bits() as u32); + let infinity_bits = bx.const_u32(ieee::Single::INFINITY.to_bits() as u32); let infinity = bx.bitcast(infinity_bits, float_ty); let fp = bx.uitofp(x, float_ty); bx.select(overflow, infinity, fp) @@ -779,12 +761,12 @@ fn cast_float_to_int<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( bx.fptoui(x, int_ty) }; - if !bx.cx().sess().opts.debugging_opts.saturating_float_casts { + if !bx.sess().opts.debugging_opts.saturating_float_casts { return fptosui_result; } - let int_width = bx.cx().int_width(int_ty); - let float_width = bx.cx().float_width(float_ty); + let int_width = bx.int_width(int_ty); + let float_width = bx.float_width(float_ty); // LLVM's fpto[su]i returns undef when the input x is infinite, NaN, or does not fit into the // destination integer type after rounding towards zero. This `undef` value can cause UB in // safe code (see issue #10184), so we implement a saturating conversion on top of it: @@ -839,8 +821,8 @@ fn cast_float_to_int<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( let mut float_bits_to_llval = |bits| { let bits_llval = match float_width { - 32 => bx.cx().const_u32(bits as u32), - 64 => bx.cx().const_u64(bits as u64), + 32 => bx.const_u32(bits as u32), + 64 => bx.const_u64(bits as u64), n => bug!("unsupported float width {}", n), }; bx.bitcast(bits_llval, float_ty) @@ -895,8 +877,8 @@ fn cast_float_to_int<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( // performed is ultimately up to the backend, but at least x86 does perform them. let less_or_nan = bx.fcmp(RealPredicate::RealULT, x, f_min); let greater = bx.fcmp(RealPredicate::RealOGT, x, f_max); - let int_max = bx.cx().const_uint_big(int_ty, int_max(signed, int_width)); - let int_min = bx.cx().const_uint_big(int_ty, int_min(signed, int_width) as u128); + let int_max = bx.const_uint_big(int_ty, int_max(signed, int_width)); + let int_min = bx.const_uint_big(int_ty, int_min(signed, int_width) as u128); let s0 = bx.select(less_or_nan, int_min, fptosui_result); let s1 = bx.select(greater, int_max, s0); @@ -905,7 +887,7 @@ fn cast_float_to_int<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( // Therefore we only need to execute this step for signed integer types. 
if signed { // LLVM has no isNaN predicate, so we use (x == x) instead - let zero = bx.cx().const_uint(int_ty, 0); + let zero = bx.const_uint(int_ty, 0); let cmp = bx.fcmp(RealPredicate::RealOEQ, x, x); bx.select(cmp, s1, zero) } else { diff --git a/src/librustc_codegen_ssa/traits/abi.rs b/src/librustc_codegen_ssa/traits/abi.rs index 8f7fa199b057a..a8fd4e1d2c7c7 100644 --- a/src/librustc_codegen_ssa/traits/abi.rs +++ b/src/librustc_codegen_ssa/traits/abi.rs @@ -10,4 +10,5 @@ pub trait AbiMethods<'tcx> { pub trait AbiBuilderMethods<'tcx>: BackendTypes { fn apply_attrs_callsite(&mut self, ty: &FnType<'tcx, Ty<'tcx>>, callsite: Self::Value); + fn get_param(&self, index: usize) -> Self::Value; } diff --git a/src/librustc_codegen_ssa/traits/backend.rs b/src/librustc_codegen_ssa/traits/backend.rs index 00eae9098e74f..98c6cf28422ec 100644 --- a/src/librustc_codegen_ssa/traits/backend.rs +++ b/src/librustc_codegen_ssa/traits/backend.rs @@ -14,6 +14,7 @@ use syntax_pos::symbol::InternedString; pub trait BackendTypes { type Value: CodegenObject; + type Switch; type BasicBlock: Copy; type Type: CodegenObject; type Funclet; diff --git a/src/librustc_codegen_ssa/traits/builder.rs b/src/librustc_codegen_ssa/traits/builder.rs index 5099107a39303..47ddeaef1a189 100644 --- a/src/librustc_codegen_ssa/traits/builder.rs +++ b/src/librustc_codegen_ssa/traits/builder.rs @@ -11,11 +11,7 @@ use crate::mir::place::PlaceRef; use crate::MemFlags; use rustc::ty::Ty; use rustc::ty::layout::{Align, Size}; -use std::ffi::CStr; - -use std::borrow::Cow; use std::ops::Range; -use syntax::ast::AsmDialect; #[derive(Copy, Clone)] pub enum OverflowOp { @@ -24,26 +20,13 @@ pub enum OverflowOp { Mul, } -pub trait BuilderMethods<'a, 'tcx: 'a>: - HasCodegen<'tcx> - + DebugInfoBuilderMethods<'tcx> - + ArgTypeMethods<'tcx> - + AbiBuilderMethods<'tcx> - + IntrinsicCallMethods<'tcx> - + AsmBuilderMethods<'tcx> - + StaticBuilderMethods<'tcx> -{ +pub trait ControlFlowBuilderMethods<'a, 'tcx: 'a>: HasCodegen<'tcx> { fn new_block<'b>(cx: &'a Self::CodegenCx, llfn: Self::Value, name: &'b str) -> Self; fn with_cx(cx: &'a Self::CodegenCx) -> Self; fn build_sibling_block<'b>(&self, name: &'b str) -> Self; - fn cx(&self) -> &Self::CodegenCx; - fn llfn(&self) -> Self::Value; fn llbb(&self) -> Self::BasicBlock; - fn count_insn(&self, category: &str); - fn set_value_name(&mut self, value: Self::Value, name: &str); fn position_at_end(&mut self, llbb: Self::BasicBlock); - fn position_at_start(&mut self, llbb: Self::BasicBlock); fn ret_void(&mut self); fn ret(&mut self, v: Self::Value); fn br(&mut self, dest: Self::BasicBlock); @@ -53,58 +36,25 @@ pub trait BuilderMethods<'a, 'tcx: 'a>: then_llbb: Self::BasicBlock, else_llbb: Self::BasicBlock, ); - fn switch( + fn switch_new( &mut self, v: Self::Value, else_llbb: Self::BasicBlock, num_cases: usize, - ) -> Self::Value; - fn invoke( + ) -> Self::Switch; + fn switch_add_case( &mut self, - llfn: Self::Value, - args: &[Self::Value], - then: Self::BasicBlock, - catch: Self::BasicBlock, - funclet: Option<&Self::Funclet>, - ) -> Self::Value; - fn unreachable(&mut self); - fn add(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn fadd(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn fadd_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn sub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn fsub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn fsub_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; 
- fn mul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn fmul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn fmul_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn udiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn exactudiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn sdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn exactsdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn fdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn fdiv_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn urem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn srem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn frem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn frem_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn shl(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn lshr(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn ashr(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn and(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn or(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn xor(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn neg(&mut self, v: Self::Value) -> Self::Value; - fn fneg(&mut self, v: Self::Value) -> Self::Value; - fn not(&mut self, v: Self::Value) -> Self::Value; + s: &mut Self::Switch, + on_val: Self::Value, + dest: Self::BasicBlock, + ); + fn switch_emit(&mut self, s: Self::Switch); - fn checked_binop( - &mut self, - oop: OverflowOp, - ty: Ty<'_>, - lhs: Self::Value, - rhs: Self::Value, - ) -> (Self::Value, Self::Value); + fn unreachable(&mut self); +} +pub trait MemoryBuilderMethods<'tcx>: HasCodegen<'tcx> { + // Stack allocations fn alloca(&mut self, ty: Self::Type, name: &str, align: Align) -> Self::Value; fn dynamic_alloca(&mut self, ty: Self::Type, name: &str, align: Align) -> Self::Value; fn array_alloca( @@ -117,13 +67,9 @@ pub trait BuilderMethods<'a, 'tcx: 'a>: fn load(&mut self, ptr: Self::Value, align: Align) -> Self::Value; fn volatile_load(&mut self, ptr: Self::Value) -> Self::Value; - fn atomic_load(&mut self, ptr: Self::Value, order: AtomicOrdering, size: Size) -> Self::Value; fn load_operand(&mut self, place: PlaceRef<'tcx, Self::Value>) -> OperandRef<'tcx, Self::Value>; - fn range_metadata(&mut self, load: Self::Value, range: Range); - fn nonnull_metadata(&mut self, load: Self::Value); - fn store(&mut self, val: Self::Value, ptr: Self::Value, align: Align) -> Self::Value; fn store_with_flags( &mut self, @@ -132,53 +78,22 @@ pub trait BuilderMethods<'a, 'tcx: 'a>: align: Align, flags: MemFlags, ) -> Self::Value; - fn atomic_store( - &mut self, - val: Self::Value, - ptr: Self::Value, - order: AtomicOrdering, - size: Size, - ); + // Pointer operations fn gep(&mut self, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value; fn inbounds_gep(&mut self, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value; fn struct_gep(&mut self, ptr: Self::Value, idx: u64) -> Self::Value; - fn trunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; - fn sext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; - fn fptoui(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; - fn fptosi(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; - fn uitofp(&mut 
self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; - fn sitofp(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; - fn fptrunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; - fn fpext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn pointercast(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; fn ptrtoint(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; fn inttoptr(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; - fn bitcast(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; - fn intcast(&mut self, val: Self::Value, dest_ty: Self::Type, is_signed: bool) -> Self::Value; - fn pointercast(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; - fn icmp(&mut self, op: IntPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn fcmp(&mut self, op: RealPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - - fn empty_phi(&mut self, ty: Self::Type) -> Self::Value; - fn phi( - &mut self, - ty: Self::Type, - vals: &[Self::Value], - bbs: &[Self::BasicBlock], - ) -> Self::Value; - fn inline_asm_call( - &mut self, - asm: &CStr, - cons: &CStr, - inputs: &[Self::Value], - output: Self::Type, - volatile: bool, - alignstack: bool, - dia: AsmDialect, - ) -> Option; + // Optimization metadata + fn range_metadata(&mut self, load: Self::Value, range: Range); + fn nonnull_metadata(&mut self, load: Self::Value); + fn set_invariant_load(&mut self, load: Self::Value); + // Bulk memory operations fn memcpy( &mut self, dst: Self::Value, @@ -206,53 +121,124 @@ pub trait BuilderMethods<'a, 'tcx: 'a>: flags: MemFlags, ); - fn minnum(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn maxnum(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + /// Called for Rvalue::Repeat when the elem is neither a ZST nor optimizable using memset. 
+ fn write_operand_repeatedly( + &mut self, + elem: OperandRef<'tcx, Self::Value>, + count: u64, + dest: PlaceRef<'tcx, Self::Value>, + ) -> Self; + + // Atomics + fn atomic_load(&mut self, ptr: Self::Value, order: AtomicOrdering, size: Size) -> Self::Value; + fn atomic_store( + &mut self, + val: Self::Value, + ptr: Self::Value, + order: AtomicOrdering, + size: Size, + ); + fn atomic_cmpxchg( + &mut self, + dst: Self::Value, + cmp: Self::Value, + src: Self::Value, + order: AtomicOrdering, + failure_order: AtomicOrdering, + weak: bool, + ) -> Self::Value; + fn atomic_rmw( + &mut self, + op: AtomicRmwBinOp, + dst: Self::Value, + src: Self::Value, + order: AtomicOrdering, + ) -> Self::Value; + fn atomic_fence(&mut self, order: AtomicOrdering, scope: SynchronizationScope); +} + +pub trait NumBuilderMethods<'tcx>: HasCodegen<'tcx> { + // Integers + fn add(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn sub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn mul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn udiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn exactudiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn sdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn exactsdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn urem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn srem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn shl(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn lshr(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn ashr(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn and(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn or(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn xor(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn neg(&mut self, v: Self::Value) -> Self::Value; + fn fneg(&mut self, v: Self::Value) -> Self::Value; + fn not(&mut self, v: Self::Value) -> Self::Value; + + fn checked_binop( + &mut self, + oop: OverflowOp, + ty: Ty<'_>, + lhs: Self::Value, + rhs: Self::Value, + ) -> (Self::Value, Self::Value); + + fn trunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn sext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn zext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + + fn bitcast(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn intcast(&mut self, val: Self::Value, dest_ty: Self::Type, is_signed: bool) -> Self::Value; + + fn icmp(&mut self, op: IntPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + + // Floats + fn fadd(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn fadd_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn fsub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn fsub_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn fmul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn fmul_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn fdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn fdiv_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn frem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn frem_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + + fn fptoui(&mut self, val: 
Self::Value, dest_ty: Self::Type) -> Self::Value; + fn fptosi(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn uitofp(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn sitofp(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn fptrunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn fpext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn fcmp(&mut self, op: RealPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + + /// This is not really only for numbers, but often used functions which also use numbers fn select( &mut self, cond: Self::Value, then_val: Self::Value, else_val: Self::Value, ) -> Self::Value; +} - fn va_arg(&mut self, list: Self::Value, ty: Self::Type) -> Self::Value; - fn extract_element(&mut self, vec: Self::Value, idx: Self::Value) -> Self::Value; - fn insert_element( - &mut self, - vec: Self::Value, - elt: Self::Value, - idx: Self::Value, - ) -> Self::Value; - fn shuffle_vector( +pub trait UnwindBuilderMethods<'tcx>: HasCodegen<'tcx> { + fn invoke( &mut self, - v1: Self::Value, - v2: Self::Value, - mask: Self::Value, + llfn: Self::Value, + args: &[Self::Value], + then: Self::BasicBlock, + catch: Self::BasicBlock, + funclet: Option<&Self::Funclet>, ) -> Self::Value; - fn vector_splat(&mut self, num_elts: usize, elt: Self::Value) -> Self::Value; - fn vector_reduce_fadd_fast(&mut self, acc: Self::Value, src: Self::Value) -> Self::Value; - fn vector_reduce_fmul_fast(&mut self, acc: Self::Value, src: Self::Value) -> Self::Value; - fn vector_reduce_add(&mut self, src: Self::Value) -> Self::Value; - fn vector_reduce_mul(&mut self, src: Self::Value) -> Self::Value; - fn vector_reduce_and(&mut self, src: Self::Value) -> Self::Value; - fn vector_reduce_or(&mut self, src: Self::Value) -> Self::Value; - fn vector_reduce_xor(&mut self, src: Self::Value) -> Self::Value; - fn vector_reduce_fmin(&mut self, src: Self::Value) -> Self::Value; - fn vector_reduce_fmax(&mut self, src: Self::Value) -> Self::Value; - fn vector_reduce_fmin_fast(&mut self, src: Self::Value) -> Self::Value; - fn vector_reduce_fmax_fast(&mut self, src: Self::Value) -> Self::Value; - fn vector_reduce_min(&mut self, src: Self::Value, is_signed: bool) -> Self::Value; - fn vector_reduce_max(&mut self, src: Self::Value, is_signed: bool) -> Self::Value; - fn extract_value(&mut self, agg_val: Self::Value, idx: u64) -> Self::Value; - fn insert_value(&mut self, agg_val: Self::Value, elt: Self::Value, idx: u64) -> Self::Value; - fn landing_pad( &mut self, ty: Self::Type, pers_fn: Self::Value, num_clauses: usize, ) -> Self::Value; - fn add_clause(&mut self, landing_pad: Self::Value, clause: Self::Value); fn set_cleanup(&mut self, landing_pad: Self::Value); fn resume(&mut self, exn: Self::Value) -> Self::Value; fn cleanup_pad(&mut self, parent: Option, args: &[Self::Value]) -> Self::Funclet; @@ -262,7 +248,6 @@ pub trait BuilderMethods<'a, 'tcx: 'a>: unwind: Option, ) -> Self::Value; fn catch_pad(&mut self, parent: Self::Value, args: &[Self::Value]) -> Self::Funclet; - fn catch_ret(&mut self, funclet: &Self::Funclet, unwind: Self::BasicBlock) -> Self::Value; fn catch_switch( &mut self, parent: Option, @@ -271,40 +256,26 @@ pub trait BuilderMethods<'a, 'tcx: 'a>: ) -> Self::Value; fn add_handler(&mut self, catch_switch: Self::Value, handler: Self::BasicBlock); fn set_personality_fn(&mut self, personality: Self::Value); +} - fn atomic_cmpxchg( - &mut self, - dst: Self::Value, - cmp: Self::Value, - src: Self::Value, 
-        order: AtomicOrdering,
-        failure_order: AtomicOrdering,
-        weak: bool,
-    ) -> Self::Value;
-    fn atomic_rmw(
-        &mut self,
-        op: AtomicRmwBinOp,
-        dst: Self::Value,
-        src: Self::Value,
-        order: AtomicOrdering,
-    ) -> Self::Value;
-    fn atomic_fence(&mut self, order: AtomicOrdering, scope: SynchronizationScope);
-    fn add_case(&mut self, s: Self::Value, on_val: Self::Value, dest: Self::BasicBlock);
-    fn add_incoming_to_phi(&mut self, phi: Self::Value, val: Self::Value, bb: Self::BasicBlock);
-    fn set_invariant_load(&mut self, load: Self::Value);
-
-    /// Returns the ptr value that should be used for storing `val`.
-    fn check_store(&mut self, val: Self::Value, ptr: Self::Value) -> Self::Value;
+pub trait BuilderMethods<'a, 'tcx: 'a>:
+    HasCodegen<'tcx>
+    + DebugInfoBuilderMethods<'tcx>
+    + ArgTypeMethods<'tcx>
+    + AbiBuilderMethods<'tcx>
+    + IntrinsicCallMethods<'tcx>
+    + AsmBuilderMethods<'tcx>
+    + StaticBuilderMethods<'tcx>
-    /// Returns the args that should be used for a call to `llfn`.
-    fn check_call<'b>(
-        &mut self,
-        typ: &str,
-        llfn: Self::Value,
-        args: &'b [Self::Value],
-    ) -> Cow<'b, [Self::Value]>
-    where
-        [Self::Value]: ToOwned;
+    + ControlFlowBuilderMethods<'a, 'tcx>
+    + MemoryBuilderMethods<'tcx>
+    + NumBuilderMethods<'tcx>
+    + UnwindBuilderMethods<'tcx>
+{
+    fn extract_element(&mut self, vec: Self::Value, idx: Self::Value) -> Self::Value;
+    fn vector_splat(&mut self, num_elts: usize, elt: Self::Value) -> Self::Value;
+    fn extract_value(&mut self, agg_val: Self::Value, idx: u64) -> Self::Value;
+    fn insert_value(&mut self, agg_val: Self::Value, elt: Self::Value, idx: u64) -> Self::Value;
 
     /// Called for `StorageLive`
     fn lifetime_start(&mut self, ptr: Self::Value, size: Size);
@@ -318,7 +289,6 @@ pub trait BuilderMethods<'a, 'tcx: 'a>:
         args: &[Self::Value],
         funclet: Option<&Self::Funclet>,
     ) -> Self::Value;
-    fn zext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
 
     unsafe fn delete_basic_block(&mut self, bb: Self::BasicBlock);
     fn do_not_inline(&mut self, llret: Self::Value);
diff --git a/src/librustc_codegen_ssa/traits/consts.rs b/src/librustc_codegen_ssa/traits/consts.rs
index 319f4b4e5e4b5..32412f303c155 100644
--- a/src/librustc_codegen_ssa/traits/consts.rs
+++ b/src/librustc_codegen_ssa/traits/consts.rs
@@ -3,7 +3,6 @@ use crate::mir::place::PlaceRef;
 use rustc::mir::interpret::Allocation;
 use rustc::mir::interpret::Scalar;
 use rustc::ty::layout;
-use syntax::symbol::LocalInternedString;
 
 pub trait ConstMethods<'tcx>: BackendTypes {
     // Constant constructors
@@ -19,24 +18,12 @@ pub trait ConstMethods<'tcx>: BackendTypes {
     fn const_usize(&self, i: u64) -> Self::Value;
     fn const_u8(&self, i: u8) -> Self::Value;
 
-    // This is a 'c-like' raw string, which differs from
-    // our boxed-and-length-annotated strings.
-    fn const_cstr(&self, s: LocalInternedString, null_terminated: bool) -> Self::Value;
-
-    fn const_str_slice(&self, s: LocalInternedString) -> Self::Value;
-    fn const_fat_ptr(&self, ptr: Self::Value, meta: Self::Value) -> Self::Value;
     fn const_struct(&self, elts: &[Self::Value], packed: bool) -> Self::Value;
-    fn const_array(&self, ty: Self::Type, elts: &[Self::Value]) -> Self::Value;
-    fn const_vector(&self, elts: &[Self::Value]) -> Self::Value;
-    fn const_bytes(&self, bytes: &[u8]) -> Self::Value;
 
-    fn const_get_elt(&self, v: Self::Value, idx: u64) -> Self::Value;
-    fn const_get_real(&self, v: Self::Value) -> Option<(f64, bool)>;
     fn const_to_uint(&self, v: Self::Value) -> u64;
     fn const_to_opt_u128(&self, v: Self::Value, sign_ext: bool) -> Option<u128>;
 
     fn is_const_integral(&self, v: Self::Value) -> bool;
-    fn is_const_real(&self, v: Self::Value) -> bool;
 
     fn scalar_to_backend(
         &self,
diff --git a/src/librustc_codegen_ssa/traits/debuginfo.rs b/src/librustc_codegen_ssa/traits/debuginfo.rs
index 135188e98c71c..a0b53fde09c4d 100644
--- a/src/librustc_codegen_ssa/traits/debuginfo.rs
+++ b/src/librustc_codegen_ssa/traits/debuginfo.rs
@@ -28,7 +28,7 @@ pub trait DebugInfoMethods<'tcx>: BackendTypes {
     fn create_mir_scopes(
         &self,
         mir: &mir::Mir<'_>,
-        debug_context: &FunctionDebugContext<Self::DIScope>,
+        debug_context: &mut FunctionDebugContext<Self::DIScope>,
     ) -> IndexVec<mir::SourceScope, MirDebugScope<Self::DIScope>>;
     fn extend_scope_to_file(
         &self,
@@ -53,9 +53,10 @@ pub trait DebugInfoBuilderMethods<'tcx>: BackendTypes {
     );
     fn set_source_location(
         &mut self,
-        debug_context: &FunctionDebugContext<Self::DIScope>,
+        debug_context: &mut FunctionDebugContext<Self::DIScope>,
         scope: Option<Self::DIScope>,
         span: Span,
     );
     fn insert_reference_to_gdb_debug_scripts_section_global(&mut self);
+    fn set_value_name(&mut self, value: Self::Value, name: &str);
 }
diff --git a/src/librustc_codegen_ssa/traits/misc.rs b/src/librustc_codegen_ssa/traits/misc.rs
index b23155563665d..2797dd89f5b15 100644
--- a/src/librustc_codegen_ssa/traits/misc.rs
+++ b/src/librustc_codegen_ssa/traits/misc.rs
@@ -1,5 +1,4 @@
 use super::BackendTypes;
-use libc::c_uint;
 use rustc::mir::mono::Stats;
 use rustc::session::Session;
 use rustc::ty::{self, Instance, Ty};
@@ -15,7 +14,6 @@ pub trait MiscMethods<'tcx>: BackendTypes {
     fn check_overflow(&self) -> bool;
     fn instances(&self) -> &RefCell<FxHashMap<Instance<'tcx>, Self::Value>>;
     fn get_fn(&self, instance: Instance<'tcx>) -> Self::Value;
-    fn get_param(&self, llfn: Self::Value, index: c_uint) -> Self::Value;
     fn eh_personality(&self) -> Self::Value;
     fn eh_unwind_resume(&self) -> Self::Value;
     fn sess(&self) -> &Session;
diff --git a/src/librustc_codegen_ssa/traits/mod.rs b/src/librustc_codegen_ssa/traits/mod.rs
index 8fe8b7ecd4709..04eca25ef98f4 100644
--- a/src/librustc_codegen_ssa/traits/mod.rs
+++ b/src/librustc_codegen_ssa/traits/mod.rs
@@ -30,7 +30,7 @@ mod write;
 pub use self::abi::{AbiBuilderMethods, AbiMethods};
 pub use self::asm::{AsmBuilderMethods, AsmMethods};
 pub use self::backend::{Backend, BackendTypes, ExtraBackendMethods};
-pub use self::builder::{BuilderMethods, OverflowOp};
+pub use self::builder::*;
 pub use self::consts::ConstMethods;
 pub use self::debuginfo::{DebugInfoBuilderMethods, DebugInfoMethods};
 pub use self::declare::{DeclareMethods, PreDefineMethods};
@@ -85,5 +85,10 @@ pub trait HasCodegen<'tcx>:
             Type = Self::Type,
             Funclet = Self::Funclet,
             DIScope = Self::DIScope,
-        >;
+        >
+        + BaseTypeMethods<'tcx>;
+
+    fn cx(&self) -> &Self::CodegenCx {
+        &**self
+    }
 }
diff --git a/src/librustc_codegen_ssa/traits/statics.rs b/src/librustc_codegen_ssa/traits/statics.rs
index c4e7fe703c219..d8992c159337d 100644
--- a/src/librustc_codegen_ssa/traits/statics.rs
+++ b/src/librustc_codegen_ssa/traits/statics.rs
@@ -1,4 +1,5 @@
 use super::BackendTypes;
+use syntax_pos::symbol::LocalInternedString;
 use rustc::hir::def_id::DefId;
 use rustc::ty::layout::Align;
 
@@ -8,5 +9,13 @@ pub trait StaticMethods: BackendTypes {
 }
 
 pub trait StaticBuilderMethods<'tcx>: BackendTypes {
-    fn get_static(&self, def_id: DefId) -> Self::Value;
+    fn get_static(&mut self, def_id: DefId) -> Self::Value;
+    fn static_panic_msg(
+        &mut self,
+        msg: Option<LocalInternedString>,
+        filename: LocalInternedString,
+        line: Self::Value,
+        col: Self::Value,
+        kind: &str,
+    ) -> Self::Value;
 }
diff --git a/src/librustc_codegen_ssa/traits/type_.rs b/src/librustc_codegen_ssa/traits/type_.rs
index 7c5e615f22452..8f690ca726386 100644
--- a/src/librustc_codegen_ssa/traits/type_.rs
+++ b/src/librustc_codegen_ssa/traits/type_.rs
@@ -1,40 +1,29 @@
 use super::misc::MiscMethods;
 use super::Backend;
 use super::HasCodegen;
-use crate::common::{self, TypeKind};
+use crate::common::TypeKind;
 use crate::mir::place::PlaceRef;
-use rustc::ty::layout::{self, Align, Size, TyLayout};
 use rustc::ty::{self, Ty};
-use rustc::util::nodemap::FxHashMap;
+use rustc::ty::layout::{self, TyLayout};
 use rustc_target::abi::call::{ArgType, CastTarget, FnType, Reg};
-use std::cell::RefCell;
-use syntax::ast;
+use syntax_pos::DUMMY_SP;
 
 // This depends on `Backend` and not `BackendTypes`, because consumers will probably want to use
 // `LayoutOf` or `HasTyCtxt`. This way, they don't have to add a constraint on it themselves.
 pub trait BaseTypeMethods<'tcx>: Backend<'tcx> {
-    fn type_void(&self) -> Self::Type;
-    fn type_metadata(&self) -> Self::Type;
     fn type_i1(&self) -> Self::Type;
     fn type_i8(&self) -> Self::Type;
     fn type_i16(&self) -> Self::Type;
     fn type_i32(&self) -> Self::Type;
     fn type_i64(&self) -> Self::Type;
     fn type_i128(&self) -> Self::Type;
-
-    // Creates an integer type with the given number of bits, e.g., i24
-    fn type_ix(&self, num_bits: u64) -> Self::Type;
     fn type_isize(&self) -> Self::Type;
     fn type_f32(&self) -> Self::Type;
     fn type_f64(&self) -> Self::Type;
-    fn type_x86_mmx(&self) -> Self::Type;
     fn type_func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type;
-    fn type_variadic_func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type;
     fn type_struct(&self, els: &[Self::Type], packed: bool) -> Self::Type;
-    fn type_array(&self, ty: Self::Type, len: u64) -> Self::Type;
-    fn type_vector(&self, ty: Self::Type, len: u64) -> Self::Type;
     fn type_kind(&self, ty: Self::Type) -> TypeKind;
     fn type_ptr_to(&self, ty: Self::Type) -> Self::Type;
     fn element_type(&self, ty: Self::Type) -> Self::Type;
@@ -42,21 +31,15 @@ pub trait BaseTypeMethods<'tcx>: Backend<'tcx> {
     /// Returns the number of elements in `self` if it is a LLVM vector type.
     fn vector_length(&self, ty: Self::Type) -> usize;
-    fn func_params_types(&self, ty: Self::Type) -> Vec<Self::Type>;
     fn float_width(&self, ty: Self::Type) -> usize;
 
     /// Retrieves the bit width of the integer type `self`.
     fn int_width(&self, ty: Self::Type) -> u64;
 
     fn val_ty(&self, v: Self::Value) -> Self::Type;
-    fn scalar_lltypes(&self) -> &RefCell<FxHashMap<Ty<'tcx>, Self::Type>>;
 }
 
 pub trait DerivedTypeMethods<'tcx>: BaseTypeMethods<'tcx> + MiscMethods<'tcx> {
-    fn type_bool(&self) -> Self::Type {
-        self.type_i8()
-    }
-
     fn type_i8p(&self) -> Self::Type {
         self.type_ptr_to(self.type_i8())
     }
@@ -70,35 +53,6 @@ pub trait DerivedTypeMethods<'tcx>: BaseTypeMethods<'tcx> + MiscMethods<'tcx> {
         }
     }
 
-    fn type_int_from_ty(&self, t: ast::IntTy) -> Self::Type {
-        match t {
-            ast::IntTy::Isize => self.type_isize(),
-            ast::IntTy::I8 => self.type_i8(),
-            ast::IntTy::I16 => self.type_i16(),
-            ast::IntTy::I32 => self.type_i32(),
-            ast::IntTy::I64 => self.type_i64(),
-            ast::IntTy::I128 => self.type_i128(),
-        }
-    }
-
-    fn type_uint_from_ty(&self, t: ast::UintTy) -> Self::Type {
-        match t {
-            ast::UintTy::Usize => self.type_isize(),
-            ast::UintTy::U8 => self.type_i8(),
-            ast::UintTy::U16 => self.type_i16(),
-            ast::UintTy::U32 => self.type_i32(),
-            ast::UintTy::U64 => self.type_i64(),
-            ast::UintTy::U128 => self.type_i128(),
-        }
-    }
-
-    fn type_float_from_ty(&self, t: ast::FloatTy) -> Self::Type {
-        match t {
-            ast::FloatTy::F32 => self.type_f32(),
-            ast::FloatTy::F64 => self.type_f64(),
-        }
-    }
-
     fn type_from_integer(&self, i: layout::Integer) -> Self::Type {
         use rustc::ty::layout::Integer::*;
         match i {
@@ -110,32 +64,16 @@ pub trait DerivedTypeMethods<'tcx>: BaseTypeMethods<'tcx> + MiscMethods<'tcx> {
         }
     }
 
-    fn type_pointee_for_align(&self, align: Align) -> Self::Type {
-        // FIXME(eddyb) We could find a better approximation if ity.align < align.
-        let ity = layout::Integer::approximate_align(self, align);
-        self.type_from_integer(ity)
-    }
-
-    /// Return a LLVM type that has at most the required alignment,
-    /// and exactly the required size, as a best-effort padding array.
-    fn type_padding_filler(&self, size: Size, align: Align) -> Self::Type {
-        let unit = layout::Integer::approximate_align(self, align);
-        let size = size.bytes();
-        let unit_size = unit.size().bytes();
-        assert_eq!(size % unit_size, 0);
-        self.type_array(self.type_from_integer(unit), size / unit_size)
-    }
-
     fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool {
-        common::type_needs_drop(self.tcx(), ty)
+        ty.needs_drop(self.tcx(), ty::ParamEnv::reveal_all())
     }
 
     fn type_is_sized(&self, ty: Ty<'tcx>) -> bool {
-        common::type_is_sized(self.tcx(), ty)
+        ty.is_sized(self.tcx().at(DUMMY_SP), ty::ParamEnv::reveal_all())
     }
 
     fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool {
-        common::type_is_freeze(self.tcx(), ty)
+        ty.is_freeze(self.tcx(), ty::ParamEnv::reveal_all(), DUMMY_SP)
     }
 
     fn type_has_metadata(&self, ty: Ty<'tcx>) -> bool {
@@ -158,7 +96,6 @@ impl<T> DerivedTypeMethods<'tcx> for T where Self: BaseTypeMethods<'tcx> + MiscMethods<'tcx>
 pub trait LayoutTypeMethods<'tcx>: Backend<'tcx> {
     fn backend_type(&self, layout: TyLayout<'tcx>) -> Self::Type;
     fn cast_backend_type(&self, ty: &CastTarget) -> Self::Type;
-    fn fn_backend_type(&self, ty: &FnType<'tcx, Ty<'tcx>>) -> Self::Type;
     fn fn_ptr_backend_type(&self, ty: &FnType<'tcx, Ty<'tcx>>) -> Self::Type;
     fn reg_backend_type(&self, ty: &Reg) -> Self::Type;
     fn immediate_backend_type(&self, layout: TyLayout<'tcx>) -> Self::Type;
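Note (not part of the patch): the point of splitting the old monolithic builder trait into groups such as ControlFlowBuilderMethods and MemoryBuilderMethods, and of re-exporting them via `pub use self::builder::*`, is that generic codegen helpers can bound on only the capability group they actually use. Below is a minimal, self-contained sketch with toy stand-in traits; the trait names mirror the patch, but the signatures, the `jump_to` helper, and the `DummyBuilder` backend are invented purely for illustration and do not match the real rustc_codegen_ssa interfaces.

    // Toy stand-ins for the regrouped builder traits; illustrative only.
    trait ControlFlowBuilderMethods {
        type BasicBlock;
        fn br(&mut self, dest: Self::BasicBlock);
    }

    trait MemoryBuilderMethods {
        type Value;
        fn load(&mut self, ptr: Self::Value) -> Self::Value;
    }

    // A helper that only emits a branch can now require just the
    // control-flow group instead of the full builder interface.
    fn jump_to<Bx: ControlFlowBuilderMethods>(bx: &mut Bx, dest: Bx::BasicBlock) {
        bx.br(dest);
    }

    // Dummy backend so the sketch compiles and runs on its own.
    struct DummyBuilder;

    impl ControlFlowBuilderMethods for DummyBuilder {
        type BasicBlock = u32;
        fn br(&mut self, dest: u32) {
            println!("br -> bb{}", dest);
        }
    }

    impl MemoryBuilderMethods for DummyBuilder {
        type Value = i64;
        fn load(&mut self, ptr: i64) -> i64 {
            ptr
        }
    }

    fn main() {
        let mut bx = DummyBuilder;
        jump_to(&mut bx, 3);
        let _ = bx.load(0);
    }

The same idea motivates the `HasCodegen::cx()` default method in the patch: code that only needs context-level queries can go through `cx()` without threading a separate reference around.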