From 5207298a2c2cfe322e351617915a92f70f5e0301 Mon Sep 17 00:00:00 2001
From: James Miller
Date: Tue, 20 Sep 2016 14:50:03 +1200
Subject: [PATCH 01/12] Implement TypeFoldable on the MIR types

Inlining needs to apply the substitutions from the callsite. Also implements
TypeFoldable on IndexVec

---
 src/librustc/mir/repr.rs | 387 ++++++++++++++++++++
 src/librustc/ty/structural_impls.rs | 12 +
 src/librustc_data_structures/indexed_vec.rs | 10 +
 3 files changed, 409 insertions(+)

diff --git a/src/librustc/mir/repr.rs b/src/librustc/mir/repr.rs
index 70b0b810c6c48..513c3a6030192 100644
--- a/src/librustc/mir/repr.rs
+++ b/src/librustc/mir/repr.rs
@@ -18,6 +18,7 @@ use rustc_data_structures::control_flow_graph::ControlFlowGraph;
 use hir::def_id::DefId;
 use ty::subst::Substs;
 use ty::{self, AdtDef, ClosureSubsts, Region, Ty};
+use ty::fold::{TypeFoldable, TypeFolder, TypeVisitor};
 use util::ppaux;
 use rustc_back::slice;
 use hir::InlineAsm;
@@ -1326,3 +1327,389 @@ impl Location {
 }
 }
 }
+
+/*
+ * TypeFoldable implementations for MIR types
+ */
+
impl<'tcx> TypeFoldable<'tcx> for Mir<'tcx> {
    fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
        Mir {
            basic_blocks: self.basic_blocks.fold_with(folder),
            visibility_scopes: self.visibility_scopes.clone(),
            promoted: self.promoted.fold_with(folder),
            return_ty: self.return_ty.fold_with(folder),
            var_decls: self.var_decls.fold_with(folder),
            arg_decls: self.arg_decls.fold_with(folder),
            temp_decls: self.temp_decls.fold_with(folder),
            upvar_decls: self.upvar_decls.clone(),
            span: self.span,
            cache: Cache::new()
        }
    }

    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
        self.basic_blocks.visit_with(visitor) ||
            self.promoted.visit_with(visitor) ||
            self.return_ty.visit_with(visitor) ||
            self.var_decls.visit_with(visitor) ||
            self.arg_decls.visit_with(visitor) ||
            self.temp_decls.visit_with(visitor)
    }
}

impl<'tcx> TypeFoldable<'tcx> for VarDecl<'tcx> {
    fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
        VarDecl {
            ty: self.ty.fold_with(folder),
            ..self.clone()
        }
    }

    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
        self.ty.visit_with(visitor)
    }
}

impl<'tcx> TypeFoldable<'tcx> for TempDecl<'tcx> {
    fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
        TempDecl {
            ty: self.ty.fold_with(folder),
        }
    }

    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
        self.ty.visit_with(visitor)
    }
}

impl<'tcx> TypeFoldable<'tcx> for ArgDecl<'tcx> {
    fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
        ArgDecl {
            ty: self.ty.fold_with(folder),
            ..self.clone()
        }
    }

    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
        self.ty.visit_with(visitor)
    }
}

impl<'tcx> TypeFoldable<'tcx> for BasicBlockData<'tcx> {
    fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
        BasicBlockData {
            statements: self.statements.fold_with(folder),
            terminator: self.terminator.fold_with(folder),
            is_cleanup: self.is_cleanup
        }
    }

    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
        self.statements.visit_with(visitor) || self.terminator.visit_with(visitor)
    }
}

impl<'tcx> TypeFoldable<'tcx> for Statement<'tcx> {
    fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
        use mir::repr::StatementKind::*;

        let kind = match self.kind {
            Assign(ref lval, ref rval) =>
                Assign(lval.fold_with(folder), rval.fold_with(folder)),
            SetDiscriminant { ref lvalue, variant_index } => SetDiscriminant {
                lvalue: lvalue.fold_with(folder),
                variant_index: variant_index
            },
            StorageLive(ref lval) => StorageLive(lval.fold_with(folder)),
            StorageDead(ref lval) => StorageDead(lval.fold_with(folder)),
        };
        Statement {
            source_info: self.source_info,
            kind: kind
        }
    }

    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
        use mir::repr::StatementKind::*;

        match self.kind {
            Assign(ref lval, ref rval) => { lval.visit_with(visitor) || rval.visit_with(visitor) }
            SetDiscriminant { ref lvalue, .. } |
            StorageLive(ref lvalue) |
            StorageDead(ref lvalue) => lvalue.visit_with(visitor)
        }
    }
}

impl<'tcx> TypeFoldable<'tcx> for Terminator<'tcx> {
    fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
        use mir::repr::TerminatorKind::*;

        let kind = match self.kind {
            Goto { target } => Goto { target: target },
            If { ref cond, targets } => If {
                cond: cond.fold_with(folder),
                targets: targets
            },
            Switch { ref discr, adt_def, ref targets } => Switch {
                discr: discr.fold_with(folder),
                adt_def: adt_def,
                targets: targets.clone()
            },
            SwitchInt { ref discr, switch_ty, ref values, ref targets } => SwitchInt {
                discr: discr.fold_with(folder),
                switch_ty: switch_ty.fold_with(folder),
                values: values.clone(),
                targets: targets.clone()
            },
            Drop { ref location, target, unwind } => Drop {
                location: location.fold_with(folder),
                target: target,
                unwind: unwind
            },
            DropAndReplace { ref location, ref value, target, unwind } => DropAndReplace {
                location: location.fold_with(folder),
                value: value.fold_with(folder),
                target: target,
                unwind: unwind
            },
            Call { ref func, ref args, ref destination, cleanup } => {
                let dest = if let Some((ref loc, dest)) = *destination {
                    Some((loc.fold_with(folder), dest))
                } else { None };

                Call {
                    func: func.fold_with(folder),
                    args: args.fold_with(folder),
                    destination: dest,
                    cleanup: cleanup
                }
            },
            Assert { ref cond, expected, ref msg, target, cleanup } => {
                let msg = if let AssertMessage::BoundsCheck { ref len, ref index } = *msg {
                    AssertMessage::BoundsCheck {
                        len: len.fold_with(folder),
                        index: index.fold_with(folder),
                    }
                } else {
                    msg.clone()
                };
                Assert {
                    cond: cond.fold_with(folder),
                    expected: expected,
                    msg: msg,
                    target: target,
                    cleanup: cleanup
                }
            },
            Resume => Resume,
            Return => Return,
            Unreachable => Unreachable,
        };
        Terminator {
            source_info: self.source_info,
            kind: kind
        }
    }

    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
        use mir::repr::TerminatorKind::*;

        match self.kind {
            If { ref cond, .. } => cond.visit_with(visitor),
            Switch { ref discr, .. } => discr.visit_with(visitor),
            SwitchInt { ref discr, switch_ty, .. } =>
                discr.visit_with(visitor) || switch_ty.visit_with(visitor),
            Drop { ref location, ..} => location.visit_with(visitor),
            DropAndReplace { ref location, ref value, ..} =>
                location.visit_with(visitor) || value.visit_with(visitor),
            Call { ref func, ref args, ref destination, .. } => {
                let dest = if let Some((ref loc, _)) = *destination {
                    loc.visit_with(visitor)
                } else { false };
                dest || func.visit_with(visitor) || args.visit_with(visitor)
            },
            Assert { ref cond, ref msg, ..
            } => {
                if cond.visit_with(visitor) {
                    if let AssertMessage::BoundsCheck { ref len, ref index } = *msg {
                        len.visit_with(visitor) || index.visit_with(visitor)
                    } else {
                        false
                    }
                } else {
                    false
                }
            },
            Goto { .. } |
            Resume |
            Return |
            Unreachable => false
        }
    }
}

impl<'tcx> TypeFoldable<'tcx> for Lvalue<'tcx> {
    fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
        match self {
            &Lvalue::Projection(ref p) => Lvalue::Projection(p.fold_with(folder)),
            _ => self.clone()
        }
    }

    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
        if let &Lvalue::Projection(ref p) = self {
            p.visit_with(visitor)
        } else {
            false
        }
    }
}

impl<'tcx> TypeFoldable<'tcx> for Rvalue<'tcx> {
    fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
        use mir::repr::Rvalue::*;
        match *self {
            Use(ref op) => Use(op.fold_with(folder)),
            Repeat(ref op, ref len) => Repeat(op.fold_with(folder), len.fold_with(folder)),
            Ref(region, bk, ref lval) => Ref(region.fold_with(folder), bk, lval.fold_with(folder)),
            Len(ref lval) => Len(lval.fold_with(folder)),
            Cast(kind, ref op, ty) => Cast(kind, op.fold_with(folder), ty.fold_with(folder)),
            BinaryOp(op, ref rhs, ref lhs) => BinaryOp(op, rhs.fold_with(folder), lhs.fold_with(folder)),
            CheckedBinaryOp(op, ref rhs, ref lhs) =>
                CheckedBinaryOp(op, rhs.fold_with(folder), lhs.fold_with(folder)),
            UnaryOp(op, ref val) => UnaryOp(op, val.fold_with(folder)),
            Box(ty) => Box(ty.fold_with(folder)),
            Aggregate(ref kind, ref fields) => {
                let kind = match *kind {
                    AggregateKind::Vec => AggregateKind::Vec,
                    AggregateKind::Tuple => AggregateKind::Tuple,
                    AggregateKind::Adt(def, v, substs, n) =>
                        AggregateKind::Adt(def, v, substs.fold_with(folder), n),
                    AggregateKind::Closure(id, substs) => AggregateKind::Closure(id, substs.fold_with(folder))
                };
                Aggregate(kind, fields.fold_with(folder))
            }
            InlineAsm { ref asm, ref outputs, ref inputs } => InlineAsm {
                asm: asm.clone(),
                outputs: outputs.fold_with(folder),
                inputs: inputs.fold_with(folder)
            }
        }
    }

    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
        use mir::repr::Rvalue::*;
        match *self {
            Use(ref op) => op.visit_with(visitor),
            Repeat(ref op, ref len) => op.visit_with(visitor) || len.visit_with(visitor),
            Ref(region, _, ref lval) => region.visit_with(visitor) || lval.visit_with(visitor),
            Len(ref lval) => lval.visit_with(visitor),
            Cast(_, ref op, ty) => op.visit_with(visitor) || ty.visit_with(visitor),
            BinaryOp(_, ref rhs, ref lhs) |
            CheckedBinaryOp(_, ref rhs, ref lhs) => rhs.visit_with(visitor) || lhs.visit_with(visitor),
            UnaryOp(_, ref val) => val.visit_with(visitor),
            Box(ty) => ty.visit_with(visitor),
            Aggregate(ref kind, ref fields) => {
                (match *kind {
                    AggregateKind::Vec => false,
                    AggregateKind::Tuple => false,
                    AggregateKind::Adt(_, _, substs, _) => substs.visit_with(visitor),
                    AggregateKind::Closure(_, substs) => substs.visit_with(visitor)
                }) || fields.visit_with(visitor)
            }
            InlineAsm { ref outputs, ref inputs, ..
            } => outputs.visit_with(visitor) || inputs.visit_with(visitor)
        }
    }
}

impl<'tcx> TypeFoldable<'tcx> for Operand<'tcx> {
    fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
        match *self {
            Operand::Consume(ref lval) => Operand::Consume(lval.fold_with(folder)),
            Operand::Constant(ref c) => Operand::Constant(c.fold_with(folder)),
        }
    }

    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
        match *self {
            Operand::Consume(ref lval) => lval.visit_with(visitor),
            Operand::Constant(ref c) => c.visit_with(visitor)
        }
    }
}

impl<'tcx, B, V> TypeFoldable<'tcx> for Projection<'tcx, B, V>
    where B: TypeFoldable<'tcx>, V: TypeFoldable<'tcx>
{
    fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
        use mir::repr::ProjectionElem::*;

        let base = self.base.fold_with(folder);
        let elem = match self.elem {
            Deref => Deref,
            Field(f, ty) => Field(f, ty.fold_with(folder)),
            Index(ref v) => Index(v.fold_with(folder)),
            ref elem => elem.clone()
        };

        Projection {
            base: base,
            elem: elem
        }
    }

    fn super_visit_with<Vs: TypeVisitor<'tcx>>(&self, visitor: &mut Vs) -> bool {
        use mir::repr::ProjectionElem::*;

        self.base.visit_with(visitor) ||
            match self.elem {
                Field(_, ty) => ty.visit_with(visitor),
                Index(ref v) => v.visit_with(visitor),
                _ => false
            }
    }
}

impl<'tcx> TypeFoldable<'tcx> for Constant<'tcx> {
    fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
        Constant {
            span: self.span.clone(),
            ty: self.ty.fold_with(folder),
            literal: self.literal.fold_with(folder)
        }
    }
    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
        self.ty.visit_with(visitor) || self.literal.visit_with(visitor)
    }
}

impl<'tcx> TypeFoldable<'tcx> for TypedConstVal<'tcx> {
    fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
        TypedConstVal {
            ty: self.ty.fold_with(folder),
            span: self.span.clone(),
            value: self.value.clone()
        }
    }
    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
        self.ty.visit_with(visitor)
    }
}

impl<'tcx> TypeFoldable<'tcx> for Literal<'tcx> {
    fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
        match *self {
            Literal::Item { def_id, substs } => Literal::Item {
                def_id: def_id,
                substs: substs.fold_with(folder)
            },
            _ => self.clone()
        }
    }
    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
        match *self {
            Literal::Item { substs, ..
            } => substs.visit_with(visitor),
            _ => false
        }
    }
}
diff --git a/src/librustc/ty/structural_impls.rs b/src/librustc/ty/structural_impls.rs
index 5a87ea1473d98..bba3e6f634693 100644
--- a/src/librustc/ty/structural_impls.rs
+++ b/src/librustc/ty/structural_impls.rs
@@ -12,6 +12,8 @@
 use infer::type_variable;
 use ty::{self, Lift, Ty, TyCtxt};
 use ty::fold::{TypeFoldable, TypeFolder, TypeVisitor};
+use rustc_data_structures::indexed_vec::{Idx, IndexVec};
+
 use std::rc::Rc;
 use syntax::abi;
@@ -418,6 +420,16 @@ impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Vec<T> {
 }
 }
+impl<'tcx, I: Idx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for IndexVec<I, T> {
+    fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
+        self.iter().map(|t| t.fold_with(folder)).collect()
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.iter().any(|t| t.visit_with(visitor))
+    }
+}
+
 impl<'tcx, T:TypeFoldable<'tcx>> TypeFoldable<'tcx> for ty::Binder<T> {
 fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
 ty::Binder(self.0.fold_with(folder))
diff --git a/src/librustc_data_structures/indexed_vec.rs b/src/librustc_data_structures/indexed_vec.rs
index 9123463149936..fc5509b48fe13 100644
--- a/src/librustc_data_structures/indexed_vec.rs
+++ b/src/librustc_data_structures/indexed_vec.rs
@@ -149,6 +149,16 @@ impl<I: Idx, T> IndexVec<I, T> {
 pub fn last(&self) -> Option<I> {
 self.len().checked_sub(1).map(I::new)
 }
+
+    #[inline]
+    pub fn get<'a>(&'a self, index: I) -> Option<&'a T> {
+        self.raw.get(index.index())
+    }
+
+    #[inline]
+    pub fn get_mut<'a>(&'a mut self, index: I) -> Option<&'a mut T> {
+        self.raw.get_mut(index.index())
+    }
 }
 impl<I: Idx, T> Index<I> for IndexVec<I, T> {

From 064306b0394b11c69d1d22fb171f589ad3227757 Mon Sep 17 00:00:00 2001
From: James Miller
Date: Tue, 20 Sep 2016 15:14:15 +1200
Subject: [PATCH 02/12] Don't run the deaggregator on const fns

The deaggregator will generate assignments that aren't allowed in const
expressions. This also refactors `is_const_fn` into a method on `TyCtxt`.
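Note: a concrete example makes the problem this patch avoids easier to see. The sketch below is illustrative only; the `Point` type is invented and the MIR in the comments is approximate, not compiler output.

struct Point { x: u32, y: u32 }

const fn origin() -> Point {
    // The MIR for this body ends in a single aggregate assignment,
    // roughly:   _0 = Point { x: const 0u32, y: const 0u32 };
    // The deaggregator would split that into per-field statements,
    // roughly:   (_0.x) = const 0u32;  (_0.y) = const 0u32;
    // and assignments of that form are not accepted in const bodies,
    // hence the `tcx.is_const_fn(def_id)` early-return added below.
    Point { x: 0, y: 0 }
}

fn main() {
    const ORIGIN: Point = origin();
    assert_eq!(ORIGIN.x, 0);
}
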
--- src/librustc/ty/mod.rs | 21 ++++++++++++++++++ src/librustc_mir/transform/deaggregator.rs | 6 ++++- src/librustc_mir/transform/qualify_consts.rs | 23 ++------------------ 3 files changed, 28 insertions(+), 22 deletions(-) diff --git a/src/librustc/ty/mod.rs b/src/librustc/ty/mod.rs index 717b8923a1635..140f2fa2bb8e1 100644 --- a/src/librustc/ty/mod.rs +++ b/src/librustc/ty/mod.rs @@ -2868,6 +2868,27 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { autoderefs)) } + /// Checks if the given def is a const fn + pub fn is_const_fn(self, def_id: DefId) -> bool { + use hir::intravisit::FnKind; + use hir::map::blocks::FnLikeNode; + + if let Some(node_id) = self.map.as_local_node_id(def_id) { + let fn_like = FnLikeNode::from_node(self.map.get(node_id)); + match fn_like.map(|f| f.kind()) { + Some(FnKind::ItemFn(_, _, _, c, ..)) => { + c == hir::Constness::Const + } + Some(FnKind::Method(_, m, ..)) => { + m.constness == hir::Constness::Const + } + _ => false + } + } else { + self.sess.cstore.is_const_fn(def_id) + } + } + pub fn upvar_capture(self, upvar_id: ty::UpvarId) -> Option> { Some(self.tables.borrow().upvar_capture_map.get(&upvar_id).unwrap().clone()) } diff --git a/src/librustc_mir/transform/deaggregator.rs b/src/librustc_mir/transform/deaggregator.rs index 77af02c18c60e..9fc0b6006075d 100644 --- a/src/librustc_mir/transform/deaggregator.rs +++ b/src/librustc_mir/transform/deaggregator.rs @@ -22,7 +22,8 @@ impl<'tcx> MirPass<'tcx> for Deaggregator { fn run_pass<'a>(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>, source: MirSource, mir: &mut Mir<'tcx>) { let node_id = source.item_id(); - let node_path = tcx.item_path_str(tcx.map.local_def_id(node_id)); + let def_id = tcx.map.local_def_id(node_id); + let node_path = tcx.item_path_str(def_id); debug!("running on: {:?}", node_path); // we only run when mir_opt_level > 1 match tcx.sess.opts.debugging_opts.mir_opt_level { @@ -37,6 +38,9 @@ impl<'tcx> MirPass<'tcx> for Deaggregator { // In fact, we might not want to trigger in other cases. // Ex: when we could use SROA. 
See issue #35259 + // Don't deaggregate in const fns + if tcx.is_const_fn(def_id) { return; } + let mut curr: usize = 0; for bb in mir.basic_blocks_mut() { let idx = match get_aggregate_statement_index(curr, &bb.statements) { diff --git a/src/librustc_mir/transform/qualify_consts.rs b/src/librustc_mir/transform/qualify_consts.rs index 2c03af2c8e97a..a1cbc334d4424 100644 --- a/src/librustc_mir/transform/qualify_consts.rs +++ b/src/librustc_mir/transform/qualify_consts.rs @@ -20,8 +20,6 @@ use rustc::dep_graph::DepNode; use rustc::hir; use rustc::hir::map as hir_map; use rustc::hir::def_id::DefId; -use rustc::hir::intravisit::FnKind; -use rustc::hir::map::blocks::FnLikeNode; use rustc::traits::{self, Reveal}; use rustc::ty::{self, TyCtxt, Ty}; use rustc::ty::cast::CastTy; @@ -116,23 +114,6 @@ impl fmt::Display for Mode { } } -pub fn is_const_fn(tcx: TyCtxt, def_id: DefId) -> bool { - if let Some(node_id) = tcx.map.as_local_node_id(def_id) { - let fn_like = FnLikeNode::from_node(tcx.map.get(node_id)); - match fn_like.map(|f| f.kind()) { - Some(FnKind::ItemFn(_, _, _, c, ..)) => { - c == hir::Constness::Const - } - Some(FnKind::Method(_, m, ..)) => { - m.constness == hir::Constness::Const - } - _ => false - } - } else { - tcx.sess.cstore.is_const_fn(def_id) - } -} - struct Qualifier<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { mode: Mode, span: Span, @@ -778,7 +759,7 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { ty::TyFnDef(def_id, _, f) => { (f.abi == Abi::PlatformIntrinsic && self.tcx.item_name(def_id).as_str().starts_with("simd_shuffle"), - is_const_fn(self.tcx, def_id)) + self.tcx.is_const_fn(def_id)) } _ => (false, false) }; @@ -997,7 +978,7 @@ impl<'tcx> MirMapPass<'tcx> for QualifyAndPromoteConstants { let src = MirSource::from_node(tcx, id); let mode = match src { MirSource::Fn(_) => { - if is_const_fn(tcx, def_id) { + if tcx.is_const_fn(def_id) { Mode::ConstFn } else { Mode::Fn From 11d95c873f7192f5ce134833d087f4f727478a5b Mon Sep 17 00:00:00 2001 From: James Miller Date: Tue, 20 Sep 2016 15:51:33 +1200 Subject: [PATCH 03/12] Initial implementation of inlining Implements basic inlining for MIR functions. The cases it will inline for are conservative, any function that can unwind will not be inlined. Inlining is currently only done if `mir-opt-level` is set to 2. Does not handle debuginfo correctly at the moment. --- src/librustc/dep_graph/dep_node.rs | 3 +- src/librustc/mir/repr.rs | 2 +- src/librustc_mir/callgraph.rs | 242 +++++++++ src/librustc_mir/lib.rs | 2 +- src/librustc_mir/transform/inline.rs | 730 +++++++++++++++++++++++++++ src/librustc_mir/transform/mod.rs | 1 + 6 files changed, 976 insertions(+), 4 deletions(-) create mode 100644 src/librustc_mir/callgraph.rs create mode 100644 src/librustc_mir/transform/inline.rs diff --git a/src/librustc/dep_graph/dep_node.rs b/src/librustc/dep_graph/dep_node.rs index e99ffa95ed63c..c025da29eefcb 100644 --- a/src/librustc/dep_graph/dep_node.rs +++ b/src/librustc/dep_graph/dep_node.rs @@ -34,7 +34,7 @@ pub enum DepNode { // `Krate` value gives you access to all other items. To avoid // this fate, do not call `tcx.map.krate()`; instead, prefer // wrappers like `tcx.visit_all_items_in_krate()`. If there is no - // suitable wrapper, you can use `tcx.dep_graph.ignore()` to gain + // suitable wrapper, you can use `tcx.dep_graph.in_ignore()` to gain // access to the krate, but you must remember to add suitable // edges yourself for the individual items that you read. 
 Krate,
@@ -243,4 +243,3 @@ impl DepNode {
 /// them even in the absence of a tcx.)
 #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)]
 pub struct WorkProductId(pub String);
-
diff --git a/src/librustc/mir/repr.rs b/src/librustc/mir/repr.rs
index 513c3a6030192..4f2fecce1126a 100644
--- a/src/librustc/mir/repr.rs
+++ b/src/librustc/mir/repr.rs
@@ -63,7 +63,7 @@ macro_rules! newtype_index {
 pub struct Mir<'tcx> {
 /// List of basic blocks. References to basic block use a newtyped index type `BasicBlock`
 /// that indexes into this vector.
-    basic_blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>,
+    pub basic_blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>,
 /// List of visibility (lexical) scopes; these are referenced by statements
 /// and used (eventually) for debuginfo. Indexed by a `VisibilityScope`.
diff --git a/src/librustc_mir/callgraph.rs b/src/librustc_mir/callgraph.rs
new file mode 100644
index 0000000000000..209567924ade1
--- /dev/null
+++ b/src/librustc_mir/callgraph.rs
@@ -0,0 +1,242 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! MIR-based callgraph.
+//!
+//! This only considers direct calls
+
+use rustc::hir::def_id::DefId;
+use rustc_data_structures::graph;
+
+use rustc::mir::repr::*;
+use rustc::mir::visit::*;
+use rustc::mir::mir_map::MirMap;
+
+use rustc::ty;
+
+use rustc::util::nodemap::DefIdMap;
+
pub struct CallGraph {
    node_map: DefIdMap<graph::NodeIndex>,
    graph: graph::Graph<DefId, ()>
}

impl CallGraph {
    pub fn build<'tcx>(map: &MirMap<'tcx>) -> CallGraph {
        let def_ids = map.map.keys();

        let mut callgraph = CallGraph {
            node_map: DefIdMap(),
            graph: graph::Graph::new()
        };

        for def_id in def_ids {
            let idx = callgraph.add_node(def_id);

            let mut call_visitor = CallVisitor {
                caller: idx,
                graph: &mut callgraph
            };

            let mir = map.map.get(&def_id).unwrap();
            call_visitor.visit_mir(&mir);
        }

        callgraph
    }

    pub fn scc_iter<'g>(&'g self) -> SCCIterator<'g> {
        SCCIterator::new(&self.graph)
    }

    pub fn def_id(&self, node: graph::NodeIndex) -> DefId {
        *self.graph.node_data(node)
    }

    fn add_node(&mut self, id: DefId) -> graph::NodeIndex {
        let graph = &mut self.graph;
        *self.node_map.entry(id).or_insert_with(|| {
            graph.add_node(id)
        })
    }
}

struct CallVisitor<'a> {
    caller: graph::NodeIndex,
    graph: &'a mut CallGraph
}

impl<'a, 'tcx> Visitor<'tcx> for CallVisitor<'a> {
    fn visit_terminator_kind(&mut self, _block: BasicBlock,
                             kind: &TerminatorKind<'tcx>, _loc: Location) {
        if let TerminatorKind::Call {
            func: Operand::Constant(ref f), ..
        } = *kind {
            if let ty::TyFnDef(def_id, _, _) = f.ty.sty {
                let callee = self.graph.add_node(def_id);
                self.graph.graph.add_edge(self.caller, callee, ());
            }
        }
    }
}

struct StackElement<'g> {
    node: graph::NodeIndex,
    lowlink: usize,
    children: graph::AdjacentTargets<'g, DefId, ()>
}

pub struct SCCIterator<'g> {
    graph: &'g graph::Graph<DefId, ()>,
    index: usize,
    node_indices: Vec<Option<usize>>,
    scc_stack: Vec<graph::NodeIndex>,
    current_scc: Vec<graph::NodeIndex>,
    visit_stack: Vec<StackElement<'g>>,
}

impl<'g> SCCIterator<'g> {
    pub fn new(graph: &'g graph::Graph<DefId, ()>) -> SCCIterator<'g> {
        if graph.len_nodes() == 0 {
            return SCCIterator {
                graph: graph,
                index: 0,
                node_indices: Vec::new(),
                scc_stack: Vec::new(),
                current_scc: Vec::new(),
                visit_stack: Vec::new()
            };
        }

        let first = graph::NodeIndex(0);

        SCCIterator::with_entry(graph, first)
    }

    pub fn with_entry(graph: &'g graph::Graph<DefId, ()>,
                      entry: graph::NodeIndex) -> SCCIterator<'g> {
        let mut iter = SCCIterator {
            graph: graph,
            index: 0,
            node_indices: Vec::with_capacity(graph.len_nodes()),
            scc_stack: Vec::new(),
            current_scc: Vec::new(),
            visit_stack: Vec::new()
        };

        iter.visit_one(entry);

        iter
    }

    fn get_next(&mut self) {
        self.current_scc.clear();

        while !self.visit_stack.is_empty() {
            self.visit_children();

            let node = self.visit_stack.pop().unwrap();

            if let Some(last) = self.visit_stack.last_mut() {
                if last.lowlink > node.lowlink {
                    last.lowlink = node.lowlink;
                }
            }

            debug!("TarjanSCC: Popped node {:?} : lowlink = {:?}; index = {:?}",
                   node.node, node.lowlink, self.node_index(node.node).unwrap());

            if node.lowlink != self.node_index(node.node).unwrap() {
                continue;
            }

            loop {
                let n = self.scc_stack.pop().unwrap();
                self.current_scc.push(n);
                self.set_node_index(n, !0);
                if n == node.node { return; }
            }
        }
    }

    fn visit_one(&mut self, node: graph::NodeIndex) {
        self.index += 1;
        let idx = self.index;
        self.set_node_index(node, idx);
        self.scc_stack.push(node);
        self.visit_stack.push(StackElement {
            node: node,
            lowlink: self.index,
            children: self.graph.successor_nodes(node)
        });
        debug!("TarjanSCC: Node {:?} : index = {:?}", node, idx);
    }

    fn visit_children(&mut self) {
        while let Some(child) = self.visit_stack.last_mut().unwrap().children.next() {
            if let Some(child_num) = self.node_index(child) {
                let cur = self.visit_stack.last_mut().unwrap();
                if cur.lowlink > child_num {
                    cur.lowlink = child_num;
                }
            } else {
                self.visit_one(child);
            }
        }
    }

    fn node_index(&self, node: graph::NodeIndex) -> Option<usize> {
        self.node_indices.get(node.node_id()).and_then(|&idx| idx)
    }

    fn set_node_index(&mut self, node: graph::NodeIndex, idx: usize) {
        let i = node.node_id();
        if i >= self.node_indices.len() {
            self.node_indices.resize(i + 1, None);
        }
        self.node_indices[i] = Some(idx);
    }
}

impl<'g> Iterator for SCCIterator<'g> {
    type Item = Vec<graph::NodeIndex>;

    fn next(&mut self) -> Option<Vec<graph::NodeIndex>> {
        self.get_next();

        if self.current_scc.is_empty() {
            // Try a new root for the next SCC: if the node_indices
            // map doesn't cover all nodes yet, use the first node past
            // its end, otherwise use the first node with no index.
            //
            // FIXME: This should probably use a set of precomputed
            // roots instead
            if self.node_indices.len() < self.graph.len_nodes() {
                let idx = graph::NodeIndex(self.node_indices.len());
                self.visit_one(idx);
            } else {
                for idx in 0..self.node_indices.len() {
                    if self.node_indices[idx].is_none() {
                        let idx = graph::NodeIndex(idx);
                        self.visit_one(idx);
                        break;
                    }
                }
            }
            self.get_next();
        }

        if self.current_scc.is_empty() {
            None
        } else {
            Some(self.current_scc.clone())
        }
    }
}
diff --git a/src/librustc_mir/lib.rs b/src/librustc_mir/lib.rs
index 12f1eb8535a3e..6c6f46e9a8189 100644
--- a/src/librustc_mir/lib.rs
+++ b/src/librustc_mir/lib.rs
@@ -47,9 +47,9 @@ pub mod diagnostics;
 pub mod build;
 pub mod def_use;
+pub mod callgraph;
 pub mod graphviz;
 mod hair;
 pub mod mir_map;
 pub mod pretty;
 pub mod transform;
-
diff --git a/src/librustc_mir/transform/inline.rs b/src/librustc_mir/transform/inline.rs
new file mode 100644
index 0000000000000..8c6aa845692af
--- /dev/null
+++ b/src/librustc_mir/transform/inline.rs
@@ -0,0 +1,730 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Inlining pass for MIR functions
+//!
+//! Inlines functions. Is quite conservative in its conditions; functions
+//! that can unwind are not inlined.
+
+use rustc::hir::def_id::DefId;
+
+use rustc_data_structures::indexed_vec::{Idx, IndexVec};
+use rustc_data_structures::graph;
+
+use rustc::dep_graph::DepNode;
+use rustc::mir::mir_map::MirMap;
+use rustc::mir::repr::*;
+use rustc::mir::transform::{MirMapPass, MirPassHook, MirSource, Pass};
+use rustc::mir::visit::*;
+use rustc::traits;
+use rustc::ty::{self, Ty, TyCtxt};
+use rustc::ty::subst::{Subst,Substs};
+use rustc::util::nodemap::{DefIdMap, DefIdSet};
+
+use syntax::attr;
+use syntax_pos::Span;
+
+use callgraph;
+
+const DEFAULT_THRESHOLD : usize = 50;
+const HINT_THRESHOLD : usize = 100;
+
+const INSTR_COST : usize = 5;
+const CALL_PENALTY : usize = 25;
+
+const UNKNOWN_SIZE_COST : usize = 10;
+
+use std::rc::Rc;
+
+pub struct Inline;
+
impl<'tcx> MirMapPass<'tcx> for Inline {
    fn run_pass<'a>(
        &mut self,
        tcx: TyCtxt<'a, 'tcx, 'tcx>,
        map: &mut MirMap<'tcx>,
        _: &mut [Box<for<'s> MirPassHook<'s>>]) {

        match tcx.sess.opts.debugging_opts.mir_opt_level {
            Some(0) |
            Some(1) |
            None => { return; },
            _ => {}
        };

        let _ignore = tcx.dep_graph.in_ignore();

        let callgraph = callgraph::CallGraph::build(map);

        let mut inliner = Inliner {
            tcx: tcx,
            foreign_mirs: DefIdMap()
        };

        for scc in callgraph.scc_iter() {
            debug!("Inlining SCC {:?}", scc);
            inliner.inline_scc(map, &callgraph, &scc);
        }
    }
}

impl<'tcx> Pass for Inline { }

struct Inliner<'a, 'tcx: 'a> {
    tcx: TyCtxt<'a, 'tcx, 'tcx>,
    foreign_mirs: DefIdMap<Rc<Mir<'tcx>>>,
}

#[derive(Copy, Clone)]
struct CallSite<'tcx> {
    caller: DefId,
    callee: DefId,
    substs: &'tcx Substs<'tcx>,
    bb: BasicBlock,
    location: SourceInfo,
}

impl<'a, 'tcx> Inliner<'a, 'tcx> {
    fn inline_scc(&mut self, map: &mut MirMap<'tcx>,
                  callgraph: &callgraph::CallGraph, scc: &[graph::NodeIndex]) -> bool {
        let mut callsites = Vec::new();
        let mut in_scc = DefIdSet();

        for &node in scc {
            let def_id = callgraph.def_id(node);

            // Don't inspect functions from other
crates + let id = if let Some(id) = self.tcx.map.as_local_node_id(def_id) { + id + } else { + continue; + }; + let src = MirSource::from_node(self.tcx, id); + if let MirSource::Fn(_) = src { + let mir = if let Some(m) = map.map.get(&def_id) { + m + } else { + continue; + }; + for (bb, bb_data) in mir.basic_blocks().iter_enumerated() { + // Only consider direct calls to functions + let terminator = bb_data.terminator(); + if let TerminatorKind::Call { + func: Operand::Constant(ref f), .. } = terminator.kind { + if let ty::TyFnDef(callee_def_id, substs, _) = f.ty.sty { + callsites.push(CallSite { + caller: def_id, + callee: callee_def_id, + substs: substs, + bb: bb, + location: terminator.source_info + }); + } + } + } + + in_scc.insert(def_id); + } + } + + // Move callsites that are in the the SCC to the end so + // they're inlined after calls to outside the SCC + let mut first_call_in_scc = callsites.len(); + + let mut i = 0; + while i < first_call_in_scc { + let f = callsites[i].caller; + if in_scc.contains(&f) { + first_call_in_scc -= 1; + callsites.swap(i, first_call_in_scc); + } else { + i += 1; + } + } + + let mut local_change; + let mut changed = false; + + loop { + local_change = false; + let mut csi = 0; + while csi < callsites.len() { + let foreign_mir; + + let callsite = callsites[csi]; + csi += 1; + + let callee_mir = { + let callee_mir : Option<&Mir<'tcx>> = if callsite.callee.is_local() { + map.map.get(&callsite.callee) + } else { + foreign_mir = self.get_foreign_mir(callsite.callee); + foreign_mir.as_ref().map(|m| &**m) + }; + + let callee_mir = if let Some(m) = callee_mir { + m + } else { + continue; + }; + + if !self.should_inline(callsite, callee_mir) { + continue; + } + + callee_mir.subst(self.tcx, callsite.substs) + }; + + let caller_mir = map.map.get_mut(&callsite.caller).unwrap(); + + let start = caller_mir.basic_blocks().len(); + + if !self.inline_call(callsite, caller_mir, callee_mir) { + continue; + } + + // Add callsites from inlined function + for (bb, bb_data) in caller_mir.basic_blocks().iter_enumerated().skip(start) { + // Only consider direct calls to functions + let terminator = bb_data.terminator(); + if let TerminatorKind::Call { + func: Operand::Constant(ref f), .. } = terminator.kind { + if let ty::TyFnDef(callee_def_id, substs, _) = f.ty.sty { + // Don't inline the same function multiple times. 
+ if callsite.callee != callee_def_id { + callsites.push(CallSite { + caller: callsite.caller, + callee: callee_def_id, + substs: substs, + bb: bb, + location: terminator.source_info + }); + } + } + } + } + + + csi -= 1; + if scc.len() == 1 { + callsites.swap_remove(csi); + } else { + callsites.remove(csi); + } + + local_change = true; + changed = true; + } + + if !local_change { + break; + } + } + + changed + } + + fn get_foreign_mir(&mut self, def_id: DefId) -> Option>> { + if let Some(mir) = self.foreign_mirs.get(&def_id).cloned() { + return Some(mir); + } + // Cache the foreign MIR + let mir = self.tcx.sess.cstore.maybe_get_item_mir(self.tcx, def_id); + let mir = mir.map(Rc::new); + if let Some(ref mir) = mir { + self.foreign_mirs.insert(def_id, mir.clone()); + } + + mir + } + + fn should_inline(&self, callsite: CallSite<'tcx>, + callee_mir: &'a Mir<'tcx>) -> bool { + + let tcx = self.tcx; + + // Don't inline closures + if callee_mir.upvar_decls.len() > 0 { + return false; + } + + // Don't inline calls to trait methods + // FIXME: Should try to resolve it to a concrete method, and + // only bail if that isn't possible + let trait_def = tcx.trait_of_item(callsite.callee); + if trait_def.is_some() { return false; } + + let attrs = tcx.get_attrs(callsite.callee); + let hint = attr::find_inline_attr(None, &attrs[..]); + + let hinted = match hint { + // Just treat inline(always) as a hint for now, + // there are cases that prevent unwinding that we + // need to check for first. + attr::InlineAttr::Always => true, + attr::InlineAttr::Never => return false, + attr::InlineAttr::Hint => true, + attr::InlineAttr::None => false, + }; + + // Only inline local functions if they would be eligible for + // cross-crate inlining. This ensures that any symbols they + // use are reachable cross-crate + // FIXME: This shouldn't be necessary, trans should generate + // the reachable set from the MIR. + if callsite.callee.is_local() { + // No type substs and no inline hint means this function + // wouldn't be eligible for cross-crate inlining + if callsite.substs.types().count() == 0 && !hinted { + return false; + } + + } + + let mut threshold = if hinted { + HINT_THRESHOLD + } else { + DEFAULT_THRESHOLD + }; + + // Significantly lower the threshold for inlining cold functions + if attr::contains_name(&attrs[..], "cold") { + threshold /= 5; + } + + // Give a bonus functions with a small number of blocks, + // We normally have two or three blocks for even + // very small functions. + if callee_mir.basic_blocks().len() <= 3 { + threshold += threshold / 4; + } + + + let id = tcx.map.as_local_node_id(callsite.caller).expect("Caller not local"); + let param_env = ty::ParameterEnvironment::for_item(tcx, id); + + let mut first_block = true; + let mut cost = 0; + + for blk in callee_mir.basic_blocks() { + for stmt in &blk.statements { + // Don't count StorageLive/StorageDead in the inlining cost. + match stmt.kind { + StatementKind::StorageLive(_) | + StatementKind::StorageDead(_) => {} + _ => cost += INSTR_COST + } + } + match blk.terminator().kind { + TerminatorKind::Drop { ref location, unwind, .. } | + TerminatorKind::DropAndReplace { ref location, unwind, .. } => { + // If the location doesn't actually need dropping, treat it like + // a regular goto. 
+ let ty = location.ty(&callee_mir, tcx).subst(tcx, callsite.substs); + let ty = ty.to_ty(tcx); + if tcx.type_needs_drop_given_env(ty, ¶m_env) { + if unwind.is_some() { + // FIXME: Should be able to handle this better + return false; + } else { + cost += CALL_PENALTY; + } + } else { + cost += INSTR_COST; + } + } + // FIXME: Should be able to handle this better + TerminatorKind::Call { cleanup: Some(_), .. } | + TerminatorKind::Assert { cleanup: Some(_), .. } => return false, + + TerminatorKind::Unreachable | + TerminatorKind::Call { destination: None, .. } if first_block => { + // If the function always diverges, don't inline + // unless the cost is zero + threshold = 0; + } + + TerminatorKind::Call { .. } | + TerminatorKind::Assert { .. } => cost += CALL_PENALTY, + _ => cost += INSTR_COST + } + first_block = false; + } + + // Count up the cost of local variables and temps, if we know the size + // use that, otherwise we use a moderately-large dummy cost. + + let ptr_size = tcx.data_layout.pointer_size.bytes(); + + for v in &callee_mir.var_decls { + let ty = v.ty.subst(tcx, callsite.substs); + // Cost of the var is the size in machine-words, if we know + // it. + if let Some(size) = type_size_of(tcx, param_env.clone(), ty) { + cost += (size / ptr_size) as usize; + } else { + cost += UNKNOWN_SIZE_COST; + } + } + for t in &callee_mir.temp_decls { + let ty = t.ty.subst(tcx, callsite.substs); + // Cost of the var is the size in machine-words, if we know + // it. + if let Some(size) = type_size_of(tcx, param_env.clone(), ty) { + cost += (size / ptr_size) as usize; + } else { + cost += UNKNOWN_SIZE_COST; + } + } + + debug!("Inline cost for {:?} is {}", callsite.callee, cost); + + if let attr::InlineAttr::Always = hint { + true + } else { + cost <= threshold + } + } + + + fn inline_call(&self, callsite: CallSite, + caller_mir: &mut Mir<'tcx>, callee_mir: Mir<'tcx>) -> bool { + + // Don't inline a function into itself + if callsite.caller == callsite.callee { return false; } + + let _task = self.tcx.dep_graph.in_task(DepNode::Mir(callsite.caller)); + + + let terminator = caller_mir[callsite.bb].terminator.take().unwrap(); + match terminator.kind { + TerminatorKind::Call { + func: _, args, destination: Some(destination), cleanup } => { + + debug!("Inlined {:?} into {:?}", callsite.callee, callsite.caller); + + let call_scope = terminator.source_info.scope; + let call_span = terminator.source_info.span; + let bb_len = caller_mir.basic_blocks().len(); + + let mut var_map = IndexVec::with_capacity(callee_mir.var_decls.len()); + let mut temp_map = IndexVec::with_capacity(callee_mir.temp_decls.len()); + let mut scope_map = IndexVec::with_capacity(callee_mir.visibility_scopes.len()); + let mut promoted_map = IndexVec::with_capacity(callee_mir.promoted.len()); + + for mut scope in callee_mir.visibility_scopes { + if scope.parent_scope.is_none() { + scope.parent_scope = Some(call_scope); + } + + scope.span = call_span; + + let idx = caller_mir.visibility_scopes.push(scope); + scope_map.push(idx); + } + + for mut var in callee_mir.var_decls { + var.source_info.scope = scope_map[var.source_info.scope]; + let idx = caller_mir.var_decls.push(var); + var_map.push(idx); + } + + for temp in callee_mir.temp_decls { + let idx = caller_mir.temp_decls.push(temp); + temp_map.push(idx); + } + + for p in callee_mir.promoted { + let idx = caller_mir.promoted.push(p); + promoted_map.push(idx); + } + + // If the call is something like `a[*i] = f(i)`, where + // `i : &mut usize`, then just duplicating the `a[*i]` + // 
Lvalue could result in two different locations if `f` + // writes to `i`. To prevent this we need to create a temporary + // borrow of the lvalue and pass the destination as `*temp` instead. + fn dest_needs_borrow(lval: &Lvalue) -> bool { + match *lval { + Lvalue::Projection(ref p) => { + match p.elem { + ProjectionElem::Deref | + ProjectionElem::Index(_) => true, + _ => dest_needs_borrow(&p.base) + } + } + // Static variables need a borrow because the callee + // might modify the same static. + Lvalue::Static(_) => true, + _ => false + } + } + + let dest = if dest_needs_borrow(&destination.0) { + debug!("Creating temp for return destination"); + let dest = Rvalue::Ref( + self.tcx.mk_region(ty::ReErased), + BorrowKind::Mut, + destination.0); + + let ty = dest.ty(caller_mir, self.tcx).expect("Rvalue has no type!"); + + let temp = TempDecl { ty: ty }; + let tmp = caller_mir.temp_decls.push(temp); + let tmp = Lvalue::Temp(tmp); + + let stmt = Statement { + source_info: callsite.location, + kind: StatementKind::Assign(tmp.clone(), dest) + }; + caller_mir[callsite.bb] + .statements.push(stmt); + tmp.deref() + } else { + destination.0 + }; + + let return_block = destination.1; + + // Copy the arguments if needed. + let args : Vec<_> = { + + let tcx = self.tcx; + args.iter().map(|a| { + if let Operand::Consume(Lvalue::Temp(_)) = *a { + // Reuse the operand if it's a temporary already + a.clone() + } else { + debug!("Creating temp for argument"); + // Otherwise, create a temporary for the arg + let arg = Rvalue::Use(a.clone()); + + let ty = arg.ty(caller_mir, tcx).expect("arg has no type!"); + + let temp = TempDecl { ty: ty }; + let tmp = caller_mir.temp_decls.push(temp); + let tmp = Lvalue::Temp(tmp); + + let stmt = Statement { + source_info: callsite.location, + kind: StatementKind::Assign(tmp.clone(), arg) + }; + caller_mir[callsite.bb].statements.push(stmt); + Operand::Consume(tmp) + } + }).collect() + }; + + let mut integrator = Integrator { + block_idx: bb_len, + args: &args, + var_map: var_map, + tmp_map: temp_map, + scope_map: scope_map, + promoted_map: promoted_map, + inline_location: callsite.location, + destination: dest, + return_block: return_block, + cleanup_block: cleanup + }; + + + for (bb, mut block) in callee_mir.basic_blocks.into_iter_enumerated() { + integrator.visit_basic_block_data(bb, &mut block); + caller_mir.basic_blocks_mut().push(block); + } + + let terminator = Terminator { + source_info: terminator.source_info, + kind: TerminatorKind::Goto { target: BasicBlock::new(bb_len) } + }; + + caller_mir[callsite.bb].terminator = Some(terminator); + + true + } + kind => { + caller_mir[callsite.bb].terminator = Some(Terminator { + source_info: terminator.source_info, + kind: kind + }); + false + } + } + } +} + +fn type_size_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: ty::ParameterEnvironment<'tcx>, + ty: Ty<'tcx>) -> Option { + tcx.infer_ctxt(None, Some(param_env), traits::Reveal::All).enter(|infcx| { + ty.layout(&infcx).ok().map(|layout| { + layout.size(&tcx.data_layout).bytes() + }) + }) +} + +struct Integrator<'a, 'tcx: 'a> { + block_idx: usize, + args: &'a [Operand<'tcx>], + var_map: IndexVec, + tmp_map: IndexVec, + scope_map: IndexVec, + promoted_map: IndexVec, + inline_location: SourceInfo, + destination: Lvalue<'tcx>, + return_block: BasicBlock, + cleanup_block: Option +} + +impl<'a, 'tcx> Integrator<'a, 'tcx> { + fn update_target(&self, tgt: BasicBlock) -> BasicBlock { + let new = BasicBlock::new(tgt.index() + self.block_idx); + debug!("Updating target `{:?}`, 
new: `{:?}`", tgt, new); + new + } +} + +impl<'a, 'tcx> MutVisitor<'tcx> for Integrator<'a, 'tcx> { + fn visit_lvalue(&mut self, + lvalue: &mut Lvalue<'tcx>, + _ctxt: LvalueContext, + _location: Location) { + match *lvalue { + Lvalue::Var(ref mut var) => { + if let Some(v) = self.var_map.get(*var).cloned() { + debug!("Replacing {:?} with {:?}", var, v); + *var = v; + } + } + Lvalue::Temp(ref mut tmp) => { + if let Some(t) = self.tmp_map.get(*tmp).cloned() { + debug!("Replacing {:?} with {:?}", tmp, t); + *tmp = t; + } + } + Lvalue::ReturnPointer => { + debug!("Replacing return pointer with {:?}", self.destination); + *lvalue = self.destination.clone(); + } + Lvalue::Arg(arg) => { + let idx = arg.index(); + if let Operand::Consume(ref lval) = self.args[idx] { + debug!("Replacing {:?} with {:?}", lvalue, lval); + *lvalue = lval.clone(); + } + } + _ => self.super_lvalue(lvalue, _ctxt, _location) + } + } + + fn visit_operand(&mut self, operand: &mut Operand<'tcx>, location: Location) { + if let Operand::Consume(Lvalue::Arg(arg)) = *operand { + let idx = arg.index(); + let new_arg = self.args[idx].clone(); + debug!("Replacing use of {:?} with {:?}", arg, new_arg); + *operand = new_arg; + } else { + self.super_operand(operand, location); + } + } + + fn visit_terminator_kind(&mut self, block: BasicBlock, + kind: &mut TerminatorKind<'tcx>, loc: Location) { + match *kind { + TerminatorKind::Goto { ref mut target} => { + *target = self.update_target(*target); + } + TerminatorKind::If { ref mut targets, .. } => { + targets.0 = self.update_target(targets.0); + targets.1 = self.update_target(targets.1); + } + TerminatorKind::Switch { ref mut targets, .. } | + TerminatorKind::SwitchInt { ref mut targets, .. } => { + for tgt in targets { + *tgt = self.update_target(*tgt); + } + } + TerminatorKind::Drop { ref mut target, ref mut unwind, .. } | + TerminatorKind::DropAndReplace { ref mut target, ref mut unwind, .. } => { + *target = self.update_target(*target); + if let Some(tgt) = *unwind { + *unwind = Some(self.update_target(tgt)); + } else { + if Some(*target) != self.cleanup_block { + *unwind = self.cleanup_block; + } + } + + if Some(*target) == *unwind { + *unwind == None; + } + } + TerminatorKind::Call { ref mut destination, ref mut cleanup, .. } => { + let mut target = None; + if let Some((_, ref mut tgt)) = *destination { + *tgt = self.update_target(*tgt); + target = Some(*tgt); + } + if let Some(tgt) = *cleanup { + *cleanup = Some(self.update_target(tgt)); + } else { + *cleanup = self.cleanup_block; + } + + if target == *cleanup { + *cleanup == None; + } + } + TerminatorKind::Assert { ref mut target, ref mut cleanup, .. 
} => {
    *target = self.update_target(*target);
    if let Some(tgt) = *cleanup {
        *cleanup = Some(self.update_target(tgt));
    } else {
        if Some(*target) != self.cleanup_block {
            *cleanup = self.cleanup_block;
        }
    }

    if Some(*target) == *cleanup {
        *cleanup == None;
    }
}
TerminatorKind::Return => {
    *kind = TerminatorKind::Goto { target: self.return_block };
}
TerminatorKind::Resume => {
    if let Some(tgt) = self.cleanup_block {
        *kind = TerminatorKind::Goto { target: tgt }
    }
}
TerminatorKind::Unreachable => { }
}

self.super_terminator_kind(block, kind, loc);
}
fn visit_visibility_scope(&mut self, scope: &mut VisibilityScope) {
    *scope = self.scope_map[*scope];
}
fn visit_span(&mut self, span: &mut Span) {
    // FIXME: probably shouldn't use the inline location span,
    // but not doing so causes errors
    *span = self.inline_location.span;
}

fn visit_literal(&mut self, literal: &mut Literal<'tcx>, loc: Location) {
    if let Literal::Promoted { ref mut index } = *literal {
        if let Some(p) = self.promoted_map.get(*index).cloned() {
            *index = p;
        }
    } else {
        self.super_literal(literal, loc);
    }
}
}
diff --git a/src/librustc_mir/transform/mod.rs b/src/librustc_mir/transform/mod.rs
index 7bcb89b5895e7..3723296b926f0 100644
--- a/src/librustc_mir/transform/mod.rs
+++ b/src/librustc_mir/transform/mod.rs
@@ -18,5 +18,6 @@ pub mod promote_consts;
 pub mod qualify_consts;
 pub mod dump_mir;
 pub mod deaggregator;
+pub mod inline;
 pub mod instcombine;
 pub mod copy_prop;

From d78103ab077debb25790e6381b28e05ea52139ac Mon Sep 17 00:00:00 2001
From: James Miller
Date: Tue, 20 Sep 2016 15:55:50 +1200
Subject: [PATCH 04/12] Add inlining to the set of passes

---
 src/librustc_driver/driver.rs | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/src/librustc_driver/driver.rs b/src/librustc_driver/driver.rs
index ae80bc53c4c36..39cc123dc5e8a 100644
--- a/src/librustc_driver/driver.rs
+++ b/src/librustc_driver/driver.rs
@@ -1019,7 +1019,10 @@ pub fn phase_4_translate_to_llvm<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
 passes.push_pass(box mir::transform::simplify_cfg::SimplifyCfg::new("elaborate-drops"));
 // No lifetime analysis based on borrowing can be done from here on out.
+    passes.push_pass(box mir::transform::inline::Inline);
+    passes.push_pass(box mir::transform::simplify_cfg::SimplifyCfg::new("inline"));
 passes.push_pass(box mir::transform::instcombine::InstCombine::new());
+
 passes.push_pass(box mir::transform::deaggregator::Deaggregator);
 passes.push_pass(box mir::transform::copy_prop::CopyPropagation);

From e30b2114d1ff708e1eab9a12742321f1ccce6275 Mon Sep 17 00:00:00 2001
From: James Miller
Date: Thu, 22 Sep 2016 12:22:36 +1200
Subject: [PATCH 05/12] Various inlining improvements

Don't add an unwind edge if the call/drop/assert is in a cleanup block.
This allows the pass to inline more functions correctly.

Don't penalize intrinsics as harshly. They were previously treated the
same as regular calls but now have the same cost as a statement.

Run the before/after hooks for the pass. Since this is a MirMapPass,
the hooks weren't run.

Run copy propagation each time a function is inlined to remove any
unnecessary copies introduced during integration. This also makes copy
propagation remove any nops it generated once it has finished.

Run simplify cfg after inlining has finished for an SCC. This removes the
need for a separate simplify cfg pass to be run.
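Note: the cost model this patch adjusts is spread across a large diff, so here is a self-contained sketch of its shape using the pass's actual constants. The simplified `Stmt`/`Term` enums and the `should_inline` signature are invented for illustration; the real pass walks MIR basic blocks and also accounts for local sizes and diverging calls.

const INSTR_COST: usize = 5;
const CALL_PENALTY: usize = 25;
const DEFAULT_THRESHOLD: usize = 50;
const HINT_THRESHOLD: usize = 100;

enum Stmt { StorageMarker, Nop, Other }
enum Term { Intrinsic, Call, Assert, DropNeeded, DropNoop, Other }

// Storage markers and nops are free; calls, asserts and real drops pay
// CALL_PENALTY; intrinsics and no-op drops count as plain statements.
// An #[inline] hint raises the threshold, #[cold] slashes it.
fn should_inline(stmts: &[Stmt], terms: &[Term], hinted: bool, cold: bool) -> bool {
    let mut threshold = if hinted { HINT_THRESHOLD } else { DEFAULT_THRESHOLD };
    if cold {
        threshold /= 5;
    }

    let mut cost = 0;
    for s in stmts {
        match s {
            Stmt::StorageMarker | Stmt::Nop => {}
            Stmt::Other => cost += INSTR_COST,
        }
    }
    for t in terms {
        match t {
            Term::Intrinsic | Term::DropNoop | Term::Other => cost += INSTR_COST,
            Term::Call | Term::Assert | Term::DropNeeded => cost += CALL_PENALTY,
        }
    }
    cost <= threshold
}

fn main() {
    // Two plain statements and one real call: 2*5 + 25 = 35 <= 50, so inline.
    assert!(should_inline(&[Stmt::Other, Stmt::Other], &[Term::Call], false, false));
    // The same body in a #[cold] function: threshold 50/5 = 10 < 35, so don't.
    assert!(!should_inline(&[Stmt::Other, Stmt::Other], &[Term::Call], false, true));
}
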
---
 src/librustc/mir/repr.rs | 4 +-
 src/librustc_driver/driver.rs | 1 -
 src/librustc_mir/transform/copy_prop.rs | 13 +-
 src/librustc_mir/transform/inline.rs | 136 +++++++++++++--------
 src/librustc_mir/transform/simplify_cfg.rs | 6 +-
 5 files changed, 104 insertions(+), 56 deletions(-)

diff --git a/src/librustc/mir/repr.rs b/src/librustc/mir/repr.rs
index 4f2fecce1126a..3ccefc0d5ebd2 100644
--- a/src/librustc/mir/repr.rs
+++ b/src/librustc/mir/repr.rs
@@ -1422,6 +1422,7 @@ impl<'tcx> TypeFoldable<'tcx> for Statement<'tcx> {
 },
 StorageLive(ref lval) => StorageLive(lval.fold_with(folder)),
 StorageDead(ref lval) => StorageDead(lval.fold_with(folder)),
+            Nop => Nop,
 };
 Statement {
 source_info: self.source_info,
 kind: kind
@@ -1436,7 +1437,8 @@ impl<'tcx> TypeFoldable<'tcx> for Statement<'tcx> {
 Assign(ref lval, ref rval) => { lval.visit_with(visitor) || rval.visit_with(visitor) }
 SetDiscriminant { ref lvalue, .. } |
 StorageLive(ref lvalue) |
-            StorageDead(ref lvalue) => lvalue.visit_with(visitor)
+            StorageDead(ref lvalue) => lvalue.visit_with(visitor),
+            Nop => false,
 }
 }
 }
diff --git a/src/librustc_driver/driver.rs b/src/librustc_driver/driver.rs
index 39cc123dc5e8a..e4585ccc70db4 100644
--- a/src/librustc_driver/driver.rs
+++ b/src/librustc_driver/driver.rs
@@ -1020,7 +1020,6 @@ pub fn phase_4_translate_to_llvm<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
 // No lifetime analysis based on borrowing can be done from here on out.
 passes.push_pass(box mir::transform::inline::Inline);
-    passes.push_pass(box mir::transform::simplify_cfg::SimplifyCfg::new("inline"));
 passes.push_pass(box mir::transform::instcombine::InstCombine::new());
 passes.push_pass(box mir::transform::deaggregator::Deaggregator);
diff --git a/src/librustc_mir/transform/copy_prop.rs b/src/librustc_mir/transform/copy_prop.rs
index 79fd16012d9ee..11df8e0a8ff12 100644
--- a/src/librustc_mir/transform/copy_prop.rs
+++ b/src/librustc_mir/transform/copy_prop.rs
@@ -58,7 +58,7 @@ impl<'tcx> MirPass<'tcx> for CopyPropagation {
 return
 }
 MirSource::Fn(function_node_id) => {
-            if qualify_consts::is_const_fn(tcx, tcx.map.local_def_id(function_node_id)) {
+            if tcx.is_const_fn(tcx.map.local_def_id(function_node_id)) {
 // Don't run on const functions, as, again, trans might not be able to evaluate
 // the optimized IR.
return @@ -153,6 +153,15 @@ impl<'tcx> MirPass<'tcx> for CopyPropagation { break } } + + // Strip out nops + for blk in mir.basic_blocks_mut() { + blk.statements.retain(|stmt| if let StatementKind::Nop = stmt.kind { + false + } else { + true + }) + } } } @@ -329,6 +338,6 @@ impl<'tcx> MutVisitor<'tcx> for ConstantPropagationVisitor<'tcx> { *operand = Operand::Constant(self.constant.clone()); self.uses_replaced += 1 + } } - diff --git a/src/librustc_mir/transform/inline.rs b/src/librustc_mir/transform/inline.rs index 8c6aa845692af..68ae040b1b0ba 100644 --- a/src/librustc_mir/transform/inline.rs +++ b/src/librustc_mir/transform/inline.rs @@ -28,7 +28,11 @@ use rustc::ty::{self, Ty, TyCtxt}; use rustc::ty::subst::{Subst,Substs}; use rustc::util::nodemap::{DefIdMap, DefIdSet}; +use super::simplify_cfg::{remove_dead_blocks, CfgSimplifier}; +use super::copy_prop::CopyPropagation; + use syntax::attr; +use syntax::abi::Abi; use syntax_pos::Span; use callgraph; @@ -50,7 +54,7 @@ impl<'tcx> MirMapPass<'tcx> for Inline { &mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>, map: &mut MirMap<'tcx>, - _: &mut [Box MirPassHook<'s>>]) { + hooks: &mut [Box MirPassHook<'s>>]) { match tcx.sess.opts.debugging_opts.mir_opt_level { Some(0) | @@ -68,10 +72,32 @@ impl<'tcx> MirMapPass<'tcx> for Inline { foreign_mirs: DefIdMap() }; + let def_ids = map.map.keys(); + for &def_id in &def_ids { + let _task = tcx.dep_graph.in_task(DepNode::Mir(def_id)); + let mir = map.map.get_mut(&def_id).unwrap(); + let id = tcx.map.as_local_node_id(def_id).unwrap(); + let src = MirSource::from_node(tcx, id); + + for hook in &mut *hooks { + hook.on_mir_pass(tcx, src, mir, self, false); + } + } + for scc in callgraph.scc_iter() { - debug!("Inlining SCC {:?}", scc); inliner.inline_scc(map, &callgraph, &scc); } + + for def_id in def_ids { + let _task = tcx.dep_graph.in_task(DepNode::Mir(def_id)); + let mir = map.map.get_mut(&def_id).unwrap(); + let id = tcx.map.as_local_node_id(def_id).unwrap(); + let src = MirSource::from_node(tcx, id); + + for hook in &mut *hooks { + hook.on_mir_pass(tcx, src, mir, self, true); + } + } } } @@ -97,6 +123,8 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> { let mut callsites = Vec::new(); let mut in_scc = DefIdSet(); + let mut inlined_into = DefIdSet(); + for &node in scc { let def_id = callgraph.def_id(node); @@ -190,6 +218,8 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> { continue; } + inlined_into.insert(callsite.caller); + // Add callsites from inlined function for (bb, bb_data) in caller_mir.basic_blocks().iter_enumerated().skip(start) { // Only consider direct calls to functions @@ -228,6 +258,13 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> { } } + // Simplify functions we inlined into. + for def_id in inlined_into { + let caller_mir = map.map.get_mut(&def_id).unwrap(); + debug!("Running simplify cfg on {:?}", def_id); + CfgSimplifier::new(caller_mir).simplify(); + remove_dead_blocks(caller_mir); + } changed } @@ -266,7 +303,7 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> { let hinted = match hint { // Just treat inline(always) as a hint for now, - // there are cases that prevent unwinding that we + // there are cases that prevent inlining that we // need to check for first. attr::InlineAttr::Always => true, attr::InlineAttr::Never => return false, @@ -318,31 +355,24 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> { // Don't count StorageLive/StorageDead in the inlining cost. 
match stmt.kind { StatementKind::StorageLive(_) | - StatementKind::StorageDead(_) => {} + StatementKind::StorageDead(_) | + StatementKind::Nop => {} _ => cost += INSTR_COST } } match blk.terminator().kind { - TerminatorKind::Drop { ref location, unwind, .. } | - TerminatorKind::DropAndReplace { ref location, unwind, .. } => { + TerminatorKind::Drop { ref location, .. } | + TerminatorKind::DropAndReplace { ref location, .. } => { // If the location doesn't actually need dropping, treat it like // a regular goto. let ty = location.ty(&callee_mir, tcx).subst(tcx, callsite.substs); let ty = ty.to_ty(tcx); if tcx.type_needs_drop_given_env(ty, ¶m_env) { - if unwind.is_some() { - // FIXME: Should be able to handle this better - return false; - } else { - cost += CALL_PENALTY; - } + cost += CALL_PENALTY; } else { cost += INSTR_COST; } } - // FIXME: Should be able to handle this better - TerminatorKind::Call { cleanup: Some(_), .. } | - TerminatorKind::Assert { cleanup: Some(_), .. } => return false, TerminatorKind::Unreachable | TerminatorKind::Call { destination: None, .. } if first_block => { @@ -351,7 +381,16 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> { threshold = 0; } - TerminatorKind::Call { .. } | + TerminatorKind::Call {func: Operand::Constant(ref f), .. } => { + if let ty::TyFnDef(.., f) = f.ty.sty { + // Don't give intrinsics the extra penalty for calls + if f.abi == Abi::RustIntrinsic || f.abi == Abi::PlatformIntrinsic { + cost += INSTR_COST; + } else { + cost += CALL_PENALTY; + } + } + } TerminatorKind::Assert { .. } => cost += CALL_PENALTY, _ => cost += INSTR_COST } @@ -405,8 +444,7 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> { let terminator = caller_mir[callsite.bb].terminator.take().unwrap(); match terminator.kind { - TerminatorKind::Call { - func: _, args, destination: Some(destination), cleanup } => { + TerminatorKind::Call { args, destination: Some(destination), cleanup, .. } => { debug!("Inlined {:?} into {:?}", callsite.callee, callsite.caller); @@ -532,7 +570,8 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> { inline_location: callsite.location, destination: dest, return_block: return_block, - cleanup_block: cleanup + cleanup_block: cleanup, + in_cleanup_block: false }; @@ -548,6 +587,15 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> { caller_mir[callsite.bb].terminator = Some(terminator); + if let Some(id) = self.tcx.map.as_local_node_id(callsite.caller) { + // Run copy propagation on the function to clean up any unnecessary + // assignments from integration. 
This also increases the chance that + // this function will be inlined as well + debug!("Running copy propagation"); + let src = MirSource::from_node(self.tcx, id); + CopyPropagation.run_pass(self.tcx, src, caller_mir); + }; + true } kind => { @@ -580,7 +628,8 @@ struct Integrator<'a, 'tcx: 'a> { inline_location: SourceInfo, destination: Lvalue<'tcx>, return_block: BasicBlock, - cleanup_block: Option + cleanup_block: Option, + in_cleanup_block: bool, } impl<'a, 'tcx> Integrator<'a, 'tcx> { @@ -594,29 +643,25 @@ impl<'a, 'tcx> Integrator<'a, 'tcx> { impl<'a, 'tcx> MutVisitor<'tcx> for Integrator<'a, 'tcx> { fn visit_lvalue(&mut self, lvalue: &mut Lvalue<'tcx>, - _ctxt: LvalueContext, + _ctxt: LvalueContext<'tcx>, _location: Location) { match *lvalue { Lvalue::Var(ref mut var) => { if let Some(v) = self.var_map.get(*var).cloned() { - debug!("Replacing {:?} with {:?}", var, v); *var = v; } } Lvalue::Temp(ref mut tmp) => { if let Some(t) = self.tmp_map.get(*tmp).cloned() { - debug!("Replacing {:?} with {:?}", tmp, t); *tmp = t; } } Lvalue::ReturnPointer => { - debug!("Replacing return pointer with {:?}", self.destination); *lvalue = self.destination.clone(); } Lvalue::Arg(arg) => { let idx = arg.index(); if let Operand::Consume(ref lval) = self.args[idx] { - debug!("Replacing {:?} with {:?}", lvalue, lval); *lvalue = lval.clone(); } } @@ -628,13 +673,18 @@ impl<'a, 'tcx> MutVisitor<'tcx> for Integrator<'a, 'tcx> { if let Operand::Consume(Lvalue::Arg(arg)) = *operand { let idx = arg.index(); let new_arg = self.args[idx].clone(); - debug!("Replacing use of {:?} with {:?}", arg, new_arg); *operand = new_arg; } else { self.super_operand(operand, location); } } + fn visit_basic_block_data(&mut self, block: BasicBlock, data: &mut BasicBlockData<'tcx>) { + self.in_cleanup_block = data.is_cleanup; + self.super_basic_block_data(block, data); + self.in_cleanup_block = false; + } + fn visit_terminator_kind(&mut self, block: BasicBlock, kind: &mut TerminatorKind<'tcx>, loc: Location) { match *kind { @@ -656,44 +706,32 @@ impl<'a, 'tcx> MutVisitor<'tcx> for Integrator<'a, 'tcx> { *target = self.update_target(*target); if let Some(tgt) = *unwind { *unwind = Some(self.update_target(tgt)); - } else { - if Some(*target) != self.cleanup_block { - *unwind = self.cleanup_block; - } - } - - if Some(*target) == *unwind { - *unwind == None; + } else if !self.in_cleanup_block { + // Unless this drop is in a cleanup block, add an unwind edge to + // the orignal call's cleanup block + *unwind = self.cleanup_block; } } TerminatorKind::Call { ref mut destination, ref mut cleanup, .. } => { - let mut target = None; if let Some((_, ref mut tgt)) = *destination { *tgt = self.update_target(*tgt); - target = Some(*tgt); } if let Some(tgt) = *cleanup { *cleanup = Some(self.update_target(tgt)); - } else { + } else if !self.in_cleanup_block { + // Unless this call is in a cleanup block, add an unwind edge to + // the orignal call's cleanup block *cleanup = self.cleanup_block; } - - if target == *cleanup { - *cleanup == None; - } } TerminatorKind::Assert { ref mut target, ref mut cleanup, .. 
From 638c6038ee213704fb0674b3617dd812c614be76 Mon Sep 17 00:00:00 2001
From: James Miller
Date: Sat, 24 Sep 2016 13:55:43 +1200
Subject: [PATCH 06/12] Handle inlining calls to box_free

The box_free lang item is called with a `Box`, but its definition
accepts a `*mut T`. When the call is inlined, it produces a
`Box -> *mut u8` cast that trans chokes on. This generates the cast
from `Box` to `*mut u8` manually.
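[What that cast looks like at the source level: the inliner builds the MIR equivalent of a reborrow followed by a raw-pointer cast, through two fresh temporaries. A sketch of the shape of the generated code, not the pass itself:]

    fn main() {
        let mut b: Box<u32> = Box::new(7);
        // First temporary: Rvalue::Ref(ReErased, BorrowKind::Mut, arg.deref())
        let tmp_ref: &mut u32 = &mut *b;
        // Second temporary: Rvalue::Cast(CastKind::Misc, tmp_ref, *mut T)
        let tmp_ptr: *mut u32 = tmp_ref as *mut u32;
        // box_free is then called with the raw pointer instead of the Box
        println!("would call box_free({:p})", tmp_ptr);
    }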
---
 src/librustc_mir/transform/inline.rs | 83 ++++++++++++++++++++--------
 src/librustc_trans/mir/rvalue.rs     |  6 +-
 2 files changed, 65 insertions(+), 24 deletions(-)

diff --git a/src/librustc_mir/transform/inline.rs b/src/librustc_mir/transform/inline.rs
index 68ae040b1b0ba..22a28c722568a 100644
--- a/src/librustc_mir/transform/inline.rs
+++ b/src/librustc_mir/transform/inline.rs
@@ -33,7 +33,6 @@ use super::copy_prop::CopyPropagation;

use syntax::attr;
use syntax::abi::Abi;
-use syntax_pos::Span;

use callgraph;
@@ -56,12 +55,7 @@ impl<'tcx> MirMapPass<'tcx> for Inline {
             map: &mut MirMap<'tcx>,
             hooks: &mut [Box<for<'s> MirPassHook<'s>>]) {

-       match tcx.sess.opts.debugging_opts.mir_opt_level {
-           Some(0) |
-           Some(1) |
-           None => { return; },
-           _ => {}
-       };
+       if tcx.sess.opts.mir_opt_level < 2 { return; }

        let _ignore = tcx.dep_graph.in_ignore();
@@ -142,6 +136,9 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> {
                continue;
            };
            for (bb, bb_data) in mir.basic_blocks().iter_enumerated() {
+               // Don't inline calls that are in cleanup blocks.
+               if bb_data.is_cleanup { continue; }
+
                // Only consider direct calls to functions
                let terminator = bb_data.terminator();
                if let TerminatorKind::Call {
@@ -448,8 +445,7 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> {
                debug!("Inlined {:?} into {:?}", callsite.callee, callsite.caller);

-               let call_scope = terminator.source_info.scope;
-               let call_span = terminator.source_info.span;
+               let is_box_free = Some(callsite.callee) == self.tcx.lang_items.box_free_fn();

                let bb_len = caller_mir.basic_blocks().len();
                let mut var_map = IndexVec::with_capacity(callee_mir.var_decls.len());
@@ -459,10 +455,10 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> {

                for mut scope in callee_mir.visibility_scopes {
                    if scope.parent_scope.is_none() {
-                       scope.parent_scope = Some(call_scope);
+                       scope.parent_scope = Some(callsite.location.scope);
                    }

-                   scope.span = call_span;
+                   scope.span = callsite.location.span;

                    let idx = caller_mir.visibility_scopes.push(scope);
                    scope_map.push(idx);
@@ -532,8 +528,58 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> {
                let return_block = destination.1;

                // Copy the arguments if needed.
-               let args : Vec<_> = {
+               let args : Vec<_> = if is_box_free {
+                   assert!(args.len() == 1);
+                   // box_free takes a Box, but is defined with a *mut T, inlining
+                   // needs to generate the cast.
+
+                   let arg = if let Operand::Consume(ref lval) = args[0] {
+                       lval.clone()
+                   } else {
+                       bug!("Constant arg to \"box_free\"");
+                   };
+                   let arg = Rvalue::Ref(
+                       self.tcx.mk_region(ty::ReErased),
+                       BorrowKind::Mut,
+                       arg.deref());
+
+                   let ty = arg.ty(caller_mir, self.tcx).expect("Rvalue has no type!");
+                   let temp = TempDecl { ty: ty };
+                   let tmp = caller_mir.temp_decls.push(temp);
+                   let tmp = Lvalue::Temp(tmp);
+
+                   let stmt = Statement {
+                       source_info: callsite.location,
+                       kind: StatementKind::Assign(tmp.clone(), arg)
+                   };
+
+                   caller_mir[callsite.bb]
+                       .statements.push(stmt);
+
+                   let ptr_ty = args[0].ty(caller_mir, self.tcx);
+                   let pointee_ty = match ptr_ty.sty {
+                       ty::TyBox(ty) => ty,
+                       ty::TyRawPtr(tm) | ty::TyRef(_, tm) => tm.ty,
+                       _ => bug!("Invalid type `{:?}` for call to box_free", ptr_ty)
+                   };
+                   let ptr_ty = self.tcx.mk_mut_ptr(pointee_ty);
+
+                   let raw_ptr = Rvalue::Cast(CastKind::Misc, Operand::Consume(tmp), ptr_ty);
+
+                   let temp = TempDecl { ty: ptr_ty };
+                   let tmp = caller_mir.temp_decls.push(temp);
+                   let tmp = Lvalue::Temp(tmp);
+
+                   let stmt = Statement {
+                       source_info: callsite.location,
+                       kind: StatementKind::Assign(tmp.clone(), raw_ptr)
+                   };
+
+                   caller_mir[callsite.bb]
+                       .statements.push(stmt);
+
+                   vec![Operand::Consume(tmp)]
+               } else {
                    let tcx = self.tcx;
                    args.iter().map(|a| {
                        if let Operand::Consume(Lvalue::Temp(_)) = *a {
@@ -567,7 +613,6 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> {
                    tmp_map: temp_map,
                    scope_map: scope_map,
                    promoted_map: promoted_map,
-                   inline_location: callsite.location,
                    destination: dest,
                    return_block: return_block,
                    cleanup_block: cleanup,
@@ -581,7 +626,7 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> {
                let terminator = Terminator {
-                   source_info: terminator.source_info,
+                   source_info: callsite.location,
                    kind: TerminatorKind::Goto { target: BasicBlock::new(bb_len) }
                };
@@ -625,7 +670,6 @@ struct Integrator<'a, 'tcx: 'a> {
    tmp_map: IndexVec<Temp, Temp>,
    scope_map: IndexVec<VisibilityScope, VisibilityScope>,
    promoted_map: IndexVec<Promoted, Promoted>,
-   inline_location: SourceInfo,
    destination: Lvalue<'tcx>,
    return_block: BasicBlock,
    cleanup_block: Option<BasicBlock>,
@@ -687,6 +731,8 @@ impl<'a, 'tcx> MutVisitor<'tcx> for Integrator<'a, 'tcx> {
    fn visit_terminator_kind(&mut self, block: BasicBlock,
                             kind: &mut TerminatorKind<'tcx>, loc: Location) {
+       self.super_terminator_kind(block, kind, loc);
+
        match *kind {
            TerminatorKind::Goto { ref mut target} => {
                *target = self.update_target(*target);
@@ -744,17 +790,10 @@ impl<'a, 'tcx> MutVisitor<'tcx> for Integrator<'a, 'tcx> {
            }
            TerminatorKind::Unreachable => { }
        }
-
-       self.super_terminator_kind(block, kind, loc);
    }

    fn visit_visibility_scope(&mut self, scope: &mut VisibilityScope) {
        *scope = self.scope_map[*scope];
    }
-   fn visit_span(&mut self, span: &mut Span) {
-       // FIXME: probably shouldn't use the inline location span,
-       // but not doing so causes errors
-       *span = self.inline_location.span;
-   }

    fn visit_literal(&mut self, literal: &mut Literal<'tcx>, loc: Location) {
        if let Literal::Promoted { ref mut index } = *literal {
diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs
index 21b019d7e24df..9119d6c32fe7c 100644
--- a/src/librustc_trans/mir/rvalue.rs
+++ b/src/librustc_trans/mir/rvalue.rs
@@ -265,8 +265,10 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
            }
            mir::CastKind::Misc => {
                debug_assert!(common::type_is_immediate(bcx.ccx(), cast_ty));
-               let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast");
-               let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
+               let r_t_in = CastTy::from_ty(operand.ty)
+                   .unwrap_or_else(|| bug!("bad input type `{:?}` for cast", operand.ty));
+               let r_t_out = CastTy::from_ty(cast_ty)
+                   .unwrap_or_else(|| bug!("bad output type `{:?}` for cast", cast_ty));
                let ll_t_in = type_of::immediate_type_of(bcx.ccx(), operand.ty);
                let ll_t_out = type_of::immediate_type_of(bcx.ccx(), cast_ty);
                let (llval, signed) = if let CastTy::Int(IntTy::CEnum) = r_t_in {

From b9d21d7adaa5c8c9f874e3474986d15ad93edc50 Mon Sep 17 00:00:00 2001
From: James Miller
Date: Sat, 24 Sep 2016 14:25:44 +1200
Subject: [PATCH 07/12] Ignore lines where we don't have a source

We don't have the source for spans from other crates. Inlining caused
trans to emit a const-eval warning for code from another crate, which
made the json error emitter panic, since it assumed it could always
get the text for a line.
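[The shape of the fix, reduced to a standalone sketch — the struct and lookup function below are stand-ins for DiagnosticSpanLine and FileMap::get_line: make the per-line constructor return Option and let the caller skip lines it cannot source, instead of unwrapping:]

    struct Line {
        text: String,
    }

    // Stand-in for FileMap::get_line: pretend only even lines have source.
    fn get_line(index: usize) -> Option<&'static str> {
        if index % 2 == 0 { Some("let x = 1;") } else { None }
    }

    fn line_from_source(index: usize) -> Option<Line> {
        // map() propagates the missing-source case as None...
        get_line(index).map(|text| Line { text: text.to_owned() })
    }

    fn main() {
        // ...and filter_map() drops the lines where unwrap() would have panicked.
        let lines: Vec<Line> = (0..5).filter_map(line_from_source).collect();
        println!("kept {} of 5 lines", lines.len());
    }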
---
 src/libsyntax/json.rs | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/src/libsyntax/json.rs b/src/libsyntax/json.rs
index a40c30b3e3397..bda8fe1783b7c 100644
--- a/src/libsyntax/json.rs
+++ b/src/libsyntax/json.rs
@@ -283,13 +283,14 @@ impl DiagnosticSpanLine {
    fn line_from_filemap(fm: &syntax_pos::FileMap,
                         index: usize,
                         h_start: usize,
-                        h_end: usize)
-                        -> DiagnosticSpanLine {
-       DiagnosticSpanLine {
-           text: fm.get_line(index).unwrap().to_owned(),
-           highlight_start: h_start,
-           highlight_end: h_end,
-       }
+                        h_end: usize) -> Option<DiagnosticSpanLine> {
+       fm.get_line(index).map(|text| {
+           DiagnosticSpanLine {
+               text: text.to_owned(),
+               highlight_start: h_start,
+               highlight_end: h_end,
+           }
+       })
    }

    /// Create a list of DiagnosticSpanLines from span - each line with any part
@@ -301,7 +302,7 @@ impl DiagnosticSpanLine {
            let fm = &*lines.file;
            lines.lines
                 .iter()
-                .map(|line| {
+                .filter_map(|line| {
                     DiagnosticSpanLine::line_from_filemap(fm,
                                                           line.line_index,
                                                           line.start_col.0 + 1,
@@ -343,4 +344,3 @@ impl JsonEmitter {
        }
    }
}
-

From 11da9d65ad69c94f5743687c4c698ffaf3f80d71 Mon Sep 17 00:00:00 2001
From: James Miller
Date: Sat, 24 Sep 2016 15:21:39 +1200
Subject: [PATCH 08/12] Try to find an appropriate span for const-eval warnings

Inlining can cause trans to emit const-eval warnings for code in a
different crate. Since we likely don't have any source available,
search up the visibility scopes until we find a span we do have a
source for. If there is no such span, we don't emit the error.
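[The search itself is a simple parent-pointer walk. Distilled into a runnable form — scopes here are a plain vector of (span, parent) pairs standing in for the MIR's visibility_scopes, and a non-empty string stands in for "we have source for this span":]

    #[derive(Clone, Copy)]
    struct ScopeData {
        span: &'static str,    // stand-in for a Span
        parent: Option<usize>, // stand-in for parent_scope
    }

    // Walk from `scope` toward the root, returning the first span for
    // which we actually have source.
    fn diag_span(scopes: &[ScopeData], mut scope: Option<usize>) -> Option<&'static str> {
        while let Some(s) = scope {
            let data = scopes[s];
            if !data.span.is_empty() {
                return Some(data.span);
            }
            scope = data.parent;
        }
        None // no usable span anywhere: the caller emits no warning
    }

    fn main() {
        let scopes = [
            ScopeData { span: "src/lib.rs:10", parent: None }, // root scope
            ScopeData { span: "", parent: Some(0) },           // inlined, no source
        ];
        println!("{:?}", diag_span(&scopes, Some(1)));
    }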
---
 src/librustc_trans/mir/block.rs | 32 +++++++++++++++++++++++++++-----
 src/libsyntax/codemap.rs        |  6 ++++++
 2 files changed, 33 insertions(+), 5 deletions(-)

diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs
index baeafbe3e346f..3f52c387e40f9 100644
--- a/src/librustc_trans/mir/block.rs
+++ b/src/librustc_trans/mir/block.rs
@@ -31,6 +31,7 @@ use type_::Type;

use rustc_data_structures::fnv::FnvHashMap;
use syntax::parse::token;
+use syntax_pos::Span;

use super::{MirContext, LocalRef};
use super::analyze::CleanupKind;
@@ -372,11 +373,13 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
                // is also constant, then we can produce a warning.
                if const_cond == Some(!expected) {
                    if let Some(err) = const_err {
-                       let err = ConstEvalErr{ span: span, kind: err };
-                       let mut diag = bcx.tcx().sess.struct_span_warn(
-                           span, "this expression will panic at run-time");
-                       note_const_eval_err(bcx.tcx(), &err, span, "expression", &mut diag);
-                       diag.emit();
+                       if let Some(span) = self.diag_span(span, terminator.source_info.scope) {
+                           let err = ConstEvalErr{ span: span, kind: err };
+                           let mut diag = bcx.tcx().sess.struct_span_warn(
+                               span, "this expression will panic at run-time");
+                           note_const_eval_err(bcx.tcx(), &err, span, "expression", &mut diag);
+                           diag.emit();
+                       }
                    }
                }
@@ -944,6 +947,25 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
            }
        }
    }
+
+   // Attempts to get an appropriate span for diagnostics. We might not have the source
+   // available if the span is from an inlined crate, but there's usually a decent span
+   // in an ancestor scope somewhere
+   fn diag_span(&self, span: Span, scope: mir::VisibilityScope) -> Option<Span> {
+       let cm = self.fcx.ccx.sess().codemap();
+       if cm.have_source_for_span(span) { return Some(span); }
+
+       let mut scope = Some(scope);
+       while let Some(s) = scope {
+           let data = &self.mir.visibility_scopes[s];
+           if cm.have_source_for_span(data.span) {
+               return Some(data.span);
+           }
+           scope = data.parent_scope;
+       }
+
+       None
+   }
}

enum ReturnDest {
diff --git a/src/libsyntax/codemap.rs b/src/libsyntax/codemap.rs
index 6d68ce3646d53..5a2765b4b9af2 100644
--- a/src/libsyntax/codemap.rs
+++ b/src/libsyntax/codemap.rs
@@ -670,6 +670,12 @@ impl CodeMap {
        }
    }

+   // Returns whether or not we have the source for the given span
+   pub fn have_source_for_span(&self, span: Span) -> bool {
+       let local_begin = self.lookup_byte_offset(span.lo);
+       local_begin.fm.src.is_some()
+   }
+
    pub fn get_filemap(&self, filename: &str) -> Option<Rc<FileMap>> {
        for fm in self.files.borrow().iter() {
            if filename == fm.name {

From f027275b60f594832b3892a888cfa3fc232a8810 Mon Sep 17 00:00:00 2001
From: James Miller
Date: Sat, 24 Sep 2016 15:46:59 +1200
Subject: [PATCH 09/12] Add some more comments to inlining and callgraph

Also fix some tidy errors

---
 src/librustc/mir/repr.rs             | 12 ++++++++----
 src/librustc_mir/callgraph.rs        |  9 +++++++++
 src/librustc_mir/transform/inline.rs | 19 ++++++++++++++++---
 3 files changed, 33 insertions(+), 7 deletions(-)

diff --git a/src/librustc/mir/repr.rs b/src/librustc/mir/repr.rs
index 3ccefc0d5ebd2..992979e8b4439 100644
--- a/src/librustc/mir/repr.rs
+++ b/src/librustc/mir/repr.rs
@@ -1576,7 +1576,8 @@ impl<'tcx> TypeFoldable<'tcx> for Rvalue<'tcx> {
            Ref(region, bk, ref lval) => Ref(region.fold_with(folder), bk, lval.fold_with(folder)),
            Len(ref lval) => Len(lval.fold_with(folder)),
            Cast(kind, ref op, ty) => Cast(kind, op.fold_with(folder), ty.fold_with(folder)),
-           BinaryOp(op, ref rhs, ref lhs) => BinaryOp(op, rhs.fold_with(folder), lhs.fold_with(folder)),
+           BinaryOp(op, ref rhs, ref lhs) =>
+               BinaryOp(op, rhs.fold_with(folder), lhs.fold_with(folder)),
            CheckedBinaryOp(op, ref rhs, ref lhs) =>
                CheckedBinaryOp(op, rhs.fold_with(folder), lhs.fold_with(folder)),
            UnaryOp(op, ref val) => UnaryOp(op, val.fold_with(folder)),
@@ -1587,7 +1588,8 @@ impl<'tcx> TypeFoldable<'tcx> for Rvalue<'tcx> {
                    AggregateKind::Tuple => AggregateKind::Tuple,
                    AggregateKind::Adt(def, v, substs, n) =>
                        AggregateKind::Adt(def, v, substs.fold_with(folder), n),
-                   AggregateKind::Closure(id, substs) => AggregateKind::Closure(id, substs.fold_with(folder))
+                   AggregateKind::Closure(id, substs) =>
+                       AggregateKind::Closure(id, substs.fold_with(folder))
                };
                Aggregate(kind, fields.fold_with(folder))
            }
@@ -1608,7 +1610,8 @@ impl<'tcx> TypeFoldable<'tcx> for Rvalue<'tcx> {
            Len(ref lval) => lval.visit_with(visitor),
            Cast(_, ref op, ty) => op.visit_with(visitor) || ty.visit_with(visitor),
            BinaryOp(_, ref rhs, ref lhs) |
-           CheckedBinaryOp(_, ref rhs, ref lhs) => rhs.visit_with(visitor) || lhs.visit_with(visitor),
+           CheckedBinaryOp(_, ref rhs, ref lhs) =>
+               rhs.visit_with(visitor) || lhs.visit_with(visitor),
            UnaryOp(_, ref val) => val.visit_with(visitor),
            Box(ty) => ty.visit_with(visitor),
            Aggregate(ref kind, ref fields) => {
@@ -1619,7 +1622,8 @@ impl<'tcx> TypeFoldable<'tcx> for Rvalue<'tcx> {
                    AggregateKind::Closure(_, substs) => substs.visit_with(visitor)
                }) || fields.visit_with(visitor)
            }
-           InlineAsm { ref outputs, ref inputs, .. } => outputs.visit_with(visitor) || inputs.visit_with(visitor)
+           InlineAsm { ref outputs, ref inputs, .. } =>
+               outputs.visit_with(visitor) || inputs.visit_with(visitor)
        }
    }
}
diff --git a/src/librustc_mir/callgraph.rs b/src/librustc_mir/callgraph.rs
index 209567924ade1..7a830d77b9b9a 100644
--- a/src/librustc_mir/callgraph.rs
+++ b/src/librustc_mir/callgraph.rs
@@ -29,6 +29,8 @@ pub struct CallGraph {
}

impl CallGraph {
+   // FIXME: allow for construction of a callgraph that inspects
+   // cross-crate MIRs if available.
    pub fn build<'tcx>(map: &MirMap<'tcx>) -> CallGraph {
        let def_ids = map.map.keys();
@@ -52,10 +54,12 @@ impl CallGraph {
        callgraph
    }

+   // Iterate over the strongly-connected components of the graph
    pub fn scc_iter<'g>(&'g self) -> SCCIterator<'g> {
        SCCIterator::new(&self.graph)
    }

+   // Get the def_id for the given graph node
    pub fn def_id(&self, node: graph::NodeIndex) -> DefId {
        *self.graph.node_data(node)
    }
@@ -93,6 +97,11 @@ struct StackElement<'g> {
    children: graph::AdjacentTargets<'g, DefId, ()>
}

+/**
+ * Iterator over strongly-connected-components using Tarjan's algorithm[1]
+ *
+ * [1]: https://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
+ */
pub struct SCCIterator<'g> {
    graph: &'g graph::Graph<DefId, ()>,
    index: usize,
diff --git a/src/librustc_mir/transform/inline.rs b/src/librustc_mir/transform/inline.rs
index 22a28c722568a..08ccc66324b6d 100644
--- a/src/librustc_mir/transform/inline.rs
+++ b/src/librustc_mir/transform/inline.rs
@@ -284,7 +284,8 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> {

        let tcx = self.tcx;

-       // Don't inline closures
+       // Don't inline closures that have captures
+       // FIXME: Handle closures better
        if callee_mir.upvar_decls.len() > 0 {
            return false;
        }
@@ -441,12 +442,12 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> {
        let terminator = caller_mir[callsite.bb].terminator.take().unwrap();

        match terminator.kind {
+           // FIXME: Handle inlining of diverging calls
            TerminatorKind::Call { args, destination: Some(destination), cleanup, .. } => {
                debug!("Inlined {:?} into {:?}", callsite.callee, callsite.caller);

                let is_box_free = Some(callsite.callee) == self.tcx.lang_items.box_free_fn();
-               let bb_len = caller_mir.basic_blocks().len();
                let mut var_map = IndexVec::with_capacity(callee_mir.var_decls.len());
                let mut temp_map = IndexVec::with_capacity(callee_mir.temp_decls.len());
@@ -527,7 +528,6 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> {
                let return_block = destination.1;

-               // Copy the arguments if needed.
                let args : Vec<_> = if is_box_free {
                    assert!(args.len() == 1);
                    // box_free takes a Box, but is defined with a *mut T, inlining
@@ -580,7 +580,10 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> {
                    vec![Operand::Consume(tmp)]
                } else {
+                   // Copy the arguments if needed.
                    let tcx = self.tcx;
+                   // FIXME: Analysis of the usage of the arguments to avoid
+                   // unnecessary temporaries.
                    args.iter().map(|a| {
                        if let Operand::Consume(Lvalue::Temp(_)) = *a {
                            // Reuse the operand if it's a temporary already
@@ -606,6 +609,7 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> {
                    }).collect()
                };

+               let bb_len = caller_mir.basic_blocks.len();
                let mut integrator = Integrator {
                    block_idx: bb_len,
                    args: &args,
@@ -663,6 +667,13 @@ fn type_size_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: ty::ParameterEnvironment<'tcx>,
    })
}

+/**
+ * Integrator.
+ *
+ * Integrates blocks from the callee function into the calling function.
+ * Updates block indices, references to locals and other control flow
+ * stuff.
+ */
struct Integrator<'a, 'tcx: 'a> {
    block_idx: usize,
    args: &'a [Operand<'tcx>],
@@ -707,6 +718,8 @@ impl<'a, 'tcx> MutVisitor<'tcx> for Integrator<'a, 'tcx> {
                let idx = arg.index();
                if let Operand::Consume(ref lval) = self.args[idx] {
                    *lvalue = lval.clone();
+               } else {
+                   bug!("Arg operand `{:?}` is not an Lvalue use.", arg)
                }
            }
            _ => self.super_lvalue(lvalue, _ctxt, _location)

From 8ebd218213544acb698d9c3d7ea2ad395ae7c46a Mon Sep 17 00:00:00 2001
From: James Miller
Date: Sat, 24 Sep 2016 17:27:40 +1200
Subject: [PATCH 10/12] Address nits and do some refactoring

---
 src/librustc/mir/repr.rs                    |  14 +-
 src/librustc_data_structures/indexed_vec.rs |   4 +-
 src/librustc_mir/callgraph.rs               |   2 +-
 src/librustc_mir/transform/inline.rs        | 155 +++++++++++---------
 src/librustc_trans/mir/block.rs             |   2 +-
 src/libsyntax/json.rs                       |   2 +
 6 files changed, 96 insertions(+), 83 deletions(-)

diff --git a/src/librustc/mir/repr.rs b/src/librustc/mir/repr.rs
index 992979e8b4439..104dafece6311 100644
--- a/src/librustc/mir/repr.rs
+++ b/src/librustc/mir/repr.rs
@@ -1339,9 +1339,9 @@ impl<'tcx> TypeFoldable<'tcx> for Mir<'tcx> {
            visibility_scopes: self.visibility_scopes.clone(),
            promoted: self.promoted.fold_with(folder),
            return_ty: self.return_ty.fold_with(folder),
-           var_decls:  self.var_decls.fold_with(folder),
-           arg_decls:  self.arg_decls.fold_with(folder),
-           temp_decls:  self.temp_decls.fold_with(folder),
+           var_decls: self.var_decls.fold_with(folder),
+           arg_decls: self.arg_decls.fold_with(folder),
+           temp_decls: self.temp_decls.fold_with(folder),
            upvar_decls: self.upvar_decls.clone(),
            span: self.span,
            cache: Cache::new()
@@ -1416,7 +1416,7 @@ impl<'tcx> TypeFoldable<'tcx> for Statement<'tcx> {
        let kind = match self.kind {
            Assign(ref lval, ref rval) => Assign(lval.fold_with(folder), rval.fold_with(folder)),
-           SetDiscriminant { ref lvalue, variant_index } => SetDiscriminant{
+           SetDiscriminant { ref lvalue, variant_index } => SetDiscriminant {
                lvalue: lvalue.fold_with(folder),
                variant_index: variant_index
            },
@@ -1476,9 +1476,9 @@ impl<'tcx> TypeFoldable<'tcx> for Terminator<'tcx> {
                unwind: unwind
            },
            Call { ref func, ref args, ref destination, cleanup } => {
-               let dest = if let Some((ref loc, dest)) = *destination {
-                   Some((loc.fold_with(folder), dest))
-               } else { None };
+               let dest = destination.as_ref().map(|&(ref loc, dest)| {
+                   (loc.fold_with(folder), dest)
+               });

                Call {
                    func: func.fold_with(folder),
diff --git a/src/librustc_data_structures/indexed_vec.rs b/src/librustc_data_structures/indexed_vec.rs
index fc5509b48fe13..3977f4f7b6318 100644
--- a/src/librustc_data_structures/indexed_vec.rs
+++ b/src/librustc_data_structures/indexed_vec.rs
@@ -151,12 +151,12 @@ impl<I: Idx, T> IndexVec<I, T> {
    }

    #[inline]
-   pub fn get<'a>(&'a self, index: I) -> Option<&'a T> {
+   pub fn get(&self, index: I) -> Option<&T> {
        self.raw.get(index.index())
    }

    #[inline]
-   pub fn get_mut<'a>(&'a mut self, index: I) -> Option<&'a mut T> {
+   pub fn get_mut(&mut self, index: I) -> Option<&mut T> {
        self.raw.get_mut(index.index())
    }
}
diff --git a/src/librustc_mir/callgraph.rs b/src/librustc_mir/callgraph.rs
index 7a830d77b9b9a..a919738a52580 100644
--- a/src/librustc_mir/callgraph.rs
+++ b/src/librustc_mir/callgraph.rs
@@ -55,7 +55,7 @@ impl CallGraph {
    }

    // Iterate over the strongly-connected components of the graph
-   pub fn scc_iter<'g>(&'g self) -> SCCIterator<'g> {
+   pub fn scc_iter(&self) -> SCCIterator {
        SCCIterator::new(&self.graph)
    }
diff --git a/src/librustc_mir/transform/inline.rs b/src/librustc_mir/transform/inline.rs
index 08ccc66324b6d..c682b0d110175 100644
--- a/src/librustc_mir/transform/inline.rs
+++ b/src/librustc_mir/transform/inline.rs
@@ -9,9 +9,6 @@
// except according to those terms.

//! Inlining pass for MIR functions
-//!
-//! Inlines functions. Is quite conservative in it's conditions, functions
-//! that can unwind are not inlined.

use rustc::hir::def_id::DefId;
@@ -312,8 +309,9 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> {
        // Only inline local functions if they would be eligible for
        // cross-crate inlining. This ensures that any symbols they
        // use are reachable cross-crate
-       // FIXME: This shouldn't be necessary, trans should generate
-       // the reachable set from the MIR.
+       // FIXME(#36594): This shouldn't be necessary, and is more conservative
+       // than it could be, but trans should generate the reachable set from
+       // the MIR anyway, making any check obsolete.
        if callsite.callee.is_local() {
            // No type substs and no inline hint means this function
            // wouldn't be eligible for cross-crate inlining
@@ -341,6 +339,7 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> {
            threshold += threshold / 4;
        }

+       // FIXME: Give a bonus to functions with only a single caller
        let id = tcx.map.as_local_node_id(callsite.caller).expect("Caller not local");
        let param_env = ty::ParameterEnvironment::for_item(tcx, id);
@@ -431,7 +430,7 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> {
    }

-   fn inline_call(&self, callsite: CallSite,
+   fn inline_call(&self, callsite: CallSite<'tcx>,
                   caller_mir: &mut Mir<'tcx>, callee_mir: Mir<'tcx>) -> bool {
        // Don't inline a function into itself
@@ -532,81 +531,19 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> {
                let args : Vec<_> = if is_box_free {
                    assert!(args.len() == 1);
                    // box_free takes a Box, but is defined with a *mut T, inlining
                    // needs to generate the cast.
+                   // FIXME: we should probably just generate correct MIR in the first place...

                    let arg = if let Operand::Consume(ref lval) = args[0] {
                        lval.clone()
                    } else {
                        bug!("Constant arg to \"box_free\"");
                    };
-                   let arg = Rvalue::Ref(
-                       self.tcx.mk_region(ty::ReErased),
-                       BorrowKind::Mut,
-                       arg.deref());
-
-                   let ty = arg.ty(caller_mir, self.tcx).expect("Rvalue has no type!");
-                   let temp = TempDecl { ty: ty };
-                   let tmp = caller_mir.temp_decls.push(temp);
-                   let tmp = Lvalue::Temp(tmp);
-
-                   let stmt = Statement {
-                       source_info: callsite.location,
-                       kind: StatementKind::Assign(tmp.clone(), arg)
-                   };
-
-                   caller_mir[callsite.bb]
-                       .statements.push(stmt);

                    let ptr_ty = args[0].ty(caller_mir, self.tcx);
-                   let pointee_ty = match ptr_ty.sty {
-                       ty::TyBox(ty) => ty,
-                       ty::TyRawPtr(tm) | ty::TyRef(_, tm) => tm.ty,
-                       _ => bug!("Invalid type `{:?}` for call to box_free", ptr_ty)
-                   };
-                   let ptr_ty = self.tcx.mk_mut_ptr(pointee_ty);
-
-                   let raw_ptr = Rvalue::Cast(CastKind::Misc, Operand::Consume(tmp), ptr_ty);
-
-                   let temp = TempDecl { ty: ptr_ty };
-                   let tmp = caller_mir.temp_decls.push(temp);
-                   let tmp = Lvalue::Temp(tmp);
-
-                   let stmt = Statement {
-                       source_info: callsite.location,
-                       kind: StatementKind::Assign(tmp.clone(), raw_ptr)
-                   };
-
-                   caller_mir[callsite.bb]
-                       .statements.push(stmt);
-
-                   vec![Operand::Consume(tmp)]
+                   vec![self.cast_box_free_arg(arg, ptr_ty, &callsite, caller_mir)]
                } else {
                    // Copy the arguments if needed.
-                   let tcx = self.tcx;
-                   // FIXME: Analysis of the usage of the arguments to avoid
-                   // unnecessary temporaries.
-                   args.iter().map(|a| {
-                       if let Operand::Consume(Lvalue::Temp(_)) = *a {
-                           // Reuse the operand if it's a temporary already
-                           a.clone()
-                       } else {
-                           debug!("Creating temp for argument");
-                           // Otherwise, create a temporary for the arg
-                           let arg = Rvalue::Use(a.clone());
-
-                           let ty = arg.ty(caller_mir, tcx).expect("arg has no type!");
-
-                           let temp = TempDecl { ty: ty };
-                           let tmp = caller_mir.temp_decls.push(temp);
-                           let tmp = Lvalue::Temp(tmp);
-
-                           let stmt = Statement {
-                               source_info: callsite.location,
-                               kind: StatementKind::Assign(tmp.clone(), arg)
-                           };
-                           caller_mir[callsite.bb].statements.push(stmt);
-                           Operand::Consume(tmp)
-                       }
-                   }).collect()
+                   self.make_call_args(args, &callsite, caller_mir)
                };
@@ -656,6 +593,80 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> {
            }
        }
    }
+
+   fn cast_box_free_arg(&self, arg: Lvalue<'tcx>, ptr_ty: Ty<'tcx>,
+                        callsite: &CallSite<'tcx>, caller_mir: &mut Mir<'tcx>) -> Operand<'tcx> {
+       let arg = Rvalue::Ref(
+           self.tcx.mk_region(ty::ReErased),
+           BorrowKind::Mut,
+           arg.deref());
+
+       let ty = arg.ty(caller_mir, self.tcx).expect("Rvalue has no type!");
+       let ref_tmp = TempDecl { ty: ty };
+       let ref_tmp = caller_mir.temp_decls.push(ref_tmp);
+       let ref_tmp = Lvalue::Temp(ref_tmp);
+
+       let ref_stmt = Statement {
+           source_info: callsite.location,
+           kind: StatementKind::Assign(ref_tmp.clone(), arg)
+       };
+
+       caller_mir[callsite.bb]
+           .statements.push(ref_stmt);
+
+       let pointee_ty = match ptr_ty.sty {
+           ty::TyBox(ty) => ty,
+           ty::TyRawPtr(tm) | ty::TyRef(_, tm) => tm.ty,
+           _ => bug!("Invalid type `{:?}` for call to box_free", ptr_ty)
+       };
+       let ptr_ty = self.tcx.mk_mut_ptr(pointee_ty);
+
+       let raw_ptr = Rvalue::Cast(CastKind::Misc, Operand::Consume(ref_tmp), ptr_ty);
+
+       let cast_tmp = TempDecl { ty: ptr_ty };
+       let cast_tmp = caller_mir.temp_decls.push(cast_tmp);
+       let cast_tmp = Lvalue::Temp(cast_tmp);
+
+       let cast_stmt = Statement {
+           source_info: callsite.location,
+           kind: StatementKind::Assign(cast_tmp.clone(), raw_ptr)
+       };
+
+       caller_mir[callsite.bb]
+           .statements.push(cast_stmt);
+
+       Operand::Consume(cast_tmp)
+   }
+
+   fn make_call_args(&self, args: Vec<Operand<'tcx>>,
+                     callsite: &CallSite<'tcx>, caller_mir: &mut Mir<'tcx>) -> Vec<Operand<'tcx>> {
+       let tcx = self.tcx;
+       // FIXME: Analysis of the usage of the arguments to avoid
+       // unnecessary temporaries.
+       args.into_iter().map(|a| {
+           if let Operand::Consume(Lvalue::Temp(_)) = a {
+               // Reuse the operand if it's a temporary already
+               return a;
+           }
+
+           debug!("Creating temp for argument");
+           // Otherwise, create a temporary for the arg
+           let arg = Rvalue::Use(a);
+
+           let ty = arg.ty(caller_mir, tcx).expect("arg has no type!");
+
+           let arg_tmp = TempDecl { ty: ty };
+           let arg_tmp = caller_mir.temp_decls.push(arg_tmp);
+           let arg_tmp = Lvalue::Temp(arg_tmp);
+
+           let stmt = Statement {
+               source_info: callsite.location,
+               kind: StatementKind::Assign(arg_tmp.clone(), arg)
+           };
+           caller_mir[callsite.bb].statements.push(stmt);
+           Operand::Consume(arg_tmp)
+       }).collect()
+   }
}

fn type_size_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: ty::ParameterEnvironment<'tcx>,
@@ -787,7 +798,7 @@ impl<'a, 'tcx> MutVisitor<'tcx> for Integrator<'a, 'tcx> {
                *target = self.update_target(*target);
                if let Some(tgt) = *cleanup {
                    *cleanup = Some(self.update_target(tgt));
-               } else if !self.in_cleanup_block{
+               } else if !self.in_cleanup_block {
                    // Unless this assert is in a cleanup block, add an unwind edge to
                    // the original call's cleanup block
                    *cleanup = self.cleanup_block;
diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs
index 3f52c387e40f9..87809aa7bf88f 100644
--- a/src/librustc_trans/mir/block.rs
+++ b/src/librustc_trans/mir/block.rs
@@ -374,7 +374,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
            if const_cond == Some(!expected) {
                if let Some(err) = const_err {
                    if let Some(span) = self.diag_span(span, terminator.source_info.scope) {
-                       let err = ConstEvalErr{ span: span, kind: err };
+                       let err = ConstEvalErr { span: span, kind: err };
                        let mut diag = bcx.tcx().sess.struct_span_warn(
                            span, "this expression will panic at run-time");
                        note_const_eval_err(bcx.tcx(), &err, span, "expression", &mut diag);
diff --git a/src/libsyntax/json.rs b/src/libsyntax/json.rs
index bda8fe1783b7c..3a1e296a61bee 100644
--- a/src/libsyntax/json.rs
+++ b/src/libsyntax/json.rs
@@ -284,6 +284,8 @@ impl DiagnosticSpanLine {
                         index: usize,
                         h_start: usize,
                         h_end: usize) -> Option<DiagnosticSpanLine> {
+       // MIR inlining can cause diagnostics that reference a file we
+       // don't have any source for
        fm.get_line(index).map(|text| {
            DiagnosticSpanLine {
                text: text.to_owned(),
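[The rule make_call_args implements, as a free-standing sketch — a toy Operand enum instead of MIR's, and the printed "emit" standing for pushing an Assign statement at the callsite. Each non-temporary argument is materialized into a fresh temporary so every use inside the inlined body sees a single evaluation of the argument:]

    #[derive(Clone, Debug)]
    enum Operand {
        Temp(usize),   // already a temporary: reused as-is
        Other(String), // variable, constant, projection, ...
    }

    fn make_call_args(args: Vec<Operand>, next_temp: &mut usize) -> Vec<Operand> {
        args.into_iter().map(|a| {
            if let Operand::Temp(_) = a {
                return a; // reuse the operand if it's a temporary already
            }
            let tmp = Operand::Temp(*next_temp);
            *next_temp += 1;
            println!("emit at callsite: {:?} = {:?}", tmp, a);
            tmp
        }).collect()
    }

    fn main() {
        let mut next_temp = 2;
        let args = vec![Operand::Temp(0), Operand::Other("x.field".into())];
        println!("rewritten args: {:?}", make_call_args(args, &mut next_temp));
    }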
From 0f83f145555b951297f2a5c475a3aefbf14efe19 Mon Sep 17 00:00:00 2001
From: James Miller
Date: Sun, 25 Sep 2016 14:27:58 +1300
Subject: [PATCH 11/12] Fix rebase fallout

---
 src/librustc_mir/transform/copy_prop.rs | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/src/librustc_mir/transform/copy_prop.rs b/src/librustc_mir/transform/copy_prop.rs
index 11df8e0a8ff12..279e32f8ff729 100644
--- a/src/librustc_mir/transform/copy_prop.rs
+++ b/src/librustc_mir/transform/copy_prop.rs
@@ -35,7 +35,6 @@ use rustc::mir::transform::{MirPass, MirSource, Pass};
use rustc::mir::visit::MutVisitor;
use rustc::ty::TyCtxt;
use rustc_data_structures::indexed_vec::Idx;
-use transform::qualify_consts;

pub struct CopyPropagation;
@@ -58,7 +57,7 @@ impl<'tcx> MirPass<'tcx> for CopyPropagation {
                return
            }
            MirSource::Fn(function_node_id) => {
-               if tcx.is_const_fn(tcx, tcx.map.local_def_id(function_node_id)) {
+               if tcx.is_const_fn(tcx.map.local_def_id(function_node_id)) {
                    // Don't run on const functions, as, again, trans might not be able to evaluate
                    // the optimized IR.
                    return

From 76cc7cf058ad565349b5530425a3cac70551466f Mon Sep 17 00:00:00 2001
From: James Miller
Date: Sun, 25 Sep 2016 17:45:48 +1300
Subject: [PATCH 12/12] Traverse the MIR in depth-first order, improve debuginfo handling

Traverses the MIR's CFG instead of just iterating over the blocks, so
dead code isn't considered in the cost. Also makes debuginfo handling
more robust by only updating the source location when the span is
valid.
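[The traversal change in skeleton form — a depth-first worklist over block indices, so blocks unreachable from START never contribute cost. A toy successor closure stands in for MIR's CFG edges; the real code uses a BitVector for the visited set:]

    fn reachable_blocks(num_blocks: usize, succ: &dyn Fn(usize) -> Vec<usize>) -> Vec<usize> {
        let mut visited = vec![false; num_blocks];
        let mut work_list = vec![0]; // START_BLOCK
        let mut order = Vec::new();
        while let Some(bb) = work_list.pop() {
            if visited[bb] { continue; } // mirrors !visited.insert(bb.index())
            visited[bb] = true;
            order.push(bb); // the pass accumulates this block's cost here
            work_list.extend(succ(bb));
        }
        order
    }

    fn main() {
        // 0 -> 1 -> 3; block 2 is dead and is never costed.
        let succ = |bb: usize| match bb { 0 => vec![1], 1 => vec![3], _ => vec![] };
        println!("visited in order: {:?}", reachable_blocks(4, &succ));
    }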
---
 src/librustc_mir/transform/inline.rs       | 63 +++++++++++++++++++---
 src/librustc_trans/debuginfo/source_loc.rs | 15 ++++--
 src/libsyntax/codemap.rs                   | 27 ++++++++++
 3 files changed, 94 insertions(+), 11 deletions(-)

diff --git a/src/librustc_mir/transform/inline.rs b/src/librustc_mir/transform/inline.rs
index c682b0d110175..2c837f2b07a72 100644
--- a/src/librustc_mir/transform/inline.rs
+++ b/src/librustc_mir/transform/inline.rs
@@ -12,13 +12,14 @@

use rustc::hir::def_id::DefId;

+use rustc_data_structures::bitvec::BitVector;
use rustc_data_structures::indexed_vec::{Idx, IndexVec};
use rustc_data_structures::graph;

use rustc::dep_graph::DepNode;
use rustc::mir::mir_map::MirMap;
use rustc::mir::repr::*;
-use rustc::mir::transform::{MirMapPass, MirPassHook, MirSource, Pass};
+use rustc::mir::transform::{MirMapPass, MirPass, MirPassHook, MirSource, Pass};
use rustc::mir::visit::*;
use rustc::traits;
use rustc::ty::{self, Ty, TyCtxt};
@@ -30,6 +31,7 @@ use super::copy_prop::CopyPropagation;

use syntax::attr;
use syntax::abi::Abi;
+use syntax_pos::Span;

use callgraph;
@@ -347,7 +349,14 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> {
        let mut first_block = true;
        let mut cost = 0;

-       for blk in callee_mir.basic_blocks() {
+       // Traverse the MIR manually so we can account for the effects of
+       // inlining on the CFG.
+       let mut work_list = vec![START_BLOCK];
+       let mut visited = BitVector::new(callee_mir.basic_blocks.len());
+       while let Some(bb) = work_list.pop() {
+           if !visited.insert(bb.index()) { continue; }
+           let blk = &callee_mir.basic_blocks[bb];
+
            for stmt in &blk.statements {
                // Don't count StorageLive/StorageDead in the inlining cost.
                match stmt.kind {
@@ -357,15 +366,22 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> {
                    _ => cost += INSTR_COST
                }
            }
-           match blk.terminator().kind {
-               TerminatorKind::Drop { ref location, .. } |
-               TerminatorKind::DropAndReplace { ref location, .. } => {
+           let term = blk.terminator();
+           let mut is_drop = false;
+           match term.kind {
+               TerminatorKind::Drop { ref location, target, unwind } |
+               TerminatorKind::DropAndReplace { ref location, target, unwind, .. } => {
+                   is_drop = true;
+                   work_list.push(target);
                    // If the location doesn't actually need dropping, treat it like
                    // a regular goto.
                    let ty = location.ty(&callee_mir, tcx).subst(tcx, callsite.substs);
                    let ty = ty.to_ty(tcx);
                    if tcx.type_needs_drop_given_env(ty, &param_env) {
                        cost += CALL_PENALTY;
+                       if let Some(unwind) = unwind {
+                           work_list.push(unwind);
+                       }
                    } else {
                        cost += INSTR_COST;
                    }
@@ -391,6 +407,13 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> {
                TerminatorKind::Assert { .. } => cost += CALL_PENALTY,
                _ => cost += INSTR_COST
            }
+
+           if !is_drop {
+               for &succ in &term.successors()[..] {
+                   work_list.push(succ);
+               }
+           }
+
            first_block = false;
        }
@@ -440,6 +463,7 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> {
        let terminator = caller_mir[callsite.bb].terminator.take().unwrap();

+       let cm = self.tcx.sess.codemap();
        match terminator.kind {
            // FIXME: Handle inlining of diverging calls
            TerminatorKind::Call { args, destination: Some(destination), cleanup, .. } => {
@@ -456,9 +480,12 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> {
                for mut scope in callee_mir.visibility_scopes {
                    if scope.parent_scope.is_none() {
                        scope.parent_scope = Some(callsite.location.scope);
+                       scope.span = callee_mir.span;
                    }

-                   scope.span = callsite.location.span;
+                   if !cm.is_valid_span(scope.span) {
+                       scope.span = callsite.location.span;
+                   }

                    let idx = caller_mir.visibility_scopes.push(scope);
                    scope_map.push(idx);
@@ -466,6 +493,10 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> {
                for mut var in callee_mir.var_decls {
                    var.source_info.scope = scope_map[var.source_info.scope];
+
+                   if !cm.is_valid_span(var.source_info.span) {
+                       var.source_info.span = callsite.location.span;
+                   }
                    let idx = caller_mir.var_decls.push(var);
                    var_map.push(idx);
                }
@@ -548,12 +579,14 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> {
                let bb_len = caller_mir.basic_blocks.len();
                let mut integrator = Integrator {
+                   tcx: self.tcx,
                    block_idx: bb_len,
                    args: &args,
                    var_map: var_map,
                    tmp_map: temp_map,
                    scope_map: scope_map,
                    promoted_map: promoted_map,
+                   callsite: callsite,
                    destination: dest,
                    return_block: return_block,
                    cleanup_block: cleanup,
@@ -579,7 +612,7 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> {
                    // this function will be inlined as well
                    debug!("Running copy propagation");
                    let src = MirSource::from_node(self.tcx, id);
-                   CopyPropagation.run_pass(self.tcx, src, caller_mir);
+                   MirPass::run_pass(&mut CopyPropagation, self.tcx, src, caller_mir);
                };

                true
@@ -686,12 +719,14 @@ fn type_size_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: ty::ParameterEnvironment<'tcx>,
 * stuff.
 */
struct Integrator<'a, 'tcx: 'a> {
+   tcx: TyCtxt<'a, 'tcx, 'tcx>,
    block_idx: usize,
    args: &'a [Operand<'tcx>],
    var_map: IndexVec<Var, Var>,
    tmp_map: IndexVec<Temp, Temp>,
    scope_map: IndexVec<VisibilityScope, VisibilityScope>,
    promoted_map: IndexVec<Promoted, Promoted>,
+   callsite: CallSite<'tcx>,
    destination: Lvalue<'tcx>,
    return_block: BasicBlock,
    cleanup_block: Option<BasicBlock>,
@@ -704,6 +739,15 @@ impl<'a, 'tcx> Integrator<'a, 'tcx> {
        debug!("Updating target `{:?}`, new: `{:?}`", tgt, new);
        new
    }
+
+   fn update_span(&self, span: Span) -> Span {
+       let cm = self.tcx.sess.codemap();
+       if cm.is_valid_span(span) {
+           span
+       } else {
+           self.callsite.location.span
+       }
+   }
}

impl<'a, 'tcx> MutVisitor<'tcx> for Integrator<'a, 'tcx> {
@@ -815,10 +859,15 @@ impl<'a, 'tcx> MutVisitor<'tcx> for Integrator<'a, 'tcx> {
            TerminatorKind::Unreachable => { }
        }
    }
+
    fn visit_visibility_scope(&mut self, scope: &mut VisibilityScope) {
        *scope = self.scope_map[*scope];
    }

+   fn visit_span(&mut self, span: &mut Span) {
+       *span = self.update_span(*span);
+   }
+
    fn visit_literal(&mut self, literal: &mut Literal<'tcx>, loc: Location) {
        if let Literal::Promoted { ref mut index } = *literal {
            if let Some(p) = self.promoted_map.get(*index).cloned() {
diff --git a/src/librustc_trans/debuginfo/source_loc.rs b/src/librustc_trans/debuginfo/source_loc.rs
index 1aee27c144a36..3a20390123aef 100644
--- a/src/librustc_trans/debuginfo/source_loc.rs
+++ b/src/librustc_trans/debuginfo/source_loc.rs
@@ -54,10 +54,17 @@ pub fn set_source_location(fcx: &FunctionContext,
            }
        };

-       debug!("set_source_location: {}",
-              fcx.ccx.sess().codemap().span_to_string(span));
-       let loc = span_start(fcx.ccx, span);
-       InternalDebugLocation::new(scope, loc.line, loc.col.to_usize())
+       let cm = fcx.ccx.sess().codemap();
+       if cm.is_valid_span(span) {
+           debug!("set_source_location: {}",
+                  fcx.ccx.sess().codemap().span_to_string(span));
+           let loc = span_start(fcx.ccx, span);
+           InternalDebugLocation::new(scope, loc.line, loc.col.to_usize())
+       } else {
+           // Span is invalid, just ignore the attempt to set a new debug
+           // location
+           return;
+       }
    }
    else {
        UnknownLocation
    };
diff --git a/src/libsyntax/codemap.rs b/src/libsyntax/codemap.rs
index 5a2765b4b9af2..c63e18561fdd4 100644
--- a/src/libsyntax/codemap.rs
+++ b/src/libsyntax/codemap.rs
@@ -676,6 +676,33 @@ impl CodeMap {
        }
    }

+   // Returns whether or not the given span is valid.
+   // This allows for graceful fallback when spans end up invalid
+   pub fn is_valid_span(&self, span: Span) -> bool {
+       self.is_valid_bytepos(span.lo) && self.is_valid_bytepos(span.hi)
+   }
+
+   pub fn is_valid_bytepos(&self, bpos: BytePos) -> bool {
+       let idx = self.lookup_filemap_idx(bpos);
+       let files = self.files.borrow();
+       let map = &(*files)[idx];
+
+       let mut total_extra_bytes = 0;
+
+       for mbc in map.multibyte_chars.borrow().iter() {
+           if mbc.pos < bpos {
+               total_extra_bytes += mbc.bytes - 1;
+               if bpos.to_usize() < mbc.pos.to_usize() + mbc.bytes {
+                   return false;
+               }
+           } else {
+               break;
+           }
+       }
+
+       map.start_pos.to_usize() + total_extra_bytes <= bpos.to_usize()
+   }
+
    pub fn get_filemap(&self, filename: &str) -> Option<Rc<FileMap>> {
        for fm in self.files.borrow().iter() {
            if filename == fm.name {