diff --git a/src/librustc/dep_graph/dep_node.rs b/src/librustc/dep_graph/dep_node.rs index e99ffa95ed63c..c025da29eefcb 100644 --- a/src/librustc/dep_graph/dep_node.rs +++ b/src/librustc/dep_graph/dep_node.rs @@ -34,7 +34,7 @@ pub enum DepNode { // `Krate` value gives you access to all other items. To avoid // this fate, do not call `tcx.map.krate()`; instead, prefer // wrappers like `tcx.visit_all_items_in_krate()`. If there is no - // suitable wrapper, you can use `tcx.dep_graph.ignore()` to gain + // suitable wrapper, you can use `tcx.dep_graph.in_ignore()` to gain // access to the krate, but you must remember to add suitable // edges yourself for the individual items that you read. Krate, @@ -243,4 +243,3 @@ impl DepNode { /// them even in the absence of a tcx.) #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] pub struct WorkProductId(pub String); - diff --git a/src/librustc/mir/repr.rs b/src/librustc/mir/repr.rs index 70b0b810c6c48..104dafece6311 100644 --- a/src/librustc/mir/repr.rs +++ b/src/librustc/mir/repr.rs @@ -18,6 +18,7 @@ use rustc_data_structures::control_flow_graph::ControlFlowGraph; use hir::def_id::DefId; use ty::subst::Substs; use ty::{self, AdtDef, ClosureSubsts, Region, Ty}; +use ty::fold::{TypeFoldable, TypeFolder, TypeVisitor}; use util::ppaux; use rustc_back::slice; use hir::InlineAsm; @@ -62,7 +63,7 @@ macro_rules! newtype_index { pub struct Mir<'tcx> { /// List of basic blocks. References to basic block use a newtyped index type `BasicBlock` /// that indexes into this vector. - basic_blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>, + pub basic_blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>, /// List of visibility (lexical) scopes; these are referenced by statements /// and used (eventually) for debuginfo. Indexed by a `VisibilityScope`.
@@ -1326,3 +1327,395 @@ impl Location { } } } + +/* + * TypeFoldable implementations for MIR types + */ + +impl<'tcx> TypeFoldable<'tcx> for Mir<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + Mir { + basic_blocks: self.basic_blocks.fold_with(folder), + visibility_scopes: self.visibility_scopes.clone(), + promoted: self.promoted.fold_with(folder), + return_ty: self.return_ty.fold_with(folder), + var_decls: self.var_decls.fold_with(folder), + arg_decls: self.arg_decls.fold_with(folder), + temp_decls: self.temp_decls.fold_with(folder), + upvar_decls: self.upvar_decls.clone(), + span: self.span, + cache: Cache::new() + } + } + + fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool { + self.basic_blocks.visit_with(visitor) || + self.promoted.visit_with(visitor) || + self.return_ty.visit_with(visitor) || + self.var_decls.visit_with(visitor) || + self.arg_decls.visit_with(visitor) || + self.temp_decls.visit_with(visitor) + } +} + +impl<'tcx> TypeFoldable<'tcx> for VarDecl<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + VarDecl { + ty: self.ty.fold_with(folder), + ..self.clone() + } + } + + fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool { + self.ty.visit_with(visitor) + } +} + +impl<'tcx> TypeFoldable<'tcx> for TempDecl<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + TempDecl { + ty: self.ty.fold_with(folder), + } + } + + fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool { + self.ty.visit_with(visitor) + } +} + +impl<'tcx> TypeFoldable<'tcx> for ArgDecl<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + ArgDecl { + ty: self.ty.fold_with(folder), + ..self.clone() + } + } + + fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool { + self.ty.visit_with(visitor) + } +} + +impl<'tcx> TypeFoldable<'tcx> for BasicBlockData<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + BasicBlockData { + statements: self.statements.fold_with(folder), + terminator: self.terminator.fold_with(folder), + is_cleanup: self.is_cleanup + } + } + + fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool { + self.statements.visit_with(visitor) || self.terminator.visit_with(visitor) + } +} + +impl<'tcx> TypeFoldable<'tcx> for Statement<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + use mir::repr::StatementKind::*; + + let kind = match self.kind { + Assign(ref lval, ref rval) => Assign(lval.fold_with(folder), rval.fold_with(folder)), + SetDiscriminant { ref lvalue, variant_index } => SetDiscriminant { + lvalue: lvalue.fold_with(folder), + variant_index: variant_index + }, + StorageLive(ref lval) => StorageLive(lval.fold_with(folder)), + StorageDead(ref lval) => StorageDead(lval.fold_with(folder)), + Nop => Nop, + }; + Statement { + source_info: self.source_info, + kind: kind + } + } + + fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool { + use mir::repr::StatementKind::*; + + match self.kind { + Assign(ref lval, ref rval) => { lval.visit_with(visitor) || rval.visit_with(visitor) } + SetDiscriminant { ref lvalue, ..
} | + StorageLive(ref lvalue) | + StorageDead(ref lvalue) => lvalue.visit_with(visitor), + Nop => false, + } + } +} + +impl<'tcx> TypeFoldable<'tcx> for Terminator<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + use mir::repr::TerminatorKind::*; + + let kind = match self.kind { + Goto { target } => Goto { target: target }, + If { ref cond, targets } => If { + cond: cond.fold_with(folder), + targets: targets + }, + Switch { ref discr, adt_def, ref targets } => Switch { + discr: discr.fold_with(folder), + adt_def: adt_def, + targets: targets.clone() + }, + SwitchInt { ref discr, switch_ty, ref values, ref targets } => SwitchInt { + discr: discr.fold_with(folder), + switch_ty: switch_ty.fold_with(folder), + values: values.clone(), + targets: targets.clone() + }, + Drop { ref location, target, unwind } => Drop { + location: location.fold_with(folder), + target: target, + unwind: unwind + }, + DropAndReplace { ref location, ref value, target, unwind } => DropAndReplace { + location: location.fold_with(folder), + value: value.fold_with(folder), + target: target, + unwind: unwind + }, + Call { ref func, ref args, ref destination, cleanup } => { + let dest = destination.as_ref().map(|&(ref loc, dest)| { + (loc.fold_with(folder), dest) + }); + + Call { + func: func.fold_with(folder), + args: args.fold_with(folder), + destination: dest, + cleanup: cleanup + } + }, + Assert { ref cond, expected, ref msg, target, cleanup } => { + let msg = if let AssertMessage::BoundsCheck { ref len, ref index } = *msg { + AssertMessage::BoundsCheck { + len: len.fold_with(folder), + index: index.fold_with(folder), + } + } else { + msg.clone() + }; + Assert { + cond: cond.fold_with(folder), + expected: expected, + msg: msg, + target: target, + cleanup: cleanup + } + }, + Resume => Resume, + Return => Return, + Unreachable => Unreachable, + }; + Terminator { + source_info: self.source_info, + kind: kind + } + } + + fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool { + use mir::repr::TerminatorKind::*; + + match self.kind { + If { ref cond, .. } => cond.visit_with(visitor), + Switch { ref discr, .. } => discr.visit_with(visitor), + SwitchInt { ref discr, switch_ty, .. } => + discr.visit_with(visitor) || switch_ty.visit_with(visitor), + Drop { ref location, ..} => location.visit_with(visitor), + DropAndReplace { ref location, ref value, ..} => + location.visit_with(visitor) || value.visit_with(visitor), + Call { ref func, ref args, ref destination, .. } => { + let dest = if let Some((ref loc, _)) = *destination { + loc.visit_with(visitor) + } else { false }; + dest || func.visit_with(visitor) || args.visit_with(visitor) + }, + Assert { ref cond, ref msg, .. } => { + if cond.visit_with(visitor) { + if let AssertMessage::BoundsCheck { ref len, ref index } = *msg { + len.visit_with(visitor) || index.visit_with(visitor) + } else { + false + } + } else { + false + } + }, + Goto { ..
} | + Resume | + Return | + Unreachable => false + } + } +} + +impl<'tcx> TypeFoldable<'tcx> for Lvalue<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + match self { + &Lvalue::Projection(ref p) => Lvalue::Projection(p.fold_with(folder)), + _ => self.clone() + } + } + + fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool { + if let &Lvalue::Projection(ref p) = self { + p.visit_with(visitor) + } else { + false + } + } +} + +impl<'tcx> TypeFoldable<'tcx> for Rvalue<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + use mir::repr::Rvalue::*; + match *self { + Use(ref op) => Use(op.fold_with(folder)), + Repeat(ref op, ref len) => Repeat(op.fold_with(folder), len.fold_with(folder)), + Ref(region, bk, ref lval) => Ref(region.fold_with(folder), bk, lval.fold_with(folder)), + Len(ref lval) => Len(lval.fold_with(folder)), + Cast(kind, ref op, ty) => Cast(kind, op.fold_with(folder), ty.fold_with(folder)), + BinaryOp(op, ref rhs, ref lhs) => + BinaryOp(op, rhs.fold_with(folder), lhs.fold_with(folder)), + CheckedBinaryOp(op, ref rhs, ref lhs) => + CheckedBinaryOp(op, rhs.fold_with(folder), lhs.fold_with(folder)), + UnaryOp(op, ref val) => UnaryOp(op, val.fold_with(folder)), + Box(ty) => Box(ty.fold_with(folder)), + Aggregate(ref kind, ref fields) => { + let kind = match *kind { + AggregateKind::Vec => AggregateKind::Vec, + AggregateKind::Tuple => AggregateKind::Tuple, + AggregateKind::Adt(def, v, substs, n) => + AggregateKind::Adt(def, v, substs.fold_with(folder), n), + AggregateKind::Closure(id, substs) => + AggregateKind::Closure(id, substs.fold_with(folder)) + }; + Aggregate(kind, fields.fold_with(folder)) + } + InlineAsm { ref asm, ref outputs, ref inputs } => InlineAsm { + asm: asm.clone(), + outputs: outputs.fold_with(folder), + inputs: inputs.fold_with(folder) + } + } + } + + fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool { + use mir::repr::Rvalue::*; + match *self { + Use(ref op) => op.visit_with(visitor), + Repeat(ref op, ref len) => op.visit_with(visitor) || len.visit_with(visitor), + Ref(region, _, ref lval) => region.visit_with(visitor) || lval.visit_with(visitor), + Len(ref lval) => lval.visit_with(visitor), + Cast(_, ref op, ty) => op.visit_with(visitor) || ty.visit_with(visitor), + BinaryOp(_, ref rhs, ref lhs) | + CheckedBinaryOp(_, ref rhs, ref lhs) => + rhs.visit_with(visitor) || lhs.visit_with(visitor), + UnaryOp(_, ref val) => val.visit_with(visitor), + Box(ty) => ty.visit_with(visitor), + Aggregate(ref kind, ref fields) => { + (match *kind { + AggregateKind::Vec => false, + AggregateKind::Tuple => false, + AggregateKind::Adt(_, _, substs, _) => substs.visit_with(visitor), + AggregateKind::Closure(_, substs) => substs.visit_with(visitor) + }) || fields.visit_with(visitor) + } + InlineAsm { ref outputs, ref inputs, ..
} => + outputs.visit_with(visitor) || inputs.visit_with(visitor) + } + } +} + +impl<'tcx> TypeFoldable<'tcx> for Operand<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + match *self { + Operand::Consume(ref lval) => Operand::Consume(lval.fold_with(folder)), + Operand::Constant(ref c) => Operand::Constant(c.fold_with(folder)), + } + } + + fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool { + match *self { + Operand::Consume(ref lval) => lval.visit_with(visitor), + Operand::Constant(ref c) => c.visit_with(visitor) + } + } +} + +impl<'tcx, B, V> TypeFoldable<'tcx> for Projection<'tcx, B, V> + where B: TypeFoldable<'tcx>, V: TypeFoldable<'tcx> +{ + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + use mir::repr::ProjectionElem::*; + + let base = self.base.fold_with(folder); + let elem = match self.elem { + Deref => Deref, + Field(f, ty) => Field(f, ty.fold_with(folder)), + Index(ref v) => Index(v.fold_with(folder)), + ref elem => elem.clone() + }; + + Projection { + base: base, + elem: elem + } + } + + fn super_visit_with<Vs: TypeVisitor<'tcx>>(&self, visitor: &mut Vs) -> bool { + use mir::repr::ProjectionElem::*; + + self.base.visit_with(visitor) || + match self.elem { + Field(_, ty) => ty.visit_with(visitor), + Index(ref v) => v.visit_with(visitor), + _ => false + } + } +} + +impl<'tcx> TypeFoldable<'tcx> for Constant<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + Constant { + span: self.span.clone(), + ty: self.ty.fold_with(folder), + literal: self.literal.fold_with(folder) + } + } + fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool { + self.ty.visit_with(visitor) || self.literal.visit_with(visitor) + } +} + +impl<'tcx> TypeFoldable<'tcx> for TypedConstVal<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + TypedConstVal { + ty: self.ty.fold_with(folder), + span: self.span.clone(), + value: self.value.clone() + } + } + fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool { + self.ty.visit_with(visitor) + } +} + +impl<'tcx> TypeFoldable<'tcx> for Literal<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + match *self { + Literal::Item { def_id, substs } => Literal::Item { + def_id: def_id, + substs: substs.fold_with(folder) + }, + _ => self.clone() + } + } + fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool { + match *self { + Literal::Item { substs, .. } => substs.visit_with(visitor), _ => false } } }
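The impls above are all instances of one mechanical fold/visit pattern, so a compact standalone model may help when reading them. This is an illustrative sketch only (the names `Folder`, `Visitor` and `Expr` are invented here, not rustc's actual traits): `fold_with` rebuilds a value bottom-up through a folder, while `visit_with` combines child results with `||` so the first `true` short-circuits the traversal.

```rust
// Toy model of the fold/visit pattern used by the impls above.
trait Folder {
    fn fold_leaf(&mut self, leaf: u32) -> u32;
}
trait Visitor {
    fn visit_leaf(&mut self, leaf: u32) -> bool; // true = "found it, stop"
}

enum Expr {
    Leaf(u32),
    Pair(Box<Expr>, Box<Expr>),
}

impl Expr {
    // Rebuild the structure, letting the folder replace the leaves.
    fn fold_with<F: Folder>(&self, f: &mut F) -> Expr {
        match *self {
            Expr::Leaf(n) => Expr::Leaf(f.fold_leaf(n)),
            Expr::Pair(ref a, ref b) => {
                Expr::Pair(Box::new(a.fold_with(f)), Box::new(b.fold_with(f)))
            }
        }
    }
    // Short-circuiting query over the structure, like super_visit_with.
    fn visit_with<V: Visitor>(&self, v: &mut V) -> bool {
        match *self {
            Expr::Leaf(n) => v.visit_leaf(n),
            Expr::Pair(ref a, ref b) => a.visit_with(v) || b.visit_with(v),
        }
    }
}
```

In the real impls the interesting leaves are types, substs and regions; fields that cannot contain a type (spans, indices, `is_cleanup`) are copied or cloned through unchanged, and `Mir::super_fold_with` resets its `cache` with `Cache::new()` rather than carrying the old one over.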
diff --git a/src/librustc/ty/mod.rs b/src/librustc/ty/mod.rs index 717b8923a1635..140f2fa2bb8e1 100644 --- a/src/librustc/ty/mod.rs +++ b/src/librustc/ty/mod.rs @@ -2868,6 +2868,27 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { autoderefs)) } + /// Checks if the given def is a const fn + pub fn is_const_fn(self, def_id: DefId) -> bool { + use hir::intravisit::FnKind; + use hir::map::blocks::FnLikeNode; + + if let Some(node_id) = self.map.as_local_node_id(def_id) { + let fn_like = FnLikeNode::from_node(self.map.get(node_id)); + match fn_like.map(|f| f.kind()) { + Some(FnKind::ItemFn(_, _, _, c, ..)) => { + c == hir::Constness::Const + } + Some(FnKind::Method(_, m, ..)) => { + m.constness == hir::Constness::Const + } + _ => false + } + } else { + self.sess.cstore.is_const_fn(def_id) + } + } + pub fn upvar_capture(self, upvar_id: ty::UpvarId) -> Option<ty::UpvarCapture<'tcx>> { Some(self.tables.borrow().upvar_capture_map.get(&upvar_id).unwrap().clone()) } diff --git a/src/librustc/ty/structural_impls.rs b/src/librustc/ty/structural_impls.rs index 5a87ea1473d98..bba3e6f634693 100644 --- a/src/librustc/ty/structural_impls.rs +++ b/src/librustc/ty/structural_impls.rs @@ -12,6 +12,8 @@ use infer::type_variable; use ty::{self, Lift, Ty, TyCtxt}; use ty::fold::{TypeFoldable, TypeFolder, TypeVisitor}; +use rustc_data_structures::indexed_vec::{Idx, IndexVec}; + use std::rc::Rc; use syntax::abi; @@ -418,6 +420,16 @@ impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Vec<T> { } } +impl<'tcx, I: Idx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for IndexVec<I, T> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + self.iter().map(|t| t.fold_with(folder)).collect() + } + + fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool { + self.iter().any(|t| t.visit_with(visitor)) + } +} + impl<'tcx, T:TypeFoldable<'tcx>> TypeFoldable<'tcx> for ty::Binder<T> { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { ty::Binder(self.0.fold_with(folder)) diff --git a/src/librustc_data_structures/indexed_vec.rs b/src/librustc_data_structures/indexed_vec.rs index 9123463149936..3977f4f7b6318 100644 --- a/src/librustc_data_structures/indexed_vec.rs +++ b/src/librustc_data_structures/indexed_vec.rs @@ -149,6 +149,16 @@ impl<I: Idx, T> IndexVec<I, T> { pub fn last(&self) -> Option<I> { self.len().checked_sub(1).map(I::new) } + + #[inline] + pub fn get(&self, index: I) -> Option<&T> { + self.raw.get(index.index()) + } + + #[inline] + pub fn get_mut(&mut self, index: I) -> Option<&mut T> { + self.raw.get_mut(index.index()) + } } impl<I: Idx, T> Index<I> for IndexVec<I, T> {
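`get` and `get_mut` give `IndexVec` fallible lookups alongside the panicking `Index` impl shown above. A hedged sketch of the kind of use this enables (rustc-internal types, so this only compiles inside the compiler; the pattern mirrors `Integrator::visit_lvalue` later in this patch, where a missing map entry means "leave the index alone"):

```rust
// Sketch only: remap an index when the map covers it, else keep it.
fn remap_var(var_map: &IndexVec<Var, Var>, var: Var) -> Var {
    // `get` returns Option<&Var> instead of panicking on out-of-range.
    var_map.get(var).cloned().unwrap_or(var)
}
```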
diff --git a/src/librustc_driver/driver.rs b/src/librustc_driver/driver.rs index ae80bc53c4c36..e4585ccc70db4 100644 --- a/src/librustc_driver/driver.rs +++ b/src/librustc_driver/driver.rs @@ -1019,7 +1019,9 @@ pub fn phase_4_translate_to_llvm<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, passes.push_pass(box mir::transform::simplify_cfg::SimplifyCfg::new("elaborate-drops")); // No lifetime analysis based on borrowing can be done from here on out. + passes.push_pass(box mir::transform::inline::Inline); passes.push_pass(box mir::transform::instcombine::InstCombine::new()); + passes.push_pass(box mir::transform::deaggregator::Deaggregator); passes.push_pass(box mir::transform::copy_prop::CopyPropagation); diff --git a/src/librustc_mir/callgraph.rs b/src/librustc_mir/callgraph.rs new file mode 100644 index 0000000000000..a919738a52580 --- /dev/null +++ b/src/librustc_mir/callgraph.rs @@ -0,0 +1,251 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! MIR-based callgraph. +//! +//! This only considers direct calls + +use rustc::hir::def_id::DefId; +use rustc_data_structures::graph; + +use rustc::mir::repr::*; +use rustc::mir::visit::*; +use rustc::mir::mir_map::MirMap; + +use rustc::ty; + +use rustc::util::nodemap::DefIdMap; + +pub struct CallGraph { + node_map: DefIdMap<graph::NodeIndex>, + graph: graph::Graph<DefId, ()> +} + +impl CallGraph { + // FIXME: allow for construction of a callgraph that inspects + // cross-crate MIRs if available. + pub fn build<'tcx>(map: &MirMap<'tcx>) -> CallGraph { + let def_ids = map.map.keys(); + + let mut callgraph = CallGraph { + node_map: DefIdMap(), + graph: graph::Graph::new() + }; + + for def_id in def_ids { + let idx = callgraph.add_node(def_id); + + let mut call_visitor = CallVisitor { + caller: idx, + graph: &mut callgraph + }; + + let mir = map.map.get(&def_id).unwrap(); + call_visitor.visit_mir(&mir); + } + + callgraph + } + + // Iterate over the strongly-connected components of the graph + pub fn scc_iter(&self) -> SCCIterator { + SCCIterator::new(&self.graph) + } + + // Get the def_id for the given graph node + pub fn def_id(&self, node: graph::NodeIndex) -> DefId { + *self.graph.node_data(node) + } + + fn add_node(&mut self, id: DefId) -> graph::NodeIndex { + let graph = &mut self.graph; + *self.node_map.entry(id).or_insert_with(|| { + graph.add_node(id) + }) + } +} + +struct CallVisitor<'a> { + caller: graph::NodeIndex, + graph: &'a mut CallGraph +} + +impl<'a, 'tcx> Visitor<'tcx> for CallVisitor<'a> { + fn visit_terminator_kind(&mut self, _block: BasicBlock, + kind: &TerminatorKind<'tcx>, _loc: Location) { + if let TerminatorKind::Call { + func: Operand::Constant(ref f) + , .. } = *kind { + if let ty::TyFnDef(def_id, _, _) = f.ty.sty { + let callee = self.graph.add_node(def_id); + self.graph.graph.add_edge(self.caller, callee, ()); + } + } + } +}
+struct StackElement<'g> { + node: graph::NodeIndex, + lowlink: usize, + children: graph::AdjacentTargets<'g, DefId, ()> +} + +/** + * Iterator over strongly-connected-components using Tarjan's algorithm[1] + * + * [1]: https://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm + */ +pub struct SCCIterator<'g> { + graph: &'g graph::Graph<DefId, ()>, + index: usize, + node_indices: Vec<Option<usize>>, + scc_stack: Vec<graph::NodeIndex>, + current_scc: Vec<graph::NodeIndex>, + visit_stack: Vec<StackElement<'g>>, +} + +impl<'g> SCCIterator<'g> { + pub fn new(graph: &'g graph::Graph<DefId, ()>) -> SCCIterator<'g> { + if graph.len_nodes() == 0 { + return SCCIterator { + graph: graph, + index: 0, + node_indices: Vec::new(), + scc_stack: Vec::new(), + current_scc: Vec::new(), + visit_stack: Vec::new() + }; + } + + let first = graph::NodeIndex(0); + + SCCIterator::with_entry(graph, first) + } + + pub fn with_entry(graph: &'g graph::Graph<DefId, ()>, + entry: graph::NodeIndex) -> SCCIterator<'g> { + let mut iter = SCCIterator { + graph: graph, + index: 0, + node_indices: Vec::with_capacity(graph.len_nodes()), + scc_stack: Vec::new(), + current_scc: Vec::new(), + visit_stack: Vec::new() + }; + + iter.visit_one(entry); + + iter + } + + fn get_next(&mut self) { + self.current_scc.clear(); + + while !self.visit_stack.is_empty() { + self.visit_children(); + + let node = self.visit_stack.pop().unwrap(); + + if let Some(last) = self.visit_stack.last_mut() { + if last.lowlink > node.lowlink { + last.lowlink = node.lowlink; + } + } + + debug!("TarjanSCC: Popped node {:?} : lowlink = {:?}; index = {:?}", + node.node, node.lowlink, self.node_index(node.node).unwrap()); + + if node.lowlink != self.node_index(node.node).unwrap() { + continue; + } + + loop { + let n = self.scc_stack.pop().unwrap(); + self.current_scc.push(n); + self.set_node_index(n, !0); + if n == node.node { return; } + } + } + } + + fn visit_one(&mut self, node: graph::NodeIndex) { + self.index += 1; + let idx = self.index; + self.set_node_index(node, idx); + self.scc_stack.push(node); + self.visit_stack.push(StackElement { + node: node, + lowlink: self.index, + children: self.graph.successor_nodes(node) + }); + debug!("TarjanSCC: Node {:?} : index = {:?}", node, idx); + } + + fn visit_children(&mut self) { + while let Some(child) = self.visit_stack.last_mut().unwrap().children.next() { + if let Some(child_num) = self.node_index(child) { + let cur = self.visit_stack.last_mut().unwrap(); + if cur.lowlink > child_num { + cur.lowlink = child_num; + } + } else { + self.visit_one(child); + } + } + } + + fn node_index(&self, node: graph::NodeIndex) -> Option<usize> { + self.node_indices.get(node.node_id()).and_then(|&idx| idx) + } + + fn set_node_index(&mut self, node: graph::NodeIndex, idx: usize) { + let i = node.node_id(); + if i >= self.node_indices.len() { + self.node_indices.resize(i + 1, None); + } + self.node_indices[i] = Some(idx); + } +}
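Tarjan's algorithm pops an SCC only after every SCC reachable from it has been emitted, so `SCCIterator` yields components in reverse topological order of the condensed graph. Because the edges here run from caller to callee, leaf callees come out first, and a function's callees have therefore already been considered by the time the inliner reaches it. A hedged usage sketch (assumes the rustc-internal `CallGraph` API defined above; illustrative only):

```rust
// Illustrative only: visit call-graph SCCs callee-first.
fn dump_sccs<'tcx>(map: &MirMap<'tcx>) {
    let callgraph = CallGraph::build(map);
    for scc in callgraph.scc_iter() {
        // A one-element SCC is a non-recursive function; anything larger
        // is a cycle of mutually recursive functions.
        for &node in &scc {
            debug!("SCC member: {:?}", callgraph.def_id(node));
        }
    }
}
```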
+impl<'g> Iterator for SCCIterator<'g> { + type Item = Vec<graph::NodeIndex>; + + fn next(&mut self) -> Option<Vec<graph::NodeIndex>> { + self.get_next(); + + if self.current_scc.is_empty() { + // Try a new root for the next SCC: if the node_indices + // map doesn't contain all nodes yet, use the smallest node + // with no entry; otherwise find the first empty slot. + // + // FIXME: This should probably use a set of precomputed + // roots instead + if self.node_indices.len() < self.graph.len_nodes() { + let idx = graph::NodeIndex(self.node_indices.len()); + self.visit_one(idx); + } else { + for idx in 0..self.node_indices.len() { + if self.node_indices[idx].is_none() { + let idx = graph::NodeIndex(idx); + self.visit_one(idx); + break; + } + } + } + self.get_next(); + } + + if self.current_scc.is_empty() { + None + } else { + Some(self.current_scc.clone()) + } + } +} diff --git a/src/librustc_mir/lib.rs b/src/librustc_mir/lib.rs index 12f1eb8535a3e..6c6f46e9a8189 100644 --- a/src/librustc_mir/lib.rs +++ b/src/librustc_mir/lib.rs @@ -47,9 +47,9 @@ pub mod diagnostics; pub mod build; pub mod def_use; +pub mod callgraph; pub mod graphviz; mod hair; pub mod mir_map; pub mod pretty; pub mod transform; - diff --git a/src/librustc_mir/transform/copy_prop.rs b/src/librustc_mir/transform/copy_prop.rs index 79fd16012d9ee..279e32f8ff729 100644 --- a/src/librustc_mir/transform/copy_prop.rs +++ b/src/librustc_mir/transform/copy_prop.rs @@ -35,7 +35,6 @@ use rustc::mir::transform::{MirPass, MirSource, Pass}; use rustc::mir::visit::MutVisitor; use rustc::ty::TyCtxt; use rustc_data_structures::indexed_vec::Idx; -use transform::qualify_consts; pub struct CopyPropagation; @@ -58,7 +57,7 @@ impl<'tcx> MirPass<'tcx> for CopyPropagation { return } MirSource::Fn(function_node_id) => { - if qualify_consts::is_const_fn(tcx, tcx.map.local_def_id(function_node_id)) { + if tcx.is_const_fn(tcx.map.local_def_id(function_node_id)) { // Don't run on const functions, as, again, trans might not be able to evaluate // the optimized IR. return @@ -153,6 +152,15 @@ impl<'tcx> MirPass<'tcx> for CopyPropagation { break } } + + // Strip out nops + for blk in mir.basic_blocks_mut() { + blk.statements.retain(|stmt| if let StatementKind::Nop = stmt.kind { + false + } else { + true + }) + } } } @@ -329,6 +337,6 @@ impl<'tcx> MutVisitor<'tcx> for ConstantPropagationVisitor<'tcx> { *operand = Operand::Constant(self.constant.clone()); self.uses_replaced += 1 + } } - diff --git a/src/librustc_mir/transform/deaggregator.rs b/src/librustc_mir/transform/deaggregator.rs index 77af02c18c60e..9fc0b6006075d 100644 --- a/src/librustc_mir/transform/deaggregator.rs +++ b/src/librustc_mir/transform/deaggregator.rs @@ -22,7 +22,8 @@ impl<'tcx> MirPass<'tcx> for Deaggregator { fn run_pass<'a>(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>, source: MirSource, mir: &mut Mir<'tcx>) { let node_id = source.item_id(); - let node_path = tcx.item_path_str(tcx.map.local_def_id(node_id)); + let def_id = tcx.map.local_def_id(node_id); + let node_path = tcx.item_path_str(def_id); debug!("running on: {:?}", node_path); // we only run when mir_opt_level > 1 match tcx.sess.opts.debugging_opts.mir_opt_level { @@ -37,6 +38,9 @@ impl<'tcx> MirPass<'tcx> for Deaggregator { // In fact, we might not want to trigger in other cases. // Ex: when we could use SROA. See issue #35259 + // Don't deaggregate in const fns + if tcx.is_const_fn(def_id) { return; } + let mut curr: usize = 0; for bb in mir.basic_blocks_mut() { let idx = match get_aggregate_statement_index(curr, &bb.statements) { diff --git a/src/librustc_mir/transform/inline.rs b/src/librustc_mir/transform/inline.rs new file mode 100644 index 0000000000000..2c837f2b07a72 --- /dev/null +++ b/src/librustc_mir/transform/inline.rs @@ -0,0 +1,880 @@ +// Copyright 2016 The Rust Project Developers.
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Inlining pass for MIR functions + +use rustc::hir::def_id::DefId; + +use rustc_data_structures::bitvec::BitVector; +use rustc_data_structures::indexed_vec::{Idx, IndexVec}; +use rustc_data_structures::graph; + +use rustc::dep_graph::DepNode; +use rustc::mir::mir_map::MirMap; +use rustc::mir::repr::*; +use rustc::mir::transform::{MirMapPass, MirPass, MirPassHook, MirSource, Pass}; +use rustc::mir::visit::*; +use rustc::traits; +use rustc::ty::{self, Ty, TyCtxt}; +use rustc::ty::subst::{Subst,Substs}; +use rustc::util::nodemap::{DefIdMap, DefIdSet}; + +use super::simplify_cfg::{remove_dead_blocks, CfgSimplifier}; +use super::copy_prop::CopyPropagation; + +use syntax::attr; +use syntax::abi::Abi; +use syntax_pos::Span; + +use callgraph; + +const DEFAULT_THRESHOLD : usize = 50; +const HINT_THRESHOLD : usize = 100; + +const INSTR_COST : usize = 5; +const CALL_PENALTY : usize = 25; + +const UNKNOWN_SIZE_COST : usize = 10; + +use std::rc::Rc; + +pub struct Inline; + +impl<'tcx> MirMapPass<'tcx> for Inline { + fn run_pass<'a>( + &mut self, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + map: &mut MirMap<'tcx>, + hooks: &mut [Box<for<'s> MirPassHook<'s>>]) { + + if tcx.sess.opts.mir_opt_level < 2 { return; } + + let _ignore = tcx.dep_graph.in_ignore(); + + let callgraph = callgraph::CallGraph::build(map); + + let mut inliner = Inliner { + tcx: tcx, + foreign_mirs: DefIdMap() + }; + + let def_ids = map.map.keys(); + for &def_id in &def_ids { + let _task = tcx.dep_graph.in_task(DepNode::Mir(def_id)); + let mir = map.map.get_mut(&def_id).unwrap(); + let id = tcx.map.as_local_node_id(def_id).unwrap(); + let src = MirSource::from_node(tcx, id); + + for hook in &mut *hooks { + hook.on_mir_pass(tcx, src, mir, self, false); + } + } + + for scc in callgraph.scc_iter() { + inliner.inline_scc(map, &callgraph, &scc); + } + + for def_id in def_ids { + let _task = tcx.dep_graph.in_task(DepNode::Mir(def_id)); + let mir = map.map.get_mut(&def_id).unwrap(); + let id = tcx.map.as_local_node_id(def_id).unwrap(); + let src = MirSource::from_node(tcx, id); + + for hook in &mut *hooks { + hook.on_mir_pass(tcx, src, mir, self, true); + } + } + } +} + +impl<'tcx> Pass for Inline { } + +struct Inliner<'a, 'tcx: 'a> { + tcx: TyCtxt<'a, 'tcx, 'tcx>, + foreign_mirs: DefIdMap<Rc<Mir<'tcx>>>, +} + +#[derive(Copy, Clone)] +struct CallSite<'tcx> { + caller: DefId, + callee: DefId, + substs: &'tcx Substs<'tcx>, + bb: BasicBlock, + location: SourceInfo, +} + +impl<'a, 'tcx> Inliner<'a, 'tcx> { + fn inline_scc(&mut self, map: &mut MirMap<'tcx>, + callgraph: &callgraph::CallGraph, scc: &[graph::NodeIndex]) -> bool { + let mut callsites = Vec::new(); + let mut in_scc = DefIdSet(); + + let mut inlined_into = DefIdSet(); + + for &node in scc { + let def_id = callgraph.def_id(node); + + // Don't inspect functions from other crates + let id = if let Some(id) = self.tcx.map.as_local_node_id(def_id) { + id + } else { + continue; + }; + let src = MirSource::from_node(self.tcx, id); + if let MirSource::Fn(_) = src { + let mir = if let Some(m) = map.map.get(&def_id) { + m + } else { + continue; + }; + for (bb, bb_data) in mir.basic_blocks().iter_enumerated() { + // Don't inline calls that are in cleanup blocks.
+ if bb_data.is_cleanup { continue; } + + // Only consider direct calls to functions + let terminator = bb_data.terminator(); + if let TerminatorKind::Call { + func: Operand::Constant(ref f), .. } = terminator.kind { + if let ty::TyFnDef(callee_def_id, substs, _) = f.ty.sty { + callsites.push(CallSite { + caller: def_id, + callee: callee_def_id, + substs: substs, + bb: bb, + location: terminator.source_info + }); + } + } + } + + in_scc.insert(def_id); + } + } + + // Move callsites that are in the SCC to the end so + // they're inlined after calls to outside the SCC + let mut first_call_in_scc = callsites.len(); + + let mut i = 0; + while i < first_call_in_scc { + let f = callsites[i].caller; + if in_scc.contains(&f) { + first_call_in_scc -= 1; + callsites.swap(i, first_call_in_scc); + } else { + i += 1; + } + } + + let mut local_change; + let mut changed = false; + + loop { + local_change = false; + let mut csi = 0; + while csi < callsites.len() { + let foreign_mir; + + let callsite = callsites[csi]; + csi += 1; + + let callee_mir = { + let callee_mir : Option<&Mir<'tcx>> = if callsite.callee.is_local() { + map.map.get(&callsite.callee) + } else { + foreign_mir = self.get_foreign_mir(callsite.callee); + foreign_mir.as_ref().map(|m| &**m) + }; + + let callee_mir = if let Some(m) = callee_mir { + m + } else { + continue; + }; + + if !self.should_inline(callsite, callee_mir) { + continue; + } + + callee_mir.subst(self.tcx, callsite.substs) + }; + + let caller_mir = map.map.get_mut(&callsite.caller).unwrap(); + + let start = caller_mir.basic_blocks().len(); + + if !self.inline_call(callsite, caller_mir, callee_mir) { + continue; + } + + inlined_into.insert(callsite.caller); + + // Add callsites from inlined function + for (bb, bb_data) in caller_mir.basic_blocks().iter_enumerated().skip(start) { + // Only consider direct calls to functions + let terminator = bb_data.terminator(); + if let TerminatorKind::Call { + func: Operand::Constant(ref f), .. } = terminator.kind { + if let ty::TyFnDef(callee_def_id, substs, _) = f.ty.sty { + // Don't inline the same function multiple times. + if callsite.callee != callee_def_id { + callsites.push(CallSite { + caller: callsite.caller, + callee: callee_def_id, + substs: substs, + bb: bb, + location: terminator.source_info + }); + } + } + } + } + + + csi -= 1; + if scc.len() == 1 { + callsites.swap_remove(csi); + } else { + callsites.remove(csi); + } + + local_change = true; + changed = true; + } + + if !local_change { + break; + } + }
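The `first_call_in_scc` loop above is an unstable partition: it swaps intra-SCC callsites behind the boundary so calls that leave the SCC are inlined first. The same idiom in isolation, as a minimal self-contained sketch (`partition_to_tail` is a hypothetical helper, not part of the patch):

```rust
// Move all elements matching `pred` to the tail of the slice, returning
// the index of the first matching element. Relative order is not kept.
fn partition_to_tail<T, P: Fn(&T) -> bool>(v: &mut [T], pred: P) -> usize {
    let mut boundary = v.len();
    let mut i = 0;
    while i < boundary {
        if pred(&v[i]) {
            // Swap the match to the tail and re-examine the element
            // that just landed at `i`.
            boundary -= 1;
            v.swap(i, boundary);
        } else {
            i += 1;
        }
    }
    boundary
}
```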
+ // Simplify functions we inlined into. + for def_id in inlined_into { + let caller_mir = map.map.get_mut(&def_id).unwrap(); + debug!("Running simplify cfg on {:?}", def_id); + CfgSimplifier::new(caller_mir).simplify(); + remove_dead_blocks(caller_mir); + } + changed + } + + fn get_foreign_mir(&mut self, def_id: DefId) -> Option<Rc<Mir<'tcx>>> { + if let Some(mir) = self.foreign_mirs.get(&def_id).cloned() { + return Some(mir); + } + // Cache the foreign MIR + let mir = self.tcx.sess.cstore.maybe_get_item_mir(self.tcx, def_id); + let mir = mir.map(Rc::new); + if let Some(ref mir) = mir { + self.foreign_mirs.insert(def_id, mir.clone()); + } + + mir + } + + fn should_inline(&self, callsite: CallSite<'tcx>, + callee_mir: &'a Mir<'tcx>) -> bool { + + let tcx = self.tcx; + + // Don't inline closures that have captures + // FIXME: Handle closures better + if callee_mir.upvar_decls.len() > 0 { + return false; + } + + // Don't inline calls to trait methods + // FIXME: Should try to resolve it to a concrete method, and + // only bail if that isn't possible + let trait_def = tcx.trait_of_item(callsite.callee); + if trait_def.is_some() { return false; } + + let attrs = tcx.get_attrs(callsite.callee); + let hint = attr::find_inline_attr(None, &attrs[..]); + + let hinted = match hint { + // Just treat inline(always) as a hint for now, + // there are cases that prevent inlining that we + // need to check for first. + attr::InlineAttr::Always => true, + attr::InlineAttr::Never => return false, + attr::InlineAttr::Hint => true, + attr::InlineAttr::None => false, + }; + + // Only inline local functions if they would be eligible for + // cross-crate inlining. This ensures that any symbols they + // use are reachable cross-crate + // FIXME(#36594): This shouldn't be necessary, and is more conservative + // than it could be, but trans should generate the reachable set from + // the MIR anyway, making any check obsolete. + if callsite.callee.is_local() { + // No type substs and no inline hint means this function + // wouldn't be eligible for cross-crate inlining + if callsite.substs.types().count() == 0 && !hinted { + return false; + } + + } + + let mut threshold = if hinted { + HINT_THRESHOLD + } else { + DEFAULT_THRESHOLD + }; + + // Significantly lower the threshold for inlining cold functions + if attr::contains_name(&attrs[..], "cold") { + threshold /= 5; + } + + // Give a bonus to functions with a small number of blocks; + // we normally have two or three blocks for even + // very small functions. + if callee_mir.basic_blocks().len() <= 3 { + threshold += threshold / 4; + } + + // FIXME: Give a bonus to functions with only a single caller + + let id = tcx.map.as_local_node_id(callsite.caller).expect("Caller not local"); + let param_env = ty::ParameterEnvironment::for_item(tcx, id); + + let mut first_block = true; + let mut cost = 0; + + // Traverse the MIR manually so we can account for the effects of + // inlining on the CFG. + let mut work_list = vec![START_BLOCK]; + let mut visited = BitVector::new(callee_mir.basic_blocks.len()); + while let Some(bb) = work_list.pop() { + if !visited.insert(bb.index()) { continue; } + let blk = &callee_mir.basic_blocks[bb]; + + for stmt in &blk.statements { + // Don't count StorageLive/StorageDead in the inlining cost.
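+ // (Cost model, summarized: every other statement costs INSTR_COST = 5; the terminator match below charges CALL_PENALTY = 25 for calls, asserts and drops that run real drop glue, INSTR_COST otherwise, and a diverging entry block zeroes the threshold.)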
+ match stmt.kind { + StatementKind::StorageLive(_) | + StatementKind::StorageDead(_) | + StatementKind::Nop => {} + _ => cost += INSTR_COST + } + } + let term = blk.terminator(); + let mut is_drop = false; + match term.kind { + TerminatorKind::Drop { ref location, target, unwind } | + TerminatorKind::DropAndReplace { ref location, target, unwind, .. } => { + is_drop = true; + work_list.push(target); + // If the location doesn't actually need dropping, treat it like + // a regular goto. + let ty = location.ty(&callee_mir, tcx).subst(tcx, callsite.substs); + let ty = ty.to_ty(tcx); + if tcx.type_needs_drop_given_env(ty, ¶m_env) { + cost += CALL_PENALTY; + if let Some(unwind) = unwind { + work_list.push(unwind); + } + } else { + cost += INSTR_COST; + } + } + + TerminatorKind::Unreachable | + TerminatorKind::Call { destination: None, .. } if first_block => { + // If the function always diverges, don't inline + // unless the cost is zero + threshold = 0; + } + + TerminatorKind::Call {func: Operand::Constant(ref f), .. } => { + if let ty::TyFnDef(.., f) = f.ty.sty { + // Don't give intrinsics the extra penalty for calls + if f.abi == Abi::RustIntrinsic || f.abi == Abi::PlatformIntrinsic { + cost += INSTR_COST; + } else { + cost += CALL_PENALTY; + } + } + } + TerminatorKind::Assert { .. } => cost += CALL_PENALTY, + _ => cost += INSTR_COST + } + + if !is_drop { + for &succ in &term.successors()[..] { + work_list.push(succ); + } + } + + first_block = false; + } + + // Count up the cost of local variables and temps, if we know the size + // use that, otherwise we use a moderately-large dummy cost. + + let ptr_size = tcx.data_layout.pointer_size.bytes(); + + for v in &callee_mir.var_decls { + let ty = v.ty.subst(tcx, callsite.substs); + // Cost of the var is the size in machine-words, if we know + // it. + if let Some(size) = type_size_of(tcx, param_env.clone(), ty) { + cost += (size / ptr_size) as usize; + } else { + cost += UNKNOWN_SIZE_COST; + } + } + for t in &callee_mir.temp_decls { + let ty = t.ty.subst(tcx, callsite.substs); + // Cost of the var is the size in machine-words, if we know + // it. + if let Some(size) = type_size_of(tcx, param_env.clone(), ty) { + cost += (size / ptr_size) as usize; + } else { + cost += UNKNOWN_SIZE_COST; + } + } + + debug!("Inline cost for {:?} is {}", callsite.callee, cost); + + if let attr::InlineAttr::Always = hint { + true + } else { + cost <= threshold + } + } + + + fn inline_call(&self, callsite: CallSite<'tcx>, + caller_mir: &mut Mir<'tcx>, callee_mir: Mir<'tcx>) -> bool { + + // Don't inline a function into itself + if callsite.caller == callsite.callee { return false; } + + let _task = self.tcx.dep_graph.in_task(DepNode::Mir(callsite.caller)); + + + let terminator = caller_mir[callsite.bb].terminator.take().unwrap(); + let cm = self.tcx.sess.codemap(); + match terminator.kind { + // FIXME: Handle inlining of diverging calls + TerminatorKind::Call { args, destination: Some(destination), cleanup, .. 
} => { + + debug!("Inlined {:?} into {:?}", callsite.callee, callsite.caller); + + let is_box_free = Some(callsite.callee) == self.tcx.lang_items.box_free_fn(); + + let mut var_map = IndexVec::with_capacity(callee_mir.var_decls.len()); + let mut temp_map = IndexVec::with_capacity(callee_mir.temp_decls.len()); + let mut scope_map = IndexVec::with_capacity(callee_mir.visibility_scopes.len()); + let mut promoted_map = IndexVec::with_capacity(callee_mir.promoted.len()); + + for mut scope in callee_mir.visibility_scopes { + if scope.parent_scope.is_none() { + scope.parent_scope = Some(callsite.location.scope); + scope.span = callee_mir.span; + } + + if !cm.is_valid_span(scope.span) { + scope.span = callsite.location.span; + } + + let idx = caller_mir.visibility_scopes.push(scope); + scope_map.push(idx); + } + + for mut var in callee_mir.var_decls { + var.source_info.scope = scope_map[var.source_info.scope]; + + if !cm.is_valid_span(var.source_info.span) { + var.source_info.span = callsite.location.span; + } + let idx = caller_mir.var_decls.push(var); + var_map.push(idx); + } + + for temp in callee_mir.temp_decls { + let idx = caller_mir.temp_decls.push(temp); + temp_map.push(idx); + } + + for p in callee_mir.promoted { + let idx = caller_mir.promoted.push(p); + promoted_map.push(idx); + } + + // If the call is something like `a[*i] = f(i)`, where + // `i : &mut usize`, then just duplicating the `a[*i]` + // Lvalue could result in two different locations if `f` + // writes to `i`. To prevent this we need to create a temporary + // borrow of the lvalue and pass the destination as `*temp` instead. + fn dest_needs_borrow(lval: &Lvalue) -> bool { + match *lval { + Lvalue::Projection(ref p) => { + match p.elem { + ProjectionElem::Deref | + ProjectionElem::Index(_) => true, + _ => dest_needs_borrow(&p.base) + } + } + // Static variables need a borrow because the callee + // might modify the same static. + Lvalue::Static(_) => true, + _ => false + } + } + + let dest = if dest_needs_borrow(&destination.0) { + debug!("Creating temp for return destination"); + let dest = Rvalue::Ref( + self.tcx.mk_region(ty::ReErased), + BorrowKind::Mut, + destination.0); + + let ty = dest.ty(caller_mir, self.tcx).expect("Rvalue has no type!"); + + let temp = TempDecl { ty: ty }; + let tmp = caller_mir.temp_decls.push(temp); + let tmp = Lvalue::Temp(tmp); + + let stmt = Statement { + source_info: callsite.location, + kind: StatementKind::Assign(tmp.clone(), dest) + }; + caller_mir[callsite.bb] + .statements.push(stmt); + tmp.deref() + } else { + destination.0 + }; + + let return_block = destination.1; + + let args : Vec<_> = if is_box_free { + assert!(args.len() == 1); + // box_free takes a Box, but is defined with a *mut T, inlining + // needs to generate the cast. + // FIXME: we should probably just generate correct MIR in the first place... + + let arg = if let Operand::Consume(ref lval) = args[0] { + lval.clone() + } else { + bug!("Constant arg to \"box_free\""); + }; + + let ptr_ty = args[0].ty(caller_mir, self.tcx); + vec![self.cast_box_free_arg(arg, ptr_ty, &callsite, caller_mir)] + } else { + // Copy the arguments if needed. 
+ self.make_call_args(args, &callsite, caller_mir) + }; + + let bb_len = caller_mir.basic_blocks.len(); + let mut integrator = Integrator { + tcx: self.tcx, + block_idx: bb_len, + args: &args, + var_map: var_map, + tmp_map: temp_map, + scope_map: scope_map, + promoted_map: promoted_map, + callsite: callsite, + destination: dest, + return_block: return_block, + cleanup_block: cleanup, + in_cleanup_block: false + }; + + + for (bb, mut block) in callee_mir.basic_blocks.into_iter_enumerated() { + integrator.visit_basic_block_data(bb, &mut block); + caller_mir.basic_blocks_mut().push(block); + } + + let terminator = Terminator { + source_info: callsite.location, + kind: TerminatorKind::Goto { target: BasicBlock::new(bb_len) } + }; + + caller_mir[callsite.bb].terminator = Some(terminator); + + if let Some(id) = self.tcx.map.as_local_node_id(callsite.caller) { + // Run copy propagation on the function to clean up any unnecessary + // assignments from integration. This also increases the chance that + // this function will be inlined as well + debug!("Running copy propagation"); + let src = MirSource::from_node(self.tcx, id); + MirPass::run_pass(&mut CopyPropagation, self.tcx, src, caller_mir); + }; + + true + } + kind => { + caller_mir[callsite.bb].terminator = Some(Terminator { + source_info: terminator.source_info, + kind: kind + }); + false + } + } + } + + fn cast_box_free_arg(&self, arg: Lvalue<'tcx>, ptr_ty: Ty<'tcx>, + callsite: &CallSite<'tcx>, caller_mir: &mut Mir<'tcx>) -> Operand<'tcx> { + let arg = Rvalue::Ref( + self.tcx.mk_region(ty::ReErased), + BorrowKind::Mut, + arg.deref()); + + let ty = arg.ty(caller_mir, self.tcx).expect("Rvalue has no type!"); + let ref_tmp = TempDecl { ty: ty }; + let ref_tmp = caller_mir.temp_decls.push(ref_tmp); + let ref_tmp = Lvalue::Temp(ref_tmp); + + let ref_stmt = Statement { + source_info: callsite.location, + kind: StatementKind::Assign(ref_tmp.clone(), arg) + }; + + caller_mir[callsite.bb] + .statements.push(ref_stmt); + + let pointee_ty = match ptr_ty.sty { + ty::TyBox(ty) => ty, + ty::TyRawPtr(tm) | ty::TyRef(_, tm) => tm.ty, + _ => bug!("Invalid type `{:?}` for call to box_free", ptr_ty) + }; + let ptr_ty = self.tcx.mk_mut_ptr(pointee_ty); + + let raw_ptr = Rvalue::Cast(CastKind::Misc, Operand::Consume(ref_tmp), ptr_ty); + + let cast_tmp = TempDecl { ty: ptr_ty }; + let cast_tmp = caller_mir.temp_decls.push(cast_tmp); + let cast_tmp = Lvalue::Temp(cast_tmp); + + let cast_stmt = Statement { + source_info: callsite.location, + kind: StatementKind::Assign(cast_tmp.clone(), raw_ptr) + }; + + caller_mir[callsite.bb] + .statements.push(cast_stmt); + + Operand::Consume(cast_tmp) + } + + fn make_call_args(&self, args: Vec>, + callsite: &CallSite<'tcx>, caller_mir: &mut Mir<'tcx>) -> Vec> { + let tcx = self.tcx; + // FIXME: Analysis of the usage of the arguments to avoid + // unnecessary temporaries. 
+ args.into_iter().map(|a| { + if let Operand::Consume(Lvalue::Temp(_)) = a { + // Reuse the operand if it's a temporary already + return a; + } + + debug!("Creating temp for argument"); + // Otherwise, create a temporary for the arg + let arg = Rvalue::Use(a); + + let ty = arg.ty(caller_mir, tcx).expect("arg has no type!"); + + let arg_tmp = TempDecl { ty: ty }; + let arg_tmp = caller_mir.temp_decls.push(arg_tmp); + let arg_tmp = Lvalue::Temp(arg_tmp); + + let stmt = Statement { + source_info: callsite.location, + kind: StatementKind::Assign(arg_tmp.clone(), arg) + }; + caller_mir[callsite.bb].statements.push(stmt); + Operand::Consume(arg_tmp) + }).collect() + } +} + +fn type_size_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: ty::ParameterEnvironment<'tcx>, + ty: Ty<'tcx>) -> Option<u64> { + tcx.infer_ctxt(None, Some(param_env), traits::Reveal::All).enter(|infcx| { + ty.layout(&infcx).ok().map(|layout| { + layout.size(&tcx.data_layout).bytes() + }) + }) +} + +/** + * Integrator. + * + * Integrates blocks from the callee function into the calling function. + * Updates block indices, references to locals and other control flow + * stuff. + */ +struct Integrator<'a, 'tcx: 'a> { + tcx: TyCtxt<'a, 'tcx, 'tcx>, + block_idx: usize, + args: &'a [Operand<'tcx>], + var_map: IndexVec<Var, Var>, + tmp_map: IndexVec<Temp, Temp>, + scope_map: IndexVec<VisibilityScope, VisibilityScope>, + promoted_map: IndexVec<Promoted, Promoted>, + callsite: CallSite<'tcx>, + destination: Lvalue<'tcx>, + return_block: BasicBlock, + cleanup_block: Option<BasicBlock>, + in_cleanup_block: bool, +} + +impl<'a, 'tcx> Integrator<'a, 'tcx> { + fn update_target(&self, tgt: BasicBlock) -> BasicBlock { + let new = BasicBlock::new(tgt.index() + self.block_idx); + debug!("Updating target `{:?}`, new: `{:?}`", tgt, new); + new + } + + fn update_span(&self, span: Span) -> Span { + let cm = self.tcx.sess.codemap(); + if cm.is_valid_span(span) { + span + } else { + self.callsite.location.span + } + } +} + +impl<'a, 'tcx> MutVisitor<'tcx> for Integrator<'a, 'tcx> { + fn visit_lvalue(&mut self, + lvalue: &mut Lvalue<'tcx>, + _ctxt: LvalueContext<'tcx>, + _location: Location) { + match *lvalue { + Lvalue::Var(ref mut var) => { + if let Some(v) = self.var_map.get(*var).cloned() { + *var = v; + } + } + Lvalue::Temp(ref mut tmp) => { + if let Some(t) = self.tmp_map.get(*tmp).cloned() { + *tmp = t; + } + } + Lvalue::ReturnPointer => { + *lvalue = self.destination.clone(); + } + Lvalue::Arg(arg) => { + let idx = arg.index(); + if let Operand::Consume(ref lval) = self.args[idx] { + *lvalue = lval.clone(); + } else { + bug!("Arg operand `{:?}` is not an Lvalue use.", arg) + } + } + _ => self.super_lvalue(lvalue, _ctxt, _location) + } + } + + fn visit_operand(&mut self, operand: &mut Operand<'tcx>, location: Location) { + if let Operand::Consume(Lvalue::Arg(arg)) = *operand { + let idx = arg.index(); + let new_arg = self.args[idx].clone(); + *operand = new_arg; + } else { + self.super_operand(operand, location); + } + } + + fn visit_basic_block_data(&mut self, block: BasicBlock, data: &mut BasicBlockData<'tcx>) { + self.in_cleanup_block = data.is_cleanup; + self.super_basic_block_data(block, data); + self.in_cleanup_block = false; + } + + fn visit_terminator_kind(&mut self, block: BasicBlock, + kind: &mut TerminatorKind<'tcx>, loc: Location) { + self.super_terminator_kind(block, kind, loc); + + match *kind { + TerminatorKind::Goto { ref mut target} => { + *target = self.update_target(*target); + } + TerminatorKind::If { ref mut targets, ..
} => { + targets.0 = self.update_target(targets.0); + targets.1 = self.update_target(targets.1); + } + TerminatorKind::Switch { ref mut targets, .. } | + TerminatorKind::SwitchInt { ref mut targets, .. } => { + for tgt in targets { + *tgt = self.update_target(*tgt); + } + } + TerminatorKind::Drop { ref mut target, ref mut unwind, .. } | + TerminatorKind::DropAndReplace { ref mut target, ref mut unwind, .. } => { + *target = self.update_target(*target); + if let Some(tgt) = *unwind { + *unwind = Some(self.update_target(tgt)); + } else if !self.in_cleanup_block { + // Unless this drop is in a cleanup block, add an unwind edge to + // the original call's cleanup block + *unwind = self.cleanup_block; + } + } + TerminatorKind::Call { ref mut destination, ref mut cleanup, .. } => { + if let Some((_, ref mut tgt)) = *destination { + *tgt = self.update_target(*tgt); + } + if let Some(tgt) = *cleanup { + *cleanup = Some(self.update_target(tgt)); + } else if !self.in_cleanup_block { + // Unless this call is in a cleanup block, add an unwind edge to + // the original call's cleanup block + *cleanup = self.cleanup_block; + } + } + TerminatorKind::Assert { ref mut target, ref mut cleanup, .. } => { + *target = self.update_target(*target); + if let Some(tgt) = *cleanup { + *cleanup = Some(self.update_target(tgt)); + } else if !self.in_cleanup_block { + // Unless this assert is in a cleanup block, add an unwind edge to + // the original call's cleanup block + *cleanup = self.cleanup_block; + } + } + TerminatorKind::Return => { + *kind = TerminatorKind::Goto { target: self.return_block }; + } + TerminatorKind::Resume => { + if let Some(tgt) = self.cleanup_block { + *kind = TerminatorKind::Goto { target: tgt } + } + } + TerminatorKind::Unreachable => { } + } + } + + fn visit_visibility_scope(&mut self, scope: &mut VisibilityScope) { + *scope = self.scope_map[*scope]; + } + + fn visit_span(&mut self, span: &mut Span) { + *span = self.update_span(*span); + } + + fn visit_literal(&mut self, literal: &mut Literal<'tcx>, loc: Location) { + if let Literal::Promoted { ref mut index } = *literal { + if let Some(p) = self.promoted_map.get(*index).cloned() { + *index = p; + } + } else { + self.super_literal(literal, loc); + } + } +} diff --git a/src/librustc_mir/transform/mod.rs b/src/librustc_mir/transform/mod.rs index 7bcb89b5895e7..3723296b926f0 100644 --- a/src/librustc_mir/transform/mod.rs +++ b/src/librustc_mir/transform/mod.rs @@ -18,5 +18,6 @@ pub mod promote_consts; pub mod qualify_consts; pub mod dump_mir; pub mod deaggregator; +pub mod inline; pub mod instcombine; pub mod copy_prop; diff --git a/src/librustc_mir/transform/qualify_consts.rs b/src/librustc_mir/transform/qualify_consts.rs index 2c03af2c8e97a..a1cbc334d4424 100644 --- a/src/librustc_mir/transform/qualify_consts.rs +++ b/src/librustc_mir/transform/qualify_consts.rs @@ -20,8 +20,6 @@ use rustc::dep_graph::DepNode; use rustc::hir; use rustc::hir::map as hir_map; use rustc::hir::def_id::DefId; -use rustc::hir::intravisit::FnKind; -use rustc::hir::map::blocks::FnLikeNode; use rustc::traits::{self, Reveal}; use rustc::ty::{self, TyCtxt, Ty}; use rustc::ty::cast::CastTy; @@ -116,23 +114,6 @@ impl fmt::Display for Mode { } } -pub fn is_const_fn(tcx: TyCtxt, def_id: DefId) -> bool { - if let Some(node_id) = tcx.map.as_local_node_id(def_id) { - let fn_like = FnLikeNode::from_node(tcx.map.get(node_id)); - match fn_like.map(|f| f.kind()) { - Some(FnKind::ItemFn(_, _, _, c, ..)) => { - c == hir::Constness::Const - } - Some(FnKind::Method(_, m, ..)) => {
m.constness == hir::Constness::Const - } - _ => false - } - } else { - tcx.sess.cstore.is_const_fn(def_id) - } -} - struct Qualifier<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { mode: Mode, span: Span, @@ -778,7 +759,7 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { ty::TyFnDef(def_id, _, f) => { (f.abi == Abi::PlatformIntrinsic && self.tcx.item_name(def_id).as_str().starts_with("simd_shuffle"), - is_const_fn(self.tcx, def_id)) + self.tcx.is_const_fn(def_id)) } _ => (false, false) }; @@ -997,7 +978,7 @@ impl<'tcx> MirMapPass<'tcx> for QualifyAndPromoteConstants { let src = MirSource::from_node(tcx, id); let mode = match src { MirSource::Fn(_) => { - if is_const_fn(tcx, def_id) { + if tcx.is_const_fn(def_id) { Mode::ConstFn } else { Mode::Fn diff --git a/src/librustc_mir/transform/simplify_cfg.rs b/src/librustc_mir/transform/simplify_cfg.rs index 8e1b7b44976f3..237bc0861aa94 100644 --- a/src/librustc_mir/transform/simplify_cfg.rs +++ b/src/librustc_mir/transform/simplify_cfg.rs @@ -73,7 +73,7 @@ pub struct CfgSimplifier<'a, 'tcx: 'a> { } impl<'a, 'tcx: 'a> CfgSimplifier<'a, 'tcx> { - fn new(mir: &'a mut Mir<'tcx>) -> Self { + pub fn new(mir: &'a mut Mir<'tcx>) -> Self { let mut pred_count = IndexVec::from_elem(0u32, mir.basic_blocks()); // we can't use mir.predecessors() here because that counts @@ -94,7 +94,7 @@ impl<'a, 'tcx: 'a> CfgSimplifier<'a, 'tcx> { } } - fn simplify(mut self) { + pub fn simplify(mut self) { loop { let mut changed = false; @@ -219,7 +219,7 @@ impl<'a, 'tcx: 'a> CfgSimplifier<'a, 'tcx> { } } -fn remove_dead_blocks(mir: &mut Mir) { +pub fn remove_dead_blocks(mir: &mut Mir) { let mut seen = BitVector::new(mir.basic_blocks().len()); for (bb, _) in traversal::preorder(mir) { seen.insert(bb.index()); diff --git a/src/librustc_trans/debuginfo/source_loc.rs b/src/librustc_trans/debuginfo/source_loc.rs index 1aee27c144a36..3a20390123aef 100644 --- a/src/librustc_trans/debuginfo/source_loc.rs +++ b/src/librustc_trans/debuginfo/source_loc.rs @@ -54,10 +54,17 @@ pub fn set_source_location(fcx: &FunctionContext, } }; - debug!("set_source_location: {}", - fcx.ccx.sess().codemap().span_to_string(span)); - let loc = span_start(fcx.ccx, span); - InternalDebugLocation::new(scope, loc.line, loc.col.to_usize()) + let cm = fcx.ccx.sess().codemap(); + if cm.is_valid_span(span) { + debug!("set_source_location: {}", + fcx.ccx.sess().codemap().span_to_string(span)); + let loc = span_start(fcx.ccx, span); + InternalDebugLocation::new(scope, loc.line, loc.col.to_usize()) + } else { + // The span is invalid, so just ignore the attempt to set a new debug + // location + return; + } } else { UnknownLocation }; diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index baeafbe3e346f..87809aa7bf88f 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -31,6 +31,7 @@ use type_::Type; use rustc_data_structures::fnv::FnvHashMap; use syntax::parse::token; +use syntax_pos::Span; use super::{MirContext, LocalRef}; use super::analyze::CleanupKind; @@ -372,11 +373,13 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { // is also constant, then we can produce a warning.
if const_cond == Some(!expected) { if let Some(err) = const_err { - let err = ConstEvalErr{ span: span, kind: err }; - let mut diag = bcx.tcx().sess.struct_span_warn( - span, "this expression will panic at run-time"); - note_const_eval_err(bcx.tcx(), &err, span, "expression", &mut diag); - diag.emit(); + if let Some(span) = self.diag_span(span, terminator.source_info.scope) { + let err = ConstEvalErr { span: span, kind: err }; + let mut diag = bcx.tcx().sess.struct_span_warn( + span, "this expression will panic at run-time"); + note_const_eval_err(bcx.tcx(), &err, span, "expression", &mut diag); + diag.emit(); + } } } @@ -944,6 +947,25 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { } } } + + // Attempts to get an appropriate span for diagnostics. We might not have the source + // available if the span is from an inlined crate, but there's usually a decent span + // in an ancestor scope somewhere + fn diag_span(&self, span: Span, scope: mir::VisibilityScope) -> Option<Span> { + let cm = self.fcx.ccx.sess().codemap(); + if cm.have_source_for_span(span) { return Some(span); } + + let mut scope = Some(scope); + while let Some(s) = scope { + let data = &self.mir.visibility_scopes[s]; + if cm.have_source_for_span(data.span) { + return Some(data.span); + } + scope = data.parent_scope; + } + + None + } } enum ReturnDest { diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index 21b019d7e24df..9119d6c32fe7c 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -265,8 +265,10 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { } mir::CastKind::Misc => { debug_assert!(common::type_is_immediate(bcx.ccx(), cast_ty)); - let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast"); - let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast"); + let r_t_in = CastTy::from_ty(operand.ty) + .unwrap_or_else(|| bug!("bad input type `{:?}` for cast", operand.ty)); + let r_t_out = CastTy::from_ty(cast_ty) + .unwrap_or_else(|| bug!("bad output type `{:?}` for cast", cast_ty)); let ll_t_in = type_of::immediate_type_of(bcx.ccx(), operand.ty); let ll_t_out = type_of::immediate_type_of(bcx.ccx(), cast_ty); let (llval, signed) = if let CastTy::Int(IntTy::CEnum) = r_t_in { diff --git a/src/libsyntax/codemap.rs b/src/libsyntax/codemap.rs index 6d68ce3646d53..c63e18561fdd4 100644 --- a/src/libsyntax/codemap.rs +++ b/src/libsyntax/codemap.rs @@ -670,6 +670,39 @@ impl CodeMap { } } + // Returns whether or not we have the source for the given span + pub fn have_source_for_span(&self, span: Span) -> bool { + let local_begin = self.lookup_byte_offset(span.lo); + local_begin.fm.src.is_some() + } + + // Returns whether or not the given span is valid.
+ // This allows for graceful fallback when spans end up invalid + pub fn is_valid_span(&self, span: Span) -> bool { + self.is_valid_bytepos(span.lo) && self.is_valid_bytepos(span.hi) + } + + pub fn is_valid_bytepos(&self, bpos: BytePos) -> bool { + let idx = self.lookup_filemap_idx(bpos); + let files = self.files.borrow(); + let map = &(*files)[idx]; + + let mut total_extra_bytes = 0; + + for mbc in map.multibyte_chars.borrow().iter() { + if mbc.pos < bpos { + total_extra_bytes += mbc.bytes - 1; + if bpos.to_usize() < mbc.pos.to_usize() + mbc.bytes { + return false; + } + } else { + break; + } + } + + map.start_pos.to_usize() + total_extra_bytes <= bpos.to_usize() + } + pub fn get_filemap(&self, filename: &str) -> Option<Rc<FileMap>> { for fm in self.files.borrow().iter() { if filename == fm.name { diff --git a/src/libsyntax/json.rs b/src/libsyntax/json.rs index a40c30b3e3397..3a1e296a61bee 100644 --- a/src/libsyntax/json.rs +++ b/src/libsyntax/json.rs @@ -283,13 +283,16 @@ impl DiagnosticSpanLine { fn line_from_filemap(fm: &syntax_pos::FileMap, index: usize, h_start: usize, - h_end: usize) - -> DiagnosticSpanLine { - DiagnosticSpanLine { - text: fm.get_line(index).unwrap().to_owned(), - highlight_start: h_start, - highlight_end: h_end, - } + h_end: usize) -> Option<DiagnosticSpanLine> { + // MIR inlining can cause diagnostics that reference a file we + // don't have any source for + fm.get_line(index).map(|text| { + DiagnosticSpanLine { + text: text.to_owned(), + highlight_start: h_start, + highlight_end: h_end, + } + }) } /// Create a list of DiagnosticSpanLines from span - each line with any part @@ -301,7 +304,7 @@ impl DiagnosticSpanLine { let fm = &*lines.file; lines.lines .iter() - .map(|line| { + .filter_map(|line| { DiagnosticSpanLine::line_from_filemap(fm, line.line_index, line.start_col.0 + 1, @@ -343,4 +346,3 @@ impl JsonEmitter { } } } -
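The new `is_valid_bytepos` walks the filemap's multibyte-char table to reject byte positions that land inside a multibyte character, which is the same invariant `str::is_char_boundary` enforces on an ordinary string. A self-contained illustration of that invariant (plain Rust, not rustc code):

```rust
fn main() {
    let s = "a€b"; // '€' occupies the three bytes at offsets 1..4
    assert!(s.is_char_boundary(0));  // start of 'a'
    assert!(s.is_char_boundary(1));  // start of '€'
    assert!(!s.is_char_boundary(2)); // inside '€': an invalid position
    assert!(!s.is_char_boundary(3)); // still inside '€'
    assert!(s.is_char_boundary(4));  // start of 'b'
}
```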