Skip to content

Commit

Permalink
Auto merge of #43576 - arielb1:no-unneeded-unwind, r=eddyb
Browse files Browse the repository at this point in the history
rustc_mir: don't build unused unwind cleanup blocks

When building a scope exit, don't build unwind cleanup blocks unless they will actually be used by the unwind path of a drop - the unused blocks are removed by SimplifyCfg, but they can cause a significant performance slowdown before they are removed. That fixes #43511.

Also a few other small MIR cleanups & optimizations.

r? @eddyb
  • Loading branch information
bors committed Aug 1, 2017
2 parents dd53dd5 + ce0ca76 commit 640cfc8
Show file tree
Hide file tree
Showing 13 changed files with 210 additions and 178 deletions.
13 changes: 9 additions & 4 deletions src/librustc/mir/visit.rs
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,6 @@ use ty::subst::Substs;
use ty::{ClosureSubsts, Region, Ty};
use mir::*;
use rustc_const_math::ConstUsize;
use rustc_data_structures::indexed_vec::Idx;
use syntax_pos::Span;

// # The MIR Visitor
Expand Down Expand Up @@ -260,9 +259,15 @@ macro_rules! make_mir_visitor {

fn super_mir(&mut self,
mir: & $($mutability)* Mir<'tcx>) {
for index in 0..mir.basic_blocks().len() {
let block = BasicBlock::new(index);
self.visit_basic_block_data(block, &$($mutability)* mir[block]);
// for best performance, we want to use an iterator rather
// than a for-loop, to avoid calling Mir::invalidate for
// each basic block.
macro_rules! basic_blocks {
(mut) => (mir.basic_blocks_mut().iter_enumerated_mut());
() => (mir.basic_blocks().iter_enumerated());
};
for (bb, data) in basic_blocks!($($mutability)*) {
self.visit_basic_block_data(bb, data);
}

for scope in &$($mutability)* mir.visibility_scopes {
Expand Down
2 changes: 1 addition & 1 deletion src/librustc_mir/build/block.rs
Original file line number Diff line number Diff line change
Expand Up @@ -86,7 +86,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
let tcx = this.hir.tcx();

// Enter the remainder scope, i.e. the bindings' destruction scope.
this.push_scope(remainder_scope);
this.push_scope((remainder_scope, source_info));
let_extent_stack.push(remainder_scope);

// Declare the bindings, which may create a visibility scope.
Expand Down
2 changes: 1 addition & 1 deletion src/librustc_mir/build/expr/into.rs
Original file line number Diff line number Diff line change
Expand Up @@ -237,7 +237,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
.collect();

let success = this.cfg.start_new_block();
let cleanup = this.diverge_cleanup(expr_span);
let cleanup = this.diverge_cleanup();
this.cfg.terminate(block, source_info, TerminatorKind::Call {
func: fun,
args: args,
Expand Down
2 changes: 1 addition & 1 deletion src/librustc_mir/build/matches/test.rs
Original file line number Diff line number Diff line change
Expand Up @@ -306,7 +306,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
let bool_ty = self.hir.bool_ty();
let eq_result = self.temp(bool_ty, test.span);
let eq_block = self.cfg.start_new_block();
let cleanup = self.diverge_cleanup(test.span);
let cleanup = self.diverge_cleanup();
self.cfg.terminate(block, source_info, TerminatorKind::Call {
func: Operand::Constant(box Constant {
span: test.span,
Expand Down
114 changes: 72 additions & 42 deletions src/librustc_mir/build/scope.rs
Original file line number Diff line number Diff line change
Expand Up @@ -107,6 +107,9 @@ pub struct Scope<'tcx> {
/// the extent of this scope within source code.
extent: CodeExtent,

/// the span of that extent
extent_span: Span,

/// Whether there's anything to do for the cleanup path, that is,
/// when unwinding through this scope. This includes destructors,
/// but not StorageDead statements, which don't get emitted at all
Expand All @@ -116,7 +119,7 @@ pub struct Scope<'tcx> {
/// * polluting the cleanup MIR with StorageDead creates
/// landing pads even though there's no actual destructors
/// * freeing up stack space has no effect during unwinding
pub(super) needs_cleanup: bool,
needs_cleanup: bool,

/// set of lvalues to drop when exiting this scope. This starts
/// out empty but grows as variables are declared during the
Expand Down Expand Up @@ -197,6 +200,15 @@ pub struct BreakableScope<'tcx> {
pub break_destination: Lvalue<'tcx>,
}

impl DropKind {
    /// Whether unwinding through this drop needs a cleanup block:
    /// running a value's destructor can panic, whereas a pure
    /// storage-dead marker never does.
    fn may_panic(&self) -> bool {
        if let DropKind::Value { .. } = *self {
            true
        } else {
            false
        }
    }
}

impl<'tcx> Scope<'tcx> {
/// Invalidate all the cached blocks in the scope.
///
Expand Down Expand Up @@ -282,7 +294,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>) -> BlockAnd<R>
{
debug!("in_opt_scope(opt_extent={:?}, block={:?})", opt_extent, block);
if let Some(extent) = opt_extent { self.push_scope(extent.0); }
if let Some(extent) = opt_extent { self.push_scope(extent); }
let rv = unpack!(block = f(self));
if let Some(extent) = opt_extent {
unpack!(block = self.pop_scope(extent, block));
Expand All @@ -301,7 +313,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>) -> BlockAnd<R>
{
debug!("in_scope(extent={:?}, block={:?})", extent, block);
self.push_scope(extent.0);
self.push_scope(extent);
let rv = unpack!(block = f(self));
unpack!(block = self.pop_scope(extent, block));
debug!("in_scope: exiting extent={:?} block={:?}", extent, block);
Expand All @@ -312,12 +324,13 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
/// scope and call `pop_scope` afterwards. Note that these two
/// calls must be paired; using `in_scope` as a convenience
/// wrapper maybe preferable.
pub fn push_scope(&mut self, extent: CodeExtent) {
pub fn push_scope(&mut self, extent: (CodeExtent, SourceInfo)) {
debug!("push_scope({:?})", extent);
let vis_scope = self.visibility_scope;
self.scopes.push(Scope {
visibility_scope: vis_scope,
extent: extent,
extent: extent.0,
extent_span: extent.1.span,
needs_cleanup: false,
drops: vec![],
free: None,
Expand All @@ -333,9 +346,13 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
mut block: BasicBlock)
-> BlockAnd<()> {
debug!("pop_scope({:?}, {:?})", extent, block);
// We need to have `cached_block`s available for all the drops, so we call diverge_cleanup
// to make sure all the `cached_block`s are filled in.
self.diverge_cleanup(extent.1.span);
// If we are emitting a `drop` statement, we need to have the cached
// diverge cleanup pads ready in case that drop panics.
let may_panic =
self.scopes.last().unwrap().drops.iter().any(|s| s.kind.may_panic());
if may_panic {
self.diverge_cleanup();
}
let scope = self.scopes.pop().unwrap();
assert_eq!(scope.extent, extent.0);
unpack!(block = build_scope_drops(&mut self.cfg,
Expand Down Expand Up @@ -366,6 +383,15 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
let len = self.scopes.len();
assert!(scope_count < len, "should not use `exit_scope` to pop ALL scopes");
let tmp = self.get_unit_temp();

// If we are emitting a `drop` statement, we need to have the cached
// diverge cleanup pads ready in case that drop panics.
let may_panic = self.scopes[(len - scope_count)..].iter()
.any(|s| s.drops.iter().any(|s| s.kind.may_panic()));
if may_panic {
self.diverge_cleanup();
}

{
let mut rest = &mut self.scopes[(len - scope_count)..];
while let Some((scope, rest_)) = {rest}.split_last_mut() {
Expand Down Expand Up @@ -618,7 +644,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
/// This path terminates in Resume. Returns the start of the path.
/// See module comment for more details. None indicates there’s no
/// cleanup to do at this point.
pub fn diverge_cleanup(&mut self, span: Span) -> Option<BasicBlock> {
pub fn diverge_cleanup(&mut self) -> Option<BasicBlock> {
if !self.scopes.iter().any(|scope| scope.needs_cleanup) {
return None;
}
Expand Down Expand Up @@ -652,7 +678,8 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
};

for scope in scopes.iter_mut() {
target = build_diverge_scope(hir.tcx(), cfg, &unit_temp, span, scope, target);
target = build_diverge_scope(
hir.tcx(), cfg, &unit_temp, scope.extent_span, scope, target);
}
Some(target)
}
Expand All @@ -668,7 +695,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
}
let source_info = self.source_info(span);
let next_target = self.cfg.start_new_block();
let diverge_target = self.diverge_cleanup(span);
let diverge_target = self.diverge_cleanup();
self.cfg.terminate(block, source_info,
TerminatorKind::Drop {
location: location,
Expand All @@ -686,7 +713,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
value: Operand<'tcx>) -> BlockAnd<()> {
let source_info = self.source_info(span);
let next_target = self.cfg.start_new_block();
let diverge_target = self.diverge_cleanup(span);
let diverge_target = self.diverge_cleanup();
self.cfg.terminate(block, source_info,
TerminatorKind::DropAndReplace {
location: location,
Expand All @@ -709,7 +736,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
let source_info = self.source_info(span);

let success_block = self.cfg.start_new_block();
let cleanup = self.diverge_cleanup(span);
let cleanup = self.diverge_cleanup();

self.cfg.terminate(block, source_info,
TerminatorKind::Assert {
Expand All @@ -731,45 +758,48 @@ fn build_scope_drops<'tcx>(cfg: &mut CFG<'tcx>,
mut block: BasicBlock,
arg_count: usize)
-> BlockAnd<()> {
debug!("build_scope_drops({:?} -> {:?})", block, scope);
let mut iter = scope.drops.iter().rev().peekable();
while let Some(drop_data) = iter.next() {
let source_info = scope.source_info(drop_data.span);
if let DropKind::Value { .. } = drop_data.kind {
// Try to find the next block with its cached block
// for us to diverge into in case the drop panics.
let on_diverge = iter.peek().iter().filter_map(|dd| {
match dd.kind {
DropKind::Value { cached_block } => cached_block,
DropKind::Storage => None
}
}).next();
// If there’s no `cached_block`s within current scope,
// we must look for one in the enclosing scope.
let on_diverge = on_diverge.or_else(||{
earlier_scopes.iter().rev().flat_map(|s| s.cached_block()).next()
});
let next = cfg.start_new_block();
cfg.terminate(block, source_info, TerminatorKind::Drop {
location: drop_data.location.clone(),
target: next,
unwind: on_diverge
});
block = next;
}
match drop_data.kind {
DropKind::Value { .. } |
DropKind::Storage => {
// Only temps and vars need their storage dead.
match drop_data.location {
Lvalue::Local(index) if index.index() > arg_count => {}
_ => continue
}
DropKind::Value { .. } => {
// Try to find the next block with its cached block
// for us to diverge into in case the drop panics.
let on_diverge = iter.peek().iter().filter_map(|dd| {
match dd.kind {
DropKind::Value { cached_block: None } =>
span_bug!(drop_data.span, "cached block not present?"),
DropKind::Value { cached_block } => cached_block,
DropKind::Storage => None
}
}).next();
// If there’s no `cached_block`s within current scope,
// we must look for one in the enclosing scope.
let on_diverge = on_diverge.or_else(|| {
earlier_scopes.iter().rev().flat_map(|s| s.cached_block()).next()
});
let next = cfg.start_new_block();
cfg.terminate(block, source_info, TerminatorKind::Drop {
location: drop_data.location.clone(),
target: next,
unwind: on_diverge
});
block = next;
}
DropKind::Storage => {}
}

// Drop the storage for both value and storage drops.
// Only temps and vars need their storage dead.
match drop_data.location {
Lvalue::Local(index) if index.index() > arg_count => {
cfg.push(block, Statement {
source_info: source_info,
kind: StatementKind::StorageDead(drop_data.location.clone())
});
}
_ => continue
}
}
block.unit()
Expand Down
4 changes: 2 additions & 2 deletions src/librustc_mir/transform/simplify.rs
Original file line number Diff line number Diff line change
Expand Up @@ -105,6 +105,8 @@ impl<'a, 'tcx: 'a> CfgSimplifier<'a, 'tcx> {
}

pub fn simplify(mut self) {
self.strip_nops();

loop {
let mut changed = false;

Expand Down Expand Up @@ -141,8 +143,6 @@ impl<'a, 'tcx: 'a> CfgSimplifier<'a, 'tcx> {

if !changed { break }
}

self.strip_nops()
}

// Collapse a goto chain starting from `start`
Expand Down
32 changes: 13 additions & 19 deletions src/test/mir-opt/basic_assignment.rs
Original file line number Diff line number Diff line change
Expand Up @@ -47,42 +47,36 @@ fn main() {
// StorageDead(_3);
// StorageLive(_4);
// _4 = std::option::Option<std::boxed::Box<u32>>::None;
// StorageLive(_5);
// StorageLive(_6);
// StorageLive(_7);
// _7 = _4;
// replace(_6 <- _7) -> [return: bb6, unwind: bb7];
// _6 = _4;
// replace(_5 <- _6) -> [return: bb1, unwind: bb5];
// }
// bb1: {
// resume;
// drop(_6) -> [return: bb6, unwind: bb4];
// }
// bb2: {
// drop(_4) -> bb1;
// resume;
// }
// bb3: {
// goto -> bb2;
// drop(_4) -> bb2;
// }
// bb4: {
// drop(_6) -> bb3;
// drop(_5) -> bb3;
// }
// bb5: {
// goto -> bb4;
// drop(_6) -> bb4;
// }
// bb6: {
// drop(_7) -> [return: bb8, unwind: bb4];
// StorageDead(_6);
// _0 = ();
// drop(_5) -> [return: bb7, unwind: bb3];
// }
// bb7: {
// drop(_7) -> bb5;
// StorageDead(_5);
// drop(_4) -> bb8;
// }
// bb8: {
// StorageDead(_7);
// _0 = ();
// drop(_6) -> [return: bb9, unwind: bb2];
// }
// bb9: {
// StorageDead(_6);
// drop(_4) -> bb10;
// }
// bb10: {
// StorageDead(_4);
// StorageDead(_2);
// StorageDead(_1);
Expand Down
Loading

0 comments on commit 640cfc8

Please sign in to comment.