
Commit 8c0f4f5

Avoid quadratic growth of functions due to cleanups

If a new cleanup is added to a cleanup scope, the cached exits for that scope are cleared, so all previous cleanups have to be translated again. In the worst case this means that we get N distinct landing pads, where the last one has N cleanups, the next N-1, and so on.

Since new cleanups are executed before older ones, we can instead cache the number of already-translated cleanups in addition to the block that contains them. Then we only translate the new cleanups, if any, and jump to the cached block for the rest, getting away with linear growth instead.

For the crate in rust-lang#31381 this reduces the compile time for an optimized build from more than 20 minutes (I cancelled the build at that point) to about 11 seconds. Testing a few crates that come with rustc shows compile-time improvements of somewhere between 1 and 8%, the big winner being rustc_platform_intrinsics, which features code similar to that in rust-lang#31381.

Fixes rust-lang#31381
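The scheme is easiest to see in isolation. Below is a toy model of the caching idea, with hypothetical names and stand-in types rather than rustc's actual trans code: each cached exit records how many cleanups it already covers, so a later exit only translates the delta and jumps to the cached block for the rest.

```rust
// Toy model of the cached-early-exit scheme (hypothetical names, not rustc's types).
struct Scope {
    cleanups: Vec<String>,             // pending cleanup actions, oldest first
    cached_exits: Vec<(usize, usize)>, // (block id, cleanups already covered)
}

impl Scope {
    /// Translates an exit and returns how many cleanups had to be translated.
    fn translate_exit(&mut self, block_id: usize) -> usize {
        // Reuse the most recent cached exit, if any; it covers `skip` cleanups.
        let skip = self.cached_exits.last().map_or(0, |&(_, covered)| covered);
        let len = self.cleanups.len();
        // Only the cleanups added since then are translated; the new block
        // then branches to the cached one for the remaining `skip` cleanups.
        self.cached_exits.push((block_id, len));
        len - skip
    }
}

fn main() {
    let mut scope = Scope { cleanups: vec![], cached_exits: vec![] };
    let mut total = 0;
    for i in 0..100 {
        scope.cleanups.push(format!("drop #{}", i));
        total += scope.translate_exit(i);
    }
    // Each cleanup is translated exactly once: 100 in total. Clearing the
    // cache instead would retranslate all i+1 cleanups at exit i: ~5050 total.
    assert_eq!(total, 100);
}
```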
1 parent 2d4e94a commit 8c0f4f5

2 files changed, +71 -13 lines


src/librustc_trans/trans/cleanup.rs (+24 -13)
@@ -200,6 +200,7 @@ pub enum UnwindKind {
 pub struct CachedEarlyExit {
     label: EarlyExitLabel,
     cleanup_block: BasicBlockRef,
+    last_cleanup: usize,
 }
 
 pub trait Cleanup<'tcx> {
@@ -560,7 +561,7 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
         for scope in self.scopes.borrow_mut().iter_mut().rev() {
             if scope.kind.is_ast_with_id(cleanup_scope) {
                 scope.cleanups.push(cleanup);
-                scope.clear_cached_exits();
+                scope.cached_landing_pad = None;
                 return;
             } else {
                 // will be adding a cleanup to some enclosing scope
@@ -585,7 +586,7 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
         let mut scopes = self.scopes.borrow_mut();
         let scope = &mut (*scopes)[custom_scope.index];
         scope.cleanups.push(cleanup);
-        scope.clear_cached_exits();
+        scope.cached_landing_pad = None;
     }
 
     /// Returns true if there are pending cleanups that should execute on panic.
@@ -723,6 +724,7 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx
         let orig_scopes_len = self.scopes_len();
         let mut prev_llbb;
         let mut popped_scopes = vec!();
+        let mut skip = 0;
 
         // First we pop off all the cleanup stacks that are
         // traversed until the exit is reached, pushing them
@@ -769,20 +771,25 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx
                 }
             }
 
+            // Pop off the scope, since we may be generating
+            // unwinding code for it.
+            let top_scope = self.pop_scope();
+            let cached_exit = top_scope.cached_early_exit(label);
+            popped_scopes.push(top_scope);
+
             // Check if we have already cached the unwinding of this
             // scope for this label. If so, we can stop popping scopes
             // and branch to the cached label, since it contains the
             // cleanups for any subsequent scopes.
-            if let Some(exit) = self.top_scope(|s| s.cached_early_exit(label)) {
+            if let Some((exit, last_cleanup)) = cached_exit {
                 prev_llbb = exit;
+                skip = last_cleanup;
                 break;
             }
 
-            // Pop off the scope, since we will be generating
-            // unwinding code for it. If we are searching for a loop exit,
+            // If we are searching for a loop exit,
             // and this scope is that loop, then stop popping and set
             // `prev_llbb` to the appropriate exit block from the loop.
-            popped_scopes.push(self.pop_scope());
             let scope = popped_scopes.last().unwrap();
             match label {
                 UnwindExit(..) | ReturnExit => { }
@@ -826,13 +833,15 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx
                 let bcx_in = self.new_block(&name[..], None);
                 let exit_label = label.start(bcx_in);
                 let mut bcx_out = bcx_in;
-                for cleanup in scope.cleanups.iter().rev() {
+                let len = scope.cleanups.len();
+                for cleanup in scope.cleanups.iter().rev().take(len - skip) {
                     bcx_out = cleanup.trans(bcx_out, scope.debug_loc);
                 }
+                skip = 0;
                 exit_label.branch(bcx_out, prev_llbb);
                 prev_llbb = bcx_in.llbb;
 
-                scope.add_cached_early_exit(exit_label, prev_llbb);
+                scope.add_cached_early_exit(exit_label, prev_llbb, len);
             }
             self.push_scope(scope);
         }
@@ -938,18 +947,20 @@ impl<'blk, 'tcx> CleanupScope<'blk, 'tcx> {
 
     fn cached_early_exit(&self,
                          label: EarlyExitLabel)
-                         -> Option<BasicBlockRef> {
-        self.cached_early_exits.iter().
+                         -> Option<(BasicBlockRef, usize)> {
+        self.cached_early_exits.iter().rev().
            find(|e| e.label == label).
-           map(|e| e.cleanup_block)
+           map(|e| (e.cleanup_block, e.last_cleanup))
    }
 
    fn add_cached_early_exit(&mut self,
                             label: EarlyExitLabel,
-                            blk: BasicBlockRef) {
+                            blk: BasicBlockRef,
+                            last_cleanup: usize) {
        self.cached_early_exits.push(
            CachedEarlyExit { label: label,
-                             cleanup_block: blk });
+                             cleanup_block: blk,
+                             last_cleanup: last_cleanup});
    }
 
    /// True if this scope has cleanups that need unwinding
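One detail worth noting: the lookup now iterates the cache in reverse. A scope can accumulate several cached exits for the same label, and the most recently pushed one covers the most cleanups, so searching from the back maximizes `last_cleanup` and minimizes retranslation. Here is a self-contained sketch of just that lookup, with stand-in types (`usize` in place of `BasicBlockRef`, a plain newtype for the label):

```rust
#[derive(PartialEq, Clone, Copy)]
struct Label(u32); // stand-in for EarlyExitLabel

struct CachedEarlyExit {
    label: Label,
    cleanup_block: usize, // stand-in for BasicBlockRef
    last_cleanup: usize,  // cleanups already covered by this block
}

fn cached_early_exit(exits: &[CachedEarlyExit], label: Label) -> Option<(usize, usize)> {
    // Searching from the back finds the newest (most complete) cached exit.
    exits.iter().rev()
         .find(|e| e.label == label)
         .map(|e| (e.cleanup_block, e.last_cleanup))
}

fn main() {
    let exits = [
        CachedEarlyExit { label: Label(0), cleanup_block: 10, last_cleanup: 1 },
        CachedEarlyExit { label: Label(0), cleanup_block: 11, last_cleanup: 3 },
    ];
    // Picks the newer entry, so only cleanups beyond the third get retranslated.
    assert_eq!(cached_early_exit(&exits, Label(0)), Some((11, 3)));
}
```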

src/test/codegen/drop.rs (+47)
@@ -0,0 +1,47 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -C no-prepopulate-passes
+
+#![crate_type = "lib"]
+
+struct SomeUniqueName;
+
+impl Drop for SomeUniqueName {
+    fn drop(&mut self) {
+    }
+}
+
+pub fn possibly_unwinding() {
+}
+
+// CHECK-LABEL: @droppy
+#[no_mangle]
+pub fn droppy() {
+// Check that there are exactly 6 drop calls. The cleanups for the unwinding should be reused, so
+// that's one new drop call per call to possibly_unwinding(), and finally 3 drop calls for the
+// regular function exit. We used to have problems with quadratic growth of drop calls in such
+// functions.
+// CHECK: call{{.*}}SomeUniqueName{{.*}}drop
+// CHECK: call{{.*}}SomeUniqueName{{.*}}drop
+// CHECK: call{{.*}}SomeUniqueName{{.*}}drop
+// CHECK: call{{.*}}SomeUniqueName{{.*}}drop
+// CHECK: call{{.*}}SomeUniqueName{{.*}}drop
+// CHECK: call{{.*}}SomeUniqueName{{.*}}drop
+// CHECK-NOT: call{{.*}}SomeUniqueName{{.*}}drop
+// The next line checks for the } that ends the function definition
+// CHECK-LABEL: {{^[}]}}
+    let _s = SomeUniqueName;
+    possibly_unwinding();
+    let _s = SomeUniqueName;
+    possibly_unwinding();
+    let _s = SomeUniqueName;
+    possibly_unwinding();
+}
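The expected count of 6 falls out of the growth argument in the commit message. For N droppable locals each followed by a possibly-unwinding call (N = 3 in the test), the old code emitted 1 + 2 + … + N drop calls across the landing pads plus N on the normal exit path, while the new code emits one new drop per landing pad plus the same N. A quick check of that arithmetic (a sketch of the counting argument, not compiler output):

```rust
// Drop-call counts for n droppable locals, each followed by a call that may unwind.
fn drop_calls(n: u32) -> (u32, u32) {
    // Old scheme: landing pad i retranslates all i cleanups -> 1 + 2 + ... + n,
    // plus n drops on the normal exit path.
    let quadratic = n * (n + 1) / 2 + n;
    // New scheme: each landing pad adds one new drop and branches to the
    // previous pad for the rest, plus the same n on the normal path.
    let linear = n + n;
    (quadratic, linear)
}

fn main() {
    // For the test above (n = 3): 9 drop calls before the fix, exactly 6 after.
    assert_eq!(drop_calls(3), (9, 6));
}
```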
