Commit 7d4d205

Generate StorageDead along unwind paths for generators
1 parent 9069d8f

3 files changed: +88 -66 lines
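
For context: which locals must be kept in a generator's state object is (roughly) derived from their StorageLive/StorageDead ranges, so cleanup (unwind) paths inside a generator need StorageDead statements just like the normal path does. The diff below gives DropKind::Storage its own cached cleanup block, has build_diverge_scope record the block that runs each local's StorageDead on unwind, and routes a panicking drop's unwind edge to that block when building a generator (see the new get_unwind_to in src/librustc_mir/build/scope.rs). A minimal, purely illustrative generator of the kind this affects, assuming the nightly `generators` feature syntax of this era (not part of the commit or its tests):

#![feature(generators)]

fn main() {
    // `s` has a real destructor and is kept in the generator state across the
    // yield, so the generator's unwind path must also emit StorageDead(s).
    let _g = || {
        let s = String::from("kept across the yield");
        yield;
        drop(s);
    };
}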

src/librustc/ty/layout.rs (+1 -1)

@@ -666,7 +666,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
                 size,
                 align,
             });
-            debug!("generator layout: {:#?}", layout);
+            debug!("generator layout ({:?}): {:#?}", ty, layout);
             layout
         }
src/librustc_mir/build/matches/mod.rs (+1 -1)

@@ -544,7 +544,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
         let place = Place::Base(PlaceBase::Local(local_id));
         let var_ty = self.local_decls[local_id].ty;
         let region_scope = self.hir.region_scope_tree.var_scope(var.local_id);
-        self.schedule_drop(span, region_scope, &place, var_ty, DropKind::Storage);
+        self.schedule_drop(span, region_scope, &place, var_ty, DropKind::Storage { cached_block: CachedBlock::default() });
         place
     }

src/librustc_mir/build/scope.rs (+86 -64)

@@ -88,7 +88,7 @@ use rustc::middle::region;
 use rustc::ty::Ty;
 use rustc::hir;
 use rustc::mir::*;
-use syntax_pos::{Span, DUMMY_SP};
+use syntax_pos::{DUMMY_SP, Span};
 use rustc_data_structures::fx::FxHashMap;
 use std::collections::hash_map::Entry;
 use std::mem;
@@ -164,10 +164,8 @@ pub(crate) struct CachedBlock {

 #[derive(Debug)]
 pub(crate) enum DropKind {
-    Value {
-        cached_block: CachedBlock,
-    },
-    Storage
+    Value { cached_block: CachedBlock },
+    Storage { cached_block: CachedBlock },
 }

 #[derive(Clone, Debug)]
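
Both DropKind variants now carry a CachedBlock. The call sites in this diff (CachedBlock::default(), .invalidate(), .get(generator_drop), .ref_mut(generator_drop)) suggest an interface along the lines of the sketch below; the field names are assumptions, and the real definition sits just above this hunk in scope.rs:

// Sketch only, inferred from the call sites in this diff; not code from the commit.
#[derive(Clone, Copy, Debug, Default)]
pub(crate) struct CachedBlock {
    unwind: Option<BasicBlock>,          // cached target for the unwind path
    generator_drop: Option<BasicBlock>,  // cached target for the generator-drop path
}

impl CachedBlock {
    fn invalidate(&mut self) {
        self.unwind = None;
        self.generator_drop = None;
    }

    fn get(&self, generator_drop: bool) -> Option<BasicBlock> {
        if generator_drop { self.generator_drop } else { self.unwind }
    }

    fn ref_mut(&mut self, generator_drop: bool) -> &mut Option<BasicBlock> {
        if generator_drop { &mut self.generator_drop } else { &mut self.unwind }
    }
}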
@@ -211,7 +209,7 @@ impl DropKind {
     fn may_panic(&self) -> bool {
         match *self {
             DropKind::Value { .. } => true,
-            DropKind::Storage => false
+            DropKind::Storage { .. } => false
         }
     }
 }
@@ -225,25 +223,28 @@ impl<'tcx> Scope<'tcx> {
     /// `storage_only` controls whether to invalidate only drop paths that run `StorageDead`.
     /// `this_scope_only` controls whether to invalidate only drop paths that refer to the current
     /// top-of-scope (as opposed to dependent scopes).
-    fn invalidate_cache(&mut self, storage_only: bool, this_scope_only: bool) {
+    fn invalidate_cache(&mut self, storage_only: bool, is_generator: bool, this_scope_only: bool) {
         // FIXME: maybe do shared caching of `cached_exits` etc. to handle functions
         // with lots of `try!`?

         // cached exits drop storage and refer to the top-of-scope
         self.cached_exits.clear();

-        if !storage_only {
-            // the current generator drop and unwind ignore
-            // storage but refer to top-of-scope
-            self.cached_generator_drop = None;
+        // the current generator drop and unwind refer to top-of-scope
+        self.cached_generator_drop = None;
+
+        let ignore_unwinds = storage_only && !is_generator;
+        if !ignore_unwinds {
             self.cached_unwind.invalidate();
         }

-        if !storage_only && !this_scope_only {
+        if !ignore_unwinds && !this_scope_only {
             for drop_data in &mut self.drops {
-                if let DropKind::Value { ref mut cached_block } = drop_data.kind {
-                    cached_block.invalidate();
-                }
+                let cached_block = match drop_data.kind {
+                    DropKind::Storage { ref mut cached_block } => cached_block,
+                    DropKind::Value { ref mut cached_block } => cached_block,
+                };
+                cached_block.invalidate();
             }
         }
     }
@@ -388,6 +389,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {

         unpack!(block = build_scope_drops(
             &mut self.cfg,
+            self.is_generator,
             &scope,
             block,
             unwind_to,
@@ -454,6 +456,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {

         unpack!(block = build_scope_drops(
             &mut self.cfg,
+            self.is_generator,
             scope,
             block,
             unwind_to,
@@ -484,10 +487,6 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
         let result = block;

         while let Some(scope) = scopes.next() {
-            if !scope.needs_cleanup && !self.is_generator {
-                continue;
-            }
-
             block = if let Some(b) = scope.cached_generator_drop {
                 self.cfg.terminate(block, src_info,
                                    TerminatorKind::Goto { target: b });
@@ -508,6 +507,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {

         unpack!(block = build_scope_drops(
             &mut self.cfg,
+            self.is_generator,
             scope,
             block,
             unwind_to,
@@ -644,7 +644,9 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
     ) {
         self.schedule_drop(
             span, region_scope, place, place_ty,
-            DropKind::Storage,
+            DropKind::Storage {
+                cached_block: CachedBlock::default(),
+            },
         );
         self.schedule_drop(
             span, region_scope, place, place_ty,
@@ -672,7 +674,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
         let needs_drop = self.hir.needs_drop(place_ty);
         match drop_kind {
             DropKind::Value { .. } => if !needs_drop { return },
-            DropKind::Storage => {
+            DropKind::Storage { .. } => {
                 match *place {
                     Place::Base(PlaceBase::Local(index)) => if index.index() <= self.arg_count {
                         span_bug!(
@@ -735,8 +737,8 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
         // Note that this code iterates scopes from the inner-most to the outer-most,
         // invalidating caches of each scope visited. This way bare minimum of the
         // caches gets invalidated. i.e., if a new drop is added into the middle scope, the
-        // cache of outer scope stays intact.
-        scope.invalidate_cache(!needs_drop, this_scope);
+        // cache of outer scpoe stays intact.
+        scope.invalidate_cache(!needs_drop, self.is_generator, this_scope);
         if this_scope {
             if let DropKind::Value { .. } = drop_kind {
                 scope.needs_cleanup = true;
@@ -797,6 +799,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
         // to left reading the cached results but never created anything.

         // Find the last cached block
+        debug!("diverge_cleanup_gen(self.scopes = {:?})", self.scopes);
         let (mut target, first_uncached) = if let Some(cached_index) = self.scopes.iter()
             .rposition(|scope| scope.cached_unwind.get(generator_drop).is_some()) {
             (self.scopes[cached_index].cached_unwind.get(generator_drop).unwrap(), cached_index + 1)
@@ -890,7 +893,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
         assert_eq!(top_scope.region_scope, region_scope);

         top_scope.drops.clear();
-        top_scope.invalidate_cache(false, true);
+        top_scope.invalidate_cache(false, self.is_generator, true);
     }

     /// Drops the single variable provided
@@ -941,21 +944,22 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
             }
         }

-        top_scope.invalidate_cache(true, true);
+        top_scope.invalidate_cache(true, self.is_generator, true);
     }

 }

 /// Builds drops for pop_scope and exit_scope.
 fn build_scope_drops<'tcx>(
     cfg: &mut CFG<'tcx>,
+    is_generator: bool,
     scope: &Scope<'tcx>,
     mut block: BasicBlock,
     last_unwind_to: BasicBlock,
     arg_count: usize,
     generator_drop: bool,
 ) -> BlockAnd<()> {
-    debug!("build_scope_drops({:?} -> {:?}", block, scope);
+    debug!("build_scope_drops({:?} -> {:?})", block, scope);

     // Build up the drops in evaluation order. The end result will
     // look like:
@@ -969,28 +973,20 @@ fn build_scope_drops<'tcx>(
     // The horizontal arrows represent the execution path when the drops return
     // successfully. The downwards arrows represent the execution path when the
     // drops panic (panicking while unwinding will abort, so there's no need for
-    // another set of arrows). The drops for the unwind path should have already
-    // been generated by `diverge_cleanup_gen`.
-
-    let mut unwind_blocks = scope.drops.iter().rev().filter_map(|drop_data| {
-        if let DropKind::Value { cached_block } = drop_data.kind {
-            Some(cached_block.get(generator_drop).unwrap_or_else(|| {
-                span_bug!(drop_data.span, "cached block not present?")
-            }))
-        } else {
-            None
-        }
-    });
-
-    // When we unwind from a drop, we start cleaning up from the next one, so
-    // we don't need this block.
-    unwind_blocks.next();
+    // another set of arrows).
+    //
+    // For generators, we unwind from a drop on a local to its StorageDead
+    // statement. For other functions we don't worry about StorageDead. The
+    // drops for the unwind path should have already been generated by
+    // `diverge_cleanup_gen`.

-    for drop_data in scope.drops.iter().rev() {
+    for drop_idx in (0..scope.drops.len()).rev() {
+        let drop_data = &scope.drops[drop_idx];
         let source_info = scope.source_info(drop_data.span);
         match drop_data.kind {
             DropKind::Value { .. } => {
-                let unwind_to = unwind_blocks.next().unwrap_or(last_unwind_to);
+                let unwind_to = get_unwind_to(scope, is_generator, drop_idx, generator_drop)
+                    .unwrap_or(last_unwind_to);

                 let next = cfg.start_new_block();
                 cfg.terminate(block, source_info, TerminatorKind::Drop {
@@ -1000,7 +996,7 @@ fn build_scope_drops<'tcx>(
                 });
                 block = next;
             }
-            DropKind::Storage => {
+            DropKind::Storage { .. } => {
                 // Drop the storage for both value and storage drops.
                 // Only temps and vars need their storage dead.
                 match drop_data.location {
@@ -1018,6 +1014,31 @@
     block.unit()
 }

+fn get_unwind_to<'tcx>(
+    scope: &Scope<'tcx>,
+    is_generator: bool,
+    unwind_from: usize,
+    generator_drop: bool,
+) -> Option<BasicBlock> {
+    for drop_idx in (0..unwind_from).rev() {
+        let drop_data = &scope.drops[drop_idx];
+        match drop_data.kind {
+            DropKind::Storage { cached_block } if is_generator => {
+                return Some(cached_block.get(generator_drop).unwrap_or_else(|| {
+                    span_bug!(drop_data.span, "cached block not present for {:?}", drop_data)
+                }));
+            }
+            DropKind::Value { cached_block } if !is_generator => {
+                return Some(cached_block.get(generator_drop).unwrap_or_else(|| {
+                    span_bug!(drop_data.span, "cached block not present for {:?}", drop_data)
+                }));
+            }
+            _ => (),
+        }
+    }
+    None
+}
+
 fn build_diverge_scope<'tcx>(cfg: &mut CFG<'tcx>,
                              span: Span,
                              scope: &mut Scope<'tcx>,
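
The new get_unwind_to walks backwards from the drop that may panic and returns the nearest cached cleanup block scheduled before it: when building a generator, a Storage drop's block (which after this change begins with that local's StorageDead), otherwise a Value drop's block. A standalone restatement of that search, using invented stand-in types purely for illustration:

// Not compiler code: a simplified model of the backwards search in get_unwind_to.
// `u32` stands in for BasicBlock; the enum stands in for DropKind's cached blocks.
#[derive(Clone, Copy)]
enum Kind {
    Storage(Option<u32>),
    Value(Option<u32>),
}

fn unwind_to(drops: &[Kind], unwind_from: usize, is_generator: bool) -> Option<u32> {
    drops[..unwind_from].iter().rev().find_map(|d| match *d {
        // Generators unwind into the block holding the previous local's StorageDead.
        Kind::Storage(cached) if is_generator => Some(cached.expect("cached block not present")),
        // Ordinary functions unwind into the previous value drop.
        Kind::Value(cached) if !is_generator => Some(cached.expect("cached block not present")),
        _ => None,
    })
}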
@@ -1051,6 +1072,7 @@ fn build_diverge_scope<'tcx>(cfg: &mut CFG<'tcx>,
     // Build up the drops. Here we iterate the vector in
     // *forward* order, so that we generate drops[0] first (right to
     // left in diagram above).
+    debug!("build_diverge_scope({:?})", scope.drops);
     for (j, drop_data) in scope.drops.iter_mut().enumerate() {
         debug!("build_diverge_scope drop_data[{}]: {:?}", j, drop_data);
         // Only full value drops are emitted in the diverging path,
@@ -1062,28 +1084,38 @@ fn build_diverge_scope<'tcx>(cfg: &mut CFG<'tcx>,
         // match the behavior of clang, but on inspection eddyb says
         // this is not what clang does.
         match drop_data.kind {
-            DropKind::Storage if is_generator => {
+            DropKind::Storage { ref mut cached_block } if is_generator => {
                 // Only temps and vars need their storage dead.
                 match drop_data.location {
                     Place::Base(PlaceBase::Local(index)) => {
                         storage_deads.push(Statement {
                             source_info: source_info(drop_data.span),
                             kind: StatementKind::StorageDead(index)
                         });
+                        if !target_built_by_us {
+                            // We cannot add statements to an existing block, so we create a new
+                            // block for our StorageDead statements.
+                            let block = cfg.start_new_cleanup_block();
+                            let source_info = SourceInfo { span: DUMMY_SP, scope: source_scope };
+                            cfg.terminate(block, source_info,
+                                          TerminatorKind::Goto { target: target });
+                            target = block;
+                            target_built_by_us = true;
+                        }
                     }
                     _ => unreachable!(),
                 };
+                *cached_block.ref_mut(generator_drop) = Some(target);
             }
-            DropKind::Storage => {}
+            DropKind::Storage { .. } => {}
             DropKind::Value { ref mut cached_block } => {
                 let cached_block = cached_block.ref_mut(generator_drop);
                 target = if let Some(cached_block) = *cached_block {
                     storage_deads.clear();
                     target_built_by_us = false;
                     cached_block
                 } else {
-                    push_storage_deads(
-                        cfg, &mut target, &mut storage_deads, target_built_by_us, source_scope);
+                    push_storage_deads(cfg, target, &mut storage_deads);
                     let block = cfg.start_new_cleanup_block();
                     cfg.terminate(block, source_info(drop_data.span),
                                   TerminatorKind::Drop {
@@ -1098,7 +1130,7 @@ fn build_diverge_scope<'tcx>(cfg: &mut CFG<'tcx>,
            }
        };
    }
-    push_storage_deads(cfg, &mut target, &mut storage_deads, target_built_by_us, source_scope);
+    push_storage_deads(cfg, target, &mut storage_deads);
     *scope.cached_unwind.ref_mut(generator_drop) = Some(target);

     assert!(storage_deads.is_empty());
@@ -1108,23 +1140,13 @@ fn build_diverge_scope<'tcx>(cfg: &mut CFG<'tcx>,
 }

 fn push_storage_deads(cfg: &mut CFG<'tcx>,
-                      target: &mut BasicBlock,
-                      storage_deads: &mut Vec<Statement<'tcx>>,
-                      target_built_by_us: bool,
-                      source_scope: SourceScope) {
+                      target: BasicBlock,
+                      storage_deads: &mut Vec<Statement<'tcx>>) {
     if storage_deads.is_empty() { return; }
-    if !target_built_by_us {
-        // We cannot add statements to an existing block, so we create a new
-        // block for our StorageDead statements.
-        let block = cfg.start_new_cleanup_block();
-        let source_info = SourceInfo { span: DUMMY_SP, scope: source_scope };
-        cfg.terminate(block, source_info, TerminatorKind::Goto { target: *target });
-        *target = block;
-    }
-    let statements = &mut cfg.block_data_mut(*target).statements;
+    let statements = &mut cfg.block_data_mut(target).statements;
     storage_deads.reverse();
     debug!("push_storage_deads({:?}), storage_deads={:?}, statements={:?}",
-           *target, storage_deads, statements);
+           target, storage_deads, statements);
     storage_deads.append(statements);
     mem::swap(statements, storage_deads);
     assert!(storage_deads.is_empty());
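
One design point visible in the build_diverge_scope and push_storage_deads hunks above: the logic that creates a fresh cleanup block when `target` was not built by us moves out of push_storage_deads and into the DropKind::Storage arm, so the block recorded via *cached_block.ref_mut(generator_drop) is one whose statements will begin with the accumulated StorageDead statements; push_storage_deads is left only to prepend pending statements into an existing block.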
