@@ -88,7 +88,7 @@ use rustc::middle::region;
 use rustc::ty::Ty;
 use rustc::hir;
 use rustc::mir::*;
-use syntax_pos::{Span, DUMMY_SP};
+use syntax_pos::{DUMMY_SP, Span};
 use rustc_data_structures::fx::FxHashMap;
 use std::collections::hash_map::Entry;
 use std::mem;
@@ -164,10 +164,8 @@ pub(crate) struct CachedBlock {
 
 #[derive(Debug)]
 pub(crate) enum DropKind {
-    Value {
-        cached_block: CachedBlock,
-    },
-    Storage
+    Value { cached_block: CachedBlock },
+    Storage { cached_block: CachedBlock },
 }
 
 #[derive(Clone, Debug)]
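To make the shape of this change concrete, here is a minimal, self-contained sketch of the cached-block pair that both `Value` and `Storage` drops now carry. The types are simplified stand-ins for illustration only (a `BasicBlock` is reduced to a bare index); the real definitions live in `librustc_mir/build/scope.rs`.

// Simplified stand-ins, not the rustc types. `BasicBlock` is shrunk to a bare index.
#[derive(Debug, Default, Clone, Copy)]
struct CachedBlock {
    unwind: Option<usize>,          // cached cleanup block for the unwind path
    generator_drop: Option<usize>,  // cached cleanup block for the generator-drop path
}

impl CachedBlock {
    fn invalidate(&mut self) {
        self.unwind = None;
        self.generator_drop = None;
    }

    fn get(&self, generator_drop: bool) -> Option<usize> {
        if generator_drop { self.generator_drop } else { self.unwind }
    }

    fn ref_mut(&mut self, generator_drop: bool) -> &mut Option<usize> {
        if generator_drop { &mut self.generator_drop } else { &mut self.unwind }
    }
}

// After this change, storage-only drops cache a cleanup block too, so that a
// generator's unwind path can target the block holding the local's StorageDead.
#[derive(Debug)]
enum DropKind {
    Value { cached_block: CachedBlock },
    Storage { cached_block: CachedBlock },
}

fn main() {
    let mut d = DropKind::Storage { cached_block: CachedBlock::default() };
    if let DropKind::Storage { ref mut cached_block } = d {
        *cached_block.ref_mut(true) = Some(3); // pretend block 3 holds the StorageDead
        assert_eq!(cached_block.get(true), Some(3));
        cached_block.invalidate();
        assert_eq!(cached_block.get(true), None);
    }
    println!("{:?}", d);
}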
@@ -211,7 +209,7 @@ impl DropKind {
     fn may_panic(&self) -> bool {
         match *self {
             DropKind::Value { .. } => true,
-            DropKind::Storage => false
+            DropKind::Storage { .. } => false
         }
     }
 }
@@ -225,25 +223,28 @@ impl<'tcx> Scope<'tcx> {
     /// `storage_only` controls whether to invalidate only drop paths that run `StorageDead`.
     /// `this_scope_only` controls whether to invalidate only drop paths that refer to the current
     /// top-of-scope (as opposed to dependent scopes).
-    fn invalidate_cache(&mut self, storage_only: bool, this_scope_only: bool) {
+    fn invalidate_cache(&mut self, storage_only: bool, is_generator: bool, this_scope_only: bool) {
         // FIXME: maybe do shared caching of `cached_exits` etc. to handle functions
         // with lots of `try!`?
 
         // cached exits drop storage and refer to the top-of-scope
         self.cached_exits.clear();
 
-        if !storage_only {
-            // the current generator drop and unwind ignore
-            // storage but refer to top-of-scope
-            self.cached_generator_drop = None;
+        // the current generator drop and unwind refer to top-of-scope
+        self.cached_generator_drop = None;
+
+        let ignore_unwinds = storage_only && !is_generator;
+        if !ignore_unwinds {
             self.cached_unwind.invalidate();
         }
 
-        if !storage_only && !this_scope_only {
+        if !ignore_unwinds && !this_scope_only {
             for drop_data in &mut self.drops {
-                if let DropKind::Value { ref mut cached_block } = drop_data.kind {
-                    cached_block.invalidate();
-                }
+                let cached_block = match drop_data.kind {
+                    DropKind::Storage { ref mut cached_block } => cached_block,
+                    DropKind::Value { ref mut cached_block } => cached_block,
+                };
+                cached_block.invalidate();
             }
         }
     }
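The new `ignore_unwinds` flag is the heart of this hunk: scheduling a storage-only drop used to leave the unwind caches untouched, but in a generator the unwind path must now pass through the new `StorageDead` block, so those caches are stale. A rough restatement of that decision, using plain booleans instead of the real `Scope` fields (names here are illustrative only):

// Hypothetical simplification: which caches must be thrown away when a new
// drop is scheduled? `storage_only` means only a StorageDead was added.
fn caches_to_invalidate(storage_only: bool, is_generator: bool) -> (bool, bool, bool) {
    // (cached_exits, cached_generator_drop, cached_unwind)
    let ignore_unwinds = storage_only && !is_generator;
    (true, true, !ignore_unwinds)
}

fn main() {
    // Ordinary fn, storage-only drop: the unwind cache survives.
    assert_eq!(caches_to_invalidate(true, false), (true, true, false));
    // Generator, storage-only drop: the unwind cache is invalidated too,
    // because the unwind path must now visit the new StorageDead block.
    assert_eq!(caches_to_invalidate(true, true), (true, true, true));
    // Any value drop invalidates everything.
    assert_eq!(caches_to_invalidate(false, false), (true, true, true));
    println!("cache invalidation rules hold");
}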
@@ -388,6 +389,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
 
         unpack!(block = build_scope_drops(
             &mut self.cfg,
+            self.is_generator,
             &scope,
             block,
             unwind_to,
@@ -454,6 +456,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
 
         unpack!(block = build_scope_drops(
             &mut self.cfg,
+            self.is_generator,
             scope,
             block,
             unwind_to,
@@ -484,10 +487,6 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
         let result = block;
 
         while let Some(scope) = scopes.next() {
-            if !scope.needs_cleanup && !self.is_generator {
-                continue;
-            }
-
             block = if let Some(b) = scope.cached_generator_drop {
                 self.cfg.terminate(block, src_info,
                                    TerminatorKind::Goto { target: b });
@@ -508,6 +507,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
 
             unpack!(block = build_scope_drops(
                 &mut self.cfg,
+                self.is_generator,
                 scope,
                 block,
                 unwind_to,
@@ -644,7 +644,9 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
     ) {
         self.schedule_drop(
             span, region_scope, place, place_ty,
-            DropKind::Storage,
+            DropKind::Storage {
+                cached_block: CachedBlock::default(),
+            },
         );
         self.schedule_drop(
             span, region_scope, place, place_ty,
@@ -672,7 +674,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
         let needs_drop = self.hir.needs_drop(place_ty);
         match drop_kind {
             DropKind::Value { .. } => if !needs_drop { return },
-            DropKind::Storage => {
+            DropKind::Storage { .. } => {
                 match *place {
                     Place::Base(PlaceBase::Local(index)) => if index.index() <= self.arg_count {
                         span_bug!(
@@ -735,8 +737,8 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
             // Note that this code iterates scopes from the inner-most to the outer-most,
             // invalidating caches of each scope visited. This way bare minimum of the
             // caches gets invalidated. i.e., if a new drop is added into the middle scope, the
             // cache of outer scope stays intact.
-            scope.invalidate_cache(!needs_drop, this_scope);
+            scope.invalidate_cache(!needs_drop, self.is_generator, this_scope);
             if this_scope {
                 if let DropKind::Value { .. } = drop_kind {
                     scope.needs_cleanup = true;
@@ -797,6 +799,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
         // to left reading the cached results but never created anything.
 
         // Find the last cached block
+        debug!("diverge_cleanup_gen(self.scopes = {:?})", self.scopes);
         let (mut target, first_uncached) = if let Some(cached_index) = self.scopes.iter()
             .rposition(|scope| scope.cached_unwind.get(generator_drop).is_some()) {
             (self.scopes[cached_index].cached_unwind.get(generator_drop).unwrap(), cached_index + 1)
@@ -890,7 +893,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
         assert_eq!(top_scope.region_scope, region_scope);
 
         top_scope.drops.clear();
-        top_scope.invalidate_cache(false, true);
+        top_scope.invalidate_cache(false, self.is_generator, true);
     }
 
     /// Drops the single variable provided
@@ -941,21 +944,22 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
             }
         }
 
-        top_scope.invalidate_cache(true, true);
+        top_scope.invalidate_cache(true, self.is_generator, true);
     }
 }
 
 /// Builds drops for pop_scope and exit_scope.
 fn build_scope_drops<'tcx>(
     cfg: &mut CFG<'tcx>,
+    is_generator: bool,
     scope: &Scope<'tcx>,
     mut block: BasicBlock,
     last_unwind_to: BasicBlock,
     arg_count: usize,
     generator_drop: bool,
 ) -> BlockAnd<()> {
-    debug!("build_scope_drops({:?} -> {:?}", block, scope);
+    debug!("build_scope_drops({:?} -> {:?})", block, scope);
 
     // Build up the drops in evaluation order. The end result will
     // look like:
@@ -969,28 +973,20 @@ fn build_scope_drops<'tcx>(
     // The horizontal arrows represent the execution path when the drops return
     // successfully. The downwards arrows represent the execution path when the
     // drops panic (panicking while unwinding will abort, so there's no need for
-    // another set of arrows). The drops for the unwind path should have already
-    // been generated by `diverge_cleanup_gen`.
-
-    let mut unwind_blocks = scope.drops.iter().rev().filter_map(|drop_data| {
-        if let DropKind::Value { cached_block } = drop_data.kind {
-            Some(cached_block.get(generator_drop).unwrap_or_else(|| {
-                span_bug!(drop_data.span, "cached block not present?")
-            }))
-        } else {
-            None
-        }
-    });
-
-    // When we unwind from a drop, we start cleaning up from the next one, so
-    // we don't need this block.
-    unwind_blocks.next();
+    // another set of arrows).
+    //
+    // For generators, we unwind from a drop on a local to its StorageDead
+    // statement. For other functions we don't worry about StorageDead. The
+    // drops for the unwind path should have already been generated by
+    // `diverge_cleanup_gen`.
 
-    for drop_data in scope.drops.iter().rev() {
+    for drop_idx in (0..scope.drops.len()).rev() {
+        let drop_data = &scope.drops[drop_idx];
         let source_info = scope.source_info(drop_data.span);
         match drop_data.kind {
             DropKind::Value { .. } => {
-                let unwind_to = unwind_blocks.next().unwrap_or(last_unwind_to);
+                let unwind_to = get_unwind_to(scope, is_generator, drop_idx, generator_drop)
+                    .unwrap_or(last_unwind_to);
 
                 let next = cfg.start_new_block();
                 cfg.terminate(block, source_info, TerminatorKind::Drop {
@@ -1000,7 +996,7 @@ fn build_scope_drops<'tcx>(
                 });
                 block = next;
             }
-            DropKind::Storage => {
+            DropKind::Storage { .. } => {
                 // Drop the storage for both value and storage drops.
                 // Only temps and vars need their storage dead.
                 match drop_data.location {
@@ -1018,6 +1014,31 @@ fn build_scope_drops<'tcx>(
     block.unit()
 }
 
+fn get_unwind_to<'tcx>(
+    scope: &Scope<'tcx>,
+    is_generator: bool,
+    unwind_from: usize,
+    generator_drop: bool,
+) -> Option<BasicBlock> {
+    for drop_idx in (0..unwind_from).rev() {
+        let drop_data = &scope.drops[drop_idx];
+        match drop_data.kind {
+            DropKind::Storage { cached_block } if is_generator => {
+                return Some(cached_block.get(generator_drop).unwrap_or_else(|| {
+                    span_bug!(drop_data.span, "cached block not present for {:?}", drop_data)
+                }));
+            }
+            DropKind::Value { cached_block } if !is_generator => {
+                return Some(cached_block.get(generator_drop).unwrap_or_else(|| {
+                    span_bug!(drop_data.span, "cached block not present for {:?}", drop_data)
+                }));
+            }
+            _ => (),
+        }
+    }
+    None
+}
+
 fn build_diverge_scope<'tcx>(cfg: &mut CFG<'tcx>,
                              span: Span,
                              scope: &mut Scope<'tcx>,
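The added `get_unwind_to` replaces the old `unwind_blocks` iterator in `build_scope_drops`: starting just below the drop being unwound from, it walks outward and returns the first cached block of the kind that matters for this body, so generators land on the previous `StorageDead` and other functions on the previous value drop. A standalone sketch with simplified stand-in types (not the rustc `Scope` or `BasicBlock`):

// Simplified stand-ins; the real function takes &Scope<'tcx> and returns Option<BasicBlock>.
#[derive(Clone, Copy)]
enum Kind {
    Value { cached: Option<usize> },
    Storage { cached: Option<usize> },
}

fn get_unwind_to(drops: &[Kind], is_generator: bool, unwind_from: usize) -> Option<usize> {
    for idx in (0..unwind_from).rev() {
        match drops[idx] {
            // Generators unwind from a drop to the StorageDead of the
            // previously scheduled local.
            Kind::Storage { cached } if is_generator => return Some(cached.expect("cached block")),
            // Other bodies skip StorageDead and chain value drops directly.
            Kind::Value { cached } if !is_generator => return Some(cached.expect("cached block")),
            _ => (),
        }
    }
    None
}

fn main() {
    let drops = [
        Kind::Storage { cached: Some(10) },
        Kind::Value { cached: Some(11) },
        Kind::Storage { cached: Some(12) },
        Kind::Value { cached: Some(13) },
    ];
    // Unwinding from the drop at index 3:
    assert_eq!(get_unwind_to(&drops, true, 3), Some(12));  // generator -> previous StorageDead block
    assert_eq!(get_unwind_to(&drops, false, 3), Some(11)); // ordinary fn -> previous value drop
}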
@@ -1051,6 +1072,7 @@ fn build_diverge_scope<'tcx>(cfg: &mut CFG<'tcx>,
     // Build up the drops. Here we iterate the vector in
     // *forward* order, so that we generate drops[0] first (right to
     // left in diagram above).
+    debug!("build_diverge_scope({:?})", scope.drops);
     for (j, drop_data) in scope.drops.iter_mut().enumerate() {
         debug!("build_diverge_scope drop_data[{}]: {:?}", j, drop_data);
         // Only full value drops are emitted in the diverging path,
@@ -1062,28 +1084,38 @@ fn build_diverge_scope<'tcx>(cfg: &mut CFG<'tcx>,
         // match the behavior of clang, but on inspection eddyb says
         // this is not what clang does.
         match drop_data.kind {
-            DropKind::Storage if is_generator => {
+            DropKind::Storage { ref mut cached_block } if is_generator => {
                 // Only temps and vars need their storage dead.
                 match drop_data.location {
                     Place::Base(PlaceBase::Local(index)) => {
                         storage_deads.push(Statement {
                             source_info: source_info(drop_data.span),
                             kind: StatementKind::StorageDead(index)
                         });
+                        if !target_built_by_us {
+                            // We cannot add statements to an existing block, so we create a new
+                            // block for our StorageDead statements.
+                            let block = cfg.start_new_cleanup_block();
+                            let source_info = SourceInfo { span: DUMMY_SP, scope: source_scope };
+                            cfg.terminate(block, source_info,
+                                          TerminatorKind::Goto { target: target });
+                            target = block;
+                            target_built_by_us = true;
+                        }
                     }
                     _ => unreachable!(),
                 };
+                *cached_block.ref_mut(generator_drop) = Some(target);
             }
-            DropKind::Storage => {}
+            DropKind::Storage { .. } => {}
             DropKind::Value { ref mut cached_block } => {
                 let cached_block = cached_block.ref_mut(generator_drop);
                 target = if let Some(cached_block) = *cached_block {
                     storage_deads.clear();
                     target_built_by_us = false;
                     cached_block
                 } else {
-                    push_storage_deads(
-                        cfg, &mut target, &mut storage_deads, target_built_by_us, source_scope);
+                    push_storage_deads(cfg, target, &mut storage_deads);
                     let block = cfg.start_new_cleanup_block();
                     cfg.terminate(block, source_info(drop_data.span),
                                   TerminatorKind::Drop {
@@ -1098,7 +1130,7 @@ fn build_diverge_scope<'tcx>(cfg: &mut CFG<'tcx>,
             }
         };
     }
-    push_storage_deads(cfg, &mut target, &mut storage_deads, target_built_by_us, source_scope);
+    push_storage_deads(cfg, target, &mut storage_deads);
     *scope.cached_unwind.ref_mut(generator_drop) = Some(target);
 
     assert!(storage_deads.is_empty());
@@ -1108,23 +1140,13 @@ fn build_diverge_scope<'tcx>(cfg: &mut CFG<'tcx>,
 }
 
 fn push_storage_deads(cfg: &mut CFG<'tcx>,
-                      target: &mut BasicBlock,
-                      storage_deads: &mut Vec<Statement<'tcx>>,
-                      target_built_by_us: bool,
-                      source_scope: SourceScope) {
+                      target: BasicBlock,
+                      storage_deads: &mut Vec<Statement<'tcx>>) {
     if storage_deads.is_empty() { return; }
-    if !target_built_by_us {
-        // We cannot add statements to an existing block, so we create a new
-        // block for our StorageDead statements.
-        let block = cfg.start_new_cleanup_block();
-        let source_info = SourceInfo { span: DUMMY_SP, scope: source_scope };
-        cfg.terminate(block, source_info, TerminatorKind::Goto { target: *target });
-        *target = block;
-    }
-    let statements = &mut cfg.block_data_mut(*target).statements;
+    let statements = &mut cfg.block_data_mut(target).statements;
     storage_deads.reverse();
     debug!("push_storage_deads({:?}), storage_deads={:?}, statements={:?}",
-           *target, storage_deads, statements);
+           target, storage_deads, statements);
     storage_deads.append(statements);
     mem::swap(statements, storage_deads);
     assert!(storage_deads.is_empty());
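With block creation now handled in the `DropKind::Storage` arm of `build_diverge_scope`, `push_storage_deads` shrinks to splicing the queued `StorageDead` statements onto the front of an existing block's statement list. A minimal sketch of that splice on plain vectors, with hypothetical statement strings standing in for MIR statements (the real code operates on `cfg.block_data_mut(target).statements`):

use std::mem;

// Stand-in for a MIR statement list: reverse the queued StorageDead
// statements, put the block's existing statements after them, then swap the
// buffers back so `statements` holds the combined list, mirroring the
// reverse/append/swap sequence in the real helper.
fn push_storage_deads(statements: &mut Vec<&'static str>, storage_deads: &mut Vec<&'static str>) {
    if storage_deads.is_empty() { return; }
    storage_deads.reverse();
    storage_deads.append(statements);
    mem::swap(statements, storage_deads);
    assert!(storage_deads.is_empty());
}

fn main() {
    let mut block = vec!["Drop(x)"];
    let mut deads = vec!["StorageDead(_3)", "StorageDead(_2)"]; // queued in collection order
    push_storage_deads(&mut block, &mut deads);
    assert_eq!(block, ["StorageDead(_2)", "StorageDead(_3)", "Drop(x)"]);
}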