
Commit 11663cd

committed Dec 19, 2024
Auto merge of #134486 - compiler-errors:drop-for-lint, r=nikomatsakis
Make sure we handle `backwards_incompatible_lint` drops appropriately in drop elaboration

In #131326, a new kind of scheduled drop (`drop_kind: DropKind::Value` + `backwards_incompatible_lint: true`) was added so that we could insert a new kind of no-op MIR statement (`backward incompatible drop`) for linting purposes. These drops were intended to have *no side-effects*, but drop elaboration forgot to handle them specially and treated them as normal drops in most of the code. This ends up being **unsound**, since we insert more than one drop call for some values, which means that `Drop::drop` could be called more than once.

This PR fixes that by splitting the lint-only case out into its own `DropKind::ForLint` and adjusting the code accordingly. I'm not totally certain whether every place I've adjusted is reachable or correct, but I'm pretty certain the result is *more* correct than it was previously.

cc `@dingxiangfei2009`
r? nikomatsakis

Fixes #134482
2 parents 3bf62cc + 6564403 commit 11663cd
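
To make the soundness concern concrete, here is a rough sketch of the shape of user code the lint machinery is scheduled for: a type with an observable destructor appearing as a block tail-expression temporary, which is the Edition 2024 backward-incompatible-drop situation the hint statement marks. The names (`Loud`, `len_of_temp`) are hypothetical and this is not the reproducer from #134482; whether the lint actually fires depends on details not shown here. The point is that each such value must still run `Drop::drop` exactly once, regardless of the extra lint bookkeeping.

```rust
// Hypothetical illustration (not the #134482 reproducer): a type with an
// observable destructor used as a tail-expression temporary. The lint's
// scheduled "drop" for this value must stay a no-op hint; if drop elaboration
// turned it into a real drop, the destructor could run twice.
struct Loud(&'static str);

impl Drop for Loud {
    fn drop(&mut self) {
        println!("dropping {}", self.0);
    }
}

fn len_of_temp(x: &Loud) -> usize {
    x.0.len()
}

fn main() {
    // The temporary `Loud` in the block's tail expression is the kind of value
    // whose drop timing differs between Edition 2021 and Edition 2024.
    let n = { len_of_temp(&Loud("temporary")) };
    println!("{n}");
}
```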

4 files changed: +461 −41 lines
 

‎compiler/rustc_mir_build/src/builder/scope.rs

+107 −41
```diff
@@ -151,15 +151,13 @@ struct DropData {
 
     /// Whether this is a value Drop or a StorageDead.
     kind: DropKind,
-
-    /// Whether this is a backwards-incompatible drop lint
-    backwards_incompatible_lint: bool,
 }
 
 #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
 pub(crate) enum DropKind {
     Value,
     Storage,
+    ForLint,
 }
 
 #[derive(Debug)]
@@ -248,7 +246,7 @@ impl Scope {
     /// use of optimizations in the MIR coroutine transform.
     fn needs_cleanup(&self) -> bool {
         self.drops.iter().any(|drop| match drop.kind {
-            DropKind::Value => true,
+            DropKind::Value | DropKind::ForLint => true,
             DropKind::Storage => false,
         })
     }
@@ -277,12 +275,8 @@ impl DropTree {
         // represents the block in the tree that should be jumped to once all
         // of the required drops have been performed.
         let fake_source_info = SourceInfo::outermost(DUMMY_SP);
-        let fake_data = DropData {
-            source_info: fake_source_info,
-            local: Local::MAX,
-            kind: DropKind::Storage,
-            backwards_incompatible_lint: false,
-        };
+        let fake_data =
+            DropData { source_info: fake_source_info, local: Local::MAX, kind: DropKind::Storage };
         let drops = IndexVec::from_raw(vec![DropNode { data: fake_data, next: DropIdx::MAX }]);
         Self { drops, entry_points: Vec::new(), existing_drops_map: FxHashMap::default() }
     }
@@ -411,6 +405,27 @@ impl DropTree {
                     };
                     cfg.terminate(block, drop_node.data.source_info, terminator);
                 }
+                DropKind::ForLint => {
+                    let stmt = Statement {
+                        source_info: drop_node.data.source_info,
+                        kind: StatementKind::BackwardIncompatibleDropHint {
+                            place: Box::new(drop_node.data.local.into()),
+                            reason: BackwardIncompatibleDropReason::Edition2024,
+                        },
+                    };
+                    cfg.push(block, stmt);
+                    let target = blocks[drop_node.next].unwrap();
+                    if target != block {
+                        // Diagnostics don't use this `Span` but debuginfo
+                        // might. Since we don't want breakpoints to be placed
+                        // here, especially when this is on an unwind path, we
+                        // use `DUMMY_SP`.
+                        let source_info =
+                            SourceInfo { span: DUMMY_SP, ..drop_node.data.source_info };
+                        let terminator = TerminatorKind::Goto { target };
+                        cfg.terminate(block, source_info, terminator);
+                    }
+                }
                 // Root nodes don't correspond to a drop.
                 DropKind::Storage if drop_idx == ROOT_NODE => {}
                 DropKind::Storage => {
@@ -770,12 +785,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                     let local =
                         place.as_local().unwrap_or_else(|| bug!("projection in tail call args"));
 
-                    Some(DropData {
-                        source_info,
-                        local,
-                        kind: DropKind::Value,
-                        backwards_incompatible_lint: false,
-                    })
+                    Some(DropData { source_info, local, kind: DropKind::Value })
                 }
                 Operand::Constant(_) => None,
             })
@@ -822,6 +832,15 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                 });
                 block = next;
             }
+            DropKind::ForLint => {
+                self.cfg.push(block, Statement {
+                    source_info,
+                    kind: StatementKind::BackwardIncompatibleDropHint {
+                        place: Box::new(local.into()),
+                        reason: BackwardIncompatibleDropReason::Edition2024,
+                    },
+                });
+            }
             DropKind::Storage => {
                 // Only temps and vars need their storage dead.
                 assert!(local.index() > self.arg_count);
@@ -1021,7 +1040,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         drop_kind: DropKind,
     ) {
         let needs_drop = match drop_kind {
-            DropKind::Value => {
+            DropKind::Value | DropKind::ForLint => {
                 if !self.local_decls[local].ty.needs_drop(self.tcx, self.typing_env()) {
                     return;
                 }
@@ -1101,7 +1120,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                     source_info: SourceInfo { span: scope_end, scope: scope.source_scope },
                     local,
                     kind: drop_kind,
-                    backwards_incompatible_lint: false,
                 });
 
                 return;
@@ -1132,8 +1150,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                 scope.drops.push(DropData {
                     source_info: SourceInfo { span: scope_end, scope: scope.source_scope },
                     local,
-                    kind: DropKind::Value,
-                    backwards_incompatible_lint: true,
+                    kind: DropKind::ForLint,
                 });
 
                 return;
@@ -1376,12 +1393,23 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
 }
 
 /// Builds drops for `pop_scope` and `leave_top_scope`.
+///
+/// # Parameters
+///
+/// * `unwind_drops`, the drop tree data structure storing what needs to be cleaned up if unwind occurs
+/// * `scope`, describes the drops that will occur on exiting the scope in regular execution
+/// * `block`, the block to branch to once drops are complete (assuming no unwind occurs)
+/// * `unwind_to`, describes the drops that would occur at this point in the code if a
+///   panic occurred (a subset of the drops in `scope`, since we sometimes elide StorageDead and other
+///   instructions on unwinding)
+/// * `storage_dead_on_unwind`, if true, then we should emit `StorageDead` even when unwinding
+/// * `arg_count`, number of MIR local variables corresponding to fn arguments (used to assert that we don't drop those)
 fn build_scope_drops<'tcx>(
     cfg: &mut CFG<'tcx>,
     unwind_drops: &mut DropTree,
     scope: &Scope,
-    mut block: BasicBlock,
-    mut unwind_to: DropIdx,
+    block: BasicBlock,
+    unwind_to: DropIdx,
     storage_dead_on_unwind: bool,
     arg_count: usize,
 ) -> BlockAnd<()> {
@@ -1406,6 +1434,18 @@ fn build_scope_drops<'tcx>(
     // drops for the unwind path should have already been generated by
     // `diverge_cleanup_gen`.
 
+    // `unwind_to` indicates what needs to be dropped should unwinding occur.
+    // This is a subset of what needs to be dropped when exiting the scope.
+    // As we unwind the scope, we will also move `unwind_to` backwards to match,
+    // so that we can use it should a destructor panic.
+    let mut unwind_to = unwind_to;
+
+    // The block that we should jump to after drops complete. We start by building the final drop (`drops[n]`
+    // in the diagram above) and then build the drops (e.g., `drop[1]`, `drop[0]`) that come before it.
+    // block begins as the successor of `drops[n]` and then becomes `drops[n]` so that `drops[n-1]`
+    // will branch to `drops[n]`.
+    let mut block = block;
+
     for drop_data in scope.drops.iter().rev() {
         let source_info = drop_data.source_info;
         let local = drop_data.local;
@@ -1415,6 +1455,9 @@ fn build_scope_drops<'tcx>(
                 // `unwind_to` should drop the value that we're about to
                 // schedule. If dropping this value panics, then we continue
                 // with the *next* value on the unwind path.
+                //
+                // We adjust this BEFORE we create the drop (e.g., `drops[n]`)
+                // because `drops[n]` should unwind to `drops[n-1]`.
                 debug_assert_eq!(unwind_drops.drops[unwind_to].data.local, drop_data.local);
                 debug_assert_eq!(unwind_drops.drops[unwind_to].data.kind, drop_data.kind);
                 unwind_to = unwind_drops.drops[unwind_to].next;
@@ -1427,27 +1470,50 @@ fn build_scope_drops<'tcx>(
                     continue;
                 }
 
-                if drop_data.backwards_incompatible_lint {
-                    cfg.push(block, Statement {
-                        source_info,
-                        kind: StatementKind::BackwardIncompatibleDropHint {
-                            place: Box::new(local.into()),
-                            reason: BackwardIncompatibleDropReason::Edition2024,
-                        },
-                    });
-                } else {
-                    unwind_drops.add_entry_point(block, unwind_to);
-                    let next = cfg.start_new_block();
-                    cfg.terminate(block, source_info, TerminatorKind::Drop {
-                        place: local.into(),
-                        target: next,
-                        unwind: UnwindAction::Continue,
-                        replace: false,
-                    });
-                    block = next;
+                unwind_drops.add_entry_point(block, unwind_to);
+                let next = cfg.start_new_block();
+                cfg.terminate(block, source_info, TerminatorKind::Drop {
+                    place: local.into(),
+                    target: next,
+                    unwind: UnwindAction::Continue,
+                    replace: false,
+                });
+                block = next;
+            }
+            DropKind::ForLint => {
+                // If the operand has been moved, and we are not on an unwind
+                // path, then don't generate the drop. (We only take this into
+                // account for non-unwind paths so as not to disturb the
+                // caching mechanism.)
+                if scope.moved_locals.iter().any(|&o| o == local) {
+                    continue;
                 }
+
+                // As in the `DropKind::Storage` case below:
+                // normally lint-related drops are not emitted for unwind,
+                // so we can just leave `unwind_to` unmodified, but in some
+                // cases we emit things ALSO on the unwind path, so we need to adjust
+                // `unwind_to` in that case.
+                if storage_dead_on_unwind {
+                    debug_assert_eq!(unwind_drops.drops[unwind_to].data.local, drop_data.local);
+                    debug_assert_eq!(unwind_drops.drops[unwind_to].data.kind, drop_data.kind);
+                    unwind_to = unwind_drops.drops[unwind_to].next;
+                }
+
+                cfg.push(block, Statement {
+                    source_info,
+                    kind: StatementKind::BackwardIncompatibleDropHint {
+                        place: Box::new(local.into()),
+                        reason: BackwardIncompatibleDropReason::Edition2024,
+                    },
+                });
             }
             DropKind::Storage => {
+                // Ordinarily, storage-dead nodes are not emitted on unwind, so we don't
+                // need to adjust `unwind_to` on this path. However, in some specific cases
+                // we *do* emit storage-dead nodes on the unwind path, and in that case now that
+                // the storage-dead has completed, we need to adjust the `unwind_to` pointer
+                // so that any future drops we emit will not register storage-dead.
                 if storage_dead_on_unwind {
                     debug_assert_eq!(unwind_drops.drops[unwind_to].data.local, drop_data.local);
                     debug_assert_eq!(unwind_drops.drops[unwind_to].data.kind, drop_data.kind);
@@ -1497,7 +1563,7 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> {
                         unwind_indices.push(unwind_indices[drop_node.next]);
                     }
                 }
-                DropKind::Value => {
+                DropKind::Value | DropKind::ForLint => {
                     let unwind_drop = self
                         .scopes
                         .unwind_drops
```
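
For readers skimming the diff, here is a simplified, self-contained model of the invariant the patch restores. This is not rustc code: `DropKind` mirrors the patched enum, but `Lowered`, `lower`, and the locals are made-up stand-ins. The idea is that each scheduled drop lowers to exactly one kind of MIR effect, and only `DropKind::Value` may become a real `Drop`; a `ForLint` entry only ever becomes a side-effect-free hint, so scheduling both for the same value still runs the destructor once.

```rust
// Simplified model (not rustc code) of the invariant after this patch.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum DropKind {
    Value,
    Storage,
    ForLint,
}

#[derive(Debug, PartialEq)]
enum Lowered {
    DropTerminator { local: usize },
    BackwardIncompatibleDropHint { local: usize },
    StorageDead { local: usize },
}

fn lower(kind: DropKind, local: usize) -> Lowered {
    match kind {
        // The only case that can call `Drop::drop` at runtime.
        DropKind::Value => Lowered::DropTerminator { local },
        // Lint bookkeeping only: must never become a second real drop.
        DropKind::ForLint => Lowered::BackwardIncompatibleDropHint { local },
        DropKind::Storage => Lowered::StorageDead { local },
    }
}

fn main() {
    // Scheduling both a lint hint and a real drop for the same local must
    // yield exactly one real drop terminator.
    let scheduled = [(DropKind::ForLint, 3), (DropKind::Value, 3)];
    let lowered: Vec<_> = scheduled.into_iter().map(|(k, l)| lower(k, l)).collect();
    let real_drops = lowered
        .iter()
        .filter(|s| matches!(s, Lowered::DropTerminator { .. }))
        .count();
    assert_eq!(real_drops, 1);
    println!("{lowered:?}");
}
```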
