@@ -978,6 +978,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
         // `head` and `len` are at most `isize::MAX` and `target_cap < self.capacity()`, so nothing can
         // overflow.
         let tail_outside = (target_cap + 1..=self.capacity()).contains(&(self.head + self.len));
+        // Used in the drop guard below.
+        let old_head = self.head;
 
         if self.len == 0 {
             self.head = 0;
@@ -1030,12 +1032,74 @@ impl<T, A: Allocator> VecDeque<T, A> {
             }
             self.head = new_head;
         }
-        self.buf.shrink_to_fit(target_cap);
+
+        struct Guard<'a, T, A: Allocator> {
+            deque: &'a mut VecDeque<T, A>,
+            old_head: usize,
+            target_cap: usize,
+        }
+
+        impl<T, A: Allocator> Drop for Guard<'_, T, A> {
+            #[cold]
+            fn drop(&mut self) {
+                unsafe {
+                    // SAFETY: This is only called if `buf.shrink_to_fit` unwinds,
+                    // which is the only time it's safe to call `abort_shrink`.
+                    self.deque.abort_shrink(self.old_head, self.target_cap)
+                }
+            }
+        }
+
+        let guard = Guard { deque: self, old_head, target_cap };
+
+        guard.deque.buf.shrink_to_fit(target_cap);
+
+        // Don't drop the guard if we didn't unwind.
+        mem::forget(guard);
 
         debug_assert!(self.head < self.capacity() || self.capacity() == 0);
         debug_assert!(self.len <= self.capacity());
     }
 
+    /// Reverts the deque back into a consistent state in case `shrink_to` failed.
+    /// This is necessary to prevent UB if the backing allocator returns an error
+    /// from `shrink` and `handle_alloc_error` subsequently unwinds (see #123369).
+    ///
+    /// `old_head` refers to the head index before `shrink_to` was called. `target_cap`
+    /// is the capacity that it was trying to shrink to.
+    unsafe fn abort_shrink(&mut self, old_head: usize, target_cap: usize) {
+        // Moral equivalent of `self.head + self.len <= target_cap`. Won't overflow
+        // because `self.len <= target_cap`.
+        if self.head <= target_cap - self.len {
+            // The deque's buffer is contiguous, so no need to copy anything around.
+            return;
+        }
+
+        // `shrink_to` already copied the head to fit into the new capacity, so this won't overflow.
+        let head_len = target_cap - self.head;
+        // `self.head > target_cap - self.len` => `self.len > target_cap - self.head =: head_len`, so this must be positive.
+        let tail_len = self.len - head_len;
+
+        if tail_len <= cmp::min(head_len, self.capacity() - target_cap) {
+            // There's enough spare capacity to copy the tail to the back (because `tail_len <= self.capacity() - target_cap`),
+            // and copying the tail should be cheaper than copying the head (because `tail_len <= head_len`).
+
+            unsafe {
+                // The old tail and the new tail can't overlap because the head slice lies between them.
+                // The head slice ends at `target_cap`, so that's where we copy to.
+                self.copy_nonoverlapping(0, target_cap, tail_len);
+            }
+        } else {
+            // Either there's not enough spare capacity to make the deque contiguous, or the head is
+            // shorter than the tail (and therefore hopefully cheaper to copy).
+            unsafe {
+                // The old and the new head slice can overlap, so we can't use `copy_nonoverlapping` here.
+                self.copy(self.head, old_head, head_len);
+                self.head = old_head;
+            }
+        }
+    }
+
     /// Shortens the deque, keeping the first `len` elements and dropping
     /// the rest.
     ///
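
The fix hinges on a drop guard that runs recovery code only if `buf.shrink_to_fit` unwinds: on the success path the guard is leaked with `mem::forget`, so its `Drop` impl never runs. As a standalone illustration of that guard-plus-forget idiom (hypothetical names, not the stdlib internals):

```rust
use std::mem;

/// Restores `data` from `saved` if the mutation below unwinds.
struct RollbackGuard<'a> {
    data: &'a mut Vec<i32>,
    saved: Vec<i32>,
}

impl Drop for RollbackGuard<'_> {
    fn drop(&mut self) {
        // Only reached on unwind: the success path `mem::forget`s the guard.
        *self.data = mem::take(&mut self.saved);
    }
}

fn mutate_with_rollback(data: &mut Vec<i32>, f: impl FnOnce(&mut Vec<i32>)) {
    let saved = data.clone();
    let guard = RollbackGuard { data, saved };
    f(&mut *guard.data); // may unwind
    mem::forget(guard); // success: skip the rollback in `Drop`
}
```

The diff's `Guard` has the same shape, with `abort_shrink` as the rollback; `#[cold]` just tells the compiler the drop path is unlikely to be taken.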
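For context on the failure mode: #123369 needs an allocator whose `shrink` fails, combined with an alloc-error hook that panics, so that `handle_alloc_error` unwinds out of `shrink_to` mid-operation. Below is a minimal sketch of such a reproduction; it assumes the nightly `allocator_api` and `alloc_error_hook` features, and `BadAlloc` plus the exact assertions are illustrative, not taken from this PR.

```rust
#![feature(allocator_api, alloc_error_hook)]

use std::alloc::{set_alloc_error_hook, AllocError, Allocator, Global, Layout};
use std::collections::VecDeque;
use std::panic::{catch_unwind, AssertUnwindSafe};
use std::ptr::NonNull;

/// Delegates to `Global`, except that every `shrink` fails.
struct BadAlloc;

unsafe impl Allocator for BadAlloc {
    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        // Zeroed, so that even an inconsistent deque state would not read
        // uninitialized memory while we assert on it below.
        Global.allocate_zeroed(layout)
    }

    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
        unsafe { Global.deallocate(ptr, layout) }
    }

    unsafe fn shrink(
        &self,
        _ptr: NonNull<u8>,
        _old_layout: Layout,
        _new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        Err(AllocError) // forces `shrink_to` into `handle_alloc_error`
    }
}

fn main() {
    // Make `handle_alloc_error` panic (and thus unwind) instead of aborting.
    set_alloc_error_hook(|_| panic!("alloc error"));

    let mut v = VecDeque::with_capacity_in(16, BadAlloc);
    v.push_back(1);
    // `push_front` wraps `head` around, so the deque is not contiguous.
    v.push_front(2);

    // `shrink_to_fit` hits `BadAlloc::shrink`, which fails, and the hook unwinds.
    assert!(catch_unwind(AssertUnwindSafe(|| v.shrink_to_fit())).is_err());

    // With the drop guard in place, the deque must still be consistent.
    assert_eq!(v.iter().copied().sum::<i32>(), 3);
}
```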