@@ -6,8 +6,6 @@
 //! Unstable sorting is compatible with libcore because it doesn't allocate memory, unlike our
 //! stable sorting implementation.

-// ignore-tidy-undocumented-unsafe
-
 use crate::cmp;
 use crate::mem::{self, MaybeUninit};
 use crate::ptr;
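Removing the `ignore-tidy-undocumented-unsafe` opt-out means tidy now requires every `unsafe` block in this file to carry a `// SAFETY:` comment justifying its preconditions, which is what the remaining hunks add. A minimal standalone sketch of that convention (the example function is ours, not from the file):

```rust
fn first_element(v: &[i32]) -> Option<i32> {
    if v.is_empty() {
        return None;
    }
    // SAFETY: `v` is non-empty (checked above), so index 0 is in bounds.
    Some(unsafe { *v.get_unchecked(0) })
}
```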
@@ -291,6 +289,9 @@ where
             } else if start_r < end_r {
                 block_l = rem;
             } else {
+                // There were the same number of elements to switch on both blocks during the last
+                // iteration, so there are no remaining elements on either block. Cover the remaining
+                // items with roughly equally-sized blocks.
                 block_l = rem / 2;
                 block_r = rem - block_l;
             }
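As a quick illustration of the comment's claim, this split covers `rem` exactly, with the two blocks differing in size by at most one even when `rem` is odd (standalone sketch, not library code):

```rust
fn split(rem: usize) -> (usize, usize) {
    let block_l = rem / 2;
    let block_r = rem - block_l;
    (block_l, block_r)
}

fn main() {
    // The two blocks always sum to `rem`.
    assert_eq!(split(7), (3, 4));
    assert_eq!(split(8), (4, 4));
}
```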
@@ -437,6 +438,17 @@ where
         // Move its remaining out-of-order elements to the far right.
         debug_assert_eq!(width(l, r), block_l);
         while start_l < end_l {
+            // remaining-elements-safety
+            // SAFETY: while the loop condition holds there are still elements in `offsets_l`, so it
+            // is safe to point `end_l` to the previous element.
+            //
+            // The `ptr::swap` is safe if both its arguments are valid for reads and writes:
+            //  - Per the debug assert above, the distance between `l` and `r` is `block_l`
+            //    elements, so there can be at most `block_l` remaining offsets between `start_l`
+            //    and `end_l`. This means `r` will be moved at most `block_l` steps back, which
+            //    makes the `r.offset` calls valid (at that point `l == r`).
+            //  - `offsets_l` contains valid offsets into `v` collected during the partitioning of
+            //    the last block, so the `l.offset` calls are valid.
             unsafe {
                 end_l = end_l.offset(-1);
                 ptr::swap(l.offset(*end_l as isize), r.offset(-1));
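The loop's shape, recast with safe indexing over a plain slice (our illustration with a hypothetical `drain_to_back` helper, not the library's code): offsets recorded during partitioning are drained from the end, each one swapping an out-of-order element toward the back of the range.

```rust
fn drain_to_back(v: &mut [i32], offsets: &[usize]) {
    let mut back = v.len();
    for &off in offsets.iter().rev() {
        back -= 1;
        // Mirrors `ptr::swap(l.offset(*end_l as isize), r.offset(-1))`:
        // the recorded offset is swapped with the last unprocessed slot.
        v.swap(off, back);
    }
}

fn main() {
    let mut v = [9, 1, 8, 2, 7];
    drain_to_back(&mut v, &[0, 2, 4]);
    assert_eq!(v, [2, 1, 9, 8, 7]); // 9, 8, 7 now sit at the back
}
```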
@@ -449,6 +461,7 @@ where
         // Move its remaining out-of-order elements to the far left.
         debug_assert_eq!(width(l, r), block_r);
         while start_r < end_r {
+            // SAFETY: See the reasoning in [remaining-elements-safety].
             unsafe {
                 end_r = end_r.offset(-1);
                 ptr::swap(l, r.offset(-(*end_r as isize) - 1));
@@ -481,6 +494,8 @@ where

     // Read the pivot into a stack-allocated variable for efficiency. If a following comparison
     // operation panics, the pivot will be automatically written back into the slice.
+
+    // SAFETY: `pivot` is a reference to the first element of `v`, so `ptr::read` is safe.
     let mut tmp = mem::ManuallyDrop::new(unsafe { ptr::read(pivot) });
     let _pivot_guard = CopyOnDrop { src: &mut *tmp, dest: pivot };
     let pivot = &*tmp;
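The guard pattern this hunk documents is worth seeing in isolation: a value read out with `ptr::read` is written back on drop, so a panicking comparator cannot leave the slice with a missing element. A minimal standalone sketch, assuming a simplified `CopyOnDrop` with the same field names as the diff (our illustration, not the library's definition):

```rust
use std::ptr;

struct CopyOnDrop<T> {
    src: *const T,
    dest: *mut T,
}

impl<T> Drop for CopyOnDrop<T> {
    fn drop(&mut self) {
        // SAFETY: in this sketch, both pointers come from references that
        // outlive the guard, so they are valid for a one-element copy.
        unsafe { ptr::copy_nonoverlapping(self.src, self.dest, 1) };
    }
}

fn main() {
    let mut slot = 1;
    {
        let tmp = 42;
        // While the guard lives, `tmp` is written back into `slot` on every
        // exit path from this scope, including unwinding from a panic.
        let _guard = CopyOnDrop { src: &tmp, dest: &mut slot };
    }
    assert_eq!(slot, 42);
}
```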
@@ -646,6 +661,12 @@ where

     if len >= 8 {
         // Swaps indices so that `v[a] <= v[b]`.
+        // SAFETY: `len >= 8` so there are at least two elements in the neighborhoods of
+        // `a`, `b` and `c`. This means the three calls to `sort_adjacent` result in
+        // corresponding calls to `sort3` with valid 3-item neighborhoods around each
+        // pointer, which in turn means the calls to `sort2` are done with valid
+        // references. Thus the `v.get_unchecked` calls are safe, as is the `ptr::swap`
+        // call.
         let mut sort2 = |a: &mut usize, b: &mut usize| unsafe {
             if is_less(v.get_unchecked(*b), v.get_unchecked(*a)) {
                 ptr::swap(a, b);
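To see what `sort2` builds up to, here is the sort2/sort3 shape recast with safe indexing: a hypothetical `median_of_three` orders three indices by the values they select, so the middle index ends up pointing at the median (our sketch, not the library's pivot-selection code):

```rust
fn median_of_three(v: &[i32], mut a: usize, mut b: usize, mut c: usize) -> usize {
    // Swaps indices so that `v[a] <= v[b]`, as in the diff, but with checked
    // indexing instead of `get_unchecked` and `ptr::swap`.
    let sort2 = |x: &mut usize, y: &mut usize| {
        if v[*y] < v[*x] {
            std::mem::swap(x, y);
        }
    };
    sort2(&mut a, &mut b);
    sort2(&mut b, &mut c);
    sort2(&mut a, &mut b);
    // Now v[a] <= v[b] <= v[c], so `b` selects the median.
    b
}

fn main() {
    let v = [5, 1, 9, 3, 7];
    assert_eq!(median_of_three(&v, 0, 2, 4), 4); // v[4] == 7, the median of {5, 9, 7}
}
```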