Commit 891ca5f

Auto merge of #90821 - scottmcm:new-slice-reverse, r=Mark-Simulacrum
MIRI says `reverse` is UB, so replace it with something LLVM can vectorize

For small types with padding, the current implementation is UB because it does integer operations on uninit values.

```
error: Undefined Behavior: using uninitialized data, but this operation requires initialized memory
   --> /playground/.rustup/toolchains/nightly-x86_64-unknown-linux-gnu/lib/rustlib/src/rust/library/core/src/num/mod.rs:836:5
    |
836 | / uint_impl! { u32, u32, i32, 32, 4294967295, 8, "0x10000b3", "0xb301", "0x12345678",
837 | | "0x78563412", "0x1e6a2c48", "[0x78, 0x56, 0x34, 0x12]", "[0x12, 0x34, 0x56, 0x78]", "", "" }
    | |________________________________________________________________________________________________^ using uninitialized data, but this operation requires initialized memory
    |
    = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
    = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
    = note: inside `core::num::<impl u32>::rotate_left` at /playground/.rustup/toolchains/nightly-x86_64-unknown-linux-gnu/lib/rustlib/src/rust/library/core/src/num/uint_macros.rs:211:13
    = note: inside `core::slice::<impl [Foo]>::reverse` at /playground/.rustup/toolchains/nightly-x86_64-unknown-linux-gnu/lib/rustlib/src/rust/library/core/src/slice/mod.rs:701:58
```

<https://play.rust-lang.org/?version=stable&mode=debug&edition=2021&gist=340739f22ca5b457e1da6f361768edc6>

But LLVM has gotten smarter since I wrote the previous implementation in 2017, so this PR removes all the manual magic and just writes it in such a way that LLVM will vectorize. This code is much simpler and has very little `unsafe`, and is actually faster to boot!

If you're curious to see the codegen: <https://rust.godbolt.org/z/Pcn13Y9E3>

Before:

```
running 7 tests
test slice::reverse_simd_f64x4 ... bench:  17,940 ns/iter (+/- 481) = 58448 MB/s
test slice::reverse_u128       ... bench:  17,758 ns/iter (+/- 205) = 59048 MB/s
test slice::reverse_u16        ... bench: 158,234 ns/iter (+/- 6,876) = 6626 MB/s
test slice::reverse_u32        ... bench:  62,047 ns/iter (+/- 1,117) = 16899 MB/s
test slice::reverse_u64        ... bench:  31,582 ns/iter (+/- 552) = 33201 MB/s
test slice::reverse_u8         ... bench:  81,253 ns/iter (+/- 1,510) = 12905 MB/s
test slice::reverse_u8x3       ... bench: 270,615 ns/iter (+/- 11,463) = 3874 MB/s
```

After:

```
running 7 tests
test slice::reverse_simd_f64x4 ... bench:  17,731 ns/iter (+/- 306) = 59137 MB/s
test slice::reverse_u128       ... bench:  17,919 ns/iter (+/- 239) = 58517 MB/s
test slice::reverse_u16        ... bench:  43,160 ns/iter (+/- 607) = 24295 MB/s
test slice::reverse_u32        ... bench:  21,065 ns/iter (+/- 371) = 49778 MB/s
test slice::reverse_u64        ... bench:  21,118 ns/iter (+/- 482) = 49653 MB/s
test slice::reverse_u8         ... bench:  76,878 ns/iter (+/- 1,688) = 13639 MB/s
test slice::reverse_u8x3       ... bench: 264,723 ns/iter (+/- 5,544) = 3961 MB/s
```

Those are the existing benches, <https://github.com/rust-lang/rust/blob/14a2fd640e0df9ee8cc1e04280b0c3aff93c42da/library/alloc/benches/slice.rs#L322-L346>
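The playground link above holds the actual reproducer; as a rough, hypothetical stand-in (the playground's `Foo` definition is not reproduced here), any 2-byte type containing a padding byte exercises the old rotate-by-16 special case:

```rust
// Hypothetical reproducer: a 2-byte type with one padding byte. The old
// size_of::<T>() == 2 path read two elements at a time as a `u32` and called
// `rotate_left` on it -- including the uninitialized padding bytes.
#[derive(Debug, Clone, Copy)]
#[repr(align(2))]
struct Foo(u8); // size 2, align 2: one data byte plus one padding byte

fn main() {
    let mut v = [Foo(1), Foo(2), Foo(3), Foo(4)];
    // Under Miri (`cargo miri run`), the old implementation reported the UB
    // shown above on this call; the new implementation does not.
    v.reverse();
    println!("{:?}", v);
}
```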
2 parents c26746a + f541dd1 commit 891ca5f

2 files changed (+59, -91 lines)

library/core/src/slice/mod.rs (+32, -91)
```diff
@@ -623,100 +623,41 @@ impl<T> [T] {
     #[stable(feature = "rust1", since = "1.0.0")]
     #[inline]
     pub fn reverse(&mut self) {
-        let mut i: usize = 0;
-        let ln = self.len();
-
-        // For very small types, all the individual reads in the normal
-        // path perform poorly. We can do better, given efficient unaligned
-        // load/store, by loading a larger chunk and reversing a register.
-
-        // Ideally LLVM would do this for us, as it knows better than we do
-        // whether unaligned reads are efficient (since that changes between
-        // different ARM versions, for example) and what the best chunk size
-        // would be. Unfortunately, as of LLVM 4.0 (2017-05) it only unrolls
-        // the loop, so we need to do this ourselves. (Hypothesis: reverse
-        // is troublesome because the sides can be aligned differently --
-        // will be, when the length is odd -- so there's no way of emitting
-        // pre- and postludes to use fully-aligned SIMD in the middle.)
-
-        let fast_unaligned = cfg!(any(target_arch = "x86", target_arch = "x86_64"));
-
-        if fast_unaligned && mem::size_of::<T>() == 1 {
-            // Use the llvm.bswap intrinsic to reverse u8s in a usize
-            let chunk = mem::size_of::<usize>();
-            while i + chunk - 1 < ln / 2 {
-                // SAFETY: There are several things to check here:
-                //
-                // - Note that `chunk` is either 4 or 8 due to the cfg check
-                //   above. So `chunk - 1` is positive.
-                // - Indexing with index `i` is fine as the loop check guarantees
-                //   `i + chunk - 1 < ln / 2`
-                //   <=> `i < ln / 2 - (chunk - 1) < ln / 2 < ln`.
-                // - Indexing with index `ln - i - chunk = ln - (i + chunk)` is fine:
-                //   - `i + chunk > 0` is trivially true.
-                //   - The loop check guarantees:
-                //     `i + chunk - 1 < ln / 2`
-                //     <=> `i + chunk ≤ ln / 2 ≤ ln`, thus subtraction does not underflow.
-                // - The `read_unaligned` and `write_unaligned` calls are fine:
-                //   - `pa` points to index `i` where `i < ln / 2 - (chunk - 1)`
-                //     (see above) and `pb` points to index `ln - i - chunk`, so
-                //     both are at least `chunk`
-                //     many bytes away from the end of `self`.
-                //   - Any initialized memory is valid `usize`.
-                unsafe {
-                    let ptr = self.as_mut_ptr();
-                    let pa = ptr.add(i);
-                    let pb = ptr.add(ln - i - chunk);
-                    let va = ptr::read_unaligned(pa as *mut usize);
-                    let vb = ptr::read_unaligned(pb as *mut usize);
-                    ptr::write_unaligned(pa as *mut usize, vb.swap_bytes());
-                    ptr::write_unaligned(pb as *mut usize, va.swap_bytes());
-                }
-                i += chunk;
-            }
-        }
+        let half_len = self.len() / 2;
+        let Range { start, end } = self.as_mut_ptr_range();
+
+        // These slices will skip the middle item for an odd length,
+        // since that one doesn't need to move.
+        let (front_half, back_half) =
+            // SAFETY: Both are subparts of the original slice, so the memory
+            // range is valid, and they don't overlap because they're each only
+            // half (or less) of the original slice.
+            unsafe {
+                (
+                    slice::from_raw_parts_mut(start, half_len),
+                    slice::from_raw_parts_mut(end.sub(half_len), half_len),
+                )
+            };
 
-        if fast_unaligned && mem::size_of::<T>() == 2 {
-            // Use rotate-by-16 to reverse u16s in a u32
-            let chunk = mem::size_of::<u32>() / 2;
-            while i + chunk - 1 < ln / 2 {
-                // SAFETY: An unaligned u32 can be read from `i` if `i + 1 < ln`
-                // (and obviously `i < ln`), because each element is 2 bytes and
-                // we're reading 4.
-                //
-                // `i + chunk - 1 < ln / 2` # while condition
-                // `i + 2 - 1 < ln / 2`
-                // `i + 1 < ln / 2`
-                //
-                // Since it's less than the length divided by 2, then it must be
-                // in bounds.
-                //
-                // This also means that the condition `0 < i + chunk <= ln` is
-                // always respected, ensuring the `pb` pointer can be used
-                // safely.
-                unsafe {
-                    let ptr = self.as_mut_ptr();
-                    let pa = ptr.add(i);
-                    let pb = ptr.add(ln - i - chunk);
-                    let va = ptr::read_unaligned(pa as *mut u32);
-                    let vb = ptr::read_unaligned(pb as *mut u32);
-                    ptr::write_unaligned(pa as *mut u32, vb.rotate_left(16));
-                    ptr::write_unaligned(pb as *mut u32, va.rotate_left(16));
-                }
-                i += chunk;
-            }
-        }
+        // Introducing a function boundary here means that the two halves
+        // get `noalias` markers, allowing better optimization as LLVM
+        // knows that they're disjoint, unlike in the original slice.
+        revswap(front_half, back_half, half_len);
 
-        while i < ln / 2 {
-            // SAFETY: `i` is inferior to half the length of the slice so
-            // accessing `i` and `ln - i - 1` is safe (`i` starts at 0 and
-            // will not go further than `ln / 2 - 1`).
-            // The resulting pointers `pa` and `pb` are therefore valid and
-            // aligned, and can be read from and written to.
-            unsafe {
-                self.swap_unchecked(i, ln - i - 1);
+        #[inline]
+        fn revswap<T>(a: &mut [T], b: &mut [T], n: usize) {
+            debug_assert_eq!(a.len(), n);
+            debug_assert_eq!(b.len(), n);
+
+            // Because this function is first compiled in isolation,
+            // this check tells LLVM that the indexing below is
+            // in-bounds. Then after inlining -- once the actual
+            // lengths of the slices are known -- it's removed.
+            let (a, b) = (&mut a[..n], &mut b[..n]);
+
+            for i in 0..n {
+                mem::swap(&mut a[i], &mut b[n - 1 - i]);
             }
-            i += 1;
         }
     }
 
```
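Not part of the change itself, but a purely safe sketch of the same split-then-swap shape (using `split_at_mut` instead of raw pointers; `reverse_by_halves` is just an illustrative name) may help show why the two halves are disjoint, which is what the `noalias` comment in the diff is about:

```rust
use std::mem;

// Safe-code sketch of the split-then-swap idea: the two halves come from
// `split_at_mut`, so the borrow checker (and LLVM) can see they don't alias.
fn reverse_by_halves<T>(v: &mut [T]) {
    let half_len = v.len() / 2;
    let mid = v.len() - half_len; // for odd lengths this leaves the middle element in `front`
    let (front, back) = v.split_at_mut(mid);
    for i in 0..half_len {
        mem::swap(&mut front[i], &mut back[half_len - 1 - i]);
    }
}

fn main() {
    let mut xs = [1u8, 2, 3, 4, 5];
    reverse_by_halves(&mut xs);
    assert_eq!(xs, [5, 4, 3, 2, 1]);
}
```

The real implementation in the diff builds the halves from raw pointers and relies on the `revswap` function boundary for the same aliasing guarantee.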

src/test/codegen/slice-reverse.rs (+27)
```diff
@@ -0,0 +1,27 @@
+// compile-flags: -O
+// only-x86_64
+// ignore-debug: the debug assertions in from_raw_parts get in the way
+
+#![crate_type = "lib"]
+
+// CHECK-LABEL: @slice_reverse_u8
+#[no_mangle]
+pub fn slice_reverse_u8(slice: &mut [u8]) {
+    // CHECK-NOT: panic_bounds_check
+    // CHECK-NOT: slice_end_index_len_fail
+    // CHECK: shufflevector <{{[0-9]+}} x i8>
+    // CHECK-NOT: panic_bounds_check
+    // CHECK-NOT: slice_end_index_len_fail
+    slice.reverse();
+}
+
+// CHECK-LABEL: @slice_reverse_i32
+#[no_mangle]
+pub fn slice_reverse_i32(slice: &mut [i32]) {
+    // CHECK-NOT: panic_bounds_check
+    // CHECK-NOT: slice_end_index_len_fail
+    // CHECK: shufflevector <{{[0-9]+}} x i32>
+    // CHECK-NOT: panic_bounds_check
+    // CHECK-NOT: slice_end_index_len_fail
+    slice.reverse();
+}
```
