Skip to content

Commit 1256640

Browse files
committed
Auto merge of #123878 - jwong101:inplacecollect, r=<try>
optimize inplace collection of Vec This PR has the following changes: 1. Using `usize::unchecked_mul` in https://github.com/rust-lang/rust/blob/79424056b05eaa9563d16dfab9b9a0c8f033f220/library/alloc/src/vec/in_place_collect.rs#L262 as LLVM, does not know that the operation can't wrap, since that's the size of the original allocation. Given the following: ```rust pub struct Foo([usize; 3]); pub fn unwrap_copy(v: Vec<Foo>) -> Vec<[usize; 3]> { v.into_iter().map(|f| f.0).collect() } ``` <details> <summary>Before this commit:</summary> ```llvm define void `@unwrap_copy(ptr` noalias nocapture noundef writeonly sret([24 x i8]) align 8 dereferenceable(24) %_0, ptr noalias nocapture noundef readonly align 8 dereferenceable(24) %iter) { start: %me.sroa.0.0.copyload.i = load i64, ptr %iter, align 8 %me.sroa.4.0.self.sroa_idx.i = getelementptr inbounds i8, ptr %iter, i64 8 %me.sroa.4.0.copyload.i = load ptr, ptr %me.sroa.4.0.self.sroa_idx.i, align 8 %me.sroa.5.0.self.sroa_idx.i = getelementptr inbounds i8, ptr %iter, i64 16 %me.sroa.5.0.copyload.i = load i64, ptr %me.sroa.5.0.self.sroa_idx.i, align 8 %_19.i.idx = mul nsw i64 %me.sroa.5.0.copyload.i, 24 %0 = udiv i64 %_19.i.idx, 24 ; Unnecessary calculation %_16.i.i = mul i64 %me.sroa.0.0.copyload.i, 24 %dst_cap.i.i = udiv i64 %_16.i.i, 24 store i64 %dst_cap.i.i, ptr %_0, align 8 %1 = getelementptr inbounds i8, ptr %_0, i64 8 store ptr %me.sroa.4.0.copyload.i, ptr %1, align 8 %2 = getelementptr inbounds i8, ptr %_0, i64 16 store i64 %0, ptr %2, align 8 ret void } ``` </details> <details> <summary>After:</summary> ```llvm define void `@unwrap_copy(ptr` noalias nocapture noundef writeonly sret([24 x i8]) align 8 dereferenceable(24) %_0, ptr noalias nocapture noundef readonly align 8 dereferenceable(24) %iter) { start: %me.sroa.0.0.copyload.i = load i64, ptr %iter, align 8 %me.sroa.4.0.self.sroa_idx.i = getelementptr inbounds i8, ptr %iter, i64 8 %me.sroa.4.0.copyload.i = load ptr, ptr %me.sroa.4.0.self.sroa_idx.i, 
align 8 %me.sroa.5.0.self.sroa_idx.i = getelementptr inbounds i8, ptr %iter, i64 16 %me.sroa.5.0.copyload.i = load i64, ptr %me.sroa.5.0.self.sroa_idx.i, align 8 %_19.i.idx = mul nsw i64 %me.sroa.5.0.copyload.i, 24 %0 = udiv i64 %_19.i.idx, 24 store i64 %me.sroa.0.0.copyload.i, ptr %_0, align 8 %1 = getelementptr inbounds i8, ptr %_0, i64 8 store ptr %me.sroa.4.0.copyload.i, ptr %1, align 8 %2 = getelementptr inbounds i8, ptr %_0, i64 16 store i64 %0, ptr %2, align 8, !alias.scope !9, !noalias !14 ret void } ``` </details> Note that there is still one more `mul,udiv` pair that I couldn't get rid of. The root cause is the same issue as #121239, the `nuw` gets stripped off of `ptr::sub_ptr`. 2. `Iterator::try_fold` gets called on the underlying Iterator in `SpecInPlaceCollect::collect_in_place` whenever it does not implement `TrustedRandomAccess`. For types that impl `Drop`, LLVM currently can't tell that the drop can never occur, when using the default `Iterator::try_fold` implementation. 
For example, given the following code from #120493 ```rust #[repr(transparent)] struct WrappedClone { inner: String } #[no_mangle] pub fn unwrap_clone(list: Vec<WrappedClone>) -> Vec<String> { list.into_iter().map(|s| s.inner).collect() } ``` <details> <summary>The asm for the `unwrap_clone` method is currently:</summary> ```asm unwrap_clone: push rbp push r15 push r14 push r13 push r12 push rbx push rax mov rbx, rdi mov r12, qword ptr [rsi] mov rdi, qword ptr [rsi + 8] mov rax, qword ptr [rsi + 16] movabs rsi, -6148914691236517205 mov r14, r12 test rax, rax je .LBB0_10 lea rcx, [rax + 2*rax] lea r14, [r12 + 8*rcx] shl rax, 3 lea rax, [rax + 2*rax] xor ecx, ecx .LBB0_2: cmp qword ptr [r12 + rcx], 0 je .LBB0_4 add rcx, 24 cmp rax, rcx jne .LBB0_2 jmp .LBB0_10 .LBB0_4: lea rdx, [rax - 24] lea r14, [r12 + rcx] cmp rdx, rcx je .LBB0_10 mov qword ptr [rsp], rdi sub rax, rcx add rax, -24 mul rsi mov r15, rdx lea rbp, [r12 + rcx] add rbp, 32 shr r15, 4 mov r13, qword ptr [rip + __rust_dealloc@GOTPCREL] jmp .LBB0_6 .LBB0_8: add rbp, 24 dec r15 je .LBB0_9 .LBB0_6: mov rsi, qword ptr [rbp] test rsi, rsi je .LBB0_8 mov rdi, qword ptr [rbp - 8] mov edx, 1 call r13 jmp .LBB0_8 .LBB0_9: mov rdi, qword ptr [rsp] movabs rsi, -6148914691236517205 .LBB0_10: sub r14, r12 mov rax, r14 mul rsi shr rdx, 4 mov qword ptr [rbx], r12 mov qword ptr [rbx + 8], rdi mov qword ptr [rbx + 16], rdx mov rax, rbx add rsp, 8 pop rbx pop r12 pop r13 pop r14 pop r15 pop rbp ret ``` </details> <details> <summary>After this PR:</summary> ```asm unwrap_clone: mov rax, rdi movups xmm0, xmmword ptr [rsi] mov rcx, qword ptr [rsi + 16] movups xmmword ptr [rdi], xmm0 mov qword ptr [rdi + 16], rcx ret ``` </details> Fixes #120493
2 parents 9782770 + 4691ff7 commit 1256640

File tree

2 files changed

+35
-3
lines changed

2 files changed

+35
-3
lines changed

library/alloc/src/vec/in_place_collect.rs

+6-3
Original file line number | Diff line number | Diff line change
@@ -259,7 +259,8 @@ where
259259
inner.cap,
260260
inner.buf.as_ptr() as *mut T,
261261
inner.end as *const T,
262-
inner.cap * mem::size_of::<I::Src>() / mem::size_of::<T>(),
262+
// SAFETY: the multiplication cannot overflow, since `inner.cap * size_of::<I::Src>()` is the size of the allocation.
263+
inner.cap.unchecked_mul(mem::size_of::<I::Src>()) / mem::size_of::<T>(),
263264
)
264265
};
265266

@@ -372,8 +373,10 @@ where
372373
// - unlike most internal iteration methods, it only takes a &mut self
373374
// - it lets us thread the write pointer through its innards and get it back in the end
374375
let sink = InPlaceDrop { inner: dst_buf, dst: dst_buf };
375-
let sink =
376-
self.try_fold::<_, _, Result<_, !>>(sink, write_in_place_with_drop(end)).unwrap();
376+
let sink = match self.try_fold::<_, _, Result<_, !>>(sink, write_in_place_with_drop(end)) {
377+
Ok(sink) => sink,
378+
Err(never) => match never {},
379+
};
377380
// iteration succeeded, don't drop head
378381
unsafe { ManuallyDrop::new(sink).dst.sub_ptr(dst_buf) }
379382
}

library/alloc/src/vec/into_iter.rs

+29
Original file line number | Diff line number | Diff line change
@@ -289,6 +289,35 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
289289
};
290290
}
291291

292+
fn try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R
293+
where
294+
Self: Sized,
295+
F: FnMut(B, Self::Item) -> R,
296+
R: core::ops::Try<Output = B>,
297+
{
298+
let mut accum = init;
299+
if T::IS_ZST {
300+
while self.ptr.as_ptr() != self.end.cast_mut() {
301+
// SAFETY: we just checked that `self.ptr` is in bounds.
302+
let tmp = unsafe { self.ptr.read() };
303+
// See `next` for why we subtract from `end` here.
304+
self.end = self.end.wrapping_byte_sub(1);
305+
accum = f(accum, tmp)?;
306+
}
307+
} else {
308+
// SAFETY: `self.end` can only be null if `T` is a ZST.
309+
while self.ptr != non_null!(self.end, T) {
310+
// SAFETY: we just checked that `self.ptr` is in bounds.
311+
let tmp = unsafe { self.ptr.read() };
312+
// SAFETY: the maximum this can be is `self.end`.
313+
// Increment `self.ptr` first to avoid double dropping in the event of a panic.
314+
self.ptr = unsafe { self.ptr.add(1) };
315+
accum = f(accum, tmp)?;
316+
}
317+
}
318+
R::from_output(accum)
319+
}
320+
292321
unsafe fn __iterator_get_unchecked(&mut self, i: usize) -> Self::Item
293322
where
294323
Self: TrustedRandomAccessNoCoerce,

0 commit comments

Comments
 (0)