diff --git a/library/alloc/src/raw_vec.rs b/library/alloc/src/raw_vec.rs index 3d38e73305a37..4f402081c0df4 100644 --- a/library/alloc/src/raw_vec.rs +++ b/library/alloc/src/raw_vec.rs @@ -1,6 +1,5 @@ #![unstable(feature = "raw_vec_internals", reason = "unstable const warnings", issue = "none")] -use core::alloc::LayoutError; use core::cmp; use core::intrinsics; use core::mem::{self, ManuallyDrop, MaybeUninit}; @@ -103,19 +102,6 @@ impl RawVec { } impl RawVec { - // Tiny Vecs are dumb. Skip to: - // - 8 if the element size is 1, because any heap allocators is likely - // to round up a request of less than 8 bytes to at least 8 bytes. - // - 4 if elements are moderate-sized (<= 1 KiB). - // - 1 otherwise, to avoid wasting too much space for very short Vecs. - const MIN_NON_ZERO_CAP: usize = if mem::size_of::() == 1 { - 8 - } else if mem::size_of::() <= 1024 { - 4 - } else { - 1 - }; - /// Like `new`, but parameterized over the choice of allocator for /// the returned `RawVec`. #[rustc_allow_const_fn_unstable(const_fn)] @@ -192,7 +178,7 @@ impl RawVec { Self { ptr: unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) }, - cap: Self::capacity_from_bytes(ptr.len()), + cap: ptr.len() / mem::size_of::(), alloc, } } @@ -360,16 +346,10 @@ impl RawVec { additional > self.capacity().wrapping_sub(len) } - fn capacity_from_bytes(excess: usize) -> usize { - debug_assert_ne!(mem::size_of::(), 0); - excess / mem::size_of::() - } - - fn set_ptr(&mut self, ptr: NonNull<[u8]>) { - self.ptr = unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) }; - self.cap = Self::capacity_from_bytes(ptr.len()); - } - + // This method must only be called after `needs_to_grow(len, additional)` + // succeeds. Otherwise, if `T` is zero-sized it will cause a divide by + // zero. + // // This method is usually instantiated many times. So we want it to be as // small as possible, to improve compile times. 
But we also want as much of // its contents to be statically computable as possible, to make the @@ -378,47 +358,40 @@ impl RawVec { // of the code that doesn't depend on `T` as possible is in functions that // are non-generic over `T`. fn grow_amortized(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> { - // This is ensured by the calling contexts. - debug_assert!(additional > 0); - - if mem::size_of::() == 0 { - // Since we return a capacity of `usize::MAX` when `elem_size` is - // 0, getting to here necessarily means the `RawVec` is overfull. - return Err(CapacityOverflow.into()); - } - - // Nothing we can really do about these checks, sadly. - let required_cap = len.checked_add(additional).ok_or(CapacityOverflow)?; - - // This guarantees exponential growth. The doubling cannot overflow - // because `cap <= isize::MAX` and the type of `cap` is `usize`. - let cap = cmp::max(self.cap * 2, required_cap); - let cap = cmp::max(Self::MIN_NON_ZERO_CAP, cap); - - let new_layout = Layout::array::(cap); - - // `finish_grow` is non-generic over `T`. - let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?; - self.set_ptr(ptr); + // `finish_grow_amortized` is non-generic over `T`. + let elem_layout = Layout::new::(); + let (ptr, cap) = finish_grow_amortized( + len, + additional, + elem_layout, + self.cap, + self.current_memory(), + &mut self.alloc, + )?; + self.ptr = unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) }; + self.cap = cap; Ok(()) } + // This method must only be called after `needs_to_grow(len, additional)` + // succeeds. Otherwise, if `T` is zero-sized it will cause a divide by + // zero. + // // The constraints on this method are much the same as those on // `grow_amortized`, but this method is usually instantiated less often so // it's less critical. 
fn grow_exact(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> { - if mem::size_of::() == 0 { - // Since we return a capacity of `usize::MAX` when the type size is - // 0, getting to here necessarily means the `RawVec` is overfull. - return Err(CapacityOverflow.into()); - } - - let cap = len.checked_add(additional).ok_or(CapacityOverflow)?; - let new_layout = Layout::array::(cap); - - // `finish_grow` is non-generic over `T`. - let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?; - self.set_ptr(ptr); + // `finish_grow_exact` is non-generic over `T`. + let elem_layout = Layout::new::(); + let (ptr, cap) = finish_grow_exact( + len, + additional, + elem_layout, + self.current_memory(), + &mut self.alloc, + )?; + self.ptr = unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) }; + self.cap = cap; Ok(()) } @@ -434,7 +407,8 @@ impl RawVec { .shrink(ptr, layout, new_layout) .map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })? }; - self.set_ptr(ptr); + self.ptr = unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) }; + self.cap = ptr.len() / mem::size_of::(); Ok(()) } } @@ -444,31 +418,100 @@ impl RawVec { // significant, because the number of different `A` types seen in practice is // much smaller than the number of `T` types.) #[inline(never)] +fn finish_grow_amortized( + len: usize, + additional: usize, + elem_layout: Layout, + current_cap: usize, + current_memory: Option<(NonNull, Layout)>, + alloc: &mut A, +) -> Result<(NonNull<[u8]>, usize), TryReserveError> +where + A: Allocator, +{ + // This is ensured by the calling contexts. + debug_assert!(additional > 0); + + // Tiny Vecs are dumb. Skip to: + // - 8 if the element size is 1, because any heap allocator is likely + // to round up a request of less than 8 bytes to at least 8 bytes. + // - 4 if elements are moderate-sized (<= 1 KiB). + // - 1 otherwise, to avoid wasting too much space for very short Vecs. 
+ let min_non_zero_cap: usize = if elem_layout.size() == 1 { + 8 + } else if elem_layout.size() <= 1024 { + 4 + } else { + 1 + }; + + // Nothing we can really do about these checks, sadly. (If this method + // is called for a zero-sized `T` after `needs_to_grow()` has + // succeeded, this early return will occur.) + let required_cap = len.checked_add(additional).ok_or(CapacityOverflow)?; + + // This guarantees exponential growth. The doubling cannot overflow + // because `cap <= isize::MAX` and the type of `cap` is `usize`. + let cap = cmp::max(current_cap * 2, required_cap); + let cap = cmp::max(min_non_zero_cap, cap); + + finish_grow(elem_layout, cap, current_memory, alloc) +} + +// This function is outside `RawVec` to minimize compile times. See the comment +// above `RawVec::grow_exact` for details. (The `A` parameter isn't +// significant, because the number of different `A` types seen in practice is +// much smaller than the number of `T` types.) +#[inline(never)] +fn finish_grow_exact( + len: usize, + additional: usize, + elem_layout: Layout, + current_memory: Option<(NonNull, Layout)>, + alloc: &mut A, +) -> Result<(NonNull<[u8]>, usize), TryReserveError> +where + A: Allocator, +{ + // This is ensured by the calling contexts. + debug_assert!(additional > 0); + + // Nothing we can really do about these checks, sadly. (If this method + // is called for a zero-sized `T` after `needs_to_grow()` has + // succeeded, this early return will occur.) + let cap = len.checked_add(additional).ok_or(CapacityOverflow)?; + + finish_grow(elem_layout, cap, current_memory, alloc) +} + fn finish_grow( - new_layout: Result, + elem_layout: Layout, + cap: usize, current_memory: Option<(NonNull, Layout)>, alloc: &mut A, -) -> Result, TryReserveError> +) -> Result<(NonNull<[u8]>, usize), TryReserveError> where A: Allocator, { - // Check for the error here to minimize the size of `RawVec::grow_*`. 
- let new_layout = new_layout.map_err(|_| CapacityOverflow)?; + let array_size = elem_layout.size().checked_mul(cap).ok_or(CapacityOverflow)?; + alloc_guard(array_size)?; - alloc_guard(new_layout.size())?; + let new_layout = unsafe { Layout::from_size_align_unchecked(array_size, elem_layout.align()) }; - let memory = if let Some((ptr, old_layout)) = current_memory { + let new_ptr = if let Some((old_ptr, old_layout)) = current_memory { debug_assert_eq!(old_layout.align(), new_layout.align()); unsafe { // The allocator checks for alignment equality intrinsics::assume(old_layout.align() == new_layout.align()); - alloc.grow(ptr, old_layout, new_layout) + alloc.grow(old_ptr, old_layout, new_layout) } } else { alloc.allocate(new_layout) - }; + } + .map_err(|_| TryReserveError::from(AllocError { layout: new_layout, non_exhaustive: () }))?; - memory.map_err(|_| AllocError { layout: new_layout, non_exhaustive: () }.into()) + let new_cap = new_ptr.len() / elem_layout.size(); + Ok((new_ptr, new_cap)) } unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawVec { diff --git a/library/alloc/src/raw_vec/tests.rs b/library/alloc/src/raw_vec/tests.rs index ff322f0da97c6..403d495635023 100644 --- a/library/alloc/src/raw_vec/tests.rs +++ b/library/alloc/src/raw_vec/tests.rs @@ -135,11 +135,11 @@ fn zst() { assert_eq!(v.try_reserve_exact(101, usize::MAX - 100), cap_err); zst_sanity(&v); - assert_eq!(v.grow_amortized(100, usize::MAX - 100), cap_err); + //v.grow_amortized(100, usize::MAX - 100); // panics, in `zst_grow_amortized_panic` below assert_eq!(v.grow_amortized(101, usize::MAX - 100), cap_err); zst_sanity(&v); - assert_eq!(v.grow_exact(100, usize::MAX - 100), cap_err); + //v.grow_exact(100, usize::MAX - 100); // panics, in `zst_grow_exact_panic` below assert_eq!(v.grow_exact(101, usize::MAX - 100), cap_err); zst_sanity(&v); } @@ -161,3 +161,25 @@ fn zst_reserve_exact_panic() { v.reserve_exact(101, usize::MAX - 100); } + +#[test] +#[should_panic(expected = "divide by zero")] +fn 
zst_grow_amortized_panic() { + let mut v: RawVec = RawVec::new(); + zst_sanity(&v); + + // This shows the divide by zero that occurs when `grow_amortized()` is + // called when `needs_to_grow()` would have returned `false`. + let _ = v.grow_amortized(100, usize::MAX - 100); +} + +#[test] +#[should_panic(expected = "divide by zero")] +fn zst_grow_exact_panic() { + let mut v: RawVec = RawVec::new(); + zst_sanity(&v); + + // This shows the divide by zero that occurs when `grow_exact()` is + // called when `needs_to_grow()` would have returned `false`. + let _ = v.grow_exact(100, usize::MAX - 100); +}