@@ -2811,11 +2811,11 @@ impl<T, I: SliceIndex<[T]>, A: Allocator> IndexMut<I> for Vec<T, A> {
2811
2811
/// * perform the iteration in-place on the original allocation backing the iterator
2812
2812
///
2813
2813
/// The last case warrants some attention. It is an optimization that in many cases reduces peak memory
2814
- /// consumption and improves cache locality. But when a large number of big, short-lived
2815
- /// allocations are created, only a small fraction of their items gets collected, no further use
2816
- /// is made of the spare capacity and the resulting `Vec` is moved into a longer-lived structure
2817
- /// this can lead to the large allocations having their lifetimes unnecessarily extended which
2818
- /// can result in increased memory footprint.
2814
+ /// consumption and improves cache locality. But when big, short-lived allocations are created,
2815
+ /// only a small fraction of their items gets collected, no further use is made of the spare capacity
2816
+ /// and the resulting `Vec` is moved into a longer-lived structure, this can lead to the large
2817
+ /// allocations having their lifetimes unnecessarily extended which can result in increased memory
2818
+ /// footprint.
2819
2819
///
2820
2820
/// In cases where this is an issue, the excess capacity can be discarded with [`Vec::shrink_to()`],
2821
2821
/// [`Vec::shrink_to_fit()`] or by collecting into [`Box<[T]>`][owned slice] instead, which additionally reduces
@@ -2827,8 +2827,7 @@ impl<T, I: SliceIndex<[T]>, A: Allocator> IndexMut<I> for Vec<T, A> {
2827
2827
/// # use std::sync::Mutex;
2828
2828
/// static LONG_LIVED: Mutex<Vec<Vec<u16>>> = Mutex::new(Vec::new());
2829
2829
///
2830
- /// // many short-lived allocations
2831
- /// for i in 0..100 {
2830
+ /// for i in 0..10 {
2832
2831
/// let big_temporary: Vec<u16> = (0..1024).collect();
2833
2832
/// // discard most items
2834
2833
/// let mut result: Vec<_> = big_temporary.into_iter().filter(|i| i % 100 == 0).collect();
0 commit comments