@@ -2788,7 +2788,7 @@ impl<T, I: SliceIndex<[T]>, A: Allocator> IndexMut<I> for Vec<T, A> {
///
/// # Allocation behavior
///
- /// In general `Vec` does not guarantee any particular grow/ allocation stategy .
+ /// In general `Vec` does not guarantee any particular growth or allocation strategy.
/// That also applies to this trait impl.
///
/// **Note:** This section covers implementation details and is therefore exempt from
@@ -2798,29 +2798,28 @@ impl<T, I: SliceIndex<[T]>, A: Allocator> IndexMut<I> for Vec<T, A> {
/// depending on the supplied iterator:
///
/// * preallocate based on [`Iterator::size_hint()`]
- /// * and panic if the number of items is not outside the provided lower/upper bounds
+ /// * and panic if the number of items is outside the provided lower/upper bounds
/// * use an amortized growth strategy similar to `pushing` one item at a time
/// * perform the iteration in-place on the original allocation backing the iterator
///
/// The last case warrants some attention. It is an optimization that in many cases reduces peak memory
- /// consumption and improves cache locality. But when a large number of big, short-lived
- /// allocations are created, only a small fraction of their items gets collected, no further use
- /// is made of the spare capacity and the resulting `Vec` is moved into a longer-lived structure
- /// this can lead to the large allocations having their lifetimes unnecessarily extended which
- /// can result in increased memory footprint.
+ /// consumption and improves cache locality. But when big, short-lived allocations are created,
+ /// only a small fraction of their items get collected, no further use is made of the spare capacity
+ /// and the resulting `Vec` is moved into a longer-lived structure, then this can lead to the large
+ /// allocations having their lifetimes unnecessarily extended which can result in increased memory
+ /// footprint.
///
- /// In cases where this is an issue the excess capacity can be discard with [`Vec::shrink_to()`],
- /// [`Vec::shrink_to_fit()`] or by collecting into [`Box<[T]>`][owned slice] instead which additionally reduces
- /// the size of the longlived struct.
+ /// In cases where this is an issue, the excess capacity can be discarded with [`Vec::shrink_to()`],
+ /// [`Vec::shrink_to_fit()`] or by collecting into [`Box<[T]>`][owned slice] instead, which additionally reduces
+ /// the size of the long-lived struct.
///
/// [owned slice]: Box
///
/// ```rust
/// # use std::sync::Mutex;
/// static LONG_LIVED: Mutex<Vec<Vec<u16>>> = Mutex::new(Vec::new());
///
- /// // many short-lived allocations
- /// for i in 0..100 {
+ /// for i in 0..10 {
///     let big_temporary: Vec<u16> = (0..1024).collect();
///     // discard most items
///     let mut result: Vec<_> = big_temporary.into_iter().filter(|i| i % 100 == 0).collect();
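
For comparison, here is a minimal standalone sketch (not part of the diff above) of the `Box<[T]>` alternative that the documentation mentions, assuming a hypothetical `LONG_LIVED` static analogous to the one in the doc example: collecting the filtered items straight into a boxed slice drops the spare capacity before the result is moved into the long-lived structure.

```rust
use std::sync::Mutex;

// Hypothetical long-lived storage holding boxed slices instead of `Vec`s.
static LONG_LIVED: Mutex<Vec<Box<[u16]>>> = Mutex::new(Vec::new());

fn main() {
    for _ in 0..10 {
        let big_temporary: Vec<u16> = (0..1024).collect();
        // Collecting into `Box<[u16]>` leaves no excess capacity behind,
        // so the long-lived structure only holds the filtered items.
        let result: Box<[u16]> = big_temporary.into_iter().filter(|i| i % 100 == 0).collect();
        LONG_LIVED.lock().unwrap().push(result);
    }
}
```

Compared with calling `Vec::shrink_to_fit()` before the push, a boxed slice also omits the capacity field, which is what the text means by additionally reducing the size of the long-lived struct.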