11use std:: {
22 alloc:: { self , GlobalAlloc , Layout , System } ,
33 mem:: ManuallyDrop ,
4- ops:: { Deref , DerefMut } ,
4+ ops:: Deref ,
55 ptr:: NonNull ,
6+ sync:: Mutex ,
67} ;
78
89use crate :: {
@@ -13,6 +14,78 @@ use crate::{
/// 2 GiB in bytes.
const TWO_GIB: usize = 1 << 31;
/// 4 GiB in bytes.
const FOUR_GIB: usize = 1 << 32;
1516
/// A thread-safe pool for reusing [`Allocator`] instances to reduce allocation overhead.
///
/// Internally uses a `Vec` protected by a `Mutex` to store available allocators.
/// Obtain an allocator via [`AllocatorPool::get`]; it is returned to the pool
/// automatically when the [`AllocatorGuard`] it is wrapped in is dropped.
#[derive(Default)]
pub struct AllocatorPool {
    // Allocators stored here are reset and ready for reuse
    allocators: Mutex<Vec<FixedSizeAllocator>>,
}
24+
25+ impl AllocatorPool {
26+ /// Creates a new [`AllocatorPool`] with capacity for the given number of `FixedSizeAllocator` instances.
27+ pub fn new ( size : usize ) -> AllocatorPool {
28+ // Each allocator consumes a large block of memory, so create them on demand instead of upfront
29+ let allocators = Vec :: with_capacity ( size) ;
30+ AllocatorPool { allocators : Mutex :: new ( allocators) }
31+ }
32+
33+ /// Retrieves an [`Allocator`] from the pool, or creates a new one if the pool is empty.
34+ ///
35+ /// Returns an [`AllocatorGuard`] that gives access to the allocator.
36+ ///
37+ /// # Panics
38+ ///
39+ /// Panics if the underlying mutex is poisoned.
40+ pub fn get ( & self ) -> AllocatorGuard {
41+ let allocator = {
42+ let mut allocators = self . allocators . lock ( ) . unwrap ( ) ;
43+ allocators. pop ( )
44+ } ;
45+ let allocator = allocator. unwrap_or_else ( FixedSizeAllocator :: new) ;
46+
47+ AllocatorGuard { allocator : ManuallyDrop :: new ( allocator) , pool : self }
48+ }
49+
50+ /// Add a [`FixedSizeAllocator`] to the pool.
51+ ///
52+ /// The `Allocator` should be empty, ready to be re-used.
53+ ///
54+ /// # Panics
55+ ///
56+ /// Panics if the underlying mutex is poisoned.
57+ fn add ( & self , allocator : FixedSizeAllocator ) {
58+ let mut allocators = self . allocators . lock ( ) . unwrap ( ) ;
59+ allocators. push ( allocator) ;
60+ }
61+ }
62+
/// A guard object representing exclusive access to an [`Allocator`] from the pool.
///
/// On drop, the `Allocator` is reset and returned to the pool.
pub struct AllocatorGuard<'alloc_pool> {
    // `ManuallyDrop` so that `Drop::drop` can take ownership of the allocator
    // and hand it back to the pool instead of destroying it
    allocator: ManuallyDrop<FixedSizeAllocator>,
    // The pool this allocator came from; receives the allocator back on drop
    pool: &'alloc_pool AllocatorPool,
}
70+
impl Deref for AllocatorGuard<'_> {
    type Target = Allocator;

    /// Dereference to the underlying [`Allocator`].
    // NOTE(review): only `Deref` is provided, no `DerefMut` — presumably deliberate,
    // so callers cannot swap the pooled allocator out from under the guard; confirm intent.
    fn deref(&self) -> &Self::Target {
        &self.allocator.allocator
    }
}
78+
impl Drop for AllocatorGuard<'_> {
    /// Return [`Allocator`] back to the pool.
    fn drop(&mut self) {
        // SAFETY: After taking ownership of the `FixedSizeAllocator`, we do not touch the `ManuallyDrop` again
        let mut allocator = unsafe { ManuallyDrop::take(&mut self.allocator) };
        // Reset so the allocator is empty and ready for reuse before it is re-pooled
        allocator.reset();
        self.pool.add(allocator);
    }
}
88+
1689// What we ideally want is an allocation 2 GiB in size, aligned on 4 GiB.
1790// But system allocator on Mac OS refuses allocations with 4 GiB alignment.
1891// https://github.com/rust-lang/rust/blob/556d20a834126d2d0ac20743b9792b8474d6d03c/library/std/src/sys/alloc/unix.rs#L16-L27
@@ -41,7 +114,7 @@ const ALLOC_LAYOUT: Layout = match Layout::from_size_align(ALLOC_SIZE, ALLOC_ALI
41114/// To achieve this, we manually allocate memory to back the `Allocator`'s single chunk.
42115/// We over-allocate 4 GiB, and then use a part of that allocation to back the `Allocator`.
43116/// Inner `Allocator` is wrapped in `ManuallyDrop` to prevent it freeing the memory itself,
44- /// and `AllocatorWrapper ` has a custom `Drop` impl which frees the whole of the original allocation.
117+ /// and `FixedSizeAllocator ` has a custom `Drop` impl which frees the whole of the original allocation.
45118///
46119/// We allocate via `System` allocator, bypassing any registered alternative global allocator
47120/// (e.g. Mimalloc in linter). Mimalloc complains that it cannot serve allocations with high alignment,
@@ -59,7 +132,7 @@ impl FixedSizeAllocator {
59132 #[ expect( clippy:: items_after_statements) ]
60133 pub fn new ( ) -> Self {
61134 // Allocate block of memory.
62- // SAFETY: Layout does not have zero size.
135+ // SAFETY: `ALLOC_LAYOUT` does not have zero size.
63136 let alloc_ptr = unsafe { System . alloc ( ALLOC_LAYOUT ) } ;
64137 let Some ( alloc_ptr) = NonNull :: new ( alloc_ptr) else {
65138 alloc:: handle_alloc_error ( ALLOC_LAYOUT ) ;
@@ -92,6 +165,23 @@ impl FixedSizeAllocator {
92165 // Store pointer to original allocation, so it can be used to deallocate in `drop`
93166 Self { allocator : ManuallyDrop :: new ( allocator) , alloc_ptr }
94167 }
168+
/// Reset this [`FixedSizeAllocator`].
///
/// Resets the inner `Allocator` and restores its data pointer to the start of
/// the buffer, leaving the allocator empty and ready for reuse.
fn reset(&mut self) {
    // Set cursor back to end
    self.allocator.reset();

    // Set data pointer back to start.
    // SAFETY: Fixed-size allocators have data pointer originally aligned on `BUFFER_ALIGN`,
    // and size less than `BUFFER_ALIGN`. So we can restore original data pointer by rounding down
    // to the nearest multiple of `BUFFER_ALIGN`.
    unsafe {
        let data_ptr = self.allocator.data_ptr();
        // Distance the data pointer has advanced past the `BUFFER_ALIGN` boundary
        let offset = data_ptr.as_ptr() as usize % BUFFER_ALIGN;
        let data_ptr = data_ptr.sub(offset);
        self.allocator.set_data_ptr(data_ptr);
    }
}
95185}
96186
97187impl Drop for FixedSizeAllocator {
@@ -101,20 +191,6 @@ impl Drop for FixedSizeAllocator {
101191 }
102192}
103193
104- impl Deref for FixedSizeAllocator {
105- type Target = Allocator ;
106-
107- fn deref ( & self ) -> & Self :: Target {
108- & self . allocator
109- }
110- }
111-
112- impl DerefMut for FixedSizeAllocator {
113- fn deref_mut ( & mut self ) -> & mut Self :: Target {
114- & mut self . allocator
115- }
116- }
117-
// SAFETY: `Allocator` is `Send`.
// Moving `alloc_ptr: NonNull<u8>` across threads along with the `Allocator` is safe,
// since it is the pointer to this allocator's own backing allocation, kept so the
// memory can be deallocated in `Drop`.
unsafe impl Send for FixedSizeAllocator {}
0 commit comments