Skip to content

Commit 70f88fc

Browse files
committed
refactor(allocator): move all fixed size allocator code into 1 file
1 parent 4f50750 commit 70f88fc

File tree

3 files changed

+130
-126
lines changed

3 files changed

+130
-126
lines changed

crates/oxc_allocator/src/lib.rs

Lines changed: 27 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -37,18 +37,9 @@ mod allocator_api2;
3737
mod boxed;
3838
mod clone_in;
3939
mod convert;
40-
// Fixed size allocators are only supported on 64-bit little-endian platforms at present
41-
#[cfg(all(
42-
feature = "fixed_size",
43-
not(feature = "disable_fixed_size"),
44-
target_pointer_width = "64",
45-
target_endian = "little"
46-
))]
47-
mod fixed_size;
4840
#[cfg(feature = "from_raw_parts")]
4941
mod from_raw_parts;
5042
pub mod hash_map;
51-
mod pool;
5243
mod string_builder;
5344
mod take_in;
5445
mod vec;
@@ -61,11 +52,37 @@ pub use boxed::Box;
6152
pub use clone_in::CloneIn;
6253
pub use convert::{FromIn, IntoIn};
6354
pub use hash_map::HashMap;
64-
pub use pool::{AllocatorGuard, AllocatorPool};
6555
pub use string_builder::StringBuilder;
6656
pub use take_in::{Dummy, TakeIn};
6757
pub use vec::Vec;
6858

59+
// Fixed size allocators are only supported on 64-bit little-endian platforms at present
60+
61+
#[cfg(not(all(
62+
feature = "fixed_size",
63+
not(feature = "disable_fixed_size"),
64+
target_pointer_width = "64",
65+
target_endian = "little"
66+
)))]
67+
mod pool;
68+
69+
#[cfg(all(
70+
feature = "fixed_size",
71+
not(feature = "disable_fixed_size"),
72+
target_pointer_width = "64",
73+
target_endian = "little"
74+
))]
75+
mod pool_fixed_size;
76+
#[cfg(all(
77+
feature = "fixed_size",
78+
not(feature = "disable_fixed_size"),
79+
target_pointer_width = "64",
80+
target_endian = "little"
81+
))]
82+
use pool_fixed_size as pool;
83+
84+
pub use pool::{AllocatorGuard, AllocatorPool};
85+
6986
mod generated {
7087
#[cfg(all(
7188
feature = "fixed_size",

crates/oxc_allocator/src/pool.rs

Lines changed: 10 additions & 99 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
use std::{mem::ManuallyDrop, ops::Deref, sync::Mutex};
1+
use std::{iter, mem::ManuallyDrop, ops::Deref, sync::Mutex};
22

33
use crate::Allocator;
44

@@ -7,13 +7,13 @@ use crate::Allocator;
77
/// Internally uses a `Vec` protected by a `Mutex` to store available allocators.
88
#[derive(Default)]
99
pub struct AllocatorPool {
10-
allocators: Mutex<Vec<AllocatorWrapper>>,
10+
allocators: Mutex<Vec<Allocator>>,
1111
}
1212

1313
impl AllocatorPool {
14-
/// Creates a new [`AllocatorPool`] pre-filled with the given number of default `AllocatorWrapper` instances.
14+
/// Creates a new [`AllocatorPool`] pre-filled with the given number of default [`Allocator`] instances.
1515
pub fn new(size: usize) -> AllocatorPool {
16-
let allocators = AllocatorWrapper::new_vec(size);
16+
let allocators = iter::repeat_with(Allocator::new).take(size).collect();
1717
AllocatorPool { allocators: Mutex::new(allocators) }
1818
}
1919

@@ -29,19 +29,19 @@ impl AllocatorPool {
2929
let mut allocators = self.allocators.lock().unwrap();
3030
allocators.pop()
3131
};
32-
let allocator = allocator.unwrap_or_else(AllocatorWrapper::new);
32+
let allocator = allocator.unwrap_or_else(Allocator::new);
3333

3434
AllocatorGuard { allocator: ManuallyDrop::new(allocator), pool: self }
3535
}
3636

37-
/// Add an [`AllocatorWrapper`] to the pool.
37+
/// Add an [`Allocator`] to the pool.
3838
///
3939
/// The `Allocator` should be empty, ready to be re-used.
4040
///
4141
/// # Panics
4242
///
4343
/// Panics if the underlying mutex is poisoned.
44-
fn add(&self, allocator: AllocatorWrapper) {
44+
fn add(&self, allocator: Allocator) {
4545
let mut allocators = self.allocators.lock().unwrap();
4646
allocators.push(allocator);
4747
}
@@ -51,113 +51,24 @@ impl AllocatorPool {
5151
///
5252
/// On drop, the `Allocator` is reset and returned to the pool.
5353
pub struct AllocatorGuard<'alloc_pool> {
54-
allocator: ManuallyDrop<AllocatorWrapper>,
54+
allocator: ManuallyDrop<Allocator>,
5555
pool: &'alloc_pool AllocatorPool,
5656
}
5757

5858
impl Deref for AllocatorGuard<'_> {
5959
type Target = Allocator;
6060

6161
fn deref(&self) -> &Self::Target {
62-
self.allocator.get()
62+
&self.allocator
6363
}
6464
}
6565

6666
impl Drop for AllocatorGuard<'_> {
6767
/// Return [`Allocator`] back to the pool.
6868
fn drop(&mut self) {
69-
// SAFETY: After taking ownership of the `AllocatorWrapper`, we do not touch the `ManuallyDrop` again
69+
// SAFETY: After taking ownership of the `Allocator`, we do not touch the `ManuallyDrop` again
7070
let mut allocator = unsafe { ManuallyDrop::take(&mut self.allocator) };
7171
allocator.reset();
7272
self.pool.add(allocator);
7373
}
7474
}
75-
76-
#[cfg(not(all(
77-
feature = "fixed_size",
78-
not(feature = "disable_fixed_size"),
79-
target_pointer_width = "64",
80-
target_endian = "little"
81-
)))]
82-
mod wrapper {
83-
use crate::Allocator;
84-
85-
/// Structure which wraps an [`Allocator`].
86-
///
87-
/// Default implementation which is just a wrapper around an [`Allocator`].
88-
pub struct AllocatorWrapper(Allocator);
89-
90-
impl AllocatorWrapper {
91-
/// Create a new [`AllocatorWrapper`].
92-
pub fn new() -> Self {
93-
Self(Allocator::default())
94-
}
95-
96-
/// Get reference to underlying [`Allocator`].
97-
pub fn get(&self) -> &Allocator {
98-
&self.0
99-
}
100-
101-
/// Reset the [`Allocator`] in this [`AllocatorWrapper`].
102-
pub fn reset(&mut self) {
103-
self.0.reset();
104-
}
105-
106-
/// Create a `Vec` of [`AllocatorWrapper`]s.
107-
pub fn new_vec(size: usize) -> Vec<Self> {
108-
std::iter::repeat_with(Self::new).take(size).collect()
109-
}
110-
}
111-
}
112-
113-
#[cfg(all(
114-
feature = "fixed_size",
115-
not(feature = "disable_fixed_size"),
116-
target_pointer_width = "64",
117-
target_endian = "little"
118-
))]
119-
mod wrapper {
120-
use crate::{Allocator, fixed_size::FixedSizeAllocator, fixed_size_constants::BUFFER_ALIGN};
121-
122-
/// Structure which wraps an [`Allocator`] with fixed size of 2 GiB, and aligned on 4 GiB.
123-
///
124-
/// See [`FixedSizeAllocator`] for more details.
125-
pub struct AllocatorWrapper(FixedSizeAllocator);
126-
127-
impl AllocatorWrapper {
128-
/// Create a new [`AllocatorWrapper`].
129-
pub fn new() -> Self {
130-
Self(FixedSizeAllocator::new())
131-
}
132-
133-
/// Get reference to underlying [`Allocator`].
134-
pub fn get(&self) -> &Allocator {
135-
&self.0
136-
}
137-
138-
/// Reset the [`Allocator`] in this [`AllocatorWrapper`].
139-
pub fn reset(&mut self) {
140-
// Set cursor back to end
141-
self.0.reset();
142-
143-
// Set data pointer back to start.
144-
// SAFETY: Fixed-size allocators have data pointer originally aligned on `BUFFER_ALIGN`,
145-
// and size less than `BUFFER_ALIGN`. So we can restore original data pointer by rounding down
146-
// to next multiple of `BUFFER_ALIGN`.
147-
unsafe {
148-
let data_ptr = self.0.data_ptr();
149-
let offset = data_ptr.as_ptr() as usize % BUFFER_ALIGN;
150-
let data_ptr = data_ptr.sub(offset);
151-
self.0.set_data_ptr(data_ptr);
152-
}
153-
}
154-
155-
/// Create a `Vec` of [`AllocatorWrapper`]s.
156-
pub fn new_vec(size: usize) -> Vec<Self> {
157-
// Each allocator consumes a large block of memory, so create them on demand instead of upfront
158-
Vec::with_capacity(size)
159-
}
160-
}
161-
}
162-
163-
use wrapper::AllocatorWrapper;

crates/oxc_allocator/src/fixed_size.rs renamed to crates/oxc_allocator/src/pool_fixed_size.rs

Lines changed: 93 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,9 @@
11
use std::{
22
alloc::{self, GlobalAlloc, Layout, System},
33
mem::ManuallyDrop,
4-
ops::{Deref, DerefMut},
4+
ops::Deref,
55
ptr::NonNull,
6+
sync::Mutex,
67
};
78

89
use crate::{
@@ -13,6 +14,78 @@ use crate::{
1314
const TWO_GIB: usize = 1 << 31;
1415
const FOUR_GIB: usize = 1 << 32;
1516

17+
/// A thread-safe pool for reusing [`Allocator`] instances to reduce allocation overhead.
18+
///
19+
/// Internally uses a `Vec` protected by a `Mutex` to store available allocators.
20+
#[derive(Default)]
21+
pub struct AllocatorPool {
22+
allocators: Mutex<Vec<FixedSizeAllocator>>,
23+
}
24+
25+
impl AllocatorPool {
26+
/// Creates a new [`AllocatorPool`] with capacity for the given number of `FixedSizeAllocator` instances.
27+
pub fn new(size: usize) -> AllocatorPool {
28+
// Each allocator consumes a large block of memory, so create them on demand instead of upfront
29+
let allocators = Vec::with_capacity(size);
30+
AllocatorPool { allocators: Mutex::new(allocators) }
31+
}
32+
33+
/// Retrieves an [`Allocator`] from the pool, or creates a new one if the pool is empty.
34+
///
35+
/// Returns an [`AllocatorGuard`] that gives access to the allocator.
36+
///
37+
/// # Panics
38+
///
39+
/// Panics if the underlying mutex is poisoned.
40+
pub fn get(&self) -> AllocatorGuard {
41+
let allocator = {
42+
let mut allocators = self.allocators.lock().unwrap();
43+
allocators.pop()
44+
};
45+
let allocator = allocator.unwrap_or_else(FixedSizeAllocator::new);
46+
47+
AllocatorGuard { allocator: ManuallyDrop::new(allocator), pool: self }
48+
}
49+
50+
/// Add a [`FixedSizeAllocator`] to the pool.
51+
///
52+
/// The `Allocator` should be empty, ready to be re-used.
53+
///
54+
/// # Panics
55+
///
56+
/// Panics if the underlying mutex is poisoned.
57+
fn add(&self, allocator: FixedSizeAllocator) {
58+
let mut allocators = self.allocators.lock().unwrap();
59+
allocators.push(allocator);
60+
}
61+
}
62+
63+
/// A guard object representing exclusive access to an [`Allocator`] from the pool.
64+
///
65+
/// On drop, the `Allocator` is reset and returned to the pool.
66+
pub struct AllocatorGuard<'alloc_pool> {
67+
allocator: ManuallyDrop<FixedSizeAllocator>,
68+
pool: &'alloc_pool AllocatorPool,
69+
}
70+
71+
impl Deref for AllocatorGuard<'_> {
72+
type Target = Allocator;
73+
74+
fn deref(&self) -> &Self::Target {
75+
&self.allocator.allocator
76+
}
77+
}
78+
79+
impl Drop for AllocatorGuard<'_> {
80+
/// Return [`Allocator`] back to the pool.
81+
fn drop(&mut self) {
82+
// SAFETY: After taking ownership of the `FixedSizeAllocator`, we do not touch the `ManuallyDrop` again
83+
let mut allocator = unsafe { ManuallyDrop::take(&mut self.allocator) };
84+
allocator.reset();
85+
self.pool.add(allocator);
86+
}
87+
}
88+
1689
// What we ideally want is an allocation 2 GiB in size, aligned on 4 GiB.
1790
// But system allocator on Mac OS refuses allocations with 4 GiB alignment.
1891
// https://github.com/rust-lang/rust/blob/556d20a834126d2d0ac20743b9792b8474d6d03c/library/std/src/sys/alloc/unix.rs#L16-L27
@@ -41,7 +114,7 @@ const ALLOC_LAYOUT: Layout = match Layout::from_size_align(ALLOC_SIZE, ALLOC_ALI
41114
/// To achieve this, we manually allocate memory to back the `Allocator`'s single chunk.
42115
/// We over-allocate 4 GiB, and then use a part of that allocation to back the `Allocator`.
43116
/// Inner `Allocator` is wrapped in `ManuallyDrop` to prevent it freeing the memory itself,
44-
/// and `AllocatorWrapper` has a custom `Drop` impl which frees the whole of the original allocation.
117+
/// and `FixedSizeAllocator` has a custom `Drop` impl which frees the whole of the original allocation.
45118
///
46119
/// We allocate via `System` allocator, bypassing any registered alternative global allocator
47120
/// (e.g. Mimalloc in linter). Mimalloc complains that it cannot serve allocations with high alignment,
@@ -59,7 +132,7 @@ impl FixedSizeAllocator {
59132
#[expect(clippy::items_after_statements)]
60133
pub fn new() -> Self {
61134
// Allocate block of memory.
62-
// SAFETY: Layout does not have zero size.
135+
// SAFETY: `ALLOC_LAYOUT` does not have zero size.
63136
let alloc_ptr = unsafe { System.alloc(ALLOC_LAYOUT) };
64137
let Some(alloc_ptr) = NonNull::new(alloc_ptr) else {
65138
alloc::handle_alloc_error(ALLOC_LAYOUT);
@@ -92,6 +165,23 @@ impl FixedSizeAllocator {
92165
// Store pointer to original allocation, so it can be used to deallocate in `drop`
93166
Self { allocator: ManuallyDrop::new(allocator), alloc_ptr }
94167
}
168+
169+
/// Reset this [`FixedSizeAllocator`].
170+
fn reset(&mut self) {
171+
// Set cursor back to end
172+
self.allocator.reset();
173+
174+
// Set data pointer back to start.
175+
// SAFETY: Fixed-size allocators have data pointer originally aligned on `BUFFER_ALIGN`,
176+
// and size less than `BUFFER_ALIGN`. So we can restore original data pointer by rounding down
177+
// to next multiple of `BUFFER_ALIGN`.
178+
unsafe {
179+
let data_ptr = self.allocator.data_ptr();
180+
let offset = data_ptr.as_ptr() as usize % BUFFER_ALIGN;
181+
let data_ptr = data_ptr.sub(offset);
182+
self.allocator.set_data_ptr(data_ptr);
183+
}
184+
}
95185
}
96186

97187
impl Drop for FixedSizeAllocator {
@@ -101,20 +191,6 @@ impl Drop for FixedSizeAllocator {
101191
}
102192
}
103193

104-
impl Deref for FixedSizeAllocator {
105-
type Target = Allocator;
106-
107-
fn deref(&self) -> &Self::Target {
108-
&self.allocator
109-
}
110-
}
111-
112-
impl DerefMut for FixedSizeAllocator {
113-
fn deref_mut(&mut self) -> &mut Self::Target {
114-
&mut self.allocator
115-
}
116-
}
117-
118194
// SAFETY: `Allocator` is `Send`.
119195
// Moving `alloc_ptr: NonNull<u8>` across threads along with the `Allocator` is safe.
120196
unsafe impl Send for FixedSizeAllocator {}

0 commit comments

Comments (0)