
Commit 907ddd8

feat(allocator): block, waiting for new allocator if none are available
1 parent 4f715fe commit 907ddd8

File tree

1 file changed: +87 -25 lines changed


crates/oxc_allocator/src/pool/fixed_size.rs

Lines changed: 87 additions & 25 deletions
@@ -5,7 +5,7 @@ use std::{
     mem::{self, ManuallyDrop},
     ptr::NonNull,
     sync::{
-        Mutex,
+        Condvar, Mutex,
         atomic::{AtomicBool, AtomicU32, Ordering},
     },
 };
@@ -47,45 +47,106 @@ pub struct FixedSizeAllocatorPool {
     allocators: Mutex<Vec<FixedSizeAllocator>>,
     /// ID to assign to next `Allocator` that's created
     next_id: AtomicU32,
+    /// Maximum number of allocators this pool will create.
+    /// Once this limit is reached, [`get`](Self::get) will block until an allocator
+    /// is returned to the pool via [`add`](Self::add).
+    capacity_limit: usize,
+    /// Condition variable used to signal when an allocator is returned to the pool.
+    /// Threads blocked in [`get`](Self::get) waiting for an allocator will be woken
+    /// when [`add`](Self::add) returns one to the pool.
+    available: Condvar,
+    /// `true` if allocator creation has failed.
+    /// Once set, no further attempts to create allocators will be made, since they would
+    /// also fail.
+    allocation_failed: AtomicBool,
 }
 
 impl FixedSizeAllocatorPool {
     /// Create a new [`FixedSizeAllocatorPool`] for use across the specified number of threads.
+    ///
+    /// The pool will create at most `thread_count` allocators. If all allocators are in use
+    /// when [`get`](Self::get) is called, it will block until one is returned to the pool.
+    ///
+    /// If `thread_count` is 0, the pool will create at most 1 allocator.
     pub fn new(thread_count: usize) -> FixedSizeAllocatorPool {
-        // Each allocator consumes a large block of memory, so create them on demand instead of upfront,
-        // in case not all threads end up being used (e.g. language server without `import` plugin)
-        let allocators = Vec::with_capacity(thread_count);
-        FixedSizeAllocatorPool { allocators: Mutex::new(allocators), next_id: AtomicU32::new(0) }
+        let mut allocators = Vec::with_capacity(thread_count);
+        let Ok(allocator) = FixedSizeAllocator::try_new(0) else {
+            panic!("Failed to create any fixed-size allocators for the pool");
+        };
+        allocators.push(allocator);
+
+        FixedSizeAllocatorPool {
+            allocators: Mutex::new(allocators),
+            next_id: AtomicU32::new(1),
+            available: Condvar::new(),
+            capacity_limit: thread_count.max(1),
+            allocation_failed: AtomicBool::new(false),
+        }
     }
 
     /// Retrieve an [`Allocator`] from the pool, or create a new one if the pool is empty.
     ///
     /// # Panics
-    /// Panics if the underlying mutex is poisoned.
+    /// * Panics if the underlying mutex is poisoned.
+    /// * Panics if a new allocator needs to be created but memory allocation fails.
     pub fn get(&self) -> Allocator {
-        let fixed_size_allocator = {
-            let mut allocators = self.allocators.lock().unwrap();
-            allocators.pop()
-        };
+        fn into_allocator(allocator: FixedSizeAllocator) -> Allocator {
+            // SAFETY: `FixedSizeAllocator` is just a wrapper around `ManuallyDrop<Allocator>`,
+            // and is `#[repr(transparent)]`, so the 2 are equivalent.
+            let allocator =
+                unsafe { mem::transmute::<FixedSizeAllocator, ManuallyDrop<Allocator>>(allocator) };
+            ManuallyDrop::into_inner(allocator)
+        }
+
+        {
+            let maybe_allocator = self.allocators.lock().unwrap().pop();
+            if let Some(allocator) = maybe_allocator {
+                return into_allocator(allocator);
+            }
+        }
+
+        if let Some(Ok(allocator)) = self.create_new_allocator() {
+            return into_allocator(allocator);
+        }
+
+        loop {
+            let mut maybe_allocator = self.available.wait(self.allocators.lock().unwrap()).unwrap();
+            if let Some(allocator) = maybe_allocator.pop() {
+                return into_allocator(allocator);
+            }
+        }
+    }
+
+    fn create_new_allocator(&self) -> Option<Result<FixedSizeAllocator, AllocError>> {
+        // If a previous allocation attempt failed, don't try again - it will also fail.
+        if self.allocation_failed.load(Ordering::Relaxed) {
+            return None;
+        }
+
+        loop {
+            let id = self.next_id.load(Ordering::Relaxed);
+
+            if id as usize >= self.capacity_limit {
+                return None;
+            }
 
-        let fixed_size_allocator = fixed_size_allocator.unwrap_or_else(|| {
-            // Each allocator needs to have a unique ID, but the order those IDs are assigned in
-            // doesn't matter, so `Ordering::Relaxed` is fine
-            let id = self.next_id.fetch_add(1, Ordering::Relaxed);
             // Protect against IDs wrapping around.
             // TODO: Does this work? Do we need it anyway?
            assert!(id < u32::MAX, "Created too many allocators");
-            FixedSizeAllocator::try_new(id).unwrap()
-        });
-
-        // Unwrap `FixedSizeAllocator`.
-        // `add` method will wrap it again, before returning it to pool, ensuring it gets dropped properly.
-        // SAFETY: `FixedSizeAllocator` is just a wrapper around `ManuallyDrop<Allocator>`,
-        // and is `#[repr(transparent)]`, so the 2 are equivalent.
-        let allocator = unsafe {
-            mem::transmute::<FixedSizeAllocator, ManuallyDrop<Allocator>>(fixed_size_allocator)
-        };
-        ManuallyDrop::into_inner(allocator)
+
+            // Try to claim this ID. If another thread got there first, retry with new ID.
+            if self
+                .next_id
+                .compare_exchange_weak(id, id + 1, Ordering::Relaxed, Ordering::Relaxed)
+                .is_ok()
+            {
+                let result = FixedSizeAllocator::try_new(id);
+                if result.is_err() {
+                    self.allocation_failed.store(true, Ordering::Relaxed);
+                }
+                return Some(result);
+            }
+        }
     }
 
     /// Add an [`Allocator`] to the pool.
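Two details of the new `get` / `create_new_allocator` pair are worth calling out. `into_allocator` relies on `FixedSizeAllocator` being a `#[repr(transparent)]` wrapper around `ManuallyDrop<Allocator>`, as its SAFETY comment notes. And allocator IDs are now claimed with a `compare_exchange_weak` loop instead of `fetch_add`, so `next_id` only advances when the claim is still under `capacity_limit`. A minimal standalone sketch of that claim-up-to-a-cap pattern (the `IdClaimer` type and `claim` method are illustrative, not part of oxc):

use std::sync::atomic::{AtomicU32, Ordering};

/// Illustrative only: hands out IDs 0..cap, then refuses.
struct IdClaimer {
    next_id: AtomicU32,
    cap: u32,
}

impl IdClaimer {
    fn claim(&self) -> Option<u32> {
        loop {
            let id = self.next_id.load(Ordering::Relaxed);
            if id >= self.cap {
                // Cap reached: no more IDs will ever be handed out.
                return None;
            }
            // Try to advance the counter. If another thread won the race,
            // `compare_exchange_weak` fails (possibly spuriously) and we retry.
            if self
                .next_id
                .compare_exchange_weak(id, id + 1, Ordering::Relaxed, Ordering::Relaxed)
                .is_ok()
            {
                return Some(id);
            }
        }
    }
}

fn main() {
    let claimer = IdClaimer { next_id: AtomicU32::new(0), cap: 2 };
    assert_eq!(claimer.claim(), Some(0));
    assert_eq!(claimer.claim(), Some(1));
    assert_eq!(claimer.claim(), None);
}

Unlike an unconditional `fetch_add`, this never advances the counter past the cap, so late-arriving callers simply get `None` rather than ever-growing IDs.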
@@ -104,6 +165,7 @@ impl FixedSizeAllocatorPool {
 
         let mut allocators = self.allocators.lock().unwrap();
         allocators.push(fixed_size_allocator);
+        self.available.notify_one();
     }
 }
 

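Taken together, `get` and `add` now form a bounded, blocking pool: when the pool is empty and no further allocator can be created, `get` parks on the `available` condition variable, and each `add` wakes one waiter with `notify_one`. A self-contained sketch of that Mutex-plus-Condvar pattern, with a plain `Vec<String>` standing in for the allocators (the `BlockingPool` type below is illustrative, not part of oxc):

use std::sync::{Condvar, Mutex};

/// Illustrative bounded pool: items can only be taken and returned;
/// `take` blocks while the pool is empty.
struct BlockingPool {
    items: Mutex<Vec<String>>,
    available: Condvar,
}

impl BlockingPool {
    /// Take an item, blocking until one is available.
    fn take(&self) -> String {
        let mut items = self.items.lock().unwrap();
        loop {
            // Re-check the condition after every wake-up: `Condvar::wait` can
            // wake spuriously, and another thread may have grabbed the item first.
            if let Some(item) = items.pop() {
                return item;
            }
            items = self.available.wait(items).unwrap();
        }
    }

    /// Return an item to the pool and wake one blocked `take`, if any.
    fn put(&self, item: String) {
        self.items.lock().unwrap().push(item);
        self.available.notify_one();
    }
}

fn main() {
    let pool = BlockingPool {
        items: Mutex::new(vec!["item".to_string()]),
        available: Condvar::new(),
    };
    let item = pool.take();
    pool.put(item);
}

The loop around `Condvar::wait` is the important part of the pattern: since wake-ups can be spurious and another thread can empty the pool between `notify_one` and the wake-up, the condition is always re-tested while holding the lock.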