@@ -5,7 +5,7 @@ use std::{
     ptr::NonNull,
     sync::{
         Mutex,
-        atomic::{AtomicBool, AtomicU32, Ordering},
+        atomic::{AtomicU32, Ordering},
     },
 };
 
@@ -113,15 +113,14 @@ pub struct FixedSizeAllocatorMetadata {
     pub id: u32,
     /// Pointer to start of original allocation backing the `FixedSizeAllocator`
     pub alloc_ptr: NonNull<u8>,
-    /// `true` if both Rust and JS currently hold references to this `FixedSizeAllocator`.
+    /// Number of references to this `FixedSizeAllocator`.
     ///
-    /// * `false` initially.
-    /// * Set to `true` when buffer is shared with JS.
-    /// * When JS garbage collector collects the buffer, set back to `false` again.
-    ///   Memory will be freed when the `FixedSizeAllocator` is dropped on Rust side.
-    /// * Also set to `false` if `FixedSizeAllocator` is dropped on Rust side.
-    ///   Memory will be freed in finalizer when JS garbage collector collects the buffer.
-    pub is_double_owned: AtomicBool,
+    /// * 1 initially.
+    /// * Incremented when buffer is shared with JS.
+    /// * Decremented when JS garbage collector collects the buffer.
+    /// * Decremented when `FixedSizeAllocator` is dropped on Rust side.
+    /// * Memory is freed when counter reaches 0.
+    pub ref_count: AtomicU32,
 }
 
 // What we ideally want is an allocation 2 GiB in size, aligned on 4 GiB.
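To make the new counting scheme concrete, here is a minimal self-contained sketch of the protocol the doc comment above describes. The helper names (`share_with_js`, `release`) are hypothetical illustrations, not functions from this PR:

```rust
use std::sync::atomic::{AtomicU32, Ordering};

struct Metadata {
    ref_count: AtomicU32,
}

impl Metadata {
    /// Rust side owns the allocation from the start, so the count begins at 1.
    fn new() -> Self {
        Metadata { ref_count: AtomicU32::new(1) }
    }

    /// Hypothetical: called when the buffer is handed to JS,
    /// which then holds a second reference.
    fn share_with_js(&self) {
        self.ref_count.fetch_add(1, Ordering::SeqCst);
    }

    /// Hypothetical: called from both Rust's `Drop` and the JS finalizer.
    /// Returns `true` if the caller released the last reference and must free.
    fn release(&self) -> bool {
        self.ref_count.fetch_sub(1, Ordering::SeqCst) == 1
    }
}

fn main() {
    let meta = Metadata::new();   // count: 1
    meta.share_with_js();         // count: 1 -> 2
    assert!(!meta.release());     // JS GC collects first: 2 -> 1, no free yet
    assert!(meta.release());      // Rust `Drop` runs: 1 -> 0, free now
    println!("memory would be freed exactly once");
}
```

Whichever side releases last (Rust `Drop` or JS finalizer) sees the count hit 0 and frees, so the order of the two releases no longer matters.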
@@ -260,8 +259,7 @@ impl FixedSizeAllocator {
 
         // Write `FixedSizeAllocatorMetadata` to after space reserved for `RawTransferMetadata`,
         // which is after the end of the allocator chunk
-        let metadata =
-            FixedSizeAllocatorMetadata { alloc_ptr, id, is_double_owned: AtomicBool::new(false) };
+        let metadata = FixedSizeAllocatorMetadata { alloc_ptr, id, ref_count: AtomicU32::new(1) };
         // SAFETY: `FIXED_METADATA_OFFSET` is `FIXED_METADATA_SIZE_ROUNDED` bytes before end of
         // the allocation, so there's space for `FixedSizeAllocatorMetadata`.
         // It's sufficiently aligned for `FixedSizeAllocatorMetadata`.
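For context on the SAFETY comment, this is the general technique of writing a metadata struct at a fixed offset near the end of a larger allocation. The sketch below uses an invented chunk size and a toy `Meta` type, not the crate's real constants:

```rust
use std::alloc::{alloc, dealloc, Layout};
use std::mem::{align_of, size_of};

#[repr(C)]
struct Meta {
    id: u32,
}

fn main() {
    // Hypothetical size; the real code derives offsets from its own constants.
    const CHUNK_SIZE: usize = 4096;
    let meta_offset = CHUNK_SIZE - size_of::<Meta>(); // 4092: aligned for `Meta`

    let layout = Layout::from_size_align(CHUNK_SIZE, align_of::<Meta>()).unwrap();
    unsafe {
        let base = alloc(layout);
        assert!(!base.is_null());

        // SAFETY: `meta_offset` is in bounds and a multiple of `align_of::<Meta>()`,
        // so there's space for a `Meta` and the pointer is sufficiently aligned.
        let meta_ptr = base.add(meta_offset).cast::<Meta>();
        meta_ptr.write(Meta { id: 7 });
        assert_eq!((*meta_ptr).id, 7);

        dealloc(base, layout);
    }
}
```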
@@ -304,11 +302,11 @@ impl Drop for FixedSizeAllocator {
     }
 }
 
-/// Deallocate memory backing a `FixedSizeAllocator` if it's not double-owned
+/// Deallocate memory backing a `FixedSizeAllocator` if it's not multiply-referenced
 /// (both owned by a `FixedSizeAllocator` on Rust side *and* held as a buffer on JS side).
 ///
-/// If it is double-owned, don't deallocate the memory but set the flag that it's no longer double-owned
-/// so next call to this function will deallocate it.
+/// If it is multiply-referenced, don't deallocate the memory, but decrement the ref counter.
+/// A later call to this function will deallocate the memory once the ref counter reaches 0.
 ///
 /// # SAFETY
 ///
@@ -326,22 +324,20 @@ pub unsafe fn free_fixed_size_allocator(metadata_ptr: NonNull<FixedSizeAllocatorMetadata>) {
     // `&FixedSizeAllocatorMetadata` ref only lives until end of this block.
     let metadata = unsafe { metadata_ptr.as_ref() };
 
-    // * If `is_double_owned` is already `false`, then one of:
-    //   1. The `Allocator` was never sent to JS side, or
-    //   2. The `FixedSizeAllocator` was already dropped on Rust side, or
-    //   3. Garbage collector already collected it on JS side.
-    //   We can deallocate the memory.
+    // Decrement the ref count.
     //
-    // * If `is_double_owned` is `true`, set it to `false` and exit.
-    //   Memory will be freed when `FixedSizeAllocator` is dropped on Rust side
-    //   or JS garbage collector collects the buffer.
+    // If ref count is not 1 (before decrementing), then the memory is still in use elsewhere,
+    // because either:
+    // * `FixedSizeAllocator` has not yet been dropped on Rust side, or
+    // * JS garbage collector has not yet collected the buffer on JS side.
+    // Therefore we cannot free the memory yet. Exit.
     //
     // Maybe a more relaxed `Ordering` would be OK, but I (@overlookmotel) am not sure,
     // so going with `Ordering::SeqCst` to be on safe side.
     // Deallocation only happens at the end of the whole process, so it shouldn't matter much.
     // TODO: Figure out if can use `Ordering::Relaxed`.
-    let is_double_owned = metadata.is_double_owned.swap(false, Ordering::SeqCst);
-    if is_double_owned {
+    let old_ref_count = metadata.ref_count.fetch_sub(1, Ordering::SeqCst);
+    if old_ref_count != 1 {
         return;
     }
 
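On the `TODO` about relaxing the ordering: `Ordering::Relaxed` alone is generally not sufficient for a ref-count decrement that gates deallocation, because the thread that frees the memory must observe all writes made through the other reference. The standard library's `Arc` handles this with a `Release` decrement paired with an `Acquire` fence on the last-reference path. A sketch of that pattern (not code from this PR; whether it suffices here depends on what other accesses the metadata sees):

```rust
use std::sync::atomic::{fence, AtomicU32, Ordering};

/// Decrement the count; return `true` if the caller dropped the last
/// reference and should deallocate.
fn release(ref_count: &AtomicU32) -> bool {
    // `Release` publishes this owner's writes to whichever thread frees.
    if ref_count.fetch_sub(1, Ordering::Release) != 1 {
        return false;
    }
    // `Acquire` fence: synchronize with every earlier `Release` decrement,
    // so the freeing thread sees all other owners' writes before deallocating.
    fence(Ordering::Acquire);
    true
}

fn main() {
    let count = AtomicU32::new(2);
    assert!(!release(&count)); // first owner drops: 2 -> 1, no free
    assert!(release(&count));  // last owner drops: 1 -> 0, safe to free
}
```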