@@ -5,7 +5,7 @@ use std::{
55 ptr:: NonNull ,
66 sync:: {
77 Mutex ,
8- atomic:: { AtomicBool , AtomicU32 , Ordering } ,
8+ atomic:: { AtomicU32 , Ordering } ,
99 } ,
1010} ;
1111
@@ -113,15 +113,14 @@ pub struct FixedSizeAllocatorMetadata {
113113 pub id: u32,
114114 /// Pointer to start of original allocation backing the `FixedSizeAllocator`
115115 pub alloc_ptr: NonNull<u8>,
116- /// `true` if both Rust and JS currently hold references to this `FixedSizeAllocator`.
116+ /// Number of references to this `FixedSizeAllocator`.
117117 ///
118- /// * `false` initially.
119- /// * Set to `true` when buffer is shared with JS.
120- /// * When JS garbage collector collects the buffer, set back to `false` again.
121- /// Memory will be freed when the `FixedSizeAllocator` is dropped on Rust side.
122- /// * Also set to `false` if `FixedSizeAllocator` is dropped on Rust side.
123- /// Memory will be freed in finalizer when JS garbage collector collects the buffer.
124- pub is_double_owned: AtomicBool,
118+ /// * 1 initially.
119+ /// * Incremented when buffer is shared with JS.
120+ /// * Decremented when JS garbage collector collects the buffer.
121+ /// * Decremented when `FixedSizeAllocator` is dropped on Rust side.
122+ /// * Memory is freed when counter reaches 0.
123+ pub ref_count: AtomicU32,
125124}
126125
127126// What we ideally want is an allocation 2 GiB in size, aligned on 4 GiB.
@@ -260,8 +259,7 @@ impl FixedSizeAllocator {
260259
261260 // Write `FixedSizeAllocatorMetadata` to after space reserved for `RawTransferMetadata`,
262261 // which is after the end of the allocator chunk
263- let metadata =
264- FixedSizeAllocatorMetadata { alloc_ptr, id, is_double_owned: AtomicBool::new(false) };
262+ let metadata = FixedSizeAllocatorMetadata { alloc_ptr, id, ref_count: AtomicU32::new(1) };
265263 // SAFETY: `FIXED_METADATA_OFFSET` is `FIXED_METADATA_SIZE_ROUNDED` bytes before end of
266264 // the allocation, so there's space for `FixedSizeAllocatorMetadata`.
267265 // It's sufficiently aligned for `FixedSizeAllocatorMetadata`.
@@ -302,11 +300,11 @@ impl Drop for FixedSizeAllocator {
302300 }
303301}
304302
305- /// Deallocate memory backing a `FixedSizeAllocator` if it's not double-owned
303+ /// Deallocate memory backing a `FixedSizeAllocator` if it's not multiply-referenced
306304/// (both owned by a `FixedSizeAllocator` on Rust side *and* held as a buffer on JS side).
307305///
308- /// If it is double-owned, don't deallocate the memory but set the flag that it's no longer double-owned
309- /// so next call to this function will deallocate it.
306+ /// If it is multiply-referenced, don't deallocate the memory, but decrement the ref counter.
307+ /// A later call to this function will deallocate the memory once the ref counter reaches 0.
310308///
311309/// # SAFETY
312310///
@@ -324,22 +322,20 @@ pub unsafe fn free_fixed_size_allocator(metadata_ptr: NonNull<FixedSizeAllocator
324322 // `&FixedSizeAllocatorMetadata` ref only lives until end of this block.
325323 let metadata = unsafe { metadata_ptr.as_ref() };
326324
327- // * If `is_double_owned` is already `false`, then one of:
328- // 1. The `Allocator` was never sent to JS side, or
329- // 2. The `FixedSizeAllocator` was already dropped on Rust side, or
330- // 3. Garbage collector already collected it on JS side.
331- // We can deallocate the memory.
325+ // Decrement the ref count.
332326 //
333- // * If `is_double_owned` is `true`, set it to `false` and exit.
334- // Memory will be freed when `FixedSizeAllocator` is dropped on Rust side
335- // or JS garbage collector collects the buffer.
327+ // If ref count is not 1 (before decrementing), then the memory is still in use elsewhere,
328+ // because either:
329+ // * `FixedSizeAllocator` has not yet been dropped on Rust side, or
330+ // * JS garbage collector has not yet collected the buffer on JS side.
331+ // Therefore we cannot free the memory yet. Exit.
336332 //
337333 // Maybe a more relaxed `Ordering` would be OK, but I (@overlookmotel) am not sure,
338334 // so going with `Ordering::SeqCst` to be on safe side.
339335 // Deallocation only happens at the end of the whole process, so it shouldn't matter much.
340336 // TODO: Figure out if can use `Ordering::Relaxed`.
341- let is_double_owned = metadata.is_double_owned.swap(false, Ordering::SeqCst);
342- if is_double_owned {
337+ let old_ref_count = metadata.ref_count.fetch_sub(1, Ordering::SeqCst);
338+ if old_ref_count != 1 {
343339 return ;
344340 }
345341
0 commit comments