diff --git a/crates/bevy_app/src/sub_app.rs b/crates/bevy_app/src/sub_app.rs index c340b80654a86..21b2dd2ae5007 100644 --- a/crates/bevy_app/src/sub_app.rs +++ b/crates/bevy_app/src/sub_app.rs @@ -142,6 +142,7 @@ impl SubApp { /// Runs the default schedule and updates internal component trackers. pub fn update(&mut self) { self.run_default_schedule(); + self.world.entities().queue_remote_pending_to_be_flushed(); self.world.clear_trackers(); } diff --git a/crates/bevy_ecs/src/archetype.rs b/crates/bevy_ecs/src/archetype.rs index f12cd03a69dbd..918c7bd7d81ef 100644 --- a/crates/bevy_ecs/src/archetype.rs +++ b/crates/bevy_ecs/src/archetype.rs @@ -89,6 +89,11 @@ pub struct ArchetypeId(u32); impl ArchetypeId { /// The ID for the [`Archetype`] without any components. pub const EMPTY: ArchetypeId = ArchetypeId(0); + /// This represents an archetype that does not actually exist. + /// This can be used as a placeholder. + /// + /// On an entity, this archetype signals that the entity is not yet part of any archetype. + /// /// # Safety: /// /// This must always have an all-1s bit pattern to ensure soundness in fast entity id space allocation. diff --git a/crates/bevy_ecs/src/bundle.rs b/crates/bevy_ecs/src/bundle.rs index e3e54c092f644..561d286e24457 100644 --- a/crates/bevy_ecs/src/bundle.rs +++ b/crates/bevy_ecs/src/bundle.rs @@ -1701,6 +1701,8 @@ impl<'w> BundleSpawner<'w> { table.reserve(additional); } + /// **Note:** This will not cause eny entities to be freed. + /// /// # Safety /// `entity` must be allocated (but non-existent), `T` must match this [`BundleInfo`]'s type #[inline] diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs new file mode 100644 index 0000000000000..18232341a9b8a --- /dev/null +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -0,0 +1,966 @@ +use bevy_platform::{ + prelude::Vec, + sync::{ + atomic::{AtomicBool, AtomicPtr, AtomicU32, AtomicU64, Ordering}, + Arc, + }, +}; +use core::mem::ManuallyDrop; +use log::warn; +use nonmax::NonMaxU32; + +use crate::query::DebugCheckedUnwrap; + +use super::{Entity, EntityRow, EntitySetIterator}; + +/// This is the item we store in the free list. +/// Effectively, this is a `MaybeUninit` where uninit is represented by `Entity::PLACEHOLDER`. +/// +/// We don't use atomics to achieve any particular ordering: we just need *some* ordering. +/// Conceptually, this could just be `SyncCell`, +/// but accessing that requires additional unsafe justification, and could cause unsound optimizations by the compiler. +/// +/// No [`Slot`] access is ever contested between two threads due to the ordering constraints in the [`FreeCount`]. +/// That also guarantees a proper ordering between slot access. +/// Hence these atomics don't need to account for any synchronization, and relaxed ordering is used everywhere. +// TODO: consider fully justifying `SyncCell` here with no atomics. +struct Slot { + #[cfg(not(target_has_atomic = "64"))] + entity_index: AtomicU32, + #[cfg(not(target_has_atomic = "64"))] + entity_generation: AtomicU32, + #[cfg(target_has_atomic = "64")] + inner_entity: AtomicU64, +} + +impl Slot { + /// Produces a meaningless empty value. This is a valid but incorrect `Entity`. + /// It's valid because the bits do represent a valid bit pattern of an `Entity`. + /// It's incorrect because this is in the free buffer even though the entity was never freed. + /// Importantly, [`FreeCount`] determines which part of the free buffer is the free list. 
+ /// An empty slot may be in the free buffer, but should not be in the free list. + /// This can be thought of as the `MaybeUninit` uninit in `Vec`'s excess capacity. + fn empty() -> Self { + let source = Entity::PLACEHOLDER; + #[cfg(not(target_has_atomic = "64"))] + return Self { + entity_index: AtomicU32::new(source.index()), + entity_generation: AtomicU32::new(source.generation().to_bits()), + }; + #[cfg(target_has_atomic = "64")] + return Self { + inner_entity: AtomicU64::new(source.to_bits()), + }; + } + + #[inline] + fn set_entity(&self, entity: Entity) { + #[cfg(not(target_has_atomic = "64"))] + self.entity_generation + .store(entity.generation().to_bits(), Ordering::Relaxed); + #[cfg(not(target_has_atomic = "64"))] + self.entity_index.store(entity.index(), Ordering::Relaxed); + #[cfg(target_has_atomic = "64")] + self.inner_entity.store(entity.to_bits(), Ordering::Relaxed); + } + + /// Gets the stored entity. The result will be [`Entity::PLACEHOLDER`] unless [`set_entity`](Self::set_entity) has been called. + #[inline] + fn get_entity(&self) -> Entity { + #[cfg(not(target_has_atomic = "64"))] + return Entity::from_raw_and_generation( + // SAFETY: This is valid since it was from an entity's index to begin with. + unsafe { + EntityRow::new( + NonMaxU32::new(self.entity_index.load(Ordering::Relaxed)) + .debug_checked_unwrap(), + ) + }, + super::EntityGeneration::from_bits(self.entity_generation.load(Ordering::Relaxed)), + ); + + #[cfg(target_has_atomic = "64")] + // SAFETY: This is always sourced from a proper entity. + return unsafe { + Entity::try_from_bits(self.inner_entity.load(Ordering::Relaxed)).debug_checked_unwrap() + }; + } +} + +/// Each chunk stores a buffer of [`Slot`]s at a fixed capacity. +struct Chunk { + /// Points to the first slot. If this is null, we need to allocate it. + first: AtomicPtr, +} + +impl Chunk { + /// Constructs a null [`Chunk`]. + const fn new() -> Self { + Self { + first: AtomicPtr::new(core::ptr::null_mut()), + } + } + + /// Gets the entity at the index within this chunk. + /// + /// # Safety + /// + /// [`Self::set`] must have been called on this index before, ensuring it is in bounds and the chunk is initialized. + #[inline] + unsafe fn get(&self, index: u32) -> Entity { + // Relaxed is fine since caller ensures we are initialized already. + // In order for the caller to guarantee that, they must have an ordering that orders this `get` after the required `set`. + let head = self.first.load(Ordering::Relaxed); + // SAFETY: caller ensures we are in bounds and init (because `set` must be in bounds) + let target = unsafe { &*head.add(index as usize) }; + + target.get_entity() + } + + /// Gets a slice of indices. + /// + /// # Safety + /// + /// [`Self::set`] must have been called on these indices before, ensuring it is in bounds and the chunk is initialized. + #[inline] + unsafe fn get_slice(&self, index: u32, ideal_len: u32, chunk_capacity: u32) -> &[Slot] { + let after_index_slice_len = chunk_capacity - index; + let len = after_index_slice_len.min(ideal_len) as usize; + + // Relaxed is fine since caller ensures we are initialized already. + // In order for the caller to guarantee that, they must have an ordering that orders this `get` after the required `set`. + let head = self.first.load(Ordering::Relaxed); + + // SAFETY: Caller ensures we are init, so the chunk was allocated via a `Vec` and the index is within the capacity. + unsafe { core::slice::from_raw_parts(head, len) } + } + + /// Sets this entity at this index. 
+ /// + /// # Safety + /// + /// This must not be called concurrently with itself. + /// Index must be in bounds. + /// Access does not conflict with another [`Self::get`]. + #[inline] + unsafe fn set(&self, index: u32, entity: Entity, chunk_capacity: u32) { + // Relaxed is fine here since this is not called concurrently and does not conflict with a `get`. + let ptr = self.first.load(Ordering::Relaxed); + let head = if ptr.is_null() { + self.init(chunk_capacity) + } else { + ptr + }; + + // SAFETY: caller ensures it is in bounds and we are not fighting with other `set` calls or `get` calls. + // A race condition is therefore impossible. + let target = unsafe { &*head.add(index as usize) }; + + target.set_entity(entity); + } + + /// Initializes the chunk to be valid, returning the pointer. + /// + /// # Safety + /// + /// This must not be called concurrently with itself. + #[cold] + unsafe fn init(&self, chunk_capacity: u32) -> *mut Slot { + let mut buff = ManuallyDrop::new(Vec::new()); + buff.reserve_exact(chunk_capacity as usize); + buff.resize_with(chunk_capacity as usize, Slot::empty); + let ptr = buff.as_mut_ptr(); + // Relaxed is fine here since this is not called concurrently. + self.first.store(ptr, Ordering::Relaxed); + ptr + } + + /// Frees memory + /// + /// # Safety + /// + /// This must not be called concurrently with itself. + /// `chunk_capacity` must be the same as it was initialized with. + unsafe fn dealloc(&self, chunk_capacity: u32) { + // Relaxed is fine here since this is not called concurrently. + let to_drop = self.first.load(Ordering::Relaxed); + if !to_drop.is_null() { + // SAFETY: This was created in [`Self::init`] from a standard Vec. + unsafe { + Vec::from_raw_parts(to_drop, chunk_capacity as usize, chunk_capacity as usize); + } + } + } +} + +/// This is a buffer that has been split into power-of-two sized chunks, so that each chunk is pinned in memory. +/// Conceptually, each chunk is put end-to-end to form the buffer. This ultimately avoids copying elements on resize, +/// while allowing it to expand in capacity as needed. A separate system must track the length of the list in the buffer. +/// Each chunk is twice as large as the last, except for the first two which have a capacity of 512. +struct FreeBuffer([Chunk; Self::NUM_CHUNKS as usize]); + +impl FreeBuffer { + const NUM_CHUNKS: u32 = 24; + const NUM_SKIPPED: u32 = u32::BITS - Self::NUM_CHUNKS; + + /// Constructs an empty [`FreeBuffer`]. + const fn new() -> Self { + Self([const { Chunk::new() }; Self::NUM_CHUNKS as usize]) + } + + /// Computes the capacity of the chunk at this index within [`Self::NUM_CHUNKS`]. + /// The first 2 have length 512 (2^9) and the last has length (2^31) + #[inline] + fn capacity_of_chunk(chunk_index: u32) -> u32 { + // We do this because we're skipping the first `NUM_SKIPPED` powers, so we need to make up for them by doubling the first index. + // This is why the first 2 indices both have a capacity of 512. + let corrected = chunk_index.max(1); + // We add NUM_SKIPPED because the total capacity should be as if [`Self::NUM_CHUNKS`] were 32. + // This skips the first NUM_SKIPPED powers. + let corrected = corrected + Self::NUM_SKIPPED; + // This bit shift is just 2^corrected. + 1 << corrected + } + + /// For this index in the whole buffer, returns the index of the [`Chunk`], the index within that chunk, and the capacity of that chunk. 
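+    ///
+    /// As a worked example (values matching the `chunk_indexing` test at the bottom of this
+    /// file), with the first two chunks holding 512 slots each and chunk 2 holding 1024:
+    ///
+    /// ```text
+    /// index_info(0)    == (0, 0, 512)
+    /// index_info(512)  == (1, 0, 512)
+    /// index_info(1025) == (2, 1, 1024)
+    /// ```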
+ #[inline] + fn index_info(full_index: u32) -> (u32, u32, u32) { + // We do a `saturating_sub` because we skip the first `NUM_SKIPPED` powers to make space for the first chunk's entity count. + // The -1 is because this is the number of chunks, but we want the index in the end. + // We store chunks in smallest to biggest order, so we need to reverse it. + let chunk_index = (Self::NUM_CHUNKS - 1).saturating_sub(full_index.leading_zeros()); + let chunk_capacity = Self::capacity_of_chunk(chunk_index); + // We only need to cut off this particular bit. + // The capacity is only one bit, and if other bits needed to be dropped, `leading` would have been greater + let index_in_chunk = full_index & !chunk_capacity; + + (chunk_index, index_in_chunk, chunk_capacity) + } + + /// For this index in the whole buffer, returns the [`Chunk`], the index within that chunk, and the capacity of that chunk. + #[inline] + fn index_in_chunk(&self, full_index: u32) -> (&Chunk, u32, u32) { + let (chunk_index, index_in_chunk, chunk_capacity) = Self::index_info(full_index); + // SAFETY: Caller ensures the chunk index is correct + let chunk = unsafe { self.0.get_unchecked(chunk_index as usize) }; + (chunk, index_in_chunk, chunk_capacity) + } + + /// Gets the entity at an index. + /// + /// # Safety + /// + /// [`set`](Self::set) must have been called on this index to initialize the its memory. + unsafe fn get(&self, full_index: u32) -> Entity { + let (chunk, index, _) = self.index_in_chunk(full_index); + // SAFETY: Caller ensures this index was set + unsafe { chunk.get(index) } + } + + /// Sets an entity at an index. + /// + /// # Safety + /// + /// This must not be called concurrently with itself. + /// Access must not conflict with another [`Self::get`]. + #[inline] + unsafe fn set(&self, full_index: u32, entity: Entity) { + let (chunk, index, chunk_capacity) = self.index_in_chunk(full_index); + // SAFETY: Ensured by caller and that the index is correct. + unsafe { chunk.set(index, entity, chunk_capacity) } + } + + /// Iterates the entities in these indices. + /// + /// # Safety + /// + /// [`Self::set`] must have been called on these indices before to initialize memory. + #[inline] + unsafe fn iter(&self, indices: core::ops::Range) -> FreeBufferIterator { + FreeBufferIterator { + buffer: self, + future_buffer_indices: indices, + current_chunk_slice: [].iter(), + } + } +} + +impl Drop for FreeBuffer { + fn drop(&mut self) { + for index in 0..Self::NUM_CHUNKS { + let capacity = Self::capacity_of_chunk(index); + // SAFETY: we have `&mut` and the capacity is correct. + unsafe { self.0[index as usize].dealloc(capacity) }; + } + } +} + +/// An iterator over a [`FreeBuffer`]. +/// +/// # Safety +/// +/// [`FreeBuffer::set`] must have been called on these indices beforehand to initialize memory. +struct FreeBufferIterator<'a> { + buffer: &'a FreeBuffer, + /// The part of the buffer we are iterating at the moment. + current_chunk_slice: core::slice::Iter<'a, Slot>, + /// The indices in the buffer that are not yet in `current_chunk_slice`. 
+ future_buffer_indices: core::ops::Range, +} + +impl<'a> Iterator for FreeBufferIterator<'a> { + type Item = Entity; + + #[inline] + fn next(&mut self) -> Option { + if let Some(found) = self.current_chunk_slice.next() { + return Some(found.get_entity()); + } + + let still_need = self.future_buffer_indices.len() as u32; + let next_index = self.future_buffer_indices.next()?; + let (chunk, index, chunk_capacity) = self.buffer.index_in_chunk(next_index); + + // SAFETY: Assured by constructor + let slice = unsafe { chunk.get_slice(index, still_need, chunk_capacity) }; + self.future_buffer_indices.start += slice.len() as u32; + self.current_chunk_slice = slice.iter(); + + // SAFETY: Constructor ensures these indices are valid in the buffer; the buffer is not sparse, and we just got the next slice. + // So the only way for the slice to be empty is if the constructor did not uphold safety. + let next = unsafe { self.current_chunk_slice.next().debug_checked_unwrap() }; + Some(next.get_entity()) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let len = self.future_buffer_indices.len() + self.current_chunk_slice.len(); + (len, Some(len)) + } +} + +impl<'a> ExactSizeIterator for FreeBufferIterator<'a> {} +impl<'a> core::iter::FusedIterator for FreeBufferIterator<'a> {} + +/// This tracks the state of a [`FreeCount`], which has lots of information packed into it. +/// +/// - The first 33 bits store a signed 33 bit integer. This behaves like a u33, but we define `1 << 32` as 0. +/// - The 34th bit stores a flag that indicates if the count has been disabled/suspended. +/// - The remaining 30 bits are the generation. The generation just differentiates different versions of the state that happen to encode the same length. +#[derive(Clone, Copy)] +struct FreeCountState(u64); + +impl FreeCountState { + /// When this bit is on, the count is disabled. + /// This is used to prevent remote allocations from running at the same time as a free operation. + const DISABLING_BIT: u64 = 1 << 33; + /// This is the mask for the length bits. + const LENGTH_MASK: u64 = (1 << 32) | u32::MAX as u64; + /// This is the value of the length mask we consider to be 0. + const LENGTH_0: u64 = 1 << 32; + /// This is the lowest bit in the u30 generation. + const GENERATION_LEAST_BIT: u64 = 1 << 34; + + /// Constructs a length of 0. + const fn new_zero_len() -> Self { + Self(Self::LENGTH_0) + } + + /// Gets the encoded length. + #[inline] + const fn length(self) -> u32 { + let unsigned_length = self.0 & Self::LENGTH_MASK; + unsigned_length.saturating_sub(Self::LENGTH_0) as u32 + } + + /// Returns whether or not the count is disabled. + #[inline] + const fn is_disabled(self) -> bool { + (self.0 & Self::DISABLING_BIT) > 0 + } + + /// Changes only the length of this count to `length`. + #[inline] + const fn with_length(self, length: u32) -> Self { + // Just turns on the "considered zero" bit since this is non-negative. + let length = length as u64 | Self::LENGTH_0; + Self(self.0 & !Self::LENGTH_MASK | length) + } + + /// For popping `num` off the count, subtract the resulting u64. + #[inline] + const fn encode_pop(num: u32) -> u64 { + let subtract_length = num as u64; + // Also subtract one from the generation bit. + subtract_length | Self::GENERATION_LEAST_BIT + } + + /// Returns the count after popping off `num` elements. + #[inline] + const fn pop(self, num: u32) -> Self { + Self(self.0.wrapping_sub(Self::encode_pop(num))) + } +} + +/// This is an atomic interface to [`FreeCountState`]. 
+struct FreeCount(AtomicU64); + +impl FreeCount { + /// Constructs a length of 0. + const fn new_zero_len() -> Self { + Self(AtomicU64::new(FreeCountState::new_zero_len().0)) + } + + /// Gets the current state of the buffer. + #[inline] + fn state(&self, order: Ordering) -> FreeCountState { + FreeCountState(self.0.load(order)) + } + + /// Subtracts `num` from the length, returning the previous state. + /// + /// **NOTE:** Caller should be careful that changing the state is allowed and that the state is not disabled. + #[inline] + fn pop_for_state(&self, num: u32, order: Ordering) -> FreeCountState { + let to_sub = FreeCountState::encode_pop(num); + let raw = self.0.fetch_sub(to_sub, order); + FreeCountState(raw) + } + + /// Marks the state as disabled, returning the previous state + #[inline] + fn disable_len_for_state(&self, order: Ordering) -> FreeCountState { + // We don't care about the generation here since this changes the value anyway. + FreeCountState(self.0.fetch_or(FreeCountState::DISABLING_BIT, order)) + } + + /// Sets the state explicitly. + /// Caller must be careful that the state has not changed since getting the state and setting it. + /// If that happens, the state may not properly reflect the length of the free list or its generation, + /// causing entities to be skipped or given out twice. + /// This is not a safety concern, but it is a major correctness concern. + #[inline] + fn set_state_risky(&self, state: FreeCountState, order: Ordering) { + self.0.store(state.0, order); + } + + /// Attempts to update the state, returning the new [`FreeCountState`] if it fails. + #[inline] + fn try_set_state( + &self, + expected_current_state: FreeCountState, + target_state: FreeCountState, + success: Ordering, + failure: Ordering, + ) -> Result<(), FreeCountState> { + self.0 + .compare_exchange(expected_current_state.0, target_state.0, success, failure) + .map(|_| ()) + .map_err(FreeCountState) + } +} + +/// This is conceptually like a `Vec` that stores entities pending reuse. +struct FreeList { + /// The actual buffer of [`Slot`]s. + /// Conceptually, this is like the `RawVec` for this `Vec`. + buffer: FreeBuffer, + /// The length of the free buffer + len: FreeCount, +} + +impl FreeList { + /// Constructs a empty [`FreeList`]. + fn new() -> Self { + Self { + buffer: FreeBuffer::new(), + len: FreeCount::new_zero_len(), + } + } + + /// Gets the number of free entities. + /// + /// # Safety + /// + /// For this to be accurate, this must not be called during a [`Self::free`]. + #[inline] + unsafe fn num_free(&self) -> u32 { + self.len.state(Ordering::Acquire).length() + } + + /// Frees the `entity` allowing it to be reused. + /// + /// # Safety + /// + /// This must not conflict with any other [`Self::free`] or [`Self::alloc`] calls. + #[inline] + unsafe fn free(&self, entity: Entity) { + // Disable remote allocation. + let state = self.len.disable_len_for_state(Ordering::Acquire); + + // Push onto the buffer + let len = state.length(); + // SAFETY: Caller ensures this does not conflict with `free` or `alloc` calls, + // and we just disabled remote allocation. + unsafe { + self.buffer.set(len, entity); + } + + // Update length + let new_state = state.with_length(len + 1); + // This is safe because `alloc` is not being called and `remote_alloc` checks that it is not disabled. + // We don't need to change the generation since this will change the length. + // If, from a `remote_alloc` perspective, this does not change the length (i.e. 
this changes it *back* to what it was), + // then `alloc` must have been called, which changes the generation. + self.len.set_state_risky(new_state, Ordering::Release); + } + + /// Allocates an [`Entity`] from the free list if one is available. + /// + /// # Safety + /// + /// This must not conflict with [`Self::free`] calls. + #[inline] + unsafe fn alloc(&self) -> Option { + // SAFETY: This will get a valid index because there is no way for `free` to be done at the same time. + let len = self.len.pop_for_state(1, Ordering::AcqRel).length(); + let index = len.checked_sub(1)?; + + // SAFETY: This was less then `len`, so it must have been `set` via `free` before. + Some(unsafe { self.buffer.get(index) }) + } + + /// Allocates as many [`Entity`]s from the free list as are available, up to `count`. + /// + /// # Safety + /// + /// This must not conflict with [`Self::free`] calls for the duration of the returned iterator. + #[inline] + unsafe fn alloc_many(&self, count: u32) -> FreeBufferIterator { + // SAFETY: This will get a valid index because there is no way for `free` to be done at the same time. + let len = self.len.pop_for_state(count, Ordering::AcqRel).length(); + let index = len.saturating_sub(count); + + // SAFETY: The iterator's items are all less than the length. + unsafe { self.buffer.iter(index..len) } + } + + /// Allocates an [`Entity`] from the free list if one is available and it is safe to do so. + #[inline] + fn remote_alloc(&self) -> Option { + // The goal is the same as `alloc`, so what's the difference? + // `alloc` knows `free` is not being called, but this does not. + // What if we `len.fetch_sub(1)` but then `free` overwrites the entity before we could read it? + // That would mean we would leak an entity and give another entity out twice. + // We get around this by only updating `len` after the read is complete. + // But that means something else could be trying to allocate the same index! + // So we need a `len.compare_exchange` loop to ensure the index is unique. + // Because we keep a generation value in the `FreeCount`, if any of these things happen, we simply try again. + + let mut state = self.len.state(Ordering::Acquire); + #[cfg(feature = "std")] + let mut attempts = 1u32; + loop { + // The state is only disabled when freeing. + // If a free is happening, we need to wait for the new entity to be ready on the free buffer. + // Then, we can allocate it. + if state.is_disabled() { + // Spin 64 times before yielding. + #[cfg(feature = "std")] + if attempts % 64 == 0 { + attempts += 1; + // scheduler probably isn't running the thread doing the `free` call, so yield so it can finish. + std::thread::yield_now(); + } else { + attempts += 1; + core::hint::spin_loop(); + } + + #[cfg(not(feature = "std"))] + core::hint::spin_loop(); + + state = self.len.state(Ordering::Acquire); + continue; + } + + let len = state.length(); + let index = len.checked_sub(1)?; + + // SAFETY: This was less than `len`, so it must have been `set` via `free` before. + let entity = unsafe { self.buffer.get(index) }; + + let ideal_state = state.pop(1); + match self + .len + .try_set_state(state, ideal_state, Ordering::AcqRel, Ordering::Acquire) + { + Ok(_) => return Some(entity), + Err(new_state) => state = new_state, + } + } + } +} + +/// This stores allocation data shared by all entity allocators. +struct SharedAllocator { + /// The entities pending reuse + free: FreeList, + /// The next value of [`Entity::index`] to give out if needed. 
+ next_entity_index: AtomicU32, + /// Tracks whether or not the primary [`Allocator`] has been closed or not. + is_closed: AtomicBool, +} + +impl SharedAllocator { + /// Constructs a [`SharedAllocator`] + fn new() -> Self { + Self { + free: FreeList::new(), + next_entity_index: AtomicU32::new(0), + is_closed: AtomicBool::new(false), + } + } + + /// The total number of indices given out. + #[inline] + fn total_entity_indices(&self) -> u32 { + self.next_entity_index.load(Ordering::Relaxed) + } + + /// This just panics. + /// It is included to help with branch prediction, and put the panic message in one spot. + #[cold] + #[inline] + fn on_overflow() -> ! { + panic!("too many entities") + } + + /// Allocates a fresh [`EntityRow`]. This row has never been given out before. + #[inline] + pub(crate) fn alloc_unique_entity_row(&self) -> EntityRow { + let index = self.next_entity_index.fetch_add(1, Ordering::Relaxed); + if index == u32::MAX { + Self::on_overflow(); + } + // SAFETY: We just checked that this was not max. + unsafe { EntityRow::new(NonMaxU32::new_unchecked(index)) } + } + + /// Allocates `count` [`EntityRow`]s. These rows will be fresh. They have never been given out before. + pub(crate) fn alloc_unique_entity_rows(&self, count: u32) -> AllocUniqueEntityRowIterator { + let start_new = self.next_entity_index.fetch_add(count, Ordering::Relaxed); + let new = match start_new.checked_add(count) { + Some(new_next_entity_index) => start_new..new_next_entity_index, + None => Self::on_overflow(), + }; + AllocUniqueEntityRowIterator(new) + } + + /// Allocates a new [`Entity`], reusing a freed index if one exists. + /// + /// # Safety + /// + /// This must not conflict with [`FreeList::free`] calls. + #[inline] + unsafe fn alloc(&self) -> Entity { + // SAFETY: assured by caller + unsafe { self.free.alloc() } + .unwrap_or_else(|| Entity::from_raw(self.alloc_unique_entity_row())) + } + + /// Allocates a `count` [`Entity`]s, reusing freed indices if they exist. + /// + /// # Safety + /// + /// This must not conflict with [`FreeList::free`] calls for the duration of the iterator. + #[inline] + unsafe fn alloc_many(&self, count: u32) -> AllocEntitiesIterator { + let reused = self.free.alloc_many(count); + let still_need = count - reused.len() as u32; + let new = self.alloc_unique_entity_rows(still_need); + AllocEntitiesIterator { new, reused } + } + + /// Allocates a new [`Entity`]. + /// This will only try to reuse a freed index if it is safe to do so. + #[inline] + fn remote_alloc(&self) -> Entity { + self.free + .remote_alloc() + .unwrap_or_else(|| Entity::from_raw(self.alloc_unique_entity_row())) + } + + /// Marks the allocator as closed, but it will still function normally. + fn close(&self) { + self.is_closed.store(true, Ordering::Release); + } + + /// Returns true if [`Self::close`] has been called. + fn is_closed(&self) -> bool { + self.is_closed.load(Ordering::Acquire) + } +} + +/// This keeps track of freed entities and allows the allocation of new ones. +pub struct Allocator { + shared: Arc, +} + +impl Allocator { + /// Constructs a new [`Allocator`] + pub fn new() -> Self { + Self { + shared: Arc::new(SharedAllocator::new()), + } + } + + /// Allocates a new [`Entity`], reusing a freed index if one exists. + #[inline] + pub fn alloc(&self) -> Entity { + // SAFETY: violating safety requires a `&mut self` to exist, but rust does not allow that. + unsafe { self.shared.alloc() } + } + + /// The total number of indices given out. 
+ #[inline] + pub fn total_entity_indices(&self) -> u32 { + self.shared.total_entity_indices() + } + + /// The number of free entities. + #[inline] + pub fn num_free(&self) -> u32 { + // SAFETY: `free` is not being called since it takes `&mut self`. + unsafe { self.shared.free.num_free() } + } + + /// Returns whether or not the index is valid in this allocator. + #[inline] + pub fn is_valid_row(&self, row: EntityRow) -> bool { + row.index() < self.total_entity_indices() + } + + /// Frees the entity allowing it to be reused. + #[inline] + pub fn free(&mut self, entity: Entity) { + // SAFETY: We have `&mut self`. + unsafe { + self.shared.free.free(entity); + } + } + + /// Allocates `count` entities in an iterator. + #[inline] + pub fn alloc_many(&self, count: u32) -> AllocEntitiesIterator { + // SAFETY: `free` takes `&mut self`, and this lifetime is captured by the iterator. + unsafe { self.shared.alloc_many(count) } + } + + /// Allocates `count` entities in an iterator. + /// + /// # Safety + /// + /// Caller ensures [`Self::free`] is not called for the duration of the iterator. + /// Caller ensures this allocator is not dropped for the lifetime of the iterator. + #[inline] + pub unsafe fn alloc_many_unsafe(&self, count: u32) -> AllocEntitiesIterator<'static> { + // SAFETY: Caller ensures this instance is valid until the returned value is dropped. + let this: &'static Self = unsafe { &*core::ptr::from_ref(self) }; + // SAFETY: Caller ensures free is not called. + unsafe { this.shared.alloc_many(count) } + } +} + +impl Drop for Allocator { + fn drop(&mut self) { + self.shared.close(); + } +} + +impl core::fmt::Debug for Allocator { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct(core::any::type_name::()) + .field("total_indices", &self.total_entity_indices()) + .field("total_free", &self.num_free()) + .finish() + } +} + +/// An [`Iterator`] returning a sequence of [`EntityRow`] values from an [`Allocator`] that are never aliased. +/// These rows have never been given out before. +/// +/// **NOTE:** Dropping will leak the remaining entity rows! +pub(crate) struct AllocUniqueEntityRowIterator(core::ops::Range); + +impl Iterator for AllocUniqueEntityRowIterator { + type Item = EntityRow; + + #[inline] + fn next(&mut self) -> Option { + self.0 + .next() + // SAFETY: This came from an *exclusive* range. It can never be max. + .map(|idx| unsafe { EntityRow::new(NonMaxU32::new_unchecked(idx)) }) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.0.size_hint() + } +} + +impl ExactSizeIterator for AllocUniqueEntityRowIterator {} +impl core::iter::FusedIterator for AllocUniqueEntityRowIterator {} + +/// An [`Iterator`] returning a sequence of [`Entity`] values from an [`Allocator`]. +/// +/// **NOTE:** Dropping will leak the remaining entities! +pub struct AllocEntitiesIterator<'a> { + new: AllocUniqueEntityRowIterator, + reused: FreeBufferIterator<'a>, +} + +impl<'a> Iterator for AllocEntitiesIterator<'a> { + type Item = Entity; + + fn next(&mut self) -> Option { + self.reused + .next() + .or_else(|| self.new.next().map(Entity::from_raw)) + } + + fn size_hint(&self) -> (usize, Option) { + let len = self.reused.len() + self.new.len(); + (len, Some(len)) + } +} + +impl<'a> ExactSizeIterator for AllocEntitiesIterator<'a> {} +impl<'a> core::iter::FusedIterator for AllocEntitiesIterator<'a> {} + +// SAFETY: Newly reserved entity values are unique. 
+unsafe impl EntitySetIterator for AllocEntitiesIterator<'_> {} + +impl Drop for AllocEntitiesIterator<'_> { + fn drop(&mut self) { + let leaking = self.len(); + if leaking > 0 { + warn!( + "{} entities being leaked via unfinished `AllocEntitiesIterator`", + leaking + ); + } + } +} + +/// This is a stripped down version of [`Allocator`] that operates on fewer assumptions. +/// As a result, using this will be slower than [`Allocator`] but this offers additional freedoms. +#[derive(Clone)] +pub struct RemoteAllocator { + shared: Arc, +} + +impl RemoteAllocator { + /// Creates a new [`RemoteAllocator`] with the provided [`Allocator`] source. + /// If the source is ever destroyed, [`Self::alloc`] will yield garbage values. + /// Be sure to use [`Self::is_closed`] to determine if it is safe to use these entities. + pub fn new(source: &Allocator) -> Self { + Self { + shared: source.shared.clone(), + } + } + + /// Allocates an entity remotely. + #[inline] + pub fn alloc(&self) -> Entity { + self.shared.remote_alloc() + } + + /// Returns whether or not this [`RemoteAllocator`] is still connected to its source [`Allocator`]. + /// Note that this could close immediately after the function returns false, so be careful. + pub fn is_closed(&self) -> bool { + self.shared.is_closed() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloc::vec; + + /// Ensure the total capacity of [`OwnedBuffer`] is `u32::MAX + 1`. + #[test] + fn chunk_capacity_sums() { + let total: u64 = (0..FreeBuffer::NUM_CHUNKS) + .map(FreeBuffer::capacity_of_chunk) + .map(|x| x as u64) + .sum(); + // The last 2 won't be used, but that's ok. + // Keeping them powers of 2 makes things faster. + let expected = u32::MAX as u64 + 1; + assert_eq!(total, expected); + } + + /// Ensure [`OwnedBuffer`] can be properly indexed + #[test] + fn chunk_indexing() { + let to_test = vec![ + (0, (0, 0, 512)), // index 0 cap = 512 + (1, (0, 1, 512)), + (256, (0, 256, 512)), + (511, (0, 511, 512)), + (512, (1, 0, 512)), // index 1 cap = 512 + (1023, (1, 511, 512)), + (1024, (2, 0, 1024)), // index 2 cap = 1024 + (1025, (2, 1, 1024)), + (2047, (2, 1023, 1024)), + (2048, (3, 0, 2048)), // index 3 cap = 2048 + (4095, (3, 2047, 2048)), + (4096, (4, 0, 4096)), // index 3 cap = 4096 + ]; + + for (input, output) in to_test { + assert_eq!(FreeBuffer::index_info(input), output); + } + } + + #[test] + fn buffer_len_encoding() { + let len = FreeCount::new_zero_len(); + assert_eq!(len.state(Ordering::Relaxed).length(), 0); + assert_eq!(len.pop_for_state(200, Ordering::Relaxed).length(), 0); + len.set_state_risky( + FreeCountState::new_zero_len().with_length(5), + Ordering::Relaxed, + ); + assert_eq!(len.pop_for_state(2, Ordering::Relaxed).length(), 5); + assert_eq!(len.pop_for_state(2, Ordering::Relaxed).length(), 3); + assert_eq!(len.pop_for_state(2, Ordering::Relaxed).length(), 1); + assert_eq!(len.pop_for_state(2, Ordering::Relaxed).length(), 0); + } + + #[test] + fn uniqueness() { + let mut entities = Vec::with_capacity(2000); + let mut allocator = Allocator::new(); + entities.extend(allocator.alloc_many(1000)); + + let pre_len = entities.len(); + entities.dedup(); + assert_eq!(pre_len, entities.len()); + + for e in entities.drain(..) 
{ + allocator.free(e); + } + + entities.extend(allocator.alloc_many(500)); + for _ in 0..1000 { + entities.push(allocator.alloc()); + } + entities.extend(allocator.alloc_many(500)); + + let pre_len = entities.len(); + entities.dedup(); + assert_eq!(pre_len, entities.len()); + } +} diff --git a/crates/bevy_ecs/src/entity/map_entities.rs b/crates/bevy_ecs/src/entity/map_entities.rs index 3dac2fa749921..f80636bf9ccd2 100644 --- a/crates/bevy_ecs/src/entity/map_entities.rs +++ b/crates/bevy_ecs/src/entity/map_entities.rs @@ -53,7 +53,7 @@ use super::EntityIndexSet; pub trait MapEntities { /// Updates all [`Entity`] references stored inside using `entity_mapper`. /// - /// Implementors should look up any and all [`Entity`] values stored within `self` and + /// Implementers should look up any and all [`Entity`] values stored within `self` and /// update them to the mapped values via `entity_mapper`. fn map_entities(&mut self, entity_mapper: &mut E); } @@ -152,7 +152,7 @@ impl> MapEntities for SmallVec { /// /// More generally, this can be used to map [`Entity`] references between any two [`Worlds`](World). /// -/// This is used by [`MapEntities`] implementors. +/// This is used by [`MapEntities`] implementers. /// /// ## Example /// @@ -287,9 +287,6 @@ impl<'m> SceneEntityMapper<'m> { /// Creates a new [`SceneEntityMapper`], spawning a temporary base [`Entity`] in the provided [`World`] pub fn new(map: &'m mut EntityHashMap, world: &mut World) -> Self { - // We're going to be calling methods on `Entities` that require advance - // flushing, such as `alloc` and `free`. - world.flush_entities(); Self { map, // SAFETY: Entities data is kept in a valid state via `EntityMapper::world_scope` @@ -304,8 +301,9 @@ impl<'m> SceneEntityMapper<'m> { pub fn finish(self, world: &mut World) { // SAFETY: Entities data is kept in a valid state via `EntityMap::world_scope` let entities = unsafe { world.entities_mut() }; - assert!(entities.free(self.dead_start).is_some()); - assert!(entities.reserve_generations(self.dead_start.index(), self.generations)); + assert!(entities + .free_current_and_future_generations(self.dead_start, self.generations) + .is_some()); } /// Creates an [`SceneEntityMapper`] from a provided [`World`] and [`EntityHashMap`], then calls the @@ -379,17 +377,12 @@ mod tests { #[test] fn entity_mapper_no_panic() { let mut world = World::new(); - // "Dirty" the `Entities`, requiring a flush afterward. world.entities.reserve_entity(); - assert!(world.entities.needs_flush()); // Create and exercise a SceneEntityMapper - should not panic because it flushes // `Entities` first. SceneEntityMapper::world_scope(&mut Default::default(), &mut world, |_, m| { m.get_mapped(Entity::PLACEHOLDER); }); - - // The SceneEntityMapper should leave `Entities` in a flushed state. - assert!(!world.entities.needs_flush()); } } diff --git a/crates/bevy_ecs/src/entity/mod.rs b/crates/bevy_ecs/src/entity/mod.rs index 10aed9ff37988..f5c8a60c9de02 100644 --- a/crates/bevy_ecs/src/entity/mod.rs +++ b/crates/bevy_ecs/src/entity/mod.rs @@ -36,9 +36,12 @@ //! [`EntityWorldMut::insert`]: crate::world::EntityWorldMut::insert //! 
[`EntityWorldMut::remove`]: crate::world::EntityWorldMut::remove +pub(crate) mod allocator; mod clone_entities; mod entity_set; mod map_entities; + +use allocator::{Allocator, RemoteAllocator}; #[cfg(feature = "bevy_reflect")] use bevy_reflect::Reflect; #[cfg(all(feature = "bevy_reflect", feature = "serialize"))] @@ -80,26 +83,14 @@ use crate::{ storage::{SparseSetIndex, TableId, TableRow}, }; use alloc::vec::Vec; -use bevy_platform::sync::atomic::Ordering; +use bevy_platform::sync::Arc; +use concurrent_queue::ConcurrentQueue; use core::{fmt, hash::Hash, mem, num::NonZero, panic::Location}; use log::warn; #[cfg(feature = "serialize")] use serde::{Deserialize, Serialize}; -#[cfg(target_has_atomic = "64")] -use bevy_platform::sync::atomic::AtomicI64 as AtomicIdCursor; -#[cfg(target_has_atomic = "64")] -type IdCursor = i64; - -/// Most modern platforms support 64-bit atomics, but some less-common platforms -/// do not. This fallback allows compilation using a 32-bit cursor instead, with -/// the caveat that some conversions may fail (and panic) at runtime. -#[cfg(not(target_has_atomic = "64"))] -use bevy_platform::sync::atomic::AtomicIsize as AtomicIdCursor; -#[cfg(not(target_has_atomic = "64"))] -type IdCursor = isize; - /// This represents the row or "index" of an [`Entity`] within the [`Entities`] table. /// This is a lighter weight version of [`Entity`]. /// @@ -605,45 +596,119 @@ impl SparseSetIndex for Entity { } } -/// An [`Iterator`] returning a sequence of [`Entity`] values from -pub struct ReserveEntitiesIterator<'a> { - // Metas, so we can recover the current generation for anything in the freelist. - meta: &'a [EntityMeta], +/// Stores entities that need to be flushed. +#[derive(Clone)] +struct RemotePending { + pending: Arc>, +} + +impl RemotePending { + fn new() -> Self { + Self { + pending: Arc::new(ConcurrentQueue::unbounded()), + } + } + + fn queue_flush(&self, entity: Entity) { + // We don't need the result. If it's closed it doesn't matter, and it can't be full. + _ = self.pending.push(entity); + } +} + +struct Pending { + /// This is always available, but is slower. + remote: RemotePending, + /// This is not always available (on no std or when remotely reserving), but is faster. + #[cfg(feature = "std")] + local: bevy_utils::Parallel>, +} + +impl Pending { + fn new() -> Self { + #[cfg(feature = "std")] + { + Self { + remote: RemotePending::new(), + local: bevy_utils::Parallel::default(), + } + } + + #[cfg(not(feature = "std"))] + { + Self { + remote: RemotePending::new(), + } + } + } + + /// Queues this entity to be flushed. + /// This uses the most efficient queue available. + fn queue_flush(&self, entity: Entity) { + #[cfg(feature = "std")] + self.local.scope(|pending| pending.push(entity)); + + #[cfg(not(feature = "std"))] + self.remote.queue_flush(entity); + } + + /// Flushes the entities in the most efficient queue available. + fn flush_local(&mut self, mut flusher: impl FnMut(Entity)) { + #[cfg(feature = "std")] + let pending = self.local.iter_mut().flat_map(|pending| pending.drain(..)); + + #[cfg(not(feature = "std"))] + let pending = self.remote.pending.try_iter(); + + for pending in pending { + flusher(pending); + } + } - // Reserved indices formerly in the freelist to hand out. - freelist_indices: core::slice::Iter<'a, EntityRow>, + /// Moves the pending entities in the less efficient queue into the more efficient one, + /// so they are included int [`flush_local`](Self::flush_local). 
+ fn queue_remote_pending_to_be_flushed(&self) { + // Note that without std, all pending entities are already in remote. + #[cfg(feature = "std")] + { + let remote = self.remote.pending.try_iter(); + self.local.scope(|pending| pending.extend(remote)); + } + } +} - // New Entity indices to hand out, outside the range of meta.len(). - new_indices: core::ops::Range, +impl fmt::Debug for Pending { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "opaque pending entities") + } +} + +/// An [`Iterator`] returning a sequence of [`Entity`] values from [`Entities`]. +/// These will be flushed. +/// +/// **NOTE:** Dropping will leak the remaining entities! +pub struct ReserveEntitiesIterator<'a> { + allocator: allocator::AllocEntitiesIterator<'a>, + entities: &'a Entities, } impl<'a> Iterator for ReserveEntitiesIterator<'a> { type Item = Entity; fn next(&mut self) -> Option { - self.freelist_indices + self.allocator .next() - .map(|&row| { - Entity::from_raw_and_generation(row, self.meta[row.index() as usize].generation) - }) - .or_else(|| { - self.new_indices.next().map(|index| { - // SAFETY: This came from an exclusive range so the max can't be hit. - let row = unsafe { EntityRow::new(NonMaxU32::new_unchecked(index)) }; - Entity::from_raw(row) - }) - }) + .inspect(|entity| self.entities.pending.queue_flush(*entity)) } fn size_hint(&self) -> (usize, Option) { - let len = self.freelist_indices.len() + self.new_indices.len(); - (len, Some(len)) + self.allocator.size_hint() } } -impl<'a> ExactSizeIterator for ReserveEntitiesIterator<'a> {} impl<'a> core::iter::FusedIterator for ReserveEntitiesIterator<'a> {} +impl<'a> ExactSizeIterator for ReserveEntitiesIterator<'a> {} + // SAFETY: Newly reserved entity values are unique. unsafe impl EntitySetIterator for ReserveEntitiesIterator<'_> {} @@ -658,113 +723,28 @@ unsafe impl EntitySetIterator for ReserveEntitiesIterator<'_> {} #[derive(Debug)] pub struct Entities { meta: Vec, - - /// The `pending` and `free_cursor` fields describe three sets of Entity IDs - /// that have been freed or are in the process of being allocated: - /// - /// - The `freelist` IDs, previously freed by `free()`. These IDs are available to any of - /// [`alloc`], [`reserve_entity`] or [`reserve_entities`]. Allocation will always prefer - /// these over brand new IDs. - /// - /// - The `reserved` list of IDs that were once in the freelist, but got reserved by - /// [`reserve_entities`] or [`reserve_entity`]. They are now waiting for [`flush`] to make them - /// fully allocated. - /// - /// - The count of new IDs that do not yet exist in `self.meta`, but which we have handed out - /// and reserved. [`flush`] will allocate room for them in `self.meta`. - /// - /// The contents of `pending` look like this: - /// - /// ```txt - /// ---------------------------- - /// | freelist | reserved | - /// ---------------------------- - /// ^ ^ - /// free_cursor pending.len() - /// ``` - /// - /// As IDs are allocated, `free_cursor` is atomically decremented, moving - /// items from the freelist into the reserved list by sliding over the boundary. - /// - /// Once the freelist runs out, `free_cursor` starts going negative. - /// The more negative it is, the more IDs have been reserved starting exactly at - /// the end of `meta.len()`. - /// - /// This formulation allows us to reserve any number of IDs first from the freelist - /// and then from the new IDs, using only a single atomic subtract. - /// - /// Once [`flush`] is done, `free_cursor` will equal `pending.len()`. 
- /// - /// [`alloc`]: Entities::alloc - /// [`reserve_entity`]: Entities::reserve_entity - /// [`reserve_entities`]: Entities::reserve_entities - /// [`flush`]: Entities::flush - pending: Vec, - free_cursor: AtomicIdCursor, + allocator: Allocator, + pending: Pending, } impl Entities { - pub(crate) const fn new() -> Self { + pub(crate) fn new() -> Self { Entities { meta: Vec::new(), - pending: Vec::new(), - free_cursor: AtomicIdCursor::new(0), + allocator: Allocator::new(), + pending: Pending::new(), } } /// Reserve entity IDs concurrently. /// - /// Storage for entity generation and location is lazily allocated by calling [`flush`](Entities::flush). - #[expect( - clippy::allow_attributes, - reason = "`clippy::unnecessary_fallible_conversions` may not always lint." - )] - #[allow( - clippy::unnecessary_fallible_conversions, - reason = "`IdCursor::try_from` may fail on 32-bit platforms." - )] + /// Storage for entity generation and location is lazily allocated by calling [`flush`](Entities::flush), + /// but, if desiered, caller may set the [`EntityLocation`] prior to the flush instead, + /// via [`flush_entity`](crate::world::World::flush_entity) for example. pub fn reserve_entities(&self, count: u32) -> ReserveEntitiesIterator { - // Use one atomic subtract to grab a range of new IDs. The range might be - // entirely nonnegative, meaning all IDs come from the freelist, or entirely - // negative, meaning they are all new IDs to allocate, or a mix of both. - let range_end = self.free_cursor.fetch_sub( - IdCursor::try_from(count) - .expect("64-bit atomic operations are not supported on this platform."), - Ordering::Relaxed, - ); - let range_start = range_end - - IdCursor::try_from(count) - .expect("64-bit atomic operations are not supported on this platform."); - - let freelist_range = range_start.max(0) as usize..range_end.max(0) as usize; - - let (new_id_start, new_id_end) = if range_start >= 0 { - // We satisfied all requests from the freelist. - (0, 0) - } else { - // We need to allocate some new Entity IDs outside of the range of self.meta. - // - // `range_start` covers some negative territory, e.g. `-3..6`. - // Since the nonnegative values `0..6` are handled by the freelist, that - // means we need to handle the negative range here. - // - // In this example, we truncate the end to 0, leaving us with `-3..0`. - // Then we negate these values to indicate how far beyond the end of `meta.end()` - // to go, yielding `meta.len()+0 .. meta.len()+3`. - let base = self.meta.len() as IdCursor; - - let new_id_end = u32::try_from(base - range_start).expect("too many entities"); - - // `new_id_end` is in range, so no need to check `start`. - let new_id_start = (base - range_end.min(0)) as u32; - - (new_id_start, new_id_end) - }; - ReserveEntitiesIterator { - meta: &self.meta[..], - freelist_indices: self.pending[freelist_range].iter(), - new_indices: new_id_start..new_id_end, + allocator: self.alloc_entities(count), + entities: self, } } @@ -772,65 +752,57 @@ impl Entities { /// /// Equivalent to `self.reserve_entities(1).next().unwrap()`, but more efficient. pub fn reserve_entity(&self) -> Entity { - let n = self.free_cursor.fetch_sub(1, Ordering::Relaxed); - if n > 0 { - // Allocate from the freelist. - let row = self.pending[(n - 1) as usize]; - Entity::from_raw_and_generation(row, self.meta[row.index() as usize].generation) - } else { - // Grab a new ID, outside the range of `meta.len()`. `flush()` must - // eventually be called to make it valid. 
- // - // As `self.free_cursor` goes more and more negative, we return IDs farther - // and farther beyond `meta.len()`. - let raw = self.meta.len() as IdCursor - n; - if raw >= u32::MAX as IdCursor { - panic!("too many entities"); - } - // SAFETY: We just checked the bounds - let row = unsafe { EntityRow::new(NonMaxU32::new_unchecked(raw as u32)) }; - Entity::from_raw(row) - } + let entity = self.alloc(); + self.pending.queue_flush(entity); + entity } - /// Check that we do not have pending work requiring `flush()` to be called. - fn verify_flushed(&mut self) { - debug_assert!( - !self.needs_flush(), - "flush() needs to be called before this operation is legal" - ); + /// Allocate an entity ID directly. + /// Caller is responsible for setting the [`EntityLocation`] if desired, + /// which must be done before [`get`](Self::get)ing its [`EntityLocation`]. + pub fn alloc(&self) -> Entity { + self.allocator.alloc() } - /// Allocate an entity ID directly. - pub fn alloc(&mut self) -> Entity { - self.verify_flushed(); - if let Some(row) = self.pending.pop() { - let new_free_cursor = self.pending.len() as IdCursor; - *self.free_cursor.get_mut() = new_free_cursor; - Entity::from_raw_and_generation(row, self.meta[row.index() as usize].generation) - } else { - let index = u32::try_from(self.meta.len()) - .ok() - .and_then(NonMaxU32::new) - .expect("too many entities"); - self.meta.push(EntityMeta::EMPTY); - Entity::from_raw(EntityRow::new(index)) - } + /// A more efficient way to [`alloc`](Self::alloc) multiple entities. + pub fn alloc_entities(&self, count: u32) -> allocator::AllocEntitiesIterator { + self.allocator.alloc_many(count) } - /// Destroy an entity, allowing it to be reused. + /// A version of [`alloc_entities`](Self::alloc_entities) that requires the caller to ensure safety. /// - /// Must not be called while reserved entities are awaiting `flush()`. - pub fn free(&mut self, entity: Entity) -> Option { - self.verify_flushed(); + /// # Safety + /// + /// Caller ensures [`Self::free`] is not called for the duration of the iterator. + /// Caller ensures this allocator is not dropped for the lifetime of the iterator. + pub(crate) unsafe fn alloc_entities_unsafe( + &self, + count: u32, + ) -> allocator::AllocEntitiesIterator<'static> { + self.allocator.alloc_many_unsafe(count) + } - let meta = &mut self.meta[entity.index() as usize]; - if meta.generation != entity.generation { + /// This is the same as [`free`](Entities::free), but it allows skipping some generations. + /// When the entity is reused, it will have a generation greater than the current generation + `generations`. + #[inline] + pub(crate) fn free_current_and_future_generations( + &mut self, + entity: Entity, + generations: u32, + ) -> Option { + let theoretical = self.resolve_from_id(entity.row()); + if theoretical.is_none_or(|theoretcal| theoretcal != entity) { return None; } - let (new_generation, aliased) = meta.generation.after_versions_and_could_alias(1); + // SAFETY: We resolved its id to ensure it is valid. 
+ let meta = unsafe { self.force_get_meta_mut(entity.index() as usize) }; + let prev_generation = meta.generation; + let (new_generation, aliased) = + prev_generation.after_versions_and_could_alias(generations + 1); + meta.generation = new_generation; + if aliased { warn!( "Entity({}) generation wrapped on Entities::free, aliasing may occur", @@ -838,34 +810,23 @@ impl Entities { ); } - let loc = mem::replace(&mut meta.location, EntityMeta::EMPTY.location); + let new_entity = Entity::from_raw_and_generation(entity.row(), meta.generation); + let loc = mem::replace(&mut meta.location, EntityLocation::INVALID); + self.allocator.free(new_entity); - self.pending.push(entity.row()); - - let new_free_cursor = self.pending.len() as IdCursor; - *self.free_cursor.get_mut() = new_free_cursor; Some(loc) } - /// Ensure at least `n` allocations can succeed without reallocating. - #[expect( - clippy::allow_attributes, - reason = "`clippy::unnecessary_fallible_conversions` may not always lint." - )] - #[allow( - clippy::unnecessary_fallible_conversions, - reason = "`IdCursor::try_from` may fail on 32-bit platforms." - )] - pub fn reserve(&mut self, additional: u32) { - self.verify_flushed(); - - let freelist_size = *self.free_cursor.get_mut(); - let shortfall = IdCursor::try_from(additional) - .expect("64-bit atomic operations are not supported on this platform.") - - freelist_size; - if shortfall > 0 { - self.meta.reserve(shortfall as usize); - } + /// Destroy an entity, allowing it to be reused. + pub fn free(&mut self, entity: Entity) -> Option { + self.free_current_and_future_generations(entity, 0) + } + + /// Prepares the for `additional` allocations/reservations. + /// This can prevent reallocation, etc, but since allocation can happen from anywhere, it is not guaranteed. + pub fn prepare(&mut self, additional: u32) { + let shortfall = additional.saturating_sub(self.allocator.num_free()); + self.meta.reserve(shortfall as usize); } /// Returns true if the [`Entities`] contains [`entity`](Entity). @@ -879,12 +840,10 @@ impl Entities { /// Clears all [`Entity`] from the World. pub fn clear(&mut self) { self.meta.clear(); - self.pending.clear(); - *self.free_cursor.get_mut() = 0; + self.allocator = Allocator::new(); } /// Returns the location of an [`Entity`]. - /// Note: for pending entities, returns `None`. #[inline] pub fn get(&self, entity: Entity) -> Option { if let Some(meta) = self.meta.get(entity.index() as usize) { @@ -909,7 +868,7 @@ impl Entities { #[inline] pub(crate) unsafe fn set(&mut self, index: u32, location: EntityLocation) { // SAFETY: Caller guarantees that `index` a valid entity index - let meta = unsafe { self.meta.get_unchecked_mut(index as usize) }; + let meta = unsafe { self.force_get_meta_mut(index as usize) }; meta.location = location; } @@ -920,51 +879,60 @@ impl Entities { #[inline] pub(crate) unsafe fn mark_spawn_despawn(&mut self, index: u32, by: MaybeLocation, at: Tick) { // SAFETY: Caller guarantees that `index` a valid entity index - let meta = unsafe { self.meta.get_unchecked_mut(index as usize) }; + let meta = unsafe { self.force_get_meta_mut(index as usize) }; meta.spawned_or_despawned = SpawnedOrDespawned { by, at }; } - /// Increments the `generation` of a freed [`Entity`]. The next entity ID allocated with this - /// `index` will count `generation` starting from the prior `generation` + the specified - /// value + 1. + /// Gets the meta for this index mutably, creating it if it did not exist. 
/// - /// Does nothing if no entity with this `index` has been allocated yet. - pub(crate) fn reserve_generations(&mut self, index: u32, generations: u32) -> bool { - if (index as usize) >= self.meta.len() { - return false; - } - - let meta = &mut self.meta[index as usize]; - if meta.location.archetype_id == ArchetypeId::INVALID { - meta.generation = meta.generation.after_versions(generations); - true + /// # Safety + /// + /// `index` must be a valid index + #[inline] + unsafe fn force_get_meta_mut(&mut self, index: usize) -> &mut EntityMeta { + if index >= self.meta.len() { + self.resize_meta_for_index_risky(index) } else { - false + // SAFETY: index is in bounds + unsafe { self.meta.get_unchecked_mut(index) } } } + /// Changes the size of [`Self::meta`] to support this index. + /// This is risky because it assumes the index is not already in bounds. + /// + /// This is only used in `force_get_meta_mut` just to help branch prediction. + // TODO: Hint unlikely instead of #[cold] once it is stabilized. + #[cold] + fn resize_meta_for_index_risky(&mut self, index: usize) -> &mut EntityMeta { + self.meta.resize(index + 1, EntityMeta::FRESH); + // SAFETY: We just added it + unsafe { self.meta.get_unchecked_mut(index) } + } + /// Get the [`Entity`] with a given id, if it exists in this [`Entities`] collection - /// Returns `None` if this [`Entity`] is outside of the range of currently reserved Entities + /// Returns `None` if this [`Entity`] is outside of the range of currently allocated Entities /// /// Note: This method may return [`Entities`](Entity) which are currently free /// Note that [`contains`](Entities::contains) will correctly return false for freed /// entities, since it checks the generation + #[inline] pub fn resolve_from_id(&self, row: EntityRow) -> Option { let idu = row.index() as usize; if let Some(&EntityMeta { generation, .. }) = self.meta.get(idu) { Some(Entity::from_raw_and_generation(row, generation)) } else { - // `id` is outside of the meta list - check whether it is reserved but not yet flushed. - let free_cursor = self.free_cursor.load(Ordering::Relaxed); - // If this entity was manually created, then free_cursor might be positive - // Returning None handles that case correctly - let num_pending = usize::try_from(-free_cursor).ok()?; - (idu < self.meta.len() + num_pending).then_some(Entity::from_raw(row)) + self.allocator + .is_valid_row(row) + .then_some(Entity::from_raw(row)) } } - fn needs_flush(&mut self) -> bool { - *self.free_cursor.get_mut() != self.pending.len() as IdCursor + /// Entities reserved via [`RemoteEntities::reserve`] may or may not be flushed naturally. + /// Before using an entity reserved remotely, either set its location manually (usually though [`flush_entity`](crate::world::World::flush_entity)), + /// or call this method to queue remotely reserved entities to be flushed with the rest. 
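+    ///
+    /// A minimal usage sketch (the `world` variable and the threading shown are illustrative,
+    /// not prescribed by this API):
+    ///
+    /// ```ignore
+    /// let remote = RemoteEntities::new(world.entities());
+    /// // ... `remote` is handed to other code, possibly on another thread, which reserves ...
+    /// let reserved = remote.reserve();
+    /// // ... later, before relying on `reserved` being flushed with the rest ...
+    /// world.entities().queue_remote_pending_to_be_flushed();
+    /// ```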
+ pub fn queue_remote_pending_to_be_flushed(&self) { + self.pending.queue_remote_pending_to_be_flushed(); } /// Allocates space for entities previously reserved with [`reserve_entity`](Entities::reserve_entity) or @@ -983,37 +951,16 @@ impl Entities { by: MaybeLocation, at: Tick, ) { - let free_cursor = self.free_cursor.get_mut(); - let current_free_cursor = *free_cursor; - - let new_free_cursor = if current_free_cursor >= 0 { - current_free_cursor as usize - } else { - let old_meta_len = self.meta.len(); - let new_meta_len = old_meta_len + -current_free_cursor as usize; - self.meta.resize(new_meta_len, EntityMeta::EMPTY); - for (index, meta) in self.meta.iter_mut().enumerate().skip(old_meta_len) { - // SAFETY: the index is less than the meta length, which can not exceeded u32::MAX - let row = EntityRow::new(unsafe { NonMaxU32::new_unchecked(index as u32) }); - init( - Entity::from_raw_and_generation(row, meta.generation), - &mut meta.location, - ); + let total = self.allocator.total_entity_indices() as usize; + self.meta.resize(total, EntityMeta::FRESH); + self.pending.flush_local(|entity| { + // SAFETY: `meta` has been resized to include all entities. + let meta = unsafe { self.meta.get_unchecked_mut(entity.index() as usize) }; + if meta.generation == entity.generation && meta.location == EntityLocation::INVALID { + init(entity, &mut meta.location); meta.spawned_or_despawned = SpawnedOrDespawned { by, at }; } - - *free_cursor = 0; - 0 - }; - - for row in self.pending.drain(new_free_cursor..) { - let meta = &mut self.meta[row.index() as usize]; - init( - Entity::from_raw_and_generation(row, meta.generation), - &mut meta.location, - ); - meta.spawned_or_despawned = SpawnedOrDespawned { by, at }; - } + }); } /// Flushes all reserved entities to an "invalid" state. Attempting to retrieve them will return `None` @@ -1033,40 +980,18 @@ impl Entities { } /// The count of all entities in the [`World`] that have ever been allocated - /// including the entities that are currently freed. - /// - /// This does not include entities that have been reserved but have never been - /// allocated yet. - /// - /// [`World`]: crate::world::World - #[inline] - pub fn total_count(&self) -> usize { - self.meta.len() - } - - /// The count of all entities in the [`World`] that are used, - /// including both those allocated and those reserved, but not those freed. - /// - /// [`World`]: crate::world::World - #[inline] - pub fn used_count(&self) -> usize { - (self.meta.len() as isize - self.free_cursor.load(Ordering::Relaxed) as isize) as usize - } - - /// The count of all entities in the [`World`] that have ever been allocated or reserved, including those that are freed. - /// This is the value that [`Self::total_count()`] would return if [`Self::flush()`] were called right now. + /// including the entities that are currently pending reuse. /// /// [`World`]: crate::world::World #[inline] - pub fn total_prospective_count(&self) -> usize { - self.meta.len() + (-self.free_cursor.load(Ordering::Relaxed)).min(0) as usize + pub fn total_count(&self) -> u32 { + self.allocator.total_entity_indices() } /// The count of currently allocated entities. #[inline] pub fn len(&self) -> u32 { - // `pending`, by definition, can't be bigger than `meta`. - (self.meta.len() - self.pending.len()) as u32 + self.allocator.total_entity_indices() - self.allocator.num_free() } /// Checks if any entity is currently active. @@ -1156,6 +1081,56 @@ impl Entities { } } +/// A remote version of [`Entities`] with limited functionality. 
+#[derive(Clone)] +pub struct RemoteEntities { + allocator: RemoteAllocator, + pending: RemotePending, +} + +impl RemoteEntities { + /// Creates a new [`RemoteEntities`] with this [`Entities`] as its source. + /// Note that this can be closed at any time, + /// so before using an allocated [`Entity`], + /// check [`is_closed`](Self::is_closed). + pub fn new(source: &Entities) -> Self { + Self { + allocator: RemoteAllocator::new(&source.allocator), + pending: source.pending.remote.clone(), + } + } + /// Allocates an [`Entity`]. Note that if the source [`Entities`] has been cleared or dropped, this will return a garbage value. + /// Use [`is_closed`](Self::is_closed) to ensure the entities are valid before using them! + /// + /// The caller takes responsibility for eventually setting the [`EntityLocation`], + /// usually via [`flush_entity`](crate::world::World::flush_entity). + pub fn alloc(&self) -> Entity { + self.allocator.alloc() + } + + /// Reserves an [`Entity`]. Note that if the source [`Entities`] has been cleared or dropped, this will return a garbage value. + /// Use [`is_closed`](Self::is_closed) to ensure the entities are valid before using them! + /// + /// This also queues it to be flushed after [`Entities::queue_remote_pending_to_be_flushed`] is called. + /// If waiting for that is not an option, it is also possible to set the [`EntityLocation`] manually, + /// usually via [`flush_entity`](crate::world::World::flush_entity). + pub fn reserve(&self) -> Entity { + let entity = self.alloc(); + self.pending.queue_flush(entity); + entity + } + + /// Returns true if this [`RemoteEntities`] is still connected to its source [`Entities`]. + /// This will return `false` if its source has been dropped or [`Entities::clear`]ed. + /// + /// Note that this can be closed immediately after returning false. + /// + /// Holding a reference to the source [`Entities`] while calling this will ensure the value does not change unknowingly. + pub fn is_closed(&self) -> bool { + self.allocator.is_closed() + } +} + /// An error that occurs when a specified [`Entity`] does not exist. #[derive(thiserror::Error, Debug, Clone, Copy, PartialEq, Eq)] #[error("The entity with ID {entity} {details}")] @@ -1215,8 +1190,8 @@ struct SpawnedOrDespawned { } impl EntityMeta { - /// meta for **pending entity** - const EMPTY: EntityMeta = EntityMeta { + /// This is the metadata for an entity index that has never had its location set or been freed. 
+ const FRESH: EntityMeta = EntityMeta { generation: EntityGeneration::FIRST, location: EntityLocation::INVALID, spawned_or_despawned: SpawnedOrDespawned { @@ -1333,24 +1308,16 @@ mod tests { assert_eq!(0x00dd_00ff, C4); } - #[test] - fn reserve_generations() { - let mut entities = Entities::new(); - let entity = entities.alloc(); - entities.free(entity); - - assert!(entities.reserve_generations(entity.index(), 1)); - } - #[test] fn reserve_generations_and_alloc() { const GENERATIONS: u32 = 10; let mut entities = Entities::new(); let entity = entities.alloc(); - entities.free(entity); - assert!(entities.reserve_generations(entity.index(), GENERATIONS)); + assert!(entities + .free_current_and_future_generations(entity, GENERATIONS) + .is_some()); // The very next entity allocated should be a further generation on the same index let next_entity = entities.alloc(); diff --git a/crates/bevy_ecs/src/world/entity_ref.rs b/crates/bevy_ecs/src/world/entity_ref.rs index aa3e66ca143a3..69ecfb17c3fc2 100644 --- a/crates/bevy_ecs/src/world/entity_ref.rs +++ b/crates/bevy_ecs/src/world/entity_ref.rs @@ -2415,10 +2415,6 @@ impl<'w> EntityWorldMut<'w> { world.removed_components.send(component_id, self.entity); } - // Observers and on_remove hooks may reserve new entities, which - // requires a flush before Entities::free may be called. - world.flush_entities(); - let location = world .entities .free(self.entity) @@ -2760,6 +2756,7 @@ impl<'w> EntityWorldMut<'w> { self.assert_not_despawned(); let entity_clone = self.world.entities.reserve_entity(); + // If there is a command that could change what we are cloning, apply it. self.world.flush(); let mut builder = EntityCloner::build(self.world); @@ -5774,33 +5771,31 @@ mod tests { commands.queue(count_flush); }, ); - world.commands().queue(count_flush); let entity = world.spawn_empty().id(); - assert_eq!(world.resource::().0, 1); world.commands().queue(count_flush); let mut a = world.entity_mut(entity); a.trigger(TestEvent); - assert_eq!(a.world().resource::().0, 2); + assert_eq!(a.world().resource::().0, 1); a.insert(TestComponent(0)); - assert_eq!(a.world().resource::().0, 3); + assert_eq!(a.world().resource::().0, 2); a.remove::(); - assert_eq!(a.world().resource::().0, 4); + assert_eq!(a.world().resource::().0, 3); a.insert(TestComponent(0)); - assert_eq!(a.world().resource::().0, 5); + assert_eq!(a.world().resource::().0, 4); let _ = a.take::(); - assert_eq!(a.world().resource::().0, 6); + assert_eq!(a.world().resource::().0, 5); a.insert(TestComponent(0)); - assert_eq!(a.world().resource::().0, 7); + assert_eq!(a.world().resource::().0, 6); a.retain::<()>(); - assert_eq!(a.world().resource::().0, 8); + assert_eq!(a.world().resource::().0, 7); a.insert(TestComponent(0)); - assert_eq!(a.world().resource::().0, 9); + assert_eq!(a.world().resource::().0, 8); a.clear(); - assert_eq!(a.world().resource::().0, 10); + assert_eq!(a.world().resource::().0, 9); a.insert(TestComponent(0)); - assert_eq!(a.world().resource::().0, 11); + assert_eq!(a.world().resource::().0, 10); a.despawn(); - assert_eq!(world.resource::().0, 12); + assert_eq!(world.resource::().0, 11); } #[derive(Resource)] diff --git a/crates/bevy_ecs/src/world/mod.rs b/crates/bevy_ecs/src/world/mod.rs index 28a648c318290..e8f6328ac0a22 100644 --- a/crates/bevy_ecs/src/world/mod.rs +++ b/crates/bevy_ecs/src/world/mod.rs @@ -1073,7 +1073,6 @@ impl World { /// ``` #[track_caller] pub fn spawn_empty(&mut self) -> EntityWorldMut { - self.flush(); let entity = self.entities.alloc(); // SAFETY: entity 
was just allocated unsafe { self.spawn_at_empty_internal(entity, MaybeLocation::caller()) } @@ -1149,7 +1148,6 @@ impl World { bundle: B, caller: MaybeLocation, ) -> EntityWorldMut { - self.flush(); let change_tick = self.change_tick(); let entity = self.entities.alloc(); let mut bundle_spawner = BundleSpawner::new::(self, change_tick); @@ -1310,6 +1308,7 @@ impl World { let result = world.modify_component(entity, f)?; + // Handles queued commands from hooks, etc. self.flush(); Ok(result) } @@ -1339,6 +1338,7 @@ impl World { let result = world.modify_component_by_id(entity, component_id, f)?; + // Handles queued commands from hooks, etc. self.flush(); Ok(result) } @@ -1401,6 +1401,7 @@ impl World { entity: Entity, caller: MaybeLocation, ) -> Result<(), EntityDespawnError> { + // If any command depended on this entity, run those before we despawn. self.flush(); let entity = self.get_entity_mut(entity)?; entity.despawn_with_caller(caller); @@ -2275,7 +2276,6 @@ impl World { archetype_id: ArchetypeId, } - self.flush(); let change_tick = self.change_tick(); // SAFETY: These come from the same world. `Self.components_registrator` can't be used since we borrow other fields too. let mut registrator = @@ -2420,7 +2420,6 @@ impl World { archetype_id: ArchetypeId, } - self.flush(); let change_tick = self.change_tick(); // SAFETY: These come from the same world. `Self.components_registrator` can't be used since we borrow other fields too. let mut registrator = @@ -2733,6 +2732,19 @@ impl World { } } + /// If this entity is not in any [`Archetype`](crate::archetype::Archetype), this will flush it to the empty archetype. + /// Returns `Some` with the new [`EntityLocation`] if the entity is now valid in the empty archetype. + pub fn flush_entity(&mut self, entity: Entity) -> Option { + if !self.entities.contains(entity) || self.entities.get(entity).is_some() { + return None; + } + let empty_archetype = self.archetypes.empty_mut(); + let table = &mut self.storages.tables[empty_archetype.table_id()]; + // SAFETY: It's empty so no values need to be written + let new_location = unsafe { empty_archetype.allocate(entity, table.allocate(entity)) }; + Some(new_location) + } + /// Applies any commands in the world's internal [`CommandQueue`]. /// This does not apply commands from any systems, only those stored in the world. /// diff --git a/crates/bevy_ecs/src/world/spawn_batch.rs b/crates/bevy_ecs/src/world/spawn_batch.rs index 16bd9bb8059b4..fbaf9a48ea74e 100644 --- a/crates/bevy_ecs/src/world/spawn_batch.rs +++ b/crates/bevy_ecs/src/world/spawn_batch.rs @@ -18,6 +18,7 @@ where inner: I, spawner: BundleSpawner<'w>, caller: MaybeLocation, + allocator: crate::entity::allocator::AllocEntitiesIterator<'static>, } impl<'w, I> SpawnBatchIter<'w, I> @@ -28,15 +29,15 @@ where #[inline] #[track_caller] pub(crate) fn new(world: &'w mut World, iter: I, caller: MaybeLocation) -> Self { - // Ensure all entity allocations are accounted for so `self.entities` can realloc if - // necessary - world.flush(); - let change_tick = world.change_tick(); let (lower, upper) = iter.size_hint(); let length = upper.unwrap_or(lower); - world.entities.reserve(length as u32); + + world.entities.prepare(length as u32); + // SAFETY: We take the lifetime of the world, so the instance is valid. + // `BundleSpawner::spawn_non_existent` never frees entities, and that is the only thing we call on it while the iterator is not empty. 
+ let allocator = unsafe { world.entities.alloc_entities_unsafe(lower as u32) };
let mut spawner = BundleSpawner::new::(world, change_tick);
spawner.reserve_storage(length);
@@ -45,6 +46,7 @@ where
inner: iter,
spawner,
caller,
+ allocator,
}
}
}
@@ -72,8 +74,19 @@ where
fn next(&mut self) -> Option {
let bundle = self.inner.next()?;
- // SAFETY: bundle matches spawner type
- unsafe { Some(self.spawner.spawn(bundle, self.caller).0) }
+ let entity = self.allocator.next();
+
+ let spawned = match entity {
+ // SAFETY: bundle matches spawner type. `entity` is fresh
+ Some(entity) => unsafe {
+ self.spawner.spawn_non_existent(entity, bundle, self.caller);
+ entity
+ },
+ // SAFETY: bundle matches spawner type
+ None => unsafe { self.spawner.spawn(bundle, self.caller).0 },
+ };
+
+ Some(spawned)
}
fn size_hint(&self) -> (usize, Option) {
diff --git a/release-content/migration-guides/new_entity_allocator.md b/release-content/migration-guides/new_entity_allocator.md
new file mode 100644
index 0000000000000..1205c93b70e28
--- /dev/null
+++ b/release-content/migration-guides/new_entity_allocator.md
@@ -0,0 +1,26 @@
+---
+title: Entities Utilities
+pull_requests: [18670]
+---
+
+`Entities::reserve` has been renamed `Entities::prepare`, as it has looser guarantees.
+
+Additionally, `Entities` debug methods `used_count` and `total_prospective_count` have been removed.
+This is because the new allocator is much more flexible, which makes it unrealistic to track these quantities (and less meaningful).
+
+`Entities` debug methods `total_count` and `len` now return `u32` instead of `usize`.
+Since `Entities` has a well-defined upper bound, unlike other collections, it makes more sense to use `u32` explicitly rather than `usize`.
+
+To migrate:
+
+```diff
+- let entities: usize = entities.len();
++ let entities: u32 = entities.len();
+```
+
+```diff
+- entities.reserve(128);
++ entities.prepare(128);
+```
+
+If you have any trouble migrating away from `Entities::used_count` and `Entities::total_prospective_count`, feel free to open an issue!
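
The remote reservation workflow added in this diff spans several APIs: `RemoteEntities::new`/`reserve`/`is_closed`, `Entities::queue_remote_pending_to_be_flushed`, and `World::flush_entity`. Below is a minimal sketch of how the doc comments above suggest they fit together. It is illustrative only: the `bevy_ecs::entity::RemoteEntities` import path, and the assumption that a later `World::flush` processes the queued remote reservations, are inferred from the doc comments rather than confirmed by the diff.

```rust
use bevy_ecs::entity::RemoteEntities; // assumed export path
use bevy_ecs::world::World;

fn remote_reservation_sketch(world: &mut World) {
    // A cloneable handle onto this world's entity allocator.
    // It can be sent to another thread; here we keep it local for brevity.
    let remote = RemoteEntities::new(world.entities());

    // Reserve an id without exclusive world access. The entity has no
    // location yet, so it must be flushed before it can be used.
    let reserved = remote.reserve();

    // If the source `Entities` was cleared or dropped, the id is garbage.
    assert!(!remote.is_closed());

    // Option A: flush this one entity into the empty archetype right away.
    let _location = world.flush_entity(reserved);

    // Option B: queue every remotely reserved entity to be flushed together
    // with locally reserved ones on the next flush (assumed to happen here).
    world.entities().queue_remote_pending_to_be_flushed();
    world.flush();
}
```

Checking `is_closed` before use matters because, per the doc comments above, a reservation made after the source `Entities` has been cleared or dropped returns a garbage value.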