From 34728100d8c63f9140da82f794a9ee9bb6e9544c Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Mon, 31 Mar 2025 13:08:45 -0400 Subject: [PATCH 001/113] chunk data --- crates/bevy_ecs/src/entity/allocator.rs | 96 +++++++++++++++++++++++++ crates/bevy_ecs/src/entity/mod.rs | 2 + 2 files changed, 98 insertions(+) create mode 100644 crates/bevy_ecs/src/entity/allocator.rs diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs new file mode 100644 index 0000000000000..548c378ba9e46 --- /dev/null +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -0,0 +1,96 @@ +use bevy_platform_support::{ + prelude::Vec, + sync::{ + atomic::{AtomicPtr, AtomicU32, AtomicUsize, Ordering}, + Arc, + }, +}; +use core::mem::{ManuallyDrop, MaybeUninit}; + +use super::Entity; + +/// This is the item we store in the owned buffers. +/// It might not be init (if it's out of bounds). +type Slot = MaybeUninit; + +/// Each chunk stores a buffer of [`Slot`]s at a fixed capacity. +struct Chunk { + /// Points to the first slot. If this is null, we need to allocate it. + first: AtomicPtr, +} + +impl Chunk { + const NUM_CHUNKS: u32 = 24; + const NUM_SKIPPED: u32 = u32::BITS - Self::NUM_CHUNKS; + + /// Computes the capacity of the chunk at this index within [`Self::NUM_CHUNKS`]. + /// The first 2 have length 512 (2^9) and the last has length (2^31) + fn capacity_of_chunk(chunk_index: u32) -> u32 { + // We do this because we're skipping the first 8 powers, so we need to make up for them by doubling the first index. + // This is why the first 2 indices both have a capacity of 256. + let corrected = chunk_index.max(1); + // We add NUM_SKIPPED because the total capacity should be as if [`Self::NUM_CHUNKS`] were 32. + // This skips the first NUM_SKIPPED powers. + let corrected = corrected + Self::NUM_SKIPPED; + // This bit shift is just 2^corrected. + 1 << corrected + } + + /// For this index in the whole buffer, returns the index of the [`Chunk`] and the index within that chunk. + fn get_indices(full_idnex: u32) -> (u32, u32) { + // We're countint leading zeros since each chunk has power of 2 capacity. + // So the leading zeros will be proportional to the chunk index. + let leading = full_idnex + .leading_zeros() + // We do a min because we skip the first 8 powers. + // The -1 is because this is the number of chunks, but we want the index in the end. + .min(Self::NUM_CHUNKS - 1); + // We store chunks in smallest to biggest order, so we need to reverse it. + let chunk_index = Self::NUM_CHUNKS - 1 - leading; + // We only need to cut of this particular bit. + // The capacity is only one bit, and if other bits needed to be dropped, `leading` would have been greater + let slice_index = full_idnex & !Self::capacity_of_chunk(chunk_index); + + (chunk_index, slice_index) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloc::vec; + + /// Ensure the total capacity of [`OwnedBuffer`] is `u32::MAX + 1`, since the max *index* of an [`Entity`] is `u32::MAX`. 
+ #[test] + fn chunk_capacity_sums() { + let total: usize = (0..Chunk::NUM_CHUNKS) + .map(Chunk::capacity_of_chunk) + .map(|x| x as usize) + .sum(); + let expected = u32::MAX as usize + 1; + assert_eq!(total, expected); + } + + /// Ensure [`OwnedBuffer`] can be properly indexed + #[test] + fn chunk_indexing() { + let to_test = vec![ + (0, (0, 0)), // index 0 cap = 512 + (1, (0, 1)), + (256, (0, 256)), + (511, (0, 511)), + (512, (1, 0)), // index 1 cap = 512 + (1023, (1, 511)), + (1024, (2, 0)), // index 2 cap = 1024 + (1025, (2, 1)), + (2047, (2, 1023)), + (2048, (3, 0)), // index 3 cap = 2048 + (4095, (3, 2047)), + (4096, (4, 0)), // index 3 cap = 4096 + ]; + + for (input, output) in to_test { + assert_eq!(Chunk::get_indices(input), output); + } + } +} diff --git a/crates/bevy_ecs/src/entity/mod.rs b/crates/bevy_ecs/src/entity/mod.rs index 38cda15eaa393..c02411badd695 100644 --- a/crates/bevy_ecs/src/entity/mod.rs +++ b/crates/bevy_ecs/src/entity/mod.rs @@ -36,9 +36,11 @@ //! [`EntityWorldMut::insert`]: crate::world::EntityWorldMut::insert //! [`EntityWorldMut::remove`]: crate::world::EntityWorldMut::remove +mod allocator; mod clone_entities; mod entity_set; mod map_entities; + #[cfg(feature = "bevy_reflect")] use bevy_reflect::Reflect; #[cfg(all(feature = "bevy_reflect", feature = "serialize"))] From 2ce0fb4eadf2f1edce2a258795fb929a651b1a0b Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Mon, 31 Mar 2025 13:14:18 -0400 Subject: [PATCH 002/113] get and set chunks --- crates/bevy_ecs/src/entity/allocator.rs | 77 ++++++++++++++++++++++++- 1 file changed, 76 insertions(+), 1 deletion(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 548c378ba9e46..4962ceb3447cb 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -7,9 +7,11 @@ use bevy_platform_support::{ }; use core::mem::{ManuallyDrop, MaybeUninit}; +use crate::query::DebugCheckedUnwrap; + use super::Entity; -/// This is the item we store in the owned buffers. +/// This is the item we store in the pending list. /// It might not be init (if it's out of bounds). type Slot = MaybeUninit; @@ -53,6 +55,79 @@ impl Chunk { (chunk_index, slice_index) } + + /// Gets the entity at the index within this chunk. + /// + /// # Safety + /// + /// [`Self::set`] must have been called on this index before. + unsafe fn get(&self, index: u32) -> Entity { + // SAFETY: caller ensure we are init. + let head = unsafe { self.ptr().debug_checked_unwrap() }; + let target = head.add(index as usize); + + // SAFETY: Ensured by caller. + unsafe { (*target).assume_init() } + } + + /// Sets this entity at this index. + /// + /// # Safety + /// + /// This must not be called concurrently. + /// Index must be in bounds. + /// Access does not conflict with another [`Self::get`]. + unsafe fn set(&self, index: u32, entity: Entity, index_of_self: u32) -> Slot { + let head = self.ptr().unwrap_or_else(|| self.init(index_of_self)); + let target = head.add(index as usize); + + // SAFETY: Caller ensures we are not fighting with other `set` calls or `get` calls. + // A race condition is therefore impossible. + unsafe { core::ptr::replace(target, Slot::new(entity)) } + } + + /// Initializes the chunk to be valid, returning the pointer. + /// + /// # Safety + /// + /// This must not be called concurrently. 
+ #[cold] + unsafe fn init(&self, index: u32) -> *mut Slot { + let cap = Self::capacity_of_chunk(index); + let mut buff = ManuallyDrop::new(Vec::new()); + buff.reserve_exact(cap as usize); + let ptr = buff.as_mut_ptr(); + self.first.store(ptr, Ordering::Relaxed); + ptr + } + + /// Frees memory + /// + /// # Safety + /// + /// This must not be called concurrently. + unsafe fn dealloc(&self, index: u32) { + if let Some(to_drop) = self.ptr() { + let cap = Self::capacity_of_chunk(index) as usize; + // SAFETY: This was created in [`Self::init`] from a standard Vec. + unsafe { + Vec::from_raw_parts(to_drop, cap, cap); + } + } + } + + /// Returns [`Self::first`] if it is valid. + #[inline] + fn ptr(&self) -> Option<*mut Slot> { + let ptr = self.first.load(Ordering::Relaxed); + (!ptr.is_null()).then_some(ptr) + } + + fn new() -> Self { + Self { + first: AtomicPtr::new(core::ptr::null_mut()), + } + } } #[cfg(test)] From 3a8f328c70126b5556d3b584facc36f9922130a6 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Mon, 31 Mar 2025 13:41:53 -0400 Subject: [PATCH 003/113] free and alloc --- crates/bevy_ecs/src/entity/allocator.rs | 63 ++++++++++++++++++++++++- 1 file changed, 62 insertions(+), 1 deletion(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 4962ceb3447cb..8c5e6632bfb26 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -1,7 +1,7 @@ use bevy_platform_support::{ prelude::Vec, sync::{ - atomic::{AtomicPtr, AtomicU32, AtomicUsize, Ordering}, + atomic::{AtomicIsize, AtomicPtr, AtomicU32, Ordering}, Arc, }, }; @@ -130,6 +130,67 @@ impl Chunk { } } +struct PendingBuffer { + /// The chunks of the pending list. + /// Put end-to-end, these chunks form a list of pending entities. + chunks: [Chunk; Chunk::NUM_CHUNKS as usize], + /// The length of the pending buffer + len: AtomicIsize, +} + +impl PendingBuffer { + /// Frees the `entity` allowing it to be reused. + /// + /// # Safety + /// + /// This must not conflict with any other [`Self::free`] or [`Self::alloc`] calls. + unsafe fn free(&self, entity: Entity) { + // Disable remote allocation. (We could do a compare exchange loop, but this is faster in the common case.) + let len = self.len.swap(-1, Ordering::AcqRel).max(0); + // We can cast to u32 safely because if it were to overflow, there would already be too many entities. + let (chunk_index, index) = Chunk::get_indices(len as u32); + + // SAFETY: index is correct. + let chunk = unsafe { self.chunks.get_unchecked(chunk_index as usize) }; + + // SAFETY: Caller ensures this is not concurrent. The index is correct. + // This can not confluct with a `get` because we already disabled remote allocation. + unsafe { + chunk.set(index, entity, chunk_index); + } + + let new_len = len + 1; + // It doesn't matter when other threads realize remote allocation is enabled again. + self.len.store(new_len, Ordering::Relaxed); + } + + /// Allocates an [`Entity`] from the pending list if one is available. + /// + /// # Safety + /// + /// This must not conflict with [`Self::free`] calls. + unsafe fn alloc(&self) -> Option { + // SAFETY: This will get a valid index because there is no way for `free` to be done at the same time. + let len = self.len.fetch_sub(1, Ordering::Relaxed); + (len > 0).then(|| { + let idnex = len - 1; + // We can cast to u32 safely because if it were to overflow, there would already be too many entities. 
+ let (chunk_index, index) = Chunk::get_indices(idnex as u32); + + // SAFETY: index is correct. + let chunk = unsafe { self.chunks.get_unchecked(chunk_index as usize) }; + + // SAFETY: This was less then `len`, so it must have been `set` via `free` before. + unsafe { chunk.get(index) } + }) + } + + /// Allocates an [`Entity`] from the pending list if one is available. + fn remote_alloc(&self) -> Option { + todo!() + } +} + #[cfg(test)] mod tests { use super::*; From c37c32b0fcc47889de7ec0d97b8652e058f279ec Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Mon, 31 Mar 2025 14:18:38 -0400 Subject: [PATCH 004/113] remote_alloc --- crates/bevy_ecs/src/entity/allocator.rs | 53 ++++++++++++++++++++++++- 1 file changed, 51 insertions(+), 2 deletions(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 8c5e6632bfb26..2e7604dae2eb3 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -171,7 +171,7 @@ impl PendingBuffer { /// This must not conflict with [`Self::free`] calls. unsafe fn alloc(&self) -> Option { // SAFETY: This will get a valid index because there is no way for `free` to be done at the same time. - let len = self.len.fetch_sub(1, Ordering::Relaxed); + let len = self.len.fetch_sub(1, Ordering::AcqRel); (len > 0).then(|| { let idnex = len - 1; // We can cast to u32 safely because if it were to overflow, there would already be too many entities. @@ -187,7 +187,56 @@ impl PendingBuffer { /// Allocates an [`Entity`] from the pending list if one is available. fn remote_alloc(&self) -> Option { - todo!() + // The goal is the same as `alloc`, so what's the difference? + // `alloc` knows `free` is not being called, but this does not. + // What if we `len.fetch_sub(1)` but then `free` overwrites the entity before we could read it? + // That would mean we would leak an entity and give another entity out twice. + // We get around this by only updating `len` after the read is complete. + // But that means something else could be trying to allocate the same index! + // So we need a `len.compare_exchange` loop to ensure the index is unique. + // + // Examples: + // + // What if another allocation happens during the loop? + // The exchange will fail, and we try again. + // + // What happens if a `free` starts during the loop? + // The exchange will fail, and we return `None`. + // + // What happens if a `free` starts and finishes during the loop? + // The exchange will fail (len is 1 more than expected) and we try again. + // + // What happens if a `free` starts and finishes, and then a different allocation takes the freed entity? + // The exchange will not fail, and we allocate the correct entity. + // The other allocation gets the newly freed one, and we get the previous one. + // If the `free`s and `alloc`s are not balanced, the exchange will fail, and we try again. + + let mut len = self.len.load(Ordering::Acquire); + loop { + if len == 0 { + return None; + } + + let target_new_len = len - 1; + // We can cast to u32 safely because if it were to overflow, there would already be too many entities. + let (chunk_index, index) = Chunk::get_indices(target_new_len as u32); + + // SAFETY: index is correct. + let chunk = unsafe { self.chunks.get_unchecked(chunk_index as usize) }; + + // SAFETY: This was less then `len`, so it must have been `set` via `free` before. 
+ let entity = unsafe { chunk.get(index) }; + + match self.len.compare_exchange( + len, + target_new_len, + Ordering::AcqRel, + Ordering::Acquire, + ) { + Ok(_) => return Some(entity), + Err(updated_len) => len = updated_len, + } + } } } From d40c4ba46baeac7b9d5f8d38595d5cfde77acab2 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Mon, 31 Mar 2025 14:33:10 -0400 Subject: [PATCH 005/113] SharedAllocator --- crates/bevy_ecs/src/entity/allocator.rs | 47 ++++++++++++++++++++++++- 1 file changed, 46 insertions(+), 1 deletion(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 2e7604dae2eb3..a39b9198c2883 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -130,6 +130,7 @@ impl Chunk { } } +/// This is conceptually like a `Vec` that stores entities pending reuse. struct PendingBuffer { /// The chunks of the pending list. /// Put end-to-end, these chunks form a list of pending entities. @@ -185,7 +186,7 @@ impl PendingBuffer { }) } - /// Allocates an [`Entity`] from the pending list if one is available. + /// Allocates an [`Entity`] from the pending list if one is available and it is safe to do so. fn remote_alloc(&self) -> Option { // The goal is the same as `alloc`, so what's the difference? // `alloc` knows `free` is not being called, but this does not. @@ -240,6 +241,50 @@ impl PendingBuffer { } } +/// This stores allocation data shared by all entity allocators. +struct SharedAllocator { + /// The entities pending reuse + pending: PendingBuffer, + /// The next value of [`Entity::index`] to give out if needed. + next_entity_index: AtomicU32, +} + +impl SharedAllocator { + /// Allocates a new [`Entity`], reusing a freed index if one exists. + /// + /// # Safety + /// + /// This must not conflict with [`Self::free`] calls. + unsafe fn alloc(&self) -> Entity { + // SAFETY: assured by caller + unsafe { self.pending.alloc() }.unwrap_or_else(|| { + let index = self.next_entity_index.fetch_add(1, Ordering::Relaxed); + if index == 0 { + panic!("too many entities") + } + Entity::from_raw(index) + }) + } + + /// Allocates a new [`Entity`]. + /// This will only try to reuse a freed index if it is safe to do so. + fn remote_alloc(&self) -> Entity { + self.pending.remote_alloc().unwrap_or_else(|| { + let index = self.next_entity_index.fetch_add(1, Ordering::Relaxed); + if index == 0 { + panic!("too many entities") + } + Entity::from_raw(index) + }) + } + + /// Returns whether or not the index is valid in this allocator. + fn is_valid_index(&self, index: u32) -> bool { + let next = self.next_entity_index.load(Ordering::Relaxed); + index < next + } +} + #[cfg(test)] mod tests { use super::*; From e028a6df32948ebe25b33c8fe61b1e1f130cecfc Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Mon, 31 Mar 2025 14:41:27 -0400 Subject: [PATCH 006/113] Allocator --- crates/bevy_ecs/src/entity/allocator.rs | 36 ++++++++++++++++++++++++- 1 file changed, 35 insertions(+), 1 deletion(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index a39b9198c2883..6a75ccdc1458a 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -241,6 +241,15 @@ impl PendingBuffer { } } +impl Drop for PendingBuffer { + fn drop(&mut self) { + for index in 0..Chunk::NUM_CHUNKS { + // SAFETY: we have `&mut` + unsafe { self.chunks[index as usize].dealloc(index) }; + } + } +} + /// This stores allocation data shared by all entity allocators. 
struct SharedAllocator { /// The entities pending reuse @@ -254,7 +263,7 @@ impl SharedAllocator { /// /// # Safety /// - /// This must not conflict with [`Self::free`] calls. + /// This must not conflict with [`PendingBuffer::free`] calls. unsafe fn alloc(&self) -> Entity { // SAFETY: assured by caller unsafe { self.pending.alloc() }.unwrap_or_else(|| { @@ -285,6 +294,31 @@ impl SharedAllocator { } } +pub struct Allocator { + shared: Arc, +} + +impl Allocator { + /// Allocates a new [`Entity`], reusing a freed index if one exists. + pub fn alloc(&self) -> Entity { + // SAFETY: violating safety requires a `&mut self` to exist, but rust does not allow that. + unsafe { self.shared.alloc() } + } + + /// Returns whether or not the index is valid in this allocator. + pub fn is_valid_index(&self, index: u32) -> bool { + self.shared.is_valid_index(index) + } + + /// Frees the entity allowing it to be reused. + pub fn free(&mut self, entity: Entity) { + // SAFETY: We have `&mut self`. + unsafe { + self.shared.pending.free(entity); + } + } +} + #[cfg(test)] mod tests { use super::*; From af83bf33746011cc7dd8106783a347242b693afa Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Mon, 31 Mar 2025 14:44:13 -0400 Subject: [PATCH 007/113] construction --- crates/bevy_ecs/src/entity/allocator.rs | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 6a75ccdc1458a..edf8b32abfec6 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -239,6 +239,13 @@ impl PendingBuffer { } } } + + fn new() -> Self { + Self { + chunks: core::array::from_fn(|_index| Chunk::new()), + len: AtomicIsize::new(0), + } + } } impl Drop for PendingBuffer { @@ -292,6 +299,13 @@ impl SharedAllocator { let next = self.next_entity_index.load(Ordering::Relaxed); index < next } + + fn new() -> Self { + Self { + pending: PendingBuffer::new(), + next_entity_index: AtomicU32::new(0), + } + } } pub struct Allocator { @@ -299,6 +313,12 @@ pub struct Allocator { } impl Allocator { + pub fn new() -> Self { + Self { + shared: Arc::new(SharedAllocator::new()), + } + } + /// Allocates a new [`Entity`], reusing a freed index if one exists. pub fn alloc(&self) -> Entity { // SAFETY: violating safety requires a `&mut self` to exist, but rust does not allow that. From dac89216ffdda3aff6bc2518ea1b49bd6f6e349e Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Mon, 31 Mar 2025 14:59:36 -0400 Subject: [PATCH 008/113] fix small bug --- crates/bevy_ecs/src/entity/allocator.rs | 71 +++++++++++++++++-------- 1 file changed, 49 insertions(+), 22 deletions(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index edf8b32abfec6..c863d21d93118 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -1,7 +1,7 @@ use bevy_platform_support::{ prelude::Vec, sync::{ - atomic::{AtomicIsize, AtomicPtr, AtomicU32, Ordering}, + atomic::{AtomicBool, AtomicIsize, AtomicPtr, AtomicU32, Ordering}, Arc, }, }; @@ -263,9 +263,46 @@ struct SharedAllocator { pending: PendingBuffer, /// The next value of [`Entity::index`] to give out if needed. next_entity_index: AtomicU32, + /// If true, the [`Self::next_entity_index`] has been incremented before, + /// so if it hits or passes zero again, an overflow has occored. + entity_index_given: AtomicBool, } impl SharedAllocator { + /// The total number of indices given out. 
+ fn total_entity_indices(&self) -> u64 { + let next = self.next_entity_index.load(Ordering::Relaxed); + if next == 0 { + if self.entity_index_given.load(Ordering::Relaxed) { + // every index has been given + u32::MAX as u64 + 1 + } else { + // no index has been given + 0 + } + } else { + (next - 1) as u64 + } + } + + /// Call this when the entity index is suspected to have overflown. + /// Panic if the overflow did happen. + #[cold] + fn check_overflow(&self) { + if self.entity_index_given.swap(true, Ordering::AcqRel) { + panic!("too many entities") + } + } + + /// Allocates an [`Entity`] with a brand new index. + fn alloc_new_index(&self) -> Entity { + let index = self.next_entity_index.fetch_add(1, Ordering::Relaxed); + if index == 0 { + self.check_overflow(); + } + Entity::from_raw(index) + } + /// Allocates a new [`Entity`], reusing a freed index if one exists. /// /// # Safety @@ -273,37 +310,22 @@ impl SharedAllocator { /// This must not conflict with [`PendingBuffer::free`] calls. unsafe fn alloc(&self) -> Entity { // SAFETY: assured by caller - unsafe { self.pending.alloc() }.unwrap_or_else(|| { - let index = self.next_entity_index.fetch_add(1, Ordering::Relaxed); - if index == 0 { - panic!("too many entities") - } - Entity::from_raw(index) - }) + unsafe { self.pending.alloc() }.unwrap_or_else(|| self.alloc_new_index()) } /// Allocates a new [`Entity`]. /// This will only try to reuse a freed index if it is safe to do so. fn remote_alloc(&self) -> Entity { - self.pending.remote_alloc().unwrap_or_else(|| { - let index = self.next_entity_index.fetch_add(1, Ordering::Relaxed); - if index == 0 { - panic!("too many entities") - } - Entity::from_raw(index) - }) - } - - /// Returns whether or not the index is valid in this allocator. - fn is_valid_index(&self, index: u32) -> bool { - let next = self.next_entity_index.load(Ordering::Relaxed); - index < next + self.pending + .remote_alloc() + .unwrap_or_else(|| self.alloc_new_index()) } fn new() -> Self { Self { pending: PendingBuffer::new(), next_entity_index: AtomicU32::new(0), + entity_index_given: AtomicBool::new(false), } } } @@ -325,9 +347,14 @@ impl Allocator { unsafe { self.shared.alloc() } } + /// The total number of indices given out. + pub fn total_entity_indices(&self) -> u64 { + self.shared.total_entity_indices() + } + /// Returns whether or not the index is valid in this allocator. pub fn is_valid_index(&self, index: u32) -> bool { - self.shared.is_valid_index(index) + (index as u64) < self.total_entity_indices() } /// Frees the entity allowing it to be reused. 
From cd864b48b1febbf3ece076317b4bf0357b543695 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Mon, 31 Mar 2025 15:02:47 -0400 Subject: [PATCH 009/113] fixed another small bug --- crates/bevy_ecs/src/entity/allocator.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index c863d21d93118..a8b39586ad3c2 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -214,7 +214,7 @@ impl PendingBuffer { let mut len = self.len.load(Ordering::Acquire); loop { - if len == 0 { + if len <= 0 { return None; } From b38df40195dbb69543528f2abf0bbb264d0942e2 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Mon, 31 Mar 2025 15:15:59 -0400 Subject: [PATCH 010/113] remote allocation --- crates/bevy_ecs/src/entity/allocator.rs | 29 ++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index a8b39586ad3c2..a6e2d523da9c7 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -2,7 +2,7 @@ use bevy_platform_support::{ prelude::Vec, sync::{ atomic::{AtomicBool, AtomicIsize, AtomicPtr, AtomicU32, Ordering}, - Arc, + Arc, Weak, }, }; use core::mem::{ManuallyDrop, MaybeUninit}; @@ -330,6 +330,7 @@ impl SharedAllocator { } } +/// This keeps track of freed entities and allows the allocation of new ones. pub struct Allocator { shared: Arc, } @@ -366,6 +367,32 @@ impl Allocator { } } +/// This is a stripped down version of [`Allocator`] that operates on fewer assumptions. +/// As a result, using this will be slower than [`Allocator`] but this offers additional freedoms. +pub struct RemoteAllocator { + shared: Weak, +} + +impl RemoteAllocator { + /// Allocates an entity remotely. + /// This is not guaranteed to reuse a freed entity, even if one exists. + /// + /// This will return [`None`] if the source [`Allocator`] is destroyed. + pub fn alloc(&self) -> Option { + self.shared + .upgrade() + .map(|allocator| allocator.remote_alloc()) + } + + /// Creates a new [`RemoteAllocator`] with the provided [`Allocator`] source. + /// If the source is ever destroyed, [`Self::alloc`] will yield [`None`]. + pub fn new(source: &Allocator) -> Self { + Self { + shared: Arc::downgrade(&source.shared), + } + } +} + #[cfg(test)] mod tests { use super::*; From 46dce5f042a1939ddbf613531872640002fec3d0 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Mon, 31 Mar 2025 15:36:02 -0400 Subject: [PATCH 011/113] begin integrating the allocator --- crates/bevy_ecs/src/entity/mod.rs | 280 +++--------------------------- 1 file changed, 25 insertions(+), 255 deletions(-) diff --git a/crates/bevy_ecs/src/entity/mod.rs b/crates/bevy_ecs/src/entity/mod.rs index c02411badd695..11883e69c1a28 100644 --- a/crates/bevy_ecs/src/entity/mod.rs +++ b/crates/bevy_ecs/src/entity/mod.rs @@ -41,6 +41,7 @@ mod clone_entities; mod entity_set; mod map_entities; +use allocator::Allocator; #[cfg(feature = "bevy_reflect")] use bevy_reflect::Reflect; #[cfg(all(feature = "bevy_reflect", feature = "serialize"))] @@ -92,19 +93,6 @@ use log::warn; #[cfg(feature = "serialize")] use serde::{Deserialize, Serialize}; -#[cfg(target_has_atomic = "64")] -use bevy_platform_support::sync::atomic::AtomicI64 as AtomicIdCursor; -#[cfg(target_has_atomic = "64")] -type IdCursor = i64; - -/// Most modern platforms support 64-bit atomics, but some less-common platforms -/// do not. 
This fallback allows compilation using a 32-bit cursor instead, with -/// the caveat that some conversions may fail (and panic) at runtime. -#[cfg(not(target_has_atomic = "64"))] -use bevy_platform_support::sync::atomic::AtomicIsize as AtomicIdCursor; -#[cfg(not(target_has_atomic = "64"))] -type IdCursor = isize; - /// Lightweight identifier of an [entity](crate::entity). /// /// The identifier is implemented using a [generational index]: a combination of an index and a generation. @@ -492,7 +480,7 @@ impl SparseSetIndex for Entity { } /// An [`Iterator`] returning a sequence of [`Entity`] values from -pub struct ReserveEntitiesIterator<'a> { +pub struct AllocEntitiesIterator<'a> { // Metas, so we can recover the current generation for anything in the freelist. meta: &'a [EntityMeta], @@ -503,7 +491,7 @@ pub struct ReserveEntitiesIterator<'a> { new_indices: core::ops::Range, } -impl<'a> Iterator for ReserveEntitiesIterator<'a> { +impl<'a> Iterator for AllocEntitiesIterator<'a> { type Item = Entity; fn next(&mut self) -> Option { @@ -521,11 +509,11 @@ impl<'a> Iterator for ReserveEntitiesIterator<'a> { } } -impl<'a> ExactSizeIterator for ReserveEntitiesIterator<'a> {} -impl<'a> core::iter::FusedIterator for ReserveEntitiesIterator<'a> {} +impl<'a> ExactSizeIterator for AllocEntitiesIterator<'a> {} +impl<'a> core::iter::FusedIterator for AllocEntitiesIterator<'a> {} // SAFETY: Newly reserved entity values are unique. -unsafe impl EntitySetIterator for ReserveEntitiesIterator<'_> {} +unsafe impl EntitySetIterator for AllocEntitiesIterator<'_> {} /// A [`World`]'s internal metadata store on all of its entities. /// @@ -535,160 +523,42 @@ unsafe impl EntitySetIterator for ReserveEntitiesIterator<'_> {} /// - The location of the entity's components in memory (via [`EntityLocation`]) /// /// [`World`]: crate::world::World -#[derive(Debug)] +// #[derive(Debug)] pub struct Entities { meta: Vec, - - /// The `pending` and `free_cursor` fields describe three sets of Entity IDs - /// that have been freed or are in the process of being allocated: - /// - /// - The `freelist` IDs, previously freed by `free()`. These IDs are available to any of - /// [`alloc`], [`reserve_entity`] or [`reserve_entities`]. Allocation will always prefer - /// these over brand new IDs. - /// - /// - The `reserved` list of IDs that were once in the freelist, but got reserved by - /// [`reserve_entities`] or [`reserve_entity`]. They are now waiting for [`flush`] to make them - /// fully allocated. - /// - /// - The count of new IDs that do not yet exist in `self.meta`, but which we have handed out - /// and reserved. [`flush`] will allocate room for them in `self.meta`. - /// - /// The contents of `pending` look like this: - /// - /// ```txt - /// ---------------------------- - /// | freelist | reserved | - /// ---------------------------- - /// ^ ^ - /// free_cursor pending.len() - /// ``` - /// - /// As IDs are allocated, `free_cursor` is atomically decremented, moving - /// items from the freelist into the reserved list by sliding over the boundary. - /// - /// Once the freelist runs out, `free_cursor` starts going negative. - /// The more negative it is, the more IDs have been reserved starting exactly at - /// the end of `meta.len()`. - /// - /// This formulation allows us to reserve any number of IDs first from the freelist - /// and then from the new IDs, using only a single atomic subtract. - /// - /// Once [`flush`] is done, `free_cursor` will equal `pending.len()`. 
- /// - /// [`alloc`]: Entities::alloc - /// [`reserve_entity`]: Entities::reserve_entity - /// [`reserve_entities`]: Entities::reserve_entities - /// [`flush`]: Entities::flush - pending: Vec, - free_cursor: AtomicIdCursor, + allocator: Allocator, } impl Entities { - pub(crate) const fn new() -> Self { + pub(crate) fn new() -> Self { Entities { meta: Vec::new(), - pending: Vec::new(), - free_cursor: AtomicIdCursor::new(0), + allocator: Allocator::new(), } } /// Reserve entity IDs concurrently. /// /// Storage for entity generation and location is lazily allocated by calling [`flush`](Entities::flush). - #[expect( - clippy::allow_attributes, - reason = "`clippy::unnecessary_fallible_conversions` may not always lint." - )] - #[allow( - clippy::unnecessary_fallible_conversions, - reason = "`IdCursor::try_from` may fail on 32-bit platforms." - )] - pub fn reserve_entities(&self, count: u32) -> ReserveEntitiesIterator { - // Use one atomic subtract to grab a range of new IDs. The range might be - // entirely nonnegative, meaning all IDs come from the freelist, or entirely - // negative, meaning they are all new IDs to allocate, or a mix of both. - let range_end = self.free_cursor.fetch_sub( - IdCursor::try_from(count) - .expect("64-bit atomic operations are not supported on this platform."), - Ordering::Relaxed, - ); - let range_start = range_end - - IdCursor::try_from(count) - .expect("64-bit atomic operations are not supported on this platform."); - - let freelist_range = range_start.max(0) as usize..range_end.max(0) as usize; - - let (new_id_start, new_id_end) = if range_start >= 0 { - // We satisfied all requests from the freelist. - (0, 0) - } else { - // We need to allocate some new Entity IDs outside of the range of self.meta. - // - // `range_start` covers some negative territory, e.g. `-3..6`. - // Since the nonnegative values `0..6` are handled by the freelist, that - // means we need to handle the negative range here. - // - // In this example, we truncate the end to 0, leaving us with `-3..0`. - // Then we negate these values to indicate how far beyond the end of `meta.end()` - // to go, yielding `meta.len()+0 .. meta.len()+3`. - let base = self.meta.len() as IdCursor; - - let new_id_end = u32::try_from(base - range_start).expect("too many entities"); - - // `new_id_end` is in range, so no need to check `start`. - let new_id_start = (base - range_end.min(0)) as u32; - - (new_id_start, new_id_end) - }; - - ReserveEntitiesIterator { - meta: &self.meta[..], - freelist_indices: self.pending[freelist_range].iter(), - new_indices: new_id_start..new_id_end, - } + pub fn reserve_entities(&self, count: u32) -> AllocEntitiesIterator { + self.alloc_entities(count) } /// Reserve one entity ID concurrently. /// /// Equivalent to `self.reserve_entities(1).next().unwrap()`, but more efficient. pub fn reserve_entity(&self) -> Entity { - let n = self.free_cursor.fetch_sub(1, Ordering::Relaxed); - if n > 0 { - // Allocate from the freelist. - let index = self.pending[(n - 1) as usize]; - Entity::from_raw_and_generation(index, self.meta[index as usize].generation) - } else { - // Grab a new ID, outside the range of `meta.len()`. `flush()` must - // eventually be called to make it valid. - // - // As `self.free_cursor` goes more and more negative, we return IDs farther - // and farther beyond `meta.len()`. 
- Entity::from_raw( - u32::try_from(self.meta.len() as IdCursor - n).expect("too many entities"), - ) - } + self.alloc() } - /// Check that we do not have pending work requiring `flush()` to be called. - fn verify_flushed(&mut self) { - debug_assert!( - !self.needs_flush(), - "flush() needs to be called before this operation is legal" - ); + /// Allocate an entity ID directly. + pub fn alloc(&self) -> Entity { + todo!() } - /// Allocate an entity ID directly. - pub fn alloc(&mut self) -> Entity { - self.verify_flushed(); - if let Some(index) = self.pending.pop() { - let new_free_cursor = self.pending.len() as IdCursor; - *self.free_cursor.get_mut() = new_free_cursor; - Entity::from_raw_and_generation(index, self.meta[index as usize].generation) - } else { - let index = u32::try_from(self.meta.len()).expect("too many entities"); - self.meta.push(EntityMeta::EMPTY); - Entity::from_raw(index) - } + /// A more efficient way to [`alloc`](Self::alloc) multiple entities. + pub fn alloc_entities(&self, count: u32) -> AllocEntitiesIterator { + todo!() } /// Allocate a specific entity ID, overwriting its generation. @@ -699,31 +569,7 @@ impl Entities { note = "This can cause extreme performance problems when used after freeing a large number of entities and requesting an arbitrary entity. See #18054 on GitHub." )] pub fn alloc_at(&mut self, entity: Entity) -> Option { - self.verify_flushed(); - - let loc = if entity.index() as usize >= self.meta.len() { - self.pending - .extend((self.meta.len() as u32)..entity.index()); - let new_free_cursor = self.pending.len() as IdCursor; - *self.free_cursor.get_mut() = new_free_cursor; - self.meta - .resize(entity.index() as usize + 1, EntityMeta::EMPTY); - None - } else if let Some(index) = self.pending.iter().position(|item| *item == entity.index()) { - self.pending.swap_remove(index); - let new_free_cursor = self.pending.len() as IdCursor; - *self.free_cursor.get_mut() = new_free_cursor; - None - } else { - Some(mem::replace( - &mut self.meta[entity.index() as usize].location, - EntityMeta::EMPTY.location, - )) - }; - - self.meta[entity.index() as usize].generation = entity.generation; - - loc + unimplemented!() } /// Allocate a specific entity ID, overwriting its generation. @@ -740,42 +586,13 @@ impl Entities { &mut self, entity: Entity, ) -> AllocAtWithoutReplacement { - self.verify_flushed(); - - let result = if entity.index() as usize >= self.meta.len() { - self.pending - .extend((self.meta.len() as u32)..entity.index()); - let new_free_cursor = self.pending.len() as IdCursor; - *self.free_cursor.get_mut() = new_free_cursor; - self.meta - .resize(entity.index() as usize + 1, EntityMeta::EMPTY); - AllocAtWithoutReplacement::DidNotExist - } else if let Some(index) = self.pending.iter().position(|item| *item == entity.index()) { - self.pending.swap_remove(index); - let new_free_cursor = self.pending.len() as IdCursor; - *self.free_cursor.get_mut() = new_free_cursor; - AllocAtWithoutReplacement::DidNotExist - } else { - let current_meta = &self.meta[entity.index() as usize]; - if current_meta.location.archetype_id == ArchetypeId::INVALID { - AllocAtWithoutReplacement::DidNotExist - } else if current_meta.generation == entity.generation { - AllocAtWithoutReplacement::Exists(current_meta.location) - } else { - return AllocAtWithoutReplacement::ExistsWithWrongGeneration; - } - }; - - self.meta[entity.index() as usize].generation = entity.generation; - result + unimplemented!() } /// Destroy an entity, allowing it to be reused. 
/// /// Must not be called while reserved entities are awaiting `flush()`. pub fn free(&mut self, entity: Entity) -> Option { - self.verify_flushed(); - let meta = &mut self.meta[entity.index() as usize]; if meta.generation != entity.generation { return None; @@ -790,13 +607,7 @@ impl Entities { ); } - let loc = mem::replace(&mut meta.location, EntityMeta::EMPTY.location); - - self.pending.push(entity.index()); - - let new_free_cursor = self.pending.len() as IdCursor; - *self.free_cursor.get_mut() = new_free_cursor; - Some(loc) + todo!() } /// Ensure at least `n` allocations can succeed without reallocating. @@ -809,15 +620,7 @@ impl Entities { reason = "`IdCursor::try_from` may fail on 32-bit platforms." )] pub fn reserve(&mut self, additional: u32) { - self.verify_flushed(); - - let freelist_size = *self.free_cursor.get_mut(); - let shortfall = IdCursor::try_from(additional) - .expect("64-bit atomic operations are not supported on this platform.") - - freelist_size; - if shortfall > 0 { - self.meta.reserve(shortfall as usize); - } + todo!() } /// Returns true if the [`Entities`] contains [`entity`](Entity). @@ -831,8 +634,7 @@ impl Entities { /// Clears all [`Entity`] from the World. pub fn clear(&mut self) { self.meta.clear(); - self.pending.clear(); - *self.free_cursor.get_mut() = 0; + self.allocator = Allocator::new(); } /// Returns the location of an [`Entity`]. @@ -904,10 +706,6 @@ impl Entities { } } - fn needs_flush(&mut self) -> bool { - *self.free_cursor.get_mut() != self.pending.len() as IdCursor - } - /// Allocates space for entities previously reserved with [`reserve_entity`](Entities::reserve_entity) or /// [`reserve_entities`](Entities::reserve_entities), then initializes each one using the supplied function. /// @@ -918,35 +716,7 @@ impl Entities { /// /// Note: freshly-allocated entities (ones which don't come from the pending list) are guaranteed /// to be initialized with the invalid archetype. - pub unsafe fn flush(&mut self, mut init: impl FnMut(Entity, &mut EntityLocation)) { - let free_cursor = self.free_cursor.get_mut(); - let current_free_cursor = *free_cursor; - - let new_free_cursor = if current_free_cursor >= 0 { - current_free_cursor as usize - } else { - let old_meta_len = self.meta.len(); - let new_meta_len = old_meta_len + -current_free_cursor as usize; - self.meta.resize(new_meta_len, EntityMeta::EMPTY); - for (index, meta) in self.meta.iter_mut().enumerate().skip(old_meta_len) { - init( - Entity::from_raw_and_generation(index as u32, meta.generation), - &mut meta.location, - ); - } - - *free_cursor = 0; - 0 - }; - - for index in self.pending.drain(new_free_cursor..) { - let meta = &mut self.meta[index as usize]; - init( - Entity::from_raw_and_generation(index, meta.generation), - &mut meta.location, - ); - } - } + pub unsafe fn flush(&mut self, mut _init: impl FnMut(Entity, &mut EntityLocation)) {} /// Flushes all reserved entities to an "invalid" state. Attempting to retrieve them will return `None` /// unless they are later populated with a valid archetype. 
From 2bda8ec5726e03a6ce5be390076030cf2f6c0747 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Mon, 31 Mar 2025 16:18:11 -0400 Subject: [PATCH 012/113] fixed length methods --- crates/bevy_ecs/src/entity/allocator.rs | 15 +++++++++++ crates/bevy_ecs/src/entity/mod.rs | 36 +++++-------------------- 2 files changed, 22 insertions(+), 29 deletions(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index a6e2d523da9c7..e7de16e364a8d 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -140,6 +140,15 @@ struct PendingBuffer { } impl PendingBuffer { + /// Gets the number of pending entities. + /// + /// # Safety + /// + /// For this to be accurate, this must not be called during a [`Self::free`]. + unsafe fn num_pending(&self) -> u64 { + self.len.load(Ordering::Relaxed).max(0) as u64 + } + /// Frees the `entity` allowing it to be reused. /// /// # Safety @@ -353,6 +362,12 @@ impl Allocator { self.shared.total_entity_indices() } + /// The number of pending entities. + pub fn num_pending(&self) -> u64 { + // SAFETY: `free` is not being called since it takes `&mut self`. + unsafe { self.shared.pending.num_pending() } + } + /// Returns whether or not the index is valid in this allocator. pub fn is_valid_index(&self, index: u32) -> bool { (index as u64) < self.total_entity_indices() diff --git a/crates/bevy_ecs/src/entity/mod.rs b/crates/bevy_ecs/src/entity/mod.rs index 11883e69c1a28..3a66d7aee916c 100644 --- a/crates/bevy_ecs/src/entity/mod.rs +++ b/crates/bevy_ecs/src/entity/mod.rs @@ -87,7 +87,7 @@ use crate::{ }; use alloc::vec::Vec; use bevy_platform_support::sync::atomic::Ordering; -use core::{fmt, hash::Hash, mem, num::NonZero, panic::Location}; +use core::{fmt, hash::Hash, num::NonZero, panic::Location}; use log::warn; #[cfg(feature = "serialize")] @@ -568,7 +568,7 @@ impl Entities { #[deprecated( note = "This can cause extreme performance problems when used after freeing a large number of entities and requesting an arbitrary entity. See #18054 on GitHub." )] - pub fn alloc_at(&mut self, entity: Entity) -> Option { + pub fn alloc_at(&mut self, _entity: Entity) -> Option { unimplemented!() } @@ -584,7 +584,7 @@ impl Entities { )] pub(crate) fn alloc_at_without_replacement( &mut self, - entity: Entity, + _entity: Entity, ) -> AllocAtWithoutReplacement { unimplemented!() } @@ -731,40 +731,18 @@ impl Entities { } /// The count of all entities in the [`World`] that have ever been allocated - /// including the entities that are currently freed. - /// - /// This does not include entities that have been reserved but have never been - /// allocated yet. + /// including the entities that are currently pending reuse. /// /// [`World`]: crate::world::World #[inline] pub fn total_count(&self) -> usize { - self.meta.len() - } - - /// The count of all entities in the [`World`] that are used, - /// including both those allocated and those reserved, but not those freed. - /// - /// [`World`]: crate::world::World - #[inline] - pub fn used_count(&self) -> usize { - (self.meta.len() as isize - self.free_cursor.load(Ordering::Relaxed) as isize) as usize - } - - /// The count of all entities in the [`World`] that have ever been allocated or reserved, including those that are freed. - /// This is the value that [`Self::total_count()`] would return if [`Self::flush()`] were called right now. 
- /// - /// [`World`]: crate::world::World - #[inline] - pub fn total_prospective_count(&self) -> usize { - self.meta.len() + (-self.free_cursor.load(Ordering::Relaxed)).min(0) as usize + self.allocator.total_entity_indices() as usize } /// The count of currently allocated entities. #[inline] - pub fn len(&self) -> u32 { - // `pending`, by definition, can't be bigger than `meta`. - (self.meta.len() - self.pending.len()) as u32 + pub fn len(&self) -> u64 { + self.allocator.total_entity_indices() - self.allocator.num_pending() } /// Checks if any entity is currently active. From 3bd566605bf7d24aeaddfcb609d695c8af1a0309 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Mon, 31 Mar 2025 16:24:13 -0400 Subject: [PATCH 013/113] docs and fix errors --- crates/bevy_ecs/src/archetype.rs | 5 +++++ crates/bevy_ecs/src/entity/map_entities.rs | 11 +++-------- crates/bevy_ecs/src/entity/mod.rs | 12 +++--------- 3 files changed, 11 insertions(+), 17 deletions(-) diff --git a/crates/bevy_ecs/src/archetype.rs b/crates/bevy_ecs/src/archetype.rs index f55acf6d35feb..d84a9d77c582e 100644 --- a/crates/bevy_ecs/src/archetype.rs +++ b/crates/bevy_ecs/src/archetype.rs @@ -81,6 +81,11 @@ pub struct ArchetypeId(u32); impl ArchetypeId { /// The ID for the [`Archetype`] without any components. pub const EMPTY: ArchetypeId = ArchetypeId(0); + /// This represents an archetype that does not actually exist. + /// This can be used as a placeholder. + /// + /// On an entity, this archetype signals that the entity is not yet part of any archetype. + /// /// # Safety: /// /// This must always have an all-1s bit pattern to ensure soundness in fast entity id space allocation. diff --git a/crates/bevy_ecs/src/entity/map_entities.rs b/crates/bevy_ecs/src/entity/map_entities.rs index 10da6cc559bfc..a79f1f7cf9493 100644 --- a/crates/bevy_ecs/src/entity/map_entities.rs +++ b/crates/bevy_ecs/src/entity/map_entities.rs @@ -48,7 +48,7 @@ use smallvec::SmallVec; pub trait MapEntities { /// Updates all [`Entity`] references stored inside using `entity_mapper`. /// - /// Implementors should look up any and all [`Entity`] values stored within `self` and + /// Implementers should look up any and all [`Entity`] values stored within `self` and /// update them to the mapped values via `entity_mapper`. fn map_entities(&mut self, entity_mapper: &mut E); } @@ -102,7 +102,7 @@ impl> MapEntities for SmallVec { /// /// More generally, this can be used to map [`Entity`] references between any two [`Worlds`](World). /// -/// This is used by [`MapEntities`] implementors. +/// This is used by [`MapEntities`] implementers. /// /// ## Example /// @@ -120,7 +120,7 @@ impl> MapEntities for SmallVec { /// fn get_mapped(&mut self, entity: Entity) -> Entity { /// self.map.get(&entity).copied().unwrap_or(entity) /// } -/// +/// /// fn set_mapped(&mut self, source: Entity, target: Entity) { /// self.map.insert(source, target); /// } @@ -333,17 +333,12 @@ mod tests { #[test] fn entity_mapper_no_panic() { let mut world = World::new(); - // "Dirty" the `Entities`, requiring a flush afterward. world.entities.reserve_entity(); - assert!(world.entities.needs_flush()); // Create and exercise a SceneEntityMapper - should not panic because it flushes // `Entities` first. SceneEntityMapper::world_scope(&mut Default::default(), &mut world, |_, m| { m.get_mapped(Entity::PLACEHOLDER); }); - - // The SceneEntityMapper should leave `Entities` in a flushed state. 
- assert!(!world.entities.needs_flush()); } } diff --git a/crates/bevy_ecs/src/entity/mod.rs b/crates/bevy_ecs/src/entity/mod.rs index 3a66d7aee916c..e58940458f036 100644 --- a/crates/bevy_ecs/src/entity/mod.rs +++ b/crates/bevy_ecs/src/entity/mod.rs @@ -86,7 +86,6 @@ use crate::{ storage::{SparseSetIndex, TableId, TableRow}, }; use alloc::vec::Vec; -use bevy_platform_support::sync::atomic::Ordering; use core::{fmt, hash::Hash, num::NonZero, panic::Location}; use log::warn; @@ -697,12 +696,7 @@ impl Entities { if let Some(&EntityMeta { generation, .. }) = self.meta.get(idu) { Some(Entity::from_raw_and_generation(index, generation)) } else { - // `id` is outside of the meta list - check whether it is reserved but not yet flushed. - let free_cursor = self.free_cursor.load(Ordering::Relaxed); - // If this entity was manually created, then free_cursor might be positive - // Returning None handles that case correctly - let num_pending = usize::try_from(-free_cursor).ok()?; - (idu < self.meta.len() + num_pending).then_some(Entity::from_raw(index)) + (self.allocator.is_valid_index(index)).then_some(Entity::from_raw(index)) } } @@ -848,8 +842,8 @@ struct EntityMeta { } impl EntityMeta { - /// meta for **pending entity** - const EMPTY: EntityMeta = EntityMeta { + /// This is the metadata for an entity index that has never had its location set or been freed. + const FRESH: EntityMeta = EntityMeta { generation: NonZero::::MIN, location: EntityLocation::INVALID, spawned_or_despawned_by: MaybeLocation::new(None), From 4752167ab420140b59ee6648a8f800321a035e17 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Mon, 31 Mar 2025 16:31:19 -0400 Subject: [PATCH 014/113] allocation implemented --- crates/bevy_ecs/src/entity/allocator.rs | 35 +++++++++++++++++- crates/bevy_ecs/src/entity/mod.rs | 47 ++++--------------------- 2 files changed, 40 insertions(+), 42 deletions(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index e7de16e364a8d..123d3dc12e296 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -9,7 +9,7 @@ use core::mem::{ManuallyDrop, MaybeUninit}; use crate::query::DebugCheckedUnwrap; -use super::Entity; +use super::{Entity, EntitySetIterator}; /// This is the item we store in the pending list. /// It might not be init (if it's out of bounds). @@ -380,8 +380,41 @@ impl Allocator { self.shared.pending.free(entity); } } + + /// Allocates `count` entities in an iterator. + pub fn alloc_many(&self, entities: u32) -> AllocEntitiesIterator { + AllocEntitiesIterator { + allocator: self, + num_left: entities, + } + } } +/// An [`Iterator`] returning a sequence of [`Entity`] values from an [`Allocator`]. +pub struct AllocEntitiesIterator<'a> { + allocator: &'a Allocator, + num_left: u32, +} + +impl<'a> Iterator for AllocEntitiesIterator<'a> { + type Item = Entity; + + fn next(&mut self) -> Option { + self.num_left.checked_sub(1).map(|_| self.allocator.alloc()) + } + + fn size_hint(&self) -> (usize, Option) { + let len = self.num_left as usize; + (len, Some(len)) + } +} + +impl<'a> ExactSizeIterator for AllocEntitiesIterator<'a> {} +impl<'a> core::iter::FusedIterator for AllocEntitiesIterator<'a> {} + +// SAFETY: Newly reserved entity values are unique. +unsafe impl EntitySetIterator for AllocEntitiesIterator<'_> {} + /// This is a stripped down version of [`Allocator`] that operates on fewer assumptions. /// As a result, using this will be slower than [`Allocator`] but this offers additional freedoms. 
pub struct RemoteAllocator { diff --git a/crates/bevy_ecs/src/entity/mod.rs b/crates/bevy_ecs/src/entity/mod.rs index e58940458f036..b4023ec32862e 100644 --- a/crates/bevy_ecs/src/entity/mod.rs +++ b/crates/bevy_ecs/src/entity/mod.rs @@ -235,6 +235,7 @@ impl Hash for Entity { #[deprecated( note = "This is exclusively used with the now deprecated `Entities::alloc_at_without_replacement`." )] +#[expect(unused, reason = "This is not implemented on this branch")] pub(crate) enum AllocAtWithoutReplacement { Exists(EntityLocation), DidNotExist, @@ -478,42 +479,6 @@ impl SparseSetIndex for Entity { } } -/// An [`Iterator`] returning a sequence of [`Entity`] values from -pub struct AllocEntitiesIterator<'a> { - // Metas, so we can recover the current generation for anything in the freelist. - meta: &'a [EntityMeta], - - // Reserved indices formerly in the freelist to hand out. - freelist_indices: core::slice::Iter<'a, u32>, - - // New Entity indices to hand out, outside the range of meta.len(). - new_indices: core::ops::Range, -} - -impl<'a> Iterator for AllocEntitiesIterator<'a> { - type Item = Entity; - - fn next(&mut self) -> Option { - self.freelist_indices - .next() - .map(|&index| { - Entity::from_raw_and_generation(index, self.meta[index as usize].generation) - }) - .or_else(|| self.new_indices.next().map(Entity::from_raw)) - } - - fn size_hint(&self) -> (usize, Option) { - let len = self.freelist_indices.len() + self.new_indices.len(); - (len, Some(len)) - } -} - -impl<'a> ExactSizeIterator for AllocEntitiesIterator<'a> {} -impl<'a> core::iter::FusedIterator for AllocEntitiesIterator<'a> {} - -// SAFETY: Newly reserved entity values are unique. -unsafe impl EntitySetIterator for AllocEntitiesIterator<'_> {} - /// A [`World`]'s internal metadata store on all of its entities. /// /// Contains metadata on: @@ -539,8 +504,8 @@ impl Entities { /// Reserve entity IDs concurrently. /// /// Storage for entity generation and location is lazily allocated by calling [`flush`](Entities::flush). - pub fn reserve_entities(&self, count: u32) -> AllocEntitiesIterator { - self.alloc_entities(count) + pub fn reserve_entities(&self, count: u32) -> allocator::AllocEntitiesIterator { + self.alloc_many(count) } /// Reserve one entity ID concurrently. @@ -552,12 +517,12 @@ impl Entities { /// Allocate an entity ID directly. pub fn alloc(&self) -> Entity { - todo!() + self.allocator.alloc() } /// A more efficient way to [`alloc`](Self::alloc) multiple entities. - pub fn alloc_entities(&self, count: u32) -> AllocEntitiesIterator { - todo!() + pub fn alloc_many(&self, count: u32) -> allocator::AllocEntitiesIterator { + self.allocator.alloc_many(count) } /// Allocate a specific entity ID, overwriting its generation. From 45693d228be087f2e5a501490b8bb7468fea42c5 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Mon, 31 Mar 2025 18:11:55 -0400 Subject: [PATCH 015/113] fix reserving --- crates/bevy_ecs/src/entity/mod.rs | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/crates/bevy_ecs/src/entity/mod.rs b/crates/bevy_ecs/src/entity/mod.rs index b4023ec32862e..ac770e8664640 100644 --- a/crates/bevy_ecs/src/entity/mod.rs +++ b/crates/bevy_ecs/src/entity/mod.rs @@ -575,16 +575,9 @@ impl Entities { } /// Ensure at least `n` allocations can succeed without reallocating. - #[expect( - clippy::allow_attributes, - reason = "`clippy::unnecessary_fallible_conversions` may not always lint." 
- )] - #[allow( - clippy::unnecessary_fallible_conversions, - reason = "`IdCursor::try_from` may fail on 32-bit platforms." - )] pub fn reserve(&mut self, additional: u32) { - todo!() + let shortfall = (additional as u64).saturating_sub(self.allocator.num_pending()); + self.meta.reserve(shortfall as usize); } /// Returns true if the [`Entities`] contains [`entity`](Entity). From e0105094bc4993554ec6606c70fdc042c046ca1f Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Mon, 31 Mar 2025 18:41:10 -0400 Subject: [PATCH 016/113] fix free --- crates/bevy_ecs/src/entity/map_entities.rs | 5 +- crates/bevy_ecs/src/entity/mod.rs | 106 ++++++++++++--------- 2 files changed, 65 insertions(+), 46 deletions(-) diff --git a/crates/bevy_ecs/src/entity/map_entities.rs b/crates/bevy_ecs/src/entity/map_entities.rs index a79f1f7cf9493..425932f9d67b4 100644 --- a/crates/bevy_ecs/src/entity/map_entities.rs +++ b/crates/bevy_ecs/src/entity/map_entities.rs @@ -256,8 +256,9 @@ impl<'m> SceneEntityMapper<'m> { pub fn finish(self, world: &mut World) { // SAFETY: Entities data is kept in a valid state via `EntityMap::world_scope` let entities = unsafe { world.entities_mut() }; - assert!(entities.free(self.dead_start).is_some()); - assert!(entities.reserve_generations(self.dead_start.index(), self.generations)); + assert!(entities + .free_current_and_future_generations(self.dead_start, self.generations) + .is_some()); } /// Creates an [`SceneEntityMapper`] from a provided [`World`] and [`EntityHashMap`], then calls the diff --git a/crates/bevy_ecs/src/entity/mod.rs b/crates/bevy_ecs/src/entity/mod.rs index ac770e8664640..e7bbaeab7b7f8 100644 --- a/crates/bevy_ecs/src/entity/mod.rs +++ b/crates/bevy_ecs/src/entity/mod.rs @@ -553,25 +553,44 @@ impl Entities { unimplemented!() } - /// Destroy an entity, allowing it to be reused. - /// - /// Must not be called while reserved entities are awaiting `flush()`. - pub fn free(&mut self, entity: Entity) -> Option { - let meta = &mut self.meta[entity.index() as usize]; - if meta.generation != entity.generation { + /// This is the same as [`free`](Entities::free), but it allows skipping some generations. + /// When the entity is reused, it will have a generation greater than the current generation + `generations`. + #[inline] + pub(crate) fn free_current_and_future_generations( + &mut self, + entity: Entity, + generations: u32, + ) -> Option { + let theoretical = self.resolve_from_id(entity.index()); + if theoretical.is_none_or(|theoretcal| theoretcal != entity) { return None; } - meta.generation = IdentifierMask::inc_masked_high_by(meta.generation, 1); + // SAFETY: We resolved its id to ensure it is valid. + let meta = unsafe { self.force_get_meta_mut(entity.index() as usize) }; + let prev_generation = meta.generation; - if meta.generation == NonZero::::MIN { + meta.generation = IdentifierMask::inc_masked_high_by(meta.generation, 1 + generations); + + if prev_generation > meta.generation || generations == u32::MAX { warn!( "Entity({}) generation wrapped on Entities::free, aliasing may occur", entity.index ); } - todo!() + let new_entity = Entity::from_raw_and_generation(entity.index, meta.generation); + let loc = core::mem::replace(&mut meta.location, EntityLocation::INVALID); + self.allocator.free(new_entity); + + Some(loc) + } + + /// Destroy an entity, allowing it to be reused. + /// + /// Must not be called while reserved entities are awaiting `flush()`. 
+    pub fn free(&mut self, entity: Entity) -> Option<EntityLocation> {
+        self.free_current_and_future_generations(entity, 1)
     }
 
     /// Ensure at least `n` allocations can succeed without reallocating.
@@ -595,16 +614,15 @@ impl Entities {
     }
 
     /// Returns the location of an [`Entity`].
-    /// Note: for pending entities, returns `None`.
     #[inline]
     pub fn get(&self, entity: Entity) -> Option<EntityLocation> {
         if let Some(meta) = self.meta.get(entity.index() as usize) {
-            if meta.generation != entity.generation
-                || meta.location.archetype_id == ArchetypeId::INVALID
-            {
+            if meta.generation != entity.generation {
                 return None;
             }
             Some(meta.location)
+        } else if self.allocator.is_valid_index(entity.index()) {
+            Some(EntityLocation::INVALID)
         } else {
             None
         }
@@ -620,31 +638,38 @@ impl Entities {
     #[inline]
     pub(crate) unsafe fn set(&mut self, index: u32, location: EntityLocation) {
         // SAFETY: Caller guarantees that `index` a valid entity index
-        let meta = unsafe { self.meta.get_unchecked_mut(index as usize) };
+        let meta = unsafe { self.force_get_meta_mut(index as usize) };
         meta.location = location;
     }
 
-    /// Increments the `generation` of a freed [`Entity`]. The next entity ID allocated with this
-    /// `index` will count `generation` starting from the prior `generation` + the specified
-    /// value + 1.
+    /// Gets the meta for this index mutably, creating it if it did not exist.
     ///
-    /// Does nothing if no entity with this `index` has been allocated yet.
-    pub(crate) fn reserve_generations(&mut self, index: u32, generations: u32) -> bool {
-        if (index as usize) >= self.meta.len() {
-            return false;
-        }
-
-        let meta = &mut self.meta[index as usize];
-        if meta.location.archetype_id == ArchetypeId::INVALID {
-            meta.generation = IdentifierMask::inc_masked_high_by(meta.generation, generations);
-            true
+    /// # Safety
+    ///
+    /// `index` must be a valid index
+    unsafe fn force_get_meta_mut(&mut self, index: usize) -> &mut EntityMeta {
+        if index >= self.meta.len() {
+            self.resize_meta_for_index_risky(index)
         } else {
-            false
+            // SAFETY: index is in bounds
+            unsafe { self.meta.get_unchecked_mut(index) }
         }
     }
 
+    /// Changes the size of [`Self::meta`] to support this index.
+    /// This is risky because it assumes the index is not already in bounds.
+    ///
+    /// This is only used in `force_get_meta_mut` just to help branch prediction.
+    // TODO: Hint unlikely instead of #[cold] once it is stabilized.
+    #[cold]
+    fn resize_meta_for_index_risky(&mut self, index: usize) -> &mut EntityMeta {
+        self.meta.resize(index + 1, EntityMeta::FRESH);
+        // SAFETY: We just added it
+        unsafe { self.meta.get_unchecked_mut(index) }
+    }
+
     /// Get the [`Entity`] with a given id, if it exists in this [`Entities`] collection
-    /// Returns `None` if this [`Entity`] is outside of the range of currently reserved Entities
+    /// Returns `None` if this [`Entity`] is outside of the range of currently allocated Entities
     ///
     /// Note: This method may return [`Entities`](Entity) which are currently free
     /// Note that [`contains`](Entities::contains) will correctly return false for freed
@@ -708,10 +733,11 @@ impl Entities {
     #[inline]
     pub(crate) fn set_spawned_or_despawned_by(&mut self, index: u32, caller: MaybeLocation) {
         caller.map(|caller| {
-            let meta = self
-                .meta
-                .get_mut(index as usize)
-                .expect("Entity index invalid");
+            if !self.allocator.is_valid_index(index) {
+                panic!("Entity index invalid")
+            }
+            // SAFETY: We just checked that it is valid
+            let meta = unsafe { self.force_get_meta_mut(index as usize) };
             meta.spawned_or_despawned_by = MaybeLocation::new(Some(caller));
         });
     }
@@ -904,24 +930,16 @@ mod tests {
         assert_eq!(0x00dd_00ff, C4);
     }
 
-    #[test]
-    fn reserve_generations() {
-        let mut entities = Entities::new();
-        let entity = entities.alloc();
-        entities.free(entity);
-
-        assert!(entities.reserve_generations(entity.index(), 1));
-    }
-
     #[test]
     fn reserve_generations_and_alloc() {
         const GENERATIONS: u32 = 10;
 
         let mut entities = Entities::new();
         let entity = entities.alloc();
-        entities.free(entity);
-        assert!(entities.reserve_generations(entity.index(), GENERATIONS));
+        assert!(entities
+            .free_current_and_future_generations(entity, GENERATIONS)
+            .is_some());
 
         // The very next entity allocated should be a further generation on the same index
         let next_entity = entities.alloc();

From 0c3507ed810146c3a2b4c0baf4952237508c128c Mon Sep 17 00:00:00 2001
From: Elliott Pierce
Date: Mon, 31 Mar 2025 18:54:12 -0400
Subject: [PATCH 017/113] RemoteEntities

---
 crates/bevy_ecs/src/entity/allocator.rs |  6 +++++
 crates/bevy_ecs/src/entity/mod.rs       | 34 ++++++++++++++++++++++++-
 2 files changed, 39 insertions(+), 1 deletion(-)

diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs
index 123d3dc12e296..d7714d1640391 100644
--- a/crates/bevy_ecs/src/entity/allocator.rs
+++ b/crates/bevy_ecs/src/entity/allocator.rs
@@ -417,6 +417,7 @@ unsafe impl EntitySetIterator for AllocEntitiesIterator<'_> {}
 
 /// This is a stripped down version of [`Allocator`] that operates on fewer assumptions.
 /// As a result, using this will be slower than [`Allocator`] but this offers additional freedoms.
+#[derive(Clone)]
 pub struct RemoteAllocator {
     shared: Weak<SharedAllocator>,
 }
@@ -432,6 +433,11 @@ impl RemoteAllocator {
             .map(|allocator| allocator.remote_alloc())
     }
 
+    /// Returns true if this [`RemoteAllocator`] is no longer connected to its source [`Allocator`].
+    pub fn is_closed(&self) -> bool {
+        self.shared.strong_count() == 0
+    }
+
     /// Creates a new [`RemoteAllocator`] with the provided [`Allocator`] source.
     /// If the source is ever destroyed, [`Self::alloc`] will yield [`None`].
     pub fn new(source: &Allocator) -> Self {
diff --git a/crates/bevy_ecs/src/entity/mod.rs b/crates/bevy_ecs/src/entity/mod.rs
index e7bbaeab7b7f8..bae8b5249043c 100644
--- a/crates/bevy_ecs/src/entity/mod.rs
+++ b/crates/bevy_ecs/src/entity/mod.rs
@@ -41,7 +41,7 @@ mod clone_entities;
 mod entity_set;
 mod map_entities;
 
-use allocator::Allocator;
+use allocator::{Allocator, RemoteAllocator};
 #[cfg(feature = "bevy_reflect")]
 use bevy_reflect::Reflect;
 #[cfg(all(feature = "bevy_reflect", feature = "serialize"))]
@@ -773,6 +773,38 @@ impl Entities {
     }
 }
 
+/// A remote version of [`Entities`] with limited functionality.
+#[derive(Clone)]
+pub struct RemoteEntities {
+    allocator: RemoteAllocator,
+}
+
+impl RemoteEntities {
+    /// Allocates an [`Entity`] if the source [`Entities`] is still linked to this [`RemoteEntities`].
+    pub fn alloc(&self) -> Option<Entity> {
+        self.allocator.alloc()
+    }
+
+    /// Returns true if this [`RemoteEntities`] is no longer connected to its source [`Entities`],
+    /// i.e. its source has been dropped or [`Entities::clear`]ed.
+    ///
+    /// Note that even when this returns `false`, an immediate call to [`Self::alloc`] may still
+    /// return `None`, as the source can close at any time.
+    pub fn is_closed(&self) -> bool {
+        self.allocator.is_closed()
+    }
+
+    /// Creates a new [`RemoteEntities`] with this [`Entities`] as its source.
+    /// Note that this can be closed at any time,
+    /// so before using an allocated [`Entity`],
+    /// check [`is_closed`](Self::is_closed).
+    pub fn new(source: &Entities) -> Self {
+        Self {
+            allocator: RemoteAllocator::new(&source.allocator),
+        }
+    }
+}
+
 /// An error that occurs when a specified [`Entity`] does not exist.
 #[derive(thiserror::Error, Debug, Clone, Copy, PartialEq, Eq)]
 #[error("The entity with ID {entity} {details}")]

From 15e36d57248102e121d65056ce7340fa4259af42 Mon Sep 17 00:00:00 2001
From: Elliott Pierce
Date: Mon, 31 Mar 2025 18:58:10 -0400
Subject: [PATCH 018/113] implement Debug again

---
 crates/bevy_ecs/src/entity/allocator.rs | 9 +++++++++
 crates/bevy_ecs/src/entity/mod.rs       | 2 +-
 2 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs
index d7714d1640391..5e12d74d8a451 100644
--- a/crates/bevy_ecs/src/entity/allocator.rs
+++ b/crates/bevy_ecs/src/entity/allocator.rs
@@ -390,6 +390,15 @@ impl Allocator {
     }
 }
 
+impl core::fmt::Debug for Allocator {
+    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+        f.debug_struct(core::any::type_name::<Self>())
+            .field("total_indices", &self.total_entity_indices())
+            .field("total_pending", &self.num_pending())
+            .finish()
+    }
+}
+
 /// An [`Iterator`] returning a sequence of [`Entity`] values from an [`Allocator`].
pub struct AllocEntitiesIterator<'a> { allocator: &'a Allocator, diff --git a/crates/bevy_ecs/src/entity/mod.rs b/crates/bevy_ecs/src/entity/mod.rs index bae8b5249043c..b28b0d9a83971 100644 --- a/crates/bevy_ecs/src/entity/mod.rs +++ b/crates/bevy_ecs/src/entity/mod.rs @@ -487,7 +487,7 @@ impl SparseSetIndex for Entity { /// - The location of the entity's components in memory (via [`EntityLocation`]) /// /// [`World`]: crate::world::World -// #[derive(Debug)] +#[derive(Debug)] pub struct Entities { meta: Vec, allocator: Allocator, From c1bff6c39a9fb59d73c8a69e2da777d4a693c53b Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Mon, 31 Mar 2025 21:05:08 -0400 Subject: [PATCH 019/113] fixed bug where entity length was wrong --- crates/bevy_ecs/src/entity/allocator.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 5e12d74d8a451..8a8f0f484e024 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -290,7 +290,7 @@ impl SharedAllocator { 0 } } else { - (next - 1) as u64 + next as u64 } } From 867baff7099ed758c5dd983b11580bc118ace1e8 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Mon, 31 Mar 2025 21:07:14 -0400 Subject: [PATCH 020/113] don't test unimplemented --- crates/bevy_ecs/src/lib.rs | 92 -------------------------------------- 1 file changed, 92 deletions(-) diff --git a/crates/bevy_ecs/src/lib.rs b/crates/bevy_ecs/src/lib.rs index 79ba938f4b749..b078dceff0b9f 100644 --- a/crates/bevy_ecs/src/lib.rs +++ b/crates/bevy_ecs/src/lib.rs @@ -151,7 +151,6 @@ mod tests { use core::{ any::TypeId, marker::PhantomData, - num::NonZero, sync::atomic::{AtomicUsize, Ordering}, }; use std::sync::Mutex; @@ -1696,97 +1695,6 @@ mod tests { assert_eq!(0, query_min_size![(&A, &B), Or<(Changed, Changed)>]); } - #[test] - fn insert_or_spawn_batch() { - let mut world = World::default(); - let e0 = world.spawn(A(0)).id(); - let e1 = Entity::from_raw(1); - - let values = vec![(e0, (B(0), C)), (e1, (B(1), C))]; - - #[expect( - deprecated, - reason = "This needs to be supported for now, and therefore still needs the test." - )] - world.insert_or_spawn_batch(values).unwrap(); - - assert_eq!( - world.get::(e0), - Some(&A(0)), - "existing component was preserved" - ); - assert_eq!( - world.get::(e0), - Some(&B(0)), - "pre-existing entity received correct B component" - ); - assert_eq!( - world.get::(e1), - Some(&B(1)), - "new entity was spawned and received correct B component" - ); - assert_eq!( - world.get::(e0), - Some(&C), - "pre-existing entity received C component" - ); - assert_eq!( - world.get::(e1), - Some(&C), - "new entity was spawned and received C component" - ); - } - - #[test] - fn insert_or_spawn_batch_invalid() { - let mut world = World::default(); - let e0 = world.spawn(A(0)).id(); - let e1 = Entity::from_raw(1); - let e2 = world.spawn_empty().id(); - let invalid_e2 = - Entity::from_raw_and_generation(e2.index(), NonZero::::new(2).unwrap()); - - let values = vec![(e0, (B(0), C)), (e1, (B(1), C)), (invalid_e2, (B(2), C))]; - - #[expect( - deprecated, - reason = "This needs to be supported for now, and therefore still needs the test." 
- )] - let result = world.insert_or_spawn_batch(values); - - assert_eq!( - result, - Err(vec![invalid_e2]), - "e2 failed to be spawned or inserted into" - ); - - assert_eq!( - world.get::(e0), - Some(&A(0)), - "existing component was preserved" - ); - assert_eq!( - world.get::(e0), - Some(&B(0)), - "pre-existing entity received correct B component" - ); - assert_eq!( - world.get::(e1), - Some(&B(1)), - "new entity was spawned and received correct B component" - ); - assert_eq!( - world.get::(e0), - Some(&C), - "pre-existing entity received C component" - ); - assert_eq!( - world.get::(e1), - Some(&C), - "new entity was spawned and received C component" - ); - } - #[test] fn insert_batch() { let mut world = World::default(); From cb298fa58d0d5813d12cfb2b3c6ad6e22087bed3 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Mon, 31 Mar 2025 22:25:58 -0400 Subject: [PATCH 021/113] rename for clarity free and pending are now more distinct --- crates/bevy_ecs/src/entity/allocator.rs | 42 ++++++++++++------------- crates/bevy_ecs/src/entity/mod.rs | 4 +-- 2 files changed, 23 insertions(+), 23 deletions(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 8a8f0f484e024..839c33636e493 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -11,7 +11,7 @@ use crate::query::DebugCheckedUnwrap; use super::{Entity, EntitySetIterator}; -/// This is the item we store in the pending list. +/// This is the item we store in the free list. /// It might not be init (if it's out of bounds). type Slot = MaybeUninit; @@ -131,21 +131,21 @@ impl Chunk { } /// This is conceptually like a `Vec` that stores entities pending reuse. -struct PendingBuffer { - /// The chunks of the pending list. - /// Put end-to-end, these chunks form a list of pending entities. +struct FreeBuffer { + /// The chunks of the free list. + /// Put end-to-end, these chunks form a list of free entities. chunks: [Chunk; Chunk::NUM_CHUNKS as usize], - /// The length of the pending buffer + /// The length of the free buffer len: AtomicIsize, } -impl PendingBuffer { - /// Gets the number of pending entities. +impl FreeBuffer { + /// Gets the number of free entities. /// /// # Safety /// /// For this to be accurate, this must not be called during a [`Self::free`]. - unsafe fn num_pending(&self) -> u64 { + unsafe fn num_free(&self) -> u64 { self.len.load(Ordering::Relaxed).max(0) as u64 } @@ -174,7 +174,7 @@ impl PendingBuffer { self.len.store(new_len, Ordering::Relaxed); } - /// Allocates an [`Entity`] from the pending list if one is available. + /// Allocates an [`Entity`] from the free list if one is available. /// /// # Safety /// @@ -195,7 +195,7 @@ impl PendingBuffer { }) } - /// Allocates an [`Entity`] from the pending list if one is available and it is safe to do so. + /// Allocates an [`Entity`] from the free list if one is available and it is safe to do so. fn remote_alloc(&self) -> Option { // The goal is the same as `alloc`, so what's the difference? // `alloc` knows `free` is not being called, but this does not. @@ -257,7 +257,7 @@ impl PendingBuffer { } } -impl Drop for PendingBuffer { +impl Drop for FreeBuffer { fn drop(&mut self) { for index in 0..Chunk::NUM_CHUNKS { // SAFETY: we have `&mut` @@ -269,7 +269,7 @@ impl Drop for PendingBuffer { /// This stores allocation data shared by all entity allocators. 
struct SharedAllocator { /// The entities pending reuse - pending: PendingBuffer, + free: FreeBuffer, /// The next value of [`Entity::index`] to give out if needed. next_entity_index: AtomicU32, /// If true, the [`Self::next_entity_index`] has been incremented before, @@ -316,23 +316,23 @@ impl SharedAllocator { /// /// # Safety /// - /// This must not conflict with [`PendingBuffer::free`] calls. + /// This must not conflict with [`FreeBuffer::free`] calls. unsafe fn alloc(&self) -> Entity { // SAFETY: assured by caller - unsafe { self.pending.alloc() }.unwrap_or_else(|| self.alloc_new_index()) + unsafe { self.free.alloc() }.unwrap_or_else(|| self.alloc_new_index()) } /// Allocates a new [`Entity`]. /// This will only try to reuse a freed index if it is safe to do so. fn remote_alloc(&self) -> Entity { - self.pending + self.free .remote_alloc() .unwrap_or_else(|| self.alloc_new_index()) } fn new() -> Self { Self { - pending: PendingBuffer::new(), + free: FreeBuffer::new(), next_entity_index: AtomicU32::new(0), entity_index_given: AtomicBool::new(false), } @@ -362,10 +362,10 @@ impl Allocator { self.shared.total_entity_indices() } - /// The number of pending entities. - pub fn num_pending(&self) -> u64 { + /// The number of free entities. + pub fn num_free(&self) -> u64 { // SAFETY: `free` is not being called since it takes `&mut self`. - unsafe { self.shared.pending.num_pending() } + unsafe { self.shared.free.num_free() } } /// Returns whether or not the index is valid in this allocator. @@ -377,7 +377,7 @@ impl Allocator { pub fn free(&mut self, entity: Entity) { // SAFETY: We have `&mut self`. unsafe { - self.shared.pending.free(entity); + self.shared.free.free(entity); } } @@ -394,7 +394,7 @@ impl core::fmt::Debug for Allocator { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct(core::any::type_name::()) .field("total_indices", &self.total_entity_indices()) - .field("total_pending", &self.num_pending()) + .field("total_free", &self.num_free()) .finish() } } diff --git a/crates/bevy_ecs/src/entity/mod.rs b/crates/bevy_ecs/src/entity/mod.rs index b28b0d9a83971..a205ecb088f97 100644 --- a/crates/bevy_ecs/src/entity/mod.rs +++ b/crates/bevy_ecs/src/entity/mod.rs @@ -595,7 +595,7 @@ impl Entities { /// Ensure at least `n` allocations can succeed without reallocating. pub fn reserve(&mut self, additional: u32) { - let shortfall = (additional as u64).saturating_sub(self.allocator.num_pending()); + let shortfall = (additional as u64).saturating_sub(self.allocator.num_free()); self.meta.reserve(shortfall as usize); } @@ -719,7 +719,7 @@ impl Entities { /// The count of currently allocated entities. #[inline] pub fn len(&self) -> u64 { - self.allocator.total_entity_indices() - self.allocator.num_pending() + self.allocator.total_entity_indices() - self.allocator.num_free() } /// Checks if any entity is currently active. 
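
A note for readers following the free-list design at this point in the series: `FreeBuffer::len`
is an `AtomicIsize` doing double duty as a length and a lock. The exclusive allocation path may
freely drive it negative, while `free` parks it at -1 so that remote allocation backs off. Below
is a minimal model of that counter protocol. This is a sketch only: `FreeLen` and `write_slot`
are illustrative stand-ins rather than types from the patch, and the memory orderings are
simplified relative to the real code.

    use std::sync::atomic::{AtomicIsize, Ordering};

    /// Values >= 0 count free entities; -1 marks an in-progress `free`.
    struct FreeLen(AtomicIsize);

    impl FreeLen {
        /// Exclusive-world path: no `free` can run concurrently (it needs `&mut`
        /// upstream), so a negative result just means "empty, use a fresh index".
        fn alloc(&self) -> Option<isize> {
            let len = self.0.fetch_sub(1, Ordering::AcqRel);
            (len > 0).then(|| len - 1) // index of the claimed slot
        }

        /// Remote path: must refuse both the empty (0) and locked (-1) states.
        fn remote_alloc(&self) -> Option<isize> {
            self.0
                .fetch_update(Ordering::AcqRel, Ordering::Acquire, |len| {
                    (len > 0).then(|| len - 1)
                })
                .ok()
                .map(|prev| prev - 1)
        }

        /// `free` swaps in -1 so `remote_alloc` backs off, writes the freed
        /// entity into slot `len`, then publishes the new length.
        fn free(&self, write_slot: impl FnOnce(isize)) {
            let len = self.0.swap(-1, Ordering::AcqRel).max(0);
            write_slot(len);
            self.0.store(len + 1, Ordering::Release);
        }
    }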
From 86c0cda6311c151d05c56b152b4b750a77dcf370 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Mon, 31 Mar 2025 22:53:59 -0400 Subject: [PATCH 022/113] track pending and flush --- crates/bevy_ecs/src/entity/mod.rs | 105 +++++++++++++++++++++++++++++- 1 file changed, 104 insertions(+), 1 deletion(-) diff --git a/crates/bevy_ecs/src/entity/mod.rs b/crates/bevy_ecs/src/entity/mod.rs index a205ecb088f97..0e6adf0b8fed4 100644 --- a/crates/bevy_ecs/src/entity/mod.rs +++ b/crates/bevy_ecs/src/entity/mod.rs @@ -86,6 +86,8 @@ use crate::{ storage::{SparseSetIndex, TableId, TableRow}, }; use alloc::vec::Vec; +use bevy_platform_support::sync::Arc; +use concurrent_queue::ConcurrentQueue; use core::{fmt, hash::Hash, num::NonZero, panic::Location}; use log::warn; @@ -479,6 +481,95 @@ impl SparseSetIndex for Entity { } } +/// Stores entities that need to be flushed. +struct RemotePending { + pending: Arc>, +} + +impl RemotePending { + fn new() -> Self { + Self { + pending: Arc::new(ConcurrentQueue::unbounded()), + } + } + + fn queue_flush(&self, entity: Entity) { + // We don't need the result. If it's closed it doesn't matter, and it can't be full. + _ = self.pending.push(entity); + } +} + +struct Pending { + remote: RemotePending, + #[cfg(feature = "std")] + local: bevy_utils::Parallel>, +} + +impl Pending { + fn new() -> Self { + #[cfg(feature = "std")] + { + Self { + remote: RemotePending::new(), + local: bevy_utils::Parallel::default(), + } + } + + #[cfg(not(feature = "std"))] + { + Self { + remote: RemotePending::new(), + } + } + } + + fn queue_flush(&self, entity: Entity) { + #[cfg(feature = "std")] + { + self.local.scope(|pending| pending.push(entity)); + } + + #[cfg(not(feature = "std"))] + { + self.remote.queue_flush(entity) + } + } + + fn flush_local(&mut self, mut flusher: impl FnMut(Entity)) { + #[cfg(feature = "std")] + let pending = { self.local.iter_mut().flat_map(|pending| pending.drain(..)) }; + + #[cfg(not(feature = "std"))] + let pending = { self.remote.pending.try_iter() }; + + for pending in pending { + flusher(pending) + } + } + + fn flush_all(&mut self, mut flusher: impl FnMut(Entity)) { + let pending = { self.remote.pending.try_iter() }; + + #[cfg(feature = "std")] + let pending = { + self.local + .iter_mut() + .flat_map(|pending| pending.drain(..)) + .chain(pending) + }; + + for pending in pending { + flusher(pending) + } + } +} + +impl fmt::Debug for Pending { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "opaque pending entities") + } +} + /// A [`World`]'s internal metadata store on all of its entities. /// /// Contains metadata on: @@ -491,6 +582,7 @@ impl SparseSetIndex for Entity { pub struct Entities { meta: Vec, allocator: Allocator, + pending: Pending, } impl Entities { @@ -498,6 +590,7 @@ impl Entities { Entities { meta: Vec::new(), allocator: Allocator::new(), + pending: Pending::new(), } } @@ -693,7 +786,17 @@ impl Entities { /// /// Note: freshly-allocated entities (ones which don't come from the pending list) are guaranteed /// to be initialized with the invalid archetype. - pub unsafe fn flush(&mut self, mut _init: impl FnMut(Entity, &mut EntityLocation)) {} + pub unsafe fn flush(&mut self, mut init: impl FnMut(Entity, &mut EntityLocation)) { + let total = self.allocator.total_entity_indices() as usize; + self.meta.resize(total, EntityMeta::FRESH); + self.pending.flush_local(|entity| { + // SAFETY: `meta` has been resized to include all entities. 
+            let meta = unsafe { self.meta.get_unchecked_mut(entity.index() as usize) };
+            if meta.generation == entity.generation {
+                init(entity, &mut meta.location);
+            }
+        });
+    }
 
     /// Flushes all reserved entities to an "invalid" state. Attempting to retrieve them will return `None`
     /// unless they are later populated with a valid archetype.

From c4e2f56b94986a8ef9f72663b53db7210c302f9d Mon Sep 17 00:00:00 2001
From: Elliott Pierce
Date: Mon, 31 Mar 2025 22:58:58 -0400
Subject: [PATCH 023/113] clarified flush behavior

---
 crates/bevy_ecs/src/entity/mod.rs | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/crates/bevy_ecs/src/entity/mod.rs b/crates/bevy_ecs/src/entity/mod.rs
index 0e6adf0b8fed4..c4fd02012148b 100644
--- a/crates/bevy_ecs/src/entity/mod.rs
+++ b/crates/bevy_ecs/src/entity/mod.rs
@@ -596,7 +596,8 @@ impl Entities {
 
     /// Reserve entity IDs concurrently.
     ///
-    /// Storage for entity generation and location is lazily allocated by calling [`flush`](Entities::flush).
+    /// Storage for entity generation and location is lazily allocated by calling [`flush`](Entities::flush),
+    /// but, if desired, caller may [`set`](Self::set) the [`EntityLocation`] prior to the flush instead.
     pub fn reserve_entities(&self, count: u32) -> allocator::AllocEntitiesIterator {
         self.alloc_many(count)
     }
@@ -609,6 +610,8 @@ impl Entities {
     }
 
     /// Allocate an entity ID directly.
+    /// Caller is responsible for [`set`](Self::set)ting the [`EntityLocation`] if desired,
+    /// which must be done before [`get`](Self::get)ing its [`EntityLocation`].
     pub fn alloc(&self) -> Entity {
         self.allocator.alloc()
     }
@@ -714,8 +717,6 @@ impl Entities {
                 return None;
             }
             Some(meta.location)
-        } else if self.allocator.is_valid_index(entity.index()) {
-            Some(EntityLocation::INVALID)
         } else {
             None
         }
@@ -792,7 +793,7 @@ impl Entities {
         self.pending.flush_local(|entity| {
             // SAFETY: `meta` has been resized to include all entities.
             let meta = unsafe { self.meta.get_unchecked_mut(entity.index() as usize) };
-            if meta.generation == entity.generation {
+            if meta.generation == entity.generation && meta.location != EntityLocation::INVALID {
                 init(entity, &mut meta.location);
             }
         });

From 7333626c7c0a2c6e2175bff3cc60019b8ac45685 Mon Sep 17 00:00:00 2001
From: Elliott Pierce
Date: Mon, 31 Mar 2025 23:11:30 -0400
Subject: [PATCH 024/113] allow remote pending to be flushed

---
 crates/bevy_app/src/sub_app.rs    |  1 +
 crates/bevy_ecs/src/entity/mod.rs | 11 +++++++++++
 crates/bevy_ecs/src/world/mod.rs  | 14 ++++++++++++++
 3 files changed, 26 insertions(+)

diff --git a/crates/bevy_app/src/sub_app.rs b/crates/bevy_app/src/sub_app.rs
index ea51931b3507c..f3e2507f8495c 100644
--- a/crates/bevy_app/src/sub_app.rs
+++ b/crates/bevy_app/src/sub_app.rs
@@ -142,6 +142,7 @@ impl SubApp {
     /// Runs the default schedule and updates internal component trackers.
     pub fn update(&mut self) {
         self.run_default_schedule();
+        self.world.entities().queue_remote_pending_to_be_flushed();
         self.world.clear_trackers();
     }
 
diff --git a/crates/bevy_ecs/src/entity/mod.rs b/crates/bevy_ecs/src/entity/mod.rs
index c4fd02012148b..0c5fde5481775 100644
--- a/crates/bevy_ecs/src/entity/mod.rs
+++ b/crates/bevy_ecs/src/entity/mod.rs
@@ -777,6 +777,17 @@ impl Entities {
         }
     }
 
+    /// Entities reserved via [`RemoteEntities::reserve`] may or may not be flushed naturally.
+    /// Before using an entity reserved remotely, either set its location manually (usually through [`flush_entity`](crate::world::World::flush_entity)),
+    /// or call this method to queue remotely reserved entities to be flushed with the rest.
+    pub fn queue_remote_pending_to_be_flushed(&self) {
+        #[cfg(feature = "std")]
+        {
+            let remote = self.pending.remote.pending.try_iter();
+            self.pending.local.scope(|pending| pending.extend(remote));
+        }
+    }
+
     /// Allocates space for entities previously reserved with [`reserve_entity`](Entities::reserve_entity) or
     /// [`reserve_entities`](Entities::reserve_entities), then initializes each one using the supplied function.
     ///
diff --git a/crates/bevy_ecs/src/world/mod.rs b/crates/bevy_ecs/src/world/mod.rs
index 5aa87524e3174..89c7b52fd195b 100644
--- a/crates/bevy_ecs/src/world/mod.rs
+++ b/crates/bevy_ecs/src/world/mod.rs
@@ -2918,6 +2918,20 @@ impl World {
         }
     }
 
+    /// If this entity is not in any [`Archetype`](crate::archetype::Archetype), this will flush it to the empty archetype.
+    /// Returns `Some` with the new [`EntityLocation`] if the entity is now valid in the empty archetype.
+    pub fn flush_entity(&mut self, entity: Entity) -> Option<EntityLocation> {
+        if self.entities.contains(entity) && self.entities.get(entity).is_none() {
+            let empty_archetype = self.archetypes.empty_mut();
+            let table = &mut self.storages.tables[empty_archetype.table_id()];
+            // SAFETY: It's empty so no values need to be written
+            let new_location = unsafe { empty_archetype.allocate(entity, table.allocate(entity)) };
+            Some(new_location)
+        } else {
+            None
+        }
+    }
+
     /// Applies any commands in the world's internal [`CommandQueue`].
     /// This does not apply commands from any systems, only those stored in the world.
     ///

From a8ce70dce0bff4fdd64f479feb14e79fe64dc36b Mon Sep 17 00:00:00 2001
From: Elliott Pierce
Date: Mon, 31 Mar 2025 23:20:10 -0400
Subject: [PATCH 025/113] queue pending

---
 crates/bevy_ecs/src/entity/mod.rs | 38 ++++++++++++++++---------------
 1 file changed, 20 insertions(+), 18 deletions(-)

diff --git a/crates/bevy_ecs/src/entity/mod.rs b/crates/bevy_ecs/src/entity/mod.rs
index 0c5fde5481775..ac86c20d9d27b 100644
--- a/crates/bevy_ecs/src/entity/mod.rs
+++ b/crates/bevy_ecs/src/entity/mod.rs
@@ -482,6 +482,7 @@ impl SparseSetIndex for Entity {
 }
 
 /// Stores entities that need to be flushed.
+#[derive(Clone)]
 struct RemotePending {
     pending: Arc<ConcurrentQueue<Entity>>,
 }
@@ -543,23 +544,7 @@ impl Pending {
         let pending = { self.remote.pending.try_iter() };
 
         for pending in pending {
-            flusher(pending)
-        }
-    }
-
-    fn flush_all(&mut self, mut flusher: impl FnMut(Entity)) {
-        let pending = { self.remote.pending.try_iter() };
-
-        #[cfg(feature = "std")]
-        let pending = {
-            self.local
-                .iter_mut()
-                .flat_map(|pending| pending.drain(..))
-                .chain(pending)
-        };
-
-        for pending in pending {
-            flusher(pending)
+            flusher(pending);
         }
     }
 }
@@ -606,7 +591,9 @@ impl Entities {
     ///
     /// Equivalent to `self.reserve_entities(1).next().unwrap()`, but more efficient.
     pub fn reserve_entity(&self) -> Entity {
-        self.alloc()
+        let entity = self.alloc();
+        self.pending.queue_flush(entity);
+        entity
     }
 
     /// Allocate an entity ID directly.
@@ -892,14 +879,28 @@ impl Entities {
 #[derive(Clone)]
 pub struct RemoteEntities {
     allocator: RemoteAllocator,
+    pending: RemotePending,
 }
 
 impl RemoteEntities {
     /// Allocates an [`Entity`] if the source [`Entities`] is still linked to this [`RemoteEntities`].
+    ///
+    /// The caller takes responsibility for eventually [`set`](Entities::set)ing the [`EntityLocation`],
+    /// usually via [`flush_entity`](crate::world::World::flush_entity).
     pub fn alloc(&self) -> Option<Entity> {
         self.allocator.alloc()
     }
 
+    /// Reserves an [`Entity`] if the source [`Entities`] is still linked to this [`RemoteEntities`].
+    ///
+    /// This also queues it to be flushed after [`Entities::queue_remote_pending_to_be_flushed`] is called.
+    /// If waiting for that is not an option, it is also possible to [`set`](Entities::set) the [`EntityLocation`] manually,
+    /// usually via [`flush_entity`](crate::world::World::flush_entity).
+    pub fn reserve(&self) -> Option<Entity> {
+        self.alloc()
+            .inspect(|entity| self.pending.queue_flush(*entity))
+    }
+
     /// Returns true if this [`RemoteEntities`] is no longer connected to its source [`Entities`],
     /// i.e. its source has been dropped or [`Entities::clear`]ed.
     ///
     /// Note that even when this returns `false`, an immediate call to [`Self::alloc`] may still
     /// return `None`, as the source can close at any time.
@@ -916,6 +917,7 @@ impl RemoteEntities {
     pub fn new(source: &Entities) -> Self {
         Self {
             allocator: RemoteAllocator::new(&source.allocator),
+            pending: source.pending.remote.clone(),
         }
     }
 }

From 3abdac72a3577780ce56a98968d3db5c507f3cb4 Mon Sep 17 00:00:00 2001
From: Elliott Pierce
Date: Mon, 31 Mar 2025 23:26:34 -0400
Subject: [PATCH 026/113] flush reserve_entities

---
 crates/bevy_ecs/src/entity/mod.rs | 35 +++++++++++++++++++++++++++++--
 1 file changed, 33 insertions(+), 2 deletions(-)

diff --git a/crates/bevy_ecs/src/entity/mod.rs b/crates/bevy_ecs/src/entity/mod.rs
index ac86c20d9d27b..2c92379e35cbe 100644
--- a/crates/bevy_ecs/src/entity/mod.rs
+++ b/crates/bevy_ecs/src/entity/mod.rs
@@ -555,6 +555,34 @@ impl fmt::Debug for Pending {
     }
 }
 
+/// An [`Iterator`] returning a sequence of [`Entity`] values from [`Entities`].
+/// These will be flushed.
+pub struct ReserveEntitiesIterator<'a> {
+    allocator: allocator::AllocEntitiesIterator<'a>,
+    entities: &'a Entities,
+}
+
+impl<'a> Iterator for ReserveEntitiesIterator<'a> {
+    type Item = Entity;
+
+    fn next(&mut self) -> Option<Entity> {
+        self.allocator
+            .next()
+            .inspect(|entity| self.entities.pending.queue_flush(*entity))
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.allocator.size_hint()
+    }
+}
+
+impl<'a> core::iter::FusedIterator for ReserveEntitiesIterator<'a> {}
+
+impl<'a> ExactSizeIterator for ReserveEntitiesIterator<'a> {}
+
+// SAFETY: Newly reserved entity values are unique.
+unsafe impl EntitySetIterator for ReserveEntitiesIterator<'_> {}
+
 /// A [`World`]'s internal metadata store on all of its entities.
 ///
 /// Contains metadata on:
@@ -583,8 +611,11 @@ impl Entities {
     ///
     /// Storage for entity generation and location is lazily allocated by calling [`flush`](Entities::flush),
     /// but, if desired, caller may [`set`](Self::set) the [`EntityLocation`] prior to the flush instead.
-    pub fn reserve_entities(&self, count: u32) -> allocator::AllocEntitiesIterator {
-        self.alloc_many(count)
+    pub fn reserve_entities(&self, count: u32) -> ReserveEntitiesIterator {
+        ReserveEntitiesIterator {
+            allocator: self.alloc_many(count),
+            entities: self,
+        }
     }
 
     /// Reserve one entity ID concurrently.
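
To see how the pieces introduced so far are meant to compose, here is a hypothetical usage
sketch (not part of the patch): it assumes the `RemoteEntities`, `flush_entity`, and
`queue_remote_pending_to_be_flushed` APIs added above, and that `RemoteEntities` is `Send`.

    use bevy_ecs::{entity::RemoteEntities, world::World};

    fn remote_reserve_demo() {
        let mut world = World::new();
        let remote = RemoteEntities::new(world.entities());

        // Another thread can reserve an id without any access to the `World`.
        let reserved = std::thread::spawn(move || remote.reserve())
            .join()
            .unwrap();

        if let Some(entity) = reserved {
            // Option A: move remotely queued reservations into the local flush
            // queue, so the next flush gives them a location (`SubApp::update`
            // does this each frame in the patch above).
            world.entities().queue_remote_pending_to_be_flushed();

            // Option B: give this one entity a location in the empty archetype
            // right away.
            world.flush_entity(entity);
        }
    }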
From f72925314ec7921c63c5f9aec9d7fc27ce693c83 Mon Sep 17 00:00:00 2001
From: Elliott Pierce
Date: Mon, 31 Mar 2025 23:26:59 -0400
Subject: [PATCH 027/113] naming consistency

---
 crates/bevy_ecs/src/entity/mod.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/crates/bevy_ecs/src/entity/mod.rs b/crates/bevy_ecs/src/entity/mod.rs
index 2c92379e35cbe..85d1716c9a62e 100644
--- a/crates/bevy_ecs/src/entity/mod.rs
+++ b/crates/bevy_ecs/src/entity/mod.rs
@@ -613,7 +613,7 @@ impl Entities {
     /// but, if desired, caller may [`set`](Self::set) the [`EntityLocation`] prior to the flush instead.
     pub fn reserve_entities(&self, count: u32) -> ReserveEntitiesIterator {
         ReserveEntitiesIterator {
-            allocator: self.alloc_many(count),
+            allocator: self.alloc_entities(count),
             entities: self,
         }
     }
@@ -635,7 +635,7 @@ impl Entities {
     }
 
     /// A more efficient way to [`alloc`](Self::alloc) multiple entities.
-    pub fn alloc_many(&self, count: u32) -> allocator::AllocEntitiesIterator {
+    pub fn alloc_entities(&self, count: u32) -> allocator::AllocEntitiesIterator {
         self.allocator.alloc_many(count)
     }

From af9f9dbbcc9b642d4a54d55dcb3a34b7c83a5ffb Mon Sep 17 00:00:00 2001
From: Elliott Pierce
Date: Mon, 31 Mar 2025 23:30:43 -0400
Subject: [PATCH 028/113] iterator drop warning

---
 crates/bevy_ecs/src/entity/allocator.rs | 14 ++++++++++++++
 crates/bevy_ecs/src/entity/mod.rs       |  2 ++
 2 files changed, 16 insertions(+)

diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs
index 839c33636e493..b94b1089e8db5 100644
--- a/crates/bevy_ecs/src/entity/allocator.rs
+++ b/crates/bevy_ecs/src/entity/allocator.rs
@@ -6,6 +6,7 @@ use bevy_platform_support::{
     },
 };
 use core::mem::{ManuallyDrop, MaybeUninit};
+use log::warn;
 
 use crate::query::DebugCheckedUnwrap;
 
@@ -400,6 +401,8 @@ impl core::fmt::Debug for Allocator {
 }
 
 /// An [`Iterator`] returning a sequence of [`Entity`] values from an [`Allocator`].
+///
+/// **NOTE:** Dropping will leak the remaining entities!
 pub struct AllocEntitiesIterator<'a> {
     allocator: &'a Allocator,
     num_left: u32,
@@ -424,6 +427,17 @@ impl<'a> core::iter::FusedIterator for AllocEntitiesIterator<'a> {}
 // SAFETY: Newly reserved entity values are unique.
 unsafe impl EntitySetIterator for AllocEntitiesIterator<'_> {}
 
+impl Drop for AllocEntitiesIterator<'_> {
+    fn drop(&mut self) {
+        if self.num_left > 0 {
+            warn!(
+                "{} entities being leaked via unfinished `AllocEntitiesIterator`",
+                self.num_left
+            );
+        }
+    }
+}
+
 /// This is a stripped down version of [`Allocator`] that operates on fewer assumptions.
 /// As a result, using this will be slower than [`Allocator`] but this offers additional freedoms.
 #[derive(Clone)]
diff --git a/crates/bevy_ecs/src/entity/mod.rs b/crates/bevy_ecs/src/entity/mod.rs
index 85d1716c9a62e..dec1e68cf06c7 100644
--- a/crates/bevy_ecs/src/entity/mod.rs
+++ b/crates/bevy_ecs/src/entity/mod.rs
@@ -557,6 +557,8 @@ impl fmt::Debug for Pending {
 
 /// An [`Iterator`] returning a sequence of [`Entity`] values from [`Entities`].
 /// These will be flushed.
+///
+/// **NOTE:** Dropping will leak the remaining entities!
pub struct ReserveEntitiesIterator<'a> { allocator: allocator::AllocEntitiesIterator<'a>, entities: &'a Entities, From 7edefb5b7cdd3c1b3637705517116eb93c791e34 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Mon, 31 Mar 2025 23:36:12 -0400 Subject: [PATCH 029/113] restore old `get` behavior --- crates/bevy_ecs/src/entity/mod.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/crates/bevy_ecs/src/entity/mod.rs b/crates/bevy_ecs/src/entity/mod.rs index dec1e68cf06c7..eaba31c36298a 100644 --- a/crates/bevy_ecs/src/entity/mod.rs +++ b/crates/bevy_ecs/src/entity/mod.rs @@ -733,7 +733,9 @@ impl Entities { #[inline] pub fn get(&self, entity: Entity) -> Option { if let Some(meta) = self.meta.get(entity.index() as usize) { - if meta.generation != entity.generation { + if meta.generation != entity.generation + || meta.location.archetype_id == ArchetypeId::INVALID + { return None; } Some(meta.location) From e11d173d48547c4c65abd715c9b8bb05eb67d6c3 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Mon, 31 Mar 2025 23:39:43 -0400 Subject: [PATCH 030/113] fixed flushing --- crates/bevy_ecs/src/entity/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/bevy_ecs/src/entity/mod.rs b/crates/bevy_ecs/src/entity/mod.rs index eaba31c36298a..c3f9a024bcf3b 100644 --- a/crates/bevy_ecs/src/entity/mod.rs +++ b/crates/bevy_ecs/src/entity/mod.rs @@ -826,7 +826,7 @@ impl Entities { self.pending.flush_local(|entity| { // SAFETY: `meta` has been resized to include all entities. let meta = unsafe { self.meta.get_unchecked_mut(entity.index() as usize) }; - if meta.generation == entity.generation && meta.location != EntityLocation::INVALID { + if meta.generation == entity.generation && meta.location == EntityLocation::INVALID { init(entity, &mut meta.location); } }); From ee4aca202ae31db1ca3cbe02b6aac04eef7d68c9 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Mon, 31 Mar 2025 23:42:08 -0400 Subject: [PATCH 031/113] performance note --- crates/bevy_ecs/src/entity/allocator.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index b94b1089e8db5..3a54792a7327b 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -442,6 +442,8 @@ impl Drop for AllocEntitiesIterator<'_> { /// As a result, using this will be slower than [`Allocator`] but this offers additional freedoms. #[derive(Clone)] pub struct RemoteAllocator { + // PERF: We could avoid the extra 2 atomic ops from upgrading and then dropping the `Weak`, + // But this provides more safety and allows memory to be freed earlier. shared: Weak, } From fc8b22961ec4c1220dd4a87a3a16432e41e98c24 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Tue, 1 Apr 2025 00:27:45 -0400 Subject: [PATCH 032/113] improve bulk reservation performance via slice iterators --- crates/bevy_ecs/src/entity/allocator.rs | 131 ++++++++++++++++++++++-- 1 file changed, 120 insertions(+), 11 deletions(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 3a54792a7327b..32ca40d155d18 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -71,6 +71,23 @@ impl Chunk { unsafe { (*target).assume_init() } } + /// Gets a slice of indices. + /// + /// # Safety + /// + /// [`Self::set`] must have been called on these indices before. 
+    unsafe fn get_slice(&self, index: u32, ideal_len: u32, index_of_self: u32) -> &[Slot] {
+        let cap = Self::capacity_of_chunk(index_of_self);
+        let after_index_slice_len = cap - index;
+        let len = after_index_slice_len.min(ideal_len) as usize;
+
+        // SAFETY: caller ensures we are init.
+        let head = unsafe { self.ptr().debug_checked_unwrap() };
+
+        // SAFETY: The chunk was allocated via a `Vec` and the indices are within its capacity.
+        unsafe { core::slice::from_raw_parts(head.add(index as usize), len) }
+    }
+
     /// Sets this entity at this index.
     ///
     /// # Safety
     ///
@@ -196,6 +213,38 @@ impl FreeBuffer {
         })
     }
 
+    /// Allocates as many [`Entity`]s from the free list as are available, up to `count`.
+    ///
+    /// # Safety
+    ///
+    /// This must not conflict with [`Self::free`] calls for the duration of the returned iterator.
+    unsafe fn alloc_many(&self, count: u32) -> FreeListSliceIterator {
+        // SAFETY: This will get a valid index because there is no way for `free` to be done at the same time.
+        let len = self.len.fetch_sub(count as isize, Ordering::AcqRel);
+        let index = (len - count as isize).max(0);
+
+        let indices = if index < len {
+            let end = (len - 1) as u32;
+            let start = index as u32;
+            start..=end
+        } else {
+            #[expect(
+                clippy::reversed_empty_ranges,
+                reason = "We intentionally need an empty range"
+            )]
+            {
+                1..=0
+            }
+        };
+
+        // SAFETY: The indices are all less than the length.
+        FreeListSliceIterator {
+            buffer: self,
+            indices,
+            current: [].iter(),
+        }
+    }
+
     /// Allocates an [`Entity`] from the free list if one is available and it is safe to do so.
     fn remote_alloc(&self) -> Option<Entity> {
@@ -267,6 +316,49 @@ impl Drop for FreeBuffer {
     }
 }
 
+/// An iterator over the [`FreeBuffer`].
+///
+/// # Safety
+///
+/// Must be constructed to only iterate slots that have been initialized.
+struct FreeListSliceIterator<'a> {
+    buffer: &'a FreeBuffer,
+    indices: core::ops::RangeInclusive<u32>,
+    current: core::slice::Iter<'a, Slot>,
+}
+
+impl<'a> Iterator for FreeListSliceIterator<'a> {
+    type Item = Entity;
+
+    fn next(&mut self) -> Option<Entity> {
+        if let Some(sliced) = self.current.next() {
+            // SAFETY: Assured by constructor.
+            return unsafe { Some(sliced.assume_init_read()) };
+        }
+
+        let next_index = self.indices.next()?;
+        let (chunk_index, inner_index) = Chunk::get_indices(next_index);
+        // SAFETY: index is correct
+        let chunk = unsafe { self.buffer.chunks.get_unchecked(chunk_index as usize) };
+
+        // SAFETY: Assured by constructor
+        let slice = unsafe { chunk.get_slice(inner_index, self.len() as u32 + 1, chunk_index) };
+        self.indices = (*self.indices.start() + slice.len() as u32 - 1)..=(*self.indices.end());
+
+        self.current = slice.iter();
+        // SAFETY: Assured by constructor.
+        unsafe { Some(self.current.next()?.assume_init_read()) }
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        let len = self.indices.end().saturating_sub(*self.indices.start()) as usize;
+        (len, Some(len))
+    }
+}
+
+impl<'a> ExactSizeIterator for FreeListSliceIterator<'a> {}
+impl<'a> core::iter::FusedIterator for FreeListSliceIterator<'a> {}
+
 /// This stores allocation data shared by all entity allocators.
@@ -323,6 +415,22 @@ impl SharedAllocator {
         unsafe { self.free.alloc() }.unwrap_or_else(|| self.alloc_new_index())
     }
 
+    /// Allocates `count` [`Entity`]s, reusing freed indices if they exist.
+    ///
+    /// # Safety
+    ///
+    /// This must not conflict with [`FreeBuffer::free`] calls for the duration of the iterator.
+    unsafe fn alloc_many(&self, count: u32) -> AllocEntitiesIterator {
+        let reused = self.free.alloc_many(count);
+        let missing = count - reused.len() as u32;
+        let start_new = self.next_entity_index.fetch_add(missing, Ordering::Relaxed);
+        if start_new < missing {
+            self.check_overflow();
+        }
+        let new = start_new..=(start_new + missing);
+        AllocEntitiesIterator { new, reused }
+    }
+
     /// Allocates a new [`Entity`].
     /// This will only try to reuse a freed index if it is safe to do so.
     fn remote_alloc(&self) -> Entity {
@@ -383,11 +491,9 @@ impl Allocator {
     }
 
     /// Allocates `count` entities in an iterator.
-    pub fn alloc_many(&self, entities: u32) -> AllocEntitiesIterator {
-        AllocEntitiesIterator {
-            allocator: self,
-            num_left: entities,
-        }
+    pub fn alloc_many(&self, count: u32) -> AllocEntitiesIterator {
+        // SAFETY: `free` takes `&mut self`, but this lifetime is captured by the iterator.
+        unsafe { self.shared.alloc_many(count) }
     }
 }
 
@@ -404,19 +510,21 @@ impl core::fmt::Debug for Allocator {
 /// **NOTE:** Dropping will leak the remaining entities!
 pub struct AllocEntitiesIterator<'a> {
-    allocator: &'a Allocator,
-    num_left: u32,
+    new: core::ops::RangeInclusive<u32>,
+    reused: FreeListSliceIterator<'a>,
 }
 
 impl<'a> Iterator for AllocEntitiesIterator<'a> {
     type Item = Entity;
 
     fn next(&mut self) -> Option<Entity> {
-        self.num_left.checked_sub(1).map(|_| self.allocator.alloc())
+        self.reused
+            .next()
+            .or_else(|| self.new.next().map(Entity::from_raw))
     }
 
     fn size_hint(&self) -> (usize, Option<usize>) {
-        let len = self.num_left as usize;
+        let len = self.reused.len() + self.new.end().saturating_sub(*self.new.start()) as usize;
         (len, Some(len))
     }
 }
 
 impl Drop for AllocEntitiesIterator<'_> {
     fn drop(&mut self) {
-        if self.num_left > 0 {
+        let leaking = self.len();
+        if leaking > 0 {
             warn!(
                 "{} entities being leaked via unfinished `AllocEntitiesIterator`",
-                self.num_left
+                leaking
             );
         }
     }
 }

From b8ca35100f92c00bbf6845b0b45d0d4628e35152 Mon Sep 17 00:00:00 2001
From: Elliott Pierce
Date: Tue, 1 Apr 2025 11:42:35 -0400
Subject: [PATCH 033/113] remove unimplemented benches

---
 benches/benches/bevy_ecs/world/commands.rs | 44 +++++++++++-----------
 1 file changed, 22 insertions(+), 22 deletions(-)

diff --git a/benches/benches/bevy_ecs/world/commands.rs b/benches/benches/bevy_ecs/world/commands.rs
index 8ad87862eba24..c943b82e6cc6b 100644
--- a/benches/benches/bevy_ecs/world/commands.rs
+++ b/benches/benches/bevy_ecs/world/commands.rs
@@ -92,28 +92,28 @@ pub fn insert_commands(criterion: &mut Criterion) {
             command_queue.apply(&mut world);
         });
     });
-    group.bench_function("insert_or_spawn_batch", |bencher| {
-        let mut world = World::default();
-        let mut command_queue = CommandQueue::default();
-        let mut entities = Vec::new();
-        for _ in 0..entity_count {
-            entities.push(world.spawn_empty().id());
-        }
-
-        bencher.iter(|| {
-            let mut commands = Commands::new(&mut command_queue, &world);
-            let mut values = Vec::with_capacity(entity_count);
-            for entity in &entities {
-                values.push((*entity, (Matrix::default(), Vec3::default())));
-            }
-            #[expect(
-                deprecated,
-                reason = "This needs to be supported for now, and therefore still needs the benchmark."
- )] - commands.insert_or_spawn_batch(values); - command_queue.apply(&mut world); - }); - }); + // group.bench_function("insert_or_spawn_batch", |bencher| { + // let mut world = World::default(); + // let mut command_queue = CommandQueue::default(); + // let mut entities = Vec::new(); + // for _ in 0..entity_count { + // entities.push(world.spawn_empty().id()); + // } + + // bencher.iter(|| { + // let mut commands = Commands::new(&mut command_queue, &world); + // let mut values = Vec::with_capacity(entity_count); + // for entity in &entities { + // values.push((*entity, (Matrix::default(), Vec3::default()))); + // } + // #[expect( + // deprecated, + // reason = "This needs to be supported for now, and therefore still needs the benchmark." + // )] + // commands.insert_or_spawn_batch(values); + // command_queue.apply(&mut world); + // }); + // }); group.bench_function("insert_batch", |bencher| { let mut world = World::default(); let mut command_queue = CommandQueue::default(); From 9a227ec5750fea0d47f978506485fa523d1f3940 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Tue, 1 Apr 2025 11:47:41 -0400 Subject: [PATCH 034/113] inlining most of these functions are only called via `Entities`, so this shouldn't bloat the binary, but it will enable more optimizations. --- crates/bevy_ecs/src/entity/allocator.rs | 24 ++++++++++++++++++++++++ crates/bevy_ecs/src/entity/mod.rs | 2 ++ 2 files changed, 26 insertions(+) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 32ca40d155d18..eb3ed18a9b853 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -28,6 +28,7 @@ impl Chunk { /// Computes the capacity of the chunk at this index within [`Self::NUM_CHUNKS`]. /// The first 2 have length 512 (2^9) and the last has length (2^31) + #[inline] fn capacity_of_chunk(chunk_index: u32) -> u32 { // We do this because we're skipping the first 8 powers, so we need to make up for them by doubling the first index. // This is why the first 2 indices both have a capacity of 256. @@ -40,6 +41,7 @@ impl Chunk { } /// For this index in the whole buffer, returns the index of the [`Chunk`] and the index within that chunk. + #[inline] fn get_indices(full_idnex: u32) -> (u32, u32) { // We're countint leading zeros since each chunk has power of 2 capacity. // So the leading zeros will be proportional to the chunk index. @@ -62,6 +64,7 @@ impl Chunk { /// # Safety /// /// [`Self::set`] must have been called on this index before. + #[inline] unsafe fn get(&self, index: u32) -> Entity { // SAFETY: caller ensure we are init. let head = unsafe { self.ptr().debug_checked_unwrap() }; @@ -76,6 +79,7 @@ impl Chunk { /// # Safety /// /// [`Self::set`] must have been called on these indices before. + #[inline] unsafe fn get_slice(&self, index: u32, ideal_len: u32, index_of_self: u32) -> &[Slot] { let cap = Self::capacity_of_chunk(index_of_self); let after_index_slice_len = cap - index; @@ -95,6 +99,7 @@ impl Chunk { /// This must not be called concurrently. /// Index must be in bounds. /// Access does not conflict with another [`Self::get`]. + #[inline] unsafe fn set(&self, index: u32, entity: Entity, index_of_self: u32) -> Slot { let head = self.ptr().unwrap_or_else(|| self.init(index_of_self)); let target = head.add(index as usize); @@ -163,6 +168,7 @@ impl FreeBuffer { /// # Safety /// /// For this to be accurate, this must not be called during a [`Self::free`]. 
+ #[inline] unsafe fn num_free(&self) -> u64 { self.len.load(Ordering::Relaxed).max(0) as u64 } @@ -172,6 +178,7 @@ impl FreeBuffer { /// # Safety /// /// This must not conflict with any other [`Self::free`] or [`Self::alloc`] calls. + #[inline] unsafe fn free(&self, entity: Entity) { // Disable remote allocation. (We could do a compare exchange loop, but this is faster in the common case.) let len = self.len.swap(-1, Ordering::AcqRel).max(0); @@ -197,6 +204,7 @@ impl FreeBuffer { /// # Safety /// /// This must not conflict with [`Self::free`] calls. + #[inline] unsafe fn alloc(&self) -> Option { // SAFETY: This will get a valid index because there is no way for `free` to be done at the same time. let len = self.len.fetch_sub(1, Ordering::AcqRel); @@ -218,6 +226,7 @@ impl FreeBuffer { /// # Safety /// /// This must not conflict with [`Self::free`] calls for the duration of the returned iterator. + #[inline] unsafe fn alloc_many(&self, count: u32) -> FreeListSliceIterator { // SAFETY: This will get a valid index because there is no way for `free` to be done at the same time. let len = self.len.fetch_sub(count as isize, Ordering::AcqRel); @@ -246,6 +255,7 @@ impl FreeBuffer { } /// Allocates an [`Entity`] from the free list if one is available and it is safe to do so. + #[inline] fn remote_alloc(&self) -> Option { // The goal is the same as `alloc`, so what's the difference? // `alloc` knows `free` is not being called, but this does not. @@ -330,6 +340,7 @@ struct FreeListSliceIterator<'a> { impl<'a> Iterator for FreeListSliceIterator<'a> { type Item = Entity; + #[inline] fn next(&mut self) -> Option { if let Some(sliced) = self.current.next() { // SAFETY: Assured by constructor. @@ -350,6 +361,7 @@ impl<'a> Iterator for FreeListSliceIterator<'a> { unsafe { Some(self.current.next()?.assume_init_read()) } } + #[inline] fn size_hint(&self) -> (usize, Option) { let len = self.indices.end().saturating_sub(*self.indices.start()) as usize; (len, Some(len)) @@ -372,6 +384,7 @@ struct SharedAllocator { impl SharedAllocator { /// The total number of indices given out. + #[inline] fn total_entity_indices(&self) -> u64 { let next = self.next_entity_index.load(Ordering::Relaxed); if next == 0 { @@ -397,6 +410,7 @@ impl SharedAllocator { } /// Allocates an [`Entity`] with a brand new index. + #[inline] fn alloc_new_index(&self) -> Entity { let index = self.next_entity_index.fetch_add(1, Ordering::Relaxed); if index == 0 { @@ -410,6 +424,7 @@ impl SharedAllocator { /// # Safety /// /// This must not conflict with [`FreeBuffer::free`] calls. + #[inline] unsafe fn alloc(&self) -> Entity { // SAFETY: assured by caller unsafe { self.free.alloc() }.unwrap_or_else(|| self.alloc_new_index()) @@ -420,6 +435,7 @@ impl SharedAllocator { /// # Safety /// /// This must not conflict with [`FreeBuffer::free`] calls for the duration of the iterator. + #[inline] unsafe fn alloc_many(&self, count: u32) -> AllocEntitiesIterator { let reused = self.free.alloc_many(count); let missing = count - reused.len() as u32; @@ -433,6 +449,7 @@ impl SharedAllocator { /// Allocates a new [`Entity`]. /// This will only try to reuse a freed index if it is safe to do so. + #[inline] fn remote_alloc(&self) -> Entity { self.free .remote_alloc() @@ -461,28 +478,33 @@ impl Allocator { } /// Allocates a new [`Entity`], reusing a freed index if one exists. + #[inline] pub fn alloc(&self) -> Entity { // SAFETY: violating safety requires a `&mut self` to exist, but rust does not allow that. 
         unsafe { self.shared.alloc() }
     }
 
     /// The total number of indices given out.
+    #[inline]
     pub fn total_entity_indices(&self) -> u64 {
         self.shared.total_entity_indices()
     }
 
     /// The number of free entities.
+    #[inline]
     pub fn num_free(&self) -> u64 {
         // SAFETY: `free` is not being called since it takes `&mut self`.
         unsafe { self.shared.free.num_free() }
     }
 
     /// Returns whether or not the index is valid in this allocator.
+    #[inline]
     pub fn is_valid_index(&self, index: u32) -> bool {
         (index as u64) < self.total_entity_indices()
     }
 
     /// Frees the entity allowing it to be reused.
+    #[inline]
     pub fn free(&mut self, entity: Entity) {
         // SAFETY: We have `&mut self`.
         unsafe {
@@ -491,6 +513,7 @@ impl Allocator {
     }
 
     /// Allocates `count` entities in an iterator.
+    #[inline]
     pub fn alloc_many(&self, count: u32) -> AllocEntitiesIterator {
         // SAFETY: `free` takes `&mut self`, but this lifetime is captured by the iterator.
         unsafe { self.shared.alloc_many(count) }
     }
diff --git a/crates/bevy_ecs/src/entity/mod.rs b/crates/bevy_ecs/src/entity/mod.rs
index c3f9a024bcf3b..c7eea2974e1f9 100644
--- a/crates/bevy_ecs/src/entity/mod.rs
+++ b/crates/bevy_ecs/src/entity/mod.rs
@@ -763,6 +763,7 @@ impl Entities {
     /// # Safety
     ///
    /// `index` must be a valid index
+    #[inline]
     unsafe fn force_get_meta_mut(&mut self, index: usize) -> &mut EntityMeta {
         if index >= self.meta.len() {
             self.resize_meta_for_index_risky(index)
@@ -790,6 +791,7 @@ impl Entities {
     /// Note: This method may return [`Entities`](Entity) which are currently free
     /// Note that [`contains`](Entities::contains) will correctly return false for freed
     /// entities, since it checks the generation
+    #[inline]
     pub fn resolve_from_id(&self, index: u32) -> Option<Entity> {
         let idu = index as usize;
         if let Some(&EntityMeta { generation, .. }) = self.meta.get(idu) {

From 804c513eea80fe991660c3fc6db94d5c231232ba Mon Sep 17 00:00:00 2001
From: Elliott Pierce
Date: Tue, 1 Apr 2025 11:50:15 -0400
Subject: [PATCH 035/113] remove unneeded flush

---
 crates/bevy_ecs/src/entity/mod.rs       | 2 --
 crates/bevy_ecs/src/world/entity_ref.rs | 4 ----
 2 files changed, 6 deletions(-)

diff --git a/crates/bevy_ecs/src/entity/mod.rs b/crates/bevy_ecs/src/entity/mod.rs
index c7eea2974e1f9..324804bcb6ebc 100644
--- a/crates/bevy_ecs/src/entity/mod.rs
+++ b/crates/bevy_ecs/src/entity/mod.rs
@@ -703,8 +703,6 @@ impl Entities {
     }
 
     /// Destroy an entity, allowing it to be reused.
-    ///
-    /// Must not be called while reserved entities are awaiting `flush()`.
     pub fn free(&mut self, entity: Entity) -> Option<EntityLocation> {
         self.free_current_and_future_generations(entity, 1)
     }
diff --git a/crates/bevy_ecs/src/world/entity_ref.rs b/crates/bevy_ecs/src/world/entity_ref.rs
index 22383e86b36e9..cbd51a9db19f8 100644
--- a/crates/bevy_ecs/src/world/entity_ref.rs
+++ b/crates/bevy_ecs/src/world/entity_ref.rs
@@ -2545,10 +2545,6 @@ impl<'w> EntityWorldMut<'w> {
             world.removed_components.send(component_id, self.entity);
         }
 
-        // Observers and on_remove hooks may reserve new entities, which
-        // requires a flush before Entities::free may be called.
- world.flush_entities(); - let location = world .entities .free(self.entity) From 73253705038314d7fbab77896071ee9191dde16d Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Tue, 1 Apr 2025 12:04:42 -0400 Subject: [PATCH 036/113] removed or justified more flush calls --- crates/bevy_ecs/src/entity/map_entities.rs | 3 --- crates/bevy_ecs/src/world/entity_ref.rs | 1 + crates/bevy_ecs/src/world/mod.rs | 7 +++---- crates/bevy_ecs/src/world/spawn_batch.rs | 4 ---- 4 files changed, 4 insertions(+), 11 deletions(-) diff --git a/crates/bevy_ecs/src/entity/map_entities.rs b/crates/bevy_ecs/src/entity/map_entities.rs index 425932f9d67b4..f6a10aba35ea7 100644 --- a/crates/bevy_ecs/src/entity/map_entities.rs +++ b/crates/bevy_ecs/src/entity/map_entities.rs @@ -239,9 +239,6 @@ impl<'m> SceneEntityMapper<'m> { /// Creates a new [`SceneEntityMapper`], spawning a temporary base [`Entity`] in the provided [`World`] pub fn new(map: &'m mut EntityHashMap, world: &mut World) -> Self { - // We're going to be calling methods on `Entities` that require advance - // flushing, such as `alloc` and `free`. - world.flush_entities(); Self { map, // SAFETY: Entities data is kept in a valid state via `EntityMapper::world_scope` diff --git a/crates/bevy_ecs/src/world/entity_ref.rs b/crates/bevy_ecs/src/world/entity_ref.rs index cbd51a9db19f8..56b5ecb263dc1 100644 --- a/crates/bevy_ecs/src/world/entity_ref.rs +++ b/crates/bevy_ecs/src/world/entity_ref.rs @@ -2884,6 +2884,7 @@ impl<'w> EntityWorldMut<'w> { self.assert_not_despawned(); let entity_clone = self.world.entities.reserve_entity(); + // If there is a command that could change what we are cloning, apply it. self.world.flush(); let mut builder = EntityCloner::build(self.world); diff --git a/crates/bevy_ecs/src/world/mod.rs b/crates/bevy_ecs/src/world/mod.rs index 89c7b52fd195b..5f9c1282435a5 100644 --- a/crates/bevy_ecs/src/world/mod.rs +++ b/crates/bevy_ecs/src/world/mod.rs @@ -1088,7 +1088,6 @@ impl World { /// ``` #[track_caller] pub fn spawn_empty(&mut self) -> EntityWorldMut { - self.flush(); let entity = self.entities.alloc(); // SAFETY: entity was just allocated unsafe { self.spawn_at_empty_internal(entity, MaybeLocation::caller()) } @@ -1164,7 +1163,6 @@ impl World { bundle: B, caller: MaybeLocation, ) -> EntityWorldMut { - self.flush(); let change_tick = self.change_tick(); let entity = self.entities.alloc(); let mut bundle_spawner = BundleSpawner::new::(self, change_tick); @@ -1328,6 +1326,7 @@ impl World { let result = world.modify_component(entity, f)?; + // Handles queued commands from hooks, etc. self.flush(); Ok(result) } @@ -1357,6 +1356,7 @@ impl World { let result = world.modify_component_by_id(entity, component_id, f)?; + // Handles queued commands from hooks, etc. self.flush(); Ok(result) } @@ -1419,6 +1419,7 @@ impl World { entity: Entity, caller: MaybeLocation, ) -> Result<(), EntityDespawnError> { + // If any command depended on this entity, run those before we despawn. self.flush(); let entity = self.get_entity_mut(entity)?; entity.despawn_with_caller(caller); @@ -2461,7 +2462,6 @@ impl World { archetype_id: ArchetypeId, } - self.flush(); let change_tick = self.change_tick(); // SAFETY: These come from the same world. `Self.components_registrator` can't be used since we borrow other fields too. let mut registrator = @@ -2606,7 +2606,6 @@ impl World { archetype_id: ArchetypeId, } - self.flush(); let change_tick = self.change_tick(); // SAFETY: These come from the same world. 
`Self.components_registrator` can't be used since we borrow other fields too. let mut registrator = diff --git a/crates/bevy_ecs/src/world/spawn_batch.rs b/crates/bevy_ecs/src/world/spawn_batch.rs index 16bd9bb8059b4..768f240c47248 100644 --- a/crates/bevy_ecs/src/world/spawn_batch.rs +++ b/crates/bevy_ecs/src/world/spawn_batch.rs @@ -28,10 +28,6 @@ where #[inline] #[track_caller] pub(crate) fn new(world: &'w mut World, iter: I, caller: MaybeLocation) -> Self { - // Ensure all entity allocations are accounted for so `self.entities` can realloc if - // necessary - world.flush(); - let change_tick = world.change_tick(); let (lower, upper) = iter.size_hint(); From c3530d9edb054f0c4660c2368e8f7c5af611faf9 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Tue, 1 Apr 2025 13:15:44 -0400 Subject: [PATCH 037/113] improve spawn batch perf by 50% --- crates/bevy_ecs/src/bundle.rs | 2 ++ crates/bevy_ecs/src/entity/allocator.rs | 14 ++++++++++++++ crates/bevy_ecs/src/entity/mod.rs | 20 +++++++++++++++++--- crates/bevy_ecs/src/world/spawn_batch.rs | 23 ++++++++++++++++++++--- 4 files changed, 53 insertions(+), 6 deletions(-) diff --git a/crates/bevy_ecs/src/bundle.rs b/crates/bevy_ecs/src/bundle.rs index d87ef517f15d2..cfd35ddf935fa 100644 --- a/crates/bevy_ecs/src/bundle.rs +++ b/crates/bevy_ecs/src/bundle.rs @@ -1420,6 +1420,8 @@ impl<'w> BundleSpawner<'w> { table.reserve(additional); } + /// **Note:** This will not cause any entities to be freed. + /// /// # Safety /// `entity` must be allocated (but non-existent), `T` must match this [`BundleInfo`]'s type #[inline] diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index eb3ed18a9b853..287a7b6e8287f 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -518,6 +518,20 @@ impl Allocator { // SAFETY: `free` takes `&mut self`, but this lifetime is captured by the iterator. unsafe { self.shared.alloc_many(count) } } + + /// Allocates `count` entities in an iterator. + /// + /// # Safety + /// + /// Caller ensures [`Self::free`] is not called for the duration of the iterator. + /// Caller ensures this allocator is not dropped for the lifetime of the iterator. + #[inline] + pub unsafe fn alloc_many_unsafe(&self, count: u32) -> AllocEntitiesIterator<'static> { + // SAFETY: Caller ensures this instance is valid until the returned value is dropped. + let this: &'static Self = unsafe { &*core::ptr::from_ref(self) }; + // SAFETY: Caller ensures free is not called. + unsafe { this.shared.alloc_many(count) } + } } impl core::fmt::Debug for Allocator { diff --git a/crates/bevy_ecs/src/entity/mod.rs b/crates/bevy_ecs/src/entity/mod.rs index 324804bcb6ebc..408882fdbea3c 100644 --- a/crates/bevy_ecs/src/entity/mod.rs +++ b/crates/bevy_ecs/src/entity/mod.rs @@ -36,7 +36,7 @@ //! [`EntityWorldMut::insert`]: crate::world::EntityWorldMut::insert //! [`EntityWorldMut::remove`]: crate::world::EntityWorldMut::remove -mod allocator; +pub(crate) mod allocator; mod clone_entities; mod entity_set; mod map_entities; @@ -641,6 +641,19 @@ impl Entities { self.allocator.alloc_many(count) } + /// A version of [`alloc_entities`](Self::alloc_entities) that requires the caller to ensure safety. + /// + /// # Safety + /// + /// Caller ensures [`Self::free`] is not called for the duration of the iterator. + /// Caller ensures this allocator is not dropped for the lifetime of the iterator.
+ pub(crate) unsafe fn alloc_entities_unsafe( + &self, + count: u32, + ) -> allocator::AllocEntitiesIterator<'static> { + self.allocator.alloc_many_unsafe(count) + } + /// Allocate a specific entity ID, overwriting its generation. /// /// Returns the location of the entity currently using the given ID, if any. Location should be @@ -707,8 +720,9 @@ impl Entities { self.free_current_and_future_generations(entity, 1) } - /// Ensure at least `n` allocations can succeed without reallocating. - pub fn reserve(&mut self, additional: u32) { + /// Prepares for `additional` allocations/reservations. + /// This can prevent reallocation, etc., but since allocation can happen from anywhere, it is not guaranteed. + pub fn prepare(&mut self, additional: u32) { let shortfall = (additional as u64).saturating_sub(self.allocator.num_free()); self.meta.reserve(shortfall as usize); } diff --git a/crates/bevy_ecs/src/world/spawn_batch.rs b/crates/bevy_ecs/src/world/spawn_batch.rs index 768f240c47248..fbaf9a48ea74e 100644 --- a/crates/bevy_ecs/src/world/spawn_batch.rs +++ b/crates/bevy_ecs/src/world/spawn_batch.rs @@ -18,6 +18,7 @@ where inner: I, spawner: BundleSpawner<'w>, caller: MaybeLocation, + allocator: crate::entity::allocator::AllocEntitiesIterator<'static>, } impl<'w, I> SpawnBatchIter<'w, I> @@ -32,7 +33,11 @@ where let (lower, upper) = iter.size_hint(); let length = upper.unwrap_or(lower); - world.entities.reserve(length as u32); + + world.entities.prepare(length as u32); + // SAFETY: We take the lifetime of the world, so the instance is valid. + // `BundleSpawner::spawn_non_existent` never frees entities, and that is the only thing we call on it while the iterator is not empty. + let allocator = unsafe { world.entities.alloc_entities_unsafe(lower as u32) }; let mut spawner = BundleSpawner::new::<B>(world, change_tick); spawner.reserve_storage(length); @@ -41,6 +46,7 @@ where inner: iter, spawner, caller, + allocator, } } } @@ -68,8 +74,19 @@ where fn next(&mut self) -> Option<Entity> { let bundle = self.inner.next()?; - // SAFETY: bundle matches spawner type - unsafe { Some(self.spawner.spawn(bundle, self.caller).0) } + let entity = self.allocator.next(); + + let spawned = match entity { + // SAFETY: bundle matches spawner type.
`entity` is fresh + Some(entity) => unsafe { + self.spawner.spawn_non_existent(entity, bundle, self.caller); + entity + }, + // SAFETY: bundle matches spawner type + None => unsafe { self.spawner.spawn(bundle, self.caller).0 }, + }; + + Some(spawned) } fn size_hint(&self) -> (usize, Option<usize>) { From 6aa5de6efe8f2c623020fe01df5a44f09a645db9 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Tue, 1 Apr 2025 14:36:56 -0400 Subject: [PATCH 038/113] added an early return for local --- crates/bevy_ecs/src/entity/mod.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/crates/bevy_ecs/src/entity/mod.rs b/crates/bevy_ecs/src/entity/mod.rs index 408882fdbea3c..93a9e68cacc84 100644 --- a/crates/bevy_ecs/src/entity/mod.rs +++ b/crates/bevy_ecs/src/entity/mod.rs @@ -86,7 +86,10 @@ use crate::{ storage::{SparseSetIndex, TableId, TableRow}, }; use alloc::vec::Vec; -use bevy_platform_support::sync::Arc; +use bevy_platform_support::sync::{ + atomic::{AtomicBool, Ordering}, + Arc, +}; use concurrent_queue::ConcurrentQueue; use core::{fmt, hash::Hash, num::NonZero, panic::Location}; use log::warn; @@ -504,6 +507,7 @@ struct Pending { remote: RemotePending, #[cfg(feature = "std")] local: bevy_utils::Parallel>, + any_local: AtomicBool, } impl Pending { @@ -513,6 +517,7 @@ Self { remote: RemotePending::new(), local: bevy_utils::Parallel::default(), + any_local: AtomicBool::new(false), } } @@ -534,9 +539,14 @@ { self.remote.queue_flush(entity) } + self.any_local.store(true, Ordering::Relaxed); } fn flush_local(&mut self, mut flusher: impl FnMut(Entity)) { + if !core::mem::replace(self.any_local.get_mut(), false) { + return; + } + #[cfg(feature = "std")] let pending = { self.local.iter_mut().flat_map(|pending| pending.drain(..)) }; From c9ddbde034fefcbad98210b1aad76ceae75f22b2 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Tue, 1 Apr 2025 14:37:02 -0400 Subject: [PATCH 039/113] Revert "added an early return for local" This reverts commit 6aa5de6efe8f2c623020fe01df5a44f09a645db9.
--- crates/bevy_ecs/src/entity/mod.rs | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/crates/bevy_ecs/src/entity/mod.rs b/crates/bevy_ecs/src/entity/mod.rs index 93a9e68cacc84..408882fdbea3c 100644 --- a/crates/bevy_ecs/src/entity/mod.rs +++ b/crates/bevy_ecs/src/entity/mod.rs @@ -86,10 +86,7 @@ use crate::{ storage::{SparseSetIndex, TableId, TableRow}, }; use alloc::vec::Vec; -use bevy_platform_support::sync::{ - atomic::{AtomicBool, Ordering}, - Arc, -}; +use bevy_platform_support::sync::Arc; use concurrent_queue::ConcurrentQueue; use core::{fmt, hash::Hash, num::NonZero, panic::Location}; use log::warn; @@ -507,7 +504,6 @@ struct Pending { remote: RemotePending, #[cfg(feature = "std")] local: bevy_utils::Parallel>, - any_local: AtomicBool, } impl Pending { @@ -517,7 +513,6 @@ impl Pending { Self { remote: RemotePending::new(), local: bevy_utils::Parallel::default(), - any_local: AtomicBool::new(false), } } @@ -539,14 +534,9 @@ impl Pending { { self.remote.queue_flush(entity) } - self.any_local.store(true, Ordering::Relaxed); } fn flush_local(&mut self, mut flusher: impl FnMut(Entity)) { - if !core::mem::replace(self.any_local.get_mut(), false) { - return; - } - #[cfg(feature = "std")] let pending = { self.local.iter_mut().flat_map(|pending| pending.drain(..)) }; From 6741f70e0b899a9a39eaafd924473b0e23adc2a1 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Tue, 1 Apr 2025 15:48:27 -0400 Subject: [PATCH 040/113] fixed doc we're removing this anyway --- crates/bevy_ecs/src/world/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/bevy_ecs/src/world/mod.rs b/crates/bevy_ecs/src/world/mod.rs index 5f9c1282435a5..6f97e372c5a3b 100644 --- a/crates/bevy_ecs/src/world/mod.rs +++ b/crates/bevy_ecs/src/world/mod.rs @@ -2235,7 +2235,7 @@ impl World { /// This method should generally only be used for sharing entities across apps, and only when they have a scheme /// worked out to share an ID space (which doesn't happen by default). /// - /// ``` + /// ```ignore /// use bevy_ecs::{entity::Entity, world::World, component::Component}; /// #[derive(Component)] /// struct A(&'static str); From d4ab0bef9611a470ac0bc24248ced0fef551d92e Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Tue, 1 Apr 2025 15:55:03 -0400 Subject: [PATCH 041/113] fix test Spawning an empty entity should be an isolated event. It does not need to flush previous commands since it neither produces new commands nor can be referenced in any previous commands. 
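To illustrate, a hypothetical snippet (the names below are made up for this message; the real change is the test in the diff):

    let mut world = World::new();
    world.commands().queue(|world: &mut World| { /* some deferred command */ });
    // `spawn_empty` no longer flushes, so the queued command has not run yet.
    let entity = world.spawn_empty().id();
    // The command runs on the next flush, e.g. an explicit one:
    world.flush();
    assert!(world.get_entity(entity).is_ok());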
--- crates/bevy_ecs/src/world/entity_ref.rs | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/crates/bevy_ecs/src/world/entity_ref.rs b/crates/bevy_ecs/src/world/entity_ref.rs index 56b5ecb263dc1..9874e0c1760cf 100644 --- a/crates/bevy_ecs/src/world/entity_ref.rs +++ b/crates/bevy_ecs/src/world/entity_ref.rs @@ -5950,33 +5950,31 @@ mod tests { commands.queue(count_flush); }, ); - world.commands().queue(count_flush); let entity = world.spawn_empty().id(); - assert_eq!(world.resource::().0, 1); world.commands().queue(count_flush); let mut a = world.entity_mut(entity); a.trigger(TestEvent); - assert_eq!(a.world().resource::().0, 2); + assert_eq!(a.world().resource::().0, 1); a.insert(TestComponent(0)); - assert_eq!(a.world().resource::().0, 3); + assert_eq!(a.world().resource::().0, 2); a.remove::<TestComponent>(); - assert_eq!(a.world().resource::().0, 4); + assert_eq!(a.world().resource::().0, 3); a.insert(TestComponent(0)); - assert_eq!(a.world().resource::().0, 5); + assert_eq!(a.world().resource::().0, 4); let _ = a.take::<TestComponent>(); - assert_eq!(a.world().resource::().0, 6); + assert_eq!(a.world().resource::().0, 5); a.insert(TestComponent(0)); - assert_eq!(a.world().resource::().0, 7); + assert_eq!(a.world().resource::().0, 6); a.retain::<()>(); - assert_eq!(a.world().resource::().0, 8); + assert_eq!(a.world().resource::().0, 7); a.insert(TestComponent(0)); - assert_eq!(a.world().resource::().0, 9); + assert_eq!(a.world().resource::().0, 8); a.clear(); - assert_eq!(a.world().resource::().0, 10); + assert_eq!(a.world().resource::().0, 9); a.insert(TestComponent(0)); - assert_eq!(a.world().resource::().0, 11); + assert_eq!(a.world().resource::().0, 10); a.despawn(); - assert_eq!(world.resource::().0, 12); + assert_eq!(world.resource::().0, 11); } #[derive(Resource)] From 773cddf4e5334efd254dd666d31e3f8c4b3eac5f Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Tue, 1 Apr 2025 16:11:29 -0400 Subject: [PATCH 042/113] fix docs --- crates/bevy_ecs/src/entity/mod.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/crates/bevy_ecs/src/entity/mod.rs b/crates/bevy_ecs/src/entity/mod.rs index 408882fdbea3c..99894c6e8768b 100644 --- a/crates/bevy_ecs/src/entity/mod.rs +++ b/crates/bevy_ecs/src/entity/mod.rs @@ -612,7 +612,8 @@ impl Entities { /// Reserve entity IDs concurrently. /// /// Storage for entity generation and location is lazily allocated by calling [`flush`](Entities::flush), - /// but, if desiered, caller may [`set`](Self::set) the [`EntityLocation`] prior to the flush instead. + /// but, if desired, caller may set the [`EntityLocation`] prior to the flush instead, + /// via [`flush_entity`](crate::world::World::flush_entity) for example. pub fn reserve_entities(&self, count: u32) -> ReserveEntitiesIterator { ReserveEntitiesIterator { allocator: self.alloc_entities(count), @@ -630,7 +631,7 @@ impl Entities { } /// Allocate an entity ID directly. - /// Caller is responsible to [`set`](Self::set) the [`EntityLocation`] if desierd, + /// Caller is responsible to set the [`EntityLocation`] if desired, /// which must be done before [`get`](Self::get)ing its [`EntityLocation`]. pub fn alloc(&self) -> Entity { self.allocator.alloc() } @@ -934,7 +935,7 @@ pub struct RemoteEntities { impl RemoteEntities { /// Allocates an [`Entity`] if the source [`Entities`] is still linked to this [`RemoteEntities`].
/// - /// The caller takes responsibility for eventually [`set`](Entities::set)ing the [`EntityLocation`], + /// The caller takes responsibility for eventually setting the [`EntityLocation`], /// usually via [`flush_entity`](crate::world::World::flush_entity). pub fn alloc(&self) -> Option { self.allocator.alloc() @@ -943,7 +944,7 @@ impl RemoteEntities { /// Reserves an [`Entity`] if the source [`Entities`] is still linked to this [`RemoteEntities`]. /// /// This also queues it to be flushed after [`Entities::queue_remote_pending_to_be_flushed`] is called. - /// If waiting for that is not an option, it is also possible to [`set`](Entities::set) the [`EntityLocation`] manually, + /// If waiting for that is not an option, it is also possible to set the [`EntityLocation`] manually, /// usually via [`flush_entity`](crate::world::World::flush_entity). pub fn reserve(&self) -> Option { self.alloc() From 4fa1764fde211e65a6dab6f4ea3af87fe8ebb5a1 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Wed, 2 Apr 2025 11:28:07 -0400 Subject: [PATCH 043/113] improve clarity of pending list --- crates/bevy_ecs/src/entity/mod.rs | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/crates/bevy_ecs/src/entity/mod.rs b/crates/bevy_ecs/src/entity/mod.rs index 99894c6e8768b..b0638a8b07a0f 100644 --- a/crates/bevy_ecs/src/entity/mod.rs +++ b/crates/bevy_ecs/src/entity/mod.rs @@ -526,22 +526,18 @@ impl Pending { fn queue_flush(&self, entity: Entity) { #[cfg(feature = "std")] - { - self.local.scope(|pending| pending.push(entity)); - } + self.local.scope(|pending| pending.push(entity)); #[cfg(not(feature = "std"))] - { - self.remote.queue_flush(entity) - } + self.remote.queue_flush(entity); } fn flush_local(&mut self, mut flusher: impl FnMut(Entity)) { #[cfg(feature = "std")] - let pending = { self.local.iter_mut().flat_map(|pending| pending.drain(..)) }; + let pending = self.local.iter_mut().flat_map(|pending| pending.drain(..)); #[cfg(not(feature = "std"))] - let pending = { self.remote.pending.try_iter() }; + let pending = self.remote.pending.try_iter(); for pending in pending { flusher(pending); From 7553d11bb87f143afbb0b15fea3dd540d2beace2 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Wed, 2 Apr 2025 12:26:12 -0400 Subject: [PATCH 044/113] use atomics for Slot --- crates/bevy_ecs/src/entity/allocator.rs | 49 +++++++++++++++++-------- 1 file changed, 34 insertions(+), 15 deletions(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 287a7b6e8287f..26e676a5810cc 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -5,7 +5,7 @@ use bevy_platform_support::{ Arc, Weak, }, }; -use core::mem::{ManuallyDrop, MaybeUninit}; +use core::{mem::ManuallyDrop, num::NonZero}; use log::warn; use crate::query::DebugCheckedUnwrap; @@ -13,8 +13,29 @@ use crate::query::DebugCheckedUnwrap; use super::{Entity, EntitySetIterator}; /// This is the item we store in the free list. -/// It might not be init (if it's out of bounds). -type Slot = MaybeUninit; +struct Slot { + entity_index: AtomicU32, + entity_generation: AtomicU32, +} + +impl Slot { + // TODO: could maybe make this `&mut`?? 
+ fn set_entity(&self, entity: Entity) { + self.entity_generation + .store(entity.generation(), Ordering::Relaxed); + self.entity_index.store(entity.index(), Ordering::Relaxed); + } + + fn get_entity(&self) -> Entity { + Entity { + index: self.entity_index.load(Ordering::Relaxed), + // SAFETY: This is not 0 since it was from an entity's generation. + generation: unsafe { + NonZero::new_unchecked(self.entity_generation.load(Ordering::Relaxed)) + }, + } + } +} /// Each chunk stores a buffer of [`Slot`]s at a fixed capacity. struct Chunk { @@ -68,10 +89,10 @@ impl Chunk { unsafe fn get(&self, index: u32) -> Entity { // SAFETY: caller ensure we are init. let head = unsafe { self.ptr().debug_checked_unwrap() }; - let target = head.add(index as usize); + // SAFETY: caller ensures we are in bounds (because `set` must be in bounds) + let target = unsafe { &*head.add(index as usize) }; - // SAFETY: Ensured by caller. - unsafe { (*target).assume_init() } + target.get_entity() } /// Gets a slice of indices. @@ -100,13 +121,13 @@ impl Chunk { /// Index must be in bounds. /// Access does not conflict with another [`Self::get`]. #[inline] - unsafe fn set(&self, index: u32, entity: Entity, index_of_self: u32) -> Slot { + unsafe fn set(&self, index: u32, entity: Entity, index_of_self: u32) { let head = self.ptr().unwrap_or_else(|| self.init(index_of_self)); - let target = head.add(index as usize); - - // SAFETY: Caller ensures we are not fighting with other `set` calls or `get` calls. + // SAFETY: caller ensures it is in bounds and we are not fighting with other `set` calls or `get` calls. // A race condition is therefore impossible. - unsafe { core::ptr::replace(target, Slot::new(entity)) } + let target = unsafe { &*head.add(index as usize) }; + + target.set_entity(entity); } /// Initializes the chunk to be valid, returning the pointer. @@ -343,8 +364,7 @@ impl<'a> Iterator for FreeListSliceIterator<'a> { #[inline] fn next(&mut self) -> Option { if let Some(sliced) = self.current.next() { - // SAFETY: Assured by constructor. - return unsafe { Some(sliced.assume_init_read()) }; + return Some(sliced.get_entity()); } let next_index = self.indices.next()?; @@ -357,8 +377,7 @@ impl<'a> Iterator for FreeListSliceIterator<'a> { self.indices = (*self.indices.start() + slice.len() as u32 - 1)..=(*self.indices.end()); self.current = slice.iter(); - // SAFETY: Assured by constructor. - unsafe { Some(self.current.next()?.assume_init_read()) } + Some(self.current.next()?.get_entity()) } #[inline] From e22b93f7d8ee31d582db2da09f1348170d769b16 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Wed, 2 Apr 2025 16:58:22 -0400 Subject: [PATCH 045/113] new length tracking system --- crates/bevy_ecs/src/entity/allocator.rs | 158 ++++++++++++++++++------ crates/bevy_ecs/src/entity/mod.rs | 8 +- 2 files changed, 125 insertions(+), 41 deletions(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 26e676a5810cc..35a8d31b78a88 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -1,7 +1,7 @@ use bevy_platform_support::{ prelude::Vec, sync::{ - atomic::{AtomicBool, AtomicIsize, AtomicPtr, AtomicU32, Ordering}, + atomic::{AtomicBool, AtomicPtr, AtomicU32, AtomicU64, Ordering}, Arc, Weak, }, }; @@ -174,13 +174,105 @@ impl Chunk { } } +/// This stores two things: the length of the buffer (which can be negative) and a generation value to track *any* change to the length. 
+/// +/// The upper 48 bits store an unsigned integer of the length, and the lower 16 bits store the generation value. +/// By keeping the length in the upper bits, we can add and subtract anything to them without it affecting the generation bits. +/// When adding `x` to the length, we add `(x << 16) + 1`, and when subtracting `x` from the length, we subtract `(x << 16) - 1` so that the generation is incremented. +/// Finally, to prevent the generation from ever overflowing into the length, we follow up each operation with a bitwise AND to turn off the most significant generation bits. +/// +/// Finally, to get the signed length from the unsigned 48 bit value, we simply set `u48::MAX - u32::MAX` equal to 0. +/// This is fine since for the length to go over `u32::MAX`, the entity index would first need to be exhausted, causing a "too many entities" panic. +struct FreeBufferLen(AtomicU64); + +impl FreeBufferLen { + /// The bit of the u64 with the highest bit of the u16 generation. + const HIGHEST_GENERATION_BIT: u64 = 1 << 15; + /// The u48 encoded length considers this value to be 0. Lower values are considered negative. + const FALSE_ZERO: u64 = ((2 << 48) - 1) - ((2 << 32) - 1); + + /// Gets the current state of the buffer. + fn state(&self) -> u64 { + self.0.load(Ordering::Acquire) + } + + /// Constructs a length of 0. + fn new_zero_len() -> Self { + Self(AtomicU64::new(Self::FALSE_ZERO << 16)) + } + + /// Gets the length from a given state. Returns 0 if the length is negative or zero. + fn len_from_state(state: u64) -> u32 { + let encoded_length = state >> 16; + // Since `FALSE_ZERO` only leaves 32 bits of a u48 above it, the len must fit within 32 bits. + encoded_length.saturating_sub(Self::FALSE_ZERO) as u32 + } + + /// Gets the length. Returns 0 if the length is negative or zero. + fn len(&self) -> u32 { + Self::len_from_state(self.state()) + } + + /// Returns the number to subtract for subtracting this `num`. + fn encode_pop(num: u32) -> u64 { + let encoded_diff = (num as u64) << 16; + // subtracting 1 will add one to the generation. + encoded_diff - 1 + } + + /// Subtracts `num` from the length, returning the new state. + fn pop_from_state(mut state: u64, num: u32) -> u64 { + state -= Self::encode_pop(num); + // prevent generation overflow + state &= !Self::HIGHEST_GENERATION_BIT; + state + } + + /// Subtracts `num` from the length, returning the previous state. + fn pop_for_state(&self, num: u32) -> u64 { + let state = self.0.fetch_sub(Self::encode_pop(num), Ordering::AcqRel); + // This can be relaxed since it only affects the one bit, + // and 2^15 operations would need to happen with with this never being called for an overflow to occor. + self.0 + .fetch_and(!Self::HIGHEST_GENERATION_BIT, Ordering::Relaxed); + state + } + + /// Subtracts `num` from the length, returning the previous length. + fn pop_for_len(&self, num: u32) -> u32 { + Self::len_from_state(self.pop_for_state(num)) + } + + /// Sets the length explicitly. + fn set_len(&self, len: u32, recent_state: u64) { + let encoded_length = (len as u64 + Self::FALSE_ZERO) << 16; + let recent_generation = recent_state & (u16::MAX as u64 & !Self::HIGHEST_GENERATION_BIT); + // This effectively adds a 2^14 to the generation, so for recent `recent_state` values, this is very safe. + let far_generation = recent_generation ^ (1 << 14); + let fully_encoded = encoded_length | far_generation; + self.0.store(fully_encoded, Ordering::Release); + } + + /// Attempts to update the state, returning the new state if it fails.
+ fn try_set_state(&self, expected_current_state: u64, target_state: u64) -> Result<(), u64> { + self.0 + .compare_exchange( + expected_current_state, + target_state, + Ordering::AcqRel, + Ordering::Acquire, + ) + .map(|_| ()) + } +} + /// This is conceptually like a `Vec` that stores entities pending reuse. struct FreeBuffer { /// The chunks of the free list. /// Put end-to-end, these chunks form a list of free entities. chunks: [Chunk; Chunk::NUM_CHUNKS as usize], /// The length of the free buffer - len: AtomicIsize, + len: FreeBufferLen, } impl FreeBuffer { @@ -190,8 +282,8 @@ impl FreeBuffer { /// /// For this to be accurate, this must not be called during a [`Self::free`]. #[inline] - unsafe fn num_free(&self) -> u64 { - self.len.load(Ordering::Relaxed).max(0) as u64 + unsafe fn num_free(&self) -> u32 { + self.len.len() } /// Frees the `entity` allowing it to be reused. @@ -202,9 +294,10 @@ impl FreeBuffer { #[inline] unsafe fn free(&self, entity: Entity) { // Disable remote allocation. (We could do a compare exchange loop, but this is faster in the common case.) - let len = self.len.swap(-1, Ordering::AcqRel).max(0); + let state = self.len.pop_for_state(u32::MAX); + let len = FreeBufferLen::len_from_state(state); // We can cast to u32 safely because if it were to overflow, there would already be too many entities. - let (chunk_index, index) = Chunk::get_indices(len as u32); + let (chunk_index, index) = Chunk::get_indices(len); // SAFETY: index is correct. let chunk = unsafe { self.chunks.get_unchecked(chunk_index as usize) }; @@ -217,7 +310,7 @@ impl FreeBuffer { let new_len = len + 1; // It doesn't matter when other threads realize remote allocation is enabled again. - self.len.store(new_len, Ordering::Relaxed); + self.len.set_len(new_len, state); } /// Allocates an [`Entity`] from the free list if one is available. @@ -228,18 +321,16 @@ impl FreeBuffer { #[inline] unsafe fn alloc(&self) -> Option { // SAFETY: This will get a valid index because there is no way for `free` to be done at the same time. - let len = self.len.fetch_sub(1, Ordering::AcqRel); - (len > 0).then(|| { - let idnex = len - 1; - // We can cast to u32 safely because if it were to overflow, there would already be too many entities. - let (chunk_index, index) = Chunk::get_indices(idnex as u32); + let len = self.len.pop_for_len(1); + let index = len.checked_sub(1)?; + // We can cast to u32 safely because if it were to overflow, there would already be too many entities. + let (chunk_index, index) = Chunk::get_indices(index); - // SAFETY: index is correct. - let chunk = unsafe { self.chunks.get_unchecked(chunk_index as usize) }; + // SAFETY: index is correct. + let chunk = unsafe { self.chunks.get_unchecked(chunk_index as usize) }; - // SAFETY: This was less then `len`, so it must have been `set` via `free` before. - unsafe { chunk.get(index) } - }) + // SAFETY: This was less then `len`, so it must have been `set` via `free` before. + Some(unsafe { chunk.get(index) }) } /// Allocates an as many [`Entity`]s from the free list as are available, up to `count`. @@ -250,13 +341,12 @@ impl FreeBuffer { #[inline] unsafe fn alloc_many(&self, count: u32) -> FreeListSliceIterator { // SAFETY: This will get a valid index because there is no way for `free` to be done at the same time. 
- let len = self.len.fetch_sub(count as isize, Ordering::AcqRel); - let index = (len - count as isize).max(0); + let len = self.len.pop_for_len(count); + let index = len.saturating_sub(count); let indices = if index < len { - let end = (len - 1) as u32; - let start = index as u32; - start..=end + let end = len - 1; + index..=end } else { #[expect( clippy::reversed_empty_ranges, @@ -302,15 +392,13 @@ impl FreeBuffer { // The other allocation gets the newly freed one, and we get the previous one. // If the `free`s and `alloc`s are not balanced, the exchange will fail, and we try again. - let mut len = self.len.load(Ordering::Acquire); + let mut state = self.len.state(); loop { - if len <= 0 { - return None; - } + let len = FreeBufferLen::len_from_state(state); + let index = len.checked_sub(1)?; - let target_new_len = len - 1; // We can cast to u32 safely because if it were to overflow, there would already be too many entities. - let (chunk_index, index) = Chunk::get_indices(target_new_len as u32); + let (chunk_index, index) = Chunk::get_indices(index); // SAFETY: index is correct. let chunk = unsafe { self.chunks.get_unchecked(chunk_index as usize) }; @@ -318,14 +406,10 @@ impl FreeBuffer { // SAFETY: This was less then `len`, so it must have been `set` via `free` before. let entity = unsafe { chunk.get(index) }; - match self.len.compare_exchange( - len, - target_new_len, - Ordering::AcqRel, - Ordering::Acquire, - ) { + let ideal_state = FreeBufferLen::pop_from_state(state, 1); + match self.len.try_set_state(state, ideal_state) { Ok(_) => return Some(entity), - Err(updated_len) => len = updated_len, + Err(new_state) => state = new_state, } } } @@ -333,7 +417,7 @@ impl FreeBuffer { fn new() -> Self { Self { chunks: core::array::from_fn(|_index| Chunk::new()), - len: AtomicIsize::new(0), + len: FreeBufferLen::new_zero_len(), } } } @@ -511,7 +595,7 @@ impl Allocator { /// The number of free entities. #[inline] - pub fn num_free(&self) -> u64 { + pub fn num_free(&self) -> u32 { // SAFETY: `free` is not being called since it takes `&mut self`. unsafe { self.shared.free.num_free() } } diff --git a/crates/bevy_ecs/src/entity/mod.rs b/crates/bevy_ecs/src/entity/mod.rs index b0638a8b07a0f..31a5aaffde2a4 100644 --- a/crates/bevy_ecs/src/entity/mod.rs +++ b/crates/bevy_ecs/src/entity/mod.rs @@ -720,7 +720,7 @@ impl Entities { /// Prepares the for `additional` allocations/reservations. /// This can prevent reallocation, etc, but since allocation can happen from anywhere, it is not guaranteed. pub fn prepare(&mut self, additional: u32) { - let shortfall = (additional as u64).saturating_sub(self.allocator.num_free()); + let shortfall = additional.saturating_sub(self.allocator.num_free()); self.meta.reserve(shortfall as usize); } @@ -860,14 +860,14 @@ impl Entities { /// /// [`World`]: crate::world::World #[inline] - pub fn total_count(&self) -> usize { - self.allocator.total_entity_indices() as usize + pub fn total_count(&self) -> u64 { + self.allocator.total_entity_indices() } /// The count of currently allocated entities. #[inline] pub fn len(&self) -> u64 { - self.allocator.total_entity_indices() - self.allocator.num_free() + self.allocator.total_entity_indices() - self.allocator.num_free() as u64 } /// Checks if any entity is currently active. 
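A minimal standalone model of the tagged length this patch introduces (simplified: the `FALSE_ZERO` offset, the atomics, and the overflow guard are omitted, and the names are illustrative rather than the Bevy code):

    // Length lives in the upper 48 bits; a change counter ("generation")
    // lives in the lower 16 bits, so any push or pop changes the state even
    // if the length later returns to its old value (no ABA on the length).
    fn pack(len: u64, generation: u16) -> u64 {
        (len << 16) | generation as u64
    }
    fn len_of(state: u64) -> u64 {
        state >> 16
    }

    fn main() {
        let s0 = pack(5, 0);
        let s1 = pack(4, 1); // popped one: length down, generation up
        let s2 = pack(5, 2); // pushed one back: same length as s0...
        assert_eq!(len_of(s0), len_of(s2));
        assert_ne!(s0, s2); // ...but a compare-exchange against s0 still fails
    }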
From aef45a93bc06f714d8d85504c37109cabf93e41d Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Wed, 2 Apr 2025 17:03:23 -0400 Subject: [PATCH 046/113] polish --- crates/bevy_ecs/src/entity/allocator.rs | 30 +++++++++++-------------- 1 file changed, 13 insertions(+), 17 deletions(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 35a8d31b78a88..1edf1d1207bde 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -20,12 +20,14 @@ struct Slot { impl Slot { // TODO: could maybe make this `&mut`?? + #[inline] fn set_entity(&self, entity: Entity) { self.entity_generation .store(entity.generation(), Ordering::Relaxed); self.entity_index.store(entity.index(), Ordering::Relaxed); } + #[inline] fn get_entity(&self) -> Entity { Entity { index: self.entity_index.load(Ordering::Relaxed), @@ -192,6 +194,7 @@ impl FreeBufferLen { const FALSE_ZERO: u64 = ((2 << 48) - 1) - ((2 << 32) - 1); /// Gets the current state of the buffer. + #[inline] fn state(&self) -> u64 { self.0.load(Ordering::Acquire) } @@ -202,6 +205,7 @@ impl FreeBufferLen { } /// Gets the length from a given state. Returns 0 if the length is negative or zero. + #[inline] fn len_from_state(state: u64) -> u32 { let encoded_length = state >> 16; // Since `FALSE_ZERO` only leaves 32 bits of a u48 above it, the len must fit within 32 bits. @@ -209,11 +213,13 @@ impl FreeBufferLen { } /// Gets the length. Returns 0 if the length is negative or zero. + #[inline] fn len(&self) -> u32 { Self::len_from_state(self.state()) } /// Returns the number to subtract for subtracting this `num`. + #[inline] fn encode_pop(num: u32) -> u64 { let encoded_diff = (num as u64) << 16; // subtracting 1 will add one to the generation. @@ -221,6 +227,7 @@ impl FreeBufferLen { } /// Subtracts `num` from the length, returning the new state. + #[inline] fn pop_from_state(mut state: u64, num: u32) -> u64 { state -= Self::encode_pop(num); // prevent generation overflow @@ -229,21 +236,24 @@ impl FreeBufferLen { } /// Subtracts `num` from the length, returning the previous state. + #[inline] fn pop_for_state(&self, num: u32) -> u64 { let state = self.0.fetch_sub(Self::encode_pop(num), Ordering::AcqRel); // This can be relaxed since it only affects the one bit, - // and 2^15 operations would need to happen with with this never being called for an overflow to occor. + // and 2^15 operations would need to happen with this never being called for an overflow to occor. self.0 .fetch_and(!Self::HIGHEST_GENERATION_BIT, Ordering::Relaxed); state } /// Subtracts `num` from the length, returning the previous length. + #[inline] fn pop_for_len(&self, num: u32) -> u32 { Self::len_from_state(self.pop_for_state(num)) } /// Sets the length explicitly. + #[inline] fn set_len(&self, len: u32, recent_state: u64) { let encoded_length = (len as u64 + Self::FALSE_ZERO) << 16; let recent_generation = recent_state & (u16::MAX as u64 & !Self::HIGHEST_GENERATION_BIT); @@ -254,6 +264,7 @@ impl FreeBufferLen { } /// Attempts to update the state, returning the new state if it fails. + #[inline] fn try_set_state(&self, expected_current_state: u64, target_state: u64) -> Result<(), u64> { self.0 .compare_exchange( @@ -375,22 +386,7 @@ impl FreeBuffer { // We get around this by only updating `len` after the read is complete. // But that means something else could be trying to allocate the same index! // So we need a `len.compare_exchange` loop to ensure the index is unique. 
- // - // Examples: - // - // What if another allocation happens during the loop? - // The exchange will fail, and we try again. - // - // What happens if a `free` starts during the loop? - // The exchange will fail, and we return `None`. - // - // What happens if a `free` starts and finishes during the loop? - // The exchange will fail (len is 1 more than expected) and we try again. - // - // What happens if a `free` starts and finishes, and then a different allocation takes the freed entity? - // The exchange will not fail, and we allocate the correct entity. - // The other allocation gets the newly freed one, and we get the previous one. - // If the `free`s and `alloc`s are not balanced, the exchange will fail, and we try again. + // Because we keep a generation value in the `FreeBufferLen`, if any of these things happen, we simply try again. let mut state = self.len.state(); loop { From ef1ff7d3bb7776fb5cc5ba30301eb46f94daf1cd Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Wed, 2 Apr 2025 17:11:27 -0400 Subject: [PATCH 047/113] Fix length encoding bug --- crates/bevy_ecs/src/entity/allocator.rs | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 1edf1d1207bde..d7d168e41e88d 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -191,7 +191,7 @@ impl FreeBufferLen { /// The bit of the u64 with the highest bit of the u16 generation. const HIGHEST_GENERATION_BIT: u64 = 1 << 15; /// The u48 encoded length considers this value to be 0. Lower values are considered negative. - const FALSE_ZERO: u64 = ((2 << 48) - 1) - ((2 << 32) - 1); + const FALSE_ZERO: u64 = ((1 << 48) - 1) - ((1 << 32) - 1); /// Gets the current state of the buffer. #[inline] @@ -756,4 +756,16 @@ mod tests { assert_eq!(Chunk::get_indices(input), output); } } + + #[test] + fn buffer_len_encoding() { + let len = FreeBufferLen::new_zero_len(); + assert_eq!(len.len(), 0); + assert_eq!(len.pop_for_len(200), 0); + len.set_len(5, 0); + assert_eq!(len.pop_for_len(2), 5); + assert_eq!(len.pop_for_len(2), 3); + assert_eq!(len.pop_for_len(2), 1); + assert_eq!(len.pop_for_len(2), 0); + } } From 2bb1ae53aa43b6e77773784eaabc5bdada0588e5 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Wed, 2 Apr 2025 21:39:13 -0400 Subject: [PATCH 048/113] fix generation logic --- crates/bevy_ecs/src/entity/allocator.rs | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index d7d168e41e88d..bc621f5171d0e 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -218,18 +218,20 @@ impl FreeBufferLen { Self::len_from_state(self.state()) } - /// Returns the number to subtract for subtracting this `num`. + /// Returns the number to add for subtracting this `num`. #[inline] fn encode_pop(num: u32) -> u64 { let encoded_diff = (num as u64) << 16; - // subtracting 1 will add one to the generation. - encoded_diff - 1 + // In modular arithmetic, this is equivalent to the requested subtraction. + let to_add = u64::MAX - encoded_diff; + // add one to the generation. + to_add + 1 } /// Subtracts `num` from the length, returning the new state. 
#[inline] fn pop_from_state(mut state: u64, num: u32) -> u64 { - state -= Self::encode_pop(num); + state += Self::encode_pop(num); // prevent generation overflow state &= !Self::HIGHEST_GENERATION_BIT; state @@ -238,7 +240,7 @@ impl FreeBufferLen { /// Subtracts `num` from the length, returning the previous state. #[inline] fn pop_for_state(&self, num: u32) -> u64 { - let state = self.0.fetch_sub(Self::encode_pop(num), Ordering::AcqRel); + let state = self.0.fetch_add(Self::encode_pop(num), Ordering::AcqRel); // This can be relaxed since it only affects the one bit, // and 2^15 operations would need to happen with this never being called for an overflow to occor. self.0 From 76f67093bb969148e018009af13187a76b06dd8d Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Wed, 2 Apr 2025 22:09:56 -0400 Subject: [PATCH 049/113] suggestions from review Co-Authored-By: andriyDev --- crates/bevy_ecs/src/entity/allocator.rs | 35 ++++++++++++++++--------- crates/bevy_ecs/src/world/mod.rs | 15 +++++------ 2 files changed, 30 insertions(+), 20 deletions(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index bc621f5171d0e..1f2e7d1209a46 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -27,8 +27,14 @@ impl Slot { self.entity_index.store(entity.index(), Ordering::Relaxed); } + /// Gets the stored entity. + /// + /// # Safety + /// + /// This slot *must* have been [`set`](Self::set) before this. + /// Otherwise, the entity may be invalid or meaningless. #[inline] - fn get_entity(&self) -> Entity { + unsafe fn get_entity(&self) -> Entity { Entity { index: self.entity_index.load(Ordering::Relaxed), // SAFETY: This is not 0 since it was from an entity's generation. @@ -49,11 +55,17 @@ impl Chunk { const NUM_CHUNKS: u32 = 24; const NUM_SKIPPED: u32 = u32::BITS - Self::NUM_CHUNKS; + fn new() -> Self { + Self { + first: AtomicPtr::new(core::ptr::null_mut()), + } + } + /// Computes the capacity of the chunk at this index within [`Self::NUM_CHUNKS`]. /// The first 2 have length 512 (2^9) and the last has length (2^31) #[inline] fn capacity_of_chunk(chunk_index: u32) -> u32 { - // We do this because we're skipping the first 8 powers, so we need to make up for them by doubling the first index. + // We do this because we're skipping the first `NUM_SKIPPED` powers, so we need to make up for them by doubling the first index. // This is why the first 2 indices both have a capacity of 256. let corrected = chunk_index.max(1); // We add NUM_SKIPPED because the total capacity should be as if [`Self::NUM_CHUNKS`] were 32. @@ -70,7 +82,7 @@ impl Chunk { // So the leading zeros will be proportional to the chunk index. let leading = full_idnex .leading_zeros() - // We do a min because we skip the first 8 powers. + // We do a min because we skip the first `NUM_SKIPPED` powers to make space for the first chunk's entity count. // The -1 is because this is the number of chunks, but we want the index in the end. .min(Self::NUM_CHUNKS - 1); // We store chunks in smallest to biggest order, so we need to reverse it. @@ -94,7 +106,8 @@ impl Chunk { // SAFETY: caller ensures we are in bounds (because `set` must be in bounds) let target = unsafe { &*head.add(index as usize) }; - target.get_entity() + // SAFETY: caller ensures `set` was called. + unsafe { target.get_entity() } } /// Gets a slice of indices. 
@@ -168,12 +181,6 @@ impl Chunk { let ptr = self.first.load(Ordering::Relaxed); (!ptr.is_null()).then_some(ptr) } - - fn new() -> Self { - Self { - first: AtomicPtr::new(core::ptr::null_mut()), - } - } } /// This stores two things: the length of the buffer (which can be negative) and a generation value to track *any* change to the length. @@ -446,7 +453,10 @@ impl<'a> Iterator for FreeListSliceIterator<'a> { #[inline] fn next(&mut self) -> Option { if let Some(sliced) = self.current.next() { - return Some(sliced.get_entity()); + // SAFETY: Ensured by constructor + unsafe { + return Some(sliced.get_entity()); + } } let next_index = self.indices.next()?; @@ -459,7 +469,8 @@ impl<'a> Iterator for FreeListSliceIterator<'a> { self.indices = (*self.indices.start() + slice.len() as u32 - 1)..=(*self.indices.end()); self.current = slice.iter(); - Some(self.current.next()?.get_entity()) + // SAFETY: Ensured by constructor + unsafe { Some(self.current.next()?.get_entity()) } } #[inline] diff --git a/crates/bevy_ecs/src/world/mod.rs b/crates/bevy_ecs/src/world/mod.rs index 6f97e372c5a3b..05203848eb397 100644 --- a/crates/bevy_ecs/src/world/mod.rs +++ b/crates/bevy_ecs/src/world/mod.rs @@ -2920,15 +2920,14 @@ impl World { /// If this entity is not in any [`Archetype`](crate::archetype::Archetype), this will flush it to the empty archetype. /// Returns `Some` with the new [`EntityLocation`] if the entity is now valid in the empty archetype. pub fn flush_entity(&mut self, entity: Entity) -> Option { - if self.entities.contains(entity) && self.entities.get(entity).is_none() { - let empty_archetype = self.archetypes.empty_mut(); - let table = &mut self.storages.tables[empty_archetype.table_id()]; - // SAFETY: It's empty so no values need to be written - let new_location = unsafe { empty_archetype.allocate(entity, table.allocate(entity)) }; - Some(new_location) - } else { - None + if !self.entities.contains(entity) || self.entities.get(entity).is_some() { + return None; } + let empty_archetype = self.archetypes.empty_mut(); + let table = &mut self.storages.tables[empty_archetype.table_id()]; + // SAFETY: It's empty so no values need to be written + let new_location = unsafe { empty_archetype.allocate(entity, table.allocate(entity)) }; + Some(new_location) } /// Applies any commands in the world's internal [`CommandQueue`]. From 3352576841efdd823bd29b9bb1c34564e56498e0 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Wed, 2 Apr 2025 22:14:18 -0400 Subject: [PATCH 050/113] ensure the free list has valid data --- crates/bevy_ecs/src/entity/allocator.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 1f2e7d1209a46..02bbe1c4252ee 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -19,6 +19,15 @@ struct Slot { } impl Slot { + /// Produces a meaningless an empty value. This produces a valid but incorrect `Entity`. + fn empty() -> Self { + let source = Entity::PLACEHOLDER; + Self { + entity_index: AtomicU32::new(source.index()), + entity_generation: AtomicU32::new(source.generation()), + } + } + // TODO: could maybe make this `&mut`?? 
#[inline] fn set_entity(&self, entity: Entity) { @@ -155,6 +164,7 @@ impl Chunk { let cap = Self::capacity_of_chunk(index); let mut buff = ManuallyDrop::new(Vec::new()); buff.reserve_exact(cap as usize); + buff.resize_with(cap as usize, Slot::empty); let ptr = buff.as_mut_ptr(); self.first.store(ptr, Ordering::Relaxed); ptr From bc1797e0dc054bddc603941e4c1f603430bf1291 Mon Sep 17 00:00:00 2001 From: Eagster <79881080+ElliottjPierce@users.noreply.github.com> Date: Wed, 2 Apr 2025 22:15:16 -0400 Subject: [PATCH 051/113] Fix typos Co-authored-by: andriyDev --- crates/bevy_ecs/src/entity/allocator.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 02bbe1c4252ee..35b8b246a4c38 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -86,7 +86,7 @@ impl Chunk { /// For this index in the whole buffer, returns the index of the [`Chunk`] and the index within that chunk. #[inline] - fn get_indices(full_idnex: u32) -> (u32, u32) { + fn get_indices(full_index: u32) -> (u32, u32) { // We're countint leading zeros since each chunk has power of 2 capacity. // So the leading zeros will be proportional to the chunk index. let leading = full_idnex @@ -96,7 +96,7 @@ impl Chunk { .min(Self::NUM_CHUNKS - 1); // We store chunks in smallest to biggest order, so we need to reverse it. let chunk_index = Self::NUM_CHUNKS - 1 - leading; - // We only need to cut of this particular bit. + // We only need to cut off this particular bit. // The capacity is only one bit, and if other bits needed to be dropped, `leading` would have been greater let slice_index = full_idnex & !Self::capacity_of_chunk(chunk_index); From 91113a2f9b3ae61c5f0c8590c7f3be167d4ec18b Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Wed, 2 Apr 2025 22:19:19 -0400 Subject: [PATCH 052/113] post review rename --- crates/bevy_ecs/src/entity/allocator.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 35b8b246a4c38..9836025d7b20f 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -86,10 +86,10 @@ impl Chunk { /// For this index in the whole buffer, returns the index of the [`Chunk`] and the index within that chunk. #[inline] - fn get_indices(full_index: u32) -> (u32, u32) { + fn map_to_indices(full_index: u32) -> (u32, u32) { // We're countint leading zeros since each chunk has power of 2 capacity. // So the leading zeros will be proportional to the chunk index. - let leading = full_idnex + let leading = full_index .leading_zeros() // We do a min because we skip the first `NUM_SKIPPED` powers to make space for the first chunk's entity count. // The -1 is because this is the number of chunks, but we want the index in the end. @@ -98,7 +98,7 @@ impl Chunk { let chunk_index = Self::NUM_CHUNKS - 1 - leading; // We only need to cut off this particular bit. 
// The capacity is only one bit, and if other bits needed to be dropped, `leading` would have been greater - let slice_index = full_idnex & !Self::capacity_of_chunk(chunk_index); + let slice_index = full_index & !Self::capacity_of_chunk(chunk_index); (chunk_index, slice_index) } @@ -327,7 +327,7 @@ impl FreeBuffer { let state = self.len.pop_for_state(u32::MAX); let len = FreeBufferLen::len_from_state(state); // We can cast to u32 safely because if it were to overflow, there would already be too many entities. - let (chunk_index, index) = Chunk::get_indices(len); + let (chunk_index, index) = Chunk::map_to_indices(len); // SAFETY: index is correct. let chunk = unsafe { self.chunks.get_unchecked(chunk_index as usize) }; @@ -354,7 +354,7 @@ impl FreeBuffer { let len = self.len.pop_for_len(1); let index = len.checked_sub(1)?; // We can cast to u32 safely because if it were to overflow, there would already be too many entities. - let (chunk_index, index) = Chunk::get_indices(index); + let (chunk_index, index) = Chunk::map_to_indices(index); // SAFETY: index is correct. let chunk = unsafe { self.chunks.get_unchecked(chunk_index as usize) }; @@ -413,7 +413,7 @@ impl FreeBuffer { let index = len.checked_sub(1)?; // We can cast to u32 safely because if it were to overflow, there would already be too many entities. - let (chunk_index, index) = Chunk::get_indices(index); + let (chunk_index, index) = Chunk::map_to_indices(index); // SAFETY: index is correct. let chunk = unsafe { self.chunks.get_unchecked(chunk_index as usize) }; @@ -470,7 +470,7 @@ impl<'a> Iterator for FreeListSliceIterator<'a> { } let next_index = self.indices.next()?; - let (chunk_index, inner_index) = Chunk::get_indices(next_index); + let (chunk_index, inner_index) = Chunk::map_to_indices(next_index); // SAFETY: index is correct let chunk = unsafe { self.buffer.chunks.get_unchecked(chunk_index as usize) }; @@ -776,7 +776,7 @@ mod tests { ]; for (input, output) in to_test { - assert_eq!(Chunk::get_indices(input), output); + assert_eq!(Chunk::map_to_indices(input), output); } } From ffbf746d6a7195cbfef1f9067f9d6ba833c3b937 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Wed, 2 Apr 2025 22:31:53 -0400 Subject: [PATCH 053/113] fix doc --- crates/bevy_ecs/src/entity/allocator.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 9836025d7b20f..b6ced5f51f7cf 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -40,7 +40,7 @@ impl Slot { /// /// # Safety /// - /// This slot *must* have been [`set`](Self::set) before this. + /// This slot *must* have been [`set_entity`](Self::set_entity) before this. /// Otherwise, the entity may be invalid or meaningless. 
#[inline] unsafe fn get_entity(&self) -> Entity { From cdafe068351b55b46f1d6286f779b9bac2216f81 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Wed, 2 Apr 2025 23:20:49 -0400 Subject: [PATCH 054/113] pack entities when supported --- benches/benches/bevy_ecs/main.rs | 10 +++++----- crates/bevy_ecs/src/entity/allocator.rs | 26 ++++++++++++++++++++----- 2 files changed, 26 insertions(+), 10 deletions(-) diff --git a/benches/benches/bevy_ecs/main.rs b/benches/benches/bevy_ecs/main.rs index 4a025ab829369..1079f75ed31a0 100644 --- a/benches/benches/bevy_ecs/main.rs +++ b/benches/benches/bevy_ecs/main.rs @@ -18,15 +18,15 @@ mod scheduling; mod world; criterion_main!( - change_detection::benches, + // change_detection::benches, components::benches, empty_archetypes::benches, entity_cloning::benches, - events::benches, + // events::benches, iteration::benches, - fragmentation::benches, + // fragmentation::benches, observers::benches, - scheduling::benches, + // scheduling::benches, world::benches, - param::benches, + // param::benches, ); diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index b6ced5f51f7cf..6c750bef8daa6 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -5,7 +5,7 @@ use bevy_platform_support::{ Arc, Weak, }, }; -use core::{mem::ManuallyDrop, num::NonZero}; +use core::mem::ManuallyDrop; use log::warn; use crate::query::DebugCheckedUnwrap; @@ -14,26 +14,39 @@ use super::{Entity, EntitySetIterator}; /// This is the item we store in the free list. struct Slot { + #[cfg(not(target_has_atomic = "64"))] entity_index: AtomicU32, + #[cfg(not(target_has_atomic = "64"))] entity_generation: AtomicU32, + #[cfg(target_has_atomic = "64")] + inner_entity: AtomicU64, } impl Slot { /// Produces a meaningless an empty value. This produces a valid but incorrect `Entity`. fn empty() -> Self { let source = Entity::PLACEHOLDER; - Self { + #[cfg(not(target_has_atomic = "64"))] + return Self { entity_index: AtomicU32::new(source.index()), entity_generation: AtomicU32::new(source.generation()), - } + }; + #[cfg(target_has_atomic = "64")] + return Self { + inner_entity: AtomicU64::new(source.to_bits()), + }; } // TODO: could maybe make this `&mut`?? #[inline] fn set_entity(&self, entity: Entity) { + #[cfg(not(target_has_atomic = "64"))] self.entity_generation .store(entity.generation(), Ordering::Relaxed); + #[cfg(not(target_has_atomic = "64"))] self.entity_index.store(entity.index(), Ordering::Relaxed); + #[cfg(target_has_atomic = "64")] + self.inner_entity.store(entity.to_bits(), Ordering::Relaxed); } /// Gets the stored entity. @@ -44,13 +57,16 @@ impl Slot { /// Otherwise, the entity may be invalid or meaningless. #[inline] unsafe fn get_entity(&self) -> Entity { - Entity { + #[cfg(not(target_has_atomic = "64"))] + return Entity { index: self.entity_index.load(Ordering::Relaxed), // SAFETY: This is not 0 since it was from an entity's generation. generation: unsafe { NonZero::new_unchecked(self.entity_generation.load(Ordering::Relaxed)) }, - } + }; + #[cfg(target_has_atomic = "64")] + return Entity::from_bits(self.inner_entity.load(Ordering::Relaxed)); } } From a3791ba2364a4766684c0321334638698656f97f Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Wed, 2 Apr 2025 23:23:36 -0400 Subject: [PATCH 055/113] Retain benchmarks Accidentally pushed these. 
I'm leaving irrelevant benches out in testing --- benches/benches/bevy_ecs/main.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/benches/benches/bevy_ecs/main.rs b/benches/benches/bevy_ecs/main.rs index 1079f75ed31a0..4a025ab829369 100644 --- a/benches/benches/bevy_ecs/main.rs +++ b/benches/benches/bevy_ecs/main.rs @@ -18,15 +18,15 @@ mod scheduling; mod world; criterion_main!( - // change_detection::benches, + change_detection::benches, components::benches, empty_archetypes::benches, entity_cloning::benches, - // events::benches, + events::benches, iteration::benches, - // fragmentation::benches, + fragmentation::benches, observers::benches, - // scheduling::benches, + scheduling::benches, world::benches, - // param::benches, + param::benches, ); From 9ee241b02a9aec9eec07f160ce9b50970c00edb9 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Wed, 2 Apr 2025 23:48:03 -0400 Subject: [PATCH 056/113] fix compile --- crates/bevy_ecs/src/entity/allocator.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 6c750bef8daa6..64e1a0bd0dad2 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -62,7 +62,7 @@ impl Slot { index: self.entity_index.load(Ordering::Relaxed), // SAFETY: This is not 0 since it was from an entity's generation. generation: unsafe { - NonZero::new_unchecked(self.entity_generation.load(Ordering::Relaxed)) + core::num::NonZero::new_unchecked(self.entity_generation.load(Ordering::Relaxed)) }, }; #[cfg(target_has_atomic = "64")] From 92a7c1c3a52c9b0f0d2533db117865e66e1e7c86 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Thu, 3 Apr 2025 10:12:53 -0400 Subject: [PATCH 057/113] justify `set_len` generation --- crates/bevy_ecs/src/entity/allocator.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 64e1a0bd0dad2..b4a0c557780c2 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -292,8 +292,14 @@ impl FreeBufferLen { fn set_len(&self, len: u32, recent_state: u64) { let encoded_length = (len as u64 + Self::FALSE_ZERO) << 16; let recent_generation = recent_state & (u16::MAX as u64 & !Self::HIGHEST_GENERATION_BIT); + // This effectively adds a 2^14 to the generation, so for recent `recent_state` values, this is very safe. + // It is worth mentioning that doing this back to back will negate it, but in theory, we don't even need this at all. + // If an uneven number of free and alloc calls are made, the length will be different, so the generation is a moot point. + // If they are even, then at least one alloc call has been made, which would have incremented the generation in `recent_state`. + // So in all cases, the state is sufficiently changed such that `try_set_state` will fail when needed. 
let far_generation = recent_generation ^ (1 << 14); + let fully_encoded = encoded_length | far_generation; self.0.store(fully_encoded, Ordering::Release); } From bf9e308785272b7c1d2f023168a52403fa2f8987 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Thu, 3 Apr 2025 10:20:42 -0400 Subject: [PATCH 058/113] use disabling for freeing --- crates/bevy_ecs/src/entity/allocator.rs | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index b4a0c557780c2..247b08dfe8bb0 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -225,6 +225,8 @@ impl FreeBufferLen { const HIGHEST_GENERATION_BIT: u64 = 1 << 15; /// The u48 encoded length considers this value to be 0. Lower values are considered negative. const FALSE_ZERO: u64 = ((1 << 48) - 1) - ((1 << 32) - 1); + /// This bit is off only when the length has been entirely disabled. + const DISABLING_BIT: u64 = 1 << 63; /// Gets the current state of the buffer. #[inline] @@ -287,6 +289,14 @@ impl FreeBufferLen { Self::len_from_state(self.pop_for_state(num)) } + /// Disables the length completely, returning the previous state. + #[inline] + fn disable_len_for_state(&self) -> u64 { + // We don't care about the generation here since the length is invalid anyway. + // In order to reset length, `set_len` must be called, which handles the generation. + self.0.fetch_add(!Self::DISABLING_BIT, Ordering::AcqRel) + } + /// Sets the length explicitly. #[inline] fn set_len(&self, len: u32, recent_state: u64) { @@ -345,8 +355,8 @@ impl FreeBuffer { /// This must not conflict with any other [`Self::free`] or [`Self::alloc`] calls. #[inline] unsafe fn free(&self, entity: Entity) { - // Disable remote allocation. (We could do a compare exchange loop, but this is faster in the common case.) - let state = self.len.pop_for_state(u32::MAX); + // Disable remote allocation. + let state = self.len.disable_len_for_state(); let len = FreeBufferLen::len_from_state(state); // We can cast to u32 safely because if it were to overflow, there would already be too many entities. let (chunk_index, index) = Chunk::map_to_indices(len); @@ -361,7 +371,6 @@ impl FreeBuffer { } let new_len = len + 1; - // It doesn't matter when other threads realize remote allocation is enabled again. self.len.set_len(new_len, state); } From 4fcb74cce19b801462a7e8fd7832fabd188d3ba4 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Thu, 3 Apr 2025 10:25:50 -0400 Subject: [PATCH 059/113] remote allocation never leaks --- crates/bevy_ecs/src/entity/allocator.rs | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 247b08dfe8bb0..27cfbc4be9333 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -247,6 +247,12 @@ impl FreeBufferLen { encoded_length.saturating_sub(Self::FALSE_ZERO) as u32 } + /// Returns true if the length is currently disabled. + #[inline] + fn is_state_disabled(state: u64) -> bool { + (state & Self::DISABLING_BIT) == 0 + } + /// Gets the length. Returns 0 if the length is negative or zero. #[inline] fn len(&self) -> u32 { @@ -440,6 +446,15 @@ impl FreeBuffer { let mut state = self.len.state(); loop { + // The state is only disabled when freeing. + // If a free is happening, we need to wait for the new entity to be ready on the free buffer. + // Then, we can allocate it. 
+ if FreeBufferLen::is_state_disabled(state) { + core::hint::spin_loop(); + state = self.len.state(); + continue; + } + let len = FreeBufferLen::len_from_state(state); let index = len.checked_sub(1)?; From 2c0850385e6bfb887a313350131ab94255363a92 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Thu, 3 Apr 2025 10:28:19 -0400 Subject: [PATCH 060/113] justify encode_pop --- crates/bevy_ecs/src/entity/allocator.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 27cfbc4be9333..1ecd232888376 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -265,8 +265,13 @@ impl FreeBufferLen { let encoded_diff = (num as u64) << 16; // In modular arithmetic, this is equivalent to the requested subtraction. let to_add = u64::MAX - encoded_diff; + // add one to the generation. - to_add + 1 + // Note that if `num` is 0, this will wrap `to_add` to 0, + // which is correct since we aren't adding anything. + // Since we aren't really popping anything either, + // it is perfectly fine to not add to the generation too. + to_add.wrapping_add(1) } /// Subtracts `num` from the length, returning the new state. From 37732b2f8f52c77b90e992ffc0798afea3716782 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Thu, 3 Apr 2025 10:33:47 -0400 Subject: [PATCH 061/113] updated FreeBufferLen docs --- crates/bevy_ecs/src/entity/allocator.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 1ecd232888376..ad5326dd994c9 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -212,12 +212,15 @@ impl Chunk { /// This stores two things: the length of the buffer (which can be negative) and a generation value to track *any* change to the length. /// /// The upper 48 bits store an unsigned integer of the length, and the lower 16 bits store the generation value. -/// By keeping the length in the upper bits, we can add and subtract anything to them without it affecting the generation bits. -/// When adding `x` to the length, we add `x << 16 + 1`, and when subtracting `x` from the length, we subtract `x << 16 - 1` so that the generation is incremented. -/// Finally, to prevent the generation from ever overflowing into the length, we follow up each operation with a bit and to turn of the must significant generation bits. +/// By keeping the length in the upper bits, we can add anything to them without it affecting the generation bits. +/// See [`Self::encode_pop`] for how this is done. +/// To prevent the generation from ever overflowing into the length, +/// we follow up each operation with a bit-wise `&` to turn of the most significant generation bit, preventing overflow. /// /// Finally, to get the signed length from the unsigned 48 bit value, we simply set `u48::MAX - u32::MAX` equal to 0. /// This is fine since for the length to go over `u32::MAX`, the entity index would first need to be exhausted, ausing a "too many entities" panic. +/// In theory, the length should not drop below `-u32::MAX` since doing so would cause a "too many entities" panic. +/// However, using 48 bits provides a buffer here and allows extra flags like [`Self::DISABLING_BIT`]. 
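+/// (Worked example, illustrative: with `FALSE_ZERO = u48::MAX - u32::MAX`, a stored state of
+/// `(FALSE_ZERO + 5) << 16` decodes to a length of 5, while any state at or below
+/// `FALSE_ZERO << 16` saturates to a length of 0.)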
struct FreeBufferLen(AtomicU64); impl FreeBufferLen { From fd68df5bd38ebe8392998cd341dfe23361c26050 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Thu, 3 Apr 2025 15:45:31 -0400 Subject: [PATCH 062/113] improve perf of Slot::set --- crates/bevy_ecs/src/entity/allocator.rs | 3 ++- crates/bevy_ecs/src/entity/mod.rs | 19 +++++++++++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index ad5326dd994c9..38fde45886c13 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -66,7 +66,8 @@ impl Slot { }, }; #[cfg(target_has_atomic = "64")] - return Entity::from_bits(self.inner_entity.load(Ordering::Relaxed)); + // SAFETY: Caller ensures this was set first. + return unsafe { Entity::from_bits_unchecked(self.inner_entity.load(Ordering::Relaxed)) }; } } diff --git a/crates/bevy_ecs/src/entity/mod.rs b/crates/bevy_ecs/src/entity/mod.rs index 31a5aaffde2a4..3956600e35765 100644 --- a/crates/bevy_ecs/src/entity/mod.rs +++ b/crates/bevy_ecs/src/entity/mod.rs @@ -318,6 +318,25 @@ impl Entity { IdentifierMask::pack_into_u64(self.index, self.generation.get()) } + /// Same as [`from_bits`](Self::from_bits), but unchecked and faster. + /// + /// # Safety + /// + /// `bits` must come from [`Self::to_bits`]. + pub(crate) const unsafe fn from_bits_unchecked(bits: u64) -> Self { + match Self::try_from_bits(bits) { + Ok(entity) => entity, + Err(_) => { + #[cfg(not(debug_assertions))] + // SAFETY: Ensured by caller. + unsafe { + hint::unreachable_unchecked() + } + panic!("Unreachable code. `Entity::from_bits_unchecked` called dincorrectly.") + } + } + } + /// Reconstruct an `Entity` previously destructured with [`Entity::to_bits`]. /// /// Only useful when applied to results from `to_bits` in the same instance of an application. From 1b77882a43997208110641566f56fc1064f07ae9 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Thu, 3 Apr 2025 16:47:10 -0400 Subject: [PATCH 063/113] fix lint --- crates/bevy_ecs/src/entity/mod.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/crates/bevy_ecs/src/entity/mod.rs b/crates/bevy_ecs/src/entity/mod.rs index 3956600e35765..b571991ca349a 100644 --- a/crates/bevy_ecs/src/entity/mod.rs +++ b/crates/bevy_ecs/src/entity/mod.rs @@ -323,6 +323,11 @@ impl Entity { /// # Safety /// /// `bits` must come from [`Self::to_bits`]. + #[expect( + clippy::allow_attributes, + reason = "This is only used on some platforms." + )] + #[allow(dead_code, reason = "This is only used on some platforms.")] pub(crate) const unsafe fn from_bits_unchecked(bits: u64) -> Self { match Self::try_from_bits(bits) { Ok(entity) => entity, From 3e7ad2cc49d2968a91db22e9e63f89a51979caa9 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Fri, 4 Apr 2025 11:04:21 -0400 Subject: [PATCH 064/113] added migration guide --- .../18670_remote_entity_reservation.md | 22 +++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 release-content/migration-guides/18670_remote_entity_reservation.md diff --git a/release-content/migration-guides/18670_remote_entity_reservation.md b/release-content/migration-guides/18670_remote_entity_reservation.md new file mode 100644 index 0000000000000..7a777b1b507ad --- /dev/null +++ b/release-content/migration-guides/18670_remote_entity_reservation.md @@ -0,0 +1,22 @@ +--- +title: Entities Utilities +pull_requests: [18670] +--- + +`Entities::reserve` has been renamed `Entities::prepare`. 
Additionally, `Entities` methods `used_count` and `total_prospective_count` have been removed, and `total_count` and `len` now return `u64` instead of `usize`.
+
+These utility methods have changed because the backing entity allocator has had a rewrite. `Entities::prepare` is intentionally more generally named than `Entities::reserve` because it has looser guarantees, and it may do more than just reserving memory in the future. `Entities::used_count` and `Entities::total_prospective_count` were removed because they depend on knowing how many entities are pending being automatically flushed. However, tracking that quantity is now nontrivial, and these functions have always been intended for debugging use only. The new allocator allows entities to be reserved without them being added to the pending list for automatic flushing, and it allows pending entities to be manually flushed early. Effectively, that means debugging the entities that are pending is no longer relevant information, hence the removal of those methods. `total_count` and `len` now return `u64` instead of `usize` to better reflect the truth. Since `Entities` has a well-defined upper bound, unlike other collections, it makes more sense to use `u64` explicitly rather than `usize`.
+
+To migrate:
+
+```diff
+- let entities: usize = entities.len();
++ let entities: u64 = entities.len();
+```
+
+```diff
+- entities.reserve(128);
++ entities.prepare(128);
+```
+
+If you have any trouble migrating away from `Entities::used_count` and `Entities::total_prospective_count`, feel free to open an issue!

From 1f186b9c2ba87b4b33b41098c06bfe351923980d Mon Sep 17 00:00:00 2001
From: Elliott Pierce
Date: Sat, 5 Apr 2025 10:08:43 -0400
Subject: [PATCH 065/113] relax safety on `Slot::get`

---
 crates/bevy_ecs/src/entity/allocator.rs | 11 +++--------
 1 file changed, 3 insertions(+), 8 deletions(-)

diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs
index 38fde45886c13..b02adc91dac83 100644
--- a/crates/bevy_ecs/src/entity/allocator.rs
+++ b/crates/bevy_ecs/src/entity/allocator.rs
@@ -49,14 +49,9 @@ impl Slot {
         self.inner_entity.store(entity.to_bits(), Ordering::Relaxed);
     }
 
-    /// Gets the stored entity.
-    ///
-    /// # Safety
-    ///
-    /// This slot *must* have been [`set_entity`](Self::set_entity) before this.
-    /// Otherwise, the entity may be invalid or meaningless.
+    /// Gets the stored entity. The result be [`Entity::PLACEHODLER`] unless [`set`](Self::set) has been called.
     #[inline]
-    unsafe fn get_entity(&self) -> Entity {
+    fn get_entity(&self) -> Entity {
         #[cfg(not(target_has_atomic = "64"))]
         return Entity {
             index: self.entity_index.load(Ordering::Relaxed),
@@ -66,7 +61,7 @@ impl Slot {
             },
         };
         #[cfg(target_has_atomic = "64")]
-        // SAFETY: Caller ensures this was set first.
+        // SAFETY: This is always sourced from a proper entity.
return unsafe { Entity::from_bits_unchecked(self.inner_entity.load(Ordering::Relaxed)) }; } } From 4a088b385bbc67dee94e7d0b9868d5959f7d42b8 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Sat, 5 Apr 2025 10:13:42 -0400 Subject: [PATCH 066/113] clean up safety comments --- crates/bevy_ecs/src/entity/allocator.rs | 19 +++++-------------- 1 file changed, 5 insertions(+), 14 deletions(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index b02adc91dac83..7af5f98c68533 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -119,7 +119,7 @@ impl Chunk { /// /// # Safety /// - /// [`Self::set`] must have been called on this index before. + /// [`Self::set`] must have been called on this index before, ensuring it is in bounds and the chunk is initialized. #[inline] unsafe fn get(&self, index: u32) -> Entity { // SAFETY: caller ensure we are init. @@ -127,15 +127,14 @@ impl Chunk { // SAFETY: caller ensures we are in bounds (because `set` must be in bounds) let target = unsafe { &*head.add(index as usize) }; - // SAFETY: caller ensures `set` was called. - unsafe { target.get_entity() } + target.get_entity() } /// Gets a slice of indices. /// /// # Safety /// - /// [`Self::set`] must have been called on these indices before. + /// [`Self::set`] must have been called on these indices before, ensuring it is in bounds and the chunk is initialized. #[inline] unsafe fn get_slice(&self, index: u32, ideal_len: u32, index_of_self: u32) -> &[Slot] { let cap = Self::capacity_of_chunk(index_of_self); @@ -497,10 +496,6 @@ impl Drop for FreeBuffer { } /// A list that iterates the [`FreeBuffer`]. -/// -/// # Safety -/// -/// Must be constructed to only iterate slots that have been initialized. struct FreeListSliceIterator<'a> { buffer: &'a FreeBuffer, indices: core::ops::RangeInclusive, @@ -513,10 +508,7 @@ impl<'a> Iterator for FreeListSliceIterator<'a> { #[inline] fn next(&mut self) -> Option { if let Some(sliced) = self.current.next() { - // SAFETY: Ensured by constructor - unsafe { - return Some(sliced.get_entity()); - } + return Some(sliced.get_entity()); } let next_index = self.indices.next()?; @@ -529,8 +521,7 @@ impl<'a> Iterator for FreeListSliceIterator<'a> { self.indices = (*self.indices.start() + slice.len() as u32 - 1)..=(*self.indices.end()); self.current = slice.iter(); - // SAFETY: Ensured by constructor - unsafe { Some(self.current.next()?.get_entity()) } + Some(self.current.next()?.get_entity()) } #[inline] From 72ec8a8cd6fb792f413ecedd33d1eda5c1ee4bcd Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Sat, 5 Apr 2025 10:16:27 -0400 Subject: [PATCH 067/113] simplified `map_to_indices` Co-Authored-By: Chris Russell <8494645+chescock@users.noreply.github.com> --- crates/bevy_ecs/src/entity/allocator.rs | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 7af5f98c68533..88712c0c932d4 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -99,15 +99,10 @@ impl Chunk { /// For this index in the whole buffer, returns the index of the [`Chunk`] and the index within that chunk. #[inline] fn map_to_indices(full_index: u32) -> (u32, u32) { - // We're countint leading zeros since each chunk has power of 2 capacity. - // So the leading zeros will be proportional to the chunk index. 
- let leading = full_index - .leading_zeros() - // We do a min because we skip the first `NUM_SKIPPED` powers to make space for the first chunk's entity count. - // The -1 is because this is the number of chunks, but we want the index in the end. - .min(Self::NUM_CHUNKS - 1); + // We do a `saturating_sub` because we skip the first `NUM_SKIPPED` powers to make space for the first chunk's entity count. + // The -1 is because this is the number of chunks, but we want the index in the end. // We store chunks in smallest to biggest order, so we need to reverse it. - let chunk_index = Self::NUM_CHUNKS - 1 - leading; + let chunk_index = (Self::NUM_CHUNKS - 1).saturating_sub(full_index.leading_zeros()); // We only need to cut off this particular bit. // The capacity is only one bit, and if other bits needed to be dropped, `leading` would have been greater let slice_index = full_index & !Self::capacity_of_chunk(chunk_index); From 3ec4d6ac25b2139b3d73c5a653d444790e9c78dc Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Sat, 5 Apr 2025 10:18:34 -0400 Subject: [PATCH 068/113] simplified buffer construction Co-Authored-By: Chris Russell <8494645+chescock@users.noreply.github.com> --- crates/bevy_ecs/src/entity/allocator.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 88712c0c932d4..58ac476d939bc 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -76,7 +76,7 @@ impl Chunk { const NUM_CHUNKS: u32 = 24; const NUM_SKIPPED: u32 = u32::BITS - Self::NUM_CHUNKS; - fn new() -> Self { + const fn new() -> Self { Self { first: AtomicPtr::new(core::ptr::null_mut()), } @@ -475,7 +475,7 @@ impl FreeBuffer { fn new() -> Self { Self { - chunks: core::array::from_fn(|_index| Chunk::new()), + chunks: [const { Chunk::new() }; Chunk::NUM_CHUNKS as usize], len: FreeBufferLen::new_zero_len(), } } From 335f916c1ad5a27a7d40e107875fd14ea597f110 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Sat, 5 Apr 2025 11:13:22 -0400 Subject: [PATCH 069/113] create a separate chunk buffer type --- crates/bevy_ecs/src/entity/allocator.rs | 324 +++++++++++++----------- 1 file changed, 183 insertions(+), 141 deletions(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 58ac476d939bc..7453bbeee5e87 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -73,43 +73,12 @@ struct Chunk { } impl Chunk { - const NUM_CHUNKS: u32 = 24; - const NUM_SKIPPED: u32 = u32::BITS - Self::NUM_CHUNKS; - const fn new() -> Self { Self { first: AtomicPtr::new(core::ptr::null_mut()), } } - /// Computes the capacity of the chunk at this index within [`Self::NUM_CHUNKS`]. - /// The first 2 have length 512 (2^9) and the last has length (2^31) - #[inline] - fn capacity_of_chunk(chunk_index: u32) -> u32 { - // We do this because we're skipping the first `NUM_SKIPPED` powers, so we need to make up for them by doubling the first index. - // This is why the first 2 indices both have a capacity of 256. - let corrected = chunk_index.max(1); - // We add NUM_SKIPPED because the total capacity should be as if [`Self::NUM_CHUNKS`] were 32. - // This skips the first NUM_SKIPPED powers. - let corrected = corrected + Self::NUM_SKIPPED; - // This bit shift is just 2^corrected. - 1 << corrected - } - - /// For this index in the whole buffer, returns the index of the [`Chunk`] and the index within that chunk. 
- #[inline] - fn map_to_indices(full_index: u32) -> (u32, u32) { - // We do a `saturating_sub` because we skip the first `NUM_SKIPPED` powers to make space for the first chunk's entity count. - // The -1 is because this is the number of chunks, but we want the index in the end. - // We store chunks in smallest to biggest order, so we need to reverse it. - let chunk_index = (Self::NUM_CHUNKS - 1).saturating_sub(full_index.leading_zeros()); - // We only need to cut off this particular bit. - // The capacity is only one bit, and if other bits needed to be dropped, `leading` would have been greater - let slice_index = full_index & !Self::capacity_of_chunk(chunk_index); - - (chunk_index, slice_index) - } - /// Gets the entity at the index within this chunk. /// /// # Safety @@ -131,9 +100,8 @@ impl Chunk { /// /// [`Self::set`] must have been called on these indices before, ensuring it is in bounds and the chunk is initialized. #[inline] - unsafe fn get_slice(&self, index: u32, ideal_len: u32, index_of_self: u32) -> &[Slot] { - let cap = Self::capacity_of_chunk(index_of_self); - let after_index_slice_len = cap - index; + unsafe fn get_slice(&self, index: u32, ideal_len: u32, chunk_capacity: u32) -> &[Slot] { + let after_index_slice_len = chunk_capacity - index; let len = after_index_slice_len.min(ideal_len) as usize; // SAFETY: caller ensure we are init. @@ -151,8 +119,8 @@ impl Chunk { /// Index must be in bounds. /// Access does not conflict with another [`Self::get`]. #[inline] - unsafe fn set(&self, index: u32, entity: Entity, index_of_self: u32) { - let head = self.ptr().unwrap_or_else(|| self.init(index_of_self)); + unsafe fn set(&self, index: u32, entity: Entity, chunk_capacity: u32) { + let head = self.ptr().unwrap_or_else(|| self.init(chunk_capacity)); // SAFETY: caller ensures it is in bounds and we are not fighting with other `set` calls or `get` calls. // A race condition is therefore impossible. let target = unsafe { &*head.add(index as usize) }; @@ -166,11 +134,10 @@ impl Chunk { /// /// This must not be called concurrently. #[cold] - unsafe fn init(&self, index: u32) -> *mut Slot { - let cap = Self::capacity_of_chunk(index); + unsafe fn init(&self, chunk_capacity: u32) -> *mut Slot { let mut buff = ManuallyDrop::new(Vec::new()); - buff.reserve_exact(cap as usize); - buff.resize_with(cap as usize, Slot::empty); + buff.reserve_exact(chunk_capacity as usize); + buff.resize_with(chunk_capacity as usize, Slot::empty); let ptr = buff.as_mut_ptr(); self.first.store(ptr, Ordering::Relaxed); ptr @@ -181,12 +148,12 @@ impl Chunk { /// # Safety /// /// This must not be called concurrently. - unsafe fn dealloc(&self, index: u32) { + /// `chunk_capacity` must be the same as it was initialized with. + unsafe fn dealloc(&self, chunk_capacity: u32) { if let Some(to_drop) = self.ptr() { - let cap = Self::capacity_of_chunk(index) as usize; // SAFETY: This was created in [`Self::init`] from a standard Vec. unsafe { - Vec::from_raw_parts(to_drop, cap, cap); + Vec::from_raw_parts(to_drop, chunk_capacity as usize, chunk_capacity as usize); } } } @@ -199,6 +166,147 @@ impl Chunk { } } +/// This is a buffer that has been split into chunks, so that each chunk is pinned in memory. +/// Conceptually, each chunk is put end-to-end to form the buffer. +/// This will expand in capacity as needed, but a separate system must track the length of the list in the buffer. 
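+/// (Illustrative layout: full index 0..=511 lands in chunk 0, 512..=1023 in chunk 1,
+/// 1024..=2047 in chunk 2, and so on, so the 24 chunk capacities sum to exactly `u32::MAX + 1`.)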
+struct ChunkedBuffer([Chunk; Self::NUM_CHUNKS as usize]); + +impl ChunkedBuffer { + const NUM_CHUNKS: u32 = 24; + const NUM_SKIPPED: u32 = u32::BITS - Self::NUM_CHUNKS; + + const fn new() -> Self { + Self([const { Chunk::new() }; Self::NUM_CHUNKS as usize]) + } + + /// Computes the capacity of the chunk at this index within [`Self::NUM_CHUNKS`]. + /// The first 2 have length 512 (2^9) and the last has length (2^31) + #[inline] + fn capacity_of_chunk(chunk_index: u32) -> u32 { + // We do this because we're skipping the first `NUM_SKIPPED` powers, so we need to make up for them by doubling the first index. + // This is why the first 2 indices both have a capacity of 256. + let corrected = chunk_index.max(1); + // We add NUM_SKIPPED because the total capacity should be as if [`Self::NUM_CHUNKS`] were 32. + // This skips the first NUM_SKIPPED powers. + let corrected = corrected + Self::NUM_SKIPPED; + // This bit shift is just 2^corrected. + 1 << corrected + } + + /// For this index in the whole buffer, returns the index of the [`Chunk`], the index within that chunk, and the capacity of that chunk. + #[inline] + fn index_info(full_index: u32) -> (u32, u32, u32) { + // We do a `saturating_sub` because we skip the first `NUM_SKIPPED` powers to make space for the first chunk's entity count. + // The -1 is because this is the number of chunks, but we want the index in the end. + // We store chunks in smallest to biggest order, so we need to reverse it. + let chunk_index = (Self::NUM_CHUNKS - 1).saturating_sub(full_index.leading_zeros()); + let chunk_capacity = Self::capacity_of_chunk(chunk_index); + // We only need to cut off this particular bit. + // The capacity is only one bit, and if other bits needed to be dropped, `leading` would have been greater + let index_in_chunk = full_index & !chunk_capacity; + + (chunk_index, index_in_chunk, chunk_capacity) + } + + /// For this index in the whole buffer, returns the [`Chunk`], the index within that chunk, and the capacity of that chunk. + #[inline] + fn index_in_chunk(&self, full_index: u32) -> (&Chunk, u32, u32) { + let (chunk_index, index_in_chunk, chunk_capacity) = Self::index_info(full_index); + // SAFETY: The chunk index is correct + let chunk = unsafe { self.0.get_unchecked(chunk_index as usize) }; + (chunk, index_in_chunk, chunk_capacity) + } + + /// Gets the entity at an index. + /// + /// # Safety + /// + /// [`set`](Self::set) must have been called on this index to initialize the its memory. + unsafe fn get(&self, full_index: u32) -> Entity { + let (chunk, index, _) = self.index_in_chunk(full_index); + // SAFETY: Caller ensures this index was set + unsafe { chunk.get(index) } + } + + /// Sets an entity at an index. + /// + /// # Safety + /// + /// This must not be called concurrently. + /// Access does not conflict with another [`Self::get`]. + #[inline] + unsafe fn set(&self, full_index: u32, entity: Entity) { + let (chunk, index, chunk_capacity) = self.index_in_chunk(full_index); + // SAFETY: Ensured by caller and that the index is correct. + unsafe { chunk.set(index, entity, chunk_capacity) } + } + + /// Iterates the entities in these indices. + /// + /// # Safety + /// + /// [`Self::set`] must have been called on these indices before to initialize memory. 
+ #[inline] + unsafe fn iter(&self, indices: core::ops::RangeInclusive) -> ChunkedBufferIterator { + ChunkedBufferIterator { + buffer: self, + indices, + current: [].iter(), + } + } +} + +impl Drop for ChunkedBuffer { + fn drop(&mut self) { + for index in 0..Self::NUM_CHUNKS { + let capacity = Self::capacity_of_chunk(index); + // SAFETY: we have `&mut` and the capacity is correct. + unsafe { self.0[index as usize].dealloc(capacity) }; + } + } +} + +/// An iterator over a [`ChunkedBuffer`]. +/// +/// # Safety +/// +/// [`ChunkedBuffer::set`] must have been called on these indices before to initialize memory. +struct ChunkedBufferIterator<'a> { + buffer: &'a ChunkedBuffer, + indices: core::ops::RangeInclusive, + current: core::slice::Iter<'a, Slot>, +} + +impl<'a> Iterator for ChunkedBufferIterator<'a> { + type Item = Entity; + + #[inline] + fn next(&mut self) -> Option { + if let Some(found) = self.current.next() { + return Some(found.get_entity()); + } + + let next_index = self.indices.next()?; + let (chunk, index, chunk_capacity) = self.buffer.index_in_chunk(next_index); + + // SAFETY: Assured by constructor + let slice = unsafe { chunk.get_slice(index, self.len() as u32 + 1, chunk_capacity) }; + self.indices = (*self.indices.start() + slice.len() as u32 - 1)..=(*self.indices.end()); + + self.current = slice.iter(); + Some(self.current.next()?.get_entity()) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let len = self.indices.end().saturating_sub(*self.indices.start()) as usize; + (len, Some(len)) + } +} + +impl<'a> ExactSizeIterator for ChunkedBufferIterator<'a> {} +impl<'a> core::iter::FusedIterator for ChunkedBufferIterator<'a> {} + /// This stores two things: the length of the buffer (which can be negative) and a generation value to track *any* change to the length. /// /// The upper 48 bits store an unsigned integer of the length, and the lower 16 bits store the generation value. @@ -334,9 +442,9 @@ impl FreeBufferLen { /// This is conceptually like a `Vec` that stores entities pending reuse. struct FreeBuffer { - /// The chunks of the free list. - /// Put end-to-end, these chunks form a list of free entities. - chunks: [Chunk; Chunk::NUM_CHUNKS as usize], + /// The actual buffer of [`Slot`]s. + /// Conceptually, this is like the `RawVec` for this `Vec`. + buffer: ChunkedBuffer, /// The length of the free buffer len: FreeBufferLen, } @@ -361,19 +469,16 @@ impl FreeBuffer { unsafe fn free(&self, entity: Entity) { // Disable remote allocation. let state = self.len.disable_len_for_state(); - let len = FreeBufferLen::len_from_state(state); - // We can cast to u32 safely because if it were to overflow, there would already be too many entities. - let (chunk_index, index) = Chunk::map_to_indices(len); - - // SAFETY: index is correct. - let chunk = unsafe { self.chunks.get_unchecked(chunk_index as usize) }; - // SAFETY: Caller ensures this is not concurrent. The index is correct. - // This can not confluct with a `get` because we already disabled remote allocation. + // Push onto the buffer + let len = FreeBufferLen::len_from_state(state); + // SAFETY: Caller ensures this does not conflict with `free` or `alloc` calls, + // and we just disabled remote allocation. unsafe { - chunk.set(index, entity, chunk_index); + self.buffer.set(len, entity); } + // Update length let new_len = len + 1; self.len.set_len(new_len, state); } @@ -388,14 +493,9 @@ impl FreeBuffer { // SAFETY: This will get a valid index because there is no way for `free` to be done at the same time. 
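        // (If the list is empty, the pop below drives the encoded length negative; the
        // false-zero encoding saturates that back to 0, and `checked_sub` returns `None`.)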
let len = self.len.pop_for_len(1); let index = len.checked_sub(1)?; - // We can cast to u32 safely because if it were to overflow, there would already be too many entities. - let (chunk_index, index) = Chunk::map_to_indices(index); - - // SAFETY: index is correct. - let chunk = unsafe { self.chunks.get_unchecked(chunk_index as usize) }; // SAFETY: This was less then `len`, so it must have been `set` via `free` before. - Some(unsafe { chunk.get(index) }) + Some(unsafe { self.buffer.get(index) }) } /// Allocates an as many [`Entity`]s from the free list as are available, up to `count`. @@ -404,7 +504,7 @@ impl FreeBuffer { /// /// This must not conflict with [`Self::free`] calls for the duration of the returned iterator. #[inline] - unsafe fn alloc_many(&self, count: u32) -> FreeListSliceIterator { + unsafe fn alloc_many(&self, count: u32) -> ChunkedBufferIterator { // SAFETY: This will get a valid index because there is no way for `free` to be done at the same time. let len = self.len.pop_for_len(count); let index = len.saturating_sub(count); @@ -423,11 +523,7 @@ impl FreeBuffer { }; // SAFETY: The indices are all less then the length. - FreeListSliceIterator { - buffer: self, - indices, - current: [].iter(), - } + unsafe { self.buffer.iter(indices) } } /// Allocates an [`Entity`] from the free list if one is available and it is safe to do so. @@ -456,14 +552,8 @@ impl FreeBuffer { let len = FreeBufferLen::len_from_state(state); let index = len.checked_sub(1)?; - // We can cast to u32 safely because if it were to overflow, there would already be too many entities. - let (chunk_index, index) = Chunk::map_to_indices(index); - - // SAFETY: index is correct. - let chunk = unsafe { self.chunks.get_unchecked(chunk_index as usize) }; - // SAFETY: This was less then `len`, so it must have been `set` via `free` before. - let entity = unsafe { chunk.get(index) }; + let entity = unsafe { self.buffer.get(index) }; let ideal_state = FreeBufferLen::pop_from_state(state, 1); match self.len.try_set_state(state, ideal_state) { @@ -475,60 +565,12 @@ impl FreeBuffer { fn new() -> Self { Self { - chunks: [const { Chunk::new() }; Chunk::NUM_CHUNKS as usize], + buffer: ChunkedBuffer::new(), len: FreeBufferLen::new_zero_len(), } } } -impl Drop for FreeBuffer { - fn drop(&mut self) { - for index in 0..Chunk::NUM_CHUNKS { - // SAFETY: we have `&mut` - unsafe { self.chunks[index as usize].dealloc(index) }; - } - } -} - -/// A list that iterates the [`FreeBuffer`]. 
-struct FreeListSliceIterator<'a> { - buffer: &'a FreeBuffer, - indices: core::ops::RangeInclusive, - current: core::slice::Iter<'a, Slot>, -} - -impl<'a> Iterator for FreeListSliceIterator<'a> { - type Item = Entity; - - #[inline] - fn next(&mut self) -> Option { - if let Some(sliced) = self.current.next() { - return Some(sliced.get_entity()); - } - - let next_index = self.indices.next()?; - let (chunk_index, inner_index) = Chunk::map_to_indices(next_index); - // SAFETY: index is correct - let chunk = unsafe { self.buffer.chunks.get_unchecked(chunk_index as usize) }; - - // SAFETY: Assured by constructor - let slice = unsafe { chunk.get_slice(inner_index, self.len() as u32 + 1, chunk_index) }; - self.indices = (*self.indices.start() + slice.len() as u32 - 1)..=(*self.indices.end()); - - self.current = slice.iter(); - Some(self.current.next()?.get_entity()) - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - let len = self.indices.end().saturating_sub(*self.indices.start()) as usize; - (len, Some(len)) - } -} - -impl<'a> ExactSizeIterator for FreeListSliceIterator<'a> {} -impl<'a> core::iter::FusedIterator for FreeListSliceIterator<'a> {} - /// This stores allocation data shared by all entity allocators. struct SharedAllocator { /// The entities pending reuse @@ -706,7 +748,7 @@ impl core::fmt::Debug for Allocator { /// **NOTE:** Dropping will leak the remaining entities! pub struct AllocEntitiesIterator<'a> { new: core::ops::RangeInclusive, - reused: FreeListSliceIterator<'a>, + reused: ChunkedBufferIterator<'a>, } impl<'a> Iterator for AllocEntitiesIterator<'a> { @@ -785,11 +827,11 @@ mod tests { /// Ensure the total capacity of [`OwnedBuffer`] is `u32::MAX + 1`, since the max *index* of an [`Entity`] is `u32::MAX`. #[test] fn chunk_capacity_sums() { - let total: usize = (0..Chunk::NUM_CHUNKS) - .map(Chunk::capacity_of_chunk) - .map(|x| x as usize) + let total: u64 = (0..ChunkedBuffer::NUM_CHUNKS) + .map(ChunkedBuffer::capacity_of_chunk) + .map(|x| x as u64) .sum(); - let expected = u32::MAX as usize + 1; + let expected = u32::MAX as u64 + 1; assert_eq!(total, expected); } @@ -797,22 +839,22 @@ mod tests { #[test] fn chunk_indexing() { let to_test = vec![ - (0, (0, 0)), // index 0 cap = 512 - (1, (0, 1)), - (256, (0, 256)), - (511, (0, 511)), - (512, (1, 0)), // index 1 cap = 512 - (1023, (1, 511)), - (1024, (2, 0)), // index 2 cap = 1024 - (1025, (2, 1)), - (2047, (2, 1023)), - (2048, (3, 0)), // index 3 cap = 2048 - (4095, (3, 2047)), - (4096, (4, 0)), // index 3 cap = 4096 + (0, (0, 0, 512)), // index 0 cap = 512 + (1, (0, 1, 512)), + (256, (0, 256, 512)), + (511, (0, 511, 512)), + (512, (1, 0, 512)), // index 1 cap = 512 + (1023, (1, 511, 512)), + (1024, (2, 0, 1024)), // index 2 cap = 1024 + (1025, (2, 1, 1024)), + (2047, (2, 1023, 1024)), + (2048, (3, 0, 2048)), // index 3 cap = 2048 + (4095, (3, 2047, 2048)), + (4096, (4, 0, 4096)), // index 3 cap = 4096 ]; for (input, output) in to_test { - assert_eq!(Chunk::map_to_indices(input), output); + assert_eq!(ChunkedBuffer::index_info(input), output); } } From fa98c396c6ddf58c2979d433a8cac3eb60dea9ed Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Sat, 5 Apr 2025 11:16:06 -0400 Subject: [PATCH 070/113] document new functions --- crates/bevy_ecs/src/entity/allocator.rs | 27 ++++++++++++++----------- 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 7453bbeee5e87..4d7847fbf4ced 100644 --- 
a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -73,6 +73,7 @@ struct Chunk { } impl Chunk { + /// Constructs a null [`Chunk`]. const fn new() -> Self { Self { first: AtomicPtr::new(core::ptr::null_mut()), @@ -175,6 +176,7 @@ impl ChunkedBuffer { const NUM_CHUNKS: u32 = 24; const NUM_SKIPPED: u32 = u32::BITS - Self::NUM_CHUNKS; + /// Constructs a empty [`ChunkedBuffer`]. const fn new() -> Self { Self([const { Chunk::new() }; Self::NUM_CHUNKS as usize]) } @@ -329,17 +331,17 @@ impl FreeBufferLen { /// This bit is off only when the length has been entirely disabled. const DISABLING_BIT: u64 = 1 << 63; + /// Constructs a length of 0. + const fn new_zero_len() -> Self { + Self(AtomicU64::new(Self::FALSE_ZERO << 16)) + } + /// Gets the current state of the buffer. #[inline] fn state(&self) -> u64 { self.0.load(Ordering::Acquire) } - /// Constructs a length of 0. - fn new_zero_len() -> Self { - Self(AtomicU64::new(Self::FALSE_ZERO << 16)) - } - /// Gets the length from a given state. Returns 0 if the length is negative or zero. #[inline] fn len_from_state(state: u64) -> u32 { @@ -450,6 +452,14 @@ struct FreeBuffer { } impl FreeBuffer { + /// Constructs a empty [`FreeBuffer`]. + fn new() -> Self { + Self { + buffer: ChunkedBuffer::new(), + len: FreeBufferLen::new_zero_len(), + } + } + /// Gets the number of free entities. /// /// # Safety @@ -562,13 +572,6 @@ impl FreeBuffer { } } } - - fn new() -> Self { - Self { - buffer: ChunkedBuffer::new(), - len: FreeBufferLen::new_zero_len(), - } - } } /// This stores allocation data shared by all entity allocators. From 9a7133d36a682e227c3221b9ded2b4b18b3a58a3 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Sat, 5 Apr 2025 11:19:53 -0400 Subject: [PATCH 071/113] renames old ChunkedBuffer -> new FreeBuffer since it is specific to storing `Entity` old FreeBuffer -> new FreeList since it has a buffer and a length old FreeListLen -> new FreeCount since it tracks the number of freed entities and just happens to be used to track length --- crates/bevy_ecs/src/entity/allocator.rs | 74 ++++++++++++------------- 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 4d7847fbf4ced..71e899d6b8b1b 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -170,13 +170,13 @@ impl Chunk { /// This is a buffer that has been split into chunks, so that each chunk is pinned in memory. /// Conceptually, each chunk is put end-to-end to form the buffer. /// This will expand in capacity as needed, but a separate system must track the length of the list in the buffer. -struct ChunkedBuffer([Chunk; Self::NUM_CHUNKS as usize]); +struct FreeBuffer([Chunk; Self::NUM_CHUNKS as usize]); -impl ChunkedBuffer { +impl FreeBuffer { const NUM_CHUNKS: u32 = 24; const NUM_SKIPPED: u32 = u32::BITS - Self::NUM_CHUNKS; - /// Constructs a empty [`ChunkedBuffer`]. + /// Constructs a empty [`FreeBuffer`]. const fn new() -> Self { Self([const { Chunk::new() }; Self::NUM_CHUNKS as usize]) } @@ -249,8 +249,8 @@ impl ChunkedBuffer { /// /// [`Self::set`] must have been called on these indices before to initialize memory. 
#[inline] - unsafe fn iter(&self, indices: core::ops::RangeInclusive) -> ChunkedBufferIterator { - ChunkedBufferIterator { + unsafe fn iter(&self, indices: core::ops::RangeInclusive) -> FreeBufferIterator { + FreeBufferIterator { buffer: self, indices, current: [].iter(), @@ -258,7 +258,7 @@ impl ChunkedBuffer { } } -impl Drop for ChunkedBuffer { +impl Drop for FreeBuffer { fn drop(&mut self) { for index in 0..Self::NUM_CHUNKS { let capacity = Self::capacity_of_chunk(index); @@ -268,18 +268,18 @@ impl Drop for ChunkedBuffer { } } -/// An iterator over a [`ChunkedBuffer`]. +/// An iterator over a [`FreeBuffer`]. /// /// # Safety /// -/// [`ChunkedBuffer::set`] must have been called on these indices before to initialize memory. -struct ChunkedBufferIterator<'a> { - buffer: &'a ChunkedBuffer, +/// [`FreeBuffer::set`] must have been called on these indices before to initialize memory. +struct FreeBufferIterator<'a> { + buffer: &'a FreeBuffer, indices: core::ops::RangeInclusive, current: core::slice::Iter<'a, Slot>, } -impl<'a> Iterator for ChunkedBufferIterator<'a> { +impl<'a> Iterator for FreeBufferIterator<'a> { type Item = Entity; #[inline] @@ -306,8 +306,8 @@ impl<'a> Iterator for ChunkedBufferIterator<'a> { } } -impl<'a> ExactSizeIterator for ChunkedBufferIterator<'a> {} -impl<'a> core::iter::FusedIterator for ChunkedBufferIterator<'a> {} +impl<'a> ExactSizeIterator for FreeBufferIterator<'a> {} +impl<'a> core::iter::FusedIterator for FreeBufferIterator<'a> {} /// This stores two things: the length of the buffer (which can be negative) and a generation value to track *any* change to the length. /// @@ -321,9 +321,9 @@ impl<'a> core::iter::FusedIterator for ChunkedBufferIterator<'a> {} /// This is fine since for the length to go over `u32::MAX`, the entity index would first need to be exhausted, ausing a "too many entities" panic. /// In theory, the length should not drop below `-u32::MAX` since doing so would cause a "too many entities" panic. /// However, using 48 bits provides a buffer here and allows extra flags like [`Self::DISABLING_BIT`]. -struct FreeBufferLen(AtomicU64); +struct FreeCount(AtomicU64); -impl FreeBufferLen { +impl FreeCount { /// The bit of the u64 with the highest bit of the u16 generation. const HIGHEST_GENERATION_BIT: u64 = 1 << 15; /// The u48 encoded length considers this value to be 0. Lower values are considered negative. @@ -443,20 +443,20 @@ impl FreeBufferLen { } /// This is conceptually like a `Vec` that stores entities pending reuse. -struct FreeBuffer { +struct FreeList { /// The actual buffer of [`Slot`]s. /// Conceptually, this is like the `RawVec` for this `Vec`. - buffer: ChunkedBuffer, + buffer: FreeBuffer, /// The length of the free buffer - len: FreeBufferLen, + len: FreeCount, } -impl FreeBuffer { - /// Constructs a empty [`FreeBuffer`]. +impl FreeList { + /// Constructs a empty [`FreeList`]. fn new() -> Self { Self { - buffer: ChunkedBuffer::new(), - len: FreeBufferLen::new_zero_len(), + buffer: FreeBuffer::new(), + len: FreeCount::new_zero_len(), } } @@ -481,7 +481,7 @@ impl FreeBuffer { let state = self.len.disable_len_for_state(); // Push onto the buffer - let len = FreeBufferLen::len_from_state(state); + let len = FreeCount::len_from_state(state); // SAFETY: Caller ensures this does not conflict with `free` or `alloc` calls, // and we just disabled remote allocation. unsafe { @@ -514,7 +514,7 @@ impl FreeBuffer { /// /// This must not conflict with [`Self::free`] calls for the duration of the returned iterator. 
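    /// (E.g., a call asking for 8 may yield fewer if the free list is shorter; callers such as
    /// `SharedAllocator::alloc_many` top the difference up with brand-new indices.)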
#[inline] - unsafe fn alloc_many(&self, count: u32) -> ChunkedBufferIterator { + unsafe fn alloc_many(&self, count: u32) -> FreeBufferIterator { // SAFETY: This will get a valid index because there is no way for `free` to be done at the same time. let len = self.len.pop_for_len(count); let index = len.saturating_sub(count); @@ -546,26 +546,26 @@ impl FreeBuffer { // We get around this by only updating `len` after the read is complete. // But that means something else could be trying to allocate the same index! // So we need a `len.compare_exchange` loop to ensure the index is unique. - // Because we keep a generation value in the `FreeBufferLen`, if any of these things happen, we simply try again. + // Because we keep a generation value in the `FreeCount`, if any of these things happen, we simply try again. let mut state = self.len.state(); loop { // The state is only disabled when freeing. // If a free is happening, we need to wait for the new entity to be ready on the free buffer. // Then, we can allocate it. - if FreeBufferLen::is_state_disabled(state) { + if FreeCount::is_state_disabled(state) { core::hint::spin_loop(); state = self.len.state(); continue; } - let len = FreeBufferLen::len_from_state(state); + let len = FreeCount::len_from_state(state); let index = len.checked_sub(1)?; // SAFETY: This was less then `len`, so it must have been `set` via `free` before. let entity = unsafe { self.buffer.get(index) }; - let ideal_state = FreeBufferLen::pop_from_state(state, 1); + let ideal_state = FreeCount::pop_from_state(state, 1); match self.len.try_set_state(state, ideal_state) { Ok(_) => return Some(entity), Err(new_state) => state = new_state, @@ -577,7 +577,7 @@ impl FreeBuffer { /// This stores allocation data shared by all entity allocators. struct SharedAllocator { /// The entities pending reuse - free: FreeBuffer, + free: FreeList, /// The next value of [`Entity::index`] to give out if needed. next_entity_index: AtomicU32, /// If true, the [`Self::next_entity_index`] has been incremented before, @@ -626,7 +626,7 @@ impl SharedAllocator { /// /// # Safety /// - /// This must not conflict with [`FreeBuffer::free`] calls. + /// This must not conflict with [`FreeList::free`] calls. #[inline] unsafe fn alloc(&self) -> Entity { // SAFETY: assured by caller @@ -637,7 +637,7 @@ impl SharedAllocator { /// /// # Safety /// - /// This must not conflict with [`FreeBuffer::free`] calls for the duration of the iterator. + /// This must not conflict with [`FreeList::free`] calls for the duration of the iterator. #[inline] unsafe fn alloc_many(&self, count: u32) -> AllocEntitiesIterator { let reused = self.free.alloc_many(count); @@ -661,7 +661,7 @@ impl SharedAllocator { fn new() -> Self { Self { - free: FreeBuffer::new(), + free: FreeList::new(), next_entity_index: AtomicU32::new(0), entity_index_given: AtomicBool::new(false), } @@ -751,7 +751,7 @@ impl core::fmt::Debug for Allocator { /// **NOTE:** Dropping will leak the remaining entities! pub struct AllocEntitiesIterator<'a> { new: core::ops::RangeInclusive, - reused: ChunkedBufferIterator<'a>, + reused: FreeBufferIterator<'a>, } impl<'a> Iterator for AllocEntitiesIterator<'a> { @@ -830,8 +830,8 @@ mod tests { /// Ensure the total capacity of [`OwnedBuffer`] is `u32::MAX + 1`, since the max *index* of an [`Entity`] is `u32::MAX`. 
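    /// (Worked sum: 512 + 512 + 1024 + ... + 2^31 = 2^10 + (2^32 - 2^10) = 2^32.)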
#[test] fn chunk_capacity_sums() { - let total: u64 = (0..ChunkedBuffer::NUM_CHUNKS) - .map(ChunkedBuffer::capacity_of_chunk) + let total: u64 = (0..FreeBuffer::NUM_CHUNKS) + .map(FreeBuffer::capacity_of_chunk) .map(|x| x as u64) .sum(); let expected = u32::MAX as u64 + 1; @@ -857,13 +857,13 @@ mod tests { ]; for (input, output) in to_test { - assert_eq!(ChunkedBuffer::index_info(input), output); + assert_eq!(FreeBuffer::index_info(input), output); } } #[test] fn buffer_len_encoding() { - let len = FreeBufferLen::new_zero_len(); + let len = FreeCount::new_zero_len(); assert_eq!(len.len(), 0); assert_eq!(len.pop_for_len(200), 0); len.set_len(5, 0); From 81618f0d037a5c130e5f5f9423eb15c9bc034b1e Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Sat, 5 Apr 2025 11:33:17 -0400 Subject: [PATCH 072/113] use strong arcs for remote allocation --- crates/bevy_ecs/src/entity/allocator.rs | 65 ++++++++++++++++--------- crates/bevy_ecs/src/entity/mod.rs | 41 ++++++++-------- 2 files changed, 64 insertions(+), 42 deletions(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 71e899d6b8b1b..8402200b66234 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -2,7 +2,7 @@ use bevy_platform_support::{ prelude::Vec, sync::{ atomic::{AtomicBool, AtomicPtr, AtomicU32, AtomicU64, Ordering}, - Arc, Weak, + Arc, }, }; use core::mem::ManuallyDrop; @@ -583,9 +583,21 @@ struct SharedAllocator { /// If true, the [`Self::next_entity_index`] has been incremented before, /// so if it hits or passes zero again, an overflow has occored. entity_index_given: AtomicBool, + /// Tracks whether or not the primary [`Allocator`] has been closed or not. + is_closed: AtomicBool, } impl SharedAllocator { + /// Constructs a [`SharedAllocator`] + fn new() -> Self { + Self { + free: FreeList::new(), + next_entity_index: AtomicU32::new(0), + entity_index_given: AtomicBool::new(false), + is_closed: AtomicBool::new(false), + } + } + /// The total number of indices given out. #[inline] fn total_entity_indices(&self) -> u64 { @@ -659,12 +671,14 @@ impl SharedAllocator { .unwrap_or_else(|| self.alloc_new_index()) } - fn new() -> Self { - Self { - free: FreeList::new(), - next_entity_index: AtomicU32::new(0), - entity_index_given: AtomicBool::new(false), - } + /// Marks the allocator as closed, but it will still function normally. + fn close(&self) { + self.is_closed.store(true, Ordering::Release); + } + + /// Returns true if [`Self::close`] has been called. + fn is_closed(&self) -> bool { + self.is_closed.load(Ordering::Acquire) } } @@ -674,6 +688,7 @@ pub struct Allocator { } impl Allocator { + /// Constructs a new [`Allocator`] pub fn new() -> Self { Self { shared: Arc::new(SharedAllocator::new()), @@ -737,6 +752,12 @@ impl Allocator { } } +impl Drop for Allocator { + fn drop(&mut self) { + self.shared.close(); + } +} + impl core::fmt::Debug for Allocator { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct(core::any::type_name::()) @@ -791,34 +812,32 @@ impl Drop for AllocEntitiesIterator<'_> { /// As a result, using this will be slower than [`Allocator`] but this offers additional freedoms. #[derive(Clone)] pub struct RemoteAllocator { - // PERF: We could avoid the extra 2 atomic ops from upgrading and then dropping the `Weak`, - // But this provides more safety and allows memory to be freed earlier. 
- shared: Weak, + shared: Arc, } impl RemoteAllocator { + /// Creates a new [`RemoteAllocator`] with the provided [`Allocator`] source. + /// If the source is ever destroyed, [`Self::alloc`] will yield garbage values. + /// Be sure to use [`Self::is_closed`] to determine if it is safe to use these entities. + pub fn new(source: &Allocator) -> Self { + Self { + shared: source.shared.clone(), + } + } + /// Allocates an entity remotely. /// This is not guaranteed to reuse a freed entity, even if one exists. /// /// This will return [`None`] if the source [`Allocator`] is destroyed. #[inline] - pub fn alloc(&self) -> Option { - self.shared - .upgrade() - .map(|allocator| allocator.remote_alloc()) + pub fn alloc(&self) -> Entity { + self.shared.remote_alloc() } /// Returns whether or not this [`RemoteAllocator`] is still connected to its source [`Allocator`]. + /// Note that this could close immediately after the function returns false, so be careful. pub fn is_closed(&self) -> bool { - self.shared.strong_count() > 0 - } - - /// Creates a new [`RemoteAllocator`] with the provided [`Allocator`] source. - /// If the source is ever destroyed, [`Self::alloc`] will yield [`None`]. - pub fn new(source: &Allocator) -> Self { - Self { - shared: Arc::downgrade(&source.shared), - } + self.shared.is_closed() } } diff --git a/crates/bevy_ecs/src/entity/mod.rs b/crates/bevy_ecs/src/entity/mod.rs index b571991ca349a..0582c0e8e3806 100644 --- a/crates/bevy_ecs/src/entity/mod.rs +++ b/crates/bevy_ecs/src/entity/mod.rs @@ -953,43 +953,46 @@ pub struct RemoteEntities { } impl RemoteEntities { - /// Allocates an [`Entity`] if the source [`Entities`] is still linked to this [`RemoteEntities`]. + /// Creates a new [`RemoteEntities`] with this [`Entities`] as its source. + /// Note that this can be closed at any time, + /// so before using an allocated [`Entity`], + /// check [`is_closed`](Self::is_closed). + pub fn new(source: &Entities) -> Self { + Self { + allocator: RemoteAllocator::new(&source.allocator), + pending: source.pending.remote.clone(), + } + } + /// Allocates an [`Entity`]. Note that if the source [`Entities`] has been cleared or dropped, this will return a garbage value. + /// Use [`is_closed`] to ensure the entities are valid before using them! /// /// The caller takes responsibility for eventually setting the [`EntityLocation`], /// usually via [`flush_entity`](crate::world::World::flush_entity). - pub fn alloc(&self) -> Option { + pub fn alloc(&self) -> Entity { self.allocator.alloc() } - /// Reserves an [`Entity`] if the source [`Entities`] is still linked to this [`RemoteEntities`]. + /// Reserves an [`Entity`]. Note that if the source [`Entities`] has been cleared or dropped, this will return a garbage value. + /// Use [`is_closed`] to ensure the entities are valid before using them! /// /// This also queues it to be flushed after [`Entities::queue_remote_pending_to_be_flushed`] is called. /// If waiting for that is not an option, it is also possible to set the [`EntityLocation`] manually, /// usually via [`flush_entity`](crate::world::World::flush_entity). - pub fn reserve(&self) -> Option { - self.alloc() - .inspect(|entity| self.pending.queue_flush(*entity)) + pub fn reserve(&self) -> Entity { + let entity = self.alloc(); + self.pending.queue_flush(entity); + entity } /// Returns true if this [`RemoteEntities`] is still connected to its source [`Entities`]. /// This will return `false` if its source has been dropped or [`Entities::clear`]ed. 
/// - /// Note that this does not guarantee immediately calling [`Self::alloc`] will return `Some`, - /// as this can close at any time. + /// Note that this can be closed immediately after returning false. + /// + /// Holding a reference to the source [`Entities`] while calling this will ensure the value does not change unknowingly. pub fn is_closed(&self) -> bool { self.allocator.is_closed() } - - /// Creates a new [`RemoteEntities`] with this [`Entities`] as its source. - /// Note that this can be closed at any time, - /// so before using an allocated [`Entity`], - /// check [`is_closed`](Self::is_closed). - pub fn new(source: &Entities) -> Self { - Self { - allocator: RemoteAllocator::new(&source.allocator), - pending: source.pending.remote.clone(), - } - } } /// An error that occurs when a specified [`Entity`] does not exist. From 6d00049d49a5cd1d8a4980f11e004b16f0f71afa Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Sat, 5 Apr 2025 11:45:28 -0400 Subject: [PATCH 073/113] removed Slot::ptr --- crates/bevy_ecs/src/entity/allocator.rs | 37 +++++++++++++------------ 1 file changed, 20 insertions(+), 17 deletions(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 8402200b66234..68e08a543e3e5 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -8,8 +8,6 @@ use bevy_platform_support::{ use core::mem::ManuallyDrop; use log::warn; -use crate::query::DebugCheckedUnwrap; - use super::{Entity, EntitySetIterator}; /// This is the item we store in the free list. @@ -87,9 +85,10 @@ impl Chunk { /// [`Self::set`] must have been called on this index before, ensuring it is in bounds and the chunk is initialized. #[inline] unsafe fn get(&self, index: u32) -> Entity { - // SAFETY: caller ensure we are init. - let head = unsafe { self.ptr().debug_checked_unwrap() }; - // SAFETY: caller ensures we are in bounds (because `set` must be in bounds) + // Relaxed is fine since caller ensures we are iitialized already. + // In order for the caller to guarantee that, they must have an ordering that orders this get after the required `set`. + let head = self.first.load(Ordering::Relaxed); + // SAFETY: caller ensures we are in bounds and init (because `set` must be in bounds) let target = unsafe { &*head.add(index as usize) }; target.get_entity() @@ -105,10 +104,11 @@ impl Chunk { let after_index_slice_len = chunk_capacity - index; let len = after_index_slice_len.min(ideal_len) as usize; - // SAFETY: caller ensure we are init. - let head = unsafe { self.ptr().debug_checked_unwrap() }; + // Relaxed is fine since caller ensures we are iitialized already. + // In order for the caller to guarantee that, they must have an ordering that orders this get after the required `set`. + let head = self.first.load(Ordering::Relaxed); - // SAFETY: The chunk was allocated via a `Vec` and the index is within the capacity. + // SAFETY: Caller ensures we are init, so the chunk was allocated via a `Vec` and the index is within the capacity. unsafe { core::slice::from_raw_parts(head, len) } } @@ -121,7 +121,14 @@ impl Chunk { /// Access does not conflict with another [`Self::get`]. #[inline] unsafe fn set(&self, index: u32, entity: Entity, chunk_capacity: u32) { - let head = self.ptr().unwrap_or_else(|| self.init(chunk_capacity)); + // Relaxed is fine here since this is not called concurrently and does not conflict with a `get`. 
+ let ptr = self.first.load(Ordering::Relaxed); + let head = if ptr.is_null() { + self.init(chunk_capacity) + } else { + ptr + }; + // SAFETY: caller ensures it is in bounds and we are not fighting with other `set` calls or `get` calls. // A race condition is therefore impossible. let target = unsafe { &*head.add(index as usize) }; @@ -140,6 +147,7 @@ impl Chunk { buff.reserve_exact(chunk_capacity as usize); buff.resize_with(chunk_capacity as usize, Slot::empty); let ptr = buff.as_mut_ptr(); + // Relaxed is fine here since this is not called concurrently. self.first.store(ptr, Ordering::Relaxed); ptr } @@ -151,20 +159,15 @@ impl Chunk { /// This must not be called concurrently. /// `chunk_capacity` must be the same as it was initialized with. unsafe fn dealloc(&self, chunk_capacity: u32) { - if let Some(to_drop) = self.ptr() { + // Relaxed is fine here since this is not called concurrently. + let to_drop = self.first.load(Ordering::Relaxed); + if !to_drop.is_null() { // SAFETY: This was created in [`Self::init`] from a standard Vec. unsafe { Vec::from_raw_parts(to_drop, chunk_capacity as usize, chunk_capacity as usize); } } } - - /// Returns [`Self::first`] if it is valid. - #[inline] - fn ptr(&self) -> Option<*mut Slot> { - let ptr = self.first.load(Ordering::Relaxed); - (!ptr.is_null()).then_some(ptr) - } } /// This is a buffer that has been split into chunks, so that each chunk is pinned in memory. From 27c17c6fe33f14dd77c3e7e08de32ed63d2b064d Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Sat, 5 Apr 2025 11:54:46 -0400 Subject: [PATCH 074/113] fixed alloc_many bug --- crates/bevy_ecs/src/entity/allocator.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 68e08a543e3e5..037c92f772811 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -658,10 +658,13 @@ impl SharedAllocator { let reused = self.free.alloc_many(count); let missing = count - reused.len() as u32; let start_new = self.next_entity_index.fetch_add(missing, Ordering::Relaxed); - if start_new < missing { + + let new_next_entity_index = start_new + missing; + if new_next_entity_index < missing { self.check_overflow(); } - let new = start_new..=(start_new + missing); + + let new = start_new..=(start_new + missing - 1); AllocEntitiesIterator { new, reused } } From 5fce1f952c5cf1fef2ebaccabaab500827e04e57 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Sat, 5 Apr 2025 12:21:16 -0400 Subject: [PATCH 075/113] fixed doc --- crates/bevy_ecs/src/entity/allocator.rs | 2 +- crates/bevy_ecs/src/entity/mod.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 037c92f772811..071e2ee56fc40 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -47,7 +47,7 @@ impl Slot { self.inner_entity.store(entity.to_bits(), Ordering::Relaxed); } - /// Gets the stored entity. The result be [`Entity::PLACEHODLER`] unless [`set`](Self::set) has been called. + /// Gets the stored entity. The result be [`Entity::PLACEHOLDER`] unless [`set_entity`](Self::set_entity) has been called. 
#[inline] fn get_entity(&self) -> Entity { #[cfg(not(target_has_atomic = "64"))] diff --git a/crates/bevy_ecs/src/entity/mod.rs b/crates/bevy_ecs/src/entity/mod.rs index 0582c0e8e3806..ade54bf8177ae 100644 --- a/crates/bevy_ecs/src/entity/mod.rs +++ b/crates/bevy_ecs/src/entity/mod.rs @@ -964,7 +964,7 @@ impl RemoteEntities { } } /// Allocates an [`Entity`]. Note that if the source [`Entities`] has been cleared or dropped, this will return a garbage value. - /// Use [`is_closed`] to ensure the entities are valid before using them! + /// Use [`is_closed`](Self::is_closed) to ensure the entities are valid before using them! /// /// The caller takes responsibility for eventually setting the [`EntityLocation`], /// usually via [`flush_entity`](crate::world::World::flush_entity). @@ -973,7 +973,7 @@ impl RemoteEntities { } /// Reserves an [`Entity`]. Note that if the source [`Entities`] has been cleared or dropped, this will return a garbage value. - /// Use [`is_closed`] to ensure the entities are valid before using them! + /// Use [`is_closed`](Self::is_closed) to ensure the entities are valid before using them! /// /// This also queues it to be flushed after [`Entities::queue_remote_pending_to_be_flushed`] is called. /// If waiting for that is not an option, it is also possible to set the [`EntityLocation`] manually, From a419834161a04d114ff6445f16f2be959e3be11f Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Sat, 5 Apr 2025 12:22:11 -0400 Subject: [PATCH 076/113] fix alloc_many bug --- crates/bevy_ecs/src/entity/allocator.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 071e2ee56fc40..5b6039930c017 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -660,7 +660,7 @@ impl SharedAllocator { let start_new = self.next_entity_index.fetch_add(missing, Ordering::Relaxed); let new_next_entity_index = start_new + missing; - if new_next_entity_index < missing { + if new_next_entity_index < missing || start_new == 0 { self.check_overflow(); } From a8e4fda8125f26b1ad9d4b9c88aaff50c572e726 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Sat, 5 Apr 2025 12:35:27 -0400 Subject: [PATCH 077/113] use try_from_bits --- crates/bevy_ecs/src/entity/allocator.rs | 4 +++- crates/bevy_ecs/src/entity/mod.rs | 24 ------------------------ 2 files changed, 3 insertions(+), 25 deletions(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 5b6039930c017..14d2070f209eb 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -60,7 +60,9 @@ impl Slot { }; #[cfg(target_has_atomic = "64")] // SAFETY: This is always sourced from a proper entity. - return unsafe { Entity::from_bits_unchecked(self.inner_entity.load(Ordering::Relaxed)) }; + return unsafe { + Entity::try_from_bits(self.inner_entity.load(Ordering::Relaxed)).unwrap_unchecked() + }; } } diff --git a/crates/bevy_ecs/src/entity/mod.rs b/crates/bevy_ecs/src/entity/mod.rs index ade54bf8177ae..f6564cedaa957 100644 --- a/crates/bevy_ecs/src/entity/mod.rs +++ b/crates/bevy_ecs/src/entity/mod.rs @@ -318,30 +318,6 @@ impl Entity { IdentifierMask::pack_into_u64(self.index, self.generation.get()) } - /// Same as [`from_bits`](Self::from_bits), but unchecked and faster. - /// - /// # Safety - /// - /// `bits` must come from [`Self::to_bits`]. 
-    #[expect(
-        clippy::allow_attributes,
-        reason = "This is only used on some platforms."
-    )]
-    #[allow(dead_code, reason = "This is only used on some platforms.")]
-    pub(crate) const unsafe fn from_bits_unchecked(bits: u64) -> Self {
-        match Self::try_from_bits(bits) {
-            Ok(entity) => entity,
-            Err(_) => {
-                #[cfg(not(debug_assertions))]
-                // SAFETY: Ensured by caller.
-                unsafe {
-                    hint::unreachable_unchecked()
-                }
-                panic!("Unreachable code. `Entity::from_bits_unchecked` called dincorrectly.")
-            }
-        }
-    }
-
     /// Reconstruct an `Entity` previously destructured with [`Entity::to_bits`].
     ///
     /// Only useful when applied to results from `to_bits` in the same instance of an application.

From a590137928e95769121d65f08d98a2b63a014979 Mon Sep 17 00:00:00 2001
From: Elliott Pierce
Date: Sat, 5 Apr 2025 15:05:53 -0400
Subject: [PATCH 078/113] redid FreeCount

---
 crates/bevy_ecs/src/entity/allocator.rs | 235 ++++++++++++------------
 1 file changed, 119 insertions(+), 116 deletions(-)

diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs
index 14d2070f209eb..41555bd4067f2 100644
--- a/crates/bevy_ecs/src/entity/allocator.rs
+++ b/crates/bevy_ecs/src/entity/allocator.rs
@@ -314,136 +314,128 @@ impl<'a> Iterator for FreeBufferIterator<'a> {
 impl<'a> ExactSizeIterator for FreeBufferIterator<'a> {}
 impl<'a> core::iter::FusedIterator for FreeBufferIterator<'a> {}
 
-/// This stores two things: the length of the buffer (which can be negative) and a generation value to track *any* change to the length.
+/// This tracks the state of a [`FreeCount`], which has lots of information packed into it.
 ///
-/// The upper 48 bits store an unsigned integer of the length, and the lower 16 bits store the generation value.
-/// By keeping the length in the upper bits, we can add anything to them without it affecting the generation bits.
-/// See [`Self::encode_pop`] for how this is done.
-/// To prevent the generation from ever overflowing into the length,
-/// we follow up each operation with a bit-wise `&` to turn of the most significant generation bit, preventing overflow.
-///
-/// Finally, to get the signed length from the unsigned 48 bit value, we simply set `u48::MAX - u32::MAX` equal to 0.
-/// This is fine since for the length to go over `u32::MAX`, the entity index would first need to be exhausted, ausing a "too many entities" panic.
-/// In theory, the length should not drop below `-u32::MAX` since doing so would cause a "too many entities" panic.
-/// However, using 48 bits provides a buffer here and allows extra flags like [`Self::DISABLING_BIT`].
-struct FreeCount(AtomicU64);
-
-impl FreeCount {
-    /// The bit of the u64 with the highest bit of the u16 generation.
-    const HIGHEST_GENERATION_BIT: u64 = 1 << 15;
-    /// The u48 encoded length considers this value to be 0. Lower values are considered negative.
-    const FALSE_ZERO: u64 = ((1 << 48) - 1) - ((1 << 32) - 1);
-    /// This bit is off only when the length has been entirely disabled.
-    const DISABLING_BIT: u64 = 1 << 63;
+/// - The first 33 bits store a signed 33 bit integer. This behaves like a u33, but we define 1^33 as 0.
+/// - The 34th bit stores a flag that indicates if the count has been disabled/suspended.
+/// - The remaining 30 bits are the generation. The generation just differentiate different versions of the state that happen to encode the same length.
+#[derive(Clone, Copy)]
+struct PackedFreeCount(u64);
+
+impl PackedFreeCount {
+    /// When this bit is on, the count is disabled.
+ /// This is used to prevent remote allocations from running at the same time as a free operation. + const DISABLING_BIT: u64 = 1 << 33; + /// This is the mask for the length bits. + const LENGTH_MASK: u64 = (1 << 32) | u32::MAX as u64; + /// This is the value of the length mask we consider to be 0. + const LENGTH_0: u64 = 1 << 32; + /// This is the lowest bit in the u30 generation. + const GENERATION_LEAST_BIT: u64 = 1 << 34; /// Constructs a length of 0. const fn new_zero_len() -> Self { - Self(AtomicU64::new(Self::FALSE_ZERO << 16)) + Self(Self::LENGTH_0) } - /// Gets the current state of the buffer. - #[inline] - fn state(&self) -> u64 { - self.0.load(Ordering::Acquire) + /// Gets the encoded length. + const fn length(self) -> u32 { + let unsigned_length = self.0 & Self::LENGTH_MASK; + unsigned_length.saturating_sub(Self::LENGTH_0) as u32 } - /// Gets the length from a given state. Returns 0 if the length is negative or zero. - #[inline] - fn len_from_state(state: u64) -> u32 { - let encoded_length = state >> 16; - // Since `FALSE_ZERO` only leaves 32 bits of a u48 above it, the len must fit within 32 bits. - encoded_length.saturating_sub(Self::FALSE_ZERO) as u32 + /// Returns whether or not the count is disabled. + const fn is_disabled(self) -> bool { + (self.0 & Self::DISABLING_BIT) > 0 } - /// Returns true if the length is currently disabled. - #[inline] - fn is_state_disabled(state: u64) -> bool { - (state & Self::DISABLING_BIT) == 0 + /// Changes only the length of this count to `length`. + const fn with_length(self, length: u32) -> Self { + // Just turns on the "considered zero" bit since this is non-negative. + let length = length as u64 | Self::LENGTH_0; + Self(self.0 & !Self::LENGTH_MASK | length) } - /// Gets the length. Returns 0 if the length is negative or zero. - #[inline] - fn len(&self) -> u32 { - Self::len_from_state(self.state()) + /// Manually changes the generation. + const fn change_generation(self) -> Self { + Self(self.0.wrapping_sub(Self::GENERATION_LEAST_BIT)) } - /// Returns the number to add for subtracting this `num`. - #[inline] - fn encode_pop(num: u32) -> u64 { - let encoded_diff = (num as u64) << 16; - // In modular arithmetic, this is equivalent to the requested subtraction. - let to_add = u64::MAX - encoded_diff; + /// For popping `num` off the count, subtract the resulting u64. + const fn encode_pop(num: u32) -> u64 { + let subtract_length = num as u64; + // Also subtract one from the generation bit. + subtract_length | Self::GENERATION_LEAST_BIT + } - // add one to the generation. - // Note that if `num` is 0, this will wrap `to_add` to 0, - // which is correct since we aren't adding anything. - // Since we aren't really popping anything either, - // it is perfectly fine to not add to the generation too. - to_add.wrapping_add(1) + /// Returns the count after popping off `num` elements. + const fn pop(self, num: u32) -> Self { + Self(self.0.wrapping_sub(Self::encode_pop(num))) } +} - /// Subtracts `num` from the length, returning the new state. - #[inline] - fn pop_from_state(mut state: u64, num: u32) -> u64 { - state += Self::encode_pop(num); - // prevent generation overflow - state &= !Self::HIGHEST_GENERATION_BIT; - state +/// This stores two things: the length of the buffer (which can be negative) and a generation value to track *any* change to the length. +/// +/// The upper 48 bits store an unsigned integer of the length, and the lower 16 bits store the generation value. 
+/// By keeping the length in the upper bits, we can add anything to them without it affecting the generation bits. +/// See [`Self::encode_pop`] for how this is done. +/// To prevent the generation from ever overflowing into the length, +/// we follow up each operation with a bit-wise `&` to turn of the most significant generation bit, preventing overflow. +/// +/// Finally, to get the signed length from the unsigned 48 bit value, we simply set `u48::MAX - u32::MAX` equal to 0. +/// This is fine since for the length to go over `u32::MAX`, the entity index would first need to be exhausted, ausing a "too many entities" panic. +/// In theory, the length should not drop below `-u32::MAX` since doing so would cause a "too many entities" panic. +/// However, using 48 bits provides a buffer here and allows extra flags like [`Self::DISABLING_BIT`]. +struct FreeCount(AtomicU64); + +impl FreeCount { + /// Constructs a length of 0. + const fn new_zero_len() -> Self { + Self(AtomicU64::new(PackedFreeCount::new_zero_len().0)) } - /// Subtracts `num` from the length, returning the previous state. + /// Gets the current count of the buffer. #[inline] - fn pop_for_state(&self, num: u32) -> u64 { - let state = self.0.fetch_add(Self::encode_pop(num), Ordering::AcqRel); - // This can be relaxed since it only affects the one bit, - // and 2^15 operations would need to happen with this never being called for an overflow to occor. - self.0 - .fetch_and(!Self::HIGHEST_GENERATION_BIT, Ordering::Relaxed); - state + fn count(&self, order: Ordering) -> PackedFreeCount { + PackedFreeCount(self.0.load(order)) } - /// Subtracts `num` from the length, returning the previous length. + /// Subtracts `num` from the length, returning the previous count. + /// + /// **NOTE:** Caller should be careful that changing the count is allowed and that the count is not disabled. #[inline] - fn pop_for_len(&self, num: u32) -> u32 { - Self::len_from_state(self.pop_for_state(num)) + fn pop_for_count(&self, num: u32, order: Ordering) -> PackedFreeCount { + let to_sub = PackedFreeCount::encode_pop(num); + let raw = self.0.fetch_sub(to_sub, order); + PackedFreeCount(raw) } - /// Disables the length completely, returning the previous state. + /// Marks the count as disabled, returning the previous count #[inline] - fn disable_len_for_state(&self) -> u64 { - // We don't care about the generation here since the length is invalid anyway. - // In order to reset length, `set_len` must be called, which handles the generation. - self.0.fetch_add(!Self::DISABLING_BIT, Ordering::AcqRel) + fn disable_len_for_count(&self, order: Ordering) -> PackedFreeCount { + // We don't care about the generation here since this changes the value anyway. + PackedFreeCount(self.0.fetch_or(PackedFreeCount::DISABLING_BIT, order)) } - /// Sets the length explicitly. + /// Sets the length explicitly. Caller must be careful that the length has not changed since getting the count and setting it. #[inline] - fn set_len(&self, len: u32, recent_state: u64) { - let encoded_length = (len as u64 + Self::FALSE_ZERO) << 16; - let recent_generation = recent_state & (u16::MAX as u64 & !Self::HIGHEST_GENERATION_BIT); - - // This effectively adds a 2^14 to the generation, so for recent `recent_state` values, this is very safe. - // It is worth mentioning that doing this back to back will negate it, but in theory, we don't even need this at all. - // If an uneven number of free and alloc calls are made, the length will be different, so the generation is a moot point. 
- // If they are even, then at least one alloc call has been made, which would have incremented the generation in `recent_state`. - // So in all cases, the state is sufficiently changed such that `try_set_state` will fail when needed. - let far_generation = recent_generation ^ (1 << 14); - - let fully_encoded = encoded_length | far_generation; - self.0.store(fully_encoded, Ordering::Release); + fn set_count_risky(&self, count: PackedFreeCount, order: Ordering) { + self.0.store(count.0, order); } - /// Attempts to update the state, returning the new state if it fails. + /// Attempts to update the count, returning the new [`PackedFreeCount`] if it fails. #[inline] - fn try_set_state(&self, expected_current_state: u64, target_state: u64) -> Result<(), u64> { + fn try_set_count( + &self, + expected_current_count: PackedFreeCount, + target_count: PackedFreeCount, + success: Ordering, + failure: Ordering, + ) -> Result<(), PackedFreeCount> { self.0 - .compare_exchange( - expected_current_state, - target_state, - Ordering::AcqRel, - Ordering::Acquire, - ) + .compare_exchange(expected_current_count.0, target_count.0, success, failure) .map(|_| ()) + .map_err(PackedFreeCount) } } @@ -472,7 +464,8 @@ impl FreeList { /// For this to be accurate, this must not be called during a [`Self::free`]. #[inline] unsafe fn num_free(&self) -> u32 { - self.len.len() + // Relaxed would probably be fine here, but this is more precise. + self.len.count(Ordering::Acquire).length() } /// Frees the `entity` allowing it to be reused. @@ -483,10 +476,10 @@ impl FreeList { #[inline] unsafe fn free(&self, entity: Entity) { // Disable remote allocation. - let state = self.len.disable_len_for_state(); + let state = self.len.disable_len_for_count(Ordering::Acquire); // Push onto the buffer - let len = FreeCount::len_from_state(state); + let len = state.length(); // SAFETY: Caller ensures this does not conflict with `free` or `alloc` calls, // and we just disabled remote allocation. unsafe { @@ -494,8 +487,12 @@ impl FreeList { } // Update length - let new_len = len + 1; - self.len.set_len(new_len, state); + let new_state = state.with_length(len + 1); + // This is safe because `alloc` is not being called and `remote_alloc` checks that it is not disabled. + // We don't need to change the generation since this will change the length. + // If, from a `remote_alloc` perspective, this does not change the length (i.e. this changes it *back* to what it was), + // then `alloc` must have been called, which changes the generation. + self.len.set_count_risky(new_state, Ordering::Release); } /// Allocates an [`Entity`] from the free list if one is available. @@ -506,7 +503,7 @@ impl FreeList { #[inline] unsafe fn alloc(&self) -> Option { // SAFETY: This will get a valid index because there is no way for `free` to be done at the same time. - let len = self.len.pop_for_len(1); + let len = self.len.pop_for_count(1, Ordering::AcqRel).length(); let index = len.checked_sub(1)?; // SAFETY: This was less then `len`, so it must have been `set` via `free` before. @@ -521,7 +518,7 @@ impl FreeList { #[inline] unsafe fn alloc_many(&self, count: u32) -> FreeBufferIterator { // SAFETY: This will get a valid index because there is no way for `free` to be done at the same time. 
- let len = self.len.pop_for_len(count); + let len = self.len.pop_for_count(count, Ordering::AcqRel).length(); let index = len.saturating_sub(count); let indices = if index < len { @@ -553,25 +550,28 @@ impl FreeList { // So we need a `len.compare_exchange` loop to ensure the index is unique. // Because we keep a generation value in the `FreeCount`, if any of these things happen, we simply try again. - let mut state = self.len.state(); + let mut state = self.len.count(Ordering::Acquire); loop { // The state is only disabled when freeing. // If a free is happening, we need to wait for the new entity to be ready on the free buffer. // Then, we can allocate it. - if FreeCount::is_state_disabled(state) { + if state.is_disabled() { core::hint::spin_loop(); - state = self.len.state(); + state = self.len.count(Ordering::Acquire); continue; } - let len = FreeCount::len_from_state(state); + let len = state.length(); let index = len.checked_sub(1)?; // SAFETY: This was less then `len`, so it must have been `set` via `free` before. let entity = unsafe { self.buffer.get(index) }; - let ideal_state = FreeCount::pop_from_state(state, 1); - match self.len.try_set_state(state, ideal_state) { + let ideal_state = state.pop(1); + match self + .len + .try_set_count(state, ideal_state, Ordering::AcqRel, Ordering::Acquire) + { Ok(_) => return Some(entity), Err(new_state) => state = new_state, } @@ -891,12 +891,15 @@ mod tests { #[test] fn buffer_len_encoding() { let len = FreeCount::new_zero_len(); - assert_eq!(len.len(), 0); - assert_eq!(len.pop_for_len(200), 0); - len.set_len(5, 0); - assert_eq!(len.pop_for_len(2), 5); - assert_eq!(len.pop_for_len(2), 3); - assert_eq!(len.pop_for_len(2), 1); - assert_eq!(len.pop_for_len(2), 0); + assert_eq!(len.count(Ordering::Relaxed).length(), 0); + assert_eq!(len.pop_for_count(200, Ordering::Relaxed).length(), 0); + len.set_count_risky( + PackedFreeCount::new_zero_len().with_length(5), + Ordering::Relaxed, + ); + assert_eq!(len.pop_for_count(2, Ordering::Relaxed).length(), 5); + assert_eq!(len.pop_for_count(2, Ordering::Relaxed).length(), 3); + assert_eq!(len.pop_for_count(2, Ordering::Relaxed).length(), 1); + assert_eq!(len.pop_for_count(2, Ordering::Relaxed).length(), 0); } } From 797310c5cb1af4ca80e7468c83aeb307f00f6c17 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Sat, 5 Apr 2025 15:06:30 -0400 Subject: [PATCH 079/113] inlining --- crates/bevy_ecs/src/entity/allocator.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 41555bd4067f2..a9f7436aa285f 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -339,29 +339,28 @@ impl PackedFreeCount { } /// Gets the encoded length. + #[inline] const fn length(self) -> u32 { let unsigned_length = self.0 & Self::LENGTH_MASK; unsigned_length.saturating_sub(Self::LENGTH_0) as u32 } /// Returns whether or not the count is disabled. + #[inline] const fn is_disabled(self) -> bool { (self.0 & Self::DISABLING_BIT) > 0 } /// Changes only the length of this count to `length`. + #[inline] const fn with_length(self, length: u32) -> Self { // Just turns on the "considered zero" bit since this is non-negative. let length = length as u64 | Self::LENGTH_0; Self(self.0 & !Self::LENGTH_MASK | length) } - /// Manually changes the generation. 
- const fn change_generation(self) -> Self { - Self(self.0.wrapping_sub(Self::GENERATION_LEAST_BIT)) - } - /// For popping `num` off the count, subtract the resulting u64. + #[inline] const fn encode_pop(num: u32) -> u64 { let subtract_length = num as u64; // Also subtract one from the generation bit. @@ -369,6 +368,7 @@ impl PackedFreeCount { } /// Returns the count after popping off `num` elements. + #[inline] const fn pop(self, num: u32) -> Self { Self(self.0.wrapping_sub(Self::encode_pop(num))) } From b9a94306d34953191a1df3a4d1206cc7e5befdfe Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Sat, 5 Apr 2025 15:21:42 -0400 Subject: [PATCH 080/113] rename for clarity --- crates/bevy_ecs/src/entity/allocator.rs | 80 ++++++++++++------------- 1 file changed, 40 insertions(+), 40 deletions(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index a9f7436aa285f..a73f782fc49b7 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -320,9 +320,9 @@ impl<'a> core::iter::FusedIterator for FreeBufferIterator<'a> {} /// - The 34th bit stores a flag that indicates if the count has been disabled/suspended. /// - The remaining 30 bits are the generation. The generation just differentiate different versions of the state that happen to encode the same length. #[derive(Clone, Copy)] -struct PackedFreeCount(u64); +struct FreeCountState(u64); -impl PackedFreeCount { +impl FreeCountState { /// When this bit is on, the count is disabled. /// This is used to prevent remote allocations from running at the same time as a free operation. const DISABLING_BIT: u64 = 1 << 33; @@ -391,51 +391,51 @@ struct FreeCount(AtomicU64); impl FreeCount { /// Constructs a length of 0. const fn new_zero_len() -> Self { - Self(AtomicU64::new(PackedFreeCount::new_zero_len().0)) + Self(AtomicU64::new(FreeCountState::new_zero_len().0)) } - /// Gets the current count of the buffer. + /// Gets the current state of the buffer. #[inline] - fn count(&self, order: Ordering) -> PackedFreeCount { - PackedFreeCount(self.0.load(order)) + fn state(&self, order: Ordering) -> FreeCountState { + FreeCountState(self.0.load(order)) } - /// Subtracts `num` from the length, returning the previous count. + /// Subtracts `num` from the length, returning the previous state. /// - /// **NOTE:** Caller should be careful that changing the count is allowed and that the count is not disabled. + /// **NOTE:** Caller should be careful that changing the state is allowed and that the state is not disabled. #[inline] - fn pop_for_count(&self, num: u32, order: Ordering) -> PackedFreeCount { - let to_sub = PackedFreeCount::encode_pop(num); + fn pop_for_state(&self, num: u32, order: Ordering) -> FreeCountState { + let to_sub = FreeCountState::encode_pop(num); let raw = self.0.fetch_sub(to_sub, order); - PackedFreeCount(raw) + FreeCountState(raw) } - /// Marks the count as disabled, returning the previous count + /// Marks the state as disabled, returning the previous state #[inline] - fn disable_len_for_count(&self, order: Ordering) -> PackedFreeCount { + fn disable_len_for_state(&self, order: Ordering) -> FreeCountState { // We don't care about the generation here since this changes the value anyway. - PackedFreeCount(self.0.fetch_or(PackedFreeCount::DISABLING_BIT, order)) + FreeCountState(self.0.fetch_or(FreeCountState::DISABLING_BIT, order)) } - /// Sets the length explicitly. Caller must be careful that the length has not changed since getting the count and setting it. 
+ /// Sets the length explicitly. Caller must be careful that the length has not changed since getting the state and setting it. #[inline] - fn set_count_risky(&self, count: PackedFreeCount, order: Ordering) { - self.0.store(count.0, order); + fn set_state_risky(&self, state: FreeCountState, order: Ordering) { + self.0.store(state.0, order); } - /// Attempts to update the count, returning the new [`PackedFreeCount`] if it fails. + /// Attempts to update the state, returning the new [`PackedFreeCount`] if it fails. #[inline] - fn try_set_count( + fn try_set_state( &self, - expected_current_count: PackedFreeCount, - target_count: PackedFreeCount, + expected_current_state: FreeCountState, + target_state: FreeCountState, success: Ordering, failure: Ordering, - ) -> Result<(), PackedFreeCount> { + ) -> Result<(), FreeCountState> { self.0 - .compare_exchange(expected_current_count.0, target_count.0, success, failure) + .compare_exchange(expected_current_state.0, target_state.0, success, failure) .map(|_| ()) - .map_err(PackedFreeCount) + .map_err(FreeCountState) } } @@ -465,7 +465,7 @@ impl FreeList { #[inline] unsafe fn num_free(&self) -> u32 { // Relaxed would probably be fine here, but this is more precise. - self.len.count(Ordering::Acquire).length() + self.len.state(Ordering::Acquire).length() } /// Frees the `entity` allowing it to be reused. @@ -476,7 +476,7 @@ impl FreeList { #[inline] unsafe fn free(&self, entity: Entity) { // Disable remote allocation. - let state = self.len.disable_len_for_count(Ordering::Acquire); + let state = self.len.disable_len_for_state(Ordering::Acquire); // Push onto the buffer let len = state.length(); @@ -492,7 +492,7 @@ impl FreeList { // We don't need to change the generation since this will change the length. // If, from a `remote_alloc` perspective, this does not change the length (i.e. this changes it *back* to what it was), // then `alloc` must have been called, which changes the generation. - self.len.set_count_risky(new_state, Ordering::Release); + self.len.set_state_risky(new_state, Ordering::Release); } /// Allocates an [`Entity`] from the free list if one is available. @@ -503,7 +503,7 @@ impl FreeList { #[inline] unsafe fn alloc(&self) -> Option { // SAFETY: This will get a valid index because there is no way for `free` to be done at the same time. - let len = self.len.pop_for_count(1, Ordering::AcqRel).length(); + let len = self.len.pop_for_state(1, Ordering::AcqRel).length(); let index = len.checked_sub(1)?; // SAFETY: This was less then `len`, so it must have been `set` via `free` before. @@ -518,7 +518,7 @@ impl FreeList { #[inline] unsafe fn alloc_many(&self, count: u32) -> FreeBufferIterator { // SAFETY: This will get a valid index because there is no way for `free` to be done at the same time. - let len = self.len.pop_for_count(count, Ordering::AcqRel).length(); + let len = self.len.pop_for_state(count, Ordering::AcqRel).length(); let index = len.saturating_sub(count); let indices = if index < len { @@ -550,14 +550,14 @@ impl FreeList { // So we need a `len.compare_exchange` loop to ensure the index is unique. // Because we keep a generation value in the `FreeCount`, if any of these things happen, we simply try again. - let mut state = self.len.count(Ordering::Acquire); + let mut state = self.len.state(Ordering::Acquire); loop { // The state is only disabled when freeing. // If a free is happening, we need to wait for the new entity to be ready on the free buffer. // Then, we can allocate it. 
if state.is_disabled() { core::hint::spin_loop(); - state = self.len.count(Ordering::Acquire); + state = self.len.state(Ordering::Acquire); continue; } @@ -570,7 +570,7 @@ impl FreeList { let ideal_state = state.pop(1); match self .len - .try_set_count(state, ideal_state, Ordering::AcqRel, Ordering::Acquire) + .try_set_state(state, ideal_state, Ordering::AcqRel, Ordering::Acquire) { Ok(_) => return Some(entity), Err(new_state) => state = new_state, @@ -891,15 +891,15 @@ mod tests { #[test] fn buffer_len_encoding() { let len = FreeCount::new_zero_len(); - assert_eq!(len.count(Ordering::Relaxed).length(), 0); - assert_eq!(len.pop_for_count(200, Ordering::Relaxed).length(), 0); - len.set_count_risky( - PackedFreeCount::new_zero_len().with_length(5), + assert_eq!(len.state(Ordering::Relaxed).length(), 0); + assert_eq!(len.pop_for_state(200, Ordering::Relaxed).length(), 0); + len.set_state_risky( + FreeCountState::new_zero_len().with_length(5), Ordering::Relaxed, ); - assert_eq!(len.pop_for_count(2, Ordering::Relaxed).length(), 5); - assert_eq!(len.pop_for_count(2, Ordering::Relaxed).length(), 3); - assert_eq!(len.pop_for_count(2, Ordering::Relaxed).length(), 1); - assert_eq!(len.pop_for_count(2, Ordering::Relaxed).length(), 0); + assert_eq!(len.pop_for_state(2, Ordering::Relaxed).length(), 5); + assert_eq!(len.pop_for_state(2, Ordering::Relaxed).length(), 3); + assert_eq!(len.pop_for_state(2, Ordering::Relaxed).length(), 1); + assert_eq!(len.pop_for_state(2, Ordering::Relaxed).length(), 0); } } From 880a98db348f696a047a8ddfef4df6bc67aaacfe Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Sat, 5 Apr 2025 19:54:09 -0400 Subject: [PATCH 081/113] fixed doc --- crates/bevy_ecs/src/entity/allocator.rs | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index a73f782fc49b7..a53e9ba630088 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -374,18 +374,7 @@ impl FreeCountState { } } -/// This stores two things: the length of the buffer (which can be negative) and a generation value to track *any* change to the length. -/// -/// The upper 48 bits store an unsigned integer of the length, and the lower 16 bits store the generation value. -/// By keeping the length in the upper bits, we can add anything to them without it affecting the generation bits. -/// See [`Self::encode_pop`] for how this is done. -/// To prevent the generation from ever overflowing into the length, -/// we follow up each operation with a bit-wise `&` to turn of the most significant generation bit, preventing overflow. -/// -/// Finally, to get the signed length from the unsigned 48 bit value, we simply set `u48::MAX - u32::MAX` equal to 0. -/// This is fine since for the length to go over `u32::MAX`, the entity index would first need to be exhausted, ausing a "too many entities" panic. -/// In theory, the length should not drop below `-u32::MAX` since doing so would cause a "too many entities" panic. -/// However, using 48 bits provides a buffer here and allows extra flags like [`Self::DISABLING_BIT`]. +/// This is an atomic interface to [`FreeCountState`]. struct FreeCount(AtomicU64); impl FreeCount { @@ -423,7 +412,7 @@ impl FreeCount { self.0.store(state.0, order); } - /// Attempts to update the state, returning the new [`PackedFreeCount`] if it fails. + /// Attempts to update the state, returning the new [`FreeCountState`] if it fails. 
#[inline] fn try_set_state( &self, From 24471901cef6ec48d21346897f8766172b600576 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Mon, 7 Apr 2025 15:05:25 -0400 Subject: [PATCH 082/113] provide spinloop escape hatch --- crates/bevy_ecs/src/entity/allocator.rs | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index a53e9ba630088..1992cb9cf9056 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -540,12 +540,27 @@ impl FreeList { // Because we keep a generation value in the `FreeCount`, if any of these things happen, we simply try again. let mut state = self.len.state(Ordering::Acquire); + #[cfg(feature = "std")] + let mut attempts = 0u32; loop { // The state is only disabled when freeing. // If a free is happening, we need to wait for the new entity to be ready on the free buffer. // Then, we can allocate it. if state.is_disabled() { + // Spin 64 times before yielding. + #[cfg(feature = "std")] + if attempts % 64 == 0 { + attempts += 1; + // scheduler probably isn't running the thead doing the `free` call, so yield so it can finish. + std::thread::yield_now(); + } else { + attempts += 1; + core::hint::spin_loop(); + } + + #[cfg(not(feature = "std"))] core::hint::spin_loop(); + state = self.len.state(Ordering::Acquire); continue; } From 4fd8d92b97a6a4b53c5fbc3bf2fae6c24843db69 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Fri, 18 Apr 2025 17:51:52 -0400 Subject: [PATCH 083/113] fixed bevy platform rename --- crates/bevy_ecs/src/entity/allocator.rs | 2 +- crates/bevy_ecs/src/entity/mod.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 1992cb9cf9056..546be66a951fc 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -1,4 +1,4 @@ -use bevy_platform_support::{ +use bevy_platform::{ prelude::Vec, sync::{ atomic::{AtomicBool, AtomicPtr, AtomicU32, AtomicU64, Ordering}, diff --git a/crates/bevy_ecs/src/entity/mod.rs b/crates/bevy_ecs/src/entity/mod.rs index 8f58b1954408f..8cb5a290b99a4 100644 --- a/crates/bevy_ecs/src/entity/mod.rs +++ b/crates/bevy_ecs/src/entity/mod.rs @@ -86,9 +86,9 @@ use crate::{ storage::{SparseSetIndex, TableId, TableRow}, }; use alloc::vec::Vec; -use bevy_platform_support::sync::Arc; +use bevy_platform::sync::Arc; use concurrent_queue::ConcurrentQueue; -use core::{fmt, hash::Hash, mem, num::NonZero, panic::Location}; +use core::{fmt, hash::Hash, num::NonZero, panic::Location}; use log::warn; #[cfg(feature = "serialize")] From 8850a55fae5cf176938d6553ecf04c2d84707968 Mon Sep 17 00:00:00 2001 From: Eagster <79881080+ElliottjPierce@users.noreply.github.com> Date: Sun, 20 Apr 2025 12:37:58 -0400 Subject: [PATCH 084/113] Apply suggestions from code review Co-authored-by: atlv --- crates/bevy_ecs/src/entity/allocator.rs | 27 +++++++++++++------------ 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 546be66a951fc..359f02023b322 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -87,8 +87,8 @@ impl Chunk { /// [`Self::set`] must have been called on this index before, ensuring it is in bounds and the chunk is initialized. 
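Looking back at the spin-loop escape hatch patch 082 above adds to `remote_alloc`: a standalone sketch of the spin/yield pattern (the names and the flag are illustrative). Spin briefly on the assumption that the concurrent `free` finishes quickly, but periodically yield the timeslice so a descheduled writer can make progress; the counter starts at 1 so the first pass spins rather than yields.

use std::sync::atomic::{AtomicBool, Ordering};

fn wait_for(flag: &AtomicBool) {
    let mut attempts = 1u32;
    while !flag.load(Ordering::Acquire) {
        if attempts % 64 == 0 {
            // The writer is probably not scheduled right now; give up our timeslice.
            std::thread::yield_now();
        } else {
            core::hint::spin_loop();
        }
        attempts += 1;
    }
}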
#[inline] unsafe fn get(&self, index: u32) -> Entity { - // Relaxed is fine since caller ensures we are iitialized already. - // In order for the caller to guarantee that, they must have an ordering that orders this get after the required `set`. + // Relaxed is fine since caller ensures we are initialized already. + // In order for the caller to guarantee that, they must have an ordering that orders this `get` after the required `set`. let head = self.first.load(Ordering::Relaxed); // SAFETY: caller ensures we are in bounds and init (because `set` must be in bounds) let target = unsafe { &*head.add(index as usize) }; @@ -106,8 +106,8 @@ impl Chunk { let after_index_slice_len = chunk_capacity - index; let len = after_index_slice_len.min(ideal_len) as usize; - // Relaxed is fine since caller ensures we are iitialized already. - // In order for the caller to guarantee that, they must have an ordering that orders this get after the required `set`. + // Relaxed is fine since caller ensures we are initialized already. + // In order for the caller to guarantee that, they must have an ordering that orders this `get` after the required `set`. let head = self.first.load(Ordering::Relaxed); // SAFETY: Caller ensures we are init, so the chunk was allocated via a `Vec` and the index is within the capacity. @@ -172,16 +172,17 @@ impl Chunk { } } -/// This is a buffer that has been split into chunks, so that each chunk is pinned in memory. -/// Conceptually, each chunk is put end-to-end to form the buffer. -/// This will expand in capacity as needed, but a separate system must track the length of the list in the buffer. +/// This is a buffer that has been split into power-of-two sized chunks, so that each chunk is pinned in memory. +/// Conceptually, each chunk is put end-to-end to form the buffer. This ultimately avoids copying elements on resize, +/// while allowing it to expand in capacity as needed. A separate system must track the length of the list in the buffer. +/// Each chunk is twice as large as the last, except for the first two which have a capacity of 512. struct FreeBuffer([Chunk; Self::NUM_CHUNKS as usize]); impl FreeBuffer { const NUM_CHUNKS: u32 = 24; const NUM_SKIPPED: u32 = u32::BITS - Self::NUM_CHUNKS; - /// Constructs a empty [`FreeBuffer`]. + /// Constructs an empty [`FreeBuffer`]. const fn new() -> Self { Self([const { Chunk::new() }; Self::NUM_CHUNKS as usize]) } @@ -219,7 +220,7 @@ impl FreeBuffer { #[inline] fn index_in_chunk(&self, full_index: u32) -> (&Chunk, u32, u32) { let (chunk_index, index_in_chunk, chunk_capacity) = Self::index_info(full_index); - // SAFETY: The chunk index is correct + // SAFETY: Caller ensures the chunk index is correct let chunk = unsafe { self.0.get_unchecked(chunk_index as usize) }; (chunk, index_in_chunk, chunk_capacity) } @@ -277,7 +278,7 @@ impl Drop for FreeBuffer { /// /// # Safety /// -/// [`FreeBuffer::set`] must have been called on these indices before to initialize memory. +/// [`FreeBuffer::set`] must have been called on these indices beforehand to initialize memory. struct FreeBufferIterator<'a> { buffer: &'a FreeBuffer, indices: core::ops::RangeInclusive, @@ -316,9 +317,9 @@ impl<'a> core::iter::FusedIterator for FreeBufferIterator<'a> {} /// This tracks the state of a [`FreeCount`], which has lots of information packed into it. /// -/// - The first 33 bits store a signed 33 bit integer. This behaves like a u33, but we define 1^33 as 0. +/// - The first 33 bits store a signed 33 bit integer. 
This behaves like a u33, but we define `1 << 33` as 0. /// - The 34th bit stores a flag that indicates if the count has been disabled/suspended. -/// - The remaining 30 bits are the generation. The generation just differentiate different versions of the state that happen to encode the same length. +/// - The remaining 30 bits are the generation. The generation just differentiates different versions of the state that happen to encode the same length. #[derive(Clone, Copy)] struct FreeCountState(u64); @@ -551,7 +552,7 @@ impl FreeList { #[cfg(feature = "std")] if attempts % 64 == 0 { attempts += 1; - // scheduler probably isn't running the thead doing the `free` call, so yield so it can finish. + // scheduler probably isn't running the thread doing the `free` call, so yield so it can finish. std::thread::yield_now(); } else { attempts += 1; From ab9f9fddca6a1958f5b8c06bf2bad2cf4a8a0b3e Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Sun, 20 Apr 2025 13:43:48 -0400 Subject: [PATCH 085/113] remove unneeded parentheses --- crates/bevy_ecs/src/entity/mod.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/crates/bevy_ecs/src/entity/mod.rs b/crates/bevy_ecs/src/entity/mod.rs index 8cb5a290b99a4..ae6cfd31be368 100644 --- a/crates/bevy_ecs/src/entity/mod.rs +++ b/crates/bevy_ecs/src/entity/mod.rs @@ -809,7 +809,9 @@ impl Entities { if let Some(&EntityMeta { generation, .. }) = self.meta.get(idu) { Some(Entity::from_raw_and_generation(index, generation)) } else { - (self.allocator.is_valid_index(index)).then_some(Entity::from_raw(index)) + self.allocator + .is_valid_index(index) + .then_some(Entity::from_raw(index)) } } From 9e7c308eb0655157e0cda9f7eb1e226a7cecf260 Mon Sep 17 00:00:00 2001 From: Eagster <79881080+ElliottjPierce@users.noreply.github.com> Date: Sun, 20 Apr 2025 20:58:46 -0400 Subject: [PATCH 086/113] Clarify todo Co-authored-by: atlv --- crates/bevy_ecs/src/entity/allocator.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 359f02023b322..177c643170108 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -35,7 +35,7 @@ impl Slot { }; } - // TODO: could maybe make this `&mut`?? + // TODO: could maybe make this `&mut` so then we can use a `SyncCell` with no `Slot` atomics #[inline] fn set_entity(&self, entity: Entity) { #[cfg(not(target_has_atomic = "64"))] From f2b991a702d064b74db9af70eae75aa9cc8adb9b Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Sun, 20 Apr 2025 21:02:20 -0400 Subject: [PATCH 087/113] fix wrong comment --- crates/bevy_ecs/src/entity/allocator.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 177c643170108..44ea13facf142 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -192,7 +192,7 @@ impl FreeBuffer { #[inline] fn capacity_of_chunk(chunk_index: u32) -> u32 { // We do this because we're skipping the first `NUM_SKIPPED` powers, so we need to make up for them by doubling the first index. - // This is why the first 2 indices both have a capacity of 256. + // This is why the first 2 indices both have a capacity of 512. let corrected = chunk_index.max(1); // We add NUM_SKIPPED because the total capacity should be as if [`Self::NUM_CHUNKS`] were 32. // This skips the first NUM_SKIPPED powers. 
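A standalone sketch checking the chunk-capacity scheme that patch 087's corrected comment describes (the constants mirror the patch; the test itself is illustrative): skipping the first eight powers of two while doubling chunk 0 yields two 512-entry chunks followed by doubling sizes, and the capacities sum to exactly `u32::MAX + 1`.

const NUM_CHUNKS: u32 = 24;
const NUM_SKIPPED: u32 = u32::BITS - NUM_CHUNKS; // 8

fn capacity_of_chunk(chunk_index: u32) -> u32 {
    // `max(1)` doubles chunk 0 to make up for the skipped powers.
    1 << (chunk_index.max(1) + NUM_SKIPPED)
}

#[test]
fn capacity_sketch() {
    assert_eq!(capacity_of_chunk(0), 512);
    assert_eq!(capacity_of_chunk(1), 512);
    assert_eq!(capacity_of_chunk(2), 1024);
    let total: u64 = (0..NUM_CHUNKS).map(|i| capacity_of_chunk(i) as u64).sum();
    assert_eq!(total, u32::MAX as u64 + 1);
}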
From 4eead350bed5db7f59d974b14e850843ee88dfed Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Sun, 20 Apr 2025 21:43:55 -0400 Subject: [PATCH 088/113] document and justify `Slot` --- crates/bevy_ecs/src/entity/allocator.rs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 44ea13facf142..40cbf0a2e51f5 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -11,6 +11,16 @@ use log::warn; use super::{Entity, EntitySetIterator}; /// This is the item we store in the free list. +/// Effectively, this is a `MaybeUninit` where uninit is represented by `Entity::PLACEHOLDER`. +/// +/// We use atomics internally not for special ordring but for *a* ordering. +/// Conceptually, this could just be `SyncCell`, +/// but accessing that requires additional unsafe justification, and could cause unsound optimizations by the compiler. +/// +/// No [`Slot`] access is ever contested between two threads due to the ordering constraints in the [`FreeCount`]. +/// That also guarantees a proper ordering between slot access. +/// Hence these atomics don't need to account for any synchronization, and relaxed ordring is used everywhere. +// TODO: consider fully justifying `SyncCell` here with no atomics. struct Slot { #[cfg(not(target_has_atomic = "64"))] entity_index: AtomicU32, @@ -35,7 +45,6 @@ impl Slot { }; } - // TODO: could maybe make this `&mut` so then we can use a `SyncCell` with no `Slot` atomics #[inline] fn set_entity(&self, entity: Entity) { #[cfg(not(target_has_atomic = "64"))] From 17506fed46f0ef3937b4d28e2e9c2da775b9ece5 Mon Sep 17 00:00:00 2001 From: Eagster <79881080+ElliottjPierce@users.noreply.github.com> Date: Mon, 21 Apr 2025 12:08:46 -0400 Subject: [PATCH 089/113] Clarify comments Co-authored-by: atlv --- crates/bevy_ecs/src/entity/allocator.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 40cbf0a2e51f5..e51e90b7cad78 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -13,13 +13,13 @@ use super::{Entity, EntitySetIterator}; /// This is the item we store in the free list. /// Effectively, this is a `MaybeUninit` where uninit is represented by `Entity::PLACEHOLDER`. /// -/// We use atomics internally not for special ordring but for *a* ordering. +/// We use atomics internally not for special ordering but for *a* ordering. /// Conceptually, this could just be `SyncCell`, /// but accessing that requires additional unsafe justification, and could cause unsound optimizations by the compiler. /// /// No [`Slot`] access is ever contested between two threads due to the ordering constraints in the [`FreeCount`]. /// That also guarantees a proper ordering between slot access. -/// Hence these atomics don't need to account for any synchronization, and relaxed ordring is used everywhere. +/// Hence these atomics don't need to account for any synchronization, and relaxed ordering is used everywhere. // TODO: consider fully justifying `SyncCell` here with no atomics. struct Slot { #[cfg(not(target_has_atomic = "64"))] @@ -31,7 +31,7 @@ struct Slot { } impl Slot { - /// Produces a meaningless an empty value. This produces a valid but incorrect `Entity`. + /// Produces a meaningless empty value. This is a valid but incorrect `Entity`. 
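A standalone sketch of the justification patch 088 above adds to `Slot` (the type and values are illustrative, not the patch's): every slot starts as a valid-but-meaningless placeholder, and because the free count's acquire/release transitions already order all slot accesses, the per-slot atomics can use `Relaxed` everywhere.

use core::sync::atomic::{AtomicU64, Ordering};

const PLACEHOLDER: u64 = u64::MAX; // stand-in for `Entity::PLACEHOLDER`

struct SlotSketch(AtomicU64);

impl SlotSketch {
    fn empty() -> Self {
        // A valid bit pattern with a garbage meaning: reading it is defined, just wrong.
        Self(AtomicU64::new(PLACEHOLDER))
    }
    fn set(&self, bits: u64) {
        // Relaxed is enough: the surrounding count synchronizes slot accesses.
        self.0.store(bits, Ordering::Relaxed);
    }
    fn get(&self) -> u64 {
        self.0.load(Ordering::Relaxed)
    }
}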
fn empty() -> Self { let source = Entity::PLACEHOLDER; #[cfg(not(target_has_atomic = "64"))] @@ -56,7 +56,7 @@ impl Slot { self.inner_entity.store(entity.to_bits(), Ordering::Relaxed); } - /// Gets the stored entity. The result be [`Entity::PLACEHOLDER`] unless [`set_entity`](Self::set_entity) has been called. + /// Gets the stored entity. The result will be [`Entity::PLACEHOLDER`] unless [`set_entity`](Self::set_entity) has been called. #[inline] fn get_entity(&self) -> Entity { #[cfg(not(target_has_atomic = "64"))] From 1fdbdfac9840f8156e330ff9b4336318e3824387 Mon Sep 17 00:00:00 2001 From: Eagster <79881080+ElliottjPierce@users.noreply.github.com> Date: Mon, 21 Apr 2025 12:12:49 -0400 Subject: [PATCH 090/113] Fix more comments Co-authored-by: atlv --- crates/bevy_ecs/src/entity/allocator.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index e51e90b7cad78..b7c4b07f18e62 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -509,7 +509,7 @@ impl FreeList { Some(unsafe { self.buffer.get(index) }) } - /// Allocates an as many [`Entity`]s from the free list as are available, up to `count`. + /// Allocates as many [`Entity`]s from the free list as are available, up to `count`. /// /// # Safety /// @@ -533,7 +533,7 @@ impl FreeList { } }; - // SAFETY: The indices are all less then the length. + // SAFETY: The indices are all less than the length. unsafe { self.buffer.iter(indices) } } @@ -578,7 +578,7 @@ impl FreeList { let len = state.length(); let index = len.checked_sub(1)?; - // SAFETY: This was less then `len`, so it must have been `set` via `free` before. + // SAFETY: This was less than `len`, so it must have been `set` via `free` before. let entity = unsafe { self.buffer.get(index) }; let ideal_state = state.pop(1); @@ -769,7 +769,7 @@ impl Allocator { pub unsafe fn alloc_many_unsafe(&self, count: u32) -> AllocEntitiesIterator<'static> { // SAFETY: Caller ensures this instance is valid until the returned value is dropped. let this: &'static Self = unsafe { &*core::ptr::from_ref(self) }; - // SAFETY: Caller ensures free is not called. + // SAFETY: Caller ensures free is not called. unsafe { this.shared.alloc_many(count) } } } From 81658309403db9a7ba6dac1bf2b6b0eb577af308 Mon Sep 17 00:00:00 2001 From: Eagster <79881080+ElliottjPierce@users.noreply.github.com> Date: Mon, 21 Apr 2025 12:14:15 -0400 Subject: [PATCH 091/113] Fix even more comments Co-authored-by: atlv --- crates/bevy_ecs/src/entity/mod.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/bevy_ecs/src/entity/mod.rs b/crates/bevy_ecs/src/entity/mod.rs index ae6cfd31be368..502674f2902b4 100644 --- a/crates/bevy_ecs/src/entity/mod.rs +++ b/crates/bevy_ecs/src/entity/mod.rs @@ -628,7 +628,7 @@ impl Entities { } /// Allocate an entity ID directly. - /// Caller is responsible to set the [`EntityLocation`] if desierd, + /// Caller is responsible for setting the [`EntityLocation`] if desired, /// which must be done before [`get`](Self::get)ing its [`EntityLocation`]. pub fn alloc(&self) -> Entity { self.allocator.alloc() @@ -770,11 +770,11 @@ impl Entities { meta.location = location; } - /// Get's the meta for this index mutably, creating it if it did not exist. + /// Gets the meta for this index mutably, creating it if it did not exist. 
/// - /// # Safetey + /// # Safety /// - /// `idnex` must be a valid index + /// `index` must be a valid index #[inline] unsafe fn force_get_meta_mut(&mut self, index: usize) -> &mut EntityMeta { if index >= self.meta.len() { From e0e5ffd2792572760d3a4d9e449546a64fd3977b Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Mon, 21 Apr 2025 12:16:18 -0400 Subject: [PATCH 092/113] clarify concurrency comments --- crates/bevy_ecs/src/entity/allocator.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 40cbf0a2e51f5..c8c816bec5926 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -127,7 +127,7 @@ impl Chunk { /// /// # Safety /// - /// This must not be called concurrently. + /// This must not be called concurrently with itself. /// Index must be in bounds. /// Access does not conflict with another [`Self::get`]. #[inline] @@ -151,7 +151,7 @@ impl Chunk { /// /// # Safety /// - /// This must not be called concurrently. + /// This must not be called concurrently with itself. #[cold] unsafe fn init(&self, chunk_capacity: u32) -> *mut Slot { let mut buff = ManuallyDrop::new(Vec::new()); @@ -167,7 +167,7 @@ impl Chunk { /// /// # Safety /// - /// This must not be called concurrently. + /// This must not be called concurrently with itself. /// `chunk_capacity` must be the same as it was initialized with. unsafe fn dealloc(&self, chunk_capacity: u32) { // Relaxed is fine here since this is not called concurrently. @@ -249,7 +249,7 @@ impl FreeBuffer { /// /// # Safety /// - /// This must not be called concurrently. + /// This must not be called concurrently with itself. /// Access does not conflict with another [`Self::get`]. #[inline] unsafe fn set(&self, full_index: u32, entity: Entity) { From 10932c792d7dde35cbdca79637854776deeff51c Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Mon, 21 Apr 2025 12:29:47 -0400 Subject: [PATCH 093/113] clarify what empty slot means --- crates/bevy_ecs/src/entity/allocator.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 1e90bae538b90..71180993df834 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -32,6 +32,11 @@ struct Slot { impl Slot { /// Produces a meaningless empty value. This is a valid but incorrect `Entity`. + /// It's valid because the bits do represent a valid bit pattern of an `Entity`. + /// It's incorrect because this is in the free buffer even though the entity was never freed. + /// Importantly, [`FreeCount`] determines which part of the free buffer is the free list. + /// An empty slot may be in the free buffer, but should not be in the free list. + /// This can be thought of as the `MaybeUninit` uninit in `Vec`'s excess capacity. 
fn empty() -> Self { let source = Entity::PLACEHOLDER; #[cfg(not(target_has_atomic = "64"))] From 1ce4951e32fca62fd51da316ec96afdba082463e Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Thu, 8 May 2025 11:52:49 -0400 Subject: [PATCH 094/113] fix Entities --- crates/bevy_ecs/src/entity/mod.rs | 55 +++++++++++++++++-------------- 1 file changed, 31 insertions(+), 24 deletions(-) diff --git a/crates/bevy_ecs/src/entity/mod.rs b/crates/bevy_ecs/src/entity/mod.rs index 2956b3fceb22a..ad1e18a0bf700 100644 --- a/crates/bevy_ecs/src/entity/mod.rs +++ b/crates/bevy_ecs/src/entity/mod.rs @@ -85,7 +85,13 @@ use crate::{ use alloc::vec::Vec; use bevy_platform::sync::Arc; use concurrent_queue::ConcurrentQueue; -use core::{fmt, hash::Hash, num::NonZero, panic::Location}; +use core::{ + fmt, + hash::Hash, + mem::{self, MaybeUninit}, + num::NonZero, + panic::Location, +}; use log::warn; #[cfg(feature = "serialize")] @@ -765,7 +771,7 @@ impl Entities { ) -> allocator::AllocEntitiesIterator<'static> { self.allocator.alloc_many_unsafe(count) } - + /// This is the same as [`free`](Entities::free), but it allows skipping some generations. /// When the entity is reused, it will have a generation greater than the current generation + `generations`. #[inline] @@ -774,7 +780,7 @@ impl Entities { entity: Entity, generations: u32, ) -> Option { - let theoretical = self.resolve_from_id(entity.index()); + let theoretical = self.resolve_from_id(entity.row()); if theoretical.is_none_or(|theoretcal| theoretcal != entity) { return None; } @@ -782,18 +788,19 @@ impl Entities { // SAFETY: We resolved its id to ensure it is valid. let meta = unsafe { self.force_get_meta_mut(entity.index() as usize) }; let prev_generation = meta.generation; + let (new_generation, aliased) = prev_generation.after_versions_and_could_alias(generations); - meta.generation = IdentifierMask::inc_masked_high_by(meta.generation, 1 + generations); + meta.generation = new_generation; - if prev_generation > meta.generation || generations == u32::MAX { + if aliased { warn!( "Entity({}) generation wrapped on Entities::free, aliasing may occur", entity.row() ); } - let new_entity = Entity::from_raw_and_generation(entity.index, meta.generation); - let loc = core::mem::replace(&mut meta.location, EntityLocation::INVALID); + let new_entity = Entity::from_raw_and_generation(entity.row(), meta.generation); + let loc = mem::replace(&mut meta.location, EntityLocation::INVALID); self.allocator.free(new_entity); Some(loc) @@ -891,14 +898,14 @@ impl Entities { /// Note that [`contains`](Entities::contains) will correctly return false for freed /// entities, since it checks the generation #[inline] - pub fn resolve_from_id(&self, index: u32) -> Option { - let idu = index as usize; + pub fn resolve_from_id(&self, row: EntityRow) -> Option { + let idu = row.index() as usize; if let Some(&EntityMeta { generation, .. }) = self.meta.get(idu) { Some(Entity::from_raw_and_generation(row, generation)) } else { self.allocator - .is_valid_index(index) - .then_some(Entity::from_raw(index)) + .is_valid_index(row) + .then_some(Entity::from_raw(row)) } } @@ -968,19 +975,19 @@ impl Entities { self.len() == 0 } - /// Sets the source code location from which this entity has last been spawned - /// or despawned. 
- #[inline] - pub(crate) fn set_spawned_or_despawned_by(&mut self, index: u32, caller: MaybeLocation) { - caller.map(|caller| { - if !self.allocator.is_valid_index(index) { - panic!("Entity index invalid") - } - // SAFETY: We just checked that it is valid - let meta = unsafe { self.force_get_meta_mut(index as usize) }; - meta.spawned_or_despawned_by = MaybeLocation::new(Some(caller)); - }); - } + // /// Sets the source code location from which this entity has last been spawned + // /// or despawned. + // #[inline] + // pub(crate) fn set_spawned_or_despawned_by(&mut self, index: u32, caller: MaybeLocation) { + // caller.map(|caller| { + // if !self.allocator.is_valid_index(index) { + // panic!("Entity index invalid") + // } + // // SAFETY: We just checked that it is valid + // let meta = unsafe { self.force_get_meta_mut(index as usize) }; + // meta.spawned_or_despawned_by = MaybeLocation::new(Some(caller)); + // }); + // } /// Returns the source code location from which this entity has last been spawned /// or despawned. Returns `None` if its index has been reused by another entity From 469589931a41063feefd46b716ba8a1ca39c6c57 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Thu, 8 May 2025 11:57:40 -0400 Subject: [PATCH 095/113] pushed problems to allocator --- crates/bevy_ecs/src/entity/allocator.rs | 4 +-- crates/bevy_ecs/src/entity/mod.rs | 35 +++++++++++++++---------- 2 files changed, 23 insertions(+), 16 deletions(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 71180993df834..7463dc84d5881 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -8,7 +8,7 @@ use bevy_platform::{ use core::mem::ManuallyDrop; use log::warn; -use super::{Entity, EntitySetIterator}; +use super::{Entity, EntityRow, EntitySetIterator}; /// This is the item we store in the free list. /// Effectively, this is a `MaybeUninit` where uninit is represented by `Entity::PLACEHOLDER`. @@ -744,7 +744,7 @@ impl Allocator { /// Returns whether or not the index is valid in this allocator. #[inline] - pub fn is_valid_index(&self, index: u32) -> bool { + pub fn is_valid_row(&self, row: EntityRow) -> bool { (index as u64) < self.total_entity_indices() } diff --git a/crates/bevy_ecs/src/entity/mod.rs b/crates/bevy_ecs/src/entity/mod.rs index ad1e18a0bf700..ad1a261e681c1 100644 --- a/crates/bevy_ecs/src/entity/mod.rs +++ b/crates/bevy_ecs/src/entity/mod.rs @@ -864,6 +864,27 @@ impl Entities { meta.location = location; } + /// Updates the location of an [`Entity`]. This must be called when moving the components of + /// the spawned or despawned entity around in storage. + /// + /// # Safety + /// - `index` must be a valid entity index. + /// - `location` must be valid for the entity at `index` or immediately made valid afterwards + /// before handing control to unknown code. + #[inline] + pub(crate) unsafe fn set_spawn_despawn( + &mut self, + index: u32, + location: EntityLocation, + by: MaybeLocation, + at: Tick, + ) { + // SAFETY: Caller guarantees that `index` a valid entity index + let meta = unsafe { self.force_get_meta_mut(index as usize) }; + meta.location = location; + meta.spawned_or_despawned = MaybeUninit::new(SpawnedOrDespawned { by, at }); + } + /// Gets the meta for this index mutably, creating it if it did not exist. /// /// # Safety @@ -975,20 +996,6 @@ impl Entities { self.len() == 0 } - // /// Sets the source code location from which this entity has last been spawned - // /// or despawned. 
- // #[inline] - // pub(crate) fn set_spawned_or_despawned_by(&mut self, index: u32, caller: MaybeLocation) { - // caller.map(|caller| { - // if !self.allocator.is_valid_index(index) { - // panic!("Entity index invalid") - // } - // // SAFETY: We just checked that it is valid - // let meta = unsafe { self.force_get_meta_mut(index as usize) }; - // meta.spawned_or_despawned_by = MaybeLocation::new(Some(caller)); - // }); - // } - /// Returns the source code location from which this entity has last been spawned /// or despawned. Returns `None` if its index has been reused by another entity /// or if this entity has never existed. From d5b95665c6fe4b6bbd0b6335ed17fcb6a2b50350 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Thu, 8 May 2025 12:10:14 -0400 Subject: [PATCH 096/113] fixed all the errors --- crates/bevy_ecs/src/entity/allocator.rs | 66 ++++++++++--------------- crates/bevy_ecs/src/entity/mod.rs | 8 +-- 2 files changed, 31 insertions(+), 43 deletions(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 7463dc84d5881..e2ecb2bfa63d6 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -7,6 +7,7 @@ use bevy_platform::{ }; use core::mem::ManuallyDrop; use log::warn; +use nonmax::NonMaxU32; use super::{Entity, EntityRow, EntitySetIterator}; @@ -604,9 +605,6 @@ struct SharedAllocator { free: FreeList, /// The next value of [`Entity::index`] to give out if needed. next_entity_index: AtomicU32, - /// If true, the [`Self::next_entity_index`] has been incremented before, - /// so if it hits or passes zero again, an overflow has occored. - entity_index_given: AtomicBool, /// Tracks whether or not the primary [`Allocator`] has been closed or not. is_closed: AtomicBool, } @@ -617,45 +615,33 @@ impl SharedAllocator { Self { free: FreeList::new(), next_entity_index: AtomicU32::new(0), - entity_index_given: AtomicBool::new(false), is_closed: AtomicBool::new(false), } } /// The total number of indices given out. #[inline] - fn total_entity_indices(&self) -> u64 { - let next = self.next_entity_index.load(Ordering::Relaxed); - if next == 0 { - if self.entity_index_given.load(Ordering::Relaxed) { - // every index has been given - u32::MAX as u64 + 1 - } else { - // no index has been given - 0 - } - } else { - next as u64 - } + fn total_entity_indices(&self) -> u32 { + self.next_entity_index.load(Ordering::Relaxed) } - /// Call this when the entity index is suspected to have overflown. - /// Panic if the overflow did happen. + /// This just panics. + /// It is included to help with branch prediction, and put the panic message in one spot. #[cold] - fn check_overflow(&self) { - if self.entity_index_given.swap(true, Ordering::AcqRel) { - panic!("too many entities") - } + #[inline] + fn on_overflow() -> ! { + panic!("too many entities") } /// Allocates an [`Entity`] with a brand new index. #[inline] fn alloc_new_index(&self) -> Entity { let index = self.next_entity_index.fetch_add(1, Ordering::Relaxed); - if index == 0 { - self.check_overflow(); + if index == u32::MAX { + Self::on_overflow(); } - Entity::from_raw(index) + // SAFETY: We just checked that this was not max. + unsafe { Entity::from_raw(EntityRow::new(NonMaxU32::new_unchecked(index))) } } /// Allocates a new [`Entity`], reusing a freed index if one exists. 
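A sketch of the overflow sentinel the hunk above relies on, written as a checked variant of the patch's unchecked code (illustrative): since `u32::MAX` is not representable as a `NonMaxU32` row, the counter reaching it doubles as the "every index is spent" signal.

use core::sync::atomic::{AtomicU32, Ordering};
use nonmax::NonMaxU32;

fn alloc_new_index_sketch(next_entity_index: &AtomicU32) -> NonMaxU32 {
    let index = next_entity_index.fetch_add(1, Ordering::Relaxed);
    // `new` returns `None` exactly when `index == u32::MAX`.
    match NonMaxU32::new(index) {
        Some(row) => row,
        None => panic!("too many entities"),
    }
}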
@@ -680,12 +666,10 @@ impl SharedAllocator {
         let missing = count - reused.len() as u32;
         let start_new = self.next_entity_index.fetch_add(missing, Ordering::Relaxed);
 
-        let new_next_entity_index = start_new + missing;
-        if new_next_entity_index < missing || start_new == 0 {
-            self.check_overflow();
-        }
-
-        let new = start_new..=(start_new + missing - 1);
+        let new = match start_new.checked_add(missing) {
+            Some(new_next_entity_index) => start_new..new_next_entity_index,
+            None => Self::on_overflow(),
+        };
 
         AllocEntitiesIterator { new, reused }
     }
@@ -731,7 +715,7 @@ impl Allocator {
 
     /// The total number of indices given out.
     #[inline]
-    pub fn total_entity_indices(&self) -> u64 {
+    pub fn total_entity_indices(&self) -> u32 {
         self.shared.total_entity_indices()
     }
 
@@ -745,7 +729,7 @@ impl Allocator {
     /// Returns whether or not the index is valid in this allocator.
     #[inline]
     pub fn is_valid_row(&self, row: EntityRow) -> bool {
-        (index as u64) < self.total_entity_indices()
+        row.index() < self.total_entity_indices()
     }
 
     /// Frees the entity allowing it to be reused.
@@ -798,7 +782,7 @@ impl core::fmt::Debug for Allocator {
 ///
 /// **NOTE:** Dropping will leak the remaining entities!
 pub struct AllocEntitiesIterator<'a> {
-    new: core::ops::RangeInclusive<u32>,
+    new: core::ops::Range<u32>,
     reused: FreeBufferIterator<'a>,
 }
 
@@ -806,13 +790,17 @@ impl<'a> Iterator for AllocEntitiesIterator<'a> {
     type Item = Entity;
 
     fn next(&mut self) -> Option<Entity> {
-        self.reused
-            .next()
-            .or_else(|| self.new.next().map(Entity::from_raw))
+        self.reused.next().or_else(|| {
+            self.new.next().map(|idx| {
+                // SAFETY: This came from an *exclusive* range. It can never be max.
+                let row = unsafe { EntityRow::new(NonMaxU32::new_unchecked(idx)) };
+                Entity::from_raw(row)
+            })
+        })
     }
 
     fn size_hint(&self) -> (usize, Option<usize>) {
-        let len = self.reused.len() + self.new.end().saturating_sub(*self.new.end()) as usize;
+        let len = self.reused.len() + self.new.len();
         (len, Some(len))
     }
 }
diff --git a/crates/bevy_ecs/src/entity/mod.rs b/crates/bevy_ecs/src/entity/mod.rs
index ad1a261e681c1..f851bfb867c18 100644
--- a/crates/bevy_ecs/src/entity/mod.rs
+++ b/crates/bevy_ecs/src/entity/mod.rs
@@ -925,7 +925,7 @@ impl Entities {
             Some(Entity::from_raw_and_generation(row, generation))
         } else {
             self.allocator
-                .is_valid_index(row)
+                .is_valid_row(row)
                 .then_some(Entity::from_raw(row))
         }
     }
@@ -980,14 +980,14 @@ impl Entities {
     ///
     /// [`World`]: crate::world::World
     #[inline]
-    pub fn total_count(&self) -> u64 {
+    pub fn total_count(&self) -> u32 {
         self.allocator.total_entity_indices()
     }
 
     /// The count of currently allocated entities.
     #[inline]
-    pub fn len(&self) -> u64 {
-        self.allocator.total_entity_indices() - self.allocator.num_free() as u64
+    pub fn len(&self) -> u32 {
+        self.allocator.total_entity_indices() - self.allocator.num_free()
    }
 
     /// Checks if any entity is currently active.
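
To make the overflow handling this commit settles on easier to follow outside the full diff, here is a minimal, self-contained sketch of the same pattern. `IndexCounter` and the `main` driver are invented for illustration and are not code from this series; the real logic lives in `SharedAllocator::alloc_new_index` and `alloc_many`, and wraps rows in `NonMaxU32` rather than handing out bare `u32`s.

```rust
use std::sync::atomic::{AtomicU32, Ordering};

/// Illustrative stand-in for the counter inside `SharedAllocator`.
struct IndexCounter {
    next: AtomicU32,
}

impl IndexCounter {
    /// Hands out 0, 1, 2, ... in order. `u32::MAX` is reserved (the real
    /// allocator stores rows as `NonMaxU32`), so reaching it means overflow.
    fn alloc(&self) -> u32 {
        let index = self.next.fetch_add(1, Ordering::Relaxed);
        if index == u32::MAX {
            // The counter has wrapped, but the reserved value is never
            // returned: we panic before handing it out.
            panic!("too many entities");
        }
        index
    }

    /// Batch allocation: one `fetch_add` reserves the whole block, and
    /// `checked_add` detects wraparound without any extra flag.
    fn alloc_many(&self, count: u32) -> std::ops::Range<u32> {
        let start = self.next.fetch_add(count, Ordering::Relaxed);
        match start.checked_add(count) {
            // Exclusive end, so `u32::MAX` itself is never yielded.
            Some(end) => start..end,
            None => panic!("too many entities"),
        }
    }

    /// Because no index is ever skipped, the counter *is* the total number
    /// of indices handed out, which is why the separate
    /// `entity_index_given` flag could be deleted.
    fn total(&self) -> u32 {
        self.next.load(Ordering::Relaxed)
    }
}

fn main() {
    let counter = IndexCounter { next: AtomicU32::new(0) };
    assert_eq!(counter.alloc(), 0);
    assert_eq!(counter.alloc_many(3), 1..4);
    assert_eq!(counter.total(), 4);
}
```

The payoff is that overflow detection needs no second atomic: a single `fetch_add` both reserves indices and, via the reserved sentinel, exposes exhaustion.
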
From 3bf8506ed81e2c0d1ee3933348b49ddf07b4ca0a Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Thu, 8 May 2025 12:34:11 -0400 Subject: [PATCH 097/113] small fixes --- crates/bevy_ecs/src/entity/allocator.rs | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index e2ecb2bfa63d6..2e79ee44e30b6 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -332,7 +332,7 @@ impl<'a> core::iter::FusedIterator for FreeBufferIterator<'a> {} /// This tracks the state of a [`FreeCount`], which has lots of information packed into it. /// -/// - The first 33 bits store a signed 33 bit integer. This behaves like a u33, but we define `1 << 33` as 0. +/// - The first 33 bits store a signed 33 bit integer. This behaves like a u33, but we define `1 << 32` as 0. /// - The 34th bit stores a flag that indicates if the count has been disabled/suspended. /// - The remaining 30 bits are the generation. The generation just differentiates different versions of the state that happen to encode the same length. #[derive(Clone, Copy)] @@ -422,7 +422,8 @@ impl FreeCount { FreeCountState(self.0.fetch_or(FreeCountState::DISABLING_BIT, order)) } - /// Sets the length explicitly. Caller must be careful that the length has not changed since getting the state and setting it. + /// Sets the state explicitly. + /// Caller must be careful that the state has not changed since getting the state and setting it. #[inline] fn set_state_risky(&self, state: FreeCountState, order: Ordering) { self.0.store(state.0, order); @@ -557,7 +558,7 @@ impl FreeList { let mut state = self.len.state(Ordering::Acquire); #[cfg(feature = "std")] - let mut attempts = 0u32; + let mut attempts = 1u32; loop { // The state is only disabled when freeing. // If a free is happening, we need to wait for the new entity to be ready on the free buffer. @@ -744,7 +745,7 @@ impl Allocator { /// Allocates `count` entities in an iterator. #[inline] pub fn alloc_many(&self, count: u32) -> AllocEntitiesIterator { - // SAFETY: `free` takes `&mut self`, but this lifetime is captured by the iterator. + // SAFETY: `free` takes `&mut self`, and this lifetime is captured by the iterator. unsafe { self.shared.alloc_many(count) } } @@ -841,9 +842,6 @@ impl RemoteAllocator { } /// Allocates an entity remotely. - /// This is not guaranteed to reuse a freed entity, even if one exists. - /// - /// This will return [`None`] if the source [`Allocator`] is destroyed. #[inline] pub fn alloc(&self) -> Entity { self.shared.remote_alloc() From 98d4ddcd77fd3b5796b02392f62ea84e5bcb34b4 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Thu, 8 May 2025 12:42:24 -0400 Subject: [PATCH 098/113] fix free list iterator --- crates/bevy_ecs/src/entity/allocator.rs | 38 +++++++++++-------------- 1 file changed, 16 insertions(+), 22 deletions(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 2e79ee44e30b6..eee2859927ceb 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -9,6 +9,8 @@ use core::mem::ManuallyDrop; use log::warn; use nonmax::NonMaxU32; +use crate::query::DebugCheckedUnwrap; + use super::{Entity, EntityRow, EntitySetIterator}; /// This is the item we store in the free list. @@ -270,7 +272,7 @@ impl FreeBuffer { /// /// [`Self::set`] must have been called on these indices before to initialize memory. 
     #[inline]
-    unsafe fn iter(&self, indices: core::ops::RangeInclusive<u32>) -> FreeBufferIterator {
+    unsafe fn iter(&self, indices: core::ops::Range<u32>) -> FreeBufferIterator {
         FreeBufferIterator {
             buffer: self,
             indices,
@@ -296,7 +298,8 @@ impl Drop for FreeBuffer {
 /// [`FreeBuffer::set`] must have been called on these indices beforehand to initialize memory.
 struct FreeBufferIterator<'a> {
     buffer: &'a FreeBuffer,
-    indices: core::ops::RangeInclusive<u32>,
+    /// The indices in the buffer that are not in `current` yet.
+    indices: core::ops::Range<u32>,
     current: core::slice::Iter<'a, Slot>,
 }
 
@@ -309,20 +312,24 @@ impl<'a> Iterator for FreeBufferIterator<'a> {
         return Some(found.get_entity());
     }
 
+        let still_need = self.indices.len() as u32;
         let next_index = self.indices.next()?;
         let (chunk, index, chunk_capacity) = self.buffer.index_in_chunk(next_index);
 
         // SAFETY: Assured by constructor
-        let slice = unsafe { chunk.get_slice(index, self.len() as u32 + 1, chunk_capacity) };
-        self.indices = (*self.indices.start() + slice.len() as u32 - 1)..=(*self.indices.end());
-
+        let slice = unsafe { chunk.get_slice(index, still_need, chunk_capacity) };
+        self.indices.start += slice.len() as u32;
         self.current = slice.iter();
-        Some(self.current.next()?.get_entity())
+
+        // SAFETY: Constructor ensures these indices are valid in the buffer; the buffer is not sparse, and we just got the next slice.
+        // So the only way for the slice to be empty is if the constructor did not uphold safety.
+        let next = unsafe { self.current.next().debug_checked_unwrap() };
+        Some(next.get_entity())
     }
 
     #[inline]
     fn size_hint(&self) -> (usize, Option<usize>) {
-        let len = self.indices.end().saturating_sub(*self.indices.start()) as usize;
+        let len = self.indices.len() + self.current.len();
         (len, Some(len))
     }
 }
@@ -527,21 +534,8 @@ impl FreeList {
         let len = self.len.pop_for_state(count, Ordering::AcqRel).length();
         let index = len.saturating_sub(count);
 
-        let indices = if index < len {
-            let end = len - 1;
-            index..=end
-        } else {
-            #[expect(
-                clippy::reversed_empty_ranges,
-                reason = "We intentionally need an empty range"
-            )]
-            {
-                1..=0
-            }
-        };
-
-        // SAFETY: The indices are all less than the length.
-        unsafe { self.buffer.iter(indices) }
+        // SAFETY: The iterator's items are all less than the length.
+        unsafe { self.buffer.iter(index..len) }
     }
 
     /// Allocates an [`Entity`] from the free list if one is available and it is safe to do so.
From 285387812ba22ebb40e7c5bee1041c924ac142e4 Mon Sep 17 00:00:00 2001
From: Elliott Pierce
Date: Thu, 8 May 2025 13:00:34 -0400
Subject: [PATCH 099/113] tests pass

---
 crates/bevy_ecs/src/entity/allocator.rs | 27 +++++++++++++++++++++++++
 crates/bevy_ecs/src/entity/mod.rs       |  5 +++--
 2 files changed, 30 insertions(+), 2 deletions(-)

diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs
index eee2859927ceb..acd59337cf9a2 100644
--- a/crates/bevy_ecs/src/entity/allocator.rs
+++ b/crates/bevy_ecs/src/entity/allocator.rs
@@ -860,6 +860,8 @@ mod tests {
         .map(FreeBuffer::capacity_of_chunk)
         .map(|x| x as u64)
         .sum();
+        // The last 2 won't be used, but that's ok.
+        // Keeping them powers of 2 makes things faster.
         let expected = u32::MAX as u64 + 1;
         assert_eq!(total, expected);
     }
@@ -901,4 +903,29 @@ mod tests {
         assert_eq!(len.pop_for_state(2, Ordering::Relaxed).length(), 1);
         assert_eq!(len.pop_for_state(2, Ordering::Relaxed).length(), 0);
     }
+
+    #[test]
+    fn uniqueness() {
+        let mut entities = Vec::with_capacity(2000);
+        let mut allocator = Allocator::new();
+        entities.extend(allocator.alloc_many(1000));
+
+        let pre_len = entities.len();
+        entities.dedup();
+        assert_eq!(pre_len, entities.len());
+
+        for e in entities.drain(..) {
+            allocator.free(e);
+        }
+
+        entities.extend(allocator.alloc_many(500));
+        for _ in 0..1000 {
+            entities.push(allocator.alloc());
+        }
+        entities.extend(allocator.alloc_many(500));
+
+        let pre_len = entities.len();
+        entities.dedup();
+        assert_eq!(pre_len, entities.len());
+    }
 }
diff --git a/crates/bevy_ecs/src/entity/mod.rs b/crates/bevy_ecs/src/entity/mod.rs
index f851bfb867c18..b57d656301c2e 100644
--- a/crates/bevy_ecs/src/entity/mod.rs
+++ b/crates/bevy_ecs/src/entity/mod.rs
@@ -788,7 +788,8 @@ impl Entities {
         // SAFETY: We resolved its id to ensure it is valid.
         let meta = unsafe { self.force_get_meta_mut(entity.index() as usize) };
         let prev_generation = meta.generation;
-        let (new_generation, aliased) = prev_generation.after_versions_and_could_alias(generations);
+        let (new_generation, aliased) =
+            prev_generation.after_versions_and_could_alias(generations + 1);
 
         meta.generation = new_generation;
 
@@ -808,7 +809,7 @@ impl Entities {
 
     /// Destroy an entity, allowing it to be reused.
     pub fn free(&mut self, entity: Entity) -> Option<EntityLocation> {
-        self.free_current_and_future_generations(entity, 1)
+        self.free_current_and_future_generations(entity, 0)
     }
 
     /// Prepares for `additional` allocations/reservations.
From 92be1308bb5aa2ef837836b2bae3e8f452d0c84b Mon Sep 17 00:00:00 2001
From: Elliott Pierce
Date: Thu, 8 May 2025 13:13:38 -0400
Subject: [PATCH 100/113] use AllocUniqueEntitiyRowIterator

---
 crates/bevy_ecs/src/entity/allocator.rs | 68 +++++++++++++++++--------
 1 file changed, 48 insertions(+), 20 deletions(-)

diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs
index acd59337cf9a2..90a010862bdd8 100644
--- a/crates/bevy_ecs/src/entity/allocator.rs
+++ b/crates/bevy_ecs/src/entity/allocator.rs
@@ -628,15 +628,25 @@ impl SharedAllocator {
         panic!("too many entities")
     }
 
-    /// Allocates an [`Entity`] with a brand new index.
+    /// Allocates a fresh [`EntityRow`]. This row has never been given out before.
     #[inline]
-    fn alloc_new_index(&self) -> Entity {
+    pub(crate) fn alloc_unique_entity_row(&self) -> EntityRow {
         let index = self.next_entity_index.fetch_add(1, Ordering::Relaxed);
         if index == u32::MAX {
             Self::on_overflow();
         }
         // SAFETY: We just checked that this was not max.
-        unsafe { Entity::from_raw(EntityRow::new(NonMaxU32::new_unchecked(index))) }
+        unsafe { EntityRow::new(NonMaxU32::new_unchecked(index)) }
+    }
+
+    /// Allocates `count` [`EntityRow`]s. These rows will be fresh. They have never been given out before.
+    pub(crate) fn alloc_unique_entity_rows(&self, count: u32) -> AllocUniqueEntitiyRowIterator {
+        let start_new = self.next_entity_index.fetch_add(count, Ordering::Relaxed);
+        let new = match start_new.checked_add(count) {
+            Some(new_next_entity_index) => start_new..new_next_entity_index,
+            None => Self::on_overflow(),
+        };
+        AllocUniqueEntitiyRowIterator(new)
+    }
 
     /// Allocates a new [`Entity`], reusing a freed index if one exists.
@@ -647,7 +657,8 @@ impl SharedAllocator {
     #[inline]
     unsafe fn alloc(&self) -> Entity {
         // SAFETY: assured by caller
-        unsafe { self.free.alloc() }.unwrap_or_else(|| self.alloc_new_index())
+        unsafe { self.free.alloc() }
+            .unwrap_or_else(|| Entity::from_raw(self.alloc_unique_entity_row()))
     }
 
     /// Allocates a `count` [`Entity`]s, reusing freed indices if they exist.
@@ -658,13 +669,8 @@ impl SharedAllocator {
     #[inline]
     unsafe fn alloc_many(&self, count: u32) -> AllocEntitiesIterator {
         let reused = self.free.alloc_many(count);
-        let missing = count - reused.len() as u32;
-        let start_new = self.next_entity_index.fetch_add(missing, Ordering::Relaxed);
-
-        let new = match start_new.checked_add(missing) {
-            Some(new_next_entity_index) => start_new..new_next_entity_index,
-            None => Self::on_overflow(),
-        };
+        let still_need = count - reused.len() as u32;
+        let new = self.alloc_unique_entity_rows(still_need);
 
         AllocEntitiesIterator { new, reused }
     }
@@ -674,7 +680,7 @@ impl SharedAllocator {
     fn remote_alloc(&self) -> Entity {
         self.free
             .remote_alloc()
-            .unwrap_or_else(|| self.alloc_new_index())
+            .unwrap_or_else(|| Entity::from_raw(self.alloc_unique_entity_row()))
     }
 
     /// Marks the allocator as closed, but it will still function normally.
@@ -779,11 +785,37 @@ impl core::fmt::Debug for Allocator {
     }
 }
 
+/// An [`Iterator`] returning a sequence of [`EntityRow`] values from an [`Allocator`] that are never aliased.
+/// These rows have never been given out before.
+///
+/// **NOTE:** Dropping will leak the remaining entitie rows!
+pub struct AllocUniqueEntitiyRowIterator(core::ops::Range<u32>);
+
+impl Iterator for AllocUniqueEntitiyRowIterator {
+    type Item = EntityRow;
+
+    #[inline]
+    fn next(&mut self) -> Option<EntityRow> {
+        self.0
+            .next()
+            // SAFETY: This came from an *exclusive* range. It can never be max.
+            .map(|idx| unsafe { EntityRow::new(NonMaxU32::new_unchecked(idx)) })
+    }
+
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.0.size_hint()
+    }
+}
+
+impl ExactSizeIterator for AllocUniqueEntitiyRowIterator {}
+impl core::iter::FusedIterator for AllocUniqueEntitiyRowIterator {}
+
 /// An [`Iterator`] returning a sequence of [`Entity`] values from an [`Allocator`].
 ///
 /// **NOTE:** Dropping will leak the remaining entities!
 pub struct AllocEntitiesIterator<'a> {
-    new: core::ops::Range<u32>,
+    new: AllocUniqueEntitiyRowIterator,
     reused: FreeBufferIterator<'a>,
 }
 
impl<'a> Iterator for AllocEntitiesIterator<'a> {
     type Item = Entity;
 
     fn next(&mut self) -> Option<Entity> {
-        self.reused.next().or_else(|| {
-            self.new.next().map(|idx| {
-                // SAFETY: This came from an *exclusive* range. It can never be max.
-                let row = unsafe { EntityRow::new(NonMaxU32::new_unchecked(idx)) };
-                Entity::from_raw(row)
-            })
-        })
+        self.reused
+            .next()
+            .or_else(|| self.new.next().map(Entity::from_raw))
     }
 
     fn size_hint(&self) -> (usize, Option<usize>) {
From 3c653b50b8c45d6bfaf1fc1cc92f4febf70203bb Mon Sep 17 00:00:00 2001
From: Elliott Pierce
Date: Thu, 8 May 2025 13:14:27 -0400
Subject: [PATCH 101/113] final fixes to allocator

---
 crates/bevy_ecs/src/entity/allocator.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs
index 90a010862bdd8..cd6546381de09 100644
--- a/crates/bevy_ecs/src/entity/allocator.rs
+++ b/crates/bevy_ecs/src/entity/allocator.rs
@@ -783,7 +783,7 @@ impl core::fmt::Debug for Allocator {
 /// These rows have never been given out before.
 ///
 /// **NOTE:** Dropping will leak the remaining entitie rows!
-pub struct AllocUniqueEntitiyRowIterator(core::ops::Range<u32>);
+pub(crate) struct AllocUniqueEntitiyRowIterator(core::ops::Range<u32>);
 
 impl Iterator for AllocUniqueEntitiyRowIterator {
     type Item = EntityRow;
@@ -881,7 +881,7 @@ mod tests {
     use super::*;
     use alloc::vec;
 
-    /// Ensure the total capacity of [`OwnedBuffer`] is `u32::MAX + 1`, since the max *index* of an [`Entity`] is `u32::MAX`.
+    /// Ensure the total capacity of [`OwnedBuffer`] is `u32::MAX + 1`.
     #[test]
     fn chunk_capacity_sums() {
         let total: u64 = (0..FreeBuffer::NUM_CHUNKS)
From 4841bea9e5c1e646d50af82203069954a5f87e25 Mon Sep 17 00:00:00 2001
From: Elliott Pierce
Date: Thu, 8 May 2025 13:26:11 -0400
Subject: [PATCH 102/113] final touches to Entities

---
 crates/bevy_ecs/src/entity/mod.rs | 22 +++++++++++++++++-----
 1 file changed, 17 insertions(+), 5 deletions(-)

diff --git a/crates/bevy_ecs/src/entity/mod.rs b/crates/bevy_ecs/src/entity/mod.rs
index b57d656301c2e..5b17a93d03fa4 100644
--- a/crates/bevy_ecs/src/entity/mod.rs
+++ b/crates/bevy_ecs/src/entity/mod.rs
@@ -622,7 +622,9 @@ impl RemotePending {
 }
 
 struct Pending {
+    /// This is always available, but is slower.
     remote: RemotePending,
+    /// This is not always available (on no std or when remotely reserving), but is faster.
     #[cfg(feature = "std")]
     local: bevy_utils::Parallel<Vec<Entity>>,
 }
@@ -645,6 +647,8 @@ impl Pending {
         }
     }
 
+    /// Queues this entity to be flushed.
+    /// This uses the most efficient queue available.
     fn queue_flush(&self, entity: Entity) {
         #[cfg(feature = "std")]
         self.local.scope(|pending| pending.push(entity));
@@ -653,6 +657,7 @@ impl Pending {
         self.remote.queue_flush(entity);
     }
 
+    /// Flushes the entities in the most efficient queue available.
     fn flush_local(&mut self, mut flusher: impl FnMut(Entity)) {
         #[cfg(feature = "std")]
         let pending = self.local.iter_mut().flat_map(|pending| pending.drain(..));
@@ -664,6 +669,17 @@ impl Pending {
             flusher(pending);
         }
     }
+
+    /// Moves the pending entities in the less efficient queue into the more efficient one,
+    /// so they are included in [`flush_local`](Self::flush_local).
+    fn queue_remote_pending_to_be_flushed(&self) {
+        // Note that without std, all pending entities are already in remote.
+        #[cfg(feature = "std")]
+        {
+            let remote = self.remote.pending.try_iter();
+            self.local.scope(|pending| pending.extend(remote));
+        }
+    }
 }
 
 impl fmt::Debug for Pending {
@@ -935,11 +951,7 @@ impl Entities {
 
     /// Before using an entity reserved remotely, either set its location manually (usually through [`flush_entity`](crate::world::World::flush_entity)),
     /// or call this method to queue remotely reserved entities to be flushed with the rest.
     pub fn queue_remote_pending_to_be_flushed(&self) {
-        #[cfg(feature = "std")]
-        {
-            let remote = self.pending.remote.pending.try_iter();
-            self.pending.local.scope(|pending| pending.extend(remote));
-        }
+        self.pending.queue_remote_pending_to_be_flushed();
     }
 
     /// Allocates space for entities previously reserved with [`reserve_entity`](Entities::reserve_entity) or
From f7ccc671290ce908daa7040af1d292c62b19a9c9 Mon Sep 17 00:00:00 2001
From: Elliott Pierce
Date: Thu, 8 May 2025 13:27:02 -0400
Subject: [PATCH 103/113] typos

---
 crates/bevy_ecs/src/entity/allocator.rs | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs
index cd6546381de09..93147047574de 100644
--- a/crates/bevy_ecs/src/entity/allocator.rs
+++ b/crates/bevy_ecs/src/entity/allocator.rs
@@ -640,13 +640,13 @@ impl SharedAllocator {
     }
 
     /// Allocates `count` [`EntityRow`]s. These rows will be fresh. They have never been given out before.
-    pub(crate) fn alloc_unique_entity_rows(&self, count: u32) -> AllocUniqueEntitiyRowIterator {
+    pub(crate) fn alloc_unique_entity_rows(&self, count: u32) -> AllocUniqueEntityRowIterator {
         let start_new = self.next_entity_index.fetch_add(count, Ordering::Relaxed);
         let new = match start_new.checked_add(count) {
             Some(new_next_entity_index) => start_new..new_next_entity_index,
             None => Self::on_overflow(),
         };
-        AllocUniqueEntitiyRowIterator(new)
+        AllocUniqueEntityRowIterator(new)
     }
 
     /// Allocates a new [`Entity`], reusing a freed index if one exists.
@@ -782,10 +782,10 @@ impl core::fmt::Debug for Allocator {
 /// An [`Iterator`] returning a sequence of [`EntityRow`] values from an [`Allocator`] that are never aliased.
 /// These rows have never been given out before.
 ///
-/// **NOTE:** Dropping will leak the remaining entitie rows!
-pub(crate) struct AllocUniqueEntitiyRowIterator(core::ops::Range<u32>);
+/// **NOTE:** Dropping will leak the remaining entity rows!
+pub(crate) struct AllocUniqueEntityRowIterator(core::ops::Range<u32>);
 
-impl Iterator for AllocUniqueEntitiyRowIterator {
+impl Iterator for AllocUniqueEntityRowIterator {
     type Item = EntityRow;
 
     #[inline]
@@ -802,14 +802,14 @@
 }
 
-impl ExactSizeIterator for AllocUniqueEntitiyRowIterator {}
-impl core::iter::FusedIterator for AllocUniqueEntitiyRowIterator {}
+impl ExactSizeIterator for AllocUniqueEntityRowIterator {}
+impl core::iter::FusedIterator for AllocUniqueEntityRowIterator {}
 
 /// An [`Iterator`] returning a sequence of [`Entity`] values from an [`Allocator`].
 ///
 /// **NOTE:** Dropping will leak the remaining entities!
pub struct AllocEntitiesIterator<'a> { - new: AllocUniqueEntitiyRowIterator, + new: AllocUniqueEntityRowIterator, reused: FreeBufferIterator<'a>, } From 392b12b98f858fa094254796d6f6c6cb2e48acbc Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Thu, 8 May 2025 13:38:37 -0400 Subject: [PATCH 104/113] fix no u64 atomics --- crates/bevy_ecs/src/entity/allocator.rs | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 93147047574de..cb18a270decd2 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -57,7 +57,7 @@ impl Slot { fn set_entity(&self, entity: Entity) { #[cfg(not(target_has_atomic = "64"))] self.entity_generation - .store(entity.generation(), Ordering::Relaxed); + .store(entity.generation().to_bits(), Ordering::Relaxed); #[cfg(not(target_has_atomic = "64"))] self.entity_index.store(entity.index(), Ordering::Relaxed); #[cfg(target_has_atomic = "64")] @@ -69,11 +69,15 @@ impl Slot { fn get_entity(&self) -> Entity { #[cfg(not(target_has_atomic = "64"))] return Entity { - index: self.entity_index.load(Ordering::Relaxed), - // SAFETY: This is not 0 since it was from an entity's generation. - generation: unsafe { - core::num::NonZero::new_unchecked(self.entity_generation.load(Ordering::Relaxed)) + // SAFETY: This is valid since it was from an entity's index to begin with. + index: unsafe { + EntityRow::new(NonMaxU32::new_unchecked( + self.entity_index.load(Ordering::Relaxed), + )) }, + generation: super::EntityGeneration::from_bits( + self.entity_generation.load(Ordering::Relaxed), + ), }; #[cfg(target_has_atomic = "64")] // SAFETY: This is always sourced from a proper entity. From 6241bee3bfe5fc3736fc1df6d911720ee29ae998 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Thu, 8 May 2025 13:52:19 -0400 Subject: [PATCH 105/113] redo migration guide --- .../18670_remote_entity_reservation.md | 22 ---------------- .../migration-guides/new_entity_allocator.md | 26 +++++++++++++++++++ 2 files changed, 26 insertions(+), 22 deletions(-) delete mode 100644 release-content/migration-guides/18670_remote_entity_reservation.md create mode 100644 release-content/migration-guides/new_entity_allocator.md diff --git a/release-content/migration-guides/18670_remote_entity_reservation.md b/release-content/migration-guides/18670_remote_entity_reservation.md deleted file mode 100644 index 7a777b1b507ad..0000000000000 --- a/release-content/migration-guides/18670_remote_entity_reservation.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: Entities Utilities -pull_requests: [18670] ---- - -`Entities::reserve` has been renamed `Entities::prepare`. Additionally, `Entities` methods `used_count` and `total_prospective_count` have been removed, and `total_count` and `len` now return `u64` instead of `usize`. - -These utility methods have changed because the backing entity allocator has had a rewrite. `Entities::prepare` is intentionally more generally named than `Entities::reserve` because it has looser guarantees, and it may do more than just reserving memory in the future. `Entities::used_count` and `Entities::total_prospective_count` were removed because they depend on knowing how many entities are pending being automatically flushed. However, tracking that quantity is now nontrivial, and these functions have always been intended for debugging use only. 
The new allocator allows entities to be reserved without them being added to the pending list for automatic flushing, and it allows pending entities to be manually flushed early. Effectively, that means debugging the entities that are pending is no longer relevant information, hence the removal of those methods. `total_count` and `len` now return `u64` instead of `usize` to better reflect the truth. Since `Entities` has a well defined upper bound, unlike other collections, it makes more since to use `u64` explicitly rather than `usize`.
-
-To migrate:
-
-```diff
-- let entities: usize = entities.len();
-+ let entities: u64 = entities.len();
-```
-
-```diff
-- entities.reserve(128);
-+ entities.prepare(128);
-```
-
-If you have any trouble migrating away from `Entities::used_count` and `Entities::total_prospective_count`, feel free to open an issue!
diff --git a/release-content/migration-guides/new_entity_allocator.md b/release-content/migration-guides/new_entity_allocator.md
new file mode 100644
index 0000000000000..1205c93b70e28
--- /dev/null
+++ b/release-content/migration-guides/new_entity_allocator.md
@@ -0,0 +1,26 @@
+---
+title: Entities Utilities
+pull_requests: [18670]
+---
+
+`Entities::reserve` has been renamed `Entities::prepare`, as it has looser guarantees.
+
+Additionally, `Entities` debug methods `used_count` and `total_prospective_count` have been removed.
+This is because the new allocator is much more flexible, which makes it unrealistic to track these quantities (and less meaningful).
+
+`Entities` debug methods `total_count` and `len` now return `u32` instead of `usize`.
+Since `Entities` has a well defined upper bound, unlike other collections, it makes more sense to use `u32` explicitly rather than `usize`.
+
+To migrate:
+
+```diff
+- let entities: usize = entities.len();
++ let entities: u32 = entities.len();
+```
+
+```diff
+- entities.reserve(128);
++ entities.prepare(128);
+```
+
+If you have any trouble migrating away from `Entities::used_count` and `Entities::total_prospective_count`, feel free to open an issue!
From ca8babb203b9e508a322b39580fa4c967519b005 Mon Sep 17 00:00:00 2001
From: Elliott Pierce
Date: Thu, 8 May 2025 13:58:19 -0400
Subject: [PATCH 106/113] ok, now no u64 atomics should work.

---
 crates/bevy_ecs/src/entity/allocator.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs
index cb18a270decd2..a972afb2e19fa 100644
--- a/crates/bevy_ecs/src/entity/allocator.rs
+++ b/crates/bevy_ecs/src/entity/allocator.rs
@@ -45,7 +45,7 @@ impl Slot {
         #[cfg(not(target_has_atomic = "64"))]
         return Self {
             entity_index: AtomicU32::new(source.index()),
-            entity_generation: AtomicU32::new(source.generation()),
+            entity_generation: AtomicU32::new(source.generation().to_bits()),
         };
         #[cfg(target_has_atomic = "64")]
         return Self {
@@ -70,7 +70,7 @@ impl Slot {
         #[cfg(not(target_has_atomic = "64"))]
         return Entity {
             // SAFETY: This is valid since it was from an entity's index to begin with.
- index: unsafe { + row: unsafe { EntityRow::new(NonMaxU32::new_unchecked( self.entity_index.load(Ordering::Relaxed), )) From 08883b1a9f346dc1868775a6c21fe4d57c87fe51 Mon Sep 17 00:00:00 2001 From: Eagster <79881080+ElliottjPierce@users.noreply.github.com> Date: Thu, 8 May 2025 23:18:20 -0400 Subject: [PATCH 107/113] Better wording for a comment Co-authored-by: atlv --- crates/bevy_ecs/src/entity/allocator.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index a972afb2e19fa..496e868ff8cd1 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -16,7 +16,7 @@ use super::{Entity, EntityRow, EntitySetIterator}; /// This is the item we store in the free list. /// Effectively, this is a `MaybeUninit` where uninit is represented by `Entity::PLACEHOLDER`. /// -/// We use atomics internally not for special ordering but for *a* ordering. +/// We don't use atomics to achieve any particular ordering: we just need *some* ordering. /// Conceptually, this could just be `SyncCell`, /// but accessing that requires additional unsafe justification, and could cause unsound optimizations by the compiler. /// From 5fdf8f4d8017450650507e6e4b829ae33d89782f Mon Sep 17 00:00:00 2001 From: Eagster <79881080+ElliottjPierce@users.noreply.github.com> Date: Sat, 10 May 2025 22:21:20 -0400 Subject: [PATCH 108/113] Apply suggestions from Doot's review Co-authored-by: Christian Hughes <9044780+ItsDoot@users.noreply.github.com> --- crates/bevy_ecs/src/entity/allocator.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 496e868ff8cd1..67b8c3e6072b5 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -82,7 +82,7 @@ impl Slot { #[cfg(target_has_atomic = "64")] // SAFETY: This is always sourced from a proper entity. return unsafe { - Entity::try_from_bits(self.inner_entity.load(Ordering::Relaxed)).unwrap_unchecked() + Entity::try_from_bits(self.inner_entity.load(Ordering::Relaxed)).debug_checked_unwrap() }; } } @@ -262,7 +262,7 @@ impl FreeBuffer { /// # Safety /// /// This must not be called concurrently with itself. - /// Access does not conflict with another [`Self::get`]. + /// Access must not conflict with another [`Self::get`]. #[inline] unsafe fn set(&self, full_index: u32, entity: Entity) { let (chunk, index, chunk_capacity) = self.index_in_chunk(full_index); From 016bbfd0b2e582866779cc40b26d20d3e2e74354 Mon Sep 17 00:00:00 2001 From: Elliott Pierce Date: Sat, 10 May 2025 22:23:23 -0400 Subject: [PATCH 109/113] use from_raw_and_generation --- crates/bevy_ecs/src/entity/allocator.rs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs index 67b8c3e6072b5..af6535b428c01 100644 --- a/crates/bevy_ecs/src/entity/allocator.rs +++ b/crates/bevy_ecs/src/entity/allocator.rs @@ -68,17 +68,17 @@ impl Slot { #[inline] fn get_entity(&self) -> Entity { #[cfg(not(target_has_atomic = "64"))] - return Entity { + return Entity::from_raw_and_generation( // SAFETY: This is valid since it was from an entity's index to begin with. 
-            row: unsafe {
-                EntityRow::new(NonMaxU32::new_unchecked(
-                    self.entity_index.load(Ordering::Relaxed),
-                ))
+            unsafe {
+                EntityRow::new(
+                    NonMaxU32::new(self.entity_index.load(Ordering::Relaxed))
+                        .debug_checked_unwrap(),
+                )
             },
-            generation: super::EntityGeneration::from_bits(self.entity_generation.load(Ordering::Relaxed)),
-        };
+            super::EntityGeneration::from_bits(self.entity_generation.load(Ordering::Relaxed)),
+        );
 
         #[cfg(target_has_atomic = "64")]
         // SAFETY: This is always sourced from a proper entity.
         return unsafe {
From 5fb2e20a356c9ea978eaeb95a4d2433af106b4a2 Mon Sep 17 00:00:00 2001
From: Elliott Pierce
Date: Wed, 14 May 2025 23:00:50 -0400
Subject: [PATCH 110/113] clarify risks of set_state_risky

---
 crates/bevy_ecs/src/entity/allocator.rs | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs
index af6535b428c01..8c7af785c8790 100644
--- a/crates/bevy_ecs/src/entity/allocator.rs
+++ b/crates/bevy_ecs/src/entity/allocator.rs
@@ -435,6 +435,9 @@ impl FreeCount {
 
     /// Sets the state explicitly.
     /// Caller must be careful that the state has not changed since getting the state and setting it.
+    /// If that happens, the state may not properly reflect the length of the free list or its generation,
+    /// causing entities to be skipped or given out twice.
+    /// This is not a safety concern, but it is a major correctness concern.
     #[inline]
     fn set_state_risky(&self, state: FreeCountState, order: Ordering) {
         self.0.store(state.0, order);
From 7213070ce119e968d0450e80ab4474355b7d6021 Mon Sep 17 00:00:00 2001
From: Elliott Pierce
Date: Wed, 14 May 2025 23:03:47 -0400
Subject: [PATCH 111/113] remove comment for num_free

Relaxed actually might not be fine here in some situations. Acquire and
Release are used everywhere for the state anyway.
---
 crates/bevy_ecs/src/entity/allocator.rs | 1 -
 1 file changed, 1 deletion(-)

diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs
index 8c7af785c8790..692589986682a 100644
--- a/crates/bevy_ecs/src/entity/allocator.rs
+++ b/crates/bevy_ecs/src/entity/allocator.rs
@@ -484,7 +484,6 @@ impl FreeList {
     /// For this to be accurate, this must not be called during a [`Self::free`].
     #[inline]
     unsafe fn num_free(&self) -> u32 {
-        // Relaxed would probably be fine here, but this is more precise.
         self.len.state(Ordering::Acquire).length()
     }
 
From 961ad11e01149c8da539ddffd37bfecf6a69f5f0 Mon Sep 17 00:00:00 2001
From: Elliott Pierce
Date: Wed, 14 May 2025 23:06:00 -0400
Subject: [PATCH 112/113] better naming of FreeBufferIterator

---
 crates/bevy_ecs/src/entity/allocator.rs | 25 +++++++++++++------------
 1 file changed, 13 insertions(+), 12 deletions(-)

diff --git a/crates/bevy_ecs/src/entity/allocator.rs b/crates/bevy_ecs/src/entity/allocator.rs
index 692589986682a..18232341a9b8a 100644
--- a/crates/bevy_ecs/src/entity/allocator.rs
+++ b/crates/bevy_ecs/src/entity/allocator.rs
@@ -279,8 +279,8 @@ impl FreeBuffer {
     unsafe fn iter(&self, indices: core::ops::Range<u32>) -> FreeBufferIterator {
         FreeBufferIterator {
             buffer: self,
-            indices,
-            current: [].iter(),
+            future_buffer_indices: indices,
+            current_chunk_slice: [].iter(),
         }
     }
 }
@@ -302,9 +302,10 @@
 struct FreeBufferIterator<'a> {
     buffer: &'a FreeBuffer,
-    /// The indices in the buffer that are not in `current` yet.
-    indices: core::ops::Range<u32>,
-    current: core::slice::Iter<'a, Slot>,
+    /// The part of the buffer we are iterating at the moment.
+    current_chunk_slice: core::slice::Iter<'a, Slot>,
+    /// The indices in the buffer that are not yet in `current_chunk_slice`.
+    future_buffer_indices: core::ops::Range<u32>,
 }
 
 impl<'a> Iterator for FreeBufferIterator<'a> {
@@ -312,28 +313,28 @@
 
     #[inline]
     fn next(&mut self) -> Option<Entity> {
-        if let Some(found) = self.current.next() {
+        if let Some(found) = self.current_chunk_slice.next() {
             return Some(found.get_entity());
         }
 
-        let still_need = self.indices.len() as u32;
-        let next_index = self.indices.next()?;
+        let still_need = self.future_buffer_indices.len() as u32;
+        let next_index = self.future_buffer_indices.next()?;
         let (chunk, index, chunk_capacity) = self.buffer.index_in_chunk(next_index);
 
         // SAFETY: Assured by constructor
         let slice = unsafe { chunk.get_slice(index, still_need, chunk_capacity) };
-        self.indices.start += slice.len() as u32;
-        self.current = slice.iter();
+        self.future_buffer_indices.start += slice.len() as u32;
+        self.current_chunk_slice = slice.iter();
 
         // SAFETY: Constructor ensures these indices are valid in the buffer; the buffer is not sparse, and we just got the next slice.
         // So the only way for the slice to be empty is if the constructor did not uphold safety.
-        let next = unsafe { self.current.next().debug_checked_unwrap() };
+        let next = unsafe { self.current_chunk_slice.next().debug_checked_unwrap() };
         Some(next.get_entity())
     }
 
     #[inline]
     fn size_hint(&self) -> (usize, Option<usize>) {
-        let len = self.indices.len() + self.current.len();
+        let len = self.future_buffer_indices.len() + self.current_chunk_slice.len();
         (len, Some(len))
    }
}
From 71921def5d39058d56b7b044b56e657bda9f8f16 Mon Sep 17 00:00:00 2001
From: Elliott Pierce
Date: Wed, 28 May 2025 12:37:29 -0400
Subject: [PATCH 113/113] fix formatting

---
 crates/bevy_ecs/src/entity/mod.rs | 8 +-------
 1 file changed, 1 insertion(+), 7 deletions(-)

diff --git a/crates/bevy_ecs/src/entity/mod.rs b/crates/bevy_ecs/src/entity/mod.rs
index f4bb7b0b92eb5..f5c8a60c9de02 100644
--- a/crates/bevy_ecs/src/entity/mod.rs
+++ b/crates/bevy_ecs/src/entity/mod.rs
@@ -85,13 +85,7 @@ use crate::{
 use alloc::vec::Vec;
 use bevy_platform::sync::Arc;
 use concurrent_queue::ConcurrentQueue;
-use core::{
-    fmt,
-    hash::Hash,
-    mem,
-    num::NonZero,
-    panic::Location,
-};
+use core::{fmt, hash::Hash, mem, num::NonZero, panic::Location};
 use log::warn;
 
 #[cfg(feature = "serialize")]
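
The last substantive change in the series (patch 112) renames the fields of `FreeBufferIterator`, and the two-level shape of that iterator is easier to see outside a diff. Below is a simplified, self-contained model of the same idea: a `Range<u32>` of buffer indices still to visit, plus a slice iterator over the chunk currently being drained. Everything here (`ChunkedBuffer`, the fixed four-element chunks, the `slice_from` contract) is invented for the sketch and is not code from this series; in particular, the index bookkeeping is adapted to this sketch's `slice_from`, whereas the real iterator leans on `Chunk::get_slice` and `unsafe` validity guarantees from its constructor.

```rust
/// A stand-in for the real chunked free buffer. Chunks here are a fixed
/// four elements each; the PR's chunks grow in powers of two.
struct ChunkedBuffer {
    chunks: Vec<Vec<u32>>,
}

impl ChunkedBuffer {
    /// Longest contiguous run starting at `index`, capped at `still_need`
    /// elements (a simplified stand-in for `Chunk::get_slice`). Assumes
    /// `index` is in bounds, mirroring the real iterator's unsafe contract.
    fn slice_from(&self, index: u32, still_need: u32) -> &[u32] {
        let chunk = &self.chunks[(index / 4) as usize];
        let offset = (index % 4) as usize;
        let end = (offset + still_need as usize).min(chunk.len());
        &chunk[offset..end]
    }
}

/// Two-level iterator: drain the current chunk slice, and only consult the
/// range of unvisited indices when a chunk boundary is crossed.
struct BufferIter<'a> {
    buffer: &'a ChunkedBuffer,
    current_chunk_slice: std::slice::Iter<'a, u32>,
    future_buffer_indices: std::ops::Range<u32>,
}

impl<'a> Iterator for BufferIter<'a> {
    type Item = u32;

    fn next(&mut self) -> Option<u32> {
        // Hot path: keep draining the slice we already looked up.
        if let Some(found) = self.current_chunk_slice.next() {
            return Some(*found);
        }
        // Chunk boundary: look up the next contiguous slice.
        let still_need = self.future_buffer_indices.len() as u32;
        let next_index = self.future_buffer_indices.next()?;
        let slice = self.buffer.slice_from(next_index, still_need);
        // `next()?` above consumed one index; the rest of the slice covers
        // `slice.len() - 1` more, so advance the range past those too.
        self.future_buffer_indices.start += slice.len() as u32 - 1;
        self.current_chunk_slice = slice.iter();
        self.current_chunk_slice.next().copied()
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        // Exact: indices not yet visited plus what is left of the slice.
        let len = self.future_buffer_indices.len() + self.current_chunk_slice.len();
        (len, Some(len))
    }
}

fn main() {
    let buffer = ChunkedBuffer {
        chunks: vec![vec![10, 11, 12, 13], vec![14, 15, 16, 17]],
    };
    let iter = BufferIter {
        buffer: &buffer,
        current_chunk_slice: [].iter(),
        future_buffer_indices: 2..7,
    };
    assert_eq!(iter.collect::<Vec<_>>(), vec![12, 13, 14, 15, 16]);
}
```

The payoff of this shape is that the common case is a plain slice iteration with no index arithmetic at all, which is why the renamed fields (`current_chunk_slice` for the hot path, `future_buffer_indices` for the slow path) clarify the design.
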