diff --git a/crates/bevy_tasks/README.md b/crates/bevy_tasks/README.md
index 523dbff7eaba9..881d7b802a50b 100644
--- a/crates/bevy_tasks/README.md
+++ b/crates/bevy_tasks/README.md
@@ -11,6 +11,23 @@ or ordering of spawned tasks.
 It is based on [`async-executor`][async-executor], a lightweight executor that allows the end user to manage their own
 threads. `async-executor` is based on async-task, a core piece of async-std.
 
+## Usage
+
+To optimize task execution in multi-threaded environments,
+Bevy provides three different thread pools onto which tasks of different kinds can be spawned.
+(The same API is used in single-threaded environments, even if execution is limited to a single thread.
+This currently applies to WASM targets.)
+The determining factor for what kind of work should go in each pool is latency requirements:
+
+* For CPU-intensive work (tasks that generally spin until completion) we have a standard
+  [`ComputeTaskPool`] and an [`AsyncComputeTaskPool`]. Work that does not need to be completed to
+  present the next frame should go to the [`AsyncComputeTaskPool`].
+
+* For IO-intensive work (tasks that spend very little time in a "woken" state) we have an
+  [`IoTaskPool`] whose tasks are expected to complete very quickly. Generally speaking, they should just
+  await receiving data from somewhere (e.g. disk) and signal other systems when the data is ready
+  for consumption (likely via channels).
+
 [bevy]: https://bevyengine.org
 [rayon]: https://github.com/rayon-rs/rayon
 [async-executor]: https://github.com/stjepang/async-executor
diff --git a/crates/bevy_tasks/src/task_pool.rs b/crates/bevy_tasks/src/task_pool.rs
index 5903e67ea1205..90afe4b4bc2e3 100644
--- a/crates/bevy_tasks/src/task_pool.rs
+++ b/crates/bevy_tasks/src/task_pool.rs
@@ -93,8 +93,16 @@ impl TaskPoolBuilder {
     }
 }
 
-/// A thread pool for executing tasks. Tasks are futures that are being automatically driven by
-/// the pool on threads owned by the pool.
+/// A thread pool for executing tasks.
+///
+/// While futures usually need to be polled to be executed, Bevy tasks are
+/// automatically driven by the pool on threads owned by the pool. The [`Task`]
+/// future only needs to be polled in order to receive the result. (For that
+/// purpose, it is often stored in a component or resource; see the
+/// `async_compute` example.)
+///
+/// If the result is not required, one may also use [`Task::detach`] and the pool
+/// will still execute the task, even if it is dropped.
 #[derive(Debug)]
 pub struct TaskPool {
     /// The executor for the pool
@@ -509,11 +517,14 @@ impl TaskPool {
         execute_forever.or(get_results).await
     }
 
-    /// Spawns a static future onto the thread pool. The returned Task is a future. It can also be
-    /// canceled and "detached" allowing it to continue running without having to be polled by the
+    /// Spawns a static future onto the thread pool. The returned [`Task`] is a
+    /// future that can be polled for the result. It can also be canceled and
+    /// "detached", allowing the task to continue running even if dropped. In
+    /// any case, the pool will execute the task even without polling by the
     /// end-user.
     ///
-    /// If the provided future is non-`Send`, [`TaskPool::spawn_local`] should be used instead.
+    /// If the provided future is non-`Send`, [`TaskPool::spawn_local`] should
+    /// be used instead.
     pub fn spawn<T>(&self, future: impl Future<Output = T> + Send + 'static) -> Task<T>
     where
         T: Send + 'static,
@@ -521,11 +532,17 @@ impl TaskPool {
         Task::new(self.executor.spawn(future))
     }
 
-    /// Spawns a static future on the thread-local async executor for the current thread. The task
-    /// will run entirely on the thread the task was spawned on. The returned Task is a future.
-    /// It can also be canceled and "detached" allowing it to continue running without having
-    /// to be polled by the end-user. Users should generally prefer to use [`TaskPool::spawn`]
-    /// instead, unless the provided future is not `Send`.
+    /// Spawns a static future on the thread-local async executor for the
+    /// current thread. The task will run entirely on the thread the task was
+    /// spawned on.
+    ///
+    /// The returned [`Task`] is a future that can be polled for the
+    /// result. It can also be canceled and "detached", allowing the task to
+    /// continue running even if dropped. In any case, the pool will execute the
+    /// task even without polling by the end-user.
+    ///
+    /// Users should generally prefer to use [`TaskPool::spawn`] instead,
+    /// unless the provided future is not `Send`.
     pub fn spawn_local<T>(&self, future: impl Future<Output = T> + 'static) -> Task<T>
     where
         T: 'static,
diff --git a/crates/bevy_tasks/src/usages.rs b/crates/bevy_tasks/src/usages.rs
index 0ba38eb58c730..49b8b5cd2ff72 100644
--- a/crates/bevy_tasks/src/usages.rs
+++ b/crates/bevy_tasks/src/usages.rs
@@ -1,15 +1,3 @@
-//! Definitions for a few common task pools that we want. Generally the determining factor for what
-//! kind of work should go in each pool is latency requirements.
-//!
-//! For CPU-intensive work (tasks that generally spin until completion) we have a standard
-//! [`ComputeTaskPool`] and an [`AsyncComputeTaskPool`]. Work that does not need to be completed to
-//! present the next frame should go to the [`AsyncComputeTaskPool`]
-//!
-//! For IO-intensive work (tasks that spend very little time in a "woken" state) we have an IO
-//! task pool. The tasks here are expected to complete very quickly. Generally they should just
-//! await receiving data from somewhere (i.e. disk) and signal other systems when the data is ready
-//! for consumption. (likely via channels)
-
 use super::TaskPool;
 use std::{ops::Deref, sync::OnceLock};
 
@@ -17,8 +5,12 @@ static COMPUTE_TASK_POOL: OnceLock<ComputeTaskPool> = OnceLock::new();
 static ASYNC_COMPUTE_TASK_POOL: OnceLock<AsyncComputeTaskPool> = OnceLock::new();
 static IO_TASK_POOL: OnceLock<IoTaskPool> = OnceLock::new();
 
-/// A newtype for a task pool for CPU-intensive work that must be completed to deliver the next
-/// frame
+/// A newtype for a task pool for CPU-intensive work that must be completed to
+/// deliver the next frame
+///
+/// See [`TaskPool`] documentation for details on Bevy tasks.
+/// [`AsyncComputeTaskPool`] should be preferred if the work does not have to be
+/// completed before the next frame.
 #[derive(Debug)]
 pub struct ComputeTaskPool(TaskPool);
 
@@ -49,6 +41,9 @@ impl Deref for ComputeTaskPool {
 }
 
 /// A newtype for a task pool for CPU-intensive work that may span across multiple frames
+///
+/// See [`TaskPool`] documentation for details on Bevy tasks. Use [`ComputeTaskPool`] if
+/// the work must be complete before advancing to the next frame.
 #[derive(Debug)]
 pub struct AsyncComputeTaskPool(TaskPool);
 
diff --git a/examples/async_tasks/async_compute.rs b/examples/async_tasks/async_compute.rs
index 57f9ea9fa4f44..7ab793809776e 100644
--- a/examples/async_tasks/async_compute.rs
+++ b/examples/async_tasks/async_compute.rs
@@ -54,7 +54,9 @@ fn spawn_tasks(mut commands: Commands) {
     for x in 0..NUM_CUBES {
         for y in 0..NUM_CUBES {
             for z in 0..NUM_CUBES {
-                // Spawn new task on the AsyncComputeTaskPool
+                // Spawn new task on the AsyncComputeTaskPool; the task will be
+                // executed in the background, and the Task future returned by
+                // spawn() can be used to poll for the result
                 let task = thread_pool.spawn(async move {
                     let mut rng = rand::thread_rng();
                     let start_time = Instant::now();
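
For reference, the spawn-and-poll pattern these docs describe (and that the `async_compute` example demonstrates) looks roughly like the sketch below. It assumes Bevy's `AsyncComputeTaskPool` and `Task` types plus the `futures_lite` crate for the polling helpers, and that the task pools have been initialized (e.g. by Bevy's `TaskPoolPlugin`); the `ComputeSum` component and both systems are hypothetical names introduced for illustration, not part of this diff.

```rust
use bevy::prelude::*;
use bevy::tasks::{AsyncComputeTaskPool, Task};
use futures_lite::future;

// Hypothetical component that stores the in-flight task so another system can poll it.
#[derive(Component)]
struct ComputeSum(Task<u64>);

// Spawn the work on the async compute pool; the pool drives the future to completion
// on its own threads, whether or not anyone polls the returned `Task`.
fn spawn_sum_task(mut commands: Commands) {
    let pool = AsyncComputeTaskPool::get();
    let task: Task<u64> = pool.spawn(async move {
        // Stand-in for CPU-heavy work that may span several frames.
        (0..10_000_000u64).sum()
    });
    commands.spawn(ComputeSum(task));
}

// Poll the stored `Task` each frame; polling is only needed to receive the result.
fn handle_sum_task(mut commands: Commands, mut tasks: Query<(Entity, &mut ComputeSum)>) {
    for (entity, mut compute) in &mut tasks {
        if let Some(sum) = future::block_on(future::poll_once(&mut compute.0)) {
            info!("sum = {sum}");
            commands.entity(entity).despawn();
        }
    }
}

fn main() {
    App::new()
        .add_plugins(MinimalPlugins)
        .add_systems(Startup, spawn_sum_task)
        .add_systems(Update, handle_sum_task)
        .run();
}
```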
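The fire-and-forget behaviour mentioned for [`Task::detach`] and the IO pool could look something like the following; again only a sketch, with `start_loading` as a hypothetical helper, a `std::sync::mpsc` channel, and a placeholder payload standing in for real disk IO.

```rust
use bevy::tasks::IoTaskPool;
use std::sync::mpsc::{channel, Receiver};

// Kick off an IO-flavoured task and drop the handle; `detach` keeps the task
// running on the pool even though the returned `Task` is never polled.
fn start_loading() -> Receiver<Vec<u8>> {
    let (sender, receiver) = channel();
    IoTaskPool::get()
        .spawn(async move {
            // Stand-in for awaiting real IO, e.g. reading an asset file from disk.
            let bytes = vec![0u8; 1024];
            // Signal interested systems that the data is ready, likely via a channel.
            let _ = sender.send(bytes);
        })
        .detach();
    receiver
}
```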