diff --git a/Cargo.toml b/Cargo.toml index 35a675b62f244b..98915f8cd47b15 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1443,6 +1443,16 @@ description = "Shows how to rumble a gamepad using force feedback" category = "Input" wasm = false +[[example]] +name = "gpu_picking" +path = "examples/input/gpu_picking.rs" + +[package.metadata.example.gpu_picking] +name = "GPU picking" +description = "Mouse picking using the gpu" +category = "Input" +wasm = true + [[example]] name = "keyboard_input" path = "examples/input/keyboard_input.rs" diff --git a/assets/shaders/gpu_picking_material.wgsl b/assets/shaders/gpu_picking_material.wgsl new file mode 100644 index 00000000000000..bb50dd1c471909 --- /dev/null +++ b/assets/shaders/gpu_picking_material.wgsl @@ -0,0 +1,28 @@ +// This shader shows how to enable the gpu picking feature for a material + +// You'll need the mesh binding because that's where the entity index is +#import bevy_pbr::mesh_bindings mesh +#import bevy_pbr::mesh_vertex_output MeshVertexOutput + +@group(1) @binding(0) +var color: vec4; + +// Gpu picking uses multiple fragment output +struct FragmentOutput { + @location(0) color: vec4, +// You can detect the feature with this flag +#ifdef GPU_PICKING + @location(1) mesh_id: u32, +#endif +}; + +@fragment +fn fragment(in: MeshVertexOutput) -> FragmentOutput { + var out: FragmentOutput; + out.color = color; +// make sure to output the entity index for gpu picking to work correctly +#ifdef GPU_PICKING + out.mesh_id = mesh[in.instance_index].id; +#endif + return out; +} diff --git a/crates/bevy_core_pipeline/src/core_3d/main_opaque_pass_3d_node.rs b/crates/bevy_core_pipeline/src/core_3d/main_opaque_pass_3d_node.rs index 34d8c299c94c90..662718895461ea 100644 --- a/crates/bevy_core_pipeline/src/core_3d/main_opaque_pass_3d_node.rs +++ b/crates/bevy_core_pipeline/src/core_3d/main_opaque_pass_3d_node.rs @@ -7,6 +7,7 @@ use crate::{ use bevy_ecs::{prelude::*, query::QueryItem}; use bevy_render::{ camera::ExtractedCamera, + 
picking::{ExtractedGpuPickingCamera, VisibleMeshIdTextures}, render_graph::{NodeRunError, RenderGraphContext, ViewNode}, render_phase::RenderPhase, render_resource::{ @@ -34,8 +35,10 @@ impl ViewNode for MainOpaquePass3dNode { Option<&'static DepthPrepass>, Option<&'static NormalPrepass>, Option<&'static MotionVectorPrepass>, + Option<&'static ExtractedGpuPickingCamera>, Option<&'static SkyboxPipelineId>, Option<&'static SkyboxBindGroup>, + Option<&'static VisibleMeshIdTextures>, &'static ViewUniformOffset, ); @@ -53,8 +56,10 @@ impl ViewNode for MainOpaquePass3dNode { depth_prepass, normal_prepass, motion_vector_prepass, + gpu_picking_camera, skybox_pipeline, skybox_bind_group, + mesh_id_textures, view_uniform_offset, ): QueryItem, world: &World, @@ -64,21 +69,34 @@ impl ViewNode for MainOpaquePass3dNode { #[cfg(feature = "trace")] let _main_opaque_pass_3d_span = info_span!("main_opaque_pass_3d").entered(); + let mut color_attachments = vec![Some(target.get_color_attachment(Operations { + load: match camera_3d.clear_color { + ClearColorConfig::Default => LoadOp::Clear(world.resource::().0.into()), + ClearColorConfig::Custom(color) => LoadOp::Clear(color.into()), + ClearColorConfig::None => LoadOp::Load, + }, + store: true, + }))]; + + if gpu_picking_camera.is_some() { + if let Some(mesh_id_textures) = mesh_id_textures { + color_attachments.push(Some(mesh_id_textures.get_color_attachment(Operations { + load: match camera_3d.clear_color { + ClearColorConfig::None => LoadOp::Load, + // TODO clear this earlier? + _ => LoadOp::Clear(VisibleMeshIdTextures::clear_color()), + }, + store: true, + }))); + } + } + // Setup render pass let mut render_pass = render_context.begin_tracked_render_pass(RenderPassDescriptor { label: Some("main_opaque_pass_3d"), // NOTE: The opaque pass loads the color // buffer as well as writing to it. 
- color_attachments: &[Some(target.get_color_attachment(Operations { - load: match camera_3d.clear_color { - ClearColorConfig::Default => { - LoadOp::Clear(world.resource::().0.into()) - } - ClearColorConfig::Custom(color) => LoadOp::Clear(color.into()), - ClearColorConfig::None => LoadOp::Load, - }, - store: true, - }))], + color_attachments: &color_attachments, depth_stencil_attachment: Some(RenderPassDepthStencilAttachment { view: &depth.view, // NOTE: The opaque main pass loads the depth buffer and possibly overwrites it diff --git a/crates/bevy_core_pipeline/src/core_3d/main_transparent_pass_3d_node.rs b/crates/bevy_core_pipeline/src/core_3d/main_transparent_pass_3d_node.rs index ce5a2127f1d44f..a9c23c6762c2f1 100644 --- a/crates/bevy_core_pipeline/src/core_3d/main_transparent_pass_3d_node.rs +++ b/crates/bevy_core_pipeline/src/core_3d/main_transparent_pass_3d_node.rs @@ -2,6 +2,7 @@ use crate::core_3d::Transparent3d; use bevy_ecs::{prelude::*, query::QueryItem}; use bevy_render::{ camera::ExtractedCamera, + picking::{ExtractedGpuPickingCamera, VisibleMeshIdTextures}, render_graph::{NodeRunError, RenderGraphContext, ViewNode}, render_phase::RenderPhase, render_resource::{LoadOp, Operations, RenderPassDepthStencilAttachment, RenderPassDescriptor}, @@ -21,12 +22,16 @@ impl ViewNode for MainTransparentPass3dNode { &'static RenderPhase, &'static ViewTarget, &'static ViewDepthTexture, + Option<&'static ExtractedGpuPickingCamera>, + Option<&'static VisibleMeshIdTextures>, ); fn run( &self, graph: &mut RenderGraphContext, render_context: &mut RenderContext, - (camera, transparent_phase, target, depth): QueryItem, + (camera, transparent_phase, target, depth, gpu_picking_camera, mesh_id_textures): QueryItem< + Self::ViewQuery, + >, world: &World, ) -> Result<(), NodeRunError> { let view_entity = graph.view_entity(); @@ -37,13 +42,27 @@ impl ViewNode for MainTransparentPass3dNode { #[cfg(feature = "trace")] let _main_transparent_pass_3d_span = 
info_span!("main_transparent_pass_3d").entered(); + let mut color_attachments = vec![Some(target.get_color_attachment(Operations { + load: LoadOp::Load, + store: true, + }))]; + + if gpu_picking_camera.is_some() { + if let Some(mesh_id_textures) = mesh_id_textures { + color_attachments.push(Some(mesh_id_textures.get_color_attachment( + Operations { + // The texture is already cleared in the opaque pass + load: LoadOp::Load, + store: true, + }, + ))); + } + } + let mut render_pass = render_context.begin_tracked_render_pass(RenderPassDescriptor { label: Some("main_transparent_pass_3d"), // NOTE: The transparent pass loads the color buffer as well as overwriting it where appropriate. - color_attachments: &[Some(target.get_color_attachment(Operations { - load: LoadOp::Load, - store: true, - }))], + color_attachments: &color_attachments, depth_stencil_attachment: Some(RenderPassDepthStencilAttachment { view: &depth.view, // NOTE: For the transparent pass we load the depth buffer. There should be no diff --git a/crates/bevy_core_pipeline/src/core_3d/mod.rs b/crates/bevy_core_pipeline/src/core_3d/mod.rs index f415751cde7192..5f421713b4ad76 100644 --- a/crates/bevy_core_pipeline/src/core_3d/mod.rs +++ b/crates/bevy_core_pipeline/src/core_3d/mod.rs @@ -14,6 +14,7 @@ pub mod graph { pub const MAIN_OPAQUE_PASS: &str = "main_opaque_pass"; pub const MAIN_TRANSPARENT_PASS: &str = "main_transparent_pass"; pub const END_MAIN_PASS: &str = "end_main_pass"; + pub const ENTITY_INDEX_BUFFER_COPY: &str = "entity_index_buffer_copy"; pub const BLOOM: &str = "bloom"; pub const TONEMAPPING: &str = "tonemapping"; pub const FXAA: &str = "fxaa"; @@ -35,6 +36,7 @@ use bevy_ecs::prelude::*; use bevy_render::{ camera::{Camera, ExtractedCamera}, extract_component::ExtractComponentPlugin, + picking::{ExtractedGpuPickingCamera, VisibleMeshIdTextures, MESH_ID_TEXTURE_FORMAT}, prelude::Msaa, render_graph::{EmptyNode, RenderGraphApp, ViewNodeRunner}, render_phase::{ @@ -94,6 +96,7 @@ impl Plugin for 
Core3dPlugin { sort_phase_system::.in_set(RenderSet::PhaseSort), prepare_core_3d_depth_textures.in_set(RenderSet::PrepareResources), prepare_prepass_textures.in_set(RenderSet::PrepareResources), + prepare_entity_textures.in_set(RenderSet::PrepareResources), ), ); @@ -493,3 +496,63 @@ pub fn prepare_prepass_textures( }); } } + +/// Create the required textures based on the camera size +pub fn prepare_entity_textures( + mut commands: Commands, + mut texture_cache: ResMut, + msaa: Res, + render_device: Res, + views_3d: Query< + (Entity, &ExtractedCamera), + ( + With, + With>, + With>, + ), + >, +) { + for (entity, camera) in &views_3d { + let Some(physical_target_size) = camera.physical_target_size else { + continue; + }; + + let size = Extent3d { + depth_or_array_layers: 1, + width: physical_target_size.x, + height: physical_target_size.y, + }; + + let descriptor = TextureDescriptor { + label: None, + size, + mip_level_count: 1, + sample_count: 1, + dimension: TextureDimension::D2, + format: MESH_ID_TEXTURE_FORMAT, + usage: TextureUsages::RENDER_ATTACHMENT | TextureUsages::COPY_SRC, + view_formats: &[], + }; + + let mesh_id_textures = VisibleMeshIdTextures { + main: texture_cache.get( + &render_device, + TextureDescriptor { + label: Some("main_entity_texture"), + ..descriptor + }, + ), + sampled: (msaa.samples() > 1).then(|| { + texture_cache.get( + &render_device, + TextureDescriptor { + label: Some("main_entity_texture_sampled"), + sample_count: msaa.samples(), + ..descriptor + }, + ) + }), + }; + commands.entity(entity).insert(mesh_id_textures); + } +} diff --git a/crates/bevy_core_pipeline/src/entity_index_buffer_copy/mod.rs b/crates/bevy_core_pipeline/src/entity_index_buffer_copy/mod.rs new file mode 100644 index 00000000000000..38a0264366f8f0 --- /dev/null +++ b/crates/bevy_core_pipeline/src/entity_index_buffer_copy/mod.rs @@ -0,0 +1,53 @@ +use bevy_app::Plugin; +use bevy_ecs::{query::QueryItem, world::World}; +use bevy_render::{ + 
picking::{CurrentGpuPickingBufferIndex, ExtractedGpuPickingCamera, VisibleMeshIdTextures}, + render_graph::{RenderGraphApp, RenderGraphContext, ViewNode, ViewNodeRunner}, + renderer::RenderContext, + RenderApp, +}; + +use crate::core_3d::CORE_3D; + +#[derive(Default)] +pub struct EntityIndexBufferCopyNode; +impl ViewNode for EntityIndexBufferCopyNode { + type ViewQuery = ( + &'static VisibleMeshIdTextures, + &'static ExtractedGpuPickingCamera, + ); + + fn run( + &self, + _graph: &mut RenderGraphContext, + render_context: &mut RenderContext, + (mesh_id_textures, gpu_picking_camera): QueryItem, + world: &World, + ) -> Result<(), bevy_render::render_graph::NodeRunError> { + let current_buffer_index = world.resource::(); + gpu_picking_camera.run_node( + render_context.command_encoder(), + &mesh_id_textures.main.texture, + current_buffer_index, + ); + Ok(()) + } +} + +pub struct EntityIndexBufferCopyPlugin; +impl Plugin for EntityIndexBufferCopyPlugin { + fn build(&self, app: &mut bevy_app::App) { + let Ok(render_app) = app.get_sub_app_mut(RenderApp) else { + return; + }; + + // 3D + use crate::core_3d::graph::node::*; + render_app + .add_render_graph_node::>( + CORE_3D, + ENTITY_INDEX_BUFFER_COPY, + ) + .add_render_graph_edge(CORE_3D, UPSCALING, ENTITY_INDEX_BUFFER_COPY); + } +} diff --git a/crates/bevy_core_pipeline/src/lib.rs b/crates/bevy_core_pipeline/src/lib.rs index 51e5ae36278e98..e2639f8423d312 100644 --- a/crates/bevy_core_pipeline/src/lib.rs +++ b/crates/bevy_core_pipeline/src/lib.rs @@ -6,6 +6,7 @@ pub mod clear_color; pub mod contrast_adaptive_sharpening; pub mod core_2d; pub mod core_3d; +pub mod entity_index_buffer_copy; pub mod fullscreen_vertex_shader; pub mod fxaa; pub mod msaa_writeback; @@ -40,6 +41,7 @@ use crate::{ contrast_adaptive_sharpening::CASPlugin, core_2d::Core2dPlugin, core_3d::Core3dPlugin, + entity_index_buffer_copy::EntityIndexBufferCopyPlugin, fullscreen_vertex_shader::FULLSCREEN_SHADER_HANDLE, fxaa::FxaaPlugin, 
msaa_writeback::MsaaWritebackPlugin, @@ -79,6 +81,7 @@ impl Plugin for CorePipelinePlugin { BloomPlugin, FxaaPlugin, CASPlugin, + EntityIndexBufferCopyPlugin, )); } } diff --git a/crates/bevy_pbr/src/material.rs b/crates/bevy_pbr/src/material.rs index 68361938dec167..68ed224eb74ebe 100644 --- a/crates/bevy_pbr/src/material.rs +++ b/crates/bevy_pbr/src/material.rs @@ -23,6 +23,7 @@ use bevy_reflect::{TypePath, TypeUuid}; use bevy_render::{ extract_component::ExtractComponentPlugin, mesh::{Mesh, MeshVertexBufferLayout}, + picking::{ExtractedGpuPickingCamera, GpuPickingMesh}, prelude::Image, render_asset::{prepare_assets, RenderAssets}, render_phase::{ @@ -383,7 +384,12 @@ pub fn queue_material_meshes( msaa: Res, render_meshes: Res>, render_materials: Res>, - material_meshes: Query<(&Handle, &Handle, &MeshTransforms)>, + material_meshes: Query<( + &Handle, + &Handle, + &MeshTransforms, + Option<&GpuPickingMesh>, + )>, images: Res>, mut views: Query<( &ExtractedView, @@ -393,6 +399,7 @@ pub fn queue_material_meshes( Option<&EnvironmentMapLight>, Option<&ScreenSpaceAmbientOcclusionSettings>, Option<&NormalPrepass>, + Option<&ExtractedGpuPickingCamera>, Option<&TemporalAntiAliasSettings>, &mut RenderPhase, &mut RenderPhase, @@ -409,6 +416,7 @@ pub fn queue_material_meshes( environment_map, ssao, normal_prepass, + gpu_picking_camera, taa_settings, mut opaque_phase, mut alpha_mask_phase, @@ -467,7 +475,7 @@ pub fn queue_material_meshes( let rangefinder = view.rangefinder3d(); for visible_entity in &visible_entities.entities { - if let Ok((material_handle, mesh_handle, mesh_transforms)) = + if let Ok((material_handle, mesh_handle, mesh_transforms, gpu_picking_mesh)) = material_meshes.get(*visible_entity) { if let (Some(mesh), Some(material)) = ( @@ -498,6 +506,14 @@ pub fn queue_material_meshes( _ => (), } + if gpu_picking_camera.is_some() { + // This is to indicate that the mesh pipeline needs to have the target + mesh_key |= MeshPipelineKey::MESH_ID_TEXTURE_TARGET; + if 
gpu_picking_mesh.is_some() { + mesh_key |= MeshPipelineKey::GPU_PICKING; + } + } + let pipeline_id = pipelines.specialize( &pipeline_cache, &material_pipeline, diff --git a/crates/bevy_pbr/src/render/mesh.rs b/crates/bevy_pbr/src/render/mesh.rs index a52c54557ed822..a004bce2e0ee50 100644 --- a/crates/bevy_pbr/src/render/mesh.rs +++ b/crates/bevy_pbr/src/render/mesh.rs @@ -28,6 +28,7 @@ use bevy_render::{ GpuBufferInfo, InnerMeshVertexBufferLayout, Mesh, MeshVertexBufferLayout, VertexAttributeDescriptor, }, + picking::{VisibleMeshEntities, MESH_ID_TEXTURE_FORMAT}, prelude::Msaa, render_asset::RenderAssets, render_phase::{PhaseItem, RenderCommand, RenderCommandResult, RenderPhase, TrackedRenderPass}, @@ -175,6 +176,7 @@ impl Plugin for MeshRenderPlugin { pub struct MeshTransforms { pub transform: Affine3, pub previous_transform: Affine3, + pub id: u32, pub flags: u32, } @@ -189,6 +191,7 @@ pub struct MeshUniform { // [2].z pub inverse_transpose_model_a: [Vec4; 2], pub inverse_transpose_model_b: f32, + pub id: u32, pub flags: u32, } @@ -236,6 +239,7 @@ impl From<&MeshTransforms> for MeshUniform { .into(), ], inverse_transpose_model_b: inverse_transpose_model_3x3.z_axis.z, + id: mesh_transforms.id, flags: mesh_transforms.flags, } } @@ -274,8 +278,10 @@ pub fn extract_meshes( let mut not_caster_commands = Vec::with_capacity(*prev_not_caster_commands_len); let visible_meshes = meshes_query.iter().filter(|(_, vis, ..)| vis.is_visible()); - for (entity, _, transform, previous_transform, handle, not_receiver, not_caster) in - visible_meshes + let mut visible_mesh_entities = vec![Entity::PLACEHOLDER]; + + for (mesh_id, (entity, _, transform, previous_transform, handle, not_receiver, not_caster)) in + visible_meshes.enumerate() { let transform = transform.affine(); let previous_transform = previous_transform.map(|t| t.0).unwrap_or(transform); @@ -291,17 +297,21 @@ pub fn extract_meshes( transform: (&transform).into(), previous_transform: (&previous_transform).into(), flags: 
flags.bits(), + id: mesh_id as u32 + 1, }; if not_caster.is_some() { not_caster_commands.push((entity, (handle.clone_weak(), transforms, NotShadowCaster))); } else { caster_commands.push((entity, (handle.clone_weak(), transforms))); } + + visible_mesh_entities.push(entity); } *prev_caster_commands_len = caster_commands.len(); *prev_not_caster_commands_len = not_caster_commands.len(); commands.insert_or_spawn_batch(caster_commands); commands.insert_or_spawn_batch(not_caster_commands); + commands.insert_resource(VisibleMeshEntities(Some(visible_mesh_entities))); } #[derive(Component)] @@ -745,6 +755,10 @@ bitflags::bitflags! { const DEPTH_CLAMP_ORTHO = (1 << 9); const TAA = (1 << 10); const MORPH_TARGETS = (1 << 11); + /// Indicates if the mesh should output it's entity index + const GPU_PICKING = (1 << 12); + /// Indicates if the entity index texture should be added as a target + const MESH_ID_TEXTURE_TARGET = (1 << 13); const BLEND_RESERVED_BITS = Self::BLEND_MASK_BITS << Self::BLEND_SHIFT_BITS; // ← Bitmask reserving bits for the blend state const BLEND_OPAQUE = (0 << Self::BLEND_SHIFT_BITS); // ← Values are just sequential within the mask, and can range from 0 to 3 const BLEND_PREMULTIPLIED_ALPHA = (1 << Self::BLEND_SHIFT_BITS); // @@ -1023,6 +1037,25 @@ impl SpecializedMeshPipeline for MeshPipeline { }); } + let mut targets = vec![Some(ColorTargetState { + format, + blend, + write_mask: ColorWrites::ALL, + })]; + + if key.contains(MeshPipelineKey::GPU_PICKING) { + shader_defs.push("GPU_PICKING".into()); + } + + if key.contains(MeshPipelineKey::MESH_ID_TEXTURE_TARGET) { + // we need to add the target even if the mesh isn't pickable + targets.push(Some(ColorTargetState { + format: MESH_ID_TEXTURE_FORMAT, + blend: None, + write_mask: ColorWrites::ALL, + })); + } + Ok(RenderPipelineDescriptor { vertex: VertexState { shader: MESH_SHADER_HANDLE.typed::(), @@ -1034,11 +1067,7 @@ impl SpecializedMeshPipeline for MeshPipeline { shader: MESH_SHADER_HANDLE.typed::(), 
shader_defs, entry_point: "fragment".into(), - targets: vec![Some(ColorTargetState { - format, - blend, - write_mask: ColorWrites::ALL, - })], + targets, }), layout: bind_group_layout, push_constant_ranges, diff --git a/crates/bevy_pbr/src/render/mesh_types.wgsl b/crates/bevy_pbr/src/render/mesh_types.wgsl index 9b503364734b70..e7e473c0b93122 100644 --- a/crates/bevy_pbr/src/render/mesh_types.wgsl +++ b/crates/bevy_pbr/src/render/mesh_types.wgsl @@ -12,6 +12,7 @@ struct Mesh { // Use bevy_pbr::mesh_functions::mat2x4_f32_to_mat3x3_unpack to unpack inverse_transpose_model_a: mat2x4, inverse_transpose_model_b: f32, + id: u32, // 'flags' is a bit field indicating various options. u32 is 32 bits so we have up to 32 options. flags: u32, }; diff --git a/crates/bevy_pbr/src/render/pbr.wgsl b/crates/bevy_pbr/src/render/pbr.wgsl index 545b40c5b3a243..3d94dce7c0bf62 100644 --- a/crates/bevy_pbr/src/render/pbr.wgsl +++ b/crates/bevy_pbr/src/render/pbr.wgsl @@ -18,13 +18,19 @@ #import bevy_pbr::gtao_utils gtao_multibounce #endif +struct FragmentOutput { + @location(0) color: vec4, +#ifdef GPU_PICKING + @location(1) mesh_id: u32, +#endif +}; + @fragment fn fragment( in: MeshVertexOutput, @builtin(front_facing) is_front: bool, -) -> @location(0) vec4 { +) -> FragmentOutput { var output_color: vec4 = pbr_bindings::material.base_color; - let is_orthographic = view.projection[3].w == 1.0; let V = pbr_functions::calculate_view(in.world_position, is_orthographic); #ifdef VERTEX_UVS @@ -165,5 +171,11 @@ fn fragment( #ifdef PREMULTIPLY_ALPHA output_color = pbr_functions::premultiply_alpha(pbr_bindings::material.flags, output_color); #endif - return output_color; + + var out: FragmentOutput; + out.color = output_color; +#ifdef GPU_PICKING + out.mesh_id = mesh[in.instance_index].id; +#endif + return out; } diff --git a/crates/bevy_render/src/lib.rs b/crates/bevy_render/src/lib.rs index 473f75fc3aba40..1f9198bef4b030 100644 --- a/crates/bevy_render/src/lib.rs +++ 
b/crates/bevy_render/src/lib.rs @@ -13,6 +13,7 @@ pub mod extract_resource; pub mod globals; pub mod gpu_component_array_buffer; pub mod mesh; +pub mod picking; pub mod pipelined_rendering; pub mod primitives; pub mod render_asset; diff --git a/crates/bevy_render/src/picking.rs b/crates/bevy_render/src/picking.rs new file mode 100644 index 00000000000000..ac8ba560319098 --- /dev/null +++ b/crates/bevy_render/src/picking.rs @@ -0,0 +1,438 @@ +//! Gpu picking lets you know which entity is currently being rendered under the mouse. +//! +//! # How this works: +//! +//! This happens over multiple frames +//! - Frame N: +//! - For each visible mesh, generate a mesh id. +//! - For each mesh being rendered, output its mesh id to a texture. +//! - Once everything is rendered copy that texture to the cpu +//! - Frame N + 1: +//! - Map the mesh id buffer and send it to the main world. +//! - This step will poll the gpu if necessary, so it could block here +//! - Frame N + 2: +//! - From the main world you can give it a position like the current mouse position and +//! know exactly which entity was rendered at that specific screen location. +//! Since this takes multiple frames, the exact entity under the mouse might not be +//! the same as the one visible. +//! +//! - This works at the `Camera` level, so it will work with multiple windows or split-screen. +//! +//! # Api Overview: +//! +//! To enable the feature, you need to add the [`GpuPickingPlugin`]. You then need to add +//! the [`GpuPickingCamera`] to any `Camera` that will be used for picking. Then add the +//! [`GpuPickingMesh`] component to any `Mesh` that will need to be picked. +//! +//! Once those components are added, you can query for [`GpuPickingCamera`] +//! and use `GpuPickingCamera::get_entity(position)` to know which entity is at the +//! given position on screen +//! +//! # Warning +//! +//! The mesh id generated every frame is currently encoded as a u16. This means you can
only have up to 65536 _visible_ entities on screen. + +use crate::{ + camera::ExtractedCamera, + extract_component::{ExtractComponent, ExtractComponentPlugin}, + render_resource::{Buffer, Texture}, + renderer::RenderDevice, + texture::{CachedTexture, TextureFormatPixelInfo}, + Render, RenderApp, RenderSet, +}; +use async_channel::{Receiver, Sender}; +use bevy_app::{Plugin, PreUpdate}; +use bevy_ecs::{prelude::*, query::QueryItem}; + +use bevy_math::UVec2; +use bevy_utils::{default, HashMap}; +use wgpu::{ + BufferDescriptor, BufferUsages, Color, CommandEncoder, Extent3d, ImageDataLayout, MapMode, + Operations, RenderPassColorAttachment, TextureFormat, +}; + +pub const MESH_ID_TEXTURE_FORMAT: TextureFormat = TextureFormat::R16Uint; + +const BUFFER_COUNT: usize = 2; + +/// This plugin enables the gpu picking feature of bevy. +pub struct GpuPickingPlugin; +impl Plugin for GpuPickingPlugin { + fn build(&self, app: &mut bevy_app::App) { + app.add_plugins(( + ExtractComponentPlugin::::default(), + ExtractComponentPlugin::::default(), + )) + // WARN It's really important for this to run in PreUpdate, + // otherwise this might introduce another frame delay to picking + .add_systems(PreUpdate, receive_buffer); + + let Ok(render_app) = app.get_sub_app_mut(RenderApp) else { + return; + }; + + render_app + .insert_resource(CurrentGpuPickingBufferIndex(0)) + .add_systems( + Render, + ( + prepare_buffers.in_set(RenderSet::PrepareResources), + send_buffer.in_set(RenderSet::RenderFlush), + increment_index.in_set(RenderSet::Cleanup), + ), + ); + } +} + +/// Gpu picking uses a double buffer technique, this index is used to know which buffer should be used this frame +#[derive(Resource)] +pub struct CurrentGpuPickingBufferIndex(usize); +fn increment_index(mut curr_index: ResMut) { + curr_index.0 = (curr_index.0 + 1) % BUFFER_COUNT; +} + +/// Marker component to indicate that a mesh should be available for gpu picking +#[derive(Component, ExtractComponent, Clone)] +pub struct 
GpuPickingMesh; + +/// This component is used to indicate if a camera should support gpu picking. +/// Any mesh with the [`GpuPickingMesh`] component that is visible from this camera +/// will be pickable. +#[derive(Component)] +pub struct GpuPickingCamera { + /// Used to send the required data between the main world and render world + data_channel: (Sender, Receiver), + /// The latest picking data received + data: GpuPickingData, + /// Used to determine if the buffer is mapped + /// This is only used in the render world, but this is here to avoid needlessly creating a new channel every frame + map_status_channel: (Sender<()>, Receiver<()>), +} + +impl Default for GpuPickingCamera { + fn default() -> Self { + Self::new() + } +} + +impl GpuPickingCamera { + pub fn new() -> Self { + Self { + data_channel: async_channel::bounded(1), + data: GpuPickingData::default(), + map_status_channel: async_channel::bounded(1), + } + } + + /// Get the entity at the given position. + /// If there is no entity, returns `None`. + pub fn get_entity(&self, pos: UVec2) -> Option { + // We know the position, but in order to find the true position of the bytes + // we're interested in, we have to know how wide a single row in the GPU written buffer is. + // Due to alignment requirements this may be wider than the physical camera size because + // of padding. + let pixel_size = MESH_ID_TEXTURE_FORMAT.pixel_size(); + let start = + (pos.y as usize * self.data.padded_bytes_per_row) + (pos.x as usize * pixel_size); + let end = start + pixel_size; + if end > self.data.mesh_id_buffer.len() { + return None; + } + + // TODO This is currently a constant, but could be user configurable + let texture_bytes = &self.data.mesh_id_buffer[start..end]; + let index = match MESH_ID_TEXTURE_FORMAT { + TextureFormat::R16Uint => u16::from_ne_bytes(texture_bytes.try_into().ok()?) as usize, + TextureFormat::R32Uint => u32::from_ne_bytes(texture_bytes.try_into().ok()?) 
as usize, + _ => panic!("Unsupported mesh id texture format"), + }; + let entity = self.data.visible_mesh_entities[index]; + + if entity != Entity::PLACEHOLDER { + Some(entity) + } else { + None + } + } +} + +impl ExtractComponent for GpuPickingCamera { + type Query = &'static Self; + type Filter = (); + type Out = ExtractedGpuPickingCamera; + fn extract_component(picking_camera: QueryItem<'_, Self::Query>) -> Option { + let (sender, _) = picking_camera.data_channel.clone(); + Some(ExtractedGpuPickingCamera { + buffers: None, + sender, + map_status_channel: picking_camera.map_status_channel.clone(), + }) + } +} + +/// Data needed in the render world to manage the entity buffer +#[derive(Component)] +pub struct ExtractedGpuPickingCamera { + buffers: Option, + sender: Sender, + map_status_channel: (Sender<()>, Receiver<()>), +} + +impl ExtractedGpuPickingCamera { + /// Runs all the operations for the node + /// This needs to be here because it needs a dependency on wgpu and `bevy_core_pipeline` doesn't have it.
+ pub fn run_node( + &self, + encoder: &mut CommandEncoder, + texture: &Texture, + current_buffer_index: &CurrentGpuPickingBufferIndex, + ) { + let Some(buffers) = self.buffers.as_ref() else { + return; + }; + + // Copy current frame to next buffer + let copy_index = (current_buffer_index.0 + 1) % BUFFER_COUNT; + buffers.copy_texture_to_buffer(encoder, texture, copy_index); + + // Map current buffer that will be copied and sent after the graph has finished + let map_index = current_buffer_index.0; + let buffer_slice = buffers.entity_buffers[map_index].slice(..); + let (tx, _) = self.map_status_channel.clone(); + buffer_slice.map_async(MapMode::Read, move |result| match result { + Ok(_) => tx.try_send(()).unwrap(), + Err(err) => panic!("Failed to map entity buffer {map_index}: {err}"), + }); + } +} + +/// Data sent between the render world and main world +#[derive(Default)] +struct GpuPickingData { + /// Padding required to compute the entity with the exact position in the buffer + padded_bytes_per_row: usize, + /// Buffer representing the entity texture + mesh_id_buffer: Vec, + /// A list of the visible entities during the frame the buffer was generated + /// The buffer contains an index into this list + visible_mesh_entities: Vec, +} + +/// Contains the buffers and their dimensions required for gpu picking +#[derive(Clone)] +struct GpuPickingCameraBuffers { + entity_buffers: [Buffer; BUFFER_COUNT], + // All buffers have the same dimension so we only need one + buffer_dimensions: BufferDimensions, +} + +impl GpuPickingCameraBuffers { + /// Copies the given texture to the buffer at the given index + fn copy_texture_to_buffer( + &self, + encoder: &mut CommandEncoder, + texture: &Texture, + buffer_index: usize, + ) { + encoder.copy_texture_to_buffer( + texture.as_image_copy(), + wgpu::ImageCopyBuffer { + buffer: &self.entity_buffers[buffer_index], + layout: ImageDataLayout { + offset: 0, + bytes_per_row: Some(self.buffer_dimensions.padded_bytes_per_row as u32), + 
rows_per_image: None, + }, + }, + Extent3d { + width: self.buffer_dimensions.width as u32, + height: self.buffer_dimensions.height as u32, + ..default() + }, + ); + } +} + +/// This is created every frame and contains a list of the currently visible entities +#[derive(Resource)] +pub struct VisibleMeshEntities(pub Option>); + +/// Sends the mesh id buffer to the main world +fn send_buffer( + query: Query<&ExtractedGpuPickingCamera>, + render_device: Res, + mut visible_mesh_entities: ResMut, + current_buffer_index: Res, +) { + let Some(visible_mesh_entities) = visible_mesh_entities.0.take() else { + return; + }; + + for gpu_picking_camera in &query { + let Some(buffers) = gpu_picking_camera.buffers.as_ref() else { + return; + }; + + // We need to make sure the map_async has completed before reading it + let (_, rx) = gpu_picking_camera.map_status_channel.clone(); + if rx.try_recv().is_err() { + // Sometimes the map isn't done at this point so we need to poll the gpu + // This will block until the map is done + render_device.poll(wgpu::MaintainBase::Wait); + // This is to empty the channel before we continue + rx.try_recv().expect("map_async should have been completed"); + } + + let send_index = current_buffer_index.0; + let buffer_slice = buffers.entity_buffers[send_index].slice(..); + let buffer_view = buffer_slice.get_mapped_range(); + let mesh_id_buffer = buffer_view.to_vec(); + // We have to make sure all mapped views are dropped before we unmap the buffer. 
+ drop(buffer_view); + // We need to unmap the buffer because it will be used in the next frame and can't be mapped at that point + buffers.entity_buffers[send_index].unmap(); + + // Send the data to the main world + if let Err(err) = gpu_picking_camera.sender.try_send(GpuPickingData { + padded_bytes_per_row: buffers.buffer_dimensions.padded_bytes_per_row, + mesh_id_buffer, + visible_mesh_entities: visible_mesh_entities.clone(), + }) { + bevy_log::error!("Failed to send entity buffer: {err}"); + } + } +} + +/// Receives the mesh id buffer from the render world +fn receive_buffer(mut cameras: Query<&mut GpuPickingCamera>) { + for mut cam in &mut cameras { + let (_, receiver) = cam.data_channel.clone(); + let Ok(data) = receiver.try_recv() else { + continue; + }; + cam.data = data; + } +} + +/// The textures used to draw the entity for each rendered mesh +#[derive(Component, Clone)] +pub struct VisibleMeshIdTextures { + pub main: CachedTexture, + pub sampled: Option, +} + +impl VisibleMeshIdTextures { + /// This is the color that will represent "no entity" in the mesh id buffer + pub fn clear_color() -> wgpu::Color { + Color { + r: 0.0, + g: 0.0, + b: 0.0, + a: 0.0, + } + } + + /// Retrieve this target's color attachment. This will use [`Self::sampled`] and resolve to [`Self::main`] if + /// the target has sampling enabled. Otherwise it will use [`Self::main`] directly. 
+ pub fn get_color_attachment(&self, ops: Operations) -> RenderPassColorAttachment { + match &self.sampled { + Some(sampled_texture) => RenderPassColorAttachment { + view: &sampled_texture.default_view, + resolve_target: Some(&self.main.default_view), + ops, + }, + None => RenderPassColorAttachment { + view: &self.main.default_view, + resolve_target: None, + ops, + }, + } + } +} + +/// This creates the required buffers for each camera +fn prepare_buffers( + render_device: Res, + mut cameras: Query< + (Entity, &ExtractedCamera, &mut ExtractedGpuPickingCamera), + Changed, + >, + mut buffer_cache: Local>, +) { + for (entity, camera, mut gpu_picking_camera) in &mut cameras { + let Some(size) = camera.physical_target_size else { + continue; + }; + + // We only want to create a buffer when there's no buffers in the cache + // or when the dimensions don't match + let mut create_buffer = true; + if let Some((buffer_dimensions, _)) = buffer_cache.get(&entity) { + // We could potentially account for padding and only re-create buffers + // when the full size of the buffer doesn't match + create_buffer = buffer_dimensions.width != size.x as usize + || buffer_dimensions.height != size.y as usize; + } + + if create_buffer { + let buffer_dimensions = + BufferDimensions::new(size.x as usize, size.y as usize, MESH_ID_TEXTURE_FORMAT); + let desc = BufferDescriptor { + label: None, + size: buffer_dimensions.size() as u64, + usage: BufferUsages::COPY_DST | BufferUsages::MAP_READ, + mapped_at_creation: false, + }; + let entity_buffers = [ + render_device.create_buffer(&BufferDescriptor { + label: Some("Entity Buffer 0"), + ..desc + }), + render_device.create_buffer(&BufferDescriptor { + label: Some("Entity Buffer 1"), + ..desc + }), + ]; + buffer_cache.insert(entity, (buffer_dimensions, entity_buffers)); + } + + let (buffer_dimensions, buffers) = buffer_cache + .get(&entity) + .expect("Buffers should have been created already"); + gpu_picking_camera.buffers = 
Some(GpuPickingCameraBuffers { + entity_buffers: buffers.clone(), + buffer_dimensions: *buffer_dimensions, + }); + } +} + +/// Used to represent the size of a [`Buffer`] and the padding required for each row. +/// We need to know the padding because the rows need to be 256 bit aligned. +/// +/// Copied from +#[derive(Clone, Copy)] +pub struct BufferDimensions { + width: usize, + height: usize, + padded_bytes_per_row: usize, +} + +impl BufferDimensions { + fn new(width: usize, height: usize, texture_format: TextureFormat) -> Self { + let bytes_per_pixel = texture_format.pixel_size(); + let unpadded_bytes_per_row = width * bytes_per_pixel; + let align = wgpu::COPY_BYTES_PER_ROW_ALIGNMENT as usize; + let padded_bytes_per_row_padding = (align - unpadded_bytes_per_row % align) % align; + let padded_bytes_per_row = unpadded_bytes_per_row + padded_bytes_per_row_padding; + Self { + width, + height, + padded_bytes_per_row, + } + } + + fn size(&self) -> usize { + self.padded_bytes_per_row * self.height + } +} diff --git a/examples/README.md b/examples/README.md index 6f53c34f7048ce..79b1c9de8e1e49 100644 --- a/examples/README.md +++ b/examples/README.md @@ -244,6 +244,7 @@ Example | Description Example | Description --- | --- [Char Input Events](../examples/input/char_input_events.rs) | Prints out all chars as they are inputted +[GPU picking](../examples/input/gpu_picking.rs) | Mouse picking using the gpu [Gamepad Input](../examples/input/gamepad_input.rs) | Shows handling of gamepad input, connections, and disconnections [Gamepad Input Events](../examples/input/gamepad_input_events.rs) | Iterates and prints gamepad input and connection events [Gamepad Rumble](../examples/input/gamepad_rumble.rs) | Shows how to rumble a gamepad using force feedback diff --git a/examples/input/gpu_picking.rs b/examples/input/gpu_picking.rs new file mode 100644 index 00000000000000..e2cd915a045a1e --- /dev/null +++ b/examples/input/gpu_picking.rs @@ -0,0 +1,212 @@ +//! 
This example shows how to use the gpu picking api.
+//!
+//! Gpu picking is a way to generate a texture of all the rendered entities and
+//! use this texture to determine exactly which entity is under the mouse.
+
+use bevy::prelude::*;
+use bevy_internal::{
+    reflect::{TypePath, TypeUuid},
+    render::{
+        picking::{GpuPickingCamera, GpuPickingMesh, GpuPickingPlugin},
+        render_resource::{AsBindGroup, ShaderRef},
+    },
+    window::PresentMode,
+};
+
+fn main() {
+    App::new()
+        .add_plugins((
+            DefaultPlugins.set(WindowPlugin {
+                primary_window: Some(Window {
+                    present_mode: PresentMode::AutoNoVsync,
+                    ..default()
+                }),
+                ..default()
+            }),
+            MaterialPlugin::<GpuPickingMaterial>::default(),
+            // Add the plugin
+            GpuPickingPlugin,
+        ))
+        .add_systems(Startup, setup)
+        .add_systems(Update, (mouse_picking, move_cube))
+        .run();
+}
+
+fn setup(
+    mut commands: Commands,
+    mut meshes: ResMut<Assets<Mesh>>,
+    mut materials: ResMut<Assets<StandardMaterial>>,
+    mut custom_materials: ResMut<Assets<GpuPickingMaterial>>,
+    asset_server: Res<AssetServer>,
+) {
+    // opaque cube
+    commands.spawn((
+        PbrBundle {
+            mesh: meshes.add(Mesh::from(shape::Cube { size: 1.0 })),
+            material: materials.add(Color::rgb(0.8, 0.7, 0.6).into()),
+            transform: Transform::from_xyz(0.0, 0.5, 0.0),
+            ..default()
+        },
+        // Add this component to any mesh that you want to be able to pick
+        GpuPickingMesh,
+    ));
+
+    // alpha mask cube
+    commands.spawn((
+        PbrBundle {
+            mesh: meshes.add(Mesh::from(shape::Cube { size: 1.0 })),
+            material: materials.add(StandardMaterial {
+                alpha_mode: AlphaMode::Mask(1.0),
+                base_color_texture: Some(asset_server.load("branding/icon.png")),
+                ..default()
+            }),
+            transform: Transform::from_xyz(1.0, 0.5, 0.0),
+            ..default()
+        },
+        GpuPickingMesh,
+    ));
+
+    // transparent cube
+    commands.spawn((
+        PbrBundle {
+            mesh: meshes.add(Mesh::from(shape::Cube { size: 1.0 })),
+            material: materials.add(Color::rgba(0.8, 0.7, 0.6, 0.5).into()),
+            transform: Transform::from_xyz(-1.0, 0.5, 0.0),
+            ..default()
+        },
+        GpuPickingMesh,
+    ));
+
+    // cube with custom material
+    commands.spawn((
MaterialMeshBundle {
+            mesh: meshes.add(Mesh::from(shape::Cube { size: 1.0 })),
+            transform: Transform::from_xyz(2.0, 0.5, 0.0),
+            material: custom_materials.add(GpuPickingMaterial {
+                color: Color::GREEN,
+            }),
+            ..default()
+        },
+        GpuPickingMesh,
+    ));
+
+    // This cube will move from left to right. It shows that picking works correctly when things are moving.
+    commands.spawn((
+        PbrBundle {
+            mesh: meshes.add(Mesh::from(shape::Cube { size: 1.0 })),
+            material: materials.add(Color::rgb(0.8, 0.7, 0.6).into()),
+            transform: Transform::from_xyz(0.0, 0.5, 1.0),
+            ..default()
+        },
+        GpuPickingMesh,
+        MoveCube,
+    ));
+
+    // plane
+    commands.spawn(PbrBundle {
+        mesh: meshes.add(shape::Plane::from_size(5.0).into()),
+        material: materials.add(Color::rgb(0.3, 0.5, 0.3).into()),
+        ..default()
+    });
+    // light
+    commands.spawn(PointLightBundle {
+        point_light: PointLight {
+            intensity: 1500.0,
+            shadows_enabled: true,
+            ..default()
+        },
+        transform: Transform::from_xyz(4.0, 8.0, 4.0),
+        ..default()
+    });
+    // camera
+    commands.spawn((
+        Camera3dBundle {
+            transform: Transform::from_xyz(-2.0, 2.5, 5.0).looking_at(Vec3::ZERO, Vec3::Y),
+            ..default()
+        },
+        GpuPickingCamera::default(),
+    ));
+}
+
+fn mouse_picking(
+    mut cursor_moved: EventReader<CursorMoved>,
+    gpu_picking_cameras: Query<&GpuPickingCamera>,
+    material_handle: Query<(
+        Option<&Handle<StandardMaterial>>,
+        Option<&Handle<GpuPickingMaterial>>,
+    )>,
+    mut materials: ResMut<Assets<StandardMaterial>>,
+    mut custom_materials: ResMut<Assets<GpuPickingMaterial>>,
+    mut hovered: Local<Option<Entity>>,
+) {
+    // Sets the color of the given entity
+    let mut set_color = |entity, color: Color| {
+        let (std_handle, custom_handle) = material_handle.get(entity).expect("Entity should exist");
+        if let Some(material) = std_handle.and_then(|h| materials.get_mut(h)) {
+            let a = material.base_color.a();
+            material.base_color = color.with_a(a);
+        };
+        if let Some(material) = custom_handle.and_then(|h| custom_materials.get_mut(h)) {
+            let a = material.color.a();
+            material.color = color.with_a(a);
+        };
+    };
+
+    let Some(moved_event) =
cursor_moved.iter().last() else { return; };
+    let mouse_position = moved_event.position.as_uvec2();
+
+    for gpu_picking_camera in &gpu_picking_cameras {
+        // This will read the entity texture and get the entity that is at the given position
+        if let Some(entity) = gpu_picking_camera.get_entity(mouse_position) {
+            if let Some(hovered) = *hovered {
+                if entity != hovered {
+                    set_color(hovered, Color::BLUE);
+                }
+            }
+            set_color(entity, Color::RED);
+            *hovered = Some(entity);
+        } else {
+            if let Some(hovered) = *hovered {
+                set_color(hovered, Color::BLUE);
+            }
+            *hovered = None;
+        }
+    }
+}
+
+// You can also use a custom material with it, you just need to make sure it correctly outputs the entity id
+// See assets/shaders/gpu_picking_material.wgsl for more information
+#[derive(AsBindGroup, TypeUuid, TypePath, Debug, Clone)]
+#[uuid = "fb9ea5e0-316d-4992-852b-aa1faa2a5a0d"]
+pub struct GpuPickingMaterial {
+    #[uniform(0)]
+    color: Color,
+}
+
+impl Material for GpuPickingMaterial {
+    fn fragment_shader() -> ShaderRef {
+        "shaders/gpu_picking_material.wgsl".into()
+    }
+}
+
+#[derive(Component)]
+struct MoveCube;
+
+// Moves a mesh from left to right
+// Used to show that picking works even if things are moving
+fn move_cube(
+    mut q: Query<&mut Transform, With<MoveCube>>,
+    time: Res