From 5e54305d9336379b02255e2a916ac890a9fbf2cb Mon Sep 17 00:00:00 2001 From: Rua Date: Sat, 22 Oct 2022 11:24:34 +0200 Subject: [PATCH 1/2] Near-full support for `VK_KHR_synchronization2` --- vulkano/src/command_buffer/commands/image.rs | 12 +- vulkano/src/command_buffer/commands/query.rs | 125 +- vulkano/src/command_buffer/commands/sync.rs | 720 +++++++++-- .../src/command_buffer/commands/transfer.rs | 20 +- vulkano/src/command_buffer/synced/builder.rs | 32 +- vulkano/src/macros.rs | 18 + vulkano/src/render_pass/create.rs | 299 +++-- vulkano/src/render_pass/macros.rs | 20 +- vulkano/src/render_pass/mod.rs | 68 +- vulkano/src/sync/pipeline.rs | 1121 +++++++++++++---- 10 files changed, 1928 insertions(+), 507 deletions(-) diff --git a/vulkano/src/command_buffer/commands/image.rs b/vulkano/src/command_buffer/commands/image.rs index 56a7db8592..67b394aecf 100644 --- a/vulkano/src/command_buffer/commands/image.rs +++ b/vulkano/src/command_buffer/commands/image.rs @@ -1217,7 +1217,7 @@ impl SyncCommandBufferBuilder { subresource_range: src_subresource.clone().into(), memory: PipelineMemoryAccess { stages: PipelineStages { - transfer: true, + all_transfer: true, ..PipelineStages::empty() }, access: AccessFlags { @@ -1237,7 +1237,7 @@ impl SyncCommandBufferBuilder { subresource_range: dst_subresource.clone().into(), memory: PipelineMemoryAccess { stages: PipelineStages { - transfer: true, + all_transfer: true, ..PipelineStages::empty() }, access: AccessFlags { @@ -1309,7 +1309,7 @@ impl SyncCommandBufferBuilder { subresource_range, memory: PipelineMemoryAccess { stages: PipelineStages { - transfer: true, + all_transfer: true, ..PipelineStages::empty() }, access: AccessFlags { @@ -1380,7 +1380,7 @@ impl SyncCommandBufferBuilder { subresource_range, memory: PipelineMemoryAccess { stages: PipelineStages { - transfer: true, + all_transfer: true, ..PipelineStages::empty() }, access: AccessFlags { @@ -1461,7 +1461,7 @@ impl SyncCommandBufferBuilder { subresource_range: src_subresource.clone().into(), memory: PipelineMemoryAccess { stages: PipelineStages { - transfer: true, + all_transfer: true, ..PipelineStages::empty() }, access: AccessFlags { @@ -1481,7 +1481,7 @@ impl SyncCommandBufferBuilder { subresource_range: dst_subresource.clone().into(), memory: PipelineMemoryAccess { stages: PipelineStages { - transfer: true, + all_transfer: true, ..PipelineStages::empty() }, access: AccessFlags { diff --git a/vulkano/src/command_buffer/commands/query.rs b/vulkano/src/command_buffer/commands/query.rs index 8a08d54ed8..781349d74c 100644 --- a/vulkano/src/command_buffer/commands/query.rs +++ b/vulkano/src/command_buffer/commands/query.rs @@ -22,7 +22,7 @@ use crate::{ QueryType, }, sync::{AccessFlags, PipelineMemoryAccess, PipelineStage, PipelineStages}, - DeviceSize, RequirementNotMet, RequiresOneOf, VulkanObject, + DeviceSize, RequirementNotMet, RequiresOneOf, Version, VulkanObject, }; use std::{ error::Error, @@ -239,6 +239,21 @@ where query: u32, stage: PipelineStage, ) -> Result<(), QueryError> { + let device = self.device(); + + if !device.enabled_features().synchronization2 && PipelineStages::from(stage).is_2() { + return Err(QueryError::RequirementNotMet { + required_for: "`stage` has bits set from `VkPipelineStageFlagBits2`", + requires_one_of: RequiresOneOf { + features: &["synchronization2"], + ..Default::default() + }, + }); + } + + // VUID-vkCmdWriteTimestamp-pipelineStage-parameter + stage.validate_device(device)?; + let queue_family_properties = self.queue_family_properties(); // 
VUID-vkCmdWriteTimestamp-commandBuffer-cmdpool @@ -285,6 +300,80 @@ where }); } } + PipelineStage::ConditionalRendering => { + // VUID-vkCmdWriteTimestamp-pipelineStage-04077 + if !device.enabled_features().conditional_rendering { + return Err(QueryError::RequirementNotMet { + required_for: "`stage` is `PipelineStage::ConditionalRendering`", + requires_one_of: RequiresOneOf { + features: &["conditional_rendering"], + ..Default::default() + }, + }); + } + } + PipelineStage::FragmentDensityProcess => { + // VUID-vkCmdWriteTimestamp-pipelineStage-04078 + if !device.enabled_features().fragment_density_map { + return Err(QueryError::RequirementNotMet { + required_for: "`stage` is `PipelineStage::FragmentDensityProcess`", + requires_one_of: RequiresOneOf { + features: &["fragment_density_map"], + ..Default::default() + }, + }); + } + } + PipelineStage::TransformFeedback => { + // VUID-vkCmdWriteTimestamp-pipelineStage-04079 + if !device.enabled_features().transform_feedback { + return Err(QueryError::RequirementNotMet { + required_for: "`stage` is `PipelineStage::TransformFeedback`", + requires_one_of: RequiresOneOf { + features: &["transform_feedback"], + ..Default::default() + }, + }); + } + } + PipelineStage::MeshShader => { + // VUID-vkCmdWriteTimestamp-pipelineStage-04080 + if !device.enabled_features().mesh_shader { + return Err(QueryError::RequirementNotMet { + required_for: "`stage` is `PipelineStage::MeshShader`", + requires_one_of: RequiresOneOf { + features: &["mesh_shader"], + ..Default::default() + }, + }); + } + } + PipelineStage::TaskShader => { + // VUID-vkCmdWriteTimestamp-pipelineStage-07077 + if !device.enabled_features().task_shader { + return Err(QueryError::RequirementNotMet { + required_for: "`stage` is `PipelineStage::TaskShader`", + requires_one_of: RequiresOneOf { + features: &["task_shader"], + ..Default::default() + }, + }); + } + } + PipelineStage::FragmentShadingRateAttachment => { + // VUID-vkCmdWriteTimestamp-pipelineStage-07314 + if !(device.enabled_features().attachment_fragment_shading_rate + || device.enabled_features().shading_rate_image) + { + return Err(QueryError::RequirementNotMet { + required_for: "`stage` is `PipelineStage::FragmentShadingRateAttachment`", + requires_one_of: RequiresOneOf { + features: &["attachment_fragment_shading_rate", "shading_rate_image"], + ..Default::default() + }, + }); + } + } _ => (), } @@ -620,7 +709,7 @@ impl SyncCommandBufferBuilder { range: 0..destination.size(), // TODO: memory: PipelineMemoryAccess { stages: PipelineStages { - transfer: true, + all_transfer: true, ..PipelineStages::empty() }, access: AccessFlags { @@ -700,12 +789,32 @@ impl UnsafeCommandBufferBuilder { #[inline] pub unsafe fn write_timestamp(&mut self, query: Query<'_>, stage: PipelineStage) { let fns = self.device.fns(); - (fns.v1_0.cmd_write_timestamp)( - self.handle, - stage.into(), - query.pool().handle(), - query.index(), - ); + + if self.device.enabled_features().synchronization2 { + if self.device.api_version() >= Version::V1_3 { + (fns.v1_3.cmd_write_timestamp2)( + self.handle, + stage.into(), + query.pool().handle(), + query.index(), + ); + } else { + debug_assert!(self.device.enabled_extensions().khr_synchronization2); + (fns.khr_synchronization2.cmd_write_timestamp2_khr)( + self.handle, + stage.into(), + query.pool().handle(), + query.index(), + ); + } + } else { + (fns.v1_0.cmd_write_timestamp)( + self.handle, + stage.into(), + query.pool().handle(), + query.index(), + ); + } } /// Calls `vkCmdCopyQueryPoolResults` on the builder. 
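// --- Illustrative usage sketch (editor addition, not part of the patch) -----------
// With `synchronization2` enabled, the `write_timestamp` above dispatches to
// `vkCmdWriteTimestamp2` (core 1.3) or `vkCmdWriteTimestamp2KHR`; without the
// feature it falls back to `vkCmdWriteTimestamp` and rejects stages that exist only
// in `VkPipelineStageFlagBits2`. A minimal call site might look as follows; the
// module paths and the query-pool setup are assumptions, not taken from the patch.
use vulkano::{
    command_buffer::sys::UnsafeCommandBufferBuilder, query::QueryPool, sync::PipelineStage,
};

unsafe fn record_frame_timestamp(
    builder: &mut UnsafeCommandBufferBuilder, // assumed to be in the recording state
    query_pool: &QueryPool,
) {
    // `BottomOfPipe` exists in both the original and the synchronization2 stage
    // sets, so this records correctly on either code path.
    builder.write_timestamp(query_pool.query(0).unwrap(), PipelineStage::BottomOfPipe);
}
// -----------------------------------------------------------------------------------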
diff --git a/vulkano/src/command_buffer/commands/sync.rs b/vulkano/src/command_buffer/commands/sync.rs index f718380dce..b39113f930 100644 --- a/vulkano/src/command_buffer/commands/sync.rs +++ b/vulkano/src/command_buffer/commands/sync.rs @@ -20,15 +20,15 @@ use crate::{ Version, VulkanObject, }; use smallvec::SmallVec; -use std::sync::Arc; +use std::{ptr, sync::Arc}; impl SyncCommandBufferBuilder { /// Calls `vkCmdSetEvent` on the builder. #[inline] - pub unsafe fn set_event(&mut self, event: Arc, stages: PipelineStages) { + pub unsafe fn set_event(&mut self, event: Arc, dependency_info: DependencyInfo) { struct Cmd { event: Arc, - stages: PipelineStages, + dependency_info: DependencyInfo, } impl Command for Cmd { @@ -37,11 +37,43 @@ impl SyncCommandBufferBuilder { } unsafe fn send(&self, out: &mut UnsafeCommandBufferBuilder) { - out.set_event(&self.event, self.stages); + out.set_event(&self.event, &self.dependency_info); } } - self.commands.push(Box::new(Cmd { event, stages })); + self.commands.push(Box::new(Cmd { + event, + dependency_info, + })); + } + + /// Calls `vkCmdWaitEvents` on the builder. + #[inline] + pub unsafe fn wait_events( + &mut self, + events: impl IntoIterator, DependencyInfo)>, + ) { + struct Cmd { + events: SmallVec<[(Arc, DependencyInfo); 4]>, + } + + impl Command for Cmd { + fn name(&self) -> &'static str { + "wait_events" + } + + unsafe fn send(&self, out: &mut UnsafeCommandBufferBuilder) { + out.wait_events( + self.events + .iter() + .map(|&(ref event, ref dependency_info)| (event.as_ref(), dependency_info)), + ); + } + } + + self.commands.push(Box::new(Cmd { + events: events.into_iter().collect(), + })); } /// Calls `vkCmdResetEvent` on the builder. @@ -83,58 +115,54 @@ impl UnsafeCommandBufferBuilder { let dependency_flags = ash::vk::DependencyFlags::BY_REGION; if self.device.enabled_features().synchronization2 { - let memory_barriers: SmallVec<[_; 2]> = memory_barriers + let memory_barriers_vk: SmallVec<[_; 2]> = memory_barriers .into_iter() .map(|barrier| { let &MemoryBarrier { - source_stages, - source_access, - destination_stages, - destination_access, + src_stages, + src_access, + dst_stages, + dst_access, _ne: _, } = barrier; - debug_assert!(source_stages.supported_access().contains(&source_access)); - debug_assert!(destination_stages - .supported_access() - .contains(&destination_access)); + debug_assert!(src_stages.supported_access().contains(&src_access)); + debug_assert!(dst_stages.supported_access().contains(&dst_access)); ash::vk::MemoryBarrier2 { - src_stage_mask: source_stages.into(), - src_access_mask: source_access.into(), - dst_stage_mask: destination_stages.into(), - dst_access_mask: destination_access.into(), + src_stage_mask: src_stages.into(), + src_access_mask: src_access.into(), + dst_stage_mask: dst_stages.into(), + dst_access_mask: dst_access.into(), ..Default::default() } }) .collect(); - let buffer_memory_barriers: SmallVec<[_; 8]> = buffer_memory_barriers + let buffer_memory_barriers_vk: SmallVec<[_; 8]> = buffer_memory_barriers .into_iter() .map(|barrier| { let &BufferMemoryBarrier { - source_stages, - source_access, - destination_stages, - destination_access, + src_stages, + src_access, + dst_stages, + dst_access, queue_family_transfer, ref buffer, ref range, _ne: _, } = barrier; - debug_assert!(source_stages.supported_access().contains(&source_access)); - debug_assert!(destination_stages - .supported_access() - .contains(&destination_access)); + debug_assert!(src_stages.supported_access().contains(&src_access)); + 
debug_assert!(dst_stages.supported_access().contains(&dst_access)); debug_assert!(!range.is_empty()); debug_assert!(range.end <= buffer.size()); ash::vk::BufferMemoryBarrier2 { - src_stage_mask: source_stages.into(), - src_access_mask: source_access.into(), - dst_stage_mask: destination_stages.into(), - dst_access_mask: destination_access.into(), + src_stage_mask: src_stages.into(), + src_access_mask: src_access.into(), + dst_stage_mask: dst_stages.into(), + dst_access_mask: dst_access.into(), src_queue_family_index: queue_family_transfer .map_or(ash::vk::QUEUE_FAMILY_IGNORED, |transfer| { transfer.source_index @@ -151,14 +179,14 @@ impl UnsafeCommandBufferBuilder { }) .collect(); - let image_memory_barriers: SmallVec<[_; 8]> = image_memory_barriers + let image_memory_barriers_vk: SmallVec<[_; 8]> = image_memory_barriers .into_iter() .map(|barrier| { let &ImageMemoryBarrier { - source_stages, - source_access, - destination_stages, - destination_access, + src_stages, + src_access, + dst_stages, + dst_access, old_layout, new_layout, queue_family_transfer, @@ -167,10 +195,8 @@ impl UnsafeCommandBufferBuilder { _ne: _, } = barrier; - debug_assert!(source_stages.supported_access().contains(&source_access)); - debug_assert!(destination_stages - .supported_access() - .contains(&destination_access)); + debug_assert!(src_stages.supported_access().contains(&src_access)); + debug_assert!(dst_stages.supported_access().contains(&dst_access)); debug_assert!(!matches!( new_layout, ImageLayout::Undefined | ImageLayout::Preinitialized @@ -188,10 +214,10 @@ impl UnsafeCommandBufferBuilder { ); ash::vk::ImageMemoryBarrier2 { - src_stage_mask: source_stages.into(), - src_access_mask: source_access.into(), - dst_stage_mask: destination_stages.into(), - dst_access_mask: destination_access.into(), + src_stage_mask: src_stages.into(), + src_access_mask: src_access.into(), + dst_stage_mask: dst_stages.into(), + dst_access_mask: dst_access.into(), old_layout: old_layout.into(), new_layout: new_layout.into(), src_queue_family_index: queue_family_transfer @@ -209,82 +235,82 @@ impl UnsafeCommandBufferBuilder { }) .collect(); - let dependency_info = ash::vk::DependencyInfo { + let dependency_info_vk = ash::vk::DependencyInfo { dependency_flags, - memory_barrier_count: memory_barriers.len() as u32, - p_memory_barriers: memory_barriers.as_ptr(), - buffer_memory_barrier_count: buffer_memory_barriers.len() as u32, - p_buffer_memory_barriers: buffer_memory_barriers.as_ptr(), - image_memory_barrier_count: image_memory_barriers.len() as u32, - p_image_memory_barriers: image_memory_barriers.as_ptr(), + memory_barrier_count: memory_barriers_vk.len() as u32, + p_memory_barriers: memory_barriers_vk.as_ptr(), + buffer_memory_barrier_count: buffer_memory_barriers_vk.len() as u32, + p_buffer_memory_barriers: buffer_memory_barriers_vk.as_ptr(), + image_memory_barrier_count: image_memory_barriers_vk.len() as u32, + p_image_memory_barriers: image_memory_barriers_vk.as_ptr(), ..Default::default() }; let fns = self.device.fns(); if self.device.api_version() >= Version::V1_3 { - (fns.v1_3.cmd_pipeline_barrier2)(self.handle, &dependency_info); + (fns.v1_3.cmd_pipeline_barrier2)(self.handle, &dependency_info_vk); } else { - (fns.khr_synchronization2.cmd_pipeline_barrier2_khr)(self.handle, &dependency_info); + debug_assert!(self.device.enabled_extensions().khr_synchronization2); + (fns.khr_synchronization2.cmd_pipeline_barrier2_khr)( + self.handle, + &dependency_info_vk, + ); } } else { let mut src_stage_mask = 
ash::vk::PipelineStageFlags::empty(); let mut dst_stage_mask = ash::vk::PipelineStageFlags::empty(); - let memory_barriers: SmallVec<[_; 2]> = memory_barriers + let memory_barriers_vk: SmallVec<[_; 2]> = memory_barriers .into_iter() .map(|barrier| { let &MemoryBarrier { - source_stages, - source_access, - destination_stages, - destination_access, + src_stages, + src_access, + dst_stages, + dst_access, _ne: _, } = barrier; - debug_assert!(source_stages.supported_access().contains(&source_access)); - debug_assert!(destination_stages - .supported_access() - .contains(&destination_access)); + debug_assert!(src_stages.supported_access().contains(&src_access)); + debug_assert!(dst_stages.supported_access().contains(&dst_access)); - src_stage_mask |= source_stages.into(); - dst_stage_mask |= destination_stages.into(); + src_stage_mask |= src_stages.into(); + dst_stage_mask |= dst_stages.into(); ash::vk::MemoryBarrier { - src_access_mask: source_access.into(), - dst_access_mask: destination_access.into(), + src_access_mask: src_access.into(), + dst_access_mask: dst_access.into(), ..Default::default() } }) .collect(); - let buffer_memory_barriers: SmallVec<[_; 8]> = buffer_memory_barriers + let buffer_memory_barriers_vk: SmallVec<[_; 8]> = buffer_memory_barriers .into_iter() .map(|barrier| { let &BufferMemoryBarrier { - source_stages, - source_access, - destination_stages, - destination_access, + src_stages, + src_access, + dst_stages, + dst_access, queue_family_transfer, ref buffer, ref range, _ne: _, } = barrier; - debug_assert!(source_stages.supported_access().contains(&source_access)); - debug_assert!(destination_stages - .supported_access() - .contains(&destination_access)); + debug_assert!(src_stages.supported_access().contains(&src_access)); + debug_assert!(dst_stages.supported_access().contains(&dst_access)); debug_assert!(!range.is_empty()); debug_assert!(range.end <= buffer.size()); - src_stage_mask |= source_stages.into(); - dst_stage_mask |= destination_stages.into(); + src_stage_mask |= src_stages.into(); + dst_stage_mask |= dst_stages.into(); ash::vk::BufferMemoryBarrier { - src_access_mask: source_access.into(), - dst_access_mask: destination_access.into(), + src_access_mask: src_access.into(), + dst_access_mask: dst_access.into(), src_queue_family_index: queue_family_transfer .map_or(ash::vk::QUEUE_FAMILY_IGNORED, |transfer| { transfer.source_index @@ -301,14 +327,14 @@ impl UnsafeCommandBufferBuilder { }) .collect(); - let image_memory_barriers: SmallVec<[_; 8]> = image_memory_barriers + let image_memory_barriers_vk: SmallVec<[_; 8]> = image_memory_barriers .into_iter() .map(|barrier| { let &ImageMemoryBarrier { - source_stages, - source_access, - destination_stages, - destination_access, + src_stages, + src_access, + dst_stages, + dst_access, old_layout, new_layout, queue_family_transfer, @@ -317,10 +343,8 @@ impl UnsafeCommandBufferBuilder { _ne: _, } = barrier; - debug_assert!(source_stages.supported_access().contains(&source_access)); - debug_assert!(destination_stages - .supported_access() - .contains(&destination_access)); + debug_assert!(src_stages.supported_access().contains(&src_access)); + debug_assert!(dst_stages.supported_access().contains(&dst_access)); debug_assert!(!matches!( new_layout, ImageLayout::Undefined | ImageLayout::Preinitialized @@ -337,12 +361,12 @@ impl UnsafeCommandBufferBuilder { subresource_range.array_layers.end <= image.dimensions().array_layers() ); - src_stage_mask |= source_stages.into(); - dst_stage_mask |= destination_stages.into(); + 
src_stage_mask |= src_stages.into(); + dst_stage_mask |= dst_stages.into(); ash::vk::ImageMemoryBarrier { - src_access_mask: source_access.into(), - dst_access_mask: destination_access.into(), + src_access_mask: src_access.into(), + dst_access_mask: dst_access.into(), old_layout: old_layout.into(), new_layout: new_layout.into(), src_queue_family_index: queue_family_transfer @@ -378,33 +402,519 @@ impl UnsafeCommandBufferBuilder { src_stage_mask, dst_stage_mask, dependency_flags, - memory_barriers.len() as u32, - memory_barriers.as_ptr(), - buffer_memory_barriers.len() as u32, - buffer_memory_barriers.as_ptr(), - image_memory_barriers.len() as u32, - image_memory_barriers.as_ptr(), + memory_barriers_vk.len() as u32, + memory_barriers_vk.as_ptr(), + buffer_memory_barriers_vk.len() as u32, + buffer_memory_barriers_vk.as_ptr(), + image_memory_barriers_vk.len() as u32, + image_memory_barriers_vk.as_ptr(), ); } } /// Calls `vkCmdSetEvent` on the builder. #[inline] - pub unsafe fn set_event(&mut self, event: &Event, stages: PipelineStages) { - debug_assert!(!stages.host); - debug_assert_ne!(stages, PipelineStages::empty()); + pub unsafe fn set_event(&mut self, event: &Event, dependency_info: &DependencyInfo) { + let DependencyInfo { + memory_barriers, + buffer_memory_barriers, + image_memory_barriers, + _ne: _, + } = dependency_info; + + let dependency_flags = ash::vk::DependencyFlags::BY_REGION; + let fns = self.device.fns(); - (fns.v1_0.cmd_set_event)(self.handle, event.handle(), stages.into()); + + if self.device.enabled_features().synchronization2 { + let memory_barriers_vk: SmallVec<[_; 2]> = memory_barriers + .into_iter() + .map(|barrier| { + let &MemoryBarrier { + src_stages, + src_access, + dst_stages, + dst_access, + _ne: _, + } = barrier; + + ash::vk::MemoryBarrier2 { + src_stage_mask: src_stages.into(), + src_access_mask: src_access.into(), + dst_stage_mask: dst_stages.into(), + dst_access_mask: dst_access.into(), + ..Default::default() + } + }) + .collect(); + + let buffer_memory_barriers_vk: SmallVec<[_; 8]> = buffer_memory_barriers + .into_iter() + .map(|barrier| { + let &BufferMemoryBarrier { + src_stages, + src_access, + dst_stages, + dst_access, + queue_family_transfer, + ref buffer, + ref range, + _ne: _, + } = barrier; + + ash::vk::BufferMemoryBarrier2 { + src_stage_mask: src_stages.into(), + src_access_mask: src_access.into(), + dst_stage_mask: dst_stages.into(), + dst_access_mask: dst_access.into(), + src_queue_family_index: queue_family_transfer + .map_or(ash::vk::QUEUE_FAMILY_IGNORED, |transfer| { + transfer.source_index + }), + dst_queue_family_index: queue_family_transfer + .map_or(ash::vk::QUEUE_FAMILY_IGNORED, |transfer| { + transfer.destination_index + }), + buffer: buffer.handle(), + offset: range.start, + size: range.end - range.start, + ..Default::default() + } + }) + .collect(); + + let image_memory_barriers_vk: SmallVec<[_; 8]> = image_memory_barriers + .into_iter() + .map(|barrier| { + let &ImageMemoryBarrier { + src_stages, + src_access, + dst_stages, + dst_access, + old_layout, + new_layout, + queue_family_transfer, + ref image, + ref subresource_range, + _ne: _, + } = barrier; + + ash::vk::ImageMemoryBarrier2 { + src_stage_mask: src_stages.into(), + src_access_mask: src_access.into(), + dst_stage_mask: dst_stages.into(), + dst_access_mask: dst_access.into(), + old_layout: old_layout.into(), + new_layout: new_layout.into(), + src_queue_family_index: queue_family_transfer + .map_or(ash::vk::QUEUE_FAMILY_IGNORED, |transfer| { + transfer.source_index + }), + 
dst_queue_family_index: queue_family_transfer + .map_or(ash::vk::QUEUE_FAMILY_IGNORED, |transfer| { + transfer.destination_index + }), + image: image.handle(), + subresource_range: subresource_range.clone().into(), + ..Default::default() + } + }) + .collect(); + + let dependency_info_vk = ash::vk::DependencyInfo { + dependency_flags, + memory_barrier_count: memory_barriers_vk.len() as u32, + p_memory_barriers: memory_barriers_vk.as_ptr(), + buffer_memory_barrier_count: buffer_memory_barriers_vk.len() as u32, + p_buffer_memory_barriers: buffer_memory_barriers_vk.as_ptr(), + image_memory_barrier_count: image_memory_barriers_vk.len() as u32, + p_image_memory_barriers: image_memory_barriers_vk.as_ptr(), + ..Default::default() + }; + + if self.device.api_version() >= Version::V1_3 { + (fns.v1_3.cmd_set_event2)(self.handle, event.handle(), &dependency_info_vk); + } else { + debug_assert!(self.device.enabled_extensions().khr_synchronization2); + (fns.khr_synchronization2.cmd_set_event2_khr)( + self.handle, + event.handle(), + &dependency_info_vk, + ); + } + } else { + // The original function only takes a source stage mask; the rest of the info is + // provided with `wait_events` instead. Therefore, we condense the source stages + // here and ignore the rest. + + let mut stage_mask = ash::vk::PipelineStageFlags::empty(); + + for barrier in memory_barriers { + stage_mask |= barrier.src_stages.into(); + } + + for barrier in buffer_memory_barriers { + stage_mask |= barrier.src_stages.into(); + } + + for barrier in image_memory_barriers { + stage_mask |= barrier.src_stages.into(); + } + + if stage_mask.is_empty() { + // "VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT is [...] equivalent to + // VK_PIPELINE_STAGE_2_NONE in the first scope." + stage_mask |= ash::vk::PipelineStageFlags::TOP_OF_PIPE; + } + + (fns.v1_0.cmd_set_event)(self.handle, event.handle(), stage_mask); + } + } + + /// Calls `vkCmdWaitEvents` on the builder. 
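// --- Illustrative usage sketch (editor addition, not part of the patch) -----------
// The synced builder's `set_event` now takes a whole `DependencyInfo` instead of a
// bare stage mask. On a `synchronization2` device it reaches `vkCmdSetEvent2`
// unchanged; on the legacy path above only the union of the `src_stages` is
// forwarded, and the same info is expected again in the matching `wait_events`.
// Imports are omitted, the access-flag field names are assumptions, and `Default`
// impls on `MemoryBarrier`/`DependencyInfo` are assumed, as for other `*Info` types.
fn signal_after_color_output(builder: &mut SyncCommandBufferBuilder, event: Arc<Event>) {
    let dependency_info = DependencyInfo {
        memory_barriers: [MemoryBarrier {
            src_stages: PipelineStages {
                color_attachment_output: true,
                ..PipelineStages::empty()
            },
            src_access: AccessFlags {
                color_attachment_write: true, // assumed flag name
                ..AccessFlags::empty()
            },
            dst_stages: PipelineStages {
                fragment_shader: true,
                ..PipelineStages::empty()
            },
            dst_access: AccessFlags {
                shader_read: true, // assumed flag name
                ..AccessFlags::empty()
            },
            ..MemoryBarrier::default()
        }]
        .into_iter()
        .collect(),
        ..DependencyInfo::default()
    };

    // The matching `wait_events` call later takes the event together with an
    // equivalent `DependencyInfo`.
    unsafe { builder.set_event(event, dependency_info) };
}
// -----------------------------------------------------------------------------------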
+ pub unsafe fn wait_events<'a>( + &mut self, + events: impl IntoIterator, + ) { + let fns = self.device.fns(); + + if self.device.enabled_features().synchronization2 { + struct PerDependencyInfo { + memory_barriers_vk: SmallVec<[ash::vk::MemoryBarrier2; 2]>, + buffer_memory_barriers_vk: SmallVec<[ash::vk::BufferMemoryBarrier2; 8]>, + image_memory_barriers_vk: SmallVec<[ash::vk::ImageMemoryBarrier2; 8]>, + } + + let mut events_vk: SmallVec<[_; 4]> = SmallVec::new(); + let mut dependency_infos_vk: SmallVec<[_; 4]> = SmallVec::new(); + let mut per_dependency_info_vk: SmallVec<[_; 4]> = SmallVec::new(); + + for (event, dependency_info) in events { + let DependencyInfo { + memory_barriers, + buffer_memory_barriers, + image_memory_barriers, + _ne: _, + } = dependency_info; + + let dependency_flags = ash::vk::DependencyFlags::BY_REGION; + + let memory_barriers_vk: SmallVec<[_; 2]> = memory_barriers + .into_iter() + .map(|barrier| { + let &MemoryBarrier { + src_stages, + src_access, + dst_stages, + dst_access, + _ne: _, + } = barrier; + + ash::vk::MemoryBarrier2 { + src_stage_mask: src_stages.into(), + src_access_mask: src_access.into(), + dst_stage_mask: dst_stages.into(), + dst_access_mask: dst_access.into(), + ..Default::default() + } + }) + .collect(); + + let buffer_memory_barriers_vk: SmallVec<[_; 8]> = buffer_memory_barriers + .into_iter() + .map(|barrier| { + let &BufferMemoryBarrier { + src_stages, + src_access, + dst_stages, + dst_access, + queue_family_transfer, + ref buffer, + ref range, + _ne: _, + } = barrier; + + ash::vk::BufferMemoryBarrier2 { + src_stage_mask: src_stages.into(), + src_access_mask: src_access.into(), + dst_stage_mask: dst_stages.into(), + dst_access_mask: dst_access.into(), + src_queue_family_index: queue_family_transfer + .map_or(ash::vk::QUEUE_FAMILY_IGNORED, |transfer| { + transfer.source_index + }), + dst_queue_family_index: queue_family_transfer + .map_or(ash::vk::QUEUE_FAMILY_IGNORED, |transfer| { + transfer.destination_index + }), + buffer: buffer.handle(), + offset: range.start, + size: range.end - range.start, + ..Default::default() + } + }) + .collect(); + + let image_memory_barriers_vk: SmallVec<[_; 8]> = image_memory_barriers + .into_iter() + .map(|barrier| { + let &ImageMemoryBarrier { + src_stages, + src_access, + dst_stages, + dst_access, + old_layout, + new_layout, + queue_family_transfer, + ref image, + ref subresource_range, + _ne: _, + } = barrier; + + ash::vk::ImageMemoryBarrier2 { + src_stage_mask: src_stages.into(), + src_access_mask: src_access.into(), + dst_stage_mask: dst_stages.into(), + dst_access_mask: dst_access.into(), + old_layout: old_layout.into(), + new_layout: new_layout.into(), + src_queue_family_index: queue_family_transfer + .map_or(ash::vk::QUEUE_FAMILY_IGNORED, |transfer| { + transfer.source_index + }), + dst_queue_family_index: queue_family_transfer + .map_or(ash::vk::QUEUE_FAMILY_IGNORED, |transfer| { + transfer.destination_index + }), + image: image.handle(), + subresource_range: subresource_range.clone().into(), + ..Default::default() + } + }) + .collect(); + + events_vk.push(event.handle()); + dependency_infos_vk.push(ash::vk::DependencyInfo { + dependency_flags, + memory_barrier_count: 0, + p_memory_barriers: ptr::null(), + buffer_memory_barrier_count: 0, + p_buffer_memory_barriers: ptr::null(), + image_memory_barrier_count: 0, + p_image_memory_barriers: ptr::null(), + ..Default::default() + }); + per_dependency_info_vk.push(PerDependencyInfo { + memory_barriers_vk, + buffer_memory_barriers_vk, + 
image_memory_barriers_vk, + }); + } + + for ( + dependency_info_vk, + PerDependencyInfo { + memory_barriers_vk, + buffer_memory_barriers_vk, + image_memory_barriers_vk, + }, + ) in (dependency_infos_vk.iter_mut()).zip(per_dependency_info_vk.iter_mut()) + { + *dependency_info_vk = ash::vk::DependencyInfo { + memory_barrier_count: memory_barriers_vk.len() as u32, + p_memory_barriers: memory_barriers_vk.as_ptr(), + buffer_memory_barrier_count: buffer_memory_barriers_vk.len() as u32, + p_buffer_memory_barriers: buffer_memory_barriers_vk.as_ptr(), + image_memory_barrier_count: image_memory_barriers_vk.len() as u32, + p_image_memory_barriers: image_memory_barriers_vk.as_ptr(), + ..*dependency_info_vk + } + } + + if self.device.api_version() >= Version::V1_3 { + (fns.v1_3.cmd_wait_events2)( + self.handle, + events_vk.len() as u32, + events_vk.as_ptr(), + dependency_infos_vk.as_ptr(), + ); + } else { + debug_assert!(self.device.enabled_extensions().khr_synchronization2); + (fns.khr_synchronization2.cmd_wait_events2_khr)( + self.handle, + events_vk.len() as u32, + events_vk.as_ptr(), + dependency_infos_vk.as_ptr(), + ); + } + } else { + // With the original function, you can only specify a single dependency info for all + // events at once, rather than separately for each event. Therefore, to achieve the + // same behaviour as the "2" function, we split it up into multiple Vulkan API calls, + // one per event. + + for (event, dependency_info) in events { + let events_vk = [event.handle()]; + + let DependencyInfo { + memory_barriers, + buffer_memory_barriers, + image_memory_barriers, + _ne: _, + } = dependency_info; + + let mut src_stage_mask = ash::vk::PipelineStageFlags::empty(); + let mut dst_stage_mask = ash::vk::PipelineStageFlags::empty(); + + let memory_barriers_vk: SmallVec<[_; 2]> = memory_barriers + .into_iter() + .map(|barrier| { + let &MemoryBarrier { + src_stages, + src_access, + dst_stages, + dst_access, + _ne: _, + } = barrier; + + src_stage_mask |= src_stages.into(); + dst_stage_mask |= dst_stages.into(); + + ash::vk::MemoryBarrier { + src_access_mask: src_access.into(), + dst_access_mask: dst_access.into(), + ..Default::default() + } + }) + .collect(); + + let buffer_memory_barriers_vk: SmallVec<[_; 8]> = buffer_memory_barriers + .into_iter() + .map(|barrier| { + let &BufferMemoryBarrier { + src_stages, + src_access, + dst_stages, + dst_access, + queue_family_transfer, + ref buffer, + ref range, + _ne: _, + } = barrier; + + src_stage_mask |= src_stages.into(); + dst_stage_mask |= dst_stages.into(); + + ash::vk::BufferMemoryBarrier { + src_access_mask: src_access.into(), + dst_access_mask: dst_access.into(), + src_queue_family_index: queue_family_transfer + .map_or(ash::vk::QUEUE_FAMILY_IGNORED, |transfer| { + transfer.source_index + }), + dst_queue_family_index: queue_family_transfer + .map_or(ash::vk::QUEUE_FAMILY_IGNORED, |transfer| { + transfer.destination_index + }), + buffer: buffer.handle(), + offset: range.start, + size: range.end - range.start, + ..Default::default() + } + }) + .collect(); + + let image_memory_barriers_vk: SmallVec<[_; 8]> = image_memory_barriers + .into_iter() + .map(|barrier| { + let &ImageMemoryBarrier { + src_stages, + src_access, + dst_stages, + dst_access, + old_layout, + new_layout, + queue_family_transfer, + ref image, + ref subresource_range, + _ne: _, + } = barrier; + + src_stage_mask |= src_stages.into(); + dst_stage_mask |= dst_stages.into(); + + ash::vk::ImageMemoryBarrier { + src_access_mask: src_access.into(), + dst_access_mask: 
dst_access.into(), + old_layout: old_layout.into(), + new_layout: new_layout.into(), + src_queue_family_index: queue_family_transfer + .map_or(ash::vk::QUEUE_FAMILY_IGNORED, |transfer| { + transfer.source_index + }), + dst_queue_family_index: queue_family_transfer + .map_or(ash::vk::QUEUE_FAMILY_IGNORED, |transfer| { + transfer.destination_index + }), + image: image.handle(), + subresource_range: subresource_range.clone().into(), + ..Default::default() + } + }) + .collect(); + + if src_stage_mask.is_empty() { + // "VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT is [...] equivalent to + // VK_PIPELINE_STAGE_2_NONE in the first scope." + src_stage_mask |= ash::vk::PipelineStageFlags::TOP_OF_PIPE; + } + + if dst_stage_mask.is_empty() { + // "VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT is [...] equivalent to + // VK_PIPELINE_STAGE_2_NONE in the second scope." + dst_stage_mask |= ash::vk::PipelineStageFlags::BOTTOM_OF_PIPE; + } + + (fns.v1_0.cmd_wait_events)( + self.handle, + 1, + events_vk.as_ptr(), + src_stage_mask, + dst_stage_mask, + memory_barriers_vk.len() as u32, + memory_barriers_vk.as_ptr(), + buffer_memory_barriers_vk.len() as u32, + buffer_memory_barriers_vk.as_ptr(), + image_memory_barriers_vk.len() as u32, + image_memory_barriers_vk.as_ptr(), + ); + } + } } /// Calls `vkCmdResetEvent` on the builder. #[inline] pub unsafe fn reset_event(&mut self, event: &Event, stages: PipelineStages) { - let fns = self.device.fns(); - debug_assert!(!stages.host); debug_assert_ne!(stages, PipelineStages::empty()); - (fns.v1_0.cmd_reset_event)(self.handle, event.handle(), stages.into()); + let fns = self.device.fns(); + + if self.device.enabled_features().synchronization2 { + if self.device.api_version() >= Version::V1_3 { + (fns.v1_3.cmd_reset_event2)(self.handle, event.handle(), stages.into()); + } else { + debug_assert!(self.device.enabled_extensions().khr_synchronization2); + (fns.khr_synchronization2.cmd_reset_event2_khr)( + self.handle, + event.handle(), + stages.into(), + ); + } + } else { + (fns.v1_0.cmd_reset_event)(self.handle, event.handle(), stages.into()); + } } + + // TODO: wait_event } diff --git a/vulkano/src/command_buffer/commands/transfer.rs b/vulkano/src/command_buffer/commands/transfer.rs index 69f469a1af..3c6bc6c8e0 100644 --- a/vulkano/src/command_buffer/commands/transfer.rs +++ b/vulkano/src/command_buffer/commands/transfer.rs @@ -1993,7 +1993,7 @@ impl SyncCommandBufferBuilder { range: src_offset..src_offset + size, memory: PipelineMemoryAccess { stages: PipelineStages { - transfer: true, + all_transfer: true, ..PipelineStages::empty() }, access: AccessFlags { @@ -2011,7 +2011,7 @@ impl SyncCommandBufferBuilder { range: dst_offset..dst_offset + size, memory: PipelineMemoryAccess { stages: PipelineStages { - transfer: true, + all_transfer: true, ..PipelineStages::empty() }, access: AccessFlags { @@ -2091,7 +2091,7 @@ impl SyncCommandBufferBuilder { subresource_range: src_subresource.clone().into(), memory: PipelineMemoryAccess { stages: PipelineStages { - transfer: true, + all_transfer: true, ..PipelineStages::empty() }, access: AccessFlags { @@ -2111,7 +2111,7 @@ impl SyncCommandBufferBuilder { subresource_range: dst_subresource.clone().into(), memory: PipelineMemoryAccess { stages: PipelineStages { - transfer: true, + all_transfer: true, ..PipelineStages::empty() }, access: AccessFlags { @@ -2194,7 +2194,7 @@ impl SyncCommandBufferBuilder { ..buffer_offset + region.buffer_copy_size(dst_image.format()), memory: PipelineMemoryAccess { stages: PipelineStages { - transfer: true, + 
all_transfer: true, ..PipelineStages::empty() }, access: AccessFlags { @@ -2212,7 +2212,7 @@ impl SyncCommandBufferBuilder { subresource_range: image_subresource.clone().into(), memory: PipelineMemoryAccess { stages: PipelineStages { - transfer: true, + all_transfer: true, ..PipelineStages::empty() }, access: AccessFlags { @@ -2296,7 +2296,7 @@ impl SyncCommandBufferBuilder { subresource_range: image_subresource.clone().into(), memory: PipelineMemoryAccess { stages: PipelineStages { - transfer: true, + all_transfer: true, ..PipelineStages::empty() }, access: AccessFlags { @@ -2317,7 +2317,7 @@ impl SyncCommandBufferBuilder { ..buffer_offset + region.buffer_copy_size(src_image.format()), memory: PipelineMemoryAccess { stages: PipelineStages { - transfer: true, + all_transfer: true, ..PipelineStages::empty() }, access: AccessFlags { @@ -2382,7 +2382,7 @@ impl SyncCommandBufferBuilder { range: dst_offset..dst_offset + size, memory: PipelineMemoryAccess { stages: PipelineStages { - transfer: true, + all_transfer: true, ..PipelineStages::empty() }, access: AccessFlags { @@ -2445,7 +2445,7 @@ impl SyncCommandBufferBuilder { range: dst_offset..dst_offset + size_of_val(data.deref()) as DeviceSize, memory: PipelineMemoryAccess { stages: PipelineStages { - transfer: true, + all_transfer: true, ..PipelineStages::empty() }, access: AccessFlags { diff --git a/vulkano/src/command_buffer/synced/builder.rs b/vulkano/src/command_buffer/synced/builder.rs index c801aacb5c..97bc44467b 100644 --- a/vulkano/src/command_buffer/synced/builder.rs +++ b/vulkano/src/command_buffer/synced/builder.rs @@ -477,10 +477,10 @@ impl SyncCommandBufferBuilder { self.pending_barrier .buffer_memory_barriers .push(BufferMemoryBarrier { - source_stages: state.memory.stages, - source_access: state.memory.access, - destination_stages: memory.stages, - destination_access: memory.access, + src_stages: state.memory.stages, + src_access: state.memory.access, + dst_stages: memory.stages, + dst_access: memory.access, range: range.clone(), ..BufferMemoryBarrier::buffer(inner.buffer.clone()) }); @@ -636,13 +636,13 @@ impl SyncCommandBufferBuilder { // cases, in the general situation it will be ok. 
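// --- Illustrative sketch (editor addition, not part of the patch) -----------------
// The barrier structs now use the spec's `src_*`/`dst_*` field names, and the
// transfer stage is spelled `all_transfer`. A hand-built "transfer write, then
// fragment-shader read" buffer barrier would look like this. The hunks above only
// show `AccessFlags { ... }` being opened, so `transfer_write`/`shader_read` are
// assumed flag names; `buffer` stands for the same `Arc` handle that the
// surrounding code passes to `BufferMemoryBarrier::buffer`.
let barrier = BufferMemoryBarrier {
    src_stages: PipelineStages {
        all_transfer: true,
        ..PipelineStages::empty()
    },
    src_access: AccessFlags {
        transfer_write: true, // assumed flag name
        ..AccessFlags::empty()
    },
    dst_stages: PipelineStages {
        fragment_shader: true,
        ..PipelineStages::empty()
    },
    dst_access: AccessFlags {
        shader_read: true, // assumed flag name
        ..AccessFlags::empty()
    },
    range: 0..buffer.size(),
    ..BufferMemoryBarrier::buffer(buffer.clone())
};
// It would then be queued exactly as the surrounding builder code does:
// self.pending_barrier.buffer_memory_barriers.push(barrier);
// -----------------------------------------------------------------------------------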
self.pending_barrier.image_memory_barriers.push( ImageMemoryBarrier { - source_stages: PipelineStages { + src_stages: PipelineStages { bottom_of_pipe: true, ..PipelineStages::empty() }, - source_access: AccessFlags::empty(), - destination_stages: memory.stages, - destination_access: memory.access, + src_access: AccessFlags::empty(), + dst_stages: memory.stages, + dst_access: memory.access, old_layout: state.initial_layout, new_layout: start_layout, subresource_range: inner @@ -706,10 +706,10 @@ impl SyncCommandBufferBuilder { self.pending_barrier .image_memory_barriers .push(ImageMemoryBarrier { - source_stages: state.memory.stages, - source_access: state.memory.access, - destination_stages: memory.stages, - destination_access: memory.access, + src_stages: state.memory.stages, + src_access: state.memory.access, + dst_stages: memory.stages, + dst_access: memory.access, old_layout: state.current_layout, new_layout: start_layout, subresource_range: inner.image.range_to_subresources(range.clone()), @@ -768,13 +768,13 @@ impl SyncCommandBufferBuilder { self.pending_barrier .image_memory_barriers .push(ImageMemoryBarrier { - source_stages: state.memory.stages, - source_access: state.memory.access, - destination_stages: PipelineStages { + src_stages: state.memory.stages, + src_access: state.memory.access, + dst_stages: PipelineStages { top_of_pipe: true, ..PipelineStages::empty() }, - destination_access: AccessFlags::empty(), + dst_access: AccessFlags::empty(), old_layout: state.current_layout, new_layout: state.final_layout, subresource_range: image.range_to_subresources(range.clone()), diff --git a/vulkano/src/macros.rs b/vulkano/src/macros.rs index 9969780d61..1773a3bd77 100644 --- a/vulkano/src/macros.rs +++ b/vulkano/src/macros.rs @@ -248,6 +248,7 @@ macro_rules! vulkan_bitflags { $flag_name:ident = $flag_name_ffi:ident $({ $(api_version: $api_version:ident,)? + $(features: [$($feature:ident),+ $(,)?],)? $(device_extensions: [$($device_extension:ident),+ $(,)?],)? $(instance_extensions: [$($instance_extension:ident),+ $(,)?],)? })? @@ -364,6 +365,9 @@ macro_rules! vulkan_bitflags { $( device.api_version() >= crate::Version::$api_version, )? + $($( + device.enabled_features().$feature, + )+)? $($( device.enabled_extensions().$device_extension, )+)? @@ -375,6 +379,7 @@ macro_rules! vulkan_bitflags { required_for: concat!("`", stringify!($ty), "::", stringify!($flag_name), "`"), requires_one_of: crate::RequiresOneOf { $(api_version: Some(crate::Version::$api_version),)? + $(features: &[$(stringify!($feature)),+],)? $(device_extensions: &[$(stringify!($device_extension)),+],)? $(instance_extensions: &[$(stringify!($instance_extension)),+],)? ..Default::default() @@ -398,6 +403,9 @@ macro_rules! vulkan_bitflags { $( physical_device.api_version() >= crate::Version::$api_version, )? + $($( + physical_device.supported_features().$feature, + )+)? $($( physical_device.supported_extensions().$device_extension, )+)? @@ -409,6 +417,7 @@ macro_rules! vulkan_bitflags { required_for: concat!("`", stringify!($ty), "::", stringify!($flag_name), "`"), requires_one_of: crate::RequiresOneOf { $(api_version: Some(crate::Version::$api_version),)? + $(features: &[$(stringify!($feature)),+],)? $(device_extensions: &[$(stringify!($device_extension)),+],)? $(instance_extensions: &[$(stringify!($instance_extension)),+],)? ..Default::default() @@ -605,6 +614,7 @@ macro_rules! vulkan_enum { $flag_name:ident = $flag_name_ffi:ident $({ $(api_version: $api_version:ident,)? + $(features: [$($feature:ident),+ $(,)?],)? 
$(device_extensions: [$($device_extension:ident),+ $(,)?],)? $(instance_extensions: [$($instance_extension:ident),+ $(,)?],)? })? @@ -636,6 +646,9 @@ macro_rules! vulkan_enum { $( device.api_version() >= crate::Version::$api_version, )? + $($( + device.enabled_features().$feature, + )+)? $($( device.enabled_extensions().$device_extension, )+)? @@ -647,6 +660,7 @@ macro_rules! vulkan_enum { required_for: concat!("`", stringify!($ty), "::", stringify!($flag_name), "`"), requires_one_of: crate::RequiresOneOf { $(api_version: Some(crate::Version::$api_version),)? + $(features: &[$(stringify!($feature)),+],)? $(device_extensions: &[$(stringify!($device_extension)),+],)? $(instance_extensions: &[$(stringify!($instance_extension)),+],)? ..Default::default() @@ -675,6 +689,9 @@ macro_rules! vulkan_enum { $( physical_device.api_version() >= crate::Version::$api_version, )? + $($( + physical_device.supported_features().$feature, + )+)? $($( physical_device.supported_extensions().$device_extension, )+)? @@ -686,6 +703,7 @@ macro_rules! vulkan_enum { required_for: concat!("`", stringify!($ty), "::", stringify!($flag_name), "`"), requires_one_of: crate::RequiresOneOf { $(api_version: Some(crate::Version::$api_version),)? + $(features: &[$(stringify!($feature)),+],)? $(device_extensions: &[$(stringify!($device_extension)),+],)? $(instance_extensions: &[$(stringify!($instance_extension)),+],)? ..Default::default() diff --git a/vulkano/src/render_pass/create.rs b/vulkano/src/render_pass/create.rs index 7e79ae936b..2e2ffbe384 100644 --- a/vulkano/src/render_pass/create.rs +++ b/vulkano/src/render_pass/create.rs @@ -621,22 +621,69 @@ impl RenderPass { for (dependency_num, dependency) in dependencies.iter().enumerate() { let &SubpassDependency { - source_subpass, - destination_subpass, - source_stages, - destination_stages, - source_access, - destination_access, + src_subpass, + dst_subpass, + src_stages, + dst_stages, + src_access, + dst_access, by_region, view_local, _ne: _, } = dependency; let dependency_num = dependency_num as u32; - for (stages, access) in [ - (source_stages, source_access), - (destination_stages, destination_access), - ] { + for (stages, access) in [(src_stages, src_access), (dst_stages, dst_access)] { + if !device.enabled_features().synchronization2 { + if stages.is_2() { + return Err(RenderPassCreationError::RequirementNotMet { + required_for: "`create_info.dependencies` has an element where `src_stages` or `dst_stages` has bits set from `VkPipelineStageFlagBits2`", + requires_one_of: RequiresOneOf { + features: &["synchronization2"], + ..Default::default() + }, + }); + } + + if access.is_2() { + return Err(RenderPassCreationError::RequirementNotMet { + required_for: "`create_info.dependencies` has an element where `src_access` or `dst_access` has bits set from `VkAccessFlagBits2`", + requires_one_of: RequiresOneOf { + features: &["synchronization2"], + ..Default::default() + }, + }); + } + } else if !(device.api_version() >= Version::V1_2 + || device.enabled_extensions().khr_create_renderpass2) + { + // If synchronization2 is enabled but we don't have create_renderpass2, + // we are unable to use extension structs, so we can't use the + // extra flag bits. 
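// --- Illustrative sketch (editor addition, not part of the patch) -----------------
// What `is_2()` guards against: flags that exist only in
// `VkPipelineStageFlagBits2`/`VkAccessFlagBits2` cannot be expressed through the
// plain `VkSubpassDependency2` fields, so on top of `synchronization2` they also
// need Vulkan 1.2 or `khr_create_renderpass2` for the chained `VkMemoryBarrier2`.
// The flag names below are assumptions based on this patch's `sync::pipeline`
// changes, which are not reproduced in this excerpt.
let sync2_only = PipelineStages {
    copy: true, // assumed name; VK_PIPELINE_STAGE_2_COPY_BIT has no 1.0 equivalent
    ..PipelineStages::empty()
};
assert!(sync2_only.is_2());

let works_everywhere = PipelineStages {
    all_transfer: true, // same bit value as the original VK_PIPELINE_STAGE_TRANSFER_BIT
    ..PipelineStages::empty()
};
assert!(!works_everywhere.is_2());
// -----------------------------------------------------------------------------------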
+ + if stages.is_2() { + return Err(RenderPassCreationError::RequirementNotMet { + required_for: "`create_info.dependencies` has an element where `src_stages` or `dst_stages` has bits set from `VkPipelineStageFlagBits2`", + requires_one_of: RequiresOneOf { + api_version: Some(Version::V1_2), + device_extensions: &["khr_create_renderpass2"], + ..Default::default() + }, + }); + } + + if access.is_2() { + return Err(RenderPassCreationError::RequirementNotMet { + required_for: "`create_info.dependencies` has an element where `src_access` or `dst_access` has bits set from `VkAccessFlagBits2`", + requires_one_of: RequiresOneOf { + api_version: Some(Version::V1_2), + device_extensions: &["khr_create_renderpass2"], + ..Default::default() + }, + }); + } + } + // VUID-VkSubpassDependency2-srcStageMask-parameter // VUID-VkSubpassDependency2-dstStageMask-parameter stages.validate_device(device)?; @@ -671,6 +718,84 @@ impl RenderPass { }); } + // VUID-VkSubpassDependency2-srcStageMask-04092 + // VUID-VkSubpassDependency2-dstStageMask-04092 + if stages.conditional_rendering && !device.enabled_features().conditional_rendering + { + return Err(RenderPassCreationError::RequirementNotMet { + required_for: "`create_info.dependencies` has an element where `stages.conditional_rendering` is set", + requires_one_of: RequiresOneOf { + features: &["conditional_rendering"], + ..Default::default() + }, + }); + } + + // VUID-VkSubpassDependency2-srcStageMask-04093 + // VUID-VkSubpassDependency2-dstStageMask-04093 + if stages.fragment_density_process + && !device.enabled_features().fragment_density_map + { + return Err(RenderPassCreationError::RequirementNotMet { + required_for: "`create_info.dependencies` has an element where `stages.fragment_density_process` is set", + requires_one_of: RequiresOneOf { + features: &["fragment_density_map"], + ..Default::default() + }, + }); + } + + // VUID-VkSubpassDependency2-srcStageMask-04094 + // VUID-VkSubpassDependency2-dstStageMask-04094 + if stages.transform_feedback && !device.enabled_features().transform_feedback { + return Err(RenderPassCreationError::RequirementNotMet { + required_for: "`create_info.dependencies` has an element where `stages.transform_feedback` is set", + requires_one_of: RequiresOneOf { + features: &["transform_feedback"], + ..Default::default() + }, + }); + } + + // VUID-VkSubpassDependency2-srcStageMask-04095 + // VUID-VkSubpassDependency2-dstStageMask-04095 + if stages.mesh_shader && !device.enabled_features().mesh_shader { + return Err(RenderPassCreationError::RequirementNotMet { + required_for: "`create_info.dependencies` has an element where `stages.mesh_shader` is set", + requires_one_of: RequiresOneOf { + features: &["mesh_shader"], + ..Default::default() + }, + }); + } + + // VUID-VkSubpassDependency2-srcStageMask-04096 + // VUID-VkSubpassDependency2-dstStageMask-04096 + if stages.task_shader && !device.enabled_features().task_shader { + return Err(RenderPassCreationError::RequirementNotMet { + required_for: "`create_info.dependencies` has an element where `stages.task_shader` is set", + requires_one_of: RequiresOneOf { + features: &["task_shader"], + ..Default::default() + }, + }); + } + + // VUID-VkSubpassDependency2-srcStageMask-07318 + // VUID-VkSubpassDependency2-dstStageMask-07318 + if stages.fragment_shading_rate_attachment + && !(device.enabled_features().attachment_fragment_shading_rate + || device.enabled_features().shading_rate_image) + { + return Err(RenderPassCreationError::RequirementNotMet { + required_for: 
"`create_info.dependencies` has an element where `stages.fragment_shading_rate_attachment` is set", + requires_one_of: RequiresOneOf { + features: &["attachment_fragment_shading_rate", "shading_rate_image"], + ..Default::default() + }, + }); + } + // VUID-VkSubpassDependency2-srcStageMask-03937 // VUID-VkSubpassDependency2-dstStageMask-03937 if stages.is_empty() && !device.enabled_features().synchronization2 { @@ -705,16 +830,13 @@ impl RenderPass { } // VUID-VkSubpassDependency2-srcSubpass-03085 - if source_subpass.is_none() && destination_subpass.is_none() { + if src_subpass.is_none() && dst_subpass.is_none() { return Err(RenderPassCreationError::DependencyBothSubpassesExternal { dependency: dependency_num, }); } - for (subpass, stages) in [ - (source_subpass, source_stages), - (destination_subpass, destination_stages), - ] { + for (subpass, stages) in [(src_subpass, src_stages), (dst_subpass, dst_stages)] { if let Some(subpass) = subpass { // VUID-VkRenderPassCreateInfo2-srcSubpass-02526 // VUID-VkRenderPassCreateInfo2-dstSubpass-02527 @@ -763,11 +885,9 @@ impl RenderPass { } } - if let (Some(source_subpass), Some(destination_subpass)) = - (source_subpass, destination_subpass) - { + if let (Some(src_subpass), Some(dst_subpass)) = (src_subpass, dst_subpass) { // VUID-VkSubpassDependency2-srcSubpass-03084 - if source_subpass > destination_subpass { + if src_subpass > dst_subpass { return Err( RenderPassCreationError::DependencySourceSubpassAfterDestinationSubpass { dependency: dependency_num, @@ -775,26 +895,26 @@ impl RenderPass { ); } - if source_subpass == destination_subpass { - let source_stages_non_framebuffer = PipelineStages { + if src_subpass == dst_subpass { + let src_stages_non_framebuffer = PipelineStages { early_fragment_tests: false, fragment_shader: false, late_fragment_tests: false, color_attachment_output: false, - ..source_stages + ..src_stages }; - let destination_stages_non_framebuffer = PipelineStages { + let dst_stages_non_framebuffer = PipelineStages { early_fragment_tests: false, fragment_shader: false, late_fragment_tests: false, color_attachment_output: false, - ..destination_stages + ..dst_stages }; - if !source_stages_non_framebuffer.is_empty() - || !destination_stages_non_framebuffer.is_empty() + if !src_stages_non_framebuffer.is_empty() + || !dst_stages_non_framebuffer.is_empty() { - let source_latest_stage = if source_stages.all_graphics { + let src_latest_stage = if src_stages.all_graphics { 13 } else { let PipelineStages { @@ -812,7 +932,7 @@ impl RenderPass { late_fragment_tests, color_attachment_output, .. - } = source_stages; + } = src_stages; #[allow(clippy::identity_op)] [ @@ -835,7 +955,7 @@ impl RenderPass { .unwrap() }; - let destination_earliest_stage = if destination_stages.all_graphics { + let dst_earliest_stage = if dst_stages.all_graphics { 1 } else { let PipelineStages { @@ -853,7 +973,7 @@ impl RenderPass { late_fragment_tests, color_attachment_output, .. 
- } = destination_stages; + } = dst_stages; #[allow(clippy::identity_op)] [ @@ -877,7 +997,7 @@ impl RenderPass { }; // VUID-VkSubpassDependency2-srcSubpass-03087 - if source_latest_stage > destination_earliest_stage { + if src_latest_stage > dst_earliest_stage { return Err( RenderPassCreationError::DependencySelfDependencySourceStageAfterDestinationStage { dependency: dependency_num, @@ -886,20 +1006,17 @@ impl RenderPass { } } - let source_has_framebuffer_stage = source_stages.fragment_shader - || source_stages.early_fragment_tests - || source_stages.late_fragment_tests - || source_stages.color_attachment_output; - let destination_has_framebuffer_stage = destination_stages.fragment_shader - || destination_stages.early_fragment_tests - || destination_stages.late_fragment_tests - || destination_stages.color_attachment_output; + let src_has_framebuffer_stage = src_stages.fragment_shader + || src_stages.early_fragment_tests + || src_stages.late_fragment_tests + || src_stages.color_attachment_output; + let dst_has_framebuffer_stage = dst_stages.fragment_shader + || dst_stages.early_fragment_tests + || dst_stages.late_fragment_tests + || dst_stages.color_attachment_output; // VUID-VkSubpassDependency2-srcSubpass-02245 - if source_has_framebuffer_stage - && destination_has_framebuffer_stage - && !by_region - { + if src_has_framebuffer_stage && dst_has_framebuffer_stage && !by_region { return Err( RenderPassCreationError::DependencySelfDependencyFramebufferStagesWithoutByRegion { dependency: dependency_num, @@ -918,11 +1035,11 @@ impl RenderPass { } } else { // VUID-VkRenderPassCreateInfo2-pDependencies-03060 - if subpasses[source_subpass as usize].view_mask.count_ones() > 1 { + if subpasses[src_subpass as usize].view_mask.count_ones() > 1 { return Err( RenderPassCreationError::DependencySelfDependencyViewMaskMultiple { dependency: dependency_num, - subpass: source_subpass, + subpass: src_subpass, }, ); } @@ -1070,9 +1187,29 @@ impl RenderPass { out }; + let memory_barriers_vk: SmallVec<[_; 4]> = if device.enabled_features().synchronization2 { + debug_assert!( + device.api_version() >= Version::V1_3 + || device.enabled_extensions().khr_synchronization2 + ); + dependencies + .iter() + .map(|dependency| ash::vk::MemoryBarrier2 { + src_stage_mask: dependency.src_stages.into(), + src_access_mask: dependency.src_access.into(), + dst_stage_mask: dependency.dst_stages.into(), + dst_access_mask: dependency.dst_access.into(), + ..Default::default() + }) + .collect() + } else { + SmallVec::new() + }; + let dependencies_vk = dependencies .iter() - .map(|dependency| { + .enumerate() + .map(|(index, dependency)| { let mut dependency_flags = ash::vk::DependencyFlags::empty(); if dependency.by_region { @@ -1084,16 +1221,15 @@ impl RenderPass { } ash::vk::SubpassDependency2 { - src_subpass: dependency - .source_subpass - .unwrap_or(ash::vk::SUBPASS_EXTERNAL), - dst_subpass: dependency - .destination_subpass - .unwrap_or(ash::vk::SUBPASS_EXTERNAL), - src_stage_mask: dependency.source_stages.into(), - dst_stage_mask: dependency.destination_stages.into(), - src_access_mask: dependency.source_access.into(), - dst_access_mask: dependency.destination_access.into(), + p_next: memory_barriers_vk + .get(index) + .map_or(ptr::null(), |mb| mb as *const _ as *const _), + src_subpass: dependency.src_subpass.unwrap_or(ash::vk::SUBPASS_EXTERNAL), + dst_subpass: dependency.dst_subpass.unwrap_or(ash::vk::SUBPASS_EXTERNAL), + src_stage_mask: dependency.src_stages.into(), + dst_stage_mask: dependency.dst_stages.into(), + 
src_access_mask: dependency.src_access.into(), + dst_access_mask: dependency.dst_access.into(), dependency_flags, // VUID-VkSubpassDependency2-dependencyFlags-03092 view_offset: dependency.view_local.unwrap_or(0), @@ -1267,16 +1403,12 @@ impl RenderPass { let dependencies_vk = dependencies .iter() .map(|dependency| ash::vk::SubpassDependency { - src_subpass: dependency - .source_subpass - .unwrap_or(ash::vk::SUBPASS_EXTERNAL), - dst_subpass: dependency - .destination_subpass - .unwrap_or(ash::vk::SUBPASS_EXTERNAL), - src_stage_mask: dependency.source_stages.into(), - dst_stage_mask: dependency.destination_stages.into(), - src_access_mask: dependency.source_access.into(), - dst_access_mask: dependency.destination_access.into(), + src_subpass: dependency.src_subpass.unwrap_or(ash::vk::SUBPASS_EXTERNAL), + dst_subpass: dependency.dst_subpass.unwrap_or(ash::vk::SUBPASS_EXTERNAL), + src_stage_mask: dependency.src_stages.into(), + dst_stage_mask: dependency.dst_stages.into(), + src_access_mask: dependency.src_access.into(), + dst_access_mask: dependency.dst_access.into(), dependency_flags: if dependency.by_region { ash::vk::DependencyFlags::BY_REGION } else { @@ -1441,17 +1573,16 @@ pub enum RenderPassCreationError { /// A subpass dependency specified an access type that was not supported by the given stages. DependencyAccessNotSupportedByStages { dependency: u32 }, - /// A subpass dependency has both `source_subpass` and `destination_subpass` set to `None`. + /// A subpass dependency has both `src_subpass` and `dst_subpass` set to `None`. DependencyBothSubpassesExternal { dependency: u32 }, /// A subpass dependency specifies a subpass self-dependency and includes framebuffer stages in - /// both `source_stages` and `destination_stages`, but the `by_region` dependency was not - /// enabled. + /// both `src_stages` and `dst_stages`, but the `by_region` dependency was not enabled. DependencySelfDependencyFramebufferStagesWithoutByRegion { dependency: u32 }, /// A subpass dependency specifies a subpass self-dependency and includes - /// non-framebuffer stages, but the latest stage in `source_stages` is after the earliest stage - /// in `destination_stages`. + /// non-framebuffer stages, but the latest stage in `src_stages` is after the earliest stage + /// in `dst_stages`. DependencySelfDependencySourceStageAfterDestinationStage { dependency: u32 }, /// A subpass dependency specifies a subpass self-dependency and has the `view_local` dependency @@ -1462,10 +1593,10 @@ pub enum RenderPassCreationError { /// dependency, but the referenced subpass has more than one bit set in its `view_mask`. DependencySelfDependencyViewMaskMultiple { dependency: u32, subpass: u32 }, - /// A subpass dependency has a `source_subpass` that is later than the `destination_subpass`. + /// A subpass dependency has a `src_subpass` that is later than the `dst_subpass`. DependencySourceSubpassAfterDestinationSubpass { dependency: u32 }, - /// A subpass dependency has a bit set in the `source_stages` or `destination_stages` that is + /// A subpass dependency has a bit set in the `src_stages` or `dst_stages` that is /// not supported for graphics pipelines. DependencyStageNotSupported { dependency: u32 }, @@ -1473,8 +1604,8 @@ pub enum RenderPassCreationError { /// render pass. DependencySubpassOutOfRange { dependency: u32, subpass: u32 }, - /// A subpass dependency has the `view_local` dependency enabled, but `source_subpass` or - /// `destination_subpass` were set to `None`. 
+ /// A subpass dependency has the `view_local` dependency enabled, but `src_subpass` or + /// `dst_subpass` were set to `None`. DependencyViewLocalExternalDependency { dependency: u32 }, /// A subpass dependency has the `view_local` dependency enabled, but multiview is not enabled @@ -1633,7 +1764,7 @@ impl Display for RenderPassCreationError { write!( f, "subpass dependency {} specifies a subpass self-dependency and includes \ - framebuffer stages in both `source_stages` and `destination_stages`, but the \ + framebuffer stages in both `src_stages` and `dst_stages`, but the \ `by_region` dependency was not enabled", dependency, ) @@ -1642,8 +1773,8 @@ impl Display for RenderPassCreationError { write!( f, "subpass dependency {} specifies a subpass self-dependency and includes \ - non-framebuffer stages, but the latest stage in `source_stages` is after the \ - earliest stage in `destination_stages`", + non-framebuffer stages, but the latest stage in `src_stages` is after the \ + earliest stage in `dst_stages`", dependency, ) } @@ -1665,19 +1796,19 @@ impl Display for RenderPassCreationError { ), Self::DependencySourceSubpassAfterDestinationSubpass { dependency } => write!( f, - "subpass dependency {} has a `source_subpass` that is later than the \ - `destination_subpass`", + "subpass dependency {} has a `src_subpass` that is later than the \ + `dst_subpass`", dependency, ), Self::DependencyStageNotSupported { dependency } => write!( f, - "subpass dependency {} has a bit set in the `source_stages` or \ - `destination_stages` that is not supported for graphics pipelines", + "subpass dependency {} has a bit set in the `src_stages` or \ + `dst_stages` that is not supported for graphics pipelines", dependency, ), Self::DependencyBothSubpassesExternal { dependency } => write!( f, - "subpass dependency {} has both `source_subpass` and `destination_subpass` set to \ + "subpass dependency {} has both `src_subpass` and `dst_subpass` set to \ `None`", dependency, ), @@ -1693,7 +1824,7 @@ impl Display for RenderPassCreationError { Self::DependencyViewLocalExternalDependency { dependency } => write!( f, "subpass dependency {} has the `view_local` dependency enabled, but \ - `source_subpass` or `destination_subpass` were set to `None`", + `src_subpass` or `dst_subpass` were set to `None`", dependency, ), Self::DependencyViewLocalMultiviewNotEnabled { dependency } => write!( diff --git a/vulkano/src/render_pass/macros.rs b/vulkano/src/render_pass/macros.rs index 3d12bce365..3f5df810a1 100644 --- a/vulkano/src/render_pass/macros.rs +++ b/vulkano/src/render_pass/macros.rs @@ -154,24 +154,24 @@ macro_rules! 
ordered_passes_renderpass { let dependencies: Vec<_> = (0..subpasses.len().saturating_sub(1) as u32) .map(|id| { // TODO: correct values - let source_stages = $crate::sync::PipelineStages { + let src_stages = $crate::sync::PipelineStages { all_graphics: true, ..$crate::sync::PipelineStages::empty() }; - let destination_stages = $crate::sync::PipelineStages { + let dst_stages = $crate::sync::PipelineStages { all_graphics: true, ..$crate::sync::PipelineStages::empty() }; - let source_access = source_stages.supported_access(); - let destination_access = destination_stages.supported_access(); + let src_access = src_stages.supported_access(); + let dst_access = dst_stages.supported_access(); $crate::render_pass::SubpassDependency { - source_subpass: id.into(), - destination_subpass: (id + 1).into(), - source_stages, - destination_stages, - source_access, - destination_access, + src_subpass: id.into(), + dst_subpass: (id + 1).into(), + src_stages, + dst_stages, + src_access, + dst_access, by_region: true, // TODO: correct values ..Default::default() } diff --git a/vulkano/src/render_pass/mod.rs b/vulkano/src/render_pass/mod.rs index 033eed9979..47daa3d866 100644 --- a/vulkano/src/render_pass/mod.rs +++ b/vulkano/src/render_pass/mod.rs @@ -1004,57 +1004,57 @@ impl Default for AttachmentReference { /// used as the input of another one). Subpass dependencies work similar to pipeline barriers, /// except that they operate on whole subpasses instead of individual images. /// -/// If `source_subpass` and `destination_subpass` are equal, then this specifies a +/// If `src_subpass` and `dst_subpass` are equal, then this specifies a /// [subpass self-dependency](https://registry.khronos.org/vulkan/specs/1.3-extensions/html/chap7.html#synchronization-pipeline-barriers-subpass-self-dependencies). -/// The `source_stages` must all be +/// The `src_stages` must all be /// [logically earlier in the pipeline](https://registry.khronos.org/vulkan/specs/1.3-extensions/html/chap7.html#synchronization-pipeline-stages-order) -/// than the `destination_stages`, and if they both contain a +/// than the `dst_stages`, and if they both contain a /// [framebuffer-space stage](https://registry.khronos.org/vulkan/specs/1.3-extensions/html/chap7.html#synchronization-framebuffer-regions), /// then `by_region` must be activated. /// -/// If `source_subpass` or `destination_subpass` are set to `None`, this specifies an external +/// If `src_subpass` or `dst_subpass` are set to `None`, this specifies an external /// dependency. An external dependency specifies a dependency on commands that were submitted before -/// the render pass instance began (for `source_subpass`), or on commands that will be submitted -/// after the render pass instance ends (for `destination_subpass`). The values must not both be +/// the render pass instance began (for `src_subpass`), or on commands that will be submitted +/// after the render pass instance ends (for `dst_subpass`). The values must not both be /// `None`. #[derive(Clone, Debug, PartialEq, Eq)] pub struct SubpassDependency { - /// The index of the subpass that writes the data that `destination_subpass` is going to use. + /// The index of the subpass that writes the data that `dst_subpass` is going to use. /// /// `None` specifies an external dependency. /// /// The default value is `None`. - pub source_subpass: Option, + pub src_subpass: Option, - /// The index of the subpass that reads the data that `source_subpass` wrote. 
+ /// The index of the subpass that reads the data that `src_subpass` wrote. /// /// `None` specifies an external dependency. /// /// The default value is `None`. - pub destination_subpass: Option, + pub dst_subpass: Option, - /// The pipeline stages that must be finished on `source_subpass` before the - /// `destination_stages` of `destination_subpass` can start. + /// The pipeline stages that must be finished on `src_subpass` before the + /// `dst_stages` of `dst_subpass` can start. /// /// The default value is [`PipelineStages::empty()`]. - pub source_stages: PipelineStages, + pub src_stages: PipelineStages, - /// The pipeline stages of `destination_subpass` that must wait for the `source_stages` of - /// `source_subpass` to be finished. Stages that are earlier than the stages specified here can - /// start before the `source_stages` are finished. + /// The pipeline stages of `dst_subpass` that must wait for the `src_stages` of + /// `src_subpass` to be finished. Stages that are earlier than the stages specified here can + /// start before the `src_stages` are finished. /// /// The default value is [`PipelineStages::empty()`]. - pub destination_stages: PipelineStages, + pub dst_stages: PipelineStages, - /// The way `source_subpass` accesses the attachments on which we depend. + /// The way `src_subpass` accesses the attachments on which we depend. /// /// The default value is [`AccessFlags::empty()`]. - pub source_access: AccessFlags, + pub src_access: AccessFlags, - /// The way `destination_subpass` accesses the attachments on which we depend. + /// The way `dst_subpass` accesses the attachments on which we depend. /// /// The default value is [`AccessFlags::empty()`]. - pub destination_access: AccessFlags, + pub dst_access: AccessFlags, /// If false, then the source operations must be fully finished for the destination operations /// to start. If true, then the implementation can start the destination operation for some @@ -1070,16 +1070,16 @@ pub struct SubpassDependency { pub by_region: bool, /// If multiview rendering is being used (the subpasses have a nonzero `view_mask`), then - /// setting this to `Some` creates a view-local dependency, between views in `source_subpass` - /// and views in `destination_subpass`. + /// setting this to `Some` creates a view-local dependency, between views in `src_subpass` + /// and views in `dst_subpass`. /// - /// The inner value specifies an offset relative to the view index of `destination_subpass`: - /// each view `d` in `destination_subpass` depends on view `d + view_offset` in - /// `source_subpass`. If the source view index does not exist, the dependency is ignored for + /// The inner value specifies an offset relative to the view index of `dst_subpass`: + /// each view `d` in `dst_subpass` depends on view `d + view_offset` in + /// `src_subpass`. If the source view index does not exist, the dependency is ignored for /// that view. /// - /// If multiview rendering is not being used, the value must be `None`. If `source_subpass` - /// and `destination_subpass` are the same, only `Some(0)` and `None` are allowed as values, and + /// If multiview rendering is not being used, the value must be `None`. If `src_subpass` + /// and `dst_subpass` are the same, only `Some(0)` and `None` are allowed as values, and /// if that subpass also has multiple bits set in its `view_mask`, the value must be `Some(0)`. /// /// The default value is `None`. 
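For reference, a minimal sketch of a `SubpassDependency` written against the renamed fields. The subpass indices and the particular stage/access choices below are illustrative only, not values required by this patch:

    let dependency = SubpassDependency {
        src_subpass: None,    // external: work submitted before the render pass begins
        dst_subpass: Some(0), // the first subpass waits on that work
        src_stages: PipelineStages {
            color_attachment_output: true,
            ..PipelineStages::empty()
        },
        dst_stages: PipelineStages {
            color_attachment_output: true,
            ..PipelineStages::empty()
        },
        src_access: AccessFlags::empty(),
        dst_access: AccessFlags {
            color_attachment_write: true,
            ..AccessFlags::empty()
        },
        ..Default::default()
    };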
@@ -1092,12 +1092,12 @@ impl Default for SubpassDependency { #[inline] fn default() -> Self { Self { - source_subpass: None, - destination_subpass: None, - source_stages: PipelineStages::empty(), - destination_stages: PipelineStages::empty(), - source_access: AccessFlags::empty(), - destination_access: AccessFlags::empty(), + src_subpass: None, + dst_subpass: None, + src_stages: PipelineStages::empty(), + dst_stages: PipelineStages::empty(), + src_access: AccessFlags::empty(), + dst_access: AccessFlags::empty(), by_region: false, view_local: None, _ne: crate::NonExhaustive(()), diff --git a/vulkano/src/sync/pipeline.rs b/vulkano/src/sync/pipeline.rs index e01fd3a93d..45d3477a96 100644 --- a/vulkano/src/sync/pipeline.rs +++ b/vulkano/src/sync/pipeline.rs @@ -17,114 +17,247 @@ use smallvec::SmallVec; use std::{ops::Range, sync::Arc}; vulkan_enum! { - // TODO: document + /// A single stage in the device's processing pipeline. #[non_exhaustive] PipelineStage = PipelineStageFlags2(u64); - // TODO: document + /// A pseudo-stage representing the start of the pipeline. TopOfPipe = TOP_OF_PIPE, - // TODO: document + /// Indirect buffers are read. DrawIndirect = DRAW_INDIRECT, - // TODO: document + /// Vertex and index buffers are read. + /// + /// It is currently equivalent to setting all of the following flags, but automatically + /// omitting any that are not supported in a given context. It also implicitly includes future + /// flags that are added to Vulkan, if they are not yet supported by Vulkano. + /// - `index_input` + /// - `vertex_attribute_input` VertexInput = VERTEX_INPUT, - // TODO: document + /// Vertex shaders are executed. VertexShader = VERTEX_SHADER, - // TODO: document + /// Tessellation control shaders are executed. TessellationControlShader = TESSELLATION_CONTROL_SHADER, - // TODO: document + /// Tessellation evaluation shaders are executed. TessellationEvaluationShader = TESSELLATION_EVALUATION_SHADER, - // TODO: document + /// Geometry shaders are executed. GeometryShader = GEOMETRY_SHADER, - // TODO: document + /// Fragment shaders are executed. FragmentShader = FRAGMENT_SHADER, - // TODO: document + /// Early fragment tests (depth and stencil tests before fragment shading) are performed. + /// Subpass load operations for framebuffer attachments with a depth/stencil format are + /// performed. EarlyFragmentTests = EARLY_FRAGMENT_TESTS, - // TODO: document + /// Late fragment tests (depth and stencil tests after fragment shading) are performed. + /// Subpass store operations for framebuffer attachments with a depth/stencil format are + /// performed. LateFragmentTests = LATE_FRAGMENT_TESTS, - // TODO: document + /// The final color values are output from the pipeline after blending. + /// Subpass load and store operations, multisample resolve operations for framebuffer + /// attachments with a color or depth/stencil format, and `clear_attachments` are performed. ColorAttachmentOutput = COLOR_ATTACHMENT_OUTPUT, - // TODO: document + /// Compute shaders are executed. ComputeShader = COMPUTE_SHADER, - // TODO: document - Transfer = TRANSFER, - - // TODO: document + /// The set of all current and future transfer pipeline stages. + /// + /// It is currently equivalent to setting all of the following flags, but automatically + /// omitting any that are not supported in a given context. It also implicitly includes future + /// flags that are added to Vulkan, if they are not yet supported by Vulkano. 
+ /// - `copy` + /// - `blit` + /// - `resolve` + /// - `clear` + /// - `acceleration_structure_copy` + AllTransfer = ALL_TRANSFER, + + /// A pseudo-stage representing the end of the pipeline. BottomOfPipe = BOTTOM_OF_PIPE, - // TODO: document + /// A pseudo-stage representing reads and writes to device memory on the host. Host = HOST, - // TODO: document + /// The set of all current and future graphics pipeline stages. + /// + /// It is currently equivalent to setting all of the following flags, but automatically + /// omitting any that are not supported in a given context. It also implicitly includes future + /// flags that are added to Vulkan, if they are not yet supported by Vulkano. + /// - `draw_indirect` + /// - `task_shader` + /// - `mesh_shader` + /// - `vertex_input` + /// - `vertex_shader` + /// - `tessellation_control_shader` + /// - `tessellation_evaluation_shader` + /// - `geometry_shader` + /// - `fragment_shader` + /// - `early_fragment_tests` + /// - `late_fragment_tests` + /// - `color_attachment_output` + /// - `conditional_rendering` + /// - `transform_feedback` + /// - `fragment_shading_rate_attachment` + /// - `fragment_density_process` + /// - `invocation_mask` AllGraphics = ALL_GRAPHICS, - // TODO: document + /// The set of all current and future pipeline stages of all types. + /// + /// It is currently equivalent to setting all flags in `PipelineStages`, but automatically + /// omitting any that are not supported in a given context. It also implicitly includes future + /// flags that are added to Vulkan, if they are not yet supported by Vulkano. AllCommands = ALL_COMMANDS, - /* - // TODO: document + /// The `copy_buffer`, `copy_image`, `copy_buffer_to_image`, `copy_image_to_buffer` and + /// `copy_query_pool_results` commands are executed. + Copy = COPY { + api_version: V1_3, + device_extensions: [khr_synchronization2], + }, + + /// The `resolve_image` command is executed. + Resolve = RESOLVE { + api_version: V1_3, + device_extensions: [khr_synchronization2], + }, + + /// The `blit_image` command is executed. + Blit = BLIT { + api_version: V1_3, + device_extensions: [khr_synchronization2], + }, + + /// The `clear_color_image`, `clear_depth_stencil_image`, `fill_buffer` and `update_buffer` + /// commands are executed. + Clear = CLEAR { + api_version: V1_3, + device_extensions: [khr_synchronization2], + }, + + /// Index buffers are read. + IndexInput = INDEX_INPUT { + api_version: V1_3, + device_extensions: [khr_synchronization2], + }, + + /// Vertex buffers are read. + VertexAttributeInput = VERTEX_ATTRIBUTE_INPUT { + api_version: V1_3, + device_extensions: [khr_synchronization2], + }, + + /// The various pre-rasterization shader types are executed. + /// + /// It is currently equivalent to setting all of the following flags, but automatically + /// omitting any that are not supported in a given context. It also implicitly includes future + /// flags that are added to Vulkan, if they are not yet supported by Vulkano. + /// - `vertex_shader` + /// - `tessellation_control_shader` + /// - `tessellation_evaluation_shader` + /// - `geometry_shader` + /// - `task_shader` + /// - `mesh_shader` + PreRasterizationShaders = PRE_RASTERIZATION_SHADERS { + api_version: V1_3, + device_extensions: [khr_synchronization2], + }, + + /// Video decode operations are performed. + VideoDecode = VIDEO_DECODE_KHR { + device_extensions: [khr_video_decode_queue], + }, + + /// Video encode operations are performed. 
+ VideoEncode = VIDEO_ENCODE_KHR { + device_extensions: [khr_video_encode_queue], + }, + + /// Vertex attribute output values are written to the transform feedback buffers. TransformFeedback = TRANSFORM_FEEDBACK_EXT { device_extensions: [ext_transform_feedback], }, - // TODO: document + /// The predicate of conditional rendering is read. ConditionalRendering = CONDITIONAL_RENDERING_EXT { device_extensions: [ext_conditional_rendering], }, - // TODO: document + /// Acceleration_structure commands are executed. AccelerationStructureBuild = ACCELERATION_STRUCTURE_BUILD_KHR { device_extensions: [khr_acceleration_structure, nv_ray_tracing], }, - */ - // TODO: document + /// The various ray tracing shader types are executed. RayTracingShader = RAY_TRACING_SHADER_KHR { device_extensions: [khr_ray_tracing_pipeline, nv_ray_tracing], }, - /* - // TODO: document + /// The fragment density map is read to generate the fragment areas. FragmentDensityProcess = FRAGMENT_DENSITY_PROCESS_EXT { device_extensions: [ext_fragment_density_map], }, - // TODO: document + /// The fragment shading rate attachment or shading rate image is read to determine the + /// fragment shading rate for portions of a rasterized primitive. FragmentShadingRateAttachment = FRAGMENT_SHADING_RATE_ATTACHMENT_KHR { device_extensions: [khr_fragment_shading_rate], }, - // TODO: document + /// Device-side preprocessing for generated commands via the `preprocess_generated_commands` + /// command is handled. CommandPreprocess = COMMAND_PREPROCESS_NV { device_extensions: [nv_device_generated_commands], }, - // TODO: document + /// Task shaders are executed. TaskShader = TASK_SHADER_NV { device_extensions: [nv_mesh_shader], }, - // TODO: document + /// Mesh shaders are executed. MeshShader = MESH_SHADER_NV { device_extensions: [nv_mesh_shader], }, + + /// Subpass shading shaders are executed. + SubpassShading = SUBPASS_SHADING_HUAWEI { + device_extensions: [huawei_subpass_shading], + }, + + /// The invocation mask image is read to optimize ray dispatch. 
+ InvocationMask = INVOCATION_MASK_HUAWEI { + device_extensions: [huawei_invocation_mask], + }, + + /* + AccelerationStructureCopy = ACCELERATION_STRUCTURE_COPY_KHR { + device_extensions: [khr_ray_tracing_maintenance1], + }, + + MicromapBuild = MICROMAP_BUILD_EXT { + device_extensions: [ext_opacity_micromap], + }, + + OpticalFlow = OPTICAL_FLOW_NV { + device_extensions: [nv_optical_flow], + }, */ } impl PipelineStage { #[inline] pub fn required_queue_flags(&self) -> ash::vk::QueueFlags { + // https://registry.khronos.org/vulkan/specs/1.3-extensions/html/chap7.html#synchronization-pipeline-stages-supported match self { Self::TopOfPipe => ash::vk::QueueFlags::empty(), Self::DrawIndirect => ash::vk::QueueFlags::GRAPHICS | ash::vk::QueueFlags::COMPUTE, @@ -138,7 +271,7 @@ impl PipelineStage { Self::LateFragmentTests => ash::vk::QueueFlags::GRAPHICS, Self::ColorAttachmentOutput => ash::vk::QueueFlags::GRAPHICS, Self::ComputeShader => ash::vk::QueueFlags::COMPUTE, - Self::Transfer => { + Self::AllTransfer => { ash::vk::QueueFlags::GRAPHICS | ash::vk::QueueFlags::COMPUTE | ash::vk::QueueFlags::TRANSFER @@ -147,11 +280,38 @@ impl PipelineStage { Self::Host => ash::vk::QueueFlags::empty(), Self::AllGraphics => ash::vk::QueueFlags::GRAPHICS, Self::AllCommands => ash::vk::QueueFlags::empty(), - Self::RayTracingShader => { - ash::vk::QueueFlags::GRAPHICS - | ash::vk::QueueFlags::COMPUTE - | ash::vk::QueueFlags::TRANSFER + Self::Copy => todo!( + "The spec doesn't currently say which queue flags support this pipeline stage" + ), + Self::Resolve => todo!( + "The spec doesn't currently say which queue flags support this pipeline stage" + ), + Self::Blit => todo!( + "The spec doesn't currently say which queue flags support this pipeline stage" + ), + Self::Clear => todo!( + "The spec doesn't currently say which queue flags support this pipeline stage" + ), + Self::IndexInput => ash::vk::QueueFlags::GRAPHICS, + Self::VertexAttributeInput => ash::vk::QueueFlags::GRAPHICS, + Self::PreRasterizationShaders => ash::vk::QueueFlags::GRAPHICS, + Self::VideoDecode => ash::vk::QueueFlags::VIDEO_DECODE_KHR, + Self::VideoEncode => ash::vk::QueueFlags::VIDEO_ENCODE_KHR, + Self::ConditionalRendering => { + ash::vk::QueueFlags::GRAPHICS | ash::vk::QueueFlags::COMPUTE } + Self::TransformFeedback => ash::vk::QueueFlags::GRAPHICS, + Self::CommandPreprocess => ash::vk::QueueFlags::GRAPHICS | ash::vk::QueueFlags::COMPUTE, + Self::FragmentShadingRateAttachment => ash::vk::QueueFlags::GRAPHICS, + Self::TaskShader => ash::vk::QueueFlags::GRAPHICS, + Self::MeshShader => ash::vk::QueueFlags::GRAPHICS, + Self::AccelerationStructureBuild => ash::vk::QueueFlags::COMPUTE, + Self::RayTracingShader => ash::vk::QueueFlags::COMPUTE, + Self::FragmentDensityProcess => ash::vk::QueueFlags::GRAPHICS, + Self::SubpassShading => ash::vk::QueueFlags::GRAPHICS, + Self::InvocationMask => todo!( + "The spec doesn't currently say which queue flags support this pipeline stage" + ), } } } @@ -164,112 +324,393 @@ impl From for ash::vk::PipelineStageFlags { } vulkan_bitflags! { - // TODO: document + /// A set of stages in the device's processing pipeline. #[non_exhaustive] PipelineStages = PipelineStageFlags2(u64); - // TODO: document + /// A pseudo-stage representing the start of the pipeline. top_of_pipe = TOP_OF_PIPE, - // TODO: document + /// Indirect buffers are read. draw_indirect = DRAW_INDIRECT, - // TODO: document + /// Vertex and index buffers are read. 
+ /// + /// It is currently equivalent to setting all of the following flags, but automatically + /// omitting any that are not supported in a given context. It also implicitly includes future + /// flags that are added to Vulkan, if they are not yet supported by Vulkano. + /// - `index_input` + /// - `vertex_attribute_input` vertex_input = VERTEX_INPUT, - // TODO: document + /// Vertex shaders are executed. vertex_shader = VERTEX_SHADER, - // TODO: document + /// Tessellation control shaders are executed. tessellation_control_shader = TESSELLATION_CONTROL_SHADER, - // TODO: document + /// Tessellation evaluation shaders are executed. tessellation_evaluation_shader = TESSELLATION_EVALUATION_SHADER, - // TODO: document + /// Geometry shaders are executed. geometry_shader = GEOMETRY_SHADER, - // TODO: document + /// Fragment shaders are executed. fragment_shader = FRAGMENT_SHADER, - // TODO: document + /// Early fragment tests (depth and stencil tests before fragment shading) are performed. + /// Subpass load operations for framebuffer attachments with a depth/stencil format are + /// performed. early_fragment_tests = EARLY_FRAGMENT_TESTS, - // TODO: document + /// Late fragment tests (depth and stencil tests after fragment shading) are performed. + /// Subpass store operations for framebuffer attachments with a depth/stencil format are + /// performed. late_fragment_tests = LATE_FRAGMENT_TESTS, - // TODO: document + /// The final color values are output from the pipeline after blending. + /// Subpass load and store operations, multisample resolve operations for framebuffer + /// attachments with a color or depth/stencil format, and `clear_attachments` are performed. color_attachment_output = COLOR_ATTACHMENT_OUTPUT, - // TODO: document + /// Compute shaders are executed. compute_shader = COMPUTE_SHADER, - // TODO: document - transfer = TRANSFER, - - // TODO: document + /// The set of all current and future transfer pipeline stages. + /// + /// It is currently equivalent to setting all of the following flags, but automatically + /// omitting any that are not supported in a given context. It also implicitly includes future + /// flags that are added to Vulkan, if they are not yet supported by Vulkano. + /// - `copy` + /// - `blit` + /// - `resolve` + /// - `clear` + /// - `acceleration_structure_copy` + all_transfer = ALL_TRANSFER, + + /// A pseudo-stage representing the end of the pipeline. bottom_of_pipe = BOTTOM_OF_PIPE, - // TODO: document + /// A pseudo-stage representing reads and writes to device memory on the host. host = HOST, - // TODO: document + /// The set of all current and future graphics pipeline stages. + /// + /// It is currently equivalent to setting all of the following flags, but automatically + /// omitting any that are not supported in a given context. It also implicitly includes future + /// flags that are added to Vulkan, if they are not yet supported by Vulkano. 
+ /// - `draw_indirect` + /// - `task_shader` + /// - `mesh_shader` + /// - `vertex_input` + /// - `vertex_shader` + /// - `tessellation_control_shader` + /// - `tessellation_evaluation_shader` + /// - `geometry_shader` + /// - `fragment_shader` + /// - `early_fragment_tests` + /// - `late_fragment_tests` + /// - `color_attachment_output` + /// - `conditional_rendering` + /// - `transform_feedback` + /// - `fragment_shading_rate_attachment` + /// - `fragment_density_process` + /// - `invocation_mask` all_graphics = ALL_GRAPHICS, - // TODO: document + /// The set of all current and future pipeline stages of all types. + /// + /// It is currently equivalent to setting all flags in `PipelineStages`, but automatically + /// omitting any that are not supported in a given context. It also implicitly includes future + /// flags that are added to Vulkan, if they are not yet supported by Vulkano. all_commands = ALL_COMMANDS, - /* - // TODO: document + /// The `copy_buffer`, `copy_image`, `copy_buffer_to_image`, `copy_image_to_buffer` and + /// `copy_query_pool_results` commands are executed. + copy = COPY { + api_version: V1_3, + device_extensions: [khr_synchronization2], + }, + + /// The `resolve_image` command is executed. + resolve = RESOLVE { + api_version: V1_3, + device_extensions: [khr_synchronization2], + }, + + /// The `blit_image` command is executed. + blit = BLIT { + api_version: V1_3, + device_extensions: [khr_synchronization2], + }, + + /// The `clear_color_image`, `clear_depth_stencil_image`, `fill_buffer` and `update_buffer` + /// commands are executed. + clear = CLEAR { + api_version: V1_3, + device_extensions: [khr_synchronization2], + }, + + /// Index buffers are read. + index_input = INDEX_INPUT { + api_version: V1_3, + device_extensions: [khr_synchronization2], + }, + + /// Vertex buffers are read. + vertex_attribute_input = VERTEX_ATTRIBUTE_INPUT { + api_version: V1_3, + device_extensions: [khr_synchronization2], + }, + + /// The various pre-rasterization shader types are executed. + /// + /// It is currently equivalent to setting all of the following flags, but automatically + /// omitting any that are not supported in a given context. It also implicitly includes future + /// flags that are added to Vulkan, if they are not yet supported by Vulkano. + /// - `vertex_shader` + /// - `tessellation_control_shader` + /// - `tessellation_evaluation_shader` + /// - `geometry_shader` + /// - `task_shader` + /// - `mesh_shader` + pre_rasterization_shaders = PRE_RASTERIZATION_SHADERS { + api_version: V1_3, + device_extensions: [khr_synchronization2], + }, + + /// Video decode operations are performed. + video_decode = VIDEO_DECODE_KHR { + device_extensions: [khr_video_decode_queue], + }, + + /// Video encode operations are performed. + video_encode = VIDEO_ENCODE_KHR { + device_extensions: [khr_video_encode_queue], + }, + + /// Vertex attribute output values are written to the transform feedback buffers. transform_feedback = TRANSFORM_FEEDBACK_EXT { device_extensions: [ext_transform_feedback], }, - // TODO: document + /// The predicate of conditional rendering is read. conditional_rendering = CONDITIONAL_RENDERING_EXT { device_extensions: [ext_conditional_rendering], }, - // TODO: document + /// Acceleration_structure commands are executed. acceleration_structure_build = ACCELERATION_STRUCTURE_BUILD_KHR { device_extensions: [khr_acceleration_structure, nv_ray_tracing], }, - */ - // TODO: document + /// The various ray tracing shader types are executed. 
ray_tracing_shader = RAY_TRACING_SHADER_KHR { device_extensions: [khr_ray_tracing_pipeline, nv_ray_tracing], }, - /* - // TODO: document + /// The fragment density map is read to generate the fragment areas. fragment_density_process = FRAGMENT_DENSITY_PROCESS_EXT { device_extensions: [ext_fragment_density_map], }, - // TODO: document + /// The fragment shading rate attachment or shading rate image is read to determine the + /// fragment shading rate for portions of a rasterized primitive. fragment_shading_rate_attachment = FRAGMENT_SHADING_RATE_ATTACHMENT_KHR { - device_extensions: [khr_fragment_shading_rate], + device_extensions: [khr_fragment_shading_rate, nv_shading_rate_image], }, - // TODO: document + /// Device-side preprocessing for generated commands via the `preprocess_generated_commands` + /// command is handled. command_preprocess = COMMAND_PREPROCESS_NV { device_extensions: [nv_device_generated_commands], }, - // TODO: document + /// Task shaders are executed. task_shader = TASK_SHADER_NV { device_extensions: [nv_mesh_shader], }, - // TODO: document + /// Mesh shaders are executed. mesh_shader = MESH_SHADER_NV { device_extensions: [nv_mesh_shader], }, + + /// Subpass shading shaders are executed. + subpass_shading = SUBPASS_SHADING_HUAWEI { + device_extensions: [huawei_subpass_shading], + }, + + /// The invocation mask image is read to optimize ray dispatch. + invocation_mask = INVOCATION_MASK_HUAWEI { + device_extensions: [huawei_invocation_mask], + }, + + /* + acceleration_structure_copy = ACCELERATION_STRUCTURE_COPY_KHR { + device_extensions: [khr_ray_tracing_maintenance1], + }, + + micromap_build = MICROMAP_BUILD_EXT { + device_extensions: [ext_opacity_micromap], + }, + + optical_flow = OPTICAL_FLOW_NV { + device_extensions: [nv_optical_flow], + }, */ } impl PipelineStages { + /// Returns whether `self` contains stages that are only available in + /// `VkPipelineStageFlagBits2`. + pub(crate) fn is_2(&self) -> bool { + !Self { + top_of_pipe: false, + draw_indirect: false, + vertex_input: false, + vertex_shader: false, + tessellation_control_shader: false, + tessellation_evaluation_shader: false, + geometry_shader: false, + fragment_shader: false, + early_fragment_tests: false, + late_fragment_tests: false, + color_attachment_output: false, + compute_shader: false, + all_transfer: false, + bottom_of_pipe: false, + host: false, + all_graphics: false, + all_commands: false, + transform_feedback: false, + conditional_rendering: false, + acceleration_structure_build: false, + ray_tracing_shader: false, + fragment_density_process: false, + fragment_shading_rate_attachment: false, + command_preprocess: false, + task_shader: false, + mesh_shader: false, + ..*self + } + .is_empty() + } + + /// Replaces and unsets flags that are equivalent to multiple other flags. + /// + /// This may set flags that are not supported by the device, so this is for internal use only + /// and should not be passed on to Vulkan. 
+ pub(crate) fn normalize(mut self) -> Self { + if self.all_commands { + self = Self { + all_commands: false, + + top_of_pipe: true, + draw_indirect: true, + vertex_input: true, + vertex_shader: true, + tessellation_control_shader: true, + tessellation_evaluation_shader: true, + geometry_shader: true, + fragment_shader: true, + early_fragment_tests: true, + late_fragment_tests: true, + color_attachment_output: true, + compute_shader: true, + all_transfer: true, + bottom_of_pipe: true, + host: true, + all_graphics: true, + copy: true, + resolve: true, + blit: true, + clear: true, + index_input: true, + vertex_attribute_input: true, + pre_rasterization_shaders: true, + video_decode: true, + video_encode: true, + transform_feedback: true, + conditional_rendering: true, + acceleration_structure_build: true, + ray_tracing_shader: true, + fragment_density_process: true, + fragment_shading_rate_attachment: true, + command_preprocess: true, + task_shader: true, + mesh_shader: true, + subpass_shading: true, + invocation_mask: true, + _ne: crate::NonExhaustive(()), + } + } + + if self.all_graphics { + self = Self { + all_graphics: false, + + draw_indirect: true, + task_shader: true, + mesh_shader: true, + vertex_input: true, + vertex_shader: true, + tessellation_control_shader: true, + tessellation_evaluation_shader: true, + geometry_shader: true, + fragment_shader: true, + early_fragment_tests: true, + late_fragment_tests: true, + color_attachment_output: true, + transform_feedback: true, + conditional_rendering: true, + fragment_shading_rate_attachment: true, + fragment_density_process: true, + invocation_mask: true, + ..self + } + } + + if self.vertex_input { + self = Self { + vertex_input: false, + + index_input: true, + vertex_attribute_input: true, + ..self + } + } + + if self.pre_rasterization_shaders { + self = Self { + pre_rasterization_shaders: false, + + vertex_shader: true, + tessellation_control_shader: true, + tessellation_evaluation_shader: true, + geometry_shader: true, + task_shader: true, + mesh_shader: true, + ..self + } + } + + if self.all_transfer { + self = Self { + all_transfer: false, + + copy: true, + resolve: true, + blit: true, + clear: true, + //acceleration_structure_copy: true, + ..self + } + } + + self + } + /// Returns the access types that are supported with the given pipeline stages. /// /// Corresponds to the table @@ -277,118 +718,150 @@ impl PipelineStages { /// in the Vulkan specification. 
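A quick illustration of the expansion performed above (a sketch only; `normalize` is `pub(crate)`, so this reflects how the crate uses it internally rather than a public API):

    let stages = PipelineStages {
        all_transfer: true,
        vertex_input: true,
        ..PipelineStages::empty()
    }
    .normalize();

    // `all_transfer` and `vertex_input` are replaced by the stages they stand for.
    assert!(stages.copy && stages.resolve && stages.blit && stages.clear);
    assert!(stages.index_input && stages.vertex_attribute_input);
    assert!(!stages.all_transfer && !stages.vertex_input);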
#[inline] pub fn supported_access(&self) -> AccessFlags { - if self.all_commands { - return AccessFlags::all(); - } - let PipelineStages { top_of_pipe: _, - mut draw_indirect, - mut vertex_input, - mut vertex_shader, - mut tessellation_control_shader, - mut tessellation_evaluation_shader, - mut geometry_shader, - mut fragment_shader, - mut early_fragment_tests, - mut late_fragment_tests, - mut color_attachment_output, + draw_indirect, + vertex_input: _, + vertex_shader, + tessellation_control_shader, + tessellation_evaluation_shader, + geometry_shader, + fragment_shader, + early_fragment_tests, + late_fragment_tests, + color_attachment_output, compute_shader, - transfer, + all_transfer: _, bottom_of_pipe: _, host, - all_graphics, + all_graphics: _, all_commands: _, + copy, + resolve, + blit, + clear, + index_input, + vertex_attribute_input, + pre_rasterization_shaders: _, + video_decode, + video_encode, + transform_feedback, + conditional_rendering, + acceleration_structure_build, ray_tracing_shader, + fragment_density_process, + fragment_shading_rate_attachment, + command_preprocess, + task_shader, + mesh_shader, + subpass_shading, + invocation_mask, + //acceleration_structure_copy, _ne: _, - } = *self; - - if all_graphics { - draw_indirect = true; - //task_shader = true; - //mesh_shader = true; - vertex_input = true; - vertex_shader = true; - tessellation_control_shader = true; - tessellation_evaluation_shader = true; - geometry_shader = true; - fragment_shader = true; - early_fragment_tests = true; - late_fragment_tests = true; - color_attachment_output = true; - //conditional_rendering = true; - //transform_feedback = true; - //fragment_shading_rate_attachment = true; - //fragment_density_process = true; - } + } = self.normalize(); AccessFlags { - indirect_command_read: draw_indirect, /*|| acceleration_structure_build*/ - index_read: vertex_input, - vertex_attribute_read: vertex_input, - uniform_read: - // task_shader - // mesh_shader - ray_tracing_shader + indirect_command_read: draw_indirect || acceleration_structure_build, + index_read: index_input, + vertex_attribute_read: vertex_attribute_input, + uniform_read: task_shader + || mesh_shader + || ray_tracing_shader || vertex_shader || tessellation_control_shader || tessellation_evaluation_shader || geometry_shader || fragment_shader || compute_shader, - shader_read: - // acceleration_structure_build - // task_shader - // mesh_shader - ray_tracing_shader + shader_read: acceleration_structure_build + || task_shader + || mesh_shader + || ray_tracing_shader + // || micromap_build || vertex_shader || tessellation_control_shader || tessellation_evaluation_shader || geometry_shader || fragment_shader || compute_shader, - shader_write: - // task_shader - // mesh_shader - ray_tracing_shader + shader_write: task_shader + || mesh_shader + || ray_tracing_shader || vertex_shader || tessellation_control_shader || tessellation_evaluation_shader || geometry_shader || fragment_shader || compute_shader, - input_attachment_read: - // subpass_shading - fragment_shader, + input_attachment_read: subpass_shading || fragment_shader, color_attachment_read: color_attachment_output, color_attachment_write: color_attachment_output, depth_stencil_attachment_read: early_fragment_tests || late_fragment_tests, depth_stencil_attachment_write: early_fragment_tests || late_fragment_tests, - transfer_read: transfer, - // acceleration_structure_build - transfer_write: transfer, - // acceleration_structure_build + transfer_read: copy || blit || resolve || 
acceleration_structure_build, + transfer_write: copy || blit || resolve || clear || acceleration_structure_build, host_read: host, host_write: host, memory_read: true, memory_write: true, - - /* - color_attachment_read_noncoherent: color_attachment_output, - preprocess_read: command_preprocess, - preprocess_write: command_preprocess, - conditional_rendering_read: conditional_rendering, - fragment_shading_rate_attachment_read: fragment_shading_rate_attachment, - invocation_mask_read: invocation_mask, + shader_sampled_read: acceleration_structure_build + || task_shader + || mesh_shader + || ray_tracing_shader + // || micromap_build + || vertex_shader + || tessellation_control_shader + || tessellation_evaluation_shader + || geometry_shader + || fragment_shader + || compute_shader, + shader_storage_read: acceleration_structure_build + || task_shader + || mesh_shader + || ray_tracing_shader + // || micromap_build + || vertex_shader + || tessellation_control_shader + || tessellation_evaluation_shader + || geometry_shader + || fragment_shader + || compute_shader, + shader_storage_write: acceleration_structure_build + || task_shader + || mesh_shader + || ray_tracing_shader + || vertex_shader + || tessellation_control_shader + || tessellation_evaluation_shader + || geometry_shader + || fragment_shader + || compute_shader, + video_decode_read: video_decode, + video_decode_write: video_decode, + video_encode_read: video_encode, + video_encode_write: video_encode, transform_feedback_write: transform_feedback, transform_feedback_counter_write: transform_feedback, transform_feedback_counter_read: transform_feedback || draw_indirect, - acceleration_structure_read: task_shader || mesh_shader || vertex_shader || tessellation_control_shader || tessellation_evaluation_shader || geometry_shader || fragment_shader || compute_shader || ray_tracing_shader || acceleration_structure_build, + conditional_rendering_read: conditional_rendering, + command_preprocess_read: command_preprocess, + command_preprocess_write: command_preprocess, + fragment_shading_rate_attachment_read: fragment_shading_rate_attachment, + acceleration_structure_read: task_shader + || mesh_shader + || vertex_shader + || tessellation_control_shader + || tessellation_evaluation_shader + || geometry_shader + || fragment_shader + || compute_shader + || ray_tracing_shader + || acceleration_structure_build, acceleration_structure_write: acceleration_structure_build, fragment_density_map_read: fragment_density_process, - */ - - ..AccessFlags::empty() + color_attachment_read_noncoherent: color_attachment_output, + invocation_mask_read: invocation_mask, + _ne: crate::NonExhaustive(()), } } } @@ -400,142 +873,322 @@ impl From for ash::vk::PipelineStageFlags { } } +impl From for PipelineStages { + #[inline] + fn from(val: PipelineStage) -> Self { + let mut result = Self::empty(); + + match val { + PipelineStage::TopOfPipe => result.top_of_pipe = true, + PipelineStage::DrawIndirect => result.draw_indirect = true, + PipelineStage::VertexInput => result.vertex_input = true, + PipelineStage::VertexShader => result.vertex_shader = true, + PipelineStage::TessellationControlShader => result.tessellation_control_shader = true, + PipelineStage::TessellationEvaluationShader => { + result.tessellation_evaluation_shader = true + } + PipelineStage::GeometryShader => result.geometry_shader = true, + PipelineStage::FragmentShader => result.fragment_shader = true, + PipelineStage::EarlyFragmentTests => result.early_fragment_tests = true, + 
PipelineStage::LateFragmentTests => result.late_fragment_tests = true, + PipelineStage::ColorAttachmentOutput => result.color_attachment_output = true, + PipelineStage::ComputeShader => result.compute_shader = true, + PipelineStage::AllTransfer => result.all_transfer = true, + PipelineStage::BottomOfPipe => result.bottom_of_pipe = true, + PipelineStage::Host => result.host = true, + PipelineStage::AllGraphics => result.all_graphics = true, + PipelineStage::AllCommands => result.all_commands = true, + PipelineStage::Copy => result.copy = true, + PipelineStage::Resolve => result.resolve = true, + PipelineStage::Blit => result.blit = true, + PipelineStage::Clear => result.clear = true, + PipelineStage::IndexInput => result.index_input = true, + PipelineStage::VertexAttributeInput => result.vertex_attribute_input = true, + PipelineStage::PreRasterizationShaders => result.pre_rasterization_shaders = true, + PipelineStage::VideoDecode => result.video_decode = true, + PipelineStage::VideoEncode => result.video_encode = true, + PipelineStage::TransformFeedback => result.transform_feedback = true, + PipelineStage::ConditionalRendering => result.conditional_rendering = true, + PipelineStage::AccelerationStructureBuild => result.acceleration_structure_build = true, + PipelineStage::RayTracingShader => result.ray_tracing_shader = true, + PipelineStage::FragmentDensityProcess => result.fragment_density_process = true, + PipelineStage::FragmentShadingRateAttachment => { + result.fragment_shading_rate_attachment = true + } + PipelineStage::CommandPreprocess => result.command_preprocess = true, + PipelineStage::TaskShader => result.task_shader = true, + PipelineStage::MeshShader => result.mesh_shader = true, + PipelineStage::SubpassShading => result.subpass_shading = true, + PipelineStage::InvocationMask => result.invocation_mask = true, + } + + result + } +} + vulkan_bitflags! { - // TODO: document + /// A set of memory access types that are included in a memory dependency. #[non_exhaustive] AccessFlags = AccessFlags2(u64); - // TODO: document + /// Read access to an indirect buffer. indirect_command_read = INDIRECT_COMMAND_READ, - // TODO: document + /// Read access to an index buffer. index_read = INDEX_READ, - // TODO: document + /// Read access to a vertex buffer. vertex_attribute_read = VERTEX_ATTRIBUTE_READ, - // TODO: document + /// Read access to a uniform buffer in a shader. uniform_read = UNIFORM_READ, - // TODO: document + /// Read access to an input attachment in a fragment shader, within a render pass. input_attachment_read = INPUT_ATTACHMENT_READ, - // TODO: document + /// Read access to a buffer or image in a shader. + /// + /// It is currently equivalent to setting all of the following flags, but automatically + /// omitting any that are not supported in a given context. It also implicitly includes future + /// flags that are added to Vulkan, if they are not yet supported by Vulkano. + /// - `uniform_read` + /// - `shader_sampled_read` + /// - `shader_storage_read` shader_read = SHADER_READ, - // TODO: document + /// Write access to a buffer or image in a shader. + /// + /// It is currently equivalent to `shader_storage_write`. It also implicitly includes future + /// flags that are added to Vulkan, if they are not yet supported by Vulkano. shader_write = SHADER_WRITE, - // TODO: document + /// Read access to a color attachment during blending, logic operations or + /// subpass load operations. 
color_attachment_read = COLOR_ATTACHMENT_READ, - // TODO: document + /// Write access to a color, resolve or depth/stencil resolve attachment during a render pass + /// or subpass store operations. color_attachment_write = COLOR_ATTACHMENT_WRITE, - // TODO: document + /// Read access to a depth/stencil attachment during depth/stencil operations or + /// subpass load operations. depth_stencil_attachment_read = DEPTH_STENCIL_ATTACHMENT_READ, - // TODO: document + /// Write access to a depth/stencil attachment during depth/stencil operations or + /// subpass store operations. depth_stencil_attachment_write = DEPTH_STENCIL_ATTACHMENT_WRITE, - // TODO: document + /// Read access to a buffer or image during a copy, blit or resolve command. transfer_read = TRANSFER_READ, - // TODO: document + /// Write access to a buffer or image during a copy, blit, resolve or clear command. transfer_write = TRANSFER_WRITE, - // TODO: document + /// Read access performed by the host. host_read = HOST_READ, - // TODO: document + /// Write access performed by the host. host_write = HOST_WRITE, - // TODO: document + /// Any type of read access. + /// + /// This is equivalent to setting all `_read` flags that are allowed in the given context. memory_read = MEMORY_READ, - // TODO: document + /// Any type of write access. + /// + /// This is equivalent to setting all `_write` flags that are allowed in the given context. memory_write = MEMORY_WRITE, - /* - // Provided by VK_EXT_transform_feedback + /// Read access to a uniform texel buffer or sampled image in a shader. + shader_sampled_read = SHADER_SAMPLED_READ { + api_version: V1_3, + device_extensions: [khr_synchronization2], + }, + + /// Read access to a storage buffer, storage texel buffer or storage image in a shader. + shader_storage_read = SHADER_STORAGE_READ { + api_version: V1_3, + device_extensions: [khr_synchronization2], + }, + + /// Write access to a storage buffer, storage texel buffer or storage image in a shader. + shader_storage_write = SHADER_STORAGE_WRITE { + api_version: V1_3, + device_extensions: [khr_synchronization2], + }, + + /// Read access to an image or buffer as part of a video decode operation. + video_decode_read = VIDEO_DECODE_READ_KHR { + device_extensions: [khr_video_decode_queue], + }, + + /// Write access to an image or buffer as part of a video decode operation. + video_decode_write = VIDEO_DECODE_WRITE_KHR { + device_extensions: [khr_video_decode_queue], + }, + + /// Read access to an image or buffer as part of a video encode operation. + video_encode_read = VIDEO_ENCODE_READ_KHR { + device_extensions: [khr_video_encode_queue], + }, + + /// Write access to an image or buffer as part of a video encode operation. + video_encode_write = VIDEO_ENCODE_WRITE_KHR { + device_extensions: [khr_video_encode_queue], + }, + + /// Write access to a transform feedback buffer during transform feedback operations. transform_feedback_write = TRANSFORM_FEEDBACK_WRITE_EXT { device_extensions: [ext_transform_feedback], }, - // Provided by VK_EXT_transform_feedback + /// Read access to a transform feedback counter buffer during transform feedback operations. transform_feedback_counter_read = TRANSFORM_FEEDBACK_COUNTER_READ_EXT { device_extensions: [ext_transform_feedback], }, - // Provided by VK_EXT_transform_feedback + /// Write access to a transform feedback counter buffer during transform feedback operations. 
transform_feedback_counter_write = TRANSFORM_FEEDBACK_COUNTER_WRITE_EXT { device_extensions: [ext_transform_feedback], }, - // Provided by VK_EXT_conditional_rendering + /// Read access to a predicate during conditional rendering. conditional_rendering_read = CONDITIONAL_RENDERING_READ_EXT { device_extensions: [ext_conditional_rendering], }, - // Provided by VK_EXT_blend_operation_advanced - color_attachment_read_noncoherent = COLOR_ATTACHMENT_READ_NONCOHERENT_EXT { - device_extensions: [ext_blend_operation_advanced], + /// Read access to preprocess buffers input to `preprocess_generated_commands`. + command_preprocess_read = COMMAND_PREPROCESS_READ_NV { + device_extensions: [nv_device_generated_commands], }, - // Provided by VK_KHR_acceleration_structure + /// Read access to sequences buffers output by `preprocess_generated_commands`. + command_preprocess_write = COMMAND_PREPROCESS_WRITE_NV { + device_extensions: [nv_device_generated_commands], + }, + + /// Read access to a fragment shading rate attachment during rasterization. + fragment_shading_rate_attachment_read = FRAGMENT_SHADING_RATE_ATTACHMENT_READ_KHR { + device_extensions: [khr_fragment_shading_rate], + }, + + /// Read access to an acceleration structure or acceleration structure scratch buffer during + /// trace, build or copy commands. acceleration_structure_read = ACCELERATION_STRUCTURE_READ_KHR { device_extensions: [khr_acceleration_structure, nv_ray_tracing], }, - // Provided by VK_KHR_acceleration_structure + /// Write access to an acceleration structure or acceleration structure scratch buffer during + /// trace, build or copy commands. acceleration_structure_write = ACCELERATION_STRUCTURE_WRITE_KHR { device_extensions: [khr_acceleration_structure, nv_ray_tracing], }, - // Provided by VK_EXT_fragment_density_map + /// Read access to a fragment density map attachment during dynamic fragment density map + /// operations. fragment_density_map_read = FRAGMENT_DENSITY_MAP_READ_EXT { device_extensions: [ext_fragment_density_map], }, - // Provided by VK_KHR_fragment_shading_rate - fragment_shading_rate_attachment_read = FRAGMENT_SHADING_RATE_ATTACHMENT_READ_KHR { - device_extensions: [khr_fragment_shading_rate], + /// Read access to color attachments when performing advanced blend operations. + color_attachment_read_noncoherent = COLOR_ATTACHMENT_READ_NONCOHERENT_EXT { + device_extensions: [ext_blend_operation_advanced], }, - // Provided by VK_NV_device_generated_commands - command_preprocess_read = COMMAND_PREPROCESS_READ_NV { - device_extensions: [nv_device_generated_commands], + /// Read access to an invocation mask image. 
+ invocation_mask_read = INVOCATION_MASK_READ_HUAWEI { + device_extensions: [huawei_invocation_mask], }, - // Provided by VK_NV_device_generated_commands - command_preprocess_write = COMMAND_PREPROCESS_WRITE_NV { - device_extensions: [nv_device_generated_commands], + /* + shader_binding_table_read = SHADER_BINDING_TABLE_READ_KHR { + device_extensions: [khr_ray_tracing_maintenance1], }, - */ + + micromap_read = MICROMAP_READ_EXT { + device_extensions: [ext_opacity_micromap], + }, + + micromap_write = MICROMAP_WRITE_EXT { + device_extensions: [ext_opacity_micromap], + }, + + optical_flow_read = OPTICAL_FLOW_READ_NV { + device_extensions: [nv_optical_flow], + }, + + optical_flow_write = OPTICAL_FLOW_WRITE_NV { + device_extensions: [nv_optical_flow], + }, + */ } impl AccessFlags { - pub(crate) fn all() -> AccessFlags { - AccessFlags { - indirect_command_read: true, - index_read: true, - vertex_attribute_read: true, - uniform_read: true, - input_attachment_read: true, - shader_read: true, - shader_write: true, - color_attachment_read: true, - color_attachment_write: true, - depth_stencil_attachment_read: true, - depth_stencil_attachment_write: true, - transfer_read: true, - transfer_write: true, - host_read: true, - host_write: true, - memory_read: true, - memory_write: true, - _ne: crate::NonExhaustive(()), + /// Returns whether `self` contains stages that are only available in + /// `VkAccessFlagBits2`. + pub(crate) fn is_2(&self) -> bool { + !Self { + indirect_command_read: false, + index_read: false, + vertex_attribute_read: false, + uniform_read: false, + input_attachment_read: false, + shader_read: false, + shader_write: false, + color_attachment_read: false, + color_attachment_write: false, + depth_stencil_attachment_read: false, + depth_stencil_attachment_write: false, + transfer_read: false, + transfer_write: false, + host_read: false, + host_write: false, + memory_read: false, + memory_write: false, + transform_feedback_write: false, + transform_feedback_counter_read: false, + transform_feedback_counter_write: false, + conditional_rendering_read: false, + color_attachment_read_noncoherent: false, + acceleration_structure_read: false, + acceleration_structure_write: false, + fragment_density_map_read: false, + fragment_shading_rate_attachment_read: false, + command_preprocess_read: false, + command_preprocess_write: false, + ..*self } + .is_empty() + } + + /// Replaces and unsets flags that are equivalent to multiple other flags. + /// + /// This may set flags that are not supported by the device, so this is for internal use only + /// and should not be passed on to Vulkan. + #[allow(dead_code)] // TODO: use this function + pub(crate) fn normalize(mut self) -> Self { + if self.shader_read { + self = Self { + shader_read: false, + + uniform_read: true, + shader_sampled_read: true, + shader_storage_read: true, + ..self + } + } + + if self.shader_write { + self = Self { + shader_write: false, + + shader_storage_write: true, + ..self + } + } + + self } } @@ -565,11 +1218,11 @@ pub struct PipelineMemoryAccess { /// operate globally. /// /// Each barrier has a set of source/destination pipeline stages and source/destination memory -/// access types. The pipeline stages create an *execution dependency*: the `source_stages` of +/// access types. The pipeline stages create an *execution dependency*: the `src_stages` of /// commands submitted before the barrier must be completely finished before before any of the -/// `destination_stages` of commands after the barrier are allowed to start. 
The memory access types -/// create a *memory dependency*: in addition to the execution dependency, any `source_access` -/// performed before the barrier must be made available and visible before any `destination_access` +/// `dst_stages` of commands after the barrier are allowed to start. The memory access types +/// create a *memory dependency*: in addition to the execution dependency, any `src_access` +/// performed before the barrier must be made available and visible before any `dst_access` /// are made after the barrier. #[derive(Clone, Debug)] pub struct DependencyInfo { @@ -621,21 +1274,21 @@ pub struct MemoryBarrier { /// The pipeline stages in the source scope to wait for. /// /// The default value is [`PipelineStages::empty()`]. - pub source_stages: PipelineStages, + pub src_stages: PipelineStages, /// The memory accesses in the source scope to make available and visible. /// /// The default value is [`AccessFlags::empty()`]. - pub source_access: AccessFlags, + pub src_access: AccessFlags, - /// The pipeline stages in the destination scope that must wait for `source_stages`. + /// The pipeline stages in the destination scope that must wait for `src_stages`. /// /// The default value is [`PipelineStages::empty()`]. - pub destination_stages: PipelineStages, + pub dst_stages: PipelineStages, - /// The memory accesses in the destination scope that must wait for `source_access` to be made + /// The memory accesses in the destination scope that must wait for `src_access` to be made /// available and visible. - pub destination_access: AccessFlags, + pub dst_access: AccessFlags, pub _ne: crate::NonExhaustive, } @@ -644,10 +1297,10 @@ impl Default for MemoryBarrier { #[inline] fn default() -> Self { Self { - source_stages: PipelineStages::empty(), - source_access: AccessFlags::empty(), - destination_stages: PipelineStages::empty(), - destination_access: AccessFlags::empty(), + src_stages: PipelineStages::empty(), + src_access: AccessFlags::empty(), + dst_stages: PipelineStages::empty(), + dst_access: AccessFlags::empty(), _ne: crate::NonExhaustive(()), } } @@ -659,21 +1312,21 @@ pub struct BufferMemoryBarrier { /// The pipeline stages in the source scope to wait for. /// /// The default value is [`PipelineStages::empty()`]. - pub source_stages: PipelineStages, + pub src_stages: PipelineStages, /// The memory accesses in the source scope to make available and visible. /// /// The default value is [`AccessFlags::empty()`]. - pub source_access: AccessFlags, + pub src_access: AccessFlags, - /// The pipeline stages in the destination scope that must wait for `source_stages`. + /// The pipeline stages in the destination scope that must wait for `src_stages`. /// /// The default value is [`PipelineStages::empty()`]. - pub destination_stages: PipelineStages, + pub dst_stages: PipelineStages, - /// The memory accesses in the destination scope that must wait for `source_access` to be made + /// The memory accesses in the destination scope that must wait for `src_access` to be made /// available and visible. - pub destination_access: AccessFlags, + pub dst_access: AccessFlags, /// For resources created with [`Sharing::Exclusive`](crate::sync::Sharing), transfers /// ownership of a resource from one queue family to another. 
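A small sketch of a global memory barrier expressed with the renamed fields; the specific stages and access types here are illustrative only:

    let barrier = MemoryBarrier {
        src_stages: PipelineStages {
            all_transfer: true,
            ..PipelineStages::empty()
        },
        src_access: AccessFlags {
            transfer_write: true,
            ..AccessFlags::empty()
        },
        dst_stages: PipelineStages {
            fragment_shader: true,
            ..PipelineStages::empty()
        },
        dst_access: AccessFlags {
            shader_read: true,
            ..AccessFlags::empty()
        },
        ..Default::default()
    };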
@@ -692,10 +1345,10 @@ impl BufferMemoryBarrier { #[inline] pub fn buffer(buffer: Arc) -> Self { Self { - source_stages: PipelineStages::empty(), - source_access: AccessFlags::empty(), - destination_stages: PipelineStages::empty(), - destination_access: AccessFlags::empty(), + src_stages: PipelineStages::empty(), + src_access: AccessFlags::empty(), + dst_stages: PipelineStages::empty(), + dst_access: AccessFlags::empty(), queue_family_transfer: None, buffer, range: 0..0, @@ -710,21 +1363,21 @@ pub struct ImageMemoryBarrier { /// The pipeline stages in the source scope to wait for. /// /// The default value is [`PipelineStages::empty()`]. - pub source_stages: PipelineStages, + pub src_stages: PipelineStages, /// The memory accesses in the source scope to make available and visible. /// /// The default value is [`AccessFlags::empty()`]. - pub source_access: AccessFlags, + pub src_access: AccessFlags, - /// The pipeline stages in the destination scope that must wait for `source_stages`. + /// The pipeline stages in the destination scope that must wait for `src_stages`. /// /// The default value is [`PipelineStages::empty()`]. - pub destination_stages: PipelineStages, + pub dst_stages: PipelineStages, - /// The memory accesses in the destination scope that must wait for `source_access` to be made + /// The memory accesses in the destination scope that must wait for `src_access` to be made /// available and visible. - pub destination_access: AccessFlags, + pub dst_access: AccessFlags, /// The layout that the specified `subresource_range` of `image` is expected to be in when the /// source scope completes. @@ -751,10 +1404,10 @@ impl ImageMemoryBarrier { #[inline] pub fn image(image: Arc) -> Self { Self { - source_stages: PipelineStages::empty(), - source_access: AccessFlags::empty(), - destination_stages: PipelineStages::empty(), - destination_access: AccessFlags::empty(), + src_stages: PipelineStages::empty(), + src_access: AccessFlags::empty(), + dst_stages: PipelineStages::empty(), + dst_access: AccessFlags::empty(), old_layout: ImageLayout::Undefined, new_layout: ImageLayout::Undefined, queue_family_transfer: None, From c461f6e7ce96ab97e32a19e27f206c3774f8ba9a Mon Sep 17 00:00:00 2001 From: Rua Date: Sat, 22 Oct 2022 11:47:59 +0200 Subject: [PATCH 2/2] Missing validation checks --- vulkano/src/command_buffer/commands/query.rs | 64 ++++++++++++++------ vulkano/src/render_pass/create.rs | 64 ++++++++++++++------ 2 files changed, 89 insertions(+), 39 deletions(-) diff --git a/vulkano/src/command_buffer/commands/query.rs b/vulkano/src/command_buffer/commands/query.rs index 781349d74c..ea6aba90ea 100644 --- a/vulkano/src/command_buffer/commands/query.rs +++ b/vulkano/src/command_buffer/commands/query.rs @@ -251,32 +251,34 @@ where }); } - // VUID-vkCmdWriteTimestamp-pipelineStage-parameter + // VUID-vkCmdWriteTimestamp2-stage-parameter stage.validate_device(device)?; let queue_family_properties = self.queue_family_properties(); - // VUID-vkCmdWriteTimestamp-commandBuffer-cmdpool + // VUID-vkCmdWriteTimestamp2-commandBuffer-cmdpool if !(queue_family_properties.queue_flags.transfer || queue_family_properties.queue_flags.graphics - || queue_family_properties.queue_flags.compute) + || queue_family_properties.queue_flags.compute + || queue_family_properties.queue_flags.video_decode + || queue_family_properties.queue_flags.video_encode) { return Err(QueryError::NotSupportedByQueueFamily); } let device = self.device(); - // VUID-vkCmdWriteTimestamp-commonparent + // 
VUID-vkCmdWriteTimestamp2-commonparent assert_eq!(device, query_pool.device()); - // VUID-vkCmdWriteTimestamp-pipelineStage-04074 + // VUID-vkCmdWriteTimestamp2-stage-03860 if !queue_family_properties.supports_stage(stage) { return Err(QueryError::StageNotSupported); } match stage { PipelineStage::GeometryShader => { - // VUID-vkCmdWriteTimestamp-pipelineStage-04075 + // VUID-vkCmdWriteTimestamp2-stage-03929 if !device.enabled_features().geometry_shader { return Err(QueryError::RequirementNotMet { required_for: "`stage` is `PipelineStage::GeometryShader`", @@ -289,7 +291,7 @@ where } PipelineStage::TessellationControlShader | PipelineStage::TessellationEvaluationShader => { - // VUID-vkCmdWriteTimestamp-pipelineStage-04076 + // VUID-vkCmdWriteTimestamp2-stage-03930 if !device.enabled_features().tessellation_shader { return Err(QueryError::RequirementNotMet { required_for: "`stage` is `PipelineStage::TessellationControlShader` or `PipelineStage::TessellationEvaluationShader`", @@ -301,7 +303,7 @@ where } } PipelineStage::ConditionalRendering => { - // VUID-vkCmdWriteTimestamp-pipelineStage-04077 + // VUID-vkCmdWriteTimestamp2-stage-03931 if !device.enabled_features().conditional_rendering { return Err(QueryError::RequirementNotMet { required_for: "`stage` is `PipelineStage::ConditionalRendering`", @@ -313,7 +315,7 @@ where } } PipelineStage::FragmentDensityProcess => { - // VUID-vkCmdWriteTimestamp-pipelineStage-04078 + // VUID-vkCmdWriteTimestamp2-stage-03932 if !device.enabled_features().fragment_density_map { return Err(QueryError::RequirementNotMet { required_for: "`stage` is `PipelineStage::FragmentDensityProcess`", @@ -325,7 +327,7 @@ where } } PipelineStage::TransformFeedback => { - // VUID-vkCmdWriteTimestamp-pipelineStage-04079 + // VUID-vkCmdWriteTimestamp2-stage-03933 if !device.enabled_features().transform_feedback { return Err(QueryError::RequirementNotMet { required_for: "`stage` is `PipelineStage::TransformFeedback`", @@ -337,7 +339,7 @@ where } } PipelineStage::MeshShader => { - // VUID-vkCmdWriteTimestamp-pipelineStage-04080 + // VUID-vkCmdWriteTimestamp2-stage-03934 if !device.enabled_features().mesh_shader { return Err(QueryError::RequirementNotMet { required_for: "`stage` is `PipelineStage::MeshShader`", @@ -349,7 +351,7 @@ where } } PipelineStage::TaskShader => { - // VUID-vkCmdWriteTimestamp-pipelineStage-07077 + // VUID-vkCmdWriteTimestamp2-stage-03935 if !device.enabled_features().task_shader { return Err(QueryError::RequirementNotMet { required_for: "`stage` is `PipelineStage::TaskShader`", @@ -361,7 +363,7 @@ where } } PipelineStage::FragmentShadingRateAttachment => { - // VUID-vkCmdWriteTimestamp-pipelineStage-07314 + // VUID-vkCmdWriteTimestamp2-shadingRateImage-07316 if !(device.enabled_features().attachment_fragment_shading_rate || device.enabled_features().shading_rate_image) { @@ -374,31 +376,55 @@ where }); } } + PipelineStage::SubpassShading => { + // VUID-vkCmdWriteTimestamp2-stage-04957 + if !device.enabled_features().subpass_shading { + return Err(QueryError::RequirementNotMet { + required_for: "`stage` is `PipelineStage::SubpassShading`", + requires_one_of: RequiresOneOf { + features: &["subpass_shading"], + ..Default::default() + }, + }); + } + } + PipelineStage::InvocationMask => { + // VUID-vkCmdWriteTimestamp2-stage-04995 + if !device.enabled_features().invocation_mask { + return Err(QueryError::RequirementNotMet { + required_for: "`stage` is `PipelineStage::InvocationMask`", + requires_one_of: RequiresOneOf { + features: &["invocation_mask"], + 
..Default::default() + }, + }); + } + } _ => (), } - // VUID-vkCmdWriteTimestamp-queryPool-01416 + // VUID-vkCmdWriteTimestamp2-queryPool-03861 if !matches!(query_pool.query_type(), QueryType::Timestamp) { return Err(QueryError::NotPermitted); } - // VUID-vkCmdWriteTimestamp-timestampValidBits-00829 + // VUID-vkCmdWriteTimestamp2-timestampValidBits-03863 if queue_family_properties.timestamp_valid_bits.is_none() { return Err(QueryError::NoTimestampValidBits); } - // VUID-vkCmdWriteTimestamp-query-04904 + // VUID-vkCmdWriteTimestamp2-query-04903 query_pool.query(query).ok_or(QueryError::OutOfRange)?; if let Some(render_pass_state) = &self.render_pass_state { - // VUID-vkCmdWriteTimestamp-query-00831 + // VUID-vkCmdWriteTimestamp2-query-03865 if query + render_pass_state.view_mask.count_ones() > query_pool.query_count() { return Err(QueryError::OutOfRangeMultiview); } } - // VUID-vkCmdWriteTimestamp-queryPool-00828 - // VUID-vkCmdWriteTimestamp-None-00830 + // VUID-vkCmdWriteTimestamp2-queryPool-03862 + // VUID-vkCmdWriteTimestamp2-None-03864 // Not checked, therefore unsafe. // TODO: add check. diff --git a/vulkano/src/render_pass/create.rs b/vulkano/src/render_pass/create.rs index 2e2ffbe384..d26f3a2b87 100644 --- a/vulkano/src/render_pass/create.rs +++ b/vulkano/src/render_pass/create.rs @@ -684,16 +684,16 @@ impl RenderPass { } } - // VUID-VkSubpassDependency2-srcStageMask-parameter - // VUID-VkSubpassDependency2-dstStageMask-parameter + // VUID-VkMemoryBarrier2-srcStageMask-parameter + // VUID-VkMemoryBarrier2-dstStageMask-parameter stages.validate_device(device)?; - // VUID-VkSubpassDependency2-srcAccessMask-parameter - // VUID-VkSubpassDependency2-dstAccessMask-parameter + // VUID-VkMemoryBarrier2-srcAccessMask-parameter + // VUID-VkMemoryBarrier2-dstAccessMask-parameter access.validate_device(device)?; - // VUID-VkSubpassDependency2-srcStageMask-04090 - // VUID-VkSubpassDependency2-dstStageMask-04090 + // VUID-VkMemoryBarrier2-srcStageMask-03929 + // VUID-VkMemoryBarrier2-dstStageMask-03929 if stages.geometry_shader && !device.enabled_features().geometry_shader { return Err(RenderPassCreationError::RequirementNotMet { required_for: "`create_info.dependencies` has an element where `stages.geometry_shader` is set", @@ -704,8 +704,8 @@ impl RenderPass { }); } - // VUID-VkSubpassDependency2-srcStageMask-04091 - // VUID-VkSubpassDependency2-dstStageMask-04091 + // VUID-VkMemoryBarrier2-srcStageMask-03930 + // VUID-VkMemoryBarrier2-dstStageMask-03930 if (stages.tessellation_control_shader || stages.tessellation_evaluation_shader) && !device.enabled_features().tessellation_shader { @@ -718,8 +718,8 @@ impl RenderPass { }); } - // VUID-VkSubpassDependency2-srcStageMask-04092 - // VUID-VkSubpassDependency2-dstStageMask-04092 + // VUID-VkMemoryBarrier2-srcStageMask-03931 + // VUID-VkMemoryBarrier2-dstStageMask-03931 if stages.conditional_rendering && !device.enabled_features().conditional_rendering { return Err(RenderPassCreationError::RequirementNotMet { @@ -731,8 +731,8 @@ impl RenderPass { }); } - // VUID-VkSubpassDependency2-srcStageMask-04093 - // VUID-VkSubpassDependency2-dstStageMask-04093 + // VUID-VkMemoryBarrier2-srcStageMask-03932 + // VUID-VkMemoryBarrier2-dstStageMask-03932 if stages.fragment_density_process && !device.enabled_features().fragment_density_map { @@ -745,8 +745,8 @@ impl RenderPass { }); } - // VUID-VkSubpassDependency2-srcStageMask-04094 - // VUID-VkSubpassDependency2-dstStageMask-04094 + // VUID-VkMemoryBarrier2-srcStageMask-03933 + // 
VUID-VkMemoryBarrier2-dstStageMask-03933 if stages.transform_feedback && !device.enabled_features().transform_feedback { return Err(RenderPassCreationError::RequirementNotMet { required_for: "`create_info.dependencies` has an element where `stages.transform_feedback` is set", @@ -757,8 +757,8 @@ impl RenderPass { }); } - // VUID-VkSubpassDependency2-srcStageMask-04095 - // VUID-VkSubpassDependency2-dstStageMask-04095 + // VUID-VkMemoryBarrier2-srcStageMask-03934 + // VUID-VkMemoryBarrier2-dstStageMask-03934 if stages.mesh_shader && !device.enabled_features().mesh_shader { return Err(RenderPassCreationError::RequirementNotMet { required_for: "`create_info.dependencies` has an element where `stages.mesh_shader` is set", @@ -769,8 +769,8 @@ impl RenderPass { }); } - // VUID-VkSubpassDependency2-srcStageMask-04096 - // VUID-VkSubpassDependency2-dstStageMask-04096 + // VUID-VkMemoryBarrier2-srcStageMask-03935 + // VUID-VkMemoryBarrier2-dstStageMask-03935 if stages.task_shader && !device.enabled_features().task_shader { return Err(RenderPassCreationError::RequirementNotMet { required_for: "`create_info.dependencies` has an element where `stages.task_shader` is set", @@ -781,8 +781,8 @@ impl RenderPass { }); } - // VUID-VkSubpassDependency2-srcStageMask-07318 - // VUID-VkSubpassDependency2-dstStageMask-07318 + // VUID-VkMemoryBarrier2-shadingRateImage-07316 + // VUID-VkMemoryBarrier2-shadingRateImage-07316 if stages.fragment_shading_rate_attachment && !(device.enabled_features().attachment_fragment_shading_rate || device.enabled_features().shading_rate_image) @@ -796,6 +796,30 @@ impl RenderPass { }); } + // VUID-VkMemoryBarrier2-srcStageMask-04957 + // VUID-VkMemoryBarrier2-dstStageMask-04957 + if stages.subpass_shading && !device.enabled_features().subpass_shading { + return Err(RenderPassCreationError::RequirementNotMet { + required_for: "`create_info.dependencies` has an element where `stages.subpass_shading` is set", + requires_one_of: RequiresOneOf { + features: &["subpass_shading"], + ..Default::default() + }, + }); + } + + // VUID-VkMemoryBarrier2-srcStageMask-04995 + // VUID-VkMemoryBarrier2-dstStageMask-04995 + if stages.invocation_mask && !device.enabled_features().invocation_mask { + return Err(RenderPassCreationError::RequirementNotMet { + required_for: "`create_info.dependencies` has an element where `stages.invocation_mask` is set", + requires_one_of: RequiresOneOf { + features: &["invocation_mask"], + ..Default::default() + }, + }); + } + // VUID-VkSubpassDependency2-srcStageMask-03937 // VUID-VkSubpassDependency2-dstStageMask-03937 if stages.is_empty() && !device.enabled_features().synchronization2 {
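            // An empty stage mask maps to a `VkPipelineStageFlags` value of 0
            // (`VK_PIPELINE_STAGE_NONE`), which a subpass dependency may only use
            // when the `synchronization2` feature is enabled; otherwise every
            // dependency must name at least one pipeline stage.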