Skip to content

Commit

Permalink
Add support for inline uniform blocks (#2225)
Browse files Browse the repository at this point in the history
* Add support for inline uniform blocks

* Make this match exhaustive so it triggers if new descriptor types are added in the future

* Return RuntimeError from pool allocation

* Doc improvement

* Doc fix

* Apply fixes
  • Loading branch information
Rua authored Jun 17, 2023
1 parent 921d0a5 commit f10c594
Show file tree
Hide file tree
Showing 11 changed files with 817 additions and 433 deletions.
34 changes: 19 additions & 15 deletions vulkano/src/command_buffer/commands/bind_push.rs
Original file line number Diff line number Diff line change
Expand Up @@ -795,7 +795,7 @@ where
{
return Err(ValidationError {
context: "self".into(),
problem: "pipeline_bind_point is PipelineBindPoint::Compute, and the \
problem: "`pipeline_bind_point` is `PipelineBindPoint::Compute`, and the \
queue family does not support compute operations"
.into(),
vuids: &[
Expand All @@ -813,7 +813,7 @@ where
{
return Err(ValidationError {
context: "self".into(),
problem: "pipeline_bind_point is PipelineBindPoint::Graphics, and the \
problem: "`pipeline_bind_point` is `PipelineBindPoint::Graphics`, and the \
queue family does not support graphics operations"
.into(),
vuids: &[
Expand All @@ -831,8 +831,8 @@ where

if set_num as usize > pipeline_layout.set_layouts().len() {
return Err(ValidationError {
problem: "set_num is greater than the number of descriptor set layouts in \
pipeline_layout"
problem: "`set_num` is greater than the number of descriptor set layouts in \
`pipeline_layout`"
.into(),
vuids: &["VUID-vkCmdPushDescriptorSetKHR-set-00364"],
..Default::default()
Expand All @@ -846,9 +846,9 @@ where
.intersects(DescriptorSetLayoutCreateFlags::PUSH_DESCRIPTOR)
{
return Err(ValidationError {
problem: "the descriptor set layout with the number set_num in pipeline_layout \
was not created with the DescriptorSetLayoutCreateFlags::PUSH_DESCRIPTOR \
flag"
problem: "the descriptor set layout with the number `set_num` in \
`pipeline_layout` was not created with the \
`DescriptorSetLayoutCreateFlags::PUSH_DESCRIPTOR` flag"
.into(),
vuids: &["VUID-vkCmdPushDescriptorSetKHR-set-00365"],
..Default::default()
Expand Down Expand Up @@ -1107,32 +1107,30 @@ where
let set_layout = &pipeline_layout.set_layouts()[set_num as usize];

struct PerDescriptorWrite {
write_info: DescriptorWriteInfo,
acceleration_structures: ash::vk::WriteDescriptorSetAccelerationStructureKHR,
inline_uniform_block: ash::vk::WriteDescriptorSetInlineUniformBlock,
}

let mut infos_vk: SmallVec<[_; 8]> = SmallVec::with_capacity(descriptor_writes.len());
let mut writes_vk: SmallVec<[_; 8]> = SmallVec::with_capacity(descriptor_writes.len());
let mut per_writes_vk: SmallVec<[_; 8]> = SmallVec::with_capacity(descriptor_writes.len());

for write in descriptor_writes {
let layout_binding = &set_layout.bindings()[&write.binding()];

infos_vk.push(write.to_vulkan_info(layout_binding.descriptor_type));
writes_vk.push(write.to_vulkan(
ash::vk::DescriptorSet::null(),
layout_binding.descriptor_type,
));
per_writes_vk.push(PerDescriptorWrite {
write_info: write.to_vulkan_info(layout_binding.descriptor_type),
acceleration_structures: Default::default(),
inline_uniform_block: Default::default(),
});
}

for ((info_vk, write_vk), per_write_vk) in infos_vk
.iter()
.zip(writes_vk.iter_mut())
.zip(per_writes_vk.iter_mut())
{
match info_vk {
for (write_vk, per_write_vk) in writes_vk.iter_mut().zip(per_writes_vk.iter_mut()) {
match &mut per_write_vk.write_info {
DescriptorWriteInfo::Image(info) => {
write_vk.descriptor_count = info.len() as u32;
write_vk.p_image_info = info.as_ptr();
Expand All @@ -1145,6 +1143,12 @@ where
write_vk.descriptor_count = info.len() as u32;
write_vk.p_texel_buffer_view = info.as_ptr();
}
DescriptorWriteInfo::InlineUniformBlock(data) => {
write_vk.descriptor_count = data.len() as u32;
write_vk.p_next = &per_write_vk.inline_uniform_block as *const _ as _;
per_write_vk.inline_uniform_block.data_size = write_vk.descriptor_count;
per_write_vk.inline_uniform_block.p_data = data.as_ptr() as *const _;
}
DescriptorWriteInfo::AccelerationStructure(info) => {
write_vk.descriptor_count = info.len() as u32;
write_vk.p_next = &per_write_vk.acceleration_structures as *const _ as _;
Expand Down
10 changes: 9 additions & 1 deletion vulkano/src/command_buffer/commands/pipeline.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1170,6 +1170,13 @@ where
check_sampler,
)?;
}
// Spec:
// Descriptor bindings with descriptor type of
// VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK can be undefined when
// the descriptor set is consumed; though values in that block will be undefined.
//
// TODO: We *may* still want to validate this?
DescriptorBindingResources::InlineUniformBlock => (),
DescriptorBindingResources::AccelerationStructure(elements) => {
validate_resources(
set_num,
Expand Down Expand Up @@ -1945,7 +1952,7 @@ where
let descriptor_set_state = &descriptor_sets_state.descriptor_sets[&set];

match descriptor_set_state.resources().binding(binding).unwrap() {
DescriptorBindingResources::None(_) => continue,
DescriptorBindingResources::None(_) => (),
DescriptorBindingResources::Buffer(elements) => {
if matches!(
descriptor_type,
Expand Down Expand Up @@ -2075,6 +2082,7 @@ where
}
}
DescriptorBindingResources::Sampler(_) => (),
DescriptorBindingResources::InlineUniformBlock => (),
DescriptorBindingResources::AccelerationStructure(elements) => {
for (index, element) in elements.iter().enumerate() {
if let Some(acceleration_structure) = element {
Expand Down
107 changes: 57 additions & 50 deletions vulkano/src/descriptor_set/allocator.rs
Original file line number Diff line number Diff line change
Expand Up @@ -19,16 +19,13 @@
use self::sorted_map::SortedMap;
use super::{
layout::DescriptorSetLayout,
pool::{
DescriptorPool, DescriptorPoolAllocError, DescriptorPoolCreateInfo,
DescriptorSetAllocateInfo,
},
pool::{DescriptorPool, DescriptorPoolCreateInfo, DescriptorSetAllocateInfo},
sys::UnsafeDescriptorSet,
};
use crate::{
descriptor_set::layout::DescriptorSetLayoutCreateFlags,
descriptor_set::layout::{DescriptorSetLayoutCreateFlags, DescriptorType},
device::{Device, DeviceOwned},
RuntimeError,
RuntimeError, VulkanError,
};
use crossbeam_queue::ArrayQueue;
use std::{cell::UnsafeCell, mem::ManuallyDrop, num::NonZeroU64, sync::Arc, thread};
Expand Down Expand Up @@ -267,42 +264,46 @@ impl FixedPool {
pool_sizes: layout
.descriptor_counts()
.iter()
.map(|(&ty, &count)| (ty, count * set_count as u32))
.map(|(&ty, &count)| {
assert!(ty != DescriptorType::InlineUniformBlock);
(ty, count * set_count as u32)
})
.collect(),
..Default::default()
},
)?;
)
.map_err(VulkanError::unwrap_runtime)?;

let allocate_infos = (0..set_count).map(|_| DescriptorSetAllocateInfo {
layout,
variable_descriptor_count: 0,
});

let reserve = match unsafe { inner.allocate_descriptor_sets(allocate_infos) } {
Ok(allocs) => {
let reserve = ArrayQueue::new(set_count);
for alloc in allocs {
let _ = reserve.push(alloc);
}

reserve
}
Err(DescriptorPoolAllocError::OutOfHostMemory) => {
return Err(RuntimeError::OutOfHostMemory);
}
Err(DescriptorPoolAllocError::OutOfDeviceMemory) => {
return Err(RuntimeError::OutOfDeviceMemory);
}
Err(DescriptorPoolAllocError::FragmentedPool) => {
// This can't happen as we don't free individual sets.
unreachable!();
}
Err(DescriptorPoolAllocError::OutOfPoolMemory) => {
// We created the pool with an exact size.
unreachable!();
}
let allocs = unsafe {
inner
.allocate_descriptor_sets(allocate_infos)
.map_err(|err| match err {
RuntimeError::OutOfHostMemory | RuntimeError::OutOfDeviceMemory => err,
RuntimeError::FragmentedPool => {
// This can't happen as we don't free individual sets.
unreachable!();
}
RuntimeError::OutOfPoolMemory => {
// We created the pool with an exact size.
unreachable!();
}
_ => {
// Shouldn't ever be returned.
unreachable!();
}
})?
};

let reserve = ArrayQueue::new(set_count);
for alloc in allocs {
let _ = reserve.push(alloc);
}

Ok(Arc::new(FixedPool {
_inner: inner,
reserve,
Expand Down Expand Up @@ -356,28 +357,30 @@ impl VariableEntry {
variable_descriptor_count,
};

let inner = match unsafe { self.pool.inner.allocate_descriptor_sets([allocate_info]) } {
Ok(mut sets) => sets.next().unwrap(),
Err(DescriptorPoolAllocError::OutOfHostMemory) => {
return Err(RuntimeError::OutOfHostMemory);
}
Err(DescriptorPoolAllocError::OutOfDeviceMemory) => {
return Err(RuntimeError::OutOfDeviceMemory);
}
Err(DescriptorPoolAllocError::FragmentedPool) => {
// This can't happen as we don't free individual sets.
unreachable!();
}
Err(DescriptorPoolAllocError::OutOfPoolMemory) => {
// We created the pool to fit the maximum variable descriptor count.
unreachable!();
}
let mut sets = unsafe {
self.pool
.inner
.allocate_descriptor_sets([allocate_info])
.map_err(|err| match err {
RuntimeError::OutOfHostMemory | RuntimeError::OutOfDeviceMemory => err,
RuntimeError::FragmentedPool => {
// This can't happen as we don't free individual sets.
unreachable!();
}
RuntimeError::OutOfPoolMemory => {
// We created the pool to fit the maximum variable descriptor count.
unreachable!();
}
_ => {
// Shouldn't ever be returned.
unreachable!();
}
})?
};

self.allocations += 1;

Ok(StandardDescriptorSetAlloc {
inner: ManuallyDrop::new(inner),
inner: ManuallyDrop::new(sets.next().unwrap()),
parent: AllocParent::Variable(self.pool.clone()),
})
}
Expand All @@ -403,7 +406,10 @@ impl VariablePool {
pool_sizes: layout
.descriptor_counts()
.iter()
.map(|(&ty, &count)| (ty, count * MAX_SETS as u32))
.map(|(&ty, &count)| {
assert!(ty != DescriptorType::InlineUniformBlock);
(ty, count * MAX_SETS as u32)
})
.collect(),
..Default::default()
},
Expand All @@ -414,6 +420,7 @@ impl VariablePool {
reserve,
})
})
.map_err(VulkanError::unwrap_runtime)
}
}

Expand Down
Loading

0 comments on commit f10c594

Please sign in to comment.