From 231996c60b480b0aeccc06bf274ebaf833822a3b Mon Sep 17 00:00:00 2001
From: Ivan Avdeev
Date: Thu, 30 May 2024 11:56:57 -0400
Subject: [PATCH 01/62] vk: move BOUNDED_ARRAY to arrays.h

Also add dynamic array (not used in this commit)
---
 ref/vk/arrays.c       | 50 ++++++++++++++++++++++++++++
 ref/vk/arrays.h       | 76 +++++++++++++++++++++++++++++++++++++++++++
 ref/vk/vk_common.h    | 16 ---------
 ref/vk/vk_core.c      |  3 +-
 ref/vk/vk_framectl.c  |  9 ++---
 ref/vk/vk_image.c     |  1 +
 ref/vk/vk_ray_accel.c |  3 +-
 ref/vk/vk_resources.c |  1 +
 ref/vk/vk_resources.h |  3 +-
 ref/vk/vk_rtx.c       |  2 ++
 ref/vk/vk_scene.c     |  1 +
 ref/vk/vk_staging.c   |  3 +-
 ref/vk/vk_textures.c  |  2 +-
 13 files changed, 145 insertions(+), 25 deletions(-)
 create mode 100644 ref/vk/arrays.c
 create mode 100644 ref/vk/arrays.h

diff --git a/ref/vk/arrays.c b/ref/vk/arrays.c
new file mode 100644
index 0000000000..9515561f7d
--- /dev/null
+++ b/ref/vk/arrays.c
@@ -0,0 +1,50 @@
+#include "arrays.h"
+
+#include "vk_core.h" // Mem_Malloc
+
+#include <stddef.h> // NULL
+
+void arrayDynamicInit(array_dynamic_t *array, int item_size) {
+	array->items = NULL;
+	array->count = 0;
+	array->capacity = 0;
+	array->item_size = item_size;
+}
+
+void arrayDynamicDestroy(array_dynamic_t *array) {
+	if (array->items)
+		Mem_Free(array->items);
+}
+
+static void arrayDynamicEnsureCapacity(array_dynamic_t *array, int min_capacity) {
+	if (array->capacity >= min_capacity)
+		return;
+
+	if (array->capacity == 0)
+		array->capacity = 2;
+
+	while (array->capacity < min_capacity)
+		array->capacity = array->capacity * 3 / 2;
+
+	void *new_buffer = Mem_Malloc(vk_core.pool, array->capacity * array->item_size);
+	if (array->items) {
+		memcpy(new_buffer, array->items, array->count * array->item_size);
+		Mem_Free(array->items);
+	}
+	array->items = new_buffer;
+}
+
+void arrayDynamicResize(array_dynamic_t *array, int count) {
+	arrayDynamicEnsureCapacity(array, count);
+	array->count = count;
+}
+
+void arrayDynamicAppend(array_dynamic_t *array, void *item) {
+	const int new_count = array->count + 1;
+	arrayDynamicEnsureCapacity(array, new_count);
+
+	memcpy(array->items + array->count * array->item_size, item, array->item_size);
+	array->count = new_count;
+}
+
diff --git a/ref/vk/arrays.h b/ref/vk/arrays.h
new file mode 100644
index 0000000000..0c42745a97
--- /dev/null
+++ b/ref/vk/arrays.h
@@ -0,0 +1,76 @@
+#pragma once
+
+#include <stddef.h> // size_t
+
+// Array with compile-time maximum size
+
+#define BOUNDED_ARRAY_DECLARE(TYPE, NAME, MAX_SIZE) \
+	struct { \
+		TYPE items[MAX_SIZE]; \
+		int count; \
+	} NAME
+
+#define BOUNDED_ARRAY(TYPE, NAME, MAX_SIZE) \
+	BOUNDED_ARRAY_DECLARE(TYPE, NAME, MAX_SIZE) = {0}
+
+#define BOUNDED_ARRAY_APPEND(var, item) \
+	do { \
+		ASSERT(var.count < COUNTOF(var.items)); \
+		var.items[var.count++] = item; \
+	} while(0)
+
+
+// Dynamically-sized array
+// I. Type-agnostic
+
+typedef struct array_dynamic_s {
+	void *items;
+	size_t count, capacity;
+	size_t item_size;
+} array_dynamic_t;
+
+void arrayDynamicInit(array_dynamic_t *array, int item_size);
+void arrayDynamicDestroy(array_dynamic_t *array);
+
+void arrayDynamicReserve(array_dynamic_t *array, int capacity);
+void arrayDynamicAppend(array_dynamic_t *array, void *item);
+#define arrayDynamicAppendItem(array, item) \
+	do { \
+		ASSERT((array)->item_size == sizeof(*(item))); \
+		arrayDynamicAppend(array, item); \
+	} while (0)
+/* void *arrayDynamicGet(array_dynamic_t *array, int index); */
+/* #define arrayDynamicAt(array, type, index) \ */
+/* 	(ASSERT((array)->item_size == sizeof(type)), \ */
+/* 	ASSERT((array)->count > (index)), \ */
+/* 	arrayDynamicGet(array, index)) */
+void arrayDynamicResize(array_dynamic_t *array, int count);
+//void arrayDynamicErase(array_dynamic_t *array, int begin, int end);
+
+//void arrayDynamicInsert(array_dynamic_t *array, int before, int count, void *items);
+
+// II. Type-specific
+#define ARRAY_DYNAMIC_DECLARE(TYPE, NAME) \
+	struct { \
+		TYPE *items; \
+		size_t count, capacity; \
+		size_t item_size; \
+	} NAME
+
+#define arrayDynamicInitT(array) \
+	arrayDynamicInit((array_dynamic_t*)array, sizeof((array)->items[0]))
+
+#define arrayDynamicDestroyT(array) \
+	arrayDynamicDestroy((array_dynamic_t*)array)
+
+#define arrayDynamicResizeT(array, size) \
+	arrayDynamicResize((array_dynamic_t*)(array), (size))
+
+#define arrayDynamicAppendT(array, item) \
+	arrayDynamicAppend((array_dynamic_t*)(array), (item))
+
+#define arrayDynamicInsertT(array, before, count, items) \
+	arrayDynamicInsert((array_dynamic_t*)(array), before, count, items)
+
+#define arrayDynamicAppendManyT(array, items_count, items) \
+	arrayDynamicInsert((array_dynamic_t*)(array), (array)->count, items_count, items)
diff --git a/ref/vk/vk_common.h b/ref/vk/vk_common.h
index 9fb8d36faf..eb1156bb4b 100644
--- a/ref/vk/vk_common.h
+++ b/ref/vk/vk_common.h
@@ -30,19 +30,3 @@ inline static int clampi32(int v, int min, int max) {
 
 extern ref_api_t gEngine;
 extern ref_globals_t *gpGlobals;
-
-// TODO improve and make its own file
-#define BOUNDED_ARRAY_DECLARE(NAME, TYPE, MAX_SIZE) \
-	struct { \
-		TYPE items[MAX_SIZE]; \
-		int count; \
-	} NAME
-
-#define BOUNDED_ARRAY(NAME, TYPE, MAX_SIZE) \
-	BOUNDED_ARRAY_DECLARE(NAME, TYPE, MAX_SIZE) = {0}
-
-#define BOUNDED_ARRAY_APPEND(var, item) \
-	do { \
-		ASSERT(var.count < COUNTOF(var.items)); \
-		var.items[var.count++] = item; \
-	} while(0)
diff --git a/ref/vk/vk_core.c b/ref/vk/vk_core.c
index e1c7fcb1fd..b215bca153 100644
--- a/ref/vk/vk_core.c
+++ b/ref/vk/vk_core.c
@@ -24,6 +24,7 @@
 #include "vk_combuf.h"
 #include "vk_entity_data.h"
 #include "vk_logs.h"
+#include "arrays.h"
 
 // FIXME move this rt-specific stuff out
 #include "vk_light.h"
@@ -191,7 +192,7 @@ static qboolean createInstance( void ) {
 		.pEngineName = "xash3d-fwgs",
 	};
 
-	BOUNDED_ARRAY(validation_features, VkValidationFeatureEnableEXT, 8);
+	BOUNDED_ARRAY(VkValidationFeatureEnableEXT, validation_features, 8);
 	BOUNDED_ARRAY_APPEND(validation_features, VK_VALIDATION_FEATURE_ENABLE_SYNCHRONIZATION_VALIDATION_EXT);
 	BOUNDED_ARRAY_APPEND(validation_features, VK_VALIDATION_FEATURE_ENABLE_BEST_PRACTICES_EXT);
 
diff --git a/ref/vk/vk_framectl.c b/ref/vk/vk_framectl.c
index a7e43176f2..034843cea8 100644
--- a/ref/vk/vk_framectl.c
+++ b/ref/vk/vk_framectl.c
@@ -11,6 +11,7 @@
 
 #include "vk_commandpool.h"
 #include "vk_combuf.h"
+#include "arrays.h"
 
 #include "profiler.h"
 #include "r_speeds.h"
@@ -141,7
+142,7 @@ static VkRenderPass createRenderPass( VkFormat depth_format, qboolean ray_tracin .pDepthStencilAttachment = &depth_attachment, }; - BOUNDED_ARRAY(dependencies, VkSubpassDependency, 2); + BOUNDED_ARRAY(VkSubpassDependency, dependencies, 2); if (vk_core.rtx) { const VkSubpassDependency color = { .srcSubpass = VK_SUBPASS_EXTERNAL, @@ -376,9 +377,9 @@ static void submit( vk_combuf_t* combuf, qboolean wait, qboolean draw ) { VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, }; // TODO for RT renderer we only touch framebuffer at the very end of rendering/cmdbuf. - // Can we postpone waitinf for framebuffer semaphore until we actually need it. - BOUNDED_ARRAY(waitophores, VkSemaphore, 2); - BOUNDED_ARRAY(signalphores, VkSemaphore, 2); + // Can we postpone waiting for framebuffer semaphore until we actually need it. + BOUNDED_ARRAY(VkSemaphore, waitophores, 2); + BOUNDED_ARRAY(VkSemaphore, signalphores, 2); if (draw) { BOUNDED_ARRAY_APPEND(waitophores, frame->sem_framebuffer_ready); diff --git a/ref/vk/vk_image.c b/ref/vk/vk_image.c index c2ede53fc2..0628e3c88d 100644 --- a/ref/vk/vk_image.c +++ b/ref/vk/vk_image.c @@ -2,6 +2,7 @@ #include "vk_staging.h" #include "vk_combuf.h" #include "vk_logs.h" +#include "arrays.h" #include "xash3d_mathlib.h" // Q_max diff --git a/ref/vk/vk_ray_accel.c b/ref/vk/vk_ray_accel.c index ef7cf98af1..83de6848a7 100644 --- a/ref/vk/vk_ray_accel.c +++ b/ref/vk/vk_ray_accel.c @@ -11,6 +11,7 @@ #include "vk_render.h" #include "vk_logs.h" +#include "arrays.h" #include "profiler.h" #include "xash3d_mathlib.h" @@ -76,7 +77,7 @@ static struct { struct { // TODO two arrays for a single vkCmdBuildAccelerationStructuresKHR() call // FIXME This is for testing only - BOUNDED_ARRAY_DECLARE(blas, rt_blas_t*, 256); + BOUNDED_ARRAY_DECLARE(rt_blas_t*, blas, 256); } build; cvar_t *cv_force_culling; diff --git a/ref/vk/vk_resources.c b/ref/vk/vk_resources.c index e9ea71aca7..adf4e221a2 100644 --- a/ref/vk/vk_resources.c +++ b/ref/vk/vk_resources.c @@ -3,6 +3,7 @@ #include "vk_image.h" #include "vk_common.h" #include "vk_logs.h" +#include "arrays.h" #define LOG_MODULE rt diff --git a/ref/vk/vk_resources.h b/ref/vk/vk_resources.h index 909c9d3fb4..056bedd82a 100644 --- a/ref/vk/vk_resources.h +++ b/ref/vk/vk_resources.h @@ -3,6 +3,7 @@ #include "vk_core.h" #include "vk_descriptor.h" #include "vk_image.h" +#include "arrays.h" // TODO remove #include "vk_light.h" @@ -82,7 +83,7 @@ void R_VkResourcesFrameBeginStateChangeFIXME(VkCommandBuffer cmdbuf, qboolean di typedef struct { // TODO VK_KHR_synchronization2, has a slightly different (better) semantics VkPipelineStageFlags src_stage_mask; - BOUNDED_ARRAY_DECLARE(images, VkImageMemoryBarrier, 16); + BOUNDED_ARRAY_DECLARE(VkImageMemoryBarrier, images, 16); //BOUNDED_ARRAY_DECLARE(buffers, VkBufferMemoryBarrier, 16); } r_vk_barrier_t; diff --git a/ref/vk/vk_rtx.c b/ref/vk/vk_rtx.c index 31a631a437..7e47478e28 100644 --- a/ref/vk/vk_rtx.c +++ b/ref/vk/vk_rtx.c @@ -69,6 +69,8 @@ static struct { } g_rtx = {0}; void VK_RayNewMapBegin( void ) { + // TODO it seems like these are unnecessary leftovers. Moreover, they are actively harmful, + // as they recreate things that are in fact pretty much static. Untangle this. 
RT_VkAccelNewMap(); RT_RayModel_Clear(); } diff --git a/ref/vk/vk_scene.c b/ref/vk/vk_scene.c index 1c057d901b..7fee599ff0 100644 --- a/ref/vk/vk_scene.c +++ b/ref/vk/vk_scene.c @@ -138,6 +138,7 @@ static void loadMap(const model_t* const map, qboolean force_reload) { RT_LightsNewMap(map); + // TODO doesn't really need to exist: sprite instance models are static R_SpriteNewMapFIXME(); // Load light entities and patch data prior to loading map brush model diff --git a/ref/vk/vk_staging.c b/ref/vk/vk_staging.c index 4e7ceee05f..3d5aa1f003 100644 --- a/ref/vk/vk_staging.c +++ b/ref/vk/vk_staging.c @@ -6,6 +6,7 @@ #include "r_speeds.h" #include "vk_combuf.h" #include "vk_logs.h" +#include "arrays.h" #include @@ -200,7 +201,7 @@ static void commitBuffers(vk_combuf_t *combuf) { // - upload once per buffer // - join adjacent regions - BOUNDED_ARRAY(barriers, VkBufferMemoryBarrier, 4); + BOUNDED_ARRAY(VkBufferMemoryBarrier, barriers, 4); for (int i = 0; i < g_staging.buffers.count; i++) { const VkBuffer dst_buf = g_staging.buffers.dest[i]; diff --git a/ref/vk/vk_textures.c b/ref/vk/vk_textures.c index 80600e0caa..af7aef5acc 100644 --- a/ref/vk/vk_textures.c +++ b/ref/vk/vk_textures.c @@ -395,6 +395,7 @@ static qboolean uploadRawKtx2( int tex_index, vk_texture_t *tex, const rgbdata_t .height = header->pixelHeight, .depth = Q_max(1, header->pixelDepth), .mips = header->levelCount, + // header->layerCount? header->faceCount? .layers = 1, // TODO or 6 for cubemap; header->faceCount .format = header->vkFormat, .tiling = VK_IMAGE_TILING_OPTIMAL, @@ -408,7 +409,6 @@ static qboolean uploadRawKtx2( int tex_index, vk_texture_t *tex, const rgbdata_t { R_VkImageUploadBegin(&tex->vk.image); - // TODO layers for (int mip = 0; mip < header->levelCount; ++mip) { const ktx2_level_t* const level = levels + mip; const size_t mip_size = level->byteLength; From db7ca294938e1f4ee5881538127fec9c4fde45be Mon Sep 17 00:00:00 2001 From: Ivan Avdeev Date: Thu, 30 May 2024 12:04:48 -0400 Subject: [PATCH 02/62] NOT TESTED vk: stage images using new mechanism This is untested PoC quality. Staging regions are not tracked properly yet. Image upload commit is also done at a weird place. 
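For reference, the intended call sequence is roughly as follows. This is an
editor's sketch, assuming a loader that already knows per-mip sizes; `img`,
`combuf` and the `mip_size`/`mip_data` bookkeeping are illustrative, not part
of this patch:

    // Enqueue all of the image's pixel data for upload:
    R_VkImageUploadBegin(img);     // locks one staging region for the whole image
    for (int layer = 0; layer < img->layers; ++layer)
        for (int mip = 0; mip < img->mips; ++mip)
            // memcpy into staging and record a VkBufferImageCopy; no GPU work yet
            R_VkImageUploadSlice(img, layer, mip, mip_size[mip], mip_data[layer][mip]);
    R_VkImageUploadEnd(img);       // only validates internal invariants

    // Later, once per frame, something drains the queue into a command buffer,
    // which also issues the layout-transition barriers:
    R_VkImageUploadCommit(combuf, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);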
--- ref/vk/TODO.md | 18 +++ ref/vk/vk_framectl.c | 16 ++- ref/vk/vk_image.c | 330 +++++++++++++++++++++++++++++++++---------- ref/vk/vk_image.h | 11 ++ ref/vk/vk_logs.h | 1 + ref/vk/vk_staging.c | 99 +++++-------- ref/vk/vk_staging.h | 33 +++-- ref/vk/vk_textures.c | 7 - 8 files changed, 356 insertions(+), 159 deletions(-) diff --git a/ref/vk/TODO.md b/ref/vk/TODO.md index 6d04930d65..74fc3e7458 100644 --- a/ref/vk/TODO.md +++ b/ref/vk/TODO.md @@ -2,6 +2,24 @@ - [ ] Render graph - [ ] performance profiling and comparison +## 2024-05-24 E379 +- [ ] refactor staging: + - [ ] move destination image tracking to outside of staging + - [x] vk_image ← vk_texture (E380) + - [x] implement generic staging regions (E380) + - [ ] implement stricter staging regions tracking + - [ ] move destination buffer tracking to outside of staging: + - [ ] vk_geometry + - [ ] vk_light: grid, metadata + - [ ] vk_ray_accel: TLAS geometries + - [ ] vk_ray_model: kusochki + - [ ] staging should not be aware of cmdbuf either + - [ ] `R_VkStagingCommit()`: + - [ ] vk_image + - [ ] vk_ray_accel + - [ ] `R_VkStagingGetCommandBuffer()` + - [ ] vk_image + ## 2024-05-07 E376 - [ ] resource manager - [x] extract all resource mgmt from vk_rtx into a designated file diff --git a/ref/vk/vk_framectl.c b/ref/vk/vk_framectl.c index 034843cea8..2dd1394ddd 100644 --- a/ref/vk/vk_framectl.c +++ b/ref/vk/vk_framectl.c @@ -285,6 +285,8 @@ void R_BeginFrame( qboolean clearScene ) { R_VkCombufBegin( frame->combuf ); + R_VkImageUploadCommit(frame->combuf, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | (vk_frame.rtx_enabled ? VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT : 0)); + g_frame.current.phase = Phase_FrameBegan; APROF_SCOPE_END(begin_frame); } @@ -366,10 +368,12 @@ static void submit( vk_combuf_t* combuf, qboolean wait, qboolean draw ) { frame->staging_combuf = R_VkStagingFrameEnd(); - const VkCommandBuffer cmdbufs[] = { - frame->staging_combuf ? frame->staging_combuf->cmdbuf : NULL, - cmdbuf, - }; + BOUNDED_ARRAY(VkCommandBuffer, cmdbufs, 2); + + if (frame->staging_combuf) + BOUNDED_ARRAY_APPEND(cmdbufs, frame->staging_combuf->cmdbuf); + + BOUNDED_ARRAY_APPEND(cmdbufs, cmdbuf); { const VkPipelineStageFlags stageflags[] = { @@ -395,8 +399,8 @@ static void submit( vk_combuf_t* combuf, qboolean wait, qboolean draw ) { .waitSemaphoreCount = waitophores.count, .pWaitSemaphores = waitophores.items, .pWaitDstStageMask = stageflags, - .commandBufferCount = cmdbufs[0] ? 2 : 1, - .pCommandBuffers = cmdbufs[0] ? cmdbufs : cmdbufs + 1, + .commandBufferCount = cmdbufs.count, + .pCommandBuffers = cmdbufs.items, .signalSemaphoreCount = signalphores.count, .pSignalSemaphores = signalphores.items, }; diff --git a/ref/vk/vk_image.c b/ref/vk/vk_image.c index 0628e3c88d..9c1fbfb010 100644 --- a/ref/vk/vk_image.c +++ b/ref/vk/vk_image.c @@ -9,6 +9,8 @@ // Long type lists functions #include "vk_image_extra.h" +#define LOG_MODULE img + static const VkImageUsageFlags usage_bits_implying_views = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT | @@ -115,11 +117,25 @@ r_vk_image_t R_VkImageCreate(const r_vk_image_create_t *create) { image.mips = create->mips; image.layers = create->layers; image.flags = create->flags; + image.image_size = memreq.size; + image.upload_slot = -1; return image; } void R_VkImageDestroy(r_vk_image_t *img) { + // Need to make sure that there are no references to this image anywhere. + // It might have been added to upload queue, but then immediately deleted, leaving references + // in the queue. 
See https://github.com/w23/xash3d-fwgs/issues/464 + R_VkImageUploadCancel(img); + + // Image destroy calls are not explicitly synchronized with rendering. GPU might still be + // processing previous frame. We need to make sure that GPU is done by the time we start + // messing with any VkImage objects. + // TODO: textures are usually destroyed in bulk, so we don't really need to wait for each one. + // TODO: check with framectl for any in-flight frames or any other GPU activity + XVK_CHECK(vkDeviceWaitIdle(vk_core.device)); + if (img->view_unorm != VK_NULL_HANDLE) vkDestroyImageView(vk_core.device, img->view_unorm, NULL); @@ -239,29 +255,199 @@ void R_VkImageBlit(VkCommandBuffer cmdbuf, const r_vkimage_blit_args *blit_args) } } +typedef struct { + r_vk_image_t *image; + + struct { + // arena for entire layers * mips image + r_vkstaging_region_t lock; + + // current write offset into the arena + int cursor; + } staging; + + struct { + int begin, cursor, end; + } slices; +} image_upload_t; + +static struct { + ARRAY_DYNAMIC_DECLARE(image_upload_t, images); + ARRAY_DYNAMIC_DECLARE(VkBufferImageCopy, slices); + ARRAY_DYNAMIC_DECLARE(VkImageMemoryBarrier, barriers); +} g_image_upload; + +qboolean R_VkImageInit(void) { + arrayDynamicInitT(&g_image_upload.images); + arrayDynamicInitT(&g_image_upload.slices); + arrayDynamicInitT(&g_image_upload.barriers); + + return true; +} + +void R_VkImageShutdown(void) { + ASSERT(g_image_upload.images.count == 0); + arrayDynamicDestroyT(&g_image_upload.images); + arrayDynamicDestroyT(&g_image_upload.slices); + arrayDynamicDestroyT(&g_image_upload.barriers); +} + +void R_VkImageUploadCommit( struct vk_combuf_s *combuf, VkPipelineStageFlagBits dst_stages ) { + const int images_count = g_image_upload.images.count; + if (images_count == 0) + return; + + DEBUG("Uploading %d images", images_count); + + static int gpu_scope_id = -2; + if (gpu_scope_id == -2) + gpu_scope_id = R_VkGpuScope_Register("image_upload"); + const int gpu_scope_begin = R_VkCombufScopeBegin(combuf, gpu_scope_id); + + // Pre-allocate temp barriers buffer + arrayDynamicResizeT(&g_image_upload.barriers, images_count); + + // 1. Phase I: prepare all images to be transferred into + // 1.a Set up barriers for every valid image + for (int i = 0; i < images_count; ++i) { + image_upload_t *const up = g_image_upload.images.items + i; + if (!up->image) + continue; + + ASSERT(up->image->upload_slot == i); + + g_image_upload.barriers.items[i] = (VkImageMemoryBarrier) { + .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, + .image = up->image->image, + .srcAccessMask = 0, + .dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT, + .oldLayout = VK_IMAGE_LAYOUT_UNDEFINED, + .newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, + .subresourceRange = (VkImageSubresourceRange) { + .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, + .baseMipLevel = 0, + .levelCount = up->image->mips, + .baseArrayLayer = 0, + .layerCount = up->image->layers, + }, + }; + } + + // 1.b Invoke the barriers + vkCmdPipelineBarrier(combuf->cmdbuf, + VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, + VK_PIPELINE_STAGE_TRANSFER_BIT, + 0, 0, NULL, 0, NULL, + images_count, (VkImageMemoryBarrier*)g_image_upload.barriers.items + ); + + // 2. 
Phase 2: issue copy commands for each valid image + for (int i = 0; i < images_count; ++i) { + image_upload_t *const up = g_image_upload.images.items + i; + if (!up->image) + continue; + + ASSERT(up->staging.lock.buffer != VK_NULL_HANDLE); + ASSERT(up->slices.end == up->slices.cursor); + + vkCmdCopyBufferToImage(combuf->cmdbuf, + up->staging.lock.buffer, + up->image->image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, + up->slices.end - up->slices.begin, + g_image_upload.slices.items + up->slices.begin); + } + + // 3. Phase 3: change all images layout to shader read only optimal + // 3.a Set up barriers for layout transition + for (int i = 0; i < images_count; ++i) { + image_upload_t *const up = g_image_upload.images.items + i; + if (!up->image) + continue; + + g_image_upload.barriers.items[i] = (VkImageMemoryBarrier) { + .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, + .image = up->image->image, + .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT, + .dstAccessMask = VK_ACCESS_SHADER_READ_BIT, + .oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, + .newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, + .subresourceRange = (VkImageSubresourceRange) { + .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, + .baseMipLevel = 0, + .levelCount = up->image->mips, + .baseArrayLayer = 0, + .layerCount = up->image->layers, + }, + }; + + R_VkStagingReleaseAfterNextFrame(up->staging.lock.handle); + + // Mark image as uploaded + up->image = NULL; + up->image->upload_slot = -1; + + // TODO it would be nice to track uploading status further: + // 1. When uploading cmdbuf has been submitted to the GPU + // 2. When that cmdbuf has been processed. + // But that would entail quite a bit more state tracking, etc etc. Discomfort. + } + + // 3.b Submit the barriers + /* const VkPipelineStageFlagBits dest_stages = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | ( */ + /* vk_core.rtx */ + /* ? 
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR */ + /* : 0); */ + vkCmdPipelineBarrier(combuf->cmdbuf, + VK_PIPELINE_STAGE_TRANSFER_BIT, dst_stages, + 0, 0, NULL, 0, NULL, + images_count, (VkImageMemoryBarrier*)g_image_upload.barriers.items + ); + + R_VkCombufScopeEnd(combuf, gpu_scope_begin, VK_PIPELINE_STAGE_TRANSFER_BIT); + + // Clear out image upload queue + arrayDynamicResizeT(&g_image_upload.images, 0); + arrayDynamicResizeT(&g_image_upload.slices, 0); + arrayDynamicResizeT(&g_image_upload.barriers, 0); +} + void R_VkImageUploadBegin( r_vk_image_t *img ) { - const VkImageMemoryBarrier image_barrier = { - .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, - .image = img->image, - .srcAccessMask = 0, - .dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT, - .oldLayout = VK_IMAGE_LAYOUT_UNDEFINED, - .newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, - .subresourceRange = (VkImageSubresourceRange) { - .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, - .baseMipLevel = 0, - .levelCount = img->mips, - .baseArrayLayer = 0, - .layerCount = img->layers, - } - }; + ASSERT(img->upload_slot == -1); - // Command buffer might be invalidated on any slice load - const VkCommandBuffer cmdbuf = R_VkStagingGetCommandBuffer(); - vkCmdPipelineBarrier(cmdbuf, - VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, - VK_PIPELINE_STAGE_TRANSFER_BIT, - 0, 0, NULL, 0, NULL, 1, &image_barrier); + /* TODO compute staging slices sizes properly + const uint32_t texel_block_size = R_VkImageFormatTexelBlockSize(img->format); + for (int layer = 0; layer < img->layers; ++layer) { + for (int mip = 0; mip < img->mips; ++mip) { + const int width = Q_max( 1, ( img->width >> mip )); + const int height = Q_max( 1, ( img->height >> mip )); + const int depth = Q_max( 1, ( img->depth >> mip )); + const size_t mip_size = CalcImageSize( pic->type, width, height, depth ); + } + } + */ + const size_t staging_size = img->image_size; + + // This is done speculatively to preserve internal image_upload invariant. + // Speculation: we might end up with staging implementation that, upon discovering that it ran out of free memory, + // would notify other modules that they'd need to commit their staging data, and thus we'd return to this module's + // R_VkImageUploadCommit(), which needs to see valid data. Therefore, don't touch its state until + // R_VkStagingLock returns. 
+ const r_vkstaging_region_t staging_lock = R_VkStagingLock(staging_size); + + img->upload_slot = g_image_upload.images.count; + arrayDynamicAppendT(&g_image_upload.images, NULL); + image_upload_t *const up = g_image_upload.images.items + img->upload_slot; + + up->image = img; + up->staging.lock = staging_lock; + up->staging.cursor = 0; + + const int slices = img->layers * img->mips; + up->slices.begin = up->slices.cursor = g_image_upload.slices.count; + up->slices.end = up->slices.begin + slices; + + //arrayDynamicAppendManyT(&g_image_upload.slices, slices, NULL); + arrayDynamicResizeT(&g_image_upload.slices, g_image_upload.slices.count + slices); } void R_VkImageUploadSlice( r_vk_image_t *img, int layer, int mip, int size, const void *data ) { @@ -270,63 +456,63 @@ void R_VkImageUploadSlice( r_vk_image_t *img, int layer, int mip, int size, cons const uint32_t depth = Q_max(1, img->depth >> mip); const uint32_t texel_block_size = R_VkImageFormatTexelBlockSize(img->format); - const vk_staging_image_args_t staging_args = { - .image = img->image, - .region = (VkBufferImageCopy) { - .bufferOffset = 0, - .bufferRowLength = 0, - .bufferImageHeight = 0, - .imageSubresource = (VkImageSubresourceLayers){ - .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, - .mipLevel = mip, - .baseArrayLayer = layer, - .layerCount = 1, - }, - .imageExtent = (VkExtent3D){ - .width = width, - .height = height, - .depth = depth, - }, + ASSERT(img->upload_slot >= 0); + ASSERT(img->upload_slot < g_image_upload.images.count); + + image_upload_t *const up = g_image_upload.images.items + img->upload_slot; + ASSERT(up->image == img); + + ASSERT(up->slices.cursor < up->slices.end); + ASSERT(up->staging.cursor < img->image_size); + ASSERT(img->image_size - up->staging.cursor >= size); + + memcpy(up->staging.lock.ptr + up->staging.cursor, data, size); + + g_image_upload.slices.items[up->slices.cursor] = (VkBufferImageCopy) { + .bufferOffset = up->staging.lock.offset + up->staging.cursor, + .bufferRowLength = 0, + .bufferImageHeight = 0, + .imageSubresource = (VkImageSubresourceLayers){ + .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, + .mipLevel = mip, + .baseArrayLayer = layer, + .layerCount = 1, + }, + .imageExtent = (VkExtent3D){ + .width = width, + .height = height, + .depth = depth, }, - .layout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, - .size = size, - .alignment = texel_block_size, }; - { - const vk_staging_region_t staging = R_VkStagingLockForImage(staging_args); - ASSERT(staging.ptr); - memcpy(staging.ptr, data, size); - R_VkStagingUnlock(staging.handle); - } + up->staging.cursor += size; + up->slices.cursor += 1; } void R_VkImageUploadEnd( r_vk_image_t *img ) { - // TODO Don't change layout here. Alternatively: - // I. Attach layout metadata to the image, and request its change next time it is used. - // II. Build-in layout transfer to staging commit and do it there on commit. 
+ ASSERT(img->upload_slot >= 0); + ASSERT(img->upload_slot < g_image_upload.images.count); - const VkImageMemoryBarrier image_barrier = { - .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, - .image = img->image, - .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT, - .dstAccessMask = VK_ACCESS_SHADER_READ_BIT, - .oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, - .newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, - .subresourceRange = (VkImageSubresourceRange) { - .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, - .baseMipLevel = 0, - .levelCount = img->mips, - .baseArrayLayer = 0, - .layerCount = img->layers, - } - }; + image_upload_t *const up = g_image_upload.images.items + img->upload_slot; + ASSERT(up->image == img); - // Commit is needed to make sure that all previous image loads have been submitted to cmdbuf - const VkCommandBuffer cmdbuf = R_VkStagingCommit()->cmdbuf; - vkCmdPipelineBarrier(cmdbuf, - VK_PIPELINE_STAGE_TRANSFER_BIT, - // FIXME incorrect, we also use them in compute and potentially ray tracing shaders - VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, - 0, 0, NULL, 0, NULL, 1, &image_barrier); + ASSERT(up->slices.cursor == up->slices.end); + ASSERT(up->staging.cursor <= img->image_size); +} + +void R_VkImageUploadCancel( r_vk_image_t *img ) { + // Skip already uploaded (or never uploaded) images + if (img->upload_slot < 0) + return; + + image_upload_t *const up = g_image_upload.images.items + img->upload_slot; + ASSERT(up->image == img); + + // Technically we won't need that staging region anymore at all, but it doesn't matter, + // it's just easier to mark it to be freed this way. + R_VkStagingReleaseAfterNextFrame(up->staging.lock.handle); + + // Mark upload slot as unused, and image as not subjet to uploading + up->image = NULL; + img->upload_slot = -1; } diff --git a/ref/vk/vk_image.h b/ref/vk/vk_image.h index 55da9f9243..132b6f08ed 100644 --- a/ref/vk/vk_image.h +++ b/ref/vk/vk_image.h @@ -15,6 +15,9 @@ typedef struct r_vk_image_s { int mips, layers; VkFormat format; uint32_t flags; + uint32_t image_size; + + int upload_slot; } r_vk_image_t; enum { @@ -53,6 +56,14 @@ void R_VkImageBlit( VkCommandBuffer cmdbuf, const r_vkimage_blit_args *blit_args uint32_t R_VkImageFormatTexelBlockSize( VkFormat format ); +// Expects *img to be pinned and valid until either cancel or commit is called void R_VkImageUploadBegin( r_vk_image_t *img ); void R_VkImageUploadSlice( r_vk_image_t *img, int layer, int mip, int size, const void *data ); void R_VkImageUploadEnd( r_vk_image_t *img ); + +// If this image has its upload scheduled, it should be cancelled +void R_VkImageUploadCancel( r_vk_image_t *img ); + +// Upload all enqueued images using the given command buffer +struct vk_combuf_s; +void R_VkImageUploadCommit( struct vk_combuf_s *combuf, VkPipelineStageFlagBits dst_stages ); diff --git a/ref/vk/vk_logs.h b/ref/vk/vk_logs.h index 8f427776f5..9e0a44e1bd 100644 --- a/ref/vk/vk_logs.h +++ b/ref/vk/vk_logs.h @@ -15,6 +15,7 @@ X(rt) \ X(rmain) \ X(sprite) \ + X(img) \ enum { #define X(m) LogModule_##m, diff --git a/ref/vk/vk_staging.c b/ref/vk/vk_staging.c index 3d5aa1f003..17af06483a 100644 --- a/ref/vk/vk_staging.c +++ b/ref/vk/vk_staging.c @@ -24,22 +24,36 @@ typedef struct { size_t size; // for stats only } staging_image_t; +/* TODO +typedef enum { + RegionState_Locked, + RegionState_Released, +} region_state_e; + +typedef struct { + region_state_e debug_state; + //int buffer_index; + VkDeviceSize begin, end; + uint32_t cmdbuf_sequence; +} region_t; +*/ + static struct { vk_buffer_t buffer; 
r_flipping_buffer_t buffer_alloc; + /* TODO + struct { + ARRAY_DYNAMIC_DECLARE(region_t, regions); + } regions; + */ + struct { VkBuffer dest[MAX_STAGING_ALLOCS]; VkBufferCopy copy[MAX_STAGING_ALLOCS]; int count; } buffers; - struct { - staging_image_t dest[MAX_STAGING_ALLOCS]; - VkBufferImageCopy copy[MAX_STAGING_ALLOCS]; - int count; - } images; - vk_combuf_t *combuf[3]; // Currently opened command buffer, ready to accept new commands @@ -114,7 +128,6 @@ void R_VkStagingFlushSync( void ) { } g_staging.buffers.count = 0; - g_staging.images.count = 0; R_FlippingBuffer_Clear(&g_staging.buffer_alloc); end: @@ -158,32 +171,7 @@ vk_staging_region_t R_VkStagingLockForBuffer(vk_staging_buffer_args_t args) { }; } -vk_staging_region_t R_VkStagingLockForImage(vk_staging_image_args_t args) { - if ( g_staging.images.count >= MAX_STAGING_ALLOCS ) - R_VkStagingFlushSync(); - - const uint32_t offset = allocateInRing(args.size, args.alignment); - if (offset == ALO_ALLOC_FAILED) - return (vk_staging_region_t){0}; - - const int index = g_staging.images.count; - staging_image_t *const dest = g_staging.images.dest + index; - - dest->image = args.image; - dest->layout = args.layout; - dest->size = args.size; - g_staging.images.copy[index] = args.region; - g_staging.images.copy[index].bufferOffset += offset; - - g_staging.images.count++; - - return (vk_staging_region_t){ - .ptr = (char*)g_staging.buffer.mapped + offset, - .handle = index + MAX_STAGING_ALLOCS, - }; -} - -void R_VkStagingUnlock(staging_handle_t handle) { +void R_VkStagingUnlock(r_vkstaging_handle_t handle) { ASSERT(handle >= 0); ASSERT(handle < MAX_STAGING_ALLOCS * 2); @@ -278,31 +266,6 @@ static void commitBuffers(vk_combuf_t *combuf) { R_VkCombufScopeEnd(combuf, begin_index, VK_PIPELINE_STAGE_TRANSFER_BIT); } -static void commitImages(vk_combuf_t *combuf) { - if (!g_staging.images.count) - return; - - const VkCommandBuffer cmdbuf = g_staging.current->cmdbuf; - const int begin_index = R_VkCombufScopeBegin(combuf, g_staging.image_upload_scope_id); - for (int i = 0; i < g_staging.images.count; i++) { - /* { */ - /* const VkBufferImageCopy *const copy = g_staging.images.copy + i; */ - /* gEngine.Con_Reportf(" i%d: [%08llx, ?) 
=> %p\n", i, copy->bufferOffset, g_staging.images.dest[i].image); */ - /* } */ - - g_staging.stats.images++; - g_staging.stats.images_size += g_staging.images.dest[i].size; - - vkCmdCopyBufferToImage(cmdbuf, g_staging.buffer.buffer, - g_staging.images.dest[i].image, - g_staging.images.dest[i].layout, - 1, g_staging.images.copy + i); - } - - g_staging.images.count = 0; - R_VkCombufScopeEnd(combuf, begin_index, VK_PIPELINE_STAGE_TRANSFER_BIT); -} - static vk_combuf_t *getCurrentCombuf(void) { if (!g_staging.current) { g_staging.current = g_staging.combuf[0]; @@ -317,12 +280,11 @@ VkCommandBuffer R_VkStagingGetCommandBuffer(void) { } vk_combuf_t *R_VkStagingCommit(void) { - if (!g_staging.images.count && !g_staging.buffers.count && !g_staging.current) + if (!g_staging.buffers.count && !g_staging.current) return VK_NULL_HANDLE; getCurrentCombuf(); commitBuffers(g_staging.current); - commitImages(g_staging.current); return g_staging.current; } @@ -330,7 +292,6 @@ void R_VkStagingFrameBegin(void) { R_FlippingBuffer_Flip(&g_staging.buffer_alloc); g_staging.buffers.count = 0; - g_staging.images.count = 0; } vk_combuf_t *R_VkStagingFrameEnd(void) { @@ -351,3 +312,19 @@ vk_combuf_t *R_VkStagingFrameEnd(void) { return current; } + +r_vkstaging_region_t R_VkStagingLock(uint32_t size) { + const uint32_t alignment = 4; + const uint32_t offset = R_FlippingBuffer_Alloc(&g_staging.buffer_alloc, size, alignment); + ASSERT(offset != ALO_ALLOC_FAILED); + return (r_vkstaging_region_t){ + .handle = 31337, // FAKE + .offset = offset, + .buffer = g_staging.buffer.buffer, + .ptr = g_staging.buffer.mapped + offset, + }; +} + +void R_VkStagingReleaseAfterNextFrame(r_vkstaging_handle_t handle) { + // FIXME +} diff --git a/ref/vk/vk_staging.h b/ref/vk/vk_staging.h index 2caa14230f..7c0c1eb603 100644 --- a/ref/vk/vk_staging.h +++ b/ref/vk/vk_staging.h @@ -5,11 +5,28 @@ qboolean R_VkStagingInit(void); void R_VkStagingShutdown(void); -typedef int staging_handle_t; +typedef int r_vkstaging_handle_t; typedef struct { void *ptr; - staging_handle_t handle; + r_vkstaging_handle_t handle; + + // TODO maybe return these on lock? + VkBuffer buffer; + VkDeviceSize offset; +} r_vkstaging_region_t; + +// Allocate CPU-accessible memory in staging buffer +r_vkstaging_region_t R_VkStagingLock(uint32_t size); + +// Release when next frame is done +// TODO synch with specific combuf: void R_VkStagingRelease(r_vkstaging_handle_t handle, uint32_t gen); +void R_VkStagingReleaseAfterNextFrame(r_vkstaging_handle_t handle); + + +typedef struct { + void *ptr; + r_vkstaging_handle_t handle; } vk_staging_region_t; // Allocate region for uploadting to buffer @@ -21,18 +38,8 @@ typedef struct { } vk_staging_buffer_args_t; vk_staging_region_t R_VkStagingLockForBuffer(vk_staging_buffer_args_t args); -// Allocate region for uploading to image -typedef struct { - VkImage image; - VkImageLayout layout; - VkBufferImageCopy region; - uint32_t size; - uint32_t alignment; -} vk_staging_image_args_t; -vk_staging_region_t R_VkStagingLockForImage(vk_staging_image_args_t args); - // Mark allocated region as ready for upload -void R_VkStagingUnlock(staging_handle_t handle); +void R_VkStagingUnlock(r_vkstaging_handle_t handle); // Append copy commands to command buffer. 
struct vk_combuf_s* R_VkStagingCommit(void); diff --git a/ref/vk/vk_textures.c b/ref/vk/vk_textures.c index af7aef5acc..ae633ba546 100644 --- a/ref/vk/vk_textures.c +++ b/ref/vk/vk_textures.c @@ -2,7 +2,6 @@ #include "vk_core.h" #include "vk_descriptor.h" -#include "vk_staging.h" #include "vk_logs.h" #include "r_textures.h" #include "r_speeds.h" @@ -615,12 +614,6 @@ void R_VkTextureDestroy( int index, vk_texture_t *tex ) { if (tex->vk.image.image == VK_NULL_HANDLE) return; - // Need to make sure that there are no references to this texture anywhere. - // It might have been added to staging and then immediately deleted, leaving references to its vkimage - // in the staging command buffer. See https://github.com/w23/xash3d-fwgs/issues/464 - R_VkStagingFlushSync(); - XVK_CHECK(vkDeviceWaitIdle(vk_core.device)); - R_VkImageDestroy(&tex->vk.image); g_vktextures.stats.size_total -= tex->total_size; g_vktextures.stats.count--; From 243ef2558733169e032a45619848d5adfba8955f Mon Sep 17 00:00:00 2001 From: Ivan 'provod' Avdeev Date: Thu, 30 May 2024 13:03:05 -0400 Subject: [PATCH 03/62] vk: fixup new image staging fixes building on windows handles canceling holes still, corrupts some textures for some reason --- ref/vk/arrays.c | 4 +++- ref/vk/vk_core.c | 6 ++++-- ref/vk/vk_framectl.c | 5 +++-- ref/vk/vk_image.c | 23 ++++++++++++++++------- ref/vk/vk_image.h | 5 +++++ ref/vk/vk_staging.c | 5 +++-- 6 files changed, 34 insertions(+), 14 deletions(-) diff --git a/ref/vk/arrays.c b/ref/vk/arrays.c index 9515561f7d..fe6d970c86 100644 --- a/ref/vk/arrays.c +++ b/ref/vk/arrays.c @@ -44,7 +44,9 @@ void arrayDynamicAppend(array_dynamic_t *array, void *item) { const int new_count = array->count + 1; arrayDynamicEnsureCapacity(array, new_count); - memcpy(array->items + array->count * array->item_size, item, array->item_size); + if (item) + memcpy((char*)array->items + array->count * array->item_size, item, array->item_size); + array->count = new_count; } diff --git a/ref/vk/vk_core.c b/ref/vk/vk_core.c index b215bca153..b6dcd10789 100644 --- a/ref/vk/vk_core.c +++ b/ref/vk/vk_core.c @@ -3,7 +3,7 @@ #include "vk_common.h" #include "r_textures.h" #include "vk_overlay.h" -#include "vk_renderstate.h" +#include "vk_image.h" #include "vk_staging.h" #include "vk_framectl.h" #include "vk_brush.h" @@ -40,7 +40,6 @@ #include "debugbreak.h" #include -#include #define LOG_MODULE core @@ -794,6 +793,9 @@ qboolean R_VkInit( void ) VK_LoadCvarsAfterInit(); + if (!R_VkImageInit()) + return false; + if (!R_VkCombuf_Init()) return false; diff --git a/ref/vk/vk_framectl.c b/ref/vk/vk_framectl.c index 2dd1394ddd..a52642e5a6 100644 --- a/ref/vk/vk_framectl.c +++ b/ref/vk/vk_framectl.c @@ -285,8 +285,6 @@ void R_BeginFrame( qboolean clearScene ) { R_VkCombufBegin( frame->combuf ); - R_VkImageUploadCommit(frame->combuf, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | (vk_frame.rtx_enabled ? VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT : 0)); - g_frame.current.phase = Phase_FrameBegan; APROF_SCOPE_END(begin_frame); } @@ -307,6 +305,9 @@ static void enqueueRendering( vk_combuf_t* combuf, qboolean draw ) { ASSERT(g_frame.current.phase == Phase_FrameBegan); + // FIXME, should be done by rendering when it requests textures + R_VkImageUploadCommit(combuf, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | (vk_frame.rtx_enabled ? 
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT : 0)); + const VkCommandBuffer cmdbuf = combuf->cmdbuf; VK_Render_FIXME_Barrier(cmdbuf); diff --git a/ref/vk/vk_image.c b/ref/vk/vk_image.c index 9c1fbfb010..7d8175c954 100644 --- a/ref/vk/vk_image.c +++ b/ref/vk/vk_image.c @@ -111,6 +111,7 @@ r_vk_image_t R_VkImageCreate(const r_vk_image_create_t *create) { } } + Q_strncpy(image.name, create->debug_name, sizeof(image.name)); image.width = create->width; image.height = create->height; image.depth = create->depth; @@ -309,14 +310,19 @@ void R_VkImageUploadCommit( struct vk_combuf_s *combuf, VkPipelineStageFlagBits // 1. Phase I: prepare all images to be transferred into // 1.a Set up barriers for every valid image + int barriers_count = 0; for (int i = 0; i < images_count; ++i) { image_upload_t *const up = g_image_upload.images.items + i; - if (!up->image) + if (!up->image) { + DEBUG("Skipping image upload slot %d", i); continue; + } + + DEBUG("Uploading image \"%s\"", up->image->name); ASSERT(up->image->upload_slot == i); - g_image_upload.barriers.items[i] = (VkImageMemoryBarrier) { + g_image_upload.barriers.items[barriers_count++] = (VkImageMemoryBarrier) { .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, .image = up->image->image, .srcAccessMask = 0, @@ -338,7 +344,7 @@ void R_VkImageUploadCommit( struct vk_combuf_s *combuf, VkPipelineStageFlagBits VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, NULL, 0, NULL, - images_count, (VkImageMemoryBarrier*)g_image_upload.barriers.items + barriers_count, (VkImageMemoryBarrier*)g_image_upload.barriers.items ); // 2. Phase 2: issue copy commands for each valid image @@ -359,12 +365,13 @@ void R_VkImageUploadCommit( struct vk_combuf_s *combuf, VkPipelineStageFlagBits // 3. Phase 3: change all images layout to shader read only optimal // 3.a Set up barriers for layout transition + barriers_count = 0; for (int i = 0; i < images_count; ++i) { image_upload_t *const up = g_image_upload.images.items + i; if (!up->image) continue; - g_image_upload.barriers.items[i] = (VkImageMemoryBarrier) { + g_image_upload.barriers.items[barriers_count++] = (VkImageMemoryBarrier) { .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, .image = up->image->image, .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT, @@ -383,8 +390,8 @@ void R_VkImageUploadCommit( struct vk_combuf_s *combuf, VkPipelineStageFlagBits R_VkStagingReleaseAfterNextFrame(up->staging.lock.handle); // Mark image as uploaded - up->image = NULL; up->image->upload_slot = -1; + up->image = NULL; // TODO it would be nice to track uploading status further: // 1. 
When uploading cmdbuf has been submitted to the GPU
 		// 2. When that cmdbuf has been processed.
 		// But that would entail quite a bit more state tracking, etc etc. Discomfort.
 
@@ -400,7 +407,7 @@ void R_VkImageUploadCommit( struct vk_combuf_s *combuf, VkPipelineStageFlagBits
 	vkCmdPipelineBarrier(combuf->cmdbuf,
 		VK_PIPELINE_STAGE_TRANSFER_BIT, dst_stages,
 		0, 0, NULL, 0, NULL,
-		images_count, (VkImageMemoryBarrier*)g_image_upload.barriers.items
+		barriers_count, (VkImageMemoryBarrier*)g_image_upload.barriers.items
 	);
 
 	R_VkCombufScopeEnd(combuf, gpu_scope_begin, VK_PIPELINE_STAGE_TRANSFER_BIT);
@@ -466,7 +473,7 @@ void R_VkImageUploadSlice( r_vk_image_t *img, int layer, int mip, int size, cons
 	ASSERT(up->staging.cursor < img->image_size);
 	ASSERT(img->image_size - up->staging.cursor >= size);
 
-	memcpy(up->staging.lock.ptr + up->staging.cursor, data, size);
+	memcpy((char*)up->staging.lock.ptr + up->staging.cursor, data, size);
 
 	g_image_upload.slices.items[up->slices.cursor] = (VkBufferImageCopy) {
 		.bufferOffset = up->staging.lock.offset + up->staging.cursor,
@@ -505,6 +512,8 @@ void R_VkImageUploadCancel( r_vk_image_t *img ) {
 	if (img->upload_slot < 0)
 		return;
 
+	WARN("Canceling uploading image \"%s\"", img->name);
+
 	image_upload_t *const up = g_image_upload.images.items + img->upload_slot;
 	ASSERT(up->image == img);
 
diff --git a/ref/vk/vk_image.h b/ref/vk/vk_image.h
index 132b6f08ed..be9d5e1411 100644
--- a/ref/vk/vk_image.h
+++ b/ref/vk/vk_image.h
@@ -2,7 +2,12 @@
 #include "vk_core.h"
 #include "vk_devmem.h"
 
+qboolean R_VkImageInit(void);
+void R_VkImageShutdown(void);
+
 typedef struct r_vk_image_s {
+	char name[64];
+
 	vk_devmem_t devmem;
 	VkImage image;
 	VkImageView view;
diff --git a/ref/vk/vk_staging.c b/ref/vk/vk_staging.c
index 17af06483a..b455e62c3e 100644
--- a/ref/vk/vk_staging.c
+++ b/ref/vk/vk_staging.c
@@ -13,7 +13,8 @@
 #define MODULE_NAME "staging"
 #define LOG_MODULE staging
 
-#define DEFAULT_STAGING_SIZE (128*1024*1024)
+// FIXME don't do this, mkay
+#define DEFAULT_STAGING_SIZE (2*128*1024*1024)
 #define MAX_STAGING_ALLOCS (2048)
 #define MAX_CONCURRENT_FRAMES 2
 #define COMMAND_BUFFER_COUNT (MAX_CONCURRENT_FRAMES + 1) // to accommodate two frames in flight plus something trying to upload data before waiting for the next frame to complete
@@ -321,7 +322,7 @@ r_vkstaging_region_t R_VkStagingLock(uint32_t size) {
 		.handle = 31337, // FAKE
 		.offset = offset,
 		.buffer = g_staging.buffer.buffer,
-		.ptr = g_staging.buffer.mapped + offset,
+		.ptr = (char*)g_staging.buffer.mapped + offset,
 	};
 }
 

From ddbad5e240eb9c736131a9de612a8082506bb7ae Mon Sep 17 00:00:00 2001
From: Ivan Avdeev
Date: Fri, 31 May 2024 16:03:01 -0700
Subject: [PATCH 04/62] vk: silence sync validation woes around image clearing

Prior to this change the `R_VkImageClear()` function was causing a
SYNC-HAZARD-WRITE-AFTER-READ error, thinking that clearing the `[dest]` image
is not synchronized with the blit during the previous frame. However, there's
an explicit semaphore sync with the previous frame, and as such it seems this
validation complaint is baseless.

I'd make a simple repro and submit it to the validation repo, but who am i
kidding, i have like 10 minutes left to do anything today, and i likely won't
be able to get back to this in several days.
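The workaround is to let the caller state what previous GPU work the clear
must order against. E.g. for the RT output image, which the previous frame's
blit read as a transfer source, the call (see the vk_rtx.c hunk below) becomes:

    R_VkImageClear( cmdbuf, g_rtx.mainpipe_out->image.image,
        VK_ACCESS_TRANSFER_READ_BIT,       // what previously touched the image
        VK_PIPELINE_STAGE_TRANSFER_BIT );  // the stage of that previous access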
--- ref/vk/vk_image.c | 6 +++--- ref/vk/vk_image.h | 2 +- ref/vk/vk_resources.c | 2 +- ref/vk/vk_rtx.c | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/ref/vk/vk_image.c b/ref/vk/vk_image.c index 7d8175c954..4508c8d636 100644 --- a/ref/vk/vk_image.c +++ b/ref/vk/vk_image.c @@ -150,11 +150,11 @@ void R_VkImageDestroy(r_vk_image_t *img) { *img = (r_vk_image_t){0}; } -void R_VkImageClear(VkCommandBuffer cmdbuf, VkImage image) { +void R_VkImageClear(VkCommandBuffer cmdbuf, VkImage image, VkAccessFlags src_access, VkPipelineStageFlags from_stage) { const VkImageMemoryBarrier image_barriers[] = { { .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, .image = image, - .srcAccessMask = 0, + .srcAccessMask = src_access, .dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT, .oldLayout = VK_IMAGE_LAYOUT_UNDEFINED, .newLayout = VK_IMAGE_LAYOUT_GENERAL, @@ -168,7 +168,7 @@ void R_VkImageClear(VkCommandBuffer cmdbuf, VkImage image) { const VkClearColorValue clear_value = {0}; - vkCmdPipelineBarrier(cmdbuf, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, + vkCmdPipelineBarrier(cmdbuf, from_stage, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, NULL, 0, NULL, COUNTOF(image_barriers), image_barriers); vkCmdClearColorImage(cmdbuf, image, VK_IMAGE_LAYOUT_GENERAL, &clear_value, 1, &image_barriers->subresourceRange); diff --git a/ref/vk/vk_image.h b/ref/vk/vk_image.h index be9d5e1411..b06e3a2f86 100644 --- a/ref/vk/vk_image.h +++ b/ref/vk/vk_image.h @@ -45,7 +45,7 @@ typedef struct { r_vk_image_t R_VkImageCreate(const r_vk_image_create_t *create); void R_VkImageDestroy(r_vk_image_t *img); -void R_VkImageClear(VkCommandBuffer cmdbuf, VkImage image); +void R_VkImageClear(VkCommandBuffer cmdbuf, VkImage image, VkAccessFlags src_access, VkPipelineStageFlags from_stage); typedef struct { VkPipelineStageFlags in_stage; diff --git a/ref/vk/vk_resources.c b/ref/vk/vk_resources.c index adf4e221a2..9cef06654a 100644 --- a/ref/vk/vk_resources.c +++ b/ref/vk/vk_resources.c @@ -157,7 +157,7 @@ void R_VkResourcesFrameBeginStateChangeFIXME(VkCommandBuffer cmdbuf, qboolean di if (discontinuity || res->resource.write.pipelines == 0) { // TODO is there a better way? Can image be cleared w/o explicit clear op? 
DEBUG("discontinuity: %s", res->name); - R_VkImageClear( cmdbuf, res->image.image ); + R_VkImageClear( cmdbuf, res->image.image, 0, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT ); res->resource.write.pipelines = VK_PIPELINE_STAGE_TRANSFER_BIT; res->resource.write.image_layout = VK_IMAGE_LAYOUT_GENERAL; res->resource.write.access_mask = VK_ACCESS_TRANSFER_WRITE_BIT; diff --git a/ref/vk/vk_rtx.c b/ref/vk/vk_rtx.c index 7e47478e28..472f20b639 100644 --- a/ref/vk/vk_rtx.c +++ b/ref/vk/vk_rtx.c @@ -574,7 +574,7 @@ void VK_RayFrameEnd(const vk_ray_frame_render_args_t* args) }, }; - R_VkImageClear( cmdbuf, g_rtx.mainpipe_out->image.image ); + R_VkImageClear( cmdbuf, g_rtx.mainpipe_out->image.image, VK_ACCESS_TRANSFER_READ_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT ); R_VkImageBlit( cmdbuf, &blit_args ); } else { const perform_tracing_args_t trace_args = { From 7502303fad27732e573c54834323f45f17cf9c3f Mon Sep 17 00:00:00 2001 From: Ivan 'provod' Avdeev Date: Tue, 26 Nov 2024 17:21:14 -0500 Subject: [PATCH 05/62] vk: silence inefficient buffer barrier usage validation messages See https://github.com/w23/xash3d-fwgs/issues/743 --- ref/vk/vk_core.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/ref/vk/vk_core.c b/ref/vk/vk_core.c index b6dcd10789..08a7479d6b 100644 --- a/ref/vk/vk_core.c +++ b/ref/vk/vk_core.c @@ -126,13 +126,18 @@ VkBool32 VKAPI_PTR debugCallback( if (Q_strcmp(pCallbackData->pMessageIdName, "VUID-vkMapMemory-memory-00683") == 0) return VK_FALSE; + // FIXME: remove this when new buffer staging is done, see https://github.com/w23/xash3d-fwgs/issues/743 + // For now, ignore a firehose of "inefficient srcStageMask using VK_PIPELINE_STAGE_ALL_COMMANDS_BIT" messages. + if (Q_strcmp(pCallbackData->pMessageIdName, "BestPractices-pipeline-stage-flags-compute") == 0) + return VK_FALSE; + /* if (messageSeverity != VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT) { */ /* gEngine.Con_Printf(S_WARN "Validation: %s\n", pCallbackData->pMessage); */ /* } */ // TODO better messages, not only errors, what are other arguments for, ... 
if (messageSeverity == VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT) { - gEngine.Con_Printf(S_ERROR "%s\n", pCallbackData->pMessage); + gEngine.Con_Printf(S_ERROR "vk/dbg: %s\n", pCallbackData->pMessage); #ifdef _MSC_VER __debugbreak(); #else @@ -140,9 +145,9 @@ VkBool32 VKAPI_PTR debugCallback( #endif } else { if (Q_strcmp(pCallbackData->pMessageIdName, "UNASSIGNED-DEBUG-PRINTF") == 0) { - gEngine.Con_Printf(S_ERROR "%s\n", pCallbackData->pMessage); + gEngine.Con_Printf(S_ERROR "vk/dbg: %s\n", pCallbackData->pMessage); } else { - gEngine.Con_Printf(S_WARN "%s\n", pCallbackData->pMessage); + gEngine.Con_Printf(S_WARN "vk/dbg: %s\n", pCallbackData->pMessage); } } From ade78d0bf7527d47f488b6cf242c92c5f5d802d0 Mon Sep 17 00:00:00 2001 From: Ivan 'provod' Avdeev Date: Wed, 27 Nov 2024 12:56:46 -0500 Subject: [PATCH 06/62] vk: add logs to staging also add some notes about clangdb and staging problems --- ref/vk/NOTES.md | 23 +++++++++++++++++++++++ ref/vk/vk_image.c | 21 ++++++++++++++++++--- ref/vk/vk_logs.h | 1 + ref/vk/vk_staging.c | 5 +++++ 4 files changed, 47 insertions(+), 3 deletions(-) diff --git a/ref/vk/NOTES.md b/ref/vk/NOTES.md index adc0d7006a..216b505ccf 100644 --- a/ref/vk/NOTES.md +++ b/ref/vk/NOTES.md @@ -1177,3 +1177,26 @@ Cons: ridiculous texture explosion - `performTracing()` write resource desc values passed from outside on each call - new resources are added in `reloadMainpipe()` - resource with zero refcount are destroyed in `cleanupResources()` + + +# 2024-11-26 +`./waf clangdb` produces `compile_commands.json` file inside of the build directory. All the paths in the file are relative to that directory. +If the build directory is something 2nd level, like `build/amd64-debug`, and the file is then symlinked to (as nvim/lsp/clangd only looks for the file in the root and in the `./build` dir), then it confuses nvim/lsp/clangd. +Solution: make build dir literally just `./build`. + + +# 2024-11-27 E381 +## Removing staging flush + +### vk_scene.c/reloadPatches() +- Can ignore for now + +### Staging full +- (I) Just allocate another buffer for staging +- (II) Figure out why the hell do we need so much staging memory + - PBR/remastered textures + - possible solution: lazy/ondemand loading + +### vk_brush.c / collect emissive surfaces +- (I) try to merge emissive collection with surface loading +- (II) convert from pushing material data to pulling. Not really clear how to do easily. 
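+
+One possible shape for option (I), as an editor's sketch only — the overflow
+helper below is hypothetical, nothing like it is implemented:
+
+```c
+// On ring exhaustion, fall back to a freshly allocated one-shot buffer that
+// is released after the next frame, instead of a synchronous flush.
+r_vkstaging_region_t R_VkStagingLock(uint32_t size) {
+	const uint32_t offset = R_FlippingBuffer_Alloc(&g_staging.buffer_alloc, size, 4);
+	if (offset != ALO_ALLOC_FAILED)
+		return (r_vkstaging_region_t){
+			.handle = 31337, // FAKE, same as the current code
+			.offset = offset,
+			.buffer = g_staging.buffer.buffer,
+			.ptr = (char*)g_staging.buffer.mapped + offset,
+		};
+
+	// Hypothetical: create a mapped vk_buffer_t and schedule it for
+	// destruction once the frame that copies out of it retires.
+	vk_buffer_t *const extra = stagingCreateOverflowBuffer(size);
+	return (r_vkstaging_region_t){
+		.handle = 0, // would need real region tracking, see TODO in vk_staging.c
+		.offset = 0,
+		.buffer = extra->buffer,
+		.ptr = extra->mapped,
+	};
+}
+```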
diff --git a/ref/vk/vk_image.c b/ref/vk/vk_image.c index 4508c8d636..d2af0da1f1 100644 --- a/ref/vk/vk_image.c +++ b/ref/vk/vk_image.c @@ -318,8 +318,6 @@ void R_VkImageUploadCommit( struct vk_combuf_s *combuf, VkPipelineStageFlagBits continue; } - DEBUG("Uploading image \"%s\"", up->image->name); - ASSERT(up->image->upload_slot == i); g_image_upload.barriers.items[barriers_count++] = (VkImageMemoryBarrier) { @@ -353,13 +351,30 @@ void R_VkImageUploadCommit( struct vk_combuf_s *combuf, VkPipelineStageFlagBits if (!up->image) continue; + const int slices_count = up->slices.end - up->slices.begin; + DEBUG("Uploading image \"%s\": buffer=%p slices=%d", up->image->name, up->staging.lock.buffer, slices_count); + ASSERT(up->staging.lock.buffer != VK_NULL_HANDLE); ASSERT(up->slices.end == up->slices.cursor); + ASSERT(slices_count > 0); + + for (int j = 0; j < slices_count; ++j) { + const VkBufferImageCopy *const slice = g_image_upload.slices.items + up->slices.begin + j; + DEBUG(" slice[%d]: off=%d rowl=%d height=%d off=(%d,%d,%d) ext=(%d,%d,%d)", + j, slice->bufferOffset, slice->bufferRowLength, slice->bufferImageHeight, + slice->imageOffset.x, + slice->imageOffset.y, + slice->imageOffset.z, + slice->imageExtent.width, + slice->imageExtent.height, + slice->imageExtent.depth + ); + } vkCmdCopyBufferToImage(combuf->cmdbuf, up->staging.lock.buffer, up->image->image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, - up->slices.end - up->slices.begin, + slices_count, g_image_upload.slices.items + up->slices.begin); } diff --git a/ref/vk/vk_logs.h b/ref/vk/vk_logs.h index 9e0a44e1bd..3c69c859a7 100644 --- a/ref/vk/vk_logs.h +++ b/ref/vk/vk_logs.h @@ -16,6 +16,7 @@ X(rmain) \ X(sprite) \ X(img) \ + X(staging) \ enum { #define X(m) LogModule_##m, diff --git a/ref/vk/vk_staging.c b/ref/vk/vk_staging.c index b455e62c3e..cbae1283ed 100644 --- a/ref/vk/vk_staging.c +++ b/ref/vk/vk_staging.c @@ -155,6 +155,8 @@ vk_staging_region_t R_VkStagingLockForBuffer(vk_staging_buffer_args_t args) { if (offset == ALO_ALLOC_FAILED) return (vk_staging_region_t){0}; + DEBUG("Lock buf alignment=%d size=%d region=%d..%d", args.alignment, args.size, offset, offset + args.size); + const int index = g_staging.buffers.count; g_staging.buffers.dest[index] = args.buffer; @@ -318,6 +320,9 @@ r_vkstaging_region_t R_VkStagingLock(uint32_t size) { const uint32_t alignment = 4; const uint32_t offset = R_FlippingBuffer_Alloc(&g_staging.buffer_alloc, size, alignment); ASSERT(offset != ALO_ALLOC_FAILED); + + DEBUG("Lock alignment=%d size=%d region=%d..%d", alignment, size, offset, offset + size); + return (r_vkstaging_region_t){ .handle = 31337, // FAKE .offset = offset, From c1f629dc444e4562572d036a31260f64cd3415a5 Mon Sep 17 00:00:00 2001 From: Ivan 'provod' Avdeev Date: Wed, 27 Nov 2024 14:42:56 -0500 Subject: [PATCH 07/62] vk: do not upload emissive materials twice Apparently now it is possible to handle emissive brush surfaces at the same time as generating geometry. No second pass for emissive extraction is needed. This allows skipping extra `R_VkStagingFlushSync()`. Not all flush-sync usages are removed, though. 
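The gist of the single pass: fillBrushSurfaces() now calls
surfaceHandleEmissive() for each surface it loads, which either registers the
polylight immediately or queues it on the model (sketch of the relevant branch):

    if (args.is_static) {
        // Worldmodel and origin-patched func_any: drawn all the time,
        // so the light can be added once, right here.
        RT_LightAddPolygon(&polylight);
    } else {
        // Movable brush models re-add these every frame from
        // R_BrushModelDraw() with the current transform.
        arrayDynamicAppendT(&args.bmodel->dynamic_polylights, &polylight);
    }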
--- ref/vk/vk_brush.c | 286 +++++++++++++++++++------------------------- ref/vk/vk_brush.h | 2 - ref/vk/vk_scene.c | 27 +---- ref/vk/vk_staging.c | 9 +- 4 files changed, 131 insertions(+), 193 deletions(-) diff --git a/ref/vk/vk_brush.c b/ref/vk/vk_brush.c index 6e9d552e09..61ac7f5aeb 100644 --- a/ref/vk/vk_brush.c +++ b/ref/vk/vk_brush.c @@ -13,6 +13,7 @@ #include "vk_staging.h" #include "vk_logs.h" #include "profiler.h" +#include "arrays.h" #include #include @@ -59,8 +60,7 @@ typedef struct vk_brush_model_s { vk_vertex_t *conveyors_vertices; // Polylights which need to be added per-frame dynamically - struct rt_light_add_polygon_s *dynamic_polylights; - int dynamic_polylights_count; + ARRAY_DYNAMIC_DECLARE(struct rt_light_add_polygon_s, dynamic_polylights); } vk_brush_model_t; typedef struct { @@ -814,8 +814,8 @@ void R_BrushModelDraw( const cl_entity_t *ent, int render_mode, float blend, con render_mode = bmodel->patch_rendermode; // Add dynamic polylights if any - for (int i = 0; i < bmodel->dynamic_polylights_count; ++i) { - rt_light_add_polygon_t *const polylight = bmodel->dynamic_polylights + i; + for (int i = 0; i < bmodel->dynamic_polylights.count; ++i) { + rt_light_add_polygon_t *const polylight = bmodel->dynamic_polylights.items + i; polylight->transform_row = (const matrix3x4*)transform; polylight->dynamic = true; RT_LightAddPolygon(polylight); @@ -1031,7 +1031,9 @@ typedef struct { vk_render_geometry_t *out_geometries; vk_vertex_t *out_vertices; uint16_t *out_indices; + const xvk_mapent_func_any_t *func_any; qboolean is_worldmodel; + qboolean is_static; } fill_geometries_args_t; static void getSurfaceNormal( const msurface_t *surf, vec3_t out_normal) { @@ -1273,12 +1275,102 @@ static const xvk_mapent_func_any_t *getModelFuncAnyPatch( const model_t *const m return NULL; } +typedef struct { + const model_t *mod; + const xvk_mapent_func_any_t *func_any; + qboolean is_static; + vk_brush_model_t *bmodel; + const msurface_t *surf; + int surface_index; + brush_surface_type_e type; + int tex_id; + const xvk_patch_surface_t *psurf; + vk_render_geometry_t *model_geometry; + int *emissive_surfaces_count; +} SurfaceHandleEmissiveArgs; + +static void surfaceHandleEmissive(SurfaceHandleEmissiveArgs args) { + VectorClear(args.model_geometry->emissive); + + switch (args.type) { + case BrushSurface_Regular: + case BrushSurface_Water: + // No known cases, also needs to be dynamic case BrushSurface_WaterSide: + break; + // Animated textures are enumerated in `R_BrushModelDraw()` and are added as dynamic lights + // when their current frame is emissive. Do not add such surfaces here to avoid adding them twice. + // TODO: Most of the animated surfaces are techically static: i.e. they don't really move. + // Make a special case for static lights that can be off. 
+ case BrushSurface_Animated: + default: + return; + } + + vec3_t emissive; + if (args.psurf && (args.psurf->flags & Patch_Surface_Emissive)) { + VectorCopy(args.psurf->emissive, emissive); + } else if (RT_GetEmissiveForTexture(emissive, args.tex_id)) { + // emissive + } else { + // not emissive, continue to the next + return; + } + + DEBUG("emissive[%d] surf_index=%d tex_id=%d patch=%d(%#x) => emissive=(%f,%f,%f)", + *args.emissive_surfaces_count, args.surface_index, args.tex_id, !!args.psurf, args.psurf?args.psurf->flags:0, emissive[0], emissive[1], emissive[2]); + + (*args.emissive_surfaces_count)++; + + /* const qboolean is_water = type == BrushSurface_Water; */ + VectorCopy(emissive, args.model_geometry->emissive); + + rt_light_add_polygon_t polylight; + if (!loadPolyLight(&polylight, args.mod, args.surface_index, args.surf, emissive)) + return; + + // func_any surfaces do not really belong to BSP+PVS system, so they can't be used + // for lights visibility calculation directly. + if (args.func_any && args.func_any->origin_patched) { + // TODO this is not really dynamic, but this flag signals using MovingSurface visibility calc + polylight.dynamic = true; + matrix3x4 m; + Matrix3x4_LoadIdentity(m); + Matrix3x4_SetOrigin(m, args.func_any->origin[0], args.func_any->origin[1], args.func_any->origin[2]); + polylight.transform_row = &m; + } + + // Static emissive surfaces are added immediately, as they are drawn all the time. + // Non-static ones will be applied later when the model is actually rendered + // Non-static brush models may move around and so must have their emissive surfaces treated as dynamic + if (args.is_static) { + RT_LightAddPolygon(&polylight); + + /* TODO figure out when this is needed. + * This is needed in cases where we can dive into emissive acid, which should illuminate what's under it + * Likely, this is not a correct fix, though, see https://github.com/w23/xash3d-fwgs/issues/56 + if (is_water) { + // Add backside for water + for (int i = 0; i < polylight.num_vertices; ++i) { + vec3_t tmp; + VectorCopy(polylight.vertices[i], tmp); + VectorCopy(polylight.vertices[polylight.num_vertices-1-i], polylight.vertices[i]); + VectorCopy(tmp, polylight.vertices[polylight.num_vertices-1-i]); + RT_LightAddPolygon(&polylight); + } + } + */ + } else { + arrayDynamicAppendT(&args.bmodel->dynamic_polylights, &polylight); + } +} + static qboolean fillBrushSurfaces(fill_geometries_args_t args) { int vertex_offset = 0; int num_geometries = 0; int animated_count = 0; int conveyors_count = 0; int conveyors_vertices_count = 0; + int emissive_surfaces_count = 0; vk_vertex_t *p_vert = args.out_vertices; uint16_t *p_ind = args.out_indices; @@ -1403,7 +1495,19 @@ static qboolean fillBrushSurfaces(fill_geometries_args_t args) { if (type == BrushSurface_Animated) model_geometry->ye_olde_texture = -1; - VectorClear(model_geometry->emissive); + surfaceHandleEmissive((SurfaceHandleEmissiveArgs){ + .mod = args.mod, + .func_any = args.func_any, + .is_static = args.is_static, + .bmodel = args.bmodel, + .surf = surf, + .surface_index = surface_index, + .type = type, + .tex_id = tex_id, + .psurf = psurf, + .model_geometry = model_geometry, + .emissive_surfaces_count = &emissive_surfaces_count, + }); model_geometry->surf_deprecate = surf; @@ -1542,7 +1646,7 @@ static qboolean fillBrushSurfaces(fill_geometries_args_t args) { if (area2 <= 0.) 
{ // Do not produce triangle if it has zero area // NOTE: this is suboptimal in the sense that points that might be necessary for proper - // normal smoothing might be skippedk. In case that this causes undesirable rendering + // normal smoothing might be skipped. In case that this causes undesirable rendering // artifacts, a more proper triangulation algorithm, that doesn't skip points, would // be needed. E.g. ear clipping. /* diagnostics @@ -1582,6 +1686,10 @@ static qboolean fillBrushSurfaces(fill_geometries_args_t args) { } // for mod->nummodelsurfaces } + // Apply all emissive surfaces found + INFO("Loaded %d polylights, %d dynamic for %s model %s", + emissive_surfaces_count, args.bmodel->dynamic_polylights.count, args.is_static ? "static" : "movable", args.mod->name); + ASSERT(args.sizes.num_surfaces == num_geometries); ASSERT(args.sizes.animated_count == animated_count); ASSERT(args.sizes.conveyors_count == conveyors_count); @@ -1615,6 +1723,8 @@ static qboolean createRenderModel( const model_t *mod, vk_brush_model_t *bmodel, } const r_geometry_range_lock_t geom_lock = R_GeometryRangeLock(&bmodel->geometry); + const xvk_mapent_func_any_t *func_any = getModelFuncAnyPatch(mod); + const qboolean is_static = is_worldmodel || (func_any && func_any->origin_patched); const qboolean fill_result = fillBrushSurfaces((fill_geometries_args_t){ .mod = mod, @@ -1625,7 +1735,9 @@ static qboolean createRenderModel( const model_t *mod, vk_brush_model_t *bmodel, .out_geometries = geometries, .out_vertices = geom_lock.vertices, .out_indices = geom_lock.indices, + .func_any = func_any, .is_worldmodel = is_worldmodel, + .is_static = is_static, }); R_GeometryRangeUnlock( &geom_lock ); @@ -1671,6 +1783,8 @@ qboolean R_BrushModelLoad( model_t *mod, qboolean is_worldmodel ) { Matrix4x4_LoadIdentity(bmodel->prev_transform); bmodel->prev_time = gpGlobals->time; + arrayDynamicInitT(&bmodel->dynamic_polylights); + const model_sizes_t sizes = computeSizes( mod, is_worldmodel ); if (is_worldmodel) { @@ -1719,8 +1833,7 @@ static void R_BrushModelDestroy( vk_brush_model_t *bmodel ) { ASSERT(bmodel->engine_model->cache.data == bmodel); ASSERT(bmodel->engine_model->type == mod_brush); - if (bmodel->dynamic_polylights) - Mem_Free(bmodel->dynamic_polylights); + arrayDynamicDestroyT(&bmodel->dynamic_polylights); if (bmodel->conveyors_vertices) Mem_Free(bmodel->conveyors_vertices); @@ -1814,163 +1927,6 @@ static qboolean loadPolyLight(rt_light_add_polygon_t *out_polygon, const model_t return true; } -void R_VkBrushModelCollectEmissiveSurfaces( const struct model_s *mod, qboolean is_worldmodel ) { - vk_brush_model_t *const bmodel = mod->cache.data; - ASSERT(bmodel); - - const xvk_mapent_func_any_t *func_any = getModelFuncAnyPatch(mod); - const qboolean is_static = is_worldmodel || (func_any && func_any->origin_patched); - - typedef struct { - int model_surface_index; - int surface_index; - const msurface_t *surf; - vec3_t emissive; - qboolean is_water; - } emissive_surface_t; - emissive_surface_t emissive_surfaces[MAX_SURFACE_LIGHTS]; - int geom_indices[MAX_SURFACE_LIGHTS]; - int emissive_surfaces_count = 0; - - // Load list of all emissive surfaces - for( int i = 0; i < mod->nummodelsurfaces; ++i) { - const int surface_index = mod->firstmodelsurface + i; - const msurface_t *surf = mod->surfaces + surface_index; - const brush_surface_type_e type = getSurfaceType(surf, surface_index, is_worldmodel); - - switch (type) { - case BrushSurface_Regular: - case BrushSurface_Water: - // No known cases, also needs to be dynamic 
- case BrushSurface_WaterSide: - break; - // Animated textures are enumerated in `R_BrushModelDraw()` and are added as dynamic lights - // when their current frame is emissive. Do not add such surfaces here to avoid adding them twice. - // TODO: Most of the animated surfaces are techically static: i.e. they don't really move. - // Make a special case for static lights that can be off. - case BrushSurface_Animated: - default: - continue; - } - - const int tex_id = surf->texinfo->texture->gl_texturenum; // TODO animation? - - vec3_t emissive; - const xvk_patch_surface_t *const psurf = R_VkPatchGetSurface(surface_index); - if (psurf && (psurf->flags & Patch_Surface_Emissive)) { - VectorCopy(psurf->emissive, emissive); - } else if (RT_GetEmissiveForTexture(emissive, tex_id)) { - // emissive - } else { - // not emissive, continue to the next - continue; - } - - DEBUG("%d: i=%d surf_index=%d tex_id=%d patch=%d(%#x) => emissive=(%f,%f,%f)", emissive_surfaces_count, i, surface_index, tex_id, !!psurf, psurf?psurf->flags:0, emissive[0], emissive[1], emissive[2]); - - if (emissive_surfaces_count == MAX_SURFACE_LIGHTS) { - ERR("Too many emissive surfaces for model %s: max=%d", mod->name, MAX_SURFACE_LIGHTS); - break; - } - - emissive_surface_t* const surface = &emissive_surfaces[emissive_surfaces_count++]; - surface->model_surface_index = i; - surface->surface_index = surface_index; - surface->surf = surf; - surface->is_water = type == BrushSurface_Water; - VectorCopy(emissive, surface->emissive); - } - - // Clear old per-geometry emissive values. The new emissive values will be assigned by the loop below only to the relevant geoms - // This is relevant for updating lights during development - for (int i = 0; i < bmodel->render_model.num_geometries; ++i) { - vk_render_geometry_t *const geom = bmodel->render_model.geometries + i; - VectorClear(geom->emissive); - } - - // Non-static brush models may move around and so must have their emissive surfaces treated as dynamic - if (!is_static) { - if (bmodel->dynamic_polylights) - Mem_Free(bmodel->dynamic_polylights); - bmodel->dynamic_polylights_count = 0; - bmodel->dynamic_polylights = Mem_Malloc(vk_core.pool, sizeof(bmodel->dynamic_polylights[0]) * emissive_surfaces_count); - } - - // Apply all emissive surfaces found - int geom_indices_count = 0; - for (int i = 0; i < emissive_surfaces_count; ++i) { - const emissive_surface_t* const s = emissive_surfaces + i; - rt_light_add_polygon_t polylight; - if (!loadPolyLight(&polylight, mod, s->surface_index, s->surf, s->emissive)) - continue; - - // func_any surfaces do not really belong to BSP+PVS system, so they can't be used - // for lights visibility calculation directly. - if (func_any && func_any->origin_patched) { - // TODO this is not really dynamic, but this flag signals using MovingSurface visibility calc - polylight.dynamic = true; - matrix3x4 m; - Matrix3x4_LoadIdentity(m); - Matrix3x4_SetOrigin(m, func_any->origin[0], func_any->origin[1], func_any->origin[2]); - polylight.transform_row = &m; - } - - // Static emissive surfaces are added immediately, as they are drawn all the time. - // Non-static ones will be applied later when the model is actually rendered - if (is_static) { - RT_LightAddPolygon(&polylight); - - /* TODO figure out when this is needed.
- * This is needed in cases where we can dive into emissive acid, which should illuminate what's under it - * Likely, this is not a correct fix, though, see https://github.com/w23/xash3d-fwgs/issues/56 - if (s->is_water) { - // Add backside for water - for (int i = 0; i < polylight.num_vertices; ++i) { - vec3_t tmp; - VectorCopy(polylight.vertices[i], tmp); - VectorCopy(polylight.vertices[polylight.num_vertices-1-i], polylight.vertices[i]); - VectorCopy(tmp, polylight.vertices[polylight.num_vertices-1-i]); - RT_LightAddPolygon(&polylight); - } - } - */ - } else { - ASSERT(bmodel->dynamic_polylights_count < emissive_surfaces_count); - bmodel->dynamic_polylights[bmodel->dynamic_polylights_count++] = polylight; - } - - // Assign the emissive value to the right geometry - if (bmodel->surface_to_geometry_index) { // Can be absent for water-only models - const int geom_index = bmodel->surface_to_geometry_index[s->model_surface_index]; - if (geom_index != -1) { // can be missing for water surfaces - ASSERT(geom_index >= 0); - ASSERT(geom_index < bmodel->render_model.num_geometries); - ASSERT(geom_indices_count < COUNTOF(geom_indices)); - geom_indices[geom_indices_count++] = geom_index; - VectorCopy(polylight.emissive, bmodel->render_model.geometries[geom_index].emissive); - } - } - } - - if (emissive_surfaces_count > 0) { - // Update emissive values in kusochki. This is required because initial R_BrushModelLoad happens before we've read - // RAD data in vk_light.c, so the emissive values are empty. This is the place and time where we actually get to - // know them, so let's fixup things. - // TODO minor optimization: sort geom_indices to have a better chance for them to be sequential - - { - // Make sure that staging has been flushed. - // Updating materials leads to staging an upload to the same memory that we've just staged an upload to. - // This doesn't please the validator. - // Ensure that these uploads are not mixed into the same unsynchronized stream. - // TODO this might be not great for performance (extra waiting for GPU), so a better solution should be considered. E.g. tracking and barrier-syncing regions to-be-reuploaded. - R_VkStagingFlushSync(); - } - - R_RenderModelUpdateMaterials(&bmodel->render_model, geom_indices, geom_indices_count); - INFO("Loaded %d polylights for %s model %s", emissive_surfaces_count, is_static ? 
"static" : "movable", mod->name); - } -} - void R_BrushUnloadTextures( model_t *mod ) { int i; diff --git a/ref/vk/vk_brush.h b/ref/vk/vk_brush.h index 66c44afd73..03884c7355 100644 --- a/ref/vk/vk_brush.h +++ b/ref/vk/vk_brush.h @@ -18,6 +18,4 @@ void R_BrushModelDraw( const cl_entity_t *ent, int render_mode, float blend, con const texture_t *R_TextureAnimation( const cl_entity_t *ent, const msurface_t *s ); -void R_VkBrushModelCollectEmissiveSurfaces( const struct model_s *mod, qboolean is_worldmodel ); - void R_BrushUnloadTextures( model_t *mod ); diff --git a/ref/vk/vk_scene.c b/ref/vk/vk_scene.c index 7fee599ff0..740ebe8656 100644 --- a/ref/vk/vk_scene.c +++ b/ref/vk/vk_scene.c @@ -65,28 +65,6 @@ static struct { draw_list_t *draw_list; } g_lists; -static void loadLights( const model_t *const map ) { - RT_LightsLoadBegin(map); - - const int num_models = gEngine.EngineGetParm( PARM_NUMMODELS, 0 ); - for( int i = 0; i < num_models; i++ ) { - const model_t *const mod = gEngine.pfnGetModelByIndex( i + 1 ); - - if (!mod) - continue; - - if( mod->type != mod_brush ) - continue; - - const qboolean is_worldmodel = i == 0; - R_VkBrushModelCollectEmissiveSurfaces(mod, is_worldmodel); - } - - // Load static map lights - // Reads surfaces from loaded brush models (must happen after all brushes are loaded) - RT_LightsLoadEnd(); -} - static void preloadModels( void ) { const int num_models = gEngine.EngineGetParm( PARM_NUMMODELS, 0 ); @@ -151,13 +129,14 @@ static void loadMap(const model_t* const map, qboolean force_reload) { // Depends on loaded materials. Must preceed loading brush models. XVK_ParseMapPatches(); + RT_LightsLoadBegin(map); preloadModels(); + // Marks all loaded lights as static. Should happen after preloadModels(), where brush models are loaded. + RT_LightsLoadEnd(); // Can only do after preloadModels(), as we need to know whether there are SURF_DRAWSKY R_TextureSetupSky( gEngine.pfnGetMoveVars()->skyName, force_reload ); - loadLights(map); - // TODO should we do something like R_BrushEndLoad? VK_UploadLightmap(); } diff --git a/ref/vk/vk_staging.c b/ref/vk/vk_staging.c index cbae1283ed..98582b5972 100644 --- a/ref/vk/vk_staging.c +++ b/ref/vk/vk_staging.c @@ -13,8 +13,8 @@ #define MODULE_NAME "staging" #define LOG_MODULE staging -// FIXME don't do this, mkay -#define DEFAULT_STAGING_SIZE (2*128*1024*1024) +// FIXME decrease size to something reasonable, see https://github.com/w23/xash3d-fwgs/issues/746 +#define DEFAULT_STAGING_SIZE (4*128*1024*1024) #define MAX_STAGING_ALLOCS (2048) #define MAX_CONCURRENT_FRAMES 2 #define COMMAND_BUFFER_COUNT (MAX_CONCURRENT_FRAMES + 1) // to accommodate two frames in flight plus something trying to upload data before waiting for the next frame to complete @@ -102,6 +102,7 @@ void R_VkStagingShutdown(void) { // FIXME There's a severe race condition here. Submitting things manually and prematurely (before framectl had a chance to synchronize with the previous frame) // may lead to data races and memory corruption (e.g. 
writing into memory that's being read in some pipeline stage still going) void R_VkStagingFlushSync( void ) { + ASSERT(!"SHOULD NEVER HAPPEN"); APROF_SCOPE_DECLARE_BEGIN(function, __FUNCTION__); vk_combuf_t *combuf = R_VkStagingCommit(); @@ -283,6 +284,8 @@ VkCommandBuffer R_VkStagingGetCommandBuffer(void) { } vk_combuf_t *R_VkStagingCommit(void) { + DEBUG("%s: buffers.count=%d current=%p", __FUNCTION__, g_staging.buffers.count, g_staging.current); + if (!g_staging.buffers.count && !g_staging.current) return VK_NULL_HANDLE; @@ -292,6 +295,8 @@ vk_combuf_t *R_VkStagingCommit(void) { } void R_VkStagingFrameBegin(void) { + R_VkStagingCommit(); // .... ugh + R_FlippingBuffer_Flip(&g_staging.buffer_alloc); g_staging.buffers.count = 0; From dadc47c9e947a51585b7729317b5fd42b7b4b667 Mon Sep 17 00:00:00 2001 From: Ivan Avdeev Date: Fri, 29 Nov 2024 15:18:49 -0500 Subject: [PATCH 08/62] vk: add PTR_CAST to silence -Werror=cast-align --- ref/vk/r_textures.c | 11 ++--- ref/vk/unordered_roadmap.c | 11 ++--- ref/vk/vk_common.h | 4 ++ ref/vk/vk_geometry.c | 4 +- ref/vk/vk_render.c | 6 +-- ref/vk/vk_rtx.c | 2 +- ref/vk/vk_sprite.c | 14 +++---- ref/vk/vk_studio.c | 82 +++++++++++++++++++------------------- ref/vk/vk_studio_model.c | 10 ++--- ref/vk/vk_textures.c | 8 ++-- 10 files changed, 79 insertions(+), 73 deletions(-) diff --git a/ref/vk/r_textures.c b/ref/vk/r_textures.c index 4a581d267c..55a7611c2e 100644 --- a/ref/vk/r_textures.c +++ b/ref/vk/r_textures.c @@ -177,14 +177,15 @@ static void createDefaultTextures( void ) // emo-texture from quake1 pic = Common_FakeImage( 16, 16, 1, IMAGE_HAS_COLOR ); + uint *const buffer = PTR_CAST(uint, pic->buffer); for( y = 0; y < 16; y++ ) { for( x = 0; x < 16; x++ ) { if(( y < 8 ) ^ ( x < 8 )) - ((uint *)pic->buffer)[y*16+x] = 0xFFFF00FF; - else ((uint *)pic->buffer)[y*16+x] = 0xFF000000; + buffer[y*16+x] = 0xFFFF00FF; + else buffer[y*16+x] = 0xFF000000; } } @@ -211,19 +212,19 @@ static void createDefaultTextures( void ) // white texture pic = Common_FakeImage( 4, 4, 1, IMAGE_HAS_COLOR ); for( x = 0; x < 16; x++ ) - ((uint *)pic->buffer)[x] = 0xFFFFFFFF; + buffer[x] = 0xFFFFFFFF; tglob.whiteTexture = R_TextureUploadFromBufferNew( REF_WHITE_TEXTURE, pic, TF_COLORMAP ); // gray texture pic = Common_FakeImage( 4, 4, 1, IMAGE_HAS_COLOR ); for( x = 0; x < 16; x++ ) - ((uint *)pic->buffer)[x] = 0xFF7F7F7F; + buffer[x] = 0xFF7F7F7F; tglob.grayTexture = R_TextureUploadFromBufferNew( REF_GRAY_TEXTURE, pic, TF_COLORMAP ); // black texture pic = Common_FakeImage( 4, 4, 1, IMAGE_HAS_COLOR ); for( x = 0; x < 16; x++ ) - ((uint *)pic->buffer)[x] = 0xFF000000; + buffer[x] = 0xFF000000; tglob.blackTexture = R_TextureUploadFromBufferNew( REF_BLACK_TEXTURE, pic, TF_COLORMAP ); // cinematic dummy diff --git a/ref/vk/unordered_roadmap.c b/ref/vk/unordered_roadmap.c index e98935a7d5..50917a5360 100644 --- a/ref/vk/unordered_roadmap.c +++ b/ref/vk/unordered_roadmap.c @@ -10,6 +10,7 @@ #define ERR(msg, ...) fprintf(stderr, msg, ##__VA_ARGS__) #define ASSERT(...)
assert(__VA_ARGS__) #define COUNTOF(a) (sizeof(a)/sizeof(a[0])) +#define PTR_CAST(type, ptr) ((type*)(void*)(ptr)) #endif #if defined(_WIN32) && !defined(strcasecmp) @@ -49,7 +50,7 @@ void urmomInit(const urmom_desc_t* desc) { ASSERT((desc->count & (desc->count - 1)) == 0); for (int i = 0; i < desc->count; ++i) { - urmom_header_t *hdr = (urmom_header_t*)(ptr + desc->item_size * i); + urmom_header_t *hdr = PTR_CAST(urmom_header_t, ptr + desc->item_size * i); hdr->state = 0; hdr->hash = 0; } @@ -92,7 +93,7 @@ int urmomFind(const urmom_desc_t* desc, const char* key) { const int start_index = hash & mask; for (int index = start_index;;) { - const urmom_header_t *hdr = (urmom_header_t*)(ptr + desc->item_size * index); + const urmom_header_t *hdr = PTR_CAST(const urmom_header_t, ptr + desc->item_size * index); if (URMOM_IS_OCCUPIED(*hdr)) { if (hdr->hash == hash && sameKey(desc->type, key, hdr->key)) @@ -122,7 +123,7 @@ urmom_insert_t urmomInsert(const urmom_desc_t* desc, const char *key) { int index = start_index; int first_available = -1; for (;;) { - const urmom_header_t *hdr = (urmom_header_t*)(ptr + desc->item_size * index); + const urmom_header_t *hdr = PTR_CAST(const urmom_header_t, ptr + desc->item_size * index); if (URMOM_IS_OCCUPIED(*hdr)) { if (hdr->hash == hash && sameKey(desc->type, key, hdr->key)) @@ -149,7 +150,7 @@ urmom_insert_t urmomInsert(const urmom_desc_t* desc, const char *key) { if (first_available < 0) return (urmom_insert_t){.index = -1, .created = 0}; - urmom_header_t *hdr = (urmom_header_t*)(ptr + desc->item_size * first_available); + urmom_header_t *hdr = PTR_CAST(urmom_header_t, ptr + desc->item_size * first_available); hdr->hash = hash; hdr->state = 1; @@ -169,7 +170,7 @@ int urmomRemove(const urmom_desc_t* desc, const char *key) { void urmomRemoveByIndex(const urmom_desc_t* desc, int index) { char *ptr = desc->array; - urmom_header_t *hdr = (urmom_header_t*)(ptr + desc->item_size * index); + urmom_header_t *hdr = PTR_CAST(urmom_header_t, ptr + desc->item_size * index); if (!URMOM_IS_OCCUPIED(*hdr)) { ERR("Hashmap=%p(is=%d, n=%d): lot %d is not occupied", desc->array, desc->item_size, desc->count, index); diff --git a/ref/vk/vk_common.h b/ref/vk/vk_common.h index eb1156bb4b..0e41e2f8d8 100644 --- a/ref/vk/vk_common.h +++ b/ref/vk/vk_common.h @@ -22,6 +22,10 @@ #define COUNTOF(a) (sizeof(a)/sizeof((a)[0])) +// Silences -Werror=cast-align +// TODO assert for proper alignment for type_ +#define PTR_CAST(type_, ptr_) ((type_*)(void*)(ptr_)) + inline static int clampi32(int v, int min, int max) { if (v < min) return min; if (v > max) return max; diff --git a/ref/vk/vk_geometry.c b/ref/vk/vk_geometry.c index d9725381e5..e4fa0b2652 100644 --- a/ref/vk/vk_geometry.c +++ b/ref/vk/vk_geometry.c @@ -76,7 +76,7 @@ r_geometry_range_lock_t R_GeometryRangeLock(const r_geometry_range_t *range) { return (r_geometry_range_lock_t){ .vertices = (vk_vertex_t *)staging.ptr, - .indices = (uint16_t *)((char*)staging.ptr + vertices_size), + .indices = PTR_CAST(uint16_t, (char*)staging.ptr + vertices_size), .impl_ = { .staging_handle = staging.handle, }, @@ -150,7 +150,7 @@ qboolean R_GeometryBufferAllocOnceAndLock(r_geometry_buffer_lock_t *lock, int ve }, .indices = { .count = index_count, - .ptr = (uint16_t *)((char*)staging.ptr + vertices_size), + .ptr = PTR_CAST(uint16_t, (char*)staging.ptr + vertices_size), .unit_offset = indices_offset, }, .impl_ = { diff --git a/ref/vk/vk_render.c b/ref/vk/vk_render.c index 5ad72af9bd..2505f30f20 100644 --- a/ref/vk/vk_render.c +++
b/ref/vk/vk_render.c @@ -552,7 +552,7 @@ static uint32_t getUboOffset_FIXME( void ) { if (g_render_state.current_ubo_offset_FIXME == ALO_ALLOC_FAILED) return UINT32_MAX; - uniform_data_t *const ubo = (uniform_data_t*)((byte*)g_render.uniform_buffer.mapped + g_render_state.current_ubo_offset_FIXME); + uniform_data_t *const ubo = PTR_CAST(uniform_data_t, (byte*)g_render.uniform_buffer.mapped + g_render_state.current_ubo_offset_FIXME); memcpy(&g_render_state.current_uniform_data, &g_render_state.dirty_uniform_data, sizeof(g_render_state.dirty_uniform_data)); memcpy(ubo, &g_render_state.current_uniform_data, sizeof(*ubo)); g_render_state.uniform_data_set_mask |= UNIFORM_UPLOADED; @@ -613,7 +613,7 @@ static uint32_t writeDlightsToUBO( void ) gEngine.Con_Printf(S_ERROR "Cannot allocate UBO for DLights\n"); return UINT32_MAX; } - ubo_lights = (vk_ubo_lights_t*)((byte*)(g_render.uniform_buffer.mapped) + ubo_lights_offset); + ubo_lights = PTR_CAST(vk_ubo_lights_t, (byte*)(g_render.uniform_buffer.mapped) + ubo_lights_offset); // TODO this should not be here (where? vk_scene?) for (int i = 0; i < MAX_DLIGHTS && num_lights < ARRAYSIZE(ubo_lights->light); ++i) { @@ -747,7 +747,7 @@ void VK_RenderEnd( VkCommandBuffer cmdbuf, qboolean draw, uint32_t width, uint32 // Compute and upload UBO stuff { - sky_uniform_data_t* const sky_ubo = (sky_uniform_data_t*)((byte*)g_render.uniform_buffer.mapped + ubo_offset); + sky_uniform_data_t* const sky_ubo = PTR_CAST(sky_uniform_data_t, (byte*)g_render.uniform_buffer.mapped + ubo_offset); // FIXME model matrix Matrix4x4_ToArrayFloatGL(g_render_state.projection_view, (float*)sky_ubo->mvp); diff --git a/ref/vk/vk_rtx.c b/ref/vk/vk_rtx.c index 472f20b639..eb717777eb 100644 --- a/ref/vk/vk_rtx.c +++ b/ref/vk/vk_rtx.c @@ -167,7 +167,7 @@ static uint32_t getRandomSeed( void ) { } static void prepareUniformBuffer( const vk_ray_frame_render_args_t *args, int frame_index, uint32_t frame_counter, float fov_angle_y, int frame_width, int frame_height ) { - struct UniformBuffer *ubo = (struct UniformBuffer*)((char*)g_rtx.uniform_buffer.mapped + frame_index * g_rtx.uniform_unit_size); + struct UniformBuffer *ubo = PTR_CAST(struct UniformBuffer, (char*)g_rtx.uniform_buffer.mapped + frame_index * g_rtx.uniform_unit_size); matrix4x4 proj_inv, view_inv; Matrix4x4_Invert_Full(proj_inv, *args->projection); diff --git a/ref/vk/vk_sprite.c b/ref/vk/vk_sprite.c index f85549cfe7..244af16feb 100644 --- a/ref/vk/vk_sprite.c +++ b/ref/vk/vk_sprite.c @@ -174,7 +174,7 @@ static mspriteframe_t *R_GetSpriteFrame( const model_t *pModel, int frame, float } else if( psprite->frames[frame].type == SPR_GROUP ) { - pspritegroup = (mspritegroup_t *)psprite->frames[frame].frameptr; + pspritegroup = PTR_CAST(mspritegroup_t, psprite->frames[frame].frameptr); pintervals = pspritegroup->intervals; numframes = pspritegroup->numframes; fullinterval = pintervals[numframes-1]; @@ -198,7 +198,7 @@ static mspriteframe_t *R_GetSpriteFrame( const model_t *pModel, int frame, float gEngine.Con_Printf(S_WARN "VK FIXME: %s doesn't know about viewangles\n", __FUNCTION__); // e.g. 
doom-style sprite monsters - pspritegroup = (mspritegroup_t *)psprite->frames[frame].frameptr; + pspritegroup = PTR_CAST(mspritegroup_t, psprite->frames[frame].frameptr); pspriteframe = pspritegroup->frames[angleframe]; } @@ -261,7 +261,7 @@ static const dframetype_t *VK_SpriteLoadFrame( model_t *mod, const void *pin, ms pspriteframe->gl_texturenum = gl_texturenum; *ppframe = pspriteframe; - return ( const dframetype_t* )(( const byte* )pin + sizeof( dspriteframe_t ) + pinframe.width * pinframe.height * bytes ); + return PTR_CAST(const dframetype_t, ( const byte* )pin + sizeof( dspriteframe_t ) + pinframe.width * pinframe.height * bytes ); } static const dframetype_t *VK_SpriteLoadGroup( model_t *mod, const void *pin, mspriteframe_t **ppframe, int framenum, const SpriteLoadContext *ctx ) @@ -609,7 +609,7 @@ static float R_GetSpriteFrameInterpolant( cl_entity_t *ent, mspriteframe_t **old } else if( psprite->frames[frame].type == FRAME_GROUP ) { - pspritegroup = (mspritegroup_t *)psprite->frames[frame].frameptr; + pspritegroup = PTR_CAST(mspritegroup_t, psprite->frames[frame].frameptr); pintervals = pspritegroup->intervals; numframes = pspritegroup->numframes; fullinterval = pintervals[numframes-1]; @@ -681,10 +681,10 @@ static float R_GetSpriteFrameInterpolant( cl_entity_t *ent, mspriteframe_t **old lerpFrac = 1.0f; } - pspritegroup = (mspritegroup_t *)psprite->frames[ent->latched.prevblending[0]].frameptr; + pspritegroup = PTR_CAST(mspritegroup_t, psprite->frames[ent->latched.prevblending[0]].frameptr); if( oldframe ) *oldframe = pspritegroup->frames[angleframe]; - pspritegroup = (mspritegroup_t *)psprite->frames[frame].frameptr; + pspritegroup = PTR_CAST(mspritegroup_t, psprite->frames[frame].frameptr); if( curframe ) *curframe = pspritegroup->frames[angleframe]; } @@ -1099,7 +1099,7 @@ void Mod_SpriteUnloadTextures( void *data ) } else { - pspritegroup = (mspritegroup_t *)psprite->frames[i].frameptr; + pspritegroup = PTR_CAST(mspritegroup_t, psprite->frames[i].frameptr); for( j = 0; j < pspritegroup->numframes; j++ ) { diff --git a/ref/vk/vk_studio.c b/ref/vk/vk_studio.c index 174649bdaa..cc2eab0623 100644 --- a/ref/vk/vk_studio.c +++ b/ref/vk/vk_studio.c @@ -228,7 +228,7 @@ static qboolean R_StudioComputeBBox( vec3_t bbox[8] ) if( e->curstate.sequence < 0 || e->curstate.sequence >= m_pStudioHeader->numseq ) e->curstate.sequence = 0; - pseqdesc = (mstudioseqdesc_t *)((byte *)m_pStudioHeader + m_pStudioHeader->seqindex) + e->curstate.sequence; + pseqdesc = PTR_CAST(mstudioseqdesc_t, (byte *)m_pStudioHeader + m_pStudioHeader->seqindex) + e->curstate.sequence; // add sequence box to the model box AddPointToBounds( pseqdesc->bbmin, mins, maxs ); @@ -520,7 +520,7 @@ void R_StudioCalcBoneAdj( float dadt, float *adj, const byte *pcontroller1, cons float value = 0.0f; int i, j; - pbonecontroller = (mstudiobonecontroller_t *)((byte *)m_pStudioHeader + m_pStudioHeader->bonecontrollerindex); + pbonecontroller = PTR_CAST(mstudiobonecontroller_t, (byte *)m_pStudioHeader + m_pStudioHeader->bonecontrollerindex); for( j = 0; j < m_pStudioHeader->numbonecontrollers; j++ ) { @@ -599,7 +599,7 @@ void R_StudioCalcRotations( cl_entity_t *e, float pos[][3], vec4_t *q, mstudiose s = (f - frame); // add in programtic controllers - pbone = (mstudiobone_t *)((byte *)m_pStudioHeader + m_pStudioHeader->boneindex); + pbone = PTR_CAST(mstudiobone_t, (byte *)m_pStudioHeader + m_pStudioHeader->boneindex); R_StudioCalcBoneAdj( dadt, adj, e->curstate.controller, e->latched.prevcontroller, e->mouth.mouthopen ); @@ -628,13 
+628,13 @@ void R_StudioMergeBones( cl_entity_t *e, model_t *m_pSubModel ) if( e->curstate.sequence >= m_pStudioHeader->numseq ) e->curstate.sequence = 0; - pseqdesc = (mstudioseqdesc_t *)((byte *)m_pStudioHeader + m_pStudioHeader->seqindex) + e->curstate.sequence; + pseqdesc = PTR_CAST(mstudioseqdesc_t, (byte *)m_pStudioHeader + m_pStudioHeader->seqindex) + e->curstate.sequence; f = R_StudioEstimateFrame( e, pseqdesc, g_studio.time ); panim = gEngine.R_StudioGetAnim( m_pStudioHeader, m_pSubModel, pseqdesc ); R_StudioCalcRotations( e, pos, q, pseqdesc, panim, f ); - pbones = (mstudiobone_t *)((byte *)m_pStudioHeader + m_pStudioHeader->boneindex); + pbones = PTR_CAST(mstudiobone_t, (byte *)m_pStudioHeader + m_pStudioHeader->boneindex); for( i = 0; i < m_pStudioHeader->numbones; i++ ) { @@ -688,7 +688,7 @@ void R_StudioSetupBones( cl_entity_t *e ) if( e->curstate.sequence >= m_pStudioHeader->numseq ) e->curstate.sequence = 0; - pseqdesc = (mstudioseqdesc_t *)((byte *)m_pStudioHeader + m_pStudioHeader->seqindex) + e->curstate.sequence; + pseqdesc = PTR_CAST(mstudioseqdesc_t, (byte *)m_pStudioHeader + m_pStudioHeader->seqindex) + e->curstate.sequence; f = R_StudioEstimateFrame( e, pseqdesc, g_studio.time ); @@ -731,7 +731,7 @@ void R_StudioSetupBones( cl_entity_t *e ) static vec4_t q1b[MAXSTUDIOBONES]; float s; - pseqdesc = (mstudioseqdesc_t *)((byte *)m_pStudioHeader + m_pStudioHeader->seqindex) + e->latched.prevsequence; + pseqdesc = PTR_CAST(mstudioseqdesc_t, (byte *)m_pStudioHeader + m_pStudioHeader->seqindex) + e->latched.prevsequence; panim = gEngine.R_StudioGetAnim( m_pStudioHeader, RI.currentmodel, pseqdesc ); // clip prevframe @@ -770,7 +770,7 @@ void R_StudioSetupBones( cl_entity_t *e ) e->latched.prevframe = f; } - pbones = (mstudiobone_t *)((byte *)m_pStudioHeader + m_pStudioHeader->boneindex); + pbones = PTR_CAST(mstudiobone_t, (byte *)m_pStudioHeader + m_pStudioHeader->boneindex); // calc gait animation if( m_pPlayerInfo && m_pPlayerInfo->gaitsequence != 0 ) @@ -780,7 +780,7 @@ void R_StudioSetupBones( cl_entity_t *e ) if( m_pPlayerInfo->gaitsequence >= m_pStudioHeader->numseq ) m_pPlayerInfo->gaitsequence = 0; - pseqdesc = (mstudioseqdesc_t *)((byte *)m_pStudioHeader + m_pStudioHeader->seqindex) + m_pPlayerInfo->gaitsequence; + pseqdesc = PTR_CAST(mstudioseqdesc_t, (byte *)m_pStudioHeader + m_pStudioHeader->seqindex) + m_pPlayerInfo->gaitsequence; panim = gEngine.R_StudioGetAnim( m_pStudioHeader, RI.currentmodel, pseqdesc ); R_StudioCalcRotations( e, pos2, q2, pseqdesc, panim, m_pPlayerInfo->gaitframe ); @@ -824,7 +824,7 @@ static void R_StudioSaveBones( void ) mstudiobone_t *pbones; int i; - pbones = (mstudiobone_t *)((byte *)m_pStudioHeader + m_pStudioHeader->boneindex); + pbones = PTR_CAST(mstudiobone_t, (byte *)m_pStudioHeader + m_pStudioHeader->boneindex); g_studio.cached_numbones = m_pStudioHeader->numbones; for( i = 0; i < m_pStudioHeader->numbones; i++ ) @@ -861,8 +861,8 @@ void R_StudioBuildNormalTable( void ) { short *ptricmds; - pmesh = (mstudiomesh_t *)((byte *)m_pStudioHeader + m_pSubModel->meshindex) + j; - ptricmds = (short *)((byte *)m_pStudioHeader + pmesh->triindex); + pmesh = PTR_CAST(mstudiomesh_t, (byte *)m_pStudioHeader + m_pSubModel->meshindex) + j; + ptricmds = PTR_CAST(short, (byte *)m_pStudioHeader + pmesh->triindex); while(( i = *( ptricmds++ ))) { @@ -912,8 +912,8 @@ void R_StudioGenerateNormals( void ) { short *ptricmds; - pmesh = (mstudiomesh_t *)((byte *)m_pStudioHeader + m_pSubModel->meshindex) + j; - ptricmds = (short *)((byte *)m_pStudioHeader + 
pmesh->triindex); + pmesh = PTR_CAST(mstudiomesh_t, (byte *)m_pStudioHeader + m_pSubModel->meshindex) + j; + ptricmds = PTR_CAST(short, (byte *)m_pStudioHeader + pmesh->triindex); while(( i = *( ptricmds++ ))) { @@ -1058,7 +1058,7 @@ static void R_StudioCalcAttachments( void ) int i; // calculate attachment points - pAtt = (mstudioattachment_t *)((byte *)m_pStudioHeader + m_pStudioHeader->attachmentindex); + pAtt = PTR_CAST(mstudioattachment_t, (byte *)m_pStudioHeader + m_pStudioHeader->attachmentindex); for( i = 0; i < Q_min( MAXSTUDIOATTACHMENTS, m_pStudioHeader->numattachments ); i++ ) { @@ -1075,12 +1075,12 @@ static void R_StudioSetupModel( int bodypart, void **ppbodypart, void **ppsubmod g_studio_current.bodypart_index = bodypart; - m_pBodyPart = (mstudiobodyparts_t *)((byte *)m_pStudioHeader + m_pStudioHeader->bodypartindex) + bodypart; + m_pBodyPart = PTR_CAST(mstudiobodyparts_t, (byte *)m_pStudioHeader + m_pStudioHeader->bodypartindex) + bodypart; index = RI.currententity->curstate.body / m_pBodyPart->base; index = index % m_pBodyPart->nummodels; - m_pSubModel = (mstudiomodel_t *)((byte *)m_pStudioHeader + m_pBodyPart->modelindex) + index; + m_pSubModel = PTR_CAST(mstudiomodel_t, (byte *)m_pStudioHeader + m_pBodyPart->modelindex) + index; if( ppbodypart ) *ppbodypart = m_pBodyPart; if( ppsubmodel ) *ppsubmodel = m_pSubModel; @@ -1543,7 +1543,7 @@ static int R_StudioSetupSkin( studiohdr_t *ptexturehdr, int index ) // NOTE: user may ignore to call StudioRemapColors and remap_info will be unavailable if( m_fDoRemap ) ptexture = gEngine.CL_GetRemapInfoForEntity( RI.currententity )->ptexture; - if( !ptexture ) ptexture = (mstudiotexture_t *)((byte *)ptexturehdr + ptexturehdr->textureindex); // fallback + if( !ptexture ) ptexture = PTR_CAST(mstudiotexture_t, (byte *)ptexturehdr + ptexturehdr->textureindex); // fallback /* FIXME VK if( r_lightmap->value && !r_fullbright->value ) @@ -1572,7 +1572,7 @@ mstudiotexture_t *R_StudioGetTexture( cl_entity_t *e ) if( !thdr ) return NULL; if( m_fDoRemap ) ptexture = gEngine.CL_GetRemapInfoForEntity( e )->ptexture; - else ptexture = (mstudiotexture_t *)((byte *)thdr + thdr->textureindex); + else ptexture = PTR_CAST(mstudiotexture_t, (byte *)thdr + thdr->textureindex); return ptexture; } @@ -1909,14 +1909,14 @@ static void buildStudioSubmodelGeometry(build_submodel_geometry_t args) { // safety bounding the skinnum const int m_skinnum = bound( 0, RI.currententity->curstate.skin, ( m_pStudioHeader->numskinfamilies - 1 )); - const mstudiotexture_t *const ptexture = (const mstudiotexture_t *)((const byte *)m_pStudioHeader + m_pStudioHeader->textureindex); + const mstudiotexture_t *const ptexture = PTR_CAST(const mstudiotexture_t, (const byte *)m_pStudioHeader + m_pStudioHeader->textureindex); const byte *const pvertbone = ((const byte *)m_pStudioHeader + m_pSubModel->vertinfoindex); const byte *pnormbone = ((const byte *)m_pStudioHeader + m_pSubModel->norminfoindex); - const vec3_t *pstudioverts = (const vec3_t *)((const byte *)m_pStudioHeader + m_pSubModel->vertindex); - const vec3_t *pstudionorms = (const vec3_t *)((const byte *)m_pStudioHeader + m_pSubModel->normindex); + const vec3_t *pstudioverts = PTR_CAST(const vec3_t, (const byte *)m_pStudioHeader + m_pSubModel->vertindex); + const vec3_t *pstudionorms = PTR_CAST(const vec3_t, (const byte *)m_pStudioHeader + m_pSubModel->normindex); - const short *pskinref = (short *)((byte *)m_pStudioHeader + m_pStudioHeader->skinindex); + const short *pskinref = PTR_CAST(const short, (byte *)m_pStudioHeader + 
m_pStudioHeader->skinindex); if( m_skinnum != 0 ) pskinref += (m_skinnum * m_pStudioHeader->numskinref); // Compute inverse entity matrix, as we need vertices to be in local model space instead of global world space. @@ -1973,7 +1973,7 @@ static void buildStudioSubmodelGeometry(build_submodel_geometry_t args) { R_StudioGenerateNormals(); - const mstudiomesh_t *const pmesh = (mstudiomesh_t *)((byte *)m_pStudioHeader + m_pSubModel->meshindex); + const mstudiomesh_t *const pmesh = PTR_CAST(const mstudiomesh_t, (byte *)m_pStudioHeader + m_pSubModel->meshindex); qboolean need_sort = false; for( int j = 0, k = 0; j < m_pSubModel->nummesh; j++ ) @@ -2024,12 +2024,12 @@ static void buildStudioSubmodelGeometry(build_submodel_geometry_t args) { */ // NOTE: rewind normals at start - pstudionorms = (const vec3_t *)((const byte *)m_pStudioHeader + m_pSubModel->normindex); + pstudionorms = PTR_CAST(const vec3_t, (const byte *)m_pStudioHeader + m_pSubModel->normindex); int vertices_offset = 0, indices_offset = 0; for( int j = 0; j < m_pSubModel->nummesh; j++ ) { const mstudiomesh_t *const pmesh = g_studio.meshes[j].mesh; - const short *const ptricmds = (short *)((byte *)m_pStudioHeader + pmesh->triindex); + const short *const ptricmds = PTR_CAST(const short, (byte *)m_pStudioHeader + pmesh->triindex); const int face_flags = ptexture[pskinref[pmesh->skinref]].flags | g_nForceFaceFlags; @@ -2113,9 +2113,9 @@ static qboolean studioSubmodelRenderInit(r_studio_submodel_render_t *render_subm // TODO should this be part of r_studio_model_info_t? int vertex_count = 0, index_count = 0; { - const mstudiomesh_t *const pmesh = (mstudiomesh_t *)((byte *)m_pStudioHeader + m_pSubModel->meshindex); + const mstudiomesh_t *const pmesh = PTR_CAST(const mstudiomesh_t, (byte *)m_pStudioHeader + m_pSubModel->meshindex); for(int i = 0; i < submodel->nummesh; i++) { - const short* const ptricmds = (short *)((byte *)m_pStudioHeader + pmesh[i].triindex); + const short* const ptricmds = PTR_CAST(const short, (byte *)m_pStudioHeader + pmesh[i].triindex); addVerticesIndicesCounts(ptricmds, &vertex_count, &index_count); } @@ -2449,7 +2449,7 @@ int R_GetEntityRenderMode( cl_entity_t *ent ) } return ent->curstate.rendermode; } - ptexture = (mstudiotexture_t *)((byte *)phdr + phdr->textureindex); + ptexture = PTR_CAST(mstudiotexture_t, (byte *)phdr + phdr->textureindex); for( opaque = trans = i = 0; i < phdr->numtextures; i++, ptexture++ ) { @@ -2506,7 +2506,7 @@ static void R_StudioClientEvents( void ) } sequence = bound( 0, e->curstate.sequence, m_pStudioHeader->numseq - 1 ); - pseqdesc = (mstudioseqdesc_t *)((byte *)m_pStudioHeader + m_pStudioHeader->seqindex) + sequence; + pseqdesc = PTR_CAST(mstudioseqdesc_t, (byte *)m_pStudioHeader + m_pStudioHeader->seqindex) + sequence; // no events for this animation if( pseqdesc->numevents == 0 ) @@ -2514,7 +2514,7 @@ static void R_StudioClientEvents( void ) end = R_StudioEstimateFrame( e, pseqdesc, g_studio.time ); start = end - e->curstate.framerate * gpGlobals->frametime * pseqdesc->fps; - pevent = (mstudioevent_t *)((byte *)m_pStudioHeader + pseqdesc->eventindex); + pevent = PTR_CAST(mstudioevent_t, (byte *)m_pStudioHeader + pseqdesc->eventindex); if( e->latched.sequencetime == e->curstate.animtime ) { @@ -2572,7 +2572,7 @@ static void R_StudioSetupRenderer( int rendermode ) if( phdr && FBitSet( phdr->flags, STUDIO_HAS_BONEINFO )) { // NOTE: extended boneinfo goes immediately after bones - mstudioboneinfo_t *boneinfo = (mstudioboneinfo_t *)((byte *)phdr + phdr->boneindex + phdr->numbones * 
sizeof( mstudiobone_t )); + mstudioboneinfo_t *boneinfo = PTR_CAST(mstudioboneinfo_t, (byte *)phdr + phdr->boneindex + phdr->numbones * sizeof( mstudiobone_t )); for( i = 0; i < phdr->numbones; i++ ) Matrix3x4_ConcatTransforms( g_studio.worldtransform[i], g_studio.bonestransform[i], boneinfo[i].poseToBone ); @@ -2625,8 +2625,8 @@ static void R_StudioDrawPointsShadow( void ) { short *ptricmds; - pmesh = (mstudiomesh_t *)((byte *)m_pStudioHeader + m_pSubModel->meshindex) + k; - ptricmds = (short *)((byte *)m_pStudioHeader + pmesh->triindex); + pmesh = PTR_CAST(mstudiomesh_t, (byte *)m_pStudioHeader + m_pSubModel->meshindex) + k; + ptricmds = PTR_CAST(short, (byte *)m_pStudioHeader + pmesh->triindex); /* FIXME VK r_stats.c_studio_polys += pmesh->numtris; @@ -2815,7 +2815,7 @@ void R_StudioProcessGait( entity_state_t *pplayer ) dt = bound( 0.0f, g_studio.frametime, 1.0f ); - pseqdesc = (mstudioseqdesc_t *)((byte *)m_pStudioHeader + m_pStudioHeader->seqindex) + RI.currententity->curstate.sequence; + pseqdesc = PTR_CAST(mstudioseqdesc_t, (byte *)m_pStudioHeader + m_pStudioHeader->seqindex) + RI.currententity->curstate.sequence; R_StudioPlayerBlend( pseqdesc, &iBlend, &RI.currententity->angles[PITCH] ); @@ -2861,7 +2861,7 @@ void R_StudioProcessGait( entity_state_t *pplayer ) if( pplayer->gaitsequence >= m_pStudioHeader->numseq ) pplayer->gaitsequence = 0; - pseqdesc = (mstudioseqdesc_t *)((byte *)m_pStudioHeader + m_pStudioHeader->seqindex) + pplayer->gaitsequence; + pseqdesc = PTR_CAST(mstudioseqdesc_t, (byte *)m_pStudioHeader + m_pStudioHeader->seqindex) + pplayer->gaitsequence; // calc gait frame if( pseqdesc->linearmovement[0] > 0 ) @@ -3403,7 +3403,7 @@ void Mod_StudioLoadTextures( model_t *mod, void *data ) if( !phdr ) return; - ptexture = (mstudiotexture_t *)(((byte *)phdr) + phdr->textureindex); + ptexture = PTR_CAST(mstudiotexture_t, ((byte *)phdr) + phdr->textureindex); if( phdr->textureindex > 0 && phdr->numtextures <= MAXSTUDIOSKINS ) { for( i = 0; i < phdr->numtextures; i++ ) @@ -3420,7 +3420,7 @@ void Mod_StudioUnloadTextures( void *data ) if( !phdr ) return; - ptexture = (mstudiotexture_t *)(((byte *)phdr) + phdr->textureindex); + ptexture = PTR_CAST(mstudiotexture_t, ((byte *)phdr) + phdr->textureindex); // release all textures for( i = 0; i < phdr->numtextures; i++ ) @@ -3538,12 +3538,12 @@ static void pfnGetAliasScale( float *x, float *y ) static float ****pfnStudioGetBoneTransform( void ) { - return (float ****)g_studio.bonestransform; + return PTR_CAST(float ***, g_studio.bonestransform); } static float ****pfnStudioGetLightTransform( void ) { - return (float ****)g_studio.lighttransform; + return PTR_CAST(float ***, g_studio.lighttransform); } static float ***pfnStudioGetAliasTransform( void ) @@ -3553,7 +3553,7 @@ static float ***pfnStudioGetAliasTransform( void ) static float ***pfnStudioGetRotationMatrix( void ) { - return (float ***)g_studio.rotationmatrix; + return PTR_CAST(float **, g_studio.rotationmatrix); } static engine_studio_api_t gStudioAPI = diff --git a/ref/vk/vk_studio_model.c b/ref/vk/vk_studio_model.c index aaddafaeaa..7c0c480df5 100644 --- a/ref/vk/vk_studio_model.c +++ b/ref/vk/vk_studio_model.c @@ -119,7 +119,7 @@ static qboolean isBoneSame(int b) { /* } */ static void studioModelProcessBonesAnimations(const model_t *const model, const studiohdr_t *const hdr, r_studio_submodel_info_t *submodels, int submodels_count) { - const mstudiobone_t* const pbone = (mstudiobone_t *)((byte *)hdr + hdr->boneindex); + const mstudiobone_t* const pbone = PTR_CAST(const 
mstudiobone_t, (byte *)hdr + hdr->boneindex); /* for (int i = 0; i < hdr->numbones; ++i) { */ /* const mstudiobone_t* const bone = pbone + i; */ @@ -127,7 +127,7 @@ static void studioModelProcessBonesAnimations(const model_t *const model, const /* } */ for (int i = 0; i < hdr->numseq; ++i) { - const mstudioseqdesc_t *const pseqdesc = (mstudioseqdesc_t *)((byte *)hdr + hdr->seqindex) + i; + const mstudioseqdesc_t *const pseqdesc = PTR_CAST(const mstudioseqdesc_t, (byte *)hdr + hdr->seqindex) + i; const mstudioanim_t* const panim = gEngine.R_StudioGetAnim( (studiohdr_t*)hdr, (model_t*)model, (mstudioseqdesc_t*)pseqdesc ); @@ -187,11 +187,11 @@ static void studioModelProcessBonesAnimations(const model_t *const model, const static int studioModelGetSubmodels(const studiohdr_t *hdr, r_studio_submodel_info_t *out_submodels) { int count = 0; for (int i = 0; i < hdr->numbodyparts; ++i) { - const mstudiobodyparts_t* const bodypart = (mstudiobodyparts_t *)((byte *)hdr + hdr->bodypartindex) + i; + const mstudiobodyparts_t* const bodypart = PTR_CAST(const mstudiobodyparts_t, (byte *)hdr + hdr->bodypartindex) + i; if (out_submodels) { DEBUG(" Bodypart %d/%d: %s (nummodels=%d)", i, hdr->numbodyparts - 1, bodypart->name, bodypart->nummodels); for (int j = 0; j < bodypart->nummodels; ++j) { - const mstudiomodel_t * const submodel = (mstudiomodel_t *)((byte *)hdr + bodypart->modelindex) + j; + const mstudiomodel_t * const submodel = PTR_CAST(const mstudiomodel_t, (byte *)hdr + bodypart->modelindex) + j; DEBUG(" Submodel %d: %s", j, submodel->name); out_submodels[count++].submodel_key = submodel; } @@ -212,7 +212,7 @@ const r_studio_model_info_t* R_StudioModelPreload(model_t *mod) { DEBUG("Studio model %p(%s) hdr=%p(%s), sequences=%d:", mod, mod->name, hdr, hdr->name, hdr->numseq); for (int i = 0; i < hdr->numseq; ++i) { - const mstudioseqdesc_t *const pseqdesc = (mstudioseqdesc_t *)((byte *)hdr + hdr->seqindex) + i; + const mstudioseqdesc_t *const pseqdesc = PTR_CAST(const mstudioseqdesc_t, (byte *)hdr + hdr->seqindex) + i; DEBUG(" %d: fps=%f numframes=%d", i, pseqdesc->fps, pseqdesc->numframes); } diff --git a/ref/vk/vk_textures.c b/ref/vk/vk_textures.c index ae633ba546..bb2e76a565 100644 --- a/ref/vk/vk_textures.c +++ b/ref/vk/vk_textures.c @@ -58,7 +58,7 @@ static void generateFallbackNoiseTextures( const rgbdata_t *pic ) { ERR("Generating bad quality regular noise textures as a fallback for blue noise textures"); const int blue_noise_count = pic->size / sizeof(uint32_t); - uint32_t *const scratch = (uint32_t*)pic->buffer; + uint32_t *const scratch = PTR_CAST(uint32_t, pic->buffer); // Fill with random data { @@ -348,9 +348,9 @@ static qboolean uploadRawKtx2( int tex_index, vk_texture_t *tex, const rgbdata_t const ktx2_index_t* index; const ktx2_level_t* levels; - header = (const ktx2_header_t*)(data + KTX2_IDENTIFIER_SIZE); - index = (const ktx2_index_t*)(data + KTX2_IDENTIFIER_SIZE + sizeof(ktx2_header_t)); - levels = (const ktx2_level_t*)(data + KTX2_IDENTIFIER_SIZE + sizeof(ktx2_header_t) + sizeof(ktx2_index_t)); + header = PTR_CAST(const ktx2_header_t, data + KTX2_IDENTIFIER_SIZE); + index = PTR_CAST(const ktx2_index_t, data + KTX2_IDENTIFIER_SIZE + sizeof(ktx2_header_t)); + levels = PTR_CAST(const ktx2_level_t, data + KTX2_IDENTIFIER_SIZE + sizeof(ktx2_header_t) + sizeof(ktx2_index_t)); DEBUG(" header:"); #define X(field) DEBUG(" " # field "=%d", header->field); From 0e79f699f5d9be1a4b9ee60745144fdb20baf30c Mon Sep 17 00:00:00 2001 From: Ivan Avdeev Date: Fri, 29 Nov 2024 15:19:02 -0500 Subject: 
[PATCH 09/62] vk: fixup printf types to fix linux build --- ref/vk/vk_brush.c | 2 +- ref/vk/vk_image.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ref/vk/vk_brush.c b/ref/vk/vk_brush.c index 61ac7f5aeb..f8830510bb 100644 --- a/ref/vk/vk_brush.c +++ b/ref/vk/vk_brush.c @@ -1688,7 +1688,7 @@ static qboolean fillBrushSurfaces(fill_geometries_args_t args) { // Apply all emissive surfaces found INFO("Loaded %d polylights, %d dynamic for %s model %s", - emissive_surfaces_count, args.bmodel->dynamic_polylights.count, args.is_static ? "static" : "movable", args.mod->name); + emissive_surfaces_count, (int)args.bmodel->dynamic_polylights.count, args.is_static ? "static" : "movable", args.mod->name); ASSERT(args.sizes.num_surfaces == num_geometries); ASSERT(args.sizes.animated_count == animated_count); diff --git a/ref/vk/vk_image.c b/ref/vk/vk_image.c index d2af0da1f1..4a6a660cbd 100644 --- a/ref/vk/vk_image.c +++ b/ref/vk/vk_image.c @@ -360,8 +360,8 @@ void R_VkImageUploadCommit( struct vk_combuf_s *combuf, VkPipelineStageFlagBits for (int j = 0; j < slices_count; ++j) { const VkBufferImageCopy *const slice = g_image_upload.slices.items + up->slices.begin + j; - DEBUG(" slice[%d]: off=%d rowl=%d height=%d off=(%d,%d,%d) ext=(%d,%d,%d)", - j, slice->bufferOffset, slice->bufferRowLength, slice->bufferImageHeight, + DEBUG(" slice[%d]: off=%llu rowl=%d height=%d off=(%d,%d,%d) ext=(%d,%d,%d)", + j, (unsigned long long)slice->bufferOffset, slice->bufferRowLength, slice->bufferImageHeight, slice->imageOffset.x, slice->imageOffset.y, slice->imageOffset.z, From c66ac8df7221dd4249fb41a423bc87a7b47e0e76 Mon Sep 17 00:00:00 2001 From: Ivan Avdeev Date: Fri, 29 Nov 2024 16:03:31 -0500 Subject: [PATCH 10/62] vk: fixup printf format of VkBuffer for 32bit --- ref/vk/vk_image.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ref/vk/vk_image.c b/ref/vk/vk_image.c index 4a6a660cbd..25ac052437 100644 --- a/ref/vk/vk_image.c +++ b/ref/vk/vk_image.c @@ -352,7 +352,7 @@ void R_VkImageUploadCommit( struct vk_combuf_s *combuf, VkPipelineStageFlagBits continue; const int slices_count = up->slices.end - up->slices.begin; - DEBUG("Uploading image \"%s\": buffer=%p slices=%d", up->image->name, up->staging.lock.buffer, slices_count); + DEBUG("Uploading image \"%s\": buffer=%08llx slices=%d", up->image->name, (unsigned long long)up->staging.lock.buffer, slices_count); ASSERT(up->staging.lock.buffer != VK_NULL_HANDLE); ASSERT(up->slices.end == up->slices.cursor); From 5001c22f877be5e5f61c744d5215a656a002de95 Mon Sep 17 00:00:00 2001 From: Ivan Avdeev Date: Fri, 29 Nov 2024 23:15:08 -0500 Subject: [PATCH 11/62] vk: rt: make all kusochki functions internal and static --- ref/vk/vk_ray_internal.h | 13 ------------- ref/vk/vk_ray_model.c | 36 +++++++++++++++++++++--------------- 2 files changed, 21 insertions(+), 28 deletions(-) diff --git a/ref/vk/vk_ray_internal.h b/ref/vk/vk_ray_internal.h index b9a3132712..84e7dd8dbe 100644 --- a/ref/vk/vk_ray_internal.h +++ b/ref/vk/vk_ray_internal.h @@ -93,19 +93,6 @@ qboolean RT_BlasUpdate(struct rt_blas_s *blas, const struct vk_render_geometry_s // TODO blas struct can have its addr field known VkDeviceAddress RT_BlasGetDeviceAddress(struct rt_blas_s *blas); -typedef struct rt_kusochki_s { - uint32_t offset; - int count; - int internal_index__; -} rt_kusochki_t; - -rt_kusochki_t RT_KusochkiAllocLong(int count); -uint32_t RT_KusochkiAllocOnce(int count); -void RT_KusochkiFree(const rt_kusochki_t*); - -//struct vk_render_geometry_s; -//qboolean 
RT_KusochkiUpload(uint32_t kusochki_offset, const struct vk_render_geometry_s *geoms, int geoms_count, int override_texture_id, const vec4_t *override_color); - qboolean RT_DynamicModelInit(void); void RT_DynamicModelShutdown(void); diff --git a/ref/vk/vk_ray_model.c b/ref/vk/vk_ray_model.c index 9f62e2fb03..11dc93899b 100644 --- a/ref/vk/vk_ray_model.c +++ b/ref/vk/vk_ray_model.c @@ -14,6 +14,12 @@ xvk_ray_model_state_t g_ray_model_state; +typedef struct rt_kusochki_s { + uint32_t offset; + int count; + int internal_index__; +} rt_kusochki_t; + typedef struct rt_model_s { struct rt_blas_s *blas; VkDeviceAddress blas_addr; @@ -151,7 +157,7 @@ void XVK_RayModel_ClearForNextFrame( void ) { R_DEBuffer_Flip(&g_ray_model_state.kusochki_alloc); } -rt_kusochki_t RT_KusochkiAllocLong(int count) { +static rt_kusochki_t kusochkiAllocLong(int count) { // TODO Proper block allocator, not just double-ended buffer uint32_t kusochki_offset = R_DEBuffer_Alloc(&g_ray_model_state.kusochki_alloc, LifetimeStatic, count, 1); @@ -167,7 +173,7 @@ rt_kusochki_t RT_KusochkiAllocLong(int count) { }; } -uint32_t RT_KusochkiAllocOnce(int count) { +static uint32_t kusochkiAllocOnce(int count) { // TODO Proper block allocator uint32_t kusochki_offset = R_DEBuffer_Alloc(&g_ray_model_state.kusochki_alloc, LifetimeDynamic, count, 1); @@ -179,13 +185,13 @@ uint32_t RT_KusochkiAllocOnce(int count) { return kusochki_offset; } -void RT_KusochkiFree(const rt_kusochki_t *kusochki) { +static void kusochkiFree(const rt_kusochki_t *kusochki) { // TODO block alloc PRINT_NOT_IMPLEMENTED(); } // TODO this function can't really fail. It'd mean that staging is completely broken. -qboolean RT_KusochkiUpload(uint32_t kusochki_offset, const struct vk_render_geometry_s *geoms, int geoms_count, const r_vk_material_t *override_material, const vec4_t *override_colors) { +qboolean kusochkiUpload(uint32_t kusochki_offset, const struct vk_render_geometry_s *geoms, int geoms_count, const r_vk_material_t *override_material, const vec4_t *override_colors) { const vk_staging_buffer_args_t staging_args = { .buffer = g_ray_model_state.kusochki_buffer.buffer, .offset = kusochki_offset * sizeof(vk_kusok_data_t), @@ -210,7 +216,7 @@ qboolean RT_KusochkiUpload(uint32_t kusochki_offset, const struct vk_render_geom } struct rt_model_s *RT_ModelCreate(rt_model_create_t args) { - const rt_kusochki_t kusochki = RT_KusochkiAllocLong(args.geometries_count); + const rt_kusochki_t kusochki = kusochkiAllocLong(args.geometries_count); if (kusochki.count == 0) { gEngine.Con_Printf(S_ERROR "Cannot allocate kusochki for %s\n", args.debug_name); return NULL; @@ -228,7 +234,7 @@ struct rt_model_s *RT_ModelCreate(rt_model_create_t args) { } // Invokes staging, so this should be after all resource creation - RT_KusochkiUpload(kusochki.offset, args.geometries, args.geometries_count, NULL, NULL); + kusochkiUpload(kusochki.offset, args.geometries, args.geometries_count, NULL, NULL); { rt_model_t *const ret = Mem_Malloc(vk_core.pool, sizeof(*ret)); @@ -243,7 +249,7 @@ struct rt_model_s *RT_ModelCreate(rt_model_create_t args) { RT_BlasDestroy(blas); if (kusochki.count) - RT_KusochkiFree(&kusochki); + kusochkiFree(&kusochki); return NULL; } @@ -256,7 +262,7 @@ void RT_ModelDestroy(struct rt_model_s* model) { RT_BlasDestroy(model->blas); if (model->kusochki.count) - RT_KusochkiFree(&model->kusochki); + kusochkiFree(&model->kusochki); Mem_Free(model); } @@ -274,7 +280,7 @@ qboolean RT_ModelUpdate(struct rt_model_s *model, const struct vk_render_geometr return false; // Also update 
materials - RT_KusochkiUpload(model->kusochki.offset, geometries, geometries_count, NULL, NULL); + kusochkiUpload(model->kusochki.offset, geometries, geometries_count, NULL, NULL); return true; } @@ -294,7 +300,7 @@ qboolean RT_ModelUpdateMaterials(struct rt_model_s *model, const struct vk_rende const int offset = geom_indices[begin]; const int count = i - begin; ASSERT(offset + count <= geometries_count); - if (!RT_KusochkiUpload(model->kusochki.offset + offset, geometries + offset, count, NULL, NULL)) { + if (!kusochkiUpload(model->kusochki.offset + offset, geometries + offset, count, NULL, NULL)) { APROF_SCOPE_END(update_materials); return false; } @@ -307,7 +313,7 @@ qboolean RT_ModelUpdateMaterials(struct rt_model_s *model, const struct vk_rende const int offset = geom_indices[begin]; const int count = geom_indices_count - begin; ASSERT(offset + count <= geometries_count); - if (!RT_KusochkiUpload(model->kusochki.offset + offset, geometries + offset, count, NULL, NULL)) { + if (!kusochkiUpload(model->kusochki.offset + offset, geometries + offset, count, NULL, NULL)) { APROF_SCOPE_END(update_materials); return false; @@ -374,11 +380,11 @@ void RT_FrameAddModel( struct rt_model_s *model, rt_frame_add_model_t args ) { uint32_t kusochki_offset = model->kusochki.offset; if (args.override.material != NULL) { - kusochki_offset = RT_KusochkiAllocOnce(args.override.geoms_count); + kusochki_offset = kusochkiAllocOnce(args.override.geoms_count); if (kusochki_offset == ALO_ALLOC_FAILED) return; - if (!RT_KusochkiUpload(kusochki_offset, args.override.geoms, args.override.geoms_count, args.override.material, NULL)) { + if (!kusochkiUpload(kusochki_offset, args.override.geoms, args.override.geoms_count, args.override.material, NULL)) { gEngine.Con_Printf(S_ERROR "Couldn't upload kusochki for instanced model\n"); return; } @@ -474,13 +480,13 @@ void RT_DynamicModelProcessFrame(void) { continue; rt_draw_instance_t* draw_instance; - const uint32_t kusochki_offset = RT_KusochkiAllocOnce(dyn->geometries_count); + const uint32_t kusochki_offset = kusochkiAllocOnce(dyn->geometries_count); if (kusochki_offset == ALO_ALLOC_FAILED) { gEngine.Con_Printf(S_ERROR "Couldn't allocate kusochki once for %d geoms of %s, skipping\n", dyn->geometries_count, group_names[i]); goto tail; } - if (!RT_KusochkiUpload(kusochki_offset, dyn->geometries, dyn->geometries_count, NULL, dyn->colors)) { + if (!kusochkiUpload(kusochki_offset, dyn->geometries, dyn->geometries_count, NULL, dyn->colors)) { gEngine.Con_Printf(S_ERROR "Couldn't build blas for %d geoms of %s, skipping\n", dyn->geometries_count, group_names[i]); goto tail; } From 7ea67a0651463bd01966f0000c67f2bf0b7a273a Mon Sep 17 00:00:00 2001 From: Ivan 'provod' Avdeev Date: Thu, 5 Dec 2024 09:44:10 -0500 Subject: [PATCH 12/62] vk: cleanup unused staging code --- ref/vk/vk_core.c | 2 +- ref/vk/vk_staging.c | 30 ------------------------------ ref/vk/vk_staging.h | 4 ---- 3 files changed, 1 insertion(+), 35 deletions(-) diff --git a/ref/vk/vk_core.c b/ref/vk/vk_core.c index 08a7479d6b..dcaca3db31 100644 --- a/ref/vk/vk_core.c +++ b/ref/vk/vk_core.c @@ -114,7 +114,7 @@ static const char* device_extensions_extra[] = { VK_EXT_CALIBRATED_TIMESTAMPS_EXTENSION_NAME, }; -VkBool32 VKAPI_PTR debugCallback( +static VkBool32 VKAPI_PTR debugCallback( VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, VkDebugUtilsMessageTypeFlagsEXT messageTypes, const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData, diff --git a/ref/vk/vk_staging.c b/ref/vk/vk_staging.c index 
98582b5972..1b35396865 100644 --- a/ref/vk/vk_staging.c +++ b/ref/vk/vk_staging.c @@ -19,36 +19,10 @@ #define MAX_CONCURRENT_FRAMES 2 #define COMMAND_BUFFER_COUNT (MAX_CONCURRENT_FRAMES + 1) // to accommodate two frames in flight plus something trying to upload data before waiting for the next frame to complete -typedef struct { - VkImage image; - VkImageLayout layout; - size_t size; // for stats only -} staging_image_t; - -/* TODO -typedef enum { - RegionState_Locked, - RegionState_Released, -} region_state_e; - -typedef struct { - region_state_e debug_state; - //int buffer_index; - VkDeviceSize begin, end; - uint32_t cmdbuf_sequence; -} region_t; -*/ - static struct { vk_buffer_t buffer; r_flipping_buffer_t buffer_alloc; - /* TODO - struct { - ARRAY_DYNAMIC_DECLARE(region_t, regions); - } regions; - */ - struct { VkBuffer dest[MAX_STAGING_ALLOCS]; VkBufferCopy copy[MAX_STAGING_ALLOCS]; @@ -279,10 +253,6 @@ static vk_combuf_t *getCurrentCombuf(void) { return g_staging.current; } -VkCommandBuffer R_VkStagingGetCommandBuffer(void) { - return getCurrentCombuf()->cmdbuf; -} - vk_combuf_t *R_VkStagingCommit(void) { DEBUG("%s: buffers.count=%d current=%p", __FUNCTION__, g_staging.buffers.count, g_staging.current); diff --git a/ref/vk/vk_staging.h b/ref/vk/vk_staging.h index 7c0c1eb603..a06843b4e9 100644 --- a/ref/vk/vk_staging.h +++ b/ref/vk/vk_staging.h @@ -51,10 +51,6 @@ void R_VkStagingFrameBegin(void); // Can return NULL if there's nothing to upload. struct vk_combuf_s *R_VkStagingFrameEnd(void); -// Gets the current command buffer. -// WARNING: Can be invalidated by any of the Lock calls -VkCommandBuffer R_VkStagingGetCommandBuffer(void); - // Commit all staging data into current cmdbuf, submit it and wait for completion. // Needed for CPU-GPU sync void R_VkStagingFlushSync( void ); From dda1a304cf57daea5438278fc90fb8009c8b8ba7 Mon Sep 17 00:00:00 2001 From: Ivan 'provod' Avdeev Date: Fri, 6 Dec 2024 14:19:37 -0500 Subject: [PATCH 13/62] vk: [WIP|BROKEN] begin massive staging and barrier refactoring It's an incomplete intermediary change. This commit doesn't work. It compiles tho. Changes: - Move buffer staging tracking to vk_buffer - Sketch automatic buffer barriers tied to vk_combuf - Remove all combuf handling from staging. That was just gross. Breaks: - Everything. 
- RT AS building is commented out for now --- ref/vk/arrays.h | 17 ++- ref/vk/vk_buffer.c | 110 +++++++++++++++++++ ref/vk/vk_buffer.h | 37 ++++++- ref/vk/vk_combuf.c | 135 ++++++++++++++++++++++++ ref/vk/vk_combuf.h | 25 +++++ ref/vk/vk_common.h | 2 +- ref/vk/vk_core.c | 6 +- ref/vk/vk_core.h | 1 + ref/vk/vk_framectl.c | 35 +++---- ref/vk/vk_geometry.c | 28 ++--- ref/vk/vk_geometry.h | 11 +- ref/vk/vk_image.c | 4 +- ref/vk/vk_light.c | 22 ++-- ref/vk/vk_logs.h | 1 + ref/vk/vk_ray_accel.c | 32 +++--- ref/vk/vk_ray_model.c | 13 +-- ref/vk/vk_resources.c | 2 +- ref/vk/vk_scene.c | 4 +- ref/vk/vk_staging.c | 239 +++--------------------------------------- ref/vk/vk_staging.h | 39 ++----- 20 files changed, 417 insertions(+), 346 deletions(-) diff --git a/ref/vk/arrays.h b/ref/vk/arrays.h index 0c42745a97..560cf879c7 100644 --- a/ref/vk/arrays.h +++ b/ref/vk/arrays.h @@ -2,8 +2,13 @@ #include // size_t -// Array with compile-time maximum size +#define VIEW_DECLARE_CONST(TYPE, NAME) \ + struct { \ + const TYPE *items; \ + int count; \ + } NAME +// Array with compile-time maximum size #define BOUNDED_ARRAY_DECLARE(TYPE, NAME, MAX_SIZE) \ struct { \ TYPE items[MAX_SIZE]; \ @@ -13,9 +18,15 @@ #define BOUNDED_ARRAY(TYPE, NAME, MAX_SIZE) \ BOUNDED_ARRAY_DECLARE(TYPE, NAME, MAX_SIZE) = {0} -#define BOUNDED_ARRAY_APPEND(var, item) \ +#define BOUNDED_ARRAY_HAS_SPACE(array_, space_) \ + ((COUNTOF((array_).items) - (array_).count) >= space_) + +#define BOUNDED_ARRAY_APPEND_UNSAFE(array_) \ + ((array_).items[(array_).count++]) + +#define BOUNDED_ARRAY_APPEND_ITEM(var, item) \ do { \ - ASSERT(var.count < COUNTOF(var.items)); \ + ASSERT(BOUNDED_ARRAY_HAS_SPACE(var, 1)); \ var.items[var.count++] = item; \ } while(0) diff --git a/ref/vk/vk_buffer.c b/ref/vk/vk_buffer.c index 9de7524b0d..6412e70690 100644 --- a/ref/vk/vk_buffer.c +++ b/ref/vk/vk_buffer.c @@ -1,4 +1,10 @@ #include "vk_buffer.h" +#include "vk_logs.h" +#include "vk_combuf.h" + +#include "arrays.h" + +#define LOG_MODULE buf qboolean VK_BufferCreate(const char *debug_name, vk_buffer_t *buf, uint32_t size, VkBufferUsageFlags usage, VkMemoryPropertyFlags flags) { @@ -35,6 +41,8 @@ qboolean VK_BufferCreate(const char *debug_name, vk_buffer_t *buf, uint32_t size } void VK_BufferDestroy(vk_buffer_t *buf) { + // FIXME destroy staging slot + if (buf->buffer) { vkDestroyBuffer(vk_core.device, buf->buffer, NULL); buf->buffer = VK_NULL_HANDLE; @@ -116,3 +124,105 @@ uint32_t R_DEBuffer_Alloc(r_debuffer_t* debuf, r_lifetime_t lifetime, uint32_t s void R_DEBuffer_Flip(r_debuffer_t* debuf) { R_FlippingBuffer_Flip(&debuf->dynamic); } + +#define MAX_STAGING_BUFFERS 16 +#define MAX_STAGING_ENTRIES 2048 + +// TODO this should be part of the vk_buffer_t object itself +typedef struct { + vk_buffer_t *buffer; + VkBuffer staging; + BOUNDED_ARRAY_DECLARE(VkBufferCopy, regions, MAX_STAGING_ENTRIES); +} r_vk_staging_buffer_t; + +// TODO remove this when staging is tracked by the buffer object itself +static struct { + BOUNDED_ARRAY_DECLARE(r_vk_staging_buffer_t, staging, MAX_STAGING_BUFFERS); +} g_buf; + +static r_vk_staging_buffer_t *findExistingStagingSlotForBuffer(vk_buffer_t *buf) { + for (int i = 0; i < g_buf.staging.count; ++i) { + r_vk_staging_buffer_t *const stb = g_buf.staging.items + i; + if (stb->buffer == buf) + return stb; + } + + return NULL; +} + +static r_vk_staging_buffer_t *findOrCreateStagingSlotForBuffer(vk_buffer_t *buf) { + r_vk_staging_buffer_t *stb = findExistingStagingSlotForBuffer(buf); + if (stb) + return stb; + + 
ASSERT(BOUNDED_ARRAY_HAS_SPACE(g_buf.staging, 1)); + stb = &BOUNDED_ARRAY_APPEND_UNSAFE(g_buf.staging); + stb->staging = VK_NULL_HANDLE; + stb->buffer = buf; + stb->regions.count = 0; + return stb; +} + +vk_buffer_locked_t R_VkBufferLock(vk_buffer_t *buf, vk_buffer_lock_t lock) { + DEBUG("Lock buf=%p size=%d region=%d..%d", buf, lock.size, lock.offset, lock.offset + lock.size); + + r_vk_staging_buffer_t *const stb = findOrCreateStagingSlotForBuffer(buf); + ASSERT(stb); + + r_vkstaging_region_t staging_lock = R_VkStagingLock(lock.size); + ASSERT(staging_lock.ptr); + + // TODO perf: adjacent region coalescing + + ASSERT(BOUNDED_ARRAY_HAS_SPACE(stb->regions, 1)); + BOUNDED_ARRAY_APPEND_UNSAFE(stb->regions) = (VkBufferCopy){ + .srcOffset = staging_lock.offset, + .dstOffset = lock.offset, + .size = lock.size, + }; + + if (stb->staging != VK_NULL_HANDLE) + ASSERT(stb->staging == staging_lock.buffer); + else + stb->staging = staging_lock.buffer; + + return (vk_buffer_locked_t) { + .ptr = staging_lock.ptr, + .impl_ = { + .buf = buf, + .handle = staging_lock.handle, + }, + }; +} + +void R_VkBufferUnlock(vk_buffer_locked_t lock) { + R_VkStagingUnlock(lock.impl_.handle); +} + +void R_VkBufferStagingCommit(vk_buffer_t *buf, struct vk_combuf_s *combuf) { + r_vk_staging_buffer_t *const stb = findExistingStagingSlotForBuffer(buf); + if (!stb || stb->regions.count == 0) + return; + + const r_vkcombuf_barrier_buffer_t barrier[] = {{ + .buffer = buf, + .access = VK_ACCESS_TRANSFER_WRITE_BIT, + }}; + + R_VkCombufIssueBarrier(combuf, (r_vkcombuf_barrier_t) { + .stage = VK_PIPELINE_STAGE_2_TRANSFER_BIT, + .buffers = { barrier, COUNTOF(barrier) }, + .images = { NULL, 0 }, + }); + + //FIXME const int begin_index = R_VkCombufScopeBegin(combuf, g_staging.buffer_upload_scope_id); + + const VkCommandBuffer cmdbuf = combuf->cmdbuf; + DEBUG_NV_CHECKPOINTF(cmdbuf, "staging dst_buffer=%p count=%d", buf->buffer, stb->regions.count); + vkCmdCopyBuffer(cmdbuf, stb->staging, buf->buffer, stb->regions.count, stb->regions.items); + + stb->regions.count = 0; + + //FIXME R_VkCombufScopeEnd(combuf, begin_index, VK_PIPELINE_STAGE_TRANSFER_BIT); +} + diff --git a/ref/vk/vk_buffer.h b/ref/vk/vk_buffer.h index 5e977fe022..dd1e4ed005 100644 --- a/ref/vk/vk_buffer.h +++ b/ref/vk/vk_buffer.h @@ -2,8 +2,19 @@ #include "vk_core.h" #include "vk_devmem.h" +#include "vk_staging.h" #include "r_flipping.h" -#include "alolcator.h" + +typedef struct { + VkAccessFlags2 access; + VkPipelineStageFlagBits2 stage; + //VkImageLayout layout; +} r_vksync_scope_t; + +typedef struct { + uint32_t combuf_tag; + r_vksync_scope_t write, read; +} r_vksync_state_t; typedef struct vk_buffer_s { vk_devmem_t devmem; @@ -11,6 +22,8 @@ typedef struct vk_buffer_s { void *mapped; uint32_t size; + + r_vksync_state_t sync; } vk_buffer_t; qboolean VK_BufferCreate(const char *debug_name, vk_buffer_t *buf, uint32_t size, VkBufferUsageFlags usage, VkMemoryPropertyFlags flags); @@ -31,3 +44,25 @@ typedef enum { void R_DEBuffer_Init(r_debuffer_t *debuf, uint32_t static_size, uint32_t dynamic_size); uint32_t R_DEBuffer_Alloc(r_debuffer_t* debuf, r_lifetime_t lifetime, uint32_t size, uint32_t align); void R_DEBuffer_Flip(r_debuffer_t* debuf); + +typedef struct { + void *ptr; + + struct { + vk_buffer_t *buf; + r_vkstaging_handle_t handle; + } impl_; +} vk_buffer_locked_t; + +typedef struct { + uint32_t offset; + uint32_t size; +} vk_buffer_lock_t; + +vk_buffer_locked_t R_VkBufferLock(vk_buffer_t *buf, vk_buffer_lock_t lock); + +void R_VkBufferUnlock(vk_buffer_locked_t lock); + 
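+// Usage sketch (mirrors the vk_light.c call sites in this commit; some_buffer,
+// write_offset, write_size and data are placeholders): the lock hands out a
+// CPU-visible staging pointer, and the VkBuffer itself is only written later,
+// when the staged regions get committed into a command buffer:
+//
+//   vk_buffer_locked_t lock = R_VkBufferLock(&some_buffer, (vk_buffer_lock_t){
+//       .offset = write_offset,
+//       .size = write_size,
+//   });
+//   memcpy(lock.ptr, data, write_size);
+//   R_VkBufferUnlock(lock);
+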
+// Commits any staged regions for the specified buffer +struct vk_combuf_s; +void R_VkBufferStagingCommit(vk_buffer_t *buf, struct vk_combuf_s *combuf);
diff --git a/ref/vk/vk_combuf.c b/ref/vk/vk_combuf.c index ba7339a03b..66960fe57d 100644 --- a/ref/vk/vk_combuf.c +++ b/ref/vk/vk_combuf.c @@ -1,11 +1,14 @@ #include "vk_combuf.h" #include "vk_commandpool.h" +#include "vk_buffer.h" #include "profiler.h" #define MAX_COMMANDBUFFERS 6 #define MAX_QUERY_COUNT 128 +#define MAX_BUFFER_BARRIERS 16 + #define BEGIN_INDEX_TAG 0x10000000 typedef struct { @@ -16,6 +19,8 @@ typedef struct { int scopes[MAX_GPU_SCOPES]; int scopes_count; } profiler; + + uint32_t tag; } vk_combuf_impl_t; static struct { @@ -31,6 +36,8 @@ static struct { int scopes_count; int entire_combuf_scope_id; + + uint32_t tag; } g_combuf; qboolean R_VkCombuf_Init( void ) { @@ -58,6 +65,7 @@ qboolean R_VkCombuf_Init( void ) { } g_combuf.entire_combuf_scope_id = R_VkGpuScope_Register("GPU"); + g_combuf.tag = 1; // Do not start with special value of zero return true; } @@ -94,6 +102,13 @@ void R_VkCombufClose( vk_combuf_t* pub ) { void R_VkCombufBegin( vk_combuf_t* pub ) { vk_combuf_impl_t *const cb = (vk_combuf_impl_t*)pub; + g_combuf.tag++; + // Skip zero as special initial value for objects meaning "not yet used in combuf" + if (g_combuf.tag == 0) + g_combuf.tag = 1; + + cb->tag = g_combuf.tag; + cb->profiler.scopes_count = 0; const VkCommandBufferBeginInfo beginfo = { @@ -120,6 +135,126 @@ static const char* myStrdup(const char *src) { return ret; } +#define ACCESS_WRITE_BITS 0 \ + | VK_ACCESS_2_SHADER_WRITE_BIT \ + | VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT \ + | VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT \ + | VK_ACCESS_2_TRANSFER_WRITE_BIT \ + | VK_ACCESS_2_HOST_WRITE_BIT \ + | VK_ACCESS_2_MEMORY_WRITE_BIT \ + | VK_ACCESS_2_SHADER_STORAGE_WRITE_BIT \ + | VK_ACCESS_2_ACCELERATION_STRUCTURE_WRITE_BIT_KHR \ + +#define ACCESS_READ_BITS 0 \ + | VK_ACCESS_2_INDIRECT_COMMAND_READ_BIT \ + | VK_ACCESS_2_INDEX_READ_BIT \ + | VK_ACCESS_2_VERTEX_ATTRIBUTE_READ_BIT \ + | VK_ACCESS_2_UNIFORM_READ_BIT \ + | VK_ACCESS_2_INPUT_ATTACHMENT_READ_BIT \ + | VK_ACCESS_2_SHADER_READ_BIT \ + | VK_ACCESS_2_COLOR_ATTACHMENT_READ_BIT \ + | VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_READ_BIT \ + | VK_ACCESS_2_TRANSFER_READ_BIT \ + | VK_ACCESS_2_HOST_READ_BIT \ + | VK_ACCESS_2_MEMORY_READ_BIT \ + | VK_ACCESS_2_SHADER_SAMPLED_READ_BIT \ + | VK_ACCESS_2_SHADER_STORAGE_READ_BIT \ + | VK_ACCESS_2_ACCELERATION_STRUCTURE_READ_BIT_KHR \ + +#define ACCESS_KNOWN_BITS (ACCESS_WRITE_BITS | ACCESS_READ_BITS) + +void R_VkCombufIssueBarrier(vk_combuf_t* combuf, r_vkcombuf_barrier_t bar) { + vk_combuf_impl_t *const cb = (vk_combuf_impl_t*)combuf; + ASSERT(bar.images.count == 0 && "TODO"); + + BOUNDED_ARRAY(VkBufferMemoryBarrier2, buffer_barriers, MAX_BUFFER_BARRIERS); + + for (int i = 0; i < bar.buffers.count; ++i) { + const r_vkcombuf_barrier_buffer_t *const bufbar = bar.buffers.items + i; + vk_buffer_t *const buf = bufbar->buffer; + const qboolean is_write = (bufbar->access & ACCESS_WRITE_BITS) != 0; + const qboolean is_read = (bufbar->access & ACCESS_READ_BITS) != 0; + ASSERT((bufbar->access & ~(ACCESS_KNOWN_BITS)) == 0); + + if (buf->sync.combuf_tag != cb->tag) { + // This buffer hasn't yet been used in this command buffer, no need to issue a barrier + buf->sync.combuf_tag = cb->tag; + buf->sync.write = is_write + ? (r_vksync_scope_t){.access = bufbar->access & ACCESS_WRITE_BITS, .stage = bar.stage} + : (r_vksync_scope_t){.access = 0, .stage = 0 }; + buf->sync.read = is_read + ? (r_vksync_scope_t){.access = bufbar->access & ACCESS_READ_BITS, .stage = bar.stage} + : (r_vksync_scope_t){.access = 0, .stage = 0 }; + continue; + } + + VkBufferMemoryBarrier2 bmb = { + .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER_2, + .pNext = NULL, + .buffer = buf->buffer, + .offset = 0, + .size = VK_WHOLE_SIZE, + .dstStageMask = bar.stage, + .dstAccessMask = bufbar->access, + }; + + // TODO: support read-and-write scenarios + ASSERT(is_read ^ is_write); + if (is_write) { + // Write is synchronized with previous reads and writes + bmb.srcStageMask = buf->sync.write.stage | buf->sync.read.stage; + bmb.srcAccessMask = buf->sync.write.access | buf->sync.read.access; + + // Store where write happened + buf->sync.write.access = bufbar->access; + buf->sync.write.stage = bar.stage; + + // If there were no previous reads, there's no reason to synchronize with anything + if (buf->sync.read.stage == 0) + continue; + + // Reset read state + buf->sync.read.access = 0; + buf->sync.read.stage = 0; + } + + if (is_read) { + // Read is synchronized with previous writes only + bmb.srcStageMask = buf->sync.write.stage; + bmb.srcAccessMask = buf->sync.write.access; + + // Check whether this is a new barrier + if ((buf->sync.read.access & bufbar->access) != bufbar->access + && (buf->sync.read.stage & bar.stage) != bar.stage) { + // Remember this read happened + buf->sync.read.access |= bufbar->access; + buf->sync.read.stage |= bar.stage; + } else { + // Already synchronized, no need to do anything + continue; + } + + // Also skip issuing a barrier, if there were no previous writes -- nothing to sync with + // Note that this needs to happen late, as all reads must still be recorded in sync.read fields + if (buf->sync.write.stage == 0) + continue; + } + + BOUNDED_ARRAY_APPEND_ITEM(buffer_barriers, bmb); + } + + if (buffer_barriers.count) { + vkCmdPipelineBarrier2(combuf->cmdbuf, &(VkDependencyInfo) { + .sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO, + .pNext = NULL, + .dependencyFlags = 0, + .bufferMemoryBarrierCount = buffer_barriers.count, + .pBufferMemoryBarriers = buffer_barriers.items, + }); + } +} + + int R_VkGpuScope_Register(const char *name) { // Find existing scope with the same name for (int i = 0; i < g_combuf.scopes_count; ++i) {
diff --git a/ref/vk/vk_combuf.h b/ref/vk/vk_combuf.h index 783e49d719..9283dd585c 100644 --- a/ref/vk/vk_combuf.h +++ b/ref/vk/vk_combuf.h @@ -1,6 +1,7 @@ #pragma once #include "vk_core.h" +#include "arrays.h" #define MAX_GPU_SCOPES 64 @@ -18,6 +19,30 @@ void R_VkCombufBegin( vk_combuf_t* ); void R_VkCombufEnd( vk_combuf_t* ); +struct vk_buffer_s; +typedef struct { + struct vk_buffer_s *buffer; + VkAccessFlags2 access; +} r_vkcombuf_barrier_buffer_t; + +struct vk_image_s; +typedef struct { + struct vk_image_s *image; + VkImageLayout layout; + VkAccessFlags2 access; +} r_vkcombuf_barrier_image_t; + +typedef struct { + VkPipelineStageFlags2 stage; + VIEW_DECLARE_CONST(r_vkcombuf_barrier_buffer_t, buffers); + VIEW_DECLARE_CONST(r_vkcombuf_barrier_image_t, images); +} r_vkcombuf_barrier_t; + +// Immediately issues a barrier for the set of resources given desired usage and resource states +void R_VkCombufIssueBarrier(vk_combuf_t*, r_vkcombuf_barrier_t); + + +// TODO rename consistently int R_VkGpuScope_Register(const char *name); int R_VkCombufScopeBegin(vk_combuf_t*, int scope_id);
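A minimal sketch of the intended call shape (it mirrors the vk_framectl.c call site added later in this series; the VERTEX_INPUT stage and read accesses are that caller's choice, not a requirement of the API): first commit any staging pending for the buffer, then declare only the desired destination scope; the matching source scope is derived from the per-buffer sync state tracked above.

	vk_buffer_t *const geom = R_GeometryBuffer_Get();
	R_VkBufferStagingCommit(geom, combuf); // records the pending copies plus a transfer-write barrier
	R_VkCombufIssueBarrier(combuf, (r_vkcombuf_barrier_t){
		.stage = VK_PIPELINE_STAGE_2_VERTEX_INPUT_BIT,
		.buffers = {
			.count = 1,
			.items = &(r_vkcombuf_barrier_buffer_t){
				.buffer = geom,
				.access = VK_ACCESS_2_INDEX_READ_BIT | VK_ACCESS_2_VERTEX_ATTRIBUTE_READ_BIT,
			},
		},
	});

Repeated reads with already-covered stage and access bits produce no new barrier; a later write then synchronizes against every read recorded so far.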
diff --git a/ref/vk/vk_common.h b/ref/vk/vk_common.h index 0e41e2f8d8..4ed94c8035 100644 --- a/ref/vk/vk_common.h +++ b/ref/vk/vk_common.h @@ -7,7 +7,7 @@ #include "com_strings.h" #include "crtlib.h" -#define ASSERT(x) if(!( x )) gEngine.Host_Error( "assert %s failed at %s:%d\n", #x, __FILE__, __LINE__ ) +#define ASSERT(x) do { if(!( x )) gEngine.Host_Error( "assert %s failed at %s:%d\n", #x, __FILE__, __LINE__ ); } while (0) // TODO ASSERTF(x, fmt, ...) #define Mem_Malloc( pool, size ) gEngine._Mem_Alloc( pool, size, false, __FILE__, __LINE__ )
diff --git a/ref/vk/vk_core.c b/ref/vk/vk_core.c index dcaca3db31..fd4df34266 100644 --- a/ref/vk/vk_core.c +++ b/ref/vk/vk_core.c @@ -197,11 +197,11 @@ static qboolean createInstance( void ) }; BOUNDED_ARRAY(VkValidationFeatureEnableEXT, validation_features, 8); - BOUNDED_ARRAY_APPEND(validation_features, VK_VALIDATION_FEATURE_ENABLE_SYNCHRONIZATION_VALIDATION_EXT); - BOUNDED_ARRAY_APPEND(validation_features, VK_VALIDATION_FEATURE_ENABLE_BEST_PRACTICES_EXT); + BOUNDED_ARRAY_APPEND_ITEM(validation_features, VK_VALIDATION_FEATURE_ENABLE_SYNCHRONIZATION_VALIDATION_EXT); + BOUNDED_ARRAY_APPEND_ITEM(validation_features, VK_VALIDATION_FEATURE_ENABLE_BEST_PRACTICES_EXT); if (!!gEngine.Sys_CheckParm("-vkdbg_shaderprintf")) - BOUNDED_ARRAY_APPEND(validation_features, VK_VALIDATION_FEATURE_ENABLE_DEBUG_PRINTF_EXT); + BOUNDED_ARRAY_APPEND_ITEM(validation_features, VK_VALIDATION_FEATURE_ENABLE_DEBUG_PRINTF_EXT); const VkValidationFeaturesEXT validation_ext = { .sType = VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT,
diff --git a/ref/vk/vk_core.h b/ref/vk/vk_core.h index 55112fa0aa..ea4dc38f60 100644 --- a/ref/vk/vk_core.h +++ b/ref/vk/vk_core.h @@ -224,6 +224,7 @@ do { \ X(vkGetImageMemoryRequirements) \ X(vkBindImageMemory) \ X(vkCmdPipelineBarrier) \ + X(vkCmdPipelineBarrier2) \ X(vkCmdCopyBufferToImage) \ X(vkCmdCopyBuffer) \ X(vkQueueWaitIdle) \
diff --git a/ref/vk/vk_framectl.c b/ref/vk/vk_framectl.c index a52642e5a6..838ef61402 100644 --- a/ref/vk/vk_framectl.c +++ b/ref/vk/vk_framectl.c @@ -44,7 +44,7 @@ typedef struct { // so we can't reuse the same one for two purposes and need to multiply entities VkSemaphore sem_done2; - vk_combuf_t *staging_combuf; + uint32_t staging_generation_tag; } vk_framectl_frame_t; static struct { @@ -153,7 +153,7 @@ static VkRenderPass createRenderPass( VkFormat depth_format, qboolean ray_tracin .dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, .dependencyFlags = VK_DEPENDENCY_BY_REGION_BIT, }; - BOUNDED_ARRAY_APPEND(dependencies, color); + BOUNDED_ARRAY_APPEND_ITEM(dependencies, color); } else { const VkSubpassDependency color = { .srcSubpass = VK_SUBPASS_EXTERNAL, @@ -164,7 +164,7 @@ static VkRenderPass createRenderPass( VkFormat depth_format, qboolean ray_tracin .dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, .dependencyFlags = VK_DEPENDENCY_BY_REGION_BIT, }; - BOUNDED_ARRAY_APPEND(dependencies, color); + BOUNDED_ARRAY_APPEND_ITEM(dependencies, color); } const VkSubpassDependency depth = { @@ -176,7 +176,7 @@ static VkRenderPass createRenderPass( VkFormat depth_format, qboolean ray_tracin .dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT, .dependencyFlags = 0, }; - BOUNDED_ARRAY_APPEND(dependencies, depth); + BOUNDED_ARRAY_APPEND_ITEM(dependencies, depth); const VkRenderPassCreateInfo rpci = { .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, @@ -258,12 +258,8 @@ void R_BeginFrame( qboolean clearScene ) {
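+	// Staging lifetime is now tied to the frame slot: on frame begin, release the
+	// staging generation that this slot committed the last time it was submitted.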
APROF_SCOPE_BEGIN(begin_frame); { - const vk_combuf_scopes_t gpurofl[] = { - frame->staging_combuf ? R_VkCombufScopesGet(frame->staging_combuf) : (vk_combuf_scopes_t){.entries_count=0}, - R_VkCombufScopesGet(frame->combuf), - }; - - R_SpeedsDisplayMore(prev_frame_event_index, frame->staging_combuf ? gpurofl : gpurofl + 1, frame->staging_combuf ? 2 : 1); + const vk_combuf_scopes_t gpurofl[] = { R_VkCombufScopesGet(frame->combuf) }; + R_SpeedsDisplayMore(prev_frame_event_index, gpurofl, COUNTOF(gpurofl)); } if (vk_core.rtx && FBitSet( rt_enable->flags, FCVAR_CHANGED )) { @@ -275,7 +271,8 @@ void R_BeginFrame( qboolean clearScene ) { ASSERT(!g_frame.current.framebuffer.framebuffer); - R_VkStagingFrameBegin(); + // TODO explicit frame dependency synced on frame-end-event/sema + R_VkStagingGenerationRelease(frame->staging_generation_tag); g_frame.current.framebuffer = R_VkSwapchainAcquire( frame->sem_framebuffer_ready ); vk_frame.width = g_frame.current.framebuffer.width; @@ -367,14 +364,10 @@ static void submit( vk_combuf_t* combuf, qboolean wait, qboolean draw ) { R_VkCombufEnd(combuf); - frame->staging_combuf = R_VkStagingFrameEnd(); + frame->staging_generation_tag = R_VkStagingGenerationCommit(); BOUNDED_ARRAY(VkCommandBuffer, cmdbufs, 2); - - if (frame->staging_combuf) - BOUNDED_ARRAY_APPEND(cmdbufs, frame->staging_combuf->cmdbuf); - - BOUNDED_ARRAY_APPEND(cmdbufs, cmdbuf); + BOUNDED_ARRAY_APPEND_ITEM(cmdbufs, cmdbuf); { const VkPipelineStageFlags stageflags[] = { @@ -387,12 +380,12 @@ static void submit( vk_combuf_t* combuf, qboolean wait, qboolean draw ) { BOUNDED_ARRAY(VkSemaphore, signalphores, 2); if (draw) { - BOUNDED_ARRAY_APPEND(waitophores, frame->sem_framebuffer_ready); - BOUNDED_ARRAY_APPEND(signalphores, frame->sem_done); + BOUNDED_ARRAY_APPEND_ITEM(waitophores, frame->sem_framebuffer_ready); + BOUNDED_ARRAY_APPEND_ITEM(signalphores, frame->sem_done); } - BOUNDED_ARRAY_APPEND(waitophores, prev_frame->sem_done2); - BOUNDED_ARRAY_APPEND(signalphores, frame->sem_done2); + BOUNDED_ARRAY_APPEND_ITEM(waitophores, prev_frame->sem_done2); + BOUNDED_ARRAY_APPEND_ITEM(signalphores, frame->sem_done2); const VkSubmitInfo subinfo = { .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO, diff --git a/ref/vk/vk_geometry.c b/ref/vk/vk_geometry.c index e4fa0b2652..e1a0e52afb 100644 --- a/ref/vk/vk_geometry.c +++ b/ref/vk/vk_geometry.c @@ -59,14 +59,12 @@ void R_GeometryRangeFree(const r_geometry_range_t* range) { } r_geometry_range_lock_t R_GeometryRangeLock(const r_geometry_range_t *range) { - const vk_staging_buffer_args_t staging_args = { - .buffer = g_geom.buffer.buffer, + const vk_buffer_lock_t staging_args = { .offset = range->block_handle.offset, .size = range->block_handle.size, - .alignment = 4, }; - const vk_staging_region_t staging = R_VkStagingLockForBuffer(staging_args); + const vk_buffer_locked_t staging = R_VkBufferLock(&g_geom.buffer, staging_args); ASSERT(staging.ptr); const uint32_t vertices_size = range->vertices.count * sizeof(vk_vertex_t); @@ -78,23 +76,21 @@ r_geometry_range_lock_t R_GeometryRangeLock(const r_geometry_range_t *range) { .vertices = (vk_vertex_t *)staging.ptr, .indices = PTR_CAST(uint16_t, (char*)staging.ptr + vertices_size), .impl_ = { - .staging_handle = staging.handle, + .staging_handle = staging, }, }; } r_geometry_range_lock_t R_GeometryRangeLockSubrange(const r_geometry_range_t *range, int vertices_offset, int vertices_count ) { - const vk_staging_buffer_args_t staging_args = { - .buffer = g_geom.buffer.buffer, + const vk_buffer_lock_t staging_args = { .offset = 
range->block_handle.offset + sizeof(vk_vertex_t) * vertices_offset, .size = sizeof(vk_vertex_t) * vertices_count, - .alignment = 4, }; ASSERT(staging_args.offset >= range->block_handle.offset); ASSERT(staging_args.offset + staging_args.size <= range->block_handle.offset + range->block_handle.size); - const vk_staging_region_t staging = R_VkStagingLockForBuffer(staging_args); + const vk_buffer_locked_t staging = R_VkBufferLock(&g_geom.buffer, staging_args); ASSERT(staging.ptr); ASSERT( range->block_handle.offset % sizeof(vk_vertex_t) == 0 ); @@ -103,13 +99,13 @@ r_geometry_range_lock_t R_GeometryRangeLockSubrange(const r_geometry_range_t *ra .vertices = (vk_vertex_t *)staging.ptr, .indices = NULL, .impl_ = { - .staging_handle = staging.handle, + .staging_handle = staging, }, }; } void R_GeometryRangeUnlock(const r_geometry_range_lock_t *lock) { - R_VkStagingUnlock(lock->impl_.staging_handle); + R_VkBufferUnlock(lock->impl_.staging_handle); } qboolean R_GeometryBufferAllocOnceAndLock(r_geometry_buffer_lock_t *lock, int vertex_count, int index_count) { @@ -129,14 +125,12 @@ qboolean R_GeometryBufferAllocOnceAndLock(r_geometry_buffer_lock_t *lock, int ve { const uint32_t vertices_offset = offset / sizeof(vk_vertex_t); const uint32_t indices_offset = (offset + vertices_size) / sizeof(uint16_t); - const vk_staging_buffer_args_t staging_args = { - .buffer = g_geom.buffer.buffer, + const vk_buffer_lock_t staging_args = { .offset = offset, .size = total_size, - .alignment = 4, }; - const vk_staging_region_t staging = R_VkStagingLockForBuffer(staging_args); + const vk_buffer_locked_t staging = R_VkBufferLock(&g_geom.buffer, staging_args); ASSERT(staging.ptr); ASSERT( offset % sizeof(vk_vertex_t) == 0 ); @@ -154,7 +148,7 @@ qboolean R_GeometryBufferAllocOnceAndLock(r_geometry_buffer_lock_t *lock, int ve .unit_offset = indices_offset, }, .impl_ = { - .staging_handle = staging.handle, + .handle_ = staging, }, }; } @@ -166,7 +160,7 @@ qboolean R_GeometryBufferAllocOnceAndLock(r_geometry_buffer_lock_t *lock, int ve } void R_GeometryBufferUnlock( const r_geometry_buffer_lock_t *lock ) { - R_VkStagingUnlock(lock->impl_.staging_handle); + R_VkBufferUnlock(lock->impl_.handle_); } void R_GeometryBuffer_MapClear( void ) { diff --git a/ref/vk/vk_geometry.h b/ref/vk/vk_geometry.h index 7063798801..6799ad833d 100644 --- a/ref/vk/vk_geometry.h +++ b/ref/vk/vk_geometry.h @@ -1,6 +1,8 @@ #pragma once #include "vk_common.h" #include "r_block.h" +#include "vk_staging.h" +#include "vk_buffer.h" // FIXME vk_buffer_locked_t should not be exposed #include "vk_core.h" #include @@ -41,12 +43,15 @@ typedef struct { r_geometry_range_t R_GeometryRangeAlloc(int vertices, int indices); void R_GeometryRangeFree(const r_geometry_range_t*); +// TODO combine with r_geometry_buffer_lock_t typedef struct { vk_vertex_t *vertices; uint16_t *indices; struct { - int staging_handle; + // FIXME hide behind some index in geometry buffer + // Think: what's the max simultaneously locked regions count + vk_buffer_locked_t staging_handle; } impl_; } r_geometry_range_lock_t; @@ -69,7 +74,9 @@ typedef struct { } indices; struct { - int staging_handle; + // FIXME hide behind some index in geometry buffer + // Think: what's the max simultaneously locked regions count + vk_buffer_locked_t handle_; } impl_; } r_geometry_buffer_lock_t; diff --git a/ref/vk/vk_image.c b/ref/vk/vk_image.c index 25ac052437..2b2779c15b 100644 --- a/ref/vk/vk_image.c +++ b/ref/vk/vk_image.c @@ -402,7 +402,7 @@ void R_VkImageUploadCommit( struct vk_combuf_s *combuf, 
VkPipelineStageFlagBits }, }; - R_VkStagingReleaseAfterNextFrame(up->staging.lock.handle); + R_VkStagingUnlock(up->staging.lock.handle); // Mark image as uploaded up->image->upload_slot = -1; @@ -534,7 +534,7 @@ void R_VkImageUploadCancel( r_vk_image_t *img ) { // Technically we won't need that staging region anymore at all, but it doesn't matter, // it's just easier to mark it to be freed this way. - R_VkStagingReleaseAfterNextFrame(up->staging.lock.handle); + R_VkStagingUnlock(up->staging.lock.handle); // Mark upload slot as unused, and image as not subjet to uploading up->image = NULL; diff --git a/ref/vk/vk_light.c b/ref/vk/vk_light.c index a7549696c5..56eaf0508f 100644 --- a/ref/vk/vk_light.c +++ b/ref/vk/vk_light.c @@ -1200,11 +1200,10 @@ static void uploadGridRange( int begin, int end ) { ASSERT( count > 0 ); const int size = count * sizeof(struct LightCluster); - const vk_staging_region_t locked = R_VkStagingLockForBuffer( (vk_staging_buffer_args_t) { - .buffer = g_lights_.buffer.buffer, - .offset = sizeof(struct LightsMetadata) + begin * sizeof(struct LightCluster), - .size = size, - .alignment = 16, // WHY? + const vk_buffer_locked_t locked = R_VkBufferLock(&g_lights_.buffer, + (vk_buffer_lock_t) { + .offset = sizeof(struct LightsMetadata) + begin * sizeof(struct LightCluster), + .size = size, } ); ASSERT(locked.ptr); @@ -1222,7 +1221,7 @@ static void uploadGridRange( int begin, int end ) { memcpy(dst->polygons, src->polygons, sizeof(uint8_t) * src->num_polygons); } - R_VkStagingUnlock( locked.handle ); + R_VkBufferUnlock( locked ); g_lights_.stats.ranges_uploaded++; } @@ -1298,11 +1297,10 @@ static void uploadPointLights( struct LightsMetadata *metadata ) { vk_lights_bindings_t VK_LightsUpload( void ) { APROF_SCOPE_DECLARE_BEGIN(upload, __FUNCTION__); - const vk_staging_region_t locked = R_VkStagingLockForBuffer( (vk_staging_buffer_args_t) { - .buffer = g_lights_.buffer.buffer, - .offset = 0, - .size = sizeof(struct LightsMetadata), - .alignment = 16, // WHY? + const vk_buffer_locked_t locked = R_VkBufferLock(&g_lights_.buffer, + (vk_buffer_lock_t) { + .offset = 0, + .size = sizeof(struct LightsMetadata), } ); ASSERT(locked.ptr); @@ -1316,7 +1314,7 @@ vk_lights_bindings_t VK_LightsUpload( void ) { uploadPolygonLights( metadata ); uploadPointLights( metadata ); - R_VkStagingUnlock( locked.handle ); + R_VkBufferUnlock( locked ); uploadGrid(); diff --git a/ref/vk/vk_logs.h b/ref/vk/vk_logs.h index 3c69c859a7..23c5e2edd9 100644 --- a/ref/vk/vk_logs.h +++ b/ref/vk/vk_logs.h @@ -17,6 +17,7 @@ X(sprite) \ X(img) \ X(staging) \ + X(buf) \ enum { #define X(m) LogModule_##m, diff --git a/ref/vk/vk_ray_accel.c b/ref/vk/vk_ray_accel.c index 83de6848a7..b94eccf1c6 100644 --- a/ref/vk/vk_ray_accel.c +++ b/ref/vk/vk_ray_accel.c @@ -126,7 +126,10 @@ static VkDeviceAddress getAccelAddress(VkAccelerationStructureKHR as) { static qboolean buildAccel(VkBuffer geometry_buffer, VkAccelerationStructureBuildGeometryInfoKHR *build_info, uint32_t scratch_buffer_size, const VkAccelerationStructureBuildRangeInfoKHR *build_ranges) { // FIXME this is definitely not the right place. 
We should upload everything in bulk, and only then build blases in bulk too - vk_combuf_t *const combuf = R_VkStagingCommit(); + //vk_combuf_t *const combuf = R_VkStagingCommit(); + ASSERT(!"AS build is broken, needs to be rewritten to support the new sync"); + return false; +#if 0 { const VkBufferMemoryBarrier bmb[] = { { .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, @@ -171,6 +174,7 @@ static qboolean buildAccel(VkBuffer geometry_buffer, VkAccelerationStructureBuil R_VkCombufScopeEnd(combuf, begin_index, VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR); return true; +#endif } // TODO split this into smaller building blocks in a separate module @@ -302,9 +306,6 @@ static void buildBlases(vk_combuf_t *combuf) { const VkBuffer geometry_buffer = R_GeometryBuffer_Get(); const VkDeviceAddress geometry_addr = R_VkBufferGetDeviceAddress(geometry_buffer); - // FIXME get rid of this when staging doesn't own copying ops anymore - vk_combuf_t *const combuf_staging_fixme = R_VkStagingCommit(); - // TODO remove, should be handled by render graph { const VkBufferMemoryBarrier bmb[] = { { @@ -315,7 +316,7 @@ static void buildBlases(vk_combuf_t *combuf) { .offset = 0, .size = VK_WHOLE_SIZE, } }; - vkCmdPipelineBarrier(combuf_staging_fixme->cmdbuf, + vkCmdPipelineBarrier(combuf->cmdbuf, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR | VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR, 0, 0, NULL, COUNTOF(bmb), bmb, 0, NULL); @@ -330,11 +331,11 @@ static void buildBlases(vk_combuf_t *combuf) { static int scope_id = -2; if (scope_id == -2) scope_id = R_VkGpuScope_Register("build_as"); - const int begin_index = R_VkCombufScopeBegin(combuf_staging_fixme, scope_id); + const int begin_index = R_VkCombufScopeBegin(combuf, scope_id); const VkAccelerationStructureBuildRangeInfoKHR *p_build_ranges = blas->build.ranges; // TODO one call to build them all - vkCmdBuildAccelerationStructuresKHR(combuf_staging_fixme->cmdbuf, 1, &blas->build.info, &p_build_ranges); - R_VkCombufScopeEnd(combuf_staging_fixme, begin_index, VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR); + vkCmdBuildAccelerationStructuresKHR(combuf->cmdbuf, 1, &blas->build.info, &p_build_ranges); + R_VkCombufScopeEnd(combuf, begin_index, VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR); blas->build.built = true; } @@ -357,11 +358,10 @@ vk_resource_t RT_VkAccelPrepareTlas(vk_combuf_t *combuf) { // Upload all blas instances references to GPU mem { - const vk_staging_region_t headers_lock = R_VkStagingLockForBuffer((vk_staging_buffer_args_t){ - .buffer = g_ray_model_state.model_headers_buffer.buffer, - .offset = 0, - .size = g_ray_model_state.frame.instances_count * sizeof(struct ModelHeader), - .alignment = 16, + const vk_buffer_locked_t headers_lock = R_VkBufferLock(&g_ray_model_state.model_headers_buffer, + (vk_buffer_lock_t){ + .offset = 0, + .size = g_ray_model_state.frame.instances_count * sizeof(struct ModelHeader), }); ASSERT(headers_lock.ptr); @@ -420,7 +420,7 @@ vk_resource_t RT_VkAccelPrepareTlas(vk_combuf_t *combuf) { Matrix4x4_ToArrayFloatGL(instance->prev_transform_row, (float*)header->prev_transform); } - R_VkStagingUnlock(headers_lock.handle); + R_VkBufferUnlock(headers_lock); } g_accel.stats.instances_count = g_ray_model_state.frame.instances_count; @@ -639,7 +639,7 @@ struct rt_blas_s* RT_BlasCreate(rt_blas_create_t args) { blas->max_geoms = blas->build.info.geometryCount; if (!args.dont_build) - BOUNDED_ARRAY_APPEND(g_accel.build.blas, blas); + 
BOUNDED_ARRAY_APPEND_ITEM(g_accel.build.blas, blas); return blas; @@ -705,6 +705,6 @@ qboolean RT_BlasUpdate(struct rt_blas_s *blas, const struct vk_render_geometry_s return false; } - BOUNDED_ARRAY_APPEND(g_accel.build.blas, blas); + BOUNDED_ARRAY_APPEND_ITEM(g_accel.build.blas, blas); return true; } diff --git a/ref/vk/vk_ray_model.c b/ref/vk/vk_ray_model.c index 11dc93899b..65f2711644 100644 --- a/ref/vk/vk_ray_model.c +++ b/ref/vk/vk_ray_model.c @@ -3,7 +3,6 @@ #include "vk_rtx.h" #include "vk_materials.h" #include "vk_render.h" -#include "vk_staging.h" #include "vk_logs.h" #include "profiler.h" @@ -192,26 +191,24 @@ static void kusochkiFree(const rt_kusochki_t *kusochki) { // TODO this function can't really fail. It'd mean that staging is completely broken. qboolean kusochkiUpload(uint32_t kusochki_offset, const struct vk_render_geometry_s *geoms, int geoms_count, const r_vk_material_t *override_material, const vec4_t *override_colors) { - const vk_staging_buffer_args_t staging_args = { - .buffer = g_ray_model_state.kusochki_buffer.buffer, + const vk_buffer_lock_t lock_args = { .offset = kusochki_offset * sizeof(vk_kusok_data_t), .size = geoms_count * sizeof(vk_kusok_data_t), - .alignment = 16, }; - const vk_staging_region_t kusok_staging = R_VkStagingLockForBuffer(staging_args); + const vk_buffer_locked_t lock = R_VkBufferLock(&g_ray_model_state.kusochki_buffer, lock_args); - if (!kusok_staging.ptr) { + if (!lock.ptr) { gEngine.Con_Printf(S_ERROR "Couldn't allocate staging for %d kusochkov\n", geoms_count); return false; } - vk_kusok_data_t *const p = kusok_staging.ptr; + vk_kusok_data_t *const p = lock.ptr; for (int i = 0; i < geoms_count; ++i) { const vk_render_geometry_t *geom = geoms + i; applyMaterialToKusok(p + i, geom, override_material, override_colors ? 
override_colors[i] : NULL); } - R_VkStagingUnlock(kusok_staging.handle); + R_VkBufferUnlock(lock); return true; } diff --git a/ref/vk/vk_resources.c b/ref/vk/vk_resources.c index 9cef06654a..9fad231396 100644 --- a/ref/vk/vk_resources.c +++ b/ref/vk/vk_resources.c @@ -246,7 +246,7 @@ void R_VkBarrierAddImage(r_vk_barrier_t *barrier, r_vk_barrier_image_t image) { .layerCount = 1, }, }; - BOUNDED_ARRAY_APPEND(barrier->images, ib); + BOUNDED_ARRAY_APPEND_ITEM(barrier->images, ib); } void R_VkBarrierCommit(VkCommandBuffer cmdbuf, r_vk_barrier_t *barrier, VkPipelineStageFlags dst_stage_mask) { diff --git a/ref/vk/vk_scene.c b/ref/vk/vk_scene.c index 740ebe8656..c16afdc029 100644 --- a/ref/vk/vk_scene.c +++ b/ref/vk/vk_scene.c @@ -144,7 +144,7 @@ static void loadMap(const model_t* const map, qboolean force_reload) { static void reloadPatches( void ) { INFO("Reloading patches and materials"); - R_VkStagingFlushSync(); + // FIXME R_VkStagingFlushSync(); XVK_CHECK(vkDeviceWaitIdle( vk_core.device )); @@ -153,8 +153,6 @@ static void reloadPatches( void ) { const model_t *const map = gEngine.pfnGetModelByIndex( 1 ); const qboolean force_reload = true; loadMap(map, force_reload); - - R_VkStagingFlushSync(); } void VK_SceneInit( void ) diff --git a/ref/vk/vk_staging.c b/ref/vk/vk_staging.c index 1b35396865..f9f06afa80 100644 --- a/ref/vk/vk_staging.c +++ b/ref/vk/vk_staging.c @@ -15,24 +15,13 @@ // FIXME decrease size to something reasonable, see https://github.com/w23/xash3d-fwgs/issues/746 #define DEFAULT_STAGING_SIZE (4*128*1024*1024) -#define MAX_STAGING_ALLOCS (2048) -#define MAX_CONCURRENT_FRAMES 2 -#define COMMAND_BUFFER_COUNT (MAX_CONCURRENT_FRAMES + 1) // to accommodate two frames in flight plus something trying to upload data before waiting for the next frame to complete static struct { vk_buffer_t buffer; r_flipping_buffer_t buffer_alloc; - struct { - VkBuffer dest[MAX_STAGING_ALLOCS]; - VkBufferCopy copy[MAX_STAGING_ALLOCS]; - int count; - } buffers; - - vk_combuf_t *combuf[3]; - - // Currently opened command buffer, ready to accept new commands - vk_combuf_t *current; + uint32_t locked_count; + uint32_t current_generation; struct { int total_size; @@ -50,10 +39,6 @@ qboolean R_VkStagingInit(void) { if (!VK_BufferCreate("staging", &g_staging.buffer, DEFAULT_STAGING_SIZE, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) return false; - g_staging.combuf[0] = R_VkCombufOpen(); - g_staging.combuf[1] = R_VkCombufOpen(); - g_staging.combuf[2] = R_VkCombufOpen(); - R_FlippingBuffer_Init(&g_staging.buffer_alloc, DEFAULT_STAGING_SIZE); R_SPEEDS_COUNTER(g_staging.stats.total_size, "total_size", kSpeedsMetricBytes); @@ -73,222 +58,24 @@ void R_VkStagingShutdown(void) { VK_BufferDestroy(&g_staging.buffer); } -// FIXME There's a severe race condition here. Submitting things manually and prematurely (before framectl had a chance to synchronize with the previous frame) -// may lead to data races and memory corruption (e.g. 
writing into memory that's being read in some pipeline stage still going) -void R_VkStagingFlushSync( void ) { - ASSERT(!"SHOULD NEVER HAPPEN"); - APROF_SCOPE_DECLARE_BEGIN(function, __FUNCTION__); - - vk_combuf_t *combuf = R_VkStagingCommit(); - if (!combuf) - goto end; - - R_VkCombufEnd(combuf); - g_staging.current = NULL; - - //gEngine.Con_Reportf(S_WARN "flushing staging buffer img count=%d\n", g_staging.images.count); - - { - const VkSubmitInfo subinfo = { - .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO, - .commandBufferCount = 1, - .pCommandBuffers = &combuf->cmdbuf, - }; - - // TODO wait for previous command buffer completion. Why: we might end up writing into the same dst - - XVK_CHECK(vkQueueSubmit(vk_core.queue, 1, &subinfo, VK_NULL_HANDLE)); - - // TODO wait for fence, not this - XVK_CHECK(vkQueueWaitIdle(vk_core.queue)); - } - - g_staging.buffers.count = 0; - R_FlippingBuffer_Clear(&g_staging.buffer_alloc); - -end: - APROF_SCOPE_END(function); -}; - static uint32_t allocateInRing(uint32_t size, uint32_t alignment) { alignment = alignment < 1 ? 1 : alignment; const uint32_t offset = R_FlippingBuffer_Alloc(&g_staging.buffer_alloc, size, alignment ); - if (offset != ALO_ALLOC_FAILED) - return offset; - - R_VkStagingFlushSync(); + ASSERT(offset != ALO_ALLOC_FAILED && "FIXME increase staging buffer size as a quick fix"); return R_FlippingBuffer_Alloc(&g_staging.buffer_alloc, size, alignment ); } -vk_staging_region_t R_VkStagingLockForBuffer(vk_staging_buffer_args_t args) { - if ( g_staging.buffers.count >= MAX_STAGING_ALLOCS ) - R_VkStagingFlushSync(); - - const uint32_t offset = allocateInRing(args.size, args.alignment); - if (offset == ALO_ALLOC_FAILED) - return (vk_staging_region_t){0}; - - DEBUG("Lock buf alignment=%d size=%d region=%d..%d", args.alignment, args.size, offset, offset + args.size); - - const int index = g_staging.buffers.count; - - g_staging.buffers.dest[index] = args.buffer; - g_staging.buffers.copy[index] = (VkBufferCopy){ - .srcOffset = offset, - .dstOffset = args.offset, - .size = args.size, - }; - - g_staging.buffers.count++; - - return (vk_staging_region_t){ - .ptr = (char*)g_staging.buffer.mapped + offset, - .handle = index, - }; -} - -void R_VkStagingUnlock(r_vkstaging_handle_t handle) { - ASSERT(handle >= 0); - ASSERT(handle < MAX_STAGING_ALLOCS * 2); - - // FIXME mark and check ready -} - -static void commitBuffers(vk_combuf_t *combuf) { - if (!g_staging.buffers.count) - return; - - const VkCommandBuffer cmdbuf = g_staging.current->cmdbuf; - const int begin_index = R_VkCombufScopeBegin(combuf, g_staging.buffer_upload_scope_id); - - // TODO better coalescing: - // - upload once per buffer - // - join adjacent regions - - BOUNDED_ARRAY(VkBufferMemoryBarrier, barriers, 4); - - for (int i = 0; i < g_staging.buffers.count; i++) { - const VkBuffer dst_buf = g_staging.buffers.dest[i]; - for (int j = 0;; ++j) { - if (j == COUNTOF(barriers.items)) { - ERR("Ran out of buffer barrier slots, oh no"); - break; - } - - // Instert last - if (j == barriers.count) { - barriers.count++; - barriers.items[j] = (VkBufferMemoryBarrier){ - .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, - .srcAccessMask = VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT, - .dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT, - .buffer = dst_buf, - .offset = 0, - .size = VK_WHOLE_SIZE, - }; - break; - } - - // Already inserted - if (barriers.items[j].buffer == dst_buf) - break; - } - } - - if (barriers.count) { - vkCmdPipelineBarrier(cmdbuf, - // FIXME this should be more concrete. 
Will need to pass buffer "state" around. - // For now it works, but makes validation uhappy. - VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, - VK_PIPELINE_STAGE_TRANSFER_BIT, - 0, 0, NULL, barriers.count, barriers.items, 0, NULL); - } - - VkBuffer prev_buffer = VK_NULL_HANDLE; - int first_copy = 0; - for (int i = 0; i < g_staging.buffers.count; i++) { - /* { */ - /* const VkBufferCopy *const copy = g_staging.buffers.copy + i; */ - /* gEngine.Con_Reportf(" %d: [%08llx, %08llx) => [%08llx, %08llx)\n", i, copy->srcOffset, copy->srcOffset + copy->size, copy->dstOffset, copy->dstOffset + copy->size); */ - /* } */ - - if (prev_buffer == g_staging.buffers.dest[i]) - continue; - - if (prev_buffer != VK_NULL_HANDLE) { - DEBUG_NV_CHECKPOINTF(cmdbuf, "staging dst_buffer=%p count=%d", prev_buffer, i-first_copy); - g_staging.stats.buffer_chunks++; - vkCmdCopyBuffer(cmdbuf, g_staging.buffer.buffer, - prev_buffer, - i - first_copy, g_staging.buffers.copy + first_copy); - } - - g_staging.stats.buffers_size += g_staging.buffers.copy[i].size; - - prev_buffer = g_staging.buffers.dest[i]; - first_copy = i; - } - - if (prev_buffer != VK_NULL_HANDLE) { - DEBUG_NV_CHECKPOINTF(cmdbuf, "staging dst_buffer=%p count=%d", prev_buffer, g_staging.buffers.count-first_copy); - g_staging.stats.buffer_chunks++; - vkCmdCopyBuffer(cmdbuf, g_staging.buffer.buffer, - prev_buffer, - g_staging.buffers.count - first_copy, g_staging.buffers.copy + first_copy); - } - - g_staging.buffers.count = 0; - - R_VkCombufScopeEnd(combuf, begin_index, VK_PIPELINE_STAGE_TRANSFER_BIT); -} - -static vk_combuf_t *getCurrentCombuf(void) { - if (!g_staging.current) { - g_staging.current = g_staging.combuf[0]; - R_VkCombufBegin(g_staging.current); - } - - return g_staging.current; -} - -vk_combuf_t *R_VkStagingCommit(void) { - DEBUG("%s: buffers.count=%d current=%p", __FUNCTION__, g_staging.buffers.count, g_staging.current); - - if (!g_staging.buffers.count && !g_staging.current) - return VK_NULL_HANDLE; - - getCurrentCombuf(); - commitBuffers(g_staging.current); - return g_staging.current; -} - -void R_VkStagingFrameBegin(void) { - R_VkStagingCommit(); // .... 
ugh - +void R_VkStagingGenerationRelease(uint32_t gen) { + ASSERT(gen == (g_staging.current_generation - 1)); R_FlippingBuffer_Flip(&g_staging.buffer_alloc); - - g_staging.buffers.count = 0; } -vk_combuf_t *R_VkStagingFrameEnd(void) { - R_VkStagingCommit(); - vk_combuf_t *current = g_staging.current; - - if (current) { - R_VkCombufEnd(g_staging.current); - } - - g_staging.current = NULL; - vk_combuf_t *const tmp = g_staging.combuf[0]; - g_staging.combuf[0] = g_staging.combuf[1]; - g_staging.combuf[1] = g_staging.combuf[2]; - g_staging.combuf[2] = tmp; - +uint32_t R_VkStagingGenerationCommit(void) { + ASSERT(g_staging.locked_count == 0); g_staging.stats.total_size = g_staging.stats.images_size + g_staging.stats.buffers_size; - - return current; + return g_staging.current_generation++; } r_vkstaging_region_t R_VkStagingLock(uint32_t size) { @@ -298,14 +85,18 @@ r_vkstaging_region_t R_VkStagingLock(uint32_t size) { DEBUG("Lock alignment=%d size=%d region=%d..%d", alignment, size, offset, offset + size); + g_staging.locked_count++; return (r_vkstaging_region_t){ - .handle = 31337, // FAKE + .handle.generation = g_staging.current_generation, .offset = offset, .buffer = g_staging.buffer.buffer, .ptr = (char*)g_staging.buffer.mapped + offset, }; } -void R_VkStagingReleaseAfterNextFrame(r_vkstaging_handle_t handle) { - // FIXME +void R_VkStagingUnlock(r_vkstaging_handle_t handle) { + ASSERT(g_staging.current_generation == handle.generation); + ASSERT(g_staging.locked_count > 0); + g_staging.locked_count--; } + diff --git a/ref/vk/vk_staging.h b/ref/vk/vk_staging.h index a06843b4e9..331644d603 100644 --- a/ref/vk/vk_staging.h +++ b/ref/vk/vk_staging.h @@ -5,7 +5,9 @@ qboolean R_VkStagingInit(void); void R_VkStagingShutdown(void); -typedef int r_vkstaging_handle_t; +typedef struct { + uint32_t generation; +} r_vkstaging_handle_t; typedef struct { void *ptr; @@ -19,38 +21,11 @@ typedef struct { // Allocate CPU-accessible memory in staging buffer r_vkstaging_region_t R_VkStagingLock(uint32_t size); -// Release when next frame is done -// TODO synch with specific combuf: void R_VkStagingRelease(r_vkstaging_handle_t handle, uint32_t gen); -void R_VkStagingReleaseAfterNextFrame(r_vkstaging_handle_t handle); - - -typedef struct { - void *ptr; - r_vkstaging_handle_t handle; -} vk_staging_region_t; - -// Allocate region for uploadting to buffer -typedef struct { - VkBuffer buffer; - uint32_t offset; - uint32_t size; - uint32_t alignment; -} vk_staging_buffer_args_t; -vk_staging_region_t R_VkStagingLockForBuffer(vk_staging_buffer_args_t args); - // Mark allocated region as ready for upload void R_VkStagingUnlock(r_vkstaging_handle_t handle); -// Append copy commands to command buffer. -struct vk_combuf_s* R_VkStagingCommit(void); - -// Mark previous frame data as uploaded and safe to use. -void R_VkStagingFrameBegin(void); - -// Uploads staging contents and returns the command buffer ready to be submitted. -// Can return NULL if there's nothing to upload. -struct vk_combuf_s *R_VkStagingFrameEnd(void); +// Finalize current generation, return its tag for R_VkStagingGenerationRelease() call +uint32_t R_VkStagingGenerationCommit(void); -// Commit all staging data into current cmdbuf, submit it and wait for completion. 
-// Needed for CPU-GPU sync -void R_VkStagingFlushSync( void ); +// Free all data for generation tag (returned by commit) +void R_VkStagingGenerationRelease(uint32_t gen);
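The net effect of the staging rewrite is a generation-based lifecycle replacing the old flush-and-wait flow. A minimal per-frame sketch, assuming a two-frames-in-flight loop like vk_framectl.c's (data and size are placeholders):

	// Frame begin: the GPU is known to be done with this frame slot,
	// so the staging generation it committed last time can be recycled.
	R_VkStagingGenerationRelease(frame->staging_generation_tag);

	// During the frame: lock, fill and unlock any number of regions.
	const r_vkstaging_region_t reg = R_VkStagingLock(size);
	memcpy(reg.ptr, data, size);
	R_VkStagingUnlock(reg.handle);

	// Frame submit: every lock must have been released by now.
	frame->staging_generation_tag = R_VkStagingGenerationCommit();

Note that R_VkStagingGenerationRelease() asserts gen == current_generation - 1, i.e. exactly one generation may be in flight, which matches the single flip of the underlying r_flipping_buffer_t.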
From 85c3cb17f023fa39815da3a8dca4bed624004144 Mon Sep 17 00:00:00 2001 From: Ivan Avdeev Date: Fri, 6 Dec 2024 21:53:39 -0500 Subject: [PATCH 14/62] vk: fix compilation errors; up vk to 1.3 and use synchronization2 --- ref/vk/vk_combuf.c | 6 ++++-- ref/vk/vk_core.c | 9 ++++++++- 2 files changed, 12 insertions(+), 3 deletions(-)
diff --git a/ref/vk/vk_combuf.c b/ref/vk/vk_combuf.c index 66960fe57d..ccb88304f4 100644 --- a/ref/vk/vk_combuf.c +++ b/ref/vk/vk_combuf.c @@ -135,7 +135,7 @@ static const char* myStrdup(const char *src) { return ret; } -#define ACCESS_WRITE_BITS 0 \ +#define ACCESS_WRITE_BITS (0 \ | VK_ACCESS_2_SHADER_WRITE_BIT \ | VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT \ | VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT \ @@ -144,8 +144,9 @@ static const char* myStrdup(const char *src) { | VK_ACCESS_2_MEMORY_WRITE_BIT \ | VK_ACCESS_2_SHADER_STORAGE_WRITE_BIT \ | VK_ACCESS_2_ACCELERATION_STRUCTURE_WRITE_BIT_KHR \ + ) -#define ACCESS_READ_BITS 0 \ +#define ACCESS_READ_BITS (0 \ | VK_ACCESS_2_INDIRECT_COMMAND_READ_BIT \ | VK_ACCESS_2_INDEX_READ_BIT \ | VK_ACCESS_2_VERTEX_ATTRIBUTE_READ_BIT \ @@ -160,6 +161,7 @@ static const char* myStrdup(const char *src) { | VK_ACCESS_2_SHADER_SAMPLED_READ_BIT \ | VK_ACCESS_2_SHADER_STORAGE_READ_BIT \ | VK_ACCESS_2_ACCELERATION_STRUCTURE_READ_BIT_KHR \ + ) #define ACCESS_KNOWN_BITS (ACCESS_WRITE_BITS | ACCESS_READ_BITS)
diff --git a/ref/vk/vk_core.c b/ref/vk/vk_core.c index fd4df34266..677ec40b37 100644 --- a/ref/vk/vk_core.c +++ b/ref/vk/vk_core.c @@ -189,7 +189,7 @@ static qboolean createInstance( void ) // TODO support versions 1.0 and 1.1 for simple traditional rendering // This would require using older physical device features and props query structures // .apiVersion = vk_core.rtx ? VK_API_VERSION_1_2 : VK_API_VERSION_1_1, - .apiVersion = VK_API_VERSION_1_2, + .apiVersion = VK_API_VERSION_1_3, .applicationVersion = VK_MAKE_VERSION(0, 0, 0), // TODO .engineVersion = VK_MAKE_VERSION(0, 0, 0), .pApplicationName = "", @@ -572,6 +572,13 @@ static qboolean createDevice( void ) { head = NULL; } + VkPhysicalDeviceVulkan13Features vk13_features = { + .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_FEATURES, + .pNext = head, + .synchronization2 = VK_TRUE, + }; + head = &vk13_features; + VkPhysicalDeviceFeatures2 features = { .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2, .pNext = head,
From 62f140a5cd3897655226bf0609a6db8eac9ede57 Mon Sep 17 00:00:00 2001 From: Ivan Avdeev Date: Fri, 6 Dec 2024 21:55:27 -0500 Subject: [PATCH 15/62] vk: start using the new combuf-barriers thing It renders quite a few traditional frames, but then fails with cross-cmdbuf sync validation errors --- ref/vk/vk_framectl.c | 21 ++++++++++++++-- ref/vk/vk_geometry.c | 4 +-- ref/vk/vk_geometry.h | 5 +--- ref/vk/vk_ray_accel.c | 57 +++++++++++++++++++++++-------------------- ref/vk/vk_render.c | 25 ++++++++++++------- ref/vk/vk_render.h | 5 ++-- 6 files changed, 70 insertions(+), 47 deletions(-)
diff --git a/ref/vk/vk_framectl.c b/ref/vk/vk_framectl.c index 838ef61402..5ab6d66ca8 100644 --- a/ref/vk/vk_framectl.c +++ b/ref/vk/vk_framectl.c @@ -11,6 +11,9 @@ #include "vk_commandpool.h" #include "vk_combuf.h" +#include "vk_buffer.h" +#include "vk_geometry.h" + #include "arrays.h" #include "profiler.h" #include "r_speeds.h" @@ -306,12 +309,26 @@ static void enqueueRendering( vk_combuf_t* combuf, qboolean draw ) { R_VkImageUploadCommit(combuf, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | (vk_frame.rtx_enabled ? VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT : 0)); const VkCommandBuffer cmdbuf = combuf->cmdbuf; - VK_Render_FIXME_Barrier(cmdbuf); if (vk_frame.rtx_enabled) VK_RenderEndRTX( combuf, g_frame.current.framebuffer.view, g_frame.current.framebuffer.image, g_frame.current.framebuffer.width, g_frame.current.framebuffer.height ); if (draw) { + // FIXME: how to do this properly before render pass? + // Needed to avoid VUID-vkCmdCopyBuffer-renderpass + vk_buffer_t* const geom = R_GeometryBuffer_Get(); + R_VkBufferStagingCommit(geom, combuf); + R_VkCombufIssueBarrier(combuf, (r_vkcombuf_barrier_t){ + .stage = VK_PIPELINE_STAGE_2_VERTEX_INPUT_BIT, + .buffers = { + .count = 1, + .items = &(r_vkcombuf_barrier_buffer_t){ + .buffer = geom, + .access = VK_ACCESS_2_INDEX_READ_BIT | VK_ACCESS_2_VERTEX_ATTRIBUTE_READ_BIT, + }, + }, + }); + const VkRenderPassBeginInfo rpbi = { .sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, .renderPass = vk_frame.rtx_enabled ?
vk_frame.render_pass.after_ray_tracing : vk_frame.render_pass.raster, @@ -338,7 +355,7 @@ static void enqueueRendering( vk_combuf_t* combuf, qboolean draw ) { } if (!vk_frame.rtx_enabled) - VK_RenderEnd( cmdbuf, draw, + VK_RenderEnd( combuf, draw, g_frame.current.framebuffer.width, g_frame.current.framebuffer.height, g_frame.current.index ); diff --git a/ref/vk/vk_geometry.c b/ref/vk/vk_geometry.c index e1a0e52afb..886ca80a1a 100644 --- a/ref/vk/vk_geometry.c +++ b/ref/vk/vk_geometry.c @@ -197,6 +197,6 @@ void R_GeometryBuffer_Flip(void) { R_BlocksClearOnce(&g_geom.alloc); } -VkBuffer R_GeometryBuffer_Get(void) { - return g_geom.buffer.buffer; +vk_buffer_t* R_GeometryBuffer_Get(void) { + return &g_geom.buffer; } diff --git a/ref/vk/vk_geometry.h b/ref/vk/vk_geometry.h index 6799ad833d..3e73b44847 100644 --- a/ref/vk/vk_geometry.h +++ b/ref/vk/vk_geometry.h @@ -1,7 +1,6 @@ #pragma once #include "vk_common.h" #include "r_block.h" -#include "vk_staging.h" #include "vk_buffer.h" // FIXME vk_buffer_locked_t should not be exposed #include "vk_core.h" @@ -95,6 +94,4 @@ void R_GeometryBuffer_Shutdown(void); void R_GeometryBuffer_Flip(void); -// FIXME is there a better way? -VkBuffer R_GeometryBuffer_Get(void); - +vk_buffer_t* R_GeometryBuffer_Get(void); diff --git a/ref/vk/vk_ray_accel.c b/ref/vk/vk_ray_accel.c index b94eccf1c6..a1b5c14323 100644 --- a/ref/vk/vk_ray_accel.c +++ b/ref/vk/vk_ray_accel.c @@ -124,18 +124,26 @@ static VkDeviceAddress getAccelAddress(VkAccelerationStructureKHR as) { return vkGetAccelerationStructureDeviceAddressKHR(vk_core.device, &asdai); } -static qboolean buildAccel(VkBuffer geometry_buffer, VkAccelerationStructureBuildGeometryInfoKHR *build_info, uint32_t scratch_buffer_size, const VkAccelerationStructureBuildRangeInfoKHR *build_ranges) { - // FIXME this is definitely not the right place. 
We should upload everything in bulk, and only then build blases in bulk too - //vk_combuf_t *const combuf = R_VkStagingCommit(); - ASSERT(!"AS build is broken, needs to be rewritten to support the new sync"); - return false; -#if 0 +static qboolean buildAccel(vk_combuf_t* combuf, VkAccelerationStructureBuildGeometryInfoKHR *build_info, uint32_t scratch_buffer_size, const VkAccelerationStructureBuildRangeInfoKHR *build_ranges) { + vk_buffer_t* const geom = R_GeometryBuffer_Get(); + R_VkBufferStagingCommit(geom, combuf); + R_VkCombufIssueBarrier(combuf, (r_vkcombuf_barrier_t){ + .stage = VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_BUILD_BIT_KHR, + .buffers = { + .count = 1, + .items = &(r_vkcombuf_barrier_buffer_t){ + .buffer = geom, + .access = VK_ACCESS_2_ACCELERATION_STRUCTURE_READ_BIT_KHR, + }, + }, + }); + { const VkBufferMemoryBarrier bmb[] = { { .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT, .dstAccessMask = VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_KHR | VK_ACCESS_SHADER_READ_BIT, // FIXME - .buffer = geometry_buffer, + .buffer = geom->buffer, .offset = 0, // FIXME .size = VK_WHOLE_SIZE, // FIXME } }; @@ -170,11 +178,11 @@ static qboolean buildAccel(VkBuffer geometry_buffer, VkAccelerationStructureBuil scope_id = R_VkGpuScope_Register("build_as"); const int begin_index = R_VkCombufScopeBegin(combuf, scope_id); const VkAccelerationStructureBuildRangeInfoKHR *p_build_ranges = build_ranges; + // FIXME upload everything in bulk, and only then build blases in bulk too vkCmdBuildAccelerationStructuresKHR(combuf->cmdbuf, 1, build_info, &p_build_ranges); R_VkCombufScopeEnd(combuf, begin_index, VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR); return true; -#endif } // TODO split this into smaller building blocks in a separate module @@ -220,8 +228,7 @@ qboolean createOrUpdateAccelerationStructure(vk_combuf_t *combuf, const as_build ASSERT(*args->inout_size >= build_size.accelerationStructureSize); build_info.dstAccelerationStructure = *args->p_accel; - const VkBuffer geometry_buffer = R_GeometryBuffer_Get(); - return buildAccel(geometry_buffer, &build_info, build_size.buildScratchSize, args->build_ranges); + return buildAccel(combuf, &build_info, build_size.buildScratchSize, args->build_ranges); } static void createTlas( vk_combuf_t *combuf, VkDeviceAddress instances_addr ) { @@ -303,24 +310,20 @@ static qboolean blasPrepareBuild(struct rt_blas_s *blas, VkDeviceAddress geometr static void buildBlases(vk_combuf_t *combuf) { (void)(combuf); - const VkBuffer geometry_buffer = R_GeometryBuffer_Get(); - const VkDeviceAddress geometry_addr = R_VkBufferGetDeviceAddress(geometry_buffer); + vk_buffer_t* const geom = R_GeometryBuffer_Get(); + R_VkBufferStagingCommit(geom, combuf); + R_VkCombufIssueBarrier(combuf, (r_vkcombuf_barrier_t){ + .stage = VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_BUILD_BIT_KHR, + .buffers = { + .count = 1, + .items = &(r_vkcombuf_barrier_buffer_t){ + .buffer = geom, + .access = VK_ACCESS_2_ACCELERATION_STRUCTURE_READ_BIT_KHR, + }, + }, + }); - // TODO remove, should be handled by render graph - { - const VkBufferMemoryBarrier bmb[] = { { - .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, - .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT, - .dstAccessMask = VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_KHR | VK_ACCESS_SHADER_READ_BIT, - .buffer = geometry_buffer, - .offset = 0, - .size = VK_WHOLE_SIZE, - } }; - vkCmdPipelineBarrier(combuf->cmdbuf, - VK_PIPELINE_STAGE_TRANSFER_BIT, - 
VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR | VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR, - 0, 0, NULL, COUNTOF(bmb), bmb, 0, NULL); - } + const VkDeviceAddress geometry_addr = R_VkBufferGetDeviceAddress(geom->buffer); for (int i = 0; i < g_accel.build.blas.count; ++i) { rt_blas_t *const blas = g_accel.build.blas.items[i]; diff --git a/ref/vk/vk_render.c b/ref/vk/vk_render.c index 2505f30f20..6b3238ccf2 100644 --- a/ref/vk/vk_render.c +++ b/ref/vk/vk_render.c @@ -3,6 +3,7 @@ #include "vk_core.h" #include "vk_buffer.h" #include "vk_geometry.h" +#include "vk_combuf.h" #include "vk_const.h" #include "vk_common.h" #include "vk_cvar.h" @@ -658,9 +659,10 @@ static void debugBarrier( VkCommandBuffer cmdbuf, VkBuffer buf) { } */ +/* OBSOLETE, remove void VK_Render_FIXME_Barrier( VkCommandBuffer cmdbuf ) { - const VkBuffer geom_buffer = R_GeometryBuffer_Get(); - //debugBarrier(cmdbuf, geom_buffer); + const VkBuffer geom = R_GeometryBuffer_Get(); + //debugBarrier(cmdbuf, geom); // FIXME: this should be automatic and dynamically depend on actual usage, resolving this with render graph { const VkBufferMemoryBarrier bmb[] = { { @@ -670,7 +672,7 @@ void VK_Render_FIXME_Barrier( VkCommandBuffer cmdbuf ) { = VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | (vk_core.rtx ? ( VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_KHR | VK_ACCESS_SHADER_READ_BIT) : 0), - .buffer = geom_buffer, + .buffer = geom, .offset = 0, .size = VK_WHOLE_SIZE, } }; @@ -684,12 +686,15 @@ void VK_Render_FIXME_Barrier( VkCommandBuffer cmdbuf ) { 0, 0, NULL, ARRAYSIZE(bmb), bmb, 0, NULL); } } +*/ -void VK_RenderEnd( VkCommandBuffer cmdbuf, qboolean draw, uint32_t width, uint32_t height, int frame_index ) +void VK_RenderEnd( vk_combuf_t* combuf, qboolean draw, uint32_t width, uint32_t height, int frame_index ) { if (!draw) return; + VkCommandBuffer cmdbuf = combuf->cmdbuf; + // TODO we can sort collected draw commands for more efficient and correct rendering // that requires adding info about distance to camera for correct order-dependent blending @@ -712,10 +717,10 @@ void VK_RenderEnd( VkCommandBuffer cmdbuf, qboolean draw, uint32_t width, uint32 ASSERT(!g_render_state.current_frame_is_ray_traced); { - const VkBuffer geom_buffer = R_GeometryBuffer_Get(); + vk_buffer_t* const geom = R_GeometryBuffer_Get(); const VkDeviceSize offset = 0; - vkCmdBindVertexBuffers(cmdbuf, 0, 1, &geom_buffer, &offset); - vkCmdBindIndexBuffer(cmdbuf, geom_buffer, 0, VK_INDEX_TYPE_UINT16); + vkCmdBindVertexBuffers(cmdbuf, 0, 1, &geom->buffer, &offset); + vkCmdBindIndexBuffer(cmdbuf, geom->buffer, 0, VK_INDEX_TYPE_UINT16); } for (int i = 0; i < g_render_state.num_draw_commands; ++i) { @@ -849,9 +854,11 @@ void VK_RenderDebugLabelEnd( void ) void VK_RenderEndRTX( struct vk_combuf_s* combuf, VkImageView img_dst_view, VkImage img_dst, uint32_t w, uint32_t h ) { - const VkBuffer geom_buffer = R_GeometryBuffer_Get(); + vk_buffer_t *const geom = R_GeometryBuffer_Get(); ASSERT(vk_core.rtx); + R_VkBufferStagingCommit(geom, combuf); + { const vk_ray_frame_render_args_t args = { .combuf = combuf, @@ -866,7 +873,7 @@ void VK_RenderEndRTX( struct vk_combuf_s* combuf, VkImageView img_dst_view, VkIm .view = &g_camera.viewMatrix, .geometry_data = { - .buffer = geom_buffer, + .buffer = geom->buffer, .size = VK_WHOLE_SIZE, }, diff --git a/ref/vk/vk_render.h b/ref/vk/vk_render.h index e9a5747949..cc871b338a 100644 --- a/ref/vk/vk_render.h +++ b/ref/vk/vk_render.h @@ -165,8 +165,7 @@ void VK_RenderDebugLabelBegin( const char *label ); void 
VK_RenderDebugLabelEnd( void ); void VK_RenderBegin( qboolean ray_tracing ); -void VK_RenderEnd( VkCommandBuffer cmdbuf, qboolean draw, uint32_t width, uint32_t height, int frame_index ); +struct vk_combuf_s; +void VK_RenderEnd( struct vk_combuf_s*, qboolean draw, uint32_t width, uint32_t height, int frame_index ); struct vk_combuf_s; void VK_RenderEndRTX( struct vk_combuf_s* combuf, VkImageView img_dst_view, VkImage img_dst, uint32_t w, uint32_t h ); - -void VK_Render_FIXME_Barrier( VkCommandBuffer cmdbuf ); From 97ed9c2d549f778a863dbf755813222b34afc06a Mon Sep 17 00:00:00 2001 From: Ivan Avdeev Date: Sun, 8 Dec 2024 23:43:37 -0500 Subject: [PATCH 16/62] vk: rt: tweak output image staging to allow for a couple more frames before crash lol --- ref/vk/vk_rtx.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ref/vk/vk_rtx.c b/ref/vk/vk_rtx.c index eb717777eb..5b80084344 100644 --- a/ref/vk/vk_rtx.c +++ b/ref/vk/vk_rtx.c @@ -303,7 +303,7 @@ static void performTracing( vk_combuf_t *combuf, const perform_tracing_args_t* a { const r_vkimage_blit_args blit_args = { - .in_stage = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, + .in_stage = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, .src = { .image = g_rtx.mainpipe_out->image.image, .width = args->frame_width, @@ -316,7 +316,7 @@ static void performTracing( vk_combuf_t *combuf, const perform_tracing_args_t* a .width = args->render_args->dst.width, .height = args->render_args->dst.height, .oldLayout = VK_IMAGE_LAYOUT_UNDEFINED, - .srcAccessMask = 0, + .srcAccessMask = VK_ACCESS_MEMORY_READ_BIT, }, }; @@ -557,7 +557,7 @@ void VK_RayFrameEnd(const vk_ray_frame_render_args_t* args) if (g_ray_model_state.frame.instances_count == 0) { const r_vkimage_blit_args blit_args = { - .in_stage = VK_PIPELINE_STAGE_TRANSFER_BIT, + .in_stage = VK_PIPELINE_STAGE_TRANSFER_BIT | VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, .src = { .image = g_rtx.mainpipe_out->image.image, .width = frame_width, @@ -570,7 +570,7 @@ void VK_RayFrameEnd(const vk_ray_frame_render_args_t* args) .width = args->dst.width, .height = args->dst.height, .oldLayout = VK_IMAGE_LAYOUT_UNDEFINED, - .srcAccessMask = 0, + .srcAccessMask = VK_ACCESS_MEMORY_READ_BIT, }, }; From 239f374a0a538acdfcf9c2c4ac992366ceafcc0c Mon Sep 17 00:00:00 2001 From: Ivan Avdeev Date: Mon, 9 Dec 2024 14:44:09 -0500 Subject: [PATCH 17/62] oh god why --- .editorconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.editorconfig b/.editorconfig index 77807af0d6..84af569000 100644 --- a/.editorconfig +++ b/.editorconfig @@ -2,7 +2,7 @@ root = true [*] -charset = latin1 +charset = utf-8 end_of_line = lf indent_style = tab insert_final_newline = true From 43fc05f8a5aca3069a1f21732a135f925a12be0c Mon Sep 17 00:00:00 2001 From: Ivan Avdeev Date: Mon, 9 Dec 2024 18:18:11 -0500 Subject: [PATCH 18/62] vk: add a pile of new staging/sync debug log messages - print out vkQueueSubmit with its semaphores - print out buffer barriers - print out when buffer copy submission happens - print various ref_vk initialization stages --- ref/vk/vk_buffer.c | 3 +- ref/vk/vk_combuf.c | 118 +++++++++++++++++++++++++++++++++++++++++++ ref/vk/vk_core.c | 2 + ref/vk/vk_framectl.c | 15 ++++++ ref/vk/vk_logs.h | 2 + ref/vk/vk_rmain.c | 3 ++ ref/vk/vk_staging.c | 7 ++- 7 files changed, 148 insertions(+), 2 deletions(-) diff --git a/ref/vk/vk_buffer.c b/ref/vk/vk_buffer.c index 6412e70690..d38f8bb39d 100644 --- a/ref/vk/vk_buffer.c +++ b/ref/vk/vk_buffer.c @@ -164,7 +164,7 @@ 
static r_vk_staging_buffer_t *findOrCreateStagingSlotForBuffer(vk_buffer_t *buf) } vk_buffer_locked_t R_VkBufferLock(vk_buffer_t *buf, vk_buffer_lock_t lock) { - DEBUG("Lock buf=%p size=%d region=%d..%d", buf, lock.size, lock.offset, lock.offset + lock.size); + //DEBUG("Lock buf=%p size=%d region=%d..%d", buf, lock.size, lock.offset, lock.offset + lock.size); r_vk_staging_buffer_t *const stb = findOrCreateStagingSlotForBuffer(buf); ASSERT(stb); @@ -219,6 +219,7 @@ void R_VkBufferStagingCommit(vk_buffer_t *buf, struct vk_combuf_s *combuf) { const VkCommandBuffer cmdbuf = combuf->cmdbuf; DEBUG_NV_CHECKPOINTF(cmdbuf, "staging dst_buffer=%p count=%d", buf->buffer, stb->regions.count); + //DEBUG("buffer=%p copy %d regions from staging buffer=%p", buf->buffer, stb->regions.count, stb->staging); vkCmdCopyBuffer(cmdbuf, stb->staging, buf->buffer, stb->regions.count, stb->regions.items); stb->regions.count = 0; diff --git a/ref/vk/vk_combuf.c b/ref/vk/vk_combuf.c index ccb88304f4..e204ad3c46 100644 --- a/ref/vk/vk_combuf.c +++ b/ref/vk/vk_combuf.c @@ -1,9 +1,12 @@ #include "vk_combuf.h" #include "vk_commandpool.h" #include "vk_buffer.h" +#include "vk_logs.h" #include "profiler.h" +#define LOG_MODULE combuf + #define MAX_COMMANDBUFFERS 6 #define MAX_QUERY_COUNT 128 @@ -165,6 +168,109 @@ static const char* myStrdup(const char *src) { #define ACCESS_KNOWN_BITS (ACCESS_WRITE_BITS | ACCESS_READ_BITS) +#define PRINT_FLAG(mask, flag) \ + if ((flag) & (mask)) DEBUG("%s%s", prefix, #flag) +static void printAccessMask(const char *prefix, VkAccessFlags2 access) { + PRINT_FLAG(access, VK_ACCESS_2_INDIRECT_COMMAND_READ_BIT); + PRINT_FLAG(access, VK_ACCESS_2_INDEX_READ_BIT); + PRINT_FLAG(access, VK_ACCESS_2_VERTEX_ATTRIBUTE_READ_BIT); + PRINT_FLAG(access, VK_ACCESS_2_UNIFORM_READ_BIT); + PRINT_FLAG(access, VK_ACCESS_2_INPUT_ATTACHMENT_READ_BIT); + PRINT_FLAG(access, VK_ACCESS_2_SHADER_READ_BIT); + PRINT_FLAG(access, VK_ACCESS_2_SHADER_WRITE_BIT); + PRINT_FLAG(access, VK_ACCESS_2_COLOR_ATTACHMENT_READ_BIT); + PRINT_FLAG(access, VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT); + PRINT_FLAG(access, VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_READ_BIT); + PRINT_FLAG(access, VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT); + PRINT_FLAG(access, VK_ACCESS_2_TRANSFER_READ_BIT); + PRINT_FLAG(access, VK_ACCESS_2_TRANSFER_WRITE_BIT); + PRINT_FLAG(access, VK_ACCESS_2_HOST_READ_BIT); + PRINT_FLAG(access, VK_ACCESS_2_HOST_WRITE_BIT); + PRINT_FLAG(access, VK_ACCESS_2_MEMORY_READ_BIT); + PRINT_FLAG(access, VK_ACCESS_2_MEMORY_WRITE_BIT); + PRINT_FLAG(access, VK_ACCESS_2_SHADER_SAMPLED_READ_BIT); + PRINT_FLAG(access, VK_ACCESS_2_SHADER_STORAGE_READ_BIT); + PRINT_FLAG(access, VK_ACCESS_2_SHADER_STORAGE_WRITE_BIT); + PRINT_FLAG(access, VK_ACCESS_2_VIDEO_DECODE_READ_BIT_KHR); + PRINT_FLAG(access, VK_ACCESS_2_VIDEO_DECODE_WRITE_BIT_KHR); + PRINT_FLAG(access, VK_ACCESS_2_VIDEO_ENCODE_READ_BIT_KHR); + PRINT_FLAG(access, VK_ACCESS_2_VIDEO_ENCODE_WRITE_BIT_KHR); + PRINT_FLAG(access, VK_ACCESS_2_TRANSFORM_FEEDBACK_WRITE_BIT_EXT); + PRINT_FLAG(access, VK_ACCESS_2_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT); + PRINT_FLAG(access, VK_ACCESS_2_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT); + PRINT_FLAG(access, VK_ACCESS_2_CONDITIONAL_RENDERING_READ_BIT_EXT); + PRINT_FLAG(access, VK_ACCESS_2_COMMAND_PREPROCESS_READ_BIT_NV); + PRINT_FLAG(access, VK_ACCESS_2_COMMAND_PREPROCESS_WRITE_BIT_NV); + PRINT_FLAG(access, VK_ACCESS_2_COMMAND_PREPROCESS_READ_BIT_EXT); + PRINT_FLAG(access, VK_ACCESS_2_COMMAND_PREPROCESS_WRITE_BIT_EXT); + PRINT_FLAG(access, 
VK_ACCESS_2_FRAGMENT_SHADING_RATE_ATTACHMENT_READ_BIT_KHR); + PRINT_FLAG(access, VK_ACCESS_2_SHADING_RATE_IMAGE_READ_BIT_NV); + PRINT_FLAG(access, VK_ACCESS_2_ACCELERATION_STRUCTURE_READ_BIT_KHR); + PRINT_FLAG(access, VK_ACCESS_2_ACCELERATION_STRUCTURE_WRITE_BIT_KHR); + PRINT_FLAG(access, VK_ACCESS_2_ACCELERATION_STRUCTURE_READ_BIT_NV); + PRINT_FLAG(access, VK_ACCESS_2_ACCELERATION_STRUCTURE_WRITE_BIT_NV); + PRINT_FLAG(access, VK_ACCESS_2_FRAGMENT_DENSITY_MAP_READ_BIT_EXT); + PRINT_FLAG(access, VK_ACCESS_2_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT); + PRINT_FLAG(access, VK_ACCESS_2_DESCRIPTOR_BUFFER_READ_BIT_EXT); + PRINT_FLAG(access, VK_ACCESS_2_INVOCATION_MASK_READ_BIT_HUAWEI); + PRINT_FLAG(access, VK_ACCESS_2_SHADER_BINDING_TABLE_READ_BIT_KHR); + PRINT_FLAG(access, VK_ACCESS_2_MICROMAP_READ_BIT_EXT); + PRINT_FLAG(access, VK_ACCESS_2_MICROMAP_WRITE_BIT_EXT); + PRINT_FLAG(access, VK_ACCESS_2_OPTICAL_FLOW_READ_BIT_NV); + PRINT_FLAG(access, VK_ACCESS_2_OPTICAL_FLOW_WRITE_BIT_NV); +} + +static void printStageMask(const char *prefix, VkPipelineStageFlags2 stages) { + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT); + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_DRAW_INDIRECT_BIT); + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_VERTEX_INPUT_BIT); + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_VERTEX_SHADER_BIT); + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_TESSELLATION_CONTROL_SHADER_BIT); + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_TESSELLATION_EVALUATION_SHADER_BIT); + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_GEOMETRY_SHADER_BIT); + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT); + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT); + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT); + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT); + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT); + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_ALL_TRANSFER_BIT); + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_TRANSFER_BIT); + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT); + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_HOST_BIT); + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_ALL_GRAPHICS_BIT); + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT); + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_COPY_BIT); + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_RESOLVE_BIT); + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_BLIT_BIT); + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_CLEAR_BIT); + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_INDEX_INPUT_BIT); + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_VERTEX_ATTRIBUTE_INPUT_BIT); + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_PRE_RASTERIZATION_SHADERS_BIT); + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_VIDEO_DECODE_BIT_KHR); + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_VIDEO_ENCODE_BIT_KHR); + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_TRANSFORM_FEEDBACK_BIT_EXT); + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_CONDITIONAL_RENDERING_BIT_EXT); + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_COMMAND_PREPROCESS_BIT_NV); + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_COMMAND_PREPROCESS_BIT_EXT); + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR); + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_SHADING_RATE_IMAGE_BIT_NV); + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_BUILD_BIT_KHR); + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_RAY_TRACING_SHADER_BIT_KHR); + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_RAY_TRACING_SHADER_BIT_NV); + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_BUILD_BIT_NV); + PRINT_FLAG(stages, 
VK_PIPELINE_STAGE_2_FRAGMENT_DENSITY_PROCESS_BIT_EXT); + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_TASK_SHADER_BIT_NV); + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_MESH_SHADER_BIT_NV); + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_TASK_SHADER_BIT_EXT); + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_MESH_SHADER_BIT_EXT); + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_SUBPASS_SHADER_BIT_HUAWEI); + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_INVOCATION_MASK_BIT_HUAWEI); + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_COPY_BIT_KHR); + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_MICROMAP_BUILD_BIT_EXT); + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_CLUSTER_CULLING_SHADER_BIT_HUAWEI); + PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_OPTICAL_FLOW_BIT_NV); +} + void R_VkCombufIssueBarrier(vk_combuf_t* combuf, r_vkcombuf_barrier_t bar) { vk_combuf_impl_t *const cb = (vk_combuf_impl_t*)combuf; ASSERT(bar.images.count == 0 && "TODO"); @@ -242,6 +348,18 @@ void R_VkCombufIssueBarrier(vk_combuf_t* combuf, r_vkcombuf_barrier_t bar) { continue; } + if (LOG_VERBOSE) { + DEBUG(" buf[%d]: buf=%p barrier:", i, buf->buffer); + DEBUG(" srcAccessMask = %x", bmb.srcAccessMask); + printAccessMask(" ", bmb.srcAccessMask); + DEBUG(" srcStageMask = %x", bmb.srcStageMask); + printStageMask(" ", bmb.srcStageMask); + DEBUG(" dstAccessMask = %x", bmb.dstAccessMask); + printAccessMask(" ", bmb.dstAccessMask); + DEBUG(" dstStageMask = %x", bmb.dstStageMask); + printStageMask(" ", bmb.dstStageMask); + } + BOUNDED_ARRAY_APPEND_ITEM(buffer_barriers, bmb); } diff --git a/ref/vk/vk_core.c b/ref/vk/vk_core.c index 677ec40b37..8859330765 100644 --- a/ref/vk/vk_core.c +++ b/ref/vk/vk_core.c @@ -741,6 +741,7 @@ static const r_vk_module_t *const modules[] = { qboolean R_VkInit( void ) { // FIXME !!!! handle initialization errors properly: destroy what has already been created + INFO("R_VkInit"); vk_core.validate = !!gEngine.Sys_CheckParm("-vkvalidate"); vk_core.debug = vk_core.validate || !!(gEngine.Sys_CheckParm("-vkdebug") || gEngine.Sys_CheckParm("-gldebug")); @@ -862,6 +863,7 @@ qboolean R_VkInit( void ) R_SpriteInit(); R_BeamInit(); + INFO("R_VkInit done"); return true; } diff --git a/ref/vk/vk_framectl.c b/ref/vk/vk_framectl.c index 5ab6d66ca8..8f2f02304b 100644 --- a/ref/vk/vk_framectl.c +++ b/ref/vk/vk_framectl.c @@ -10,6 +10,7 @@ #include "vk_staging.h" #include "vk_commandpool.h" #include "vk_combuf.h" +#include "vk_logs.h" #include "vk_buffer.h" #include "vk_geometry.h" @@ -22,6 +23,8 @@ #include +#define LOG_MODULE fctl + extern ref_globals_t *gpGlobals; vk_framectl_t vk_frame = {0}; @@ -404,6 +407,18 @@ static void submit( vk_combuf_t* combuf, qboolean wait, qboolean draw ) { BOUNDED_ARRAY_APPEND_ITEM(waitophores, prev_frame->sem_done2); BOUNDED_ARRAY_APPEND_ITEM(signalphores, frame->sem_done2); + DEBUG("submit: frame=%d, staging_tag=%u, combuf=%p, wait for semaphores[%d]={%p, %p}, signal semaphores[%d]={%p, %p}\n", + g_frame.current.index, + frame->staging_generation_tag, + frame->combuf->cmdbuf, + waitophores.count, + waitophores.items[0], + waitophores.items[1], + signalphores.count, + signalphores.items[0], + signalphores.items[1] + ); + const VkSubmitInfo subinfo = { .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO, .pNext = NULL, diff --git a/ref/vk/vk_logs.h b/ref/vk/vk_logs.h index 23c5e2edd9..91e671ed7d 100644 --- a/ref/vk/vk_logs.h +++ b/ref/vk/vk_logs.h @@ -18,6 +18,8 @@ X(img) \ X(staging) \ X(buf) \ + X(fctl) \ + X(combuf) \ enum { #define X(m) LogModule_##m, diff --git a/ref/vk/vk_rmain.c b/ref/vk/vk_rmain.c index 507c1de576..2ae89423ba 100644 
--- a/ref/vk/vk_rmain.c +++ b/ref/vk/vk_rmain.c @@ -693,5 +693,8 @@ int EXPORT GetRefAPI( int version, ref_interface_t *funcs, ref_api_t *engfuncs, memcpy( &gEngine, engfuncs, sizeof( ref_api_t )); gpGlobals = globals; + INFO("GetRefAPI version=%d (REF_API_VERSION=%d) funcs=%p engfuncs=%p globals=%p", + version, REF_API_VERSION, funcs, engfuncs, globals); + return REF_API_VERSION; } diff --git a/ref/vk/vk_staging.c b/ref/vk/vk_staging.c index f9f06afa80..e2b54add0a 100644 --- a/ref/vk/vk_staging.c +++ b/ref/vk/vk_staging.c @@ -68,11 +68,16 @@ static uint32_t allocateInRing(uint32_t size, uint32_t alignment) { } void R_VkStagingGenerationRelease(uint32_t gen) { - ASSERT(gen == (g_staging.current_generation - 1)); + DEBUG("Release: gen=%u current_gen=%u ring offsets=[%u, %u, %u]", gen, g_staging.current_generation, + g_staging.buffer_alloc.frame_offsets[0], + g_staging.buffer_alloc.frame_offsets[1], + g_staging.buffer_alloc.ring.head + ); R_FlippingBuffer_Flip(&g_staging.buffer_alloc); } uint32_t R_VkStagingGenerationCommit(void) { + DEBUG("Commit: locked_count=%d gen=%u", g_staging.locked_count, g_staging.current_generation); ASSERT(g_staging.locked_count == 0); g_staging.stats.total_size = g_staging.stats.images_size + g_staging.stats.buffers_size; return g_staging.current_generation++; From cede8ddeeea73a47890a1da1983b47e39df5b2b8 Mon Sep 17 00:00:00 2001 From: Ivan Avdeev Date: Mon, 9 Dec 2024 18:25:26 -0500 Subject: [PATCH 19/62] vk: fix serious submit synchronization issue Validation was complaining about an odd SYNC-HAZARD-WRITE-AFTER-READ (a missing buffer barrier) at the very beginning of a frame, while I thought that command buffers were properly serialized by semaphores. Turns out, `VkSubmitInfo::pWaitDstStageMask` should accompany each wait semaphore with its corresponding stage. Properly setting TOP_OF_PIPE for the wait semaphore of a previous submission fixes the complaint. --- ref/vk/vk_framectl.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/ref/vk/vk_framectl.c b/ref/vk/vk_framectl.c index 8f2f02304b..f9f3bdf260 100644 --- a/ref/vk/vk_framectl.c +++ b/ref/vk/vk_framectl.c @@ -390,21 +390,21 @@ static void submit( vk_combuf_t* combuf, qboolean wait, qboolean draw ) { BOUNDED_ARRAY_APPEND_ITEM(cmdbufs, cmdbuf); { - const VkPipelineStageFlags stageflags[] = { - VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, - VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, - }; // TODO for RT renderer we only touch framebuffer at the very end of rendering/cmdbuf. // Can we postpone waiting for framebuffer semaphore until we actually need it.
BOUNDED_ARRAY(VkSemaphore, waitophores, 2); + BOUNDED_ARRAY(VkPipelineStageFlags, wait_stageflags, 2); BOUNDED_ARRAY(VkSemaphore, signalphores, 2); if (draw) { BOUNDED_ARRAY_APPEND_ITEM(waitophores, frame->sem_framebuffer_ready); + BOUNDED_ARRAY_APPEND_ITEM(wait_stageflags, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT); + BOUNDED_ARRAY_APPEND_ITEM(signalphores, frame->sem_done); } BOUNDED_ARRAY_APPEND_ITEM(waitophores, prev_frame->sem_done2); + BOUNDED_ARRAY_APPEND_ITEM(wait_stageflags, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT); BOUNDED_ARRAY_APPEND_ITEM(signalphores, frame->sem_done2); DEBUG("submit: frame=%d, staging_tag=%u, combuf=%p, wait for semaphores[%d]={%p, %p}, signal semaphores[%d]={%p, %p}\n", @@ -419,18 +419,19 @@ static void submit( vk_combuf_t* combuf, qboolean wait, qboolean draw ) { signalphores.items[1] ); + ASSERT(waitophores.count == wait_stageflags.count); + const VkSubmitInfo subinfo = { .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO, .pNext = NULL, .waitSemaphoreCount = waitophores.count, .pWaitSemaphores = waitophores.items, - .pWaitDstStageMask = stageflags, + .pWaitDstStageMask = wait_stageflags.items, .commandBufferCount = cmdbufs.count, .pCommandBuffers = cmdbufs.items, .signalSemaphoreCount = signalphores.count, .pSignalSemaphores = signalphores.items, }; - //gEngine.Con_Printf("SYNC: wait for semaphore %d, signal semaphore %d\n", (g_frame.current.index + 1) % MAX_CONCURRENT_FRAMES, g_frame.current.index); XVK_CHECK(vkQueueSubmit(vk_core.queue, 1, &subinfo, frame->fence_done)); g_frame.current.phase = Phase_Submitted; } @@ -505,7 +506,6 @@ qboolean VK_FrameCtlInit( void ) // Signal first frame semaphore as done { - const VkPipelineStageFlags stageflags = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT; const VkSubmitInfo subinfo = { .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO, .pNext = NULL, @@ -513,7 +513,7 @@ qboolean VK_FrameCtlInit( void ) .pCommandBuffers = NULL, .waitSemaphoreCount = 0, .pWaitSemaphores = NULL, - .pWaitDstStageMask = &stageflags, + .pWaitDstStageMask = NULL, .signalSemaphoreCount = 1, .pSignalSemaphores = &g_frame.frames[0].sem_done2, }; From 2f6755cdfa6f76cfe1f9efa53ba192f0e5e94e7f Mon Sep 17 00:00:00 2001 From: Ivan 'provod' Avdeev Date: Mon, 9 Dec 2024 21:56:42 -0500 Subject: [PATCH 20/62] vk: remove duplicate stage flags from logs --- ref/vk/vk_combuf.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/ref/vk/vk_combuf.c b/ref/vk/vk_combuf.c index e204ad3c46..6124a6fa15 100644 --- a/ref/vk/vk_combuf.c +++ b/ref/vk/vk_combuf.c @@ -256,11 +256,7 @@ static void printStageMask(const char *prefix, VkPipelineStageFlags2 stages) { PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_SHADING_RATE_IMAGE_BIT_NV); PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_BUILD_BIT_KHR); PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_RAY_TRACING_SHADER_BIT_KHR); - PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_RAY_TRACING_SHADER_BIT_NV); - PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_BUILD_BIT_NV); PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_FRAGMENT_DENSITY_PROCESS_BIT_EXT); - PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_TASK_SHADER_BIT_NV); - PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_MESH_SHADER_BIT_NV); PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_TASK_SHADER_BIT_EXT); PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_MESH_SHADER_BIT_EXT); PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_SUBPASS_SHADER_BIT_HUAWEI); From 44da344fb1cb9dc5a6db2769171534dbf950a3fd Mon Sep 17 00:00:00 2001 From: Ivan 'provod' Avdeev Date: Mon, 9 Dec 2024 21:58:22 -0500 Subject: [PATCH 21/62] vk: issue index+vertex geom buf barrier for trad 
rendering only --- ref/vk/vk_framectl.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/ref/vk/vk_framectl.c b/ref/vk/vk_framectl.c index f9f3bdf260..6db73ca7a5 100644 --- a/ref/vk/vk_framectl.c +++ b/ref/vk/vk_framectl.c @@ -313,10 +313,9 @@ static void enqueueRendering( vk_combuf_t* combuf, qboolean draw ) { const VkCommandBuffer cmdbuf = combuf->cmdbuf; - if (vk_frame.rtx_enabled) + if (vk_frame.rtx_enabled) { VK_RenderEndRTX( combuf, g_frame.current.framebuffer.view, g_frame.current.framebuffer.image, g_frame.current.framebuffer.width, g_frame.current.framebuffer.height ); - - if (draw) { + } else { // FIXME: how to do this properly before render pass? // Needed to avoid VUID-vkCmdCopyBuffer-renderpass vk_buffer_t* const geom = R_GeometryBuffer_Get(); @@ -331,7 +330,9 @@ static void enqueueRendering( vk_combuf_t* combuf, qboolean draw ) { }, }, }); + } + if (draw) { const VkRenderPassBeginInfo rpbi = { .sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, .renderPass = vk_frame.rtx_enabled ? vk_frame.render_pass.after_ray_tracing : vk_frame.render_pass.raster, From 5d5f70de1ae7fb854f959035784fa822167d7d63 Mon Sep 17 00:00:00 2001 From: Ivan 'provod' Avdeev Date: Mon, 9 Dec 2024 22:16:31 -0500 Subject: [PATCH 22/62] vk: precalculate gamma table early at startup This is likely not the right way to do this. Address this when focusing on correct gamma overall for the traditional renderer. Currently this is here just to make it have some non-zero values early. --- engine/platform/sdl/vid_sdl.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/engine/platform/sdl/vid_sdl.c b/engine/platform/sdl/vid_sdl.c index a27eceaec6..61326f735a 100644 --- a/engine/platform/sdl/vid_sdl.c +++ b/engine/platform/sdl/vid_sdl.c @@ -861,7 +861,11 @@ qboolean VID_CreateWindow( int width, int height, window_mode_t window_mode ) if( !GL_UpdateContext( )) return false; - + } + else if( glw_state.context_type == REF_VULKAN ) + { + // FIXME this is probably not the correct place or way to do it, just copypasting GL stuff + VID_StartupGamma(); } #else // SDL_VERSION_ATLEAST( 2, 0, 0 ) From 0ce6da2b4529fec773180c4757c1ea2e0efc985d Mon Sep 17 00:00:00 2001 From: Ivan 'provod' Avdeev Date: Mon, 9 Dec 2024 22:19:57 -0500 Subject: [PATCH 23/62] ci: update vulkan sdk version --- .github/workflows/c-cpp.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/c-cpp.yml b/.github/workflows/c-cpp.yml index 4728662711..ef135cc9ac 100644 --- a/.github/workflows/c-cpp.yml +++ b/.github/workflows/c-cpp.yml @@ -1,5 +1,5 @@ name: Build & Deploy Engine -on: +on: push: paths-ignore: - '**.md' @@ -57,7 +57,7 @@ jobs: targetarch: i386 env: SDL_VERSION: 2.26.2 - VULKAN_SDK_VERSION: 1.3.239 + VULKAN_SDK_VERSION: 1.3.296 GH_CPU_ARCH: ${{ matrix.targetarch }} ANDROID_SDK_TOOLS_VER: 4333796 steps: From 51f120b3384339439c1b6caffc8745dcdaddba60 Mon Sep 17 00:00:00 2001 From: Ivan 'provod' Avdeev Date: Mon, 9 Dec 2024 22:38:07 -0500 Subject: [PATCH 24/62] ci: update ubuntu to 22.04 Previous 20.04 doesn't have the latest Vulkan SDK. Also print deps script commands verbosely for easier CI debugging.
--- .github/workflows/c-cpp.yml | 4 ++-- scripts/gha/deps_linux.sh | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/c-cpp.yml b/.github/workflows/c-cpp.yml index ef135cc9ac..1b397fc94a 100644 --- a/.github/workflows/c-cpp.yml +++ b/.github/workflows/c-cpp.yml @@ -21,10 +21,10 @@ jobs: fail-fast: false matrix: include: - - os: ubuntu-20.04 + - os: ubuntu-22.04 targetos: linux targetarch: amd64 - - os: ubuntu-20.04 + - os: ubuntu-22.04 targetos: linux targetarch: i386 # TODO enable and test ref_vk for it too diff --git a/scripts/gha/deps_linux.sh b/scripts/gha/deps_linux.sh index f0a05a2b82..fc8dc127ac 100755 --- a/scripts/gha/deps_linux.sh +++ b/scripts/gha/deps_linux.sh @@ -1,4 +1,5 @@ #!/bin/bash +set -x cd $GITHUB_WORKSPACE From 80f4069fd80414c22c188805ced82848b92f6718 Mon Sep 17 00:00:00 2001 From: Ivan 'provod' Avdeev Date: Mon, 9 Dec 2024 22:50:05 -0500 Subject: [PATCH 25/62] ci: don't forget to update ubuntu name you dummy --- scripts/gha/deps_linux.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/gha/deps_linux.sh b/scripts/gha/deps_linux.sh index fc8dc127ac..29cb9e1a96 100755 --- a/scripts/gha/deps_linux.sh +++ b/scripts/gha/deps_linux.sh @@ -31,7 +31,7 @@ mv SDL2-$SDL_VERSION SDL2_src # ref_vk requires Vulkan SDK wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | sudo apt-key add - -sudo wget -qO /etc/apt/sources.list.d/lunarg-vulkan-${VULKAN_SDK_VERSION}-focal.list https://packages.lunarg.com/vulkan/${VULKAN_SDK_VERSION}/lunarg-vulkan-${VULKAN_SDK_VERSION}-focal.list +sudo wget -qO /etc/apt/sources.list.d/lunarg-vulkan-${VULKAN_SDK_VERSION}-jammy.list https://packages.lunarg.com/vulkan/${VULKAN_SDK_VERSION}/lunarg-vulkan-${VULKAN_SDK_VERSION}-jammy.list sudo apt update [ "$ARCH" = "i386" ] && SUFFIX=":i386" || SUFFIX="" sudo apt install -y vulkan-sdk"$SUFFIX" From a9cf15bcfbeb6a8afde6b89cf0ee9be22d59a70e Mon Sep 17 00:00:00 2001 From: Ivan Avdeev Date: Mon, 9 Dec 2024 23:10:11 -0500 Subject: [PATCH 26/62] vk: fixup printf types to fix linux build --- ref/vk/vk_combuf.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ref/vk/vk_combuf.c b/ref/vk/vk_combuf.c index 6124a6fa15..e2743e6fc6 100644 --- a/ref/vk/vk_combuf.c +++ b/ref/vk/vk_combuf.c @@ -346,13 +346,13 @@ void R_VkCombufIssueBarrier(vk_combuf_t* combuf, r_vkcombuf_barrier_t bar) { if (LOG_VERBOSE) { DEBUG(" buf[%d]: buf=%p barrier:", i, buf->buffer); - DEBUG(" srcAccessMask = %x", bmb.srcAccessMask); + DEBUG(" srcAccessMask = %llx", (unsigned long long)bmb.srcAccessMask); printAccessMask(" ", bmb.srcAccessMask); - DEBUG(" srcStageMask = %x", bmb.srcStageMask); + DEBUG(" srcStageMask = %llx", (unsigned long long)bmb.srcStageMask); printStageMask(" ", bmb.srcStageMask); - DEBUG(" dstAccessMask = %x", bmb.dstAccessMask); + DEBUG(" dstAccessMask = %llx", (unsigned long long)bmb.dstAccessMask); printAccessMask(" ", bmb.dstAccessMask); - DEBUG(" dstStageMask = %x", bmb.dstStageMask); + DEBUG(" dstStageMask = %llx", (unsigned long long)bmb.dstStageMask); printStageMask(" ", bmb.dstStageMask); } From 116214c2c9bfc682b4c85797358294439c06ba5e Mon Sep 17 00:00:00 2001 From: Ivan Avdeev Date: Mon, 9 Dec 2024 23:16:08 -0500 Subject: [PATCH 27/62] vk: fixup printf for 32-bit linux --- ref/vk/vk_framectl.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/ref/vk/vk_framectl.c b/ref/vk/vk_framectl.c index 6db73ca7a5..b5418780eb 100644 --- a/ref/vk/vk_framectl.c +++ b/ref/vk/vk_framectl.c @@ -408,16 +408,16 
@@ static void submit( vk_combuf_t* combuf, qboolean wait, qboolean draw ) { BOUNDED_ARRAY_APPEND_ITEM(wait_stageflags, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT); BOUNDED_ARRAY_APPEND_ITEM(signalphores, frame->sem_done2); - DEBUG("submit: frame=%d, staging_tag=%u, combuf=%p, wait for semaphores[%d]={%p, %p}, signal semaphores[%d]={%p, %p}\n", + DEBUG("submit: frame=%d, staging_tag=%u, combuf=%p, wait for semaphores[%d]={%llx, %llx}, signal semaphores[%d]={%llx, %llx}\n", g_frame.current.index, frame->staging_generation_tag, frame->combuf->cmdbuf, waitophores.count, - waitophores.items[0], - waitophores.items[1], + (unsigned long long)waitophores.items[0], + (unsigned long long)waitophores.items[1], signalphores.count, - signalphores.items[0], - signalphores.items[1] + (unsigned long long)signalphores.items[0], + (unsigned long long)signalphores.items[1] ); ASSERT(waitophores.count == wait_stageflags.count); From 8afa9e71328663f290b58ad4c93189e0f1ff06a4 Mon Sep 17 00:00:00 2001 From: Ivan Avdeev Date: Tue, 10 Dec 2024 09:23:56 -0500 Subject: [PATCH 28/62] vk: yet another 32 bit linux fixup --- ref/vk/vk_combuf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ref/vk/vk_combuf.c b/ref/vk/vk_combuf.c index e2743e6fc6..f4e8ca5052 100644 --- a/ref/vk/vk_combuf.c +++ b/ref/vk/vk_combuf.c @@ -345,7 +345,7 @@ void R_VkCombufIssueBarrier(vk_combuf_t* combuf, r_vkcombuf_barrier_t bar) { } if (LOG_VERBOSE) { - DEBUG(" buf[%d]: buf=%p barrier:", i, buf->buffer); + DEBUG(" buf[%d]: buf=%llx barrier:", i, (unsigned long long)buf->buffer); DEBUG(" srcAccessMask = %llx", (unsigned long long)bmb.srcAccessMask); printAccessMask(" ", bmb.srcAccessMask); From 485e9866bed524b160edd68a917986b36e0ad214 Mon Sep 17 00:00:00 2001 From: Ivan 'provod' Avdeev Date: Tue, 10 Dec 2024 12:19:39 -0500 Subject: [PATCH 29/62] vk: rt: upload more staged buffers Manually for now, just to check that it works somewhat. Also synchronize command buffer submissions with semaphores with specific stages. Cannot enter transfer stage before previous frame is done because of staging. --- ref/vk/vk_framectl.c | 6 +++--- ref/vk/vk_light.c | 6 ++++-- ref/vk/vk_light.h | 6 ++++-- ref/vk/vk_resources.c | 4 ++-- ref/vk/vk_rtx.c | 8 ++++++-- 5 files changed, 19 insertions(+), 11 deletions(-) diff --git a/ref/vk/vk_framectl.c b/ref/vk/vk_framectl.c index b5418780eb..c1492482c4 100644 --- a/ref/vk/vk_framectl.c +++ b/ref/vk/vk_framectl.c @@ -391,8 +391,6 @@ static void submit( vk_combuf_t* combuf, qboolean wait, qboolean draw ) { BOUNDED_ARRAY_APPEND_ITEM(cmdbufs, cmdbuf); { - // TODO for RT renderer we only touch framebuffer at the very end of rendering/cmdbuf. - // Can we postpone waiting for framebuffer semaphore until we actually need it. BOUNDED_ARRAY(VkSemaphore, waitophores, 2); BOUNDED_ARRAY(VkPipelineStageFlags, wait_stageflags, 2); BOUNDED_ARRAY(VkSemaphore, signalphores, 2); @@ -405,7 +403,9 @@ static void submit( vk_combuf_t* combuf, qboolean wait, qboolean draw ) { } BOUNDED_ARRAY_APPEND_ITEM(waitophores, prev_frame->sem_done2); - BOUNDED_ARRAY_APPEND_ITEM(wait_stageflags, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT); + // TODO remove this second semaphore altogether, replace it with properly tracked barriers. + // Why: would allow more parallelism between consecutive frames.
+ BOUNDED_ARRAY_APPEND_ITEM(wait_stageflags, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT | VK_PIPELINE_STAGE_2_ALL_TRANSFER_BIT); BOUNDED_ARRAY_APPEND_ITEM(signalphores, frame->sem_done2); DEBUG("submit: frame=%d, staging_tag=%u, combuf=%p, wait for semaphores[%d]={%llx, %llx}, signal semaphores[%d]={%llx, %llx}\n", diff --git a/ref/vk/vk_light.c b/ref/vk/vk_light.c index 56eaf0508f..21b0e839ef 100644 --- a/ref/vk/vk_light.c +++ b/ref/vk/vk_light.c @@ -1295,7 +1295,7 @@ static void uploadPointLights( struct LightsMetadata *metadata ) { } } -vk_lights_bindings_t VK_LightsUpload( void ) { +vk_lights_bindings_t VK_LightsUpload( struct vk_combuf_s *combuf ) { APROF_SCOPE_DECLARE_BEGIN(upload, __FUNCTION__); const vk_buffer_locked_t locked = R_VkBufferLock(&g_lights_.buffer, (vk_buffer_lock_t) { @@ -1322,8 +1322,10 @@ vk_lights_bindings_t VK_LightsUpload( void ) { APROF_SCOPE_END(upload); + R_VkBufferStagingCommit(&g_lights_.buffer, combuf); + return (vk_lights_bindings_t){ - .buffer = g_lights_.buffer.buffer, + .buffer = &g_lights_.buffer, .metadata = { .offset = 0, .size = sizeof(struct LightsMetadata), diff --git a/ref/vk/vk_light.h b/ref/vk/vk_light.h index e800075c67..d127b312be 100644 --- a/ref/vk/vk_light.h +++ b/ref/vk/vk_light.h @@ -1,5 +1,6 @@ #pragma once +#include "vk_buffer.h" #include "vk_const.h" #include "vk_core.h" @@ -81,12 +82,13 @@ void RT_LightsFrameBegin( void ); void RT_LightsFrameEnd( void ); typedef struct { - VkBuffer buffer; + vk_buffer_t *buffer; struct { uint32_t offset, size; } metadata, grid; } vk_lights_bindings_t; -vk_lights_bindings_t VK_LightsUpload( void ); +struct vk_combuf_s; +vk_lights_bindings_t VK_LightsUpload( struct vk_combuf_s* ); qboolean RT_GetEmissiveForTexture( vec3_t out, int texture_id ); diff --git a/ref/vk/vk_resources.c b/ref/vk/vk_resources.c index 9fad231396..8941e5a086 100644 --- a/ref/vk/vk_resources.c +++ b/ref/vk/vk_resources.c @@ -124,8 +124,8 @@ void R_VkResourcesSetBuiltinFIXME(r_vk_resources_builtin_fixme_t args) { RES_SET_SBUFFER_FULL(vertices, args.geometry_data); // TODO move this to lights - RES_SET_BUFFER(lights, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, args.light_bindings->buffer, args.light_bindings->metadata.offset, args.light_bindings->metadata.size); - RES_SET_BUFFER(light_grid, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, args.light_bindings->buffer, args.light_bindings->grid.offset, args.light_bindings->grid.size); + RES_SET_BUFFER(lights, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, args.light_bindings->buffer->buffer, args.light_bindings->metadata.offset, args.light_bindings->metadata.size); + RES_SET_BUFFER(light_grid, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, args.light_bindings->buffer->buffer, args.light_bindings->grid.offset, args.light_bindings->grid.size); #undef RES_SET_SBUFFER_FULL #undef RES_SET_BUFFER } diff --git a/ref/vk/vk_rtx.c b/ref/vk/vk_rtx.c index 5b80084344..9db005ec1d 100644 --- a/ref/vk/vk_rtx.c +++ b/ref/vk/vk_rtx.c @@ -225,6 +225,10 @@ static void performTracing( vk_combuf_t *combuf, const perform_tracing_args_t* a .light_bindings = args->light_bindings, }); + // FIXME what's the right place for these + R_VkBufferStagingCommit(&g_ray_model_state.kusochki_buffer, combuf); + R_VkBufferStagingCommit(&g_ray_model_state.model_headers_buffer, combuf); + // Upload kusochki updates { const VkBufferMemoryBarrier bmb[] = { { @@ -272,7 +276,7 @@ static void performTracing( vk_combuf_t *combuf, const perform_tracing_args_t* a .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT, .dstAccessMask = 
VK_ACCESS_SHADER_READ_BIT, - .buffer = args->light_bindings->buffer, + .buffer = args->light_bindings->buffer->buffer, .offset = 0, .size = VK_WHOLE_SIZE, }}; @@ -507,7 +511,7 @@ void VK_RayFrameEnd(const vk_ray_frame_render_args_t* args) // FIXME pass these matrices explicitly to let RTX module handle ubo itself RT_LightsFrameEnd(); - const vk_lights_bindings_t light_bindings = VK_LightsUpload(); + const vk_lights_bindings_t light_bindings = VK_LightsUpload(args->combuf); g_rtx.frame_number++; From 1354bb1a8161b5bf949f56eb0b8aeb7afcba6695 Mon Sep 17 00:00:00 2001 From: Ivan 'provod' Avdeev Date: Tue, 10 Dec 2024 12:29:16 -0500 Subject: [PATCH 30/62] vk: minor metadata and things update Update TODO with latest state. Fixup staging write-after-write buffer tracking. Fixes #743 -- was fixed a couple of commits ago. --- ref/vk/TODO.md | 37 ++++++++++++++++++++++++++----------- ref/vk/vk_buffer.c | 1 - ref/vk/vk_combuf.c | 5 +++-- ref/vk/vk_core.c | 9 --------- ref/vk/vk_scene.c | 1 - 5 files changed, 29 insertions(+), 24 deletions(-) diff --git a/ref/vk/TODO.md b/ref/vk/TODO.md index 74fc3e7458..9bd7408de5 100644 --- a/ref/vk/TODO.md +++ b/ref/vk/TODO.md @@ -1,24 +1,39 @@ ## Next +- [ ] Proper staging-vs-frame tracking, replace tag with something sensitive + - currently assert fails because there's 2 frame latency, not one. + - [ ] comment for future: full staging might want to wait for previous frame to finish + +## Upcoming +- [ ] framectl frame tracking, e.g.: + - [ ] wait for frame fence only really before actually starting to build combuf in R_BeginFrame() + - why: there should be nothing to synchronize with + - why: more straightforward dependency tracking +- [ ] Remove second semaphore from submit, replace it with explicit barriers for e.g. geom buffer + - why: best practice validation complains about too wide ALL_COMMANDS semaphore + - why: explicit barriers are more clear, better perf possible too + - [ ] Do not lose barrier-tracking state between frames - [ ] Render graph - [ ] performance profiling and comparison +## 2024-12-10 E383 +- [x] Add transfer stage to submit semaphore separating command buffer: fixes sync for rt +- [x] Issue staging commit for a bunch of RT buffers (likely not all of them) +- [ ] Go through all staged buffers and make sure that they are committed +- [x] move destination buffer tracking to outside of staging: + - [x] vk_geometry + - [x] vk_light: grid, metadata + - [x] vk_ray_accel: TLAS geometries + - [x] vk_ray_model: kusochki +- [x] staging should not be aware of cmdbuf either + - [x] `R_VkStagingCommit()`: -- removed + - [x] `R_VkStagingGetCommandBuffer()` -- removed + ## 2024-05-24 E379 - [ ] refactor staging: - [ ] move destination image tracking to outside of staging - [x] vk_image ← vk_texture (E380) - [x] implement generic staging regions (E380) - [ ] implement stricter staging regions tracking - - [ ] move destination buffer tracking to outside of staging: - - [ ] vk_geometry - - [ ] vk_light: grid, metadata - - [ ] vk_ray_accel: TLAS geometries - - [ ] vk_ray_model: kusochki - - [ ] staging should not be aware of cmdbuf either - - [ ] `R_VkStagingCommit()`: - - [ ] vk_image - - [ ] vk_ray_accel - - [ ] `R_VkStagingGetCommandBuffer()` - - [ ] vk_image ## 2024-05-07 E376 - [ ] resource manager diff --git a/ref/vk/vk_buffer.c b/ref/vk/vk_buffer.c index d38f8bb39d..57188a43c3 100644 --- a/ref/vk/vk_buffer.c +++ b/ref/vk/vk_buffer.c @@ -226,4 +226,3 @@ void R_VkBufferStagingCommit(vk_buffer_t *buf, struct vk_combuf_s *combuf) { //FIXME 
R_VkCombufScopeEnd(combuf, begin_index, VK_PIPELINE_STAGE_TRANSFER_BIT); } - diff --git a/ref/vk/vk_combuf.c b/ref/vk/vk_combuf.c index f4e8ca5052..1180047273 100644 --- a/ref/vk/vk_combuf.c +++ b/ref/vk/vk_combuf.c @@ -313,11 +313,12 @@ void R_VkCombufIssueBarrier(vk_combuf_t* combuf, r_vkcombuf_barrier_t bar) { buf->sync.write.access = bufbar->access; buf->sync.write.stage = bar.stage; - // If there were no previous reads, there no reason to synchronize with anything - if (buf->sync.read.stage == 0) + // If there were no previous reads or writes, there is no reason to synchronize with anything + if (bmb.srcStageMask == 0) continue; // Reset read state + // TODO is_read? for read-and-write buf->sync.read.access = 0; buf->sync.read.stage = 0; } diff --git a/ref/vk/vk_core.c b/ref/vk/vk_core.c index 8859330765..3df2cab699 100644 --- a/ref/vk/vk_core.c +++ b/ref/vk/vk_core.c @@ -126,15 +126,6 @@ static VkBool32 VKAPI_PTR debugCallback( if (Q_strcmp(pCallbackData->pMessageIdName, "VUID-vkMapMemory-memory-00683") == 0) return VK_FALSE; - // FIXME: remove this when new buffer staging is done, see https://github.com/w23/xash3d-fwgs/issues/743 - // For now, ignore a firehose of "inefficient srcStageMask using VK_PIPELINE_STAGE_ALL_COMMANDS_BIT" messages. - if (Q_strcmp(pCallbackData->pMessageIdName, "BestPractices-pipeline-stage-flags-compute") == 0) - return VK_FALSE; - /* if (messageSeverity != VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT) { */ /* gEngine.Con_Printf(S_WARN "Validation: %s\n", pCallbackData->pMessage); */ /* } */ - // TODO better messages, not only errors, what are other arguments for, ... if (messageSeverity == VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT) { gEngine.Con_Printf(S_ERROR "vk/dbg: %s\n", pCallbackData->pMessage); diff --git a/ref/vk/vk_scene.c b/ref/vk/vk_scene.c index c16afdc029..6de06bbaeb 100644 --- a/ref/vk/vk_scene.c +++ b/ref/vk/vk_scene.c @@ -1,6 +1,5 @@ #include "vk_scene.h" #include "vk_brush.h" -#include "vk_staging.h" #include "vk_studio.h" #include "vk_lightmap.h" #include "vk_const.h" From bf53d6dbf543e0d84d783963c4304e2fecd233a1 Mon Sep 17 00:00:00 2001 From: Ivan 'provod' Avdeev Date: Tue, 10 Dec 2024 13:41:35 -0500 Subject: [PATCH 31/62] vk: do not print light stats if there are no brush lights --- ref/vk/vk_brush.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/ref/vk/vk_brush.c b/ref/vk/vk_brush.c index f8830510bb..8b8112f10a 100644 --- a/ref/vk/vk_brush.c +++ b/ref/vk/vk_brush.c @@ -1687,8 +1687,10 @@ static qboolean fillBrushSurfaces(fill_geometries_args_t args) { } // Apply all emissive surfaces found - INFO("Loaded %d polylights, %d dynamic for %s model %s", - emissive_surfaces_count, (int)args.bmodel->dynamic_polylights.count, args.is_static ? "static" : "movable", args.mod->name); + if (emissive_surfaces_count > 0) { + INFO("Loaded %d polylights, %d dynamic for %s model %s", + emissive_surfaces_count, (int)args.bmodel->dynamic_polylights.count, args.is_static ? "static" : "movable", args.mod->name); + } ASSERT(args.sizes.num_surfaces == num_geometries); ASSERT(args.sizes.animated_count == animated_count); From 8406ee1145833013135207f15d41b0968a3a2e7c Mon Sep 17 00:00:00 2001 From: Ivan 'provod' Avdeev Date: Tue, 10 Dec 2024 13:42:56 -0500 Subject: [PATCH 32/62] vk: improve staging debug tracking Move RT buffer commits to better locations, right after they have locked staging.
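For future reference, the two counters this patch tracks are meant to pair up like this (a minimal sketch in C, not the exact engine code; the region field names `ptr` and `handle` are assumptions about r_vkstaging_region_t):

	// CPU side: grab a region in the staging ring and fill it
	const r_vkstaging_region_t reg = R_VkStagingLock(size); // locked_count++
	memcpy(reg.ptr, data, size);
	R_VkStagingUnlock(reg.handle); // locked_count--, pending_count++

	// submit side: record the copy, then tell staging it has been scheduled
	vkCmdCopyBuffer(cmdbuf, staging_buffer, dst_buffer, regions_count, regions);
	R_VkStagingCopied(regions_count); // pending_count -= regions_count

	// end of frame: every locked region must have been unlocked and copied
	const uint32_t gen = R_VkStagingGenerationCommit(); // asserts locked == 0 && pending == 0
	// ...once the GPU is done with this generation:
	R_VkStagingGenerationRelease(gen); // flips the staging ring buffer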
--- ref/vk/TODO.md | 6 +++++- ref/vk/vk_buffer.c | 5 +++++ ref/vk/vk_cvar.c | 1 + ref/vk/vk_image.c | 2 ++ ref/vk/vk_rtx.c | 12 ++++++++---- ref/vk/vk_staging.c | 43 +++++++++++++++++++++++++++---------------- 6 files changed, 48 insertions(+), 21 deletions(-) diff --git a/ref/vk/TODO.md b/ref/vk/TODO.md index 9bd7408de5..7a11bf6abc 100644 --- a/ref/vk/TODO.md +++ b/ref/vk/TODO.md @@ -1,4 +1,6 @@ ## Next +- [ ] Fix glitch geometry + - [ ] Which specific models produce it? Use nsight btw - [ ] Proper staging-vs-frame tracking, replace tag with something sensitive - currently assert fails because there's 2 frame latency, not one. - [ ] comment for future: full staging might want to wait for previous frame to finish @@ -18,7 +20,6 @@ ## 2024-12-10 E383 - [x] Add transfer stage to submit semaphore separating command buffer: fixes sync for rt - [x] Issue staging commit for a bunch of RT buffers (likely not all of them) -- [ ] Go through all staged buffers and make sure that they are committed - [x] move destination buffer tracking to outside of staging: - [x] vk_geometry - [x] vk_light: grid, metadata @@ -27,6 +28,9 @@ - [x] staging should not be aware of cmdbuf either - [x] `R_VkStagingCommit()`: -- removed - [x] `R_VkStagingGetCommandBuffer()` -- removed +- [x] Go through all staged buffers and make sure that they are committed +- [x] Commit staging in right places for right buffers +- [x] Add more staging debug tracking/logs ## 2024-05-24 E379 - [ ] refactor staging: - [ ] move destination image tracking to outside of staging diff --git a/ref/vk/vk_buffer.c b/ref/vk/vk_buffer.c index 57188a43c3..93b655151a 100644 --- a/ref/vk/vk_buffer.c +++ b/ref/vk/vk_buffer.c @@ -37,6 +37,8 @@ qboolean VK_BufferCreate(const char *debug_name, vk_buffer_t *buf, uint32_t size buf->size = size; + INFO("Created buffer=%llx, name=\"%s\", size=%u", (unsigned long long)buf->buffer, debug_name, size); + return true; } @@ -196,6 +198,7 @@ vk_buffer_locked_t R_VkBufferLock(vk_buffer_t *buf, vk_buffer_lock_t lock) { } void R_VkBufferUnlock(vk_buffer_locked_t lock) { + DEBUG("buf=%llx staging pending++", (unsigned long long)lock.impl_.buf->buffer); R_VkStagingUnlock(lock.impl_.handle); } @@ -222,6 +225,8 @@ void R_VkBufferStagingCommit(vk_buffer_t *buf, struct vk_combuf_s *combuf) { //DEBUG("buffer=%p copy %d regions from staging buffer=%p", buf->buffer, stb->regions.count, stb->staging); vkCmdCopyBuffer(cmdbuf, stb->staging, buf->buffer, stb->regions.count, stb->regions.items); + DEBUG("buf=%llx staging pending-=%u", (unsigned long long)buf->buffer, stb->regions.count); + R_VkStagingCopied(stb->regions.count); stb->regions.count = 0; //FIXME R_VkCombufScopeEnd(combuf, begin_index, VK_PIPELINE_STAGE_TRANSFER_BIT); diff --git a/ref/vk/vk_cvar.c b/ref/vk/vk_cvar.c index 64e4e0c9b4..d04e39b647 100644 --- a/ref/vk/vk_cvar.c +++ b/ref/vk/vk_cvar.c @@ -29,6 +29,7 @@ void VK_LoadCvars( void ) vk_device_target_id = gEngine.Cvar_Get( "vk_device_target_id", "", FCVAR_GLCONFIG, "Selected video device id" ); vk_debug_log = gEngine.Cvar_Get("vk_debug_log_", "", FCVAR_GLCONFIG | FCVAR_READ_ONLY, ""); + R_LogSetVerboseModules( vk_debug_log->string ); gEngine.Cmd_AddCommand("vk_debug_log", setDebugLog, "Set modules to enable debug logs for"); } diff --git a/ref/vk/vk_image.c b/ref/vk/vk_image.c index 2b2779c15b..36baa6b461 100644 --- a/ref/vk/vk_image.c +++ b/ref/vk/vk_image.c @@ -403,6 +403,7 @@ void R_VkImageUploadCommit( struct vk_combuf_s *combuf, VkPipelineStageFlagBits
= -1; @@ -535,6 +536,7 @@ void R_VkImageUploadCancel( r_vk_image_t *img ) { // Technically we won't need that staging region anymore at all, but it doesn't matter, // it's just easier to mark it to be freed this way. R_VkStagingUnlock(up->staging.lock.handle); + R_VkStagingCopied(1); // Mark upload slot as unused, and image as not subjet to uploading up->image = NULL; diff --git a/ref/vk/vk_rtx.c b/ref/vk/vk_rtx.c index 9db005ec1d..f3cdfcc40c 100644 --- a/ref/vk/vk_rtx.c +++ b/ref/vk/vk_rtx.c @@ -225,10 +225,6 @@ static void performTracing( vk_combuf_t *combuf, const perform_tracing_args_t* a .light_bindings = args->light_bindings, }); - // FIXME what's the right place for these - R_VkBufferStagingCommit(&g_ray_model_state.kusochki_buffer, combuf); - R_VkBufferStagingCommit(&g_ray_model_state.model_headers_buffer, combuf); - // Upload kusochki updates { const VkBufferMemoryBarrier bmb[] = { { @@ -265,6 +261,7 @@ static void performTracing( vk_combuf_t *combuf, const perform_tracing_args_t* a { rt_resource_t *const tlas = R_VkResourceGetByIndex(ExternalResource_tlas); tlas->resource = RT_VkAccelPrepareTlas(combuf); + R_VkBufferStagingCommit(&g_ray_model_state.model_headers_buffer, combuf); } prepareUniformBuffer(args->render_args, args->frame_index, args->frame_counter, args->fov_angle_y, args->frame_width, args->frame_height); @@ -548,6 +545,13 @@ void VK_RayFrameEnd(const vk_ray_frame_render_args_t* args) // Feed tlas with dynamic data RT_DynamicModelProcessFrame(); + // FIXME what's the right place for this? + // This needs to happen every frame where we might've locked staging for kusochki + // - After dynamic stuff (might upload kusochki) + // - Before performTracing(), even if it is not called + // See ~3:00:00-3:40:00 of stream E383 about push-vs-pull models and their boundaries. 
+ R_VkBufferStagingCommit(&g_ray_model_state.kusochki_buffer, args->combuf); + ASSERT(args->dst.width <= g_rtx.max_frame_width); ASSERT(args->dst.height <= g_rtx.max_frame_height); diff --git a/ref/vk/vk_staging.c b/ref/vk/vk_staging.c index e2b54add0a..cb47a0fc23 100644 --- a/ref/vk/vk_staging.c +++ b/ref/vk/vk_staging.c @@ -21,6 +21,8 @@ static struct { r_flipping_buffer_t buffer_alloc; uint32_t locked_count; + uint32_t pending_count; + uint32_t current_generation; struct { @@ -67,22 +69,6 @@ static uint32_t allocateInRing(uint32_t size, uint32_t alignment) { return R_FlippingBuffer_Alloc(&g_staging.buffer_alloc, size, alignment ); } -void R_VkStagingGenerationRelease(uint32_t gen) { - DEBUG("Release: gen=%u current_gen=%u ring offsets=[%u, %u, %u]", gen, g_staging.current_generation, - g_staging.buffer_alloc.frame_offsets[0], - g_staging.buffer_alloc.frame_offsets[1], - g_staging.buffer_alloc.ring.head - ); - R_FlippingBuffer_Flip(&g_staging.buffer_alloc); -} - -uint32_t R_VkStagingGenerationCommit(void) { - DEBUG("Commit: locked_count=%d gen=%u", g_staging.locked_count, g_staging.current_generation); - ASSERT(g_staging.locked_count == 0); - g_staging.stats.total_size = g_staging.stats.images_size + g_staging.stats.buffers_size; - return g_staging.current_generation++; -} - r_vkstaging_region_t R_VkStagingLock(uint32_t size) { const uint32_t alignment = 4; const uint32_t offset = R_FlippingBuffer_Alloc(&g_staging.buffer_alloc, size, alignment); @@ -100,8 +86,33 @@ r_vkstaging_region_t R_VkStagingLock(uint32_t size) { } void R_VkStagingUnlock(r_vkstaging_handle_t handle) { + DEBUG("Unlock: locked_count=%u pending_count=%u gen=%u", g_staging.locked_count, g_staging.pending_count, g_staging.current_generation); ASSERT(g_staging.current_generation == handle.generation); ASSERT(g_staging.locked_count > 0); g_staging.locked_count--; + g_staging.pending_count++; +} + +void R_VkStagingCopied(uint32_t count) { + ASSERT(g_staging.pending_count >= count); + g_staging.pending_count -= count; } +void R_VkStagingGenerationRelease(uint32_t gen) { + DEBUG("Release: gen=%u current_gen=%u ring offsets=[%u, %u, %u]", gen, g_staging.current_generation, + g_staging.buffer_alloc.frame_offsets[0], + g_staging.buffer_alloc.frame_offsets[1], + g_staging.buffer_alloc.ring.head + ); + R_FlippingBuffer_Flip(&g_staging.buffer_alloc); +} + +uint32_t R_VkStagingGenerationCommit(void) { + DEBUG("Commit: locked_count=%u pending_count=%u gen=%u", g_staging.locked_count, g_staging.pending_count, g_staging.current_generation); + + ASSERT(g_staging.locked_count == 0); + ASSERT(g_staging.pending_count == 0); + + g_staging.stats.total_size = g_staging.stats.images_size + g_staging.stats.buffers_size; + return g_staging.current_generation++; +} From 87946855c0b41fea6458440ef7e98cbe881d574f Mon Sep 17 00:00:00 2001 From: Ivan 'provod' Avdeev Date: Tue, 10 Dec 2024 13:53:34 -0500 Subject: [PATCH 33/62] wasuremono! 
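(wasuremono: Japanese for "a forgotten item" -- the previous commit started calling R_VkStagingCopied() without declaring it, so this adds the missing vk_staging.h declaration, and bumps the bounded BLAS array while at it.)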
--- ref/vk/vk_ray_accel.c | 2 +- ref/vk/vk_staging.h | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/ref/vk/vk_ray_accel.c b/ref/vk/vk_ray_accel.c index a1b5c14323..875727a27b 100644 --- a/ref/vk/vk_ray_accel.c +++ b/ref/vk/vk_ray_accel.c @@ -77,7 +77,7 @@ static struct { struct { // TODO two arrays for a single vkCmdBuildAccelerationStructuresKHR() call // FIXME This is for testing only - BOUNDED_ARRAY_DECLARE(rt_blas_t*, blas, 256); + BOUNDED_ARRAY_DECLARE(rt_blas_t*, blas, 2048); } build; cvar_t *cv_force_culling; diff --git a/ref/vk/vk_staging.h b/ref/vk/vk_staging.h index 331644d603..b16e25d307 100644 --- a/ref/vk/vk_staging.h +++ b/ref/vk/vk_staging.h @@ -24,6 +24,9 @@ r_vkstaging_region_t R_VkStagingLock(uint32_t size); // Mark allocated region as ready for upload void R_VkStagingUnlock(r_vkstaging_handle_t handle); +// Notify staging that this many regions were scheduled to be copied +void R_VkStagingCopied(uint32_t count); + // Finalize current generation, return its tag for R_VkStagingGenerationRelease() call uint32_t R_VkStagingGenerationCommit(void); From 2b5a02c321062945596d0f78f04321ff487b6d00 Mon Sep 17 00:00:00 2001 From: Ivan 'provod' Avdeev Date: Tue, 10 Dec 2024 18:55:11 -0500 Subject: [PATCH 34/62] vk: rt: fix glitched dynamic geometry by actually tracking counts properly We were always trying to build the same number of geometries, even though this count is dynamic. --- ref/vk/vk_ray_accel.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/ref/vk/vk_ray_accel.c b/ref/vk/vk_ray_accel.c index 875727a27b..98b6b154cd 100644 --- a/ref/vk/vk_ray_accel.c +++ b/ref/vk/vk_ray_accel.c @@ -5,7 +5,6 @@ #include "vk_ray_internal.h" #include "r_speeds.h" #include "vk_combuf.h" -#include "vk_staging.h" #include "vk_math.h" #include "vk_geometry.h" #include "vk_render.h" @@ -560,6 +559,11 @@ void RT_VkAccelFrameBegin(void) { } static void blasFillGeometries(rt_blas_t *blas, const vk_render_geometry_t *geoms, int geoms_count) { + // geoms_count is not constant for dynamic models, and it shouldn't exceed max_geoms by design + ASSERT(geoms_count <= blas->max_geoms); + + blas->build.info.geometryCount = geoms_count; + for (int i = 0; i < geoms_count; ++i) { const vk_render_geometry_t *mg = geoms + i; const uint32_t prim_count = mg->element_count / 3; @@ -597,6 +601,7 @@ struct rt_blas_s* RT_BlasCreate(rt_blas_create_t args) { blas->debug_name = args.name; blas->usage = args.usage; + blas->max_geoms = args.geoms_count; blas->build.info = (VkAccelerationStructureBuildGeometryInfoKHR){ .sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_GEOMETRY_INFO_KHR, From a4eb783fb266f4d98e1a246225638ad9940e5f12 Mon Sep 17 00:00:00 2001 From: Ivan 'provod' Avdeev Date: Tue, 10 Dec 2024 19:04:30 -0500 Subject: [PATCH 35/62] vk: update todo --- ref/vk/TODO.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/ref/vk/TODO.md b/ref/vk/TODO.md index 7a11bf6abc..c6b1eb4f5c 100644 --- a/ref/vk/TODO.md +++ b/ref/vk/TODO.md @@ -1,6 +1,4 @@ ## Next -- [ ] Fix glitch geometry - - [ ] Which specific models produce it? Use nsight btw - [ ] Proper staging-vs-frame tracking, replace tag with something sensitive - currently assert fails because there's 2 frame latency, not one.
- [ ] comment for future: full staging might want to wait for previous frame to finish @@ -32,6 +30,10 @@ - [x] Commit staging in right places for right buffers - [x] Add more staging debug tracking/logs +### After stream +- [x] Fix glitch geometry + - [x] Which specific models produce it? Use nsight btw ## 2024-05-24 E379 - [ ] refactor staging: - [ ] move destination image tracking to outside of staging From 637b4d26d6a88f042215629066b59390b673ddfe Mon Sep 17 00:00:00 2001 From: Ivan 'provod' Avdeev Date: Wed, 11 Dec 2024 19:22:23 -0500 Subject: [PATCH 36/62] vk: make image upload cancel function private --- ref/vk/vk_image.c | 6 ++++-- ref/vk/vk_image.h | 3 --- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/ref/vk/vk_image.c b/ref/vk/vk_image.c index 36baa6b461..c97ac2e430 100644 --- a/ref/vk/vk_image.c +++ b/ref/vk/vk_image.c @@ -124,11 +124,13 @@ r_vk_image_t R_VkImageCreate(const r_vk_image_create_t *create) { return image; } +static void cancelUpload( r_vk_image_t *img ); + void R_VkImageDestroy(r_vk_image_t *img) { // Need to make sure that there are no references to this image anywhere. // It might have been added to upload queue, but then immediately deleted, leaving references // in the queue. See https://github.com/w23/xash3d-fwgs/issues/464 - R_VkImageUploadCancel(img); + cancelUpload(img); // Image destroy calls are not explicitly synchronized with rendering. GPU might still be // processing previous frame. We need to make sure that GPU is done by the time we start @@ -523,7 +525,7 @@ void R_VkImageUploadEnd( r_vk_image_t *img ) { ASSERT(up->staging.cursor <= img->image_size); } -void R_VkImageUploadCancel( r_vk_image_t *img ) { +static void cancelUpload( r_vk_image_t *img ) { // Skip already uploaded (or never uploaded) images if (img->upload_slot < 0) return; diff --git a/ref/vk/vk_image.h b/ref/vk/vk_image.h index b06e3a2f86..4ea71f9802 100644 --- a/ref/vk/vk_image.h +++ b/ref/vk/vk_image.h @@ -66,9 +66,6 @@ void R_VkImageUploadBegin( r_vk_image_t *img ); void R_VkImageUploadSlice( r_vk_image_t *img, int layer, int mip, int size, const void *data ); void R_VkImageUploadEnd( r_vk_image_t *img ); -// If this image has its upload scheduled, it should be cancelled -void R_VkImageUploadCancel( r_vk_image_t *img ); - // Upload all enqueued images using the given command buffer struct vk_combuf_s; void R_VkImageUploadCommit( struct vk_combuf_s *combuf, VkPipelineStageFlagBits dst_stages ); From e7dcf3d97026e81632934125bd1ddb1587c9adda Mon Sep 17 00:00:00 2001 From: Ivan 'provod' Avdeev Date: Wed, 11 Dec 2024 22:57:27 -0500 Subject: [PATCH 37/62] vk: just a bit of cleaning of commented-out stuff --- ref/vk/vk_render.c | 58 +--------------------------------------------- 1 file changed, 1 insertion(+), 57 deletions(-) diff --git a/ref/vk/vk_render.c b/ref/vk/vk_render.c index 6b3238ccf2..58ebe2ca18 100644 --- a/ref/vk/vk_render.c +++ b/ref/vk/vk_render.c @@ -58,8 +58,6 @@ enum { kVkPipeline_AT, // no blend, depth RW, alpha test kVkPipeline_1_1_R, // blend: src + dst, depth test - // Special pipeline for skybox (tex = TEX_BASE_SKYBOX) - //kVkPipeline_Sky, kVkPipeline_COUNT, }; @@ -182,12 +180,6 @@ static qboolean createSkyboxPipeline( void ) { static qboolean createPipelines( void ) { - /* VkPushConstantRange push_const = { */ - /* .offset = 0, */ - /* .size = sizeof(AVec3f), */ - /* .stageFlags = VK_SHADER_STAGE_VERTEX_BIT, */ - /* }; */ - VkDescriptorSetLayout descriptor_layouts[] = { vk_desc_fixme.one_uniform_buffer_layout, vk_desc_fixme.one_texture_layout, @@ -199,8
+191,6 @@ static qboolean createPipelines( void ) .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, .setLayoutCount = ARRAYSIZE(descriptor_layouts), .pSetLayouts = descriptor_layouts, - /* .pushConstantRangeCount = 1, */ - /* .pPushConstantRanges = &push_const, */ }; // FIXME store layout separately @@ -642,52 +632,6 @@ static uint32_t writeDlightsToUBO( void ) return ubo_lights_offset; } -/* -static void debugBarrier( VkCommandBuffer cmdbuf, VkBuffer buf) { - const VkBufferMemoryBarrier bmb[] = { { - .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, - .srcAccessMask = VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT, - .dstAccessMask = VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT, - .buffer = buf, - .offset = 0, - .size = VK_WHOLE_SIZE, - } }; - vkCmdPipelineBarrier(cmdbuf, - VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, - VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, - 0, 0, NULL, ARRAYSIZE(bmb), bmb, 0, NULL); -} -*/ - -/* OBSOLETE, remove -void VK_Render_FIXME_Barrier( VkCommandBuffer cmdbuf ) { - const VkBuffer geom = R_GeometryBuffer_Get(); - //debugBarrier(cmdbuf, geom); - // FIXME: this should be automatic and dynamically depend on actual usage, resolving this with render graph - { - const VkBufferMemoryBarrier bmb[] = { { - .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, - .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT, - .dstAccessMask - = VK_ACCESS_INDEX_READ_BIT - | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT - | (vk_core.rtx ? ( VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_KHR | VK_ACCESS_SHADER_READ_BIT) : 0), - .buffer = geom, - .offset = 0, - .size = VK_WHOLE_SIZE, - } }; - vkCmdPipelineBarrier(cmdbuf, - VK_PIPELINE_STAGE_TRANSFER_BIT, - VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | (vk_core.rtx - ? VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR - | VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR - | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT - : 0), - 0, 0, NULL, ARRAYSIZE(bmb), bmb, 0, NULL); - } -} -*/ - void VK_RenderEnd( vk_combuf_t* combuf, qboolean draw, uint32_t width, uint32_t height, int frame_index ) { if (!draw) @@ -873,7 +817,7 @@ void VK_RenderEndRTX( struct vk_combuf_s* combuf, VkImageView img_dst_view, VkIm .view = &g_camera.viewMatrix, .geometry_data = { - .buffer = geom->buffer, + .buffer = geom, .size = VK_WHOLE_SIZE, }, From 96667c751bae9ef100e6245128b1d08261e57f0b Mon Sep 17 00:00:00 2001 From: Ivan 'provod' Avdeev Date: Wed, 11 Dec 2024 23:42:49 -0500 Subject: [PATCH 38/62] vk: rt: deduce barriers automatically for buffers too Allows removing a small pile of manual barriers, yay. --- ref/vk/ray_pass.c | 4 +- ref/vk/vk_ray_accel.c | 34 +------------ ref/vk/vk_resources.c | 113 ++++++++++++++++++++++++++++-------------- ref/vk/vk_resources.h | 27 ++++++---- ref/vk/vk_rtx.c | 47 ++---------------- ref/vk/vk_rtx.h | 2 +- 6 files changed, 101 insertions(+), 126 deletions(-) diff --git a/ref/vk/ray_pass.c b/ref/vk/ray_pass.c index 6b985aa404..71ad4f5c4f 100644 --- a/ref/vk/ray_pass.c +++ b/ref/vk/ray_pass.c @@ -276,7 +276,7 @@ void RayPassPerform(struct ray_pass_s *pass, vk_combuf_t *combuf, ray_pass_perfo if (res->type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) { dst_value->image = (VkDescriptorImageInfo) { .imageLayout = write ?
res->deprecate.write.image_layout : res->deprecate.read.image_layout, .imageView = src_value->image_object->view, .sampler = VK_NULL_HANDLE, }; @@ -288,7 +288,7 @@ void RayPassPerform(struct ray_pass_s *pass, vk_combuf_t *combuf, ray_pass_perfo VK_DescriptorsWrite(&pass->desc.riptors, args.frame_set_slot); DEBUG_BEGIN(combuf->cmdbuf, pass->debug_name); - R_VkBarrierCommit(combuf->cmdbuf, &barrier, pass->pipeline_type); + R_VkBarrierCommit(combuf, &barrier, pass->pipeline_type); switch (pass->type) { case RayPassType_Tracing: diff --git a/ref/vk/vk_ray_accel.c b/ref/vk/vk_ray_accel.c index 98b6b154cd..ae123702d1 100644 --- a/ref/vk/vk_ray_accel.c +++ b/ref/vk/vk_ray_accel.c @@ -137,22 +137,6 @@ static qboolean buildAccel(vk_combuf_t* combuf, VkAccelerationStructureBuildGeom }, }); - { - const VkBufferMemoryBarrier bmb[] = { { - .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, - .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT, - .dstAccessMask = VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_KHR | VK_ACCESS_SHADER_READ_BIT, // FIXME - .buffer = geom->buffer, - .offset = 0, // FIXME - .size = VK_WHOLE_SIZE, // FIXME - } }; - vkCmdPipelineBarrier(combuf->cmdbuf, - VK_PIPELINE_STAGE_TRANSFER_BIT, - //VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR, - VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR | VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR, - 0, 0, NULL, COUNTOF(bmb), bmb, 0, NULL); - } - //gEngine.Con_Reportf("sratch offset = %d, req=%d", g_accel.frame.scratch_offset, scratch_buffer_size); if (MAX_SCRATCH_BUFFER < g_accel.frame.scratch_offset + scratch_buffer_size) { @@ -427,6 +411,7 @@ vk_resource_t RT_VkAccelPrepareTlas(vk_combuf_t *combuf) { g_accel.stats.instances_count = g_ray_model_state.frame.instances_count; + // FIXME use combuf barrier // Barrier for building all BLASes // BLAS building is now in cmdbuf, need to synchronize with results { @@ -449,23 +434,6 @@ vk_resource_t RT_VkAccelPrepareTlas(vk_combuf_t *combuf) { createTlas(combuf, g_accel.tlas_geom_buffer_addr + instance_offset * sizeof(VkAccelerationStructureInstanceKHR)); DEBUG_END(combuf->cmdbuf); - // TODO return vk_resource_t with callback to all this "do the preparation and barriers" crap, instead of doing it here - { - const VkBufferMemoryBarrier bmb[] = { { - .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, - .srcAccessMask = VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_KHR, - .dstAccessMask = VK_ACCESS_SHADER_READ_BIT, - // FIXME also incorrect -- here we must barrier on tlas_geom_buffer, not accels_buffer - .buffer = g_accel.accels_buffer.buffer, - .offset = 0, - .size = VK_WHOLE_SIZE, - } }; - vkCmdPipelineBarrier(combuf->cmdbuf, - VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR, - VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, - 0, 0, NULL, COUNTOF(bmb), bmb, 0, NULL); - } - APROF_SCOPE_END(prepare); return (vk_resource_t){ .type = VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR, diff --git a/ref/vk/vk_resources.c b/ref/vk/vk_resources.c index 8941e5a086..f037c63d13 100644 --- a/ref/vk/vk_resources.c +++ b/ref/vk/vk_resources.c @@ -3,6 +3,7 @@ #include "vk_image.h" #include "vk_common.h" #include "vk_logs.h" +#include "vk_combuf.h" #include "arrays.h" #define LOG_MODULE rt @@ -101,9 +102,10 @@ void R_VkResourcesSetBuiltinFIXME(r_vk_resources_builtin_fixme_t args) { #define RES_SET_BUFFER(name, type_, source_, offset_, size_) \ g_res.res[ExternalResource_##name].resource = (vk_resource_t){ \ .type = type_, \ + .ref.buffer = (source_), \ .value = 
(vk_descriptor_value_t) { \ .buffer = (VkDescriptorBufferInfo) { \ - .buffer = (source_), \ + .buffer = (source_)->buffer, \ .offset = (offset_), \ .range = (size_), \ } \ @@ -113,19 +115,19 @@ void R_VkResourcesSetBuiltinFIXME(r_vk_resources_builtin_fixme_t args) { RES_SET_BUFFER(ubo, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, args.uniform_buffer, args.frame_index * args.uniform_unit_size, sizeof(struct UniformBuffer)); #define RES_SET_SBUFFER_FULL(name, source_) \ - RES_SET_BUFFER(name, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, source_.buffer, 0, source_.size) + RES_SET_BUFFER(name, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, source_, 0, (source_)->size) // TODO move this to ray model producer - RES_SET_SBUFFER_FULL(kusochki, g_ray_model_state.kusochki_buffer); - RES_SET_SBUFFER_FULL(model_headers, g_ray_model_state.model_headers_buffer); + RES_SET_SBUFFER_FULL(kusochki, &g_ray_model_state.kusochki_buffer); + RES_SET_SBUFFER_FULL(model_headers, &g_ray_model_state.model_headers_buffer); // TODO move these to vk_geometry - RES_SET_SBUFFER_FULL(indices, args.geometry_data); - RES_SET_SBUFFER_FULL(vertices, args.geometry_data); + RES_SET_SBUFFER_FULL(indices, args.geometry_data.buffer); + RES_SET_SBUFFER_FULL(vertices, args.geometry_data.buffer); // TODO move this to lights - RES_SET_BUFFER(lights, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, args.light_bindings->buffer->buffer, args.light_bindings->metadata.offset, args.light_bindings->metadata.size); - RES_SET_BUFFER(light_grid, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, args.light_bindings->buffer->buffer, args.light_bindings->grid.offset, args.light_bindings->grid.size); + RES_SET_BUFFER(lights, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, args.light_bindings->buffer, args.light_bindings->metadata.offset, args.light_bindings->metadata.size); + RES_SET_BUFFER(light_grid, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, args.light_bindings->buffer, args.light_bindings->grid.offset, args.light_bindings->grid.size); #undef RES_SET_SBUFFER_FULL #undef RES_SET_BUFFER } @@ -154,13 +156,13 @@ void R_VkResourcesFrameBeginStateChangeFIXME(VkCommandBuffer cmdbuf, qboolean di src->image = tmp_img; // If there was no initial state, prepare it. (this should happen only for the first frame) - if (discontinuity || res->resource.write.pipelines == 0) { + if (discontinuity || res->resource.deprecate.write.pipelines == 0) { // TODO is there a better way? Can image be cleared w/o explicit clear op? 
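 			// (A possible answer, not verified against this codebase: a transition from
 			// VK_IMAGE_LAYOUT_UNDEFINED discards the old contents without a transfer op,
 			// which is enough when nothing reads them; but accumulation-style images do
 			// read the previous frame, so on discontinuity an explicit clear is still
 			// needed, e.g.:
 			//   vkCmdClearColorImage(cmdbuf, img, VK_IMAGE_LAYOUT_GENERAL,
 			//       &(VkClearColorValue){0}, 1, &range);
 			// or a render-pass attachment with VK_ATTACHMENT_LOAD_OP_CLEAR.)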
DEBUG("discontinuity: %s", res->name); R_VkImageClear( cmdbuf, res->image.image, 0, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT ); - res->resource.write.pipelines = VK_PIPELINE_STAGE_TRANSFER_BIT; - res->resource.write.image_layout = VK_IMAGE_LAYOUT_GENERAL; - res->resource.write.access_mask = VK_ACCESS_TRANSFER_WRITE_BIT; + res->resource.deprecate.write.pipelines = VK_PIPELINE_STAGE_TRANSFER_BIT; + res->resource.deprecate.write.image_layout = VK_IMAGE_LAYOUT_GENERAL; + res->resource.deprecate.write.access_mask = VK_ACCESS_TRANSFER_WRITE_BIT; } } @@ -170,65 +172,95 @@ void R_VkResourcesFrameBeginStateChangeFIXME(VkCommandBuffer cmdbuf, qboolean di if (!res->name[0] || !res->image.image || res->source_index_plus_1 > 0) continue; - //res->resource.read = res->resource.write = (ray_resource_state_t){0}; - res->resource.write = (ray_resource_state_t){0}; + //res->resource.read = res->resource.deprecate.write = (ray_resource_state_t){0}; + res->resource.deprecate.write = (ray_resource_state_t){0}; } } -void R_VkResourceAddToBarrier(vk_resource_t *res, qboolean write, VkPipelineStageFlags dst_stage_mask, r_vk_barrier_t *barrier) { - if (res->type != VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) { - // TODO - return; - } +static void barrierAddImage(r_vk_barrier_t *barrier, vk_resource_t *res, VkAccessFlags access, VkPipelineStageFlags dst_stage_mask) { + const qboolean write = (access & VK_ACCESS_SHADER_WRITE_BIT) != 0; if (write) { + // TODO: support other access modes + ASSERT(access == VK_ACCESS_SHADER_WRITE_BIT); // No reads are happening //ASSERT(res->read.pipelines == 0); const ray_resource_state_t new_state = { .pipelines = dst_stage_mask, - .access_mask = VK_ACCESS_SHADER_WRITE_BIT, + .access_mask = access, .image_layout = VK_IMAGE_LAYOUT_GENERAL, }; R_VkBarrierAddImage(barrier, (r_vk_barrier_image_t){ .image = res->value.image_object->image, - .src_stage_mask = res->read.pipelines | res->write.pipelines, + .src_stage_mask = res->deprecate.read.pipelines | res->deprecate.write.pipelines, // FIXME MEMORY_WRITE is needed to silence write-after-write layout-transition validation hazard - .src_access_mask = res->read.access_mask | res->write.access_mask | VK_ACCESS_MEMORY_WRITE_BIT, + .src_access_mask = res->deprecate.read.access_mask | res->deprecate.write.access_mask | VK_ACCESS_MEMORY_WRITE_BIT, .dst_access_mask = new_state.access_mask, .old_layout = VK_IMAGE_LAYOUT_UNDEFINED, .new_layout = new_state.image_layout, }); // Mark that read would need a transition - res->read = (ray_resource_state_t){0}; - res->write = new_state; + res->deprecate.read = (ray_resource_state_t){0}; + res->deprecate.write = new_state; } else { + // TODO: support other access modes + ASSERT(access == VK_ACCESS_SHADER_READ_BIT); // Write happened - ASSERT(res->write.pipelines != 0); + ASSERT(res->deprecate.write.pipelines != 0); // Check if no more barriers needed - if ((res->read.pipelines & dst_stage_mask) == dst_stage_mask) + if ((res->deprecate.read.pipelines & dst_stage_mask) == dst_stage_mask) return; - res->read = (ray_resource_state_t) { - .pipelines = res->read.pipelines | dst_stage_mask, - .access_mask = VK_ACCESS_SHADER_READ_BIT, + res->deprecate.read = (ray_resource_state_t) { + .pipelines = res->deprecate.read.pipelines | dst_stage_mask, + .access_mask = access, .image_layout = VK_IMAGE_LAYOUT_GENERAL, }; R_VkBarrierAddImage(barrier, (r_vk_barrier_image_t){ .image = res->value.image_object->image, - .src_stage_mask = res->write.pipelines, - .src_access_mask = res->write.access_mask, - .dst_access_mask = 
res->read.access_mask, - .old_layout = res->write.image_layout, - .new_layout = res->read.image_layout, + .src_stage_mask = res->deprecate.write.pipelines, + .src_access_mask = res->deprecate.write.access_mask, + .dst_access_mask = res->deprecate.read.access_mask, + .old_layout = res->deprecate.write.image_layout, + .new_layout = res->deprecate.read.image_layout, }); } } +static void barrierAddBuffer(r_vk_barrier_t *barrier, vk_buffer_t *buf, VkAccessFlags access) { + const r_vkcombuf_barrier_buffer_t bb = { + .buffer = buf, + .access = access, + }; + BOUNDED_ARRAY_APPEND_ITEM(barrier->buffers, bb); +} + +void R_VkResourceAddToBarrier(vk_resource_t *res, qboolean write, VkPipelineStageFlags2 dst_stage_mask, r_vk_barrier_t *barrier) { + switch (res->type) { + case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: + barrierAddImage(barrier, res, write ? VK_ACCESS_SHADER_WRITE_BIT : VK_ACCESS_SHADER_READ_BIT, dst_stage_mask); + break; + case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: + ASSERT(!write); + barrierAddBuffer(barrier, res->ref.buffer, VK_ACCESS_2_SHADER_READ_BIT); + break; + case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: + // nothing for now, as all textures are static at this point + break; + case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR: + case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: + // nop + break; + default: + ASSERT(!"Unsupported descriptor type"); + } +} + void R_VkBarrierAddImage(r_vk_barrier_t *barrier, r_vk_barrier_image_t image) { barrier->src_stage_mask |= image.src_stage_mask; const VkImageMemoryBarrier ib = (VkImageMemoryBarrier) { @@ -249,19 +281,26 @@ void R_VkBarrierAddImage(r_vk_barrier_t *barrier, r_vk_barrier_image_t image) { BOUNDED_ARRAY_APPEND_ITEM(barrier->images, ib); } -void R_VkBarrierCommit(VkCommandBuffer cmdbuf, r_vk_barrier_t *barrier, VkPipelineStageFlags dst_stage_mask) { +void R_VkBarrierCommit(vk_combuf_t* combuf, r_vk_barrier_t *barrier, VkPipelineStageFlags2 dst_stage_mask) { if (barrier->images.count == 0) return; - // TODO vkCmdPipelineBarrier2() - vkCmdPipelineBarrier(cmdbuf, + // FIXME use combuf barrier vkCmdPipelineBarrier2() + vkCmdPipelineBarrier(combuf->cmdbuf, barrier->src_stage_mask == 0 ? 
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT : barrier->src_stage_mask, dst_stage_mask, 0, 0, NULL, 0, NULL, barrier->images.count, barrier->images.items); + R_VkCombufIssueBarrier(combuf, (r_vkcombuf_barrier_t){ + .stage = dst_stage_mask, + .buffers.items = barrier->buffers.items, + .buffers.count = barrier->buffers.count, + }); + // Mark as used barrier->src_stage_mask = 0; barrier->images.count = 0; + barrier->buffers.count = 0; } diff --git a/ref/vk/vk_resources.h b/ref/vk/vk_resources.h index 056bedd82a..4a2fe56299 100644 --- a/ref/vk/vk_resources.h +++ b/ref/vk/vk_resources.h @@ -3,6 +3,7 @@ #include "vk_core.h" #include "vk_descriptor.h" #include "vk_image.h" +#include "vk_combuf.h" // r_vkcombuf_barrier_buffer_t #include "arrays.h" // TODO remove @@ -32,14 +33,20 @@ enum { typedef struct { VkAccessFlags access_mask; VkImageLayout image_layout; - VkPipelineStageFlagBits pipelines; + VkPipelineStageFlagBits2 pipelines; } ray_resource_state_t; struct xvk_image_s; typedef struct vk_resource_s { VkDescriptorType type; - ray_resource_state_t write, read; + struct { + ray_resource_state_t write, read; + } deprecate; vk_descriptor_value_t value; + union { + vk_buffer_t *buffer; + r_vk_image_t *image; + } ref; } vk_resource_t; typedef struct vk_resource_s *vk_resource_p; @@ -48,6 +55,7 @@ typedef struct { char name[64]; vk_resource_t resource; r_vk_image_t image; + vk_buffer_t *buffer; int refcount; int source_index_plus_1; } rt_resource_t; @@ -66,11 +74,11 @@ void R_VkResourcesCleanup(void); typedef struct { uint32_t frame_index; - VkBuffer uniform_buffer; + vk_buffer_t *uniform_buffer; uint32_t uniform_unit_size; struct { - VkBuffer buffer; // must be the same as in vk_ray_model_create_t TODO: validate or make impossible to specify incorrectly + vk_buffer_t *buffer; // must be the same as in vk_ray_model_create_t TODO: validate or make impossible to specify incorrectly uint64_t size; } geometry_data; const vk_lights_bindings_t *light_bindings; @@ -79,12 +87,11 @@ void R_VkResourcesSetBuiltinFIXME(r_vk_resources_builtin_fixme_t builtin); void R_VkResourcesFrameBeginStateChangeFIXME(VkCommandBuffer cmdbuf, qboolean discontinuity); - typedef struct { // TODO VK_KHR_synchronization2, has a slightly different (better) semantics - VkPipelineStageFlags src_stage_mask; + VkPipelineStageFlags2 src_stage_mask; BOUNDED_ARRAY_DECLARE(VkImageMemoryBarrier, images, 16); - //BOUNDED_ARRAY_DECLARE(buffers, VkBufferMemoryBarrier, 16); + BOUNDED_ARRAY_DECLARE(r_vkcombuf_barrier_buffer_t, buffers, 16); } r_vk_barrier_t; typedef struct { @@ -97,6 +104,8 @@ typedef struct { } r_vk_barrier_image_t; void R_VkBarrierAddImage(r_vk_barrier_t *barrier, r_vk_barrier_image_t image); -void R_VkBarrierCommit(VkCommandBuffer cmdbuf, r_vk_barrier_t *barrier, VkPipelineStageFlags dst_stage_mask); -void R_VkResourceAddToBarrier(vk_resource_t *res, qboolean write, VkPipelineStageFlags dst_stage_mask, r_vk_barrier_t *barrier); +struct vk_combuf_s; +void R_VkBarrierCommit(struct vk_combuf_s* combuf, r_vk_barrier_t *barrier, VkPipelineStageFlags2 dst_stage_mask); + +void R_VkResourceAddToBarrier(vk_resource_t *res, qboolean write, VkPipelineStageFlags2 dst_stage_mask, r_vk_barrier_t *barrier); diff --git a/ref/vk/vk_rtx.c b/ref/vk/vk_rtx.c index f3cdfcc40c..b1a72f3548 100644 --- a/ref/vk/vk_rtx.c +++ b/ref/vk/vk_rtx.c @@ -218,37 +218,13 @@ static void performTracing( vk_combuf_t *combuf, const perform_tracing_args_t* a R_VkResourcesSetBuiltinFIXME((r_vk_resources_builtin_fixme_t){ .frame_index = args->frame_index, - .uniform_buffer = 
g_rtx.uniform_buffer.buffer, + .uniform_buffer = &g_rtx.uniform_buffer, .uniform_unit_size = g_rtx.uniform_unit_size, .geometry_data.buffer = args->render_args->geometry_data.buffer, .geometry_data.size = args->render_args->geometry_data.size, .light_bindings = args->light_bindings, }); - // Upload kusochki updates - { - const VkBufferMemoryBarrier bmb[] = { { - .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, - .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT, - .dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_KHR, - .buffer = g_ray_model_state.kusochki_buffer.buffer, - .offset = 0, - .size = VK_WHOLE_SIZE, - }, { - .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, - .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT, - .dstAccessMask = VK_ACCESS_SHADER_READ_BIT, - .buffer = g_ray_model_state.model_headers_buffer.buffer, - .offset = 0, - .size = VK_WHOLE_SIZE, - } }; - - vkCmdPipelineBarrier(cmdbuf, - VK_PIPELINE_STAGE_TRANSFER_BIT, - VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR | VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, - 0, 0, NULL, ARRAYSIZE(bmb), bmb, 0, NULL); - } - R_VkResourcesFrameBeginStateChangeFIXME(cmdbuf, g_rtx.discontinuity); if (g_rtx.discontinuity) { DEBUG("discontinuity => false"); @@ -266,23 +242,6 @@ static void performTracing( vk_combuf_t *combuf, const perform_tracing_args_t* a prepareUniformBuffer(args->render_args, args->frame_index, args->frame_counter, args->fov_angle_y, args->frame_width, args->frame_height); - { // FIXME this should be done automatically inside meatpipe, TODO - //const uint32_t size = sizeof(struct Lights); - //const uint32_t size = sizeof(struct LightsMetadata); // + 8 * sizeof(uint32_t); - const VkBufferMemoryBarrier bmb[] = {{ - .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, - .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT, - .dstAccessMask = VK_ACCESS_SHADER_READ_BIT, - .buffer = args->light_bindings->buffer->buffer, - .offset = 0, - .size = VK_WHOLE_SIZE, - }}; - vkCmdPipelineBarrier(cmdbuf, - VK_PIPELINE_STAGE_TRANSFER_BIT, - VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, - 0, 0, NULL, ARRAYSIZE(bmb), bmb, 0, NULL); - } - // Update image resource links after the prev_-related swap above // TODO Preserve the indexes somewhere to avoid searching // FIXME I don't really get why we need this, the pointers should have been preserved ?! @@ -325,7 +284,7 @@ static void performTracing( vk_combuf_t *combuf, const perform_tracing_args_t* a // TODO this is to make sure we remember image layout after image_blit // The proper way to do this would be to teach R_VkImageBlit to properly track the image metadata (i.e. 
vk_resource_t state) - g_rtx.mainpipe_out->resource.write.image_layout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL; + g_rtx.mainpipe_out->resource.deprecate.write.image_layout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL; } DEBUG_END(cmdbuf); @@ -429,7 +388,7 @@ static void reloadMainpipe(void) { } // TODO full r/w initialization - res->resource.write.pipelines = 0; + res->resource.deprecate.write.pipelines = 0; res->resource.type = mr->descriptor_type; } else { // TODO no assert, complain and exit diff --git a/ref/vk/vk_rtx.h b/ref/vk/vk_rtx.h index f3288033e9..fb2e5a48d9 100644 --- a/ref/vk/vk_rtx.h +++ b/ref/vk/vk_rtx.h @@ -19,7 +19,7 @@ typedef struct { // Buffer holding vertex and index data // TODO remove struct { - VkBuffer buffer; // must be the same as in vk_ray_model_create_t TODO: validate or make impossible to specify incorrectly + vk_buffer_t* buffer; // must be the same as in vk_ray_model_create_t TODO: validate or make impossible to specify incorrectly uint64_t size; } geometry_data; From 6146adc95c8035ae79035be23af7b29b06338ed8 Mon Sep 17 00:00:00 2001 From: Ivan 'provod' Avdeev Date: Thu, 12 Dec 2024 15:17:06 -0500 Subject: [PATCH 39/62] vk: WIP begin using combuf barriers for images It compiles, but it's broken and doesn't pass validation yet. Resource part doesn't collect barriers correctly somehow, needs debugging. --- ref/vk/TODO.md | 6 + ref/vk/ray_pass.c | 15 ++- ref/vk/vk_buffer.h | 6 - ref/vk/vk_combuf.c | 279 +++++++++++++++++++++++++++++------------- ref/vk/vk_combuf.h | 4 +- ref/vk/vk_core.h | 7 ++ ref/vk/vk_framectl.c | 46 ++++++- ref/vk/vk_image.c | 156 +++++++++-------------- ref/vk/vk_image.h | 18 +-- ref/vk/vk_misc.c | 36 ++++++ ref/vk/vk_render.c | 10 +- ref/vk/vk_render.h | 6 +- ref/vk/vk_resources.c | 115 +++-------------- ref/vk/vk_resources.h | 28 +---- ref/vk/vk_rtx.c | 83 ++++--------- ref/vk/vk_rtx.h | 8 +- 16 files changed, 429 insertions(+), 394 deletions(-) diff --git a/ref/vk/TODO.md b/ref/vk/TODO.md index c6b1eb4f5c..83ee7f40d4 100644 --- a/ref/vk/TODO.md +++ b/ref/vk/TODO.md @@ -15,6 +15,12 @@ - [ ] Render graph - [ ] performance profiling and comparison +## 2024-12-12 E384 +- [ ] zero vkCmdPipelineBarriers calls + - [ ] track image sync state with image (and not with resource) + - [x] the code is there, but it's broken and remains to be debugged + - [ ] grep for anything else + ## 2024-12-10 E383 - [x] Add transfer stage to submit semaphore separating command buffer: fixes sync for rt - [x] Issue staging commit for a bunch of RT buffers (likely not all of them) diff --git a/ref/vk/ray_pass.c b/ref/vk/ray_pass.c index 71ad4f5c4f..42deafe341 100644 --- a/ref/vk/ray_pass.c +++ b/ref/vk/ray_pass.c @@ -270,13 +270,23 @@ void RayPassPerform(struct ray_pass_s *pass, vk_combuf_t *combuf, ray_pass_perfo const qboolean write = i >= pass->desc.write_from; R_VkResourceAddToBarrier(res, write, pass->pipeline_type, &barrier); + } + + DEBUG_BEGIN(combuf->cmdbuf, pass->debug_name); + R_VkBarrierCommit(combuf, &barrier, pass->pipeline_type); + + for (int i = 0; i < num_bindings; ++i) { + const int index = args.resources_map ? args.resources_map[i] : i; + vk_resource_t* const res = args.resources[index]; const vk_descriptor_value_t *const src_value = &res->value; vk_descriptor_value_t *const dst_value = pass->desc.riptors.values + i; + // layout is only known after barrier + // FIXME this is not true, it can be known earlier if (res->type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) { dst_value->image = (VkDescriptorImageInfo) { - .imageLayout = write ? 
res->deprecate.write.image_layout : res->deprecate.read.image_layout,
+				.imageLayout = src_value->image_object->sync.layout,
 				.imageView = src_value->image_object->view,
 				.sampler = VK_NULL_HANDLE,
 			};
@@ -287,9 +297,6 @@ void RayPassPerform(struct ray_pass_s *pass, vk_combuf_t *combuf, ray_pass_perfo

 	VK_DescriptorsWrite(&pass->desc.riptors, args.frame_set_slot);

-	DEBUG_BEGIN(combuf->cmdbuf, pass->debug_name);
-	R_VkBarrierCommit(combuf, &barrier, pass->pipeline_type);
-
 	switch (pass->type) {
 		case RayPassType_Tracing:
 			{
diff --git a/ref/vk/vk_buffer.h b/ref/vk/vk_buffer.h
index dd1e4ed005..f661e76d95 100644
--- a/ref/vk/vk_buffer.h
+++ b/ref/vk/vk_buffer.h
@@ -5,12 +5,6 @@
 #include "vk_staging.h"
 #include "r_flipping.h"

-typedef struct {
-	VkAccessFlags2 access;
-	VkPipelineStageFlagBits2 stage;
-	//VkImageLayout layout;
-} r_vksync_scope_t;
-
 typedef struct {
 	uint32_t combuf_tag;
 	r_vksync_scope_t write, read;
diff --git a/ref/vk/vk_combuf.c b/ref/vk/vk_combuf.c
index 1180047273..072bd239e3 100644
--- a/ref/vk/vk_combuf.c
+++ b/ref/vk/vk_combuf.c
@@ -2,6 +2,7 @@
 #include "vk_commandpool.h"
 #include "vk_buffer.h"
 #include "vk_logs.h"
+#include "vk_image.h"

 #include "profiler.h"

@@ -11,6 +12,7 @@

 #define MAX_QUERY_COUNT 128
 #define MAX_BUFFER_BARRIERS 16
+#define MAX_IMAGE_BARRIERS 16

 #define BEGIN_INDEX_TAG 0x10000000

@@ -267,108 +269,213 @@ static void printStageMask(const char *prefix, VkPipelineStageFlags2 stages) {
 	PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_OPTICAL_FLOW_BIT_NV);
 }

+static qboolean makeBufferBarrier(VkBufferMemoryBarrier2* out_bmb, const r_vkcombuf_barrier_buffer_t *const bufbar, VkPipelineStageFlags2 dst_stage, uint32_t cb_tag) {
+	vk_buffer_t *const buf = bufbar->buffer;
+	const qboolean is_write = (bufbar->access & ACCESS_WRITE_BITS) != 0;
+	const qboolean is_read = (bufbar->access & ACCESS_READ_BITS) != 0;
+	ASSERT((bufbar->access & ~(ACCESS_KNOWN_BITS)) == 0);
+
+	if (buf->sync.combuf_tag != cb_tag) {
+		// This buffer hasn't yet been used in this command buffer, no need to issue a barrier
+		buf->sync.combuf_tag = cb_tag;
+		buf->sync.write = is_write
+			? (r_vksync_scope_t){.access = bufbar->access & ACCESS_WRITE_BITS, .stage = dst_stage}
+			: (r_vksync_scope_t){.access = 0, .stage = 0 };
+		buf->sync.read = is_read
+			? (r_vksync_scope_t){.access = bufbar->access & ACCESS_READ_BITS, .stage = dst_stage}
+			: (r_vksync_scope_t){.access = 0, .stage = 0 };
+		return false;
+	}
+
+	*out_bmb = (VkBufferMemoryBarrier2) {
+		.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER_2,
+		.pNext = NULL,
+		.buffer = buf->buffer,
+		.offset = 0,
+		.size = VK_WHOLE_SIZE,
+		.dstStageMask = dst_stage,
+		.dstAccessMask = bufbar->access,
+	};
+
+	// TODO: support read-and-write scenarios
+	ASSERT(is_read ^ is_write);
+	if (is_write) {
+		// Write is synchronized with previous reads and writes
+		out_bmb->srcStageMask = buf->sync.write.stage | buf->sync.read.stage;
+		out_bmb->srcAccessMask = buf->sync.write.access | buf->sync.read.access;
+
+		// Store where write happened
+		buf->sync.write.access = bufbar->access;
+		buf->sync.write.stage = dst_stage;
+
+		// If there were no previous reads or writes, there's no reason to synchronize with anything
+		if (out_bmb->srcStageMask == 0)
+			return false;
+
+		// Reset read state
+		// TODO is_read? 
for read-and-write + buf->sync.read.access = 0; + buf->sync.read.stage = 0; + } + + if (is_read) { + // Read is synchronized with previous writes only + out_bmb->srcStageMask = buf->sync.write.stage; + out_bmb->srcAccessMask = buf->sync.write.access; + + // Check whether this is a new barrier + if ((buf->sync.read.access & bufbar->access) != bufbar->access + && (buf->sync.read.stage & dst_stage) != dst_stage) { + // Remember this read happened + buf->sync.read.access |= bufbar->access; + buf->sync.read.stage |= dst_stage; + } else { + // Already synchronized, no need to do anything + return false; + } + + // Also skip issuing a barrier, if there were no previous writes -- nothing to sync with + // Note that this needs to happen late, as all reads must still be recorded in sync.read fields + if (buf->sync.write.stage == 0) + return false; + } + + if (LOG_VERBOSE) { + DEBUG(" srcAccessMask = %llx", (unsigned long long)out_bmb->srcAccessMask); + printAccessMask(" ", out_bmb->srcAccessMask); + DEBUG(" srcStageMask = %llx", (unsigned long long)out_bmb->srcStageMask); + printStageMask(" ", out_bmb->srcStageMask); + DEBUG(" dstAccessMask = %llx", (unsigned long long)out_bmb->dstAccessMask); + printAccessMask(" ", out_bmb->dstAccessMask); + DEBUG(" dstStageMask = %llx", (unsigned long long)out_bmb->dstStageMask); + printStageMask(" ", out_bmb->dstStageMask); + } + + return true; +} + +static qboolean makeImageBarrier(VkImageMemoryBarrier2* out_imb, const r_vkcombuf_barrier_image_t *const imgbar, VkPipelineStageFlags2 dst_stage) { + r_vk_image_t *const img = imgbar->image; + const qboolean is_write = (imgbar->access & ACCESS_WRITE_BITS) != 0; + const qboolean is_read = (imgbar->access & ACCESS_READ_BITS) != 0; + const VkImageLayout old_layout = is_write ? VK_IMAGE_LAYOUT_UNDEFINED : img->sync.layout; + const qboolean is_layout_transfer = imgbar->layout != old_layout; + ASSERT((imgbar->access & ~(ACCESS_KNOWN_BITS)) == 0); + + *out_imb = (VkImageMemoryBarrier2) { + .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2, + .pNext = NULL, + .srcStageMask = img->sync.write.stage, + .srcAccessMask = img->sync.write.access, + .dstStageMask = dst_stage, + .dstAccessMask = imgbar->access, + .oldLayout = old_layout, + .newLayout = imgbar->layout, + .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .image = img->image, + .subresourceRange = (VkImageSubresourceRange) { + .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, + .baseMipLevel = 0, + .levelCount = 1, + .baseArrayLayer = 0, + .layerCount = 1, + }, + }; + + // TODO: support read-and-write scenarios + //ASSERT(is_read ^ is_write); + + if (is_write || is_layout_transfer) { + out_imb->srcStageMask |= img->sync.read.stage; + out_imb->srcAccessMask |= img->sync.read.access; + + img->sync.write.access = imgbar->access; + img->sync.write.stage = dst_stage; + + img->sync.read.access = 0; + img->sync.read.stage = 0; + } + + if (is_read) { + const qboolean same_access = (img->sync.read.access & imgbar->access) != imgbar->access; + const qboolean same_stage = (img->sync.read.stage & dst_stage) != dst_stage; + + if (same_access && same_stage && !is_layout_transfer) + return false; + + img->sync.read.access |= imgbar->access; + img->sync.read.stage |= dst_stage; + } + + if (!is_layout_transfer && out_imb->srcAccessMask == 0 && out_imb->srcStageMask == 0) { + return false; + } + + if (LOG_VERBOSE) { + DEBUG(" srcAccessMask = %llx", (unsigned long long)out_imb->srcAccessMask); + printAccessMask(" ", out_imb->srcAccessMask); + 
DEBUG(" srcStageMask = %llx", (unsigned long long)out_imb->srcStageMask); + printStageMask(" ", out_imb->srcStageMask); + DEBUG(" dstAccessMask = %llx", (unsigned long long)out_imb->dstAccessMask); + printAccessMask(" ", out_imb->dstAccessMask); + DEBUG(" dstStageMask = %llx", (unsigned long long)out_imb->dstStageMask); + printStageMask(" ", out_imb->dstStageMask); + DEBUG(" oldLayout = %s (%llx)", R_VkImageLayoutName(out_imb->oldLayout), (unsigned long long)out_imb->oldLayout); + DEBUG(" newLayout = %s (%llx)", R_VkImageLayoutName(out_imb->newLayout), (unsigned long long)out_imb->newLayout); + } + + // Store new layout + img->sync.layout = imgbar->layout; + + return true; +} + void R_VkCombufIssueBarrier(vk_combuf_t* combuf, r_vkcombuf_barrier_t bar) { vk_combuf_impl_t *const cb = (vk_combuf_impl_t*)combuf; - ASSERT(bar.images.count == 0 && "TODO"); BOUNDED_ARRAY(VkBufferMemoryBarrier2, buffer_barriers, MAX_BUFFER_BARRIERS); - for (int i = 0; i < bar.buffers.count; ++i) { const r_vkcombuf_barrier_buffer_t *const bufbar = bar.buffers.items + i; - vk_buffer_t *const buf = bufbar->buffer; - const qboolean is_write = (bufbar->access & ACCESS_WRITE_BITS) != 0; - const qboolean is_read = (bufbar->access & ACCESS_READ_BITS) != 0; - ASSERT((bufbar->access & ~(ACCESS_KNOWN_BITS)) == 0); - - if (buf->sync.combuf_tag != cb->tag) { - // This buffer hasn't been yet used in this command buffer, no need to issue a barrier - buf->sync.combuf_tag = cb->tag; - buf->sync.write = is_write - ? (r_vksync_scope_t){.access = bufbar->access & ACCESS_WRITE_BITS, .stage = bar.stage} - : (r_vksync_scope_t){.access = 0, .stage = 0 }; - buf->sync.read = is_read - ? (r_vksync_scope_t){.access = bufbar->access & ACCESS_READ_BITS, .stage = bar.stage} - : (r_vksync_scope_t){.access = 0, .stage = 0 }; - continue; + if (LOG_VERBOSE) { + DEBUG(" buf[%d]: buf=%llx barrier:", i, (unsigned long long)bufbar->buffer->buffer); } - VkBufferMemoryBarrier2 bmb = { - .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER_2, - .pNext = NULL, - .buffer = buf->buffer, - .offset = 0, - .size = VK_WHOLE_SIZE, - .dstStageMask = bar.stage, - .dstAccessMask = bufbar->access, - }; - - // TODO: support read-and-write scenarios - ASSERT(is_read ^ is_write); - if (is_write) { - // Write is synchronized with previous reads and writes - bmb.srcStageMask = buf->sync.write.stage | buf->sync.read.stage; - bmb.srcAccessMask = buf->sync.write.access | buf->sync.read.access; - - // Store where write happened - buf->sync.write.access = bufbar->access; - buf->sync.write.stage = bar.stage; - - // If there were no previous reads or writes, there no reason to synchronize with anything - if (bmb.srcStageMask == 0) - continue; - - // Reset read state - // TOOD is_read? 
for read-and-write - buf->sync.read.access = 0; - buf->sync.read.stage = 0; + VkBufferMemoryBarrier2 bmb; + if (!makeBufferBarrier(&bmb, bufbar, bar.stage, cb->tag)) { + continue; } - if (is_read) { - // Read is synchronized with previous writes only - bmb.srcStageMask = buf->sync.write.stage; - bmb.srcAccessMask = buf->sync.write.access; - - // Check whether this is a new barrier - if ((buf->sync.read.access & bufbar->access) != bufbar->access - && (buf->sync.read.stage & bar.stage) != bar.stage) { - // Remember this read happened - buf->sync.read.access |= bufbar->access; - buf->sync.read.stage |= bar.stage; - } else { - // Already synchronized, no need to do anything - continue; - } - - // Also skip issuing a barrier, if there were no previous writes -- nothing to sync with - // Note that this needs to happen late, as all reads must still be recorded in sync.read fields - if (buf->sync.write.stage == 0) - continue; - } + BOUNDED_ARRAY_APPEND_ITEM(buffer_barriers, bmb); + } + BOUNDED_ARRAY(VkImageMemoryBarrier2, image_barriers, MAX_IMAGE_BARRIERS); + for (int i = 0; i < bar.images.count; ++i) { + const r_vkcombuf_barrier_image_t *const imgbar = bar.images.items + i; if (LOG_VERBOSE) { - DEBUG(" buf[%d]: buf=%llx barrier:", i, (unsigned long long)buf->buffer); - DEBUG(" srcAccessMask = %llx", (unsigned long long)bmb.srcAccessMask); - printAccessMask(" ", bmb.srcAccessMask); - DEBUG(" srcStageMask = %llx", (unsigned long long)bmb.srcStageMask); - printStageMask(" ", bmb.srcStageMask); - DEBUG(" dstAccessMask = %llx", (unsigned long long)bmb.dstAccessMask); - printAccessMask(" ", bmb.dstAccessMask); - DEBUG(" dstStageMask = %llx", (unsigned long long)bmb.dstStageMask); - printStageMask(" ", bmb.dstStageMask); + DEBUG(" img[%d]: img=%llx (%s) barrier:", i, (unsigned long long)imgbar->image->image, imgbar->image->name); } - BOUNDED_ARRAY_APPEND_ITEM(buffer_barriers, bmb); - } + VkImageMemoryBarrier2 imb; + if (!makeImageBarrier(&imb, imgbar, bar.stage)) { + continue; + } - if (buffer_barriers.count) { - vkCmdPipelineBarrier2(combuf->cmdbuf, &(VkDependencyInfo) { - .sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO, - .pNext = NULL, - .dependencyFlags = 0, - .bufferMemoryBarrierCount = buffer_barriers.count, - .pBufferMemoryBarriers = buffer_barriers.items, - }); + BOUNDED_ARRAY_APPEND_ITEM(image_barriers, imb); } + + if (buffer_barriers.count == 0 && image_barriers.count == 0) + return; + + vkCmdPipelineBarrier2(combuf->cmdbuf, &(VkDependencyInfo) { + .sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO, + .pNext = NULL, + .dependencyFlags = 0, + .bufferMemoryBarrierCount = buffer_barriers.count, + .pBufferMemoryBarriers = buffer_barriers.items, + .imageMemoryBarrierCount = image_barriers.count, + .pImageMemoryBarriers = image_barriers.items, + }); } diff --git a/ref/vk/vk_combuf.h b/ref/vk/vk_combuf.h index 9283dd585c..1ca6980b07 100644 --- a/ref/vk/vk_combuf.h +++ b/ref/vk/vk_combuf.h @@ -25,9 +25,9 @@ typedef struct { VkAccessFlags2 access; } r_vkcombuf_barrier_buffer_t; -struct vk_image_s; +struct r_vk_image_s; typedef struct { - struct vk_image_s *image; + struct r_vk_image_s *image; VkImageLayout layout; VkAccessFlags2 access; } r_vkcombuf_barrier_image_t; diff --git a/ref/vk/vk_core.h b/ref/vk/vk_core.h index ea4dc38f60..41ab363ea5 100644 --- a/ref/vk/vk_core.h +++ b/ref/vk/vk_core.h @@ -69,6 +69,7 @@ const char *R_VkResultName(VkResult result); const char *R_VkPresentModeName(VkPresentModeKHR present_mode); const char *R_VkFormatName(VkFormat format); const char *R_VkColorSpaceName(VkColorSpaceKHR 
colorspace);
+const char *R_VkImageLayoutName(VkImageLayout);

 #define SET_DEBUG_NAME(object, type, name) \
 do { \
@@ -277,3 +278,9 @@ do { \
 	INSTANCE_FUNCS(X)
 	INSTANCE_DEBUG_FUNCS(X)
 #undef X
+
+// TODO is there a better place for this, vk_utils.h?
+typedef struct {
+	VkAccessFlags2 access;
+	VkPipelineStageFlagBits2 stage;
+} r_vksync_scope_t;
diff --git a/ref/vk/vk_framectl.c b/ref/vk/vk_framectl.c
index c1492482c4..49b4a2e61c 100644
--- a/ref/vk/vk_framectl.c
+++ b/ref/vk/vk_framectl.c
@@ -313,8 +313,37 @@ static void enqueueRendering( vk_combuf_t* combuf, qboolean draw ) {

 	const VkCommandBuffer cmdbuf = combuf->cmdbuf;

+	// This is a temporary non-owning placeholder object.
+	// It is used only for combuf barrier tracking.
+	r_vk_image_t tmp_dst_image = {
+		.image = g_frame.current.framebuffer.image,
+		.view = g_frame.current.framebuffer.view,
+		.width = g_frame.current.framebuffer.width,
+		.height = g_frame.current.framebuffer.height,
+		.depth = 1,
+		.mips = 1,
+		.layers = 1,
+
+		// TODO .format = g_frame.current.framebuffer.???
+		// TODO .image_size = ???
+
+		// TODO is this correct?
+		.sync = {
+			.layout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
+			.write = {
+				.access = VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT,
+				.stage = VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT
+			},
+			.read = {
+				.access = VK_ACCESS_2_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_2_MEMORY_READ_BIT,
+				.stage = VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT
+			},
+		},
+	};
+	snprintf(tmp_dst_image.name, sizeof(tmp_dst_image.name), "framebuffer[%d]", g_frame.current.framebuffer.index);
+
 	if (vk_frame.rtx_enabled) {
-		VK_RenderEndRTX( combuf, g_frame.current.framebuffer.view, g_frame.current.framebuffer.image, g_frame.current.framebuffer.width, g_frame.current.framebuffer.height );
+		VK_RenderEndRTX( combuf, &tmp_dst_image );
 	} else {
 		// FIXME: how to do this properly before render pass?
 		// Needed to avoid VUID-vkCmdCopyBuffer-renderpass
@@ -333,6 +362,19 @@ static void enqueueRendering( vk_combuf_t* combuf, qboolean draw ) {
 	}

 	if (draw) {
+		const r_vkcombuf_barrier_image_t dst_use[] = {{
+			.image = &tmp_dst_image,
+			.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
+			.access = VK_ACCESS_2_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT,
+		}};
+		R_VkCombufIssueBarrier(combuf, (r_vkcombuf_barrier_t) {
+			.stage = VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT,
+			.images = {
+				.items = dst_use,
+				.count = COUNTOF(dst_use),
+			},
+		});
+
 		const VkRenderPassBeginInfo rpbi = {
 			.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
 			.renderPass = vk_frame.rtx_enabled ? 
vk_frame.render_pass.after_ray_tracing : vk_frame.render_pass.raster, @@ -408,7 +450,7 @@ static void submit( vk_combuf_t* combuf, qboolean wait, qboolean draw ) { BOUNDED_ARRAY_APPEND_ITEM(wait_stageflags, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT | VK_PIPELINE_STAGE_2_ALL_TRANSFER_BIT); BOUNDED_ARRAY_APPEND_ITEM(signalphores, frame->sem_done2); - DEBUG("submit: frame=%d, staging_tag=%u, combuf=%p, wait for semaphores[%d]={%llx, %llx}, signal semaphores[%d]={%llx, %llx}\n", + DEBUG("submit: frame=%d, staging_tag=%u, combuf=%p, wait for semaphores[%d]={%llx, %llx}, signal semaphores[%d]={%llx, %llx}", g_frame.current.index, frame->staging_generation_tag, frame->combuf->cmdbuf, diff --git a/ref/vk/vk_image.c b/ref/vk/vk_image.c index c97ac2e430..8625f11593 100644 --- a/ref/vk/vk_image.c +++ b/ref/vk/vk_image.c @@ -152,110 +152,68 @@ void R_VkImageDestroy(r_vk_image_t *img) { *img = (r_vk_image_t){0}; } -void R_VkImageClear(VkCommandBuffer cmdbuf, VkImage image, VkAccessFlags src_access, VkPipelineStageFlags from_stage) { - const VkImageMemoryBarrier image_barriers[] = { { - .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, - .image = image, - .srcAccessMask = src_access, - .dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT, - .oldLayout = VK_IMAGE_LAYOUT_UNDEFINED, - .newLayout = VK_IMAGE_LAYOUT_GENERAL, - .subresourceRange = (VkImageSubresourceRange) { - .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, - .baseMipLevel = 0, - .levelCount = 1, - .baseArrayLayer = 0, - .layerCount = 1, - }} }; +void R_VkImageClear(r_vk_image_t *img, struct vk_combuf_s* combuf) { + const VkImageSubresourceRange ranges[] = {{ + .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, + .baseMipLevel = 0, + .levelCount = VK_REMAINING_MIP_LEVELS, + .baseArrayLayer = 0, + .layerCount = VK_REMAINING_ARRAY_LAYERS, + }}; + const r_vkcombuf_barrier_image_t ib[] = {{ + .image = img, + // Could be VK_IMAGE_LAYOUT_GENERAL too + .layout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, + .access = VK_ACCESS_2_TRANSFER_WRITE_BIT, + }}; + R_VkCombufIssueBarrier(combuf, (r_vkcombuf_barrier_t){ + .stage = VK_PIPELINE_STAGE_2_TRANSFER_BIT, + .images = { + .items = ib, + .count = COUNTOF(ib), + }, + }); const VkClearColorValue clear_value = {0}; - - vkCmdPipelineBarrier(cmdbuf, from_stage, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, - 0, NULL, 0, NULL, COUNTOF(image_barriers), image_barriers); - - vkCmdClearColorImage(cmdbuf, image, VK_IMAGE_LAYOUT_GENERAL, &clear_value, 1, &image_barriers->subresourceRange); + vkCmdClearColorImage(combuf->cmdbuf, img->image, img->sync.layout, &clear_value, COUNTOF(ranges), ranges); } -void R_VkImageBlit(VkCommandBuffer cmdbuf, const r_vkimage_blit_args *blit_args) { - { - const VkImageMemoryBarrier image_barriers[] = { { - .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, - .image = blit_args->src.image, - .srcAccessMask = blit_args->src.srcAccessMask, - .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT, - .oldLayout = blit_args->src.oldLayout, - .newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, - .subresourceRange = - (VkImageSubresourceRange){ - .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, - .baseMipLevel = 0, - .levelCount = 1, - .baseArrayLayer = 0, - .layerCount = 1, - }, - }, { - .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, - .image = blit_args->dst.image, - .srcAccessMask = blit_args->dst.srcAccessMask, - .dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT, - .oldLayout = blit_args->dst.oldLayout, - .newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, - .subresourceRange = - (VkImageSubresourceRange){ - .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, - 
.baseMipLevel = 0, - .levelCount = 1, - .baseArrayLayer = 0, - .layerCount = 1, - }, - } }; - - vkCmdPipelineBarrier(cmdbuf, - blit_args->in_stage, - VK_PIPELINE_STAGE_TRANSFER_BIT, - 0, 0, NULL, 0, NULL, COUNTOF(image_barriers), image_barriers); - } +void R_VkImageBlit(struct vk_combuf_s *combuf, const r_vkimage_blit_args *args ) { + const r_vkcombuf_barrier_image_t ib[] = {{ + .image = args->src.image, + .layout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, + .access = VK_ACCESS_2_TRANSFER_READ_BIT, + }, { + .image = args->dst.image, + .layout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, + .access = VK_ACCESS_2_TRANSFER_WRITE_BIT, + }}; + R_VkCombufIssueBarrier(combuf, (r_vkcombuf_barrier_t){ + .stage = VK_PIPELINE_STAGE_2_TRANSFER_BIT, + .images = { + .items = ib, + .count = COUNTOF(ib), + }, + }); { VkImageBlit region = {0}; - region.srcOffsets[1].x = blit_args->src.width; - region.srcOffsets[1].y = blit_args->src.height; - region.srcOffsets[1].z = 1; - region.dstOffsets[1].x = blit_args->dst.width; - region.dstOffsets[1].y = blit_args->dst.height; - region.dstOffsets[1].z = 1; + region.srcOffsets[1].x = args->src.width ? args->src.width : args->src.image->width; + region.srcOffsets[1].y = args->src.height ? args->src.height : args->src.image->height; + region.srcOffsets[1].z = args->src.depth ? args->src.depth : args->src.image->depth; + + region.dstOffsets[1].x = args->dst.width ? args->dst.width : args->dst.image->width; + region.dstOffsets[1].y = args->dst.height ? args->dst.height : args->dst.image->height; + region.dstOffsets[1].z = args->dst.depth ? args->dst.depth : args->dst.image->depth; + region.srcSubresource.aspectMask = region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; - region.srcSubresource.layerCount = region.dstSubresource.layerCount = 1; - vkCmdBlitImage(cmdbuf, - blit_args->src.image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, - blit_args->dst.image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, + region.srcSubresource.layerCount = region.dstSubresource.layerCount = 1; // VK_REMAINING_ARRAY_LAYERS requires maintenance5. No need to use it now. + vkCmdBlitImage(combuf->cmdbuf, + args->src.image->image, args->src.image->sync.layout, + args->dst.image->image, args->dst.image->sync.layout, 1, ®ion, VK_FILTER_NEAREST); } - - { - VkImageMemoryBarrier image_barriers[] = { - { - .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, - .image = blit_args->dst.image, - .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT, - .dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, - .oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, - .newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, - .subresourceRange = - (VkImageSubresourceRange){ - .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, - .baseMipLevel = 0, - .levelCount = 1, - .baseArrayLayer = 0, - .layerCount = 1, - }, - }}; - vkCmdPipelineBarrier(cmdbuf, - VK_PIPELINE_STAGE_TRANSFER_BIT, - VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, - 0, 0, NULL, 0, NULL, COUNTOF(image_barriers), image_barriers); - } } typedef struct { @@ -341,10 +299,11 @@ void R_VkImageUploadCommit( struct vk_combuf_s *combuf, VkPipelineStageFlagBits // 1.b Invoke the barriers vkCmdPipelineBarrier(combuf->cmdbuf, - VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, + //VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, + VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, NULL, 0, NULL, - barriers_count, (VkImageMemoryBarrier*)g_image_upload.barriers.items + barriers_count, g_image_upload.barriers.items ); // 2. 
Phase 2: issue copy commands for each valid image @@ -388,6 +347,13 @@ void R_VkImageUploadCommit( struct vk_combuf_s *combuf, VkPipelineStageFlagBits if (!up->image) continue; + // Update image tracking state + up->image->sync.layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; + up->image->sync.read.access = VK_ACCESS_SHADER_READ_BIT; + up->image->sync.read.stage = dst_stages; + up->image->sync.write.access = VK_ACCESS_TRANSFER_WRITE_BIT; + up->image->sync.write.stage = VK_PIPELINE_STAGE_2_TRANSFER_BIT; + g_image_upload.barriers.items[barriers_count++] = (VkImageMemoryBarrier) { .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, .image = up->image->image, diff --git a/ref/vk/vk_image.h b/ref/vk/vk_image.h index 4ea71f9802..c951233751 100644 --- a/ref/vk/vk_image.h +++ b/ref/vk/vk_image.h @@ -23,6 +23,11 @@ typedef struct r_vk_image_s { uint32_t image_size; int upload_slot; + + struct { + VkImageLayout layout; + r_vksync_scope_t write, read; + } sync; } r_vk_image_t; enum { @@ -45,19 +50,17 @@ typedef struct { r_vk_image_t R_VkImageCreate(const r_vk_image_create_t *create); void R_VkImageDestroy(r_vk_image_t *img); -void R_VkImageClear(VkCommandBuffer cmdbuf, VkImage image, VkAccessFlags src_access, VkPipelineStageFlags from_stage); +struct vk_combuf_s; +void R_VkImageClear(r_vk_image_t *img, struct vk_combuf_s* combuf); typedef struct { - VkPipelineStageFlags in_stage; struct { - VkImage image; - int width, height; - VkImageLayout oldLayout; - VkAccessFlags srcAccessMask; + r_vk_image_t *image; + int width, height, depth; } src, dst; } r_vkimage_blit_args; -void R_VkImageBlit( VkCommandBuffer cmdbuf, const r_vkimage_blit_args *blit_args ); +void R_VkImageBlit(struct vk_combuf_s *combuf, const r_vkimage_blit_args *blit_args ); uint32_t R_VkImageFormatTexelBlockSize( VkFormat format ); @@ -67,5 +70,4 @@ void R_VkImageUploadSlice( r_vk_image_t *img, int layer, int mip, int size, cons void R_VkImageUploadEnd( r_vk_image_t *img ); // Upload all enqueued images using the given command buffer -struct vk_combuf_s; void R_VkImageUploadCommit( struct vk_combuf_s *combuf, VkPipelineStageFlagBits dst_stages ); diff --git a/ref/vk/vk_misc.c b/ref/vk/vk_misc.c index afa1230b8e..2506b6602a 100644 --- a/ref/vk/vk_misc.c +++ b/ref/vk/vk_misc.c @@ -331,3 +331,39 @@ const char *R_VkColorSpaceName(VkColorSpaceKHR colorspace) { default: return "UNKNOWN"; } } + +const char *R_VkImageLayoutName(VkImageLayout layout) { + switch (layout) { + case VK_IMAGE_LAYOUT_UNDEFINED: return "VK_IMAGE_LAYOUT_UNDEFINED"; + case VK_IMAGE_LAYOUT_GENERAL: return "VK_IMAGE_LAYOUT_GENERAL"; + case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: return "VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL"; + case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: return "VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL"; + case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: return "VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL"; + case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: return "VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL"; + case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: return "VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL"; + case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: return "VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL"; + case VK_IMAGE_LAYOUT_PREINITIALIZED: return "VK_IMAGE_LAYOUT_PREINITIALIZED"; + case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL: return "VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL"; + case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL: return 
"VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL"; + case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL: return "VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL"; + case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL: return "VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL"; + case VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL: return "VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL"; + case VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL: return "VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL"; + case VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL: return "VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL"; + case VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL: return "VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL"; + case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR: return "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR"; + case VK_IMAGE_LAYOUT_VIDEO_DECODE_DST_KHR: return "VK_IMAGE_LAYOUT_VIDEO_DECODE_DST_KHR"; + case VK_IMAGE_LAYOUT_VIDEO_DECODE_SRC_KHR: return "VK_IMAGE_LAYOUT_VIDEO_DECODE_SRC_KHR"; + case VK_IMAGE_LAYOUT_VIDEO_DECODE_DPB_KHR: return "VK_IMAGE_LAYOUT_VIDEO_DECODE_DPB_KHR"; + case VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR: return "VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR"; + case VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT: return "VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT"; + case VK_IMAGE_LAYOUT_FRAGMENT_SHADING_RATE_ATTACHMENT_OPTIMAL_KHR: return "VK_IMAGE_LAYOUT_FRAGMENT_SHADING_RATE_ATTACHMENT_OPTIMAL_KHR"; + case VK_IMAGE_LAYOUT_RENDERING_LOCAL_READ_KHR: return "VK_IMAGE_LAYOUT_RENDERING_LOCAL_READ_KHR"; + case VK_IMAGE_LAYOUT_VIDEO_ENCODE_DST_KHR: return "VK_IMAGE_LAYOUT_VIDEO_ENCODE_DST_KHR"; + case VK_IMAGE_LAYOUT_VIDEO_ENCODE_SRC_KHR: return "VK_IMAGE_LAYOUT_VIDEO_ENCODE_SRC_KHR"; + case VK_IMAGE_LAYOUT_VIDEO_ENCODE_DPB_KHR: return "VK_IMAGE_LAYOUT_VIDEO_ENCODE_DPB_KHR"; + case VK_IMAGE_LAYOUT_ATTACHMENT_FEEDBACK_LOOP_OPTIMAL_EXT: return "VK_IMAGE_LAYOUT_ATTACHMENT_FEEDBACK_LOOP_OPTIMAL_EXT"; + case VK_IMAGE_LAYOUT_MAX_ENUM: break; + } + return "UNKNOWN"; +} diff --git a/ref/vk/vk_render.c b/ref/vk/vk_render.c index 58ebe2ca18..037ffd25b2 100644 --- a/ref/vk/vk_render.c +++ b/ref/vk/vk_render.c @@ -796,8 +796,7 @@ void VK_RenderDebugLabelEnd( void ) drawCmdPushDebugLabelEnd(); } -void VK_RenderEndRTX( struct vk_combuf_s* combuf, VkImageView img_dst_view, VkImage img_dst, uint32_t w, uint32_t h ) -{ +void VK_RenderEndRTX( struct vk_combuf_s* combuf, struct r_vk_image_s *dst) { vk_buffer_t *const geom = R_GeometryBuffer_Get(); ASSERT(vk_core.rtx); @@ -806,12 +805,7 @@ void VK_RenderEndRTX( struct vk_combuf_s* combuf, VkImageView img_dst_view, VkIm { const vk_ray_frame_render_args_t args = { .combuf = combuf, - .dst = { - .image_view = img_dst_view, - .image = img_dst, - .width = w, - .height = h, - }, + .dst = dst, .projection = &g_render_state.vk_projection, .view = &g_camera.viewMatrix, diff --git a/ref/vk/vk_render.h b/ref/vk/vk_render.h index cc871b338a..b17d397df2 100644 --- a/ref/vk/vk_render.h +++ b/ref/vk/vk_render.h @@ -165,7 +165,9 @@ void VK_RenderDebugLabelBegin( const char *label ); void VK_RenderDebugLabelEnd( void ); void VK_RenderBegin( qboolean ray_tracing ); + struct vk_combuf_s; void VK_RenderEnd( struct vk_combuf_s*, qboolean draw, uint32_t width, uint32_t height, int frame_index ); -struct vk_combuf_s; -void VK_RenderEndRTX( struct vk_combuf_s* combuf, VkImageView img_dst_view, VkImage img_dst, uint32_t w, uint32_t h ); + +struct r_vk_image_s; +void VK_RenderEndRTX( struct vk_combuf_s* combuf, struct r_vk_image_s *dst); diff --git a/ref/vk/vk_resources.c b/ref/vk/vk_resources.c index f037c63d13..f8ad95acb4 100644 --- a/ref/vk/vk_resources.c +++ 
b/ref/vk/vk_resources.c @@ -133,7 +133,7 @@ void R_VkResourcesSetBuiltinFIXME(r_vk_resources_builtin_fixme_t args) { } // FIXME not even sure what this functions is supposed to do in the end -void R_VkResourcesFrameBeginStateChangeFIXME(VkCommandBuffer cmdbuf, qboolean discontinuity) { +void R_VkResourcesFrameBeginStateChangeFIXME(vk_combuf_t* combuf, qboolean discontinuity) { // Transfer previous frames before they had a chance of their resource-barrier metadata overwritten (as there's no guaranteed order for them) for (int i = ExternalResource_COUNT; i < MAX_RESOURCES; ++i) { rt_resource_t* const res = g_res.res + i; @@ -156,13 +156,10 @@ void R_VkResourcesFrameBeginStateChangeFIXME(VkCommandBuffer cmdbuf, qboolean di src->image = tmp_img; // If there was no initial state, prepare it. (this should happen only for the first frame) - if (discontinuity || res->resource.deprecate.write.pipelines == 0) { + if (discontinuity || res->image.sync.write.stage == 0) { // TODO is there a better way? Can image be cleared w/o explicit clear op? - DEBUG("discontinuity: %s", res->name); - R_VkImageClear( cmdbuf, res->image.image, 0, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT ); - res->resource.deprecate.write.pipelines = VK_PIPELINE_STAGE_TRANSFER_BIT; - res->resource.deprecate.write.image_layout = VK_IMAGE_LAYOUT_GENERAL; - res->resource.deprecate.write.access_mask = VK_ACCESS_TRANSFER_WRITE_BIT; + WARN("discontinuity: %s", res->name); + R_VkImageClear( &res->image, combuf ); } } @@ -172,63 +169,8 @@ void R_VkResourcesFrameBeginStateChangeFIXME(VkCommandBuffer cmdbuf, qboolean di if (!res->name[0] || !res->image.image || res->source_index_plus_1 > 0) continue; - //res->resource.read = res->resource.deprecate.write = (ray_resource_state_t){0}; - res->resource.deprecate.write = (ray_resource_state_t){0}; - } -} - -static void barrierAddImage(r_vk_barrier_t *barrier, vk_resource_t *res, VkAccessFlags access, VkPipelineStageFlags dst_stage_mask) { - const qboolean write = (access & VK_ACCESS_SHADER_WRITE_BIT) != 0; - - if (write) { - // TODO: support other access modes - ASSERT(access == VK_ACCESS_SHADER_WRITE_BIT); - // No reads are happening - //ASSERT(res->read.pipelines == 0); - - const ray_resource_state_t new_state = { - .pipelines = dst_stage_mask, - .access_mask = access, - .image_layout = VK_IMAGE_LAYOUT_GENERAL, - }; - - R_VkBarrierAddImage(barrier, (r_vk_barrier_image_t){ - .image = res->value.image_object->image, - .src_stage_mask = res->deprecate.read.pipelines | res->deprecate.write.pipelines, - // FIXME MEMORY_WRITE is needed to silence write-after-write layout-transition validation hazard - .src_access_mask = res->deprecate.read.access_mask | res->deprecate.write.access_mask | VK_ACCESS_MEMORY_WRITE_BIT, - .dst_access_mask = new_state.access_mask, - .old_layout = VK_IMAGE_LAYOUT_UNDEFINED, - .new_layout = new_state.image_layout, - }); - - // Mark that read would need a transition - res->deprecate.read = (ray_resource_state_t){0}; - res->deprecate.write = new_state; - } else { - // TODO: support other access modes - ASSERT(access == VK_ACCESS_SHADER_READ_BIT); - // Write happened - ASSERT(res->deprecate.write.pipelines != 0); - - // Check if no more barriers needed - if ((res->deprecate.read.pipelines & dst_stage_mask) == dst_stage_mask) - return; - - res->deprecate.read = (ray_resource_state_t) { - .pipelines = res->deprecate.read.pipelines | dst_stage_mask, - .access_mask = access, - .image_layout = VK_IMAGE_LAYOUT_GENERAL, - }; - - R_VkBarrierAddImage(barrier, (r_vk_barrier_image_t){ - .image = 
res->value.image_object->image, - .src_stage_mask = res->deprecate.write.pipelines, - .src_access_mask = res->deprecate.write.access_mask, - .dst_access_mask = res->deprecate.read.access_mask, - .old_layout = res->deprecate.write.image_layout, - .new_layout = res->deprecate.read.image_layout, - }); + // 2024-12-12 E384 1:56:00 Commented out: Try not clearing this state. Could be beneficial for later barrier-based extra-cmdbuf sync + //res->resource.deprecate.write = (ray_resource_state_t){0}; } } @@ -243,7 +185,19 @@ static void barrierAddBuffer(r_vk_barrier_t *barrier, vk_buffer_t *buf, VkAccess void R_VkResourceAddToBarrier(vk_resource_t *res, qboolean write, VkPipelineStageFlags2 dst_stage_mask, r_vk_barrier_t *barrier) { switch (res->type) { case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: - barrierAddImage(barrier, res, write ? VK_ACCESS_SHADER_WRITE_BIT : VK_ACCESS_SHADER_READ_BIT, dst_stage_mask); + { + r_vkcombuf_barrier_image_t image_barrier = { + .image = res->ref.image, + }; + if (write) { + image_barrier.access = VK_ACCESS_2_SHADER_WRITE_BIT; + image_barrier.layout = VK_IMAGE_LAYOUT_GENERAL; + } else { + image_barrier.access = VK_ACCESS_2_SHADER_READ_BIT; + image_barrier.layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; + } + BOUNDED_ARRAY_APPEND_ITEM(barrier->images, image_barrier); + } break; case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: ASSERT(!write); @@ -261,38 +215,10 @@ void R_VkResourceAddToBarrier(vk_resource_t *res, qboolean write, VkPipelineStag } } -void R_VkBarrierAddImage(r_vk_barrier_t *barrier, r_vk_barrier_image_t image) { - barrier->src_stage_mask |= image.src_stage_mask; - const VkImageMemoryBarrier ib = (VkImageMemoryBarrier) { - .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, - .image = image.image, - .srcAccessMask = image.src_access_mask, - .dstAccessMask = image.dst_access_mask, - .oldLayout = image.old_layout, - .newLayout = image.new_layout, - .subresourceRange = (VkImageSubresourceRange) { - .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, - .baseMipLevel = 0, - .levelCount = 1, - .baseArrayLayer = 0, - .layerCount = 1, - }, - }; - BOUNDED_ARRAY_APPEND_ITEM(barrier->images, ib); -} - void R_VkBarrierCommit(vk_combuf_t* combuf, r_vk_barrier_t *barrier, VkPipelineStageFlags2 dst_stage_mask) { - if (barrier->images.count == 0) + if (barrier->images.count == 0 && barrier->buffers.count == 0) return; - // FIXME use combuf barrier vkCmdPipelineBarrier2() - vkCmdPipelineBarrier(combuf->cmdbuf, - barrier->src_stage_mask == 0 - ? 
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT - : barrier->src_stage_mask, - dst_stage_mask, - 0, 0, NULL, 0, NULL, barrier->images.count, barrier->images.items); - R_VkCombufIssueBarrier(combuf, (r_vkcombuf_barrier_t){ .stage = dst_stage_mask, .buffers.items = barrier->buffers.items, @@ -300,7 +226,6 @@ void R_VkBarrierCommit(vk_combuf_t* combuf, r_vk_barrier_t *barrier, VkPipelineS }); // Mark as used - barrier->src_stage_mask = 0; barrier->images.count = 0; barrier->buffers.count = 0; } diff --git a/ref/vk/vk_resources.h b/ref/vk/vk_resources.h index 4a2fe56299..7b5e3c2e7b 100644 --- a/ref/vk/vk_resources.h +++ b/ref/vk/vk_resources.h @@ -30,18 +30,9 @@ enum { ExternalResource_COUNT, }; -typedef struct { - VkAccessFlags access_mask; - VkImageLayout image_layout; - VkPipelineStageFlagBits2 pipelines; -} ray_resource_state_t; - struct xvk_image_s; typedef struct vk_resource_s { VkDescriptorType type; - struct { - ray_resource_state_t write, read; - } deprecate; vk_descriptor_value_t value; union { vk_buffer_t *buffer; @@ -85,27 +76,14 @@ typedef struct { } r_vk_resources_builtin_fixme_t; void R_VkResourcesSetBuiltinFIXME(r_vk_resources_builtin_fixme_t builtin); -void R_VkResourcesFrameBeginStateChangeFIXME(VkCommandBuffer cmdbuf, qboolean discontinuity); +struct vk_combuf_s; +void R_VkResourcesFrameBeginStateChangeFIXME(struct vk_combuf_s* combuf, qboolean discontinuity); typedef struct { - // TODO VK_KHR_synchronization2, has a slightly different (better) semantics - VkPipelineStageFlags2 src_stage_mask; - BOUNDED_ARRAY_DECLARE(VkImageMemoryBarrier, images, 16); + BOUNDED_ARRAY_DECLARE(r_vkcombuf_barrier_image_t, images, 16); BOUNDED_ARRAY_DECLARE(r_vkcombuf_barrier_buffer_t, buffers, 16); } r_vk_barrier_t; -typedef struct { - VkImage image; - VkPipelineStageFlags src_stage_mask; - VkAccessFlags src_access_mask; - VkAccessFlags dst_access_mask; - VkImageLayout old_layout; - VkImageLayout new_layout; -} r_vk_barrier_image_t; - -void R_VkBarrierAddImage(r_vk_barrier_t *barrier, r_vk_barrier_image_t image); - -struct vk_combuf_s; void R_VkBarrierCommit(struct vk_combuf_s* combuf, r_vk_barrier_t *barrier, VkPipelineStageFlags2 dst_stage_mask); void R_VkResourceAddToBarrier(vk_resource_t *res, qboolean write, VkPipelineStageFlags2 dst_stage_mask, r_vk_barrier_t *barrier); diff --git a/ref/vk/vk_rtx.c b/ref/vk/vk_rtx.c index b1a72f3548..de8530ea21 100644 --- a/ref/vk/vk_rtx.c +++ b/ref/vk/vk_rtx.c @@ -225,7 +225,7 @@ static void performTracing( vk_combuf_t *combuf, const perform_tracing_args_t* a .light_bindings = args->light_bindings, }); - R_VkResourcesFrameBeginStateChangeFIXME(cmdbuf, g_rtx.discontinuity); + R_VkResourcesFrameBeginStateChangeFIXME(combuf, g_rtx.discontinuity); if (g_rtx.discontinuity) { DEBUG("discontinuity => false"); g_rtx.discontinuity = false; @@ -261,31 +261,6 @@ static void performTracing( vk_combuf_t *combuf, const perform_tracing_args_t* a .resources = g_rtx.mainpipe_resources, }); - { - const r_vkimage_blit_args blit_args = { - .in_stage = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, - .src = { - .image = g_rtx.mainpipe_out->image.image, - .width = args->frame_width, - .height = args->frame_height, - .oldLayout = VK_IMAGE_LAYOUT_GENERAL, - .srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT, - }, - .dst = { - .image = args->render_args->dst.image, - .width = args->render_args->dst.width, - .height = args->render_args->dst.height, - .oldLayout = VK_IMAGE_LAYOUT_UNDEFINED, - .srcAccessMask = VK_ACCESS_MEMORY_READ_BIT, - }, - }; - - 
R_VkImageBlit( cmdbuf, &blit_args ); - - // TODO this is to make sure we remember image layout after image_blit - // The proper way to do this would be to teach R_VkImageBlit to properly track the image metadata (i.e. vk_resource_t state) - g_rtx.mainpipe_out->resource.deprecate.write.image_layout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL; - } DEBUG_END(cmdbuf); APROF_SCOPE_END(perform); @@ -388,7 +363,7 @@ static void reloadMainpipe(void) { } // TODO full r/w initialization - res->resource.deprecate.write.pipelines = 0; + // FIXME not sure if not needed res->resource.deprecate.write.pipelines = 0; res->resource.type = mr->descriptor_type; } else { // TODO no assert, complain and exit @@ -476,15 +451,15 @@ void VK_RayFrameEnd(const vk_ray_frame_render_args_t* args) qboolean need_reload = g_rtx.reload_pipeline; - if (g_rtx.max_frame_width < args->dst.width) { - g_rtx.max_frame_width = ALIGN_UP(args->dst.width, 16); + if (g_rtx.max_frame_width < args->dst->width) { + g_rtx.max_frame_width = ALIGN_UP(args->dst->width, 16); WARN("Increasing max_frame_width to %d", g_rtx.max_frame_width); // TODO only reload resources, no need to reload the entire pipeline need_reload = true; } - if (g_rtx.max_frame_height < args->dst.height) { - g_rtx.max_frame_height = ALIGN_UP(args->dst.height, 16); + if (g_rtx.max_frame_height < args->dst->height) { + g_rtx.max_frame_height = ALIGN_UP(args->dst->height, 16); WARN("Increasing max_frame_height to %d", g_rtx.max_frame_height); // TODO only reload resources, no need to reload the entire pipeline need_reload = true; @@ -511,38 +486,19 @@ void VK_RayFrameEnd(const vk_ray_frame_render_args_t* args) // See ~3:00:00-3:40:00 of stream E383 about push-vs-pull models and their boundaries. R_VkBufferStagingCommit(&g_ray_model_state.kusochki_buffer, args->combuf); - ASSERT(args->dst.width <= g_rtx.max_frame_width); - ASSERT(args->dst.height <= g_rtx.max_frame_height); + ASSERT(args->dst->width <= g_rtx.max_frame_width); + ASSERT(args->dst->height <= g_rtx.max_frame_height); // TODO dynamic scaling based on perf - const int frame_width = args->dst.width; - const int frame_height = args->dst.height; + const int frame_width = args->dst->width; + const int frame_height = args->dst->height; // Do not draw when we have no swapchain - if (args->dst.image_view == VK_NULL_HANDLE) + if (!args->dst->image) goto tail; if (g_ray_model_state.frame.instances_count == 0) { - const r_vkimage_blit_args blit_args = { - .in_stage = VK_PIPELINE_STAGE_TRANSFER_BIT | VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, - .src = { - .image = g_rtx.mainpipe_out->image.image, - .width = frame_width, - .height = frame_height, - .oldLayout = VK_IMAGE_LAYOUT_GENERAL, - .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT, - }, - .dst = { - .image = args->dst.image, - .width = args->dst.width, - .height = args->dst.height, - .oldLayout = VK_IMAGE_LAYOUT_UNDEFINED, - .srcAccessMask = VK_ACCESS_MEMORY_READ_BIT, - }, - }; - - R_VkImageClear( cmdbuf, g_rtx.mainpipe_out->image.image, VK_ACCESS_TRANSFER_READ_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT ); - R_VkImageBlit( cmdbuf, &blit_args ); + R_VkImageClear( &g_rtx.mainpipe_out->image, args->combuf ); } else { const perform_tracing_args_t trace_args = { .render_args = args, @@ -556,6 +512,21 @@ void VK_RayFrameEnd(const vk_ray_frame_render_args_t* args) performTracing( args->combuf, &trace_args ); } + { + const r_vkimage_blit_args blit_args = { + .src = { + .image = &g_rtx.mainpipe_out->image, + .width = frame_width, + .height = frame_height, + }, + .dst = { + .image = 
args->dst, + }, + }; + + R_VkImageBlit( args->combuf, &blit_args ); + } + tail: APROF_SCOPE_END(ray_frame_end); } diff --git a/ref/vk/vk_rtx.h b/ref/vk/vk_rtx.h index fb2e5a48d9..b7669c9398 100644 --- a/ref/vk/vk_rtx.h +++ b/ref/vk/vk_rtx.h @@ -5,14 +5,12 @@ void VK_RayFrameBegin( void ); +struct vk_combuf_s; +struct r_vk_image_s; typedef struct { struct vk_combuf_s *combuf; - struct { - VkImageView image_view; - VkImage image; - uint32_t width, height; - } dst; + struct r_vk_image_s *dst; const matrix4x4 *projection, *view; From 7b93e13b4a81e6d6c4916dc77f807120f1ecda44 Mon Sep 17 00:00:00 2001 From: Ivan 'provod' Avdeev Date: Thu, 12 Dec 2024 15:17:24 -0500 Subject: [PATCH 40/62] ignore ./prefix directory too --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 994693c8e7..ef9d884634 100644 --- a/.gitignore +++ b/.gitignore @@ -6,6 +6,7 @@ # Other *.save +prefix/ # Qt Creator for some reason creates *.user.$version files, so exclude it too *.user* From d20e868d5b737df3c291f9335fc8176b7563665e Mon Sep 17 00:00:00 2001 From: Ivan 'provod' Avdeev Date: Thu, 12 Dec 2024 20:43:33 -0500 Subject: [PATCH 41/62] vk: rt: fixup resource-image tracking Now it works! Needs a bit of a cleanup, though. --- ref/vk/TODO.md | 3 +-- ref/vk/ray_pass.c | 4 ++-- ref/vk/vk_descriptor.h | 1 - ref/vk/vk_resources.c | 15 +++++++-------- ref/vk/vk_resources.h | 2 +- ref/vk/vk_rtx.c | 10 ++++++---- 6 files changed, 17 insertions(+), 18 deletions(-) diff --git a/ref/vk/TODO.md b/ref/vk/TODO.md index 83ee7f40d4..fd8c6d0ba2 100644 --- a/ref/vk/TODO.md +++ b/ref/vk/TODO.md @@ -17,8 +17,7 @@ ## 2024-12-12 E384 - [ ] zero vkCmdPipelineBarriers calls - - [ ] track image sync state with image (and not with resource) - - [x] the code is there, but it's broken and remains to be debugged + - [x] track image sync state with image (and not with resource) - [ ] grep for anything else ## 2024-12-10 E383 diff --git a/ref/vk/ray_pass.c b/ref/vk/ray_pass.c index 42deafe341..1a7e48387d 100644 --- a/ref/vk/ray_pass.c +++ b/ref/vk/ray_pass.c @@ -286,8 +286,8 @@ void RayPassPerform(struct ray_pass_s *pass, vk_combuf_t *combuf, ray_pass_perfo // FIXME this is not true, it can be known earlier if (res->type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) { dst_value->image = (VkDescriptorImageInfo) { - .imageLayout = src_value->image_object->sync.layout, - .imageView = src_value->image_object->view, + .imageLayout = res->ref.image->sync.layout, + .imageView = res->ref.image->view, .sampler = VK_NULL_HANDLE, }; } else { diff --git a/ref/vk/vk_descriptor.h b/ref/vk/vk_descriptor.h index 8464ba3b79..93e4c9fde2 100644 --- a/ref/vk/vk_descriptor.h +++ b/ref/vk/vk_descriptor.h @@ -29,7 +29,6 @@ typedef union { VkDescriptorImageInfo image; const VkDescriptorImageInfo *image_array; VkWriteDescriptorSetAccelerationStructureKHR accel; - const struct r_vk_image_s *image_object; } vk_descriptor_value_t; typedef struct { diff --git a/ref/vk/vk_resources.c b/ref/vk/vk_resources.c index f8ad95acb4..93fedc2129 100644 --- a/ref/vk/vk_resources.c +++ b/ref/vk/vk_resources.c @@ -186,16 +186,13 @@ void R_VkResourceAddToBarrier(vk_resource_t *res, qboolean write, VkPipelineStag switch (res->type) { case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: { - r_vkcombuf_barrier_image_t image_barrier = { + const r_vkcombuf_barrier_image_t image_barrier = { .image = res->ref.image, + // Image must remain in GENERAL layout regardless of r/w. 
+ // Storage image reads still require GENERAL, not SHADER_READ_ONLY_OPTIMAL
+ .layout = VK_IMAGE_LAYOUT_GENERAL,
+ .access = write ? VK_ACCESS_2_SHADER_WRITE_BIT : VK_ACCESS_2_SHADER_READ_BIT,
};
- if (write) { - image_barrier.access = VK_ACCESS_2_SHADER_WRITE_BIT; - image_barrier.layout = VK_IMAGE_LAYOUT_GENERAL; - } else { - image_barrier.access = VK_ACCESS_2_SHADER_READ_BIT; - image_barrier.layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; - }
BOUNDED_ARRAY_APPEND_ITEM(barrier->images, image_barrier); } break;
@@ -223,6 +220,8 @@ void R_VkBarrierCommit(vk_combuf_t* combuf, r_vk_barrier_t *barrier, VkPipelineS
.stage = dst_stage_mask, .buffers.items = barrier->buffers.items, .buffers.count = barrier->buffers.count,
+ .images.items = barrier->images.items,
+ .images.count = barrier->images.count,
});
// Mark as used
diff --git a/ref/vk/vk_resources.h b/ref/vk/vk_resources.h
index 7b5e3c2e7b..75db73f79f 100644
--- a/ref/vk/vk_resources.h
+++ b/ref/vk/vk_resources.h
@@ -80,7 +80,7 @@ struct vk_combuf_s;
void R_VkResourcesFrameBeginStateChangeFIXME(struct vk_combuf_s* combuf, qboolean discontinuity);
typedef struct {
- BOUNDED_ARRAY_DECLARE(r_vkcombuf_barrier_image_t, images, 16);
+ BOUNDED_ARRAY_DECLARE(r_vkcombuf_barrier_image_t, images, 32);
BOUNDED_ARRAY_DECLARE(r_vkcombuf_barrier_buffer_t, buffers, 16); } r_vk_barrier_t;
diff --git a/ref/vk/vk_rtx.c b/ref/vk/vk_rtx.c
index de8530ea21..aececf42d3 100644
--- a/ref/vk/vk_rtx.c
+++ b/ref/vk/vk_rtx.c
@@ -19,7 +19,6 @@
#include "profiler.h"
-#include "eiface.h"
#include "xash3d_mathlib.h"
#include
@@ -251,7 +250,7 @@ static void performTracing( vk_combuf_t *combuf, const perform_tracing_args_t* a
const qboolean create = !!(mr->flags & MEATPIPE_RES_CREATE); if (create && mr->descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) // THIS FAILS WHY?! ASSERT(g_rtx.mainpipe_resources[i]->value.image_object == &res->image);
- g_rtx.mainpipe_resources[i]->value.image_object = &res->image;
+ g_rtx.mainpipe_resources[i]->ref.image = &res->image;
}
R_VkMeatpipePerform(g_rtx.mainpipe, combuf, (vk_meatpipe_perfrom_args_t) {
@@ -347,7 +346,10 @@ static void reloadMainpipe(void) {
.tiling = VK_IMAGE_TILING_OPTIMAL,
// TODO figure out how to detect this need properly. prev_dest is not defined as "output"
//.usage = VK_IMAGE_USAGE_STORAGE_BIT | (output ? VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT : 0),
- .usage = VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT,
+ .usage = VK_IMAGE_USAGE_STORAGE_BIT
+ //| VK_IMAGE_USAGE_SAMPLED_BIT // required by VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
+ | VK_IMAGE_USAGE_TRANSFER_SRC_BIT
+ | VK_IMAGE_USAGE_TRANSFER_DST_BIT,
.flags = 0, }; res->image = R_VkImageCreate(&create);
@@ -359,7 +361,7 @@ static void reloadMainpipe(void) {
if (create) { if (mr->descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) {
- newpipe_resources[i]->value.image_object = &res->image;
+ newpipe_resources[i]->ref.image = &res->image;
}
// TODO full r/w initialization

From 4a3ec4f54e7060e792195ea096b22d1a073d3a3d Mon Sep 17 00:00:00 2001
From: Ivan Avdeev
Date: Sat, 14 Dec 2024 19:04:05 -0500
Subject: [PATCH 42/62] vk: track pending staging items and push on frame end

This adds explicit staging user tracking, which allows: - tracking whether there are any unclaimed items, and pushing them (or ignoring, if the user decides so, for transient stuff) - having more granular stats for staging, i.e.
which buffer/subsystem used staging in this frame, and how much (not implemented yet) This commit also changes staging from using flip buffer to just ring buffer allocator. --- ref/vk/alolcator.c | 2 +- ref/vk/r_block.c | 2 +- ref/vk/vk_buffer.c | 36 ++++++++---- ref/vk/vk_buffer.h | 2 +- ref/vk/vk_framectl.c | 10 ++-- ref/vk/vk_image.c | 26 +++++++-- ref/vk/vk_staging.c | 134 +++++++++++++++++++++++++------------------ ref/vk/vk_staging.h | 54 +++++++++++------ 8 files changed, 169 insertions(+), 97 deletions(-) diff --git a/ref/vk/alolcator.c b/ref/vk/alolcator.c index 544e0b7248..120da4bc95 100644 --- a/ref/vk/alolcator.c +++ b/ref/vk/alolcator.c @@ -287,7 +287,7 @@ uint32_t aloRingAlloc(alo_ring_t* ring, uint32_t size, uint32_t alignment) { // 1. Check if we have enough space immediately in front of head if (pos + size <= ring->size) { - ring->head = (pos + size) % ring->size; + ring->head = pos + size; return pos; } diff --git a/ref/vk/r_block.c b/ref/vk/r_block.c index 8924464a4c..470a73d25a 100644 --- a/ref/vk/r_block.c +++ b/ref/vk/r_block.c @@ -14,7 +14,7 @@ typedef struct r_blocks_block_s { // <--- pool --><-- ring ---> // offset ? ---> -int allocMetablock(r_blocks_t *blocks) { +static int allocMetablock(r_blocks_t *blocks) { return aloIntPoolAlloc(&blocks->blocks.freelist); // TODO grow if needed } diff --git a/ref/vk/vk_buffer.c b/ref/vk/vk_buffer.c index 93b655151a..63fd884a02 100644 --- a/ref/vk/vk_buffer.c +++ b/ref/vk/vk_buffer.c @@ -34,8 +34,8 @@ qboolean VK_BufferCreate(const char *debug_name, vk_buffer_t *buf, uint32_t size XVK_CHECK(vkBindBufferMemory(vk_core.device, buf->buffer, buf->devmem.device_memory, buf->devmem.offset)); buf->mapped = buf->devmem.mapped; - buf->size = size; + buf->name = debug_name; INFO("Created buffer=%llx, name=\"%s\", size=%u", (unsigned long long)buf->buffer, debug_name, size); @@ -133,7 +133,8 @@ void R_DEBuffer_Flip(r_debuffer_t* debuf) { // TODO this should be part of the vk_buffer_t object itself typedef struct { vk_buffer_t *buffer; - VkBuffer staging; + r_vkstaging_user_handle_t staging_handle; + VkBuffer staging_buffer; BOUNDED_ARRAY_DECLARE(VkBufferCopy, regions, MAX_STAGING_ENTRIES); } r_vk_staging_buffer_t; @@ -152,6 +153,12 @@ static r_vk_staging_buffer_t *findExistingStagingSlotForBuffer(vk_buffer_t *buf) return NULL; } +static void stagingBufferPush(void* userptr, struct vk_combuf_s *combuf, uint32_t pending) { + r_vk_staging_buffer_t *const stb = userptr; + ASSERT(pending == stb->regions.count); + R_VkBufferStagingCommit(stb->buffer, combuf); +} + static r_vk_staging_buffer_t *findOrCreateStagingSlotForBuffer(vk_buffer_t *buf) { r_vk_staging_buffer_t *stb = findExistingStagingSlotForBuffer(buf); if (stb) @@ -159,9 +166,14 @@ static r_vk_staging_buffer_t *findOrCreateStagingSlotForBuffer(vk_buffer_t *buf) ASSERT(BOUNDED_ARRAY_HAS_SPACE(g_buf.staging, 1)); stb = &BOUNDED_ARRAY_APPEND_UNSAFE(g_buf.staging); - stb->staging = VK_NULL_HANDLE; + stb->staging_buffer = VK_NULL_HANDLE; stb->buffer = buf; stb->regions.count = 0; + stb->staging_handle = R_VkStagingUserCreate((r_vkstaging_user_create_t){ + .name = buf->name, + .userptr = stb, + .push = stagingBufferPush, + }); return stb; } @@ -171,7 +183,7 @@ vk_buffer_locked_t R_VkBufferLock(vk_buffer_t *buf, vk_buffer_lock_t lock) { r_vk_staging_buffer_t *const stb = findOrCreateStagingSlotForBuffer(buf); ASSERT(stb); - r_vkstaging_region_t staging_lock = R_VkStagingLock(lock.size); + r_vkstaging_region_t staging_lock = R_VkStagingAlloc(stb->staging_handle, lock.size); 
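+	// In short, the flow this patch introduces: each R_VkStagingAlloc() bumps the
+	// calling user's pending_count; the matching R_VkStagingMarkFree() happens when
+	// the copy is actually recorded -- either explicitly, as in R_VkBufferStagingCommit()
+	// below, or from the push() callback that R_VkStagingFrameEpilogue() invokes for
+	// any still-pending users at submit time.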
ASSERT(staging_lock.ptr);
// TODO perf: adjacent region coalescing
@@ -183,23 +195,23 @@ vk_buffer_locked_t R_VkBufferLock(vk_buffer_t *buf, vk_buffer_lock_t lock) {
.size = lock.size, };
- if (stb->staging != VK_NULL_HANDLE) - ASSERT(stb->staging == staging_lock.buffer);
+ if (stb->staging_buffer != VK_NULL_HANDLE)
+ // TODO implement this if staging ever grows to multiple buffers
+ ASSERT(stb->staging_buffer == staging_lock.buffer);
else
- stb->staging = staging_lock.buffer;
+ stb->staging_buffer = staging_lock.buffer;
return (vk_buffer_locked_t) { .ptr = staging_lock.ptr, .impl_ = { .buf = buf,
- .handle = staging_lock.handle,
}, }; }
void R_VkBufferUnlock(vk_buffer_locked_t lock) {
- DEBUG("buf=%llx staging pending++", (unsigned long long)lock.impl_.buf->buffer); - R_VkStagingUnlock(lock.impl_.handle);
+ //DEBUG("buf=%llx staging pending++", (unsigned long long)lock.impl_.buf->buffer);
+ // Nothing to do?
}
void R_VkBufferStagingCommit(vk_buffer_t *buf, struct vk_combuf_s *combuf) {
@@ -223,10 +235,10 @@ void R_VkBufferStagingCommit(vk_buffer_t *buf, struct vk_combuf_s *combuf) {
const VkCommandBuffer cmdbuf = combuf->cmdbuf;
DEBUG_NV_CHECKPOINTF(cmdbuf, "staging dst_buffer=%p count=%d", buf->buffer, stb->regions.count);
//DEBUG("buffer=%p copy %d regions from staging buffer=%p", buf->buffer, stb->regions.count, stb->staging);
- vkCmdCopyBuffer(cmdbuf, stb->staging, buf->buffer, stb->regions.count, stb->regions.items);
+ vkCmdCopyBuffer(cmdbuf, stb->staging_buffer, buf->buffer, stb->regions.count, stb->regions.items);
DEBUG("buf=%llx staging pending-=%u", (unsigned long long)buf->buffer, stb->regions.count);
- R_VkStagingCopied(stb->regions.count);
+ R_VkStagingMarkFree(stb->staging_handle, stb->regions.count);
stb->regions.count = 0;
//FIXME R_VkCombufScopeEnd(combuf, begin_index, VK_PIPELINE_STAGE_TRANSFER_BIT);
diff --git a/ref/vk/vk_buffer.h b/ref/vk/vk_buffer.h
index f661e76d95..9404c9cd57 100644
--- a/ref/vk/vk_buffer.h
+++ b/ref/vk/vk_buffer.h
@@ -11,6 +11,7 @@ typedef struct { } r_vksync_state_t;
typedef struct vk_buffer_s {
+ const char *name; // static
vk_devmem_t devmem; VkBuffer buffer;
@@ -44,7 +45,6 @@ typedef struct {
struct { vk_buffer_t *buf;
- r_vkstaging_handle_t handle;
} impl_; } vk_buffer_locked_t;
diff --git a/ref/vk/vk_framectl.c b/ref/vk/vk_framectl.c
index 49b4a2e61c..50b3d5900d 100644
--- a/ref/vk/vk_framectl.c
+++ b/ref/vk/vk_framectl.c
@@ -50,7 +50,7 @@ typedef struct {
// so we can't reuse the same one for two purposes and need to multiply entities
VkSemaphore sem_done2;
- uint32_t staging_generation_tag;
+ uint32_t staging_frame_tag;
} vk_framectl_frame_t;
static struct {
@@ -278,7 +278,7 @@ void R_BeginFrame( qboolean clearScene ) {
ASSERT(!g_frame.current.framebuffer.framebuffer);
// TODO explicit frame dependency synced on frame-end-event/sema
- R_VkStagingGenerationRelease(frame->staging_generation_tag);
+ R_VkStagingFrameCompleted(frame->staging_frame_tag);
g_frame.current.framebuffer = R_VkSwapchainAcquire( frame->sem_framebuffer_ready );
vk_frame.width = g_frame.current.framebuffer.width;
@@ -425,9 +425,11 @@ static void submit( vk_combuf_t* combuf, qboolean wait, qboolean draw ) {
vk_framectl_frame_t *const frame = g_frame.frames + g_frame.current.index;
vk_framectl_frame_t *const prev_frame = g_frame.frames + (g_frame.current.index + 1) % MAX_CONCURRENT_FRAMES;
+ // Push things from staging that weren't explicitly pulled by the frame builder
+ frame->staging_frame_tag = R_VkStagingFrameEpilogue(combuf);
+
R_VkCombufEnd(combuf);
-
frame->staging_generation_tag = R_VkStagingGenerationCommit();
BOUNDED_ARRAY(VkCommandBuffer, cmdbufs, 2);
BOUNDED_ARRAY_APPEND_ITEM(cmdbufs, cmdbuf);
@@ -452,7 +454,7 @@ static void submit( vk_combuf_t* combuf, qboolean wait, qboolean draw ) {
DEBUG("submit: frame=%d, staging_tag=%u, combuf=%p, wait for semaphores[%d]={%llx, %llx}, signal semaphores[%d]={%llx, %llx}",
g_frame.current.index,
- frame->staging_generation_tag,
+ frame->staging_frame_tag,
frame->combuf->cmdbuf, waitophores.count, (unsigned long long)waitophores.items[0],
diff --git a/ref/vk/vk_image.c b/ref/vk/vk_image.c
index 8625f11593..797358e198 100644
--- a/ref/vk/vk_image.c
+++ b/ref/vk/vk_image.c
@@ -233,21 +233,37 @@ typedef struct { } image_upload_t;
static struct {
+ r_vkstaging_user_handle_t staging;
+
ARRAY_DYNAMIC_DECLARE(image_upload_t, images); ARRAY_DYNAMIC_DECLARE(VkBufferImageCopy, slices); ARRAY_DYNAMIC_DECLARE(VkImageMemoryBarrier, barriers); } g_image_upload;
+static void imageStagingPush(void* userptr, struct vk_combuf_s *combuf, uint32_t allocations) {
+ (void)userptr;
+ const VkPipelineStageFlags2 assume_stage
+ = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+ R_VkImageUploadCommit(combuf, assume_stage);
+}
+
qboolean R_VkImageInit(void) {
arrayDynamicInitT(&g_image_upload.images); arrayDynamicInitT(&g_image_upload.slices); arrayDynamicInitT(&g_image_upload.barriers);
+ g_image_upload.staging = R_VkStagingUserCreate((r_vkstaging_user_create_t){
+ .name = "image",
+ .userptr = NULL,
+ .push = imageStagingPush,
+ });
+
return true; }
void R_VkImageShutdown(void) {
ASSERT(g_image_upload.images.count == 0);
+ R_VkStagingUserDestroy(g_image_upload.staging);
arrayDynamicDestroyT(&g_image_upload.images); arrayDynamicDestroyT(&g_image_upload.slices); arrayDynamicDestroyT(&g_image_upload.barriers);
@@ -370,9 +386,6 @@ void R_VkImageUploadCommit( struct vk_combuf_s *combuf, VkPipelineStageFlagBits
}, };
- R_VkStagingUnlock(up->staging.lock.handle);
- R_VkStagingCopied(1);
-
// Mark image as uploaded
up->image->upload_slot = -1; up->image = NULL;
@@ -394,6 +407,8 @@ void R_VkImageUploadCommit( struct vk_combuf_s *combuf, VkPipelineStageFlagBits
barriers_count, (VkImageMemoryBarrier*)g_image_upload.barriers.items );
+ R_VkStagingMarkFree(g_image_upload.staging, barriers_count);
+
R_VkCombufScopeEnd(combuf, gpu_scope_begin, VK_PIPELINE_STAGE_TRANSFER_BIT);
// Clear out image upload queue
@@ -423,7 +438,7 @@ void R_VkImageUploadBegin( r_vk_image_t *img ) {
// would notify other modules that they'd need to commit their staging data, and thus we'd return to this module's
// R_VkImageUploadCommit(), which needs to see valid data. Therefore, don't touch its state until
// R_VkStagingAlloc returns.
- const r_vkstaging_region_t staging_lock = R_VkStagingLock(staging_size);
+ const r_vkstaging_region_t staging_lock = R_VkStagingAlloc(g_image_upload.staging, staging_size);
img->upload_slot = g_image_upload.images.count;
arrayDynamicAppendT(&g_image_upload.images, NULL);
@@ -503,8 +518,8 @@ static void cancelUpload( r_vk_image_t *img ) {
// Technically we won't need that staging region anymore at all, but it doesn't matter,
// it's just easier to mark it to be freed this way.
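	// (The backing staging memory is reclaimed later regardless: R_VkStagingFrameCompleted()
	// releases the ring allocator up to the recorded frame boundary once the frame is done.)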
- R_VkStagingUnlock(up->staging.lock.handle);
- R_VkStagingCopied(1);
+ R_VkStagingMarkFree(g_image_upload.staging, 1);
// Mark upload slot as unused, and image as not subject to uploading
up->image = NULL;
diff --git a/ref/vk/vk_staging.c b/ref/vk/vk_staging.c
index cb47a0fc23..9cf1afb60a 100644
--- a/ref/vk/vk_staging.c
+++ b/ref/vk/vk_staging.c
@@ -1,11 +1,11 @@
#include "vk_staging.h"
+
#include "vk_buffer.h"
-#include "alolcator.h"
-#include "vk_commandpool.h"
-#include "profiler.h"
-#include "r_speeds.h"
#include "vk_combuf.h"
#include "vk_logs.h"
+#include "r_speeds.h"
+
+#include "alolcator.h"
#include "arrays.h"
#include
@@ -16,103 +16,127 @@
// FIXME decrease size to something reasonable, see https://github.com/w23/xash3d-fwgs/issues/746
#define DEFAULT_STAGING_SIZE (4*128*1024*1024)
-static struct { - vk_buffer_t buffer; - r_flipping_buffer_t buffer_alloc;
+#define MAX_STAGING_USERS 8
- uint32_t locked_count;
+typedef struct r_vkstaging_user_t {
+ r_vkstaging_user_create_t info;
+
uint32_t pending_count;
+
+ struct {
+ uint32_t allocs;
+ uint32_t size;
+ } stats;
+} r_vkstaging_user_t;
+
+static struct {
+ vk_buffer_t buffer;
+ alo_ring_t buffer_alloc_ring;
+
+ BOUNDED_ARRAY_DECLARE(r_vkstaging_user_t, users, MAX_STAGING_USERS);
struct { int total_size;
- int buffers_size; - int images_size; - int buffer_chunks; - int images;
+ int total_chunks;
+ //int buffers_size;
+ //int images_size;
+ //int buffer_chunks;
+ //int images;
} stats;
- int buffer_upload_scope_id; - int image_upload_scope_id;
+ //int buffer_upload_scope_id;
+ //int image_upload_scope_id;
} g_staging = {0};
qboolean R_VkStagingInit(void) {
if (!VK_BufferCreate("staging", &g_staging.buffer, DEFAULT_STAGING_SIZE, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) return false;
- R_FlippingBuffer_Init(&g_staging.buffer_alloc, DEFAULT_STAGING_SIZE);
+ aloRingInit(&g_staging.buffer_alloc_ring, g_staging.buffer.size);
R_SPEEDS_COUNTER(g_staging.stats.total_size, "total_size", kSpeedsMetricBytes);
- R_SPEEDS_COUNTER(g_staging.stats.buffers_size, "buffers_size", kSpeedsMetricBytes); - R_SPEEDS_COUNTER(g_staging.stats.images_size, "images_size", kSpeedsMetricBytes);
+ R_SPEEDS_COUNTER(g_staging.stats.total_chunks, "total_chunks", kSpeedsMetricCount);
- R_SPEEDS_COUNTER(g_staging.stats.buffer_chunks, "buffer_chunks", kSpeedsMetricCount); - R_SPEEDS_COUNTER(g_staging.stats.images, "images", kSpeedsMetricCount);
+ //R_SPEEDS_COUNTER(g_staging.stats.buffers_size, "buffers_size", kSpeedsMetricBytes);
+ //R_SPEEDS_COUNTER(g_staging.stats.images_size, "images_size", kSpeedsMetricBytes);
- g_staging.buffer_upload_scope_id = R_VkGpuScope_Register("staging_buffers"); - g_staging.image_upload_scope_id = R_VkGpuScope_Register("staging_images");
+ //R_SPEEDS_COUNTER(g_staging.stats.buffer_chunks, "buffer_chunks", kSpeedsMetricCount);
+ //R_SPEEDS_COUNTER(g_staging.stats.images, "images", kSpeedsMetricCount);
+
+ //g_staging.buffer_upload_scope_id = R_VkGpuScope_Register("staging_buffers");
+ //g_staging.image_upload_scope_id = R_VkGpuScope_Register("staging_images");
return true; }
void R_VkStagingShutdown(void) {
+ // TODO ASSERT(g_staging.users.count == 0);
VK_BufferDestroy(&g_staging.buffer);
}
-static uint32_t allocateInRing(uint32_t size, uint32_t alignment) { - alignment = alignment < 1 ?
1 : alignment; +r_vkstaging_user_t *R_VkStagingUserCreate(r_vkstaging_user_create_t info) { + ASSERT(g_staging.users.count < MAX_STAGING_USERS); + g_staging.users.items[g_staging.users.count] = (r_vkstaging_user_t) { + .info = info, + }; - const uint32_t offset = R_FlippingBuffer_Alloc(&g_staging.buffer_alloc, size, alignment ); - ASSERT(offset != ALO_ALLOC_FAILED && "FIXME increase staging buffer size as a quick fix"); + // TODO register counters - return R_FlippingBuffer_Alloc(&g_staging.buffer_alloc, size, alignment ); + return g_staging.users.items + (g_staging.users.count++); } -r_vkstaging_region_t R_VkStagingLock(uint32_t size) { +void R_VkStagingUserDestroy(r_vkstaging_user_t *user) { + ASSERT(user->pending_count == 0); + // TODO destroy +} + +r_vkstaging_region_t R_VkStagingAlloc(r_vkstaging_user_t* user, uint32_t size) { const uint32_t alignment = 4; - const uint32_t offset = R_FlippingBuffer_Alloc(&g_staging.buffer_alloc, size, alignment); - ASSERT(offset != ALO_ALLOC_FAILED); + const uint32_t offset = aloRingAlloc(&g_staging.buffer_alloc_ring, size, alignment); + ASSERT(offset != ALO_ALLOC_FAILED && "FIXME: workaround: increase staging buffer size"); DEBUG("Lock alignment=%d size=%d region=%d..%d", alignment, size, offset, offset + size); - g_staging.locked_count++; + user->pending_count++; + + user->stats.allocs++; + user->stats.size += size; + return (r_vkstaging_region_t){ - .handle.generation = g_staging.current_generation, .offset = offset, .buffer = g_staging.buffer.buffer, .ptr = (char*)g_staging.buffer.mapped + offset, }; } -void R_VkStagingUnlock(r_vkstaging_handle_t handle) { - DEBUG("Unlock: locked_count=%u pending_count=%u gen=%u", g_staging.locked_count, g_staging.pending_count, g_staging.current_generation); - ASSERT(g_staging.current_generation == handle.generation); - ASSERT(g_staging.locked_count > 0); - g_staging.locked_count--; - g_staging.pending_count++; +void R_VkStagingMarkFree(r_vkstaging_user_t* user, uint32_t count) { + ASSERT(user->pending_count >= count); + user->pending_count -= count; } -void R_VkStagingCopied(uint32_t count) { - ASSERT(g_staging.pending_count >= count); - g_staging.pending_count -= count; -} +uint32_t R_VkStagingFrameEpilogue(vk_combuf_t* combuf) { + for (int i = 0; i < g_staging.users.count; ++i) { + r_vkstaging_user_t *const user = g_staging.users.items + i; + if (user->pending_count == 0) + continue; + + WARN("%s has %u pending staging items, pushing", user->info.name, user->pending_count); + user->info.push(user->info.userptr, combuf, user->pending_count); + ASSERT(user->pending_count == 0); + } -void R_VkStagingGenerationRelease(uint32_t gen) { - DEBUG("Release: gen=%u current_gen=%u ring offsets=[%u, %u, %u]", gen, g_staging.current_generation, - g_staging.buffer_alloc.frame_offsets[0], - g_staging.buffer_alloc.frame_offsets[1], - g_staging.buffer_alloc.ring.head - ); - R_FlippingBuffer_Flip(&g_staging.buffer_alloc); + return g_staging.buffer_alloc_ring.head; } -uint32_t R_VkStagingGenerationCommit(void) { - DEBUG("Commit: locked_count=%u pending_count=%u gen=%u", g_staging.locked_count, g_staging.pending_count, g_staging.current_generation); +void R_VkStagingFrameCompleted(uint32_t frame_boundary_addr) { + // Note that these stats are for latest frame, not the one for which the frame boundary is. 
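+	// To recap the frame protocol: R_VkStagingFrameEpilogue() pushed all still-pending
+	// users and returned the ring head as this frame's boundary tag; now that the frame
+	// is known to be complete, aloRingFree() below releases every region allocated
+	// before that boundary.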
+ g_staging.stats.total_size = 0;
+ g_staging.stats.total_chunks = 0;
- ASSERT(g_staging.locked_count == 0);
- ASSERT(g_staging.pending_count == 0);
+ for (int i = 0; i < g_staging.users.count; ++i) {
+ r_vkstaging_user_t *const user = g_staging.users.items + i;
+ user->stats.allocs = 0;
+ user->stats.size = 0;
+ }
- g_staging.stats.total_size = g_staging.stats.images_size + g_staging.stats.buffers_size;
- return g_staging.current_generation++;
+ aloRingFree(&g_staging.buffer_alloc_ring, frame_boundary_addr);
}
diff --git a/ref/vk/vk_staging.h b/ref/vk/vk_staging.h
index b16e25d307..2e9adcc335 100644
--- a/ref/vk/vk_staging.h
+++ b/ref/vk/vk_staging.h
@@ -5,30 +5,50 @@
qboolean R_VkStagingInit(void);
void R_VkStagingShutdown(void);
+struct vk_combuf_s;
+typedef void (r_vkstaging_push_f)(void* userptr, struct vk_combuf_s *combuf, uint32_t pending);
+
typedef struct {
- uint32_t generation;
-} r_vkstaging_handle_t;
+ // Expected to be static, stored as a pointer
+ const char *name;
+
+ void *userptr;
+ r_vkstaging_push_f *push;
+} r_vkstaging_user_create_t;
+
+struct r_vkstaging_user_t;
+typedef struct r_vkstaging_user_t *r_vkstaging_user_handle_t;
+r_vkstaging_user_handle_t R_VkStagingUserCreate(r_vkstaging_user_create_t);
+void R_VkStagingUserDestroy(r_vkstaging_user_handle_t);
typedef struct {
+ // CPU-accessible memory
void *ptr;
- r_vkstaging_handle_t handle;
- // TODO maybe return these on lock?
+ // GPU buffer to copy from
VkBuffer buffer;
VkDeviceSize offset;
} r_vkstaging_region_t;
// Allocate CPU-accessible memory in staging buffer
-r_vkstaging_region_t R_VkStagingLock(uint32_t size);
-
-// Mark allocated region as ready for upload
-void R_VkStagingUnlock(r_vkstaging_handle_t handle);
-
-// Notify staging that this amount of regions were scheduled to be copied
-void R_VkStagingCopied(uint32_t count);
-
-// Finalize current generation, return its tag for R_VkStagingGenerationRelease() call
-uint32_t R_VkStagingGenerationCommit(void);
-
-// Free all data for generation tag (returned by commit)
-void R_VkStagingGenerationRelease(uint32_t gen);
+r_vkstaging_region_t R_VkStagingAlloc(r_vkstaging_user_handle_t, uint32_t size);
+
+// Notify staging that this number of regions is about to be consumed when the next combuf ends
+// I.e. they're "free" from the staging standpoint
+void R_VkStagingMarkFree(r_vkstaging_user_handle_t, uint32_t count);
+
+// This gets called just before the combuf is ended and submitted.
+// Gives the last chance for the users that haven't yet used their data.
+// This is a workaround to patch up the impedance mismatch between the top-down push model,
+// where the engine "pushes down" the data to be rendered, and the "bottom-up" pull model,
+// where the frame is constructed based on render graph dependency tree. Not all pushed
+// resources could be used, and this gives the opportunity to at least ingest the data
+// to make sure that it remains complete, in case it might be needed in the future.
+// Returns current frame tag to be closed in the R_VkStagingFrameCompleted() function.
+uint32_t R_VkStagingFrameEpilogue(struct vk_combuf_s*);
+
+// This function is called when a frame is finished. It allows staging to free all the
+// data used in that frame.
+// TODO make this dependency more explicit, i.e. combuf should track when it's done
+// and what finalization functions it should call when it's done (there are many).
+void R_VkStagingFrameCompleted(uint32_t tag);

From 0fcb033d434b66ecf1d3001d41ba43276a4fbbad Mon Sep 17 00:00:00 2001
From: Ivan Avdeev
Date: Sun, 15 Dec 2024 13:35:53 -0500
Subject: [PATCH 43/62] vk: add per-user staging stats

`staging.<name>.size` and `staging.<name>.allocs` r_speeds metrics are now available for every staging user.
---
ref/vk/vk_staging.c | 35 ++++++++++++++++++-----------------
1 file changed, 18 insertions(+), 17 deletions(-)

diff --git a/ref/vk/vk_staging.c b/ref/vk/vk_staging.c
index 9cf1afb60a..61d546f0a3 100644
--- a/ref/vk/vk_staging.c
+++ b/ref/vk/vk_staging.c
@@ -23,8 +23,8 @@ typedef struct r_vkstaging_user_t {
uint32_t pending_count;
struct {
- uint32_t allocs;
- uint32_t size;
+ int allocs;
+ int size;
} stats;
} r_vkstaging_user_t;
@@ -37,10 +37,6 @@ static struct {
struct {
int total_size;
int total_chunks;
- //int buffers_size;
- //int images_size;
- //int buffer_chunks;
- //int images;
} stats;
//int buffer_upload_scope_id;
@@ -48,7 +44,8 @@ static struct {
} g_staging = {0};
qboolean R_VkStagingInit(void) {
- if (!VK_BufferCreate("staging", &g_staging.buffer, DEFAULT_STAGING_SIZE, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT))
+ if (!VK_BufferCreate("staging", &g_staging.buffer, DEFAULT_STAGING_SIZE, VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
+ VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT))
return false;
aloRingInit(&g_staging.buffer_alloc_ring, g_staging.buffer.size);
@@ -56,12 +53,6 @@ qboolean R_VkStagingInit(void) {
R_SPEEDS_COUNTER(g_staging.stats.total_size, "total_size", kSpeedsMetricBytes);
R_SPEEDS_COUNTER(g_staging.stats.total_chunks, "total_chunks", kSpeedsMetricCount);
- //R_SPEEDS_COUNTER(g_staging.stats.buffers_size, "buffers_size", kSpeedsMetricBytes);
- //R_SPEEDS_COUNTER(g_staging.stats.images_size, "images_size", kSpeedsMetricBytes);
-
- //R_SPEEDS_COUNTER(g_staging.stats.buffer_chunks, "buffer_chunks", kSpeedsMetricCount);
- //R_SPEEDS_COUNTER(g_staging.stats.images, "images", kSpeedsMetricCount);
-
//g_staging.buffer_upload_scope_id = R_VkGpuScope_Register("staging_buffers");
//g_staging.image_upload_scope_id = R_VkGpuScope_Register("staging_images");
@@ -75,18 +66,25 @@ void R_VkStagingShutdown(void) {
r_vkstaging_user_t *R_VkStagingUserCreate(r_vkstaging_user_create_t info) {
ASSERT(g_staging.users.count < MAX_STAGING_USERS);
- g_staging.users.items[g_staging.users.count] = (r_vkstaging_user_t) {
+
+ r_vkstaging_user_t *const user = g_staging.users.items + (g_staging.users.count++);
+ *user = (r_vkstaging_user_t) {
.info = info,
};
- // TODO register counters
+ char buf[64];
+ snprintf(buf, sizeof(buf), "%s.size", info.name);
+ R_SPEEDS_COUNTER(user->stats.size, buf, kSpeedsMetricBytes);
- return g_staging.users.items + (g_staging.users.count++);
+ snprintf(buf, sizeof(buf), "%s.allocs", info.name);
+ R_SPEEDS_COUNTER(user->stats.allocs, buf, kSpeedsMetricCount);
+
+ return user;
}
void R_VkStagingUserDestroy(r_vkstaging_user_t *user) {
ASSERT(user->pending_count == 0);
- // TODO destroy
+ // TODO remove from the table
}
r_vkstaging_region_t R_VkStagingAlloc(r_vkstaging_user_t* user, uint32_t size) {
@@ -101,6 +99,9 @@ r_vkstaging_region_t R_VkStagingAlloc(r_vkstaging_user_t* user, uint32_t size) {
user->stats.allocs++;
user->stats.size += size;
+ g_staging.stats.total_chunks++;
+ g_staging.stats.total_size += size;
+
return (r_vkstaging_region_t){
.offset = offset,
.buffer = g_staging.buffer.buffer,

From 3cf98c114962c0048b92c0c0de30ea2d72dcb68a Mon Sep 17
00:00:00 2001
From: Ivan 'provod' Avdeev
Date: Sun, 15 Dec 2024 22:11:37 -0500
Subject: [PATCH 44/62] vk: rt: build BLASes lazily

Move draw_instance into ray_accel module. Then, when building TLAS, go through all instances, and check whether their BLASes need to be (re)built. Enqueue those that need to be rebuilt before building TLAS.

Fixes crashing when doing changelevel w/o rt, and then enabling rt.
---
ref/vk/vk_ray_accel.c | 136 +++++++++++++++++++++++++++++----------
ref/vk/vk_ray_accel.h | 12 ++++
ref/vk/vk_ray_internal.h | 45 -------------
ref/vk/vk_ray_model.c | 62 ++++++++----------
ref/vk/vk_rtx.c | 12 ++--
5 files changed, 145 insertions(+), 122 deletions(-)

diff --git a/ref/vk/vk_ray_accel.c b/ref/vk/vk_ray_accel.c
index ae123702d1..5511fc2feb 100644
--- a/ref/vk/vk_ray_accel.c
+++ b/ref/vk/vk_ray_accel.c
@@ -18,12 +18,19 @@
#define MODULE_NAME "accel"
#define LOG_MODULE rt
+#define MAX_SCRATCH_BUFFER (32*1024*1024)
+// FIXME compute this by lazily allocating #define MAX_ACCELS_BUFFER (128*1024*1024)
+#define MAX_ACCELS_BUFFER (256*1024*1024)
+
typedef struct rt_blas_s {
const char *debug_name;
rt_blas_usage_e usage;
VkAccelerationStructureKHR blas;
+ // Zero if not built
+ VkDeviceAddress address;
+
// Max dynamic geoms for usage == kBlasBuildDynamicFast
int max_geoms;
@@ -33,7 +40,8 @@ typedef struct rt_blas_s {
VkAccelerationStructureGeometryKHR *geoms;
uint32_t *max_prim_counts;
VkAccelerationStructureBuildRangeInfoKHR *ranges;
- qboolean built;
+
+ qboolean is_built, needs_to_be_built;
} build;
} rt_blas_t;
@@ -44,13 +52,14 @@ static struct {
// TODO: unify this with render buffer -- really?
// Needs: AS_STORAGE_BIT, SHADER_DEVICE_ADDRESS_BIT
vk_buffer_t accels_buffer;
+ VkDeviceAddress accels_buffer_addr;
struct alo_pool_s *accels_buffer_alloc;
// Temp: lives only during a single frame (may have many in flight)
// Used for building ASes;
// Needs: AS_STORAGE_BIT, SHADER_DEVICE_ADDRESS_BIT
vk_buffer_t scratch_buffer;
- VkDeviceAddress accels_buffer_addr, scratch_buffer_addr;
+ VkDeviceAddress scratch_buffer_addr;
// Temp-ish: used for making TLAS, contains addresses of all used BLASes
// Lifetime and nature of usage similar to scratch_buffer
@@ -65,6 +74,8 @@ static struct {
// Per-frame data that is accumulated between RayFrameBegin and End calls
struct {
+ BOUNDED_ARRAY_DECLARE(rt_draw_instance_t, instances, MAX_INSTANCES);
+
uint32_t scratch_offset; // for building dynamic blases
} frame;
@@ -75,8 +86,7 @@ static struct {
struct {
// TODO two arrays for a single vkCmdBuildAccelerationStructuresKHR() call
- // FIXME This is for testing only
- BOUNDED_ARRAY_DECLARE(rt_blas_t*, blas, 2048);
+ BOUNDED_ARRAY_DECLARE(rt_blas_t*, queue, MAX_INSTANCES);
} build;
cvar_t *cv_force_culling;
@@ -168,6 +178,20 @@ static qboolean buildAccel(vk_combuf_t* combuf, VkAccelerationStructureBuildGeom
return true;
}
+typedef struct {
+ const char *debug_name;
+ VkAccelerationStructureKHR *p_accel;
+ const VkAccelerationStructureGeometryKHR *geoms;
+ const uint32_t *max_prim_counts;
+ const VkAccelerationStructureBuildRangeInfoKHR *build_ranges;
+ uint32_t n_geoms;
+ VkAccelerationStructureTypeKHR type;
+ qboolean dynamic;
+
+ VkDeviceAddress *out_accel_addr;
+ uint32_t *inout_size;
+} as_build_args_t;
+
// TODO split this into smaller building blocks in a separate module
qboolean createOrUpdateAccelerationStructure(vk_combuf_t *combuf, const as_build_args_t *args) {
ASSERT(args->geoms);
@@ -230,7 +254,7 @@ static void createTlas( vk_combuf_t *combuf, VkDeviceAddress instances_addr ) {
};
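	// The TLAS is built out of a single instances-type geometry; its primitive count
	// is simply the number of draw instances collected for this frame.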
const uint32_t tl_max_prim_counts[COUNTOF(tl_geom)] = { MAX_INSTANCES };
const VkAccelerationStructureBuildRangeInfoKHR tl_build_range = {
- .primitiveCount = g_ray_model_state.frame.instances_count,
+ .primitiveCount = g_accel.frame.instances.count,
};
const as_build_args_t asrgs = {
.geoms = tl_geom,
@@ -255,7 +279,7 @@ static qboolean blasPrepareBuild(struct rt_blas_s *blas, VkDeviceAddress geometr
ASSERT(blas);
ASSERT(blas->blas);
- if (blas->build.built && blas->usage == kBlasBuildStatic) {
+ if (blas->build.is_built && blas->usage == kBlasBuildStatic) {
ASSERT(!"Attempting to build static BLAS twice");
return false;
}
@@ -290,10 +314,22 @@ static qboolean blasPrepareBuild(struct rt_blas_s *blas, VkDeviceAddress geometr
return true;
}
-static void buildBlases(vk_combuf_t *combuf) {
- (void)(combuf);
+static void blasBuildEnqueue(rt_blas_t* blas, VkDeviceAddress geometry_buffer_address) {
+ // Skip if this BLAS is already up to date and doesn't need a (re)build
+ if (!blas->build.needs_to_be_built)
+ return;
- vk_buffer_t* const geom = R_GeometryBuffer_Get();
+ // FIXME handle: at the very least we could just ignore this BLAS for this frame
+ ASSERT(blasPrepareBuild(blas, geometry_buffer_address));
+
+ // Mark as built, and also store address for future use
+ blas->build.is_built = true;
+ blas->build.needs_to_be_built = false;
+
+ BOUNDED_ARRAY_APPEND_ITEM(g_accel.build.queue, blas);
+}
+
+static void blasBuildPerform(vk_combuf_t *combuf, vk_buffer_t *geom) {
R_VkBufferStagingCommit(geom, combuf);
R_VkCombufIssueBarrier(combuf, (r_vkcombuf_barrier_t){
.stage = VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_BUILD_BIT_KHR,
.buffers = {
.count = 1,
.items = &(r_vkcombuf_barrier_buffer_t){
.buffer = geom,
.access = VK_ACCESS_2_ACCELERATION_STRUCTURE_READ_BIT_KHR,
},
},
});
- const VkDeviceAddress geometry_addr = R_VkBufferGetDeviceAddress(geom->buffer);
-
- for (int i = 0; i < g_accel.build.blas.count; ++i) {
- rt_blas_t *const blas = g_accel.build.blas.items[i];
- if (!blasPrepareBuild(blas, geometry_addr))
- // FIXME handle
- continue;
+ for (int i = 0; i < g_accel.build.queue.count; ++i) {
+ rt_blas_t *const blas = g_accel.build.queue.items[i];
static int scope_id = -2;
if (scope_id == -2)
@@ -322,44 +353,62 @@ static void buildBlases(vk_combuf_t *combuf) {
scope_id = R_VkGpuScope_Register("build_as");
const int begin_index = R_VkCombufScopeBegin(combuf, scope_id);
const VkAccelerationStructureBuildRangeInfoKHR *p_build_ranges = blas->build.ranges;
// TODO one call to build them all
vkCmdBuildAccelerationStructuresKHR(combuf->cmdbuf, 1, &blas->build.info, &p_build_ranges);
R_VkCombufScopeEnd(combuf, begin_index, VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR);
-
- blas->build.built = true;
}
- g_accel.build.blas.count = 0;
+ g_accel.build.queue.count = 0;
}
vk_resource_t RT_VkAccelPrepareTlas(vk_combuf_t *combuf) {
APROF_SCOPE_DECLARE_BEGIN(prepare, __FUNCTION__);
- ASSERT(g_ray_model_state.frame.instances_count > 0);
- buildBlases(combuf);
+ const uint32_t instances_count = g_accel.frame.instances.count;
+
+ if (instances_count == 0) {
+ APROF_SCOPE_END(prepare);
+ return (vk_resource_t){
+ .type = VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR,
+ .value = (vk_descriptor_value_t){
+ .accel = (VkWriteDescriptorSetAccelerationStructureKHR) {
+ .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR,
+ .accelerationStructureCount = 0,
+ .pAccelerationStructures = NULL,
+ .pNext = NULL,
+ },
+ },
+ };
+ }
DEBUG_BEGIN(combuf->cmdbuf, "prepare tlas");
R_FlippingBuffer_Flip( &g_accel.tlas_geom_buffer_alloc );
- const uint32_t instance_offset = R_FlippingBuffer_Alloc(&g_accel.tlas_geom_buffer_alloc, g_ray_model_state.frame.instances_count, 1);
+ const uint32_t instance_offset = R_FlippingBuffer_Alloc(&g_accel.tlas_geom_buffer_alloc, instances_count, 1);
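	// One VkAccelerationStructureInstanceKHR slot per draw instance; the flipping
	// allocator is double-buffered, so the previous frame's instance array can still
	// be read by the GPU while this one is being written.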
ASSERT(instance_offset != ALO_ALLOC_FAILED); + vk_buffer_t* const geom = R_GeometryBuffer_Get(); + const VkDeviceAddress geometry_buffer_address = R_VkBufferGetDeviceAddress(geom->buffer); + // Upload all blas instances references to GPU mem { const vk_buffer_locked_t headers_lock = R_VkBufferLock(&g_ray_model_state.model_headers_buffer, (vk_buffer_lock_t){ .offset = 0, - .size = g_ray_model_state.frame.instances_count * sizeof(struct ModelHeader), + .size = instances_count * sizeof(struct ModelHeader), }); ASSERT(headers_lock.ptr); VkAccelerationStructureInstanceKHR* inst = ((VkAccelerationStructureInstanceKHR*)g_accel.tlas_geom_buffer.mapped) + instance_offset; - for (int i = 0; i < g_ray_model_state.frame.instances_count; ++i) { - const rt_draw_instance_t* const instance = g_ray_model_state.frame.instances + i; - ASSERT(instance->blas_addr != 0); + for (uint32_t i = 0; i < instances_count; ++i) { + const rt_draw_instance_t* const instance = g_accel.frame.instances.items + i; + + blasBuildEnqueue(instance->blas, geometry_buffer_address); + + ASSERT(instance->blas->address != 0); inst[i] = (VkAccelerationStructureInstanceKHR){ .instanceCustomIndex = instance->kusochki_offset, .instanceShaderBindingTableRecordOffset = 0, - .accelerationStructureReference = instance->blas_addr, + .accelerationStructureReference = instance->blas->address, }; const VkGeometryInstanceFlagsKHR flags = @@ -407,9 +456,13 @@ vk_resource_t RT_VkAccelPrepareTlas(vk_combuf_t *combuf) { } R_VkBufferUnlock(headers_lock); + R_VkBufferStagingCommit(&g_ray_model_state.model_headers_buffer, combuf); } - g_accel.stats.instances_count = g_ray_model_state.frame.instances_count; + g_accel.stats.instances_count = instances_count; + + // Build all scheduled BLASes + blasBuildPerform(combuf, geom); // FIXME use combuf barrier // Barrier for building all BLASes @@ -422,7 +475,7 @@ vk_resource_t RT_VkAccelPrepareTlas(vk_combuf_t *combuf) { .buffer = g_accel.accels_buffer.buffer, // FIXME this is completely wrong. 
Offset and size are BLAS-specific
.offset = instance_offset * sizeof(VkAccelerationStructureInstanceKHR),
- .size = g_ray_model_state.frame.instances_count * sizeof(VkAccelerationStructureInstanceKHR),
+ .size = instances_count * sizeof(VkAccelerationStructureInstanceKHR),
}};
vkCmdPipelineBarrier(combuf->cmdbuf,
VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR,
@@ -434,6 +487,9 @@ vk_resource_t RT_VkAccelPrepareTlas(vk_combuf_t *combuf) {
createTlas(combuf, g_accel.tlas_geom_buffer_addr + instance_offset * sizeof(VkAccelerationStructureInstanceKHR));
DEBUG_END(combuf->cmdbuf);
+ // Consume instances into this frame, no further instances are expected
+ g_accel.frame.instances.count = 0;
+
APROF_SCOPE_END(prepare);
return (vk_resource_t){
.type = VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR,
@@ -605,6 +661,7 @@ struct rt_blas_s* RT_BlasCreate(rt_blas_create_t args) {
blas->build.sizes = getAccelSizes(&blas->build.info, blas->build.max_prim_counts);
blas->blas = createAccel(blas->debug_name, VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR, blas->build.sizes.accelerationStructureSize);
+ blas->address = getAccelAddress(blas->blas);
if (!blas->blas) {
ERR("Couldn't create vk accel");
@@ -614,8 +671,8 @@ struct rt_blas_s* RT_BlasCreate(rt_blas_create_t args) {
blas->build.info.dstAccelerationStructure = blas->blas;
blas->max_geoms = blas->build.info.geometryCount;
- if (!args.dont_build)
- BOUNDED_ARRAY_APPEND_ITEM(g_accel.build.blas, blas);
+ blas->build.is_built = false;
+ blas->build.needs_to_be_built = true;
return blas;
@@ -657,7 +714,7 @@ qboolean RT_BlasUpdate(struct rt_blas_s *blas, const struct vk_render_geometry_s
break;
case kBlasBuildDynamicUpdate:
ASSERT(geoms_count == blas->max_geoms);
- if (blas->build.built) {
+ if (blas->build.is_built) {
blas->build.info.mode = VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR;
blas->build.info.srcAccelerationStructure = blas->blas;
}
@@ -666,6 +723,8 @@ qboolean RT_BlasUpdate(struct rt_blas_s *blas, const struct vk_render_geometry_s
break;
}
+ blas->build.needs_to_be_built = true;
+
blasFillGeometries(blas, geoms, geoms_count);
const VkAccelerationStructureBuildSizesInfoKHR sizes = getAccelSizes(&blas->build.info, blas->build.max_prim_counts);
@@ -681,6 +740,17 @@ qboolean RT_BlasUpdate(struct rt_blas_s *blas, const struct vk_render_geometry_s
return false;
}
- BOUNDED_ARRAY_APPEND_ITEM(g_accel.build.blas, blas);
+ // TODO infos and ranges separately
+ BOUNDED_ARRAY_APPEND_ITEM(g_accel.build.queue, blas);
return true;
}
+
+void RT_VkAccelAddDrawInstance(const rt_draw_instance_t* instance) {
+ const int max_instances = (int)COUNTOF(g_accel.frame.instances.items);
+ if (g_accel.frame.instances.count >= max_instances) {
+ gEngine.Con_Printf(S_ERROR "Too many RT draw instances, max = %d\n", max_instances);
+ return;
+ }
+
+ BOUNDED_ARRAY_APPEND_UNSAFE(g_accel.frame.instances) = *instance;
+}
diff --git a/ref/vk/vk_ray_accel.h b/ref/vk/vk_ray_accel.h
index 21bbed7e83..efa274ab94 100644
--- a/ref/vk/vk_ray_accel.h
+++ b/ref/vk/vk_ray_accel.h
@@ -10,3 +10,15 @@ void RT_VkAccelFrameBegin(void);
struct vk_combuf_s;
vk_resource_t RT_VkAccelPrepareTlas(struct vk_combuf_s *combuf);
+
+typedef struct rt_draw_instance_t {
+ struct rt_blas_s *blas;
+ uint32_t kusochki_offset;
+ matrix3x4 transform_row;
+ matrix4x4 prev_transform_row;
+ vec4_t color;
+ uint32_t material_mode; // MATERIAL_MODE_ from ray_interop.h
+ uint32_t material_flags; // material_flag_bits_e
+} rt_draw_instance_t;
+
+void RT_VkAccelAddDrawInstance(const rt_draw_instance_t*);
diff
--git a/ref/vk/vk_ray_internal.h b/ref/vk/vk_ray_internal.h index 84e7dd8dbe..fe86ef7cb0 100644 --- a/ref/vk/vk_ray_internal.h +++ b/ref/vk/vk_ray_internal.h @@ -1,8 +1,6 @@ #pragma once -#include "vk_core.h" #include "vk_buffer.h" -#include "vk_const.h" #include "vk_rtx.h" #define MAX_INSTANCES 2048 @@ -13,37 +11,6 @@ typedef struct Kusok vk_kusok_data_t; -typedef struct rt_draw_instance_s { - VkDeviceAddress blas_addr; - uint32_t kusochki_offset; - matrix3x4 transform_row; - matrix4x4 prev_transform_row; - vec4_t color; - uint32_t material_mode; // MATERIAL_MODE_ from ray_interop.h - uint32_t material_flags; // material_flag_bits_e -} rt_draw_instance_t; - -typedef struct { - const char *debug_name; - VkAccelerationStructureKHR *p_accel; - const VkAccelerationStructureGeometryKHR *geoms; - const uint32_t *max_prim_counts; - const VkAccelerationStructureBuildRangeInfoKHR *build_ranges; - uint32_t n_geoms; - VkAccelerationStructureTypeKHR type; - qboolean dynamic; - - VkDeviceAddress *out_accel_addr; - uint32_t *inout_size; -} as_build_args_t; - -struct vk_combuf_s; -qboolean createOrUpdateAccelerationStructure(struct vk_combuf_s *combuf, const as_build_args_t *args); - -#define MAX_SCRATCH_BUFFER (32*1024*1024) -// FIXME compute this by lazily allocating #define MAX_ACCELS_BUFFER (128*1024*1024) -#define MAX_ACCELS_BUFFER (256*1024*1024) - typedef struct { // Geometry metadata. Lifetime is similar to geometry lifetime itself. // Semantically close to render buffer (describes layout for those objects) @@ -56,14 +23,6 @@ typedef struct { // Model header // Array of struct ModelHeader: color, material_mode, prev_transform vk_buffer_t model_headers_buffer; - - // Per-frame data that is accumulated between RayFrameBegin and End calls - struct { - rt_draw_instance_t instances[MAX_INSTANCES]; - int instances_count; - - uint32_t scratch_offset; // for building dynamic blases - } frame; } xvk_ray_model_state_t; extern xvk_ray_model_state_t g_ray_model_state; @@ -79,7 +38,6 @@ typedef struct { rt_blas_usage_e usage; const struct vk_render_geometry_s *geoms; int geoms_count; - qboolean dont_build; // for dynamic models } rt_blas_create_t; // Creates BLAS and schedules it to be built next frame @@ -90,9 +48,6 @@ void RT_BlasDestroy(struct rt_blas_s* blas); // Update dynamic BLAS, schedule it for build/update qboolean RT_BlasUpdate(struct rt_blas_s *blas, const struct vk_render_geometry_s *geoms, int geoms_count); -// TODO blas struct can have its addr field known -VkDeviceAddress RT_BlasGetDeviceAddress(struct rt_blas_s *blas); - qboolean RT_DynamicModelInit(void); void RT_DynamicModelShutdown(void); diff --git a/ref/vk/vk_ray_model.c b/ref/vk/vk_ray_model.c index 65f2711644..dbb965a6f3 100644 --- a/ref/vk/vk_ray_model.c +++ b/ref/vk/vk_ray_model.c @@ -4,9 +4,9 @@ #include "vk_materials.h" #include "vk_render.h" #include "vk_logs.h" +#include "vk_ray_accel.h" #include "profiler.h" -#include "eiface.h" #include "xash3d_mathlib.h" #include @@ -152,7 +152,6 @@ void RT_RayModel_Clear(void) { } void XVK_RayModel_ClearForNextFrame( void ) { - g_ray_model_state.frame.instances_count = 0; R_DEBuffer_Flip(&g_ray_model_state.kusochki_alloc); } @@ -321,15 +320,6 @@ qboolean RT_ModelUpdateMaterials(struct rt_model_s *model, const struct vk_rende return true; } -rt_draw_instance_t *getDrawInstance(void) { - if (g_ray_model_state.frame.instances_count >= ARRAYSIZE(g_ray_model_state.frame.instances)) { - gEngine.Con_Printf(S_ERROR "Too many RT draw instances, max = %d\n", 
(int)(ARRAYSIZE(g_ray_model_state.frame.instances))); - return NULL; - } - - return g_ray_model_state.frame.instances + (g_ray_model_state.frame.instances_count++); -} - static qboolean isLegacyBlendingMode(int material_mode) { switch (material_mode) { case MATERIAL_MODE_BLEND_ADD: @@ -387,23 +377,23 @@ void RT_FrameAddModel( struct rt_model_s *model, rt_frame_add_model_t args ) { } } - rt_draw_instance_t *const draw_instance = getDrawInstance(); - if (!draw_instance) - return; - - draw_instance->blas_addr = model->blas_addr; - draw_instance->kusochki_offset = kusochki_offset; - draw_instance->material_mode = args.material_mode; - draw_instance->material_flags = args.material_flags; + rt_draw_instance_t draw_instance = { + .blas = model->blas, + .kusochki_offset = kusochki_offset, + .material_mode = args.material_mode, + .material_flags = args.material_flags, + }; // Legacy blending is done in sRGB-γ space if (isLegacyBlendingMode(args.material_mode)) - Vector4Copy(*args.color_srgb, draw_instance->color); + Vector4Copy(*args.color_srgb, draw_instance.color); else - sRGBtoLinearVec4(*args.color_srgb, draw_instance->color); + sRGBtoLinearVec4(*args.color_srgb, draw_instance.color); + + Matrix3x4_Copy(draw_instance.transform_row, args.transform); + Matrix4x4_Copy(draw_instance.prev_transform_row, args.prev_transform); - Matrix3x4_Copy(draw_instance->transform_row, args.transform); - Matrix4x4_Copy(draw_instance->prev_transform_row, args.prev_transform); + RT_VkAccelAddDrawInstance(&draw_instance); } #define MAX_RT_DYNAMIC_GEOMETRIES 256 @@ -444,7 +434,6 @@ qboolean RT_DynamicModelInit(void) { .usage = kBlasBuildDynamicFast, .geoms = fake_geoms, .geoms_count = MAX_RT_DYNAMIC_GEOMETRIES, - .dont_build = true, }); if (!blas) { @@ -454,7 +443,6 @@ qboolean RT_DynamicModelInit(void) { } g_dyn.groups[i].blas = blas; - g_dyn.groups[i].blas_addr = RT_BlasGetDeviceAddress(blas); } Mem_Free(fake_geoms); @@ -476,7 +464,6 @@ void RT_DynamicModelProcessFrame(void) { if (!dyn->geometries_count) continue; - rt_draw_instance_t* draw_instance; const uint32_t kusochki_offset = kusochkiAllocOnce(dyn->geometries_count); if (kusochki_offset == ALO_ALLOC_FAILED) { gEngine.Con_Printf(S_ERROR "Couldn't allocate kusochki once for %d geoms of %s, skipping\n", dyn->geometries_count, group_names[i]); @@ -493,17 +480,20 @@ void RT_DynamicModelProcessFrame(void) { goto tail; } - draw_instance = getDrawInstance(); - if (!draw_instance) - goto tail; + rt_draw_instance_t draw_instance = { + .blas = dyn->blas, + .kusochki_offset = kusochki_offset, + .material_mode = i, + .material_flags = 0, + .color = {1, 1, 1, 1}, + }; + + // xash3d_mathlib is weird, can't just assign these + // TODO: make my own mathlib of perfectly assignable structs + Matrix3x4_LoadIdentity(draw_instance.transform_row); + Matrix4x4_LoadIdentity(draw_instance.prev_transform_row); - draw_instance->blas_addr = dyn->blas_addr; - draw_instance->kusochki_offset = kusochki_offset; - draw_instance->material_mode = i; - draw_instance->material_flags = 0; - Vector4Set(draw_instance->color, 1, 1, 1, 1); - Matrix3x4_LoadIdentity(draw_instance->transform_row); - Matrix4x4_LoadIdentity(draw_instance->prev_transform_row); + RT_VkAccelAddDrawInstance(&draw_instance); tail: dyn->geometries_count = 0; diff --git a/ref/vk/vk_rtx.c b/ref/vk/vk_rtx.c index aececf42d3..fc3ede2a21 100644 --- a/ref/vk/vk_rtx.c +++ b/ref/vk/vk_rtx.c @@ -232,13 +232,6 @@ static void performTracing( vk_combuf_t *combuf, const perform_tracing_args_t* a DEBUG_BEGIN(cmdbuf, "yay tracing"); - // FIXME 
move this to "TLAS producer" - { - rt_resource_t *const tlas = R_VkResourceGetByIndex(ExternalResource_tlas); - tlas->resource = RT_VkAccelPrepareTlas(combuf); - R_VkBufferStagingCommit(&g_ray_model_state.model_headers_buffer, combuf); - } - prepareUniformBuffer(args->render_args, args->frame_index, args->frame_counter, args->fov_angle_y, args->frame_width, args->frame_height); // Update image resource links after the prev_-related swap above @@ -499,7 +492,10 @@ void VK_RayFrameEnd(const vk_ray_frame_render_args_t* args) if (!args->dst->image) goto tail; - if (g_ray_model_state.frame.instances_count == 0) { + // TODO move this to "TLAS producer" + rt_resource_t *const tlas = R_VkResourceGetByIndex(ExternalResource_tlas); + tlas->resource = RT_VkAccelPrepareTlas(args->combuf); + if (tlas->resource.value.accel.accelerationStructureCount == 0) { R_VkImageClear( &g_rtx.mainpipe_out->image, args->combuf ); } else { const perform_tracing_args_t trace_args = { From 221da267e22e425eab4e2bbb2f1df4498074b041 Mon Sep 17 00:00:00 2001 From: Ivan 'provod' Avdeev Date: Sun, 15 Dec 2024 22:34:44 -0500 Subject: [PATCH 45/62] vk: rt: build all BLASes in a single command --- ref/vk/vk_ray_accel.c | 51 ++++++++++++++++++++++--------------------- 1 file changed, 26 insertions(+), 25 deletions(-) diff --git a/ref/vk/vk_ray_accel.c b/ref/vk/vk_ray_accel.c index 5511fc2feb..fb1765393b 100644 --- a/ref/vk/vk_ray_accel.c +++ b/ref/vk/vk_ray_accel.c @@ -85,8 +85,8 @@ static struct { } stats; struct { - // TODO two arrays for a single vkCmdBuildAccelerationStructuresKHR() call - BOUNDED_ARRAY_DECLARE(rt_blas_t*, queue, MAX_INSTANCES); + BOUNDED_ARRAY_DECLARE(VkAccelerationStructureBuildGeometryInfoKHR, geometry_infos, MAX_INSTANCES); + BOUNDED_ARRAY_DECLARE(VkAccelerationStructureBuildRangeInfoKHR*, range_infos, MAX_INSTANCES); } build; cvar_t *cv_force_culling; @@ -164,11 +164,9 @@ static qboolean buildAccel(vk_combuf_t* combuf, VkAccelerationStructureBuildGeom //gEngine.Con_Reportf("AS=%p, n_geoms=%u, scratch: %#x %d %#x", *args->p_accel, args->n_geoms, scratch_offset_initial, scratch_buffer_size, scratch_offset_initial + scratch_buffer_size); - g_accel.stats.accels_built++; - static int scope_id = -2; if (scope_id == -2) - scope_id = R_VkGpuScope_Register("build_as"); + scope_id = R_VkGpuScope_Register("build_tlas"); const int begin_index = R_VkCombufScopeBegin(combuf, scope_id); const VkAccelerationStructureBuildRangeInfoKHR *p_build_ranges = build_ranges; // FIXME upload everything in bulk, and only then build blases in bulk too @@ -192,6 +190,7 @@ typedef struct { uint32_t *inout_size; } as_build_args_t; +// FIXME this function isn't really needed anymore, it's for TLAS creation only // TODO split this into smaller building blocks in a separate module qboolean createOrUpdateAccelerationStructure(vk_combuf_t *combuf, const as_build_args_t *args) { ASSERT(args->geoms); @@ -309,8 +308,6 @@ static qboolean blasPrepareBuild(struct rt_blas_s *blas, VkDeviceAddress geometr //gEngine.Con_Reportf("AS=%p, n_geoms=%u, scratch: %#x %d %#x", *args->p_accel, args->n_geoms, scratch_offset_initial, scratch_buffer_size, scratch_offset_initial + scratch_buffer_size); - g_accel.stats.accels_built++; - return true; } @@ -326,7 +323,9 @@ static void blasBuildEnqueue(rt_blas_t* blas, VkDeviceAddress geometry_buffer_ad blas->build.is_built = true; blas->build.needs_to_be_built = false; - BOUNDED_ARRAY_APPEND_ITEM(g_accel.build.queue, blas); + BOUNDED_ARRAY_APPEND_ITEM(g_accel.build.geometry_infos, blas->build.info); + 
BOUNDED_ARRAY_APPEND_ITEM(g_accel.build.range_infos, blas->build.ranges); + ASSERT(g_accel.build.geometry_infos.count == g_accel.build.range_infos.count); } static void blasBuildPerform(vk_combuf_t *combuf, vk_buffer_t *geom) { @@ -342,20 +341,25 @@ static void blasBuildPerform(vk_combuf_t *combuf, vk_buffer_t *geom) { }, }); - for (int i = 0; i < g_accel.build.queue.count; ++i) { - rt_blas_t *const blas = g_accel.build.queue.items[i]; - - static int scope_id = -2; - if (scope_id == -2) - scope_id = R_VkGpuScope_Register("build_as"); - const int begin_index = R_VkCombufScopeBegin(combuf, scope_id); - const VkAccelerationStructureBuildRangeInfoKHR *p_build_ranges = blas->build.ranges; - // TODO one call to build them all - vkCmdBuildAccelerationStructuresKHR(combuf->cmdbuf, 1, &blas->build.info, &p_build_ranges); - R_VkCombufScopeEnd(combuf, begin_index, VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR); - } + ASSERT(g_accel.build.geometry_infos.count == g_accel.build.range_infos.count); + const uint32_t count = g_accel.build.geometry_infos.count; + if (count == 0) + return; // Nothing to build - g_accel.build.queue.count = 0; + static int scope_id = -2; + if (scope_id == -2) + scope_id = R_VkGpuScope_Register("build_blases"); + + const int begin_index = R_VkCombufScopeBegin(combuf, scope_id); + vkCmdBuildAccelerationStructuresKHR(combuf->cmdbuf, count, + g_accel.build.geometry_infos.items, + g_accel.build.range_infos.items); + + R_VkCombufScopeEnd(combuf, begin_index, VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR); + + g_accel.stats.accels_built = count; + g_accel.build.geometry_infos.count = 0; + g_accel.build.range_infos.count = 0; } vk_resource_t RT_VkAccelPrepareTlas(vk_combuf_t *combuf) { @@ -723,8 +727,6 @@ qboolean RT_BlasUpdate(struct rt_blas_s *blas, const struct vk_render_geometry_s break; } - blas->build.needs_to_be_built = true; - blasFillGeometries(blas, geoms, geoms_count); const VkAccelerationStructureBuildSizesInfoKHR sizes = getAccelSizes(&blas->build.info, blas->build.max_prim_counts); @@ -740,8 +742,7 @@ qboolean RT_BlasUpdate(struct rt_blas_s *blas, const struct vk_render_geometry_s return false; } - // TODO infos and ranges separately - BOUNDED_ARRAY_APPEND_ITEM(g_accel.build.queue, blas); + blas->build.needs_to_be_built = true; return true; } From 6632b1b5642e2978c38204b75e973ae038933268 Mon Sep 17 00:00:00 2001 From: Ivan Avdeev Date: Sun, 15 Dec 2024 22:47:34 -0500 Subject: [PATCH 46/62] vk: rt: fixup linux build; clean a few things --- ref/vk/vk_ray_accel.c | 6 +----- ref/vk/vk_ray_model.c | 6 +++--- ref/vk/vk_rtx.c | 3 ++- 3 files changed, 6 insertions(+), 9 deletions(-) diff --git a/ref/vk/vk_ray_accel.c b/ref/vk/vk_ray_accel.c index fb1765393b..9ad9176572 100644 --- a/ref/vk/vk_ray_accel.c +++ b/ref/vk/vk_ray_accel.c @@ -353,7 +353,7 @@ static void blasBuildPerform(vk_combuf_t *combuf, vk_buffer_t *geom) { const int begin_index = R_VkCombufScopeBegin(combuf, scope_id); vkCmdBuildAccelerationStructuresKHR(combuf->cmdbuf, count, g_accel.build.geometry_infos.items, - g_accel.build.range_infos.items); + (const VkAccelerationStructureBuildRangeInfoKHR* const *)g_accel.build.range_infos.items); R_VkCombufScopeEnd(combuf, begin_index, VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR); @@ -707,10 +707,6 @@ void RT_BlasDestroy(struct rt_blas_s* blas) { Mem_Free(blas); } -VkDeviceAddress RT_BlasGetDeviceAddress(struct rt_blas_s *blas) { - return getAccelAddress(blas->blas); -} - qboolean RT_BlasUpdate(struct rt_blas_s *blas, const struct 
vk_render_geometry_s *geoms, int geoms_count) { switch (blas->usage) { case kBlasBuildStatic: diff --git a/ref/vk/vk_ray_model.c b/ref/vk/vk_ray_model.c index dbb965a6f3..0016cf6621 100644 --- a/ref/vk/vk_ray_model.c +++ b/ref/vk/vk_ray_model.c @@ -21,7 +21,6 @@ typedef struct rt_kusochki_s { typedef struct rt_model_s { struct rt_blas_s *blas; - VkDeviceAddress blas_addr; rt_kusochki_t kusochki; } rt_model_t; @@ -235,7 +234,6 @@ struct rt_model_s *RT_ModelCreate(rt_model_create_t args) { { rt_model_t *const ret = Mem_Malloc(vk_core.pool, sizeof(*ret)); ret->blas = blas; - ret->blas_addr = RT_BlasGetDeviceAddress(ret->blas); ret->kusochki = kusochki; return ret; } @@ -461,6 +459,8 @@ void RT_DynamicModelProcessFrame(void) { APROF_SCOPE_DECLARE_BEGIN(process, __FUNCTION__); for (int i = 0; i < MATERIAL_MODE_COUNT; ++i) { rt_dynamic_t *const dyn = g_dyn.groups + i; + rt_draw_instance_t draw_instance; + if (!dyn->geometries_count) continue; @@ -480,7 +480,7 @@ void RT_DynamicModelProcessFrame(void) { goto tail; } - rt_draw_instance_t draw_instance = { + draw_instance = (rt_draw_instance_t){ .blas = dyn->blas, .kusochki_offset = kusochki_offset, .material_mode = i, diff --git a/ref/vk/vk_rtx.c b/ref/vk/vk_rtx.c index fc3ede2a21..98665dc33e 100644 --- a/ref/vk/vk_rtx.c +++ b/ref/vk/vk_rtx.c @@ -488,12 +488,13 @@ void VK_RayFrameEnd(const vk_ray_frame_render_args_t* args) const int frame_width = args->dst->width; const int frame_height = args->dst->height; + rt_resource_t *const tlas = R_VkResourceGetByIndex(ExternalResource_tlas); + // Do not draw when we have no swapchain if (!args->dst->image) goto tail; // TODO move this to "TLAS producer" - rt_resource_t *const tlas = R_VkResourceGetByIndex(ExternalResource_tlas); tlas->resource = RT_VkAccelPrepareTlas(args->combuf); if (tlas->resource.value.accel.accelerationStructureCount == 0) { R_VkImageClear( &g_rtx.mainpipe_out->image, args->combuf ); From 66432040c1c9b5f73449122b07d6d6c11eac5c18 Mon Sep 17 00:00:00 2001 From: Ivan Avdeev Date: Mon, 16 Dec 2024 16:37:46 -0500 Subject: [PATCH 47/62] vk: rt: use combuf barriers for accels_buffer --- ref/vk/vk_ray_accel.c | 49 ++++++++++++++++++++++--------------------- 1 file changed, 25 insertions(+), 24 deletions(-) diff --git a/ref/vk/vk_ray_accel.c b/ref/vk/vk_ray_accel.c index 9ad9176572..d47fa60362 100644 --- a/ref/vk/vk_ray_accel.c +++ b/ref/vk/vk_ray_accel.c @@ -330,16 +330,22 @@ static void blasBuildEnqueue(rt_blas_t* blas, VkDeviceAddress geometry_buffer_ad static void blasBuildPerform(vk_combuf_t *combuf, vk_buffer_t *geom) { R_VkBufferStagingCommit(geom, combuf); - R_VkCombufIssueBarrier(combuf, (r_vkcombuf_barrier_t){ - .stage = VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_BUILD_BIT_KHR, - .buffers = { - .count = 1, - .items = &(r_vkcombuf_barrier_buffer_t){ - .buffer = geom, - .access = VK_ACCESS_2_ACCELERATION_STRUCTURE_READ_BIT_KHR, + { + const r_vkcombuf_barrier_buffer_t buffers[] = {{ + .buffer = &g_accel.accels_buffer, + .access = VK_ACCESS_2_ACCELERATION_STRUCTURE_WRITE_BIT_KHR, + }, { + .buffer = geom, + .access = VK_ACCESS_2_ACCELERATION_STRUCTURE_READ_BIT_KHR, + }}; + R_VkCombufIssueBarrier(combuf, (r_vkcombuf_barrier_t){ + .stage = VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR, + .buffers = { + .count = COUNTOF(buffers), + .items = buffers, }, - }, - }); + }); + } ASSERT(g_accel.build.geometry_infos.count == g_accel.build.range_infos.count); const uint32_t count = g_accel.build.geometry_infos.count; @@ -468,23 +474,18 @@ vk_resource_t RT_VkAccelPrepareTlas(vk_combuf_t 
*combuf) { // Build all scheduled BLASes blasBuildPerform(combuf, geom); - // FIXME use combuf barrier - // Barrier for building all BLASes - // BLAS building is now in cmdbuf, need to synchronize with results { - VkBufferMemoryBarrier bmb[] = {{ - .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, - .srcAccessMask = VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_KHR, // | VK_ACCESS_TRANSFER_WRITE_BIT, - .dstAccessMask = VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_KHR, - .buffer = g_accel.accels_buffer.buffer, - // FIXME this is completely wrong. Offset ans size are BLAS-specifig - .offset = instance_offset * sizeof(VkAccelerationStructureInstanceKHR), - .size = instances_count * sizeof(VkAccelerationStructureInstanceKHR), + r_vkcombuf_barrier_buffer_t buffers[] = {{ + .buffer = &g_accel.accels_buffer, + .access = VK_ACCESS_2_ACCELERATION_STRUCTURE_READ_BIT_KHR, }}; - vkCmdPipelineBarrier(combuf->cmdbuf, - VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR, - VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR, - 0, 0, NULL, COUNTOF(bmb), bmb, 0, NULL); + R_VkCombufIssueBarrier(combuf, (r_vkcombuf_barrier_t){ + .stage = VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR, + .buffers = { + .count = COUNTOF(buffers), + .items = buffers, + }, + }); } // 2. Build TLAS From 86c3cb779ab0cba9a075ea2c28047c1610810720 Mon Sep 17 00:00:00 2001 From: Ivan Avdeev Date: Mon, 16 Dec 2024 16:58:14 -0500 Subject: [PATCH 48/62] vk: set up framebuffer image in swapchain --- ref/vk/vk_framectl.c | 64 +++++++++++++------------------------------ ref/vk/vk_swapchain.c | 31 ++++++++++++++++++--- ref/vk/vk_swapchain.h | 8 +++--- 3 files changed, 50 insertions(+), 53 deletions(-) diff --git a/ref/vk/vk_framectl.c b/ref/vk/vk_framectl.c index 50b3d5900d..cb1406047a 100644 --- a/ref/vk/vk_framectl.c +++ b/ref/vk/vk_framectl.c @@ -281,8 +281,8 @@ void R_BeginFrame( qboolean clearScene ) { R_VkStagingFrameCompleted(frame->staging_frame_tag); g_frame.current.framebuffer = R_VkSwapchainAcquire( frame->sem_framebuffer_ready ); - vk_frame.width = g_frame.current.framebuffer.width; - vk_frame.height = g_frame.current.framebuffer.height; + vk_frame.width = g_frame.current.framebuffer.image.width; + vk_frame.height = g_frame.current.framebuffer.image.height; VK_RenderBegin( vk_frame.rtx_enabled ); @@ -301,49 +301,19 @@ void VK_RenderFrame( const struct ref_viewpass_s *rvp ) static void enqueueRendering( vk_combuf_t* combuf, qboolean draw ) { APROF_SCOPE_DECLARE_BEGIN(enqueue, __FUNCTION__); - const VkClearValue clear_value[] = { - {.color = {{1., 0., 0., 0.}}}, - {.depthStencil = {1., 0.}} // TODO reverse-z - }; + const uint32_t frame_width = g_frame.current.framebuffer.image.width; + const uint32_t frame_height = g_frame.current.framebuffer.image.height; ASSERT(g_frame.current.phase == Phase_FrameBegan); - // FIXME, should be done by rendering when it requests textures - R_VkImageUploadCommit(combuf, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | (vk_frame.rtx_enabled ? VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT : 0)); + // TODO: should be done by rendering when it requests textures + R_VkImageUploadCommit(combuf, + VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | (vk_frame.rtx_enabled ? VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT : 0)); const VkCommandBuffer cmdbuf = combuf->cmdbuf; - // This is temporary non-owning placeholder object. - // It is used only for combuf barrier tracking. 
- r_vk_image_t tmp_dst_image = { - .image = g_frame.current.framebuffer.image, - .view = g_frame.current.framebuffer.view, - .width = g_frame.current.framebuffer.width, - .height = g_frame.current.framebuffer.height, - .depth = 1, - .mips = 1, - .layers = 1, - - // TODO .format = g_frame.current.framebuffer.??? - // TODO .image_size = ??? - - // TODO is this correct? - .sync = { - .layout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, - .write = { - .access = VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT, - .stage = VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT - }, - .read = { - .access = VK_ACCESS_2_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_2_MEMORY_READ_BIT, - .stage = VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT - }, - }, - }; - snprintf(tmp_dst_image.name, sizeof(tmp_dst_image.name), "framebuffer[%d]", g_frame.current.framebuffer.index); - if (vk_frame.rtx_enabled) { - VK_RenderEndRTX( combuf, &tmp_dst_image ); + VK_RenderEndRTX( combuf, &g_frame.current.framebuffer.image ); } else { // FIXME: how to do this properly before render pass? // Needed to avoid VUID-vkCmdCopyBuffer-renderpass @@ -363,7 +333,7 @@ static void enqueueRendering( vk_combuf_t* combuf, qboolean draw ) { if (draw) { const r_vkcombuf_barrier_image_t dst_use[] = {{ - .image = &tmp_dst_image, + .image = &g_frame.current.framebuffer.image, .layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, .access = VK_ACCESS_2_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT, }}; @@ -375,11 +345,15 @@ static void enqueueRendering( vk_combuf_t* combuf, qboolean draw ) { }, }); + const VkClearValue clear_value[] = { + {.color = {{1., 0., 0., 0.}}}, + {.depthStencil = {1., 0.}} // TODO reverse-z + }; const VkRenderPassBeginInfo rpbi = { .sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, .renderPass = vk_frame.rtx_enabled ? 
vk_frame.render_pass.after_ray_tracing : vk_frame.render_pass.raster, - .renderArea.extent.width = g_frame.current.framebuffer.width, - .renderArea.extent.height = g_frame.current.framebuffer.height, + .renderArea.extent.width = frame_width, + .renderArea.extent.height = frame_height, .clearValueCount = ARRAYSIZE(clear_value), .pClearValues = clear_value, .framebuffer = g_frame.current.framebuffer.framebuffer, @@ -388,11 +362,11 @@ static void enqueueRendering( vk_combuf_t* combuf, qboolean draw ) { { const VkViewport viewport[] = { - {0.f, 0.f, (float)g_frame.current.framebuffer.width, (float)g_frame.current.framebuffer.height, 0.f, 1.f}, + {0.f, 0.f, (float)frame_width, (float)frame_height, 0.f, 1.f}, }; const VkRect2D scissor[] = {{ {0, 0}, - {g_frame.current.framebuffer.width, g_frame.current.framebuffer.height}, + {frame_width, frame_height}, }}; vkCmdSetViewport(cmdbuf, 0, ARRAYSIZE(viewport), viewport); @@ -402,7 +376,7 @@ static void enqueueRendering( vk_combuf_t* combuf, qboolean draw ) { if (!vk_frame.rtx_enabled) VK_RenderEnd( combuf, draw, - g_frame.current.framebuffer.width, g_frame.current.framebuffer.height, + frame_width, frame_height, g_frame.current.index ); @@ -608,7 +582,7 @@ static qboolean canBlitFromSwapchainToFormat( VkFormat dest_format ) { static rgbdata_t *R_VkReadPixels( void ) { const VkFormat dest_format = VK_FORMAT_R8G8B8A8_UNORM; r_vk_image_t dest_image; - const VkImage frame_image = g_frame.current.framebuffer.image; + const VkImage frame_image = g_frame.current.framebuffer.image.image; rgbdata_t *r_shot = NULL; qboolean blit = canBlitFromSwapchainToFormat( dest_format ); diff --git a/ref/vk/vk_swapchain.c b/ref/vk/vk_swapchain.c index f0238d5c45..ad8aabd80a 100644 --- a/ref/vk/vk_swapchain.c +++ b/ref/vk/vk_swapchain.c @@ -259,11 +259,34 @@ r_vk_swapchain_framebuffer_t R_VkSwapchainAcquire( VkSemaphore sem_image_availa break; } + // This is temporary non-owning placeholder object. + // It is used only for combuf barrier tracking. + ret.image = (r_vk_image_t) { + .image = g_swapchain.images[ret.index], + .view = g_swapchain.image_views[ret.index], + .width = g_swapchain.width, + .height = g_swapchain.height, + .depth = 1, + .mips = 1, + .layers = 1, + + .format = g_swapchain.image_format, + // TODO? .image_size = ??? 
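+		// Likely nothing to put there anyway: swapchain image memory is owned by
+		// the presentation engine, so there may be no allocation size to track
+		// for this non-owning wrapper (assumption).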
+
+		.sync = {
+			.layout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
+			.write = {
+				.access = VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT,
+				.stage = VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT
+			},
+			.read = {
+				.access = VK_ACCESS_2_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_2_MEMORY_READ_BIT,
+				.stage = VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT
+			},
+		},
+	};
+	snprintf(ret.image.name, sizeof(ret.image.name), "framebuffer[%u]", ret.index);
 	ret.framebuffer = g_swapchain.framebuffers[ret.index];
-	ret.width = g_swapchain.width;
-	ret.height = g_swapchain.height;
-	ret.image = g_swapchain.images[ret.index];
-	ret.view = g_swapchain.image_views[ret.index];
 
 finalize:
 	APROF_SCOPE_END(function);
diff --git a/ref/vk/vk_swapchain.h b/ref/vk/vk_swapchain.h
index 84bcadffd3..7dfc34d990 100644
--- a/ref/vk/vk_swapchain.h
+++ b/ref/vk/vk_swapchain.h
@@ -1,4 +1,5 @@
 #include "vk_core.h"
+#include "vk_image.h"
 
 // TODO this needs to be negotiated by swapchain creation
 // however, currently render pass also needs it so ugh
@@ -11,10 +12,9 @@ void R_VkSwapchainShutdown( void );
 
 typedef struct {
 	uint32_t index;
-	uint32_t width, height;
-	VkFramebuffer framebuffer; // TODO move out
-	VkImage image;
-	VkImageView view;
+	// Non-owned image, mostly for sync/barrier tracking purposes
+	r_vk_image_t image;
+	VkFramebuffer framebuffer;
 } r_vk_swapchain_framebuffer_t;
 
 r_vk_swapchain_framebuffer_t R_VkSwapchainAcquire( VkSemaphore sem_image_available );

From 4e7ff4e848f1d12a8d24e8dfd9455fec58bbe3f6 Mon Sep 17 00:00:00 2001
From: Ivan Avdeev
Date: Mon, 16 Dec 2024 17:48:55 -0500
Subject: [PATCH 49/62] vk: change verbose logs src/dst+access/stage order a
 bit for combuf barriers

Group by access/stage, not by src/dst. Makes logs a bit more readable.
---
 ref/vk/vk_combuf.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/ref/vk/vk_combuf.c b/ref/vk/vk_combuf.c
index 072bd239e3..6b8bac8d5b 100644
--- a/ref/vk/vk_combuf.c
+++ b/ref/vk/vk_combuf.c
@@ -343,10 +343,10 @@ static qboolean makeBufferBarrier(VkBufferMemoryBarrier2* out_bmb, const r_vkcom
 	if (LOG_VERBOSE) {
 		DEBUG("    srcAccessMask = %llx", (unsigned long long)out_bmb->srcAccessMask);
 		printAccessMask("      ", out_bmb->srcAccessMask);
-		DEBUG("    srcStageMask = %llx", (unsigned long long)out_bmb->srcStageMask);
-		printStageMask("      ", out_bmb->srcStageMask);
 		DEBUG("    dstAccessMask = %llx", (unsigned long long)out_bmb->dstAccessMask);
 		printAccessMask("      ", out_bmb->dstAccessMask);
+		DEBUG("    srcStageMask = %llx", (unsigned long long)out_bmb->srcStageMask);
+		printStageMask("      ", out_bmb->srcStageMask);
 		DEBUG("    dstStageMask = %llx", (unsigned long long)out_bmb->dstStageMask);
 		printStageMask("      ", out_bmb->dstStageMask);
 	}
@@ -415,10 +415,10 @@ static qboolean makeImageBarrier(VkImageMemoryBarrier2* out_imb, const r_vkcombu
 	if (LOG_VERBOSE) {
 		DEBUG("    srcAccessMask = %llx", (unsigned long long)out_imb->srcAccessMask);
 		printAccessMask("      ", out_imb->srcAccessMask);
-		DEBUG("    srcStageMask = %llx", (unsigned long long)out_imb->srcStageMask);
-		printStageMask("      ", out_imb->srcStageMask);
 		DEBUG("    dstAccessMask = %llx", (unsigned long long)out_imb->dstAccessMask);
 		printAccessMask("      ", out_imb->dstAccessMask);
+		DEBUG("    srcStageMask = %llx", (unsigned long long)out_imb->srcStageMask);
+		printStageMask("      ", out_imb->srcStageMask);
 		DEBUG("    dstStageMask = %llx", (unsigned long long)out_imb->dstStageMask);
 		printStageMask("      ", out_imb->dstStageMask);
 		DEBUG("    oldLayout = %s (%llx)", R_VkImageLayoutName(out_imb->oldLayout), (unsigned long long)out_imb->oldLayout);
From 
513396cdabc2fa97fec34a37bc63bd7c1fa4db60 Mon Sep 17 00:00:00 2001 From: Ivan Avdeev Date: Mon, 16 Dec 2024 17:50:03 -0500 Subject: [PATCH 50/62] vk: make ReadPixels use combuf barriers --- ref/vk/vk_framectl.c | 155 ++++++++++++++++++------------------------- 1 file changed, 65 insertions(+), 90 deletions(-) diff --git a/ref/vk/vk_framectl.c b/ref/vk/vk_framectl.c index cb1406047a..12d8251fe8 100644 --- a/ref/vk/vk_framectl.c +++ b/ref/vk/vk_framectl.c @@ -382,9 +382,15 @@ static void enqueueRendering( vk_combuf_t* combuf, qboolean draw ) { R_VkOverlay_DrawAndFlip( cmdbuf, draw ); - if (draw) + if (draw) { vkCmdEndRenderPass(cmdbuf); + // Render pass's finalLayout transitions the image into this one + g_frame.current.framebuffer.image.sync.read.access = 0; + g_frame.current.framebuffer.image.sync.write.access = 0; + g_frame.current.framebuffer.image.sync.layout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR; + } + g_frame.current.phase = Phase_RenderingEnqueued; APROF_SCOPE_END(enqueue); } @@ -581,15 +587,15 @@ static qboolean canBlitFromSwapchainToFormat( VkFormat dest_format ) { static rgbdata_t *R_VkReadPixels( void ) { const VkFormat dest_format = VK_FORMAT_R8G8B8A8_UNORM; - r_vk_image_t dest_image; - const VkImage frame_image = g_frame.current.framebuffer.image.image; + r_vk_image_t temp_image; + r_vk_image_t *const framebuffer_image = &g_frame.current.framebuffer.image; rgbdata_t *r_shot = NULL; qboolean blit = canBlitFromSwapchainToFormat( dest_format ); vk_combuf_t *const combuf = g_frame.frames[g_frame.current.index].combuf; const VkCommandBuffer cmdbuf = combuf->cmdbuf; - if (frame_image == VK_NULL_HANDLE) { + if (framebuffer_image->image == VK_NULL_HANDLE) { gEngine.Con_Printf(S_ERROR "no current image, can't take screenshot\n"); return NULL; } @@ -609,63 +615,47 @@ static rgbdata_t *R_VkReadPixels( void ) { .flags = 0, .memory_props = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT, }; - dest_image = R_VkImageCreate(&xic); + temp_image = R_VkImageCreate(&xic); } // Make sure that all rendering ops are enqueued const qboolean draw = true; enqueueRendering( combuf, draw ); - { - // Barrier 1: dest image - const VkImageMemoryBarrier image_barrier[2] = {{ - .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, - .image = dest_image.image, - .srcAccessMask = 0, - .dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT, - .oldLayout = VK_IMAGE_LAYOUT_UNDEFINED, - .newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, - .subresourceRange = (VkImageSubresourceRange) { - .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, - .baseMipLevel = 0, - .levelCount = 1, - .baseArrayLayer = 0, - .layerCount = 1, - }}, { // Barrier 2: source swapchain image - .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, - .image = frame_image, - .srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, - .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT, - .oldLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, - .newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, - .subresourceRange = (VkImageSubresourceRange) { - .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, - .baseMipLevel = 0, - .levelCount = 1, - .baseArrayLayer = 0, - .layerCount = 1, - }}}; - - vkCmdPipelineBarrier(cmdbuf, - VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, - VK_PIPELINE_STAGE_TRANSFER_BIT, - 0, 0, NULL, 0, NULL, ARRAYSIZE(image_barrier), image_barrier); - } - // Blit/transfer if (blit) { - const VkImageBlit blit = { - .srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, - .dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, - .srcSubresource.layerCount = 1, - 
.dstSubresource.layerCount = 1,
-			.srcOffsets = {{0}, {vk_frame.width, vk_frame.height, 1}},
-			.dstOffsets = {{0}, {vk_frame.width, vk_frame.height, 1}}
-		};
-		vkCmdBlitImage(cmdbuf,
-			frame_image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
-			dest_image.image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &blit, VK_FILTER_NEAREST);
+		R_VkImageBlit(combuf, &(r_vkimage_blit_args){
+			.src = {
+				.image = framebuffer_image,
+				.width = vk_frame.width,
+				.height = vk_frame.height,
+				.depth = 1,
+			},
+			.dst = {
+				.image = &temp_image,
+				.width = vk_frame.width,
+				.height = vk_frame.height,
+				.depth = 1,
+			},
+		});
 	} else {
+		const r_vkcombuf_barrier_image_t image_barriers[] = {{
+			.image = &temp_image,
+			.access = VK_ACCESS_2_TRANSFER_WRITE_BIT,
+			.layout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+		}, {
+			.image = framebuffer_image,
+			.access = VK_ACCESS_2_TRANSFER_READ_BIT,
+			.layout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+		}};
+		R_VkCombufIssueBarrier(combuf, (r_vkcombuf_barrier_t){
+			.stage = VK_PIPELINE_STAGE_2_TRANSFER_BIT,
+			.images = {
+				.count = COUNTOF(image_barriers),
+				.items = image_barriers,
+			},
+		});
+
 		const VkImageCopy copy = {
 			.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
 			.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
@@ -677,46 +667,31 @@ static rgbdata_t *R_VkReadPixels( void ) {
 		};
 
 		vkCmdCopyImage(cmdbuf,
-			frame_image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
-			dest_image.image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &copy);
+			framebuffer_image->image, framebuffer_image->sync.layout,
+			temp_image.image, temp_image.sync.layout, 1, &copy);
 		gEngine.Con_Printf(S_WARN "Blit is not supported, screenshot will likely have mixed components; TODO: swizzle in software\n");
 	}
 
 	{
-		// Barrier 1: dest image
-		VkImageMemoryBarrier image_barrier[2] = {{
-			.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
-			.image = dest_image.image,
-			.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
-			.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT,
-			.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
-			.newLayout = VK_IMAGE_LAYOUT_GENERAL,
-			.subresourceRange = (VkImageSubresourceRange) {
-				.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
-				.baseMipLevel = 0,
-				.levelCount = 1,
-				.baseArrayLayer = 0,
-				.layerCount = 1,
-			}}, { // Barrier 2: source swapchain image
-			.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
-			.image = frame_image,
-			.srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT,
-			.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT,
-			.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
-			.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
-			.subresourceRange = (VkImageSubresourceRange) {
-				.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
-				.baseMipLevel = 0,
-				.levelCount = 1,
-				.baseArrayLayer = 0,
-				.layerCount = 1,
-			}}};
-
-		vkCmdPipelineBarrier(cmdbuf,
-			VK_PIPELINE_STAGE_TRANSFER_BIT,
-			VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
-			0, 0, NULL, 0, NULL, ARRAYSIZE(image_barrier), image_barrier);
+		const r_vkcombuf_barrier_image_t image_barriers[] = {{
+			// Temp image: prepare for reading on CPU
+			.image = &temp_image,
+			.access = VK_ACCESS_2_MEMORY_READ_BIT,
+			.layout = VK_IMAGE_LAYOUT_GENERAL,
+		}, {
+			// Framebuffer image: prepare for displaying
+			.image = framebuffer_image,
+			.access = 0,
+			.layout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
+		}};
+		R_VkCombufIssueBarrier(combuf, (r_vkcombuf_barrier_t){
+			.stage = VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT,
+			.images = {
+				.count = COUNTOF(image_barriers),
+				.items = image_barriers,
+			},
+		});
 	}
 
 	{
 		const VkImageSubresource subres = {
 			.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
 		};
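 		// vkGetImageSubresourceLayout() below yields the linear image's offset and
 		// rowPitch; a CPU-side copy must advance by rowPitch per row, since pitch
 		// may exceed width * bytes-per-pixel.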
VkSubresourceLayout layout; - const char *mapped = dest_image.devmem.mapped; - vkGetImageSubresourceLayout(vk_core.device, dest_image.image, &subres, &layout); + const char *mapped = temp_image.devmem.mapped; + vkGetImageSubresourceLayout(vk_core.device, temp_image.image, &subres, &layout); mapped += layout.offset; @@ -774,7 +749,7 @@ static rgbdata_t *R_VkReadPixels( void ) { } } - R_VkImageDestroy( &dest_image ); + R_VkImageDestroy( &temp_image ); return r_shot; } From 4fb325aec17c879dd2870208806a8ac1e1e9b721 Mon Sep 17 00:00:00 2001 From: Ivan Avdeev Date: Mon, 16 Dec 2024 18:05:11 -0500 Subject: [PATCH 51/62] vk: minor comments update --- ref/vk/TODO.md | 14 ++++++++------ ref/vk/vk_image.c | 8 ++------ ref/vk/vk_staging.c | 4 ++++ 3 files changed, 14 insertions(+), 12 deletions(-) diff --git a/ref/vk/TODO.md b/ref/vk/TODO.md index fd8c6d0ba2..d3ca2ef6e9 100644 --- a/ref/vk/TODO.md +++ b/ref/vk/TODO.md @@ -1,7 +1,4 @@ ## Next -- [ ] Proper staging-vs-frame tracking, replace tag with something sensitive - - currently assert fails because there's 2 frame latency, not one. - - [ ] comment for future: full staging might want to wait for previous frame to finish ## Upcoming - [ ] framectl frame tracking, e.g.: @@ -16,9 +13,14 @@ - [ ] performance profiling and comparison ## 2024-12-12 E384 -- [ ] zero vkCmdPipelineBarriers calls - - [x] track image sync state with image (and not with resource) - - [ ] grep for anything else +- [x] track image sync state with the image object itself (and not with vk_resource) + +### After stream +- [x] Proper staging-vs-frame tracking, replace tag with something sensitive + - currently assert fails because there's 1 frame latency, not one. + - [x] comment for future: full staging might want to wait for previous frame to finish +- [x] zero vkCmdPipelineBarriers calls + - [x] grep for anything else ## 2024-12-10 E383 - [x] Add transfer stage to submit semaphore separating command buffer: fixes sync for rt diff --git a/ref/vk/vk_image.c b/ref/vk/vk_image.c index 797358e198..6f9f883132 100644 --- a/ref/vk/vk_image.c +++ b/ref/vk/vk_image.c @@ -315,8 +315,7 @@ void R_VkImageUploadCommit( struct vk_combuf_s *combuf, VkPipelineStageFlagBits // 1.b Invoke the barriers vkCmdPipelineBarrier(combuf->cmdbuf, - //VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, - VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, + VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, NULL, 0, NULL, barriers_count, g_image_upload.barriers.items @@ -397,10 +396,7 @@ void R_VkImageUploadCommit( struct vk_combuf_s *combuf, VkPipelineStageFlagBits } // 3.b Submit the barriers - /* const VkPipelineStageFlagBits dest_stages = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | ( */ - /* vk_core.rtx */ - /* ? VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR */ - /* : 0); */ + // It's a massive set of barriers (1e3+), so using manual barriers instead of automatic combuf ones vkCmdPipelineBarrier(combuf->cmdbuf, VK_PIPELINE_STAGE_TRANSFER_BIT, dst_stages, 0, 0, NULL, 0, NULL, diff --git a/ref/vk/vk_staging.c b/ref/vk/vk_staging.c index 61d546f0a3..2ed57f02a7 100644 --- a/ref/vk/vk_staging.c +++ b/ref/vk/vk_staging.c @@ -125,6 +125,10 @@ uint32_t R_VkStagingFrameEpilogue(vk_combuf_t* combuf) { ASSERT(user->pending_count == 0); } + // TODO it would be nice to attach a finalization callback to combuf + // So that when the combuf is done on GPU, the callback is called and we can clean its memory + // instead of depending on framectl calling Completed function manually. 
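+	// A hypothetical shape for such an API (sketch only, these names do not exist yet):
+	//   typedef void (r_vkcombuf_done_fn)(void *userptr);
+	//   void R_VkCombufOnCompleted(vk_combuf_t *combuf, r_vkcombuf_done_fn *fn, void *userptr);
+	// Each user would then register a callback when its data is consumed into a
+	// combuf, instead of framectl driving R_VkStagingFrameCompleted() by hand.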
+
 	return g_staging.buffer_alloc_ring.head;
 }
 
From f9b5cc81f29d439224f14b321172360582cb51ef Mon Sep 17 00:00:00 2001
From: Ivan Avdeev
Date: Mon, 16 Dec 2024 18:14:58 -0500
Subject: [PATCH 52/62] vk: specify transfer stages more precisely

---
 ref/vk/vk_buffer.c   | 6 +++---
 ref/vk/vk_combuf.c   | 1 -
 ref/vk/vk_framectl.c | 4 ++--
 ref/vk/vk_image.c    | 6 +++---
 4 files changed, 8 insertions(+), 9 deletions(-)

diff --git a/ref/vk/vk_buffer.c b/ref/vk/vk_buffer.c
index 63fd884a02..98205db02a 100644
--- a/ref/vk/vk_buffer.c
+++ b/ref/vk/vk_buffer.c
@@ -225,12 +225,12 @@ void R_VkBufferStagingCommit(vk_buffer_t *buf, struct vk_combuf_s *combuf) {
 		}};
 
 	R_VkCombufIssueBarrier(combuf, (r_vkcombuf_barrier_t) {
-		.stage = VK_PIPELINE_STAGE_2_TRANSFER_BIT,
+		.stage = VK_PIPELINE_STAGE_2_COPY_BIT,
 		.buffers = { barrier, COUNTOF(barrier) },
 		.images = { NULL, 0 },
 	});
 
-	//FIXME const int begin_index = R_VkCombufScopeBegin(combuf, g_staging.buffer_upload_scope_id);
+	//TODO const int begin_index = R_VkCombufScopeBegin(combuf, g_staging.buffer_upload_scope_id);
 	const VkCommandBuffer cmdbuf = combuf->cmdbuf;
 	DEBUG_NV_CHECKPOINTF(cmdbuf, "staging dst_buffer=%p count=%d", buf->buffer, stb->regions.count);
@@ -241,5 +241,5 @@ void R_VkBufferStagingCommit(vk_buffer_t *buf, struct vk_combuf_s *combuf) {
 	R_VkStagingMarkFree(stb->staging_handle, stb->regions.count);
 	stb->regions.count = 0;
 
-	//FIXME R_VkCombufScopeEnd(combuf, begin_index, VK_PIPELINE_STAGE_TRANSFER_BIT);
+	//TODO R_VkCombufScopeEnd(combuf, begin_index, VK_PIPELINE_STAGE_TRANSFER_BIT);
 }
diff --git a/ref/vk/vk_combuf.c b/ref/vk/vk_combuf.c
index 6b8bac8d5b..b9d4009411 100644
--- a/ref/vk/vk_combuf.c
+++ b/ref/vk/vk_combuf.c
@@ -236,7 +236,6 @@ static void printStageMask(const char *prefix, VkPipelineStageFlags2 stages) {
 	PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT);
 	PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT);
 	PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_ALL_TRANSFER_BIT);
-	PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_TRANSFER_BIT);
 	PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT);
 	PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_HOST_BIT);
 	PRINT_FLAG(stages, VK_PIPELINE_STAGE_2_ALL_GRAPHICS_BIT);
diff --git a/ref/vk/vk_framectl.c b/ref/vk/vk_framectl.c
index 12d8251fe8..642dda3971 100644
--- a/ref/vk/vk_framectl.c
+++ b/ref/vk/vk_framectl.c
@@ -649,7 +649,7 @@ static rgbdata_t *R_VkReadPixels( void ) {
 			.layout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
 		}};
 		R_VkCombufIssueBarrier(combuf, (r_vkcombuf_barrier_t){
-			.stage = VK_PIPELINE_STAGE_2_TRANSFER_BIT,
+			.stage = VK_PIPELINE_STAGE_2_COPY_BIT,
 			.images = {
 				.count = COUNTOF(image_barriers),
 				.items = image_barriers,
@@ -686,7 +686,7 @@ static rgbdata_t *R_VkReadPixels( void ) {
 			.layout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
 		}};
 		R_VkCombufIssueBarrier(combuf, (r_vkcombuf_barrier_t){
-			.stage = VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT,
+			.stage = VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT | VK_PIPELINE_STAGE_2_HOST_BIT,
 			.images = {
 				.count = COUNTOF(image_barriers),
 				.items = image_barriers,
diff --git a/ref/vk/vk_image.c b/ref/vk/vk_image.c
index 6f9f883132..0216ac9bb5 100644
--- a/ref/vk/vk_image.c
+++ b/ref/vk/vk_image.c
@@ -167,7 +167,7 @@ void R_VkImageClear(r_vk_image_t *img, struct vk_combuf_s* combuf) {
 			.access = VK_ACCESS_2_TRANSFER_WRITE_BIT,
 		}};
 		R_VkCombufIssueBarrier(combuf, (r_vkcombuf_barrier_t){
-			.stage = VK_PIPELINE_STAGE_2_TRANSFER_BIT,
+			.stage = VK_PIPELINE_STAGE_2_CLEAR_BIT,
 			.images = {
 				.items = ib,
 				.count = COUNTOF(ib),
@@ -189,7 +189,7 @@ void R_VkImageBlit(struct 
vk_combuf_s *combuf, const r_vkimage_blit_args *args ) .access = VK_ACCESS_2_TRANSFER_WRITE_BIT, }}; R_VkCombufIssueBarrier(combuf, (r_vkcombuf_barrier_t){ - .stage = VK_PIPELINE_STAGE_2_TRANSFER_BIT, + .stage = VK_PIPELINE_STAGE_2_BLIT_BIT, .images = { .items = ib, .count = COUNTOF(ib), @@ -316,7 +316,7 @@ void R_VkImageUploadCommit( struct vk_combuf_s *combuf, VkPipelineStageFlagBits // 1.b Invoke the barriers vkCmdPipelineBarrier(combuf->cmdbuf, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, - VK_PIPELINE_STAGE_TRANSFER_BIT, + VK_PIPELINE_STAGE_TRANSFER_BIT, // TODO VK_PIPELINE_STAGE_2_COPY_BIT 0, 0, NULL, 0, NULL, barriers_count, g_image_upload.barriers.items ); From 814458a05d19d24de411439bd63c6dabc2ff5651 Mon Sep 17 00:00:00 2001 From: Ivan 'provod' Avdeev Date: Mon, 16 Dec 2024 23:19:01 -0500 Subject: [PATCH 53/62] vk: rt: remove unnecessary accel frame boundary func --- ref/vk/vk_ray_accel.c | 8 +------- ref/vk/vk_ray_accel.h | 1 - ref/vk/vk_rtx.c | 1 - 3 files changed, 1 insertion(+), 9 deletions(-) diff --git a/ref/vk/vk_ray_accel.c b/ref/vk/vk_ray_accel.c index d47fa60362..6db1e1ee36 100644 --- a/ref/vk/vk_ray_accel.c +++ b/ref/vk/vk_ray_accel.c @@ -494,6 +494,7 @@ vk_resource_t RT_VkAccelPrepareTlas(vk_combuf_t *combuf) { // Consume instances into this frame, no further instances are expected g_accel.frame.instances.count = 0; + g_accel.frame.scratch_offset = 0; APROF_SCOPE_END(prepare); return (vk_resource_t){ @@ -583,10 +584,6 @@ void RT_VkAccelNewMap(void) { } } -void RT_VkAccelFrameBegin(void) { - g_accel.frame.scratch_offset = 0; -} - static void blasFillGeometries(rt_blas_t *blas, const vk_render_geometry_t *geoms, int geoms_count) { // geoms_count is not constant for dynamic models, and it shouldn't exceed max_geoms by design ASSERT(geoms_count <= blas->max_geoms); @@ -699,9 +696,6 @@ void RT_BlasDestroy(struct rt_blas_s* blas) { if (blas->build.ranges) Mem_Free(blas->build.ranges); - /* if (blas->max_prims) */ - /* Mem_Free(blas->max_prims); */ - if (blas->blas) vkDestroyAccelerationStructureKHR(vk_core.device, blas->blas, NULL); diff --git a/ref/vk/vk_ray_accel.h b/ref/vk/vk_ray_accel.h index efa274ab94..3e246f88b9 100644 --- a/ref/vk/vk_ray_accel.h +++ b/ref/vk/vk_ray_accel.h @@ -6,7 +6,6 @@ qboolean RT_VkAccelInit(void); void RT_VkAccelShutdown(void); void RT_VkAccelNewMap(void); -void RT_VkAccelFrameBegin(void); struct vk_combuf_s; vk_resource_t RT_VkAccelPrepareTlas(struct vk_combuf_s *combuf); diff --git a/ref/vk/vk_rtx.c b/ref/vk/vk_rtx.c index 98665dc33e..771d615f14 100644 --- a/ref/vk/vk_rtx.c +++ b/ref/vk/vk_rtx.c @@ -77,7 +77,6 @@ void VK_RayNewMapBegin( void ) { void VK_RayFrameBegin( void ) { ASSERT(vk_core.rtx); - RT_VkAccelFrameBegin(); XVK_RayModel_ClearForNextFrame(); RT_LightsFrameBegin(); } From 4a195838d57dddf3b496e07cab918f01a29edf2b Mon Sep 17 00:00:00 2001 From: Ivan Avdeev Date: Tue, 17 Dec 2024 11:03:02 -0500 Subject: [PATCH 54/62] vk: s/alloc|free/lock|unlock/ for staging for readability --- ref/vk/vk_buffer.c | 4 ++-- ref/vk/vk_image.c | 6 +++--- ref/vk/vk_staging.c | 22 +++++++++++----------- ref/vk/vk_staging.h | 4 ++-- 4 files changed, 18 insertions(+), 18 deletions(-) diff --git a/ref/vk/vk_buffer.c b/ref/vk/vk_buffer.c index 98205db02a..44eea986d9 100644 --- a/ref/vk/vk_buffer.c +++ b/ref/vk/vk_buffer.c @@ -183,7 +183,7 @@ vk_buffer_locked_t R_VkBufferLock(vk_buffer_t *buf, vk_buffer_lock_t lock) { r_vk_staging_buffer_t *const stb = findOrCreateStagingSlotForBuffer(buf); ASSERT(stb); - r_vkstaging_region_t staging_lock = 
R_VkStagingAlloc(stb->staging_handle, lock.size); + r_vkstaging_region_t staging_lock = R_VkStagingLock(stb->staging_handle, lock.size); ASSERT(staging_lock.ptr); // TODO perf: adjacent region coalescing @@ -238,7 +238,7 @@ void R_VkBufferStagingCommit(vk_buffer_t *buf, struct vk_combuf_s *combuf) { vkCmdCopyBuffer(cmdbuf, stb->staging_buffer, buf->buffer, stb->regions.count, stb->regions.items); DEBUG("buf=%llx staging pending-=%u", (unsigned long long)buf->buffer, stb->regions.count); - R_VkStagingMarkFree(stb->staging_handle, stb->regions.count); + R_VkStagingUnlockBulk(stb->staging_handle, stb->regions.count); stb->regions.count = 0; //TODO R_VkCombufScopeEnd(combuf, begin_index, VK_PIPELINE_STAGE_TRANSFER_BIT); diff --git a/ref/vk/vk_image.c b/ref/vk/vk_image.c index 0216ac9bb5..2e7846c9fd 100644 --- a/ref/vk/vk_image.c +++ b/ref/vk/vk_image.c @@ -403,7 +403,7 @@ void R_VkImageUploadCommit( struct vk_combuf_s *combuf, VkPipelineStageFlagBits barriers_count, (VkImageMemoryBarrier*)g_image_upload.barriers.items ); - R_VkStagingMarkFree(g_image_upload.staging, barriers_count); + R_VkStagingUnlockBulk(g_image_upload.staging, barriers_count); R_VkCombufScopeEnd(combuf, gpu_scope_begin, VK_PIPELINE_STAGE_TRANSFER_BIT); @@ -434,7 +434,7 @@ void R_VkImageUploadBegin( r_vk_image_t *img ) { // would notify other modules that they'd need to commit their staging data, and thus we'd return to this module's // R_VkImageUploadCommit(), which needs to see valid data. Therefore, don't touch its state until // R_VkStagingLock returns. - const r_vkstaging_region_t staging_lock = R_VkStagingAlloc(g_image_upload.staging, staging_size); + const r_vkstaging_region_t staging_lock = R_VkStagingLock(g_image_upload.staging, staging_size); img->upload_slot = g_image_upload.images.count; arrayDynamicAppendT(&g_image_upload.images, NULL); @@ -514,7 +514,7 @@ static void cancelUpload( r_vk_image_t *img ) { // Technically we won't need that staging region anymore at all, but it doesn't matter, // it's just easier to mark it to be freed this way. 
- R_VkStagingMarkFree(g_image_upload.staging, 1); + R_VkStagingUnlockBulk(g_image_upload.staging, 1); // Mark upload slot as unused, and image as not subjet to uploading up->image = NULL; diff --git a/ref/vk/vk_staging.c b/ref/vk/vk_staging.c index 2ed57f02a7..55b2806f1b 100644 --- a/ref/vk/vk_staging.c +++ b/ref/vk/vk_staging.c @@ -20,7 +20,7 @@ typedef struct r_vkstaging_user_t { r_vkstaging_user_create_t info; - uint32_t pending_count; + uint32_t locked_count; struct { int allocs; @@ -83,18 +83,18 @@ r_vkstaging_user_t *R_VkStagingUserCreate(r_vkstaging_user_create_t info) { } void R_VkStagingUserDestroy(r_vkstaging_user_t *user) { - ASSERT(user->pending_count == 0); + ASSERT(user->locked_count == 0); // TODO remove from the table } -r_vkstaging_region_t R_VkStagingAlloc(r_vkstaging_user_t* user, uint32_t size) { +r_vkstaging_region_t R_VkStagingLock(r_vkstaging_user_t* user, uint32_t size) { const uint32_t alignment = 4; const uint32_t offset = aloRingAlloc(&g_staging.buffer_alloc_ring, size, alignment); ASSERT(offset != ALO_ALLOC_FAILED && "FIXME: workaround: increase staging buffer size"); DEBUG("Lock alignment=%d size=%d region=%d..%d", alignment, size, offset, offset + size); - user->pending_count++; + user->locked_count++; user->stats.allocs++; user->stats.size += size; @@ -109,20 +109,20 @@ r_vkstaging_region_t R_VkStagingAlloc(r_vkstaging_user_t* user, uint32_t size) { }; } -void R_VkStagingMarkFree(r_vkstaging_user_t* user, uint32_t count) { - ASSERT(user->pending_count >= count); - user->pending_count -= count; +void R_VkStagingUnlockBulk(r_vkstaging_user_t* user, uint32_t count) { + ASSERT(user->locked_count >= count); + user->locked_count -= count; } uint32_t R_VkStagingFrameEpilogue(vk_combuf_t* combuf) { for (int i = 0; i < g_staging.users.count; ++i) { r_vkstaging_user_t *const user = g_staging.users.items + i; - if (user->pending_count == 0) + if (user->locked_count == 0) continue; - WARN("%s has %u pending staging items, pushing", user->info.name, user->pending_count); - user->info.push(user->info.userptr, combuf, user->pending_count); - ASSERT(user->pending_count == 0); + WARN("%s has %u locked staging items, pushing", user->info.name, user->locked_count); + user->info.push(user->info.userptr, combuf, user->locked_count); + ASSERT(user->locked_count == 0); } // TODO it would be nice to attach a finalization callback to combuf diff --git a/ref/vk/vk_staging.h b/ref/vk/vk_staging.h index 2e9adcc335..601fa68b0a 100644 --- a/ref/vk/vk_staging.h +++ b/ref/vk/vk_staging.h @@ -31,11 +31,11 @@ typedef struct { } r_vkstaging_region_t; // Allocate CPU-accessible memory in staging buffer -r_vkstaging_region_t R_VkStagingAlloc(r_vkstaging_user_handle_t, uint32_t size); +r_vkstaging_region_t R_VkStagingLock(r_vkstaging_user_handle_t, uint32_t size); // Notify staging that this amount of regions are about to be consumed when the next combuf ends // I.e. they're "free" from the staging standpoint -void R_VkStagingMarkFree(r_vkstaging_user_handle_t, uint32_t count); +void R_VkStagingUnlockBulk(r_vkstaging_user_handle_t, uint32_t count); // This gets called just before the combuf is ended and submitted. // Gives the last chance for the users that haven't yet used their data. From 7217b69a7b5750908c1c9512f83a7594f5cf5498 Mon Sep 17 00:00:00 2001 From: Ivan Avdeev Date: Tue, 17 Dec 2024 12:48:10 -0500 Subject: [PATCH 55/62] vk: fix unintended clearing of images on barriers Previously we forced src image layout to be UNDEFINED if the image was to be written into. 
This led the RADV driver to completely clear our so painfully
constructed ray traced frame.

The correct layout transition should probably be something like this:
only if we're not going to _read_ from the image contents can we be
sure that they are not needed anymore, and may be discarded by setting
the src layout to UNDEFINED.
---
 ref/vk/vk_combuf.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ref/vk/vk_combuf.c b/ref/vk/vk_combuf.c
index b9d4009411..1460d74f56 100644
--- a/ref/vk/vk_combuf.c
+++ b/ref/vk/vk_combuf.c
@@ -357,7 +357,7 @@ static qboolean makeImageBarrier(VkImageMemoryBarrier2* out_imb, const r_vkcombu
 	r_vk_image_t *const img = imgbar->image;
 	const qboolean is_write = (imgbar->access & ACCESS_WRITE_BITS) != 0;
 	const qboolean is_read = (imgbar->access & ACCESS_READ_BITS) != 0;
-	const VkImageLayout old_layout = is_write ? VK_IMAGE_LAYOUT_UNDEFINED : img->sync.layout;
+	const VkImageLayout old_layout = (!is_read) ? VK_IMAGE_LAYOUT_UNDEFINED : img->sync.layout;
 	const qboolean is_layout_transfer = imgbar->layout != old_layout;
 
 	ASSERT((imgbar->access & ~(ACCESS_KNOWN_BITS)) == 0);

From 38ae4fe47522e18960d73355465c1769440d1cbd Mon Sep 17 00:00:00 2001
From: Ivan Avdeev
Date: Tue, 17 Dec 2024 12:51:20 -0500
Subject: [PATCH 56/62] vk: allow specifying color for image clearing

---
 ref/vk/vk_framectl.c  | 3 ++-
 ref/vk/vk_image.c     | 8 +++++---
 ref/vk/vk_image.h     | 2 +-
 ref/vk/vk_resources.c | 2 +-
 ref/vk/vk_rtx.c       | 2 +-
 5 files changed, 10 insertions(+), 7 deletions(-)

diff --git a/ref/vk/vk_framectl.c b/ref/vk/vk_framectl.c
index 642dda3971..3b7ac39acd 100644
--- a/ref/vk/vk_framectl.c
+++ b/ref/vk/vk_framectl.c
@@ -346,7 +346,8 @@ static void enqueueRendering( vk_combuf_t* combuf, qboolean draw ) {
 		});
 
 		const VkClearValue clear_value[] = {
-			{.color = {{1., 0., 0., 0.}}},
+			// *_UNORM is float
+			{.color = {.float32 = {1.f, 0.f, 0.f, 0.f}}},
 			{.depthStencil = {1., 0.}} // TODO reverse-z
 		};
 		const VkRenderPassBeginInfo rpbi = {
diff --git a/ref/vk/vk_image.c b/ref/vk/vk_image.c
index 2e7846c9fd..4b550468ed 100644
--- a/ref/vk/vk_image.c
+++ b/ref/vk/vk_image.c
@@ -152,7 +152,7 @@ void R_VkImageDestroy(r_vk_image_t *img) {
 	*img = (r_vk_image_t){0};
 }
 
-void R_VkImageClear(r_vk_image_t *img, struct vk_combuf_s* combuf) {
+void R_VkImageClear(r_vk_image_t *img, struct vk_combuf_s* combuf, const VkClearColorValue* value) {
 	const VkImageSubresourceRange ranges[] = {{
 		.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
 		.baseMipLevel = 0,
@@ -174,8 +174,10 @@ void R_VkImageClear(r_vk_image_t *img, struct vk_combuf_s* combuf) {
 		},
 	});
 
-	const VkClearColorValue clear_value = {0};
-	vkCmdClearColorImage(combuf->cmdbuf, img->image, img->sync.layout, &clear_value, COUNTOF(ranges), ranges);
+	const VkClearColorValue zero = {0};
+	vkCmdClearColorImage(combuf->cmdbuf, img->image, img->sync.layout,
+		value ? 
value : &zero, + COUNTOF(ranges), ranges); } void R_VkImageBlit(struct vk_combuf_s *combuf, const r_vkimage_blit_args *args ) { diff --git a/ref/vk/vk_image.h b/ref/vk/vk_image.h index c951233751..7d62217b0c 100644 --- a/ref/vk/vk_image.h +++ b/ref/vk/vk_image.h @@ -51,7 +51,7 @@ r_vk_image_t R_VkImageCreate(const r_vk_image_create_t *create); void R_VkImageDestroy(r_vk_image_t *img); struct vk_combuf_s; -void R_VkImageClear(r_vk_image_t *img, struct vk_combuf_s* combuf); +void R_VkImageClear(r_vk_image_t *img, struct vk_combuf_s* combuf, const VkClearColorValue*); typedef struct { struct { diff --git a/ref/vk/vk_resources.c b/ref/vk/vk_resources.c index 93fedc2129..1e9f74d085 100644 --- a/ref/vk/vk_resources.c +++ b/ref/vk/vk_resources.c @@ -159,7 +159,7 @@ void R_VkResourcesFrameBeginStateChangeFIXME(vk_combuf_t* combuf, qboolean disco if (discontinuity || res->image.sync.write.stage == 0) { // TODO is there a better way? Can image be cleared w/o explicit clear op? WARN("discontinuity: %s", res->name); - R_VkImageClear( &res->image, combuf ); + R_VkImageClear( &res->image, combuf, NULL ); } } diff --git a/ref/vk/vk_rtx.c b/ref/vk/vk_rtx.c index 771d615f14..83bd6353b2 100644 --- a/ref/vk/vk_rtx.c +++ b/ref/vk/vk_rtx.c @@ -496,7 +496,7 @@ void VK_RayFrameEnd(const vk_ray_frame_render_args_t* args) // TODO move this to "TLAS producer" tlas->resource = RT_VkAccelPrepareTlas(args->combuf); if (tlas->resource.value.accel.accelerationStructureCount == 0) { - R_VkImageClear( &g_rtx.mainpipe_out->image, args->combuf ); + R_VkImageClear( &g_rtx.mainpipe_out->image, args->combuf, NULL ); } else { const perform_tracing_args_t trace_args = { .render_args = args, From 5e66d6dcee7ddf00f955ac4934442cf476234986 Mon Sep 17 00:00:00 2001 From: Ivan Avdeev Date: Tue, 17 Dec 2024 23:41:26 -0500 Subject: [PATCH 57/62] vk: rt: cleanup TLAS creation and building code --- ref/vk/vk_ray_accel.c | 214 ++++++++++++++---------------------------- 1 file changed, 73 insertions(+), 141 deletions(-) diff --git a/ref/vk/vk_ray_accel.c b/ref/vk/vk_ray_accel.c index 6db1e1ee36..705384207e 100644 --- a/ref/vk/vk_ray_accel.c +++ b/ref/vk/vk_ray_accel.c @@ -69,8 +69,16 @@ static struct { VkDeviceAddress tlas_geom_buffer_addr; r_flipping_buffer_t tlas_geom_buffer_alloc; - // TODO need several TLASes for N frames in flight - VkAccelerationStructureKHR tlas; + struct { + VkAccelerationStructureKHR handle; + + VkAccelerationStructureGeometryKHR geometry; + uint32_t max_prim_count; + VkAccelerationStructureBuildRangeInfoKHR range_info; + VkAccelerationStructureBuildGeometryInfoKHR geometry_info; + VkAccelerationStructureBuildSizesInfoKHR sizes_info; + } tlas; + // Per-frame data that is accumulated between RayFrameBegin and End calls struct { @@ -133,19 +141,56 @@ static VkDeviceAddress getAccelAddress(VkAccelerationStructureKHR as) { return vkGetAccelerationStructureDeviceAddressKHR(vk_core.device, &asdai); } -static qboolean buildAccel(vk_combuf_t* combuf, VkAccelerationStructureBuildGeometryInfoKHR *build_info, uint32_t scratch_buffer_size, const VkAccelerationStructureBuildRangeInfoKHR *build_ranges) { - vk_buffer_t* const geom = R_GeometryBuffer_Get(); - R_VkBufferStagingCommit(geom, combuf); - R_VkCombufIssueBarrier(combuf, (r_vkcombuf_barrier_t){ - .stage = VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_BUILD_BIT_KHR, - .buffers = { - .count = 1, - .items = &(r_vkcombuf_barrier_buffer_t){ - .buffer = geom, - .access = VK_ACCESS_2_ACCELERATION_STRUCTURE_READ_BIT_KHR, +static void tlasCreate(void) { + g_accel.tlas.geometry = 
(VkAccelerationStructureGeometryKHR) { + .sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_KHR, + .geometryType = VK_GEOMETRY_TYPE_INSTANCES_KHR, + .geometry.instances = + (VkAccelerationStructureGeometryInstancesDataKHR){ + .sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_INSTANCES_DATA_KHR, + .data.deviceAddress = 0, + .arrayOfPointers = VK_FALSE, }, - }, - }); + }; + g_accel.tlas.max_prim_count = MAX_INSTANCES; + g_accel.tlas.range_info = (VkAccelerationStructureBuildRangeInfoKHR) { + .primitiveCount = g_accel.frame.instances.count, + }; + g_accel.tlas.geometry_info = (VkAccelerationStructureBuildGeometryInfoKHR) { + .sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_GEOMETRY_INFO_KHR, + .type = VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR, + .flags = VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_KHR, + .mode = VK_BUILD_ACCELERATION_STRUCTURE_MODE_BUILD_KHR, + .geometryCount = 1, + .pGeometries = &g_accel.tlas.geometry, + .srcAccelerationStructure = VK_NULL_HANDLE, + }; + g_accel.tlas.sizes_info = getAccelSizes(&g_accel.tlas.geometry_info, &g_accel.tlas.max_prim_count); + g_accel.tlas.handle = createAccel("TLAS", g_accel.tlas.geometry_info.type, g_accel.tlas.sizes_info.accelerationStructureSize); + ASSERT(g_accel.tlas.handle != VK_NULL_HANDLE); + g_accel.tlas.geometry_info.dstAccelerationStructure = g_accel.tlas.handle; +} + +static void tlasBuild(vk_combuf_t *combuf, VkDeviceAddress instances_addr) { + R_VkBufferStagingCommit(&g_accel.tlas_geom_buffer, combuf); + { + const r_vkcombuf_barrier_buffer_t buffers[] = {{ + .buffer = &g_accel.accels_buffer, + .access = VK_ACCESS_2_ACCELERATION_STRUCTURE_READ_BIT_KHR, // TODO? WRITE? we're writing tlas here too + }, { + .buffer = &g_accel.tlas_geom_buffer, + .access = VK_ACCESS_2_ACCELERATION_STRUCTURE_READ_BIT_KHR, + }}; + R_VkCombufIssueBarrier(combuf, (r_vkcombuf_barrier_t){ + .stage = VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR, + .buffers = { + .count = COUNTOF(buffers), + .items = buffers, + }, + }); + } + + const uint32_t scratch_buffer_size = g_accel.tlas.sizes_info.buildScratchSize; //gEngine.Con_Reportf("sratch offset = %d, req=%d", g_accel.frame.scratch_offset, scratch_buffer_size); @@ -153,10 +198,12 @@ static qboolean buildAccel(vk_combuf_t* combuf, VkAccelerationStructureBuildGeom ERR("Scratch buffer overflow: left %u bytes, but need %u", MAX_SCRATCH_BUFFER - g_accel.frame.scratch_offset, scratch_buffer_size); - return false; + ASSERT(!"Scratch buffer overflow"); } - build_info->scratchData.deviceAddress = g_accel.scratch_buffer_addr + g_accel.frame.scratch_offset; + g_accel.tlas.geometry.geometry.instances.data.deviceAddress = instances_addr; + g_accel.tlas.range_info.primitiveCount = g_accel.frame.instances.count; + g_accel.tlas.geometry_info.scratchData.deviceAddress = g_accel.scratch_buffer_addr + g_accel.frame.scratch_offset; //uint32_t scratch_offset_initial = g_accel.frame.scratch_offset; g_accel.frame.scratch_offset += scratch_buffer_size; @@ -168,110 +215,9 @@ static qboolean buildAccel(vk_combuf_t* combuf, VkAccelerationStructureBuildGeom if (scope_id == -2) scope_id = R_VkGpuScope_Register("build_tlas"); const int begin_index = R_VkCombufScopeBegin(combuf, scope_id); - const VkAccelerationStructureBuildRangeInfoKHR *p_build_ranges = build_ranges; - // FIXME upload everything in bulk, and only then build blases in bulk too - vkCmdBuildAccelerationStructuresKHR(combuf->cmdbuf, 1, build_info, &p_build_ranges); + const VkAccelerationStructureBuildRangeInfoKHR *p_build_ranges = 
&g_accel.tlas.range_info; + vkCmdBuildAccelerationStructuresKHR(combuf->cmdbuf, 1, &g_accel.tlas.geometry_info, &p_build_ranges); R_VkCombufScopeEnd(combuf, begin_index, VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR); - - return true; -} - -typedef struct { - const char *debug_name; - VkAccelerationStructureKHR *p_accel; - const VkAccelerationStructureGeometryKHR *geoms; - const uint32_t *max_prim_counts; - const VkAccelerationStructureBuildRangeInfoKHR *build_ranges; - uint32_t n_geoms; - VkAccelerationStructureTypeKHR type; - qboolean dynamic; - - VkDeviceAddress *out_accel_addr; - uint32_t *inout_size; -} as_build_args_t; - -// FIXME this function isn't really needed anymore, it's for TLAS creation only -// TODO split this into smaller building blocks in a separate module -qboolean createOrUpdateAccelerationStructure(vk_combuf_t *combuf, const as_build_args_t *args) { - ASSERT(args->geoms); - ASSERT(args->n_geoms > 0); - ASSERT(args->p_accel); - - const qboolean should_create = *args->p_accel == VK_NULL_HANDLE; - - VkAccelerationStructureBuildGeometryInfoKHR build_info = { - .sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_GEOMETRY_INFO_KHR, - .type = args->type, - .flags = VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_KHR, - .mode = VK_BUILD_ACCELERATION_STRUCTURE_MODE_BUILD_KHR, - .geometryCount = args->n_geoms, - .pGeometries = args->geoms, - .srcAccelerationStructure = VK_NULL_HANDLE, - }; - - const VkAccelerationStructureBuildSizesInfoKHR build_size = getAccelSizes(&build_info, args->max_prim_counts); - - if (should_create) { - *args->p_accel = createAccel(args->debug_name, args->type, build_size.accelerationStructureSize); - - if (!args->p_accel) - return false; - - if (args->out_accel_addr) - *args->out_accel_addr = getAccelAddress(*args->p_accel); - - if (args->inout_size) - *args->inout_size = build_size.accelerationStructureSize; - - // gEngine.Con_Reportf("AS=%p, n_geoms=%u, build: %#x %d %#x", *args->p_accel, args->n_geoms, buffer_offset, asci.size, buffer_offset + asci.size); - } - - // If not enough data for building, just create - if (!combuf || !args->build_ranges) - return true; - - if (args->inout_size) - ASSERT(*args->inout_size >= build_size.accelerationStructureSize); - - build_info.dstAccelerationStructure = *args->p_accel; - return buildAccel(combuf, &build_info, build_size.buildScratchSize, args->build_ranges); -} - -static void createTlas( vk_combuf_t *combuf, VkDeviceAddress instances_addr ) { - const VkAccelerationStructureGeometryKHR tl_geom[] = { - { - .sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_KHR, - //.flags = VK_GEOMETRY_OPAQUE_BIT, - .geometryType = VK_GEOMETRY_TYPE_INSTANCES_KHR, - .geometry.instances = - (VkAccelerationStructureGeometryInstancesDataKHR){ - .sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_INSTANCES_DATA_KHR, - .data.deviceAddress = instances_addr, - .arrayOfPointers = VK_FALSE, - }, - }, - }; - const uint32_t tl_max_prim_counts[COUNTOF(tl_geom)] = { MAX_INSTANCES }; - const VkAccelerationStructureBuildRangeInfoKHR tl_build_range = { - .primitiveCount = g_accel.frame.instances.count, - }; - const as_build_args_t asrgs = { - .geoms = tl_geom, - .max_prim_counts = tl_max_prim_counts, - .build_ranges = !combuf ? 
NULL : &tl_build_range, - .n_geoms = COUNTOF(tl_geom), - .type = VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR, - // we can't really rebuild TLAS because instance count changes are not allowed .dynamic = true, - .dynamic = false, - .p_accel = &g_accel.tlas, - .debug_name = "TLAS", - .out_accel_addr = NULL, - .inout_size = NULL, - }; - if (!createOrUpdateAccelerationStructure(combuf, &asrgs)) { - gEngine.Host_Error("Could not create/update TLAS\n"); - return; - } } static qboolean blasPrepareBuild(struct rt_blas_s *blas, VkDeviceAddress geometry_addr) { @@ -474,22 +420,8 @@ vk_resource_t RT_VkAccelPrepareTlas(vk_combuf_t *combuf) { // Build all scheduled BLASes blasBuildPerform(combuf, geom); - { - r_vkcombuf_barrier_buffer_t buffers[] = {{ - .buffer = &g_accel.accels_buffer, - .access = VK_ACCESS_2_ACCELERATION_STRUCTURE_READ_BIT_KHR, - }}; - R_VkCombufIssueBarrier(combuf, (r_vkcombuf_barrier_t){ - .stage = VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR, - .buffers = { - .count = COUNTOF(buffers), - .items = buffers, - }, - }); - } - // 2. Build TLAS - createTlas(combuf, g_accel.tlas_geom_buffer_addr + instance_offset * sizeof(VkAccelerationStructureInstanceKHR)); + tlasBuild(combuf, g_accel.tlas_geom_buffer_addr + instance_offset * sizeof(VkAccelerationStructureInstanceKHR)); DEBUG_END(combuf->cmdbuf); // Consume instances into this frame, no further instances are expected @@ -503,7 +435,7 @@ vk_resource_t RT_VkAccelPrepareTlas(vk_combuf_t *combuf) { .accel = (VkWriteDescriptorSetAccelerationStructureKHR) { .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR, .accelerationStructureCount = 1, - .pAccelerationStructures = &g_accel.tlas, + .pAccelerationStructures = &g_accel.tlas.handle, .pNext = NULL, }, }, @@ -550,8 +482,8 @@ qboolean RT_VkAccelInit(void) { } void RT_VkAccelShutdown(void) { - if (g_accel.tlas != VK_NULL_HANDLE) - vkDestroyAccelerationStructureKHR(vk_core.device, g_accel.tlas, NULL); + if (g_accel.tlas.handle != VK_NULL_HANDLE) + vkDestroyAccelerationStructureKHR(vk_core.device, g_accel.tlas.handle, NULL); VK_BufferDestroy(&g_accel.scratch_buffer); VK_BufferDestroy(&g_accel.accels_buffer); @@ -575,12 +507,12 @@ void RT_VkAccelNewMap(void) { // Recreate tlas // Why here and not in init: to make sure that its memory is preserved. Map init will clear all memory regions. { - if (g_accel.tlas != VK_NULL_HANDLE) { - vkDestroyAccelerationStructureKHR(vk_core.device, g_accel.tlas, NULL); - g_accel.tlas = VK_NULL_HANDLE; + if (g_accel.tlas.handle != VK_NULL_HANDLE) { + vkDestroyAccelerationStructureKHR(vk_core.device, g_accel.tlas.handle, NULL); + g_accel.tlas.handle = VK_NULL_HANDLE; } - createTlas(VK_NULL_HANDLE, g_accel.tlas_geom_buffer_addr); + tlasCreate(); } } From e76d6d9880afeb71f5ef1a107f21a383ae5f914a Mon Sep 17 00:00:00 2001 From: Ivan Avdeev Date: Tue, 17 Dec 2024 23:46:32 -0500 Subject: [PATCH 58/62] vk: update TODO --- ref/vk/TODO.md | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/ref/vk/TODO.md b/ref/vk/TODO.md index d3ca2ef6e9..2065dafcd9 100644 --- a/ref/vk/TODO.md +++ b/ref/vk/TODO.md @@ -5,13 +5,19 @@ - [ ] wait for frame fence only really before actually starting to build combuf in R_BeginFrame() - why: there should be nothing to synchronize with - why: more straightforward dependency tracking + - why not: waiting on frame fence allows freeing up staging and other temp memory - [ ] Remove second semaphore from submit, replace it with explicit barriers for e.g. 
geom buffer
-  - why: best practice validation complains about too wide ALL_COMMANDS semaphore
+  - [x] why: best practice validation complains about too wide ALL_COMMANDS semaphore
   - why: explicit barriers are more clear, better perf possible too
 - [ ] Do not lose barrier-tracking state between frames
 - [ ] Render graph
 - [ ] performance profiling and comparison
 
+## 2024-12-17 E385
+- [x] fix rendering on amdgpu+radv
+### After stream
+- [x] cleanup TLAS creation and building code
+
 ## 2024-12-12 E384
 - [x] track image sync state with the image object itself (and not with vk_resource)

From 8fb8edee5444e2442a132e8068fb228bcbdabcbc Mon Sep 17 00:00:00 2001
From: Ivan Avdeev
Date: Wed, 18 Dec 2024 17:22:40 -0500
Subject: [PATCH 59/62] vk: handle emissive brush surfaces earlier in the
 loading loop

This makes the c0a0d toxic pool emissive again, but it still doesn't make
all known toxic water objects emissive.
---
 ref/vk/vk_brush.c | 34 ++++++++++++++++++----------------
 1 file changed, 18 insertions(+), 16 deletions(-)

diff --git a/ref/vk/vk_brush.c b/ref/vk/vk_brush.c
index 8b8112f10a..64c073839a 100644
--- a/ref/vk/vk_brush.c
+++ b/ref/vk/vk_brush.c
@@ -1,7 +1,6 @@
 #include "vk_brush.h"
 
 #include "vk_core.h"
-#include "vk_const.h"
 #include "vk_math.h"
 #include "r_textures.h"
 #include "vk_lightmap.h"
@@ -10,7 +9,6 @@
 #include "vk_light.h"
 #include "vk_mapents.h"
 #include "r_speeds.h"
-#include "vk_staging.h"
 #include "vk_logs.h"
 #include "profiler.h"
 #include "arrays.h"
@@ -1408,6 +1406,24 @@ static qboolean fillBrushSurfaces(fill_geometries_args_t args) {
 
 		const xvk_patch_surface_t *const psurf = R_VkPatchGetSurface(surface_index);
 		const brush_surface_type_e type = getSurfaceType(surf, surface_index, args.is_worldmodel);
+
+		// Check whether this surface is emissive early, before bailing out on surface type.
+		// TODO consider moving this out of this loop, as it still might skip some surfaces,
+		// e.g. if the model doesn't have any static surfaces at all.
+		surfaceHandleEmissive((SurfaceHandleEmissiveArgs){
+			.mod = args.mod,
+			.func_any = args.func_any,
+			.is_static = args.is_static,
+			.bmodel = args.bmodel,
+			.surf = surf,
+			.surface_index = surface_index,
+			.type = type,
+			.tex_id = tex_id,
+			.psurf = psurf,
+			.model_geometry = model_geometry,
+			.emissive_surfaces_count = &emissive_surfaces_count,
+		});
+
 		switch (type) {
 		case BrushSurface_Water:
 		case BrushSurface_WaterSide:
@@ -1495,20 +1511,6 @@ static qboolean fillBrushSurfaces(fill_geometries_args_t args) {
 		if (type == BrushSurface_Animated)
 			model_geometry->ye_olde_texture = -1;
 
-		surfaceHandleEmissive((SurfaceHandleEmissiveArgs){
-			.mod = args.mod,
-			.func_any = args.func_any,
-			.is_static = args.is_static,
-			.bmodel = args.bmodel,
-			.surf = surf,
-			.surface_index = surface_index,
-			.type = type,
-			.tex_id = tex_id,
-			.psurf = psurf,
-			.model_geometry = model_geometry,
-			.emissive_surfaces_count = &emissive_surfaces_count,
-		});
-
 		model_geometry->surf_deprecate = surf;
 
 		model_geometry->vertex_offset = args.base_vertex_offset;

From 149cd01e14bf877fa31270718d4838c66ff0b49d Mon Sep 17 00:00:00 2001
From: Ivan Avdeev
Date: Wed, 18 Dec 2024 19:04:32 -0500
Subject: [PATCH 60/62] vk: remove old validation message silencing workaround

This was a bug in the validation layers; it was fixed back in 2021.
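
For context: the workaround removed below is the usual message-ID filtering
pattern for VK_EXT_debug_utils callbacks. A minimal self-contained sketch of
that pattern, assuming only the Vulkan headers (the function name and
plain-stderr logging are illustrative, not the engine's actual callback):

#include <vulkan/vulkan.h>
#include <stdio.h>
#include <string.h>

static VkBool32 VKAPI_PTR exampleDebugCallback(
		VkDebugUtilsMessageSeverityFlagBitsEXT severity,
		VkDebugUtilsMessageTypeFlagsEXT types,
		const VkDebugUtilsMessengerCallbackDataEXT *data,
		void *user_data) {
	(void)severity; (void)types; (void)user_data;

	// Silence one known-false-positive message by its VUID name.
	// Note: pMessageIdName may be NULL, so check it before comparing.
	if (data->pMessageIdName && 0 == strcmp(data->pMessageIdName, "VUID-vkMapMemory-memory-00683"))
		return VK_FALSE;

	fprintf(stderr, "vk/dbg: %s\n", data->pMessage);

	// Per the spec, application callbacks should always return VK_FALSE.
	return VK_FALSE;
}

The VUID string matched here is the one the engine used to silence; with the
validation layer bug long fixed, the filter serves no purpose anymore.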
---
 ref/vk/vk_core.c | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/ref/vk/vk_core.c b/ref/vk/vk_core.c
index 3df2cab699..48763d5152 100644
--- a/ref/vk/vk_core.c
+++ b/ref/vk/vk_core.c
@@ -123,9 +123,6 @@ static VkBool32 VKAPI_PTR debugCallback(
 	(void)(messageTypes);
 	(void)(messageSeverity);
 
-	if (Q_strcmp(pCallbackData->pMessageIdName, "VUID-vkMapMemory-memory-00683") == 0)
-		return VK_FALSE;
-
 	// TODO better messages, not only errors, what are other arguments for, ...
 	if (messageSeverity == VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT) {
 		gEngine.Con_Printf(S_ERROR "vk/dbg: %s\n", pCallbackData->pMessage);

From 6ac2fe362b3a9665376a29b1c16d871e2426a5e8 Mon Sep 17 00:00:00 2001
From: Ivan Avdeev
Date: Wed, 18 Dec 2024 19:06:42 -0500
Subject: [PATCH 61/62] vk: use correct swapchain image sync state

A swapchain framebuffer image in the VK_IMAGE_LAYOUT_PRESENT_SRC_KHR layout
has zero access flags and is probably synchronized at the bottom-of-pipe
stage. At the very least, this pleases the validation layers.
---
 ref/vk/vk_swapchain.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/ref/vk/vk_swapchain.c b/ref/vk/vk_swapchain.c
index ad8aabd80a..605abb6d0d 100644
--- a/ref/vk/vk_swapchain.c
+++ b/ref/vk/vk_swapchain.c
@@ -276,12 +276,12 @@ r_vk_swapchain_framebuffer_t R_VkSwapchainAcquire( VkSemaphore sem_image_availa
 		.sync = {
 			.layout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
 			.write = {
-				.access = VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT,
-				.stage = VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT
+				.access = VK_ACCESS_2_NONE,
+				.stage = VK_PIPELINE_STAGE_2_NONE,
 			},
 			.read = {
-				.access = VK_ACCESS_2_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_2_MEMORY_READ_BIT,
-				.stage = VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT
+				.access = VK_ACCESS_2_NONE,
+				.stage = VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT,
 			},
 		},
 	};

From 9ad08888e8601fbed07b2e406df1db467922f21c Mon Sep 17 00:00:00 2001
From: Ivan Avdeev
Date: Wed, 18 Dec 2024 19:14:09 -0500
Subject: [PATCH 62/62] vk: also print out current resolution for r_speeds

---
 ref/vk/r_speeds.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/ref/vk/r_speeds.c b/ref/vk/r_speeds.c
index 64c772ae12..d3bde354aa 100644
--- a/ref/vk/r_speeds.c
+++ b/ref/vk/r_speeds.c
@@ -946,6 +946,7 @@ void R_SpeedsDisplayMore(uint32_t prev_frame_index, const struct vk_combuf_scope
 		speedsPrintf( "Driver: %u.%u.%u, Vulkan: %u.%u.%u\n",
 			XVK_PARSE_VERSION(vk_core.physical_device.properties.driverVersion),
 			XVK_PARSE_VERSION(vk_core.physical_device.properties.apiVersion));
+		speedsPrintf( "Resolution: %ux%u\n", vk_frame.width, vk_frame.height);
 	}
 
 	const uint32_t events = g_aprof.events_last_frame - prev_frame_index;
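
A closing note on the swapchain sync-state change in PATCH 61: consuming that
tracked state on the first write of a new frame amounts to an image barrier
with an essentially empty source side. Below is a minimal sketch in plain
VK_KHR_synchronization2 / Vulkan 1.3 terms, assuming the image has already
been presented at least once (first-ever use would transition from
VK_IMAGE_LAYOUT_UNDEFINED instead). This is not the renderer's combuf
barrier-tracking API; cmdbuf and swapchain_image are placeholder handles:

#include <vulkan/vulkan.h>

static void exampleAcquireBarrier(VkCommandBuffer cmdbuf, VkImage swapchain_image) {
	const VkImageMemoryBarrier2 barrier = {
		.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
		// Source side mirrors the tracked state above: PRESENT_SRC_KHR has
		// zero access flags, and bottom-of-pipe is the only stage to wait on.
		.srcStageMask = VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT,
		.srcAccessMask = VK_ACCESS_2_NONE,
		// Destination side: the first color-attachment write of the new frame.
		.dstStageMask = VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT,
		.dstAccessMask = VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT,
		.oldLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
		.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
		.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
		.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
		.image = swapchain_image,
		.subresourceRange = {
			.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
			.levelCount = 1,
			.layerCount = 1,
		},
	};
	const VkDependencyInfo dep = {
		.sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
		.imageMemoryBarrierCount = 1,
		.pImageMemoryBarriers = &barrier,
	};
	vkCmdPipelineBarrier2(cmdbuf, &dep);
}

The NONE access plus bottom-of-pipe source matches what the commit message
describes for the PRESENT_SRC_KHR layout, which is presumably why the earlier
COLOR_ATTACHMENT read/write flags were tripping the validation layers.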