From d28e9dc006697bd9ab424a1a5060da2ee7b80525 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Henrik=20Rydg=C3=A5rd?=
Date: Fri, 5 Apr 2024 16:36:47 +0200
Subject: [PATCH 1/5] Minor cleanup

---
 Common/GPU/Vulkan/VulkanContext.cpp | 14 ++
 Common/GPU/Vulkan/VulkanContext.h | 7 +-
 Common/GPU/Vulkan/VulkanQueueRunner.cpp | 165 ++++++++++++------------
 3 files changed, 103 insertions(+), 83 deletions(-)

diff --git a/Common/GPU/Vulkan/VulkanContext.cpp b/Common/GPU/Vulkan/VulkanContext.cpp
index 56f99d1ce7ba..238c15ab1b7e 100644
--- a/Common/GPU/Vulkan/VulkanContext.cpp
+++ b/Common/GPU/Vulkan/VulkanContext.cpp
@@ -76,6 +76,20 @@ const char *VulkanPresentModeToString(VkPresentModeKHR presentMode) {
 	}
 }
 
+const char *VulkanImageLayoutToString(VkImageLayout imageLayout) {
+	switch (imageLayout) {
+	case VK_IMAGE_LAYOUT_UNDEFINED: return "UNDEFINED";
+	case VK_IMAGE_LAYOUT_GENERAL: return "GENERAL";
+	case VK_IMAGE_LAYOUT_PREINITIALIZED: return "PREINITIALIZED";
+	case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: return "TRANSFER_SRC_OPTIMAL";
+	case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: return "TRANSFER_DST_OPTIMAL";
+	case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: return "SHADER_READ_ONLY_OPTIMAL";
+	case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: return "COLOR_ATTACHMENT_OPTIMAL";
+	case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: return "DEPTH_STENCIL_ATTACHMENT_OPTIMAL";
+	default: return "OTHER";
+	}
+}
+
 VulkanContext::VulkanContext() {
 	// Do nothing here.
 }

diff --git a/Common/GPU/Vulkan/VulkanContext.h b/Common/GPU/Vulkan/VulkanContext.h
index a3d494fa4aea..5683bc2713de 100644
--- a/Common/GPU/Vulkan/VulkanContext.h
+++ b/Common/GPU/Vulkan/VulkanContext.h
@@ -419,7 +419,7 @@ class VulkanContext {
 	bool CheckLayers(const std::vector<LayerProperties> &layer_props, const std::vector<const char *> &layer_names) const;
 
-	WindowSystem winsys_;
+	WindowSystem winsys_{};
 
 	// Don't use the real types here to avoid having to include platform-specific stuff
 	// that we really don't want in everything that uses VulkanContext.
@@ -483,7 +483,7 @@ class VulkanContext {
 	std::vector<VkDebugUtilsMessengerEXT> utils_callbacks;
 
 	VkSwapchainKHR swapchain_ = VK_NULL_HANDLE;
-	VkFormat swapchainFormat_;
+	VkFormat swapchainFormat_ = VK_FORMAT_UNDEFINED;
 
 	uint32_t queue_count = 0;
 
@@ -492,7 +492,7 @@ class VulkanContext {
 	VkSurfaceCapabilitiesKHR surfCapabilities_{};
 	std::vector<VkSurfaceFormatKHR> surfFormats_{};
 
-	VkPresentModeKHR presentMode_;
+	VkPresentModeKHR presentMode_ = VK_PRESENT_MODE_FIFO_KHR;
 	std::vector<VkPresentModeKHR> availablePresentModes_;
 
 	std::vector<VkCommandBuffer> cmdQueue_;
 
@@ -515,6 +515,7 @@ bool GLSLtoSPV(const VkShaderStageFlagBits shader_type, const char *sourceCode,
 const char *VulkanColorSpaceToString(VkColorSpaceKHR colorSpace);
 const char *VulkanFormatToString(VkFormat format);
 const char *VulkanPresentModeToString(VkPresentModeKHR presentMode);
+const char *VulkanImageLayoutToString(VkImageLayout imageLayout);
 
 std::string FormatDriverVersion(const VkPhysicalDeviceProperties &props);

diff --git a/Common/GPU/Vulkan/VulkanQueueRunner.cpp b/Common/GPU/Vulkan/VulkanQueueRunner.cpp
index 949355e81d79..716aa3ead851 100644
--- a/Common/GPU/Vulkan/VulkanQueueRunner.cpp
+++ b/Common/GPU/Vulkan/VulkanQueueRunner.cpp
@@ -931,86 +931,84 @@ void VulkanQueueRunner::LogReadbackImage(const VKRStep &step) {
 	INFO_LOG(G3D, "%s", StepToString(vulkan_, step).c_str());
 }
 
-void TransitionToOptimal(VkCommandBuffer cmd, VkImage colorImage, VkImageLayout colorLayout, VkImage depthStencilImage, VkImageLayout depthStencilLayout, int numLayers, VulkanBarrierBatch *recordBarrier) {
-	if (colorLayout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) {
-		VkPipelineStageFlags srcStageMask = 0;
-		VkAccessFlags srcAccessMask = 0;
-		switch (colorLayout) {
-		case VK_IMAGE_LAYOUT_UNDEFINED:
-			// No need to specify stage or access.
-			break;
-		case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
-			// Already the right color layout. Unclear that we need to do a lot here..
-			break;
-		case VK_IMAGE_LAYOUT_GENERAL:
-			// We came from the Mali workaround, and are transitioning back to COLOR_ATTACHMENT_OPTIMAL.
-			srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
-			srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
-			break;
-		case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
-			srcAccessMask = VK_ACCESS_SHADER_READ_BIT;
-			srcStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
-			break;
-		case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
-			srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
-			srcStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
-			break;
-		case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
-			srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
-			srcStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
-			break;
-		default:
-			_dbg_assert_msg_(false, "TransitionToOptimal: Unexpected color layout %d", (int)colorLayout);
-			break;
-		}
-		recordBarrier->TransitionImage(
-			colorImage, 0, 1, numLayers, VK_IMAGE_ASPECT_COLOR_BIT,
-			colorLayout,
-			VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
-			srcAccessMask,
-			VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
-			srcStageMask,
-			VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT);
-	}
-
-	if (depthStencilImage != VK_NULL_HANDLE && depthStencilLayout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL) {
-		VkPipelineStageFlags srcStageMask = 0;
-		VkAccessFlags srcAccessMask = 0;
-		switch (depthStencilLayout) {
-		case VK_IMAGE_LAYOUT_UNDEFINED:
-			// No need to specify stage or access.
-			break;
-		case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
-			// Already the right depth layout. Unclear that we need to do a lot here..
-			break;
-		case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
-			srcAccessMask = VK_ACCESS_SHADER_READ_BIT;
-			srcStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
-			break;
-		case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
-			srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
-			srcStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
-			break;
-		case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
-			srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
-			srcStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
-			break;
-		default:
-			_dbg_assert_msg_(false, "TransitionToOptimal: Unexpected depth layout %d", (int)depthStencilLayout);
-			break;
-		}
-		recordBarrier->TransitionImage(
-			depthStencilImage, 0, 1, numLayers, VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT,
-			depthStencilLayout,
-			VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
-			srcAccessMask,
-			VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT,
-			srcStageMask,
-			VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT);
+void TransitionColorToOptimal(VkCommandBuffer cmd, VkImage colorImage, VkImageLayout colorLayout, int numLayers, VulkanBarrierBatch *recordBarrier) {
+	VkPipelineStageFlags srcStageMask = 0;
+	VkAccessFlags srcAccessMask = 0;
+	switch (colorLayout) {
+	case VK_IMAGE_LAYOUT_UNDEFINED:
+		// No need to specify stage or access.
+		break;
+	case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
+		// Already the right color layout, nothing more to do.
+		return;
+	case VK_IMAGE_LAYOUT_GENERAL:
+		// We came from the Mali workaround, and are transitioning back to COLOR_ATTACHMENT_OPTIMAL.
+		srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+		srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
+		break;
+	case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
+		srcAccessMask = VK_ACCESS_SHADER_READ_BIT;
+		srcStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
+		break;
+	case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
+		srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
+		srcStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
+		break;
+	case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
+		srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
+		srcStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
+		break;
+	default:
+		_dbg_assert_msg_(false, "TransitionColorToOptimal: Unexpected layout %d", (int)colorLayout);
+		break;
+	}
+	recordBarrier->TransitionImage(
+		colorImage, 0, 1, numLayers, VK_IMAGE_ASPECT_COLOR_BIT,
+		colorLayout,
+		VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
+		srcAccessMask,
+		VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
+		srcStageMask,
+		VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT);
+}
+
+void TransitionDepthToOptimal(VkCommandBuffer cmd, VkImage depthStencilImage, VkImageLayout depthStencilLayout, int numLayers, VulkanBarrierBatch *recordBarrier) {
+	VkPipelineStageFlags srcStageMask = 0;
+	VkAccessFlags srcAccessMask = 0;
+	switch (depthStencilLayout) {
+	case VK_IMAGE_LAYOUT_UNDEFINED:
+		// No need to specify stage or access.
+		break;
+	case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
+		// Already the right depth layout, nothing more to do.
+ return; + case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: + srcAccessMask = VK_ACCESS_SHADER_READ_BIT; + srcStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT; + break; + case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: + srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT; + srcStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT; + break; + case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: + srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; + srcStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT; + break; + default: + _dbg_assert_msg_(false, "TransitionDepthToOptimal: Unexpected layout %d", (int)depthStencilLayout); + break; } + recordBarrier->TransitionImage( + depthStencilImage, 0, 1, numLayers, VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT, + depthStencilLayout, + VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, + srcAccessMask, + VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT, + srcStageMask, + VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT); } -void TransitionFromOptimal(VkCommandBuffer cmd, VkImage colorImage, VkImageLayout colorLayout, VkImage depthStencilImage, int numLayers, VkImageLayout depthStencilLayout) { +void TransitionFromOptimal(VkCommandBuffer cmd, VkImage colorImage, VkImageLayout colorLayout, VkImage depthStencilImage, VkImageLayout depthStencilLayout, int numLayers) { VkPipelineStageFlags srcStageMask = 0; VkPipelineStageFlags dstStageMask = 0; @@ -1180,7 +1178,7 @@ void VulkanQueueRunner::PerformRenderPass(const VKRStep &step, VkCommandBuffer c // This reads the layout of the color and depth images, and chooses a render pass using them that // will transition to the desired final layout. // - // NOTE: Flushes recordBarrier_. + // NOTE: Unconditionally flushes recordBarrier_. VKRRenderPass *renderPass = PerformBindFramebufferAsRenderTarget(step, cmd); int curWidth = step.render.framebuffer ? step.render.framebuffer->width : vulkan_->GetBackbufferWidth(); @@ -1401,9 +1399,11 @@ void VulkanQueueRunner::PerformRenderPass(const VKRStep &step, VkCommandBuffer c } vkCmdEndRenderPass(cmd); + _dbg_assert_(recordBarrier_.empty()); + if (fb) { // If the desired final layout aren't the optimal layout for rendering, transition. 
- TransitionFromOptimal(cmd, fb->color.image, step.render.finalColorLayout, fb->depth.image, fb->numLayers, step.render.finalDepthStencilLayout); + TransitionFromOptimal(cmd, fb->color.image, step.render.finalColorLayout, fb->depth.image, step.render.finalDepthStencilLayout, fb->numLayers); fb->color.layout = step.render.finalColorLayout; fb->depth.layout = step.render.finalDepthStencilLayout; @@ -1422,6 +1422,8 @@ VKRRenderPass *VulkanQueueRunner::PerformBindFramebufferAsRenderTarget(const VKR VkSampleCountFlagBits sampleCount; + recordBarrier_.Flush(cmd); + if (step.render.framebuffer) { _dbg_assert_(step.render.finalColorLayout != VK_IMAGE_LAYOUT_UNDEFINED); _dbg_assert_(step.render.finalDepthStencilLayout != VK_IMAGE_LAYOUT_UNDEFINED); @@ -1455,7 +1457,10 @@ VKRRenderPass *VulkanQueueRunner::PerformBindFramebufferAsRenderTarget(const VKR fb->color.layout = VK_IMAGE_LAYOUT_GENERAL; } - TransitionToOptimal(cmd, fb->color.image, fb->color.layout, fb->depth.image, fb->depth.layout, fb->numLayers, &recordBarrier_); + TransitionColorToOptimal(cmd, fb->color.image, fb->color.layout, fb->numLayers, &recordBarrier_); + if (fb->depth.image && RenderPassTypeHasDepth(step.render.renderPassType)) { + TransitionDepthToOptimal(cmd, fb->depth.image, fb->depth.layout, fb->numLayers, &recordBarrier_); + } // The transition from the optimal format happens after EndRenderPass, now that we don't // do it as part of the renderpass itself anymore. From 4a5eb4bdfbb36bdd70963a48c7d74ef392f1799a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Henrik=20Rydg=C3=A5rd?= Date: Fri, 5 Apr 2024 17:09:57 +0200 Subject: [PATCH 2/5] More cleanup --- Common/GPU/Vulkan/VulkanDebug.cpp | 7 -- Common/GPU/Vulkan/VulkanQueueRunner.cpp | 95 +++++++++++-------------- 2 files changed, 40 insertions(+), 62 deletions(-) diff --git a/Common/GPU/Vulkan/VulkanDebug.cpp b/Common/GPU/Vulkan/VulkanDebug.cpp index b9638394adaf..bc7dafb4e45c 100644 --- a/Common/GPU/Vulkan/VulkanDebug.cpp +++ b/Common/GPU/Vulkan/VulkanDebug.cpp @@ -62,13 +62,6 @@ VKAPI_ATTR VkBool32 VKAPI_CALL VulkanDebugUtilsCallback( return false; break; - case 606910136: - case -392708513: - case -384083808: - // VUID-vkCmdDraw-None-02686 - // Kinda false positive, or at least very unnecessary, now that I solved the real issue. - // See https://github.com/hrydgard/ppsspp/pull/16354 - return false; case -375211665: // VUID-vkAllocateMemory-pAllocateInfo-01713 // Can happen when VMA aggressively tries to allocate aperture memory for upload. 
It gracefully diff --git a/Common/GPU/Vulkan/VulkanQueueRunner.cpp b/Common/GPU/Vulkan/VulkanQueueRunner.cpp index 716aa3ead851..5618fe602bb4 100644 --- a/Common/GPU/Vulkan/VulkanQueueRunner.cpp +++ b/Common/GPU/Vulkan/VulkanQueueRunner.cpp @@ -931,7 +931,7 @@ void VulkanQueueRunner::LogReadbackImage(const VKRStep &step) { INFO_LOG(G3D, "%s", StepToString(vulkan_, step).c_str()); } -void TransitionColorToOptimal(VkCommandBuffer cmd, VkImage colorImage, VkImageLayout colorLayout, int numLayers, VulkanBarrierBatch *recordBarrier) { +static void TransitionColorToOptimal(VkCommandBuffer cmd, VkImage colorImage, VkImageLayout colorLayout, int numLayers, VulkanBarrierBatch *recordBarrier) { VkPipelineStageFlags srcStageMask = 0; VkAccessFlags srcAccessMask = 0; switch (colorLayout) { @@ -972,7 +972,7 @@ void TransitionColorToOptimal(VkCommandBuffer cmd, VkImage colorImage, VkImageLa VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT); } -void TransitionDepthToOptimal(VkCommandBuffer cmd, VkImage depthStencilImage, VkImageLayout depthStencilLayout, int numLayers, VulkanBarrierBatch *recordBarrier) { +static void TransitionDepthToOptimal(VkCommandBuffer cmd, VkImage depthStencilImage, VkImageLayout depthStencilLayout, int numLayers, VulkanBarrierBatch *recordBarrier) { VkPipelineStageFlags srcStageMask = 0; VkAccessFlags srcAccessMask = 0; switch (depthStencilLayout) { @@ -1008,33 +1008,24 @@ void TransitionDepthToOptimal(VkCommandBuffer cmd, VkImage depthStencilImage, Vk VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT); } -void TransitionFromOptimal(VkCommandBuffer cmd, VkImage colorImage, VkImageLayout colorLayout, VkImage depthStencilImage, VkImageLayout depthStencilLayout, int numLayers) { - VkPipelineStageFlags srcStageMask = 0; - VkPipelineStageFlags dstStageMask = 0; - - // If layouts aren't optimal, transition them. - VkImageMemoryBarrier barrier[2]{}; - - int barrierCount = 0; +static void TransitionFromOptimal(VkCommandBuffer cmd, VkImage colorImage, VkImageLayout colorLayout, VkImage depthStencilImage, VkImageLayout depthStencilLayout, int numLayers, VulkanBarrierBatch *recordBarrier) { if (colorLayout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) { - barrier[0].sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; - barrier[0].pNext = nullptr; - srcStageMask |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; - barrier[0].srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; + VkPipelineStageFlags dstStageMask = 0; + VkAccessFlags dstAccessMask = 0; // And the final transition. // Don't need to transition it if VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL. 
switch (colorLayout) { case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: - barrier[0].dstAccessMask = VK_ACCESS_SHADER_READ_BIT; - dstStageMask |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT; + dstAccessMask = VK_ACCESS_SHADER_READ_BIT; + dstStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT; break; case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: - barrier[0].dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT; - dstStageMask |= VK_PIPELINE_STAGE_TRANSFER_BIT; + dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT; + dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT; break; case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: - barrier[0].dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT; - dstStageMask |= VK_PIPELINE_STAGE_TRANSFER_BIT; + dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT; + dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT; break; case VK_IMAGE_LAYOUT_UNDEFINED: case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: @@ -1044,36 +1035,32 @@ void TransitionFromOptimal(VkCommandBuffer cmd, VkImage colorImage, VkImageLayou _dbg_assert_msg_(false, "TransitionFromOptimal: Unexpected final color layout %d", (int)colorLayout); break; } - barrier[0].oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; - barrier[0].newLayout = colorLayout; - barrier[0].image = colorImage; - barrier[0].subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; - barrier[0].subresourceRange.baseMipLevel = 0; - barrier[0].subresourceRange.levelCount = 1; - barrier[0].subresourceRange.layerCount = numLayers; - barrier[0].srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; - barrier[0].dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; - barrierCount++; + recordBarrier->TransitionImage( + colorImage, 0, 1, numLayers, VK_IMAGE_ASPECT_COLOR_BIT, + VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, + colorLayout, + VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // srcAccessMask + dstAccessMask, + VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, + dstStageMask + ); } if (depthStencilImage && depthStencilLayout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL) { - barrier[barrierCount].sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; - barrier[barrierCount].pNext = nullptr; - - srcStageMask |= VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT; - barrier[barrierCount].srcAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT; + VkPipelineStageFlags dstStageMask = 0; + VkAccessFlags dstAccessMask = 0; switch (depthStencilLayout) { case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: - barrier[barrierCount].dstAccessMask |= VK_ACCESS_SHADER_READ_BIT; - dstStageMask |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT; + dstAccessMask = VK_ACCESS_SHADER_READ_BIT; + dstStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT; break; case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: - barrier[barrierCount].dstAccessMask |= VK_ACCESS_TRANSFER_READ_BIT; - dstStageMask |= VK_PIPELINE_STAGE_TRANSFER_BIT; + dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT; + dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT; break; case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: - barrier[barrierCount].dstAccessMask |= VK_ACCESS_TRANSFER_READ_BIT; - dstStageMask |= VK_PIPELINE_STAGE_TRANSFER_BIT; + dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT; + dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT; break; case VK_IMAGE_LAYOUT_UNDEFINED: case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: @@ -1083,19 +1070,15 @@ void TransitionFromOptimal(VkCommandBuffer cmd, VkImage colorImage, VkImageLayou _dbg_assert_msg_(false, "TransitionFromOptimal: Unexpected final depth layout %d", (int)depthStencilLayout); break; } - barrier[barrierCount].oldLayout = 
VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; - barrier[barrierCount].newLayout = depthStencilLayout; - barrier[barrierCount].image = depthStencilImage; - barrier[barrierCount].subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; - barrier[barrierCount].subresourceRange.baseMipLevel = 0; - barrier[barrierCount].subresourceRange.levelCount = 1; - barrier[barrierCount].subresourceRange.layerCount = numLayers; - barrier[barrierCount].srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; - barrier[barrierCount].dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; - barrierCount++; - } - if (barrierCount) { - vkCmdPipelineBarrier(cmd, srcStageMask, dstStageMask, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, barrierCount, barrier); + recordBarrier->TransitionImage( + depthStencilImage, 0, 1, numLayers, VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT, + VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, + depthStencilLayout, + VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT, // srcAccessMask + dstAccessMask, + VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, + dstStageMask + ); } } @@ -1403,7 +1386,9 @@ void VulkanQueueRunner::PerformRenderPass(const VKRStep &step, VkCommandBuffer c if (fb) { // If the desired final layout aren't the optimal layout for rendering, transition. - TransitionFromOptimal(cmd, fb->color.image, step.render.finalColorLayout, fb->depth.image, step.render.finalDepthStencilLayout, fb->numLayers); + // TODO: Not sure when we use this now? + TransitionFromOptimal(cmd, fb->color.image, step.render.finalColorLayout, fb->depth.image, step.render.finalDepthStencilLayout, fb->numLayers, &recordBarrier_); + recordBarrier_.Flush(cmd); fb->color.layout = step.render.finalColorLayout; fb->depth.layout = step.render.finalDepthStencilLayout; From e00f1f10a304001911183a288e654989c04afc22 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Henrik=20Rydg=C3=A5rd?= Date: Fri, 5 Apr 2024 17:48:07 +0200 Subject: [PATCH 3/5] More barrier code simplification --- Common/GPU/Vulkan/VulkanQueueRunner.cpp | 148 ++++++++++++------------ 1 file changed, 74 insertions(+), 74 deletions(-) diff --git a/Common/GPU/Vulkan/VulkanQueueRunner.cpp b/Common/GPU/Vulkan/VulkanQueueRunner.cpp index 5618fe602bb4..30a6e91a84d5 100644 --- a/Common/GPU/Vulkan/VulkanQueueRunner.cpp +++ b/Common/GPU/Vulkan/VulkanQueueRunner.cpp @@ -1008,78 +1008,76 @@ static void TransitionDepthToOptimal(VkCommandBuffer cmd, VkImage depthStencilIm VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT); } -static void TransitionFromOptimal(VkCommandBuffer cmd, VkImage colorImage, VkImageLayout colorLayout, VkImage depthStencilImage, VkImageLayout depthStencilLayout, int numLayers, VulkanBarrierBatch *recordBarrier) { - if (colorLayout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) { - VkPipelineStageFlags dstStageMask = 0; - VkAccessFlags dstAccessMask = 0; - // And the final transition. - // Don't need to transition it if VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL. 
-		switch (colorLayout) {
-		case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
-			dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
-			dstStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
-			break;
-		case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
-			dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
-			dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
-			break;
-		case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
-			dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
-			dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
-			break;
-		case VK_IMAGE_LAYOUT_UNDEFINED:
-		case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
-			// Nothing to do.
-			break;
-		default:
-			_dbg_assert_msg_(false, "TransitionFromOptimal: Unexpected final color layout %d", (int)colorLayout);
-			break;
-		}
-		recordBarrier->TransitionImage(
-			colorImage, 0, 1, numLayers, VK_IMAGE_ASPECT_COLOR_BIT,
-			VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
-			colorLayout,
-			VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // srcAccessMask
-			dstAccessMask,
-			VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
-			dstStageMask
-		);
-	}
-
-	if (depthStencilImage && depthStencilLayout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL) {
-		VkPipelineStageFlags dstStageMask = 0;
-		VkAccessFlags dstAccessMask = 0;
-		switch (depthStencilLayout) {
-		case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
-			dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
-			dstStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
-			break;
-		case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
-			dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
-			dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
-			break;
-		case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
-			dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
-			dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
-			break;
-		case VK_IMAGE_LAYOUT_UNDEFINED:
-		case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
-			// Nothing to do.
-			break;
-		default:
-			_dbg_assert_msg_(false, "TransitionFromOptimal: Unexpected final depth layout %d", (int)depthStencilLayout);
-			break;
-		}
-		recordBarrier->TransitionImage(
-			depthStencilImage, 0, 1, numLayers, VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT,
-			VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
-			depthStencilLayout,
-			VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT, // srcAccessMask
-			dstAccessMask,
-			VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
-			dstStageMask
-		);
+static void TransitionColorFromOptimal(VkCommandBuffer cmd, VkImage colorImage, VkImageLayout colorLayout, int numLayers, VulkanBarrierBatch *recordBarrier) {
+	VkPipelineStageFlags dstStageMask = 0;
+	VkAccessFlags dstAccessMask = 0;
+	// And the final transition.
+	// Don't need to transition it if VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL.
+	switch (colorLayout) {
+	case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
+		dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
+		dstStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
+		break;
+	case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
+		dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
+		dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
+		break;
+	case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
+		dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;  // The image is about to be written as a transfer destination.
+		dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
+		break;
+	case VK_IMAGE_LAYOUT_UNDEFINED:
+	case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
+		// Nothing to do.
+		return;
+	default:
+		_dbg_assert_msg_(false, "TransitionColorFromOptimal: Unexpected final color layout %d", (int)colorLayout);
+		break;
 	}
+	recordBarrier->TransitionImage(
+		colorImage, 0, 1, numLayers, VK_IMAGE_ASPECT_COLOR_BIT,
+		VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
+		colorLayout,
+		VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // srcAccessMask
+		dstAccessMask,
+		VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
+		dstStageMask
+	);
+}
+
+static void TransitionDepthFromOptimal(VkCommandBuffer cmd, VkImage depthStencilImage, VkImageLayout depthStencilLayout, int numLayers, VulkanBarrierBatch *recordBarrier) {
+	VkPipelineStageFlags dstStageMask = 0;
+	VkAccessFlags dstAccessMask = 0;
+	switch (depthStencilLayout) {
+	case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
+		dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
+		dstStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
+		break;
+	case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
+		dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
+		dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
+		break;
+	case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
+		dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;  // The image is about to be written as a transfer destination.
+		dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
+		break;
+	case VK_IMAGE_LAYOUT_UNDEFINED:
+	case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
+		// Nothing to do.
+		return;
+	default:
+		_dbg_assert_msg_(false, "TransitionDepthFromOptimal: Unexpected final depth layout %d", (int)depthStencilLayout);
+		break;
+	}
+	recordBarrier->TransitionImage(
+		depthStencilImage, 0, 1, numLayers, VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT,
+		VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
+		depthStencilLayout,
+		VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT, // srcAccessMask
+		dstAccessMask,
+		VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
+		dstStageMask
+	);
 }
 
 void VulkanQueueRunner::PerformRenderPass(const VKRStep &step, VkCommandBuffer cmd, int curFrame, QueueProfileContext &profile) {
@@ -1385,9 +1383,11 @@ void VulkanQueueRunner::PerformRenderPass(const VKRStep &step, VkCommandBuffer c
 	_dbg_assert_(recordBarrier_.empty());
 
 	if (fb) {
-		// If the desired final layout aren't the optimal layout for rendering, transition.
-		// TODO: Not sure when we use this now?
-		TransitionFromOptimal(cmd, fb->color.image, step.render.finalColorLayout, fb->depth.image, step.render.finalDepthStencilLayout, fb->numLayers, &recordBarrier_);
+		// If the desired final layout isn't the optimal layout needed next, transition.
+ TransitionColorFromOptimal(cmd, fb->color.image, step.render.finalColorLayout, fb->numLayers, &recordBarrier_); + if (fb->depth.image) { + TransitionDepthFromOptimal(cmd, fb->depth.image, step.render.finalDepthStencilLayout, fb->numLayers, &recordBarrier_); + } recordBarrier_.Flush(cmd); fb->color.layout = step.render.finalColorLayout; From a3d0cb1f12e994615678179fbff21e7e888a2b33 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Henrik=20Rydg=C3=A5rd?= Date: Sat, 6 Apr 2024 17:09:36 +0200 Subject: [PATCH 4/5] Switch away from TransitionImageLayout2 --- Common/GPU/Vulkan/VulkanImage.cpp | 8 ++++---- Common/GPU/Vulkan/VulkanImage.h | 2 +- Common/GPU/Vulkan/VulkanQueueRunner.cpp | 23 +++++++++++++---------- Common/GPU/Vulkan/thin3d_vulkan.cpp | 10 +++++++--- GPU/Vulkan/TextureCacheVulkan.cpp | 8 ++++++-- 5 files changed, 31 insertions(+), 20 deletions(-) diff --git a/Common/GPU/Vulkan/VulkanImage.cpp b/Common/GPU/Vulkan/VulkanImage.cpp index 0361d66d4aec..f5406a569883 100644 --- a/Common/GPU/Vulkan/VulkanImage.cpp +++ b/Common/GPU/Vulkan/VulkanImage.cpp @@ -38,7 +38,7 @@ static bool IsDepthStencilFormat(VkFormat format) { } } -bool VulkanTexture::CreateDirect(VkCommandBuffer cmd, int w, int h, int depth, int numMips, VkFormat format, VkImageLayout initialLayout, VkImageUsageFlags usage, const VkComponentMapping *mapping) { +bool VulkanTexture::CreateDirect(int w, int h, int depth, int numMips, VkFormat format, VkImageLayout initialLayout, VkImageUsageFlags usage, VulkanBarrierBatch *barrierBatch, const VkComponentMapping *mapping) { if (w == 0 || h == 0 || numMips == 0) { ERROR_LOG(G3D, "Can't create a zero-size VulkanTexture"); return false; @@ -113,10 +113,10 @@ bool VulkanTexture::CreateDirect(VkCommandBuffer cmd, int w, int h, int depth, i _assert_(false); break; } - TransitionImageLayout2(cmd, image_, 0, numMips, 1, VK_IMAGE_ASPECT_COLOR_BIT, + barrierBatch->TransitionImage(image_, 0, numMips, 1, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_UNDEFINED, initialLayout, - VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, dstStage, - 0, dstAccessFlags); + 0, dstAccessFlags, + VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, dstStage); } // Create the view while we're at it. diff --git a/Common/GPU/Vulkan/VulkanImage.h b/Common/GPU/Vulkan/VulkanImage.h index 4adaf4f1a1e2..05e60391084f 100644 --- a/Common/GPU/Vulkan/VulkanImage.h +++ b/Common/GPU/Vulkan/VulkanImage.h @@ -28,7 +28,7 @@ class VulkanTexture { // Fast uploads from buffer. Mipmaps supported. // Usage must at least include VK_IMAGE_USAGE_TRANSFER_DST_BIT in order to use UploadMip. // When using UploadMip, initialLayout should be VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL. - bool CreateDirect(VkCommandBuffer cmd, int w, int h, int depth, int numMips, VkFormat format, VkImageLayout initialLayout, VkImageUsageFlags usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT, const VkComponentMapping *mapping = nullptr); + bool CreateDirect(int w, int h, int depth, int numMips, VkFormat format, VkImageLayout initialLayout, VkImageUsageFlags usage, VulkanBarrierBatch *barrierBatch, const VkComponentMapping *mapping = nullptr); void ClearMip(VkCommandBuffer cmd, int mip, uint32_t value); // Can also be used to copy individual levels of a 3D texture. 
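With this change, CreateDirect no longer takes a command buffer and no longer defaults its usage flags; instead it queues the initial layout transition into a caller-provided VulkanBarrierBatch, and flushing becomes the caller's responsibility. A minimal sketch of the new calling pattern follows; the texture name, size, and format are illustrative, and `vulkan`/`cmdInit` stand in for a valid VulkanContext pointer and init command buffer as in the callers updated below:

	VulkanBarrierBatch barrier;
	VulkanTexture *tex = new VulkanTexture(vulkan, "example");
	bool ok = tex->CreateDirect(
		16, 16, 1, 1, VK_FORMAT_R8G8B8A8_UNORM,
		VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
		VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,
		&barrier);  // Queues the UNDEFINED -> TRANSFER_DST_OPTIMAL transition into the batch.
	if (ok) {
		barrier.Flush(cmdInit);  // Records one vkCmdPipelineBarrier for everything batched.
	}

Because the flush is explicit, several texture creations can share a single batch and thus a single pipeline barrier.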
diff --git a/Common/GPU/Vulkan/VulkanQueueRunner.cpp b/Common/GPU/Vulkan/VulkanQueueRunner.cpp index 30a6e91a84d5..a0164c03ad72 100644 --- a/Common/GPU/Vulkan/VulkanQueueRunner.cpp +++ b/Common/GPU/Vulkan/VulkanQueueRunner.cpp @@ -1913,10 +1913,10 @@ void VulkanQueueRunner::PerformReadback(const VKRStep &step, VkCommandBuffer cmd // We only take screenshots after the main render pass (anything else would be stupid) so we need to transition out of PRESENT, // and then back into it. // Regarding layers, backbuffer currently only has one layer. - TransitionImageLayout2(cmd, backbufferImage_, 0, 1, 1, VK_IMAGE_ASPECT_COLOR_BIT, + recordBarrier_.TransitionImage(backbufferImage_, 0, 1, 1, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, - VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, - 0, VK_ACCESS_TRANSFER_READ_BIT); + 0, VK_ACCESS_TRANSFER_READ_BIT, + VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT); copyLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL; image = backbufferImage_; } else { @@ -1933,12 +1933,13 @@ void VulkanQueueRunner::PerformReadback(const VKRStep &step, VkCommandBuffer cmd if (srcImage->layout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) { SetupTransitionToTransferSrc(*srcImage, step.readback.aspectMask, &recordBarrier_); - recordBarrier_.Flush(cmd); } image = srcImage->image; copyLayout = srcImage->layout; } + recordBarrier_.Flush(cmd); + // TODO: Handle different readback formats! u32 readbackSizeInBytes = sizeof(uint32_t) * step.readback.srcRect.extent.width * step.readback.srcRect.extent.height; @@ -1980,10 +1981,11 @@ void VulkanQueueRunner::PerformReadback(const VKRStep &step, VkCommandBuffer cmd // We only take screenshots after the main render pass (anything else would be stupid) so we need to transition out of PRESENT, // and then back into it. // Regarding layers, backbuffer currently only has one layer. - TransitionImageLayout2(cmd, backbufferImage_, 0, 1, 1, VK_IMAGE_ASPECT_COLOR_BIT, + recordBarrier_.TransitionImage(backbufferImage_, 0, 1, 1, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, - VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, - VK_ACCESS_TRANSFER_READ_BIT, 0); + VK_ACCESS_TRANSFER_READ_BIT, 0, + VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT); + recordBarrier_.Flush(cmd); // probably not needed copyLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL; } } @@ -2012,11 +2014,12 @@ void VulkanQueueRunner::PerformReadbackImage(const VKRStep &step, VkCommandBuffe vkCmdCopyImageToBuffer(cmd, step.readback_image.image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, syncReadback_.buffer, 1, ®ion); // Now transfer it back to a texture. - TransitionImageLayout2(cmd, step.readback_image.image, 0, 1, 1, // I don't think we have any multilayer cases for regular textures. Above in PerformReadback, though.. + recordBarrier_.TransitionImage(step.readback_image.image, 0, 1, 1, // I don't think we have any multilayer cases for regular textures. Above in PerformReadback, though.. 
VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, - VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, - VK_ACCESS_TRANSFER_READ_BIT, VK_ACCESS_SHADER_READ_BIT); + VK_ACCESS_TRANSFER_READ_BIT, VK_ACCESS_SHADER_READ_BIT, + VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT); + recordBarrier_.Flush(cmd); // probably not needed // NOTE: Can't read the buffer using the CPU here - need to sync first. // Doing that will also act like a heavyweight barrier ensuring that device writes are visible on the host. diff --git a/Common/GPU/Vulkan/thin3d_vulkan.cpp b/Common/GPU/Vulkan/thin3d_vulkan.cpp index eaace00be079..8061ba1cf760 100644 --- a/Common/GPU/Vulkan/thin3d_vulkan.cpp +++ b/Common/GPU/Vulkan/thin3d_vulkan.cpp @@ -699,8 +699,10 @@ VulkanTexture *VKContext::GetNullTexture() { nullTexture_ = new VulkanTexture(vulkan_, "Null"); int w = 8; int h = 8; - nullTexture_->CreateDirect(cmdInit, w, h, 1, 1, VK_FORMAT_A8B8G8R8_UNORM_PACK32, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, - VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT); + VulkanBarrierBatch barrier; + nullTexture_->CreateDirect(w, h, 1, 1, VK_FORMAT_A8B8G8R8_UNORM_PACK32, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, + VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT, &barrier); + barrier.Flush(cmdInit); uint32_t bindOffset; VkBuffer bindBuf; uint32_t *data = (uint32_t *)push_->Allocate(w * h * 4, 4, &bindBuf, &bindOffset); @@ -791,10 +793,12 @@ bool VKTexture::Create(VkCommandBuffer cmd, VulkanBarrierBatch *postBarriers, Vu VkComponentMapping r8AsAlpha[4] = { VK_COMPONENT_SWIZZLE_ONE, VK_COMPONENT_SWIZZLE_ONE, VK_COMPONENT_SWIZZLE_ONE, VK_COMPONENT_SWIZZLE_R }; - if (!vkTex_->CreateDirect(cmd, width_, height_, 1, mipLevels_, vulkanFormat, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, usageBits, desc.swizzle == TextureSwizzle::R8_AS_ALPHA ? r8AsAlpha : nullptr)) { + VulkanBarrierBatch barrier; + if (!vkTex_->CreateDirect(width_, height_, 1, mipLevels_, vulkanFormat, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, usageBits, &barrier, desc.swizzle == TextureSwizzle::R8_AS_ALPHA ? r8AsAlpha : nullptr)) { ERROR_LOG(G3D, "Failed to create VulkanTexture: %dx%dx%d fmt %d, %d levels", width_, height_, depth_, (int)vulkanFormat, mipLevels_); return false; } + barrier.Flush(cmd); VkImageLayout layout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; if (desc.initData.size()) { UpdateInternal(cmd, pushBuffer, desc.initData.data(), desc.initDataCallback, (int)desc.initData.size()); diff --git a/GPU/Vulkan/TextureCacheVulkan.cpp b/GPU/Vulkan/TextureCacheVulkan.cpp index 08efbc62ed0c..d12b3267fcbb 100644 --- a/GPU/Vulkan/TextureCacheVulkan.cpp +++ b/GPU/Vulkan/TextureCacheVulkan.cpp @@ -512,7 +512,10 @@ void TextureCacheVulkan::BuildTexture(TexCacheEntry *const entry) { snprintf(texName, sizeof(texName), "tex_%08x_%s_%s", entry->addr, GeTextureFormatToString((GETextureFormat)entry->format, gstate.getClutPaletteFormat()), gstate.isTextureSwizzled() ? 
"swz" : "lin"); entry->vkTex = new VulkanTexture(vulkan, texName); VulkanTexture *image = entry->vkTex; - bool allocSuccess = image->CreateDirect(cmdInit, plan.createW, plan.createH, plan.depth, plan.levelsToCreate, actualFmt, imageLayout, usage, mapping); + + VulkanBarrierBatch barrier; + bool allocSuccess = image->CreateDirect(plan.createW, plan.createH, plan.depth, plan.levelsToCreate, actualFmt, imageLayout, usage, &barrier, mapping); + barrier.Flush(cmdInit); if (!allocSuccess && !lowMemoryMode_) { WARN_LOG_REPORT(G3D, "Texture cache ran out of GPU memory; switching to low memory mode"); lowMemoryMode_ = true; @@ -537,7 +540,8 @@ void TextureCacheVulkan::BuildTexture(TexCacheEntry *const entry) { plan.scaleFactor = 1; actualFmt = dstFmt; - allocSuccess = image->CreateDirect(cmdInit, plan.createW, plan.createH, plan.depth, plan.levelsToCreate, actualFmt, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT, mapping); + allocSuccess = image->CreateDirect(plan.createW, plan.createH, plan.depth, plan.levelsToCreate, actualFmt, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT, &barrier, mapping); + barrier.Flush(cmdInit); } if (!allocSuccess) { From a66affe5db2bc1658b58e776f938c8b68da7aba0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Henrik=20Rydg=C3=A5rd?= Date: Sat, 6 Apr 2024 17:12:40 +0200 Subject: [PATCH 5/5] Delete the function TransitionImageLayout2 --- Common/GPU/Vulkan/VulkanBarrier.cpp | 19 ------------------- Common/GPU/Vulkan/VulkanBarrier.h | 6 ------ 2 files changed, 25 deletions(-) diff --git a/Common/GPU/Vulkan/VulkanBarrier.cpp b/Common/GPU/Vulkan/VulkanBarrier.cpp index 585fe98c04d5..c99a48a10ea5 100644 --- a/Common/GPU/Vulkan/VulkanBarrier.cpp +++ b/Common/GPU/Vulkan/VulkanBarrier.cpp @@ -114,22 +114,3 @@ void VulkanBarrierBatch::TransitionImageAuto( imageBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; imageBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; } - -void TransitionImageLayout2(VkCommandBuffer cmd, VkImage image, int baseMip, int numMipLevels, int numLayers, VkImageAspectFlags aspectMask, - VkImageLayout oldImageLayout, VkImageLayout newImageLayout, - VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, - VkAccessFlags srcAccessMask, VkAccessFlags dstAccessMask) { - VkImageMemoryBarrier image_memory_barrier{ VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER }; - image_memory_barrier.srcAccessMask = srcAccessMask; - image_memory_barrier.dstAccessMask = dstAccessMask; - image_memory_barrier.oldLayout = oldImageLayout; - image_memory_barrier.newLayout = newImageLayout; - image_memory_barrier.image = image; - image_memory_barrier.subresourceRange.aspectMask = aspectMask; - image_memory_barrier.subresourceRange.baseMipLevel = baseMip; - image_memory_barrier.subresourceRange.levelCount = numMipLevels; - image_memory_barrier.subresourceRange.layerCount = numLayers; // We never use more than one layer, and old Mali drivers have problems with VK_REMAINING_ARRAY_LAYERS/VK_REMAINING_MIP_LEVELS. 
- image_memory_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; - image_memory_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; - vkCmdPipelineBarrier(cmd, srcStageMask, dstStageMask, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier); -} diff --git a/Common/GPU/Vulkan/VulkanBarrier.h b/Common/GPU/Vulkan/VulkanBarrier.h index 60b1ea3fc08f..824515786cc4 100644 --- a/Common/GPU/Vulkan/VulkanBarrier.h +++ b/Common/GPU/Vulkan/VulkanBarrier.h @@ -55,9 +55,3 @@ class VulkanBarrierBatch { VkPipelineStageFlags dstStageMask_ = 0; VkDependencyFlags dependencyFlags_ = 0; }; - -// Detailed control, but just a single image. Use the barrier batch when possible. -void TransitionImageLayout2(VkCommandBuffer cmd, VkImage image, int baseMip, int mipLevels, int numLayers, VkImageAspectFlags aspectMask, - VkImageLayout oldImageLayout, VkImageLayout newImageLayout, - VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, - VkAccessFlags srcAccessMask, VkAccessFlags dstAccessMask);
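With TransitionImageLayout2 deleted, any remaining call sites migrate mechanically to VulkanBarrierBatch::TransitionImage plus an explicit Flush, as the hunks above demonstrate. A sketch of the rewrite with illustrative arguments; note that the batch method takes the two access masks before the two stage masks, the reverse of the deleted function's parameter order:

	// Before:
	//   TransitionImageLayout2(cmd, image, 0, 1, 1, VK_IMAGE_ASPECT_COLOR_BIT,
	//       VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
	//       VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
	//       VK_ACCESS_TRANSFER_READ_BIT, VK_ACCESS_SHADER_READ_BIT);
	// After:
	VulkanBarrierBatch batch;
	batch.TransitionImage(image, 0, 1, 1, VK_IMAGE_ASPECT_COLOR_BIT,
		VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
		VK_ACCESS_TRANSFER_READ_BIT, VK_ACCESS_SHADER_READ_BIT,
		VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
	batch.Flush(cmd);  // Emits a single vkCmdPipelineBarrier covering every queued transition.

The practical win is that transitions recorded into the same batch collapse into one vkCmdPipelineBarrier call at Flush time, where TransitionImageLayout2 issued one barrier call per image.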