Fix image memory barriers

Use proper stages and access masks
This fixes image memory barrier validation errors
Refs #631
Sascha Willems 2019-11-19 19:07:03 +01:00
parent 39852c4a27
commit 7086684979
2 changed files with 72 additions and 64 deletions
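
For context: the calls in the diff below replace vks::tools::setImageLayout (which picks the access masks itself from the old/new layouts and, in the blit loop, was passed VK_PIPELINE_STAGE_HOST_BIT) with vks::tools::insertImageMemoryBarrier, which takes both access masks and both pipeline stages explicitly. A minimal sketch of what such a helper boils down to is shown here, assuming it simply fills a VkImageMemoryBarrier and records it with vkCmdPipelineBarrier; the argument order matches the calls in the diff, but the body is an assumption, not the repository's exact implementation:

#include <vulkan/vulkan.h>

void insertImageMemoryBarrier(
	VkCommandBuffer cmdBuffer,
	VkImage image,
	VkAccessFlags srcAccessMask,        // writes that must be made available
	VkAccessFlags dstAccessMask,        // accesses that must be made visible
	VkImageLayout oldLayout,
	VkImageLayout newLayout,
	VkPipelineStageFlags srcStageMask,  // stage(s) to wait on
	VkPipelineStageFlags dstStageMask,  // stage(s) that wait
	VkImageSubresourceRange subresourceRange)
{
	VkImageMemoryBarrier barrier{};
	barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
	barrier.srcAccessMask = srcAccessMask;
	barrier.dstAccessMask = dstAccessMask;
	barrier.oldLayout = oldLayout;
	barrier.newLayout = newLayout;
	barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
	barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
	barrier.image = image;
	barrier.subresourceRange = subresourceRange;

	// Access masks are only valid when paired with pipeline stages that can
	// actually perform those accesses, which is why both stage masks are explicit.
	vkCmdPipelineBarrier(cmdBuffer, srcStageMask, dstStageMask, 0, 0, nullptr, 0, nullptr, 1, &barrier);
}

The point of the fix is visible in the call sites below: the per-mip blit dependencies now run transfer stage to transfer stage with a write-to-read access pairing, instead of involving the host stage.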


@@ -1,7 +1,7 @@
 /*
 * Vulkan Example - Runtime mip map generation
 *
-* Copyright (C) 2016 by Sascha Willems - www.saschawillems.de
+* Copyright (C) by Sascha Willems - www.saschawillems.de
 *
 * This code is licensed under the MIT license (MIT) (http://opensource.org/licenses/MIT)
 */
@@ -35,7 +35,6 @@ class VulkanExample : public VulkanExampleBase
 public:
 	struct Texture {
 		VkImage image;
-		VkImageLayout imageLayout;
 		VkDeviceMemory deviceMemory;
 		VkImageView view;
 		uint32_t width, height;
@@ -110,6 +109,13 @@ public:
 		models.tunnel.destroy();
 	}

+	virtual void getEnabledFeatures()
+	{
+		if (deviceFeatures.samplerAnisotropy) {
+			enabledFeatures.samplerAnisotropy = VK_TRUE;
+		}
+	}
+
 	void loadTexture(std::string fileName, VkFormat format, bool forceLinearTiling)
 	{
 #if defined(__ANDROID__)
@@ -200,11 +206,15 @@ public:
 		subresourceRange.layerCount = 1;

 		// Optimal image will be used as destination for the copy, so we must transfer from our initial undefined image layout to the transfer destination layout
-		vks::tools::setImageLayout(
+		vks::tools::insertImageMemoryBarrier(
 			copyCmd,
 			texture.image,
+			0,
+			VK_ACCESS_TRANSFER_WRITE_BIT,
 			VK_IMAGE_LAYOUT_UNDEFINED,
 			VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+			VK_PIPELINE_STAGE_TRANSFER_BIT,
+			VK_PIPELINE_STAGE_TRANSFER_BIT,
 			subresourceRange);

 		// Copy the first mip of the chain, remaining mips will be generated
@@ -220,12 +230,15 @@ public:
 		vkCmdCopyBufferToImage(copyCmd, stagingBuffer, texture.image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &bufferCopyRegion);

 		// Transition first mip level to transfer source for read during blit
-		texture.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
-		vks::tools::setImageLayout(
+		vks::tools::insertImageMemoryBarrier(
 			copyCmd,
 			texture.image,
+			VK_ACCESS_TRANSFER_WRITE_BIT,
+			VK_ACCESS_TRANSFER_READ_BIT,
 			VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
 			VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+			VK_PIPELINE_STAGE_TRANSFER_BIT,
+			VK_PIPELINE_STAGE_TRANSFER_BIT,
 			subresourceRange);

 		VulkanExampleBase::flushCommandBuffer(copyCmd, queue, true);
@@ -267,15 +280,17 @@ public:
 			mipSubRange.levelCount = 1;
 			mipSubRange.layerCount = 1;

-			// Transiton current mip level to transfer dest
-			vks::tools::setImageLayout(
+			// Prepare current mip level as image blit destination
+			vks::tools::insertImageMemoryBarrier(
 				blitCmd,
 				texture.image,
+				0,
+				VK_ACCESS_TRANSFER_WRITE_BIT,
 				VK_IMAGE_LAYOUT_UNDEFINED,
 				VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
-				mipSubRange,
 				VK_PIPELINE_STAGE_TRANSFER_BIT,
-				VK_PIPELINE_STAGE_HOST_BIT);
+				VK_PIPELINE_STAGE_TRANSFER_BIT,
+				mipSubRange);

 			// Blit from previous level
 			vkCmdBlitImage(
@@ -288,24 +303,30 @@ public:
 				&imageBlit,
 				VK_FILTER_LINEAR);

-			// Transiton current mip level to transfer source for read in next iteration
-			vks::tools::setImageLayout(
-				blitCmd,
+			// Prepare current mip level as image blit source for next level
+			vks::tools::insertImageMemoryBarrier(
+				copyCmd,
 				texture.image,
+				VK_ACCESS_TRANSFER_WRITE_BIT,
+				VK_ACCESS_TRANSFER_READ_BIT,
 				VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
 				VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
-				mipSubRange,
-				VK_PIPELINE_STAGE_HOST_BIT,
-				VK_PIPELINE_STAGE_TRANSFER_BIT);
+				VK_PIPELINE_STAGE_TRANSFER_BIT,
+				VK_PIPELINE_STAGE_TRANSFER_BIT,
+				mipSubRange);
 		}

 		// After the loop, all mip layers are in TRANSFER_SRC layout, so transition all to SHADER_READ
 		subresourceRange.levelCount = texture.mipLevels;
-		vks::tools::setImageLayout(
-			blitCmd,
+		vks::tools::insertImageMemoryBarrier(
+			copyCmd,
 			texture.image,
+			VK_ACCESS_TRANSFER_READ_BIT,
+			VK_ACCESS_SHADER_READ_BIT,
 			VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
-			texture.imageLayout,
+			VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
+			VK_PIPELINE_STAGE_TRANSFER_BIT,
+			VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
 			subresourceRange);

 		VulkanExampleBase::flushCommandBuffer(blitCmd, queue, true);
@@ -534,34 +555,17 @@ public:
 	void setupDescriptorSet()
 	{
-		VkDescriptorSetAllocateInfo allocInfo =
-			vks::initializers::descriptorSetAllocateInfo(
-				descriptorPool,
-				&descriptorSetLayout,
-				1);
+		VkDescriptorSetAllocateInfo allocInfo = vks::initializers::descriptorSetAllocateInfo(descriptorPool, &descriptorSetLayout,1);
 		VK_CHECK_RESULT(vkAllocateDescriptorSets(device, &allocInfo, &descriptorSet));

-		std::vector<VkWriteDescriptorSet> writeDescriptorSets;
+		VkDescriptorImageInfo textureDescriptor = vks::initializers::descriptorImageInfo(VK_NULL_HANDLE, texture.view, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
+		std::vector<VkWriteDescriptorSet> writeDescriptorSets = {
-		// Binding 0: Vertex shader uniform buffer
-		writeDescriptorSets.push_back(vks::initializers::writeDescriptorSet(
-			descriptorSet,
-			VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
-			0,
-			&uniformBufferVS.descriptor));
-		// Binding 1: Sampled image
-		VkDescriptorImageInfo textureDescriptor =
-			vks::initializers::descriptorImageInfo(
-				VK_NULL_HANDLE,
-				texture.view,
-				texture.imageLayout);
-		writeDescriptorSets.push_back(vks::initializers::writeDescriptorSet(
-			descriptorSet,
-			VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
-			1,
-			&textureDescriptor));
+			// Binding 0: Vertex shader uniform buffer
+			vks::initializers::writeDescriptorSet(descriptorSet, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 0, &uniformBufferVS.descriptor),
+			// Binding 1: Sampled image
+			vks::initializers::writeDescriptorSet(descriptorSet, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1, &textureDescriptor)
+		};

 		// Binding 2: Sampler array
 		std::vector<VkDescriptorImageInfo> samplerDescriptors;
@@ -578,7 +582,6 @@ public:
 		samplerDescriptorWrite.dstBinding = 2;
 		samplerDescriptorWrite.dstArrayElement = 0;
 		writeDescriptorSets.push_back(samplerDescriptorWrite);

 		vkUpdateDescriptorSets(device, static_cast<uint32_t>(writeDescriptorSets.size()), writeDescriptorSets.data(), 0, NULL);
 	}
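
For reference, the synchronization the changed calls establish: each mip level is first moved UNDEFINED -> TRANSFER_DST_OPTIMAL for the blit write, then TRANSFER_DST_OPTIMAL -> TRANSFER_SRC_OPTIMAL so the next iteration can read from it, and after the loop the whole chain goes to SHADER_READ_ONLY_OPTIMAL for sampling. A condensed sketch of that sequence, using the helper sketched above and hypothetical names (transitionMipChain, cmd, image, mipLevels) purely for illustration, not code from the commit:

static void transitionMipChain(VkCommandBuffer cmd, VkImage image, uint32_t mipLevels)
{
	for (uint32_t i = 1; i < mipLevels; i++) {
		VkImageSubresourceRange mipRange{ VK_IMAGE_ASPECT_COLOR_BIT, i, 1, 0, 1 };

		// Nothing to wait on yet (src access 0); the blit's write to mip i must see the new layout.
		insertImageMemoryBarrier(cmd, image,
			0, VK_ACCESS_TRANSFER_WRITE_BIT,
			VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
			VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, mipRange);

		// ... vkCmdBlitImage from mip i-1 (TRANSFER_SRC_OPTIMAL) into mip i (TRANSFER_DST_OPTIMAL) ...

		// Write-before-read within the transfer stage, so the next iteration can blit from mip i.
		insertImageMemoryBarrier(cmd, image,
			VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT,
			VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
			VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, mipRange);
	}

	// All mips are now TRANSFER_SRC_OPTIMAL; transition the whole chain for fragment-shader
	// sampling, with the same masks and stages as the final barrier in the diff.
	VkImageSubresourceRange allMips{ VK_IMAGE_ASPECT_COLOR_BIT, 0, mipLevels, 0, 1 };
	insertImageMemoryBarrier(cmd, image,
		VK_ACCESS_TRANSFER_READ_BIT, VK_ACCESS_SHADER_READ_BIT,
		VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
		VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, allMips);
}

Because every dependency that touches the blits is expressed as transfer stage to transfer stage with explicit write/read access masks, the validation layers no longer flag access masks that are unsupported by the stages they were attached to.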