/*
 * Vulkan Example - Host image copy using VK_EXT_host_image_copy
 *
 * This sample shows how to use host image copies to directly upload an image to the device without having to use staging
 * Copyright (C) 2024 by Sascha Willems - www.saschawillems.de
 *
 * This code is licensed under the MIT license (MIT) (http://opensource.org/licenses/MIT)
 */

#include "vulkanexamplebase.h"
#include "VulkanglTFModel.h"
#include <ktx.h>
#include <ktxvulkan.h>

class VulkanExample : public VulkanExampleBase
{
public:
	// Pointers for functions added by the host image copy extension
	PFN_vkCopyMemoryToImageEXT vkCopyMemoryToImageEXT{ nullptr };
	PFN_vkTransitionImageLayoutEXT vkTransitionImageLayoutEXT{ nullptr };
	// Used to check image format support for host image copies
	PFN_vkGetPhysicalDeviceFormatProperties2 vkGetPhysicalDeviceFormatProperties2{ nullptr };

	VkPhysicalDeviceHostImageCopyFeaturesEXT enabledPhysicalDeviceHostImageCopyFeaturesEXT{};

	// Contains all Vulkan objects that are required to store and use a texture
	struct Texture {
		VkSampler sampler{ VK_NULL_HANDLE };
		VkImage image{ VK_NULL_HANDLE };
		VkDeviceMemory deviceMemory{ VK_NULL_HANDLE };
		VkImageView view{ VK_NULL_HANDLE };
		uint32_t width{ 0 };
		uint32_t height{ 0 };
		uint32_t mipLevels{ 0 };
	} texture;

	vkglTF::Model plane;

	struct UniformData {
		glm::mat4 projection;
		glm::mat4 modelView;
		glm::vec4 viewPos;
		float lodBias = 0.0f;
	} uniformData;
	vks::Buffer uniformBuffer;

	VkPipeline pipeline{ VK_NULL_HANDLE };
	VkPipelineLayout pipelineLayout{ VK_NULL_HANDLE };
	VkDescriptorSet descriptorSet{ VK_NULL_HANDLE };
	VkDescriptorSetLayout descriptorSetLayout{ VK_NULL_HANDLE };

	VulkanExample() : VulkanExampleBase()
	{
		title = "Host image copy";
		camera.type = Camera::CameraType::lookat;
		camera.setPosition(glm::vec3(0.0f, 0.0f, -1.5f));
		camera.setRotation(glm::vec3(0.0f, 15.0f, 0.0f));
		camera.setPerspective(60.0f, (float)width / (float)height, 0.1f, 256.0f);
		// Enable required extensions
		enabledInstanceExtensions.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
		enabledDeviceExtensions.push_back(VK_KHR_FORMAT_FEATURE_FLAGS_2_EXTENSION_NAME);
		enabledDeviceExtensions.push_back(VK_KHR_COPY_COMMANDS_2_EXTENSION_NAME);
		enabledDeviceExtensions.push_back(VK_EXT_HOST_IMAGE_COPY_EXTENSION_NAME);
		// Enable host image copy feature
		enabledPhysicalDeviceHostImageCopyFeaturesEXT.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_IMAGE_COPY_FEATURES_EXT;
		enabledPhysicalDeviceHostImageCopyFeaturesEXT.hostImageCopy = VK_TRUE;
		deviceCreatepNextChain = &enabledPhysicalDeviceHostImageCopyFeaturesEXT;
	}

	~VulkanExample()
	{
		if (device) {
			destroyTextureImage(texture);
			vkDestroyPipeline(device, pipeline, nullptr);
			vkDestroyPipelineLayout(device, pipelineLayout, nullptr);
			vkDestroyDescriptorSetLayout(device, descriptorSetLayout, nullptr);
			uniformBuffer.destroy();
		}
	}

	// Enable physical device features required for this example
	virtual void getEnabledFeatures()
	{
		// Enable anisotropic filtering if supported
		if (deviceFeatures.samplerAnisotropy) {
			enabledFeatures.samplerAnisotropy = VK_TRUE;
		}
	}

	/*
		Upload texture image data to the GPU

		Unlike the texture(3d/array/etc) samples, this one uses VK_EXT_host_image_copy to drastically simplify the process of uploading an image from the host to the GPU.

		This new extension adds a way of directly uploading image data from host memory to an optimal tiled image on the device (GPU).
		This no longer requires a staging buffer in between, as we can now directly copy data stored in host memory to the image.
		The extension also adds new functionality to simplify image barriers
	*/
	void loadTexture()
	{
		// We use the Khronos texture format (https://www.khronos.org/opengles/sdk/tools/KTX/file_format_spec/)
		std::string filename = getAssetPath() + "textures/metalplate01_rgba.ktx";

		ktxResult result;
		ktxTexture* ktxTexture;

#if defined(__ANDROID__)
		// Textures are stored inside the apk on Android (compressed)
		// So they need to be loaded via the asset manager
		AAsset* asset = AAssetManager_open(androidApp->activity->assetManager, filename.c_str(), AASSET_MODE_STREAMING);
		if (!asset) {
			vks::tools::exitFatal("Could not load texture from " + filename + "\n\nMake sure the assets submodule has been checked out and is up-to-date.", -1);
		}
		size_t size = AAsset_getLength(asset);
		assert(size > 0);
		ktx_uint8_t *textureData = new ktx_uint8_t[size];
		AAsset_read(asset, textureData, size);
		AAsset_close(asset);
		result = ktxTexture_CreateFromMemory(textureData, size, KTX_TEXTURE_CREATE_LOAD_IMAGE_DATA_BIT, &ktxTexture);
		delete[] textureData;
#else
		if (!vks::tools::fileExists(filename)) {
			vks::tools::exitFatal("Could not load texture from " + filename + "\n\nMake sure the assets submodule has been checked out and is up-to-date.", -1);
		}
		result = ktxTexture_CreateFromNamedFile(filename.c_str(), KTX_TEXTURE_CREATE_LOAD_IMAGE_DATA_BIT, &ktxTexture);
#endif
		assert(result == KTX_SUCCESS);

		// Get properties required for using and uploading texture data from the ktx texture object
		texture.width = ktxTexture->baseWidth;
		texture.height = ktxTexture->baseHeight;
		texture.mipLevels = ktxTexture->numLevels;
		ktx_uint8_t *ktxTextureData = ktxTexture_GetData(ktxTexture);
		ktx_size_t ktxTextureSize = ktxTexture_GetSize(ktxTexture);

		const VkFormat imageFormat = VK_FORMAT_R8G8B8A8_UNORM;

		// Check if the image format supports the host image copy flag
		// Note: All formats that support sampling are required to support this flag
		// So for the format used here (R8G8B8A8_UNORM) we could skip this check
		// The flag we need to check is an extension flag, so we need to go through VkFormatProperties3
		VkFormatProperties3 formatProperties3{};
		formatProperties3.sType = VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_3_KHR;
		// Properties3 needs to be chained into Properties2
		VkFormatProperties2 formatProperties2{};
		formatProperties2.sType = VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2;
		formatProperties2.pNext = &formatProperties3;
		vkGetPhysicalDeviceFormatProperties2(physicalDevice, imageFormat, &formatProperties2);
		if ((formatProperties3.optimalTilingFeatures & VK_FORMAT_FEATURE_2_HOST_IMAGE_TRANSFER_BIT_EXT) == 0) {
			vks::tools::exitFatal("The selected image format does not support the required host transfer bit.", -1);
		}

		// Create optimal tiled target image on the device
		VkImageCreateInfo imageCreateInfo = vks::initializers::imageCreateInfo();
		imageCreateInfo.imageType = VK_IMAGE_TYPE_2D;
		imageCreateInfo.format = imageFormat;
		imageCreateInfo.mipLevels = texture.mipLevels;
		imageCreateInfo.arrayLayers = 1;
		imageCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
		imageCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
		imageCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
		imageCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
		imageCreateInfo.extent = { texture.width, texture.height, 1 };
		// For images that use host image copy we need to specify the VK_IMAGE_USAGE_HOST_TRANSFER_BIT_EXT usage flag
		imageCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_HOST_TRANSFER_BIT_EXT;
		VK_CHECK_RESULT(vkCreateImage(device, &imageCreateInfo, nullptr, &texture.image));
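
		// Allocate device local memory to back the image and bind it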
		VkMemoryAllocateInfo memAllocInfo = vks::initializers::memoryAllocateInfo();
		VkMemoryRequirements memReqs = {};
		vkGetImageMemoryRequirements(device, texture.image, &memReqs);
		memAllocInfo.allocationSize = memReqs.size;
		memAllocInfo.memoryTypeIndex = vulkanDevice->getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
		VK_CHECK_RESULT(vkAllocateMemory(device, &memAllocInfo, nullptr, &texture.deviceMemory));
		VK_CHECK_RESULT(vkBindImageMemory(device, texture.image, texture.deviceMemory, 0));

		// With host image copy we can directly copy from the KTX image in host memory to the device
		// This is pretty straightforward, as the KTX image is already tightly packed, doesn't need any swizzle and as such matches
		// what the device expects

		// Set up copy information for all mip levels stored in the image
		std::vector<VkMemoryToImageCopyEXT> memoryToImageCopies{};
		for (uint32_t i = 0; i < texture.mipLevels; i++) {
			// Set up a memory to image copy structure for the current mip level
			VkMemoryToImageCopyEXT memoryToImageCopy = {};
			memoryToImageCopy.sType = VK_STRUCTURE_TYPE_MEMORY_TO_IMAGE_COPY_EXT;
			memoryToImageCopy.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
			memoryToImageCopy.imageSubresource.mipLevel = i;
			memoryToImageCopy.imageSubresource.baseArrayLayer = 0;
			memoryToImageCopy.imageSubresource.layerCount = 1;
			memoryToImageCopy.imageExtent.width = ktxTexture->baseWidth >> i;
			memoryToImageCopy.imageExtent.height = ktxTexture->baseHeight >> i;
			memoryToImageCopy.imageExtent.depth = 1;
			// This tells the implementation where to read the data from
			// As the KTX file is tightly packed, we can simply offset into that buffer for the current mip level
			ktx_size_t offset;
			KTX_error_code ret = ktxTexture_GetImageOffset(ktxTexture, i, 0, 0, &offset);
			assert(ret == KTX_SUCCESS);
			memoryToImageCopy.pHostPointer = ktxTextureData + offset;
			memoryToImageCopies.push_back(memoryToImageCopy);
		}

		VkImageSubresourceRange subresourceRange{};
		subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
		subresourceRange.baseMipLevel = 0;
		subresourceRange.levelCount = texture.mipLevels;
		subresourceRange.layerCount = 1;

		// VK_EXT_host_image_copy also introduces a simplified way of doing the required image transition on the host
		// This no longer requires a dedicated command buffer to submit the barrier
		// We also no longer need multiple transitions, and only have to do one for the final layout
		VkHostImageLayoutTransitionInfoEXT hostImageLayoutTransitionInfo{};
		hostImageLayoutTransitionInfo.sType = VK_STRUCTURE_TYPE_HOST_IMAGE_LAYOUT_TRANSITION_INFO_EXT;
		hostImageLayoutTransitionInfo.image = texture.image;
		hostImageLayoutTransitionInfo.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
		hostImageLayoutTransitionInfo.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
		hostImageLayoutTransitionInfo.subresourceRange = subresourceRange;
		vkTransitionImageLayoutEXT(device, 1, &hostImageLayoutTransitionInfo);

		// With the image in the correct layout and copy information for all mip levels set up, we can now issue the copy to our target image from the host
		// The implementation will then convert this to an implementation specific optimal tiling layout
		VkCopyMemoryToImageInfoEXT copyMemoryInfo{};
		copyMemoryInfo.sType = VK_STRUCTURE_TYPE_COPY_MEMORY_TO_IMAGE_INFO_EXT;
		copyMemoryInfo.dstImage = texture.image;
		copyMemoryInfo.dstImageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
		copyMemoryInfo.regionCount = static_cast<uint32_t>(memoryToImageCopies.size());
		copyMemoryInfo.pRegions = memoryToImageCopies.data();
		vkCopyMemoryToImageEXT(device, &copyMemoryInfo);
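
		// The copy is executed on the host and has finished once vkCopyMemoryToImageEXT returns, so the KTX source data is no longer needed at this point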
		ktxTexture_Destroy(ktxTexture);

		// Create a texture sampler
		VkSamplerCreateInfo sampler = vks::initializers::samplerCreateInfo();
		sampler.magFilter = VK_FILTER_LINEAR;
		sampler.minFilter = VK_FILTER_LINEAR;
		sampler.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
		sampler.addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT;
		sampler.addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT;
		sampler.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT;
		sampler.mipLodBias = 0.0f;
		sampler.compareOp = VK_COMPARE_OP_NEVER;
		sampler.minLod = 0.0f;
		sampler.maxLod = (float)texture.mipLevels;
		// Only use anisotropic filtering if the feature was enabled on the device (see getEnabledFeatures)
		sampler.maxAnisotropy = enabledFeatures.samplerAnisotropy ? vulkanDevice->properties.limits.maxSamplerAnisotropy : 1.0f;
		sampler.anisotropyEnable = enabledFeatures.samplerAnisotropy;
		sampler.borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE;
		VK_CHECK_RESULT(vkCreateSampler(device, &sampler, nullptr, &texture.sampler));

		// Create image view
		VkImageViewCreateInfo view = vks::initializers::imageViewCreateInfo();
		view.viewType = VK_IMAGE_VIEW_TYPE_2D;
		view.format = imageFormat;
		view.subresourceRange = subresourceRange;
		view.image = texture.image;
		VK_CHECK_RESULT(vkCreateImageView(device, &view, nullptr, &texture.view));
	}

	// Free all Vulkan resources used by a texture object
	void destroyTextureImage(Texture texture)
	{
		vkDestroyImageView(device, texture.view, nullptr);
		vkDestroyImage(device, texture.image, nullptr);
		vkDestroySampler(device, texture.sampler, nullptr);
		vkFreeMemory(device, texture.deviceMemory, nullptr);
	}

	void buildCommandBuffers()
	{
		VkCommandBufferBeginInfo cmdBufInfo = vks::initializers::commandBufferBeginInfo();

		VkClearValue clearValues[2];
		clearValues[0].color = defaultClearColor;
		clearValues[1].depthStencil = { 1.0f, 0 };

		VkRenderPassBeginInfo renderPassBeginInfo = vks::initializers::renderPassBeginInfo();
		renderPassBeginInfo.renderPass = renderPass;
		renderPassBeginInfo.renderArea.offset.x = 0;
		renderPassBeginInfo.renderArea.offset.y = 0;
		renderPassBeginInfo.renderArea.extent.width = width;
		renderPassBeginInfo.renderArea.extent.height = height;
		renderPassBeginInfo.clearValueCount = 2;
		renderPassBeginInfo.pClearValues = clearValues;

		for (int32_t i = 0; i < drawCmdBuffers.size(); ++i) {
			// Set target frame buffer
			renderPassBeginInfo.framebuffer = frameBuffers[i];

			VK_CHECK_RESULT(vkBeginCommandBuffer(drawCmdBuffers[i], &cmdBufInfo));

			vkCmdBeginRenderPass(drawCmdBuffers[i], &renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE);

			VkViewport viewport = vks::initializers::viewport((float)width, (float)height, 0.0f, 1.0f);
			vkCmdSetViewport(drawCmdBuffers[i], 0, 1, &viewport);

			VkRect2D scissor = vks::initializers::rect2D(width, height, 0, 0);
			vkCmdSetScissor(drawCmdBuffers[i], 0, 1, &scissor);

			vkCmdBindDescriptorSets(drawCmdBuffers[i], VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineLayout, 0, 1, &descriptorSet, 0, nullptr);
			vkCmdBindPipeline(drawCmdBuffers[i], VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);

			plane.draw(drawCmdBuffers[i]);

			drawUI(drawCmdBuffers[i]);

			vkCmdEndRenderPass(drawCmdBuffers[i]);

			VK_CHECK_RESULT(vkEndCommandBuffer(drawCmdBuffers[i]));
		}
	}

	void setupDescriptors()
	{
		// Pool
		std::vector<VkDescriptorPoolSize> poolSizes = {
			vks::initializers::descriptorPoolSize(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1),
			vks::initializers::descriptorPoolSize(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1)
		};
		VkDescriptorPoolCreateInfo descriptorPoolInfo = vks::initializers::descriptorPoolCreateInfo(poolSizes, 2);
		VK_CHECK_RESULT(vkCreateDescriptorPool(device, &descriptorPoolInfo, nullptr, &descriptorPool));

		// Layout
		std::vector<VkDescriptorSetLayoutBinding> setLayoutBindings = {
			// Binding 0 : Vertex shader uniform buffer
			vks::initializers::descriptorSetLayoutBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_SHADER_STAGE_VERTEX_BIT, 0),
			// Binding 1 : Fragment shader image sampler
			vks::initializers::descriptorSetLayoutBinding(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, VK_SHADER_STAGE_FRAGMENT_BIT, 1)
		};
		VkDescriptorSetLayoutCreateInfo descriptorLayout = vks::initializers::descriptorSetLayoutCreateInfo(setLayoutBindings);
		VK_CHECK_RESULT(vkCreateDescriptorSetLayout(device, &descriptorLayout, nullptr, &descriptorSetLayout));

		// Set
		VkDescriptorSetAllocateInfo allocInfo = vks::initializers::descriptorSetAllocateInfo(descriptorPool, &descriptorSetLayout, 1);
		VK_CHECK_RESULT(vkAllocateDescriptorSets(device, &allocInfo, &descriptorSet));

		// Setup a descriptor image info for the current texture to be used as a combined image sampler
		VkDescriptorImageInfo textureDescriptor;
		textureDescriptor.imageView = texture.view;
		textureDescriptor.sampler = texture.sampler;
		textureDescriptor.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

		std::vector<VkWriteDescriptorSet> writeDescriptorSets = {
			// Binding 0 : Vertex shader uniform buffer
			vks::initializers::writeDescriptorSet(descriptorSet, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 0, &uniformBuffer.descriptor),
			// Binding 1 : Fragment shader texture sampler
			vks::initializers::writeDescriptorSet(descriptorSet, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, &textureDescriptor)
		};
		vkUpdateDescriptorSets(device, static_cast<uint32_t>(writeDescriptorSets.size()), writeDescriptorSets.data(), 0, nullptr);
	}

	void preparePipelines()
	{
		// Layout
		VkPipelineLayoutCreateInfo pipelineLayoutCreateInfo = vks::initializers::pipelineLayoutCreateInfo(&descriptorSetLayout, 1);
		VK_CHECK_RESULT(vkCreatePipelineLayout(device, &pipelineLayoutCreateInfo, nullptr, &pipelineLayout));

		// Pipeline
		VkPipelineInputAssemblyStateCreateInfo inputAssemblyState = vks::initializers::pipelineInputAssemblyStateCreateInfo(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 0, VK_FALSE);
		VkPipelineRasterizationStateCreateInfo rasterizationState = vks::initializers::pipelineRasterizationStateCreateInfo(VK_POLYGON_MODE_FILL, VK_CULL_MODE_NONE, VK_FRONT_FACE_COUNTER_CLOCKWISE, 0);
		VkPipelineColorBlendAttachmentState blendAttachmentState = vks::initializers::pipelineColorBlendAttachmentState(0xf, VK_FALSE);
		VkPipelineColorBlendStateCreateInfo colorBlendState = vks::initializers::pipelineColorBlendStateCreateInfo(1, &blendAttachmentState);
		VkPipelineDepthStencilStateCreateInfo depthStencilState = vks::initializers::pipelineDepthStencilStateCreateInfo(VK_TRUE, VK_TRUE, VK_COMPARE_OP_LESS_OR_EQUAL);
		VkPipelineViewportStateCreateInfo viewportState = vks::initializers::pipelineViewportStateCreateInfo(1, 1, 0);
		VkPipelineMultisampleStateCreateInfo multisampleState = vks::initializers::pipelineMultisampleStateCreateInfo(VK_SAMPLE_COUNT_1_BIT, 0);
		std::vector<VkDynamicState> dynamicStateEnables = { VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR };
		VkPipelineDynamicStateCreateInfo dynamicState = vks::initializers::pipelineDynamicStateCreateInfo(dynamicStateEnables);
		std::array<VkPipelineShaderStageCreateInfo, 2> shaderStages;

		// Shaders
		shaderStages[0] = loadShader(getShadersPath() + "texture/texture.vert.spv", VK_SHADER_STAGE_VERTEX_BIT);
		shaderStages[1] = loadShader(getShadersPath() + "texture/texture.frag.spv", VK_SHADER_STAGE_FRAGMENT_BIT);

		VkGraphicsPipelineCreateInfo pipelineCreateInfo = vks::initializers::pipelineCreateInfo(pipelineLayout, renderPass, 0);
		pipelineCreateInfo.pInputAssemblyState = &inputAssemblyState;
		pipelineCreateInfo.pRasterizationState = &rasterizationState;
		pipelineCreateInfo.pColorBlendState = &colorBlendState;
		pipelineCreateInfo.pMultisampleState = &multisampleState;
		pipelineCreateInfo.pViewportState = &viewportState;
		pipelineCreateInfo.pDepthStencilState = &depthStencilState;
		pipelineCreateInfo.pDynamicState = &dynamicState;
		pipelineCreateInfo.stageCount = static_cast<uint32_t>(shaderStages.size());
		pipelineCreateInfo.pStages = shaderStages.data();
		pipelineCreateInfo.pVertexInputState = vkglTF::Vertex::getPipelineVertexInputState({ vkglTF::VertexComponent::Position, vkglTF::VertexComponent::UV, vkglTF::VertexComponent::Normal });
		VK_CHECK_RESULT(vkCreateGraphicsPipelines(device, pipelineCache, 1, &pipelineCreateInfo, nullptr, &pipeline));
	}

	// Prepare and initialize uniform buffer containing shader uniforms
	void prepareUniformBuffers()
	{
		// Vertex shader uniform buffer block
		VK_CHECK_RESULT(vulkanDevice->createBuffer(VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, &uniformBuffer, sizeof(uniformData), &uniformData));
		VK_CHECK_RESULT(uniformBuffer.map());
	}

	void updateUniformBuffers()
	{
		uniformData.projection = camera.matrices.perspective;
		uniformData.modelView = camera.matrices.view;
		uniformData.viewPos = camera.viewPos;
		memcpy(uniformBuffer.mapped, &uniformData, sizeof(uniformData));
	}

	void loadAssets()
	{
		const uint32_t glTFLoadingFlags = vkglTF::FileLoadingFlags::PreTransformVertices | vkglTF::FileLoadingFlags::PreMultiplyVertexColors | vkglTF::FileLoadingFlags::FlipY;
		plane.loadFromFile(getAssetPath() + "models/plane_z.gltf", vulkanDevice, queue, glTFLoadingFlags);
	}

	void prepare()
	{
		VulkanExampleBase::prepare();
		// Get the function pointers required for host image copies
		vkCopyMemoryToImageEXT = reinterpret_cast<PFN_vkCopyMemoryToImageEXT>(vkGetDeviceProcAddr(device, "vkCopyMemoryToImageEXT"));
		vkTransitionImageLayoutEXT = reinterpret_cast<PFN_vkTransitionImageLayoutEXT>(vkGetDeviceProcAddr(device, "vkTransitionImageLayoutEXT"));
		vkGetPhysicalDeviceFormatProperties2 = reinterpret_cast<PFN_vkGetPhysicalDeviceFormatProperties2>(vkGetInstanceProcAddr(instance, "vkGetPhysicalDeviceFormatProperties2"));
		loadAssets();
		loadTexture();
		prepareUniformBuffers();
		setupDescriptors();
		preparePipelines();
		buildCommandBuffers();
		prepared = true;
	}

	void draw()
	{
		VulkanExampleBase::prepareFrame();
		submitInfo.commandBufferCount = 1;
		submitInfo.pCommandBuffers = &drawCmdBuffers[currentBuffer];
		VK_CHECK_RESULT(vkQueueSubmit(queue, 1, &submitInfo, VK_NULL_HANDLE));
		VulkanExampleBase::submitFrame();
	}

	virtual void render()
	{
		if (!prepared)
			return;
		updateUniformBuffers();
		draw();
	}

	virtual void OnUpdateUIOverlay(vks::UIOverlay *overlay)
	{
		if (overlay->header("Settings")) {
			if (overlay->sliderFloat("LOD bias", &uniformData.lodBias, 0.0f, (float)texture.mipLevels)) {
				updateUniformBuffers();
			}
		}
	}
};

VULKAN_EXAMPLE_MAIN()