/*
* Vulkan Example - Model loading and rendering
*
* Copyright (C) 2016-2020 by Sascha Willems - www.saschawillems.de
*
* This code is licensed under the MIT license (MIT) (http://opensource.org/licenses/MIT)
*/
/*
* Shows how to load and display a simple mesh from a glTF file
* Note that this isn't a complete glTF loader and only basic functions are shown here
* This means no animations, no skins, etc.; the node hierarchy is loaded, but node matrices are not applied during rendering
* For details on how glTF 2.0 works, see the official spec at https://github.com/KhronosGroup/glTF/tree/master/specification/2.0
*
* If you are looking for a complete glTF implementation, check out https://github.com/SaschaWillems/Vulkan-glTF-PBR/
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <vector>
#include <array>
#include <iostream>
#define GLM_FORCE_RADIANS
#define GLM_FORCE_DEPTH_ZERO_TO_ONE
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/type_ptr.hpp>
#define TINYGLTF_IMPLEMENTATION
#define STB_IMAGE_IMPLEMENTATION
#define TINYGLTF_NO_STB_IMAGE_WRITE
#include "tiny_gltf.h"
#include <vulkan/vulkan.h>
#include "vulkanexamplebase.h"
#include "VulkanTexture.hpp"
#define ENABLE_VALIDATION false
// Contains everything required to render a glTF model in Vulkan
// This class is very simplified but retains the basic glTF structure
class VulkanglTFModel
{
// The vertex layout for the samples' model
struct Vertex {
glm::vec3 pos;
glm::vec3 normal;
glm::vec2 uv;
glm::vec3 color;
};
};
class VulkanExample : public VulkanExampleBase
{
public:
bool wireframe = false;
struct Vertex {
glm::vec3 pos;
glm::vec3 normal;
glm::vec2 uv;
glm::vec3 color;
};
struct ModelNode;
// A primitive contains the data for a single draw call
struct Primitive {
uint32_t firstIndex;
uint32_t indexCount;
int32_t materialIndex;
};
// Contains the node's geometry and can be made up of an arbitrary number of primitives
struct Mesh {
std::vector<Primitive> primitives;
};
// A node represents an object in the glTF scene graph
struct ModelNode {
ModelNode* parent;
std::vector<ModelNode> children;
Mesh mesh;
glm::mat4 matrix;
};
// Represents a glTF material used to access e.g. the texture to choose for a mesh
struct ModelMaterial {
glm::vec4 baseColorFactor = glm::vec4(1.0f);
uint32_t baseColorTextureIndex;
};
// A glTF image contains the actual pixel data referenced by a texture; it is uploaded to a Vulkan texture with its own descriptor set
struct ModelImage {
vks::Texture2D texture;
VkDescriptorSet descriptorSet;
};
// Contains all Vulkan resources required to represent vertex and index buffers for a model
// This is for demonstration and learning purposes, the other examples use a model loader class for easy access
struct Model {
std::vector<ModelImage> images;
// Textures in glTF are indices used by materials to select an image (and optionally a sampler)
std::vector<uint32_t> textures;
std::vector<ModelMaterial> materials;
std::vector<ModelNode> nodes;
struct {
VkBuffer buffer;
VkDeviceMemory memory;
} vertices;
struct {
int count;
VkBuffer buffer;
VkDeviceMemory memory;
} indices;
// Destroys all Vulkan resources created for this model
void destroy(VkDevice device)
{
vkDestroyBuffer(device, vertices.buffer, nullptr);
vkFreeMemory(device, vertices.memory, nullptr);
vkDestroyBuffer(device, indices.buffer, nullptr);
vkFreeMemory(device, indices.memory, nullptr);
};
} model;
struct {
vks::Buffer scene;
} uniformBuffers;
struct {
glm::mat4 projection;
glm::mat4 model;
glm::vec4 lightPos = glm::vec4(25.0f, 5.0f, 5.0f, 1.0f);
} uboVS;
struct Pipelines {
VkPipeline solid;
VkPipeline wireframe = VK_NULL_HANDLE;
} pipelines;
VkPipelineLayout pipelineLayout;
VkDescriptorSet descriptorSet;
struct DescriptorSetLayouts {
VkDescriptorSetLayout matrices;
VkDescriptorSetLayout textures;
} descriptorSetLayouts;
VulkanExample() : VulkanExampleBase(ENABLE_VALIDATION)
{
zoom = -5.5f;
zoomSpeed = 2.5f;
rotationSpeed = 0.5f;
rotation = { -0.5f, -112.75f, 0.0f };
cameraPos = { 0.1f, 1.1f, 0.0f };
title = "glTF model rendering";
settings.overlay = true;
//@todo: Use camera
}
~VulkanExample()
{
// Clean up used Vulkan resources
// Note : Inherited destructor cleans up resources stored in base class
vkDestroyPipeline(device, pipelines.solid, nullptr);
if (pipelines.wireframe != VK_NULL_HANDLE) {
vkDestroyPipeline(device, pipelines.wireframe, nullptr);
}
vkDestroyPipelineLayout(device, pipelineLayout, nullptr);
vkDestroyDescriptorSetLayout(device, descriptorSetLayouts.matrices, nullptr);
vkDestroyDescriptorSetLayout(device, descriptorSetLayouts.textures, nullptr);
model.destroy(device);
uniformBuffers.scene.destroy();
}
virtual void getEnabledFeatures()
{
// Fill mode non solid is required for wireframe display
if (deviceFeatures.fillModeNonSolid) {
enabledFeatures.fillModeNonSolid = VK_TRUE;
}
}
void buildCommandBuffers()
{
VkCommandBufferBeginInfo cmdBufInfo = vks::initializers::commandBufferBeginInfo();
VkClearValue clearValues[2];
clearValues[0].color = defaultClearColor;
clearValues[1].depthStencil = { 1.0f, 0 };
VkRenderPassBeginInfo renderPassBeginInfo = vks::initializers::renderPassBeginInfo();
renderPassBeginInfo.renderPass = renderPass;
renderPassBeginInfo.renderArea.offset.x = 0;
renderPassBeginInfo.renderArea.offset.y = 0;
renderPassBeginInfo.renderArea.extent.width = width;
renderPassBeginInfo.renderArea.extent.height = height;
renderPassBeginInfo.clearValueCount = 2;
renderPassBeginInfo.pClearValues = clearValues;
const VkViewport viewport = vks::initializers::viewport((float)width, (float)height, 0.0f, 1.0f);
const VkRect2D scissor = vks::initializers::rect2D(width, height, 0, 0);
for (size_t i = 0; i < drawCmdBuffers.size(); ++i)
{
renderPassBeginInfo.framebuffer = frameBuffers[i];
VK_CHECK_RESULT(vkBeginCommandBuffer(drawCmdBuffers[i], &cmdBufInfo));
vkCmdBeginRenderPass(drawCmdBuffers[i], &renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE);
vkCmdSetViewport(drawCmdBuffers[i], 0, 1, &viewport);
vkCmdSetScissor(drawCmdBuffers[i], 0, 1, &scissor);
// Bind scene matrices descriptor to set 0
vkCmdBindDescriptorSets(drawCmdBuffers[i], VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineLayout, 0, 1, &descriptorSet, 0, nullptr);
vkCmdBindPipeline(drawCmdBuffers[i], VK_PIPELINE_BIND_POINT_GRAPHICS, wireframe ? pipelines.wireframe : pipelines.solid);
drawglTFModel(drawCmdBuffers[i]);
drawUI(drawCmdBuffers[i]);
vkCmdEndRenderPass(drawCmdBuffers[i]);
VK_CHECK_RESULT(vkEndCommandBuffer(drawCmdBuffers[i]));
}
}
// Draw a single glTF node (and recursively its children) using the currently bound vertex and index buffers
void drawglTFNode(VkCommandBuffer commandBuffer, const ModelNode& node)
{
if (!node.mesh.primitives.empty()) {
for (const Primitive& primitive : node.mesh.primitives) {
if (primitive.indexCount > 0) {
// @todo: link mat to node
uint32_t texture = model.textures[model.materials[primitive.materialIndex].baseColorTextureIndex];
vkCmdBindDescriptorSets(commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineLayout, 1, 1, &model.images[texture].descriptorSet, 0, nullptr);
vkCmdDrawIndexed(commandBuffer, primitive.indexCount, 1, primitive.firstIndex, 0, 0);
}
}
}
for (auto& child : node.children) {
drawglTFNode(commandBuffer, child);
}
}
void drawglTFModel(VkCommandBuffer commandBuffer)
{
// All vertices and indices are stored in single buffers, so we only need to bind once
VkDeviceSize offsets[1] = { 0 };
vkCmdBindVertexBuffers(commandBuffer, 0, 1, &model.vertices.buffer, offsets);
vkCmdBindIndexBuffer(commandBuffer, model.indices.buffer, 0, VK_INDEX_TYPE_UINT32);
for (auto& node : model.nodes) {
drawglTFNode(commandBuffer, node);
}
}
/*
Load images from the glTF file
Textures can be stored inside the glTF (which is the case for the sample model), so instead of directly
loading them from disk, we fetch them from the glTF loader and upload the buffers
*/
void loadglTFImages(tinygltf::Model& glTFModel)
{
model.images.resize(glTFModel.images.size());
for (size_t i = 0; i < glTFModel.images.size(); i++) {
tinygltf::Image& glTFImage = glTFModel.images[i];
// Get the image data from the glTF loader
unsigned char* buffer = nullptr;
VkDeviceSize bufferSize = 0;
bool deleteBuffer = false;
// We convert RGB-only images to RGBA, as most devices don't support RGB-formats in Vulkan
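// (VK_FORMAT_R8G8B8A8_UNORM is a mandatory format for sampled images in Vulkan, while three-component RGB formats are optional and often unavailable)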
if (glTFImage.component == 3) {
bufferSize = glTFImage.width * glTFImage.height * 4;
buffer = new unsigned char[bufferSize];
unsigned char* rgba = buffer;
unsigned char* rgb = &glTFImage.image[0];
for (size_t p = 0; p < static_cast<size_t>(glTFImage.width) * glTFImage.height; ++p) {
for (int32_t j = 0; j < 3; ++j) {
rgba[j] = rgb[j];
}
// The source image has no alpha channel, so make the converted pixels fully opaque
rgba[3] = 255;
rgba += 4;
rgb += 3;
}
deleteBuffer = true;
}
else {
buffer = &glTFImage.image[0];
bufferSize = glTFImage.image.size();
}
// Load texture from image buffer
model.images[i].texture.fromBuffer(buffer, bufferSize, VK_FORMAT_R8G8B8A8_UNORM, glTFImage.width, glTFImage.height, vulkanDevice, queue);
// Free the temporary RGBA buffer if we allocated one for the conversion above
if (deleteBuffer) {
delete[] buffer;
}
}
}
/*
Load texture information
Each texture stores the index of the image it sources; materials reference textures, which in turn reference images
*/
void loadglTFTextures(tinygltf::Model& glTFModel)
{
model.textures.resize(glTFModel.textures.size());
for (size_t i = 0; i < glTFModel.textures.size(); i++) {
model.textures[i] = glTFModel.textures[i].source;
}
}
/*
Load Materials from the glTF file
Materials contain basic properties like colors and references to the textures used by that material
We only read the most basic properties required for our sample
*/
void loadglTFMaterials(const tinygltf::Model& glTFModel)
{
model.materials.resize(glTFModel.materials.size());
for (size_t i = 0; i < glTFModel.materials.size(); i++) {
tinygltf::Material glTFMaterial = glTFModel.materials[i];
// Get the base color factor
if (glTFMaterial.values.find("baseColorFactor") != glTFMaterial.values.end()) {
model.materials[i].baseColorFactor = glm::make_vec4(glTFMaterial.values["baseColorFactor"].ColorFactor().data());
}
// Get base color texture index
if (glTFMaterial.values.find("baseColorTexture") != glTFMaterial.values.end()) {
model.materials[i].baseColorTextureIndex = glTFMaterial.values["baseColorTexture"].TextureIndex();
}
}
}
// Load a single glTF node and, recursively, its children
// glTF scenes are made up of a hierarchy of nodes that may contain mesh data
// This is a simplified loader: the node tree and local matrices are read, but nested node matrices are not applied during rendering
void loadglTFNode(ModelNode* parent, const tinygltf::Node& glTFNode, const tinygltf::Model& glTFModel, std::vector<uint32_t>& indexBuffer, std::vector<Vertex>& vertexBuffer)
{
ModelNode node{};
node.matrix = glm::mat4(1.0f);
// Get the local node matrix
// It's either made up from translation, rotation, scale or a 4x4 matrix
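// Per the glTF 2.0 spec the local transform is composed as M = T * R * S; if a node supplies a matrix property, that matrix is used instead of the TRS values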
if (glTFNode.translation.size() == 3) {
node.matrix = glm::translate(node.matrix, glm::vec3(glm::make_vec3(glTFNode.translation.data())));
}
if (glTFNode.rotation.size() == 4) {
glm::quat q = glm::make_quat(glTFNode.rotation.data());
node.matrix *= glm::mat4(q);
}
if (glTFNode.scale.size() == 3) {
node.matrix = glm::scale(node.matrix, glm::vec3(glm::make_vec3(glTFNode.scale.data())));
}
if (glTFNode.matrix.size() == 16) {
node.matrix = glm::make_mat4x4(glTFNode.matrix.data());
}
// Load node's children
if (glTFNode.children.size() > 0) {
for (size_t i = 0; i < glTFNode.children.size(); i++) {
loadglTFNode(&node, glTFModel.nodes[glTFNode.children[i]], glTFModel, indexBuffer, vertexBuffer);
}
}
// If the node contains mesh data, we load vertices and indices from the buffers
// In glTF this is done via accessors and buffer views
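// An accessor describes how to read typed data (element count, component type, byte offset) from a bufferView,
// and a bufferView in turn references a byte range inside one of the glTF file's raw buffers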
if (glTFNode.mesh > -1) {
const tinygltf::Mesh mesh = glTFModel.meshes[glTFNode.mesh];
// Iterate through all primitives of this node's mesh
for (size_t i = 0; i < mesh.primitives.size(); i++) {
const tinygltf::Primitive& glTFPrimitive = mesh.primitives[i];
uint32_t firstIndex = static_cast<uint32_t>(indexBuffer.size());
uint32_t vertexStart = static_cast<uint32_t>(vertexBuffer.size());
uint32_t indexCount = 0;
// Vertices
{
const float* positionBuffer = nullptr;
const float* normalsBuffer = nullptr;
const float* texCoordsBuffer = nullptr;
size_t vertexCount = 0;
// Get buffer data for vertex positions
if (glTFPrimitive.attributes.find("POSITION") != glTFPrimitive.attributes.end()) {
const tinygltf::Accessor& accessor = glTFModel.accessors[glTFPrimitive.attributes.find("POSITION")->second];
const tinygltf::BufferView& view = glTFModel.bufferViews[accessor.bufferView];
positionBuffer = reinterpret_cast<const float*>(&(glTFModel.buffers[view.buffer].data[accessor.byteOffset + view.byteOffset]));
vertexCount = accessor.count;
}
// Get buffer data for vertex normals
if (glTFPrimitive.attributes.find("NORMAL") != glTFPrimitive.attributes.end()) {
const tinygltf::Accessor& accessor = glTFModel.accessors[glTFPrimitive.attributes.find("NORMAL")->second];
const tinygltf::BufferView& view = glTFModel.bufferViews[accessor.bufferView];
normalsBuffer = reinterpret_cast<const float*>(&(glTFModel.buffers[view.buffer].data[accessor.byteOffset + view.byteOffset]));
}
// Get buffer data for vertex texture coordinates
// glTF supports multiple sets, we only load the first one
if (glTFPrimitive.attributes.find("TEXCOORD_0") != glTFPrimitive.attributes.end()) {
const tinygltf::Accessor& accessor = glTFModel.accessors[glTFPrimitive.attributes.find("TEXCOORD_0")->second];
const tinygltf::BufferView& view = glTFModel.bufferViews[accessor.bufferView];
texCoordsBuffer = reinterpret_cast<const float*>(&(glTFModel.buffers[view.buffer].data[accessor.byteOffset + view.byteOffset]));
}
// Append data to model's vertex buffer
for (size_t v = 0; v < vertexCount; v++) {
Vertex vert{};
vert.pos = glm::vec4(glm::make_vec3(&positionBuffer[v * 3]), 1.0f);
vert.normal = glm::normalize(glm::vec3(normalsBuffer ? glm::make_vec3(&normalsBuffer[v * 3]) : glm::vec3(0.0f)));
vert.uv = texCoordsBuffer ? glm::make_vec2(&texCoordsBuffer[v * 2]) : glm::vec2(0.0f);
vert.color = glm::vec3(1.0f);
// Flip Y-Axis
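// glTF uses a right-handed coordinate system with +Y pointing up; this sample compensates while loading instead of flipping in the shader or the projection matrix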
vert.pos.y *= -1.0f;
vertexBuffer.push_back(vert);
}
}
// Indices
{
const tinygltf::Accessor& accessor = glTFModel.accessors[glTFPrimitive.indices];
const tinygltf::BufferView& bufferView = glTFModel.bufferViews[accessor.bufferView];
const tinygltf::Buffer& buffer = glTFModel.buffers[bufferView.buffer];
indexCount += static_cast<uint32_t>(accessor.count);
// glTF supports different component types of indices
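// The component type is a GL enum: UNSIGNED_INT (5125), UNSIGNED_SHORT (5123) or UNSIGNED_BYTE (5121); all are widened to 32-bit indices here so a single index buffer can be used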
switch (accessor.componentType) {
case TINYGLTF_PARAMETER_TYPE_UNSIGNED_INT: {
uint32_t* buf = new uint32_t[accessor.count];
memcpy(buf, &buffer.data[accessor.byteOffset + bufferView.byteOffset], accessor.count * sizeof(uint32_t));
for (size_t index = 0; index < accessor.count; index++) {
indexBuffer.push_back(buf[index] + vertexStart);
}
delete[] buf;
break;
}
case TINYGLTF_PARAMETER_TYPE_UNSIGNED_SHORT: {
uint16_t* buf = new uint16_t[accessor.count];
memcpy(buf, &buffer.data[accessor.byteOffset + bufferView.byteOffset], accessor.count * sizeof(uint16_t));
for (size_t index = 0; index < accessor.count; index++) {
indexBuffer.push_back(buf[index] + vertexStart);
}
delete[] buf;
break;
}
case TINYGLTF_PARAMETER_TYPE_UNSIGNED_BYTE: {
uint8_t* buf = new uint8_t[accessor.count];
memcpy(buf, &buffer.data[accessor.byteOffset + bufferView.byteOffset], accessor.count * sizeof(uint8_t));
for (size_t index = 0; index < accessor.count; index++) {
indexBuffer.push_back(buf[index] + vertexStart);
}
delete[] buf;
break;
}
default:
std::cerr << "Index component type " << accessor.componentType << " not supported!" << std::endl;
return;
}
}
Primitive primitive{};
primitive.firstIndex = firstIndex;
primitive.indexCount = indexCount;
primitive.materialIndex = glTFPrimitive.material;
node.mesh.primitives.push_back(primitive);
}
}
if (parent) {
parent->children.push_back(node);
}
else {
model.nodes.push_back(node);
}
}
// Load a glTF file and create the Vulkan buffers and images required to render it
void loadglTF(std::string filename)
{
tinygltf::Model gltfModel;
tinygltf::TinyGLTF gltfContext;
std::string error, warning;
#if defined(__ANDROID__)
// On Android all assets are packed with the apk in a compressed form, so we need to open them using the asset manager
AAsset* asset = AAssetManager_open(androidApp->activity->assetManager, filename.c_str(), AASSET_MODE_STREAMING);
assert(asset);
size_t size = AAsset_getLength(asset);
assert(size > 0);
char* fileData = new char[size];
AAsset_read(asset, fileData, size);
AAsset_close(asset);
std::string baseDir;
bool fileLoaded = gltfContext.LoadASCIIFromString(&gltfModel, &error, &warning, fileData, size, baseDir);
delete[] fileData;
#else
bool fileLoaded = gltfContext.LoadASCIIFromFile(&gltfModel, &error, &warning, filename);
#endif
std::vector<uint32_t> indexBuffer;
std::vector<Vertex> vertexBuffer;
if (fileLoaded) {
loadglTFImages(gltfModel);
loadglTFMaterials(gltfModel);
loadglTFTextures(gltfModel);
const tinygltf::Scene& scene = gltfModel.scenes[0];
for (size_t i = 0; i < scene.nodes.size(); i++) {
const tinygltf::Node node = gltfModel.nodes[scene.nodes[i]];
loadglTFNode(nullptr, node, gltfModel, indexBuffer, vertexBuffer);
}
}
else {
// TODO: throw
std::cerr << "Could not load gltf file: " << error << std::endl;
return;
}
size_t vertexBufferSize = vertexBuffer.size() * sizeof(Vertex);
size_t indexBufferSize = indexBuffer.size() * sizeof(uint32_t);
model.indices.count = static_cast<uint32_t>(indexBuffer.size());
//assert((vertexBufferSize > 0) && (indexBufferSize > 0));
struct StagingBuffer {
VkBuffer buffer;
VkDeviceMemory memory;
} vertexStaging, indexStaging;
// Create staging buffers
// Vertex data
VK_CHECK_RESULT(vulkanDevice->createBuffer(
VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
vertexBufferSize,
&vertexStaging.buffer,
&vertexStaging.memory,
vertexBuffer.data()));
// Index data
VK_CHECK_RESULT(vulkanDevice->createBuffer(
VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
indexBufferSize,
&indexStaging.buffer,
&indexStaging.memory,
indexBuffer.data()));
// Create device local buffers
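// Device local memory is usually not host visible, so the data is first written to the host visible staging buffers above and then copied to the device local buffers with a command buffer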
// Vertex buffer
VK_CHECK_RESULT(vulkanDevice->createBuffer(
VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT,
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
vertexBufferSize,
&model.vertices.buffer,
&model.vertices.memory));
// Index buffer
VK_CHECK_RESULT(vulkanDevice->createBuffer(
VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT,
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
indexBufferSize,
&model.indices.buffer,
&model.indices.memory));
// Copy data from the staging buffers (host) to the device local buffers (GPU)
VkCommandBuffer copyCmd = VulkanExampleBase::createCommandBuffer(VK_COMMAND_BUFFER_LEVEL_PRIMARY, true);
VkBufferCopy copyRegion = {};
copyRegion.size = vertexBufferSize;
vkCmdCopyBuffer(
copyCmd,
vertexStaging.buffer,
model.vertices.buffer,
1,
&copyRegion);
copyRegion.size = indexBufferSize;
vkCmdCopyBuffer(
copyCmd,
indexStaging.buffer,
model.indices.buffer,
1,
&copyRegion);
VulkanExampleBase::flushCommandBuffer(copyCmd, queue, true);
vkDestroyBuffer(device, vertexStaging.buffer, nullptr);
vkFreeMemory(device, vertexStaging.memory, nullptr);
vkDestroyBuffer(device, indexStaging.buffer, nullptr);
vkFreeMemory(device, indexStaging.memory, nullptr);
}
void loadAssets()
{
loadglTF(getAssetPath() + "models/voyager/voyager.gltf");
}
void setupDescriptors()
{
/*
This sample uses separate descriptor sets (and layouts) for the matrices and materials (textures)
*/
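// Splitting them means the scene matrices (set 0) only have to be bound once per command buffer, while the texture set (set 1) can be switched per primitive in drawglTFNode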
std::vector<VkDescriptorPoolSize> poolSizes = {
vks::initializers::descriptorPoolSize(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1),
// One combined image sampler per model image/texture
vks::initializers::descriptorPoolSize(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, static_cast<uint32_t>(model.images.size())),
};
// One set for matrices and one per model image/texture
const uint32_t maxSetCount = static_cast<uint32_t>(model.images.size()) + 1;
VkDescriptorPoolCreateInfo descriptorPoolInfo = vks::initializers::descriptorPoolCreateInfo(poolSizes, maxSetCount);
VK_CHECK_RESULT(vkCreateDescriptorPool(device, &descriptorPoolInfo, nullptr, &descriptorPool));
// Descriptor set layout for passing matrices
VkDescriptorSetLayoutBinding setLayoutBinding = vks::initializers::descriptorSetLayoutBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_SHADER_STAGE_VERTEX_BIT, 0);
VkDescriptorSetLayoutCreateInfo descriptorSetLayoutCI = vks::initializers::descriptorSetLayoutCreateInfo(&setLayoutBinding, 1);
VK_CHECK_RESULT(vkCreateDescriptorSetLayout(device, &descriptorSetLayoutCI, nullptr, &descriptorSetLayouts.matrices));
// Descriptor set layout for passing material textures
setLayoutBinding = vks::initializers::descriptorSetLayoutBinding(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, VK_SHADER_STAGE_FRAGMENT_BIT, 0);
VK_CHECK_RESULT(vkCreateDescriptorSetLayout(device, &descriptorSetLayoutCI, nullptr, &descriptorSetLayouts.textures));
// Pipeline layout using both descriptor sets (set 0 = matrices, set 1 = material)
std::array<VkDescriptorSetLayout, 2> setLayouts = { descriptorSetLayouts.matrices, descriptorSetLayouts.textures };
VkPipelineLayoutCreateInfo pipelineLayoutCI= vks::initializers::pipelineLayoutCreateInfo(setLayouts.data(), static_cast<uint32_t>(setLayouts.size()));
VK_CHECK_RESULT(vkCreatePipelineLayout(device, &pipelineLayoutCI, nullptr, &pipelineLayout));
// Descriptor set for scene matrices
VkDescriptorSetAllocateInfo allocInfo = vks::initializers::descriptorSetAllocateInfo(descriptorPool, &descriptorSetLayouts.matrices, 1);
VK_CHECK_RESULT(vkAllocateDescriptorSets(device, &allocInfo, &descriptorSet));
VkWriteDescriptorSet writeDescriptorSet = vks::initializers::writeDescriptorSet(descriptorSet, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 0, &uniformBuffers.scene.descriptor);
vkUpdateDescriptorSets(device, 1, &writeDescriptorSet, 0, nullptr);
// Descriptor sets for materials
for (auto& image : model.images) {
const VkDescriptorSetAllocateInfo allocInfo = vks::initializers::descriptorSetAllocateInfo(descriptorPool, &descriptorSetLayouts.textures, 1);
VK_CHECK_RESULT(vkAllocateDescriptorSets(device, &allocInfo, &image.descriptorSet));
VkWriteDescriptorSet writeDescriptorSet = vks::initializers::writeDescriptorSet(image.descriptorSet, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 0, &image.texture.descriptor);
vkUpdateDescriptorSets(device, 1, &writeDescriptorSet, 0, nullptr);
}
}
void preparePipelines()
{
VkPipelineInputAssemblyStateCreateInfo inputAssemblyStateCI = vks::initializers::pipelineInputAssemblyStateCreateInfo(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 0, VK_FALSE);
VkPipelineRasterizationStateCreateInfo rasterizationStateCI = vks::initializers::pipelineRasterizationStateCreateInfo(VK_POLYGON_MODE_FILL, VK_CULL_MODE_BACK_BIT, VK_FRONT_FACE_COUNTER_CLOCKWISE, 0);
VkPipelineColorBlendAttachmentState blendAttachmentStateCI = vks::initializers::pipelineColorBlendAttachmentState(0xf, VK_FALSE);
VkPipelineColorBlendStateCreateInfo colorBlendStateCI = vks::initializers::pipelineColorBlendStateCreateInfo(1, &blendAttachmentStateCI);
VkPipelineDepthStencilStateCreateInfo depthStencilStateCI = vks::initializers::pipelineDepthStencilStateCreateInfo(VK_TRUE, VK_TRUE, VK_COMPARE_OP_LESS_OR_EQUAL);
VkPipelineViewportStateCreateInfo viewportStateCI = vks::initializers::pipelineViewportStateCreateInfo(1, 1, 0);
VkPipelineMultisampleStateCreateInfo multisampleStateCI = vks::initializers::pipelineMultisampleStateCreateInfo(VK_SAMPLE_COUNT_1_BIT, 0);
const std::vector<VkDynamicState> dynamicStateEnables = { VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR };
VkPipelineDynamicStateCreateInfo dynamicStateCI = vks::initializers::pipelineDynamicStateCreateInfo(dynamicStateEnables.data(), static_cast<uint32_t>(dynamicStateEnables.size()), 0);
// Vertex input bindings and attributes
const std::vector<VkVertexInputBindingDescription> vertexInputBindings = {
vks::initializers::vertexInputBindingDescription(0, sizeof(Vertex), VK_VERTEX_INPUT_RATE_VERTEX),
};
const std::vector<VkVertexInputAttributeDescription> vertexInputAttributes = {
vks::initializers::vertexInputAttributeDescription(0, 0, VK_FORMAT_R32G32B32_SFLOAT, offsetof(Vertex, pos)), // Location 0: Position
vks::initializers::vertexInputAttributeDescription(0, 1, VK_FORMAT_R32G32B32_SFLOAT, offsetof(Vertex, normal)), // Location 1: Normal
vks::initializers::vertexInputAttributeDescription(0, 2, VK_FORMAT_R32G32_SFLOAT, offsetof(Vertex, uv)), // Location 2: Texture coordinates (vec2)
vks::initializers::vertexInputAttributeDescription(0, 3, VK_FORMAT_R32G32B32_SFLOAT, offsetof(Vertex, color)), // Location 3: Color
};
VkPipelineVertexInputStateCreateInfo vertexInputStateCI = vks::initializers::pipelineVertexInputStateCreateInfo();
vertexInputStateCI.vertexBindingDescriptionCount = static_cast<uint32_t>(vertexInputBindings.size());
vertexInputStateCI.pVertexBindingDescriptions = vertexInputBindings.data();
vertexInputStateCI.vertexAttributeDescriptionCount = static_cast<uint32_t>(vertexInputAttributes.size());
vertexInputStateCI.pVertexAttributeDescriptions = vertexInputAttributes.data();
const std::array<VkPipelineShaderStageCreateInfo, 2> shaderStages = {
loadShader(getAssetPath() + "shaders/mesh/mesh.vert.spv", VK_SHADER_STAGE_VERTEX_BIT),
loadShader(getAssetPath() + "shaders/mesh/mesh.frag.spv", VK_SHADER_STAGE_FRAGMENT_BIT)
};
VkGraphicsPipelineCreateInfo pipelineCI = vks::initializers::pipelineCreateInfo(pipelineLayout, renderPass, 0);
pipelineCI.pVertexInputState = &vertexInputStateCI;
pipelineCI.pInputAssemblyState = &inputAssemblyStateCI;
pipelineCI.pRasterizationState = &rasterizationStateCI;
pipelineCI.pColorBlendState = &colorBlendStateCI;
pipelineCI.pMultisampleState = &multisampleStateCI;
pipelineCI.pViewportState = &viewportStateCI;
pipelineCI.pDepthStencilState = &depthStencilStateCI;
pipelineCI.pDynamicState = &dynamicStateCI;
pipelineCI.stageCount = static_cast<uint32_t>(shaderStages.size());
pipelineCI.pStages = shaderStages.data();
// Solid rendering pipeline
VK_CHECK_RESULT(vkCreateGraphicsPipelines(device, pipelineCache, 1, &pipelineCI, nullptr, &pipelines.solid));
// Wire frame rendering pipeline
if (deviceFeatures.fillModeNonSolid) {
rasterizationStateCI.polygonMode = VK_POLYGON_MODE_LINE;
rasterizationStateCI.lineWidth = 1.0f;
VK_CHECK_RESULT(vkCreateGraphicsPipelines(device, pipelineCache, 1, &pipelineCI, nullptr, &pipelines.wireframe));
}
}
// Prepare and initialize uniform buffer containing shader uniforms
void prepareUniformBuffers()
{
// Vertex shader uniform buffer block
VK_CHECK_RESULT(vulkanDevice->createBuffer(
VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
&uniformBuffers.scene,
sizeof(uboVS)));
// Map persistent
VK_CHECK_RESULT(uniformBuffers.scene.map());
updateUniformBuffers();
}
void updateUniformBuffers()
{
uboVS.projection = glm::perspective(glm::radians(60.0f), (float)width / (float)height, 0.1f, 256.0f);
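// GLM_FORCE_DEPTH_ZERO_TO_ONE (defined at the top of this file) makes glm::perspective use the 0..1 clip space depth range that Vulkan expects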
glm::mat4 viewMatrix = glm::translate(glm::mat4(1.0f), glm::vec3(0.0f, 0.0f, zoom));
uboVS.model = viewMatrix * glm::translate(glm::mat4(1.0f), cameraPos);
uboVS.model = glm::rotate(uboVS.model, glm::radians(rotation.x), glm::vec3(1.0f, 0.0f, 0.0f));
uboVS.model = glm::rotate(uboVS.model, glm::radians(rotation.y), glm::vec3(0.0f, 1.0f, 0.0f));
uboVS.model = glm::rotate(uboVS.model, glm::radians(rotation.z), glm::vec3(0.0f, 0.0f, 1.0f));
memcpy(uniformBuffers.scene.mapped, &uboVS, sizeof(uboVS));
}
void draw()
{
VulkanExampleBase::prepareFrame();
// Command buffer to be submitted to the queue
submitInfo.commandBufferCount = 1;
submitInfo.pCommandBuffers = &drawCmdBuffers[currentBuffer];
// Submit to queue
VK_CHECK_RESULT(vkQueueSubmit(queue, 1, &submitInfo, VK_NULL_HANDLE));
VulkanExampleBase::submitFrame();
}
void prepare()
{
VulkanExampleBase::prepare();
loadAssets();
prepareUniformBuffers();
setupDescriptors();
preparePipelines();
buildCommandBuffers();
prepared = true;
}
virtual void render()
{
if (!prepared)
return;
draw();
if (camera.updated)
updateUniformBuffers();
}
virtual void OnUpdateUIOverlay(vks::UIOverlay *overlay)
{
if (overlay->header("Settings")) {
if (overlay->checkBox("Wireframe", &wireframe)) {
buildCommandBuffers();
}
}
}
};
VULKAN_EXAMPLE_MAIN()