Merge branch 'master' into imgui_162

This commit is contained in:
saschawillems 2018-09-07 13:44:01 +02:00
commit d39738f39e
13 changed files with 26675 additions and 239 deletions

View file

@@ -1,7 +1,25 @@
language: cpp
language: generic
sudo: required
os: linux
dist: trusty
matrix:
include:
- os: linux
env: COMPILER_NAME=gcc CXX=g++-7 CC=gcc-7
addons:
apt:
packages:
- g++-7
sources:
- ubuntu-toolchain-r-test
- os: linux
env: COMPILER_NAME=clang CXX=clang++-5.0 CC=clang-5.0
addons:
apt:
packages:
- clang-5.0
sources:
- llvm-toolchain-trusty-5.0
compiler:
- clang
- gcc

View file

@@ -11,6 +11,7 @@ include_directories(external/glm)
include_directories(external/gli)
include_directories(external/assimp)
include_directories(external/imgui)
include_directories(external/tinygltf)
include_directories(base)
OPTION(USE_D2D_WSI "Build the project using Direct to Display swapchain" OFF)

1264
base/VulkanglTFModel.hpp Normal file

File diff suppressed because it is too large.

View file

@@ -4,16 +4,19 @@ layout (location = 0) in vec3 inPos;
layout (location = 1) in vec3 inNormal;
layout (location = 2) in vec3 inColor;
layout (set = 0, binding = 0) uniform UBO
{
layout (set = 0, binding = 0) uniform UBO {
mat4 projection;
mat4 view;
mat4 model;
} ubo;
layout (set = 1, binding = 0) uniform Node {
mat4 matrix;
} node;
layout(push_constant) uniform PushBlock {
vec4 offset;
vec4 color;
} pushBlock;
vec4 baseColorFactor;
} material;
layout (location = 0) out vec3 outNormal;
layout (location = 1) out vec3 outColor;
@ -28,14 +31,14 @@ out gl_PerVertex
void main()
{
outNormal = inNormal;
outColor = inColor * pushBlock.color.rgb;
vec4 pos = vec4(inPos + pushBlock.offset.xyz, 1.0);
gl_Position = ubo.projection * ubo.model * pos;
outColor = material.baseColorFactor.rgb;
vec4 pos = vec4(inPos, 1.0);
gl_Position = ubo.projection * ubo.view * ubo.model * node.matrix * pos;
outNormal = mat3(ubo.model) * inNormal;
outNormal = mat3(ubo.view * ubo.model * node.matrix) * inNormal;
vec4 localpos = ubo.model * pos;
vec3 lightPos = vec3(1.0f, -1.0f, 1.0f);
vec4 localpos = ubo.view * ubo.model * node.matrix * pos;
vec3 lightPos = vec3(10.0f, -10.0f, 10.0f);
outLightVec = lightPos.xyz - localpos.xyz;
outViewVec = -localpos.xyz;
}

View file

@@ -47,6 +47,7 @@ set(EXAMPLES
computenbody
computeparticles
computeshader
conditionalrender
conservativeraster
debugmarker
deferred

View file

@@ -3,8 +3,8 @@
*
* Note: Requires a device that supports the VK_EXT_conditional_rendering extension
*
* With conditional rendering it's possible to execute certain rendering commands based
* on a buffer value instead of having to rebuild the command buffers.
* With conditional rendering it's possible to execute certain rendering commands based on a buffer value instead of having to rebuild the command buffers.
* This example sets up a conditional buffer with one value per glTF part that is used to toggle the visibility of individual model parts.
*
* Copyright (C) 2018 by Sascha Willems - www.saschawillems.de
*
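The sample records the command buffers once and wraps every node's draw call in a conditional block that reads a 32-bit value from a host-visible buffer, so toggling a part is just a CPU-side write. A minimal sketch of that toggle path under the same assumptions (persistently mapped, host-coherent buffer; the helper name is made up, the example itself does the equivalent memcpy in updateConditionalBuffer):

```c++
#include <cstdint>
#include <cstring>
#include <vector>

// Hypothetical helper: flip one node's visibility and re-upload the array.
// "mapped" points at the persistently mapped conditional buffer; the memory is
// host visible and coherent, so a plain memcpy is enough and the recorded
// command buffers never have to be rebuilt.
void setNodeVisibility(std::vector<int32_t>& visibility, void* mapped,
                       uint32_t nodeIndex, bool visible)
{
	visibility[nodeIndex] = visible ? 1 : 0;
	std::memcpy(mapped, visibility.data(), sizeof(int32_t) * visibility.size());
}
```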
@@ -24,35 +24,27 @@
#include <vulkan/vulkan.h>
#include "vulkanexamplebase.h"
#include "VulkanModel.hpp"
#include "VulkanglTFModel.hpp"
#define ENABLE_VALIDATION false
#define MODEL_ROWS 3
class VulkanExample : public VulkanExampleBase
{
public:
PFN_vkCmdBeginConditionalRenderingEXT vkCmdBeginConditionalRenderingEXT;
PFN_vkCmdEndConditionalRenderingEXT vkCmdEndConditionalRenderingEXT;
VkPhysicalDeviceConditionalRenderingFeaturesEXT conditionalRenderingFeatures{};
// Vertex layout for the models
vks::VertexLayout vertexLayout = vks::VertexLayout({
vks::VERTEX_COMPONENT_POSITION,
vks::VERTEX_COMPONENT_NORMAL,
vks::VERTEX_COMPONENT_COLOR,
});
vks::Model model;
vkglTF::Model scene;
struct {
glm::mat4 projection;
glm::mat4 modelview;
glm::mat4 view;
glm::mat4 model;
} uboVS;
vks::Buffer uniformBuffer;
std::array<int32_t, MODEL_ROWS> conditionalVisibility{};
std::vector<int32_t> conditionalVisibility;
vks::Buffer conditionalBuffer;
VkPipelineLayout pipelineLayout;
@@ -65,47 +57,75 @@ public:
title = "Conditional rendering";
settings.overlay = true;
camera.type = Camera::CameraType::lookat;
camera.setPerspective(60.0f, (float)width / (float)height, 0.1f, 512.0f);
camera.setRotation(glm::vec3(0.0f, 0.0f, 0.0f));
camera.setTranslation(glm::vec3(0.0f, 0.0f, -15.0f));
rotationSpeed *= 0.25f;
camera.setPerspective(45.0f, (float)width / (float)height, 0.1f, 512.0f);
camera.setRotation(glm::vec3(-2.25f, -52.0f, 0.0f));
camera.setTranslation(glm::vec3(1.9f, -2.05f, -18.0f));
camera.rotationSpeed *= 0.25f;
// Enable extension required for conditional rendering
/*
[POI] Enable extension required for conditional rendering
*/
enabledDeviceExtensions.push_back(VK_EXT_CONDITIONAL_RENDERING_EXTENSION_NAME);
// Enable extension required to get conditional rendering supported features
enabledInstanceExtensions.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
}
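VK_EXT_conditional_rendering is optional, so a robust application would check that the physical device actually advertises it before pushing it onto enabledDeviceExtensions. A small sketch using only core Vulkan enumeration calls; the helper name is made up for illustration:

```c++
#include <cstring>
#include <vector>
#include <vulkan/vulkan.h>

// Returns true if the physical device advertises the given device extension.
bool deviceExtensionSupported(VkPhysicalDevice physicalDevice, const char* extensionName)
{
	uint32_t count = 0;
	vkEnumerateDeviceExtensionProperties(physicalDevice, nullptr, &count, nullptr);
	std::vector<VkExtensionProperties> extensions(count);
	vkEnumerateDeviceExtensionProperties(physicalDevice, nullptr, &count, extensions.data());
	for (const VkExtensionProperties& ext : extensions) {
		if (std::strcmp(ext.extensionName, extensionName) == 0) {
			return true;
		}
	}
	return false;
}

// Usage sketch: only request the extension if it is actually available
// if (deviceExtensionSupported(physicalDevice, VK_EXT_CONDITIONAL_RENDERING_EXTENSION_NAME)) { ... }
```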
~VulkanExample()
{
// Clean up used Vulkan resources
// Note : Inherited destructor cleans up resources stored in base class
vkDestroyPipeline(device, pipeline, nullptr);
vkDestroyPipelineLayout(device, pipelineLayout, nullptr);
vkDestroyDescriptorSetLayout(device, descriptorSetLayout, nullptr);
model.destroy();
uniformBuffer.destroy();
conditionalBuffer.destroy();
}
// Enable physical device features required for this example
virtual void getEnabledFeatures()
{
// Geometry shader support is required for this example
if (deviceFeatures.geometryShader) {
enabledFeatures.geometryShader = VK_TRUE;
void renderNode(vkglTF::Node *node, VkCommandBuffer commandBuffer) {
if (node->mesh) {
for (vkglTF::Primitive * primitive : node->mesh->primitives) {
const std::vector<VkDescriptorSet> descriptorsets = {
descriptorSet,
node->mesh->uniformBuffer.descriptorSet
};
vkCmdBindDescriptorSets(commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineLayout, 0, static_cast<uint32_t>(descriptorsets.size()), descriptorsets.data(), 0, NULL);
struct PushBlock {
glm::vec4 baseColorFactor;
} pushBlock;
pushBlock.baseColorFactor = primitive->material.baseColorFactor;
vkCmdPushConstants(commandBuffer, pipelineLayout, VK_SHADER_STAGE_VERTEX_BIT, 0, sizeof(PushBlock), &pushBlock);
/*
[POI] Setup the conditional rendering
*/
VkConditionalRenderingBeginInfoEXT conditionalRenderingBeginInfo{};
conditionalRenderingBeginInfo.sType = VK_STRUCTURE_TYPE_CONDITIONAL_RENDERING_BEGIN_INFO_EXT;
conditionalRenderingBeginInfo.buffer = conditionalBuffer.buffer;
conditionalRenderingBeginInfo.offset = sizeof(int32_t) * node->index;
/*
[POI] Begin conditionally rendered section
If the value from the conditional rendering buffer at the given offset is != 0, the draw commands will be executed
*/
vkCmdBeginConditionalRenderingEXT(commandBuffer, &conditionalRenderingBeginInfo);
vkCmdDrawIndexed(commandBuffer, primitive->indexCount, 1, primitive->firstIndex, 0, 0);
vkCmdEndConditionalRenderingEXT(commandBuffer);
}
else {
vks::tools::exitFatal("Selected GPU does not support geometry shaders!", VK_ERROR_FEATURE_NOT_PRESENT);
};
for (auto child : node->children) {
renderNode(child, commandBuffer);
}
}
void buildCommandBuffers()
{
VkCommandBufferBeginInfo cmdBufInfo = vks::initializers::commandBufferBeginInfo();
VkClearValue clearValues[2];
clearValues[0].color = { { 0.0f, 0.0f, 0.0f, 0.0f } };
clearValues[0].color = { { 1.0f, 1.0f, 1.0f, 1.0f } };
clearValues[1].depthStencil = { 1.0f, 0 };
VkRenderPassBeginInfo renderPassBeginInfo = vks::initializers::renderPassBeginInfo();
@@ -117,9 +137,7 @@ public:
renderPassBeginInfo.clearValueCount = 2;
renderPassBeginInfo.pClearValues = clearValues;
for (int32_t i = 0; i < drawCmdBuffers.size(); ++i)
{
// Set target frame buffer
for (int32_t i = 0; i < drawCmdBuffers.size(); ++i) {
renderPassBeginInfo.framebuffer = frameBuffers[i];
VK_CHECK_RESULT(vkBeginCommandBuffer(drawCmdBuffers[i], &cmdBufInfo));
@@ -133,49 +151,13 @@ public:
vkCmdBindDescriptorSets(drawCmdBuffers[i], VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineLayout, 0, 1, &descriptorSet, 0, NULL);
VkDeviceSize offsets[1] = { 0 };
vkCmdBindVertexBuffers(drawCmdBuffers[i], 0, 1, &model.vertices.buffer, offsets);
vkCmdBindIndexBuffer(drawCmdBuffers[i], model.indices.buffer, 0, VK_INDEX_TYPE_UINT32);
struct PushBlock {
glm::vec4 offset;
glm::vec4 color;
} pushBlock;
const std::array<glm::vec3, 3> colors = {
glm::vec3(1.0f, 0.0f, 0.0f),
glm::vec3(0.0f, 1.0f, 0.0f),
glm::vec3(0.0f, 0.0f, 1.0f),
};
/*
[POI] Setup the conditional rendering structure that decides whether the commands are rendered or discarded
*/
VkConditionalRenderingBeginInfoEXT conditionalRenderingBeginInfo{};
conditionalRenderingBeginInfo.sType = VK_STRUCTURE_TYPE_CONDITIONAL_RENDERING_BEGIN_INFO_EXT;
// If the value in this buffer at the given offset is zero, the commands are discarded
conditionalRenderingBeginInfo.buffer = conditionalBuffer.buffer;
// Offset will be changed in the loop below to toggle visibility of whole rows
conditionalRenderingBeginInfo.offset = 0;
vkCmdBindPipeline(drawCmdBuffers[i], VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
for (int32_t x = -1; x < MODEL_ROWS - 1; x++) {
for (int32_t y = -2; y < 3; y++) {
pushBlock.offset = glm::vec4((float)x * 3.0f, (float)y * 2.5f, 0.0f, 1.0f);
pushBlock.color = glm::vec4(colors[x+1], 1.0f);
/*
[POI] Start the conditionally rendered part (for this row)
*/
conditionalRenderingBeginInfo.offset = sizeof(uint32_t) * (x + 1);
vkCmdBeginConditionalRenderingEXT(drawCmdBuffers[i], &conditionalRenderingBeginInfo);
vkCmdPushConstants(drawCmdBuffers[i], pipelineLayout, VK_SHADER_STAGE_VERTEX_BIT, 0, sizeof(pushBlock), &pushBlock);
vkCmdDrawIndexed(drawCmdBuffers[i], model.indexCount, 1, 0, 0, 0);
vkCmdEndConditionalRenderingEXT(drawCmdBuffers[i]);
}
const VkDeviceSize offsets[1] = { 0 };
vkCmdBindVertexBuffers(drawCmdBuffers[i], 0, 1, &scene.vertices.buffer, offsets);
vkCmdBindIndexBuffer(drawCmdBuffers[i], scene.indices.buffer, 0, VK_INDEX_TYPE_UINT32);
for (auto node : scene.nodes) {
renderNode(node, drawCmdBuffers[i]);
}
vkCmdEndRenderPass(drawCmdBuffers[i]);
@@ -186,7 +168,7 @@ public:
void loadAssets()
{
model.loadFromFile(getAssetPath() + "models/suzanne.obj", vertexLayout, 0.1f, vulkanDevice, queue);
scene.loadFromFile(getAssetPath() + "models/gltf/glTF-Embedded/Buggy.gltf", vulkanDevice, queue);
}
void setupDescriptorSets()
@@ -206,7 +188,10 @@ public:
descriptorLayoutCI.pBindings = setLayoutBindings.data();
VK_CHECK_RESULT(vkCreateDescriptorSetLayout(device, &descriptorLayoutCI, nullptr, &descriptorSetLayout));
VkPipelineLayoutCreateInfo pipelineLayoutCI = vks::initializers::pipelineLayoutCreateInfo(&descriptorSetLayout, 1);
std::array<VkDescriptorSetLayout, 2> setLayouts = {
descriptorSetLayout, scene.descriptorSetLayout
};
VkPipelineLayoutCreateInfo pipelineLayoutCI = vks::initializers::pipelineLayoutCreateInfo(setLayouts.data(), 2);
VkPushConstantRange pushConstantRange = vks::initializers::pushConstantRange(VK_SHADER_STAGE_VERTEX_BIT, sizeof(glm::vec4) * 2, 0);
pipelineLayoutCI.pushConstantRangeCount = 1;
pipelineLayoutCI.pPushConstantRanges = &pushConstantRange;
@@ -225,7 +210,7 @@ public:
const std::vector<VkDynamicState> dynamicStateEnables = { VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR };
VkPipelineInputAssemblyStateCreateInfo inputAssemblyStateCI = vks::initializers::pipelineInputAssemblyStateCreateInfo(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 0, VK_FALSE);
VkPipelineRasterizationStateCreateInfo rasterizationStateCI = vks::initializers::pipelineRasterizationStateCreateInfo(VK_POLYGON_MODE_FILL, VK_CULL_MODE_BACK_BIT, VK_FRONT_FACE_CLOCKWISE, 0);
VkPipelineRasterizationStateCreateInfo rasterizationStateCI = vks::initializers::pipelineRasterizationStateCreateInfo(VK_POLYGON_MODE_FILL, VK_CULL_MODE_BACK_BIT, VK_FRONT_FACE_COUNTER_CLOCKWISE, 0);
VkPipelineColorBlendAttachmentState blendAttachmentState = vks::initializers::pipelineColorBlendAttachmentState(0xf, VK_FALSE);
VkPipelineColorBlendStateCreateInfo colorBlendStateCI = vks::initializers::pipelineColorBlendStateCreateInfo(1, &blendAttachmentState);
VkPipelineDepthStencilStateCreateInfo depthStencilStateCI = vks::initializers::pipelineDepthStencilStateCreateInfo(VK_TRUE, VK_TRUE, VK_COMPARE_OP_LESS_OR_EQUAL);
@@ -235,12 +220,12 @@ public:
// Vertex bindings and attributes
const std::vector<VkVertexInputBindingDescription> vertexInputBindings = {
vks::initializers::vertexInputBindingDescription(0, vertexLayout.stride(), VK_VERTEX_INPUT_RATE_VERTEX),
vks::initializers::vertexInputBindingDescription(0, sizeof(vkglTF::Model::Vertex), VK_VERTEX_INPUT_RATE_VERTEX),
};
const std::vector<VkVertexInputAttributeDescription> vertexInputAttributes = {
vks::initializers::vertexInputAttributeDescription(0, 0, VK_FORMAT_R32G32B32_SFLOAT, 0), // Location 0: Position
vks::initializers::vertexInputAttributeDescription(0, 1, VK_FORMAT_R32G32B32_SFLOAT, sizeof(float) * 3), // Location 1: Normal
vks::initializers::vertexInputAttributeDescription(0, 2, VK_FORMAT_R32G32B32_SFLOAT, sizeof(float) * 6), // Location 2: Color
vks::initializers::vertexInputAttributeDescription(0, 2, VK_FORMAT_R32G32_SFLOAT, sizeof(float) * 6), // Location 2: UV
};
VkPipelineVertexInputStateCreateInfo vertexInputState = vks::initializers::pipelineVertexInputStateCreateInfo();
vertexInputState.vertexBindingDescriptionCount = static_cast<uint32_t>(vertexInputBindings.size());
@@ -283,41 +268,26 @@ public:
void updateUniformBuffers()
{
uboVS.projection = camera.matrices.perspective;
uboVS.modelview = camera.matrices.view;
uboVS.view = glm::scale(camera.matrices.view, glm::vec3(0.1f , -0.1f, 0.1f));
uboVS.model = glm::translate(glm::mat4(1.0f), scene.dimensions.min);
memcpy(uniformBuffer.mapped, &uboVS, sizeof(uboVS));
}
void updateConditionalBuffer()
{
memcpy(conditionalBuffer.mapped, &conditionalVisibility, sizeof(conditionalVisibility));
memcpy(conditionalBuffer.mapped, conditionalVisibility.data(), sizeof(int32_t) * conditionalVisibility.size());
}
void draw()
{
VulkanExampleBase::prepareFrame();
// Command buffer to be submitted to the queue
submitInfo.commandBufferCount = 1;
submitInfo.pCommandBuffers = &drawCmdBuffers[currentBuffer];
// Submit to queue
VK_CHECK_RESULT(vkQueueSubmit(queue, 1, &submitInfo, VK_NULL_HANDLE));
VulkanExampleBase::submitFrame();
}
void prepare()
{
VulkanExampleBase::prepare();
/*
Extension specific functions
[POI] Extension specific setup
Gets the function pointers required for conditional rendering
Sets up a dedicated conditional buffer that is used to determine visibility at draw time
*/
void prepareConditionalRendering()
{
/*
Get the function pointer
The conditional rendering functions are part of an extension so they have to be manually loaded
The conditional rendering functions are part of an extension so they have to be loaded manually
*/
vkCmdBeginConditionalRenderingEXT = (PFN_vkCmdBeginConditionalRenderingEXT)vkGetDeviceProcAddr(device, "vkCmdBeginConditionalRenderingEXT");
if (!vkCmdBeginConditionalRenderingEXT) {
@@ -329,45 +299,46 @@ public:
vks::tools::exitFatal("Could not get a valid function pointer for vkCmdEndConditionalRenderingEXT", -1);
}
/*
Get conditional rendering features
*/
PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR = reinterpret_cast<PFN_vkGetPhysicalDeviceFeatures2KHR>(vkGetInstanceProcAddr(instance, "vkGetPhysicalDeviceFeatures2KHR"));
if (!vkGetPhysicalDeviceFeatures2KHR) {
vks::tools::exitFatal("Could not get a valid function pointer for vkGetPhysicalDeviceFeatures2KHR", -1);
}
VkPhysicalDeviceFeatures2KHR deviceFeatures2{};
conditionalRenderingFeatures.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT;
deviceFeatures2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2_KHR;
deviceFeatures2.pNext = &conditionalRenderingFeatures;
vkGetPhysicalDeviceFeatures2KHR(physicalDevice, &deviceFeatures2);
/*
Create the buffer that contains the conditional rendering information
A single conditional value is 32 bits and if it's zero the rendering commands are discarded
This sample conditionally renders the parts of a glTF scene, so we set up a buffer with one value per node
*/
conditionalVisibility.resize(scene.linearNodes.size());
VK_CHECK_RESULT(vulkanDevice->createBuffer(
VK_BUFFER_USAGE_CONDITIONAL_RENDERING_BIT_EXT,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
&conditionalBuffer,
sizeof(uint32_t) * MODEL_ROWS));
sizeof(int32_t) * conditionalVisibility.size(),
conditionalVisibility.data()));
VK_CHECK_RESULT(conditionalBuffer.map());
// By default, all parts of the glTF are visible
for (auto i = 0; i < conditionalVisibility.size(); i++) {
conditionalVisibility[i] = 1;
}
/*
Copy visibility data
*/
for (auto i = 0; i < conditionalVisibility.size(); i++) {
conditionalVisibility[i] = 1;
updateConditionalBuffer();
}
VK_CHECK_RESULT(conditionalBuffer.map());
memcpy(conditionalBuffer.mapped, &conditionalVisibility, sizeof(conditionalVisibility));
/*
End of extension specific functions
*/
void draw()
{
VulkanExampleBase::prepareFrame();
submitInfo.commandBufferCount = 1;
submitInfo.pCommandBuffers = &drawCmdBuffers[currentBuffer];
VK_CHECK_RESULT(vkQueueSubmit(queue, 1, &submitInfo, VK_NULL_HANDLE));
VulkanExampleBase::submitFrame();
}
void prepare()
{
VulkanExampleBase::prepare();
loadAssets();
prepareConditionalRendering();
prepareUniformBuffers();
setupDescriptorSets();
preparePipelines();
@@ -380,28 +351,42 @@ public:
if (!prepared)
return;
draw();
}
virtual void viewChanged()
{
if (camera.updated) {
updateUniformBuffers();
}
}
virtual void OnUpdateUIOverlay(vks::UIOverlay *overlay)
{
if (overlay->header("Visibility")) {
for (uint32_t i = 0; i < MODEL_ROWS; i++) {
if (overlay->checkBox(std::to_string(i).c_str(), &conditionalVisibility[i])) {
updateConditionalBuffer();
};
if (i < MODEL_ROWS - 1) { ImGui::SameLine(); };
}
}
if (overlay->header("Device properties")) {
overlay->text("conditional rendering: %s", conditionalRenderingFeatures.conditionalRendering ? "true" : "false");
overlay->text("inherited conditional rendering: %s", conditionalRenderingFeatures.inheritedConditionalRendering ? "true" : "false");
}
if (overlay->button("All")) {
for (auto i = 0; i < conditionalVisibility.size(); i++) {
conditionalVisibility[i] = 1;
}
updateConditionalBuffer();
}
ImGui::SameLine();
if (overlay->button("None")) {
for (auto i = 0; i < conditionalVisibility.size(); i++) {
conditionalVisibility[i] = 0;
}
updateConditionalBuffer();
}
ImGui::NewLine();
ImGui::BeginChild("InnerRegion", ImVec2(200.0f, 400.0f), false);
for (auto node : scene.linearNodes) {
// Add visibility toggle checkboxes for all model nodes with a mesh
if (node->mesh) {
if (overlay->checkBox(("[" + std::to_string(node->index) + "] " + node->mesh->name).c_str(), &conditionalVisibility[node->index])) {
updateConditionalBuffer();
}
}
}
ImGui::EndChild();
}
}
};

View file

@@ -53,12 +53,6 @@ public:
vks::Model plane;
} models;
struct {
VkPipelineVertexInputStateCreateInfo inputState;
std::vector<VkVertexInputBindingDescription> bindingDescriptions;
std::vector<VkVertexInputAttributeDescription> attributeDescriptions;
} vertices;
struct {
vks::Buffer vsShared;
vks::Buffer vsMirror;
@@ -442,7 +436,6 @@ public:
}
// Scene
vkCmdBindPipeline(drawCmdBuffers[i], VK_PIPELINE_BIND_POINT_GRAPHICS, pipelines.debug);
// Reflection plane
vkCmdBindDescriptorSets(drawCmdBuffers[i], VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineLayouts.textured, 0, 1, &descriptorSets.mirror, 0, NULL);
@@ -531,54 +524,6 @@ public:
models.quad.device = device;
}
void setupVertexDescriptions()
{
// Binding description
vertices.bindingDescriptions.resize(1);
vertices.bindingDescriptions[0] =
vks::initializers::vertexInputBindingDescription(
VERTEX_BUFFER_BIND_ID,
vertexLayout.stride(),
VK_VERTEX_INPUT_RATE_VERTEX);
// Attribute descriptions
vertices.attributeDescriptions.resize(4);
// Location 0 : Position
vertices.attributeDescriptions[0] =
vks::initializers::vertexInputAttributeDescription(
VERTEX_BUFFER_BIND_ID,
0,
VK_FORMAT_R32G32B32_SFLOAT,
0);
// Location 1 : Texture coordinates
vertices.attributeDescriptions[1] =
vks::initializers::vertexInputAttributeDescription(
VERTEX_BUFFER_BIND_ID,
1,
VK_FORMAT_R32G32_SFLOAT,
sizeof(float) * 3);
// Location 2 : Color
vertices.attributeDescriptions[2] =
vks::initializers::vertexInputAttributeDescription(
VERTEX_BUFFER_BIND_ID,
2,
VK_FORMAT_R32G32B32_SFLOAT,
sizeof(float) * 5);
// Location 3 : Normal
vertices.attributeDescriptions[3] =
vks::initializers::vertexInputAttributeDescription(
VERTEX_BUFFER_BIND_ID,
3,
VK_FORMAT_R32G32B32_SFLOAT,
sizeof(float) * 8);
vertices.inputState = vks::initializers::pipelineVertexInputStateCreateInfo();
vertices.inputState.vertexBindingDescriptionCount = vertices.bindingDescriptions.size();
vertices.inputState.pVertexBindingDescriptions = vertices.bindingDescriptions.data();
vertices.inputState.vertexAttributeDescriptionCount = vertices.attributeDescriptions.size();
vertices.inputState.pVertexAttributeDescriptions = vertices.attributeDescriptions.data();
}
void setupDescriptorPool()
{
std::vector<VkDescriptorPoolSize> poolSizes =
@@ -777,45 +722,56 @@ public:
shaderStages[0] = loadShader(getAssetPath() + "shaders/offscreen/quad.vert.spv", VK_SHADER_STAGE_VERTEX_BIT);
shaderStages[1] = loadShader(getAssetPath() + "shaders/offscreen/quad.frag.spv", VK_SHADER_STAGE_FRAGMENT_BIT);
VkGraphicsPipelineCreateInfo pipelineCreateInfo =
vks::initializers::pipelineCreateInfo(
pipelineLayouts.textured,
renderPass,
0);
// Vertex bindings and attributes
const std::vector<VkVertexInputBindingDescription> vertexInputBindings = {
vks::initializers::vertexInputBindingDescription(0, vertexLayout.stride(), VK_VERTEX_INPUT_RATE_VERTEX),
};
const std::vector<VkVertexInputAttributeDescription> vertexInputAttributes = {
vks::initializers::vertexInputAttributeDescription(0, 0, VK_FORMAT_R32G32B32_SFLOAT, 0), // Location 0: Position
vks::initializers::vertexInputAttributeDescription(0, 1, VK_FORMAT_R32G32_SFLOAT, sizeof(float) * 3), // Location 1: UV
vks::initializers::vertexInputAttributeDescription(0, 2, VK_FORMAT_R32G32B32_SFLOAT, sizeof(float) * 5), // Location 2: Color
vks::initializers::vertexInputAttributeDescription(0, 3, VK_FORMAT_R32G32B32_SFLOAT, sizeof(float) * 8), // Location 3: Normal
};
VkPipelineVertexInputStateCreateInfo vertexInputState = vks::initializers::pipelineVertexInputStateCreateInfo();
vertexInputState.vertexBindingDescriptionCount = static_cast<uint32_t>(vertexInputBindings.size());
vertexInputState.pVertexBindingDescriptions = vertexInputBindings.data();
vertexInputState.vertexAttributeDescriptionCount = static_cast<uint32_t>(vertexInputAttributes.size());
vertexInputState.pVertexAttributeDescriptions = vertexInputAttributes.data();
pipelineCreateInfo.pVertexInputState = &vertices.inputState;
pipelineCreateInfo.pInputAssemblyState = &inputAssemblyState;
pipelineCreateInfo.pRasterizationState = &rasterizationState;
pipelineCreateInfo.pColorBlendState = &colorBlendState;
pipelineCreateInfo.pMultisampleState = &multisampleState;
pipelineCreateInfo.pViewportState = &viewportState;
pipelineCreateInfo.pDepthStencilState = &depthStencilState;
pipelineCreateInfo.pDynamicState = &dynamicState;
pipelineCreateInfo.stageCount = shaderStages.size();
pipelineCreateInfo.pStages = shaderStages.data();
VkGraphicsPipelineCreateInfo pipelineCI = vks::initializers::pipelineCreateInfo(pipelineLayouts.textured, renderPass, 0);
pipelineCI.pVertexInputState = &vertexInputState;
pipelineCI.pInputAssemblyState = &inputAssemblyState;
pipelineCI.pRasterizationState = &rasterizationState;
pipelineCI.pColorBlendState = &colorBlendState;
pipelineCI.pMultisampleState = &multisampleState;
pipelineCI.pViewportState = &viewportState;
pipelineCI.pDepthStencilState = &depthStencilState;
pipelineCI.pDynamicState = &dynamicState;
pipelineCI.stageCount = shaderStages.size();
pipelineCI.pStages = shaderStages.data();
VK_CHECK_RESULT(vkCreateGraphicsPipelines(device, pipelineCache, 1, &pipelineCreateInfo, nullptr, &pipelines.debug));
VK_CHECK_RESULT(vkCreateGraphicsPipelines(device, pipelineCache, 1, &pipelineCI, nullptr, &pipelines.debug));
// Mirror
shaderStages[0] = loadShader(getAssetPath() + "shaders/offscreen/mirror.vert.spv", VK_SHADER_STAGE_VERTEX_BIT);
shaderStages[1] = loadShader(getAssetPath() + "shaders/offscreen/mirror.frag.spv", VK_SHADER_STAGE_FRAGMENT_BIT);
rasterizationState.cullMode = VK_CULL_MODE_NONE;
VK_CHECK_RESULT(vkCreateGraphicsPipelines(device, pipelineCache, 1, &pipelineCreateInfo, nullptr, &pipelines.mirror));
VK_CHECK_RESULT(vkCreateGraphicsPipelines(device, pipelineCache, 1, &pipelineCI, nullptr, &pipelines.mirror));
// Flip culling
rasterizationState.cullMode = VK_CULL_MODE_BACK_BIT;
// Phong shading pipelines
pipelineCreateInfo.layout = pipelineLayouts.shaded;
pipelineCI.layout = pipelineLayouts.shaded;
// Scene
shaderStages[0] = loadShader(getAssetPath() + "shaders/offscreen/phong.vert.spv", VK_SHADER_STAGE_VERTEX_BIT);
shaderStages[1] = loadShader(getAssetPath() + "shaders/offscreen/phong.frag.spv", VK_SHADER_STAGE_FRAGMENT_BIT);
VK_CHECK_RESULT(vkCreateGraphicsPipelines(device, pipelineCache, 1, &pipelineCreateInfo, nullptr, &pipelines.shaded));
VK_CHECK_RESULT(vkCreateGraphicsPipelines(device, pipelineCache, 1, &pipelineCI, nullptr, &pipelines.shaded));
// Offscreen
// Flip culling
rasterizationState.cullMode = VK_CULL_MODE_FRONT_BIT;
pipelineCreateInfo.renderPass = offscreenPass.renderPass;
VK_CHECK_RESULT(vkCreateGraphicsPipelines(device, pipelineCache, 1, &pipelineCreateInfo, nullptr, &pipelines.shadedOffscreen));
pipelineCI.renderPass = offscreenPass.renderPass;
VK_CHECK_RESULT(vkCreateGraphicsPipelines(device, pipelineCache, 1, &pipelineCI, nullptr, &pipelines.shadedOffscreen));
}
@@ -953,7 +909,6 @@ public:
loadAssets();
generateQuad();
prepareOffscreen();
setupVertexDescriptions();
prepareUniformBuffers();
setupDescriptorSetLayout();
preparePipelines();

21
external/tinygltf/LICENSE vendored Normal file
View file

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2017 Syoyo Fujita, Aurélien Chatelain and many contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

157
external/tinygltf/README.md vendored Normal file
View file

@@ -0,0 +1,157 @@
# Header only C++ tiny glTF library (loader/saver).
`TinyGLTF` is a header only C++11 library for glTF 2.0 (https://github.com/KhronosGroup/glTF).
## Status
Work in progress (`devel` branch). Very near to release, but more tests and examples are needed.
`TinyGLTF` uses Niels Lohmann's json library (https://github.com/nlohmann/json), so it now requires a C++11 compiler.
If you are looking for the old C++03 version, please use the `devel-picojson` branch.
## Builds
[![Build Status](https://travis-ci.org/syoyo/tinygltf.svg?branch=devel)](https://travis-ci.org/syoyo/tinygltf)
[![Build status](https://ci.appveyor.com/api/projects/status/warngenu9wjjhlm8?svg=true)](https://ci.appveyor.com/project/syoyo/tinygltf)
## Features
* Written in portable C++. C++11 with STL dependency only.
* [x] macOS + clang(LLVM)
* [x] iOS + clang
* [x] Linux + gcc/clang
* [x] Windows + MinGW
* [x] Windows + Visual Studio 2015 Update 3 or later.
* Visual Studio 2013 is not supported since it has limited C++11 support and fails to compile `json.hpp`.
* [x] Android + CrystaX(NDK drop-in replacement) GCC
* [x] Web using Emscripten(LLVM)
* Moderate parsing time and memory consumption.
* glTF specification v2.0.0
* [x] ASCII glTF
* [x] Binary glTF(GLB)
* [x] PBR material description
* Buffers
* [x] Parse BASE64 encoded embedded buffer data (DataURI).
* [x] Load `.bin` file.
* Image(Using stb_image)
* [x] Parse BASE64 encoded embedded image data (DataURI).
* [x] Load external image file.
* [x] PNG(8bit only)
* [x] JPEG(8bit only)
* [x] BMP
* [x] GIF
## Examples
* [glview](examples/glview) : Simple glTF geometry viewer.
* [validator](examples/validator) : Simple glTF validator with JSON schema.
## Projects using TinyGLTF
* Physical based rendering with Vulkan using glTF 2.0 models https://github.com/SaschaWillems/Vulkan-glTF-PBR
* GLTF loader plugin for OGRE 2.1. Support for PBR materials via HLMS/PBS https://github.com/Ybalrid/Ogre_glTF
* [TinyGltfImporter](http://doc.magnum.graphics/magnum/classMagnum_1_1Trade_1_1TinyGltfImporter.html) plugin for [Magnum](https://github.com/mosra/magnum), a lightweight and modular C++11/C++14 graphics middleware for games and data visualization.
* Your projects here! (Please send PR)
## TODOs
* [ ] Write C++ code generator from JSON schema for robust parsing.
* [x] Serialization
* [ ] Compression/decompression(Open3DGC, etc)
* [ ] Support `extensions` and `extras` property
* [ ] HDR image?
* [ ] OpenEXR extension through TinyEXR.
* [ ] Write tests for `animation` and `skin`
## Licenses
TinyGLTF is licensed under MIT license.
TinyGLTF uses the following third party libraries.
* json.hpp : Copyright (c) 2013-2017 Niels Lohmann. MIT license.
* base64 : Copyright (C) 2004-2008 René Nyffenegger
* stb_image.h : v2.08 - public domain image loader - [Github link](https://github.com/nothings/stb/blob/master/stb_image.h)
* stb_image_write.h : v1.09 - public domain image writer - [Github link](https://github.com/nothings/stb/blob/master/stb_image_write.h)
## Build and example
Copy `stb_image.h`, `stb_image_write.h`, `json.hpp` and `tiny_gltf.h` to your project.
### Loading glTF 2.0 model
```c++
// Define these only in *one* .cc file.
#define TINYGLTF_IMPLEMENTATION
#define STB_IMAGE_IMPLEMENTATION
#define STB_IMAGE_WRITE_IMPLEMENTATION
// #define TINYGLTF_NOEXCEPTION // optional. disable exception handling.
#include "tiny_gltf.h"
using namespace tinygltf;
Model model;
TinyGLTF loader;
std::string err;
bool ret = loader.LoadASCIIFromFile(&model, &err, argv[1]);
//bool ret = loader.LoadBinaryFromFile(&model, &err, argv[1]); // for binary glTF(.glb)
if (!err.empty()) {
printf("Err: %s\n", err.c_str());
}
if (!ret) {
printf("Failed to parse glTF\n");
return -1;
}
```
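After LoadASCIIFromFile (or LoadBinaryFromFile) succeeds, the scene graph is available in the tinygltf::Model as index-based arrays. A short traversal sketch, assuming the usual Model/Node/Mesh members (scenes, nodes, meshes, children, and mesh == -1 for nodes without geometry); the print helper is hypothetical:

```c++
#include <cstdio>
#include "tiny_gltf.h"

// Recursively print the node hierarchy of a loaded model.
static void PrintNode(const tinygltf::Model &model, int nodeIndex, int depth) {
  const tinygltf::Node &node = model.nodes[nodeIndex];
  std::printf("%*s- node '%s'\n", depth * 2, "", node.name.c_str());
  if (node.mesh >= 0) {  // -1 means the node has no mesh attached
    const tinygltf::Mesh &mesh = model.meshes[node.mesh];
    std::printf("%*s  mesh '%s' (%zu primitives)\n", depth * 2, "",
                mesh.name.c_str(), mesh.primitives.size());
  }
  for (int child : node.children) {
    PrintNode(model, child, depth + 1);
  }
}

// Usage, after a successful LoadASCIIFromFile/LoadBinaryFromFile call:
// for (const tinygltf::Scene &scene : model.scenes)
//   for (int root : scene.nodes) PrintNode(model, root, 0);
```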
## Compile options
* `TINYGLTF_NOEXCEPTION` : Disable C++ exceptions in JSON parsing. Combine this with `-fno-exceptions` and define both `JSON_NOEXCEPTION` and `TINYGLTF_NOEXCEPTION` to fully remove C++ exception code when compiling TinyGLTF (see the sketch after this list).
* `TINYGLTF_NO_STB_IMAGE` : Do not load images with stb_image. Instead use `TinyGLTF::SetImageLoader(LoadImageDataFunction LoadImageData, void *user_data)` to set a callback for loading images.
* `TINYGLTF_NO_STB_IMAGE_WRITE` : Do not write images with stb_image_write. Instead use `TinyGLTF::SetImageWriter(WriteImageDataFunction WriteImageData, void *user_data)` to set a callback for writing images.
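As a minimal sketch, the exception-free setup described above boils down to defining both symbols in the single implementation translation unit (the file name is hypothetical):

```c++
// tiny_gltf_impl.cc (hypothetical file name) - the one translation unit that
// provides the implementation, built exception-free per the options above.
#define JSON_NOEXCEPTION        // nlohmann's json.hpp: no exception handling
#define TINYGLTF_NOEXCEPTION    // TinyGLTF itself: no exception handling
#define TINYGLTF_IMPLEMENTATION
#define STB_IMAGE_IMPLEMENTATION
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "tiny_gltf.h"
```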
### Saving glTF 2.0 model
* [ ] Buffers.
* [x] To file
* [x] Embedded
* [ ] Draco compressed?
* [x] Images
* [x] To file
* [x] Embedded
* [ ] Binary(.glb)
## Running tests.
### glTF parsing test
#### Setup
Python 2.6 or 2.7 required.
Git clone https://github.com/KhronosGroup/glTF-Sample-Models to your local dir.
#### Run parsing test
After building `loader_example`, edit `test_runner.py`, then run:
```bash
$ python test_runner.py
```
### Unit tests
```bash
$ cd tests
$ make
$ ./tester
$ ./tester_noexcept
```
## Third party licenses
* json.hpp : Licensed under the MIT License <http://opensource.org/licenses/MIT>. Copyright (c) 2013-2017 Niels Lohmann <http://nlohmann.me>.
* stb_image : Public domain.
* catch : Copyright (c) 2012 Two Blue Cubes Ltd. All rights reserved. Distributed under the Boost Software License, Version 1.0.

14722
external/tinygltf/json.hpp vendored Normal file

File diff suppressed because it is too large.

6509
external/tinygltf/stb_image.h vendored Normal file

File diff suppressed because it is too large.

3800
external/tinygltf/tiny_gltf.h vendored Normal file

File diff suppressed because it is too large.