diff --git a/core/application/application.cpp b/core/application/application.cpp index f9bb392..488f499 100644 --- a/core/application/application.cpp +++ b/core/application/application.cpp @@ -8,6 +8,7 @@ #include "filesystem/stb_image.h" #include "rhi/texture.h" #include "rhi/opengl/renderer_opengl.h" +#include "rhi/vulkan/renderer_vulkan.h" #include "spdlog/async.h" #include "spdlog/spdlog.h" #include "spdlog/sinks/basic_file_sink.h" @@ -31,7 +32,7 @@ void application::init(window_params in_window_params, int argc, char** argv) { init_glfw(); init_imgui(); - renderer_ = new renderer_opengl(); + renderer_ = new renderer_vulkan(); renderer_->pre_init(); diff --git a/core/rhi/vulkan/renderer_vulkan.cpp b/core/rhi/vulkan/renderer_vulkan.cpp new file mode 100644 index 0000000..7f2fa83 --- /dev/null +++ b/core/rhi/vulkan/renderer_vulkan.cpp @@ -0,0 +1,393 @@ +#include "renderer_vulkan.h" + +#include "imgui_impl_glfw.h" +#include "utils/utils.hpp" + +extern GLFWAPI VkResult glfwCreateWindowSurface(VkInstance instance, GLFWwindow* window, const VkAllocationCallbacks* allocator, VkSurfaceKHR* surface); + +static void check_vk_result(vk::Result err) { + if (err == vk::Result::eSuccess) + return; + + spdlog::error("[vulkan] Error: VkResult = {}", vk::to_string(err)); + abort(); +} + +static void check_vk_result(VkResult err) { + if (err == VK_SUCCESS) + return; + if (err < 0) { + spdlog::error("[vulkan] Error: VkResult = {}", err); + abort(); + } +} + +static bool is_extension_available(const std::vector& properties, const char* extension) { + return std::ranges::any_of(properties, [extension](const vk::ExtensionProperties& p) { + return strcmp(p.extensionName, extension) == 0; + }); +} + +vk::PhysicalDevice renderer_vulkan::setup_vulkan_select_physical_device() const { + const std::vector gpus = instance.enumeratePhysicalDevices(); + IM_ASSERT(!gpus.empty()); + + // If a number >1 of GPUs got reported, find discrete GPU if present, or use first one available. 
This covers + // most common cases (multi-gpu/integrated+dedicated graphics). Handling more complicated setups (multiple + // dedicated GPUs) is out of scope of this sample. + for (auto& device: gpus) { + auto properties = device.getProperties(); + + if (properties.deviceType == vk::PhysicalDeviceType::eDiscreteGpu) + return device; + } + + // Use first GPU (Integrated) is a Discrete one is not available. + if (!gpus.empty()) + return gpus[0]; + return VK_NULL_HANDLE; +} + +void renderer_vulkan::setup_vulkan(ImVector instance_extensions) { + // Create Vulkan Instance + { + VkInstanceCreateInfo create_info = {}; + create_info.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO; + + // Enumerate available extensions + auto properties = vk::enumerateInstanceExtensionProperties(); + + // Enable required extensions + if (is_extension_available(properties, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) + instance_extensions.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); +#ifdef VK_KHR_PORTABILITY_ENUMERATION_EXTENSION_NAME + if (is_extension_available(properties, VK_KHR_PORTABILITY_ENUMERATION_EXTENSION_NAME)) { + instance_extensions.push_back(VK_KHR_PORTABILITY_ENUMERATION_EXTENSION_NAME); + create_info.flags |= VK_INSTANCE_CREATE_ENUMERATE_PORTABILITY_BIT_KHR; + } +#endif + + // Create Vulkan Instance + create_info.enabledExtensionCount = static_cast(instance_extensions.Size); + create_info.ppEnabledExtensionNames = instance_extensions.Data; + instance = vk::createInstance(create_info, allocator); + } + + // Select Physical Device (GPU) + physical_device = setup_vulkan_select_physical_device(); + + // Select graphics queue family + { + auto queues = physical_device.getQueueFamilyProperties(); + for (uint32_t i = 0; i < queues.size(); i++) { + if (queues[i].queueFlags & vk::QueueFlagBits::eGraphics) { + queue_family = i; + break; + } + } + IM_ASSERT(queue_family != static_cast(-1)); + } + + // Create Logical Device (with 1 queue) + { + 
std::vector device_extensions; + device_extensions.emplace_back("VK_KHR_swapchain"); + + // Enumerate physical device extension + auto properties = physical_device.enumerateDeviceExtensionProperties(); + +#ifdef VK_KHR_PORTABILITY_SUBSET_EXTENSION_NAME + if (is_extension_available(properties, VK_KHR_PORTABILITY_SUBSET_EXTENSION_NAME)) + device_extensions.push_back(VK_KHR_PORTABILITY_SUBSET_EXTENSION_NAME); +#endif + + device = vk::su::createDevice(physical_device, queue_family, device_extensions); + queue = device.getQueue(queue_family, 0); + } + + // Create Descriptor Pool + // The example only requires a single combined image sampler descriptor for the font image and only uses one descriptor set (for that) + // If you wish to load e.g. additional textures you may need to alter pools sizes. + { + std::vector pool_sizes; + pool_sizes.emplace_back(vk::DescriptorType::eCombinedImageSampler, 1); + + vk::DescriptorPoolCreateInfo descriptor_pool_create_info; + descriptor_pool_create_info.setMaxSets(1); + descriptor_pool_create_info.setPoolSizeCount(pool_sizes.size()); + descriptor_pool_create_info.setPoolSizes(pool_sizes); + + descriptor_pool = device.createDescriptorPool(descriptor_pool_create_info); + } +} + +// All the ImGui_ImplVulkanH_XXX structures/functions are optional helpers used by the demo. +// Your real engine/app may not use them. 
+void renderer_vulkan::setup_vulkan_window(ImGui_ImplVulkanH_Window* wd, VkSurfaceKHR surface, int width, + int height) const { + wd->Surface = surface; + + // Check for WSI support + vk::Bool32 res; + const auto err = physical_device.getSurfaceSupportKHR(queue_family, wd->Surface, &res); + check_vk_result(err); + if (res != VK_TRUE) { + fprintf(stderr, "Error no WSI support on physical device 0\n"); + exit(-1); + } + + // Select Surface Format + constexpr VkFormat requestSurfaceImageFormat[] = { + VK_FORMAT_B8G8R8A8_UNORM, VK_FORMAT_R8G8B8A8_UNORM, VK_FORMAT_B8G8R8_UNORM, VK_FORMAT_R8G8B8_UNORM + }; + constexpr VkColorSpaceKHR requestSurfaceColorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR; + wd->SurfaceFormat = ImGui_ImplVulkanH_SelectSurfaceFormat(physical_device, wd->Surface, requestSurfaceImageFormat, + (size_t) IM_ARRAYSIZE(requestSurfaceImageFormat), + requestSurfaceColorSpace); + + // Select Present Mode +#ifdef APP_USE_UNLIMITED_FRAME_RATE + VkPresentModeKHR present_modes[] = { VK_PRESENT_MODE_MAILBOX_KHR, VK_PRESENT_MODE_IMMEDIATE_KHR, VK_PRESENT_MODE_FIFO_KHR }; +#else + VkPresentModeKHR present_modes[] = {VK_PRESENT_MODE_FIFO_KHR}; +#endif + wd->PresentMode = ImGui_ImplVulkanH_SelectPresentMode(physical_device, wd->Surface, &present_modes[0], + IM_ARRAYSIZE(present_modes)); + //printf("[vulkan] Selected PresentMode = %d\n", wd->PresentMode); + + // Create SwapChain, RenderPass, Framebuffer, etc. 
+ IM_ASSERT(min_image_count >= 2); + + ImGui_ImplVulkanH_CreateOrResizeWindow(instance, physical_device, device, wd, queue_family, + reinterpret_cast(allocator), width, + height, min_image_count); +} + +void renderer_vulkan::cleanup_vulkan() const { + device.destroyDescriptorPool(descriptor_pool); +#ifdef APP_USE_VULKAN_DEBUG_REPORT + // Remove the debug report callback + auto vkDestroyDebugReportCallbackEXT = (PFN_vkDestroyDebugReportCallbackEXT)vkGetInstanceProcAddr(g_Instance, "vkDestroyDebugReportCallbackEXT"); + vkDestroyDebugReportCallbackEXT(g_Instance, g_DebugReport, g_Allocator); +#endif // APP_USE_VULKAN_DEBUG_REPORT + + device.destroy(); + instance.destroy(); +} + +void renderer_vulkan::cleanup_vulkan_window() { + ImGui_ImplVulkanH_DestroyWindow(instance, device, &main_window_data, + reinterpret_cast(allocator)); +} + +void renderer_vulkan::frame_render(ImGui_ImplVulkanH_Window* wd, ImDrawData* draw_data) { + vk::Semaphore image_acquired_semaphore = wd->FrameSemaphores[wd->SemaphoreIndex].ImageAcquiredSemaphore; + vk::Semaphore render_complete_semaphore = wd->FrameSemaphores[wd->SemaphoreIndex].RenderCompleteSemaphore; + + vk::Result err = device.acquireNextImageKHR(wd->Swapchain, UINT64_MAX, image_acquired_semaphore, VK_NULL_HANDLE, + &wd->FrameIndex); + if (err == vk::Result::eErrorOutOfDateKHR || err == vk::Result::eSuboptimalKHR) { + swap_chain_rebuild = true; + return; + } + + check_vk_result(err); + + ImGui_ImplVulkanH_Frame* fd = &wd->Frames[wd->FrameIndex]; + const vk::CommandBuffer cmd_buf = fd->CommandBuffer; + const vk::Fence fence = fd->Fence; { + err = device.waitForFences(1, &fence, VK_TRUE, UINT64_MAX); + + // wait indefinitely instead of periodically checking + check_vk_result(err); + + err = device.resetFences(1, &fence); + check_vk_result(err); + } { + const vk::CommandPool command_pool = fd->CommandPool; + device.resetCommandPool(command_pool); + + vk::CommandBufferBeginInfo info = {}; + 
info.setFlags(vk::CommandBufferUsageFlagBits::eOneTimeSubmit); + + cmd_buf.begin(info); + } { + const vk::Framebuffer framebuffer = fd->Framebuffer; + const vk::RenderPass render_pass = wd->RenderPass; + std::vector clear_values; + const auto clear_color = wd->ClearValue.color.float32; + const auto clear_depth = wd->ClearValue.depthStencil.depth; + const auto clear_stencil = wd->ClearValue.depthStencil.stencil; + vk::ClearValue clear_value; + clear_value.color = vk::ClearColorValue(std::array{ + clear_color[0], clear_color[1], clear_color[2], clear_color[3] + }); + clear_value.depthStencil = vk::ClearDepthStencilValue(clear_depth, clear_stencil); + + clear_values.emplace_back((clear_value.color)); + + vk::RenderPassBeginInfo info; + info.setRenderPass(render_pass); + info.setFramebuffer(framebuffer); + info.renderArea.extent.width = wd->Width; + info.renderArea.extent.height = wd->Height; + info.setClearValues(clear_values); + + cmd_buf.beginRenderPass(info, vk::SubpassContents::eInline); + } + + // Record dear imgui primitives into command buffer + ImGui_ImplVulkan_RenderDrawData(draw_data, fd->CommandBuffer); + + // Submit command buffer + vkCmdEndRenderPass(fd->CommandBuffer); { + vk::PipelineStageFlags wait_stage = vk::PipelineStageFlagBits::eColorAttachmentOutput; + + vk::SubmitInfo info; + info.setWaitSemaphores(image_acquired_semaphore); + info.setWaitDstStageMask(wait_stage); + info.setCommandBuffers(cmd_buf); + info.setSignalSemaphores(render_complete_semaphore); + + cmd_buf.end(); + err = queue.submit(1, &info, fence); + check_vk_result(err); + } +} + +void renderer_vulkan::frame_present(ImGui_ImplVulkanH_Window* wd) { + if (swap_chain_rebuild) + return; + vk::Semaphore render_complete_semaphore = wd->FrameSemaphores[wd->SemaphoreIndex].RenderCompleteSemaphore; + vk::SwapchainKHR swapchain = wd->Swapchain; + uint32_t frame_index = wd->FrameIndex; + vk::PresentInfoKHR info; + info.setWaitSemaphores(render_complete_semaphore); + 
info.setSwapchains(swapchain); + info.setImageIndices(frame_index); + + auto err = queue.presentKHR(info); + + if (err == vk::Result::eErrorOutOfDateKHR || err == vk::Result::eSuboptimalKHR) { + swap_chain_rebuild = true; + return; + } + check_vk_result(err); + wd->SemaphoreIndex = (wd->SemaphoreIndex + 1) % wd->SemaphoreCount; // Now we can use the next set of semaphores +} + +void renderer_vulkan::pre_init() { + renderer::pre_init(); + glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API); +} + +bool renderer_vulkan::init(GLFWwindow* window_handle) { + if (has_initialized_) + return true; + + if (!glfwVulkanSupported()) { + throw std::runtime_error("Vulkan not supported"); + } + init_vulkan(window_handle); + + has_initialized_ = true; + return true; +} + +void renderer_vulkan::shutdown() { + renderer::shutdown(); + + ImGui_ImplGlfw_Shutdown(); + ImGui_ImplVulkan_Shutdown(); +} + +std::shared_ptr renderer_vulkan::load_shader(const std::string& entry_name) { + return nullptr; +} + +std::shared_ptr renderer_vulkan::create_pixel_shader_drawer() { + return nullptr; +} + +void renderer_vulkan::new_frame(GLFWwindow* window_handle) { + // Start the Dear ImGui frame + ImGui_ImplVulkan_NewFrame(); + ImGui_ImplGlfw_NewFrame(); + ImGui::NewFrame(); +} + +void renderer_vulkan::end_frame(GLFWwindow* window_handle) { + ImGuiIO& io = ImGui::GetIO(); + + // Rendering + ImGui::Render(); + ImDrawData* main_draw_data = ImGui::GetDrawData(); + const bool main_is_minimized = (main_draw_data->DisplaySize.x <= 0.0f || main_draw_data->DisplaySize.y <= 0.0f); + main_window_data.ClearValue.color.float32[0] = clear_color.x * clear_color.w; + main_window_data.ClearValue.color.float32[1] = clear_color.y * clear_color.w; + main_window_data.ClearValue.color.float32[2] = clear_color.z * clear_color.w; + main_window_data.ClearValue.color.float32[3] = clear_color.w; + if (!main_is_minimized) + frame_render(&main_window_data, main_draw_data); + + // Update and Render additional Platform Windows + if 
(io.ConfigFlags & ImGuiConfigFlags_ViewportsEnable) + { + ImGui::UpdatePlatformWindows(); + ImGui::RenderPlatformWindowsDefault(); + } + + // Present Main Platform Window + if (!main_is_minimized) + frame_present(&main_window_data); +} + +void renderer_vulkan::resize(int width, int height) { +} + +std::shared_ptr renderer_vulkan::create_texture(const unsigned char* data, int width, int height) { + return nullptr; +} + +std::shared_ptr renderer_vulkan::create_render_target(int width, int height, texture_format format) { + return nullptr; +} + +void renderer_vulkan::init_vulkan(GLFWwindow* window_handle) { + ImVector extensions; + uint32_t extensions_count = 0; + const char** glfw_extensions = glfwGetRequiredInstanceExtensions(&extensions_count); + for (uint32_t i = 0; i < extensions_count; i++) + extensions.push_back(glfw_extensions[i]); + setup_vulkan(extensions); + + // Create Window Surface + VkSurfaceKHR surface; + VkResult err = glfwCreateWindowSurface(instance, window_handle, reinterpret_cast(allocator), &surface); + check_vk_result(err); + + // Create Framebuffers + int w, h; + glfwGetFramebufferSize(window_handle, &w, &h); + ImGui_ImplVulkanH_Window* wd = &main_window_data; + setup_vulkan_window(wd, surface, w, h); + + ImGui_ImplGlfw_InitForVulkan(window_handle, true); + + ImGui_ImplVulkan_InitInfo init_info = {}; + init_info.Instance = instance; + init_info.PhysicalDevice = physical_device; + init_info.Device = device; + init_info.QueueFamily = queue_family; + init_info.Queue = queue; + init_info.PipelineCache = pipeline_cache; + init_info.DescriptorPool = descriptor_pool; + init_info.RenderPass = wd->RenderPass; + init_info.Subpass = 0; + init_info.MinImageCount = min_image_count; + init_info.ImageCount = wd->ImageCount; + init_info.MSAASamples = VK_SAMPLE_COUNT_1_BIT; + init_info.Allocator = reinterpret_cast(allocator); + init_info.CheckVkResultFn = check_vk_result; + ImGui_ImplVulkan_Init(&init_info); +} diff --git a/core/rhi/vulkan/renderer_vulkan.h 
b/core/rhi/vulkan/renderer_vulkan.h new file mode 100644 index 0000000..9a510dd --- /dev/null +++ b/core/rhi/vulkan/renderer_vulkan.h @@ -0,0 +1,67 @@ +// +// Created by 46944 on 2024/2/19. +// +#pragma once + +#include + +#include "imgui_impl_vulkan.h" +#include "rhi/renderer.h" + + +class renderer_vulkan : public renderer { +public: + void pre_init() override; + + bool init(GLFWwindow* window_handle) override; + + void shutdown() override; + + std::shared_ptr load_shader(const std::string& entry_name) override; + + std::shared_ptr create_pixel_shader_drawer() override; + + void new_frame(GLFWwindow* window_handle) override; + + void end_frame(GLFWwindow* window_handle) override; + + void resize(int width, int height) override; + + std::shared_ptr create_texture(const unsigned char* data, int width, int height) override; + + std::shared_ptr create_render_target(int width, int height, texture_format format) override; + + // Data + vk::AllocationCallbacks* allocator = nullptr; + vk::Instance instance = VK_NULL_HANDLE; + vk::PhysicalDevice physical_device = VK_NULL_HANDLE; + vk::Device device = VK_NULL_HANDLE; + uint32_t queue_family = (uint32_t)-1; + vk::Queue queue = VK_NULL_HANDLE; + vk::PipelineCache pipeline_cache = VK_NULL_HANDLE; + vk::DescriptorPool descriptor_pool = VK_NULL_HANDLE; + + ImGui_ImplVulkanH_Window main_window_data; + int min_image_count = 2; + bool swap_chain_rebuild = false; + +protected: + vk::PhysicalDevice setup_vulkan_select_physical_device() const; + + void setup_vulkan(ImVector instance_extensions); + + void setup_vulkan_window(ImGui_ImplVulkanH_Window* wd, VkSurfaceKHR surface, int width, int height) const; + + void cleanup_vulkan() const; + + void cleanup_vulkan_window(); + + void frame_render(ImGui_ImplVulkanH_Window* wd, ImDrawData* draw_data); + + void frame_present(ImGui_ImplVulkanH_Window* wd); + +private: + void init_vulkan(GLFWwindow* window_handle); + + bool has_initialized_ = false; +}; diff --git 
a/core/rhi/vulkan/test_vulkan.cpp b/core/rhi/vulkan/test_vulkan.cpp
new file mode 100644
index 0000000..b1d1b43
--- /dev/null
+++ b/core/rhi/vulkan/test_vulkan.cpp
@@ -0,0 +1,8 @@
+//
+// Created by 46944 on 2024/2/19.
+//
+
+#include "test_vulkan.h"
+
+// Empty constructor: test_vulkan currently carries no state.
+test_vulkan::test_vulkan() {
+}
diff --git a/core/rhi/vulkan/test_vulkan.h b/core/rhi/vulkan/test_vulkan.h
new file mode 100644
index 0000000..cb7589d
--- /dev/null
+++ b/core/rhi/vulkan/test_vulkan.h
@@ -0,0 +1,17 @@
+//
+// Created by 46944 on 2024/2/19.
+//
+
+#ifndef TEST_VULKAN_H
+#define TEST_VULKAN_H
+
+// NOTE(review): the include target below was lost in extraction (presumably the
+// header that defines the CORE_API export macro) — confirm against the repository.
+#include 
+
+// Minimal placeholder class; only declares a default-constructible type.
+// NOTE(review): purpose not evident from the visible code — presumably build
+// scaffolding for the new vulkan module; verify before relying on it.
+class CORE_API test_vulkan {
+public:
+ test_vulkan();
+};
+
+
+
+#endif //TEST_VULKAN_H
diff --git a/core/rhi/vulkan/utils/utils.cpp b/core/rhi/vulkan/utils/utils.cpp
new file mode 100644
index 0000000..64eb1bc
--- /dev/null
+++ b/core/rhi/vulkan/utils/utils.cpp
@@ -0,0 +1,1074 @@
+// Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#if defined( _MSC_VER )
+// no need to ignore any warnings with MSVC
+#elif defined( __clang__ )
+# pragma clang diagnostic ignored "-Wmissing-braces"
+#elif defined( __GNUC__ )
+// no need to ignore any warnings with GCC
+#else
+// unknown compiler...
just ignore the warnings for yourselves ;) +#endif + +#include "utils.hpp" + +#include +#include +#include +#if defined( VULKAN_HPP_NO_TO_STRING ) +# include +#endif +#include + +#if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) +VULKAN_HPP_DEFAULT_DISPATCH_LOADER_DYNAMIC_STORAGE +#endif + +namespace vk +{ + namespace su + { + vk::DeviceMemory allocateDeviceMemory( vk::Device const & device, + vk::PhysicalDeviceMemoryProperties const & memoryProperties, + vk::MemoryRequirements const & memoryRequirements, + vk::MemoryPropertyFlags memoryPropertyFlags ) + { + uint32_t memoryTypeIndex = findMemoryType( memoryProperties, memoryRequirements.memoryTypeBits, memoryPropertyFlags ); + + return device.allocateMemory( vk::MemoryAllocateInfo( memoryRequirements.size, memoryTypeIndex ) ); + } + + bool contains( std::vector const & extensionProperties, std::string const & extensionName ) + { + auto propertyIterator = std::find_if( extensionProperties.begin(), + extensionProperties.end(), + [&extensionName]( vk::ExtensionProperties const & ep ) { return extensionName == ep.extensionName; } ); + return ( propertyIterator != extensionProperties.end() ); + } + + vk::DescriptorPool createDescriptorPool( vk::Device const & device, std::vector const & poolSizes ) + { + assert( !poolSizes.empty() ); + uint32_t maxSets = + std::accumulate( poolSizes.begin(), poolSizes.end(), 0, []( uint32_t sum, vk::DescriptorPoolSize const & dps ) { return sum + dps.descriptorCount; } ); + assert( 0 < maxSets ); + + vk::DescriptorPoolCreateInfo descriptorPoolCreateInfo( vk::DescriptorPoolCreateFlagBits::eFreeDescriptorSet, maxSets, poolSizes ); + return device.createDescriptorPool( descriptorPoolCreateInfo ); + } + + vk::DescriptorSetLayout createDescriptorSetLayout( vk::Device const & device, + std::vector> const & bindingData, + vk::DescriptorSetLayoutCreateFlags flags ) + { + std::vector bindings( bindingData.size() ); + for ( size_t i = 0; i < bindingData.size(); i++ ) + { + bindings[i] = 
vk::DescriptorSetLayoutBinding( + checked_cast( i ), std::get<0>( bindingData[i] ), std::get<1>( bindingData[i] ), std::get<2>( bindingData[i] ) ); + } + return device.createDescriptorSetLayout( vk::DescriptorSetLayoutCreateInfo( flags, bindings ) ); + } + + vk::Device createDevice( vk::PhysicalDevice const & physicalDevice, + uint32_t queueFamilyIndex, + std::vector const & extensions, + vk::PhysicalDeviceFeatures const * physicalDeviceFeatures, + void const * pNext ) + { + std::vector enabledExtensions; + enabledExtensions.reserve( extensions.size() ); + for ( auto const & ext : extensions ) + { + enabledExtensions.push_back( ext.data() ); + } + + float queuePriority = 0.0f; + vk::DeviceQueueCreateInfo deviceQueueCreateInfo( {}, queueFamilyIndex, 1, &queuePriority ); + vk::DeviceCreateInfo deviceCreateInfo( {}, deviceQueueCreateInfo, {}, enabledExtensions, physicalDeviceFeatures, pNext ); + + vk::Device device = physicalDevice.createDevice( deviceCreateInfo ); +#if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) + // initialize function pointers for instance + VULKAN_HPP_DEFAULT_DISPATCHER.init( device ); +#endif + return device; + } + + std::vector createFramebuffers( vk::Device const & device, + vk::RenderPass & renderPass, + std::vector const & imageViews, + vk::ImageView const & depthImageView, + vk::Extent2D const & extent ) + { + vk::ImageView attachments[2]; + attachments[1] = depthImageView; + + vk::FramebufferCreateInfo framebufferCreateInfo( + vk::FramebufferCreateFlags(), renderPass, depthImageView ? 
2 : 1, attachments, extent.width, extent.height, 1 ); + std::vector framebuffers; + framebuffers.reserve( imageViews.size() ); + for ( auto const & view : imageViews ) + { + attachments[0] = view; + framebuffers.push_back( device.createFramebuffer( framebufferCreateInfo ) ); + } + + return framebuffers; + } + + vk::Pipeline createGraphicsPipeline( vk::Device const & device, + vk::PipelineCache const & pipelineCache, + std::pair const & vertexShaderData, + std::pair const & fragmentShaderData, + uint32_t vertexStride, + std::vector> const & vertexInputAttributeFormatOffset, + vk::FrontFace frontFace, + bool depthBuffered, + vk::PipelineLayout const & pipelineLayout, + vk::RenderPass const & renderPass ) + { + std::array pipelineShaderStageCreateInfos = { + vk::PipelineShaderStageCreateInfo( + vk::PipelineShaderStageCreateFlags(), vk::ShaderStageFlagBits::eVertex, vertexShaderData.first, "main", vertexShaderData.second ), + vk::PipelineShaderStageCreateInfo( + vk::PipelineShaderStageCreateFlags(), vk::ShaderStageFlagBits::eFragment, fragmentShaderData.first, "main", fragmentShaderData.second ) + }; + + std::vector vertexInputAttributeDescriptions; + vk::PipelineVertexInputStateCreateInfo pipelineVertexInputStateCreateInfo; + vk::VertexInputBindingDescription vertexInputBindingDescription( 0, vertexStride ); + + if ( 0 < vertexStride ) + { + vertexInputAttributeDescriptions.reserve( vertexInputAttributeFormatOffset.size() ); + for ( uint32_t i = 0; i < vertexInputAttributeFormatOffset.size(); i++ ) + { + vertexInputAttributeDescriptions.emplace_back( i, 0, vertexInputAttributeFormatOffset[i].first, vertexInputAttributeFormatOffset[i].second ); + } + pipelineVertexInputStateCreateInfo.setVertexBindingDescriptions( vertexInputBindingDescription ); + pipelineVertexInputStateCreateInfo.setVertexAttributeDescriptions( vertexInputAttributeDescriptions ); + } + + vk::PipelineInputAssemblyStateCreateInfo pipelineInputAssemblyStateCreateInfo( 
vk::PipelineInputAssemblyStateCreateFlags(), + vk::PrimitiveTopology::eTriangleList ); + + vk::PipelineViewportStateCreateInfo pipelineViewportStateCreateInfo( vk::PipelineViewportStateCreateFlags(), 1, nullptr, 1, nullptr ); + + vk::PipelineRasterizationStateCreateInfo pipelineRasterizationStateCreateInfo( vk::PipelineRasterizationStateCreateFlags(), + false, + false, + vk::PolygonMode::eFill, + vk::CullModeFlagBits::eBack, + frontFace, + false, + 0.0f, + 0.0f, + 0.0f, + 1.0f ); + + vk::PipelineMultisampleStateCreateInfo pipelineMultisampleStateCreateInfo( {}, vk::SampleCountFlagBits::e1 ); + + vk::StencilOpState stencilOpState( vk::StencilOp::eKeep, vk::StencilOp::eKeep, vk::StencilOp::eKeep, vk::CompareOp::eAlways ); + vk::PipelineDepthStencilStateCreateInfo pipelineDepthStencilStateCreateInfo( + vk::PipelineDepthStencilStateCreateFlags(), depthBuffered, depthBuffered, vk::CompareOp::eLessOrEqual, false, false, stencilOpState, stencilOpState ); + + vk::ColorComponentFlags colorComponentFlags( vk::ColorComponentFlagBits::eR | vk::ColorComponentFlagBits::eG | vk::ColorComponentFlagBits::eB | + vk::ColorComponentFlagBits::eA ); + vk::PipelineColorBlendAttachmentState pipelineColorBlendAttachmentState( false, + vk::BlendFactor::eZero, + vk::BlendFactor::eZero, + vk::BlendOp::eAdd, + vk::BlendFactor::eZero, + vk::BlendFactor::eZero, + vk::BlendOp::eAdd, + colorComponentFlags ); + vk::PipelineColorBlendStateCreateInfo pipelineColorBlendStateCreateInfo( + vk::PipelineColorBlendStateCreateFlags(), false, vk::LogicOp::eNoOp, pipelineColorBlendAttachmentState, { { 1.0f, 1.0f, 1.0f, 1.0f } } ); + + std::array dynamicStates = { vk::DynamicState::eViewport, vk::DynamicState::eScissor }; + vk::PipelineDynamicStateCreateInfo pipelineDynamicStateCreateInfo( vk::PipelineDynamicStateCreateFlags(), dynamicStates ); + + vk::GraphicsPipelineCreateInfo graphicsPipelineCreateInfo( vk::PipelineCreateFlags(), + pipelineShaderStageCreateInfos, + &pipelineVertexInputStateCreateInfo, + 
&pipelineInputAssemblyStateCreateInfo, + nullptr, + &pipelineViewportStateCreateInfo, + &pipelineRasterizationStateCreateInfo, + &pipelineMultisampleStateCreateInfo, + &pipelineDepthStencilStateCreateInfo, + &pipelineColorBlendStateCreateInfo, + &pipelineDynamicStateCreateInfo, + pipelineLayout, + renderPass ); + + auto result = device.createGraphicsPipeline( pipelineCache, graphicsPipelineCreateInfo ); + assert( result.result == vk::Result::eSuccess ); + return result.value; + } + + std::vector gatherExtensions( std::vector const & extensions +#if !defined( NDEBUG ) + , + std::vector const & extensionProperties +#endif + ) + { + std::vector enabledExtensions; + enabledExtensions.reserve( extensions.size() ); + for ( auto const & ext : extensions ) + { + assert( std::any_of( + extensionProperties.begin(), extensionProperties.end(), [ext]( vk::ExtensionProperties const & ep ) { return ext == ep.extensionName; } ) ); + enabledExtensions.push_back( ext.data() ); + } +#if !defined( NDEBUG ) + if ( std::none_of( + extensions.begin(), extensions.end(), []( std::string const & extension ) { return extension == VK_EXT_DEBUG_UTILS_EXTENSION_NAME; } ) && + std::any_of( extensionProperties.begin(), + extensionProperties.end(), + []( vk::ExtensionProperties const & ep ) { return ( strcmp( VK_EXT_DEBUG_UTILS_EXTENSION_NAME, ep.extensionName ) == 0 ); } ) ) + { + enabledExtensions.push_back( VK_EXT_DEBUG_UTILS_EXTENSION_NAME ); + } +#endif + return enabledExtensions; + } + + std::vector gatherLayers( std::vector const & layers +#if !defined( NDEBUG ) + , + std::vector const & layerProperties +#endif + ) + { + std::vector enabledLayers; + enabledLayers.reserve( layers.size() ); + for ( auto const & layer : layers ) + { + assert( std::any_of( layerProperties.begin(), layerProperties.end(), [layer]( vk::LayerProperties const & lp ) { return layer == lp.layerName; } ) ); + enabledLayers.push_back( layer.data() ); + } +#if !defined( NDEBUG ) + // Enable standard validation layer to 
find as much errors as possible! + if ( std::none_of( layers.begin(), layers.end(), []( std::string const & layer ) { return layer == "VK_LAYER_KHRONOS_validation"; } ) && + std::any_of( layerProperties.begin(), + layerProperties.end(), + []( vk::LayerProperties const & lp ) { return ( strcmp( "VK_LAYER_KHRONOS_validation", lp.layerName ) == 0 ); } ) ) + { + enabledLayers.push_back( "VK_LAYER_KHRONOS_validation" ); + } +#endif + return enabledLayers; + } + + vk::Instance createInstance( std::string const & appName, + std::string const & engineName, + std::vector const & layers, + std::vector const & extensions, + uint32_t apiVersion ) + { +#if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) + VULKAN_HPP_DEFAULT_DISPATCHER.init(); +#endif + + vk::ApplicationInfo applicationInfo( appName.c_str(), 1, engineName.c_str(), 1, apiVersion ); + std::vector enabledLayers = vk::su::gatherLayers( layers +#if !defined( NDEBUG ) + , + vk::enumerateInstanceLayerProperties() +#endif + ); + std::vector enabledExtensions = vk::su::gatherExtensions( extensions +#if !defined( NDEBUG ) + , + vk::enumerateInstanceExtensionProperties() +#endif + ); + + vk::Instance instance = + vk::createInstance( makeInstanceCreateInfoChain( applicationInfo, enabledLayers, enabledExtensions ).get() ); + +#if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) + // initialize function pointers for instance + VULKAN_HPP_DEFAULT_DISPATCHER.init( instance ); +#endif + + return instance; + } + + vk::RenderPass createRenderPass( + vk::Device const & device, vk::Format colorFormat, vk::Format depthFormat, vk::AttachmentLoadOp loadOp, vk::ImageLayout colorFinalLayout ) + { + std::vector attachmentDescriptions; + assert( colorFormat != vk::Format::eUndefined ); + attachmentDescriptions.emplace_back( vk::AttachmentDescriptionFlags(), + colorFormat, + vk::SampleCountFlagBits::e1, + loadOp, + vk::AttachmentStoreOp::eStore, + vk::AttachmentLoadOp::eDontCare, + vk::AttachmentStoreOp::eDontCare, + vk::ImageLayout::eUndefined, + 
colorFinalLayout ); + if ( depthFormat != vk::Format::eUndefined ) + { + attachmentDescriptions.emplace_back( vk::AttachmentDescriptionFlags(), + depthFormat, + vk::SampleCountFlagBits::e1, + loadOp, + vk::AttachmentStoreOp::eDontCare, + vk::AttachmentLoadOp::eDontCare, + vk::AttachmentStoreOp::eDontCare, + vk::ImageLayout::eUndefined, + vk::ImageLayout::eDepthStencilAttachmentOptimal ); + } + vk::AttachmentReference colorAttachment( 0, vk::ImageLayout::eColorAttachmentOptimal ); + vk::AttachmentReference depthAttachment( 1, vk::ImageLayout::eDepthStencilAttachmentOptimal ); + vk::SubpassDescription subpassDescription( vk::SubpassDescriptionFlags(), + vk::PipelineBindPoint::eGraphics, + {}, + colorAttachment, + {}, + ( depthFormat != vk::Format::eUndefined ) ? &depthAttachment : nullptr ); + return device.createRenderPass( vk::RenderPassCreateInfo( vk::RenderPassCreateFlags(), attachmentDescriptions, subpassDescription ) ); + } + + VKAPI_ATTR VkBool32 VKAPI_CALL debugUtilsMessengerCallback( VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, + VkDebugUtilsMessageTypeFlagsEXT messageTypes, + VkDebugUtilsMessengerCallbackDataEXT const * pCallbackData, + void * /*pUserData*/ ) + { +#if !defined( NDEBUG ) + if ( static_cast( pCallbackData->messageIdNumber ) == 0x822806fa ) + { + // Validation Warning: vkCreateInstance(): to enable extension VK_EXT_debug_utils, but this extension is intended to support use by applications when + // debugging and it is strongly recommended that it be otherwise avoided. + return vk::False; + } + else if ( static_cast( pCallbackData->messageIdNumber ) == 0xe8d1a9fe ) + { + // Validation Performance Warning: Using debug builds of the validation layers *will* adversely affect performance. 
+ return vk::False; + } +#endif + + std::cerr << vk::to_string( static_cast( messageSeverity ) ) << ": " + << vk::to_string( static_cast( messageTypes ) ) << ":\n"; + std::cerr << std::string( "\t" ) << "messageIDName = <" << pCallbackData->pMessageIdName << ">\n"; + std::cerr << std::string( "\t" ) << "messageIdNumber = " << pCallbackData->messageIdNumber << "\n"; + std::cerr << std::string( "\t" ) << "message = <" << pCallbackData->pMessage << ">\n"; + if ( 0 < pCallbackData->queueLabelCount ) + { + std::cerr << std::string( "\t" ) << "Queue Labels:\n"; + for ( uint32_t i = 0; i < pCallbackData->queueLabelCount; i++ ) + { + std::cerr << std::string( "\t\t" ) << "labelName = <" << pCallbackData->pQueueLabels[i].pLabelName << ">\n"; + } + } + if ( 0 < pCallbackData->cmdBufLabelCount ) + { + std::cerr << std::string( "\t" ) << "CommandBuffer Labels:\n"; + for ( uint32_t i = 0; i < pCallbackData->cmdBufLabelCount; i++ ) + { + std::cerr << std::string( "\t\t" ) << "labelName = <" << pCallbackData->pCmdBufLabels[i].pLabelName << ">\n"; + } + } + if ( 0 < pCallbackData->objectCount ) + { + std::cerr << std::string( "\t" ) << "Objects:\n"; + for ( uint32_t i = 0; i < pCallbackData->objectCount; i++ ) + { + std::cerr << std::string( "\t\t" ) << "Object " << i << "\n"; + std::cerr << std::string( "\t\t\t" ) << "objectType = " << vk::to_string( static_cast( pCallbackData->pObjects[i].objectType ) ) + << "\n"; + std::cerr << std::string( "\t\t\t" ) << "objectHandle = " << pCallbackData->pObjects[i].objectHandle << "\n"; + if ( pCallbackData->pObjects[i].pObjectName ) + { + std::cerr << std::string( "\t\t\t" ) << "objectName = <" << pCallbackData->pObjects[i].pObjectName << ">\n"; + } + } + } + return vk::False; + } + + uint32_t findGraphicsQueueFamilyIndex( std::vector const & queueFamilyProperties ) + { + // get the first index into queueFamiliyProperties which supports graphics + std::vector::const_iterator graphicsQueueFamilyProperty = + std::find_if( 
queueFamilyProperties.begin(), + queueFamilyProperties.end(), + []( vk::QueueFamilyProperties const & qfp ) { return qfp.queueFlags & vk::QueueFlagBits::eGraphics; } ); + assert( graphicsQueueFamilyProperty != queueFamilyProperties.end() ); + return static_cast( std::distance( queueFamilyProperties.begin(), graphicsQueueFamilyProperty ) ); + } + + std::pair findGraphicsAndPresentQueueFamilyIndex( vk::PhysicalDevice physicalDevice, vk::SurfaceKHR const & surface ) + { + std::vector queueFamilyProperties = physicalDevice.getQueueFamilyProperties(); + assert( queueFamilyProperties.size() < std::numeric_limits::max() ); + + uint32_t graphicsQueueFamilyIndex = findGraphicsQueueFamilyIndex( queueFamilyProperties ); + if ( physicalDevice.getSurfaceSupportKHR( graphicsQueueFamilyIndex, surface ) ) + { + return std::make_pair( graphicsQueueFamilyIndex, + graphicsQueueFamilyIndex ); // the first graphicsQueueFamilyIndex does also support presents + } + + // the graphicsQueueFamilyIndex doesn't support present -> look for an other family index that supports both + // graphics and present + for ( size_t i = 0; i < queueFamilyProperties.size(); i++ ) + { + if ( ( queueFamilyProperties[i].queueFlags & vk::QueueFlagBits::eGraphics ) && + physicalDevice.getSurfaceSupportKHR( static_cast( i ), surface ) ) + { + return std::make_pair( static_cast( i ), static_cast( i ) ); + } + } + + // there's nothing like a single family index that supports both graphics and present -> look for an other family + // index that supports present + for ( size_t i = 0; i < queueFamilyProperties.size(); i++ ) + { + if ( physicalDevice.getSurfaceSupportKHR( static_cast( i ), surface ) ) + { + return std::make_pair( graphicsQueueFamilyIndex, static_cast( i ) ); + } + } + + throw std::runtime_error( "Could not find queues for both graphics or present -> terminating" ); + } + + uint32_t findMemoryType( vk::PhysicalDeviceMemoryProperties const & memoryProperties, uint32_t typeBits, vk::MemoryPropertyFlags 
requirementsMask ) + { + uint32_t typeIndex = uint32_t( ~0 ); + for ( uint32_t i = 0; i < memoryProperties.memoryTypeCount; i++ ) + { + if ( ( typeBits & 1 ) && ( ( memoryProperties.memoryTypes[i].propertyFlags & requirementsMask ) == requirementsMask ) ) + { + typeIndex = i; + break; + } + typeBits >>= 1; + } + assert( typeIndex != uint32_t( ~0 ) ); + return typeIndex; + } + + std::vector getDeviceExtensions() + { + return { VK_KHR_SWAPCHAIN_EXTENSION_NAME }; + } + + std::vector getInstanceExtensions() + { + std::vector extensions; + extensions.push_back( VK_KHR_SURFACE_EXTENSION_NAME ); +#if defined( VK_USE_PLATFORM_ANDROID_KHR ) + extensions.push_back( VK_KHR_ANDROID_SURFACE_EXTENSION_NAME ); +#elif defined( VK_USE_PLATFORM_IOS_MVK ) + extensions.push_back( VK_MVK_IOS_SURFACE_EXTENSION_NAME ); +#elif defined( VK_USE_PLATFORM_MACOS_MVK ) + extensions.push_back( VK_MVK_MACOS_SURFACE_EXTENSION_NAME ); +#elif defined( VK_USE_PLATFORM_MIR_KHR ) + extensions.push_back( VK_KHR_MIR_SURFACE_EXTENSION_NAME ); +#elif defined( VK_USE_PLATFORM_VI_NN ) + extensions.push_back( VK_NN_VI_SURFACE_EXTENSION_NAME ); +#elif defined( VK_USE_PLATFORM_WAYLAND_KHR ) + extensions.push_back( VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME ); +#elif defined( VK_USE_PLATFORM_WIN32_KHR ) + extensions.push_back( VK_KHR_WIN32_SURFACE_EXTENSION_NAME ); +#elif defined( VK_USE_PLATFORM_XCB_KHR ) + extensions.push_back( VK_KHR_XCB_SURFACE_EXTENSION_NAME ); +#elif defined( VK_USE_PLATFORM_XLIB_KHR ) + extensions.push_back( VK_KHR_XLIB_SURFACE_EXTENSION_NAME ); +#elif defined( VK_USE_PLATFORM_XLIB_XRANDR_EXT ) + extensions.push_back( VK_EXT_ACQUIRE_XLIB_DISPLAY_EXTENSION_NAME ); +#endif + return extensions; + } + + vk::Format pickDepthFormat( vk::PhysicalDevice const & physicalDevice ) + { + std::vector candidates = { vk::Format::eD32Sfloat, vk::Format::eD32SfloatS8Uint, vk::Format::eD24UnormS8Uint }; + for ( vk::Format format : candidates ) + { + vk::FormatProperties props = 
physicalDevice.getFormatProperties( format ); + + if ( props.optimalTilingFeatures & vk::FormatFeatureFlagBits::eDepthStencilAttachment ) + { + return format; + } + } + throw std::runtime_error( "failed to find supported format!" ); + } + + vk::PresentModeKHR pickPresentMode( std::vector const & presentModes ) + { + vk::PresentModeKHR pickedMode = vk::PresentModeKHR::eFifo; + for ( const auto & presentMode : presentModes ) + { + if ( presentMode == vk::PresentModeKHR::eMailbox ) + { + pickedMode = presentMode; + break; + } + + if ( presentMode == vk::PresentModeKHR::eImmediate ) + { + pickedMode = presentMode; + } + } + return pickedMode; + } + + vk::SurfaceFormatKHR pickSurfaceFormat( std::vector const & formats ) + { + assert( !formats.empty() ); + vk::SurfaceFormatKHR pickedFormat = formats[0]; + if ( formats.size() == 1 ) + { + if ( formats[0].format == vk::Format::eUndefined ) + { + pickedFormat.format = vk::Format::eB8G8R8A8Unorm; + pickedFormat.colorSpace = vk::ColorSpaceKHR::eSrgbNonlinear; + } + } + else + { + // request several formats, the first found will be used + vk::Format requestedFormats[] = { vk::Format::eB8G8R8A8Unorm, vk::Format::eR8G8B8A8Unorm, vk::Format::eB8G8R8Unorm, vk::Format::eR8G8B8Unorm }; + vk::ColorSpaceKHR requestedColorSpace = vk::ColorSpaceKHR::eSrgbNonlinear; + for ( size_t i = 0; i < sizeof( requestedFormats ) / sizeof( requestedFormats[0] ); i++ ) + { + vk::Format requestedFormat = requestedFormats[i]; + auto it = std::find_if( formats.begin(), + formats.end(), + [requestedFormat, requestedColorSpace]( vk::SurfaceFormatKHR const & f ) + { return ( f.format == requestedFormat ) && ( f.colorSpace == requestedColorSpace ); } ); + if ( it != formats.end() ) + { + pickedFormat = *it; + break; + } + } + } + assert( pickedFormat.colorSpace == vk::ColorSpaceKHR::eSrgbNonlinear ); + return pickedFormat; + } + + void setImageLayout( + vk::CommandBuffer const & commandBuffer, vk::Image image, vk::Format format, vk::ImageLayout 
oldImageLayout, vk::ImageLayout newImageLayout ) + { + vk::AccessFlags sourceAccessMask; + switch ( oldImageLayout ) + { + case vk::ImageLayout::eTransferDstOptimal: sourceAccessMask = vk::AccessFlagBits::eTransferWrite; break; + case vk::ImageLayout::ePreinitialized: sourceAccessMask = vk::AccessFlagBits::eHostWrite; break; + case vk::ImageLayout::eGeneral: // sourceAccessMask is empty + case vk::ImageLayout::eUndefined: break; + default: assert( false ); break; + } + + vk::PipelineStageFlags sourceStage; + switch ( oldImageLayout ) + { + case vk::ImageLayout::eGeneral: + case vk::ImageLayout::ePreinitialized: sourceStage = vk::PipelineStageFlagBits::eHost; break; + case vk::ImageLayout::eTransferDstOptimal: sourceStage = vk::PipelineStageFlagBits::eTransfer; break; + case vk::ImageLayout::eUndefined: sourceStage = vk::PipelineStageFlagBits::eTopOfPipe; break; + default: assert( false ); break; + } + + vk::AccessFlags destinationAccessMask; + switch ( newImageLayout ) + { + case vk::ImageLayout::eColorAttachmentOptimal: destinationAccessMask = vk::AccessFlagBits::eColorAttachmentWrite; break; + case vk::ImageLayout::eDepthStencilAttachmentOptimal: + destinationAccessMask = vk::AccessFlagBits::eDepthStencilAttachmentRead | vk::AccessFlagBits::eDepthStencilAttachmentWrite; + break; + case vk::ImageLayout::eGeneral: // empty destinationAccessMask + case vk::ImageLayout::ePresentSrcKHR: break; + case vk::ImageLayout::eShaderReadOnlyOptimal: destinationAccessMask = vk::AccessFlagBits::eShaderRead; break; + case vk::ImageLayout::eTransferSrcOptimal: destinationAccessMask = vk::AccessFlagBits::eTransferRead; break; + case vk::ImageLayout::eTransferDstOptimal: destinationAccessMask = vk::AccessFlagBits::eTransferWrite; break; + default: assert( false ); break; + } + + vk::PipelineStageFlags destinationStage; + switch ( newImageLayout ) + { + case vk::ImageLayout::eColorAttachmentOptimal: destinationStage = vk::PipelineStageFlagBits::eColorAttachmentOutput; break; + case 
vk::ImageLayout::eDepthStencilAttachmentOptimal: destinationStage = vk::PipelineStageFlagBits::eEarlyFragmentTests; break; + case vk::ImageLayout::eGeneral: destinationStage = vk::PipelineStageFlagBits::eHost; break; + case vk::ImageLayout::ePresentSrcKHR: destinationStage = vk::PipelineStageFlagBits::eBottomOfPipe; break; + case vk::ImageLayout::eShaderReadOnlyOptimal: destinationStage = vk::PipelineStageFlagBits::eFragmentShader; break; + case vk::ImageLayout::eTransferDstOptimal: + case vk::ImageLayout::eTransferSrcOptimal: destinationStage = vk::PipelineStageFlagBits::eTransfer; break; + default: assert( false ); break; + } + + vk::ImageAspectFlags aspectMask; + if ( newImageLayout == vk::ImageLayout::eDepthStencilAttachmentOptimal ) + { + aspectMask = vk::ImageAspectFlagBits::eDepth; + if ( format == vk::Format::eD32SfloatS8Uint || format == vk::Format::eD24UnormS8Uint ) + { + aspectMask |= vk::ImageAspectFlagBits::eStencil; + } + } + else + { + aspectMask = vk::ImageAspectFlagBits::eColor; + } + + vk::ImageSubresourceRange imageSubresourceRange( aspectMask, 0, 1, 0, 1 ); + vk::ImageMemoryBarrier imageMemoryBarrier( sourceAccessMask, + destinationAccessMask, + oldImageLayout, + newImageLayout, + VK_QUEUE_FAMILY_IGNORED, + VK_QUEUE_FAMILY_IGNORED, + image, + imageSubresourceRange ); + return commandBuffer.pipelineBarrier( sourceStage, destinationStage, {}, nullptr, nullptr, imageMemoryBarrier ); + } + + void submitAndWait( vk::Device const & device, vk::Queue const & queue, vk::CommandBuffer const & commandBuffer ) + { + vk::Fence fence = device.createFence( vk::FenceCreateInfo() ); + queue.submit( vk::SubmitInfo( 0, nullptr, nullptr, 1, &commandBuffer ), fence ); + while ( vk::Result::eTimeout == device.waitForFences( fence, VK_TRUE, vk::su::FenceTimeout ) ) + ; + device.destroyFence( fence ); + } + + void updateDescriptorSets( vk::Device const & device, + vk::DescriptorSet const & descriptorSet, + std::vector> const & bufferData, + vk::su::TextureData const & 
textureData, + uint32_t bindingOffset ) + { + std::vector bufferInfos; + bufferInfos.reserve( bufferData.size() ); + + std::vector writeDescriptorSets; + writeDescriptorSets.reserve( bufferData.size() + 1 ); + uint32_t dstBinding = bindingOffset; + for ( auto const & bd : bufferData ) + { + bufferInfos.emplace_back( std::get<1>( bd ), 0, std::get<2>( bd ) ); + writeDescriptorSets.emplace_back( descriptorSet, dstBinding++, 0, 1, std::get<0>( bd ), nullptr, &bufferInfos.back(), &std::get<3>( bd ) ); + } + + vk::DescriptorImageInfo imageInfo( textureData.sampler, textureData.imageData->imageView, vk::ImageLayout::eShaderReadOnlyOptimal ); + writeDescriptorSets.emplace_back( descriptorSet, dstBinding, 0, vk::DescriptorType::eCombinedImageSampler, imageInfo, nullptr, nullptr ); + + device.updateDescriptorSets( writeDescriptorSets, nullptr ); + } + + void updateDescriptorSets( vk::Device const & device, + vk::DescriptorSet const & descriptorSet, + std::vector> const & bufferData, + std::vector const & textureData, + uint32_t bindingOffset ) + { + std::vector bufferInfos; + bufferInfos.reserve( bufferData.size() ); + + std::vector writeDescriptorSets; + writeDescriptorSets.reserve( bufferData.size() + ( textureData.empty() ? 
0 : 1 ) ); + uint32_t dstBinding = bindingOffset; + for ( auto const & bd : bufferData ) + { + bufferInfos.emplace_back( std::get<1>( bd ), 0, std::get<2>( bd ) ); + writeDescriptorSets.emplace_back( descriptorSet, dstBinding++, 0, 1, std::get<0>( bd ), nullptr, &bufferInfos.back(), &std::get<3>( bd ) ); + } + + std::vector imageInfos; + if ( !textureData.empty() ) + { + imageInfos.reserve( textureData.size() ); + for ( auto const & td : textureData ) + { + imageInfos.emplace_back( td.sampler, td.imageData->imageView, vk::ImageLayout::eShaderReadOnlyOptimal ); + } + writeDescriptorSets.emplace_back( descriptorSet, + dstBinding, + 0, + checked_cast( imageInfos.size() ), + vk::DescriptorType::eCombinedImageSampler, + imageInfos.data(), + nullptr, + nullptr ); + } + + device.updateDescriptorSets( writeDescriptorSets, nullptr ); + } + + BufferData::BufferData( vk::PhysicalDevice const & physicalDevice, + vk::Device const & device, + vk::DeviceSize size, + vk::BufferUsageFlags usage, + vk::MemoryPropertyFlags propertyFlags ) +#if !defined( NDEBUG ) + : m_size( size ), m_usage( usage ), m_propertyFlags( propertyFlags ) +#endif + { + buffer = device.createBuffer( vk::BufferCreateInfo( vk::BufferCreateFlags(), size, usage ) ); + deviceMemory = vk::su::allocateDeviceMemory( device, physicalDevice.getMemoryProperties(), device.getBufferMemoryRequirements( buffer ), propertyFlags ); + device.bindBufferMemory( buffer, deviceMemory, 0 ); + } + + DepthBufferData::DepthBufferData( vk::PhysicalDevice const & physicalDevice, vk::Device const & device, vk::Format format, vk::Extent2D const & extent ) + : ImageData( physicalDevice, + device, + format, + extent, + vk::ImageTiling::eOptimal, + vk::ImageUsageFlagBits::eDepthStencilAttachment, + vk::ImageLayout::eUndefined, + vk::MemoryPropertyFlagBits::eDeviceLocal, + vk::ImageAspectFlagBits::eDepth ) + { + } + + ImageData::ImageData( vk::PhysicalDevice const & physicalDevice, + vk::Device const & device, + vk::Format format_, + 
vk::Extent2D const & extent, + vk::ImageTiling tiling, + vk::ImageUsageFlags usage, + vk::ImageLayout initialLayout, + vk::MemoryPropertyFlags memoryProperties, + vk::ImageAspectFlags aspectMask ) + : format( format_ ) + { + vk::ImageCreateInfo imageCreateInfo( vk::ImageCreateFlags(), + vk::ImageType::e2D, + format, + vk::Extent3D( extent, 1 ), + 1, + 1, + vk::SampleCountFlagBits::e1, + tiling, + usage | vk::ImageUsageFlagBits::eSampled, + vk::SharingMode::eExclusive, + {}, + initialLayout ); + image = device.createImage( imageCreateInfo ); + + deviceMemory = vk::su::allocateDeviceMemory( device, physicalDevice.getMemoryProperties(), device.getImageMemoryRequirements( image ), memoryProperties ); + + device.bindImageMemory( image, deviceMemory, 0 ); + + vk::ImageViewCreateInfo imageViewCreateInfo( {}, image, vk::ImageViewType::e2D, format, {}, { aspectMask, 0, 1, 0, 1 } ); + imageView = device.createImageView( imageViewCreateInfo ); + } + + SurfaceData::SurfaceData( vk::Instance const & instance, std::string const & windowName, vk::Extent2D const & extent_ ) + : extent( extent_ ), window( vk::su::createWindow( windowName, extent ) ) + { + VkSurfaceKHR _surface; + VkResult err = glfwCreateWindowSurface( static_cast( instance ), window.handle, nullptr, &_surface ); + if ( err != VK_SUCCESS ) + throw std::runtime_error( "Failed to create window!" 
); + surface = vk::SurfaceKHR( _surface ); + } + + SwapChainData::SwapChainData( vk::PhysicalDevice const & physicalDevice, + vk::Device const & device, + vk::SurfaceKHR const & surface, + vk::Extent2D const & extent, + vk::ImageUsageFlags usage, + vk::SwapchainKHR const & oldSwapChain, + uint32_t graphicsQueueFamilyIndex, + uint32_t presentQueueFamilyIndex ) + { + vk::SurfaceFormatKHR surfaceFormat = vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surface ) ); + colorFormat = surfaceFormat.format; + + vk::SurfaceCapabilitiesKHR surfaceCapabilities = physicalDevice.getSurfaceCapabilitiesKHR( surface ); + vk::Extent2D swapchainExtent; + if ( surfaceCapabilities.currentExtent.width == std::numeric_limits::max() ) + { + // If the surface size is undefined, the size is set to the size of the images requested. + swapchainExtent.width = clamp( extent.width, surfaceCapabilities.minImageExtent.width, surfaceCapabilities.maxImageExtent.width ); + swapchainExtent.height = clamp( extent.height, surfaceCapabilities.minImageExtent.height, surfaceCapabilities.maxImageExtent.height ); + } + else + { + // If the surface size is defined, the swap chain size must match + swapchainExtent = surfaceCapabilities.currentExtent; + } + vk::SurfaceTransformFlagBitsKHR preTransform = ( surfaceCapabilities.supportedTransforms & vk::SurfaceTransformFlagBitsKHR::eIdentity ) + ? vk::SurfaceTransformFlagBitsKHR::eIdentity + : surfaceCapabilities.currentTransform; + vk::CompositeAlphaFlagBitsKHR compositeAlpha = + ( surfaceCapabilities.supportedCompositeAlpha & vk::CompositeAlphaFlagBitsKHR::ePreMultiplied ) ? vk::CompositeAlphaFlagBitsKHR::ePreMultiplied + : ( surfaceCapabilities.supportedCompositeAlpha & vk::CompositeAlphaFlagBitsKHR::ePostMultiplied ) ? vk::CompositeAlphaFlagBitsKHR::ePostMultiplied + : ( surfaceCapabilities.supportedCompositeAlpha & vk::CompositeAlphaFlagBitsKHR::eInherit ) ? 
vk::CompositeAlphaFlagBitsKHR::eInherit + : vk::CompositeAlphaFlagBitsKHR::eOpaque; + vk::PresentModeKHR presentMode = vk::su::pickPresentMode( physicalDevice.getSurfacePresentModesKHR( surface ) ); + vk::SwapchainCreateInfoKHR swapChainCreateInfo( {}, + surface, + vk::su::clamp( 3u, surfaceCapabilities.minImageCount, surfaceCapabilities.maxImageCount ), + colorFormat, + surfaceFormat.colorSpace, + swapchainExtent, + 1, + usage, + vk::SharingMode::eExclusive, + {}, + preTransform, + compositeAlpha, + presentMode, + true, + oldSwapChain ); + if ( graphicsQueueFamilyIndex != presentQueueFamilyIndex ) + { + uint32_t queueFamilyIndices[2] = { graphicsQueueFamilyIndex, presentQueueFamilyIndex }; + // If the graphics and present queues are from different queue families, we either have to explicitly transfer + // ownership of images between the queues, or we have to create the swapchain with imageSharingMode as + // vk::SharingMode::eConcurrent + swapChainCreateInfo.imageSharingMode = vk::SharingMode::eConcurrent; + swapChainCreateInfo.queueFamilyIndexCount = 2; + swapChainCreateInfo.pQueueFamilyIndices = queueFamilyIndices; + } + swapChain = device.createSwapchainKHR( swapChainCreateInfo ); + + images = device.getSwapchainImagesKHR( swapChain ); + + imageViews.reserve( images.size() ); + vk::ImageViewCreateInfo imageViewCreateInfo( {}, {}, vk::ImageViewType::e2D, colorFormat, {}, { vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1 } ); + for ( auto image : images ) + { + imageViewCreateInfo.image = image; + imageViews.push_back( device.createImageView( imageViewCreateInfo ) ); + } + } + + CheckerboardImageGenerator::CheckerboardImageGenerator( std::array const & rgb0, std::array const & rgb1 ) + : m_rgb0( rgb0 ), m_rgb1( rgb1 ) + { + } + + void CheckerboardImageGenerator::operator()( void * data, vk::Extent2D & extent ) const + { + // Checkerboard of 16x16 pixel squares + uint8_t * pImageMemory = static_cast( data ); + for ( uint32_t row = 0; row < extent.height; row++ ) + { + 
for ( uint32_t col = 0; col < extent.width; col++ ) + { + std::array const & rgb = ( ( ( row & 0x10 ) == 0 ) ^ ( ( col & 0x10 ) == 0 ) ) ? m_rgb1 : m_rgb0; + pImageMemory[0] = rgb[0]; + pImageMemory[1] = rgb[1]; + pImageMemory[2] = rgb[2]; + pImageMemory[3] = 255; + pImageMemory += 4; + } + } + } + + MonochromeImageGenerator::MonochromeImageGenerator( std::array const & rgb ) : m_rgb( rgb ) {} + + void MonochromeImageGenerator::operator()( void * data, vk::Extent2D const & extent ) const + { + // fill in with the monochrome color + unsigned char * pImageMemory = static_cast( data ); + for ( uint32_t row = 0; row < extent.height; row++ ) + { + for ( uint32_t col = 0; col < extent.width; col++ ) + { + pImageMemory[0] = m_rgb[0]; + pImageMemory[1] = m_rgb[1]; + pImageMemory[2] = m_rgb[2]; + pImageMemory[3] = 255; + pImageMemory += 4; + } + } + } + + PixelsImageGenerator::PixelsImageGenerator( vk::Extent2D const & extent, size_t channels, unsigned char const * pixels ) + : m_extent( extent ), m_channels( channels ), m_pixels( pixels ) + { + assert( m_channels == 4 ); + } + + void PixelsImageGenerator::operator()( void * data, vk::Extent2D const & extent ) const + { + assert( extent == m_extent ); + memcpy( data, m_pixels, extent.width * extent.height * m_channels ); + } + + TextureData::TextureData( vk::PhysicalDevice const & physicalDevice, + vk::Device const & device, + vk::Extent2D const & extent_, + vk::ImageUsageFlags usageFlags, + vk::FormatFeatureFlags formatFeatureFlags, + bool anisotropyEnable, + bool forceStaging ) + : format( vk::Format::eR8G8B8A8Unorm ), extent( extent_ ) + { + vk::FormatProperties formatProperties = physicalDevice.getFormatProperties( format ); + + formatFeatureFlags |= vk::FormatFeatureFlagBits::eSampledImage; + needsStaging = forceStaging || ( ( formatProperties.linearTilingFeatures & formatFeatureFlags ) != formatFeatureFlags ); + vk::ImageTiling imageTiling; + vk::ImageLayout initialLayout; + vk::MemoryPropertyFlags requirements; + if 
( needsStaging ) + { + assert( ( formatProperties.optimalTilingFeatures & formatFeatureFlags ) == formatFeatureFlags ); + stagingBufferData = + std::unique_ptr( new BufferData( physicalDevice, device, extent.width * extent.height * 4, vk::BufferUsageFlagBits::eTransferSrc ) ); + imageTiling = vk::ImageTiling::eOptimal; + usageFlags |= vk::ImageUsageFlagBits::eTransferDst; + initialLayout = vk::ImageLayout::eUndefined; + } + else + { + imageTiling = vk::ImageTiling::eLinear; + initialLayout = vk::ImageLayout::ePreinitialized; + requirements = vk::MemoryPropertyFlagBits::eHostCoherent | vk::MemoryPropertyFlagBits::eHostVisible; + } + imageData = std::unique_ptr( new ImageData( physicalDevice, + device, + format, + extent, + imageTiling, + usageFlags | vk::ImageUsageFlagBits::eSampled, + initialLayout, + requirements, + vk::ImageAspectFlagBits::eColor ) ); + + sampler = device.createSampler( vk::SamplerCreateInfo( vk::SamplerCreateFlags(), + vk::Filter::eLinear, + vk::Filter::eLinear, + vk::SamplerMipmapMode::eLinear, + vk::SamplerAddressMode::eRepeat, + vk::SamplerAddressMode::eRepeat, + vk::SamplerAddressMode::eRepeat, + 0.0f, + anisotropyEnable, + 16.0f, + false, + vk::CompareOp::eNever, + 0.0f, + 0.0f, + vk::BorderColor::eFloatOpaqueBlack ) ); + } + + UUID::UUID( uint8_t const data[VK_UUID_SIZE] ) + { + memcpy( m_data, data, VK_UUID_SIZE * sizeof( uint8_t ) ); + } + + WindowData::WindowData( GLFWwindow * wnd, std::string const & name, vk::Extent2D const & extent ) : handle{ wnd }, name{ name }, extent{ extent } {} + + WindowData::WindowData( WindowData && other ) : handle{}, name{}, extent{} + { + std::swap( handle, other.handle ); + std::swap( name, other.name ); + std::swap( extent, other.extent ); + } + + WindowData::~WindowData() noexcept + { + glfwDestroyWindow( handle ); + } + + WindowData createWindow( std::string const & windowName, vk::Extent2D const & extent ) + { + struct glfwContext + { + glfwContext() + { + glfwInit(); + glfwSetErrorCallback( + []( 
int error, const char * msg ) + { + std::cerr << "glfw: " + << "(" << error << ") " << msg << std::endl; + } ); + } + + ~glfwContext() + { + glfwTerminate(); + } + }; + + static auto glfwCtx = glfwContext(); + (void)glfwCtx; + + glfwWindowHint( GLFW_CLIENT_API, GLFW_NO_API ); + GLFWwindow * window = glfwCreateWindow( extent.width, extent.height, windowName.c_str(), nullptr, nullptr ); + return WindowData( window, windowName, extent ); + } + + vk::DebugUtilsMessengerCreateInfoEXT makeDebugUtilsMessengerCreateInfoEXT() + { + return { {}, + vk::DebugUtilsMessageSeverityFlagBitsEXT::eWarning | vk::DebugUtilsMessageSeverityFlagBitsEXT::eError, + vk::DebugUtilsMessageTypeFlagBitsEXT::eGeneral | vk::DebugUtilsMessageTypeFlagBitsEXT::ePerformance | + vk::DebugUtilsMessageTypeFlagBitsEXT::eValidation, + &vk::su::debugUtilsMessengerCallback }; + } + +#if defined( NDEBUG ) + vk::StructureChain +#else + vk::StructureChain +#endif + makeInstanceCreateInfoChain( vk::ApplicationInfo const & applicationInfo, + std::vector const & layers, + std::vector const & extensions ) + { +#if defined( NDEBUG ) + // in non-debug mode just use the InstanceCreateInfo for instance creation + vk::StructureChain instanceCreateInfo( { {}, &applicationInfo, layers, extensions } ); +#else + // in debug mode, addionally use the debugUtilsMessengerCallback in instance creation! 
+ vk::DebugUtilsMessageSeverityFlagsEXT severityFlags( vk::DebugUtilsMessageSeverityFlagBitsEXT::eWarning | + vk::DebugUtilsMessageSeverityFlagBitsEXT::eError ); + vk::DebugUtilsMessageTypeFlagsEXT messageTypeFlags( vk::DebugUtilsMessageTypeFlagBitsEXT::eGeneral | vk::DebugUtilsMessageTypeFlagBitsEXT::ePerformance | + vk::DebugUtilsMessageTypeFlagBitsEXT::eValidation ); + vk::StructureChain instanceCreateInfo( + { {}, &applicationInfo, layers, extensions }, { {}, severityFlags, messageTypeFlags, &vk::su::debugUtilsMessengerCallback } ); +#endif + return instanceCreateInfo; + } + + } // namespace su +} // namespace vk + +std::ostream & operator<<( std::ostream & os, vk::su::UUID const & uuid ) +{ + os << std::setfill( '0' ) << std::hex; + for ( uint32_t j = 0; j < VK_UUID_SIZE; ++j ) + { + os << std::setw( 2 ) << static_cast( uuid.m_data[j] ); + if ( j == 3 || j == 5 || j == 7 || j == 9 ) + { + std::cout << '-'; + } + } + os << std::setfill( ' ' ) << std::dec; + return os; +} diff --git a/core/rhi/vulkan/utils/utils.hpp b/core/rhi/vulkan/utils/utils.hpp new file mode 100644 index 0000000..5a88a0e --- /dev/null +++ b/core/rhi/vulkan/utils/utils.hpp @@ -0,0 +1,439 @@ +#pragma once + +// Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +#include + +#define GLFW_INCLUDE_NONE +#include +#include +#include +#include +#include // std::unique_ptr + +namespace vk +{ + namespace su + { + const uint64_t FenceTimeout = 100000000; + + template + void oneTimeSubmit( vk::Device const & device, vk::CommandPool const & commandPool, vk::Queue const & queue, Func const & func ) + { + vk::CommandBuffer commandBuffer = + device.allocateCommandBuffers( vk::CommandBufferAllocateInfo( commandPool, vk::CommandBufferLevel::ePrimary, 1 ) ).front(); + commandBuffer.begin( vk::CommandBufferBeginInfo( vk::CommandBufferUsageFlagBits::eOneTimeSubmit ) ); + func( commandBuffer ); + commandBuffer.end(); + queue.submit( vk::SubmitInfo( 0, nullptr, nullptr, 1, &commandBuffer ), nullptr ); + queue.waitIdle(); + } + + template + void copyToDevice( vk::Device const & device, vk::DeviceMemory const & deviceMemory, T const * pData, size_t count, vk::DeviceSize stride = sizeof( T ) ) + { + assert( sizeof( T ) <= stride ); + uint8_t * deviceData = static_cast( device.mapMemory( deviceMemory, 0, count * stride ) ); + if ( stride == sizeof( T ) ) + { + memcpy( deviceData, pData, count * sizeof( T ) ); + } + else + { + for ( size_t i = 0; i < count; i++ ) + { + memcpy( deviceData, &pData[i], sizeof( T ) ); + deviceData += stride; + } + } + device.unmapMemory( deviceMemory ); + } + + template + void copyToDevice( vk::Device const & device, vk::DeviceMemory const & deviceMemory, T const & data ) + { + copyToDevice( device, deviceMemory, &data, 1 ); + } + + template + VULKAN_HPP_INLINE constexpr const T & clamp( const T & v, const T & lo, const T & hi ) + { + return v < lo ? lo : hi < v ? 
hi : v; + } + + void setImageLayout( + vk::CommandBuffer const & commandBuffer, vk::Image image, vk::Format format, vk::ImageLayout oldImageLayout, vk::ImageLayout newImageLayout ); + + struct WindowData + { + WindowData( GLFWwindow * wnd, std::string const & name, vk::Extent2D const & extent ); + WindowData( const WindowData & ) = delete; + WindowData( WindowData && other ); + ~WindowData() noexcept; + + GLFWwindow * handle; + std::string name; + vk::Extent2D extent; + }; + + WindowData createWindow( std::string const & windowName, vk::Extent2D const & extent ); + + struct BufferData + { + BufferData( vk::PhysicalDevice const & physicalDevice, + vk::Device const & device, + vk::DeviceSize size, + vk::BufferUsageFlags usage, + vk::MemoryPropertyFlags propertyFlags = vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent ); + + void clear( vk::Device const & device ) + { + device.destroyBuffer( buffer ); // to prevent some validation layer warning, the Buffer needs to be destroyed before the bound DeviceMemory + device.freeMemory( deviceMemory ); + } + + template + void upload( vk::Device const & device, DataType const & data ) const + { + assert( ( m_propertyFlags & vk::MemoryPropertyFlagBits::eHostCoherent ) && ( m_propertyFlags & vk::MemoryPropertyFlagBits::eHostVisible ) ); + assert( sizeof( DataType ) <= m_size ); + + void * dataPtr = device.mapMemory( deviceMemory, 0, sizeof( DataType ) ); + memcpy( dataPtr, &data, sizeof( DataType ) ); + device.unmapMemory( deviceMemory ); + } + + template + void upload( vk::Device const & device, std::vector const & data, size_t stride = 0 ) const + { + assert( m_propertyFlags & vk::MemoryPropertyFlagBits::eHostVisible ); + + size_t elementSize = stride ? 
stride : sizeof( DataType ); + assert( sizeof( DataType ) <= elementSize ); + + copyToDevice( device, deviceMemory, data.data(), data.size(), elementSize ); + } + + template + void upload( vk::PhysicalDevice const & physicalDevice, + vk::Device const & device, + vk::CommandPool const & commandPool, + vk::Queue queue, + std::vector const & data, + size_t stride ) const + { + assert( m_usage & vk::BufferUsageFlagBits::eTransferDst ); + assert( m_propertyFlags & vk::MemoryPropertyFlagBits::eDeviceLocal ); + + size_t elementSize = stride ? stride : sizeof( DataType ); + assert( sizeof( DataType ) <= elementSize ); + + size_t dataSize = data.size() * elementSize; + assert( dataSize <= m_size ); + + vk::su::BufferData stagingBuffer( physicalDevice, device, dataSize, vk::BufferUsageFlagBits::eTransferSrc ); + copyToDevice( device, stagingBuffer.deviceMemory, data.data(), data.size(), elementSize ); + + vk::su::oneTimeSubmit( device, + commandPool, + queue, + [&]( vk::CommandBuffer const & commandBuffer ) + { commandBuffer.copyBuffer( stagingBuffer.buffer, buffer, vk::BufferCopy( 0, 0, dataSize ) ); } ); + + stagingBuffer.clear( device ); + } + + vk::Buffer buffer; + vk::DeviceMemory deviceMemory; +#if !defined( NDEBUG ) + private: + vk::DeviceSize m_size; + vk::BufferUsageFlags m_usage; + vk::MemoryPropertyFlags m_propertyFlags; +#endif + }; + + struct ImageData + { + ImageData( vk::PhysicalDevice const & physicalDevice, + vk::Device const & device, + vk::Format format, + vk::Extent2D const & extent, + vk::ImageTiling tiling, + vk::ImageUsageFlags usage, + vk::ImageLayout initialLayout, + vk::MemoryPropertyFlags memoryProperties, + vk::ImageAspectFlags aspectMask ); + + void clear( vk::Device const & device ) + { + device.destroyImageView( imageView ); + device.destroyImage( image ); // the Image should to be destroyed before the bound DeviceMemory is freed + device.freeMemory( deviceMemory ); + } + + vk::Format format; + vk::Image image; + vk::DeviceMemory deviceMemory; 
+ vk::ImageView imageView; + }; + + struct DepthBufferData : public ImageData + { + DepthBufferData( vk::PhysicalDevice const & physicalDevice, vk::Device const & device, vk::Format format, vk::Extent2D const & extent ); + }; + + struct SurfaceData + { + SurfaceData( vk::Instance const & instance, std::string const & windowName, vk::Extent2D const & extent ); + + vk::Extent2D extent; + WindowData window; + vk::SurfaceKHR surface; + }; + + struct SwapChainData + { + SwapChainData( vk::PhysicalDevice const & physicalDevice, + vk::Device const & device, + vk::SurfaceKHR const & surface, + vk::Extent2D const & extent, + vk::ImageUsageFlags usage, + vk::SwapchainKHR const & oldSwapChain, + uint32_t graphicsFamilyIndex, + uint32_t presentFamilyIndex ); + + void clear( vk::Device const & device ) + { + for ( auto & imageView : imageViews ) + { + device.destroyImageView( imageView ); + } + imageViews.clear(); + images.clear(); + device.destroySwapchainKHR( swapChain ); + } + + vk::Format colorFormat; + vk::SwapchainKHR swapChain; + std::vector images; + std::vector imageViews; + }; + + class CheckerboardImageGenerator + { + public: + CheckerboardImageGenerator( std::array const & rgb0 = { { 0, 0, 0 } }, std::array const & rgb1 = { { 255, 255, 255 } } ); + + void operator()( void * data, vk::Extent2D & extent ) const; + + private: + std::array const & m_rgb0; + std::array const & m_rgb1; + }; + + class MonochromeImageGenerator + { + public: + MonochromeImageGenerator( std::array const & rgb ); + + void operator()( void * data, vk::Extent2D const & extent ) const; + + private: + std::array const & m_rgb; + }; + + class PixelsImageGenerator + { + public: + PixelsImageGenerator( vk::Extent2D const & extent, size_t channels, unsigned char const * pixels ); + + void operator()( void * data, vk::Extent2D const & extent ) const; + + private: + vk::Extent2D m_extent; + size_t m_channels; + unsigned char const * m_pixels; + }; + + struct TextureData + { + TextureData( 
vk::PhysicalDevice const & physicalDevice, + vk::Device const & device, + vk::Extent2D const & extent_ = { 256, 256 }, + vk::ImageUsageFlags usageFlags = {}, + vk::FormatFeatureFlags formatFeatureFlags = {}, + bool anisotropyEnable = false, + bool forceStaging = false ); + + void clear( vk::Device const & device ) + { + if ( stagingBufferData ) + { + stagingBufferData->clear( device ); + } + imageData->clear( device ); + device.destroySampler( sampler ); + } + + template + void setImage( vk::Device const & device, vk::CommandBuffer const & commandBuffer, ImageGenerator const & imageGenerator ) + { + void * data = needsStaging + ? device.mapMemory( stagingBufferData->deviceMemory, 0, device.getBufferMemoryRequirements( stagingBufferData->buffer ).size ) + : device.mapMemory( imageData->deviceMemory, 0, device.getImageMemoryRequirements( imageData->image ).size ); + imageGenerator( data, extent ); + device.unmapMemory( needsStaging ? stagingBufferData->deviceMemory : imageData->deviceMemory ); + + if ( needsStaging ) + { + // Since we're going to blit to the texture image, set its layout to eTransferDstOptimal + vk::su::setImageLayout( commandBuffer, imageData->image, imageData->format, vk::ImageLayout::eUndefined, vk::ImageLayout::eTransferDstOptimal ); + vk::BufferImageCopy copyRegion( 0, + extent.width, + extent.height, + vk::ImageSubresourceLayers( vk::ImageAspectFlagBits::eColor, 0, 0, 1 ), + vk::Offset3D( 0, 0, 0 ), + vk::Extent3D( extent, 1 ) ); + commandBuffer.copyBufferToImage( stagingBufferData->buffer, imageData->image, vk::ImageLayout::eTransferDstOptimal, copyRegion ); + // Set the layout for the texture image from eTransferDstOptimal to SHADER_READ_ONLY + vk::su::setImageLayout( + commandBuffer, imageData->image, imageData->format, vk::ImageLayout::eTransferDstOptimal, vk::ImageLayout::eShaderReadOnlyOptimal ); + } + else + { + // If we can use the linear tiled image as a texture, just do it + vk::su::setImageLayout( + commandBuffer, imageData->image, 
imageData->format, vk::ImageLayout::ePreinitialized, vk::ImageLayout::eShaderReadOnlyOptimal ); + } + } + + vk::Format format; + vk::Extent2D extent; + bool needsStaging; + std::unique_ptr stagingBufferData; + std::unique_ptr imageData; + vk::Sampler sampler; + }; + + struct UUID + { + public: + UUID( uint8_t const data[VK_UUID_SIZE] ); + + uint8_t m_data[VK_UUID_SIZE]; + }; + + template + VULKAN_HPP_INLINE TargetType checked_cast( SourceType value ) + { + static_assert( sizeof( TargetType ) <= sizeof( SourceType ), "No need to cast from smaller to larger type!" ); + static_assert( std::numeric_limits::is_integer, "Only integer types supported!" ); + static_assert( !std::numeric_limits::is_signed, "Only unsigned types supported!" ); + static_assert( std::numeric_limits::is_integer, "Only integer types supported!" ); + static_assert( !std::numeric_limits::is_signed, "Only unsigned types supported!" ); + assert( value <= std::numeric_limits::max() ); + return static_cast( value ); + } + + vk::DeviceMemory allocateDeviceMemory( vk::Device const & device, + vk::PhysicalDeviceMemoryProperties const & memoryProperties, + vk::MemoryRequirements const & memoryRequirements, + vk::MemoryPropertyFlags memoryPropertyFlags ); + bool contains( std::vector const & extensionProperties, std::string const & extensionName ); + vk::DescriptorPool createDescriptorPool( vk::Device const & device, std::vector const & poolSizes ); + vk::DescriptorSetLayout createDescriptorSetLayout( vk::Device const & device, + std::vector> const & bindingData, + vk::DescriptorSetLayoutCreateFlags flags = {} ); + vk::Device createDevice( vk::PhysicalDevice const & physicalDevice, + uint32_t queueFamilyIndex, + std::vector const & extensions = {}, + vk::PhysicalDeviceFeatures const * physicalDeviceFeatures = nullptr, + void const * pNext = nullptr ); + std::vector createFramebuffers( vk::Device const & device, + vk::RenderPass & renderPass, + std::vector const & imageViews, + vk::ImageView const & 
depthImageView, + vk::Extent2D const & extent ); + vk::Pipeline createGraphicsPipeline( vk::Device const & device, + vk::PipelineCache const & pipelineCache, + std::pair const & vertexShaderData, + std::pair const & fragmentShaderData, + uint32_t vertexStride, + std::vector> const & vertexInputAttributeFormatOffset, + vk::FrontFace frontFace, + bool depthBuffered, + vk::PipelineLayout const & pipelineLayout, + vk::RenderPass const & renderPass ); + vk::Instance createInstance( std::string const & appName, + std::string const & engineName, + std::vector const & layers = {}, + std::vector const & extensions = {}, + uint32_t apiVersion = VK_API_VERSION_1_0 ); + vk::RenderPass createRenderPass( vk::Device const & device, + vk::Format colorFormat, + vk::Format depthFormat, + vk::AttachmentLoadOp loadOp = vk::AttachmentLoadOp::eClear, + vk::ImageLayout colorFinalLayout = vk::ImageLayout::ePresentSrcKHR ); + VKAPI_ATTR VkBool32 VKAPI_CALL debugUtilsMessengerCallback( VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, + VkDebugUtilsMessageTypeFlagsEXT messageTypes, + VkDebugUtilsMessengerCallbackDataEXT const * pCallbackData, + void * /*pUserData*/ ); + uint32_t findGraphicsQueueFamilyIndex( std::vector const & queueFamilyProperties ); + std::pair findGraphicsAndPresentQueueFamilyIndex( vk::PhysicalDevice physicalDevice, vk::SurfaceKHR const & surface ); + uint32_t findMemoryType( vk::PhysicalDeviceMemoryProperties const & memoryProperties, uint32_t typeBits, vk::MemoryPropertyFlags requirementsMask ); + std::vector gatherExtensions( std::vector const & extensions +#if !defined( NDEBUG ) + , + std::vector const & extensionProperties +#endif + ); + std::vector gatherLayers( std::vector const & layers +#if !defined( NDEBUG ) + , + std::vector const & layerProperties +#endif + ); + std::vector getDeviceExtensions(); + std::vector getInstanceExtensions(); + vk::DebugUtilsMessengerCreateInfoEXT makeDebugUtilsMessengerCreateInfoEXT(); +#if defined( NDEBUG ) + 
vk::StructureChain +#else + vk::StructureChain +#endif + makeInstanceCreateInfoChain( vk::ApplicationInfo const & applicationInfo, + std::vector const & layers, + std::vector const & extensions ); + vk::Format pickDepthFormat( vk::PhysicalDevice const & physicalDevice ); + vk::PresentModeKHR pickPresentMode( std::vector const & presentModes ); + vk::SurfaceFormatKHR pickSurfaceFormat( std::vector const & formats ); + void submitAndWait( vk::Device const & device, vk::Queue const & queue, vk::CommandBuffer const & commandBuffer ); + void updateDescriptorSets( vk::Device const & device, + vk::DescriptorSet const & descriptorSet, + std::vector> const & bufferData, + vk::su::TextureData const & textureData, + uint32_t bindingOffset = 0 ); + void updateDescriptorSets( vk::Device const & device, + vk::DescriptorSet const & descriptorSet, + std::vector> const & bufferData, + std::vector const & textureData, + uint32_t bindingOffset = 0 ); + + } // namespace su +} // namespace vk + +std::ostream & operator<<( std::ostream & os, vk::su::UUID const & uuid ); diff --git a/third_party/imgui/CMakeLists.txt b/third_party/imgui/CMakeLists.txt index 3934ea3..20a4861 100644 --- a/third_party/imgui/CMakeLists.txt +++ b/third_party/imgui/CMakeLists.txt @@ -17,35 +17,21 @@ add_library(${PROJECT_NAME} STATIC imgui/imgui_tables.cpp ) -if (WIN32) - target_sources(${PROJECT_NAME} PRIVATE - imgui/backends/imgui_impl_glfw.cpp - imgui/backends/imgui_impl_glfw.h - imgui/backends/imgui_impl_opengl3.cpp - imgui/backends/imgui_impl_opengl3.h - ) - target_link_libraries(${PROJECT_NAME} PUBLIC glfw glad) -elseif(UNIX AND NOT APPLE) - target_sources(${PROJECT_NAME} PRIVATE - imgui/backends/imgui_impl_glfw.cpp - imgui/backends/imgui_impl_glfw.h - imgui/backends/imgui_impl_opengl3.cpp - imgui/backends/imgui_impl_opengl3.h - ) - target_link_libraries(${PROJECT_NAME} PUBLIC glfw glad) -elseif(APPLE) - target_sources(${PROJECT_NAME} PRIVATE - imgui/backends/imgui_impl_glfw.cpp - 
imgui/backends/imgui_impl_glfw.h - imgui/backends/imgui_impl_opengl3.cpp - imgui/backends/imgui_impl_opengl3.h - ) - target_link_libraries(${PROJECT_NAME} PUBLIC glfw glad) -endif() +find_package(Vulkan REQUIRED) + +target_sources(${PROJECT_NAME} PRIVATE + imgui/backends/imgui_impl_glfw.cpp + imgui/backends/imgui_impl_glfw.h + imgui/backends/imgui_impl_opengl3.cpp + imgui/backends/imgui_impl_opengl3.h + imgui/backends/imgui_impl_vulkan.cpp + imgui/backends/imgui_impl_vulkan.h +) +target_link_libraries(${PROJECT_NAME} PUBLIC glfw glad ${Vulkan_LIBRARIES}) target_include_directories(${PROJECT_NAME} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/imgui) target_include_directories(${PROJECT_NAME} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/imgui/backends) -target_include_directories(${PROJECT_NAME} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR} glfw glad) +target_include_directories(${PROJECT_NAME} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR} glfw glad ${Vulkan_INCLUDE_DIRS}) target_precompile_headers(${PROJECT_NAME} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/pch.h)