vulkan rhi

This commit is contained in:
daiqingshuang 2024-02-19 16:23:19 +08:00
parent 6dc432b283
commit 4d8b91ca98
8 changed files with 2012 additions and 27 deletions

View File

@ -8,6 +8,7 @@
#include "filesystem/stb_image.h"
#include "rhi/texture.h"
#include "rhi/opengl/renderer_opengl.h"
#include "rhi/vulkan/renderer_vulkan.h"
#include "spdlog/async.h"
#include "spdlog/spdlog.h"
#include "spdlog/sinks/basic_file_sink.h"
@ -31,7 +32,7 @@ void application::init(window_params in_window_params, int argc, char** argv) {
init_glfw();
init_imgui();
renderer_ = new renderer_opengl();
renderer_ = new renderer_vulkan();
renderer_->pre_init();

View File

@ -0,0 +1,393 @@
#include "renderer_vulkan.h"
#include "imgui_impl_glfw.h"
#include "utils/utils.hpp"
extern GLFWAPI VkResult glfwCreateWindowSurface(VkInstance instance, GLFWwindow* window, const VkAllocationCallbacks* allocator, VkSurfaceKHR* surface);
// Aborts on any vk::Result other than eSuccess (C++-bindings variant).
static void check_vk_result(vk::Result err) {
    if (err != vk::Result::eSuccess) {
        spdlog::error("[vulkan] Error: VkResult = {}", vk::to_string(err));
        abort();
    }
}
// C-API variant, also installed as ImGui_ImplVulkan's CheckVkResultFn callback.
// Negative VkResult values are errors and fatal; positive values (e.g.
// VK_SUBOPTIMAL_KHR) are non-fatal status codes. The original version silently
// ignored positive codes — report them as warnings instead, matching the
// reference imgui example which logs every non-success result.
static void check_vk_result(VkResult err) {
    if (err == VK_SUCCESS)
        return;
    if (err < 0) {
        spdlog::error("[vulkan] Error: VkResult = {}", err);
        abort();
    }
    spdlog::warn("[vulkan] Warning: VkResult = {}", err);
}
// Returns true when `extension` appears in the enumerated `properties` list.
static bool is_extension_available(const std::vector<vk::ExtensionProperties>& properties, const char* extension) {
    const auto matches = [extension](const vk::ExtensionProperties& p) {
        return strcmp(p.extensionName, extension) == 0;
    };
    return std::ranges::any_of(properties, matches);
}
// Picks the GPU to use: prefer a discrete GPU when several devices are
// reported (covers the common integrated+dedicated setup); otherwise fall
// back to the first available device. Multi-dGPU selection is out of scope.
vk::PhysicalDevice renderer_vulkan::setup_vulkan_select_physical_device() const {
    const std::vector<vk::PhysicalDevice> gpus = instance.enumeratePhysicalDevices();
    IM_ASSERT(!gpus.empty());
    const auto is_discrete = [](const vk::PhysicalDevice& gpu) {
        return gpu.getProperties().deviceType == vk::PhysicalDeviceType::eDiscreteGpu;
    };
    if (const auto it = std::ranges::find_if(gpus, is_discrete); it != gpus.end())
        return *it;
    // Use the first GPU (typically integrated) if a discrete one is not available.
    if (!gpus.empty())
        return gpus[0];
    return VK_NULL_HANDLE;
}
// Creates the Vulkan instance, selects a physical device and a graphics queue
// family, creates the logical device/queue, and allocates a small descriptor
// pool sized for ImGui's font texture.
void renderer_vulkan::setup_vulkan(ImVector<const char*> instance_extensions) {
    // Create Vulkan Instance
    {
        // NOTE(review): a C-API VkInstanceCreateInfo is filled here and then
        // implicitly converted to vk::InstanceCreateInfo at the createInstance
        // call below — works, but mixing the two APIs is easy to get wrong.
        VkInstanceCreateInfo create_info = {};
        create_info.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
        // Enumerate available extensions
        auto properties = vk::enumerateInstanceExtensionProperties();
        // Enable required extensions
        if (is_extension_available(properties, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME))
            instance_extensions.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
#ifdef VK_KHR_PORTABILITY_ENUMERATION_EXTENSION_NAME
        // Portability drivers (e.g. MoltenVK) must be opted into explicitly.
        if (is_extension_available(properties, VK_KHR_PORTABILITY_ENUMERATION_EXTENSION_NAME)) {
            instance_extensions.push_back(VK_KHR_PORTABILITY_ENUMERATION_EXTENSION_NAME);
            create_info.flags |= VK_INSTANCE_CREATE_ENUMERATE_PORTABILITY_BIT_KHR;
        }
#endif
        // Create Vulkan Instance
        create_info.enabledExtensionCount = static_cast<uint32_t>(instance_extensions.Size);
        create_info.ppEnabledExtensionNames = instance_extensions.Data;
        instance = vk::createInstance(create_info, allocator);
    }
    // Select Physical Device (GPU)
    physical_device = setup_vulkan_select_physical_device();
    // Select graphics queue family: first family advertising graphics support.
    {
        auto queues = physical_device.getQueueFamilyProperties();
        for (uint32_t i = 0; i < queues.size(); i++) {
            if (queues[i].queueFlags & vk::QueueFlagBits::eGraphics) {
                queue_family = i;
                break;
            }
        }
        IM_ASSERT(queue_family != static_cast<uint32_t>(-1));
    }
    // Create Logical Device (with 1 queue)
    {
        std::vector<std::string> device_extensions;
        device_extensions.emplace_back("VK_KHR_swapchain");
        // Enumerate physical device extension
        auto properties = physical_device.enumerateDeviceExtensionProperties();
#ifdef VK_KHR_PORTABILITY_SUBSET_EXTENSION_NAME
        if (is_extension_available(properties, VK_KHR_PORTABILITY_SUBSET_EXTENSION_NAME))
            device_extensions.push_back(VK_KHR_PORTABILITY_SUBSET_EXTENSION_NAME);
#endif
        device = vk::su::createDevice(physical_device, queue_family, device_extensions);
        queue = device.getQueue(queue_family, 0);
    }
    // Create Descriptor Pool
    // The example only requires a single combined image sampler descriptor for the font image and only uses one descriptor set (for that)
    // If you wish to load e.g. additional textures you may need to alter pools sizes.
    {
        std::vector<vk::DescriptorPoolSize> pool_sizes;
        pool_sizes.emplace_back(vk::DescriptorType::eCombinedImageSampler, 1);
        vk::DescriptorPoolCreateInfo descriptor_pool_create_info;
        descriptor_pool_create_info.setMaxSets(1);
        // NOTE(review): setPoolSizes() below also sets the count, so this
        // setPoolSizeCount() call is redundant (harmless).
        descriptor_pool_create_info.setPoolSizeCount(pool_sizes.size());
        descriptor_pool_create_info.setPoolSizes(pool_sizes);
        descriptor_pool = device.createDescriptorPool(descriptor_pool_create_info);
    }
}
// All the ImGui_ImplVulkanH_XXX structures/functions are optional helpers used by the demo.
// Your real engine/app may not use them.
// All the ImGui_ImplVulkanH_XXX structures/functions are optional helpers used by the demo.
// Your real engine/app may not use them.
// Binds `surface` to the helper window, verifies presentation support on the
// graphics queue family, picks a surface format and present mode, and creates
// the swapchain/render pass/framebuffers via the ImGui helper.
void renderer_vulkan::setup_vulkan_window(ImGui_ImplVulkanH_Window* wd, VkSurfaceKHR surface, int width,
                                          int height) const {
    wd->Surface = surface;
    // Check for WSI support
    vk::Bool32 res;
    const auto err = physical_device.getSurfaceSupportKHR(queue_family, wd->Surface, &res);
    check_vk_result(err);
    if (res != VK_TRUE) {
        fprintf(stderr, "Error no WSI support on physical device 0\n");
        exit(-1);
    }
    // Select Surface Format: prefer common 8-bit UNORM formats.
    constexpr VkFormat requestSurfaceImageFormat[] = {
        VK_FORMAT_B8G8R8A8_UNORM, VK_FORMAT_R8G8B8A8_UNORM, VK_FORMAT_B8G8R8_UNORM, VK_FORMAT_R8G8B8_UNORM
    };
    // Fixed: VK_COLORSPACE_SRGB_NONLINEAR_KHR is the deprecated pre-1.0 spelling;
    // VK_COLOR_SPACE_SRGB_NONLINEAR_KHR is the current alias with the same value.
    constexpr VkColorSpaceKHR requestSurfaceColorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
    wd->SurfaceFormat = ImGui_ImplVulkanH_SelectSurfaceFormat(physical_device, wd->Surface, requestSurfaceImageFormat,
                                                              (size_t) IM_ARRAYSIZE(requestSurfaceImageFormat),
                                                              requestSurfaceColorSpace);
    // Select Present Mode: mailbox/immediate allow an uncapped frame rate;
    // FIFO (vsync) is the only mode the spec guarantees to exist.
#ifdef APP_USE_UNLIMITED_FRAME_RATE
    VkPresentModeKHR present_modes[] = { VK_PRESENT_MODE_MAILBOX_KHR, VK_PRESENT_MODE_IMMEDIATE_KHR, VK_PRESENT_MODE_FIFO_KHR };
#else
    VkPresentModeKHR present_modes[] = {VK_PRESENT_MODE_FIFO_KHR};
#endif
    wd->PresentMode = ImGui_ImplVulkanH_SelectPresentMode(physical_device, wd->Surface, &present_modes[0],
                                                          IM_ARRAYSIZE(present_modes));
    //printf("[vulkan] Selected PresentMode = %d\n", wd->PresentMode);
    // Create SwapChain, RenderPass, Framebuffer, etc.
    IM_ASSERT(min_image_count >= 2);
    ImGui_ImplVulkanH_CreateOrResizeWindow(instance, physical_device, device, wd, queue_family,
                                           reinterpret_cast<VkAllocationCallbacks*>(allocator), width,
                                           height, min_image_count);
}
// Destroys the descriptor pool, logical device, and instance. Call after
// cleanup_vulkan_window() so swapchain resources are released first.
void renderer_vulkan::cleanup_vulkan() const {
    device.destroyDescriptorPool(descriptor_pool);
#ifdef APP_USE_VULKAN_DEBUG_REPORT
    // Remove the debug report callback
    // NOTE(review): g_Instance / g_DebugReport / g_Allocator are leftovers from
    // the imgui demo's file-scope globals and do not exist in this class — this
    // branch will not compile if APP_USE_VULKAN_DEBUG_REPORT is ever defined.
    auto vkDestroyDebugReportCallbackEXT = (PFN_vkDestroyDebugReportCallbackEXT)vkGetInstanceProcAddr(g_Instance, "vkDestroyDebugReportCallbackEXT");
    vkDestroyDebugReportCallbackEXT(g_Instance, g_DebugReport, g_Allocator);
#endif // APP_USE_VULKAN_DEBUG_REPORT
    device.destroy();
    instance.destroy();
}
void renderer_vulkan::cleanup_vulkan_window() {
ImGui_ImplVulkanH_DestroyWindow(instance, device, &main_window_data,
reinterpret_cast<VkAllocationCallbacks*>(allocator));
}
// Acquires the next swapchain image, waits on the frame's fence, records the
// ImGui draw data into the per-frame command buffer, and submits it with the
// usual acquire/render-complete semaphore pairing.
void renderer_vulkan::frame_render(ImGui_ImplVulkanH_Window* wd, ImDrawData* draw_data) {
    vk::Semaphore image_acquired_semaphore = wd->FrameSemaphores[wd->SemaphoreIndex].ImageAcquiredSemaphore;
    vk::Semaphore render_complete_semaphore = wd->FrameSemaphores[wd->SemaphoreIndex].RenderCompleteSemaphore;
    vk::Result err = device.acquireNextImageKHR(wd->Swapchain, UINT64_MAX, image_acquired_semaphore, VK_NULL_HANDLE,
                                                &wd->FrameIndex);
    // Out-of-date / suboptimal swapchain: skip this frame and flag a rebuild.
    if (err == vk::Result::eErrorOutOfDateKHR || err == vk::Result::eSuboptimalKHR) {
        swap_chain_rebuild = true;
        return;
    }
    check_vk_result(err);
    ImGui_ImplVulkanH_Frame* fd = &wd->Frames[wd->FrameIndex];
    const vk::CommandBuffer cmd_buf = fd->CommandBuffer;
    // Block until the GPU finished the previous use of this frame slot, then
    // re-arm the fence for this submission.
    const vk::Fence fence = fd->Fence; {
        err = device.waitForFences(1, &fence, VK_TRUE, UINT64_MAX);
        // wait indefinitely instead of periodically checking
        check_vk_result(err);
        err = device.resetFences(1, &fence);
        check_vk_result(err);
    } {
        // Reset the pool and begin a one-shot command buffer for this frame.
        const vk::CommandPool command_pool = fd->CommandPool;
        device.resetCommandPool(command_pool);
        vk::CommandBufferBeginInfo info = {};
        info.setFlags(vk::CommandBufferUsageFlagBits::eOneTimeSubmit);
        cmd_buf.begin(info);
    } {
        // Begin the render pass, clearing to the window's clear color.
        const vk::Framebuffer framebuffer = fd->Framebuffer;
        const vk::RenderPass render_pass = wd->RenderPass;
        std::vector<vk::ClearValue> clear_values;
        const auto clear_color = wd->ClearValue.color.float32;
        const auto clear_depth = wd->ClearValue.depthStencil.depth;
        const auto clear_stencil = wd->ClearValue.depthStencil.stencil;
        vk::ClearValue clear_value;
        clear_value.color = vk::ClearColorValue(std::array<float, 4>{
            clear_color[0], clear_color[1], clear_color[2], clear_color[3]
        });
        // NOTE(review): the depth/stencil clear value is computed here but only
        // the color value is appended below — harmless for a single-color-
        // attachment pass, but misleading; confirm no depth attachment exists.
        clear_value.depthStencil = vk::ClearDepthStencilValue(clear_depth, clear_stencil);
        clear_values.emplace_back((clear_value.color));
        vk::RenderPassBeginInfo info;
        info.setRenderPass(render_pass);
        info.setFramebuffer(framebuffer);
        info.renderArea.extent.width = wd->Width;
        info.renderArea.extent.height = wd->Height;
        info.setClearValues(clear_values);
        cmd_buf.beginRenderPass(info, vk::SubpassContents::eInline);
    }
    // Record dear imgui primitives into command buffer
    ImGui_ImplVulkan_RenderDrawData(draw_data, fd->CommandBuffer);
    // Submit command buffer
    // NOTE(review): C-API call mixed with the C++ bindings used everywhere
    // else; cmd_buf.endRenderPass() would be the equivalent.
    vkCmdEndRenderPass(fd->CommandBuffer); {
        // Wait for the acquired image at color-output stage; signal
        // render-complete so frame_present() can wait on it.
        vk::PipelineStageFlags wait_stage = vk::PipelineStageFlagBits::eColorAttachmentOutput;
        vk::SubmitInfo info;
        info.setWaitSemaphores(image_acquired_semaphore);
        info.setWaitDstStageMask(wait_stage);
        info.setCommandBuffers(cmd_buf);
        info.setSignalSemaphores(render_complete_semaphore);
        cmd_buf.end();
        err = queue.submit(1, &info, fence);
        check_vk_result(err);
    }
}
// Presents the frame rendered by frame_render(), waiting on its
// render-complete semaphore, then advances to the next semaphore set.
void renderer_vulkan::frame_present(ImGui_ImplVulkanH_Window* wd) {
    // Skip presentation entirely while the swapchain is flagged for rebuild.
    if (swap_chain_rebuild)
        return;
    vk::Semaphore render_done = wd->FrameSemaphores[wd->SemaphoreIndex].RenderCompleteSemaphore;
    vk::SwapchainKHR swapchain = wd->Swapchain;
    uint32_t image_index = wd->FrameIndex;
    vk::PresentInfoKHR present_info;
    present_info.setWaitSemaphores(render_done);
    present_info.setSwapchains(swapchain);
    present_info.setImageIndices(image_index);
    const auto result = queue.presentKHR(present_info);
    if (result == vk::Result::eErrorOutOfDateKHR || result == vk::Result::eSuboptimalKHR) {
        swap_chain_rebuild = true;
        return;
    }
    check_vk_result(result);
    // Now we can use the next set of semaphores.
    wd->SemaphoreIndex = (wd->SemaphoreIndex + 1) % wd->SemaphoreCount;
}
// Pre-window-creation setup: tell GLFW not to create an OpenGL context,
// since Vulkan manages the surface and presentation itself.
void renderer_vulkan::pre_init() {
    renderer::pre_init();
    glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API);
}
// Lazily initializes the Vulkan backend for `window_handle`. Safe to call
// repeatedly; returns true once initialized.
// Throws std::runtime_error if the runtime has no Vulkan support.
bool renderer_vulkan::init(GLFWwindow* window_handle) {
    if (has_initialized_)
        return true;
    if (!glfwVulkanSupported())
        throw std::runtime_error("Vulkan not supported");
    init_vulkan(window_handle);
    has_initialized_ = true;
    return true;
}
// Tears down the ImGui backends. Fixed: backends shut down in reverse order
// of initialization — the Vulkan renderer backend first, then the GLFW
// platform backend — matching the imgui glfw+vulkan example (the original
// called ImGui_ImplGlfw_Shutdown() first).
void renderer_vulkan::shutdown() {
    renderer::shutdown();
    ImGui_ImplVulkan_Shutdown();
    ImGui_ImplGlfw_Shutdown();
}
// TODO: Vulkan shader loading is not implemented yet; always returns nullptr.
std::shared_ptr<shader> renderer_vulkan::load_shader(const std::string& entry_name) {
    return nullptr;
}
// TODO: not implemented for the Vulkan backend yet; always returns nullptr.
std::shared_ptr<pixel_shader_drawer> renderer_vulkan::create_pixel_shader_drawer() {
    return nullptr;
}
// Begins a new Dear ImGui frame: renderer backend, platform backend, then
// core — the order used by the imgui vulkan example.
void renderer_vulkan::new_frame(GLFWwindow* window_handle) {
    // Start the Dear ImGui frame
    ImGui_ImplVulkan_NewFrame();
    ImGui_ImplGlfw_NewFrame();
    ImGui::NewFrame();
}
// Finalizes the ImGui frame: renders the main window's draw data, updates any
// extra platform windows (multi-viewport), and presents the main window.
void renderer_vulkan::end_frame(GLFWwindow* window_handle) {
    ImGuiIO& io = ImGui::GetIO();
    // Build the draw lists for this frame.
    ImGui::Render();
    ImDrawData* draw_data = ImGui::GetDrawData();
    const bool is_minimized = draw_data->DisplaySize.x <= 0.0f || draw_data->DisplaySize.y <= 0.0f;
    // Store the alpha-premultiplied clear color on the helper window.
    float* clear = main_window_data.ClearValue.color.float32;
    clear[0] = clear_color.x * clear_color.w;
    clear[1] = clear_color.y * clear_color.w;
    clear[2] = clear_color.z * clear_color.w;
    clear[3] = clear_color.w;
    if (!is_minimized)
        frame_render(&main_window_data, draw_data);
    // Update and Render additional Platform Windows
    if (io.ConfigFlags & ImGuiConfigFlags_ViewportsEnable) {
        ImGui::UpdatePlatformWindows();
        ImGui::RenderPlatformWindowsDefault();
    }
    // Present Main Platform Window
    if (!is_minimized)
        frame_present(&main_window_data);
}
// Intentionally empty: swapchain recreation appears to be driven by the
// swap_chain_rebuild flag set in frame_render/frame_present instead.
// NOTE(review): confirm the rebuild path actually runs somewhere — no caller
// of ImGui_ImplVulkanH_CreateOrResizeWindow on rebuild is visible in this file.
void renderer_vulkan::resize(int width, int height) {
}
// TODO: Vulkan texture creation is not implemented yet; always returns nullptr.
std::shared_ptr<texture> renderer_vulkan::create_texture(const unsigned char* data, int width, int height) {
    return nullptr;
}
// TODO: Vulkan render-target creation is not implemented yet; always returns nullptr.
std::shared_ptr<render_target> renderer_vulkan::create_render_target(int width, int height, texture_format format) {
    return nullptr;
}
// Full Vulkan + ImGui bring-up for the given GLFW window: instance/device
// setup, window surface + swapchain creation, then ImGui's Vulkan backend.
void renderer_vulkan::init_vulkan(GLFWwindow* window_handle) {
    // Collect the instance extensions GLFW requires for surface creation.
    ImVector<const char*> extensions;
    uint32_t extensions_count = 0;
    const char** glfw_extensions = glfwGetRequiredInstanceExtensions(&extensions_count);
    for (uint32_t i = 0; i < extensions_count; i++)
        extensions.push_back(glfw_extensions[i]);
    setup_vulkan(extensions);
    // Create Window Surface
    VkSurfaceKHR surface;
    VkResult err = glfwCreateWindowSurface(instance, window_handle, reinterpret_cast<VkAllocationCallbacks*>(allocator), &surface);
    check_vk_result(err);
    // Create Framebuffers
    int w, h;
    glfwGetFramebufferSize(window_handle, &w, &h);
    ImGui_ImplVulkanH_Window* wd = &main_window_data;
    setup_vulkan_window(wd, surface, w, h);
    // Hook ImGui into GLFW (installs input callbacks), then init the backend.
    ImGui_ImplGlfw_InitForVulkan(window_handle, true);
    ImGui_ImplVulkan_InitInfo init_info = {};
    init_info.Instance = instance;
    init_info.PhysicalDevice = physical_device;
    init_info.Device = device;
    init_info.QueueFamily = queue_family;
    init_info.Queue = queue;
    init_info.PipelineCache = pipeline_cache;
    init_info.DescriptorPool = descriptor_pool;
    init_info.RenderPass = wd->RenderPass;
    init_info.Subpass = 0;
    init_info.MinImageCount = min_image_count;
    init_info.ImageCount = wd->ImageCount;
    init_info.MSAASamples = VK_SAMPLE_COUNT_1_BIT;
    init_info.Allocator = reinterpret_cast<VkAllocationCallbacks*>(allocator);
    // Overload resolution picks the VkResult (C API) check_vk_result here.
    init_info.CheckVkResultFn = check_vk_result;
    ImGui_ImplVulkan_Init(&init_info);
}

View File

@ -0,0 +1,67 @@
//
// Created by 46944 on 2024/2/19.
//
#pragma once
#include <vulkan/vulkan.hpp>
#include "imgui_impl_vulkan.h"
#include "rhi/renderer.h"
// Vulkan implementation of the abstract renderer interface, built on the
// ImGui_ImplVulkanH_* helper structures (adapted from the imgui vulkan example).
class renderer_vulkan : public renderer {
public:
    void pre_init() override;
    bool init(GLFWwindow* window_handle) override;
    void shutdown() override;
    // NOTE(review): the shader/texture/render-target factories below are still
    // stubs returning nullptr — confirm callers tolerate that.
    std::shared_ptr<shader> load_shader(const std::string& entry_name) override;
    std::shared_ptr<pixel_shader_drawer> create_pixel_shader_drawer() override;
    void new_frame(GLFWwindow* window_handle) override;
    void end_frame(GLFWwindow* window_handle) override;
    void resize(int width, int height) override;
    std::shared_ptr<texture> create_texture(const unsigned char* data, int width, int height) override;
    std::shared_ptr<render_target> create_render_target(int width, int height, texture_format format) override;
    // Data
    vk::AllocationCallbacks* allocator = nullptr;  // optional host allocator; nullptr = driver default
    vk::Instance instance = VK_NULL_HANDLE;
    vk::PhysicalDevice physical_device = VK_NULL_HANDLE;
    vk::Device device = VK_NULL_HANDLE;
    uint32_t queue_family = (uint32_t)-1;          // graphics queue family index; -1 = not selected yet
    vk::Queue queue = VK_NULL_HANDLE;
    vk::PipelineCache pipeline_cache = VK_NULL_HANDLE;
    vk::DescriptorPool descriptor_pool = VK_NULL_HANDLE;
    ImGui_ImplVulkanH_Window main_window_data;     // swapchain/frames managed by the ImGui helper
    int min_image_count = 2;                       // minimum swapchain image count (double buffering)
    bool swap_chain_rebuild = false;               // set when acquire/present reports out-of-date/suboptimal
protected:
    vk::PhysicalDevice setup_vulkan_select_physical_device() const;
    void setup_vulkan(ImVector<const char*> instance_extensions);
    void setup_vulkan_window(ImGui_ImplVulkanH_Window* wd, VkSurfaceKHR surface, int width, int height) const;
    void cleanup_vulkan() const;
    void cleanup_vulkan_window();
    void frame_render(ImGui_ImplVulkanH_Window* wd, ImDrawData* draw_data);
    void frame_present(ImGui_ImplVulkanH_Window* wd);
private:
    void init_vulkan(GLFWwindow* window_handle);
    bool has_initialized_ = false;                 // guards repeated init() calls
};

View File

@ -0,0 +1,8 @@
//
// Created by 46944 on 2024/2/19.
//
#include "test_vulkan.h"
// Placeholder constructor; the Vulkan smoke-test type performs no setup yet.
test_vulkan::test_vulkan() {
}

View File

@ -0,0 +1,17 @@
//
// Created by 46944 on 2024/2/19.
//
#ifndef TEST_VULKAN_H
#define TEST_VULKAN_H
#include <vulkan/vulkan.hpp>
// Minimal placeholder type, apparently used to smoke-test that the Vulkan
// headers compile and link from this module.
// NOTE(review): CORE_API is presumably the module's export macro — confirm it
// is defined wherever this header is included.
class CORE_API test_vulkan {
public:
    test_vulkan();
};
#endif //TEST_VULKAN_H

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,439 @@
#pragma once
// Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include <vulkan/vulkan.hpp>
#define GLFW_INCLUDE_NONE
#include <GLFW/glfw3.h>
#include <iostream>
#include <limits>
#include <map>
#include <memory> // std::unique_ptr
namespace vk
{
namespace su
{
const uint64_t FenceTimeout = 100000000;
// Allocates a primary command buffer from `commandPool`, records `func` into
// it, submits it on `queue`, and blocks until the queue is idle. Convenience
// helper for one-off transfer/setup work (vendored from the Vulkan-Hpp samples).
template <typename Func>
void oneTimeSubmit( vk::Device const & device, vk::CommandPool const & commandPool, vk::Queue const & queue, Func const & func )
{
  vk::CommandBuffer commandBuffer =
    device.allocateCommandBuffers( vk::CommandBufferAllocateInfo( commandPool, vk::CommandBufferLevel::ePrimary, 1 ) ).front();
  commandBuffer.begin( vk::CommandBufferBeginInfo( vk::CommandBufferUsageFlagBits::eOneTimeSubmit ) );
  func( commandBuffer );
  commandBuffer.end();
  queue.submit( vk::SubmitInfo( 0, nullptr, nullptr, 1, &commandBuffer ), nullptr );
  queue.waitIdle();
}
// Maps `deviceMemory` (assumed host-visible — mapMemory would fail otherwise)
// and copies `count` elements from pData into it, one element every `stride`
// bytes. Takes a single memcpy fast path when the stride equals sizeof(T).
template <class T>
void copyToDevice( vk::Device const & device, vk::DeviceMemory const & deviceMemory, T const * pData, size_t count, vk::DeviceSize stride = sizeof( T ) )
{
  assert( sizeof( T ) <= stride );
  uint8_t * deviceData = static_cast<uint8_t *>( device.mapMemory( deviceMemory, 0, count * stride ) );
  if ( stride == sizeof( T ) )
  {
    memcpy( deviceData, pData, count * sizeof( T ) );
  }
  else
  {
    // Strided copy: write each element at the next `stride`-byte slot.
    for ( size_t i = 0; i < count; i++ )
    {
      memcpy( deviceData, &pData[i], sizeof( T ) );
      deviceData += stride;
    }
  }
  device.unmapMemory( deviceMemory );
}
// Convenience overload: copies a single value to device memory.
template <class T>
void copyToDevice( vk::Device const & device, vk::DeviceMemory const & deviceMemory, T const & data )
{
  copyToDevice<T>( device, deviceMemory, &data, 1 );
}
// Pre-C++17 stand-in for std::clamp: lo if v < lo, hi if hi < v, else v
// (returns by reference; usable in constant expressions).
template <class T>
VULKAN_HPP_INLINE constexpr const T & clamp( const T & v, const T & lo, const T & hi )
{
  return v < lo ? lo : hi < v ? hi : v;
}
void setImageLayout(
vk::CommandBuffer const & commandBuffer, vk::Image image, vk::Format format, vk::ImageLayout oldImageLayout, vk::ImageLayout newImageLayout );
struct WindowData
{
WindowData( GLFWwindow * wnd, std::string const & name, vk::Extent2D const & extent );
WindowData( const WindowData & ) = delete;
WindowData( WindowData && other );
~WindowData() noexcept;
GLFWwindow * handle;
std::string name;
vk::Extent2D extent;
};
WindowData createWindow( std::string const & windowName, vk::Extent2D const & extent );
struct BufferData
{
BufferData( vk::PhysicalDevice const & physicalDevice,
vk::Device const & device,
vk::DeviceSize size,
vk::BufferUsageFlags usage,
vk::MemoryPropertyFlags propertyFlags = vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent );
void clear( vk::Device const & device )
{
device.destroyBuffer( buffer ); // to prevent some validation layer warning, the Buffer needs to be destroyed before the bound DeviceMemory
device.freeMemory( deviceMemory );
}
template <typename DataType>
void upload( vk::Device const & device, DataType const & data ) const
{
assert( ( m_propertyFlags & vk::MemoryPropertyFlagBits::eHostCoherent ) && ( m_propertyFlags & vk::MemoryPropertyFlagBits::eHostVisible ) );
assert( sizeof( DataType ) <= m_size );
void * dataPtr = device.mapMemory( deviceMemory, 0, sizeof( DataType ) );
memcpy( dataPtr, &data, sizeof( DataType ) );
device.unmapMemory( deviceMemory );
}
template <typename DataType>
void upload( vk::Device const & device, std::vector<DataType> const & data, size_t stride = 0 ) const
{
assert( m_propertyFlags & vk::MemoryPropertyFlagBits::eHostVisible );
size_t elementSize = stride ? stride : sizeof( DataType );
assert( sizeof( DataType ) <= elementSize );
copyToDevice( device, deviceMemory, data.data(), data.size(), elementSize );
}
template <typename DataType>
void upload( vk::PhysicalDevice const & physicalDevice,
vk::Device const & device,
vk::CommandPool const & commandPool,
vk::Queue queue,
std::vector<DataType> const & data,
size_t stride ) const
{
assert( m_usage & vk::BufferUsageFlagBits::eTransferDst );
assert( m_propertyFlags & vk::MemoryPropertyFlagBits::eDeviceLocal );
size_t elementSize = stride ? stride : sizeof( DataType );
assert( sizeof( DataType ) <= elementSize );
size_t dataSize = data.size() * elementSize;
assert( dataSize <= m_size );
vk::su::BufferData stagingBuffer( physicalDevice, device, dataSize, vk::BufferUsageFlagBits::eTransferSrc );
copyToDevice( device, stagingBuffer.deviceMemory, data.data(), data.size(), elementSize );
vk::su::oneTimeSubmit( device,
commandPool,
queue,
[&]( vk::CommandBuffer const & commandBuffer )
{ commandBuffer.copyBuffer( stagingBuffer.buffer, buffer, vk::BufferCopy( 0, 0, dataSize ) ); } );
stagingBuffer.clear( device );
}
vk::Buffer buffer;
vk::DeviceMemory deviceMemory;
#if !defined( NDEBUG )
private:
vk::DeviceSize m_size;
vk::BufferUsageFlags m_usage;
vk::MemoryPropertyFlags m_propertyFlags;
#endif
};
struct ImageData
{
ImageData( vk::PhysicalDevice const & physicalDevice,
vk::Device const & device,
vk::Format format,
vk::Extent2D const & extent,
vk::ImageTiling tiling,
vk::ImageUsageFlags usage,
vk::ImageLayout initialLayout,
vk::MemoryPropertyFlags memoryProperties,
vk::ImageAspectFlags aspectMask );
void clear( vk::Device const & device )
{
device.destroyImageView( imageView );
device.destroyImage( image ); // the Image should to be destroyed before the bound DeviceMemory is freed
device.freeMemory( deviceMemory );
}
vk::Format format;
vk::Image image;
vk::DeviceMemory deviceMemory;
vk::ImageView imageView;
};
struct DepthBufferData : public ImageData
{
DepthBufferData( vk::PhysicalDevice const & physicalDevice, vk::Device const & device, vk::Format format, vk::Extent2D const & extent );
};
struct SurfaceData
{
SurfaceData( vk::Instance const & instance, std::string const & windowName, vk::Extent2D const & extent );
vk::Extent2D extent;
WindowData window;
vk::SurfaceKHR surface;
};
struct SwapChainData
{
SwapChainData( vk::PhysicalDevice const & physicalDevice,
vk::Device const & device,
vk::SurfaceKHR const & surface,
vk::Extent2D const & extent,
vk::ImageUsageFlags usage,
vk::SwapchainKHR const & oldSwapChain,
uint32_t graphicsFamilyIndex,
uint32_t presentFamilyIndex );
void clear( vk::Device const & device )
{
for ( auto & imageView : imageViews )
{
device.destroyImageView( imageView );
}
imageViews.clear();
images.clear();
device.destroySwapchainKHR( swapChain );
}
vk::Format colorFormat;
vk::SwapchainKHR swapChain;
std::vector<vk::Image> images;
std::vector<vk::ImageView> imageViews;
};
class CheckerboardImageGenerator
{
public:
CheckerboardImageGenerator( std::array<uint8_t, 3> const & rgb0 = { { 0, 0, 0 } }, std::array<uint8_t, 3> const & rgb1 = { { 255, 255, 255 } } );
void operator()( void * data, vk::Extent2D & extent ) const;
private:
std::array<uint8_t, 3> const & m_rgb0;
std::array<uint8_t, 3> const & m_rgb1;
};
class MonochromeImageGenerator
{
public:
MonochromeImageGenerator( std::array<unsigned char, 3> const & rgb );
void operator()( void * data, vk::Extent2D const & extent ) const;
private:
std::array<unsigned char, 3> const & m_rgb;
};
class PixelsImageGenerator
{
public:
PixelsImageGenerator( vk::Extent2D const & extent, size_t channels, unsigned char const * pixels );
void operator()( void * data, vk::Extent2D const & extent ) const;
private:
vk::Extent2D m_extent;
size_t m_channels;
unsigned char const * m_pixels;
};
struct TextureData
{
TextureData( vk::PhysicalDevice const & physicalDevice,
vk::Device const & device,
vk::Extent2D const & extent_ = { 256, 256 },
vk::ImageUsageFlags usageFlags = {},
vk::FormatFeatureFlags formatFeatureFlags = {},
bool anisotropyEnable = false,
bool forceStaging = false );
void clear( vk::Device const & device )
{
if ( stagingBufferData )
{
stagingBufferData->clear( device );
}
imageData->clear( device );
device.destroySampler( sampler );
}
template <typename ImageGenerator>
void setImage( vk::Device const & device, vk::CommandBuffer const & commandBuffer, ImageGenerator const & imageGenerator )
{
void * data = needsStaging
? device.mapMemory( stagingBufferData->deviceMemory, 0, device.getBufferMemoryRequirements( stagingBufferData->buffer ).size )
: device.mapMemory( imageData->deviceMemory, 0, device.getImageMemoryRequirements( imageData->image ).size );
imageGenerator( data, extent );
device.unmapMemory( needsStaging ? stagingBufferData->deviceMemory : imageData->deviceMemory );
if ( needsStaging )
{
// Since we're going to blit to the texture image, set its layout to eTransferDstOptimal
vk::su::setImageLayout( commandBuffer, imageData->image, imageData->format, vk::ImageLayout::eUndefined, vk::ImageLayout::eTransferDstOptimal );
vk::BufferImageCopy copyRegion( 0,
extent.width,
extent.height,
vk::ImageSubresourceLayers( vk::ImageAspectFlagBits::eColor, 0, 0, 1 ),
vk::Offset3D( 0, 0, 0 ),
vk::Extent3D( extent, 1 ) );
commandBuffer.copyBufferToImage( stagingBufferData->buffer, imageData->image, vk::ImageLayout::eTransferDstOptimal, copyRegion );
// Set the layout for the texture image from eTransferDstOptimal to SHADER_READ_ONLY
vk::su::setImageLayout(
commandBuffer, imageData->image, imageData->format, vk::ImageLayout::eTransferDstOptimal, vk::ImageLayout::eShaderReadOnlyOptimal );
}
else
{
// If we can use the linear tiled image as a texture, just do it
vk::su::setImageLayout(
commandBuffer, imageData->image, imageData->format, vk::ImageLayout::ePreinitialized, vk::ImageLayout::eShaderReadOnlyOptimal );
}
}
vk::Format format;
vk::Extent2D extent;
bool needsStaging;
std::unique_ptr<BufferData> stagingBufferData;
std::unique_ptr<ImageData> imageData;
vk::Sampler sampler;
};
struct UUID
{
public:
UUID( uint8_t const data[VK_UUID_SIZE] );
uint8_t m_data[VK_UUID_SIZE];
};
// Narrowing cast between unsigned integer types that asserts (in debug builds)
// that the value fits the target. The static_asserts reject signed,
// non-integer, and widening conversions at compile time.
template <typename TargetType, typename SourceType>
VULKAN_HPP_INLINE TargetType checked_cast( SourceType value )
{
  static_assert( sizeof( TargetType ) <= sizeof( SourceType ), "No need to cast from smaller to larger type!" );
  static_assert( std::numeric_limits<SourceType>::is_integer, "Only integer types supported!" );
  static_assert( !std::numeric_limits<SourceType>::is_signed, "Only unsigned types supported!" );
  static_assert( std::numeric_limits<TargetType>::is_integer, "Only integer types supported!" );
  static_assert( !std::numeric_limits<TargetType>::is_signed, "Only unsigned types supported!" );
  assert( value <= std::numeric_limits<TargetType>::max() );
  return static_cast<TargetType>( value );
}
vk::DeviceMemory allocateDeviceMemory( vk::Device const & device,
vk::PhysicalDeviceMemoryProperties const & memoryProperties,
vk::MemoryRequirements const & memoryRequirements,
vk::MemoryPropertyFlags memoryPropertyFlags );
bool contains( std::vector<vk::ExtensionProperties> const & extensionProperties, std::string const & extensionName );
vk::DescriptorPool createDescriptorPool( vk::Device const & device, std::vector<vk::DescriptorPoolSize> const & poolSizes );
vk::DescriptorSetLayout createDescriptorSetLayout( vk::Device const & device,
std::vector<std::tuple<vk::DescriptorType, uint32_t, vk::ShaderStageFlags>> const & bindingData,
vk::DescriptorSetLayoutCreateFlags flags = {} );
vk::Device createDevice( vk::PhysicalDevice const & physicalDevice,
uint32_t queueFamilyIndex,
std::vector<std::string> const & extensions = {},
vk::PhysicalDeviceFeatures const * physicalDeviceFeatures = nullptr,
void const * pNext = nullptr );
std::vector<vk::Framebuffer> createFramebuffers( vk::Device const & device,
vk::RenderPass & renderPass,
std::vector<vk::ImageView> const & imageViews,
vk::ImageView const & depthImageView,
vk::Extent2D const & extent );
vk::Pipeline createGraphicsPipeline( vk::Device const & device,
vk::PipelineCache const & pipelineCache,
std::pair<vk::ShaderModule, vk::SpecializationInfo const *> const & vertexShaderData,
std::pair<vk::ShaderModule, vk::SpecializationInfo const *> const & fragmentShaderData,
uint32_t vertexStride,
std::vector<std::pair<vk::Format, uint32_t>> const & vertexInputAttributeFormatOffset,
vk::FrontFace frontFace,
bool depthBuffered,
vk::PipelineLayout const & pipelineLayout,
vk::RenderPass const & renderPass );
vk::Instance createInstance( std::string const & appName,
std::string const & engineName,
std::vector<std::string> const & layers = {},
std::vector<std::string> const & extensions = {},
uint32_t apiVersion = VK_API_VERSION_1_0 );
vk::RenderPass createRenderPass( vk::Device const & device,
vk::Format colorFormat,
vk::Format depthFormat,
vk::AttachmentLoadOp loadOp = vk::AttachmentLoadOp::eClear,
vk::ImageLayout colorFinalLayout = vk::ImageLayout::ePresentSrcKHR );
VKAPI_ATTR VkBool32 VKAPI_CALL debugUtilsMessengerCallback( VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
VkDebugUtilsMessageTypeFlagsEXT messageTypes,
VkDebugUtilsMessengerCallbackDataEXT const * pCallbackData,
void * /*pUserData*/ );
uint32_t findGraphicsQueueFamilyIndex( std::vector<vk::QueueFamilyProperties> const & queueFamilyProperties );
std::pair<uint32_t, uint32_t> findGraphicsAndPresentQueueFamilyIndex( vk::PhysicalDevice physicalDevice, vk::SurfaceKHR const & surface );
uint32_t findMemoryType( vk::PhysicalDeviceMemoryProperties const & memoryProperties, uint32_t typeBits, vk::MemoryPropertyFlags requirementsMask );
std::vector<char const *> gatherExtensions( std::vector<std::string> const & extensions
#if !defined( NDEBUG )
,
std::vector<vk::ExtensionProperties> const & extensionProperties
#endif
);
std::vector<char const *> gatherLayers( std::vector<std::string> const & layers
#if !defined( NDEBUG )
,
std::vector<vk::LayerProperties> const & layerProperties
#endif
);
std::vector<std::string> getDeviceExtensions();
std::vector<std::string> getInstanceExtensions();
vk::DebugUtilsMessengerCreateInfoEXT makeDebugUtilsMessengerCreateInfoEXT();
#if defined( NDEBUG )
vk::StructureChain<vk::InstanceCreateInfo>
#else
vk::StructureChain<vk::InstanceCreateInfo, vk::DebugUtilsMessengerCreateInfoEXT>
#endif
makeInstanceCreateInfoChain( vk::ApplicationInfo const & applicationInfo,
std::vector<char const *> const & layers,
std::vector<char const *> const & extensions );
vk::Format pickDepthFormat( vk::PhysicalDevice const & physicalDevice );
vk::PresentModeKHR pickPresentMode( std::vector<vk::PresentModeKHR> const & presentModes );
vk::SurfaceFormatKHR pickSurfaceFormat( std::vector<vk::SurfaceFormatKHR> const & formats );
void submitAndWait( vk::Device const & device, vk::Queue const & queue, vk::CommandBuffer const & commandBuffer );
void updateDescriptorSets( vk::Device const & device,
vk::DescriptorSet const & descriptorSet,
std::vector<std::tuple<vk::DescriptorType, vk::Buffer const &, vk::DeviceSize, vk::BufferView const &>> const & bufferData,
vk::su::TextureData const & textureData,
uint32_t bindingOffset = 0 );
void updateDescriptorSets( vk::Device const & device,
vk::DescriptorSet const & descriptorSet,
std::vector<std::tuple<vk::DescriptorType, vk::Buffer const &, vk::DeviceSize, vk::BufferView const &>> const & bufferData,
std::vector<vk::su::TextureData> const & textureData,
uint32_t bindingOffset = 0 );
} // namespace su
} // namespace vk
std::ostream & operator<<( std::ostream & os, vk::su::UUID const & uuid );

View File

@ -17,35 +17,21 @@ add_library(${PROJECT_NAME} STATIC
imgui/imgui_tables.cpp
)
if (WIN32)
target_sources(${PROJECT_NAME} PRIVATE
imgui/backends/imgui_impl_glfw.cpp
imgui/backends/imgui_impl_glfw.h
imgui/backends/imgui_impl_opengl3.cpp
imgui/backends/imgui_impl_opengl3.h
)
target_link_libraries(${PROJECT_NAME} PUBLIC glfw glad)
elseif(UNIX AND NOT APPLE)
target_sources(${PROJECT_NAME} PRIVATE
imgui/backends/imgui_impl_glfw.cpp
imgui/backends/imgui_impl_glfw.h
imgui/backends/imgui_impl_opengl3.cpp
imgui/backends/imgui_impl_opengl3.h
)
target_link_libraries(${PROJECT_NAME} PUBLIC glfw glad)
elseif(APPLE)
target_sources(${PROJECT_NAME} PRIVATE
imgui/backends/imgui_impl_glfw.cpp
imgui/backends/imgui_impl_glfw.h
imgui/backends/imgui_impl_opengl3.cpp
imgui/backends/imgui_impl_opengl3.h
)
target_link_libraries(${PROJECT_NAME} PUBLIC glfw glad)
endif()
find_package(Vulkan REQUIRED)
target_sources(${PROJECT_NAME} PRIVATE
imgui/backends/imgui_impl_glfw.cpp
imgui/backends/imgui_impl_glfw.h
imgui/backends/imgui_impl_opengl3.cpp
imgui/backends/imgui_impl_opengl3.h
imgui/backends/imgui_impl_vulkan.cpp
imgui/backends/imgui_impl_vulkan.h
)
target_link_libraries(${PROJECT_NAME} PUBLIC glfw glad ${Vulkan_LIBRARIES})
target_include_directories(${PROJECT_NAME} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/imgui)
target_include_directories(${PROJECT_NAME} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/imgui/backends)
target_include_directories(${PROJECT_NAME} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR} glfw glad)
target_include_directories(${PROJECT_NAME} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR} glfw glad ${Vulkan_INCLUDE_DIRS})
target_precompile_headers(${PROJECT_NAME} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/pch.h)