VulkanRenderer/vulkan.d
2026-03-07 17:59:17 +11:00

3354 lines
90 KiB
D

import vulkan_funcs;
import vulkan_logging;
import vulkan_util;
import std.stdio;
import std.algorithm.comparison;
import std.algorithm.searching : canFind;
import std.traits : isPointer, isArray;
import core.stdc.string : strcmp, memcpy;
import core.stdc.stdio : Printf = printf;
import std.format : sformat;
import std.math.rounding : Ceil = ceil;
// Compile-time debug flag mirroring the VULKAN_DEBUG build version, usable in
// ordinary (non-version) conditionals.
version(VULKAN_DEBUG)
{
const BUILD_DEBUG = true;
}
else
{
const BUILD_DEBUG = false;
}
// Engine-facing aliases: the renderer API re-exports Vulkan types (and local
// entry points) under shorter, backend-neutral names.
alias InitRenderer = Init;
alias Renderer = Vulkan;
alias Shader = VkShaderModule;
// Pipelines are referred to by index into Vulkan.pipeline_handles, not by raw handle.
alias Pipeline = u32;
alias Attribute = VkVertexInputAttributeDescription;
alias SpecEntry = VkSpecializationMapEntry;
alias RenderPass = VkRenderPass;
alias DescSetLayout = VkDescriptorSetLayout;
alias PipelineLayout = VkPipelineLayout;
alias DescLayoutBinding = VkDescriptorSetLayoutBinding;
alias DescWrite = VkWriteDescriptorSet;
// A descriptor set together with the number of dynamic-offset bindings it carries.
struct DescSet
{
VkDescriptorSet handle;
u32 dynamic_count;
}
// Whether the validation layer is available.
// NOTE(review): assigned outside this chunk (presumably during instance
// creation) — confirm against the init code later in the file.
bool g_VLAYER_SUPPORT = false;
// Enables the shader debug-printf path when built with VK_DEBUG_PRINTF.
version(VK_DEBUG_PRINTF)
{
bool g_DEBUG_PRINTF = true;
}
else
{
bool g_DEBUG_PRINTF = false;
}
// Number of frames recorded in flight at once (double buffering).
const FRAME_OVERLAP = 2;
// Sizing for each descriptor pool and for descriptor array bindings.
const MAX_SETS = 1000;
const DESC_ARRAY_SIZE = 256;
// Candidate loader library names, tried in order at startup.
version(linux)
{
const string[] VULKAN_LIBS = [ "libvulkan.so.1", "libvulkan.so" ];
}
version(Windows)
{
const string[] VULKAN_LIBS = [ "vulkan-1.dll" ];
}
// Native window-system handles supplied by the platform layer; consumed by
// surface creation (Xlib on Linux, Win32 on Windows).
struct PlatformHandles
{
version(linux)
{
Display* display;
Window window;
}
version(Windows)
{
HINSTANCE hinstance;
HWND hwnd;
}
}
// Instance layers: none in release; the Khronos validation layer in debug.
const char*[] VK_INSTANCE_LAYERS = [];
const char*[] VK_INSTANCE_LAYERS_DEBUG = [ "VK_LAYER_KHRONOS_validation" ];
// Instance extensions: surface + platform surface, plus debug-utils in debug builds.
version(linux)
{
const char*[] VK_INSTANCE_EXT = [ cast(char*)VK_KHR_SURFACE_EXTENSION_NAME, cast(char*)VK_KHR_XLIB_SURFACE_EXTENSION_NAME ];
const char*[] VK_INSTANCE_EXT_DEBUG = VK_INSTANCE_EXT ~ [ cast(char*)VK_EXT_DEBUG_UTILS_EXTENSION_NAME ];
}
version(Windows)
{
const char*[] VK_INSTANCE_EXT = [ cast(char*)VK_KHR_SURFACE_EXTENSION_NAME, cast(char*)VK_KHR_WIN32_SURFACE_EXTENSION_NAME ];
const char*[] VK_INSTANCE_EXT_DEBUG = VK_INSTANCE_EXT ~ [ cast(char*)VK_EXT_DEBUG_UTILS_EXTENSION_NAME ];
}
// Device extensions required on every GPU.
const char*[] VK_BASE_DEVICE_EXTENSIONS = [
cast(char*)VK_KHR_SWAPCHAIN_EXTENSION_NAME,
cast(char*)VK_KHR_UNIFORM_BUFFER_STANDARD_LAYOUT_EXTENSION_NAME,
cast(char*)VK_KHR_8BIT_STORAGE_EXTENSION_NAME,
cast(char*)VK_KHR_SYNCHRONIZATION_2_EXTENSION_NAME,
cast(char*)VK_EXT_EXTENDED_DYNAMIC_STATE_EXTENSION_NAME,
];
// AMD-only shader-statistics extension (debug tooling).
const char*[] VK_AMD_DEVICE_EXTENSIONS = [
cast(char*)VK_AMD_SHADER_INFO_EXTENSION_NAME,
];
// Device extensions actually requested at device creation.
// BUGFIX: previously, building with AMD_GPU but without VULKAN_DEBUG declared
// no VK_DEVICE_EXTENSIONS at all (the AMD branch only declared it under
// version(VULKAN_DEBUG)), so such builds failed to compile. AMD release
// builds now fall back to the base extension list.
version(AMD_GPU)
{
version(VULKAN_DEBUG)
{
const char*[] VK_DEVICE_EXTENSIONS = VK_AMD_DEVICE_EXTENSIONS ~ VK_BASE_DEVICE_EXTENSIONS;
}
else
{
const char*[] VK_DEVICE_EXTENSIONS = VK_BASE_DEVICE_EXTENSIONS;
}
}
else
{
const char*[] VK_DEVICE_EXTENSIONS = VK_BASE_DEVICE_EXTENSIONS;
}
// Preferred draw-image formats, in priority order.
const VkFormat[] VK_IMAGE_FORMATS = [
VK_FORMAT_R16G16B16A16_UNORM,
VK_FORMAT_R8G8B8A8_UNORM,
];
// Stencil write operations exposed to the engine (subset of VkStencilOp).
enum StencilOp : VkStencilOp
{
Keep = VK_STENCIL_OP_KEEP,
Zero = VK_STENCIL_OP_ZERO,
Replace = VK_STENCIL_OP_REPLACE,
} alias SOP = StencilOp;
// Depth/stencil comparison functions (mirrors VkCompareOp one-to-one).
enum CompareOp : VkCompareOp
{
Never = VK_COMPARE_OP_NEVER,
Less = VK_COMPARE_OP_LESS,
Equal = VK_COMPARE_OP_EQUAL,
LessOrEqual = VK_COMPARE_OP_LESS_OR_EQUAL,
Greater = VK_COMPARE_OP_GREATER,
NotEqual = VK_COMPARE_OP_NOT_EQUAL,
GreaterOrEqual = VK_COMPARE_OP_GREATER_OR_EQUAL,
Always = VK_COMPARE_OP_ALWAYS,
} alias COP = CompareOp;
// Which face(s) a dynamic stencil-state command applies to.
enum StencilFace : VkStencilFaceFlagBits
{
Front = VK_STENCIL_FACE_FRONT_BIT,
Back = VK_STENCIL_FACE_BACK_BIT,
Both = VK_STENCIL_FACE_FRONT_AND_BACK,
} alias SF = StencilFace;
// Shader stage flags; All covers every stage this renderer uses.
enum ShaderStage : VkShaderStageFlagBits
{
None = cast(VkShaderStageFlagBits)0,
Vertex = VK_SHADER_STAGE_VERTEX_BIT,
Fragment = VK_SHADER_STAGE_FRAGMENT_BIT,
Compute = VK_SHADER_STAGE_COMPUTE_BIT,
All = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT | VK_SHADER_STAGE_COMPUTE_BIT,
}
alias SS = ShaderStage;
// Per-vertex vs per-instance attribute stepping.
enum InputRate : int
{
Vertex = VK_VERTEX_INPUT_RATE_VERTEX,
Instance = VK_VERTEX_INPUT_RATE_INSTANCE,
}
alias IR = InputRate;
// Canned usage-flag combinations for the image roles this renderer creates.
enum ImageUsage : VkImageUsageFlagBits
{
None = cast(VkImageUsageFlagBits)0,
Draw = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
Depth = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,
Texture = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT,
Convert = VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
Storage = VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT,
Swapchain = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
}
alias IU = ImageUsage;
// Triangle winding order.
enum FrontFace: VkFrontFace
{
CCW = VK_FRONT_FACE_COUNTER_CLOCKWISE,
CW = VK_FRONT_FACE_CLOCKWISE,
}
alias FF = FrontFace;
// Engine-side format names. Note UINT and R_U32 are the same VkFormat.
enum Format : VkFormat
{
UINT = VK_FORMAT_R32_UINT,
R_F32 = VK_FORMAT_R32_SFLOAT,
RG_F32 = VK_FORMAT_R32G32_SFLOAT,
RGB_F32 = VK_FORMAT_R32G32B32_SFLOAT,
RGBA_F32 = VK_FORMAT_R32G32B32A32_SFLOAT,
// NOTE(review): RGBA_UINT maps to a B8G8R8A8 (BGRA) format, unlike the
// other RGBA_* names — confirm this channel order is intentional.
RGBA_UINT = VK_FORMAT_B8G8R8A8_UINT,
R_U32 = VK_FORMAT_R32_UINT,
RG_U32 = VK_FORMAT_R32G32_UINT,
RGBA_UNORM = VK_FORMAT_R8G8B8A8_UNORM,
RGBA_SRGB = VK_FORMAT_R8G8B8A8_SRGB,
D_SF32 = VK_FORMAT_D32_SFLOAT,
}
alias FMT = Format;
// Buffer roles; BufferView combines both texel-buffer usages.
enum BufferType : VkBufferUsageFlagBits
{
None = cast(VkBufferUsageFlagBits)0,
Vertex = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
Index = VK_BUFFER_USAGE_INDEX_BUFFER_BIT,
Uniform = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT,
Storage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT,
Staging = VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
BufferView = VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT,
}
alias BT = BufferType;
// Image layouts used by Transition()/Copy() throughout this file.
enum ImageLayout : VkImageLayout
{
Undefined = VK_IMAGE_LAYOUT_UNDEFINED,
General = VK_IMAGE_LAYOUT_GENERAL,
ColorAttach = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
DepthAttach = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL,
ReadOnly = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
TransferDst = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
TransferSrc = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
PresentSrc = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
}
alias IL = ImageLayout;
// A buffer plus a texel-buffer view over it; `alias base this` lets a
// BufferView be used wherever a Buffer is expected.
struct BufferView
{
Buffer base;
VkBufferView view;
Format format;
alias base this;
}
// An image, its view, its VMA allocation, and the bookkeeping needed to
// transition it: current layout and the layout shaders should sample it in.
struct ImageView
{
VkImage image;
VkImageView view;
VmaAllocation alloc;
Format format;
ImageUsage usage;
ImageLayout shader_layout;
ImageLayout layout;
u32 w;
u32 h;
u32 ch;
}
// A raw VMA-backed buffer; size is in bytes.
struct Buffer
{
VkBuffer buffer;
VmaAllocation alloc;
u64 size;
}
// Tagged union describing one bound resource; `type` selects which union
// member is live (see the groupings in DescType's comments).
struct Descriptor
{
union
{
Buffer buffer;
ImageView image;
BufferView buffer_view;
VkSampler sampler;
};
DescType type;
u32 binding;
u32 index;
}
// Guarantees the VkBuffer handle sits at the same offset whether the union is
// viewed as a Buffer or a BufferView, so buffer code can use either member.
static assert(Descriptor.buffer.buffer.offsetof == Descriptor.buffer_view.base.buffer.offsetof);
// Creation parameters for CreateDescriptor; the anonymous unions overlay
// image, buffer, and sampler parameters — fill only the group matching `type`.
struct DescInfo
{
DescType type;
u32 binding;
u32 index;
Format format;
union
{
// Image descriptors: dimensions, channel count, and usage.
struct
{
u32 w, h, ch;
ImageUsage usage;
};
// Buffer descriptors: byte size, role, and memory domain.
struct
{
u64 size;
BufferType buffer_type;
bool host_visible;
};
// Sampler descriptors.
struct
{
MipmapMode mipmap_mode;
};
};
}
alias Desc = Descriptor;
// Sampler mip filtering mode.
enum MipmapMode : VkSamplerMipmapMode
{
Nearest = VK_SAMPLER_MIPMAP_MODE_NEAREST,
Linear = VK_SAMPLER_MIPMAP_MODE_LINEAR,
}
// Which kind of pipeline a handle refers to.
enum PipelineType : int
{
Graphics,
Compute,
}
alias PT = PipelineType;
// Blend factors (mirrors VkBlendFactor one-to-one).
enum BlendFactor : VkBlendFactor
{
Zero = VK_BLEND_FACTOR_ZERO,
One = VK_BLEND_FACTOR_ONE,
SrcColor = VK_BLEND_FACTOR_SRC_COLOR,
OneMinusSrcColor = VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR,
DstColor = VK_BLEND_FACTOR_DST_COLOR,
OneMinusDstColor = VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR,
SrcAlpha = VK_BLEND_FACTOR_SRC_ALPHA,
OneMinusSrcAlpha = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA,
DstAlpha = VK_BLEND_FACTOR_DST_ALPHA,
OneMinusDstAlpha = VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA,
ConstColor = VK_BLEND_FACTOR_CONSTANT_COLOR,
OneMinusConstColor = VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR,
ConstAlpha = VK_BLEND_FACTOR_CONSTANT_ALPHA,
OneMinusConstAlpha = VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA,
SrcAlphaSaturate = VK_BLEND_FACTOR_SRC_ALPHA_SATURATE,
Src1Color = VK_BLEND_FACTOR_SRC1_COLOR,
OneMinusSrc1Color = VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR,
Src1Alpha = VK_BLEND_FACTOR_SRC1_ALPHA,
OneMinusSrc1Alpha = VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA,
}
alias BF = BlendFactor;
// Blend equations (mirrors VkBlendOp).
enum BlendOp : VkBlendOp
{
Add = VK_BLEND_OP_ADD,
Sub = VK_BLEND_OP_SUBTRACT,
ReverseSub = VK_BLEND_OP_REVERSE_SUBTRACT,
Min = VK_BLEND_OP_MIN,
Max = VK_BLEND_OP_MAX,
}
alias BO = BlendOp;
// SPIR-V specialization-constant payload: `entries` map constant IDs into the
// `size` bytes at `data`.
struct Specialization
{
u64 size;
SpecEntry[] entries;
void* data;
}
// Everything needed to build one graphics pipeline: SPIR-V bytes for each
// stage, vertex input description, per-stage specialization, and blend state.
struct GfxPipelineInfo
{
u8[] vertex_shader;
u8[] frag_shader;
InputRate input_rate;
u32 input_rate_stride;
Attribute[] vertex_attributes;
Specialization vert_spec;
Specialization frag_spec;
// NOTE(review): presumably enables a subpass self-dependency for
// programmable-blend-style reads — confirm at the pipeline-creation site.
bool self_dependency;
PipelineLayout layout;
FrontFace front_face;
VkBlendFactor src_color;
VkBlendFactor dst_color;
VkBlendOp color_op;
VkBlendFactor src_alpha;
VkBlendFactor dst_alpha;
VkBlendOp alpha_op;
}
// Compute-pipeline creation parameters.
struct CompPipelineInfo
{
u8[] shader;
Specialization spec;
PipelineLayout layout;
}
// Ordered initialization milestones; used to know how far teardown must unwind.
enum StepInitialized : u32
{
Renderer = 1,
Instance,
Debug,
Surface,
Device,
Vma,
Buffers,
FrameStructures,
Swapchain,
DrawImages,
DescriptorPools,
Pipelines,
}
alias SI = StepInitialized;
// Descriptor types, grouped by which Descriptor union member they use.
enum DescType : VkDescriptorType
{
// VkSampler
Sampler = VK_DESCRIPTOR_TYPE_SAMPLER,
// ImageView
CombinedSampler = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
Image = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
StorageImage = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
InputAttach = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT,
// BufferView
UniformTexelBuf = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
StorageTexelBuf = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
// Buffer
Uniform = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
DynamicUniform = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC,
Storage = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
DynamicStorage = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC,
Max,
}
alias DT = DescType;
// A buffer persistently mapped as a typed slice; `offset` tracks the caller's
// write cursor. `alias base this` lets it pass as a plain Buffer.
struct MappedBuffer(T)
{
Buffer base;
T[] data;
u64 offset;
alias base this;
}
// Central renderer context. A single global instance (g_vk) owns every Vulkan
// object. Arrays sized FRAME_OVERLAP are indexed by frame_index (frame in
// flight); per-swapchain-image arrays are indexed by image_index.
struct Vulkan
{
Arena arena;
Arena[FRAME_OVERLAP] frame_arenas;
u32 frame_index;
u32 semaphore_index;
PlatformHandles platform_handles;
VkDebugUtilsMessengerEXT dbg_msg;
VkInstance instance;
VkSurfaceKHR surface;
VkPhysicalDevice physical_device;
VkDevice device;
VmaAllocator vma;
VkSwapchainKHR swapchain;
VkSurfaceFormatKHR surface_format;
VkPresentModeKHR present_mode;
VkExtent3D swapchain_extent;
f32[2] res;
// Set externally to force a swapchain rebuild on the next acquire/present.
bool recreate_swapchain;
VkRenderPass render_pass;
VkFramebuffer framebuffer;
Descriptor[] present_images;
u32 image_index;
// Offscreen color target rendered into each frame, then blitted to the
// acquired swapchain image in SubmitAndPresent.
Descriptor draw_image;
Descriptor depth_image;
// Per-frame graphics command pools/buffers plus a dedicated compute set.
VkCommandPool[FRAME_OVERLAP] cmd_pools;
VkCommandBuffer[FRAME_OVERLAP] cmds;
VkCommandPool comp_cmd_pool;
VkCommandBuffer comp_cmd;
VkFence comp_fence;
// submit_sems is per swapchain image; acquire_sems/render_fences per frame.
VkSemaphore[] submit_sems;
VkSemaphore[FRAME_OVERLAP] acquire_sems;
VkFence[FRAME_OVERLAP] render_fences;
// Immediate-submit (staging transfer) resources, guarded by imm_fence.
VkCommandPool imm_pool;
VkCommandBuffer imm_cmd;
VkFence imm_fence;
// Descriptor-pool rollover: exhausted pools are retired into full_pools.
VkDescriptorPool active_pool;
SLList!(VkDescriptorPool) full_pools;
PipelineHandles[] pipeline_handles;
u32 pipeline_count;
// Last pipeline bound per frame, used to skip redundant binds.
VkPipeline[FRAME_OVERLAP] last_pipelines;
DescSetLayout global_set_layout;
DescSet global_set;
// Shared host-visible staging buffer used by Transfer().
MappedBuffer!(u8) transfer_buf;
QueueInfo queues;
// Compute pipelines that expand 1/2/3-channel uploads to RGBA
// (see CreateImageView(Descriptor*, u8[])).
Pipeline r_to_rgba_pipeline;
Pipeline rg_to_rgba_pipeline;
Pipeline rgb_to_rgba_pipeline;
DescSet conv_desc_set;
VkDescriptorSetLayout conv_desc_layout;
VkPipelineLayout conv_pipeline_layout;
VkPipeline last_comp_pipeline;
bool compute_pass_started;
f32[4] color_clear;
f32[4] depth_clear;
// --- Accessors selecting the current frame's resources ---
@property VkCommandPool
cmd_pool()
{
return this.cmd_pools[this.frame_index];
}
@property VkCommandBuffer
cmd()
{
return this.cmds[this.frame_index];
}
@property VkSemaphore
acquire_sem()
{
return this.acquire_sems[this.frame_index];
}
@property VkSemaphore*
acquire_sem_ptr()
{
return this.acquire_sems.ptr + this.frame_index;
}
@property VkFence
render_fence()
{
return this.render_fences[this.frame_index];
}
@property VkFence*
render_fence_ptr()
{
return this.render_fences.ptr + this.frame_index;
}
@property VkPipeline
last_pipeline()
{
return this.last_pipelines[this.frame_index];
}
@property VkPipeline
last_pipeline(VkPipeline pipeline)
{
return this.last_pipelines[this.frame_index] = pipeline;
}
// --- Accessors selecting the currently-acquired swapchain image ---
@property VkSemaphore
submit_sem()
{
return this.submit_sems[this.image_index];
}
@property ref Descriptor
present_image()
{
return this.present_images[this.image_index];
}
@property Descriptor*
present_image_ptr()
{
return this.present_images.ptr + this.image_index;
}
// Scratch arena reset per frame; allocations live for at most one frame.
Arena*
FrameArena()
{
return &this.frame_arenas[this.frame_index];
}
// Exposes QueueInfo fields (gfx_queue, tfer_queue, ...) directly on Vulkan.
alias queues this;
}
// Push constants for the channel-conversion compute shader: source image
// dimensions in texels.
struct ConvPushConst
{
u32 x;
u32 y;
}
// Bookkeeping for one created pipeline, addressed by its `index` (the public
// Pipeline handle value).
struct PipelineHandles
{
VkPipeline handle;
VkPipelineBindPoint type;
VkPipelineLayout layout;
Pipeline index;
}
// Queue family selection; single_queue marks gfx and transfer sharing a family.
struct QueueInfo
{
i32 gfx_index, tfer_index;
VkQueue gfx_queue, tfer_queue;
bool single_queue;
}
// The one renderer instance, shared across threads without synchronization —
// callers are expected to drive it from a single thread.
__gshared Vulkan g_vk;
// SPIR-V for the channel-expansion compute shader, embedded at compile time.
u8[] CONVERT_SHADER_BYTES = cast(u8[])import("convert.comp.spv");
DescSetLayout
CreateDescSetLayout(DescLayoutBinding[] bindings)
{
    // Thin wrapper over vkCreateDescriptorSetLayout; aborts via VkCheckA on failure.
    DescSetLayout new_layout;
    VkDescriptorSetLayoutCreateInfo create_info = {
        sType: VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
        bindingCount: cast(u32)bindings.length,
        pBindings: bindings.ptr,
    };
    VkResult res = vkCreateDescriptorSetLayout(g_vk.device, &create_info, null, &new_layout);
    VkCheckA("vkCreateDescriptorSetLayout failure", res);
    return new_layout;
}
DescSet
AllocDescSet(DescSetLayout layout, u32 dynamic_count = 0)
{
    // Allocates a single descriptor set from the currently-active pool. When
    // the pool is exhausted or fragmented, retires it (PushDescriptorPool)
    // and retries once against the fresh pool.
    VkDescriptorSetAllocateInfo alloc_info = {
        sType: VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
        descriptorSetCount: 1,
        pSetLayouts: &layout,
        descriptorPool: g_vk.active_pool,
    };
    DescSet set = { dynamic_count: dynamic_count };
    VkResult result = vkAllocateDescriptorSets(g_vk.device, &alloc_info, &set.handle);
    if(result == VK_ERROR_OUT_OF_POOL_MEMORY || result == VK_ERROR_FRAGMENTED_POOL)
    {
        PushDescriptorPool();
        alloc_info.descriptorPool = g_vk.active_pool;
        result = vkAllocateDescriptorSets(g_vk.device, &alloc_info, &set.handle);
    }
    // BUGFIX: previously only the retry path was checked — any first-attempt
    // error other than pool exhaustion was silently ignored and a null handle
    // returned. Every path is now verified.
    VkCheckA("vkAllocateDescriptorSets failure", result);
    return set;
}
// Builds a pipeline layout whose set 0 is always the renderer's global set
// layout, with the caller's layout(s) following at set 1+. `push_const_size`
// of 0 omits the push-constant range entirely. The temporary layout array is
// frame-arena allocated (lives at most one frame, which outlasts this call).
PipelineLayout
CreatePipelineLayout(T)(T layouts, u32 push_const_size, bool compute = false) if(is(T: DescSetLayout) || is(T: DescSetLayout[]))
{
// Push constants are visible to compute, or to both raster stages.
VkShaderStageFlagBits stage = (compute ? VK_SHADER_STAGE_COMPUTE_BIT : VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT);
DescSetLayout[] desc_layouts;
static if(is(T: DescSetLayout))
{
desc_layouts = Alloc!(DescSetLayout)(g_vk.FrameArena(), 2);
desc_layouts[0] = g_vk.global_set_layout;
desc_layouts[1] = layouts;
}
else static if(is(T: DescSetLayout[]))
{
desc_layouts = Alloc!(DescSetLayout)(g_vk.FrameArena(), layouts.length + 1);
desc_layouts[0] = g_vk.global_set_layout;
foreach(i; 0 .. layouts.length)
{
desc_layouts[i+1] = layouts[i];
}
}
VkPushConstantRange const_range = {
offset: 0,
// NOTE(review): VkPushConstantRange.size is 32-bit; the VkDeviceSize cast
// here looks unnecessary — confirm against the binding's field type.
size: cast(VkDeviceSize)push_const_size,
stageFlags: stage,
};
VkPipelineLayoutCreateInfo layout_info = {
sType: VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
setLayoutCount: cast(u32)desc_layouts.length,
pSetLayouts: desc_layouts.ptr,
pushConstantRangeCount: (push_const_size > 0 ? 1 : 0),
pPushConstantRanges: (push_const_size > 0 ? &const_range : null),
};
PipelineLayout layout;
VkResult result = vkCreatePipelineLayout(g_vk.device, &layout_info, null, &layout);
VkCheckA("CreatePipelineLayout failure", result);
return layout;
}
// Allocates `buffer` through VMA. Host-visible buffers are coherent and
// persistently mapped; device-local buffers additionally get TRANSFER_DST so
// staging uploads can reach them. When the graphics and transfer queues come
// from different families, the buffer is created with concurrent sharing.
void
CreateBuffer(Buffer* buffer, BufferType type, u64 size, bool host_visible)
{
assert(type != BT.None, "CreateBuffer failure: type is None");
VkBufferCreateInfo buffer_info = {
sType: VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
usage: type,
size: size,
};
VmaAllocationCreateInfo alloc_info = {
usage: VMA_MEMORY_USAGE_UNKNOWN,
// NOTE(review): MAPPED_BIT is requested even for device-local buffers;
// VMA presumably ignores it for non-HOST_VISIBLE memory — confirm.
flags: VMA_ALLOCATION_CREATE_MAPPED_BIT,
};
if(host_visible)
{
alloc_info.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
alloc_info.preferredFlags = VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
}
else
{
buffer_info.usage |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
alloc_info.requiredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
}
u32[2] indices = [g_vk.queues.gfx_index, g_vk.queues.tfer_index];
if(g_vk.queues.gfx_index != g_vk.queues.tfer_index)
{
buffer_info.sharingMode = VK_SHARING_MODE_CONCURRENT;
buffer_info.queueFamilyIndexCount = 2;
buffer_info.pQueueFamilyIndices = indices.ptr;
}
VmaAllocationInfo vma_info;
VkResult result = vmaCreateBuffer(g_vk.vma, &buffer_info, &alloc_info, &buffer.buffer, &buffer.alloc, &vma_info);
// TODO: handle errors here then reallocate buffer
VkCheck("CreateBuffer failure: vmaCreateBuffer error", result);
buffer.size = size;
}
void
BindBuffers(Buffer* index_buffer, Buffer* vertex_buffer)
{
    // Binds a u32 index buffer and a single vertex buffer at binding 0 on the
    // current frame's command buffer.
    VkDeviceSize zero_offset = 0;
    vkCmdBindIndexBuffer(g_vk.cmd, index_buffer.buffer, 0, VK_INDEX_TYPE_UINT32);
    vkCmdBindVertexBuffers(g_vk.cmd, 0, 1, &vertex_buffer.buffer, &zero_offset);
}
void
BindBuffers(T)(MappedBuffer!(u32)* index_buffer, MappedBuffer!(T)* vertex_buffer)
{
    // Same as the Buffer* overload, for persistently-mapped buffers (their
    // `alias base this` exposes the underlying VkBuffer).
    VkDeviceSize zero_offset = 0;
    vkCmdBindIndexBuffer(g_vk.cmd, index_buffer.buffer, 0, VK_INDEX_TYPE_UINT32);
    vkCmdBindVertexBuffers(g_vk.cmd, 0, 1, &vertex_buffer.buffer, &zero_offset);
}
MappedBuffer!(T)
CreateMappedBuffer(T)(BufferType type, u64 count)
{
    // Creates a host-visible buffer sized for `count` elements of T and maps
    // it so the CPU can write through `.data` directly.
    MappedBuffer!(T) mapped;
    u64 byte_size = T.sizeof * count;
    CreateBuffer(&mapped.base, type, byte_size, true);
    mapped.data = MapBuffer!(T)(&mapped.base, count);
    return mapped;
}
T[]
MapBuffer(T)(Buffer* buffer, u64 count)
{
    // Maps the buffer's allocation and returns it as a typed slice of `count`
    // elements. The caller is responsible for ensuring count * T.sizeof fits
    // within buffer.size.
    void* ptr;
    VkResult result = vmaMapMemory(g_vk.vma, buffer.alloc, &ptr);
    // BUGFIX: the result was previously ignored — a failed map would have
    // produced a slice over a null pointer and crashed at first write.
    VkCheckA("MapBuffer failure: vmaMapMemory error", result);
    return (cast(T*)ptr)[0 .. count];
}
// Starts a frame: waits for this frame slot's previous submission, acquires
// the next swapchain image (rebuilding the swapchain and its acquire
// semaphore on OUT_OF_DATE), and opens the frame command buffer.
void
BeginFrame()
{
// TODO: move vkWaitForFences so it no longer holds up the frame, will need to change how fences are handled in regards to images though
VkResult result = vkWaitForFences(g_vk.device, 1, g_vk.render_fence_ptr, VK_TRUE, 1000000000);
VkCheckA("BeginFrame failure: vkWaitForFences error", result);
result = vkResetFences(g_vk.device, 1, g_vk.render_fence_ptr);
VkCheckA("BeginFrame failure: vkResetFences error", result);
// ?? dont know why this is 3 (should have commented it)
// NOTE(review): presumably a bounded retry for back-to-back swapchain
// recreation (e.g. during a resize) — confirm; the loop retries acquire
// after each RecreateSwapchain.
for(u64 i = 0; i < 3; i += 1)
{
result = vkAcquireNextImageKHR(g_vk.device, g_vk.swapchain, 1000000000, g_vk.acquire_sem, null, &g_vk.image_index);
if(result == VK_ERROR_OUT_OF_DATE_KHR || g_vk.recreate_swapchain)
{
RecreateSwapchain();
// The acquire semaphore may have been left signalled/pending by the
// failed acquire; replace it before retrying.
VkSemaphoreCreateInfo sem_info = { sType: VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO };
vkDestroySemaphore(g_vk.device, g_vk.acquire_sem, null);
VkResult sem_res = vkCreateSemaphore(g_vk.device, &sem_info, null, g_vk.acquire_sem_ptr);
VkCheckA("Recreating swapchain sem reset failure", sem_res);
continue;
}
else if(result != VK_SUBOPTIMAL_KHR)
{
// SUBOPTIMAL still acquired an image; only other codes are errors.
VkCheckA("BeginFrame failure: vkAcquireNextImageKHR error", result);
}
break;
}
result = vkResetCommandBuffer(g_vk.cmd, 0);
VkCheckA("BeginFrame failure: vkResetCommandBuffer failure", result);
VkCommandBufferBeginInfo cmd_info = {
sType: VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
flags: VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
};
// The acquired image's contents are not preserved across frames.
g_vk.present_image_ptr.image.layout = IL.Undefined;
result = vkBeginCommandBuffer(g_vk.cmd, &cmd_info);
VkCheckA("BeginFrame failure: vkBeginCommandBuffer error", result);
}
void
SetClearColors(f32[4] color_clear, f32[4] depth_clear)
{
    // Stores the clear values consumed by BeginRendering's render-pass begin.
    g_vk.color_clear[] = color_clear[];
    g_vk.depth_clear[] = depth_clear[];
}
void
BeginRendering()
{
    // Moves the offscreen color/depth targets into attachment layouts,
    // readies the swapchain image for the end-of-frame blit, then begins the
    // render pass with the clear values set via SetClearColors.
    Transition(g_vk.cmd, &g_vk.draw_image, IL.ColorAttach);
    Transition(g_vk.cmd, &g_vk.depth_image, IL.DepthAttach);
    Transition(g_vk.cmd, g_vk.present_image_ptr, IL.TransferDst);
    // BUGFIX: the depth attachment's clear was written through the `color`
    // member of the VkClearValue union; it only worked because
    // color.float32[0] happens to alias depthStencil.depth. Use the
    // depthStencil member, which is what a depth attachment actually reads.
    // (The depth image is D32_SFLOAT, so the stencil value is ignored.)
    VkClearValue[2] clear_values = [
        { color: { float32: g_vk.color_clear } },
        { depthStencil: { depth: g_vk.depth_clear[0], stencil: 0 } },
    ];
    VkRenderPassBeginInfo pass_info = {
        sType: VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
        renderPass: g_vk.render_pass,
        framebuffer: g_vk.framebuffer,
        renderArea: {
            offset: { x: 0, y: 0 },
            extent: {
                width: g_vk.swapchain_extent.width,
                height: g_vk.swapchain_extent.height,
            },
        },
        clearValueCount: cast(u32)clear_values.length,
        pClearValues: clear_values.ptr,
    };
    vkCmdBeginRenderPass(g_vk.cmd, &pass_info, VK_SUBPASS_CONTENTS_INLINE);
}
u32[2]
GetExtent()
{
    // Current swapchain dimensions as [width, height].
    u32[2] wh = [g_vk.swapchain_extent.width, g_vk.swapchain_extent.height];
    return wh;
}
f32
GetAspect()
{
    // Width / height of the current swapchain extent.
    f32 w = cast(f32)g_vk.swapchain_extent.width;
    f32 h = cast(f32)g_vk.swapchain_extent.height;
    return w / h;
}
// Puts the draw image into GENERAL layout so compute shaders can write it
// as a storage image.
void
PrepComputeDrawImage()
{
Transition(g_vk.cmd, &g_vk.draw_image, IL.General);
}
// Ends the render pass begun by BeginRendering.
void
FinishRendering()
{
vkCmdEndRenderPass(g_vk.cmd);
}
// Ends the frame: blits the draw image to the acquired swapchain image,
// submits the frame command buffer (waiting on the acquire semaphore,
// signalling the per-image submit semaphore and the frame fence), and
// presents. OUT_OF_DATE/SUBOPTIMAL at present triggers a swapchain rebuild.
void
SubmitAndPresent()
{
// Advance the frame slot (and clear the bound-pipeline cache) even on a
// swapchain-recreate path.
scope(exit)
{
g_vk.last_pipeline = null;
g_vk.frame_index = (g_vk.frame_index + 1) % FRAME_OVERLAP;
}
VkImage image = g_vk.present_image.image.image;
VkSemaphore acquire_sem = g_vk.acquire_sem;
VkSemaphore submit_sem = g_vk.submit_sem;
Transition(g_vk.cmd, &g_vk.draw_image, IL.TransferSrc);
VkExtent2D extent = {
width: g_vk.swapchain_extent.width,
height: g_vk.swapchain_extent.height,
};
// TODO: Find out how to copy from same dimension images (pretty sure its not blitting)
Copy(g_vk.cmd, &g_vk.draw_image, image, IL.TransferDst, extent, extent);
Transition(g_vk.cmd, image, IL.TransferDst, IL.PresentSrc);
VkResult result = vkEndCommandBuffer(g_vk.cmd);
VkCheckA("FinishFrame failure: vkEndCommandBuffer error", result);
VkCommandBufferSubmitInfo cmd_info = {
sType: VK_STRUCTURE_TYPE_COMMAND_BUFFER_SUBMIT_INFO,
commandBuffer: g_vk.cmd,
};
// Wait for the image acquire before color-attachment output.
VkSemaphoreSubmitInfo wait_info = {
sType: VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO,
semaphore: acquire_sem,
stageMask: VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT,
value: 1,
};
// Signal the per-image semaphore that present waits on.
VkSemaphoreSubmitInfo signal_info = {
sType: VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO,
semaphore: submit_sem,
stageMask: VK_PIPELINE_STAGE_2_ALL_GRAPHICS_BIT,
value: 1,
};
VkSubmitInfo2 submit_info = {
sType: VK_STRUCTURE_TYPE_SUBMIT_INFO_2,
waitSemaphoreInfoCount: 1,
pWaitSemaphoreInfos: &wait_info,
signalSemaphoreInfoCount: 1,
pSignalSemaphoreInfos: &signal_info,
commandBufferInfoCount: 1,
pCommandBufferInfos: &cmd_info,
};
result = vkQueueSubmit2(g_vk.queues.gfx_queue, 1, &submit_info, g_vk.render_fence);
VkCheckA("FinishFrame failure: vkQueueSubmit2 error", result);
VkPresentInfoKHR present_info = {
sType: VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
swapchainCount: 1,
pSwapchains: &g_vk.swapchain,
waitSemaphoreCount: 1,
pWaitSemaphores: &submit_sem,
pImageIndices: &g_vk.image_index,
};
result = vkQueuePresentKHR(g_vk.queues.gfx_queue, &present_info);
if(result == VK_ERROR_OUT_OF_DATE_KHR || result == VK_SUBOPTIMAL_KHR || g_vk.recreate_swapchain)
{
RecreateSwapchain();
}
else
{
VkCheckA("FinishFrame failure: vkQueuePresentKHR failure", result);
}
}
// Non-indexed draw on the current frame command buffer.
// NOTE(review): `index_count` is passed as vkCmdDraw's vertexCount — the
// parameter is really a vertex count; consider renaming when callers allow.
void
Draw(u32 index_count, u32 instance_count)
{
vkCmdDraw(g_vk.cmd, index_count, instance_count, 0, 0);
}
// Indexed draw on the current frame command buffer; arguments map one-to-one
// onto vkCmdDrawIndexed.
void
DrawIndexed(u32 idx_count, u32 instance_count, u32 idx_offset, i32 vtx_offset, u32 first_instance)
{
vkCmdDrawIndexed(g_vk.cmd, idx_count, instance_count, idx_offset, vtx_offset, first_instance);
}
bool
ImmSubmitStart()
{
    // Waits for any in-flight immediate submission, then resets the fence and
    // command buffer and opens the buffer for one-shot recording. Returns
    // false as soon as any step fails (each failure is logged by VkCheck).
    VkResult res = vkWaitForFences(g_vk.device, 1, &g_vk.imm_fence, true, 999999999);
    if(!VkCheck("ImmSubmit failure: vkWaitForFences error", res))
    {
        return false;
    }
    res = vkResetFences(g_vk.device, 1, &g_vk.imm_fence);
    if(!VkCheck("ImmSubmit failure: vkResetFences error", res))
    {
        return false;
    }
    res = vkResetCommandBuffer(g_vk.imm_cmd, 0);
    if(!VkCheck("ImmSubmit failure: vkResetCommandBuffer error", res))
    {
        return false;
    }
    VkCommandBufferBeginInfo begin_info = {
        sType: VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
        flags: VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
    };
    res = vkBeginCommandBuffer(g_vk.imm_cmd, &begin_info);
    return VkCheck("ImmSubmit failure: vkBeginCommandBuffer error", res);
}
bool
ImmSubmitFinish()
{
    // Submits the recorded immediate command buffer on the transfer queue;
    // imm_fence signals when the GPU finishes.
    VkCommandBufferSubmitInfo cmd_submit = {
        sType: VK_STRUCTURE_TYPE_COMMAND_BUFFER_SUBMIT_INFO,
        commandBuffer: g_vk.imm_cmd,
    };
    VkSubmitInfo2 submit = {
        sType: VK_STRUCTURE_TYPE_SUBMIT_INFO_2,
        commandBufferInfoCount: 1,
        pCommandBufferInfos: &cmd_submit,
    };
    VkResult res = vkQueueSubmit2(g_vk.tfer_queue, 1, &submit, g_vk.imm_fence);
    return VkCheck("ImmSubmit failure: vkQueueSubmit2 error", res);
}
bool
ImmSubmit(Descriptor* desc, VkBufferImageCopy copy, void function(Descriptor*, VkBufferImageCopy) fn)
{
    // Records `fn` (a buffer-to-image copy) into the immediate command buffer
    // and submits it; returns false if any stage fails.
    if(!ImmSubmitStart())
    {
        return false;
    }
    fn(desc, copy);
    VkResult res = vkEndCommandBuffer(g_vk.imm_cmd);
    if(!VkCheck("ImmSubmit failure: vkEndCommandBuffer error", res))
    {
        return false;
    }
    return ImmSubmitFinish();
}
bool
ImmSubmit(Descriptor* desc, VkBufferCopy copy, void function(Descriptor*, VkBufferCopy) fn)
{
    // Records `fn` (a buffer-to-buffer copy) into the immediate command
    // buffer and submits it; returns false if any stage fails.
    if(!ImmSubmitStart())
    {
        return false;
    }
    fn(desc, copy);
    VkResult res = vkEndCommandBuffer(g_vk.imm_cmd);
    if(!VkCheck("ImmSubmit failure: vkEndCommandBuffer error", res))
    {
        return false;
    }
    return ImmSubmitFinish();
}
// Stub — asset batching is not implemented yet; always reports success.
bool
TransferAssets()
{
return true;
}
// Blocks (without timeout) until the immediate-submit fence signals, i.e.
// until all staged transfers have completed on the GPU.
void
WaitForTransfers()
{
vkWaitForFences(g_vk.device, 1, &g_vk.imm_fence, VK_TRUE, u64.max);
}
// Creates the backing resource for `desc` according to `info.type`, then
// optionally uploads `data` into it. `data` may be a byte slice (images and
// buffers) or a pointer to a single struct (buffers).
void
CreateDescriptor(T = u8[])(Descriptor* desc, DescInfo info, T data = null) if(is(T: u8[]) || isPointer!(T))
{
// Type groupings mirror the comments on DescType.
enum DescType[] image_types = [DT.CombinedSampler, DT.Image, DT.StorageImage, DT.InputAttach];
enum DescType[] texel_types = [DT.UniformTexelBuf, DT.StorageTexelBuf];
enum DescType[] buffer_types = [DT.Uniform, DT.DynamicUniform, DT.Storage, DT.DynamicStorage];
desc.type = info.type;
desc.binding = info.binding;
desc.index = info.index;
if(canFind(image_types, info.type))
{
desc.image.usage = info.usage;
desc.image.format = info.format;
desc.image.layout = IL.Undefined;
desc.image.w = info.w;
desc.image.h = info.h;
desc.image.ch = info.ch;
// Image uploads only accept byte slices; a pointer T falls through to
// creating an empty image.
static if(is(T: u8[]))
{
if(data.length)
{
CreateImageView(desc, data);
}
else
{
CreateImageView(desc);
}
}
else
{
CreateImageView(desc);
}
}
else if(canFind(texel_types, info.type))
{
desc.buffer_view.size = info.size;
desc.buffer_view.format = info.format;
CreateBufferView(desc);
if(data)
{
Transfer!(false)(desc, data);
}
}
else if(canFind(buffer_types, info.type))
{
CreateBuffer(&desc.buffer, info.buffer_type, info.size, info.host_visible);
if(data)
{
Transfer!(false)(desc, data);
}
}
else if(info.type == DT.Sampler)
{
CreateSampler(desc, info.mipmap_mode);
}
else assert(false, "Unknown descriptor type in CreateDescriptor");
}
// Creates the image then uploads `data` into it. 4-channel data is staged
// directly; 1/2/3-channel data is routed through a compute shader that
// expands it to RGBA (the image itself is always RGBA): upload bytes into a
// storage buffer, dispatch the matching *_to_rgba pipeline into a scratch
// storage image, then copy that into the destination image.
void
CreateImageView(Descriptor* desc, u8[] data)
{
CreateImageView(desc);
if(desc.image.ch == 4)
{
bool result = Transfer!(true)(desc, data);
assert(result);
}
else
{
// Staging buffer holding the raw n-channel source bytes.
Descriptor buffer;
CreateDescriptor(&buffer, DescInfo(
type: DT.Storage,
buffer_type: BT.Storage,
size: desc.image.w * desc.image.h * desc.image.ch,
binding: 1,
));
bool result = Transfer!(false)(&buffer, data);
assert(result, "CreateImageView failure: Buffer Transfer error");
// Scratch image the compute shader writes RGBA texels into.
Descriptor conv_view;
CreateDescriptor(&conv_view, DescInfo(
type: DT.StorageImage,
w: desc.image.w,
h: desc.image.h,
format: FMT.RGBA_F32,
usage: IU.Convert,
));
Write(g_vk.conv_desc_set, [conv_view, buffer]);
BeginComputePass();
// Pick the expansion pipeline matching the source channel count.
Pipeline pipeline = desc.image.ch == 1 ? g_vk.r_to_rgba_pipeline :
desc.image.ch == 2 ? g_vk.rg_to_rgba_pipeline :
g_vk.rgb_to_rgba_pipeline;
Bind(pipeline, g_vk.conv_desc_set, true);
ConvPushConst pc = { x: desc.image.w, y: desc.image.h };
vkCmdPushConstants(
g_vk.comp_cmd,
g_vk.conv_pipeline_layout,
VK_SHADER_STAGE_COMPUTE_BIT,
0,
ConvPushConst.sizeof,
&pc
);
Transition(g_vk.comp_cmd, &conv_view, IL.General);
Dispatch(g_vk.comp_cmd);
Transition(g_vk.comp_cmd, desc, IL.TransferDst);
VkExtent2D extent = { width: desc.image.w, height: desc.image.h };
Copy(g_vk.comp_cmd, &conv_view, desc, extent, extent);
Transition(g_vk.comp_cmd, desc, IL.ReadOnly);
FinishComputePass();
// Block until the conversion finishes before freeing the scratch resources.
vkWaitForFences(g_vk.device, 1, &g_vk.comp_fence, VK_TRUE, u64.max);
vkQueueWaitIdle(g_vk.tfer_queue);
Destroy(&buffer);
Destroy(&conv_view);
}
}
// Creates the VkImage + VkImageView for `desc` from its already-filled
// ImageView fields (format, usage, w/h). Device-local, optimal tiling,
// single mip/layer; concurrent sharing when gfx and transfer queue families
// differ. Also records which layout shaders should see the image in.
void
CreateImageView(Descriptor* desc)
{
VmaAllocationCreateInfo alloc_info = {
usage: VMA_MEMORY_USAGE_GPU_ONLY,
requiredFlags: VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
};
VkImageCreateInfo image_info = {
sType: VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
imageType: VK_IMAGE_TYPE_2D,
mipLevels: 1,
arrayLayers: 1,
format: desc.image.format,
tiling: VK_IMAGE_TILING_OPTIMAL,
initialLayout: VK_IMAGE_LAYOUT_UNDEFINED,
usage: desc.image.usage,
samples: VK_SAMPLE_COUNT_1_BIT,
extent: {
width: desc.image.w,
height: desc.image.h,
depth: 1,
},
};
u32[2] indices = [g_vk.gfx_index, g_vk.tfer_index];
if(g_vk.gfx_index != g_vk.tfer_index)
{
image_info.sharingMode = VK_SHARING_MODE_CONCURRENT;
image_info.queueFamilyIndexCount = 2;
image_info.pQueueFamilyIndices = indices.ptr;
}
VkResult result = vmaCreateImage(g_vk.vma, &image_info, &alloc_info, &desc.image.image, &desc.image.alloc, null);
// TODO: handle errors and realloc
VkCheck("CreateImageView failure: vmaCreateImage error", result);
VkImageViewCreateInfo view_info = {
sType: VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
image: desc.image.image,
viewType: VK_IMAGE_VIEW_TYPE_2D,
format: desc.image.format,
subresourceRange: {
// Depth targets view the depth aspect; everything else is color.
aspectMask: (desc.image.usage == IU.Depth ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT),
levelCount: 1,
layerCount: 1,
},
};
result = vkCreateImageView(g_vk.device, &view_info, null, &desc.image.view);
// TODO: also handle here
VkCheck("CreateImageView failure: vkCreateImageView error", result);
// shader_layout is the layout Transition() targets when shaders access the
// image: storage-style usages use GENERAL, sampled textures use READ_ONLY.
if(desc.image.usage == IU.Draw || desc.image.usage == IU.Depth || desc.image.usage == IU.Convert)
{
desc.image.shader_layout = IL.General;
}
else if(desc.image.usage == IU.Texture)
{
desc.image.shader_layout = IL.ReadOnly;
}
else assert(false, "Unimplemented usage");
desc.image.layout = IL.Undefined;
}
void
BeginComputePass()
{
    // Waits for the previous compute submission, then resets and opens the
    // dedicated compute command buffer for one-shot recording.
    VkResult result = vkWaitForFences(g_vk.device, 1, &g_vk.comp_fence, VK_TRUE, 1000000000);
    VkCheckA("BeginComputePass failure: vkWaitForFences error", result);
    result = vkResetFences(g_vk.device, 1, &g_vk.comp_fence);
    // BUGFIX: this message previously read "BeginComputepass" — inconsistent
    // casing made log greps for "BeginComputePass" miss it.
    VkCheckA("BeginComputePass failure: vkResetFences error", result);
    result = vkResetCommandBuffer(g_vk.comp_cmd, 0);
    VkCheckA("BeginComputePass failure: vkResetCommandBuffer error", result);
    VkCommandBufferBeginInfo cmd_info = {
        sType: VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
        flags: VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
    };
    result = vkBeginCommandBuffer(g_vk.comp_cmd, &cmd_info);
    VkCheckA("BeginComputePass failure: vkBeginCommandBuffer error", result);
}
void
FinishComputePass()
{
    // Closes the compute command buffer and submits it on the graphics queue;
    // comp_fence signals on completion.
    VkResult res = vkEndCommandBuffer(g_vk.comp_cmd);
    VkCheckA("FinishComputePass failure: vkEndCommandBuffer error", res);
    VkCommandBufferSubmitInfo cmd_submit = {
        sType: VK_STRUCTURE_TYPE_COMMAND_BUFFER_SUBMIT_INFO,
        commandBuffer: g_vk.comp_cmd,
    };
    VkSubmitInfo2 submit = {
        sType: VK_STRUCTURE_TYPE_SUBMIT_INFO_2,
        commandBufferInfoCount: 1,
        pCommandBufferInfos: &cmd_submit,
    };
    res = vkQueueSubmit2(g_vk.gfx_queue, 1, &submit, g_vk.comp_fence);
    VkCheckA("FinishComputePass failure: vkQueueSubmit2 error", res);
}
void
CreateBufferView(Descriptor* desc)
{
    // Allocates the device-local backing buffer, then wraps it in a texel
    // buffer view with the descriptor's requested format covering the full range.
    CreateBuffer(&desc.buffer_view.base, BT.BufferView, desc.buffer_view.size, false);
    VkBufferViewCreateInfo view_info = {
        sType: VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
        buffer: desc.buffer_view.buffer,
        format: desc.buffer_view.format,
        range: desc.buffer_view.size,
    };
    VkResult res = vkCreateBufferView(g_vk.device, &view_info, null, &desc.buffer_view.view);
    VkCheckA("CreateBufferView failure: vkCreateBufferView failed", res);
}
void
PushConstants(T)(Pipeline pipeline_id, T* pc)
{
    // Pushes `*pc` as the push-constant block for the given pipeline on the
    // current frame command buffer, deriving the stage flags from the
    // pipeline's bind point (graphics: vertex+fragment, otherwise compute).
    assert(pipeline_id > 0, "PushConstants pipeline_id == 0");
    PipelineHandles* handles = g_vk.pipeline_handles.ptr + pipeline_id;
    VkShaderStageFlags stages;
    if(handles.type == VK_PIPELINE_BIND_POINT_GRAPHICS)
    {
        stages = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT;
    }
    else
    {
        stages = VK_SHADER_STAGE_COMPUTE_BIT;
    }
    vkCmdPushConstants(g_vk.cmd, handles.layout, stages, 0, T.sizeof, pc);
}
void
ImageBarrier()
{
    // Full fragment-shader -> fragment-shader memory barrier, scoped by-region
    // (used with the render pass self-dependency).
    // FIX: the synchronization2 barrier struct was populated with legacy
    // 32-bit VK_PIPELINE_STAGE_/VK_ACCESS_ flags. The *_2_* constants have
    // identical bit values for these stages/accesses, so behavior is
    // unchanged, but they are the type-correct 64-bit flags for
    // VkMemoryBarrier2's mask fields.
    VkMemoryBarrier2 barrier = {
        sType: VK_STRUCTURE_TYPE_MEMORY_BARRIER_2,
        srcStageMask: VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT,
        srcAccessMask: VK_ACCESS_2_SHADER_READ_BIT | VK_ACCESS_2_SHADER_WRITE_BIT,
        dstStageMask: VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT,
        dstAccessMask: VK_ACCESS_2_SHADER_READ_BIT | VK_ACCESS_2_SHADER_WRITE_BIT,
    };
    VkDependencyInfo dependency = {
        sType: VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
        dependencyFlags: VK_DEPENDENCY_BY_REGION_BIT,
        memoryBarrierCount: 1,
        pMemoryBarriers: &barrier,
    };
    vkCmdPipelineBarrier2(g_vk.cmd, &dependency);
}
// Needs to be done before writing to the transfer buffer as otherwise it'll overwrite it while previous transfers are occurring
bool
TransferReady()
{
    // Blocks until the immediate-submit fence signals, i.e. the GPU is done
    // reading the shared staging buffer.
    VkResult res = vkWaitForFences(g_vk.device, 1, &g_vk.imm_fence, true, 999999999);
    return VkCheck("Transfer failure: vkWaitForFences error", res);
}
bool
Transfer(bool image, T)(Descriptor* desc, T data) if(isArray!(T) || isPointer!(T))
{
    // Streams `data` into the descriptor's backing resource via the shared
    // host-visible staging buffer, splitting into staging-sized chunks.
    // `data` is either a slice (length * element size bytes) or a pointer to
    // a single struct.
    bool success = true;
    u64 transfer_length = cast(u64)g_vk.transfer_buf.data.length;
    static if(isArray!(T))
    {
        u64 data_length = cast(u64)(data.length * (*data.init.ptr).sizeof);
        const(u8)* bytes = cast(const(u8)*)data.ptr;
    }
    else static if(isPointer!(T))
    {
        u64 data_length = cast(u64)(*T).sizeof;
        const(u8)* bytes = cast(const(u8)*)data;
    }
    static if(image)
    {
        // Chunked image uploads are not supported: each chunk would have to
        // target a distinct image region. The whole payload must fit at once.
        assert(data_length <= transfer_length, "Transfer failure: image data exceeds staging buffer");
    }
    u64 copied = 0;
    while(copied != data_length)
    {
        if(copied != 0)
        {
            // BUGFIX: the staging buffer is reused each chunk; wait for the
            // previous chunk's copy before overwriting it (previously only
            // the caller's initial TransferReady protected the first chunk).
            TransferReady();
        }
        u64 remaining = data_length - copied;
        u64 copy_length = min(transfer_length, remaining);
        // BUGFIX: the source slice was `data[copied .. copy_length]` — wrong
        // bounds for every chunk after the first, and it mixed element counts
        // with byte counts for non-u8 element types. Stage raw bytes instead.
        g_vk.transfer_buf.data[0 .. copy_length] = bytes[copied .. copied + copy_length];
        static if(image)
        {
            auto fn = function(Descriptor* desc, VkBufferImageCopy copy)
            {
                Transition(g_vk.imm_cmd, desc, IL.TransferDst);
                vkCmdCopyBufferToImage(g_vk.imm_cmd, g_vk.transfer_buf.buffer, desc.image.image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &copy);
                Transition(g_vk.imm_cmd, desc, IL.ReadOnly);
            };
            VkBufferImageCopy copy = {
                bufferRowLength: desc.image.w,
                bufferImageHeight: desc.image.h,
                imageSubresource: {
                    aspectMask: VK_IMAGE_ASPECT_COLOR_BIT,
                    layerCount: 1,
                },
                imageExtent: {
                    width: desc.image.w,
                    height: desc.image.h,
                    depth: 1,
                },
                // BUGFIX: bufferOffset was `copied`, but every chunk is
                // staged at the START of the transfer buffer.
                bufferOffset: 0,
            };
        }
        else
        {
            auto fn = function(Descriptor* desc, VkBufferCopy copy)
            {
                vkCmdCopyBuffer(g_vk.imm_cmd, g_vk.transfer_buf.buffer, desc.buffer.buffer, 1, &copy);
            };
            VkBufferCopy copy = {
                srcOffset: 0,
                dstOffset: copied,
                size: copy_length,
            };
        }
        success = ImmSubmit(desc, copy, fn);
        copied += copy_length;
    }
    WaitForTransfers();
    return success;
}
// Blit between two descriptor-backed images using their tracked layouts.
void
Copy(VkCommandBuffer cmd, Descriptor* src, Descriptor* dst, VkExtent2D src_ext, VkExtent2D dst_ext)
{
	Copy(cmd, src.image.image, dst.image.image, src.image.layout, dst.image.layout, src_ext, dst_ext);
}
// Blit from a descriptor-backed image to a raw VkImage with an explicit destination layout.
void
Copy(VkCommandBuffer cmd, Descriptor* src, VkImage dst, ImageLayout dst_layout, VkExtent2D src_ext, VkExtent2D dst_ext)
{
	Copy(cmd, src.image.image, dst, src.image.layout, dst_layout, src_ext, dst_ext);
}
// Full-image linear blit from src to dst (single color layer, mip 0),
// scaling from src_ext to dst_ext.
void
Copy(VkCommandBuffer cmd, VkImage src, VkImage dst, ImageLayout src_layout, ImageLayout dst_layout, VkExtent2D src_ext, VkExtent2D dst_ext)
{
	VkImageBlit region = {
		srcOffsets: [
			{ x: 0, y: 0 },
			{ x: cast(i32)src_ext.width, y: cast(i32)src_ext.height, z: 1 },
		],
		dstOffsets: [
			{ x: 0, y: 0 },
			{ x: cast(i32)dst_ext.width, y: cast(i32)dst_ext.height, z: 1 },
		],
		srcSubresource: {
			aspectMask: VK_IMAGE_ASPECT_COLOR_BIT,
			baseArrayLayer: 0,
			layerCount: 1,
			mipLevel: 0,
		},
		dstSubresource: {
			aspectMask: VK_IMAGE_ASPECT_COLOR_BIT,
			baseArrayLayer: 0,
			layerCount: 1,
			mipLevel: 0,
		},
	};
	vkCmdBlitImage(cmd, src, src_layout, dst, dst_layout, 1, &region, VK_FILTER_LINEAR);
}
void
Bind(Pipeline pipeline_handle, DescSet[] sets, bool compute = false)
{
assert(pipeline_handle > 0, "Bind failure: pipeline is 0");
VkCommandBuffer cmd = (compute ? g_vk.comp_cmd : g_vk.cmd);
PipelineHandles* pipeline = g_vk.pipeline_handles.ptr + pipeline_handle;
BindPipeline(cmd, pipeline);
u32[] offsets;
if(g_vk.global_set.dynamic_count > 0)
{
offsets = Alloc!(u32)(g_vk.FrameArena(), g_vk.global_set.dynamic_count);
}
vkCmdBindDescriptorSets(
cmd,
pipeline.type,
pipeline.layout,
0,
1,
&g_vk.global_set.handle,
cast(u32)offsets.length,
offsets.ptr
);
u32 offset_count;
VkDescriptorSet[] handles = Alloc!(VkDescriptorSet)(g_vk.FrameArena(), sets.length);
foreach(i, set; sets)
{
handles[i] = set.handle;
offset_count += set.dynamic_count;
}
if(offset_count > 0)
{
offsets = Alloc!(u32)(g_vk.FrameArena(), offset_count);
}
vkCmdBindDescriptorSets(
cmd,
pipeline.type,
pipeline.layout,
1,
cast(u32)handles.length,
handles.ptr,
cast(u32)offsets.length,
offsets.ptr
);
}
// Claims the next slot in the pipeline-handle table and stamps it with its
// own index. Slot 0 is never returned after first use, since indices grow.
PipelineHandles*
NewPipeline()
{
	u32 slot = g_vk.pipeline_count;
	g_vk.pipeline_count += 1;
	PipelineHandles* pipeline = g_vk.pipeline_handles.ptr + slot;
	pipeline.index = slot;
	return pipeline;
}
// Binds a pipeline plus the global descriptor set (slot 0) and one per-draw
// set (slot 1). Dynamic descriptor offsets are always bound at 0 — the slices
// below are only zero-filled placeholders.
void
Bind(Pipeline pipeline_handle, DescSet set, bool compute = false)
{
	assert(pipeline_handle > 0, "Bind failure: pipeline is 0");
	VkCommandBuffer cmd = (compute ? g_vk.comp_cmd : g_vk.cmd);
	PipelineHandles* pipeline = g_vk.pipeline_handles.ptr + pipeline_handle;
	BindPipeline(cmd, pipeline);
	u32[] global_offsets;
	if(g_vk.global_set.dynamic_count > 0)
	{
		global_offsets = Alloc!(u32)(g_vk.FrameArena(), g_vk.global_set.dynamic_count);
	}
	vkCmdBindDescriptorSets(
		cmd,
		pipeline.type,
		pipeline.layout,
		0,
		1,
		&g_vk.global_set.handle,
		cast(u32)global_offsets.length,
		global_offsets.ptr
	);
	// BUG FIX: previously the global set's offset slice was reused here when
	// set.dynamic_count == 0, passing a stale non-zero dynamicOffsetCount for
	// sets that have no dynamic descriptors.
	u32[] set_offsets;
	if(set.dynamic_count > 0)
	{
		set_offsets = Alloc!(u32)(g_vk.FrameArena(), set.dynamic_count);
	}
	vkCmdBindDescriptorSets(
		cmd,
		pipeline.type,
		pipeline.layout,
		1,
		1,
		&set.handle,
		cast(u32)set_offsets.length,
		set_offsets.ptr
	);
}
// Records a new target resolution and flags the swapchain for recreation on
// the next opportunity.
void
SetExtent(u32 x, u32 y)
{
	g_vk.res = [x, y];
	g_vk.recreate_swapchain = true;
}
// Binds the pipeline and resets viewport + scissor to cover the full
// swapchain extent (both are dynamic state on every pipeline here).
void
BindPipeline(VkCommandBuffer cmd, PipelineHandles* pipeline)
{
	vkCmdBindPipeline(cmd, pipeline.type, pipeline.handle);
	g_vk.last_pipeline = pipeline.handle;
	VkViewport full_viewport = {
		x: 0.0,
		y: 0.0,
		width: cast(f32)g_vk.swapchain_extent.width,
		height: cast(f32)g_vk.swapchain_extent.height,
		minDepth: 0.0,
		maxDepth: 1.0,
	};
	vkCmdSetViewport(cmd, 0, 1, &full_viewport);
	ResetScissor(cmd);
}
// Sets the scissor rectangle on the main command buffer.
void
SetScissor(u32 x, u32 y, u32 w, u32 h)
{
	VkRect2D rect = {
		offset: { x: x, y: y },
		extent: { width: w, height: h },
	};
	vkCmdSetScissor(g_vk.cmd, 0, 1, &rect);
}
// Restores the scissor to the full swapchain extent. Defaults to the main
// command buffer when no command buffer is supplied.
void
ResetScissor(VkCommandBuffer cmd = cast(VkCommandBuffer)VK_NULL_HANDLE)
{
	if(cmd == VK_NULL_HANDLE)
	{
		cmd = g_vk.cmd;
	}
	VkRect2D full_rect = {
		offset: { x: 0, y: 0 },
		extent: { width: g_vk.swapchain_extent.width, height: g_vk.swapchain_extent.height },
	};
	vkCmdSetScissor(cmd, 0, 1, &full_rect);
}
// Transitions a descriptor-backed image to `new_layout` and records the new
// layout on the descriptor so later transitions start from the right state.
void
Transition(VkCommandBuffer cmd, Descriptor* desc, ImageLayout new_layout)
{
	ImageDescCheck(desc);
	ImageLayout old_layout = desc.image.layout;
	desc.image.layout = new_layout;
	Transition(cmd, desc.image.image, old_layout, new_layout);
}
// Records a layout transition for `image` from current_layout to new_layout.
// Uses ALL_COMMANDS for both stage masks — a deliberately heavy full barrier
// that serializes around the transition rather than tracking precise stages.
void
Transition(VkCommandBuffer cmd, VkImage image, ImageLayout current_layout, ImageLayout new_layout)
{
	VkImageMemoryBarrier2 barrier = {
		sType: VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
		srcStageMask: VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT,
		srcAccessMask: VK_ACCESS_2_MEMORY_WRITE_BIT,
		dstStageMask: VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT,
		dstAccessMask: VK_ACCESS_2_MEMORY_WRITE_BIT | VK_ACCESS_2_MEMORY_READ_BIT,
		oldLayout: current_layout,
		newLayout: new_layout,
		image: image,
		subresourceRange: {
			// Aspect is inferred from the target layout: depth-attachment
			// transitions touch the depth aspect, everything else color.
			aspectMask: new_layout == IL.DepthAttach ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT,
			baseMipLevel: 0,
			levelCount: VK_REMAINING_MIP_LEVELS,
			baseArrayLayer: 0,
			layerCount: VK_REMAINING_ARRAY_LAYERS,
		},
	};
	VkDependencyInfo dep_info = {
		sType: VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
		imageMemoryBarrierCount: 1,
		pImageMemoryBarriers: &barrier,
	};
	vkCmdPipelineBarrier2(cmd, &dep_info);
}
// Convenience overload: checks the descriptor's stored type.
bool
ImageDescCheck(Descriptor* desc)
{
	return ImageDescCheck(desc.type);
}
// True for descriptor types that are backed by a VkImage.
bool
ImageDescCheck(DescType desc_type)
{
	switch(desc_type) with(DescType)
	{
		case CombinedSampler, Image, StorageImage, InputAttach: return true;
		default: return false;
	}
}
// Creates a VkShaderModule from raw SPIR-V bytes. Returns false (and logs)
// when module creation fails.
bool
BuildShader(Shader* shader, u8[] bytes)
{
	VkShaderModuleCreateInfo module_info = {
		sType: VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
		codeSize: bytes.length,
		pCode: cast(uint*)bytes.ptr,
	};
	VkResult res = vkCreateShaderModule(g_vk.device, &module_info, null, shader);
	return VkCheck("vkCreateShaderModule failure", res);
}
// Builds a graphics pipeline from `build_info` (shaders, blend state, vertex
// layout, specialization constants) and writes its handle index into
// `*pipeline_handle`. Returns false if shader-module or pipeline creation
// fails. Shader modules are destroyed on exit regardless of success.
bool
CreateGraphicsPipeline(Pipeline* pipeline_handle, GfxPipelineInfo* build_info)
{
	PipelineHandles* pipeline = NewPipeline();
	pipeline.type = VK_PIPELINE_BIND_POINT_GRAPHICS;
	pipeline.layout = build_info.layout;
	VkDynamicState[4] dyn_state = [
		VK_DYNAMIC_STATE_VIEWPORT,
		VK_DYNAMIC_STATE_SCISSOR,
		VK_DYNAMIC_STATE_STENCIL_OP,
		VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE,
	];
	VkPipelineDynamicStateCreateInfo dyn_info = {
		sType: VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
		pDynamicStates: dyn_state.ptr,
		dynamicStateCount: cast(u32)dyn_state.length,
	};
	VkPipelineInputAssemblyStateCreateInfo assembly_info = {
		sType: VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
		topology: VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST,
		primitiveRestartEnable: VK_FALSE,
	};
	VkPipelineRasterizationStateCreateInfo rasterization_info = {
		sType: VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
		cullMode: VK_CULL_MODE_BACK_BIT,
		polygonMode: VK_POLYGON_MODE_FILL,
		lineWidth: 1.0,
		frontFace: cast(VkFrontFace)build_info.front_face,
	};
	VkPipelineMultisampleStateCreateInfo multisample_info = {
		sType: VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
		rasterizationSamples: VK_SAMPLE_COUNT_1_BIT,
		minSampleShading: 1.0,
		alphaToCoverageEnable: VK_FALSE,
		alphaToOneEnable: VK_FALSE,
	};
	// Reverse-Z convention: greater-or-equal depth test.
	VkPipelineDepthStencilStateCreateInfo depth_info = {
		sType: VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
		depthTestEnable: VK_TRUE,
		depthWriteEnable: VK_TRUE,
		depthCompareOp: VK_COMPARE_OP_GREATER_OR_EQUAL,
		depthBoundsTestEnable: VK_FALSE,
		stencilTestEnable: VK_FALSE,
		minDepthBounds: 0.0,
		maxDepthBounds: 1.0,
	};
	VkPipelineRenderingCreateInfo rendering_info = {
		sType: VK_STRUCTURE_TYPE_PIPELINE_RENDERING_CREATE_INFO,
		colorAttachmentCount: 1,
		pColorAttachmentFormats: cast(VkFormat*)&g_vk.draw_image.image.format,
		depthAttachmentFormat: g_vk.depth_image.image.format,
	};
	VkPipelineColorBlendAttachmentState blend_state = {
		colorWriteMask: VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT,
		blendEnable: VK_TRUE,
		srcColorBlendFactor: build_info.src_color,
		dstColorBlendFactor: build_info.dst_color,
		colorBlendOp: build_info.color_op,
		srcAlphaBlendFactor: build_info.src_alpha,
		dstAlphaBlendFactor: build_info.dst_alpha,
		alphaBlendOp: build_info.alpha_op,
	};
	VkPipelineColorBlendStateCreateInfo blend_info = {
		sType: VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
		logicOpEnable: VK_FALSE,
		logicOp: VK_LOGIC_OP_COPY,
		attachmentCount: 1,
		pAttachments: &blend_state,
	};
	VkVertexInputBindingDescription vertex_input_desc = {
		binding: 0,
		inputRate: cast(VkVertexInputRate)build_info.input_rate,
		stride: build_info.input_rate_stride,
	};
	VkPipelineVertexInputStateCreateInfo input_info = {
		sType: VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
		vertexBindingDescriptionCount: 1,
		pVertexBindingDescriptions: &vertex_input_desc,
		vertexAttributeDescriptionCount: cast(u32)build_info.vertex_attributes.length,
		pVertexAttributeDescriptions: build_info.vertex_attributes.ptr,
	};
	VkPipelineViewportStateCreateInfo viewport_info = {
		sType: VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
		viewportCount: 1,
		scissorCount: 1,
	};
	// NOTE: index 0 is the FRAGMENT stage, index 1 the VERTEX stage.
	VkPipelineShaderStageCreateInfo[2] shader_info = [
		{
			sType: VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
			stage: VK_SHADER_STAGE_FRAGMENT_BIT,
			pName: "main",
		},
		{
			sType: VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
			stage: VK_SHADER_STAGE_VERTEX_BIT,
			pName: "main",
		},
	];
	bool success = true;
	Shader frag_module, vert_module;
	success &= BuildShader(&frag_module, build_info.frag_shader);
	success &= BuildShader(&vert_module, build_info.vertex_shader);
	scope(exit)
	{
		Destroy(frag_module);
		Destroy(vert_module);
	}
	if(success)
	{
		__traits(getMember, shader_info.ptr + 0, "module") = frag_module;
		__traits(getMember, shader_info.ptr + 1, "module") = vert_module;
		VkSpecializationInfo vert_spec_info = {
			dataSize: build_info.vert_spec.size,
			mapEntryCount: cast(u32)build_info.vert_spec.entries.length,
			pMapEntries: build_info.vert_spec.entries.ptr,
			pData: build_info.vert_spec.data,
		};
		// BUG FIX: the specialization infos were previously swapped — the
		// vertex spec was attached to shader_info[0] (the fragment stage) and
		// the fragment spec to shader_info[1] (the vertex stage).
		if(build_info.vert_spec.entries.length > 0)
		{
			shader_info[1].pSpecializationInfo = &vert_spec_info;
		}
		VkSpecializationInfo frag_spec_info = {
			dataSize: build_info.frag_spec.size,
			mapEntryCount: cast(u32)build_info.frag_spec.entries.length,
			pMapEntries: build_info.frag_spec.entries.ptr,
			pData: build_info.frag_spec.data,
		};
		if(build_info.frag_spec.entries.length > 0)
		{
			shader_info[0].pSpecializationInfo = &frag_spec_info;
		}
		VkGraphicsPipelineCreateInfo create_info = {
			sType: VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
			pNext: &rendering_info,
			pVertexInputState: &input_info,
			pInputAssemblyState: &assembly_info,
			pViewportState: &viewport_info,
			pRasterizationState: &rasterization_info,
			pMultisampleState: &multisample_info,
			pColorBlendState: &blend_info,
			pDepthStencilState: &depth_info,
			pDynamicState: &dyn_info,
			stageCount: cast(u32)shader_info.length,
			pStages: shader_info.ptr,
			renderPass: g_vk.render_pass,
			layout: build_info.layout,
		};
		VkResult result = vkCreateGraphicsPipelines(g_vk.device, null, 1, &create_info, null, &pipeline.handle);
		success = VkCheck("CreateGraphicsPipeline failure", result);
		*pipeline_handle = pipeline.index;
	}
	return success;
}
// Builds a compute pipeline from `comp_info` (shader + optional specialization
// constants) and returns its handle index. Returns 0 (the invalid-pipeline
// sentinel asserted on by Bind/PushConstants) if the shader module cannot be
// created.
Pipeline
CreateComputePipeline(CompPipelineInfo* comp_info)
{
	VkComputePipelineCreateInfo info = {
		sType: VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
		layout: comp_info.layout,
		stage: {
			sType: VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
			stage: VK_SHADER_STAGE_COMPUTE_BIT,
			pName: "main",
		},
	};
	Shader comp_module;
	// BUG FIX: the BuildShader result was previously ignored, so pipeline
	// creation proceeded with an invalid shader module on failure.
	if(!BuildShader(&comp_module, comp_info.shader))
	{
		return 0;
	}
	scope(exit) Destroy(comp_module);
	__traits(getMember, &info.stage, "module") = comp_module;
	VkSpecializationInfo spec_info = {
		dataSize: comp_info.spec.size,
		mapEntryCount: cast(u32)comp_info.spec.entries.length,
		pMapEntries: comp_info.spec.entries.ptr,
		pData: comp_info.spec.data,
	};
	if(comp_info.spec.entries.length > 0)
	{
		info.stage.pSpecializationInfo = &spec_info;
	}
	PipelineHandles* pipeline = NewPipeline();
	pipeline.type = VK_PIPELINE_BIND_POINT_COMPUTE;
	pipeline.layout = comp_info.layout;
	Pipeline pipeline_handle = pipeline.index;
	VkResult result = vkCreateComputePipelines(g_vk.device, null, 1, &info, null, &pipeline.handle);
	VkCheck("CreateComputePipeline failure", result);
	return pipeline_handle;
}
// Forwards any type whose `.v` member converts to f32[4] to the array overload.
void
ClearDepth(T)(T color)
{
	static if(is(T.v : f32[4]))
	{
		ClearDepth(color.v);
	}
	else
	{
		static assert(false, "Invalid type for clear depth");
	}
}
// Forwards any type whose `.v` member converts to f32[4] to the array overload.
void
ClearColor(T)(T color)
{
	static if(is(T.v : f32[4]))
	{
		ClearColor(color.v);
	}
	// BUG FIX: the diagnostic previously said "clear depth" (copy-paste).
	else static assert(false, "Invalid type for clear color");
}
// Clears the depth image by transitioning it to TransferDst and reusing the
// color-clear path.
// NOTE(review): the shared ClearColor helper issues vkCmdClearColorImage even
// for depth-usage images — the spec requires vkCmdClearDepthStencilImage for
// depth aspects; confirm this passes validation on target drivers.
void
ClearDepth(f32[4] color = [0.0, 0.0, 0.0, 0.0])
{
	Transition(g_vk.cmd, &g_vk.depth_image, IL.TransferDst);
	ClearColor(&g_vk.depth_image, color);
}
// Clears the main draw image to `color`, transitioning it to TransferDst first.
void
ClearColor(f32[4] color)
{
	Transition(g_vk.cmd, &g_vk.draw_image, IL.TransferDst);
	ClearColor(&g_vk.draw_image, color);
}
// Clears level 0 / layer 0 of the descriptor's image to `color` using its
// currently-tracked layout.
// NOTE(review): for depth-usage images this selects the DEPTH aspect but still
// calls vkCmdClearColorImage, which the spec restricts to color aspects —
// confirm whether vkCmdClearDepthStencilImage should be used instead.
void
ClearColor(Descriptor* desc, f32[4] color)
{
	VkImageSubresourceRange clear_range = {
		aspectMask: (desc.image.usage == IU.Depth ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT),
		levelCount: 1,
		layerCount: 1,
	};
	vkCmdClearColorImage(
		g_vk.cmd,
		desc.image.image,
		desc.image.layout,
		cast(VkClearColorValue*)color,
		1,
		&clear_range
	);
}
// Blocks until the device has finished all queued work.
void
WaitIdle()
{
	vkDeviceWaitIdle(g_vk.device);
}
// Destroys a shader module.
void
Destroy(Shader shader)
{
	vkDestroyShaderModule(g_vk.device, shader, null);
}
// Destroys the VkPipeline behind a pipeline handle index. The slot in the
// handle table is not reclaimed.
void
Destroy(Pipeline pipeline)
{
	vkDestroyPipeline(g_vk.device, g_vk.pipeline_handles[pipeline].handle, null);
}
// Destroys the active descriptor pool plus every retired pool on the
// full-pools list.
void
DestroyDescriptorPools()
{
	vkDestroyDescriptorPool(g_vk.device, g_vk.active_pool, null);
	for(Node!(VkDescriptorPool)* node = g_vk.full_pools.first; node != null; node = node.next)
	{
		vkDestroyDescriptorPool(g_vk.device, node.value, null);
	}
}
// Creates a fresh descriptor pool and makes it the active pool. The previous
// active pool (if any) is retired onto the full-pools list so
// DestroyDescriptorPools can clean it up later.
void
PushDescriptorPool()
{
	VkDescriptorPoolSize[8] pool_sizes = [
		{ type: VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, descriptorCount: 4096 },
		{ type: VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, descriptorCount: 4096 },
		{ type: VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, descriptorCount: 4096 },
		{ type: VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, descriptorCount: 4096 },
		{ type: VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, descriptorCount: 4096 },
		{ type: VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, descriptorCount: 4096 },
		{ type: VK_DESCRIPTOR_TYPE_SAMPLER, descriptorCount: 4096 },
		{ type: VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, descriptorCount: 4096 },
	];
	VkDescriptorPoolCreateInfo pool_info = {
		sType: VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
		poolSizeCount: cast(u32)pool_sizes.length,
		pPoolSizes: pool_sizes.ptr,
		maxSets: MAX_SETS,
	};
	VkDescriptorPool pool;
	VkResult result = vkCreateDescriptorPool(g_vk.device, &pool_info, null, &pool);
	bool success = VkCheck("vkCreateDescriptorPool failure", result);
	assert(success, "vkCreateDescriptorPool error");
	// BUG FIX: the condition was previously inverted — a NULL active pool was
	// pushed onto the full-pools list while real exhausted pools were leaked.
	if(g_vk.active_pool != null && g_vk.active_pool != VK_NULL_HANDLE)
	{
		Node!(VkDescriptorPool)* node = Alloc!(Node!(VkDescriptorPool));
		node.value = g_vk.active_pool;
		PushFront(&g_vk.full_pools, node, null);
	}
	g_vk.active_pool = pool;
}
// Fills a VkWriteDescriptorSet for one descriptor. The pointed-to info
// structs are allocated from the frame arena so they stay alive until the
// batched vkUpdateDescriptorSets call in Write().
void
PrepareWrite(VkWriteDescriptorSet* write, DescSet set, Descriptor* desc)
{
	Arena* arena = g_vk.FrameArena();
	write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
	write.descriptorType = desc.type;
	write.dstSet = set.handle;
	write.dstBinding = desc.binding;
	write.descriptorCount = 1;
	write.dstArrayElement = desc.index;
	switch(desc.type) with(DescType)
	{
		case Image, StorageImage, InputAttach:
		{
			// Image descriptors reference the view plus the layout the shader
			// expects at access time.
			VkDescriptorImageInfo* info = Alloc!(VkDescriptorImageInfo)(arena);
			info.imageView = desc.image.view;
			info.imageLayout = desc.image.shader_layout;
			write.pImageInfo = info;
		} break;
		case Uniform, Storage:
		{
			// Whole-buffer binding: offset 0, full size.
			VkDescriptorBufferInfo* info = Alloc!(VkDescriptorBufferInfo)(arena);
			info.buffer = desc.buffer.buffer;
			info.offset = cast(VkDeviceSize)0;
			info.range = cast(VkDeviceSize)desc.buffer.size;
			write.pBufferInfo = info;
		} break;
		case UniformTexelBuf, StorageTexelBuf:
		{
			// Texel buffers point straight at the persistent view handle on
			// the descriptor, so no arena allocation is needed.
			write.pTexelBufferView = &desc.buffer_view.view;
		} break;
		case Sampler:
		{
			VkDescriptorImageInfo* info = Alloc!(VkDescriptorImageInfo)(arena);
			info.sampler = desc.sampler;
			write.pImageInfo = info;
		} break;
		default: assert(false, "Unsupported descriptor type");
	}
}
// Batch-updates a descriptor set: one VkWriteDescriptorSet per descriptor,
// submitted in a single vkUpdateDescriptorSets call.
void
Write(DescSet set, Descriptor[] descs)
{
	Arena* arena = g_vk.FrameArena();
	VkWriteDescriptorSet[] writes = Alloc!(VkWriteDescriptorSet)(arena, descs.length);
	foreach(i, ref desc; descs)
	{
		PrepareWrite(&writes[i], set, &desc);
	}
	vkUpdateDescriptorSets(g_vk.device, cast(u32)writes.length, writes.ptr, 0, null);
}
// Single-descriptor update convenience wrapper.
void
Write(DescSet set, Descriptor* desc)
{
	VkWriteDescriptorSet single_write;
	PrepareWrite(&single_write, set, desc);
	vkUpdateDescriptorSets(g_vk.device, 1, &single_write, 0, null);
}
// Points binding 1 of the conversion descriptor set at the given storage buffer.
void
WriteConvDescriptor(Buffer* buf)
{
	VkDescriptorBufferInfo buffer_info = {
		buffer: buf.buffer,
		offset: 0,
		range: buf.size,
	};
	VkWriteDescriptorSet desc_write = {
		sType: VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
		dstSet: g_vk.conv_desc_set.handle,
		dstBinding: 1,
		descriptorCount: 1,
		descriptorType: VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
		pBufferInfo: &buffer_info,
	};
	vkUpdateDescriptorSets(g_vk.device, 1, &desc_write, 0, null);
}
// Points binding 0 of the conversion descriptor set at the given storage image
// view, bound in GENERAL layout.
void
WriteConvDescriptor(ImageView* view)
{
	VkDescriptorImageInfo img_info = {
		imageView: view.view,
		imageLayout: VK_IMAGE_LAYOUT_GENERAL,
	};
	VkWriteDescriptorSet desc_write = {
		sType: VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
		dstSet: g_vk.conv_desc_set.handle,
		dstBinding: 0,
		descriptorCount: 1,
		descriptorType: VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
		pImageInfo: &img_info,
	};
	vkUpdateDescriptorSets(g_vk.device, 1, &desc_write, 0, null);
}
// Dispatches one 16x16 workgroup per tile covering the swapchain extent
// (ceiling division so partial tiles at the edges are included).
void
Dispatch(VkCommandBuffer cmd)
{
	u32 groups_x = (g_vk.swapchain_extent.width + 15) / 16;
	u32 groups_y = (g_vk.swapchain_extent.height + 15) / 16;
	vkCmdDispatch(cmd, groups_x, groups_y, 1);
}
// Dispatch on the main command buffer.
void
Dispatch()
{
	Dispatch(g_vk.cmd);
}
// Returns true when `result` is VK_SUCCESS. Out-of-memory results abort
// immediately (placeholder until real handling exists); any other failure is
// logged with the stringified result and false is returned.
bool
VkCheck(string message, VkResult result)
{
	bool success = true;
	// TODO: Handle error cases that can be handled
	if(result == VK_ERROR_OUT_OF_DEVICE_MEMORY)
	{
		assert(false, "Handle VK_ERROR_OUT_OF_DEVICE_MEMORY");
	}
	else if(result == VK_ERROR_OUT_OF_HOST_MEMORY)
	{
		assert(false, "Handle VK_ERROR_OUT_OF_HOST_MEMORY");
	}
	else if(result != VK_SUCCESS)
	{
		success = false;
		// Format into a fixed stack buffer; the zero-fill ensures the unused
		// tail is NUL so the whole buffer can be logged with %r.
		char[512] buf;
		buf[] = '\0';
		buf.sformat("%s: %s", message, VkResultStr(result));
		Logf("%r", buf);
	}
	return success;
}
// Like VkCheck but aborts on failure.
// BUG FIX: VkCheck was previously called inside the assert condition, so in
// release builds (asserts compiled out) the check — and its error logging —
// vanished entirely. Evaluate it unconditionally so failures are always
// logged, then assert on the result.
void
VkCheckA(string message, VkResult result)
{
	bool success = VkCheck(message, result);
	assert(success, "Aborting program due to failure");
}
// Picks the first entry of VK_IMAGE_FORMATS that the physical device supports
// for 2D optimal-tiling draw-image usage. Aborts if none is supported instead
// of silently returning VK_FORMAT_UNDEFINED as before.
Format
GetDrawImageFormat()
{
	VkFormat selected_format;
	foreach(format; VK_IMAGE_FORMATS)
	{
		VkImageFormatProperties props;
		VkResult result = vkGetPhysicalDeviceImageFormatProperties(
			g_vk.physical_device,
			format,
			VK_IMAGE_TYPE_2D,
			VK_IMAGE_TILING_OPTIMAL,
			IU.Draw,
			0,
			&props
		);
		if(result == VK_ERROR_FORMAT_NOT_SUPPORTED)
		{
			continue;
		}
		if(result == VK_SUCCESS)
		{
			selected_format = format;
			break;
		}
	}
	// BUG FIX: previously fell through silently with a zero-initialized
	// (VK_FORMAT_UNDEFINED) format when no candidate was usable.
	assert(selected_format != VK_FORMAT_UNDEFINED, "GetDrawImageFormat failure: no supported draw image format");
	return cast(Format)selected_format;
}
// Queries the surface's formats and present modes, preferring MAILBOX over
// the always-available FIFO and B8G8R8A8_UNORM over the first listed format.
// Results are stored on g_vk for swapchain creation.
void
SelectSwapchainFormats()
{
	Arena* arena = &g_vk.frame_arenas[0];
	u32 format_count;
	vkGetPhysicalDeviceSurfaceFormatsKHR(g_vk.physical_device, g_vk.surface, &format_count, null);
	VkSurfaceFormatKHR[] formats = Alloc!(VkSurfaceFormatKHR)(arena, format_count);
	vkGetPhysicalDeviceSurfaceFormatsKHR(g_vk.physical_device, g_vk.surface, &format_count, formats.ptr);
	u32 mode_count;
	vkGetPhysicalDeviceSurfacePresentModesKHR(g_vk.physical_device, g_vk.surface, &mode_count, null);
	VkPresentModeKHR[] modes = Alloc!(VkPresentModeKHR)(arena, mode_count);
	vkGetPhysicalDeviceSurfacePresentModesKHR(g_vk.physical_device, g_vk.surface, &mode_count, modes.ptr);
	// FIFO is the only mode the spec guarantees; upgrade to MAILBOX when present.
	g_vk.present_mode = modes.canFind(VK_PRESENT_MODE_MAILBOX_KHR)
		? VK_PRESENT_MODE_MAILBOX_KHR
		: VK_PRESENT_MODE_FIFO_KHR;
	g_vk.surface_format = formats[0];
	foreach(candidate; formats)
	{
		if(candidate.format == VK_FORMAT_B8G8R8A8_UNORM)
		{
			g_vk.surface_format = candidate;
			break;
		}
	}
}
// Creates a nearest-filtered, repeat-addressed sampler with max device
// anisotropy and the requested mipmap mode, stored on the descriptor.
void
CreateSampler(Descriptor* desc, MipmapMode mode)
{
	VkPhysicalDeviceProperties device_props;
	vkGetPhysicalDeviceProperties(g_vk.physical_device, &device_props);
	VkSamplerCreateInfo create_info = {
		sType: VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
		magFilter: VK_FILTER_NEAREST,
		minFilter: VK_FILTER_NEAREST,
		mipmapMode: mode,
		addressModeU: VK_SAMPLER_ADDRESS_MODE_REPEAT,
		addressModeV: VK_SAMPLER_ADDRESS_MODE_REPEAT,
		addressModeW: VK_SAMPLER_ADDRESS_MODE_REPEAT,
		anisotropyEnable: VK_TRUE,
		maxAnisotropy: device_props.limits.maxSamplerAnisotropy,
		borderColor: VK_BORDER_COLOR_INT_OPAQUE_BLACK,
		compareOp: VK_COMPARE_OP_ALWAYS,
	};
	VkResult res = vkCreateSampler(g_vk.device, &create_info, null, &desc.sampler);
	VkCheckA("vkCreateSampler failure", res);
}
// (Re)creates the offscreen draw image and depth image at the current
// swapchain extent.
// NOTE(review): `success` is never updated from the CreateDescriptor calls,
// so this always returns true — confirm whether CreateDescriptor reports
// failure and should be checked here.
bool
CreateDrawImages()
{
	bool success = true;
	Format draw_format = cast(Format)GetDrawImageFormat();
	Format depth_format = cast(Format)VK_FORMAT_D32_SFLOAT;
	u32 w = g_vk.swapchain_extent.width;
	u32 h = g_vk.swapchain_extent.height;
	CreateDescriptor(&g_vk.draw_image, DescInfo(type: DT.StorageImage, format: draw_format, usage: IU.Draw, w: w, h: h, binding: 0));
	CreateDescriptor(&g_vk.depth_image, DescInfo(type: DT.Image, format: depth_format, usage: IU.Depth, w: w, h: h, binding: 0));
	return success;
}
// Creates the swapchain and one Descriptor + image view per swapchain image,
// clamping the requested resolution to the surface capabilities. Surface
// format / present mode selection runs only on the first call.
// NOTE(review): oldSwapchain is never passed on recreation — confirm the
// prior vkDeviceWaitIdle in RecreateSwapchain makes that safe on all drivers.
bool
CreateSwapchain()
{
	bool success = true;
	Arena* arena = &g_vk.frame_arenas[0];
	VkSurfaceCapabilitiesKHR cap;
	vkGetPhysicalDeviceSurfaceCapabilitiesKHR(g_vk.physical_device, g_vk.surface, &cap);
	static bool initialized = false;
	if(!initialized)
	{
		SelectSwapchainFormats();
	}
	u32 w = clamp(cast(u32)g_vk.res[0], cap.minImageExtent.width, cap.maxImageExtent.width);
	u32 h = clamp(cast(u32)g_vk.res[1], cap.minImageExtent.height, cap.maxImageExtent.height);
	// Request one image above the minimum to reduce stalls, but never exceed
	// the driver maximum (maxImageCount == 0 means "no upper limit").
	u32 image_count = cap.minImageCount + 1;
	if(cap.maxImageCount > 0 && image_count > cap.maxImageCount)
	{
		image_count = cap.maxImageCount;
	}
	VkSwapchainCreateInfoKHR info = {
		sType: VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
		imageArrayLayers: 1,
		imageUsage: IU.Swapchain,
		compositeAlpha: VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR,
		clipped: VK_TRUE,
		imageSharingMode: VK_SHARING_MODE_EXCLUSIVE,
		minImageCount: image_count,
		surface: g_vk.surface,
		imageFormat: g_vk.surface_format.format,
		imageColorSpace: g_vk.surface_format.colorSpace,
		preTransform: cap.currentTransform,
		presentMode: g_vk.present_mode,
		imageExtent: {
			width: w,
			height: h,
		},
	};
	VkResult result = vkCreateSwapchainKHR(g_vk.device, &info, null, &g_vk.swapchain);
	success = VkCheck("vkCreateSwapchainKHR failure", result);
	u32 count;
	vkGetSwapchainImagesKHR(g_vk.device, g_vk.swapchain, &count, null);
	VkImage[] images = Alloc!(VkImage)(arena, count);
	vkGetSwapchainImagesKHR(g_vk.device, g_vk.swapchain, &count, images.ptr);
	g_vk.present_images = Alloc!(Descriptor)(&g_vk.arena, count);
	VkImageViewCreateInfo view_info = {
		sType: VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
		viewType: VK_IMAGE_VIEW_TYPE_2D,
		components: {
			r: VK_COMPONENT_SWIZZLE_IDENTITY,
			g: VK_COMPONENT_SWIZZLE_IDENTITY,
			b: VK_COMPONENT_SWIZZLE_IDENTITY,
			a: VK_COMPONENT_SWIZZLE_IDENTITY,
		},
		subresourceRange: {
			aspectMask: VK_IMAGE_ASPECT_COLOR_BIT,
			baseMipLevel: 0,
			levelCount: 1,
			baseArrayLayer: 0,
			layerCount: 1,
		},
	};
	foreach(i; 0 .. g_vk.present_images.length)
	{
		g_vk.present_images[i].image.image = images[i];
		g_vk.present_images[i].image.format = cast(Format)g_vk.surface_format.format;
		view_info.image = images[i];
		view_info.format = g_vk.surface_format.format;
		result = vkCreateImageView(g_vk.device, &view_info, null, &g_vk.present_images[i].image.view);
		// BUG FIX: was `success =`, so a failure on any view other than the
		// last was silently overwritten.
		success &= VkCheck("vkCreateImageView failure", result);
	}
	g_vk.swapchain_extent.width = w;
	g_vk.swapchain_extent.height = h;
	g_vk.swapchain_extent.depth = 1;
	if(!initialized && success)
	{
		initialized = true;
	}
	return success;
}
// Tears down and rebuilds everything tied to the swapchain extent. Order
// matters: the device must be idle before destroying in-flight resources, and
// the draw image must exist again before it is rewritten into the global set
// and attached to the framebuffer.
void
RecreateSwapchain()
{
	vkDeviceWaitIdle(g_vk.device);
	Destroy(g_vk.swapchain, g_vk.present_images, g_vk.device);
	Destroy(&g_vk.draw_image);
	Destroy(&g_vk.depth_image);
	vkDestroyFramebuffer(g_vk.device, g_vk.framebuffer, null);
	CreateSwapchain();
	CreateDrawImages();
	Write(g_vk.global_set, &g_vk.draw_image);
	CreateFramebuffer();
	g_vk.recreate_swapchain = false;
}
// Creates the framebuffer over the draw (color) and depth images at the
// current swapchain extent. Attachment order must match the render pass.
void
CreateFramebuffer()
{
	// Use a stack array instead of assigning an array literal directly to the
	// pointer member — the literal form allocates on the GC heap every call.
	VkImageView[2] attachments = [g_vk.draw_image.image.view, g_vk.depth_image.image.view];
	VkFramebufferCreateInfo framebuffer_info = {
		sType: VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
		renderPass: g_vk.render_pass,
		attachmentCount: cast(u32)attachments.length,
		pAttachments: attachments.ptr,
		width: g_vk.swapchain_extent.width,
		height: g_vk.swapchain_extent.height,
		layers: 1,
	};
	VkResult result = vkCreateFramebuffer(g_vk.device, &framebuffer_info, null, &g_vk.framebuffer);
	VkCheckA("vkCreateFramebuffer failure", result);
}
// Unmaps a persistently-mapped buffer before destroying its base buffer.
void
Destroy(T)(MappedBuffer!(T)* buf)
{
	vmaUnmapMemory(g_vk.vma, buf.alloc);
	Destroy(&buf.base);
}
// Releases a VMA-backed buffer and its allocation.
void
Destroy(Buffer* buf)
{
	vmaDestroyBuffer(g_vk.vma, buf.buffer, buf.alloc);
}
// Tears down whichever Vulkan objects a descriptor owns, dispatched on its
// type; null handles are skipped.
void
Destroy(Descriptor* desc)
{
	switch(desc.type) with(DescType)
	{
		case Sampler:
		{
			vkDestroySampler(g_vk.device, desc.sampler, null);
		} break;
		case CombinedSampler, Image, StorageImage, InputAttach:
		{
			// View first, then the image and its allocation.
			if(desc.image.view)
			{
				vkDestroyImageView(g_vk.device, desc.image.view, null);
			}
			if(desc.image.image)
			{
				vmaDestroyImage(g_vk.vma, desc.image.image, desc.image.alloc);
			}
		} break;
		case UniformTexelBuf, StorageTexelBuf:
		{
			if(desc.buffer_view.view)
			{
				vkDestroyBufferView(g_vk.device, desc.buffer_view.view, null);
			}
			if(desc.buffer_view.buffer)
			{
				vmaDestroyBuffer(g_vk.vma, desc.buffer_view.buffer, desc.buffer_view.alloc);
			}
		} break;
		case Uniform, DynamicUniform, Storage, DynamicStorage:
		{
			if(desc.buffer.buffer)
			{
				vmaDestroyBuffer(g_vk.vma, desc.buffer.buffer, desc.buffer.alloc);
			}
		} break;
		default: assert(false, "Unsupported descriptor Destroy call");
	}
}
// Frees the per-frame arenas, then the permanent arena.
void
DestroyRenderer()
{
	foreach(ref frame_arena; g_vk.frame_arenas)
	{
		Free(&frame_arena);
	}
	Free(&g_vk.arena);
}
// Destroys the instance if it was created.
void
Destroy(VkInstance instance)
{
	if(instance)
	{
		vkDestroyInstance(instance, null);
	}
}
// Destroys the debug messenger. Only exists in debug builds; no-op otherwise.
void
Destroy(VkDebugUtilsMessengerEXT dbg, VkInstance instance)
{
	version(VULKAN_DEBUG)
	{
		if(dbg)
		{
			vkDestroyDebugUtilsMessengerEXT(instance, dbg, null);
		}
	}
}
// Destroys the window surface if it was created.
void
Destroy(VkSurfaceKHR surface, VkInstance instance)
{
	if(surface)
	{
		vkDestroySurfaceKHR(instance, surface, null);
	}
}
// Destroys the per-swapchain-image views, then the swapchain itself. The
// images themselves are owned by the swapchain and must not be destroyed.
void
Destroy(VkSwapchainKHR swapchain, Descriptor[] images, VkDevice device)
{
	foreach(ref present_image; images)
	{
		if(present_image.image.view)
		{
			vkDestroyImageView(device, present_image.image.view, null);
		}
	}
	if(swapchain)
	{
		vkDestroySwapchainKHR(device, swapchain, null);
	}
}
// Logs the GPU disassembly for one shader stage of a pipeline via
// vkGetShaderInfoAMD. Compiles to a no-op unless both AMD_GPU and
// VULKAN_DEBUG are set. Failures are silently ignored (best-effort debug aid).
void
PrintShaderDisassembly(Pipeline pipeline_id, VkShaderStageFlagBits stage)
{
	version(AMD_GPU)
	{
		PipelineHandles* pipeline = g_vk.pipeline_handles.ptr + pipeline_id;
		version(VULKAN_DEBUG)
		{
			// Two-call pattern: query the size, then fetch into a frame-arena buffer.
			u64 size;
			VkResult result = vkGetShaderInfoAMD(g_vk.device, pipeline.handle, stage, VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD, &size, null);
			if(result == VK_SUCCESS)
			{
				u8[] buf = Alloc!(u8)(g_vk.FrameArena(), size);
				vkGetShaderInfoAMD(g_vk.device, pipeline.handle, stage, VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD, &size, buf.ptr);
				Logf("DISASSEMBLY:\n%r", buf);
			}
		}
	}
}
// Sets the dynamic stencil write mask on the main command buffer.
void
SetStencilWriteMask(StencilFace face, u32 mask)
{
	vkCmdSetStencilWriteMask(g_vk.cmd, face, mask);
}
// Sets the dynamic stencil reference value on the main command buffer.
void
SetStencilReference(StencilFace face, u32 reference)
{
	vkCmdSetStencilReference(g_vk.cmd, face, reference);
}
// Sets the dynamic stencil compare mask on the main command buffer.
void
SetStencilCompareMask(StencilFace face, u32 mask)
{
	vkCmdSetStencilCompareMask(g_vk.cmd, face, mask);
}
// Enables/disables the dynamic stencil test on the main command buffer.
void
SetStencilTest(VkBool32 enable)
{
	vkCmdSetStencilTestEnable(g_vk.cmd, enable);
}
// Sets the dynamic stencil operations for the given face on the main command buffer.
void
SetStencilOp(StencilFace face, StencilOp fail_op, StencilOp pass_op, StencilOp depth_fail_op, CompareOp cmp_op)
{
	vkCmdSetStencilOp(g_vk.cmd, face, fail_op, pass_op, depth_fail_op, cmp_op);
}
void
Init(PlatformHandles platform_handles, u64 permanent_mem, u64 frame_mem)
{
bool success;
Vulkan vk = {
arena: CreateArena(permanent_mem),
frame_arenas: [
CreateArena(frame_mem),
CreateArena(frame_mem),
],
platform_handles: platform_handles,
};
g_vk = vk;
Library lib;
Function fn;
foreach(name; VULKAN_LIBS)
{
lib = LoadLibrary(name);
if(lib.ptr)
{
fn = LoadFunction(lib, "vkGetInstanceProcAddr");
vkGetInstanceProcAddr = cast(PFN_vkGetInstanceProcAddr)fn.ptr;
}
}
if(fn.ptr)
{
vkGetInstanceProcAddr = cast(PFN_vkGetInstanceProcAddr)vkGetInstanceProcAddr(null, "vkGetInstanceProcAddr");
assert(vkGetInstanceProcAddr != null, "LoadGlobalFunctions failure: Unable to load vkGetInstanceProcAddr");
vkCreateInstance = cast(PFN_vkCreateInstance)vkGetInstanceProcAddr(null, "vkCreateInstance");
assert(vkCreateInstance != null, "LoadGlobalFunctions failure: Unable to load VkCreateInstance");
vkEnumerateInstanceLayerProperties = cast(PFN_vkEnumerateInstanceLayerProperties)vkGetInstanceProcAddr(null, "vkEnumerateInstanceLayerProperties");
assert(vkEnumerateInstanceLayerProperties != null, "LoadGlobalFunctions failure: Unable to load vkEnumerateInstanceLayerProperties");
success = true;
}
if(success)
{
Arena* arena = &g_vk.frame_arenas[0];
u32 count;
vkEnumerateInstanceLayerProperties(&count, null);
VkLayerProperties[] layers = Alloc!(VkLayerProperties)(arena, count);
vkEnumerateInstanceLayerProperties(&count, layers.ptr);
foreach(i, layer; layers)
{
if(strcmp(cast(char*)&layer.layerName, "VK_LAYER_KHRONOS_validation") == 0)
{
g_VLAYER_SUPPORT = true;
break;
}
}
const char*[] instance_layers = g_VLAYER_SUPPORT && BUILD_DEBUG ? VK_INSTANCE_LAYERS_DEBUG : VK_INSTANCE_LAYERS;
const char*[] instance_ext = g_VLAYER_SUPPORT && BUILD_DEBUG ? VK_INSTANCE_EXT_DEBUG : VK_INSTANCE_EXT;
VkApplicationInfo app_info = {
sType: VK_STRUCTURE_TYPE_APPLICATION_INFO,
pApplicationName: "Video Game",
applicationVersion: VK_MAKE_API_VERSION(0, 0, 0, 1),
pEngineName: "Gears",
engineVersion: VK_MAKE_API_VERSION(0, 0, 0, 1),
apiVersion: VK_MAKE_API_VERSION(0, 1, 2, 0),
};
VkInstanceCreateInfo instance_info = {
sType: VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
pApplicationInfo: &app_info,
enabledLayerCount: cast(u32)instance_layers.length,
ppEnabledLayerNames: instance_layers.ptr,
enabledExtensionCount: cast(u32)instance_ext.length,
ppEnabledExtensionNames: instance_ext.ptr,
};
VkValidationFeatureEnableEXT validation_enable = VK_VALIDATION_FEATURE_ENABLE_DEBUG_PRINTF_EXT;
VkValidationFeaturesEXT validation_features = {
sType: VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT,
enabledValidationFeatureCount: 1,
pEnabledValidationFeatures: &validation_enable,
};
version(VULKAN_DEBUG) if(g_VLAYER_SUPPORT && g_DEBUG_PRINTF)
{
instance_info.pNext = &validation_features;
}
VkResult result = vkCreateInstance(&instance_info, null, &g_vk.instance);
success &= VkCheck("vkCreateInstance failure", result);
}
if(success)
{
LoadInstanceFunctions();
version (VULKAN_DEBUG)
{
if(g_VLAYER_SUPPORT)
{
VkDebugUtilsMessageSeverityFlagBitsEXT severity_flags = VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT |
VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT;
if(g_DEBUG_PRINTF)
{
severity_flags |= VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT;
}
VkDebugUtilsMessengerCreateInfoEXT info = {
sType: VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT,
messageSeverity: severity_flags,
messageType: VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT |
VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT |
VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT,
pfnUserCallback: cast(PFN_vkDebugUtilsMessengerCallbackEXT)&DebugCallback,
};
if(g_DEBUG_PRINTF)
{
info.messageSeverity |= VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT;
}
VkResult result = vkCreateDebugUtilsMessengerEXT(g_vk.instance, &info, null, &g_vk.dbg_msg);
if(result != VK_SUCCESS)
{
Logf("EnableVLayers failed to initialize, will continue without validation: %s", VkResultStr(result));
}
}
else
{
Logf("EnableVLayers warning: Not supported on current device, continuing without");
}
}
}
// Surface
if(success)
{
version(linux)
{
VkXlibSurfaceCreateInfoKHR surface_info = {
sType: VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR,
dpy: g_vk.platform_handles.display,
window: g_vk.platform_handles.window,
};
VkResult result = vkCreateXlibSurfaceKHR(g_vk.instance, &surface_info, null, &g_vk.surface);
}
version(Windows)
{
VkWin32SurfaceCreateInfoKHR surface_info = {
sType: VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR,
hinstance: g_vk.platform_handles.instance,
hwnd: g_vk.platform_handles.handle,
};
VkResult result = vkCreateWin32SurfaceKHR(g_vk.instance, &surface_info, null, &g_vk.surface);
}
success &= VkCheck("InitSurface failure", result);
}
if(success)
{
success = false;
Arena* arena = &g_vk.frame_arenas[0];
u32 count;
vkEnumeratePhysicalDevices(g_vk.instance, &count, null);
VkPhysicalDevice[] devices = Alloc!(VkPhysicalDevice)(arena, count);
vkEnumeratePhysicalDevices(g_vk.instance, &count, devices.ptr);
VkPhysicalDevice physical_device = null;
bool discrete_candidate = false;
QueueInfo candidate = {
gfx_index: -1,
tfer_index: -1,
single_queue: false,
};
foreach(dev; devices)
{
const u32 T_BIT = VK_QUEUE_TRANSFER_BIT;
const u32 C_BIT = VK_QUEUE_COMPUTE_BIT;
const u32 G_BIT = VK_QUEUE_GRAPHICS_BIT;
const u32 S_BIT = VK_QUEUE_SPARSE_BINDING_BIT;
QueueInfo current = {
gfx_index: -1,
tfer_index: -1,
single_queue: false,
};
count = 0;
vkGetPhysicalDeviceQueueFamilyProperties(dev, &count, null);
VkQueueFamilyProperties[] properties = Alloc!(VkQueueFamilyProperties)(arena, count);
vkGetPhysicalDeviceQueueFamilyProperties(dev, &count, properties.ptr);
if(count == 1 && properties[0].queueCount == 1 && properties[0].queueFlags & (T_BIT | C_BIT | G_BIT))
{
current.gfx_index = current.tfer_index = 0;
current.single_queue = true;
}
else
{
bool sparse = false;
foreach(i, prop; properties)
{
i32 idx = cast(i32)i;
b32 surface_support;
vkGetPhysicalDeviceSurfaceSupportKHR(dev, cast(u32)i, g_vk.surface, &surface_support);
if(current.gfx_index < 0 && surface_support && prop.queueFlags & G_BIT)
{
current.gfx_index = idx;
continue;
}
if(BitEq(prop.queueFlags, T_BIT | S_BIT))
{
sparse = true;
current.tfer_index = idx;
continue;
}
if(!sparse && prop.queueFlags & T_BIT)
{
current.tfer_index = idx;
continue;
}
}
if(current.tfer_index < 0)
{
current.tfer_index = current.gfx_index;
}
}
if(current.gfx_index < 0)
{
continue;
}
bool discrete;
bool result;
// Properties
VkPhysicalDeviceProperties props;
vkGetPhysicalDeviceProperties(dev, &props);
if(VK_API_VERSION_MINOR(props.apiVersion) >= 2)
{
u32 ext_count;
vkEnumerateDeviceExtensionProperties(dev, null, &ext_count, null);
VkExtensionProperties[] ext_props = Alloc!(VkExtensionProperties)(arena, ext_count);
vkEnumerateDeviceExtensionProperties(dev, null, &ext_count, ext_props.ptr);
i32 matched = 0;
foreach(prop; ext_props)
{
foreach(ext; VK_DEVICE_EXTENSIONS)
{
if(strcmp(cast(char*)prop.extensionName, ext) == 0)
{
matched += 1;
break;
}
}
}
if(matched == VK_DEVICE_EXTENSIONS.length)
{
u32 fmt_count, present_count;
vkGetPhysicalDeviceSurfaceFormatsKHR(dev, g_vk.surface, &fmt_count, null);
vkGetPhysicalDeviceSurfacePresentModesKHR(dev, g_vk.surface, &present_count, null);
discrete = props.deviceType == VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU;
result = fmt_count && present_count;
}
}
if(discrete_candidate && !discrete)
{
continue;
}
if(!result)
{
continue;
}
// Device features
VkPhysicalDeviceFeatures2 features2 = { sType: VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2 };
VkPhysicalDeviceVulkan12Features features_12 = { sType: VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES };
features2.pNext = &features_12;
vkGetPhysicalDeviceFeatures2(dev, &features2);
VkPhysicalDeviceFeatures features = features2.features;
result &= cast(bool)features.fragmentStoresAndAtomics;
result &= cast(bool)features.shaderUniformBufferArrayDynamicIndexing;
result &= cast(bool)features.shaderSampledImageArrayDynamicIndexing;
result &= cast(bool)features.shaderStorageBufferArrayDynamicIndexing;
result &= cast(bool)features.shaderStorageImageArrayDynamicIndexing;
result &= cast(bool)features.samplerAnisotropy;
result &= cast(bool)features_12.descriptorIndexing;
result &= cast(bool)features_12.bufferDeviceAddress;
result &= cast(bool)features_12.descriptorBindingUniformBufferUpdateAfterBind;
result &= cast(bool)features_12.descriptorBindingStorageTexelBufferUpdateAfterBind;
result &= cast(bool)features_12.descriptorBindingSampledImageUpdateAfterBind;
result &= cast(bool)features_12.descriptorBindingStorageImageUpdateAfterBind;
result &= cast(bool)features_12.descriptorBindingStorageBufferUpdateAfterBind;
result &= cast(bool)features_12.descriptorBindingPartiallyBound;
result &= cast(bool)features_12.runtimeDescriptorArray;
result &= cast(bool)features_12.shaderSampledImageArrayNonUniformIndexing;
result &= cast(bool)features_12.shaderUniformBufferArrayNonUniformIndexing;
result &= cast(bool)features_12.timelineSemaphore;
result &= cast(bool)features_12.storageBuffer8BitAccess;
if(!result)
{
continue;
}
discrete_candidate = cast(bool)discrete;
candidate = current;
physical_device = dev;
}
if(physical_device)
{
VkDeviceQueueCreateInfo[2] queue_info;
f32 priority = 1.0f;
count = 1;
queue_info[0].sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
queue_info[0].queueFamilyIndex = candidate.gfx_index;
queue_info[0].queueCount = 1;
queue_info[0].pQueuePriorities = &priority;
queue_info[0].flags = 0;
if(!candidate.single_queue)
{
queue_info[1].sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
queue_info[1].queueFamilyIndex = candidate.tfer_index;
queue_info[1].queueCount = 1;
queue_info[1].pQueuePriorities = &priority;
queue_info[1].flags = 0;
count += 1;
}
VkPhysicalDeviceExtendedDynamicStateFeaturesEXT extended_dynamic_state = {
sType: VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT,
pNext: null,
extendedDynamicState: VK_TRUE,
};
VkPhysicalDeviceSynchronization2Features synchronization2 = {
sType: VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES,
pNext: &extended_dynamic_state,
synchronization2: VK_TRUE,
};
VkPhysicalDeviceVulkan12Features features_12 = {
sType: VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES,
pNext: &synchronization2,
descriptorIndexing: VK_TRUE,
bufferDeviceAddress: VK_TRUE,
descriptorBindingUniformBufferUpdateAfterBind: VK_TRUE,
descriptorBindingSampledImageUpdateAfterBind: VK_TRUE,
descriptorBindingStorageImageUpdateAfterBind: VK_TRUE,
descriptorBindingStorageBufferUpdateAfterBind: VK_TRUE,
descriptorBindingStorageTexelBufferUpdateAfterBind: VK_TRUE,
descriptorBindingPartiallyBound: VK_TRUE,
shaderSampledImageArrayNonUniformIndexing: VK_TRUE,
shaderUniformBufferArrayNonUniformIndexing: VK_TRUE,
runtimeDescriptorArray: VK_TRUE,
storageBuffer8BitAccess: VK_TRUE,
};
VkPhysicalDeviceVulkan11Features features_11 = {
sType: VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES,
pNext: &features_12,
};
VkPhysicalDeviceFeatures2 features = {
sType: VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
pNext: &features_11,
features: {
shaderUniformBufferArrayDynamicIndexing: VK_TRUE,
shaderSampledImageArrayDynamicIndexing: VK_TRUE,
shaderStorageBufferArrayDynamicIndexing: VK_TRUE,
shaderStorageImageArrayDynamicIndexing: VK_TRUE,
samplerAnisotropy: VK_TRUE,
fragmentStoresAndAtomics: VK_TRUE,
},
};
VkDeviceCreateInfo device_info = {
sType: VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
pNext: &features,
ppEnabledExtensionNames: VK_DEVICE_EXTENSIONS.ptr,
enabledExtensionCount: cast(u32)VK_DEVICE_EXTENSIONS.length,
queueCreateInfoCount: count,
pQueueCreateInfos: queue_info.ptr,
pEnabledFeatures: null,
};
VkResult result = vkCreateDevice(physical_device, &device_info, null, &g_vk.device);
if(result != VK_SUCCESS)
{
Logf("vkCreateDevices failure: %s", VkResultStr(result));
}
else
{
LoadDeviceFunctions();
vkGetDeviceQueue(
g_vk.device,
candidate.gfx_index,
0,
&candidate.gfx_queue
);
if(!candidate.single_queue)
{
vkGetDeviceQueue(
g_vk.device,
candidate.tfer_index,
candidate.tfer_index == candidate.gfx_index ? 1 : 0,
&candidate.tfer_queue
);
}
else
{
candidate.tfer_queue = candidate.gfx_queue;
candidate.tfer_index = candidate.gfx_index;
}
g_vk.physical_device = physical_device;
g_vk.queues = candidate;
success = true;
}
}
}
if(success)
{
VmaVulkanFunctions vk_functions = {
vkGetInstanceProcAddr: vkGetInstanceProcAddr,
vkGetDeviceProcAddr: vkGetDeviceProcAddr,
};
VmaAllocatorCreateInfo info = {
flags: VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT,
vulkanApiVersion: VK_MAKE_API_VERSION(0, 1, 2, 0),
pVulkanFunctions: &vk_functions,
physicalDevice: g_vk.physical_device,
device: g_vk.device,
instance: g_vk.instance,
};
VkResult result = vmaCreateAllocator(&info, &g_vk.vma);
success &= VkCheck("vmaCreateAllocator failure", result);
}
if(success)
{
success = CreateSwapchain();
}
if(success)
{
success = CreateDrawImages();
}
if(success)
{
Arena* arena = &g_vk.frame_arenas[0];
VkSemaphoreCreateInfo sem_info = { sType: VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO };
VkCommandPoolCreateInfo pool_info = {
sType: VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
flags: VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT,
};
VkFenceCreateInfo fence_info = {
sType: VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
flags: VK_FENCE_CREATE_SIGNALED_BIT,
};
VkCommandBufferAllocateInfo cmd_info = {
sType: VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
commandBufferCount: 1,
level: VK_COMMAND_BUFFER_LEVEL_PRIMARY,
};
u32 sem_count = cast(u32)g_vk.present_images.length;
g_vk.submit_sems = Alloc!(VkSemaphore)(arena, sem_count);
VkResult result;
foreach(i; 0 .. sem_count)
{
result = vkCreateSemaphore(g_vk.device, &sem_info, null, g_vk.submit_sems.ptr + i);
success &= VkCheck("vkCreateSemaphore failure", result);
}
foreach(i; 0 .. FRAME_OVERLAP)
{
pool_info.queueFamilyIndex = g_vk.gfx_index;
result = vkCreateCommandPool(g_vk.device, &pool_info, null, g_vk.cmd_pools.ptr + i);
success &= VkCheck("vkCreateCommandPool failure", result);
cmd_info.commandPool = g_vk.cmd_pools[i];
result = vkAllocateCommandBuffers(g_vk.device, &cmd_info, g_vk.cmds.ptr + i);
success &= VkCheck("vkAllocateCommandBuffers failure", result);
result = vkCreateFence(g_vk.device, &fence_info, null, g_vk.render_fences.ptr + i);
success &= VkCheck("vkCreateFence failure", result);
result = vkCreateSemaphore(g_vk.device, &sem_info, null, g_vk.acquire_sems.ptr + i);
success &= VkCheck("vkCreateSemaphore failure", result);
}
pool_info.queueFamilyIndex = g_vk.tfer_index;
result = vkCreateCommandPool(g_vk.device, &pool_info, null, &g_vk.imm_pool);
success &= VkCheck("vkCreateCommandPool failure", result);
cmd_info.commandPool = g_vk.imm_pool;
result = vkAllocateCommandBuffers(g_vk.device, &cmd_info, &g_vk.imm_cmd);
success &= VkCheck("vkAllocateCommandBuffers failure", result);
result = vkCreateFence(g_vk.device, &fence_info, null, &g_vk.imm_fence);
success &= VkCheck("vkCreateFence failure", result);
pool_info.queueFamilyIndex = g_vk.gfx_index;
result = vkCreateCommandPool(g_vk.device, &pool_info, null, &g_vk.comp_cmd_pool);
success &= VkCheck("vkCreateCommandPool failure", result);
cmd_info.commandPool = g_vk.comp_cmd_pool;
result = vkAllocateCommandBuffers(g_vk.device, &cmd_info, &g_vk.comp_cmd);
success &= VkCheck("vkCreateCommandPool failure", result);
result = vkCreateFence(g_vk.device, &fence_info, null, &g_vk.comp_fence);
success &= VkCheck("vkCreateFence failure", result);
}
if(success)
{
PushDescriptorPool();
if(success)
{
DescLayoutBinding[2] layout_bindings = [
{ binding: 0, descriptorType: VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, descriptorCount: 1, stageFlags: VK_SHADER_STAGE_ALL },
{ binding: 1, descriptorType: VK_DESCRIPTOR_TYPE_SAMPLER, descriptorCount: 1, stageFlags: VK_SHADER_STAGE_ALL },
];
g_vk.global_set_layout = CreateDescSetLayout(layout_bindings);
g_vk.global_set = AllocDescSet(g_vk.global_set_layout);
Write(g_vk.global_set, [g_vk.draw_image]);
}
}
if(success)
{
g_vk.pipeline_handles = Alloc!(PipelineHandles)(&g_vk.arena, 128);
g_vk.pipeline_count += 1;
u64 transfer_size = MB(64);
g_vk.transfer_buf = CreateMappedBuffer!(u8)(BT.Staging, transfer_size);
VkDescriptorSetLayoutBinding[2] layout_bindings = [
{ binding: 0, descriptorType: VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, descriptorCount: 1, stageFlags: VK_SHADER_STAGE_COMPUTE_BIT },
{ binding: 1, descriptorType: VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, descriptorCount: 1, stageFlags: VK_SHADER_STAGE_COMPUTE_BIT },
];
g_vk.conv_desc_layout = CreateDescSetLayout(layout_bindings);
g_vk.conv_desc_set = AllocDescSet(g_vk.conv_desc_layout);
g_vk.conv_pipeline_layout = CreatePipelineLayout(g_vk.conv_desc_layout, ConvPushConst.sizeof, true);
u32 channels = 1;
SpecEntry[1] entries = [
{
constantID: 0,
size: u32.sizeof,
offset: 0,
}
];
CompPipelineInfo conv_info = {
shader: CONVERT_SHADER_BYTES,
layout: g_vk.conv_pipeline_layout,
spec: {
data: &channels,
size: u32.sizeof,
entries: entries,
},
};
g_vk.r_to_rgba_pipeline = CreateComputePipeline(&conv_info);
channels = 2;
g_vk.rg_to_rgba_pipeline = CreateComputePipeline(&conv_info);
channels = 3;
g_vk.rgb_to_rgba_pipeline = CreateComputePipeline(&conv_info);
VkAttachmentDescription[2] attach_descriptions = [
{
format: g_vk.draw_image.image.format,
samples: VK_SAMPLE_COUNT_1_BIT,
loadOp: VK_ATTACHMENT_LOAD_OP_CLEAR,
storeOp: VK_ATTACHMENT_STORE_OP_STORE,
stencilLoadOp: VK_ATTACHMENT_LOAD_OP_DONT_CARE,
stencilStoreOp: VK_ATTACHMENT_STORE_OP_DONT_CARE,
initialLayout: VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
finalLayout: VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
},
{
format: g_vk.depth_image.image.format,
samples: VK_SAMPLE_COUNT_1_BIT,
loadOp: VK_ATTACHMENT_LOAD_OP_CLEAR,
storeOp: VK_ATTACHMENT_STORE_OP_STORE,
initialLayout: VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
finalLayout: VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
},
];
VkAttachmentReference color_ref = {
attachment: 0,
layout: VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
};
VkAttachmentReference depth_ref = {
attachment: 1,
layout: VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
};
VkSubpassDescription subpass = {
pipelineBindPoint: VK_PIPELINE_BIND_POINT_GRAPHICS,
colorAttachmentCount: 1,
pColorAttachments: &color_ref,
pDepthStencilAttachment: &depth_ref,
};
VkSubpassDependency self_dependency = {
srcSubpass: 0,
dstSubpass: 0,
srcStageMask: VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
dstStageMask: VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
srcAccessMask: VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT,
dstAccessMask: VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT,
dependencyFlags: VK_DEPENDENCY_BY_REGION_BIT,
};
VkRenderPassCreateInfo pass_info = {
sType: VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
attachmentCount: cast(u32)attach_descriptions.length,
pAttachments: attach_descriptions.ptr,
subpassCount: 1,
pSubpasses: &subpass,
dependencyCount: 1,
pDependencies: &self_dependency,
};
VkResult result = vkCreateRenderPass(g_vk.device, &pass_info, null, &g_vk.render_pass);
VkCheckA("vkCreateRenderPass failure", result);
CreateFramebuffer();
}
assert(success, "Error initializing vulkan");
}
version(VULKAN_RENDERER_TEST) unittest
{
    // Smoke test: drive renderer initialization with default (empty) platform
    // handles and 10 MB for each of the two size arguments passed to Init.
    enum test_arena_size = 10_000_000;
    PlatformHandles platform_handles;
    Init(platform_handles, test_arena_size, test_arena_size);
}