work on isolating code

This commit is contained in:
Matthew 2025-08-15 06:14:10 +10:00
parent d7fa7fbc85
commit e5b07e1bb7
2 changed files with 157 additions and 88 deletions

152
vulkan.d
View File

@ -1,5 +1,6 @@
import vulkan_funcs;
import vulkan_logging;
import vulkan_util;
import std.stdio;
import std.algorithm.comparison;
import core.stdc.string : strcmp, memcpy;
@ -227,8 +228,8 @@ struct Specialization
struct GfxPipelineInfo
{
string vertex_shader;
string frag_shader;
u8[] vertex_shader;
u8[] frag_shader;
InputRate input_rate;
u32 input_rate_stride;
Attribute[] vertex_attributes;
@ -281,13 +282,6 @@ struct MappedBuffer(T)
alias base this;
}
struct DescBindings
{
u32[] free;
u64 count;
HashTable!(string, u32) lookup_table;
}
struct Vulkan
{
Arena arena;
@ -348,8 +342,6 @@ struct Vulkan
DescSet global_set;
MappedBuffer!(u8) transfer_buf;
MappedBuffer!(UIVertex) ui_vert_buf;
MappedBuffer!(u32) ui_index_buf;
QueueInfo queues;
@ -627,15 +619,8 @@ InitBuffers(Vulkan* vk)
{
Push(vk, SI.Buffers);
vk.global_buf = CreateMappedBuffer!(GlobalUniforms)(vk, BT.Uniform, 1);
vk.shader_buf = CreateMappedBuffer!(ShaderUniforms)(vk, BT.Uniform, 1);
u64 transfer_size = MB(64);
vk.transfer_buf = CreateMappedBuffer!(u8)(vk, BT.Staging, transfer_size);
u64 ui_size = MB(30);
vk.ui_vert_buf = CreateMappedBuffer!(UIVertex)(vk, BT.Vertex, ui_size / UIVertex.sizeof);
vk.ui_index_buf = CreateMappedBuffer!(u32)(vk, BT.Index, ui_size / u32.sizeof);
}
void
@ -1601,21 +1586,17 @@ Transition(VkCommandBuffer cmd, Image* image, VkImageLayout new_layout)
image.layout = new_layout;
}
Result!(Shader)
BuildShader(Vulkan* vk, u8[] bytes)
bool
BuildShader(Vulkan* vk, Shader* shader, u8[] bytes)
{
Result!(Shader) shader;
VkShaderModuleCreateInfo shader_info = {
sType: VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
codeSize: bytes.length,
pCode: cast(uint*)bytes.ptr,
};
VkResult result = vkCreateShaderModule(vk.device, &shader_info, null, &shader.value);
shader.ok = VkCheck("vkCreateShaderModule failure", result);
return shader;
VkResult result = vkCreateShaderModule(vk.device, &shader_info, null, &shader);
return VkCheck("vkCreateShaderModule failure", result);
}
void
@ -1703,8 +1684,8 @@ CreateFramebuffer(Vulkan* vk)
VkCheckA("vkCreateFramebuffer failure", result);
}
Pipeline
CreateGraphicsPipeline(Vulkan* vk, GfxPipelineInfo* build_info)
bool
CreateGraphicsPipeline(Vulkan* vk, Pipeline* pipeline_handle, GfxPipelineInfo* build_info)
{
PipelineHandles* pipeline = NewPipeline(vk);
pipeline.type = VK_PIPELINE_BIND_POINT_GRAPHICS;
@ -1812,70 +1793,71 @@ CreateGraphicsPipeline(Vulkan* vk, GfxPipelineInfo* build_info)
Arena* arena = &vk.frame_arenas[0];
u8[] frag_bytes = LoadAssetData(arena, build_info.frag_shader);
u8[] vert_bytes = LoadAssetData(arena, build_info.vertex_shader);
bool success = true;
Shader frag_module, vert_module;
assert(vert_bytes && frag_bytes, "Unable to load shaders");
Result!(Shader) frag_module = BuildShader(vk, frag_bytes);
Result!(Shader) vert_module = BuildShader(vk, vert_bytes);
assert(vert_module.ok && frag_module.ok, "Unable to build vulkan shaders");
success &= BuildShader(vk, &frag_module, build_info.frag_shader);
success &= BuildShader(vk, &vert_module, build_info.vert_shader);
scope(exit)
{
Destroy(vk, frag_module.value);
Destroy(vk, vert_module.value);
Destroy(vk, frag_module);
Destroy(vk, vert_module);
}
__traits(getMember, shader_info.ptr + 0, "module") = frag_module.value;
__traits(getMember, shader_info.ptr + 1, "module") = vert_module.value;
VkSpecializationInfo vert_spec_info = {
dataSize: build_info.vert_spec.size,
mapEntryCount: cast(u32)build_info.vert_spec.entries.length,
pMapEntries: build_info.vert_spec.entries.ptr,
pData: build_info.vert_spec.data,
};
if (build_info.vert_spec.entries.length > 0)
if (success)
{
shader_info[0].pSpecializationInfo = &vert_spec_info;
__traits(getMember, shader_info.ptr + 0, "module") = frag_module;
__traits(getMember, shader_info.ptr + 1, "module") = vert_module;
VkSpecializationInfo vert_spec_info = {
dataSize: build_info.vert_spec.size,
mapEntryCount: cast(u32)build_info.vert_spec.entries.length,
pMapEntries: build_info.vert_spec.entries.ptr,
pData: build_info.vert_spec.data,
};
if (build_info.vert_spec.entries.length > 0)
{
shader_info[0].pSpecializationInfo = &vert_spec_info;
}
VkSpecializationInfo frag_spec_info = {
dataSize: build_info.frag_spec.size,
mapEntryCount: cast(u32)build_info.frag_spec.entries.length,
pMapEntries: build_info.frag_spec.entries.ptr,
pData: build_info.frag_spec.data,
};
if (build_info.frag_spec.entries.length > 0)
{
shader_info[1].pSpecializationInfo = &frag_spec_info;
}
VkGraphicsPipelineCreateInfo create_info = {
sType: VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
pNext: &rendering_info,
pVertexInputState: &input_info,
pInputAssemblyState: &assembly_info,
pViewportState: &viewport_info,
pRasterizationState: &rasterization_info,
pMultisampleState: &multisample_info,
pColorBlendState: &blend_info,
pDepthStencilState: &depth_info,
pDynamicState: &dyn_info,
stageCount: cast(u32)shader_info.length,
pStages: shader_info.ptr,
renderPass: vk.render_pass,
layout: build_info.layout,
};
VkResult result = vkCreateGraphicsPipelines(vk.device, null, 1, &create_info, null, &pipeline.handle);
success = VkCheck("CreateGraphicsPipeline failure", result);
*pipeline_handle = pipeline.index;
}
VkSpecializationInfo frag_spec_info = {
dataSize: build_info.frag_spec.size,
mapEntryCount: cast(u32)build_info.frag_spec.entries.length,
pMapEntries: build_info.frag_spec.entries.ptr,
pData: build_info.frag_spec.data,
};
if (build_info.frag_spec.entries.length > 0)
{
shader_info[1].pSpecializationInfo = &frag_spec_info;
}
VkGraphicsPipelineCreateInfo create_info = {
sType: VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
pNext: &rendering_info,
pVertexInputState: &input_info,
pInputAssemblyState: &assembly_info,
pViewportState: &viewport_info,
pRasterizationState: &rasterization_info,
pMultisampleState: &multisample_info,
pColorBlendState: &blend_info,
pDepthStencilState: &depth_info,
pDynamicState: &dyn_info,
stageCount: cast(u32)shader_info.length,
pStages: shader_info.ptr,
renderPass: vk.render_pass,
layout: build_info.layout,
};
VkResult result = vkCreateGraphicsPipelines(vk.device, null, 1, &create_info, null, &pipeline.handle);
assert(VkCheck("CreateGraphicsPipeline failure", result), "Unable to build pipeline");
return pipeline.index;
return success;
}
Pipeline
@ -2005,10 +1987,6 @@ Destroy(Vulkan* vk)
break;
case SI.Buffers:
Destroy(vk, &vk.transfer_buf);
Destroy(vk, &vk.ui_vert_buf);
Destroy(vk, &vk.ui_index_buf);
Destroy(vk, &vk.global_buf);
Destroy(vk, &vk.shader_buf);
break;
case SI.Pipelines:
DestroyPipelines(vk);

91
vulkan_util.d Normal file
View File

@ -0,0 +1,91 @@
import core.stdc.stdio : Printf = printf;
version(linux)
{
import core.sys.posix.sys.mman;

// Reserve `size` bytes of readable/writable anonymous pages.
// mmap reports failure as MAP_FAILED ((void*)-1), NOT null — normalize
// to null here so callers (e.g. CreateArena) can use a simple null check.
void*
MemAlloc(u64 size)
{
    void* mem = mmap(null, size, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
    return (mem == MAP_FAILED) ? null : mem;
}

// Unmap a region previously returned by MemAlloc; `size` must match the
// original request. Aborts (assert) if the kernel rejects the unmap.
void
MemFree(void* ptr, u64 size)
{
    assert(munmap(ptr, size) == 0, "MemFree failure");
}
}
// Linear (bump) allocator over one contiguous memory region.
// Allocation only moves `pos` forward; Reset rewinds it to zero.
struct Arena
{
u8* mem;    // base of the backing region (from MemAlloc)
u64 length; // total capacity of the region in bytes
u64 pos;    // bytes consumed so far; next allocation starts here
};
// Create an arena backed by `size` bytes of freshly mapped memory.
// Aborts (assert) if the allocation cannot be satisfied.
Arena
CreateArena(u64 size)
{
    Arena arena = {
        mem: cast(u8 *)MemAlloc(size),
        length: size,
        pos: 0,
    };
    // Guard against both null and MAP_FAILED (-1): mmap-based MemAlloc
    // may surface failure as (void*)-1 rather than null.
    assert(arena.mem != null && arena.mem != cast(u8*)-1,
           "Unable to allocate memory for arena");
    return arena;
};
// Carve a zero-initialized array of `count` elements of T out of the arena.
// The returned slice aliases arena memory; it is valid until Reset/Free.
T[]
AllocArray(T)(Arena* arena, u64 count)
{
    u64 total = T.sizeof * count;
    void* block = AllocAlign(arena, total, DEFAULT_ALIGNMENT);
    memset(block, 0, total);
    T* first = cast(T*)block;
    return first[0 .. count];
}
// Carve a single zero-initialized T out of the arena.
// The returned pointer aliases arena memory; valid until Reset/Free.
T*
Alloc(T)(Arena* arena)
{
    void* slot = AllocAlign(arena, T.sizeof, DEFAULT_ALIGNMENT);
    memset(slot, 0, T.sizeof);
    return cast(T*)slot;
};
// Bump-allocate `size` bytes at the given power-of-two alignment.
// On exhaustion this logs the request and aborts — it never returns null.
void*
AllocAlign(Arena* arena, u64 size, u64 alignment)
{
    uintptr base = cast(uintptr)arena.mem;
    // Align the current cursor, then express it as an offset from the base.
    uintptr aligned = AlignPow2(base + arena.pos, alignment);
    uintptr offset = aligned - base;
    if (offset + size > arena.length)
    {
        // Out of space: report and abort rather than hand back null.
        Printf("AllocAlign failure: out of memory, size requested: %llu", size);
        assert(0);
    }
    arena.pos = offset + size;
    return &arena.mem[offset];
};
// Rewind the arena to empty. Does not touch the backing memory, so
// previously returned pointers become dangling-but-readable garbage.
void
Reset(Arena* arena)
{
arena.pos = 0;
}
// Release the arena's backing memory to the OS and clear the handle.
void
Free(Arena* arena)
{
    // The original body was the bare expression `(arena.mem);` — a no-op
    // that leaked the entire mapping. Actually unmap it here.
    MemFree(arena.mem, arena.length);
    arena.mem = null;
    arena.length = 0;
    arena.pos = 0;
}