From bf34f3e6e1f4c0df495f237a942fefb04e898197 Mon Sep 17 00:00:00 2001 From: matthew Date: Sat, 19 Jul 2025 20:42:53 +1000 Subject: [PATCH] improved texture loading, added texture type conversion (not yet working properly) --- build-vma.sh | 2 +- src/gears/renderer.d | 98 +++-- src/gears/vulkan.d | 661 ++++++++++++++++++++++++++------- src/generated/assets_codegen.d | 3 + src/shaders/convert.comp.glsl | 54 +++ src/shaders/pbr.vert.glsl | 19 +- src/shaders/structures.layout | 13 +- 7 files changed, 679 insertions(+), 171 deletions(-) create mode 100644 src/shaders/convert.comp.glsl diff --git a/build-vma.sh b/build-vma.sh index 5872b42..126c076 100755 --- a/build-vma.sh +++ b/build-vma.sh @@ -4,7 +4,7 @@ set -eu # SHADERS shader_compiler="glslc" -shader_flags="--target-spv=spv1.6 -std=460" +shader_flags="--target-spv=spv1.6 -std=460 --target-env=vulkan1.3" shader_out="-oassets/shaders/" mkdir -p assets/shaders diff --git a/src/gears/renderer.d b/src/gears/renderer.d index 7fe0e8f..3df9a51 100644 --- a/src/gears/renderer.d +++ b/src/gears/renderer.d @@ -4,15 +4,16 @@ import assets; import util; import alloc; import vulkan; -import vulkan : Destroy, Init, Draw, DrawIndexed, Bind, BindUIBuffers, BeginRender, SetUniform, PrepCompute, Dispatch, FinishFrame, BeginFrame; +import vulkan : Destroy, Init, Draw, DrawIndexed, Bind, BindUIBuffers, BeginRender, SetUniform, PrepComputeDrawImage, Dispatch, FinishFrame, BeginFrame; import assets; import std.math.traits : isNaN; -import u = util; +import u = util : Logf; import p = platform; alias Shader = VkShaderModule; alias Pipeline = PipelineHandle; alias Attribute = VkVertexInputAttributeDescription; +alias SpecEntry = VkSpecializationMapEntry; enum InputRate : int { @@ -55,6 +56,13 @@ enum PipelineType : int alias PT = PipelineType; +struct Specialization +{ + u64 size; + SpecEntry[] entries; + void* data; +} + struct GfxPipelineInfo { string vertex_shader; @@ -62,6 +70,15 @@ struct GfxPipelineInfo InputRate input_rate; u32 
input_rate_stride; Attribute[] vertex_attributes; + Specialization vert_spec; + Specialization frag_spec; +} + +struct CompPipelineInfo +{ + string shader; + Specialization spec; + VkPipelineLayout *layout; } struct Renderer @@ -82,6 +99,8 @@ struct Renderer Pipeline compute_pipeline; Pipeline ui_pipeline; + PushConst push_const; + Model yoder; } @@ -189,55 +208,54 @@ Init(p.Window* window) ], }; + CompPipelineInfo gradient_info = { + shader: "shaders/gradient.comp", + }; + rd.pbr_pipeline = BuildGfxPipeline(&rd, &pbr_info); rd.triangle_pipeline = BuildGfxPipeline(&rd, &triangle_info); rd.ui_pipeline = BuildGfxPipeline(&rd, &ui_info); - rd.compute_pipeline = BuildCompPipeline(&rd, "shaders/gradient.comp"); + rd.compute_pipeline = BuildCompPipeline(&rd, &gradient_info); rd.yoder = LoadModel(&rd, "models/yoda"); return rd; } -bool +void Cycle(Renderer* rd) { rd.ui_count = 0; - bool success = BeginFrame(rd); + BeginFrame(rd); - if (success) - { - Bind(rd, &rd.compute_pipeline); + Bind(rd, &rd.compute_pipeline); - SetUniform(rd, &rd.globals); + SetUniform(rd, &rd.globals); - DrawRect(rd, 150.0, 300.0, 500.0, 700.0, Vec4(r: 0.0, g: 0.0, b: 1.0, a: 1.0)); + DrawRect(rd, 150.0, 300.0, 500.0, 700.0, Vec4(r: 0.0, g: 0.0, b: 1.0, a: 1.0)); - PrepCompute(rd); + PrepComputeDrawImage(rd); - Dispatch(rd); + Dispatch(rd); - BeginRender(rd); + BeginRender(rd); - Bind(rd, &rd.ui_pipeline); + Bind(rd, &rd.ui_pipeline); - BindUIBuffers(rd); + BindUIBuffers(rd); - DrawIndexed(rd, 6, rd.ui_count, 0); + DrawIndexed(rd, 6, rd.ui_count, 0); - Bind(rd, &rd.triangle_pipeline); + Bind(rd, &rd.triangle_pipeline); - Draw(rd, 3, 1); + Draw(rd, 3, 1); - Bind(rd, &rd.pbr_pipeline); + Bind(rd, &rd.pbr_pipeline); - DrawModel(rd, &rd.yoder); + DrawModel(rd, &rd.yoder); - success = FinishFrame(rd); - } - - return success; + FinishFrame(rd); } pragma(inline): void @@ -247,6 +265,8 @@ DrawModel(Renderer* rd, Model* model) foreach(i, part; model.parts) { + rd.push_const.mat_id = part.mat; + 
PushConstants(&rd.vk, &rd.push_const); DrawIndexed(rd, part.length, 1, part.offset); } } @@ -269,16 +289,16 @@ debug } } -pragma(inline): bool +pragma(inline): void BeginFrame(Renderer* rd) { - return BeginFrame(&rd.vk); + BeginFrame(&rd.vk); } -pragma(inline): bool +pragma(inline): void FinishFrame(Renderer* rd) { - return FinishFrame(&rd.vk); + FinishFrame(&rd.vk); } pragma(inline): void @@ -288,9 +308,9 @@ Dispatch(Renderer* rd) } pragma(inline): void -PrepCompute(Renderer* rd) +PrepComputeDrawImage(Renderer* rd) { - PrepCompute(&rd.vk); + PrepComputeDrawImage(&rd.vk); } pragma(inline): void @@ -357,9 +377,9 @@ BuildGfxPipeline(Renderer* rd, GfxPipelineInfo* info) } pragma(inline): Pipeline -BuildCompPipeline(Renderer* rd, string compute) +BuildCompPipeline(Renderer* rd, CompPipelineInfo* comp_info) { - return CreateComputePipeline(&rd.vk, compute); + return CreateComputePipeline(&rd.vk, comp_info); } Model @@ -377,10 +397,9 @@ LoadModel(Renderer* rd, string name) foreach(i; 0 .. m3d.numtexture) { u32 w = m3d.texture[i].w; u32 h = m3d.texture[i].h; u32 ch = m3d.texture[i].f; + u8[] tex_data = m3d.texture[i].d[0 .. w * h * ch]; - model.textures[i] = CreateImageView(&rd.vk, w, h, ch); - u8[] texture_data = m3d.texture[i].d[0 .. 
w * h * ch]; - assert(Transfer(&rd.vk, &model.textures[i], texture_data, w, h), "LoadModel failure: Texture transfer error"); + CreateImageView(&rd.vk, &model.textures[i], w, h, ch, tex_data); tex_lookup[i] = Pop(&rd.vk, DT.SampledImage); } @@ -417,7 +436,7 @@ LoadModel(Renderer* rd, string name) } } - model.materials[i] = CreateBuffer(&rd.vk, BT.Uniform, Material.sizeof, false); + CreateBuffer(&rd.vk, &model.materials[i], BT.Uniform, Material.sizeof, false); assert(Transfer(&rd.vk, &model.materials[i], &mat), "LoadModel failure: Transfer error when transferring material"); mat_lookup[i] = Pop(&rd.vk, DT.Material); @@ -501,12 +520,15 @@ LoadModel(Renderer* rd, string name) indices[vi+2] = vi+2; } - model.vertex_buffer = CreateBuffer(&rd.vk, BT.Vertex, vertices.length * Vertex.sizeof, false); - model.index_buffer = CreateBuffer(&rd.vk, BT.Index, indices.length * u32.sizeof, false); + CreateBuffer(&rd.vk, &model.vertex_buffer, BT.Vertex, vertices.length * Vertex.sizeof, false); + CreateBuffer(&rd.vk, &model.index_buffer, BT.Index, indices.length * u32.sizeof, false); assert(Transfer(&rd.vk, &model.vertex_buffer, vertices), "LoadModel failure: Unable to transfer vertex buffer"); assert(Transfer(&rd.vk, &model.index_buffer, indices), "LoadModel failure: Unable to transfer index buffer"); + WriteDescriptors(&rd.vk, DT.Material, model.materials, mat_lookup); + WriteDescriptors(&rd.vk, DT.SampledImage, model.textures, tex_lookup); + Reset(&rd.temp_arena); return model; diff --git a/src/gears/vulkan.d b/src/gears/vulkan.d index 3ab09cf..9f08054 100644 --- a/src/gears/vulkan.d +++ b/src/gears/vulkan.d @@ -49,6 +49,7 @@ version(Windows) const char*[] VK_DEVICE_EXTENSIONS = [ cast(char*)VK_KHR_SWAPCHAIN_EXTENSION_NAME, cast(char*)VK_KHR_UNIFORM_BUFFER_STANDARD_LAYOUT_EXTENSION_NAME, + cast(char*)VK_KHR_8BIT_STORAGE_EXTENSION_NAME, ]; const VkFormat[] VK_IMAGE_FORMATS = [ @@ -91,7 +92,7 @@ alias DT = DescType; struct PushConst { - u32 texture; + u32 mat_id; } struct Image @@ 
-165,6 +166,10 @@ struct Vulkan VkCommandPool[FRAME_OVERLAP] cmd_pools; VkCommandBuffer[FRAME_OVERLAP] cmds; + VkCommandPool comp_cmd_pool; + VkCommandBuffer comp_cmd; + VkFence comp_fence; + VkSemaphore[] submit_sems; VkSemaphore[FRAME_OVERLAP] acquire_sems; VkFence[FRAME_OVERLAP] render_fences; @@ -180,6 +185,7 @@ struct Vulkan VkSampler nearest_sampler; + VkPipeline[FRAME_OVERLAP] last_pipeline; VkPipelineLayout pipeline_layout; MappedBuffer!(u8) transfer_buf; @@ -188,11 +194,27 @@ struct Vulkan MappedBuffer!(GlobalUniforms) global_buf; MappedBuffer!(ShaderUniforms) shader_buf; - QueueInfo queues; + QueueInfo queues; + + PipelineHandle r_to_rgba_pipeline; + PipelineHandle rg_to_rgba_pipeline; + PipelineHandle rgb_to_rgba_pipeline; + VkDescriptorSet conv_desc_set; + VkDescriptorSetLayout conv_desc_layout; + VkPipelineLayout conv_pipeline_layout; + + VkPipeline last_comp_pipeline; + bool compute_pass_started; alias queues this; } +struct ConvPushConst +{ + u32 x; + u32 y; +} + struct PipelineHandle { VkPipeline handle; @@ -240,6 +262,7 @@ Init(p.Window* window, u64 permanent_mem, u64 frame_mem) if (success) success = InitFrameStructures(&vk); if (success) success = InitDescriptors(&vk); if (success) InitBuffers(&vk); + if (success) success = InitConversionPipeline(&vk); u.Result!(Vulkan) result = { ok: success, @@ -249,8 +272,101 @@ Init(p.Window* window, u64 permanent_mem, u64 frame_mem) return result; } -Buffer -CreateBuffer(Vulkan* vk, BufferType type, u64 size, bool host_visible) +bool +InitConversionPipeline(Vulkan* vk) +{ + VkDescriptorBindingFlags[] binding_flags; + binding_flags[] = VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT; + + VkDescriptorSetLayoutBindingFlagsCreateInfo flag_info = { + sType: VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO, + bindingCount: cast(u32)binding_flags.length, + pBindingFlags: binding_flags.ptr, + }; + + VkDescriptorSetLayoutBinding[] layout_bindings = [ + { binding: 0, descriptorType: 
VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, descriptorCount: 1, stageFlags: VK_SHADER_STAGE_COMPUTE_BIT }, + { binding: 1, descriptorType: VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, descriptorCount: 1, stageFlags: VK_SHADER_STAGE_COMPUTE_BIT }, + ]; + + VkDescriptorSetLayoutCreateInfo set_info = { + sType: VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO, + pNext: &flag_info, + bindingCount: cast(u32)layout_bindings.length, + pBindings: layout_bindings.ptr, + flags: VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT, + }; + + VkResult result = vkCreateDescriptorSetLayout(vk.device, &set_info, null, &vk.conv_desc_layout); + bool success = VkCheck("InitConversionPipeline failure: vkCreateDescriptorSetLayout error", result); + + if (success) + { + VkDescriptorSetAllocateInfo alloc_info = { + sType: VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO, + descriptorSetCount: 1, + pSetLayouts: &vk.conv_desc_layout, + descriptorPool: vk.desc_pool, + }; + + result = vkAllocateDescriptorSets(vk.device, &alloc_info, &vk.conv_desc_set); + success = VkCheck("InitConversionPipeline failure: vkAllocateDescriptorSets error", result); + } + + if (success) + { + VkPushConstantRange const_range = { + offset: 0, + size: cast(VkDeviceSize)ConvPushConst.sizeof, + stageFlags: VK_SHADER_STAGE_COMPUTE_BIT, + }; + + VkPipelineLayoutCreateInfo layout_info = { + sType: VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, + setLayoutCount: 1, + pSetLayouts: &vk.conv_desc_layout, + pushConstantRangeCount: 1, + pPushConstantRanges: &const_range, + }; + + result = vkCreatePipelineLayout(vk.device, &layout_info, null, &vk.conv_pipeline_layout); + success = VkCheck("InitConversionPipeline failure: vkCreatePipelineLayout error", result); + } + + if (success) + { + u32 channels = 1; + + CompPipelineInfo conv_info = { + shader: "shaders/convert.comp", + layout: &vk.conv_pipeline_layout, + spec: { + data: &channels, + size: u32.sizeof, + entries: [ + { + constantID: 0, + size: u32.sizeof, + offset: 0, + } + ], + }, + }; 
+ + vk.r_to_rgba_pipeline = CreateComputePipeline(vk, &conv_info); + + channels = 2; + vk.rg_to_rgba_pipeline = CreateComputePipeline(vk, &conv_info); + + channels = 3; + vk.rgb_to_rgba_pipeline = CreateComputePipeline(vk, &conv_info); + } + + return success; +} + +void +CreateBuffer(Vulkan* vk, Buffer* buf, BufferType type, u64 size, bool host_visible) { assert(type != BT.None, "CreateBuffer failure: type is None"); @@ -284,12 +400,9 @@ CreateBuffer(Vulkan* vk, BufferType type, u64 size, bool host_visible) } VmaAllocationInfo vma_info; - Buffer buf; VkResult result = vmaCreateBuffer(vk.vma, &buffer_info, &alloc_info, &buf.buffer, &buf.alloc, &vma_info); // TODO: handle errors here then reallocate buffer assert(VkCheck("CreateBuffer failure: vmaCreateBuffer error", result), "CreateBuffer failure"); - - return buf; } void @@ -329,7 +442,7 @@ MappedBuffer!(T) CreateMappedBuffer(T)(Vulkan* vk, BufferType type, u64 count) { MappedBuffer!(T) buf; - buf.base = CreateBuffer(vk, type, T.sizeof * count, true); + CreateBuffer(vk, &buf.base, type, T.sizeof * count, true); buf.data = MapBuffer!(T)(vk, &buf.base, count); return buf; } @@ -360,50 +473,36 @@ GetUIIndexBuffer(Vulkan* vk) return vk.ui_index_buf.data; } -bool +void BeginFrame(Vulkan* vk) { // TODO: move vkWaitForFences so it no longer holds up the frame, will need to change how fences are handled in regards to images though VkResult result = vkWaitForFences(vk.device, 1, vk.render_fences.ptr + vk.frame_index, VK_TRUE, 1000000000); - bool success = VkCheck("BeginFrame failure: vkWaitForFences error", result); + VkCheckA("BeginFrame failure: vkWaitForFences error", result); - if (success) + result = vkResetFences(vk.device, 1, vk.render_fences.ptr + vk.frame_index); + VkCheckA("BeginFrame failure: vkResetFences error", result); + + result = vkAcquireNextImageKHR(vk.device, vk.swapchain, 1000000000, vk.acquire_sems[vk.frame_index], null, &vk.image_index); + if (result == VK_ERROR_OUT_OF_DATE_KHR) { - result = 
vkResetFences(vk.device, 1, vk.render_fences.ptr + vk.frame_index); - success = VkCheck("BeginFrame failure: vkResetFences error", result); + RecreateSwapchain(vk); + } + else + { + VkCheckA("BeginFrame failure: vkAcquireNextImageKHR error", result); } - if (success) - { - result = vkAcquireNextImageKHR(vk.device, vk.swapchain, 1000000000, vk.acquire_sems[vk.frame_index], null, &vk.image_index); - if (result == VK_ERROR_OUT_OF_DATE_KHR) - { - RecreateSwapchain(vk); - } - else - { - success = VkCheck("BeginFrame failure: vkAcquireNextImageKHR error", result); - } - } + result = vkResetCommandBuffer(vk.cmds[vk.frame_index], 0); + VkCheckA("BeginFrame failure: vkResetCommandBuffer failure", result); - if (success) - { - result = vkResetCommandBuffer(vk.cmds[vk.frame_index], 0); - success = VkCheck("BeginFrame failure: vkResetCommandBuffer failure", result); - } + VkCommandBufferBeginInfo cmd_info = { + sType: VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, + flags: VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, + }; - if (success) - { - VkCommandBufferBeginInfo cmd_info = { - sType: VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, - flags: VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, - }; - - result = vkBeginCommandBuffer(vk.cmds[vk.frame_index], &cmd_info); - success = VkCheck("BeginFrame failure: vkBeginCommandBuffer error", result); - } - - return success; + result = vkBeginCommandBuffer(vk.cmds[vk.frame_index], &cmd_info); + VkCheckA("BeginFrame failure: vkBeginCommandBuffer error", result); } void @@ -450,17 +549,19 @@ BeginRender(Vulkan* vk) } void -PrepCompute(Vulkan* vk) +PrepComputeDrawImage(Vulkan* vk) { Transition(vk.cmds[vk.frame_index], &vk.draw_image, VK_IMAGE_LAYOUT_GENERAL); } -bool +void FinishFrame(Vulkan* vk) { - scope(exit) vk.frame_index = (vk.frame_index + 1) % FRAME_OVERLAP; - - bool success = true; + scope(exit) + { + vk.last_pipeline[vk.frame_index] = null; + vk.frame_index = (vk.frame_index + 1) % FRAME_OVERLAP; + } VkImage image = CurrentImage(vk); 
VkSemaphore acquire_sem = vk.acquire_sems[vk.frame_index]; @@ -475,72 +576,64 @@ FinishFrame(Vulkan* vk) height: vk.swapchain_extent.height, }; - // TODO: Find out how to copy from same dimension images - Copy(vk.cmds[vk.frame_index], vk.draw_image.image, image, extent, extent); + // TODO: Find out how to copy from same dimension images (pretty sure its not blitting) + Copy(vk.cmds[vk.frame_index], &vk.draw_image.base, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, extent, extent); Transition(vk.cmds[vk.frame_index], image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_PRESENT_SRC_KHR); VkResult result = vkEndCommandBuffer(vk.cmds[vk.frame_index]); - success = VkCheck("FinishFrame failure: vkEndCommandBuffer error", result); + VkCheckA("FinishFrame failure: vkEndCommandBuffer error", result); - if (success) + VkCommandBufferSubmitInfo cmd_info = { + sType: VK_STRUCTURE_TYPE_COMMAND_BUFFER_SUBMIT_INFO, + commandBuffer: vk.cmds[vk.frame_index], + }; + + VkSemaphoreSubmitInfo wait_info = { + sType: VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO, + semaphore: acquire_sem, + stageMask: VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT, + value: 1, + }; + + VkSemaphoreSubmitInfo signal_info = { + sType: VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO, + semaphore: submit_sem, + stageMask: VK_PIPELINE_STAGE_2_ALL_GRAPHICS_BIT, + value: 1, + }; + + VkSubmitInfo2 submit_info = { + sType: VK_STRUCTURE_TYPE_SUBMIT_INFO_2, + waitSemaphoreInfoCount: 1, + pWaitSemaphoreInfos: &wait_info, + signalSemaphoreInfoCount: 1, + pSignalSemaphoreInfos: &signal_info, + commandBufferInfoCount: 1, + pCommandBufferInfos: &cmd_info, + }; + + result = vkQueueSubmit2(vk.queues.gfx_queue, 1, &submit_info, vk.render_fences[vk.frame_index]); + VkCheckA("FinishFrame failure: vkQueueSubmit2 error", result); + + VkPresentInfoKHR present_info = { + sType: VK_STRUCTURE_TYPE_PRESENT_INFO_KHR, + swapchainCount: 1, + pSwapchains: &vk.swapchain, + waitSemaphoreCount: 1, + pWaitSemaphores: &submit_sem, + pImageIndices: 
&vk.image_index, + }; + + result = vkQueuePresentKHR(vk.queues.gfx_queue, &present_info); + if (result == VK_ERROR_OUT_OF_DATE_KHR || result == VK_SUBOPTIMAL_KHR) { - VkCommandBufferSubmitInfo cmd_info = { - sType: VK_STRUCTURE_TYPE_COMMAND_BUFFER_SUBMIT_INFO, - commandBuffer: vk.cmds[vk.frame_index], - }; - - VkSemaphoreSubmitInfo wait_info = { - sType: VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO, - semaphore: acquire_sem, - stageMask: VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT, - value: 1, - }; - - VkSemaphoreSubmitInfo signal_info = { - sType: VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO, - semaphore: submit_sem, - stageMask: VK_PIPELINE_STAGE_2_ALL_GRAPHICS_BIT, - value: 1, - }; - - VkSubmitInfo2 submit_info = { - sType: VK_STRUCTURE_TYPE_SUBMIT_INFO_2, - waitSemaphoreInfoCount: 1, - pWaitSemaphoreInfos: &wait_info, - signalSemaphoreInfoCount: 1, - pSignalSemaphoreInfos: &signal_info, - commandBufferInfoCount: 1, - pCommandBufferInfos: &cmd_info, - }; - - result = vkQueueSubmit2(vk.queues.gfx_queue, 1, &submit_info, vk.render_fences[vk.frame_index]); - success = VkCheck("FinishFrame failure: vkQueueSubmit2 error", result); + RecreateSwapchain(vk); } - - if (success) + else { - VkPresentInfoKHR present_info = { - sType: VK_STRUCTURE_TYPE_PRESENT_INFO_KHR, - swapchainCount: 1, - pSwapchains: &vk.swapchain, - waitSemaphoreCount: 1, - pWaitSemaphores: &submit_sem, - pImageIndices: &vk.image_index, - }; - - result = vkQueuePresentKHR(vk.queues.gfx_queue, &present_info); - if (result == VK_ERROR_OUT_OF_DATE_KHR || result == VK_SUBOPTIMAL_KHR) - { - RecreateSwapchain(vk); - } - else - { - success = VkCheck("FinishFrame failure: vkQueuePresentKHR failure", result); - } + VkCheckA("FinishFrame failure: vkQueuePresentKHR failure", result); } - - return success; } void @@ -619,8 +712,125 @@ TransferAssets(Vulkan* vk) return true; } -ImageView -CreateImageView(Vulkan* vk, u32 w, u32 h, u32 ch) +pragma(inline): void +CreateImageView(Vulkan* vk, ImageView* view, u32 w, u32 h, 
u32 ch, u8[] data) +{ + CreateImageView(vk, view, w, h); + + if (ch == 4) + { + assert(Transfer(vk, view, data, w, h), "CreateImageView failure: Image Transfer error"); + } + else + { + Buffer buf; + CreateBuffer(vk, &buf, BT.Storage, w * h * ch, false); + assert(Transfer(vk, &buf, data), "CreateImageView failure: Buffer Transfer error"); + + ImageView conv_view; + CreateImageView(vk, &conv_view, w, h, VK_FORMAT_R32G32B32A32_SFLOAT); + + WriteConvDescriptor(vk, &buf); + WriteConvDescriptor(vk, &conv_view); + + BeginComputePass(vk); + + vkCmdBindDescriptorSets( + vk.comp_cmd, + VK_PIPELINE_BIND_POINT_COMPUTE, + vk.conv_pipeline_layout, + 0, + 1, + &vk.conv_desc_set, + 0, + null + ); + + VkPipeline pipeline = ch == 1 ? vk.r_to_rgba_pipeline.handle : + ch == 2 ? vk.rg_to_rgba_pipeline.handle : + vk.rgb_to_rgba_pipeline.handle; + + vkCmdBindPipeline(vk.comp_cmd, VK_PIPELINE_BIND_POINT_COMPUTE, pipeline); + + ConvPushConst pc = { + x: w, + y: h, + }; + + vkCmdPushConstants( + vk.comp_cmd, + vk.conv_pipeline_layout, + VK_SHADER_STAGE_COMPUTE_BIT, + 0, + ConvPushConst.sizeof, + &pc + ); + + Transition(vk.comp_cmd, &conv_view, VK_IMAGE_LAYOUT_GENERAL); + + Dispatch(vk, vk.comp_cmd); + + Transition(vk.comp_cmd, view, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); + + VkExtent2D extent = { width: w, height: h }; + Copy(vk.comp_cmd, &conv_view.base, &view.base, extent, extent); + + Transition(vk.comp_cmd, view, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL); + + FinishComputePass(vk); + + vkWaitForFences(vk.device, 1, &vk.comp_fence, VK_TRUE, 1000000000); + + //Destroy(vk, &buf); + //Destroy(&conv_view, vk.device, vk.vma); + } +} + +pragma(inline): void +BeginComputePass(Vulkan* vk) +{ + VkResult result = vkWaitForFences(vk.device, 1, &vk.comp_fence, VK_TRUE, 1000000000); + VkCheckA("BeginComputePass failure: vkWaitForFences error", result); + + result = vkResetFences(vk.device, 1, &vk.comp_fence); + VkCheckA("BeginComputepass failure: vkResetFences error", result); + + + result = 
vkResetCommandBuffer(vk.comp_cmd, 0); + VkCheckA("BeginComputePass failure: vkResetCommandBuffer error", result); + + VkCommandBufferBeginInfo cmd_info = { + sType: VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, + flags: VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, + }; + + result = vkBeginCommandBuffer(vk.comp_cmd, &cmd_info); + VkCheckA("BeginComputePass failure: vkBeginCommandBuffer error", result); +} + +pragma(inline): void +FinishComputePass(Vulkan* vk) +{ + VkResult result = vkEndCommandBuffer(vk.comp_cmd); + VkCheckA("FinishComputePass failure: vkEndCommandBuffer error", result); + + VkCommandBufferSubmitInfo cmd_info = { + sType: VK_STRUCTURE_TYPE_COMMAND_BUFFER_SUBMIT_INFO, + commandBuffer: vk.comp_cmd, + }; + + VkSubmitInfo2 submit_info = { + sType: VK_STRUCTURE_TYPE_SUBMIT_INFO_2, + commandBufferInfoCount: 1, + pCommandBufferInfos: &cmd_info, + }; + + result = vkQueueSubmit2(vk.gfx_queue, 1, &submit_info, vk.comp_fence); + VkCheckA("FinishComputePass failure: vkQueueSubmit2 error", result); +} + +pragma(inline): void +CreateImageView(Vulkan* vk, ImageView* view, u32 w, u32 h, VkFormat format = VK_FORMAT_R8G8B8A8_SRGB) { VmaAllocationCreateInfo alloc_info = { usage: VMA_MEMORY_USAGE_GPU_ONLY, @@ -632,10 +842,10 @@ CreateImageView(Vulkan* vk, u32 w, u32 h, u32 ch) imageType: VK_IMAGE_TYPE_2D, mipLevels: 1, arrayLayers: 1, - format: VK_FORMAT_R8G8B8A8_SRGB, + format: format, tiling: VK_IMAGE_TILING_OPTIMAL, initialLayout: VK_IMAGE_LAYOUT_UNDEFINED, - usage: VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT, + usage: format == VK_FORMAT_R8G8B8A8_SRGB ? 
(VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT) : (VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT), samples: VK_SAMPLE_COUNT_1_BIT, extent: { width: w, @@ -651,11 +861,8 @@ CreateImageView(Vulkan* vk, u32 w, u32 h, u32 ch) image_info.pQueueFamilyIndices = cast(const u32*)[vk.gfx_index, vk.tfer_index]; } - ImageView view = { - base: { - layout: VK_IMAGE_LAYOUT_UNDEFINED, - }, - }; + view.layout = VK_IMAGE_LAYOUT_UNDEFINED; + view.format = VK_FORMAT_R8G8B8A8_SRGB; VkResult result = vmaCreateImage(vk.vma, &image_info, &alloc_info, &view.image, &view.alloc, null); // TODO: handle errors and realloc @@ -665,7 +872,7 @@ CreateImageView(Vulkan* vk, u32 w, u32 h, u32 ch) sType: VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, image: view.image, viewType: VK_IMAGE_VIEW_TYPE_2D, - format: VK_FORMAT_R8G8B8A8_SRGB, + format: format, subresourceRange: { aspectMask: VK_IMAGE_ASPECT_COLOR_BIT, levelCount: 1, @@ -676,8 +883,6 @@ CreateImageView(Vulkan* vk, u32 w, u32 h, u32 ch) result = vkCreateImageView(vk.device, &view_info, null, &view.view); // TODO: also handle here assert(VkCheck("CreateImageView failure: vkCreateImageView error", result), "CreateImageView failure"); - - return view; } u32 @@ -736,6 +941,19 @@ Push(Vulkan* vk, string name, DescType type) } } +void +PushConstants(Vulkan* vk, PushConst* pc) +{ + vkCmdPushConstants( + vk.cmds[vk.frame_index], + vk.pipeline_layout, + VK_SHADER_STAGE_VERTEX_BIT|VK_SHADER_STAGE_FRAGMENT_BIT|VK_SHADER_STAGE_COMPUTE_BIT, + 0, + PushConst.sizeof, + pc + ); +} + bool Transfer(T)(Vulkan* vk, Buffer* buf, T[] data) { @@ -850,16 +1068,28 @@ Transfer(Vulkan* vk, Image* image, u8[] data, u32 w, u32 h) } pragma(inline): void -Copy(VkCommandBuffer cmd, VkImage src, VkImage dst, VkExtent2D src_ext, VkExtent2D dst_ext) +Copy(VkCommandBuffer cmd, Image* src, Image* dst, VkExtent2D src_ext, VkExtent2D dst_ext) +{ + Copy(cmd, src.image, dst.image, src.layout, dst.layout, src_ext, dst_ext); +} + +pragma(inline): void 
+Copy(VkCommandBuffer cmd, Image* src, VkImage dst, VkImageLayout dst_layout, VkExtent2D src_ext, VkExtent2D dst_ext) +{ + Copy(cmd, src.image, dst, src.layout, dst_layout, src_ext, dst_ext); +} + +pragma(inline): void +Copy(VkCommandBuffer cmd, VkImage src, VkImage dst, VkImageLayout src_layout, VkImageLayout dst_layout, VkExtent2D src_ext, VkExtent2D dst_ext) { VkImageBlit2 blit = { sType: VK_STRUCTURE_TYPE_IMAGE_BLIT_2, srcOffsets: [ - {}, + { x: 0, y: 0 }, { x: cast(i32)src_ext.width, y: cast(i32)src_ext.height, z: 1 }, ], dstOffsets: [ - {}, + { x: 0, y: 0 }, { x: cast(i32)dst_ext.width, y: cast(i32)dst_ext.height, z: 1 }, ], srcSubresource: { @@ -879,9 +1109,9 @@ Copy(VkCommandBuffer cmd, VkImage src, VkImage dst, VkExtent2D src_ext, VkExtent VkBlitImageInfo2 blit_info = { sType: VK_STRUCTURE_TYPE_BLIT_IMAGE_INFO_2, srcImage: src, - srcImageLayout: VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, + srcImageLayout: src_layout, dstImage: dst, - dstImageLayout: VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, + dstImageLayout: dst_layout, filter: VK_FILTER_LINEAR, regionCount: 1, pRegions: &blit, @@ -893,7 +1123,11 @@ Copy(VkCommandBuffer cmd, VkImage src, VkImage dst, VkExtent2D src_ext, VkExtent void Bind(Vulkan* vk, PipelineHandle* pipeline) { - vkCmdBindPipeline(vk.cmds[vk.frame_index], pipeline.type, pipeline.handle); + if (vk.last_pipeline[vk.frame_index] == null || vk.last_pipeline[vk.frame_index] != pipeline.handle) + { + vkCmdBindPipeline(vk.cmds[vk.frame_index], pipeline.type, pipeline.handle); + vk.last_pipeline[vk.frame_index] = pipeline.handle; + } vkCmdBindDescriptorSets( vk.cmds[vk.frame_index], @@ -1102,6 +1336,30 @@ CreateGraphicsPipeline(Vulkan* vk, GfxPipelineInfo* build_info) __traits(getMember, shader_info.ptr + 0, "module") = vert_module.value; __traits(getMember, shader_info.ptr + 1, "module") = frag_module.value; + VkSpecializationInfo vert_spec_info = { + dataSize: build_info.vert_spec.size, + mapEntryCount: cast(u32)build_info.vert_spec.entries.length, + 
pMapEntries: build_info.vert_spec.entries.ptr, + pData: build_info.vert_spec.data, + }; + + if (build_info.vert_spec.entries.length > 0) + { + shader_info[0].pSpecializationInfo = &vert_spec_info; + } + + VkSpecializationInfo frag_spec_info = { + dataSize: build_info.frag_spec.size, + mapEntryCount: cast(u32)build_info.frag_spec.entries.length, + pMapEntries: build_info.frag_spec.entries.ptr, + pData: build_info.frag_spec.data, + }; + + if (build_info.frag_spec.entries.length > 0) + { + shader_info[1].pSpecializationInfo = &frag_spec_info; + } + VkGraphicsPipelineCreateInfo create_info = { sType: VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, pNext: &rendering_info, @@ -1127,11 +1385,11 @@ CreateGraphicsPipeline(Vulkan* vk, GfxPipelineInfo* build_info) } Pipeline -CreateComputePipeline(Vulkan* vk, string shader) +CreateComputePipeline(Vulkan* vk, CompPipelineInfo* comp_info) { VkComputePipelineCreateInfo info = { sType: VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, - layout: vk.pipeline_layout, + layout: comp_info.layout ? 
*comp_info.layout : vk.pipeline_layout, stage: { sType: VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, stage: VK_SHADER_STAGE_COMPUTE_BIT, @@ -1139,7 +1397,7 @@ CreateComputePipeline(Vulkan* vk, string shader) }, }; - u8[] comp_bytes = ap.LoadAssetData(&vk.frame_arenas[0], shader); + u8[] comp_bytes = ap.LoadAssetData(&vk.frame_arenas[0], comp_info.shader); assert(comp_bytes != null, "Unable to load compute shader data"); Result!(Shader) comp_module = BuildShader(vk, comp_bytes); @@ -1148,6 +1406,18 @@ CreateComputePipeline(Vulkan* vk, string shader) __traits(getMember, &info.stage, "module") = comp_module.value; + VkSpecializationInfo spec_info = { + dataSize: comp_info.spec.size, + mapEntryCount: cast(u32)comp_info.spec.entries.length, + pMapEntries: comp_info.spec.entries.ptr, + pData: comp_info.spec.data, + }; + + if (comp_info.spec.entries.length > 0) + { + info.stage.pSpecializationInfo = &spec_info; + } + PipelineHandle pipeline = { type: VK_PIPELINE_BIND_POINT_COMPUTE }; VkResult result = vkCreateComputePipelines(vk.device, null, 1, &info, null, &pipeline.handle); assert(VkCheck("CreateComputePipeline failure", result), "Unable to build pipeline"); @@ -1195,6 +1465,8 @@ Destroy(Vulkan* vk, Pipeline pipeline) void Destroy(Vulkan* vk) { + vkDeviceWaitIdle(vk.device); + alias N = u.Node!(SI); assert(vk.cleanup_list.first != null, "node null"); for(N* node = vk.cleanup_list.first; node != null; node = node.next) @@ -1463,11 +1735,107 @@ InitDescriptors(Vulkan* vk) } void -Dispatch(Vulkan* vk) +WriteConvDescriptor(Vulkan* vk, Buffer* buf) +{ + VkDescriptorBufferInfo buf_info = { + buffer: buf.buffer, + range: VK_WHOLE_SIZE, + }; + + VkWriteDescriptorSet write = { + sType: VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, + dstSet: vk.conv_desc_set, + dstBinding: 1, + descriptorCount: 1, + descriptorType: VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, + pBufferInfo: &buf_info, + }; + + vkUpdateDescriptorSets(vk.device, 1, &write, 0, null); +} + +void +WriteConvDescriptor(Vulkan* 
vk, ImageView* view) +{ + VkDescriptorImageInfo image_info = { + imageView: view.view, + imageLayout: VK_IMAGE_LAYOUT_GENERAL, + }; + + VkWriteDescriptorSet write = { + sType: VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, + dstSet: vk.conv_desc_set, + dstBinding: 0, + descriptorCount: 1, + descriptorType: VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, + pImageInfo: &image_info, + }; + + vkUpdateDescriptorSets(vk.device, 1, &write, 0, null); +} + +void +WriteDescriptors(Vulkan* vk, DescType type, Buffer[] buffers, u32[] indices) +{ + assert(buffers.length > 0, "WriteDescriptors failure: Buffer length is 0"); + + VkDescriptorBufferInfo[] buffer_info = AllocArray!(VkDescriptorBufferInfo)(&vk.frame_arenas[vk.frame_index], buffers.length); + VkWriteDescriptorSet[] writes = AllocArray!(VkWriteDescriptorSet)(&vk.frame_arenas[vk.frame_index], buffers.length); + + foreach(i, buf; buffers) + { + buffer_info[i].buffer = buf.buffer; + buffer_info[i].range = VK_WHOLE_SIZE; + + writes[i].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; + writes[i].dstSet = vk.desc_sets[type]; + writes[i].dstArrayElement = indices[i]; + writes[i].dstBinding = 0; + writes[i].descriptorCount = 1; + writes[i].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; + writes[i].pBufferInfo = buffer_info.ptr + i; + } + + vkUpdateDescriptorSets(vk.device, cast(u32)writes.length, writes.ptr, 0, null); +} + +void +WriteDescriptors(Vulkan* vk, DescType type, ImageView[] views, u32[] indices) +{ + assert(views.length > 0, "WriteDescriptors failure: ImageView length is 0"); + + VkDescriptorImageInfo[] image_info = AllocArray!(VkDescriptorImageInfo)(&vk.frame_arenas[vk.frame_index], views.length); + VkWriteDescriptorSet[] writes = AllocArray!(VkWriteDescriptorSet)(&vk.frame_arenas[vk.frame_index], views.length); + + foreach(i, view; views) + { + image_info[i].imageView = view.view; + image_info[i].imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; + + writes[i].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; + writes[i].dstSet 
= vk.desc_sets[type]; + writes[i].dstArrayElement = indices[i]; + writes[i].dstBinding = 0; + writes[i].descriptorCount = 1; + writes[i].descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, + writes[i].pImageInfo = image_info.ptr + i; + } + + vkUpdateDescriptorSets(vk.device, cast(u32)writes.length, writes.ptr, 0, null); +} + +pragma(inline): void +Dispatch(Vulkan* vk, VkCommandBuffer cmd) { f32 w = Ceil(cast(f32)(vk.swapchain_extent.width) / 16.0F); f32 h = Ceil(cast(f32)(vk.swapchain_extent.height) / 16.0F); - vkCmdDispatch(vk.cmds[vk.frame_index], cast(u32)w, cast(u32)h, 1); + vkCmdDispatch(cmd, cast(u32)w, cast(u32)h, 1); +} + +pragma(inline): void +Dispatch(Vulkan* vk) +{ + Dispatch(vk, vk.cmds[vk.frame_index]); } bool @@ -1475,7 +1843,16 @@ VkCheck(string message, VkResult result) { bool success = true; - if (result != VK_SUCCESS) + // TODO: Handle error cases that can be handled + if (result == VK_ERROR_OUT_OF_DEVICE_MEMORY) + { + assert(false, "Handle VK_ERROR_OUT_OF_DEVICE_MEMORY"); + } + else if (result == VK_ERROR_OUT_OF_HOST_MEMORY) + { + assert(false, "Handle VK_ERROR_OUT_OF_HOST_MEMORY"); + } + else if (result != VK_SUCCESS) { success = false; char[512] buf; @@ -1486,6 +1863,12 @@ VkCheck(string message, VkResult result) return success; } +void +VkCheckA(string message, VkResult result) +{ + assert(VkCheck(message, result), "Aborting program due to failure"); +} + bool InitFrameStructures(Vulkan* vk) { @@ -1575,6 +1958,26 @@ InitFrameStructures(Vulkan* vk) success = VkCheck("vkCreateFence failure", result); } + if (success) + { + pool_info.queueFamilyIndex = vk.gfx_index; + VkResult result = vkCreateCommandPool(vk.device, &pool_info, null, &vk.comp_cmd_pool); + success = VkCheck("vkCreateCommandPool failure", result); + } + + if (success) + { + cmd_info.commandPool = vk.comp_cmd_pool; + VkResult result = vkAllocateCommandBuffers(vk.device, &cmd_info, &vk.comp_cmd); + success = VkCheck("vkCreateCommandPool failure", result); + } + + if (success) + { + 
VkResult result = vkCreateFence(vk.device, &fence_info, null, &vk.comp_fence); + success = VkCheck("vkCreateFence failure", result); + } + return success; } @@ -1943,6 +2346,7 @@ InitDevice(Vulkan* vk) shaderSampledImageArrayNonUniformIndexing: VK_TRUE, shaderUniformBufferArrayNonUniformIndexing: VK_TRUE, runtimeDescriptorArray: VK_TRUE, + storageBuffer8BitAccess: VK_TRUE, }; VkPhysicalDeviceVulkan11Features features_11 = { @@ -2045,6 +2449,7 @@ CheckDeviceFeatures(VkPhysicalDevice device) result &= cast(bool)features_12.shaderSampledImageArrayNonUniformIndexing; result &= cast(bool)features_12.shaderUniformBufferArrayNonUniformIndexing; result &= cast(bool)features_12.timelineSemaphore; + result &= cast(bool)features_12.storageBuffer8BitAccess; result &= cast(bool)features_13.synchronization2; result &= cast(bool)features_13.dynamicRendering; diff --git a/src/generated/assets_codegen.d b/src/generated/assets_codegen.d index b3f00e5..43f3c4a 100644 --- a/src/generated/assets_codegen.d +++ b/src/generated/assets_codegen.d @@ -34,6 +34,7 @@ static immutable string[] SHADER_FILES = [ "shaders/pbr.frag.spv", "shaders/gui.vert.spv", "shaders/gradient.comp.spv", + "shaders/convert.comp.spv", "shaders/triangle.frag.spv", "shaders/pbr.vert.spv", ]; @@ -44,6 +45,7 @@ static immutable string[] SHADER_NAMES = [ "shaders/pbr.frag", "shaders/gui.vert", "shaders/gradient.comp", + "shaders/convert.comp", "shaders/triangle.frag", "shaders/pbr.vert", ]; @@ -54,6 +56,7 @@ static immutable u64[] SHADER_HASHES = [ 2230071466542309169, 14797956403837654625, 16130483095684998267, + 3780699516113804562, 6282520872716708711, 8518761701216801634, ]; diff --git a/src/shaders/convert.comp.glsl b/src/shaders/convert.comp.glsl new file mode 100644 index 0000000..0d5c687 --- /dev/null +++ b/src/shaders/convert.comp.glsl @@ -0,0 +1,54 @@ +#version 460 + +#extension GL_EXT_shader_8bit_storage : require + +layout (constant_id = 0) const int CHANNELS = 3; + +layout (local_size_x = 32, local_size_y 
= 32) in; + +layout (push_constant) uniform Constants { + uint x; + uint y; +} PC; + +layout (set = 0, binding = 0, rgba32f) uniform image2D dst; + +layout (set = 0, binding = 1) buffer input_array +{ + uint8_t src[]; +}; + + +void main() +{ + uint x = gl_GlobalInvocationID.x; + uint y = gl_GlobalInvocationID.y; + + if (x >= PC.x || y >= PC.y) + { + return; + } + + if (CHANNELS == 1) + { + uint index = x + (y * PC.x); + + vec4 col = vec4(vec3(uint(src[index]) / 255.0), 1.0); + imageStore(dst, ivec2(x, y), col); + } + else if (CHANNELS == 2) + { + uint index = (x + (y * PC.x)) * 2; + + vec4 col = vec4(uint(src[index]) / 255.0, uint(src[index]) / 255.0, uint(src[index]) / 255.0, uint(src[index+1]) / 255.0); + imageStore(dst, ivec2(x, y), col); + } + else if (CHANNELS == 3) + { + uint index = (x + (y * PC.x)) * 3; + + vec4 col = vec4(uint(src[index]) / 255.0, uint(src[index+1]) / 255.0, uint(src[index+2]) / 255.0, 1.0); + imageStore(dst, ivec2(x, y), col); + } +} + diff --git a/src/shaders/pbr.vert.glsl b/src/shaders/pbr.vert.glsl index ea0519a..3d3da30 100644 --- a/src/shaders/pbr.vert.glsl +++ b/src/shaders/pbr.vert.glsl @@ -13,8 +13,23 @@ layout (location = 3) in vec4 in_col; layout (location = 0) out vec4 out_col; layout (location = 1) out vec2 out_uv; +mat4 y_matrix = mat4( + 1.0, 0.0, 0.0, 0.0, + 0.0, -1.0, 0.0, 0.0, + 0.0, 0.0, 1.0, 0.0, + 0.0, 0.0, 0.0, 1.0 +); + void main() { - gl_Position = in_pos; - out_col = in_col; + gl_Position = in_pos * y_matrix; + + if (Materials[nonuniformEXT(PC.mat_id)].albedo_has_texture) + { + out_col = texture(sampler2D(Textures[nonuniformEXT(Materials[nonuniformEXT(PC.mat_id)].albedo_texture)], SamplerNearest), in_uv); + } + else + { + out_col = Materials[nonuniformEXT(PC.mat_id)].diffuse; + } } diff --git a/src/shaders/structures.layout b/src/shaders/structures.layout index 99c5f96..9262ee2 100644 --- a/src/shaders/structures.layout +++ b/src/shaders/structures.layout @@ -13,9 +13,18 @@ layout (set = 0, binding = 3) uniform sampler SamplerNearest; layout (set = 1, binding = 0) uniform 
texture2D Textures[]; layout (set = 2, binding = 0) uniform Material { - float placeholder; + uint albedo_texture; + uint ambient_texture; + uint specular_texture; + bool albedo_has_texture; + bool ambient_has_texture; + bool specular_has_texture; + vec4 ambient; + vec4 diffuse; + vec4 specular; + float shininess; } Materials[]; layout (push_constant) uniform Constants { - float placeholder; + uint mat_id; } PC;