use std::cell::RefCell;
use std::sync::{Arc, Mutex};
use std::thread::current;
use std::time::Duration;
use std::{iter, num::NonZeroU32, ops::Range, rc::Rc};
use std::mem;
use bytemuck::{Pod, Zeroable};
use cgmath::{
vec3, Decomposed, Deg, Euler, InnerSpace, Matrix4, Point3, Quaternion, Rad, Rotation3,
Transform, Vector3,
};
use futures::executor::LocalPool;
use imgui::sys::ImGuiContext;
use imgui::*;
use imgui_wgpu::{Renderer as ImguiRenderer, RendererConfig as ImguiRendererConfig};
use legion::world::SubWorld;
use legion::*;
use wgpu::util::DeviceExt;
use wgpu::{BackendBit, BindGroup, BindGroupLayout, Buffer, BufferBindingType, Device, FragmentState, Instance, Queue, Surface, SwapChain, SwapChainDescriptor, SwapChainFrame, TextureView, VertexState, CommandEncoder};
use winit_24::dpi::PhysicalSize;
use winit_24::window::Window;
use crate::camera::{Camera, CameraController};
use crate::components::{Mesh, Position, RangeCopy};
use crate::current_ui;
use crate::geometry::{load_obj, Vertex, RawMesh};
use crate::imgui_supp::imgui_support::{ImguiContext, ImguiPlatform};
use crate::light::{DirectionalLight, LightRaw};
use crate::render::{EntityUniforms, ShadowUniforms, ForwardUniforms};
/// A render pass consists of a pipeline, a bind group, and a uniform buffer.
/// The uniform buffer holds the pass-level uniforms (`ShadowUniforms` or `ForwardUniforms`),
/// while the per-entity data (`EntityUniforms`) lives in each mesh's own bind group.
pub struct Pass {
pub pipeline: wgpu::RenderPipeline,
pub bind_group: wgpu::BindGroup,
pub uniform_buf: wgpu::Buffer,
}
pub struct RenderState {
swapchain: SwapChain,
swapchain_description: SwapChainDescriptor,
instance: Arc<Instance>,
pub(in crate::render) device: Arc<Device>,
pub(in crate::render) queue: Arc<Queue>,
size: PhysicalSize<u32>,
surface: Arc<Surface>,
pub(in crate::render) lights_are_dirty: bool,
pub(in crate::render) shadow_pass: Pass,
shadow_target_views: Vec<Arc<TextureView>>,
views_given: u32,
pub(in crate::render) forward_pass: Pass,
pub(in crate::render) forward_depth: wgpu::TextureView,
entity_bind_group_layout: BindGroupLayout,
pub(in crate::render) light_uniform_buf: wgpu::Buffer,
pub(in crate::render) camera_projection: Matrix4<f32>,
pub(in crate::render) imgui_renderer: ImguiRenderer,
}
impl RenderState {
const MAX_LIGHTS: usize = 10;
const SHADOW_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth32Float;
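    // The shadow maps are stored as one array texture: a 1024x1024 depth layer per light
    // (with this wgpu version, `depth` on a D2 texture is the array layer count).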
const SHADOW_SIZE: wgpu::Extent3d = wgpu::Extent3d {
width: 1024,
height: 1024,
depth: Self::MAX_LIGHTS as u32,
};
const DEPTH_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth32Float;
    /// Generate a perspective projection matrix (no view/look-at transform applied yet)
pub fn generate_matrix(&self) -> cgmath::Matrix4<f32> {
// Specifies the aspect ratio that determines the field of view in the x direction.
// The aspect ratio is the ratio of x (width) to y (height).
cgmath::perspective(
cgmath::Deg(75f32),
self.size.width as f32 / self.size.height as f32,
0.1,
100.0,
)
}
    /// Get the next frame from the swap chain,
    /// recreating the swap chain first if acquisition fails (e.g. an outdated surface after a resize)
pub fn get_current_frame(&mut self) -> SwapChainFrame {
match self.swapchain.get_current_frame() {
Ok(frame) => frame,
Err(_) => {
self.swapchain = self
.device
.create_swap_chain(&self.surface, &self.swapchain_description);
self.swapchain
.get_current_frame()
.expect("Failed to acquire next swap chain texture!")
}
}
}
    /// Create the vertex and index buffers for a mesh
fn create_buffer(
device: &wgpu::Device,
raw_mesh: &RawMesh,
) -> (Arc<Buffer>, Arc<Buffer>) {
let vertex_buf = Arc::new(
device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("vertex-buffer"),
contents: bytemuck::cast_slice(&raw_mesh.vertices),
usage: wgpu::BufferUsage::VERTEX,
}),
);
let index_buf = Arc::new(
device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("index-buffer"),
contents: bytemuck::cast_slice(&raw_mesh.indices),
usage: wgpu::BufferUsage::INDEX,
}),
);
(vertex_buf, index_buf)
}
    /// Take a mesh's raw representation and upload it to GPU buffers
pub fn upload_mesh_to_buffer(&mut self, mesh: &RawMesh, color: Option<wgpu::Color>) -> Result<Mesh, String> {
let index_count = mesh.indices.len() * 3; // TODO bad bad bad bad!
let (vertex_buf, index_buf) = RenderState::create_buffer(&self.device, mesh);
let uniform_size = mem::size_of::<EntityUniforms>() as wgpu::BufferAddress;
let uniform_buf = Arc::new(self.device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Mesh Uniform Buf"),
size: uniform_size,
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
mapped_at_creation: false,
}));
let bind_group = Arc::new(self.device.create_bind_group(&wgpu::BindGroupDescriptor {
label: Some("Mesh Bind Group"),
layout: &self.entity_bind_group_layout,
entries: &[wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Buffer {
buffer: &uniform_buf,
offset: 0,
size: wgpu::BufferSize::new(uniform_size),
},
}],
}));
        Ok(Mesh {
            index_buffer: index_buf,
            index_count,
            // TODO: This is hardcoded by tobj, maybe think about doing something a little more clever?
            index_format: wgpu::IndexFormat::Uint32,
            vertex_buffer: vertex_buf,
            uniform_buffer: uniform_buf,
            bind_group,
            color: color.unwrap_or(wgpu::Color::RED),
        })
}
    /// Explicitly load a mesh from a file and upload it to the GPU
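    ///
    /// ```ignore
    /// // Hypothetical usage; the OBJ path and color are illustrative only:
    /// let cube = render_state.load_mesh_to_buffer("resources/cube.obj", Some(wgpu::Color::GREEN))?;
    /// ```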
pub fn load_mesh_to_buffer(&mut self, filepath: &str, color: Option<wgpu::Color>) -> Result<Mesh, String> {
let raw_mesh = load_obj(filepath)?;
self.upload_mesh_to_buffer(&raw_mesh, color)
}
    /// When creating a light we have to give it a target view to render its shadow map into.
    /// This is dangerous right now: the limit is 10 lights but only 2 target views are
    /// created in `init`, so this needs to be smartened up.
pub fn create_light(&mut self) -> DirectionalLight {
        let target = self
            .shadow_target_views
            .get(self.views_given as usize)
            .expect("ran out of shadow target views; only 2 are created in init");
        self.views_given += 1;
DirectionalLight {
color: wgpu::Color {
r: 1.0,
g: 0.5,
b: 0.5,
a: 1.0,
},
fov: 90.0,
depth: RangeCopy {
start: 1.0,
end: 200.0,
},
target_view: target.clone(),
}
}
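    /// Set up the full render state: instance, device and queue, swap chain,
    /// shadow and forward passes, shadow map texture/views, and the imgui renderer.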
pub fn init(window: &Window, imgui_context: &mut ImguiContext) -> RenderState {
// Grab the GPU instance, and query its features
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
let (size, surface) = unsafe {
let size = window.inner_size();
let surface = instance.create_surface(window);
(size, surface)
};
let surface = Arc::new(surface);
let adapter = instance.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::HighPerformance,
compatible_surface: Some(&surface),
});
let adapter = futures::executor::block_on(adapter).unwrap();
let optional_features = RenderState::optional_features();
let required_features = RenderState::required_features();
let adapter_features = adapter.features();
let needed_limits = wgpu::Limits::default(); //Renderer::required_limits();
        // Optional wgpu API tracing for debugging, enabled by setting WGPU_TRACE to a directory
let trace_dir = std::env::var("WGPU_TRACE");
// And then get the device we want
let device = adapter.request_device(
&wgpu::DeviceDescriptor {
label: Some("device descriptor"),
features: (optional_features & adapter_features) | required_features,
limits: needed_limits,
},
trace_dir.ok().as_ref().map(std::path::Path::new),
);
let (device, queue) = futures::executor::block_on(device).unwrap();
let queue = Arc::new(queue);
let device = Arc::new(device);
        let sc_desc = wgpu::SwapChainDescriptor {
            // Allows the texture to be used as an output attachment of a render pass.
            usage: wgpu::TextureUsage::RENDER_ATTACHMENT,
            format: if cfg!(target_arch = "wasm32") {
                wgpu::TextureFormat::Bgra8Unorm
            } else {
                wgpu::TextureFormat::Bgra8UnormSrgb
            },
            width: size.width,
            height: size.height,
            // Mailbox: frames are presented at the vertical blank, but rendering is not
            // throttled; the newest completed frame replaces any queued one.
            present_mode: wgpu::PresentMode::Mailbox,
        };
        let swap_chain = device.create_swap_chain(&surface, &sc_desc);
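        // Each mesh gets its own uniform buffer of this size (see upload_mesh_to_buffer),
        // bound through the entity bind group layout created below.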
let entity_uniform_size = mem::size_of::<EntityUniforms>() as wgpu::BufferAddress;
        // This is much easier than what I was doing in tracer, though the attribute list
        // is still a macro, which would cause issues if I wanted to get tricky with the
        // shader-location 0/1 types.
let vertex_size = mem::size_of::<Vertex>();
let vertex_attr = wgpu::vertex_attr_array![0 => Float4, 1 => Float4, 2 => Float2];
let vb_desc = wgpu::VertexBufferLayout {
array_stride: vertex_size as wgpu::BufferAddress,
step_mode: wgpu::InputStepMode::Vertex,
attributes: &vertex_attr,
};
        // This layout is also defined in the runtime, which really shouldn't need it
let entity_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: Some("Entity Bind Group Layout"),
entries: &[wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT,
count: None,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
min_binding_size: wgpu::BufferSize::new(
mem::size_of::<EntityUniforms>() as _
),
has_dynamic_offset: false,
},
}],
});
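        // Shadow pass: renders scene depth from each light's point of view into a layer
        // of the shadow map texture.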
let shadow_pass = {
let uniform_size = mem::size_of::<ShadowUniforms>() as wgpu::BufferAddress;
            // I believe this just exposes the pass uniforms (u_ViewProj) to the vertex shader
let bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: Some("Shadow pass bind group layout"),
entries: &[wgpu::BindGroupLayoutEntry {
binding: 0, // global
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
min_binding_size: wgpu::BufferSize::new(uniform_size),
has_dynamic_offset: false,
},
count: None,
}],
});
// Pipeline is similar between passes, but with a different label
let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("shadow pass pipeline layout"),
bind_group_layouts: &[&bind_group_layout, &entity_bind_group_layout],
push_constant_ranges: &[],
});
            // Holds the shadow uniforms, which is just four vec4s (a 4x4 matrix)
let uniform_buf = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("shadow pass shadow uniform buffer"),
size: uniform_size,
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
mapped_at_creation: false,
});
// Create bind group
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &bind_group_layout,
entries: &[wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Buffer {
buffer: &uniform_buf,
offset: 0,
size: wgpu::BufferSize::new(uniform_size),
},
}],
label: Some("Shadow uniform bind group"),
});
// Create the render pipeline
let vs_module =
device.create_shader_module(&wgpu::include_spirv!("../../resources/bake.vert.spv"));
let fs_module =
device.create_shader_module(&wgpu::include_spirv!("../../resources/bake.frag.spv"));
let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("shadow"),
layout: Some(&pipeline_layout),
vertex: VertexState {
module: &vs_module,
entry_point: "main",
buffers: &[vb_desc.clone()],
},
fragment: Some(FragmentState {
module: &fs_module,
entry_point: "main",
targets: &[],
}),
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleList,
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
..Default::default()
},
depth_stencil: Some(wgpu::DepthStencilState {
format: Self::SHADOW_FORMAT,
depth_write_enabled: true,
depth_compare: wgpu::CompareFunction::LessEqual,
stencil: wgpu::StencilState::default(),
bias: wgpu::DepthBiasState {
constant: 2, // corresponds to bilinear filtering
slope_scale: 2.0,
clamp: 0.0,
},
clamp_depth: device.features().contains(wgpu::Features::DEPTH_CLAMPING),
}),
multisample: wgpu::MultisampleState::default(),
});
Pass {
pipeline,
bind_group,
uniform_buf,
}
};
        // Pre-initialize the light uniform buffer with enough slots for MAX_LIGHTS
let light_uniform_size =
(Self::MAX_LIGHTS * mem::size_of::<LightRaw>()) as wgpu::BufferAddress;
let light_uniform_buf = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Light uniform buffer"),
size: light_uniform_size,
usage: wgpu::BufferUsage::UNIFORM
| wgpu::BufferUsage::COPY_SRC
| wgpu::BufferUsage::COPY_DST,
mapped_at_creation: false,
});
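        // Depth-only array texture holding all shadow maps, one layer per light.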
let shadow_texture = device.create_texture(&wgpu::TextureDescriptor {
size: Self::SHADOW_SIZE,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: Self::SHADOW_FORMAT,
usage: wgpu::TextureUsage::RENDER_ATTACHMENT | wgpu::TextureUsage::SAMPLED,
label: Some("Shadow texture"),
});
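        // Create a render-target view for each layer we currently hand out (only 2 for now,
        // see create_light).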
        let shadow_target_views = (0..2)
.map(|i| {
Arc::new(shadow_texture.create_view(&wgpu::TextureViewDescriptor {
label: Some("shadow"),
format: None,
dimension: Some(wgpu::TextureViewDimension::D2),
aspect: wgpu::TextureAspect::All,
base_mip_level: 0,
level_count: None,
base_array_layer: i as u32,
array_layer_count: NonZeroU32::new(1),
}))
})
.collect::<Vec<_>>();
let mx_projection = cgmath::perspective(
cgmath::Deg(75f32), // FOV, might wanna hook this up somewhere
sc_desc.width as f32 / sc_desc.height as f32,
1.0,
50.0,
);
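        // Forward pass: shades the scene, reading the light uniforms and sampling the shadow maps.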
let forward_pass = {
// Create pipeline layout
let bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0, // global
visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
min_binding_size: wgpu::BufferSize::new(mem::size_of::<
ForwardUniforms,
>(
)
as _),
has_dynamic_offset: false,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1, // lights
visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Buffer {
ty: BufferBindingType::Uniform,
min_binding_size: wgpu::BufferSize::new(light_uniform_size),
has_dynamic_offset: false,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 2,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Texture {
multisampled: false,
sample_type: wgpu::TextureSampleType::Depth,
view_dimension: wgpu::TextureViewDimension::D2Array,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 3,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler {
filtering: false,
comparison: true,
},
count: None,
},
],
label: Some("Forward pass bind group layout"),
});
let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("main"),
bind_group_layouts: &[&bind_group_layout, &entity_bind_group_layout],
push_constant_ranges: &[],
});
            // TODO: the light count is hardcoded here; it should come from the actual number of lights.
            let forward_uniforms = ForwardUniforms {
                proj: *mx_projection.as_ref(),
                //num_lights: [lights.len() as u32, 0, 0, 0],
                num_lights: [2u32, 0, 0, 0],
            };
let uniform_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Forward pass binding 0 uniform buffer"),
contents: bytemuck::bytes_of(&forward_uniforms),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
});
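            // Comparison sampler for shadow-map lookups: the sampled depth is compared against
            // the reference depth with LessEqual.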
let shadow_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
label: Some("shadow"),
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Linear,
mipmap_filter: wgpu::FilterMode::Nearest,
compare: Some(wgpu::CompareFunction::LessEqual),
..Default::default()
});
let shadow_view = shadow_texture.create_view(&wgpu::TextureViewDescriptor::default());
let forward_uniform_size =
wgpu::BufferSize::new(mem::size_of::<ForwardUniforms>() as wgpu::BufferAddress);
let light_uniform_size = wgpu::BufferSize::new(light_uniform_size);
// Create bind group
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Buffer {
buffer: &uniform_buf,
offset: 0,
size: forward_uniform_size,
},
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Buffer {
buffer: &light_uniform_buf,
offset: 0,
size: light_uniform_size,
},
},
wgpu::BindGroupEntry {
binding: 2,
resource: wgpu::BindingResource::TextureView(&shadow_view),
},
wgpu::BindGroupEntry {
binding: 3,
resource: wgpu::BindingResource::Sampler(&shadow_sampler),
},
],
label: Some("Forward pass bind group"),
});
// Create the render pipeline
let vs_module =
device.create_shader_module(&wgpu::include_spirv!("../../resources/forward.vert.spv"));
let fs_module =
device.create_shader_module(&wgpu::include_spirv!("../../resources/forward.frag.spv"));
let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("main"),
layout: Some(&pipeline_layout),
vertex: VertexState {
module: &vs_module,
entry_point: "main",
buffers: &[vb_desc],
},
fragment: Some(FragmentState {
module: &fs_module,
entry_point: "main",
targets: &[sc_desc.format.into()],
}),
primitive: wgpu::PrimitiveState {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
..Default::default()
},
depth_stencil: Some(wgpu::DepthStencilState {
format: Self::DEPTH_FORMAT,
depth_write_enabled: true,
depth_compare: wgpu::CompareFunction::Less,
stencil: wgpu::StencilState::default(),
bias: wgpu::DepthBiasState::default(),
clamp_depth: false,
}),
multisample: Default::default(),
});
Pass {
pipeline,
bind_group,
uniform_buf,
}
};
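        // Depth buffer for the forward pass; recreated in resize() when the window size changes.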
let depth_texture = device.create_texture(&wgpu::TextureDescriptor {
size: wgpu::Extent3d {
width: sc_desc.width,
height: sc_desc.height,
depth: 1,
},
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: Self::DEPTH_FORMAT,
usage: wgpu::TextureUsage::RENDER_ATTACHMENT,
label: Some("Depth Texture"),
});
// Imgui renderer
let renderer_config = ImguiRendererConfig {
texture_format: sc_desc.format,
..Default::default()
};
        let imgui_renderer =
ImguiRenderer::new(&mut imgui_context.context, &device, &queue, renderer_config);
        RenderState {
            swapchain: swap_chain,
            queue,
            size,
            device,
            lights_are_dirty: true,
            shadow_pass,
            forward_pass,
            forward_depth: depth_texture.create_view(&wgpu::TextureViewDescriptor::default()),
            entity_bind_group_layout,
            shadow_target_views,
            light_uniform_buf,
            swapchain_description: sc_desc,
            surface,
            instance: Arc::new(instance),
            views_given: 0,
            camera_projection: mx_projection,
            imgui_renderer,
        }
}
pub fn required_features() -> wgpu::Features {
wgpu::Features::empty()
}
pub fn optional_features() -> wgpu::Features {
wgpu::Features::DEPTH_CLAMPING
}
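    /// Handle a window resize: rebuild the swap chain and depth texture, and refresh the
    /// projection matrix (which is also written into the forward pass uniform buffer).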
pub fn resize(&mut self, width: u32, height: u32) {
self.swapchain_description.width = width;
self.swapchain_description.height = height;
self.swapchain = self
.device
            .create_swap_chain(&self.surface, &self.swapchain_description);
// update view-projection matrix
let mx_total = self.generate_matrix();
let mx_ref: &[f32; 16] = mx_total.as_ref();
self.queue.write_buffer(
&self.forward_pass.uniform_buf,
0,
bytemuck::cast_slice(mx_ref),
);
let depth_texture = self.device.create_texture(&wgpu::TextureDescriptor {
            size: wgpu::Extent3d {
                width,
                height,
                depth: 1,
            },
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: Self::DEPTH_FORMAT,
usage: wgpu::TextureUsage::RENDER_ATTACHMENT,
label: Some("Depth Texture"),
});
self.forward_depth = depth_texture.create_view(&wgpu::TextureViewDescriptor::default());
self.camera_projection = mx_total;
}
}