diff --git a/src/main.rs b/src/main.rs index 4043ae20..3029a98c 100644 --- a/src/main.rs +++ b/src/main.rs @@ -54,32 +54,7 @@ mod util; mod button; mod workpiece; - /* -What next? -Second sprite for rendering paths at x10 or so resolution -color bucketing - -Textures and Sprites cannot live in the same struct as there is no way for a sprite to own -its texture and become a single object (rust self-referencing structs) - -I want to pull out the textures into their own managing struct instead -But I want to be able to modify the texture after I give it to a sprite which is an issue -So if I place all the textures in a single container and then let a sprite borrow that container -I will no longer be able to modify any of the textures - -I have to pass in the textures to the sprite individually so they don't get borrow poisoned -It seems like I can put the textures in a struct as long as I pass the struct.texture explicitly - -So at first glance it seems like we need to - + create a texture - + assign that texture to a sprite -And any time we want to update the texture. We need to delete the sprite - -So I'm kinda coming to the conclusion here that rust SFML is not made for -frequent updates to the screen... - -Let's take a look at how easy it would be to replace SFML... */ fn main() { @@ -105,16 +80,8 @@ fn main() { - - - - - - let font = Font::from_file("resources/fonts/sansation.ttf").unwrap(); - - let mut window = RenderWindow::new( (900, 900), "Custom drawable", @@ -225,530 +192,4 @@ fn main() { window.display(); } -} - - -/* -use vulkano::buffer::{BufferUsage, CpuAccessibleBuffer, DeviceLocalBuffer, ImmutableBuffer, BufferAccess}; -use vulkano::command_buffer::{AutoCommandBufferBuilder, DynamicState}; -use vulkano::descriptor::descriptor_set::{PersistentDescriptorSet, StdDescriptorPoolAlloc}; -use vulkano::device::{Device, DeviceExtensions, QueuesIter, Queue}; -use vulkano::framebuffer::{Framebuffer, FramebufferAbstract, RenderPassAbstract, Subpass}; -use vulkano::instance::{Instance, InstanceExtensions, PhysicalDevice, QueueFamily}; -use vulkano::pipeline::{ComputePipeline, GraphicsPipeline}; -use vulkano::pipeline::viewport::Viewport; -use vulkano::sync::{FlushError, GpuFuture}; -use vulkano::sync; -use vulkano::image::SwapchainImage; -use vulkano::swapchain::{AcquireError, PresentMode, SurfaceTransform, Swapchain, SwapchainCreationError}; -use vulkano::swapchain; -use std::time::SystemTime; -use std::sync::Arc; -use std::ffi::CStr; -use std::path::PathBuf; -use shade_runner as sr; -use image::{DynamicImage, GenericImage, GenericImageView, ImageBuffer}; -use vulkano::descriptor::pipeline_layout::PipelineLayout; -use shade_runner::{ComputeLayout, CompileError}; -use vulkano::descriptor::descriptor_set::PersistentDescriptorSetBuf; -use shaderc::CompileOptions; -use winit::{Event, EventsLoop, Window, WindowBuilder, WindowEvent}; -use vulkano_win::VkSurfaceBuild; -use vulkano::SafeDeref; - - - -fn main() { - let instance = { - let extensions = vulkano_win::required_extensions(); - Instance::new(None, &extensions, None).unwrap() - }; - - let physical = PhysicalDevice::enumerate(&instance).next().unwrap(); - - // The objective of this example is to draw a triangle on a window. To do so, we first need to - // create the window. - // - // This is done by creating a `WindowBuilder` from the `winit` crate, then calling the - // `build_vk_surface` method provided by the `VkSurfaceBuild` trait from `vulkano_win`. 
If you - // ever get an error about `build_vk_surface` being undefined in one of your projects, this - // probably means that you forgot to import this trait. - // - // This returns a `vulkano::swapchain::Surface` object that contains both a cross-platform winit - // window and a cross-platform Vulkan surface that represents the surface of the window. - let mut events_loop = EventsLoop::new(); - - let surface = WindowBuilder::new().build_vk_surface(&events_loop, instance.clone()).unwrap(); - let window = surface.window(); - - let queue_family = physical.queue_families().find(|&q| { - // We take the first queue that supports drawing to our window. - q.supports_graphics() && - surface.is_supported(q).unwrap_or(false) && - q.supports_compute() - }).unwrap(); - - let device_ext = DeviceExtensions { khr_swapchain: true, ..DeviceExtensions::none() }; - let (device, mut queues) = Device::new(physical, physical.supported_features(), &device_ext, - [(queue_family, 0.5)].iter().cloned()).unwrap(); - - let queue = queues.next().unwrap(); - - // Before we can draw on the surface, we have to create what is called a swapchain. Creating - // a swapchain allocates the color buffers that will contain the image that will ultimately - // be visible on the screen. These images are returned alongside with the swapchain. - let (mut swapchain, images) = { - // Querying the capabilities of the surface. When we create the swapchain we can only - // pass values that are allowed by the capabilities. - let capabilities = surface.capabilities(physical).unwrap(); - - let usage = capabilities.supported_usage_flags; - - // The alpha mode indicates how the alpha value of the final image will behave. For example - // you can choose whether the window will be opaque or transparent. - let alpha = capabilities.supported_composite_alpha.iter().next().unwrap(); - - // Choosing the internal format that the images will have. - let format = capabilities.supported_formats[0].0; - - // The dimensions of the window, only used to initially setup the swapchain. - // NOTE: - // On some drivers the swapchain dimensions are specified by `caps.current_extent` and the - // swapchain size must use these dimensions. - // These dimensions are always the same as the window dimensions - // - // However other drivers dont specify a value i.e. `caps.current_extent` is `None` - // These drivers will allow anything but the only sensible value is the window dimensions. - // - // Because for both of these cases, the swapchain needs to be the window dimensions, we just use that. - let initial_dimensions = if let Some(dimensions) = window.get_inner_size() { - // convert to physical pixels - let dimensions: (u32, u32) = dimensions.to_physical(window.get_hidpi_factor()).into(); - [dimensions.0, dimensions.1] - } else { - // The window no longer exists so exit the application. - return; - }; - - // Please take a look at the docs for the meaning of the parameters we didn't mention. - Swapchain::new(device.clone(), surface.clone(), capabilities.min_image_count, format, - initial_dimensions, 1, usage, &queue, SurfaceTransform::Identity, alpha, - PresentMode::Fifo, true, None).unwrap() - }; - - - // We now create a buffer that will store the shape of our triangle. 
- let vertex_buffer = { - #[derive(Default, Debug, Clone)] - struct Vertex { position: [f32; 2] } - vulkano::impl_vertex!(Vertex, position); - - CpuAccessibleBuffer::from_iter(device.clone(), BufferUsage::all(), [ - Vertex { position: [-0.5, -0.25] }, - Vertex { position: [0.0, 0.5] }, - Vertex { position: [0.25, -0.1] } - ].iter().cloned()).unwrap() - }; - - mod vs { - vulkano_shaders::shader! { - ty: "vertex", - src: " -#version 450 - -layout(location = 0) in vec2 position; - -void main() { - gl_Position = vec4(position, 0.0, 1.0); -}" - } - } - - mod fs { - vulkano_shaders::shader! { - ty: "fragment", - src: " -#version 450 - -layout(location = 0) out vec4 f_color; - -void main() { - f_color = vec4(1.0, 0.0, 0.0, 1.0); -} -" - } - } - - let vs = vs::Shader::load(device.clone()).unwrap(); - let fs = fs::Shader::load(device.clone()).unwrap(); - - // The next step is to create a *render pass*, which is an object that describes where the - // output of the graphics pipeline will go. It describes the layout of the images - // where the colors, depth and/or stencil information will be written. - let render_pass = Arc::new(vulkano::single_pass_renderpass!( - device.clone(), - attachments: { - // `color` is a custom name we give to the first and only attachment. - color: { - // `load: Clear` means that we ask the GPU to clear the content of this - // attachment at the start of the drawing. - load: Clear, - // `store: Store` means that we ask the GPU to store the output of the draw - // in the actual image. We could also ask it to discard the result. - store: Store, - // `format: ` indicates the type of the format of the image. This has to - // be one of the types of the `vulkano::format` module (or alternatively one - // of your structs that implements the `FormatDesc` trait). Here we use the - // same format as the swapchain. - format: swapchain.format(), - // TODO: - samples: 1, - } - }, - pass: { - // We use the attachment named `color` as the one and only color attachment. - color: [color], - // No depth-stencil attachment is indicated with empty brackets. - depth_stencil: {} - } - ).unwrap()); - - // Before we draw we have to create what is called a pipeline. This is similar to an OpenGL - // program, but much more specific. - let pipeline = Arc::new(GraphicsPipeline::start() - // We need to indicate the layout of the vertices. - // The type `SingleBufferDefinition` actually contains a template parameter corresponding - // to the type of each vertex. But in this code it is automatically inferred. - .vertex_input_single_buffer() - // A Vulkan shader can in theory contain multiple entry points, so we have to specify - // which one. The `main` word of `main_entry_point` actually corresponds to the name of - // the entry point. - .vertex_shader(vs.main_entry_point(), ()) - // The content of the vertex buffer describes a list of triangles. - .triangle_list() - // Use a resizable viewport set to draw over the entire window - .viewports_dynamic_scissors_irrelevant(1) - // See `vertex_shader`. - .fragment_shader(fs.main_entry_point(), ()) - // We have to indicate which subpass of which render pass this pipeline is going to be used - // in. The pipeline will only be usable from this particular subpass. - .render_pass(Subpass::from(render_pass.clone(), 0).unwrap()) - // Now that our builder is filled, we call `build()` to obtain an actual pipeline. 
- .build(device.clone()) - .unwrap()); - - // Dynamic viewports allow us to recreate just the viewport when the window is resized - // Otherwise we would have to recreate the whole pipeline. - let mut dynamic_state = DynamicState { line_width: None, viewports: None, scissors: None }; - - // The render pass we created above only describes the layout of our framebuffers. Before we - // can draw we also need to create the actual framebuffers. - // - // Since we need to draw to multiple images, we are going to create a different framebuffer for - // each image. - let mut framebuffers = window_size_dependent_setup(&images, render_pass.clone(), &mut dynamic_state); - - // Initialization is finally finished! - - // In some situations, the swapchain will become invalid by itself. This includes for example - // when the window is resized (as the images of the swapchain will no longer match the - // window's) or, on Android, when the application went to the background and goes back to the - // foreground. - // - // In this situation, acquiring a swapchain image or presenting it will return an error. - // Rendering to an image of that swapchain will not produce any error, but may or may not work. - // To continue rendering, we need to recreate the swapchain by creating a new swapchain. - // Here, we remember that we need to do this for the next loop iteration. - let mut recreate_swapchain = false; - - // In the loop below we are going to submit commands to the GPU. Submitting a command produces - // an object that implements the `GpuFuture` trait, which holds the resources for as long as - // they are in use by the GPU. - // - // Destroying the `GpuFuture` blocks until the GPU is finished executing it. In order to avoid - // that, we store the submission of the previous frame here. - let mut previous_frame_end = Box::new(sync::now(device.clone())) as Box; - - loop { - // It is important to call this function from time to time, otherwise resources will keep - // accumulating and you will eventually reach an out of memory error. - // Calling this function polls various fences in order to determine what the GPU has - // already processed, and frees the resources that are no longer needed. - previous_frame_end.cleanup_finished(); - - // Whenever the window resizes we need to recreate everything dependent on the window size. - // In this example that includes the swapchain, the framebuffers and the dynamic state viewport. - if recreate_swapchain { - // Get the new dimensions of the window. - let dimensions = if let Some(dimensions) = window.get_inner_size() { - let dimensions: (u32, u32) = dimensions.to_physical(window.get_hidpi_factor()).into(); - [dimensions.0, dimensions.1] - } else { - return; - }; - - let (new_swapchain, new_images) = match swapchain.recreate_with_dimension(dimensions) { - Ok(r) => r, - // This error tends to happen when the user is manually resizing the window. - // Simply restarting the loop is the easiest way to fix this issue. - Err(SwapchainCreationError::UnsupportedDimensions) => continue, - Err(err) => panic!("{:?}", err) - }; - - swapchain = new_swapchain; - // Because framebuffers contains an Arc on the old swapchain, we need to - // recreate framebuffers as well. - framebuffers = window_size_dependent_setup(&new_images, render_pass.clone(), &mut dynamic_state); - - recreate_swapchain = false; - } - - // Before we can draw on the output, we have to *acquire* an image from the swapchain. 
If - // no image is available (which happens if you submit draw commands too quickly), then the - // function will block. - // This operation returns the index of the image that we are allowed to draw upon. - // - // This function can block if no image is available. The parameter is an optional timeout - // after which the function call will return an error. - let (image_num, acquire_future) = match swapchain::acquire_next_image(swapchain.clone(), None) { - Ok(r) => r, - Err(AcquireError::OutOfDate) => { - recreate_swapchain = true; - continue; - } - Err(err) => panic!("{:?}", err) - }; - - // Specify the color to clear the framebuffer with i.e. blue - let clear_values = vec!([0.0, 0.0, 1.0, 1.0].into()); - - - { - let project_root = - std::env::current_dir() - .expect("failed to get root directory"); - - let mut compute_path = project_root.clone(); - compute_path.push(PathBuf::from("resources/shaders/")); - compute_path.push(PathBuf::from("simple-edge.compute")); - - - let mut options = CompileOptions::new().ok_or(CompileError::CreateCompiler).unwrap(); - options.add_macro_definition("SETTING_POS_X", Some("0")); - options.add_macro_definition("SETTING_POS_Y", Some("1")); - options.add_macro_definition("SETTING_BUCKETS_START", Some("2")); - options.add_macro_definition("SETTING_BUCKETS_LEN", Some("2")); - - let shader = - shade_runner::load_compute_with_options(compute_path, options) - .expect("Failed to compile"); - - let vulkano_entry = - shade_runner::parse_compute(&shader) - .expect("failed to parse"); - - let x = unsafe { - vulkano::pipeline::shader::ShaderModule::from_words(device.clone(), &shader.compute) - }.unwrap(); - - let c_pipeline = Arc::new({ - unsafe { - ComputePipeline::new(device.clone(), &x.compute_entry_point( - CStr::from_bytes_with_nul_unchecked(b"main\0"), - vulkano_entry.compute_layout), &(), - ).unwrap() - } - }); - - let project_root = - std::env::current_dir() - .expect("failed to get root directory"); - - let mut compute_path = project_root.clone(); - compute_path.push(PathBuf::from("resources/images/")); - compute_path.push(PathBuf::from("funky-bird.jpg")); - - let img = image::open(compute_path).expect("Couldn't find image"); - - let xy = img.dimensions(); - - let data_length = xy.0 * xy.1 * 4; - let pixel_count = img.raw_pixels().len(); - println!("Pixel count {}", pixel_count); - - let mut image_buffer = Vec::new(); - if pixel_count != data_length as usize { - println!("Creating apha channel..."); - for i in img.raw_pixels().iter() { - if (image_buffer.len() + 1) % 4 == 0 { - image_buffer.push(255); - } - image_buffer.push(*i); - } - image_buffer.push(255); - } else { - image_buffer = img.raw_pixels(); - } - - println!("Buffer length {}", image_buffer.len()); - println!("Size {:?}", xy); - - println!("Allocating Buffers..."); - - // Pull out the image data and place it in a buffer for the kernel to write to and for us to read from - let write_buffer = { - let mut buff = image_buffer.iter(); - let data_iter = (0..data_length).map(|n| *(buff.next().unwrap())); - CpuAccessibleBuffer::from_iter(device.clone(), BufferUsage::all(), data_iter).unwrap() - }; - - - // Pull out the image data and place it in a buffer for the kernel to read from - let read_buffer = { - let mut buff = image_buffer.iter(); - let data_iter = (0..data_length).map(|n| *(buff.next().unwrap())); - CpuAccessibleBuffer::from_iter(device.clone(), BufferUsage::all(), data_iter).unwrap() - }; - - - // A buffer to hold many i32 values to use as settings - let settings_buffer = { - let vec = 
vec![xy.0, xy.1]; - let mut buff = vec.iter(); - let data_iter = - (0..2).map(|n| *(buff.next().unwrap())); - CpuAccessibleBuffer::from_iter(device.clone(), - BufferUsage::all(), - data_iter).unwrap() - }; - - println!("Done"); - - // Create the data descriptor set for our previously created shader pipeline - let mut set = - PersistentDescriptorSet::start(c_pipeline.clone(), 0) - .add_buffer(write_buffer.clone()).unwrap() - .add_buffer(read_buffer.clone()).unwrap() - .add_buffer(settings_buffer.clone()).unwrap(); - - let mut set = Arc::new(set.build().unwrap()); - - - - - // In order to draw, we have to build a *command buffer*. The command buffer object holds - // the list of commands that are going to be executed. - // - // Building a command buffer is an expensive operation (usually a few hundred - // microseconds), but it is known to be a hot path in the driver and is expected to be - // optimized. - // - // Note that we have to pass a queue family when we create the command buffer. The command - // buffer will only be executable on that given queue family. - let command_buffer = - AutoCommandBufferBuilder::primary_one_time_submit(device.clone(), queue.family()) - .unwrap() - - .dispatch([xy.0, xy.1, 1], - c_pipeline.clone(), - set.clone(), ()).unwrap() - // Before we can draw, we have to *enter a render pass*. There are two methods to do - // this: `draw_inline` and `draw_secondary`. The latter is a bit more advanced and is - // not covered here. - // - // The third parameter builds the list of values to clear the attachments with. The API - // is similar to the list of attachments when building the framebuffers, except that - // only the attachments that use `load: Clear` appear in the list. - .begin_render_pass(framebuffers[image_num].clone(), false, clear_values) - .unwrap() - - - // We are now inside the first subpass of the render pass. We add a draw command. - // - // The last two parameters contain the list of resources to pass to the shaders. - // Since we used an `EmptyPipeline` object, the objects have to be `()`. - .draw(pipeline.clone(), &dynamic_state, vertex_buffer.clone(), (), ()) - .unwrap() - - // We leave the render pass by calling `draw_end`. Note that if we had multiple - // subpasses we could have called `next_inline` (or `next_secondary`) to jump to the - // next subpass. - .end_render_pass() - .unwrap() - - // Finish building the command buffer by calling `build`. - .build().unwrap(); - - let future = previous_frame_end.join(acquire_future) - .then_execute(queue.clone(), command_buffer).unwrap() - - // The color output is now expected to contain our triangle. But in order to show it on - // the screen, we have to *present* the image by calling `present`. - // - // This function does not actually present the image immediately. Instead it submits a - // present command at the end of the queue. This means that it will only be presented once - // the GPU has finished executing the command buffer that draws the triangle. 
- .then_swapchain_present(queue.clone(), swapchain.clone(), image_num) - .then_signal_fence_and_flush(); - - match future { - Ok(future) => { - previous_frame_end = Box::new(future) as Box<_>; - } - Err(FlushError::OutOfDate) => { - recreate_swapchain = true; - previous_frame_end = Box::new(sync::now(device.clone())) as Box<_>; - } - Err(e) => { - println!("{:?}", e); - previous_frame_end = Box::new(sync::now(device.clone())) as Box<_>; - } - } - } - // Note that in more complex programs it is likely that one of `acquire_next_image`, - // `command_buffer::submit`, or `present` will block for some time. This happens when the - // GPU's queue is full and the driver has to wait until the GPU finished some work. - // - // Unfortunately the Vulkan API doesn't provide any way to not wait or to detect when a - // wait would happen. Blocking may be the desired behavior, but if you don't want to - // block you should spawn a separate thread dedicated to submissions. - - // Handling the window events in order to close the program when the user wants to close - // it. - let mut done = false; - events_loop.poll_events(|ev| { - match ev { - Event::WindowEvent { event: WindowEvent::CloseRequested, .. } => done = true, - Event::WindowEvent { event: WindowEvent::Resized(_), .. } => recreate_swapchain = true, - _ => () - } - }); - if done { return; } - } -} - -/// This method is called once during initialization, then again whenever the window is resized -fn window_size_dependent_setup( - images: &[Arc>], - render_pass: Arc, - dynamic_state: &mut DynamicState, -) -> Vec> { - let dimensions = images[0].dimensions(); - - let viewport = Viewport { - origin: [0.0, 0.0], - dimensions: [dimensions[0] as f32, dimensions[1] as f32], - depth_range: 0.0..1.0, - }; - dynamic_state.viewports = Some(vec!(viewport)); - - images.iter().map(|image| { - Arc::new( - Framebuffer::start(render_pass.clone()) - .add(image.clone()).unwrap() - .build().unwrap() - ) as Arc - }).collect::>() -} - -*/ \ No newline at end of file +} \ No newline at end of file diff --git a/src/vkprocessor.rs b/src/vkprocessor.rs index 90ec9632..d8260ac1 100644 --- a/src/vkprocessor.rs +++ b/src/vkprocessor.rs @@ -1,10 +1,10 @@ use vulkano::buffer::{BufferUsage, CpuAccessibleBuffer, DeviceLocalBuffer, ImmutableBuffer, BufferAccess}; -use vulkano::command_buffer::AutoCommandBufferBuilder; +use vulkano::command_buffer::{AutoCommandBufferBuilder, DynamicState}; use vulkano::descriptor::descriptor_set::{PersistentDescriptorSet, StdDescriptorPoolAlloc}; use vulkano::device::{Device, DeviceExtensions, QueuesIter, Queue}; use vulkano::instance::{Instance, InstanceExtensions, PhysicalDevice, QueueFamily}; use vulkano::pipeline::{ComputePipeline, GraphicsPipeline, GraphicsPipelineAbstract}; -use vulkano::sync::GpuFuture; +use vulkano::sync::{GpuFuture, FlushError}; use vulkano::sync; use std::time::SystemTime; use std::sync::Arc; @@ -18,15 +18,44 @@ use image::GenericImage; use shade_runner::{ComputeLayout, CompileError, FragLayout, FragInput, FragOutput, VertInput, VertOutput, VertLayout}; use vulkano::descriptor::descriptor_set::PersistentDescriptorSetBuf; use shaderc::CompileOptions; -use vulkano::framebuffer::{Subpass, RenderPass}; +use vulkano::framebuffer::{Subpass, RenderPass, RenderPassAbstract, Framebuffer, FramebufferAbstract}; use vulkano::pipeline::shader::{GraphicsShaderType, ShaderModule, GraphicsEntryPoint, SpecializationConstants, SpecializationMapEntry}; -use vulkano::swapchain::{Swapchain, PresentMode, SurfaceTransform, Surface}; +use 
vulkano::swapchain::{Swapchain, PresentMode, SurfaceTransform, Surface, SwapchainCreationError, AcquireError}; +use vulkano::swapchain::acquire_next_image; use vulkano::image::swapchain::SwapchainImage; -use winit::{EventsLoop, WindowBuilder, Window}; +use winit::{EventsLoop, WindowBuilder, Window, Event, WindowEvent}; use vulkano_win::VkSurfaceBuild; use vulkano::pipeline::vertex::{SingleBufferDefinition, Vertex}; use vulkano::descriptor::PipelineLayoutAbstract; use std::alloc::Layout; +use vulkano::pipeline::viewport::Viewport; + +#[derive(Default, Debug, Clone)] +struct tVertex { position: [f32; 2] } + +/// This method is called once during initialization, then again whenever the window is resized +fn window_size_dependent_setup( + images: &[Arc>], + render_pass: Arc, + dynamic_state: &mut DynamicState, +) -> Vec> { + let dimensions = images[0].dimensions(); + + let viewport = Viewport { + origin: [0.0, 0.0], + dimensions: [dimensions[0] as f32, dimensions[1] as f32], + depth_range: 0.0..1.0, + }; + dynamic_state.viewports = Some(vec!(viewport)); + + images.iter().map(|image| { + Arc::new( + Framebuffer::start(render_pass.clone()) + .add(image.clone()).unwrap() + .build().unwrap() + ) as Arc + }).collect::>() +} #[repr(C)] struct MySpecConstants { @@ -75,6 +104,8 @@ pub struct VkProcessor<'a> { pub swapchain: Option>>, pub images: Option>>>, pub xy: (u32, u32), + pub render_pass: Option>, + pub vertex_buffer: Option>>, } impl<'a> VkProcessor<'a> { @@ -114,6 +145,8 @@ impl<'a> VkProcessor<'a> { swapchain: Option::None, images: Option::None, xy: (0,0), + render_pass: Option::None, + vertex_buffer: Option::None, } } @@ -329,37 +362,193 @@ impl<'a> VkProcessor<'a> { self.pipeline = Option::Some(Arc::new(pipeline)); } + pub fn create_renderpass(&mut self) { - let render_pass = Arc::new(vulkano::single_pass_renderpass!( - self.device.clone(), - attachments: { - // `color` is a custom name we give to the first and only attachment. - color: { - // `load: Clear` means that we ask the GPU to clear the content of this - // attachment at the start of the drawing. - load: Clear, - // `store: Store` means that we ask the GPU to store the output of the draw - // in the actual image. We could also ask it to discard the result. - store: Store, - // `format: ` indicates the type of the format of the image. This has to - // be one of the types of the `vulkano::format` module (or alternatively one - // of your structs that implements the `FormatDesc` trait). Here we use the - // same format as the swapchain. - format: self.swapchain.clone().unwrap().clone().format(), - // TODO: - samples: 1, + + + } + +// Onto the actual vulkan loop + pub fn run_loop(&mut self, surface : &'a Arc>){ + + // Dynamic viewports allow us to recreate just the viewport when the window is resized + // Otherwise we would have to recreate the whole pipeline. + let mut dynamic_state = DynamicState { line_width: None, viewports: None, scissors: None }; + + // The render pass we created above only describes the layout of our framebuffers. Before we + // can draw we also need to create the actual framebuffers. + // + // Since we need to draw to multiple images, we are going to create a different framebuffer for + // each image. + let mut framebuffers = window_size_dependent_setup(&self.images.unwrap(), self.render_pass.clone().unwrap().clone(), &mut dynamic_state); + + // Initialization is finally finished! + + // In some situations, the swapchain will become invalid by itself. 
This includes for example + // when the window is resized (as the images of the swapchain will no longer match the + // window's) or, on Android, when the application went to the background and goes back to the + // foreground. + // + // In this situation, acquiring a swapchain image or presenting it will return an error. + // Rendering to an image of that swapchain will not produce any error, but may or may not work. + // To continue rendering, we need to recreate the swapchain by creating a new swapchain. + // Here, we remember that we need to do this for the next loop iteration. + let mut recreate_swapchain = false; + + // In the loop below we are going to submit commands to the GPU. Submitting a command produces + // an object that implements the `GpuFuture` trait, which holds the resources for as long as + // they are in use by the GPU. + // + // Destroying the `GpuFuture` blocks until the GPU is finished executing it. In order to avoid + // that, we store the submission of the previous frame here. + let mut previous_frame_end = Box::new(sync::now(self.device.clone())) as Box; + loop { + // It is important to call this function from time to time, otherwise resources will keep + // accumulating and you will eventually reach an out of memory error. + // Calling this function polls various fences in order to determine what the GPU has + // already processed, and frees the resources that are no longer needed. + previous_frame_end.cleanup_finished(); + + // Whenever the window resizes we need to recreate everything dependent on the window size. + // In this example that includes the swapchain, the framebuffers and the dynamic state viewport. + if recreate_swapchain { + // Get the new dimensions of the window. + + let dimensions = if let Some(dimensions) = surface.window().get_inner_size() { + let dimensions: (u32, u32) = dimensions.to_physical(surface.window().get_hidpi_factor()).into(); + [dimensions.0, dimensions.1] + } else { + return; + }; + + let (new_swapchain, new_images) = match self.swapchain.unwrap().recreate_with_dimension(dimensions) { + Ok(r) => r, + // This error tends to happen when the user is manually resizing the window. + // Simply restarting the loop is the easiest way to fix this issue. + Err(SwapchainCreationError::UnsupportedDimensions) => continue, + Err(err) => panic!("{:?}", err) + }; + + self.swapchain = Some(new_swapchain); + // Because framebuffers contains an Arc on the old swapchain, we need to + // recreate framebuffers as well. + framebuffers = window_size_dependent_setup(&new_images, self.render_pass.unwrap().clone(), &mut dynamic_state); + + recreate_swapchain = false; + } + + // Before we can draw on the output, we have to *acquire* an image from the swapchain. If + // no image is available (which happens if you submit draw commands too quickly), then the + // function will block. + // This operation returns the index of the image that we are allowed to draw upon. + // + // This function can block if no image is available. The parameter is an optional timeout + // after which the function call will return an error. + let (image_num, acquire_future) = match vulkano::swapchain::acquire_next_image(self.swapchain.unwrap().clone(), None) { + Ok(r) => r, + Err(AcquireError::OutOfDate) => { + recreate_swapchain = true; + continue; + } + Err(err) => panic!("{:?}", err) + }; + + // Specify the color to clear the framebuffer with i.e. blue + let clear_values = vec!([0.0, 0.0, 1.0, 1.0].into()); + + + { + // In order to draw, we have to build a *command buffer*. 
The command buffer object holds + // the list of commands that are going to be executed. + // + // Building a command buffer is an expensive operation (usually a few hundred + // microseconds), but it is known to be a hot path in the driver and is expected to be + // optimized. + // + // Note that we have to pass a queue family when we create the command buffer. The command + // buffer will only be executable on that given queue family. + let command_buffer = + AutoCommandBufferBuilder::primary_one_time_submit(self.device.clone(), self.queue.family()) + .unwrap() + +// .dispatch([self.xy.0, self.xy.1, 1], +// self.compute_pipeline.clone(), +// self.set.clone(), ()).unwrap() + // Before we can draw, we have to *enter a render pass*. There are two methods to do + // this: `draw_inline` and `draw_secondary`. The latter is a bit more advanced and is + // not covered here. + // + // The third parameter builds the list of values to clear the attachments with. The API + // is similar to the list of attachments when building the framebuffers, except that + // only the attachments that use `load: Clear` appear in the list. + .begin_render_pass(framebuffers[image_num].clone(), false, clear_values) + .unwrap() + + + // We are now inside the first subpass of the render pass. We add a draw command. + // + // The last two parameters contain the list of resources to pass to the shaders. + // Since we used an `EmptyPipeline` object, the objects have to be `()`. + .draw(self.pipeline.clone(), &dynamic_state, self.vertex_buffer.clone().unwrap().clone(), (), ()) + .unwrap() + + // We leave the render pass by calling `draw_end`. Note that if we had multiple + // subpasses we could have called `next_inline` (or `next_secondary`) to jump to the + // next subpass. + .end_render_pass() + .unwrap() + + // Finish building the command buffer by calling `build`. + .build().unwrap(); + + let future = previous_frame_end.join(acquire_future) + .then_execute(self.queue.clone(), command_buffer).unwrap() + + // The color output is now expected to contain our triangle. But in order to show it on + // the screen, we have to *present* the image by calling `present`. + // + // This function does not actually present the image immediately. Instead it submits a + // present command at the end of the queue. This means that it will only be presented once + // the GPU has finished executing the command buffer that draws the triangle. + .then_swapchain_present(self.queue.clone(), self.swapchain.unwrap().clone(), image_num) + .then_signal_fence_and_flush(); + + match future { + Ok(future) => { + previous_frame_end = Box::new(future) as Box<_>; + } + Err(FlushError::OutOfDate) => { + recreate_swapchain = true; + previous_frame_end = Box::new(sync::now(self.device.clone())) as Box<_>; + } + Err(e) => { + println!("{:?}", e); + previous_frame_end = Box::new(sync::now(self.device.clone())) as Box<_>; + } } - }, - pass: { - // We use the attachment named `color` as the one and only color attachment. - color: [color], - // No depth-stencil attachment is indicated with empty brackets. - depth_stencil: {} } - ).unwrap()); + // Note that in more complex programs it is likely that one of `acquire_next_image`, + // `command_buffer::submit`, or `present` will block for some time. This happens when the + // GPU's queue is full and the driver has to wait until the GPU finished some work. + // + // Unfortunately the Vulkan API doesn't provide any way to not wait or to detect when a + // wait would happen. 
Blocking may be the desired behavior, but if you don't want to + // block you should spawn a separate thread dedicated to submissions. + + // Handling the window events in order to close the program when the user wants to close + // it. + let mut done = true; +// events_loop.poll_events(|ev| { +// match ev { +// Event::WindowEvent { event: WindowEvent::CloseRequested, .. } => done = true, +// Event::WindowEvent { event: WindowEvent::Resized(_), .. } => recreate_swapchain = true, +// _ => () +// } +// }); + if done { return; } + } } - pub fn load_buffers(&mut self, image_filename: String) { let project_root = @@ -437,6 +626,22 @@ impl<'a> VkProcessor<'a> { self.img_buffers.push(write_buffer); self.img_buffers.push(read_buffer); self.settings_buffer = Some(settings_buffer); + + + // We now create a buffer that will store the shape of our triangle. + let vertex_buffer = { + #[derive(Default, Debug, Clone)] + struct Vertex { position: [f32; 2] } + vulkano::impl_vertex!(tVertex, position); + + CpuAccessibleBuffer::from_iter(self.device.clone(), BufferUsage::all(), [ + tVertex { position: [-0.5, -0.25] }, + tVertex { position: [0.0, 0.5] }, + tVertex { position: [0.25, -0.1] } + ].iter().cloned()).unwrap() + }; + + self.vertex_buffer = Some(vertex_buffer); } pub fn run_kernel(&mut self) { diff --git a/src/vulkan_example.rs b/src/vulkan_example.rs new file mode 100644 index 00000000..8f011e2d --- /dev/null +++ b/src/vulkan_example.rs @@ -0,0 +1,523 @@ + +use vulkano::buffer::{BufferUsage, CpuAccessibleBuffer, DeviceLocalBuffer, ImmutableBuffer, BufferAccess}; +use vulkano::command_buffer::{AutoCommandBufferBuilder, DynamicState}; +use vulkano::descriptor::descriptor_set::{PersistentDescriptorSet, StdDescriptorPoolAlloc}; +use vulkano::device::{Device, DeviceExtensions, QueuesIter, Queue}; +use vulkano::framebuffer::{Framebuffer, FramebufferAbstract, RenderPassAbstract, Subpass}; +use vulkano::instance::{Instance, InstanceExtensions, PhysicalDevice, QueueFamily}; +use vulkano::pipeline::{ComputePipeline, GraphicsPipeline}; +use vulkano::pipeline::viewport::Viewport; +use vulkano::sync::{FlushError, GpuFuture}; +use vulkano::sync; +use vulkano::image::SwapchainImage; +use vulkano::swapchain::{AcquireError, PresentMode, SurfaceTransform, Swapchain, SwapchainCreationError}; +use vulkano::swapchain; +use std::time::SystemTime; +use std::sync::Arc; +use std::ffi::CStr; +use std::path::PathBuf; +use shade_runner as sr; +use image::{DynamicImage, GenericImage, GenericImageView, ImageBuffer}; +use vulkano::descriptor::pipeline_layout::PipelineLayout; +use shade_runner::{ComputeLayout, CompileError}; +use vulkano::descriptor::descriptor_set::PersistentDescriptorSetBuf; +use shaderc::CompileOptions; +use winit::{Event, EventsLoop, Window, WindowBuilder, WindowEvent}; +use vulkano_win::VkSurfaceBuild; +use vulkano::SafeDeref; + + + +fn main() { + let instance = { + let extensions = vulkano_win::required_extensions(); + Instance::new(None, &extensions, None).unwrap() + }; + + let physical = PhysicalDevice::enumerate(&instance).next().unwrap(); + + // The objective of this example is to draw a triangle on a window. To do so, we first need to + // create the window. + // + // This is done by creating a `WindowBuilder` from the `winit` crate, then calling the + // `build_vk_surface` method provided by the `VkSurfaceBuild` trait from `vulkano_win`. If you + // ever get an error about `build_vk_surface` being undefined in one of your projects, this + // probably means that you forgot to import this trait. 
+ // + // This returns a `vulkano::swapchain::Surface` object that contains both a cross-platform winit + // window and a cross-platform Vulkan surface that represents the surface of the window. + let mut events_loop = EventsLoop::new(); + + let surface = WindowBuilder::new().build_vk_surface(&events_loop, instance.clone()).unwrap(); + let window = surface.window(); + + let queue_family = physical.queue_families().find(|&q| { + // We take the first queue that supports drawing to our window. + q.supports_graphics() && + surface.is_supported(q).unwrap_or(false) && + q.supports_compute() + }).unwrap(); + + let device_ext = DeviceExtensions { khr_swapchain: true, ..DeviceExtensions::none() }; + let (device, mut queues) = Device::new(physical, physical.supported_features(), &device_ext, + [(queue_family, 0.5)].iter().cloned()).unwrap(); + + let queue = queues.next().unwrap(); + + // Before we can draw on the surface, we have to create what is called a swapchain. Creating + // a swapchain allocates the color buffers that will contain the image that will ultimately + // be visible on the screen. These images are returned alongside with the swapchain. + let (mut swapchain, images) = { + // Querying the capabilities of the surface. When we create the swapchain we can only + // pass values that are allowed by the capabilities. + let capabilities = surface.capabilities(physical).unwrap(); + + let usage = capabilities.supported_usage_flags; + + // The alpha mode indicates how the alpha value of the final image will behave. For example + // you can choose whether the window will be opaque or transparent. + let alpha = capabilities.supported_composite_alpha.iter().next().unwrap(); + + // Choosing the internal format that the images will have. + let format = capabilities.supported_formats[0].0; + + // The dimensions of the window, only used to initially setup the swapchain. + // NOTE: + // On some drivers the swapchain dimensions are specified by `caps.current_extent` and the + // swapchain size must use these dimensions. + // These dimensions are always the same as the window dimensions + // + // However other drivers dont specify a value i.e. `caps.current_extent` is `None` + // These drivers will allow anything but the only sensible value is the window dimensions. + // + // Because for both of these cases, the swapchain needs to be the window dimensions, we just use that. + let initial_dimensions = if let Some(dimensions) = window.get_inner_size() { + // convert to physical pixels + let dimensions: (u32, u32) = dimensions.to_physical(window.get_hidpi_factor()).into(); + [dimensions.0, dimensions.1] + } else { + // The window no longer exists so exit the application. + return; + }; + + // Please take a look at the docs for the meaning of the parameters we didn't mention. + Swapchain::new(device.clone(), surface.clone(), capabilities.min_image_count, format, + initial_dimensions, 1, usage, &queue, SurfaceTransform::Identity, alpha, + PresentMode::Fifo, true, None).unwrap() + }; + + + // We now create a buffer that will store the shape of our triangle. + let vertex_buffer = { + #[derive(Default, Debug, Clone)] + struct Vertex { position: [f32; 2] } + vulkano::impl_vertex!(Vertex, position); + + CpuAccessibleBuffer::from_iter(device.clone(), BufferUsage::all(), [ + Vertex { position: [-0.5, -0.25] }, + Vertex { position: [0.0, 0.5] }, + Vertex { position: [0.25, -0.1] } + ].iter().cloned()).unwrap() + }; + + mod vs { + vulkano_shaders::shader! 
{ + ty: "vertex", + src: " +#version 450 + +layout(location = 0) in vec2 position; + +void main() { + gl_Position = vec4(position, 0.0, 1.0); +}" + } + } + + mod fs { + vulkano_shaders::shader! { + ty: "fragment", + src: " +#version 450 + +layout(location = 0) out vec4 f_color; + +void main() { + f_color = vec4(1.0, 0.0, 0.0, 1.0); +} +" + } + } + + let vs = vs::Shader::load(device.clone()).unwrap(); + let fs = fs::Shader::load(device.clone()).unwrap(); + + // The next step is to create a *render pass*, which is an object that describes where the + // output of the graphics pipeline will go. It describes the layout of the images + // where the colors, depth and/or stencil information will be written. + let render_pass = Arc::new(vulkano::single_pass_renderpass!( + device.clone(), + attachments: { + // `color` is a custom name we give to the first and only attachment. + color: { + // `load: Clear` means that we ask the GPU to clear the content of this + // attachment at the start of the drawing. + load: Clear, + // `store: Store` means that we ask the GPU to store the output of the draw + // in the actual image. We could also ask it to discard the result. + store: Store, + // `format: ` indicates the type of the format of the image. This has to + // be one of the types of the `vulkano::format` module (or alternatively one + // of your structs that implements the `FormatDesc` trait). Here we use the + // same format as the swapchain. + format: swapchain.format(), + // TODO: + samples: 1, + } + }, + pass: { + // We use the attachment named `color` as the one and only color attachment. + color: [color], + // No depth-stencil attachment is indicated with empty brackets. + depth_stencil: {} + } + ).unwrap()); + + // Before we draw we have to create what is called a pipeline. This is similar to an OpenGL + // program, but much more specific. + let pipeline = Arc::new(GraphicsPipeline::start() + // We need to indicate the layout of the vertices. + // The type `SingleBufferDefinition` actually contains a template parameter corresponding + // to the type of each vertex. But in this code it is automatically inferred. + .vertex_input_single_buffer() + // A Vulkan shader can in theory contain multiple entry points, so we have to specify + // which one. The `main` word of `main_entry_point` actually corresponds to the name of + // the entry point. + .vertex_shader(vs.main_entry_point(), ()) + // The content of the vertex buffer describes a list of triangles. + .triangle_list() + // Use a resizable viewport set to draw over the entire window + .viewports_dynamic_scissors_irrelevant(1) + // See `vertex_shader`. + .fragment_shader(fs.main_entry_point(), ()) + // We have to indicate which subpass of which render pass this pipeline is going to be used + // in. The pipeline will only be usable from this particular subpass. + .render_pass(Subpass::from(render_pass.clone(), 0).unwrap()) + // Now that our builder is filled, we call `build()` to obtain an actual pipeline. + .build(device.clone()) + .unwrap()); + + // Dynamic viewports allow us to recreate just the viewport when the window is resized + // Otherwise we would have to recreate the whole pipeline. + let mut dynamic_state = DynamicState { line_width: None, viewports: None, scissors: None }; + + // The render pass we created above only describes the layout of our framebuffers. Before we + // can draw we also need to create the actual framebuffers. 
+ // + // Since we need to draw to multiple images, we are going to create a different framebuffer for + // each image. + let mut framebuffers = window_size_dependent_setup(&images, render_pass.clone(), &mut dynamic_state); + + // Initialization is finally finished! + + // In some situations, the swapchain will become invalid by itself. This includes for example + // when the window is resized (as the images of the swapchain will no longer match the + // window's) or, on Android, when the application went to the background and goes back to the + // foreground. + // + // In this situation, acquiring a swapchain image or presenting it will return an error. + // Rendering to an image of that swapchain will not produce any error, but may or may not work. + // To continue rendering, we need to recreate the swapchain by creating a new swapchain. + // Here, we remember that we need to do this for the next loop iteration. + let mut recreate_swapchain = false; + + // In the loop below we are going to submit commands to the GPU. Submitting a command produces + // an object that implements the `GpuFuture` trait, which holds the resources for as long as + // they are in use by the GPU. + // + // Destroying the `GpuFuture` blocks until the GPU is finished executing it. In order to avoid + // that, we store the submission of the previous frame here. + let mut previous_frame_end = Box::new(sync::now(device.clone())) as Box; + + loop { + // It is important to call this function from time to time, otherwise resources will keep + // accumulating and you will eventually reach an out of memory error. + // Calling this function polls various fences in order to determine what the GPU has + // already processed, and frees the resources that are no longer needed. + previous_frame_end.cleanup_finished(); + + // Whenever the window resizes we need to recreate everything dependent on the window size. + // In this example that includes the swapchain, the framebuffers and the dynamic state viewport. + if recreate_swapchain { + // Get the new dimensions of the window. + let dimensions = if let Some(dimensions) = window.get_inner_size() { + let dimensions: (u32, u32) = dimensions.to_physical(window.get_hidpi_factor()).into(); + [dimensions.0, dimensions.1] + } else { + return; + }; + + let (new_swapchain, new_images) = match swapchain.recreate_with_dimension(dimensions) { + Ok(r) => r, + // This error tends to happen when the user is manually resizing the window. + // Simply restarting the loop is the easiest way to fix this issue. + Err(SwapchainCreationError::UnsupportedDimensions) => continue, + Err(err) => panic!("{:?}", err) + }; + + swapchain = new_swapchain; + // Because framebuffers contains an Arc on the old swapchain, we need to + // recreate framebuffers as well. + framebuffers = window_size_dependent_setup(&new_images, render_pass.clone(), &mut dynamic_state); + + recreate_swapchain = false; + } + + // Before we can draw on the output, we have to *acquire* an image from the swapchain. If + // no image is available (which happens if you submit draw commands too quickly), then the + // function will block. + // This operation returns the index of the image that we are allowed to draw upon. + // + // This function can block if no image is available. The parameter is an optional timeout + // after which the function call will return an error. 
+ let (image_num, acquire_future) = match swapchain::acquire_next_image(swapchain.clone(), None) { + Ok(r) => r, + Err(AcquireError::OutOfDate) => { + recreate_swapchain = true; + continue; + } + Err(err) => panic!("{:?}", err) + }; + + // Specify the color to clear the framebuffer with i.e. blue + let clear_values = vec!([0.0, 0.0, 1.0, 1.0].into()); + + + { + let project_root = + std::env::current_dir() + .expect("failed to get root directory"); + + let mut compute_path = project_root.clone(); + compute_path.push(PathBuf::from("resources/shaders/")); + compute_path.push(PathBuf::from("simple-edge.compute")); + + + let mut options = CompileOptions::new().ok_or(CompileError::CreateCompiler).unwrap(); + options.add_macro_definition("SETTING_POS_X", Some("0")); + options.add_macro_definition("SETTING_POS_Y", Some("1")); + options.add_macro_definition("SETTING_BUCKETS_START", Some("2")); + options.add_macro_definition("SETTING_BUCKETS_LEN", Some("2")); + + let shader = + shade_runner::load_compute_with_options(compute_path, options) + .expect("Failed to compile"); + + let vulkano_entry = + shade_runner::parse_compute(&shader) + .expect("failed to parse"); + + let x = unsafe { + vulkano::pipeline::shader::ShaderModule::from_words(device.clone(), &shader.compute) + }.unwrap(); + + let c_pipeline = Arc::new({ + unsafe { + ComputePipeline::new(device.clone(), &x.compute_entry_point( + CStr::from_bytes_with_nul_unchecked(b"main\0"), + vulkano_entry.compute_layout), &(), + ).unwrap() + } + }); + + let project_root = + std::env::current_dir() + .expect("failed to get root directory"); + + let mut compute_path = project_root.clone(); + compute_path.push(PathBuf::from("resources/images/")); + compute_path.push(PathBuf::from("funky-bird.jpg")); + + let img = image::open(compute_path).expect("Couldn't find image"); + + let xy = img.dimensions(); + + let data_length = xy.0 * xy.1 * 4; + let pixel_count = img.raw_pixels().len(); + println!("Pixel count {}", pixel_count); + + let mut image_buffer = Vec::new(); + if pixel_count != data_length as usize { + println!("Creating apha channel..."); + for i in img.raw_pixels().iter() { + if (image_buffer.len() + 1) % 4 == 0 { + image_buffer.push(255); + } + image_buffer.push(*i); + } + image_buffer.push(255); + } else { + image_buffer = img.raw_pixels(); + } + + println!("Buffer length {}", image_buffer.len()); + println!("Size {:?}", xy); + + println!("Allocating Buffers..."); + + // Pull out the image data and place it in a buffer for the kernel to write to and for us to read from + let write_buffer = { + let mut buff = image_buffer.iter(); + let data_iter = (0..data_length).map(|n| *(buff.next().unwrap())); + CpuAccessibleBuffer::from_iter(device.clone(), BufferUsage::all(), data_iter).unwrap() + }; + + + // Pull out the image data and place it in a buffer for the kernel to read from + let read_buffer = { + let mut buff = image_buffer.iter(); + let data_iter = (0..data_length).map(|n| *(buff.next().unwrap())); + CpuAccessibleBuffer::from_iter(device.clone(), BufferUsage::all(), data_iter).unwrap() + }; + + + // A buffer to hold many i32 values to use as settings + let settings_buffer = { + let vec = vec![xy.0, xy.1]; + let mut buff = vec.iter(); + let data_iter = + (0..2).map(|n| *(buff.next().unwrap())); + CpuAccessibleBuffer::from_iter(device.clone(), + BufferUsage::all(), + data_iter).unwrap() + }; + + println!("Done"); + + // Create the data descriptor set for our previously created shader pipeline + let mut set = + 
PersistentDescriptorSet::start(c_pipeline.clone(), 0) + .add_buffer(write_buffer.clone()).unwrap() + .add_buffer(read_buffer.clone()).unwrap() + .add_buffer(settings_buffer.clone()).unwrap(); + + let mut set = Arc::new(set.build().unwrap()); + + + + + // In order to draw, we have to build a *command buffer*. The command buffer object holds + // the list of commands that are going to be executed. + // + // Building a command buffer is an expensive operation (usually a few hundred + // microseconds), but it is known to be a hot path in the driver and is expected to be + // optimized. + // + // Note that we have to pass a queue family when we create the command buffer. The command + // buffer will only be executable on that given queue family. + let command_buffer = + AutoCommandBufferBuilder::primary_one_time_submit(device.clone(), queue.family()) + .unwrap() + + .dispatch([xy.0, xy.1, 1], + c_pipeline.clone(), + set.clone(), ()).unwrap() + // Before we can draw, we have to *enter a render pass*. There are two methods to do + // this: `draw_inline` and `draw_secondary`. The latter is a bit more advanced and is + // not covered here. + // + // The third parameter builds the list of values to clear the attachments with. The API + // is similar to the list of attachments when building the framebuffers, except that + // only the attachments that use `load: Clear` appear in the list. + .begin_render_pass(framebuffers[image_num].clone(), false, clear_values) + .unwrap() + + + // We are now inside the first subpass of the render pass. We add a draw command. + // + // The last two parameters contain the list of resources to pass to the shaders. + // Since we used an `EmptyPipeline` object, the objects have to be `()`. + .draw(pipeline.clone(), &dynamic_state, vertex_buffer.clone(), (), ()) + .unwrap() + + // We leave the render pass by calling `draw_end`. Note that if we had multiple + // subpasses we could have called `next_inline` (or `next_secondary`) to jump to the + // next subpass. + .end_render_pass() + .unwrap() + + // Finish building the command buffer by calling `build`. + .build().unwrap(); + + let future = previous_frame_end.join(acquire_future) + .then_execute(queue.clone(), command_buffer).unwrap() + + // The color output is now expected to contain our triangle. But in order to show it on + // the screen, we have to *present* the image by calling `present`. + // + // This function does not actually present the image immediately. Instead it submits a + // present command at the end of the queue. This means that it will only be presented once + // the GPU has finished executing the command buffer that draws the triangle. + .then_swapchain_present(queue.clone(), swapchain.clone(), image_num) + .then_signal_fence_and_flush(); + + match future { + Ok(future) => { + previous_frame_end = Box::new(future) as Box<_>; + } + Err(FlushError::OutOfDate) => { + recreate_swapchain = true; + previous_frame_end = Box::new(sync::now(device.clone())) as Box<_>; + } + Err(e) => { + println!("{:?}", e); + previous_frame_end = Box::new(sync::now(device.clone())) as Box<_>; + } + } + } + // Note that in more complex programs it is likely that one of `acquire_next_image`, + // `command_buffer::submit`, or `present` will block for some time. This happens when the + // GPU's queue is full and the driver has to wait until the GPU finished some work. + // + // Unfortunately the Vulkan API doesn't provide any way to not wait or to detect when a + // wait would happen. 
Blocking may be the desired behavior, but if you don't want to + // block you should spawn a separate thread dedicated to submissions. + + // Handling the window events in order to close the program when the user wants to close + // it. + let mut done = false; + events_loop.poll_events(|ev| { + match ev { + Event::WindowEvent { event: WindowEvent::CloseRequested, .. } => done = true, + Event::WindowEvent { event: WindowEvent::Resized(_), .. } => recreate_swapchain = true, + _ => () + } + }); + if done { return; } + } +} + +/// This method is called once during initialization, then again whenever the window is resized +fn window_size_dependent_setup( + images: &[Arc>], + render_pass: Arc, + dynamic_state: &mut DynamicState, +) -> Vec> { + let dimensions = images[0].dimensions(); + + let viewport = Viewport { + origin: [0.0, 0.0], + dimensions: [dimensions[0] as f32, dimensions[1] as f32], + depth_range: 0.0..1.0, + }; + dynamic_state.viewports = Some(vec!(viewport)); + + images.iter().map(|image| { + Arc::new( + Framebuffer::start(render_pass.clone()) + .add(image.clone()).unwrap() + .build().unwrap() + ) as Arc + }).collect::>() +} +
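
Note on the new `VkProcessor::run_loop` added in `src/vkprocessor.rs`: as written it calls `.unwrap()` directly on the `Option`-typed fields (`self.images.unwrap()`, `self.swapchain.unwrap()`, `self.render_pass.unwrap()`), which tries to move those handles out of `&mut self` and will be rejected by the borrow checker, since `Vec` and `Arc` are not `Copy`. Below is a minimal, self-contained sketch of the borrow-then-clone pattern that avoids this; the `Processor`, `Swapchain`, and `SwapchainImage` types are placeholders for illustration only, not the vulkano types used in the diff.

    use std::sync::Arc;

    struct Swapchain;      // placeholder for Arc<Swapchain<Window>>
    struct SwapchainImage; // placeholder for Arc<SwapchainImage<Window>>

    struct Processor {
        swapchain: Option<Arc<Swapchain>>,
        images: Option<Vec<Arc<SwapchainImage>>>,
    }

    impl Processor {
        fn run_loop(&mut self) {
            // Borrow the images without consuming the Option; `expect` documents
            // the assumption that the swapchain was created before the loop starts.
            let images = self.images.as_ref().expect("swapchain images not created");

            // Cloning the Arc yields an owned handle while leaving the field intact,
            // so `self.swapchain` can still be replaced when the window is resized.
            let swapchain = self
                .swapchain
                .as_ref()
                .expect("swapchain not created")
                .clone();

            println!("rendering with {} swapchain images", images.len());
            let _ = swapchain;
        }
    }

    fn main() {
        let mut p = Processor {
            swapchain: Some(Arc::new(Swapchain)),
            images: Some(vec![Arc::new(SwapchainImage)]),
        };
        p.run_loop();
    }

A related small cleanup: the vertex-buffer block added to `load_buffers` declares a local `struct Vertex { position: [f32; 2] }` but then builds the buffer from the module-level `tVertex` (and registers `tVertex` with `impl_vertex!`), so the inner `Vertex` declaration is dead and could be dropped.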