#![allow(dead_code)]
#![allow(unused_variables)]
#![allow(unused_mut)]

extern crate cgmath;
extern crate image;
extern crate nalgebra as na;
extern crate rand;
extern crate sfml;
extern crate time;

use sfml::graphics::*;
use sfml::graphics::{Color, RenderTarget, RenderWindow};
use sfml::system::*;
use sfml::window::{Event, Key, Style};
use sfml::window::mouse::*;
use sfml::window::mouse;

use vulkano::sync;
use std::sync::Arc;
use std::{fs, mem, iter, ptr};
use std::path::PathBuf;
use std::result;

use crate::input::Input;
use crate::slider::Slider;
use crate::timer::Timer;
use na::DimAdd;
use std::time::{SystemTime, Duration};
use std::ffi::CStr;
use std::ptr::write;
use vulkano::buffer::{BufferUsage, CpuAccessibleBuffer, DeviceLocalBuffer, ImmutableBuffer, BufferAccess};
use vulkano::command_buffer::AutoCommandBufferBuilder;
use vulkano::descriptor::descriptor_set::PersistentDescriptorSet;
use vulkano::device::{Device, DeviceExtensions};
use vulkano::instance::{Instance, InstanceExtensions, PhysicalDevice};
use vulkano::pipeline::ComputePipeline;
use vulkano::descriptor::pipeline_layout::PipelineLayoutAbstract;
use vulkano::sync::GpuFuture;
use shaderc::CompileOptions;
use shade_runner::CompileError;
use crate::workpiece::{WorkpieceLoader, Workpiece};

mod slider;
mod timer;
mod input;
mod vkprocessor;
mod util;
mod button;
mod workpiece;

/*
What next?
+ Second sprite for rendering paths at x10 or so resolution
+ Color bucketing

Textures and Sprites cannot live in the same struct: there is no way for a sprite to own
its texture and become a single object (Rust disallows self-referencing structs).

I want to pull the textures out into their own managing struct instead, but I also want to
be able to modify a texture after I give it to a sprite, which is an issue. If I place all
the textures in a single container and then let a sprite borrow that container, I can no
longer modify any of the textures.

I have to pass each texture to its sprite individually so the rest don't get borrow-poisoned.
It seems like I can keep the textures in a struct as long as I pass `struct.texture` explicitly.

So at first glance it seems like we need to:
+ create a texture
+ assign that texture to a sprite
And any time we want to update the texture, we need to delete the sprite.

I'm coming to the conclusion here that rust-sfml is not made for
frequent updates to the screen...

Let's take a look at how easy it would be to replace SFML...
*/
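
// A minimal sketch of the ownership pattern described above, using a hypothetical
// `SpriteHolder` type (not part of this crate) and the rust-sfml `Sprite`/`Texture`
// API already used in this file. The sprite only ever *borrows* its texture, so the
// texture has to be owned outside of any struct that also holds the sprite, exactly
// like `texture` and `workpiece.render_sprite` below in `main`:
//
//     struct SpriteHolder<'t> {
//         sprite: Sprite<'t>, // borrows a Texture for the lifetime 't
//     }
//
//     fn holder_from<'t>(texture: &'t Texture) -> SpriteHolder<'t> {
//         let mut sprite = Sprite::new();
//         sprite.set_texture(texture, true); // a borrow, not a move
//         SpriteHolder { sprite }
//     }
//
// Once a sprite borrows the texture, Rust won't hand out a `&mut Texture` until the
// sprite is gone, which is exactly the pain point described in the notes above.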

fn main() {
    let font = Font::from_file("resources/fonts/sansation.ttf").unwrap();

    // Set up Vulkan and run the compute kernel over the source image, then read the result back.
    let instance = Instance::new(None, &InstanceExtensions::none(), None).unwrap();
    let mut processor = vkprocessor::VkProcessor::new(&instance);
    processor.compile_kernel(String::from("simple-edge.compute"));
    processor.load_buffers(String::from("funky-bird.jpg"));
    processor.run_kernel();
    processor.read_image();
    processor.save_image();

    let mut window = RenderWindow::new(
        (900, 900),
        "Custom drawable",
        Style::CLOSE,
        &Default::default(),
    );

    let mut timer = Timer::new();
    let mut input = Input::new();

    let xy = processor.xy;

    let mut workpieceloader = WorkpieceLoader::new(String::from("resources/images/funky-bird.jpg"));
    workpieceloader.load_first_stage(processor.read_image());

    let mut texture = Texture::from_file("resources/images/funky-bird.jpg").expect("Couldn't load image");

    let mut workpiece = Workpiece::new();
    workpiece.render_sprite.set_texture(&mut texture, false);

    let mut slider = Slider::new(Vector2f::new(40.0, 40.0), None, &font);

    let mut selected_colors = Vec::new();

    let mut button = button::Button::new(Vector2f::new(40.0, 40.0), Vector2f::new(100.0, 100.0), &font);
    button.set_text("Text");

    // Fixed-timestep bookkeeping.
    let step_size: f32 = 0.005;
    let mut elapsed_time: f32;
    let mut delta_time: f32;
    let mut accumulator_time: f32 = 0.0;
    let mut current_time: f32 = timer.elap_time();

    let mut mouse_xy = Vector2i::new(0, 0);

    while window.is_open() {
        while let Some(event) = window.poll_event() {
            match event {
                Event::Closed => return,
                Event::KeyPressed { code, .. } => {
                    if code == Key::Escape {
                        return;
                    }
                },
                Event::MouseButtonPressed { button, x, y } => {
                    let x = x as u32;
                    let y = y as u32;
                    mouse_xy = mouse::desktop_position();

                    // Sample the clicked pixel's RGBA from the processed image buffer
                    // (4 bytes per pixel, row-major).
                    let r = processor.image_buffer[((processor.xy.0 * y + x) * 4 + 0) as usize] as u8;
                    let g = processor.image_buffer[((processor.xy.0 * y + x) * 4 + 1) as usize] as u8;
                    let b = processor.image_buffer[((processor.xy.0 * y + x) * 4 + 2) as usize] as u8;
                    let a = processor.image_buffer[((processor.xy.0 * y + x) * 4 + 3) as usize] as u8;

                    // Show the picked color as a small swatch in a row near the top of the window.
                    selected_colors.push(
                        RectangleShape::with_size(Vector2f::new(30.0, 30.0))
                    );

                    let x_position = 45.0 * selected_colors.len() as f32;
                    selected_colors.last_mut().unwrap().set_position(Vector2f::new(x_position, 80.0));
                    selected_colors.last_mut().unwrap().set_fill_color(&Color::rgba(r, g, b, a));
                },
                Event::MouseWheelScrolled { wheel, delta, x, y } => {
                    // Zoom the workpiece sprite in or out around its current scale.
                    if delta > 0.0 {
                        workpiece.render_sprite.set_scale(workpiece.render_sprite.get_scale() * Vector2f::new(1.1, 1.1));
                    } else {
                        workpiece.render_sprite.set_scale(workpiece.render_sprite.get_scale() * Vector2f::new(0.9, 0.9));
                    }
                },
                _ => {}
            }

            input.ingest(&event);
        }

        // Dragging by middle click
        if input.is_mousebutton_held(Button::Middle) {
            let delta = mouse_xy - mouse::desktop_position();
            mouse_xy = mouse::desktop_position();
            workpiece.render_sprite.set_position(
                workpiece.render_sprite.position() - Vector2f::new(delta.x as f32, delta.y as f32)
            );
        }

        // Advance the clock and clamp large frame gaps before feeding the accumulator.
        elapsed_time = timer.elap_time();
        delta_time = elapsed_time - current_time;
        current_time = elapsed_time;
        if delta_time > 0.02 {
            delta_time = 0.02;
        }
        accumulator_time += delta_time;
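
        // Fixed-timestep drain: the loop below only empties the accumulator in
        // `step_size` chunks; nothing is simulated per step yet. A minimal sketch of
        // the usual pattern, assuming a hypothetical `update` function:
        //
        //     while accumulator_time >= step_size {
        //         accumulator_time -= step_size;
        //         update(step_size);
        //     }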
        while (accumulator_time - step_size) >= step_size {
            accumulator_time -= step_size;
        }

        window.clear(&Color::BLACK);

        window.draw(&workpiece.render_sprite);
        window.draw(&slider);

        for i in &selected_colors {
            window.draw(i);
        }

        window.draw(&button);

        window.display();
    }
}

/*
use vulkano::buffer::{BufferUsage, CpuAccessibleBuffer, DeviceLocalBuffer, ImmutableBuffer, BufferAccess};
use vulkano::command_buffer::{AutoCommandBufferBuilder, DynamicState};
use vulkano::descriptor::descriptor_set::{PersistentDescriptorSet, StdDescriptorPoolAlloc};
use vulkano::device::{Device, DeviceExtensions, QueuesIter, Queue};
use vulkano::framebuffer::{Framebuffer, FramebufferAbstract, RenderPassAbstract, Subpass};
use vulkano::instance::{Instance, InstanceExtensions, PhysicalDevice, QueueFamily};
use vulkano::pipeline::{ComputePipeline, GraphicsPipeline};
use vulkano::pipeline::viewport::Viewport;
use vulkano::sync::{FlushError, GpuFuture};
use vulkano::sync;
use vulkano::image::SwapchainImage;
use vulkano::swapchain::{AcquireError, PresentMode, SurfaceTransform, Swapchain, SwapchainCreationError};
use vulkano::swapchain;
use std::time::SystemTime;
use std::sync::Arc;
use std::ffi::CStr;
use std::path::PathBuf;
use shade_runner as sr;
use image::{DynamicImage, GenericImage, GenericImageView, ImageBuffer};
use vulkano::descriptor::pipeline_layout::PipelineLayout;
use shade_runner::{ComputeLayout, CompileError};
use vulkano::descriptor::descriptor_set::PersistentDescriptorSetBuf;
use shaderc::CompileOptions;
use winit::{Event, EventsLoop, Window, WindowBuilder, WindowEvent};
use vulkano_win::VkSurfaceBuild;
use vulkano::SafeDeref;

fn main() {
    let instance = {
        let extensions = vulkano_win::required_extensions();
        Instance::new(None, &extensions, None).unwrap()
    };

    let physical = PhysicalDevice::enumerate(&instance).next().unwrap();

    // The objective of this example is to draw a triangle on a window. To do so, we first need to
    // create the window.
    //
    // This is done by creating a `WindowBuilder` from the `winit` crate, then calling the
    // `build_vk_surface` method provided by the `VkSurfaceBuild` trait from `vulkano_win`. If you
    // ever get an error about `build_vk_surface` being undefined in one of your projects, this
    // probably means that you forgot to import this trait.
    //
    // This returns a `vulkano::swapchain::Surface` object that contains both a cross-platform winit
    // window and a cross-platform Vulkan surface that represents the surface of the window.
    let mut events_loop = EventsLoop::new();

    let surface = WindowBuilder::new().build_vk_surface(&events_loop, instance.clone()).unwrap();
    let window = surface.window();

    let queue_family = physical.queue_families().find(|&q| {
        // We take the first queue that supports drawing to our window.
        q.supports_graphics() &&
            surface.is_supported(q).unwrap_or(false) &&
            q.supports_compute()
    }).unwrap();

    let device_ext = DeviceExtensions { khr_swapchain: true, ..DeviceExtensions::none() };
    let (device, mut queues) = Device::new(physical, physical.supported_features(), &device_ext,
                                           [(queue_family, 0.5)].iter().cloned()).unwrap();

    let queue = queues.next().unwrap();

    // Before we can draw on the surface, we have to create what is called a swapchain. Creating
    // a swapchain allocates the color buffers that will contain the image that will ultimately
    // be visible on the screen. These images are returned alongside the swapchain.
    let (mut swapchain, images) = {
        // Querying the capabilities of the surface. When we create the swapchain we can only
        // pass values that are allowed by the capabilities.
        let capabilities = surface.capabilities(physical).unwrap();

        let usage = capabilities.supported_usage_flags;

        // The alpha mode indicates how the alpha value of the final image will behave. For example
        // you can choose whether the window will be opaque or transparent.
        let alpha = capabilities.supported_composite_alpha.iter().next().unwrap();

        // Choosing the internal format that the images will have.
        let format = capabilities.supported_formats[0].0;

        // The dimensions of the window, only used to initially set up the swapchain.
        // NOTE:
        // On some drivers the swapchain dimensions are specified by `caps.current_extent` and the
        // swapchain size must use these dimensions.
        // These dimensions are always the same as the window dimensions.
        //
        // However, other drivers don't specify a value, i.e. `caps.current_extent` is `None`.
        // These drivers will allow anything, but the only sensible value is the window dimensions.
        //
        // Because in both cases the swapchain needs to match the window dimensions, we just use that.
        let initial_dimensions = if let Some(dimensions) = window.get_inner_size() {
            // convert to physical pixels
            let dimensions: (u32, u32) = dimensions.to_physical(window.get_hidpi_factor()).into();
            [dimensions.0, dimensions.1]
        } else {
            // The window no longer exists so exit the application.
            return;
        };

        // Please take a look at the docs for the meaning of the parameters we didn't mention.
        Swapchain::new(device.clone(), surface.clone(), capabilities.min_image_count, format,
                       initial_dimensions, 1, usage, &queue, SurfaceTransform::Identity, alpha,
                       PresentMode::Fifo, true, None).unwrap()
    };

    // We now create a buffer that will store the shape of our triangle.
    let vertex_buffer = {
        #[derive(Default, Debug, Clone)]
        struct Vertex { position: [f32; 2] }
        vulkano::impl_vertex!(Vertex, position);

        CpuAccessibleBuffer::from_iter(device.clone(), BufferUsage::all(), [
            Vertex { position: [-0.5, -0.25] },
            Vertex { position: [0.0, 0.5] },
            Vertex { position: [0.25, -0.1] }
        ].iter().cloned()).unwrap()
    };

    mod vs {
        vulkano_shaders::shader! {
            ty: "vertex",
            src: "
#version 450

layout(location = 0) in vec2 position;

void main() {
    gl_Position = vec4(position, 0.0, 1.0);
}"
        }
    }

    mod fs {
        vulkano_shaders::shader! {
            ty: "fragment",
            src: "
#version 450

layout(location = 0) out vec4 f_color;

void main() {
    f_color = vec4(1.0, 0.0, 0.0, 1.0);
}
"
        }
    }

    let vs = vs::Shader::load(device.clone()).unwrap();
    let fs = fs::Shader::load(device.clone()).unwrap();

    // The next step is to create a *render pass*, which is an object that describes where the
    // output of the graphics pipeline will go. It describes the layout of the images
    // where the colors, depth and/or stencil information will be written.
    let render_pass = Arc::new(vulkano::single_pass_renderpass!(
        device.clone(),
        attachments: {
            // `color` is a custom name we give to the first and only attachment.
            color: {
                // `load: Clear` means that we ask the GPU to clear the content of this
                // attachment at the start of the drawing.
                load: Clear,
                // `store: Store` means that we ask the GPU to store the output of the draw
                // in the actual image. We could also ask it to discard the result.
                store: Store,
                // `format: <ty>` indicates the type of the format of the image. This has to
                // be one of the types of the `vulkano::format` module (or alternatively one
                // of your structs that implements the `FormatDesc` trait). Here we use the
                // same format as the swapchain.
                format: swapchain.format(),
                // TODO:
                samples: 1,
            }
        },
        pass: {
            // We use the attachment named `color` as the one and only color attachment.
            color: [color],
            // No depth-stencil attachment is indicated with empty brackets.
            depth_stencil: {}
        }
    ).unwrap());

    // Before we draw we have to create what is called a pipeline. This is similar to an OpenGL
    // program, but much more specific.
    let pipeline = Arc::new(GraphicsPipeline::start()
        // We need to indicate the layout of the vertices.
        // The type `SingleBufferDefinition` actually contains a template parameter corresponding
        // to the type of each vertex. But in this code it is automatically inferred.
        .vertex_input_single_buffer()
        // A Vulkan shader can in theory contain multiple entry points, so we have to specify
        // which one. The `main` word of `main_entry_point` actually corresponds to the name of
        // the entry point.
        .vertex_shader(vs.main_entry_point(), ())
        // The content of the vertex buffer describes a list of triangles.
        .triangle_list()
        // Use a resizable viewport set to draw over the entire window
        .viewports_dynamic_scissors_irrelevant(1)
        // See `vertex_shader`.
        .fragment_shader(fs.main_entry_point(), ())
        // We have to indicate which subpass of which render pass this pipeline is going to be used
        // in. The pipeline will only be usable from this particular subpass.
        .render_pass(Subpass::from(render_pass.clone(), 0).unwrap())
        // Now that our builder is filled, we call `build()` to obtain an actual pipeline.
        .build(device.clone())
        .unwrap());

    // Dynamic viewports allow us to recreate just the viewport when the window is resized.
    // Otherwise we would have to recreate the whole pipeline.
    let mut dynamic_state = DynamicState { line_width: None, viewports: None, scissors: None };

    // The render pass we created above only describes the layout of our framebuffers. Before we
    // can draw we also need to create the actual framebuffers.
    //
    // Since we need to draw to multiple images, we are going to create a different framebuffer for
    // each image.
    let mut framebuffers = window_size_dependent_setup(&images, render_pass.clone(), &mut dynamic_state);

    // Initialization is finally finished!

    // In some situations, the swapchain will become invalid by itself. This includes for example
    // when the window is resized (as the images of the swapchain will no longer match the
    // window's) or, on Android, when the application goes to the background and comes back to
    // the foreground.
    //
    // In this situation, acquiring a swapchain image or presenting it will return an error.
    // Rendering to an image of that swapchain will not produce any error, but may or may not work.
    // To continue rendering, we need to recreate the swapchain by creating a new one.
    // Here, we remember that we need to do this for the next loop iteration.
    let mut recreate_swapchain = false;

    // In the loop below we are going to submit commands to the GPU. Submitting a command produces
    // an object that implements the `GpuFuture` trait, which holds the resources for as long as
    // they are in use by the GPU.
    //
    // Destroying the `GpuFuture` blocks until the GPU is finished executing it. In order to avoid
    // that, we store the submission of the previous frame here.
    let mut previous_frame_end = Box::new(sync::now(device.clone())) as Box<dyn GpuFuture>;

    loop {
        // It is important to call this function from time to time, otherwise resources will keep
        // accumulating and you will eventually reach an out of memory error.
        // Calling this function polls various fences in order to determine what the GPU has
        // already processed, and frees the resources that are no longer needed.
        previous_frame_end.cleanup_finished();

        // Whenever the window resizes we need to recreate everything dependent on the window size.
        // In this example that includes the swapchain, the framebuffers and the dynamic state viewport.
        if recreate_swapchain {
            // Get the new dimensions of the window.
            let dimensions = if let Some(dimensions) = window.get_inner_size() {
                let dimensions: (u32, u32) = dimensions.to_physical(window.get_hidpi_factor()).into();
                [dimensions.0, dimensions.1]
            } else {
                return;
            };

            let (new_swapchain, new_images) = match swapchain.recreate_with_dimension(dimensions) {
                Ok(r) => r,
                // This error tends to happen when the user is manually resizing the window.
                // Simply restarting the loop is the easiest way to fix this issue.
                Err(SwapchainCreationError::UnsupportedDimensions) => continue,
                Err(err) => panic!("{:?}", err)
            };

            swapchain = new_swapchain;
            // Because framebuffers contains an Arc on the old swapchain, we need to
            // recreate framebuffers as well.
            framebuffers = window_size_dependent_setup(&new_images, render_pass.clone(), &mut dynamic_state);

            recreate_swapchain = false;
        }

        // Before we can draw on the output, we have to *acquire* an image from the swapchain. If
        // no image is available (which happens if you submit draw commands too quickly), then the
        // function will block.
        // This operation returns the index of the image that we are allowed to draw upon.
        //
        // This function can block if no image is available. The parameter is an optional timeout
        // after which the function call will return an error.
        let (image_num, acquire_future) = match swapchain::acquire_next_image(swapchain.clone(), None) {
            Ok(r) => r,
            Err(AcquireError::OutOfDate) => {
                recreate_swapchain = true;
                continue;
            }
            Err(err) => panic!("{:?}", err)
        };

        // Specify the color to clear the framebuffer with, i.e. blue.
        let clear_values = vec!([0.0, 0.0, 1.0, 1.0].into());

        {
            let project_root =
                std::env::current_dir()
                    .expect("failed to get root directory");

            let mut compute_path = project_root.clone();
            compute_path.push(PathBuf::from("resources/shaders/"));
            compute_path.push(PathBuf::from("simple-edge.compute"));

            let mut options = CompileOptions::new().ok_or(CompileError::CreateCompiler).unwrap();
            options.add_macro_definition("SETTING_POS_X", Some("0"));
            options.add_macro_definition("SETTING_POS_Y", Some("1"));
            options.add_macro_definition("SETTING_BUCKETS_START", Some("2"));
            options.add_macro_definition("SETTING_BUCKETS_LEN", Some("2"));

            let shader =
                shade_runner::load_compute_with_options(compute_path, options)
                    .expect("Failed to compile");

            let vulkano_entry =
                shade_runner::parse_compute(&shader)
                    .expect("failed to parse");

            let x = unsafe {
                vulkano::pipeline::shader::ShaderModule::from_words(device.clone(), &shader.compute)
            }.unwrap();

            let c_pipeline = Arc::new({
                unsafe {
                    ComputePipeline::new(device.clone(), &x.compute_entry_point(
                        CStr::from_bytes_with_nul_unchecked(b"main\0"),
                        vulkano_entry.compute_layout), &(),
                    ).unwrap()
                }
            });

            let project_root =
                std::env::current_dir()
                    .expect("failed to get root directory");

            let mut compute_path = project_root.clone();
            compute_path.push(PathBuf::from("resources/images/"));
            compute_path.push(PathBuf::from("funky-bird.jpg"));

            let img = image::open(compute_path).expect("Couldn't find image");

            let xy = img.dimensions();

            let data_length = xy.0 * xy.1 * 4;
            let pixel_count = img.raw_pixels().len();
            println!("Pixel count {}", pixel_count);

            let mut image_buffer = Vec::new();
            if pixel_count != data_length as usize {
                println!("Creating alpha channel...");
                for i in img.raw_pixels().iter() {
                    if (image_buffer.len() + 1) % 4 == 0 {
                        image_buffer.push(255);
                    }
                    image_buffer.push(*i);
                }
                image_buffer.push(255);
            } else {
                image_buffer = img.raw_pixels();
            }

            println!("Buffer length {}", image_buffer.len());
            println!("Size {:?}", xy);

            println!("Allocating Buffers...");

            // Pull out the image data and place it in a buffer for the kernel to write to and for us to read from
            let write_buffer = {
                let mut buff = image_buffer.iter();
                let data_iter = (0..data_length).map(|n| *(buff.next().unwrap()));
                CpuAccessibleBuffer::from_iter(device.clone(), BufferUsage::all(), data_iter).unwrap()
            };

            // Pull out the image data and place it in a buffer for the kernel to read from
            let read_buffer = {
                let mut buff = image_buffer.iter();
                let data_iter = (0..data_length).map(|n| *(buff.next().unwrap()));
                CpuAccessibleBuffer::from_iter(device.clone(), BufferUsage::all(), data_iter).unwrap()
            };

            // A buffer to hold many i32 values to use as settings
            let settings_buffer = {
                let vec = vec![xy.0, xy.1];
                let mut buff = vec.iter();
                let data_iter =
                    (0..2).map(|n| *(buff.next().unwrap()));
                CpuAccessibleBuffer::from_iter(device.clone(),
                                               BufferUsage::all(),
                                               data_iter).unwrap()
            };

            println!("Done");

            // Create the data descriptor set for our previously created shader pipeline
            let mut set =
                PersistentDescriptorSet::start(c_pipeline.clone(), 0)
                    .add_buffer(write_buffer.clone()).unwrap()
                    .add_buffer(read_buffer.clone()).unwrap()
                    .add_buffer(settings_buffer.clone()).unwrap();

            let mut set = Arc::new(set.build().unwrap());

            // In order to draw, we have to build a *command buffer*. The command buffer object holds
            // the list of commands that are going to be executed.
            //
            // Building a command buffer is an expensive operation (usually a few hundred
            // microseconds), but it is known to be a hot path in the driver and is expected to be
            // optimized.
            //
            // Note that we have to pass a queue family when we create the command buffer. The command
            // buffer will only be executable on that given queue family.
            let command_buffer =
                AutoCommandBufferBuilder::primary_one_time_submit(device.clone(), queue.family())
                    .unwrap()

                    .dispatch([xy.0, xy.1, 1],
                              c_pipeline.clone(),
                              set.clone(), ()).unwrap()

                    // Before we can draw, we have to *enter a render pass*. There are two methods to do
                    // this: `draw_inline` and `draw_secondary`. The latter is a bit more advanced and is
                    // not covered here.
                    //
                    // The third parameter builds the list of values to clear the attachments with. The API
                    // is similar to the list of attachments when building the framebuffers, except that
                    // only the attachments that use `load: Clear` appear in the list.
                    .begin_render_pass(framebuffers[image_num].clone(), false, clear_values)
                    .unwrap()

                    // We are now inside the first subpass of the render pass. We add a draw command.
                    //
                    // The last two parameters contain the list of resources to pass to the shaders.
                    // Since we used an `EmptyPipeline` object, the objects have to be `()`.
                    .draw(pipeline.clone(), &dynamic_state, vertex_buffer.clone(), (), ())
                    .unwrap()

                    // We leave the render pass by calling `draw_end`. Note that if we had multiple
                    // subpasses we could have called `next_inline` (or `next_secondary`) to jump to the
                    // next subpass.
                    .end_render_pass()
                    .unwrap()

                    // Finish building the command buffer by calling `build`.
                    .build().unwrap();

            let future = previous_frame_end.join(acquire_future)
                .then_execute(queue.clone(), command_buffer).unwrap()

                // The color output is now expected to contain our triangle. But in order to show it on
                // the screen, we have to *present* the image by calling `present`.
                //
                // This function does not actually present the image immediately. Instead it submits a
                // present command at the end of the queue. This means that it will only be presented once
                // the GPU has finished executing the command buffer that draws the triangle.
                .then_swapchain_present(queue.clone(), swapchain.clone(), image_num)
                .then_signal_fence_and_flush();

            match future {
                Ok(future) => {
                    previous_frame_end = Box::new(future) as Box<_>;
                }
                Err(FlushError::OutOfDate) => {
                    recreate_swapchain = true;
                    previous_frame_end = Box::new(sync::now(device.clone())) as Box<_>;
                }
                Err(e) => {
                    println!("{:?}", e);
                    previous_frame_end = Box::new(sync::now(device.clone())) as Box<_>;
                }
            }
        }

        // Note that in more complex programs it is likely that one of `acquire_next_image`,
        // `command_buffer::submit`, or `present` will block for some time. This happens when the
        // GPU's queue is full and the driver has to wait until the GPU has finished some work.
        //
        // Unfortunately the Vulkan API doesn't provide any way to not wait or to detect when a
        // wait would happen. Blocking may be the desired behavior, but if you don't want to
        // block you should spawn a separate thread dedicated to submissions.

        // Handling the window events in order to close the program when the user wants to close
        // it.
        let mut done = false;
        events_loop.poll_events(|ev| {
            match ev {
                Event::WindowEvent { event: WindowEvent::CloseRequested, .. } => done = true,
                Event::WindowEvent { event: WindowEvent::Resized(_), .. } => recreate_swapchain = true,
                _ => ()
            }
        });
        if done { return; }
    }
}

/// This method is called once during initialization, then again whenever the window is resized
fn window_size_dependent_setup(
    images: &[Arc<SwapchainImage<Window>>],
    render_pass: Arc<dyn RenderPassAbstract + Send + Sync>,
    dynamic_state: &mut DynamicState,
) -> Vec<Arc<dyn FramebufferAbstract + Send + Sync>> {
    let dimensions = images[0].dimensions();

    let viewport = Viewport {
        origin: [0.0, 0.0],
        dimensions: [dimensions[0] as f32, dimensions[1] as f32],
        depth_range: 0.0..1.0,
    };
    dynamic_state.viewports = Some(vec!(viewport));

    images.iter().map(|image| {
        Arc::new(
            Framebuffer::start(render_pass.clone())
                .add(image.clone()).unwrap()
                .build().unwrap()
        ) as Arc<dyn FramebufferAbstract + Send + Sync>
    }).collect::<Vec<_>>()
}
*/