use smallvec::SmallVec;
use std::marker::PhantomData;
use std::mem;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering;
use buffer::BufferUsage;
use buffer::CpuAccessibleBuffer;
use buffer::sys::BufferCreationError;
use buffer::sys::SparseLevel;
use buffer::sys::UnsafeBuffer;
use buffer::traits::BufferAccess;
use buffer::traits::BufferInner;
use buffer::traits::TypedBufferAccess;
use command_buffer::AutoCommandBuffer;
use command_buffer::AutoCommandBufferBuilder;
use command_buffer::CommandBuffer;
use command_buffer::CommandBufferExecFuture;
use device::Device;
use device::DeviceOwned;
use device::Queue;
use image::ImageAccess;
use instance::QueueFamily;
use memory::DedicatedAlloc;
use memory::DeviceMemoryAllocError;
use memory::pool::AllocFromRequirementsFilter;
use memory::pool::AllocLayout;
use memory::pool::MappingRequirement;
use memory::pool::MemoryPool;
use memory::pool::MemoryPoolAlloc;
use memory::pool::PotentialDedicatedAllocation;
use memory::pool::StdMemoryPoolAlloc;
use sync::AccessError;
use sync::NowFuture;
use sync::Sharing;
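/// Buffer whose contents are written once and then only ever read by the GPU.
///
/// The buffer starts out uninitialized; the only way to fill it is through the
/// `ImmutableBufferInitialization` handle returned alongside it at construction time.
/// Once the initialization submission has run, the `initialized` flag is set and all
/// further GPU accesses are restricted to non-exclusive (read-only) use.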
pub struct ImmutableBuffer<T: ?Sized, A = PotentialDedicatedAllocation<StdMemoryPoolAlloc>> {
inner: UnsafeBuffer,
memory: A,
initialized: AtomicBool,
queue_families: SmallVec<[u32; 4]>,
marker: PhantomData<Box<T>>,
}
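/// Future returned by `from_data` and `from_buffer`: the execution of the upload command
/// buffer, chained after a `now()` future.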
pub type ImmutableBufferFromBufferFuture = CommandBufferExecFuture<NowFuture, AutoCommandBuffer>;
impl<T: ?Sized> ImmutableBuffer<T> {
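    /// Builds an `ImmutableBuffer` containing `data`.
    ///
    /// The data is first written to a temporary CPU-accessible staging buffer, then copied
    /// into the immutable buffer by a command buffer submitted to `queue`. The returned
    /// future represents that upload and must complete before the buffer is read on the GPU.
    ///
    /// # Example
    ///
    /// A minimal sketch (assumes `queue` is an existing `Arc<Queue>`):
    ///
    /// ```ignore
    /// let (buffer, upload_future) =
    ///     ImmutableBuffer::from_data(12u32, BufferUsage::all(), queue.clone()).unwrap();
    /// // Chain `upload_future` into the rest of the frame's work so the copy
    /// // finishes before the GPU reads `buffer`.
    /// ```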
pub fn from_data(
data: T, usage: BufferUsage, queue: Arc<Queue>)
-> Result<(Arc<ImmutableBuffer<T>>, ImmutableBufferFromBufferFuture),
DeviceMemoryAllocError>
where T: 'static + Send + Sync + Sized
{
let source = CpuAccessibleBuffer::from_data(queue.device().clone(),
BufferUsage::transfer_source(),
data)?;
ImmutableBuffer::from_buffer(source, usage, queue)
}
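    /// Builds an `ImmutableBuffer` initialized with the contents of `source`.
    ///
    /// A copy from `source` is recorded and submitted to `queue`; the requested `usage` is
    /// automatically extended with `transfer_destination` so that the copy is permitted.
    /// The returned future must complete before the new buffer is used.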
pub fn from_buffer<B>(
source: B, usage: BufferUsage, queue: Arc<Queue>)
-> Result<(Arc<ImmutableBuffer<T>>, ImmutableBufferFromBufferFuture),
DeviceMemoryAllocError>
where B: BufferAccess + TypedBufferAccess<Content = T> + 'static + Clone + Send + Sync,
T: 'static + Send + Sync
{
unsafe {
let actual_usage = BufferUsage {
transfer_destination: true,
..usage
};
let (buffer, init) = ImmutableBuffer::raw(source.device().clone(),
source.size(),
actual_usage,
source.device().active_queue_families())?;
let cb = AutoCommandBufferBuilder::new(source.device().clone(),
queue.family())?
.copy_buffer(source, init).unwrap()
.build().unwrap();
let future = match cb.execute(queue) {
Ok(f) => f,
Err(_) => unreachable!(),
};
Ok((buffer, future))
}
}
}
impl<T> ImmutableBuffer<T> {
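    /// Builds an `ImmutableBuffer` with uninitialized contents.
    ///
    /// Returns the buffer together with an `ImmutableBufferInitialization` handle that must
    /// be used to write the contents exactly once.
    ///
    /// # Safety
    ///
    /// The buffer must be fully written through the initialization handle before any other
    /// access, otherwise the GPU will read uninitialized memory.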
#[inline]
pub unsafe fn uninitialized(
device: Arc<Device>, usage: BufferUsage)
-> Result<(Arc<ImmutableBuffer<T>>, ImmutableBufferInitialization<T>),
DeviceMemoryAllocError> {
ImmutableBuffer::raw(device.clone(),
mem::size_of::<T>(),
usage,
device.active_queue_families())
}
}
impl<T> ImmutableBuffer<[T]> {
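    /// Builds an `ImmutableBuffer<[T]>` from the elements of an iterator, going through a
    /// CPU-accessible staging buffer like `from_data`.
    ///
    /// The iterator must report an exact length, which determines the size of the buffer.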
pub fn from_iter<D>(
data: D, usage: BufferUsage, queue: Arc<Queue>)
-> Result<(Arc<ImmutableBuffer<[T]>>, ImmutableBufferFromBufferFuture),
DeviceMemoryAllocError>
where D: ExactSizeIterator<Item = T>,
T: 'static + Send + Sync + Sized
{
let source = CpuAccessibleBuffer::from_iter(queue.device().clone(),
BufferUsage::transfer_source(),
data)?;
ImmutableBuffer::from_buffer(source, usage, queue)
}
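    /// Builds an uninitialized buffer able to hold `len` elements of type `T`.
    ///
    /// # Safety
    ///
    /// Same requirements as `uninitialized`: the contents must be written through the
    /// returned initialization handle before any other use.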
#[inline]
pub unsafe fn uninitialized_array(device: Arc<Device>, len: usize, usage: BufferUsage)
-> Result<(Arc<ImmutableBuffer<[T]>>,
ImmutableBufferInitialization<[T]>),
DeviceMemoryAllocError> {
ImmutableBuffer::raw(device.clone(),
len * mem::size_of::<T>(),
usage,
device.active_queue_families())
}
}
impl<T: ?Sized> ImmutableBuffer<T> {
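    /// Low-level constructor: builds a buffer of `size` bytes accessible to the given queue
    /// families, without checking that the size matches `T`.
    ///
    /// # Safety
    ///
    /// - `size` and `usage` must be consistent with how the buffer will be used as a `T`.
    /// - The contents must be initialized through the returned handle before any other use.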
#[inline]
pub unsafe fn raw<'a, I>(
device: Arc<Device>, size: usize, usage: BufferUsage, queue_families: I)
-> Result<(Arc<ImmutableBuffer<T>>, ImmutableBufferInitialization<T>),
DeviceMemoryAllocError>
where I: IntoIterator<Item = QueueFamily<'a>>
{
let queue_families = queue_families.into_iter().map(|f| f.id()).collect();
ImmutableBuffer::raw_impl(device, size, usage, queue_families)
}
unsafe fn raw_impl(
device: Arc<Device>, size: usize, usage: BufferUsage, queue_families: SmallVec<[u32; 4]>)
-> Result<(Arc<ImmutableBuffer<T>>, ImmutableBufferInitialization<T>),
DeviceMemoryAllocError> {
let (buffer, mem_reqs) = {
let sharing = if queue_families.len() >= 2 {
Sharing::Concurrent(queue_families.iter().cloned())
} else {
Sharing::Exclusive
};
match UnsafeBuffer::new(device.clone(), size, usage, sharing, SparseLevel::none()) {
Ok(b) => b,
Err(BufferCreationError::AllocError(err)) => return Err(err),
Err(_) => unreachable!(),
}
};
let mem = MemoryPool::alloc_from_requirements(&Device::standard_pool(&device),
&mem_reqs,
AllocLayout::Linear,
MappingRequirement::DoNotMap,
DedicatedAlloc::Buffer(&buffer),
|t| if t.is_device_local() {
AllocFromRequirementsFilter::Preferred
} else {
AllocFromRequirementsFilter::Allowed
})?;
debug_assert!((mem.offset() % mem_reqs.alignment) == 0);
buffer.bind_memory(mem.memory(), mem.offset())?;
let final_buf = Arc::new(ImmutableBuffer {
inner: buffer,
memory: mem,
            queue_families,
initialized: AtomicBool::new(false),
marker: PhantomData,
});
let initialization = ImmutableBufferInitialization {
buffer: final_buf.clone(),
used: Arc::new(AtomicBool::new(false)),
};
Ok((final_buf, initialization))
}
}
impl<T: ?Sized, A> ImmutableBuffer<T, A> {
#[inline]
pub fn device(&self) -> &Arc<Device> {
self.inner.device()
}
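    /// Returns the queue families this buffer can be used with.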
#[inline]
pub fn queue_families(&self) -> Vec<QueueFamily> {
self.queue_families
.iter()
.map(|&num| {
self.device()
.physical_device()
.queue_family_by_id(num)
.unwrap()
})
.collect()
}
}
unsafe impl<T: ?Sized, A> BufferAccess for ImmutableBuffer<T, A> {
#[inline]
fn inner(&self) -> BufferInner {
BufferInner {
buffer: &self.inner,
offset: 0,
}
}
#[inline]
fn size(&self) -> usize {
self.inner.size()
}
#[inline]
fn conflicts_buffer(&self, other: &dyn BufferAccess) -> bool {
self.conflict_key() == other.conflict_key()
}
#[inline]
    fn conflicts_image(&self, _other: &dyn ImageAccess) -> bool {
        false
    }
#[inline]
fn conflict_key(&self) -> (u64, usize) {
(self.inner.key(), 0)
}
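    // An immutable buffer only grants shared (read) access, and only once it has been
    // marked as initialized by its `ImmutableBufferInitialization` handle.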
#[inline]
fn try_gpu_lock(&self, exclusive_access: bool, _: &Queue) -> Result<(), AccessError> {
if exclusive_access {
return Err(AccessError::ExclusiveDenied);
}
if !self.initialized.load(Ordering::Relaxed) {
return Err(AccessError::BufferNotInitialized);
}
Ok(())
}
#[inline]
unsafe fn increase_gpu_lock(&self) {
}
#[inline]
unsafe fn unlock(&self) {
}
}
unsafe impl<T: ?Sized, A> TypedBufferAccess for ImmutableBuffer<T, A> {
type Content = T;
}
unsafe impl<T: ?Sized, A> DeviceOwned for ImmutableBuffer<T, A> {
#[inline]
fn device(&self) -> &Arc<Device> {
self.inner.device()
}
}
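/// Handle giving one-time write access to an `ImmutableBuffer` so that it can be initialized.
///
/// Returned by `uninitialized`, `uninitialized_array` and `raw`. When the GPU lock taken
/// through this handle is released, the underlying buffer is marked as initialized.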
pub struct ImmutableBufferInitialization<T: ?Sized, A = PotentialDedicatedAllocation<StdMemoryPoolAlloc>> {
buffer: Arc<ImmutableBuffer<T, A>>,
used: Arc<AtomicBool>,
}
unsafe impl<T: ?Sized, A> BufferAccess for ImmutableBufferInitialization<T, A> {
#[inline]
fn inner(&self) -> BufferInner {
self.buffer.inner()
}
#[inline]
fn size(&self) -> usize {
self.buffer.size()
}
#[inline]
fn conflicts_buffer(&self, other: &dyn BufferAccess) -> bool {
self.conflict_key() == other.conflict_key()
}
#[inline]
    fn conflicts_image(&self, _other: &dyn ImageAccess) -> bool {
        false
    }
#[inline]
fn conflict_key(&self) -> (u64, usize) {
(self.buffer.inner.key(), 0)
}
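    // The handle may be locked only once: `used` guards against a second lock, and access is
    // refused altogether if the buffer has already been initialized.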
#[inline]
fn try_gpu_lock(&self, _: bool, _: &Queue) -> Result<(), AccessError> {
if self.buffer.initialized.load(Ordering::Relaxed) {
return Err(AccessError::AlreadyInUse);
}
        if self.used.compare_exchange(false, true, Ordering::Relaxed, Ordering::Relaxed).is_ok() {
            Ok(())
        } else {
            Err(AccessError::AlreadyInUse)
        }
}
#[inline]
unsafe fn increase_gpu_lock(&self) {
debug_assert!(self.used.load(Ordering::Relaxed));
}
#[inline]
unsafe fn unlock(&self) {
self.buffer.initialized.store(true, Ordering::Relaxed);
}
}
unsafe impl<T: ?Sized, A> TypedBufferAccess for ImmutableBufferInitialization<T, A> {
type Content = T;
}
unsafe impl<T: ?Sized, A> DeviceOwned for ImmutableBufferInitialization<T, A> {
#[inline]
fn device(&self) -> &Arc<Device> {
self.buffer.inner.device()
}
}
impl<T: ?Sized, A> Clone for ImmutableBufferInitialization<T, A> {
#[inline]
fn clone(&self) -> ImmutableBufferInitialization<T, A> {
ImmutableBufferInitialization {
buffer: self.buffer.clone(),
used: self.used.clone(),
}
}
}
#[cfg(test)]
mod tests {
use buffer::BufferUsage;
use buffer::cpu_access::CpuAccessibleBuffer;
use buffer::immutable::ImmutableBuffer;
use command_buffer::AutoCommandBufferBuilder;
use command_buffer::CommandBuffer;
use sync::GpuFuture;
#[test]
fn from_data_working() {
let (device, queue) = gfx_dev_and_queue!();
let (buffer, _) = ImmutableBuffer::from_data(12u32, BufferUsage::all(), queue.clone())
.unwrap();
let destination = CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::all(), 0)
.unwrap();
let _ = AutoCommandBufferBuilder::new(device.clone(), queue.family())
.unwrap()
.copy_buffer(buffer, destination.clone())
.unwrap()
.build()
.unwrap()
.execute(queue.clone())
.unwrap()
.then_signal_fence_and_flush()
.unwrap();
let destination_content = destination.read().unwrap();
assert_eq!(*destination_content, 12);
}
#[test]
fn from_iter_working() {
let (device, queue) = gfx_dev_and_queue!();
let (buffer, _) = ImmutableBuffer::from_iter((0 .. 512u32).map(|n| n * 2),
BufferUsage::all(),
queue.clone())
.unwrap();
let destination = CpuAccessibleBuffer::from_iter(device.clone(),
BufferUsage::all(),
(0 .. 512).map(|_| 0u32))
.unwrap();
let _ = AutoCommandBufferBuilder::new(device.clone(), queue.family())
.unwrap()
.copy_buffer(buffer, destination.clone())
.unwrap()
.build()
.unwrap()
.execute(queue.clone())
.unwrap()
.then_signal_fence_and_flush()
.unwrap();
let destination_content = destination.read().unwrap();
for (n, &v) in destination_content.iter().enumerate() {
assert_eq!(n * 2, v as usize);
}
}
#[test]
fn writing_forbidden() {
let (device, queue) = gfx_dev_and_queue!();
let (buffer, _) = ImmutableBuffer::from_data(12u32, BufferUsage::all(), queue.clone())
.unwrap();
assert_should_panic!({
let _ = AutoCommandBufferBuilder::new(device.clone(),
queue.family())
.unwrap()
.fill_buffer(buffer, 50)
.unwrap()
.build()
.unwrap()
.execute(queue.clone())
.unwrap()
.then_signal_fence_and_flush()
.unwrap();
});
}
#[test]
fn read_uninitialized_forbidden() {
let (device, queue) = gfx_dev_and_queue!();
let (buffer, _) = unsafe {
ImmutableBuffer::<u32>::uninitialized(device.clone(), BufferUsage::all()).unwrap()
};
let source = CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::all(), 0).unwrap();
assert_should_panic!({
let _ = AutoCommandBufferBuilder::new(device.clone(),
queue.family())
.unwrap()
.copy_buffer(source, buffer)
.unwrap()
.build()
.unwrap()
.execute(queue.clone())
.unwrap()
.then_signal_fence_and_flush()
.unwrap();
});
}
#[test]
fn init_then_read_same_cb() {
let (device, queue) = gfx_dev_and_queue!();
let (buffer, init) = unsafe {
ImmutableBuffer::<u32>::uninitialized(device.clone(), BufferUsage::all()).unwrap()
};
let source = CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::all(), 0).unwrap();
let _ = AutoCommandBufferBuilder::new(device.clone(), queue.family())
.unwrap()
.copy_buffer(source.clone(), init)
.unwrap()
.copy_buffer(buffer, source.clone())
.unwrap()
.build()
.unwrap()
.execute(queue.clone())
.unwrap()
.then_signal_fence_and_flush()
.unwrap();
}
#[test]
#[ignore]
fn init_then_read_same_future() {
let (device, queue) = gfx_dev_and_queue!();
let (buffer, init) = unsafe {
ImmutableBuffer::<u32>::uninitialized(device.clone(), BufferUsage::all()).unwrap()
};
let source = CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::all(), 0).unwrap();
let cb1 = AutoCommandBufferBuilder::new(device.clone(), queue.family())
.unwrap()
.copy_buffer(source.clone(), init)
.unwrap()
.build()
.unwrap();
let cb2 = AutoCommandBufferBuilder::new(device.clone(), queue.family())
.unwrap()
.copy_buffer(buffer, source.clone())
.unwrap()
.build()
.unwrap();
let _ = cb1.execute(queue.clone())
.unwrap()
.then_execute(queue.clone(), cb2)
.unwrap()
.then_signal_fence_and_flush()
.unwrap();
}
#[test]
fn create_buffer_zero_size_data() {
let (device, queue) = gfx_dev_and_queue!();
let _ = ImmutableBuffer::from_data((), BufferUsage::all(), queue.clone());
}
}