// Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.

//! A subpart of a buffer.

use super::{allocator::Arena, Buffer, BufferError, BufferMemory};
use crate::{
    device::{Device, DeviceOwned},
    macros::try_opt,
    memory::{
        self,
        allocator::{align_down, align_up, DeviceLayout},
        is_aligned, DeviceAlignment,
    },
    DeviceSize, NonZeroDeviceSize,
};
use bytemuck::{AnyBitPattern, PodCastError};
use std::{
    alloc::Layout,
    cmp,
    error::Error,
    ffi::c_void,
    fmt::{Display, Error as FmtError, Formatter},
    hash::{Hash, Hasher},
    marker::PhantomData,
    mem::{self, align_of, size_of},
    ops::{Deref, DerefMut, Range, RangeBounds},
    ptr::{self, NonNull},
    sync::Arc,
    thread,
};

#[cfg(feature = "macros")]
pub use vulkano_macros::BufferContents;

/// A subpart of a buffer.
///
/// This type doesn't correspond to any Vulkan object; it exists for API convenience. Most Vulkan
/// functions that work with buffers take the buffer as an argument together with an offset and
/// size within the buffer, which we can represent with a single subbuffer instead.
///
/// `Subbuffer` also has a type parameter, which is a hint for how the data is going to be
/// interpreted by the host or device (or both). This is useful so that we can allocate
/// (sub)buffers that are correctly aligned and have the correct size for their content, and for
/// type-safety. For example, when reading/writing a subbuffer from the host, you can use
/// [`Subbuffer::read`]/[`Subbuffer::write`] without worrying about the alignment and size being
/// correct and about converting your data from/to raw bytes.
///
/// There are two ways to get a `Subbuffer`:
///
/// - By using the functions on [`Buffer`], which create a new buffer and memory allocation each
///   time, and give you a `Subbuffer` that has an entire `Buffer` dedicated to it.
/// - By using the [`SubbufferAllocator`], which creates `Subbuffer`s by suballocating existing
///   `Buffer`s such that the `Buffer`s can keep being reused.
///
/// Alternatively, you can also create a `Buffer` manually and convert it to a `Subbuffer<[u8]>`.
///
/// [`SubbufferAllocator`]: super::allocator::SubbufferAllocator
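///
/// # Examples
///
/// A minimal sketch of the first approach, assuming that `memory_allocator` is a
/// [`StandardMemoryAllocator`] created elsewhere; the usage flags, memory usage and length are
/// placeholders:
///
/// ```ignore
/// use vulkano::buffer::{Buffer, BufferCreateInfo, BufferUsage};
/// use vulkano::memory::allocator::{AllocationCreateInfo, MemoryUsage};
///
/// // `Buffer::new_slice` creates a dedicated `Buffer` and returns a `Subbuffer<[u32]>`
/// // spanning the whole of it.
/// let subbuffer = Buffer::new_slice::<u32>(
///     &memory_allocator,
///     BufferCreateInfo {
///         usage: BufferUsage::TRANSFER_SRC,
///         ..Default::default()
///     },
///     AllocationCreateInfo {
///         usage: MemoryUsage::Upload,
///         ..Default::default()
///     },
///     64,
/// )
/// .unwrap();
///
/// assert_eq!(subbuffer.len(), 64);
/// ```
///
/// [`StandardMemoryAllocator`]: crate::memory::allocator::StandardMemoryAllocator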
#[derive(Debug)]
#[repr(C)]
pub struct Subbuffer<T: ?Sized> {
    offset: DeviceSize,
    size: DeviceSize,
    parent: SubbufferParent,
    marker: PhantomData<Arc<T>>,
}

#[derive(Clone, Debug, PartialEq, Eq, Hash)]
enum SubbufferParent {
    Arena(Arc<Arena>),
    Buffer(Arc<Buffer>),
}

impl<T: ?Sized> Subbuffer<T> {
    pub(super) fn from_arena(arena: Arc<Arena>, offset: DeviceSize, size: DeviceSize) -> Self {
        Subbuffer {
            offset,
            size,
            parent: SubbufferParent::Arena(arena),
            marker: PhantomData,
        }
    }

    /// Returns the offset of the subbuffer, in bytes, relative to the buffer.
    pub fn offset(&self) -> DeviceSize {
        self.offset
    }

    /// Returns the offset of the subbuffer, in bytes, relative to the [`DeviceMemory`] block.
    fn memory_offset(&self) -> DeviceSize {
        let allocation = match self.buffer().memory() {
            BufferMemory::Normal(a) => a,
            BufferMemory::Sparse => unreachable!(),
        };

        allocation.offset() + self.offset
    }

    /// Returns the size of the subbuffer in bytes.
    pub fn size(&self) -> DeviceSize {
        self.size
    }

    /// Returns the range the subbuffer occupies, in bytes, relative to the buffer.
    pub(crate) fn range(&self) -> Range<DeviceSize> {
        self.offset..self.offset + self.size
    }

    /// Returns the buffer that this subbuffer is a part of.
    pub fn buffer(&self) -> &Arc<Buffer> {
        match &self.parent {
            SubbufferParent::Arena(arena) => arena.buffer(),
            SubbufferParent::Buffer(buffer) => buffer,
        }
    }

    /// Returns the mapped pointer to the start of the subbuffer if the memory is host-visible,
    /// otherwise returns [`None`].
    pub fn mapped_ptr(&self) -> Option<NonNull<c_void>> {
        match self.buffer().memory() {
            BufferMemory::Normal(a) => a.mapped_ptr().map(|ptr| {
                // SAFETY: The original address came from the Vulkan implementation, and allocation
                // sizes are guaranteed to not exceed `isize::MAX` when there's a mapped pointer,
                // so the offset better be in range.
                unsafe { NonNull::new_unchecked(ptr.as_ptr().add(self.offset as usize)) }
            }),
            BufferMemory::Sparse => unreachable!(),
        }
    }

    /// Returns the device address for this subbuffer.
    pub fn device_address(&self) -> Result<NonZeroDeviceSize, BufferError> {
        self.buffer().device_address().map(|ptr| {
            // SAFETY: The original address came from the Vulkan implementation, and allocation
            // sizes are guaranteed to not exceed `DeviceLayout::MAX_SIZE`, so the offset better be
            // in range.
            unsafe { NonZeroDeviceSize::new_unchecked(ptr.get() + self.offset) }
        })
    }

    /// Casts the subbuffer to a slice of raw bytes.
    pub fn into_bytes(self) -> Subbuffer<[u8]> {
        unsafe { self.reinterpret_unchecked_inner() }
    }

    /// Same as [`into_bytes`], except it works with a reference to the subbuffer.
    ///
    /// [`into_bytes`]: Self::into_bytes
    pub fn as_bytes(&self) -> &Subbuffer<[u8]> {
        unsafe { self.reinterpret_ref_unchecked_inner() }
    }

    #[inline(always)]
    unsafe fn reinterpret_unchecked_inner<U: ?Sized>(self) -> Subbuffer<U> {
        // SAFETY: All `Subbuffer`s share the same layout.
        mem::transmute::<Subbuffer<T>, Subbuffer<U>>(self)
    }

    #[inline(always)]
    unsafe fn reinterpret_ref_unchecked_inner<U: ?Sized>(&self) -> &Subbuffer<U> {
        assert!(size_of::<Subbuffer<T>>() == size_of::<Subbuffer<U>>());
        assert!(align_of::<Subbuffer<T>>() == align_of::<Subbuffer<U>>());

        // SAFETY: All `Subbuffer`s share the same layout.
        mem::transmute::<&Subbuffer<T>, &Subbuffer<U>>(self)
    }
}

impl<T> Subbuffer<T>
where
    T: BufferContents + ?Sized,
{
    /// Changes the `T` generic parameter of the subbuffer to the desired type without checking if
    /// the contents are correctly aligned and sized.
    ///
    /// **NEVER use this function** unless you absolutely have to, and even then, open an issue on
    /// GitHub instead. **An unaligned / incorrectly sized subbuffer is undefined behavior _both on
    /// the Rust and the Vulkan side!_**
    ///
    /// # Safety
    ///
    /// - `self.memory_offset()` must be properly aligned for `U`.
    /// - `self.size()` must be valid for `U`, which means:
    ///   - If `U` is sized, the size must match exactly.
    ///   - If `U` is unsized, then the subbuffer size minus the size of the head (sized part) of
    ///     the DST must be evenly divisible by the size of the element type.
    #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
    pub unsafe fn reinterpret_unchecked<U>(self) -> Subbuffer<U>
    where
        U: BufferContents + ?Sized,
    {
        let element_size = U::LAYOUT.element_size().unwrap_or(1);
        debug_assert!(is_aligned(self.memory_offset(), U::LAYOUT.alignment()));
        debug_assert!(self.size >= U::LAYOUT.head_size());
        debug_assert!((self.size - U::LAYOUT.head_size()) % element_size == 0);

        self.reinterpret_unchecked_inner()
    }

    /// Same as [`reinterpret_unchecked`], except it works with a reference to the subbuffer.
    ///
    /// # Safety
    ///
    /// Please read the safety docs on [`reinterpret_unchecked`] carefully.
    ///
    /// [`reinterpret_unchecked`]: Self::reinterpret_unchecked
    #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
    pub unsafe fn reinterpret_ref_unchecked<U>(&self) -> &Subbuffer<U>
    where
        U: BufferContents + ?Sized,
    {
        let element_size = U::LAYOUT.element_size().unwrap_or(1);
        debug_assert!(is_aligned(self.memory_offset(), U::LAYOUT.alignment()));
        debug_assert!(self.size >= U::LAYOUT.head_size());
        debug_assert!((self.size - U::LAYOUT.head_size()) % element_size == 0);

        self.reinterpret_ref_unchecked_inner()
    }

    /// Locks the subbuffer in order to read its content from the host.
    ///
    /// If the subbuffer is currently used in exclusive mode by the device, this function will
    /// return an error. Similarly, if you called [`write`] on the subbuffer and haven't dropped
    /// the lock yet, this function will return an error as well.
    ///
    /// After this function successfully locks the subbuffer, any attempt to submit a command
    /// buffer that uses it in exclusive mode will fail. You can still submit this subbuffer for
    /// non-exclusive accesses (i.e. reads).
    ///
    /// If the memory backing the buffer is not [host-coherent], then this function will lock a
    /// range that is potentially larger than the subbuffer, because the range given to
    /// [`invalidate_range`] must be aligned to the [`non_coherent_atom_size`]. This means that for
    /// example if your Vulkan implementation reports an atom size of 64, and you tried to put 2
    /// subbuffers of size 32 in the same buffer, one at offset 0 and one at offset 32, while the
    /// buffer is backed by non-coherent memory, then invalidating one subbuffer would also
    /// invalidate the other subbuffer. This can lead to data races and is therefore not allowed.
    /// What you should do in that case is ensure that each subbuffer is aligned to the
    /// non-coherent atom size, so in this case one would be at offset 0 and the other at offset
    /// 64. [`SubbufferAllocator`] does this automatically.
    ///
    /// [host-coherent]: crate::memory::MemoryPropertyFlags::HOST_COHERENT
    /// [`invalidate_range`]: crate::memory::allocator::MemoryAlloc::invalidate_range
    /// [`non_coherent_atom_size`]: crate::device::Properties::non_coherent_atom_size
    /// [`write`]: Self::write
    /// [`SubbufferAllocator`]: super::allocator::SubbufferAllocator
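    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming `subbuffer` is a host-visible `Subbuffer<[u32]>` (for example
    /// one returned by [`Buffer::new_slice`]) that the device is not currently writing to:
    ///
    /// ```ignore
    /// // Lock the subbuffer for reading; the guard dereferences to `[u32]`.
    /// let guard = subbuffer.read().unwrap();
    /// let sum: u32 = guard.iter().sum();
    ///
    /// // The CPU read lock is released when the guard is dropped.
    /// drop(guard);
    /// ```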
    pub fn read(&self) -> Result<BufferReadGuard<'_, T>, BufferError> {
        let allocation = match self.buffer().memory() {
            BufferMemory::Normal(a) => a,
            BufferMemory::Sparse => todo!("`Subbuffer::read` doesn't support sparse binding yet"),
        };

        let range = if let Some(atom_size) = allocation.atom_size() {
            // This works because the suballocators align allocations to the non-coherent atom size
            // when the memory is host-visible but not host-coherent.
            let start = align_down(self.offset, atom_size);
            let end = cmp::min(
                align_up(self.offset + self.size, atom_size),
                allocation.size(),
            );

            Range { start, end }
        } else {
            self.range()
        };

        let mut state = self.buffer().state();
        state.check_cpu_read(range.clone())?;
        unsafe { state.cpu_read_lock(range.clone()) };

        if allocation.atom_size().is_some() {
            // If there are other read locks being held at this point, they also called
            // `invalidate_range` when locking. The GPU can't write data while the CPU holds a read
            // lock, so there will be no new data and this call will do nothing.
            // TODO: probably still more efficient to call it only if we're the first to acquire a
            // read lock, but the number of CPU locks isn't currently tracked anywhere.
            unsafe { allocation.invalidate_range(range.clone()) }?;
        }

        let mapped_ptr = self.mapped_ptr().ok_or(BufferError::MemoryNotHostVisible)?;
        // SAFETY: `Subbuffer` guarantees that its contents are laid out correctly for `T`.
        let data = unsafe { &*T::from_ffi(mapped_ptr.as_ptr(), self.size as usize) };

        Ok(BufferReadGuard {
            subbuffer: self,
            data,
            range,
        })
    }

    /// Locks the subbuffer in order to write its content from the host.
    ///
    /// If the subbuffer is currently in use by the device, this function will return an error.
    /// Similarly, if you called [`read`] on the subbuffer and haven't dropped the lock yet, this
    /// function will return an error as well.
    ///
    /// After this function successfully locks the subbuffer, any attempt to submit a command
    /// buffer that uses it and any attempt to call `read` will return an error.
    ///
    /// If the memory backing the buffer is not [host-coherent], then this function will lock a
    /// range that is potentially larger than the subbuffer, because the range given to
    /// [`flush_range`] must be aligned to the [`non_coherent_atom_size`]. This means that for
    /// example if your Vulkan implementation reports an atom size of 64, and you tried to put 2
    /// subbuffers of size 32 in the same buffer, one at offset 0 and one at offset 32, while the
    /// buffer is backed by non-coherent memory, then flushing one subbuffer would also flush the
    /// other subbuffer. This can lead to data races and is therefore not allowed. What you should
    /// do in that case is ensure that each subbuffer is aligned to the non-coherent atom size, so
    /// in this case one would be at offset 0 and the other at offset 64. [`SubbufferAllocator`]
    /// does this automatically.
    ///
    /// [host-coherent]: crate::memory::MemoryPropertyFlags::HOST_COHERENT
    /// [`flush_range`]: crate::memory::allocator::MemoryAlloc::flush_range
    /// [`non_coherent_atom_size`]: crate::device::Properties::non_coherent_atom_size
    /// [`read`]: Self::read
    /// [`SubbufferAllocator`]: super::allocator::SubbufferAllocator
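    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming `subbuffer` is a host-visible `Subbuffer<[u32]>` that is not
    /// currently in use by the device:
    ///
    /// ```ignore
    /// // Lock the subbuffer for writing; the guard dereferences mutably to `[u32]`.
    /// let mut guard = subbuffer.write().unwrap();
    /// for (i, element) in guard.iter_mut().enumerate() {
    ///     *element = i as u32;
    /// }
    ///
    /// // Dropping the guard flushes the range if the memory is not host-coherent and releases
    /// // the CPU write lock.
    /// drop(guard);
    /// ```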
    pub fn write(&self) -> Result<BufferWriteGuard<'_, T>, BufferError> {
        let allocation = match self.buffer().memory() {
            BufferMemory::Normal(a) => a,
            BufferMemory::Sparse => todo!("`Subbuffer::write` doesn't support sparse binding yet"),
        };

        let range = if let Some(atom_size) = allocation.atom_size() {
            // This works because the suballocators align allocations to the non-coherent atom size
            // when the memory is host-visible but not host-coherent.
            let start = align_down(self.offset, atom_size);
            let end = cmp::min(
                align_up(self.offset + self.size, atom_size),
                allocation.size(),
            );

            Range { start, end }
        } else {
            self.range()
        };

        let mut state = self.buffer().state();
        state.check_cpu_write(range.clone())?;
        unsafe { state.cpu_write_lock(range.clone()) };

        if allocation.atom_size().is_some() {
            unsafe { allocation.invalidate_range(range.clone()) }?;
        }

        let mapped_ptr = self.mapped_ptr().ok_or(BufferError::MemoryNotHostVisible)?;
        // SAFETY: `Subbuffer` guarantees that its contents are laid out correctly for `T`.
        let data = unsafe { &mut *T::from_ffi(mapped_ptr.as_ptr(), self.size as usize) };

        Ok(BufferWriteGuard {
            subbuffer: self,
            data,
            range,
        })
    }
}

impl<T> Subbuffer<T> {
    /// Converts the subbuffer to a slice of one element.
    pub fn into_slice(self) -> Subbuffer<[T]> {
        unsafe { self.reinterpret_unchecked_inner() }
    }

    /// Same as [`into_slice`], except it works with a reference to the subbuffer.
    ///
    /// [`into_slice`]: Self::into_slice
    pub fn as_slice(&self) -> &Subbuffer<[T]> {
        unsafe { self.reinterpret_ref_unchecked_inner() }
    }
}

impl<T> Subbuffer<T>
where
    T: BufferContents,
{
    /// Tries to cast a subbuffer of raw bytes to a `Subbuffer<T>`.
    pub fn try_from_bytes(subbuffer: Subbuffer<[u8]>) -> Result<Self, PodCastError> {
        if subbuffer.size() != size_of::<T>() as DeviceSize {
            Err(PodCastError::SizeMismatch)
        } else if !is_aligned(subbuffer.memory_offset(), DeviceAlignment::of::<T>()) {
            Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
        } else {
            Ok(unsafe { subbuffer.reinterpret_unchecked() })
        }
    }

    /// Tries to cast the subbuffer to a different type.
    pub fn try_cast<U>(self) -> Result<Subbuffer<U>, PodCastError>
    where
        U: BufferContents,
    {
        if size_of::<U>() != size_of::<T>() {
            Err(PodCastError::SizeMismatch)
        } else if align_of::<U>() > align_of::<T>()
            && !is_aligned(self.memory_offset(), DeviceAlignment::of::<U>())
        {
            Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
        } else {
            Ok(unsafe { self.reinterpret_unchecked() })
        }
    }
}

impl<T> Subbuffer<[T]> {
    /// Returns the number of elements in the slice.
    pub fn len(&self) -> DeviceSize {
        debug_assert!(self.size % size_of::<T>() as DeviceSize == 0);

        self.size / size_of::<T>() as DeviceSize
    }

    /// Reduces the subbuffer to just one element of the slice.
    ///
    /// # Panics
    ///
    /// - Panics if `index` is out of bounds.
    pub fn index(self, index: DeviceSize) -> Subbuffer<T> {
        assert!(index < self.len());

        unsafe { self.index_unchecked(index) }
    }

    #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
    pub unsafe fn index_unchecked(self, index: DeviceSize) -> Subbuffer<T> {
        Subbuffer {
            offset: self.offset + index * size_of::<T>() as DeviceSize,
            size: size_of::<T>() as DeviceSize,
            parent: self.parent,
            marker: PhantomData,
        }
    }

    /// Reduces the subbuffer to just a range of the slice.
    ///
    /// # Panics
    ///
    /// - Panics if `range` is out of bounds.
    /// - Panics if `range` is empty.
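    ///
    /// A small sketch, assuming `subbuffer` is a `Subbuffer<[u32]>` with at least 4 elements:
    ///
    /// ```ignore
    /// // Keep only elements 1, 2 and 3; the subbuffer's offset and size are adjusted.
    /// let middle = subbuffer.slice(1..4);
    /// assert_eq!(middle.len(), 3);
    /// ```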
    pub fn slice(mut self, range: impl RangeBounds<DeviceSize>) -> Subbuffer<[T]> {
        let Range { start, end } = memory::range(range, ..self.len()).unwrap();

        self.offset += start * size_of::<T>() as DeviceSize;
        self.size = (end - start) * size_of::<T>() as DeviceSize;
        assert!(self.size != 0);

        self
    }

    #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
    pub unsafe fn slice_unchecked(mut self, range: impl RangeBounds<DeviceSize>) -> Subbuffer<[T]> {
        let Range { start, end } = memory::range(range, ..self.len()).unwrap_unchecked();

        self.offset += start * size_of::<T>() as DeviceSize;
        self.size = (end - start) * size_of::<T>() as DeviceSize;
        debug_assert!(self.size != 0);

        self
    }

    /// Splits the subbuffer into two at an index.
    ///
    /// # Panics
    ///
    /// - Panics if `mid` is not greater than `0`.
    /// - Panics if `mid` is not less than `self.len()`.
    pub fn split_at(self, mid: DeviceSize) -> (Subbuffer<[T]>, Subbuffer<[T]>) {
        assert!(0 < mid && mid < self.len());

        unsafe { self.split_at_unchecked(mid) }
    }

    #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
    pub unsafe fn split_at_unchecked(self, mid: DeviceSize) -> (Subbuffer<[T]>, Subbuffer<[T]>) {
        (
            self.clone().slice_unchecked(..mid),
            self.slice_unchecked(mid..),
        )
    }
}

impl Subbuffer<[u8]> {
    /// Creates a new `Subbuffer<[u8]>` spanning the whole buffer.
    #[inline]
    pub fn new(buffer: Arc<Buffer>) -> Self {
        Subbuffer {
            offset: 0,
            size: buffer.size(),
            parent: SubbufferParent::Buffer(buffer),
            marker: PhantomData,
        }
    }

    /// Casts the slice to a different element type while ensuring correct alignment for the type.
    ///
    /// The offset of the subbuffer is rounded up to the alignment of `T` and the size adjusted for
    /// the padding, then the size is rounded down to the nearest multiple of `T`'s size.
    ///
    /// # Panics
    ///
    /// - Panics if the aligned offset would be out of bounds.
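    ///
    /// A small sketch, assuming `bytes` is a `Subbuffer<[u8]>` large enough to hold at least one
    /// suitably aligned `u32`:
    ///
    /// ```ignore
    /// // The offset is rounded up to the alignment of `u32`, and the size is rounded down to a
    /// // multiple of `size_of::<u32>()`.
    /// let ints = bytes.cast_aligned::<u32>();
    /// assert_eq!(ints.size() % 4, 0);
    /// ```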
    pub fn cast_aligned<T>(self) -> Subbuffer<[T]>
    where
        T: BufferContents,
    {
        let layout = DeviceLayout::from_layout(Layout::new::<T>()).unwrap();
        let aligned = self.align_to(layout);

        unsafe { aligned.reinterpret_unchecked() }
    }

    /// Aligns the subbuffer to the given `layout` by rounding the offset up to
    /// `layout.alignment()` and adjusting the size for the padding, and then rounding the size
    /// down to the nearest multiple of `layout.size()`.
    ///
    /// # Panics
    ///
    /// - Panics if the aligned offset would be out of bounds.
    /// - Panics if `layout.alignment()` exceeds `64`.
    #[inline]
    pub fn align_to(mut self, layout: DeviceLayout) -> Subbuffer<[u8]> {
        assert!(layout.alignment().as_devicesize() <= 64);

        let offset = self.memory_offset();
        let padding_front = align_up(offset, layout.alignment()) - offset;

        self.offset += padding_front;
        self.size = self.size.checked_sub(padding_front).unwrap();
        self.size -= self.size % layout.size();

        self
    }
}

impl<T> Subbuffer<[T]>
where
    T: BufferContents,
{
    /// Tries to cast the slice to a different element type.
    pub fn try_cast_slice<U>(self) -> Result<Subbuffer<[U]>, PodCastError>
    where
        U: BufferContents,
    {
        if size_of::<U>() != size_of::<T>() && self.size() % size_of::<U>() as DeviceSize != 0 {
            Err(PodCastError::OutputSliceWouldHaveSlop)
        } else if align_of::<U>() > align_of::<T>()
            && !is_aligned(self.memory_offset(), DeviceAlignment::of::<U>())
        {
            Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
        } else {
            Ok(unsafe { self.reinterpret_unchecked() })
        }
    }
}

impl From<Arc<Buffer>> for Subbuffer<[u8]> {
    #[inline]
    fn from(buffer: Arc<Buffer>) -> Self {
        Self::new(buffer)
    }
}

impl<T: ?Sized> Clone for Subbuffer<T> {
    fn clone(&self) -> Self {
        Subbuffer {
            parent: self.parent.clone(),
            ..*self
        }
    }
}

unsafe impl<T: ?Sized> DeviceOwned for Subbuffer<T> {
    fn device(&self) -> &Arc<Device> {
        self.buffer().device()
    }
}

impl<T: ?Sized> PartialEq for Subbuffer<T> {
    fn eq(&self, other: &Self) -> bool {
        self.parent == other.parent && self.offset == other.offset && self.size == other.size
    }
}

impl<T: ?Sized> Eq for Subbuffer<T> {}

impl<T: ?Sized> Hash for Subbuffer<T> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.parent.hash(state);
        self.offset.hash(state);
        self.size.hash(state);
    }
}

/// RAII structure used to release the CPU access of a subbuffer when dropped.
///
/// This structure is created by the [`read`] method on [`Subbuffer`].
///
/// [`read`]: Subbuffer::read
#[derive(Debug)]
pub struct BufferReadGuard<'a, T: ?Sized> {
    subbuffer: &'a Subbuffer<T>,
    data: &'a T,
    range: Range<DeviceSize>,
}

impl<T: ?Sized> Drop for BufferReadGuard<'_, T> {
    fn drop(&mut self) {
        let mut state = self.subbuffer.buffer().state();
        unsafe { state.cpu_read_unlock(self.range.clone()) };
    }
}

impl<T: ?Sized> Deref for BufferReadGuard<'_, T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        self.data
    }
}

/// RAII structure used to release the CPU write access of a subbuffer when dropped.
///
/// This structure is created by the [`write`] method on [`Subbuffer`].
///
/// [`write`]: Subbuffer::write
#[derive(Debug)]
pub struct BufferWriteGuard<'a, T: ?Sized> {
    subbuffer: &'a Subbuffer<T>,
    data: &'a mut T,
    range: Range<DeviceSize>,
}

impl<T: ?Sized> Drop for BufferWriteGuard<'_, T> {
    fn drop(&mut self) {
        let allocation = match self.subbuffer.buffer().memory() {
            BufferMemory::Normal(a) => a,
            BufferMemory::Sparse => unreachable!(),
        };

        if allocation.atom_size().is_some() && !thread::panicking() {
            unsafe { allocation.flush_range(self.range.clone()).unwrap() };
        }

        let mut state = self.subbuffer.buffer().state();
        unsafe { state.cpu_write_unlock(self.range.clone()) };
    }
}

impl<T: ?Sized> Deref for BufferWriteGuard<'_, T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        self.data
    }
}

impl<T: ?Sized> DerefMut for BufferWriteGuard<'_, T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.data
    }
}

/// Error when attempting to CPU-read a buffer.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ReadLockError {
    /// The buffer is already locked for write mode by the CPU.
    CpuWriteLocked,
    /// The buffer is already locked for write mode by the GPU.
    GpuWriteLocked,
}

impl Error for ReadLockError {}

impl Display for ReadLockError {
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), FmtError> {
        write!(
            f,
            "{}",
            match self {
                ReadLockError::CpuWriteLocked => {
                    "the buffer is already locked for write mode by the CPU"
                }
                ReadLockError::GpuWriteLocked => {
                    "the buffer is already locked for write mode by the GPU"
                }
            }
        )
    }
}

/// Error when attempting to CPU-write a buffer.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum WriteLockError {
    /// The buffer is already locked by the CPU.
    CpuLocked,
    /// The buffer is already locked by the GPU.
    GpuLocked,
}

impl Error for WriteLockError {}

impl Display for WriteLockError {
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), FmtError> {
        write!(
            f,
            "{}",
            match self {
                WriteLockError::CpuLocked => "the buffer is already locked by the CPU",
                WriteLockError::GpuLocked => "the buffer is already locked by the GPU",
            }
        )
    }
}

/// Trait for types of data that can be put in a buffer.
///
/// This trait is not intended to be implemented manually (ever) and attempting to do so will make
/// you one sad individual very quickly. Rather, you should use [the derive macro]. Note also that
/// there are blanket implementations of this trait: you don't need to implement it if the type in
/// question already implements bytemuck's [`AnyBitPattern`]. Most if not all linear algebra
/// crates have a feature flag that you can enable for bytemuck support. The trait is also already
/// implemented for all slices where the element type implements `BufferContents`.
///
/// # Examples
///
/// Deriving the trait for sized types:
///
/// ```
/// # use vulkano::buffer::BufferContents;
/// #[derive(BufferContents)]
/// #[repr(C)]
/// struct MyData {
///     x: f32,
///     y: f32,
///     array: [i32; 12],
/// }
/// ```
///
/// Deriving the trait for unsized types works the same:
///
/// ```
/// # use vulkano::buffer::BufferContents;
/// #[derive(BufferContents)]
/// #[repr(C)]
/// struct MyData {
///     x: f32,
///     y: f32,
///     slice: [i32],
/// }
/// ```
///
/// This even works if the last field is a user-defined DST too:
///
/// ```
/// # use vulkano::buffer::BufferContents;
/// #[derive(BufferContents)]
/// #[repr(C)]
/// struct MyData {
///     x: f32,
///     y: f32,
///     other: OtherData,
/// }
///
/// #[derive(BufferContents)]
/// #[repr(C)]
/// struct OtherData {
///     slice: [i32],
/// }
/// ```
///
/// You can also use generics if you please:
///
/// ```
/// # use vulkano::buffer::BufferContents;
/// #[derive(BufferContents)]
/// #[repr(C)]
/// struct MyData<T, U> {
///     x: T,
///     y: T,
///     slice: [U],
/// }
/// ```
///
/// This even works with dependently-sized types:
///
/// ```
/// # use vulkano::buffer::BufferContents;
/// #[derive(BufferContents)]
/// #[repr(C)]
/// struct MyData<T>
/// where
///     T: ?Sized,
/// {
///     x: f32,
///     y: f32,
///     z: T,
/// }
/// ```
///
/// [the derive macro]: vulkano_macros::BufferContents
//
// If you absolutely *must* implement this trait by hand, here are the safety requirements (but
// please open an issue on GitHub instead):
//
// - The type must be a struct and all fields must implement `BufferContents`.
// - `LAYOUT` must be the correct layout for the type, which also means the type must either be
//   sized or if it's unsized then its metadata must be the same as that of a slice. Implementing
//   `BufferContents` for any other kind of DST is instantaneous horrifically undefined behavior.
// - `from_ffi` must create a pointer with the same address as the `data` parameter that is passed
//   in. The pointer is expected to be aligned properly already.
// - `from_ffi` must create a pointer that is expected to be valid for reads (and potentially
//   writes) for exactly `range` bytes. The `data` and `range` are expected to be valid for the
//   `LAYOUT`.
pub unsafe trait BufferContents: Send + Sync + 'static {
    /// The layout of the contents.
    const LAYOUT: BufferContentsLayout;

    /// Creates a pointer to `Self` from a pointer to the start of the data and a range in bytes.
    ///
    /// # Safety
    ///
    /// - If `Self` is sized, then `range` must match the size exactly.
    /// - If `Self` is unsized, then the `range` minus the size of the head (sized part) of the DST
    ///   must be evenly divisible by the size of the element type.
    #[doc(hidden)]
    unsafe fn from_ffi(data: *mut c_void, range: usize) -> *mut Self;
}

unsafe impl<T> BufferContents for T
where
    T: AnyBitPattern + Send + Sync,
{
    const LAYOUT: BufferContentsLayout =
        if let Some(layout) = BufferContentsLayout::from_sized(Layout::new::<T>()) {
            layout
        } else {
            panic!("zero-sized types are not valid buffer contents");
        };

    #[inline(always)]
    unsafe fn from_ffi(data: *mut c_void, range: usize) -> *mut Self {
        debug_assert!(range == size_of::<T>());
        debug_assert!(data as usize % align_of::<T>() == 0);

        data.cast()
    }
}

unsafe impl<T> BufferContents for [T]
where
    T: BufferContents,
{
    const LAYOUT: BufferContentsLayout = BufferContentsLayout(BufferContentsLayoutInner::Unsized {
        head_layout: None,
        element_layout: T::LAYOUT.unwrap_sized(),
    });

    #[inline(always)]
    unsafe fn from_ffi(data: *mut c_void, range: usize) -> *mut Self {
        debug_assert!(range % size_of::<T>() == 0);
        debug_assert!(data as usize % align_of::<T>() == 0);
        let len = range / size_of::<T>();

        ptr::slice_from_raw_parts_mut(data.cast(), len)
    }
}

/// Describes the layout required for a type so that it can be read from/written to a buffer. This
/// is used to allocate (sub)buffers generically.
///
/// This is similar to [`DeviceLayout`] except that it exists for the sole purpose of describing
/// the layout of buffer contents specifically. This means, for example, that the sizedness of the
/// type is captured, as well as the layout of the head and tail if the layout is for unsized data,
/// in order to be able to represent everything that Vulkan can stuff in a buffer.
///
/// `BufferContentsLayout` also has an additional invariant compared to `DeviceLayout`: the
/// alignment of the data must not exceed `64`. This is because that's the guaranteed alignment
/// that all `DeviceMemory` blocks must be aligned to at minimum, and hence any greater alignment
/// can't be guaranteed. Other than that, the invariant that sizes must be non-zero applies here as
/// well, for both sized data and the element type of unsized data.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct BufferContentsLayout(BufferContentsLayoutInner);

#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
enum BufferContentsLayoutInner {
    Sized(DeviceLayout),
    Unsized {
        head_layout: Option<DeviceLayout>,
        element_layout: DeviceLayout,
    },
}

impl BufferContentsLayout {
    /// Returns the size of the head (sized part). If the data has no sized part, then this will
    /// return 0.
    #[inline]
    pub const fn head_size(&self) -> DeviceSize {
        match &self.0 {
            BufferContentsLayoutInner::Sized(sized) => sized.size(),
            BufferContentsLayoutInner::Unsized {
                head_layout: None, ..
            } => 0,
            BufferContentsLayoutInner::Unsized {
                head_layout: Some(head_layout),
                ..
            } => head_layout.size(),
        }
    }

    /// Returns the size of the element type if the data is unsized, or returns [`None`].
    /// Guaranteed to be non-zero.
    #[inline]
    pub const fn element_size(&self) -> Option<DeviceSize> {
        match &self.0 {
            BufferContentsLayoutInner::Sized(_) => None,
            BufferContentsLayoutInner::Unsized { element_layout, .. } => {
                Some(element_layout.size())
            }
        }
    }

    /// Returns the alignment required for the data. Guaranteed to not exceed `64`.
    #[inline]
    pub const fn alignment(&self) -> DeviceAlignment {
        match &self.0 {
            BufferContentsLayoutInner::Sized(sized) => sized.alignment(),
            BufferContentsLayoutInner::Unsized {
                head_layout: None,
                element_layout,
            } => element_layout.alignment(),
            BufferContentsLayoutInner::Unsized {
                head_layout: Some(head_layout),
                ..
            } => head_layout.alignment(),
        }
    }

    /// Returns the [`DeviceLayout`] for the data for the given `len`, or returns [`None`] on
    /// arithmetic overflow or if the total size would exceed [`DeviceLayout::MAX_SIZE`].
    #[inline]
    pub const fn layout_for_len(&self, len: NonZeroDeviceSize) -> Option<DeviceLayout> {
        match &self.0 {
            BufferContentsLayoutInner::Sized(sized) => Some(*sized),
            BufferContentsLayoutInner::Unsized {
                head_layout,
                element_layout,
            } => {
                let (tail_layout, _) = try_opt!(element_layout.repeat(len));

                if let Some(head_layout) = head_layout {
                    let (layout, _) = try_opt!(head_layout.extend(tail_layout));

                    Some(layout.pad_to_alignment())
                } else {
                    Some(tail_layout)
                }
            }
        }
    }

    /// Creates a new `BufferContentsLayout` from a sized layout. This is intended for use by the
    /// derive macro only.
    #[doc(hidden)]
    #[inline]
    pub const fn from_sized(sized: Layout) -> Option<Self> {
        assert!(
            sized.align() <= 64,
            "types with alignments above 64 are not valid buffer contents",
        );

        if let Ok(sized) = DeviceLayout::from_layout(sized) {
            Some(Self(BufferContentsLayoutInner::Sized(sized)))
        } else {
            None
        }
    }

    /// Creates a new `BufferContentsLayout` from a head and element layout. This is intended for
    /// use by the derive macro only.
    #[doc(hidden)]
    #[inline]
    pub const fn from_head_element_layout(
        head_layout: Layout,
        element_layout: Layout,
    ) -> Option<Self> {
        if head_layout.align() > 64 || element_layout.align() > 64 {
            panic!("types with alignments above 64 are not valid buffer contents");
        }

        // The head of a `BufferContentsLayout` can be zero-sized.
        // TODO: Replace with `Result::ok` once its constness is stabilized.
        let head_layout = if let Ok(head_layout) = DeviceLayout::from_layout(head_layout) {
            Some(head_layout)
        } else {
            None
        };

        if let Ok(element_layout) = DeviceLayout::from_layout(element_layout) {
            Some(Self(BufferContentsLayoutInner::Unsized {
                head_layout,
                element_layout,
            }))
        } else {
            None
        }
    }

    /// Extends the given `previous` [`Layout`] by `self`. This is intended for use by the derive
    /// macro only.
    #[doc(hidden)]
    #[inline]
    pub const fn extend_from_layout(self, previous: &Layout) -> Option<Self> {
        assert!(
            previous.align() <= 64,
            "types with alignments above 64 are not valid buffer contents",
        );

        match self.0 {
            BufferContentsLayoutInner::Sized(sized) => {
                let (sized, _) = try_opt!(sized.extend_from_layout(previous));

                Some(Self(BufferContentsLayoutInner::Sized(sized)))
            }
            BufferContentsLayoutInner::Unsized {
                head_layout: None,
                element_layout,
            } => {
                // The head of a `BufferContentsLayout` can be zero-sized.
                // TODO: Replace with `Result::ok` once its constness is stabilized.
                let head_layout = if let Ok(head_layout) = DeviceLayout::from_layout(*previous) {
                    Some(head_layout)
                } else {
                    None
                };

                Some(Self(BufferContentsLayoutInner::Unsized {
                    head_layout,
                    element_layout,
                }))
            }
            BufferContentsLayoutInner::Unsized {
                head_layout: Some(head_layout),
                element_layout,
            } => {
                let (head_layout, _) = try_opt!(head_layout.extend_from_layout(previous));

                Some(Self(BufferContentsLayoutInner::Unsized {
                    head_layout: Some(head_layout),
                    element_layout,
                }))
            }
        }
    }

    /// Creates a new `BufferContentsLayout` by rounding up the size of the head to the nearest
    /// multiple of its alignment if the layout is sized, or by rounding up the size of the head to
    /// the nearest multiple of the alignment of the element type and aligning the head to the
    /// alignment of the element type if there is a sized part. Doesn't do anything if there is no
    /// sized part. Returns [`None`] if the new head size would exceed [`DeviceLayout::MAX_SIZE`].
    /// This is intended for use by the derive macro only.
    #[doc(hidden)]
    #[inline]
    pub const fn pad_to_alignment(&self) -> Option<Self> {
        match &self.0 {
            BufferContentsLayoutInner::Sized(sized) => Some(Self(
                BufferContentsLayoutInner::Sized(sized.pad_to_alignment()),
            )),
            BufferContentsLayoutInner::Unsized {
                head_layout: None,
                element_layout,
            } => Some(Self(BufferContentsLayoutInner::Unsized {
                head_layout: None,
                element_layout: *element_layout,
            })),
            BufferContentsLayoutInner::Unsized {
                head_layout: Some(head_layout),
                element_layout,
            } => {
                // We must pad the head to the alignment of the element type, *not* the alignment
                // of the head.
                //
                // Consider a head layout of `(u8, u8, u8)` and an element layout of `u32`. If we
                // padded the head to its own alignment, like is the case for sized layouts, it
                // wouldn't change the size. Yet there is padding between the head and the first
                // element of the slice.
                //
                // The reverse is true: consider a head layout of `(u16, u8)` and an element layout
                // of `u8`. If we padded the head to its own alignment, it would be too large.
                let padded_head_size =
                    head_layout.size() + head_layout.padding_needed_for(element_layout.alignment());

                // SAFETY: `BufferContentsLayout`'s invariant guarantees that the alignment of the
                // element type doesn't exceed 64, which together with the overflow invariant of
                // `DeviceLayout` means that this can't overflow.
                let padded_head_size =
                    unsafe { NonZeroDeviceSize::new_unchecked(padded_head_size) };

                // We have to align the head to the alignment of the element type, so that the
                // struct as a whole is aligned correctly when a different struct is extended with
                // this one.
                //
                // Note that this is *not* the same as aligning the head to the alignment of the
                // element type and then padding the layout to its alignment. Consider the same
                // layout from above, with a head layout of `(u16, u8)` and an element layout of
                // `u8`. If we aligned the head to the element type and then padded it to its own
                // alignment, we would get the same wrong result as above. This instead ensures the
                // head is padded to the element and aligned to it, without the alignment of the
                // head interfering.
                let alignment =
                    DeviceAlignment::max(head_layout.alignment(), element_layout.alignment());

                if let Some(head_layout) = DeviceLayout::new(padded_head_size, alignment) {
                    Some(Self(BufferContentsLayoutInner::Unsized {
                        head_layout: Some(head_layout),
                        element_layout: *element_layout,
                    }))
                } else {
                    None
                }
            }
        }
    }

    pub(super) const fn unwrap_sized(self) -> DeviceLayout {
        match self.0 {
            BufferContentsLayoutInner::Sized(sized) => sized,
            BufferContentsLayoutInner::Unsized { .. } => {
                panic!("called `BufferContentsLayout::unwrap_sized` on an unsized layout");
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        buffer::{
            sys::{BufferCreateInfo, RawBuffer},
            BufferUsage,
        },
        memory::{
            allocator::{
                AllocationCreateInfo, AllocationType, DeviceLayout, MemoryAllocator, MemoryUsage,
                StandardMemoryAllocator,
            },
            MemoryRequirements,
        },
    };

    #[test]
    fn derive_buffer_contents() {
        #[derive(BufferContents)]
        #[repr(C)]
        struct Test1(u32, u64, u8);

        assert_eq!(Test1::LAYOUT.head_size() as usize, size_of::<Test1>());
        assert_eq!(Test1::LAYOUT.element_size(), None);
        assert_eq!(
            Test1::LAYOUT.alignment().as_devicesize() as usize,
            align_of::<Test1>(),
        );

        #[derive(BufferContents)]
        #[repr(C)]
        struct Composite1(Test1, [f32; 10], Test1);

        assert_eq!(
            Composite1::LAYOUT.head_size() as usize,
            size_of::<Composite1>(),
        );
        assert_eq!(Composite1::LAYOUT.element_size(), None);
        assert_eq!(
            Composite1::LAYOUT.alignment().as_devicesize() as usize,
            align_of::<Composite1>(),
        );

        #[derive(BufferContents)]
        #[repr(C)]
        struct Test2(u64, u8, [u32]);

        assert_eq!(
            Test2::LAYOUT.head_size() as usize,
            size_of::<u64>() + size_of::<u32>(),
        );
        assert_eq!(
            Test2::LAYOUT.element_size().unwrap() as usize,
            size_of::<u32>(),
        );
        assert_eq!(
            Test2::LAYOUT.alignment().as_devicesize() as usize,
            align_of::<u64>(),
        );

        #[derive(BufferContents)]
        #[repr(C)]
        struct Composite2(Test1, [f32; 10], Test2);

        assert_eq!(
            Composite2::LAYOUT.head_size() as usize,
            size_of::<Test1>() + size_of::<[f32; 10]>() + size_of::<u64>() + size_of::<u32>(),
        );
        assert_eq!(
            Composite2::LAYOUT.element_size().unwrap() as usize,
            size_of::<u32>(),
        );
        assert_eq!(
            Composite2::LAYOUT.alignment().as_devicesize() as usize,
            align_of::<u64>(),
        );
    }

    #[test]
    fn split_at() {
        let (device, _) = gfx_dev_and_queue!();
        let allocator = StandardMemoryAllocator::new_default(device);

        let buffer = Buffer::new_slice::<u32>(
            &allocator,
            BufferCreateInfo {
                usage: BufferUsage::TRANSFER_SRC,
                ..Default::default()
            },
            AllocationCreateInfo {
                usage: MemoryUsage::Upload,
                ..Default::default()
            },
            6,
        )
        .unwrap();

        {
            let (left, right) = buffer.clone().split_at(2);
            assert!(left.len() == 2);
            assert!(right.len() == 4);
        }

        {
            let (left, right) = buffer.clone().split_at(5);
            assert!(left.len() == 5);
            assert!(right.len() == 1);
        }

        {
            assert_should_panic!({ buffer.clone().split_at(0) });
        }

        {
            assert_should_panic!({ buffer.split_at(6) });
        }
    }

    #[test]
    fn cast_aligned() {
        let (device, _) = gfx_dev_and_queue!();
        let allocator = StandardMemoryAllocator::new_default(device.clone());

        let raw_buffer = RawBuffer::new(
            device,
            BufferCreateInfo {
                size: 32,
                usage: BufferUsage::TRANSFER_SRC,
                ..Default::default()
            },
        )
        .unwrap();

        let requirements = MemoryRequirements {
            layout: DeviceLayout::from_size_alignment(32, 1).unwrap(),
            memory_type_bits: 1,
            prefers_dedicated_allocation: false,
            requires_dedicated_allocation: false,
        };

        // Allocate some junk in the same block as the buffer.
        let _junk = allocator
            .allocate(
                MemoryRequirements {
                    layout: DeviceLayout::from_size_alignment(17, 1).unwrap(),
                    ..requirements
                },
                AllocationType::Linear,
                AllocationCreateInfo::default(),
                None,
            )
            .unwrap();

        let allocation = allocator
            .allocate(
                requirements,
                AllocationType::Linear,
                AllocationCreateInfo::default(),
                None,
            )
            .unwrap();

        let buffer = Buffer::from_raw(raw_buffer, BufferMemory::Normal(allocation));
        let buffer = Subbuffer::from(Arc::new(buffer));

        assert!(buffer.memory_offset() >= 17);

        {
            #[derive(Clone, Copy, bytemuck::Pod, bytemuck::Zeroable)]
            #[repr(C, align(16))]
            struct Test([u8; 16]);

            let aligned = buffer.clone().cast_aligned::<Test>();
            assert_eq!(aligned.memory_offset() % 16, 0);
            assert_eq!(aligned.size(), 16);
        }

        {
            let aligned = buffer.clone().cast_aligned::<[u8; 16]>();
            assert_eq!(aligned.size() % 16, 0);
        }

        {
            let layout = DeviceLayout::from_size_alignment(32, 16).unwrap();
            let aligned = buffer.clone().align_to(layout);
            assert!(is_aligned(aligned.memory_offset(), layout.alignment()));
            assert_eq!(aligned.size(), 0);
        }

        {
            let layout = DeviceLayout::from_size_alignment(1, 64).unwrap();
            assert_should_panic!({ buffer.align_to(layout) });
        }
    }
}