// Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.

//! A newtype wrapper for enforcing correct alignment for external types.

use crate::buffer::{BufferContents, BufferContentsLayout};
#[cfg(feature = "serde")]
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use std::{
    alloc::Layout,
    cmp::Ordering,
    ffi::c_void,
    fmt::{Debug, Display, Formatter, Result as FmtResult},
    hash::{Hash, Hasher},
    mem::{align_of, size_of, MaybeUninit},
    ops::{Deref, DerefMut},
};

/// A newtype wrapper around `T`, with `N` bytes of trailing padding.
///
/// In Vulkan, the layout of buffer contents is not necessarily as one would expect from the type
/// signature in the shader code. For example, the *extended layout* or *std140 layout* in GLSL,
/// which is used for uniform buffers by default, requires that array elements are aligned to 16
/// bytes at minimum. That means that even if the array contains a scalar type like `u32`, its
/// elements must be aligned to 16 bytes. We cannot enforce that with primitive Rust types alone.
/// In such cases, we can use `Padded` to enforce correct alignment on the Rust side.
///
/// See also [the `shader` module documentation] for more information about layout in shaders.
///
/// # Examples
///
/// ## Aligning struct members
///
/// Consider this GLSL code:
///
/// ```glsl
/// layout(binding = 0) uniform MyData {
///     int x;
///     vec3 y;
///     vec4 z;
/// };
/// ```
///
/// By default, the alignment rules require that `y` and `z` are placed at an offset that is an
/// integer multiple of 16. However, `x` is only 4 bytes, which means that there must be 12 bytes
/// of padding between `x` and `y`. Furthermore, `y` is only 12 bytes, which means that there must
/// be 4 bytes of padding between `y` and `z`.
///
/// We can model this in Rust using `Padded`:
///
/// ```
/// # use vulkano::{buffer::BufferContents, padded::Padded};
/// #[derive(BufferContents)]
/// #[repr(C)]
/// struct MyData {
///     x: Padded<i32, 12>,
///     y: Padded<[f32; 3], 4>,
///     z: [f32; 4],
/// }
///
/// let data = MyData {
///     x: Padded(42),
///     y: Padded([1.0, 2.0, 3.0]),
///     z: [10.0; 4],
/// };
/// ```
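///
/// As a sanity check, `size_of` confirms that the padded fields add up to the 48 bytes the
/// std140 layout prescribes for this block: `x` occupies bytes 0..16, `y` bytes 16..32, and `z`
/// bytes 32..48:
///
/// ```
/// # use vulkano::{buffer::BufferContents, padded::Padded};
/// # #[derive(BufferContents)]
/// # #[repr(C)]
/// # struct MyData {
/// #     x: Padded<i32, 12>,
/// #     y: Padded<[f32; 3], 4>,
/// #     z: [f32; 4],
/// # }
/// assert_eq!(std::mem::size_of::<MyData>(), 48);
/// ```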
///
/// **But note that this layout is extremely suboptimal.** What you should do instead is reorder
/// your fields such that you don't need any padding:
///
/// ```glsl
/// layout(binding = 0) uniform MyData {
///     vec3 y;
///     int x;
///     vec4 z;
/// };
/// ```
///
/// ```
/// # use vulkano::buffer::BufferContents;
/// #[derive(BufferContents)]
/// #[repr(C)]
/// struct MyData {
///     y: [f32; 3],
///     x: i32,
///     z: [f32; 4],
/// }
/// ```
///
/// This way, the fields are aligned naturally. But reordering fields is not always an option: a
/// notable case is when your structure only contains `vec3`s and `vec4`s, or `vec3`s and
/// `vec2`s, so that there are no scalar fields to fill the gaps with.
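///
/// With the reordered layout, `size_of` confirms that no padding remains; the 12 + 4 + 16 bytes
/// of the three fields pack into 32 bytes exactly:
///
/// ```
/// # use vulkano::buffer::BufferContents;
/// # #[derive(BufferContents)]
/// # #[repr(C)]
/// # struct MyData {
/// #     y: [f32; 3],
/// #     x: i32,
/// #     z: [f32; 4],
/// # }
/// assert_eq!(std::mem::size_of::<MyData>(), 32);
/// ```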
///
/// ## Aligning array elements
///
/// If you need an array of `vec3`s, then that necessitates that each array element has 4 bytes of
/// trailing padding. The same goes for a matrix with 3 rows: each column will have to have 4
/// bytes of trailing padding (assuming it's column-major).
///
/// We can model those using `Padded` too:
///
/// ```glsl
/// layout(binding = 0) uniform MyData {
///     vec3 x[10];
///     mat3 y;
/// };
/// ```
///
/// ```
/// # use vulkano::{buffer::BufferContents, padded::Padded};
/// #[derive(BufferContents)]
/// #[repr(C)]
/// struct MyData {
///     x: [Padded<[f32; 3], 4>; 10],
///     y: [Padded<[f32; 3], 4>; 3],
/// }
/// ```
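///
/// A quick `size_of` check makes the resulting array stride visible: each padded `vec3` element
/// occupies 16 bytes, so the ten-element array takes up 160 bytes:
///
/// ```
/// # use vulkano::padded::Padded;
/// assert_eq!(std::mem::size_of::<Padded<[f32; 3], 4>>(), 16);
/// assert_eq!(std::mem::size_of::<[Padded<[f32; 3], 4>; 10]>(), 160);
/// ```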
///
/// Another example would be if you have an array of scalars or `vec2`s inside a uniform block:
///
/// ```glsl
/// layout(binding = 0) uniform MyData {
///     int x[10];
///     vec2 y[10];
/// };
/// ```
///
/// By default, arrays inside uniform blocks must have their elements aligned to 16 bytes at
/// minimum, which would look like this in Rust:
///
/// ```
/// # use vulkano::{buffer::BufferContents, padded::Padded};
/// #[derive(BufferContents)]
/// #[repr(C)]
/// struct MyData {
///     x: [Padded<i32, 12>; 10],
///     y: [Padded<[f32; 2], 8>; 10],
/// }
/// ```
///
/// **But note again that this layout is suboptimal.** If memory usage could become an issue, you
/// can use a buffer block instead of the uniform block:
///
/// ```glsl
/// layout(binding = 0) buffer MyData {
///     int x[10];
///     vec2 y[10];
/// };
/// ```
///
/// ```
/// # use vulkano::buffer::BufferContents;
/// #[derive(BufferContents)]
/// #[repr(C)]
/// struct MyData {
///     x: [i32; 10],
///     y: [[f32; 2]; 10],
/// }
/// ```
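///
/// The savings are easy to quantify: the padded uniform-block arrays above take 160 bytes each,
/// while the tightly packed buffer-block arrays take only 40 and 80 bytes:
///
/// ```
/// # use vulkano::padded::Padded;
/// assert_eq!(std::mem::size_of::<[Padded<i32, 12>; 10]>(), 160);
/// assert_eq!(std::mem::size_of::<[Padded<[f32; 2], 8>; 10]>(), 160);
/// assert_eq!(std::mem::size_of::<[i32; 10]>(), 40);
/// assert_eq!(std::mem::size_of::<[[f32; 2]; 10]>(), 80);
/// ```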
///
/// You may also want to consider using [the `uniform_buffer_standard_layout` feature].
///
/// [the `shader` module documentation]: crate::shader
/// [the `uniform_buffer_standard_layout` feature]: crate::device::Features::uniform_buffer_standard_layout
#[repr(C)]
pub struct Padded<T, const N: usize> {
    value: T,
    _padding: [MaybeUninit<u8>; N],
}

#[allow(non_snake_case)]
#[doc(hidden)]
#[inline(always)]
pub const fn Padded<T, const N: usize>(value: T) -> Padded<T, N> {
    Padded {
        value,
        _padding: [MaybeUninit::uninit(); N],
    }
}

impl<T, const N: usize> AsRef<T> for Padded<T, N> {
    fn as_ref(&self) -> &T {
        &self.value
    }
}

impl<T, const N: usize> AsMut<T> for Padded<T, N> {
    fn as_mut(&mut self) -> &mut T {
        &mut self.value
    }
}

impl<T, const N: usize> Clone for Padded<T, N>
where
    T: Clone,
{
    fn clone(&self) -> Self {
        Padded(self.value.clone())
    }
}

impl<T, const N: usize> Copy for Padded<T, N> where T: Copy {}

impl<T, const N: usize> Debug for Padded<T, N>
where
    T: Debug,
{
    fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
        self.value.fmt(f)
    }
}

impl<T, const N: usize> Default for Padded<T, N>
where
    T: Default,
{
    fn default() -> Self {
        Padded(T::default())
    }
}

impl<T, const N: usize> Deref for Padded<T, N> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        &self.value
    }
}

impl<T, const N: usize> DerefMut for Padded<T, N> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.value
    }
}

impl<T, const N: usize> Display for Padded<T, N>
where
    T: Display,
{
    fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
        self.value.fmt(f)
    }
}

impl<T, const N: usize> From<T> for Padded<T, N> {
    fn from(value: T) -> Self {
        Padded(value)
    }
}

impl<T, const N: usize> PartialEq for Padded<T, N>
where
    T: PartialEq,
{
    fn eq(&self, other: &Self) -> bool {
        self.value == other.value
    }
}

impl<T, const N: usize> Eq for Padded<T, N> where T: Eq {}

impl<T, const N: usize> Hash for Padded<T, N>
where
    T: Hash,
{
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.value.hash(state);
    }
}

impl<T, const N: usize> PartialOrd for Padded<T, N>
where
    T: PartialOrd,
{
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        self.value.partial_cmp(&other.value)
    }
}

impl<T, const N: usize> Ord for Padded<T, N>
where
    T: Ord,
{
    fn cmp(&self, other: &Self) -> Ordering {
        self.value.cmp(&other.value)
    }
}

unsafe impl<T, const N: usize> BufferContents for Padded<T, N>
where
    T: BufferContents,
{
    const LAYOUT: BufferContentsLayout =
        if let Some(layout) = BufferContentsLayout::from_sized(Layout::new::<Self>()) {
            layout
        } else {
            panic!("zero-sized types are not valid buffer contents");
        };

    unsafe fn from_ffi(data: *mut c_void, range: usize) -> *mut Self {
        debug_assert!(range == size_of::<Self>());
        debug_assert!(data as usize % align_of::<Self>() == 0);

        data.cast()
    }
}

#[cfg(feature = "serde")]
impl<T, const N: usize> Serialize for Padded<T, N>
where
    T: Serialize,
{
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        self.value.serialize(serializer)
    }
}

#[cfg(feature = "serde")]
impl<'de, T, const N: usize> Deserialize<'de> for Padded<T, N>
where
    T: Deserialize<'de>,
{
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        T::deserialize(deserializer).map(Padded)
    }
}
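
// Sanity checks for `Padded`'s size, alignment, and deref behavior; these assertions follow
// directly from the `#[repr(C)]` definition and the `Deref`/`DerefMut` impls above.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn size_align_and_deref() {
        // 4 bytes of `i32` plus 12 bytes of trailing padding, keeping the alignment of `i32`.
        assert_eq!(size_of::<Padded<i32, 12>>(), 16);
        assert_eq!(align_of::<Padded<i32, 12>>(), align_of::<i32>());

        // The wrapped value is reachable through `Deref` and `DerefMut`.
        let mut x: Padded<i32, 12> = Padded(42);
        assert_eq!(*x, 42);
        *x += 1;
        assert_eq!(*x, 43);
    }
}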