// Copyright (c) 2022 The Vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.

use super::{Device, DeviceOwned};
use crate::{
    buffer::BufferState,
    command_buffer::{
        CommandBufferResourcesUsage, CommandBufferState, CommandBufferUsage, SemaphoreSubmitInfo,
        SubmitInfo,
    },
    image::{sys::ImageState, ImageAccess},
    instance::debug::DebugUtilsLabel,
    macros::vulkan_bitflags,
    memory::{
        BindSparseInfo, SparseBufferMemoryBind, SparseImageMemoryBind, SparseImageOpaqueMemoryBind,
    },
    swapchain::{PresentInfo, SwapchainPresentInfo},
    sync::{
        fence::{Fence, FenceState},
        future::{AccessCheckError, FlushError, GpuFuture},
        semaphore::SemaphoreState,
    },
    OomError, RequirementNotMet, RequiresOneOf, Version, VulkanError, VulkanObject,
};
use ahash::HashMap;
use parking_lot::{Mutex, MutexGuard};
use smallvec::{smallvec, SmallVec};
use std::{
    collections::VecDeque,
    error::Error,
    ffi::CString,
    fmt::{Display, Error as FmtError, Formatter},
    hash::{Hash, Hasher},
    mem::take,
    ptr,
    sync::{atomic::Ordering, Arc},
};

/// Represents a queue where commands can be submitted.
// TODO: should use internal synchronization?
#[derive(Debug)]
pub struct Queue {
    handle: ash::vk::Queue,
    device: Arc<Device>,
    queue_family_index: u32,
    id: u32, // id within family

    state: Mutex<QueueState>,
}

impl Queue {
    // TODO: Make public
    #[inline]
    pub(super) fn from_handle(
        device: Arc<Device>,
        handle: ash::vk::Queue,
        queue_family_index: u32,
        id: u32,
    ) -> Arc<Self> {
        Arc::new(Queue {
            handle,
            device,
            queue_family_index,
            id,
            state: Mutex::new(Default::default()),
        })
    }

    /// Returns the device that this queue belongs to.
    #[inline]
    pub fn device(&self) -> &Arc<Device> {
        &self.device
    }

    /// Returns the index of the queue family that this queue belongs to.
    #[inline]
    pub fn queue_family_index(&self) -> u32 {
        self.queue_family_index
    }

    /// Returns the index of this queue within its queue family.
    #[inline]
    pub fn id_within_family(&self) -> u32 {
        self.id
    }

    /// Locks the queue and then calls the provided closure, providing it with an object that
    /// can be used to perform operations on the queue, such as command buffer submissions.
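    ///
    /// The closure's return value is passed through to the caller, and the queue is unlocked
    /// once the closure returns. A minimal sketch, assuming a `queue: Arc<Queue>` obtained
    /// during device creation:
    ///
    /// ```ignore
    /// // Lock the queue, perform an operation through the guard, pass the result back out.
    /// let result: Result<(), _> = queue.with(|mut guard| guard.wait_idle());
    /// ```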
    #[inline]
    pub fn with<'a, R>(self: &'a Arc<Self>, func: impl FnOnce(QueueGuard<'a>) -> R) -> R {
        func(QueueGuard {
            queue: self,
            state: self.state.lock(),
        })
    }
}

impl Drop for Queue {
    #[inline]
    fn drop(&mut self) {
        let state = self.state.get_mut();
        let _ = state.wait_idle(&self.device, self.handle);
    }
}

unsafe impl VulkanObject for Queue {
    type Handle = ash::vk::Queue;

    #[inline]
    fn handle(&self) -> Self::Handle {
        self.handle
    }
}

unsafe impl DeviceOwned for Queue {
    #[inline]
    fn device(&self) -> &Arc<Device> {
        &self.device
    }
}

impl PartialEq for Queue {
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        self.id == other.id
            && self.queue_family_index == other.queue_family_index
            && self.device == other.device
    }
}

impl Eq for Queue {}

impl Hash for Queue {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.id.hash(state);
        self.queue_family_index.hash(state);
        self.device.hash(state);
    }
}

pub struct QueueGuard<'a> {
    queue: &'a Arc<Queue>,
    state: MutexGuard<'a, QueueState>,
}

impl<'a> QueueGuard<'a> {
    pub(crate) unsafe fn fence_signaled(&mut self, fence: &Fence) {
        self.state.fence_signaled(fence)
    }

    /// Waits until all work on this queue has finished, then releases ownership of all resources
    /// that were in use by the queue.
    ///
    /// This is equivalent to submitting a fence to the queue, waiting on it, and then calling
    /// `cleanup_finished`.
    ///
    /// Just like [`Device::wait_idle`], you shouldn't have to call this function in a typical
    /// program.
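    ///
    /// When it is needed (typically at teardown, before destroying resources the GPU may still
    /// be using), a minimal sketch looks like this, assuming a `queue: Arc<Queue>`:
    ///
    /// ```ignore
    /// queue.with(|mut guard| guard.wait_idle()).unwrap();
    /// ```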
    #[inline]
    pub fn wait_idle(&mut self) -> Result<(), OomError> {
        self.state.wait_idle(&self.queue.device, self.queue.handle)
    }

    #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
    pub(crate) unsafe fn bind_sparse_unchecked(
        &mut self,
        bind_infos: impl IntoIterator<Item = BindSparseInfo>,
        fence: Option<Arc<Fence>>,
    ) -> Result<(), VulkanError> {
        let bind_infos: SmallVec<[_; 4]> = bind_infos.into_iter().collect();
        let mut states = States::from_bind_infos(&bind_infos);

        self.bind_sparse_unchecked_locked(
            &bind_infos,
            fence.as_ref().map(|fence| {
                let state = fence.state();
                (fence, state)
            }),
            &mut states,
        )
    }

    unsafe fn bind_sparse_unchecked_locked(
        &mut self,
        bind_infos: &SmallVec<[BindSparseInfo; 4]>,
        fence: Option<(&Arc<Fence>, MutexGuard<'_, FenceState>)>,
        states: &mut States<'_>,
    ) -> Result<(), VulkanError> {
        struct PerBindSparseInfo {
            wait_semaphores_vk: SmallVec<[ash::vk::Semaphore; 4]>,
            buffer_bind_infos_vk: SmallVec<[ash::vk::SparseBufferMemoryBindInfo; 4]>,
            buffer_binds_vk: SmallVec<[SmallVec<[ash::vk::SparseMemoryBind; 4]>; 4]>,
            image_opaque_bind_infos_vk: SmallVec<[ash::vk::SparseImageOpaqueMemoryBindInfo; 4]>,
            image_opaque_binds_vk: SmallVec<[SmallVec<[ash::vk::SparseMemoryBind; 4]>; 4]>,
            image_bind_infos_vk: SmallVec<[ash::vk::SparseImageMemoryBindInfo; 4]>,
            image_binds_vk: SmallVec<[SmallVec<[ash::vk::SparseImageMemoryBind; 4]>; 4]>,
            signal_semaphores_vk: SmallVec<[ash::vk::Semaphore; 4]>,
        }

        let (mut bind_infos_vk, mut per_bind_vk): (SmallVec<[_; 4]>, SmallVec<[_; 4]>) = bind_infos
            .iter()
            .map(|bind_info| {
                let &BindSparseInfo {
                    ref wait_semaphores,
                    ref buffer_binds,
                    ref image_opaque_binds,
                    ref image_binds,
                    ref signal_semaphores,
                    _ne: _,
                } = bind_info;

                let wait_semaphores_vk: SmallVec<[_; 4]> = wait_semaphores
                    .iter()
                    .map(|semaphore| semaphore.handle())
                    .collect();

                let (buffer_bind_infos_vk, buffer_binds_vk): (SmallVec<[_; 4]>, SmallVec<[_; 4]>) =
                    buffer_binds
                        .iter()
                        .map(|(buffer, memory_binds)| {
                            (
                                ash::vk::SparseBufferMemoryBindInfo {
                                    buffer: buffer.buffer().handle(),
                                    bind_count: 0,
                                    p_binds: ptr::null(),
                                },
                                memory_binds
                                    .iter()
                                    .map(|memory_bind| {
                                        let &SparseBufferMemoryBind {
                                            offset,
                                            size,
                                            ref memory,
                                        } = memory_bind;

                                        let (memory, memory_offset) = memory.as_ref().map_or_else(
                                            Default::default,
                                            |(memory, memory_offset)| {
                                                (memory.handle(), *memory_offset)
                                            },
                                        );

                                        ash::vk::SparseMemoryBind {
                                            resource_offset: offset,
                                            size,
                                            memory,
                                            memory_offset,
                                            flags: ash::vk::SparseMemoryBindFlags::empty(),
                                        }
                                    })
                                    .collect::<SmallVec<[_; 4]>>(),
                            )
                        })
                        .unzip();

                let (image_opaque_bind_infos_vk, image_opaque_binds_vk): (
                    SmallVec<[_; 4]>,
                    SmallVec<[_; 4]>,
                ) = image_opaque_binds
                    .iter()
                    .map(|(image, memory_binds)| {
                        (
                            ash::vk::SparseImageOpaqueMemoryBindInfo {
                                image: image.inner().image.handle(),
                                bind_count: 0,
                                p_binds: ptr::null(),
                            },
                            memory_binds
                                .iter()
                                .map(|memory_bind| {
                                    let &SparseImageOpaqueMemoryBind {
                                        offset,
                                        size,
                                        ref memory,
                                        metadata,
                                    } = memory_bind;

                                    let (memory, memory_offset) = memory.as_ref().map_or_else(
                                        Default::default,
                                        |(memory, memory_offset)| (memory.handle(), *memory_offset),
                                    );

                                    ash::vk::SparseMemoryBind {
                                        resource_offset: offset,
                                        size,
                                        memory,
                                        memory_offset,
                                        flags: if metadata {
                                            ash::vk::SparseMemoryBindFlags::METADATA
                                        } else {
                                            ash::vk::SparseMemoryBindFlags::empty()
                                        },
                                    }
                                })
                                .collect::<SmallVec<[_; 4]>>(),
                        )
                    })
                    .unzip();

                let (image_bind_infos_vk, image_binds_vk): (SmallVec<[_; 4]>, SmallVec<[_; 4]>) =
                    image_binds
                        .iter()
                        .map(|(image, memory_binds)| {
                            (
                                ash::vk::SparseImageMemoryBindInfo {
                                    image: image.inner().image.handle(),
                                    bind_count: 0,
                                    p_binds: ptr::null(),
                                },
                                memory_binds
                                    .iter()
                                    .map(|memory_bind| {
                                        let &SparseImageMemoryBind {
                                            aspects,
                                            mip_level,
                                            array_layer,
                                            offset,
                                            extent,
                                            ref memory,
                                        } = memory_bind;

                                        let (memory, memory_offset) = memory.as_ref().map_or_else(
                                            Default::default,
                                            |(memory, memory_offset)| {
                                                (memory.handle(), *memory_offset)
                                            },
                                        );

                                        ash::vk::SparseImageMemoryBind {
                                            subresource: ash::vk::ImageSubresource {
                                                aspect_mask: aspects.into(),
                                                mip_level,
                                                array_layer,
                                            },
                                            offset: ash::vk::Offset3D {
                                                x: offset[0] as i32,
                                                y: offset[1] as i32,
                                                z: offset[2] as i32,
                                            },
                                            extent: ash::vk::Extent3D {
                                                width: extent[0],
                                                height: extent[1],
                                                depth: extent[2],
                                            },
                                            memory,
                                            memory_offset,
                                            flags: ash::vk::SparseMemoryBindFlags::empty(),
                                        }
                                    })
                                    .collect::<SmallVec<[_; 4]>>(),
                            )
                        })
                        .unzip();

                let signal_semaphores_vk: SmallVec<[_; 4]> = signal_semaphores
                    .iter()
                    .map(|semaphore| semaphore.handle())
                    .collect();

                (
                    ash::vk::BindSparseInfo::default(),
                    PerBindSparseInfo {
                        wait_semaphores_vk,
                        buffer_bind_infos_vk,
                        buffer_binds_vk,
                        image_opaque_bind_infos_vk,
                        image_opaque_binds_vk,
                        image_bind_infos_vk,
                        image_binds_vk,
                        signal_semaphores_vk,
                    },
                )
            })
            .unzip();

        for (
            bind_info_vk,
            PerBindSparseInfo {
                wait_semaphores_vk,
                buffer_bind_infos_vk,
                buffer_binds_vk,
                image_opaque_bind_infos_vk,
                image_opaque_binds_vk,
                image_bind_infos_vk,
                image_binds_vk,
                signal_semaphores_vk,
            },
        ) in (bind_infos_vk.iter_mut()).zip(per_bind_vk.iter_mut())
        {
            for (buffer_bind_infos_vk, buffer_binds_vk) in
                (buffer_bind_infos_vk.iter_mut()).zip(buffer_binds_vk.iter())
            {
                *buffer_bind_infos_vk = ash::vk::SparseBufferMemoryBindInfo {
                    bind_count: buffer_binds_vk.len() as u32,
                    p_binds: buffer_binds_vk.as_ptr(),
                    ..*buffer_bind_infos_vk
                };
            }

            for (image_opaque_bind_infos_vk, image_opaque_binds_vk) in
                (image_opaque_bind_infos_vk.iter_mut()).zip(image_opaque_binds_vk.iter())
            {
                *image_opaque_bind_infos_vk = ash::vk::SparseImageOpaqueMemoryBindInfo {
                    bind_count: image_opaque_binds_vk.len() as u32,
                    p_binds: image_opaque_binds_vk.as_ptr(),
                    ..*image_opaque_bind_infos_vk
                };
            }

            for (image_bind_infos_vk, image_binds_vk) in
                (image_bind_infos_vk.iter_mut()).zip(image_binds_vk.iter())
            {
                *image_bind_infos_vk = ash::vk::SparseImageMemoryBindInfo {
                    bind_count: image_binds_vk.len() as u32,
                    p_binds: image_binds_vk.as_ptr(),
                    ..*image_bind_infos_vk
                };
            }

            *bind_info_vk = ash::vk::BindSparseInfo {
                wait_semaphore_count: wait_semaphores_vk.len() as u32,
                p_wait_semaphores: wait_semaphores_vk.as_ptr(),
                buffer_bind_count: buffer_bind_infos_vk.len() as u32,
                p_buffer_binds: buffer_bind_infos_vk.as_ptr(),
                image_opaque_bind_count: image_opaque_bind_infos_vk.len() as u32,
                p_image_opaque_binds: image_opaque_bind_infos_vk.as_ptr(),
                image_bind_count: image_bind_infos_vk.len() as u32,
                p_image_binds: image_bind_infos_vk.as_ptr(),
                signal_semaphore_count: signal_semaphores_vk.len() as u32,
                p_signal_semaphores: signal_semaphores_vk.as_ptr(),
                ..*bind_info_vk
            }
        }

        let fns = self.queue.device.fns();
        (fns.v1_0.queue_bind_sparse)(
            self.queue.handle,
            bind_infos_vk.len() as u32,
            bind_infos_vk.as_ptr(),
            fence
                .as_ref()
                .map_or_else(Default::default, |(fence, _)| fence.handle()),
        )
        .result()
        .map_err(VulkanError::from)?;

        for bind_info in bind_infos {
            let BindSparseInfo {
                wait_semaphores,
                buffer_binds: _,
                image_opaque_binds: _,
                image_binds: _,
                signal_semaphores,
                _ne: _,
            } = bind_info;

            for semaphore in wait_semaphores {
                let state = states.semaphores.get_mut(&semaphore.handle()).unwrap();
                state.add_queue_wait(self.queue);
            }

            for semaphore in signal_semaphores {
                let state = states.semaphores.get_mut(&semaphore.handle()).unwrap();
                state.add_queue_signal(self.queue);
            }
        }

        let fence = fence.map(|(fence, mut state)| {
            state.add_queue_signal(self.queue);
            fence.clone()
        });

        self.state
            .operations
            .push_back((bind_infos.clone().into(), fence));

        Ok(())
    }

    #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
    #[inline]
    pub unsafe fn present_unchecked(
        &mut self,
        present_info: PresentInfo,
    ) -> Result<impl ExactSizeIterator<Item = Result<bool, VulkanError>>, VulkanError> {
        let mut states = States::from_present_info(&present_info);
        self.present_unchecked_locked(&present_info, &mut states)
    }

    unsafe fn present_unchecked_locked(
        &mut self,
        present_info: &PresentInfo,
        states: &mut States<'_>,
    ) -> Result<impl ExactSizeIterator<Item = Result<bool, VulkanError>>, VulkanError> {
        let PresentInfo {
            ref wait_semaphores,
            ref swapchain_infos,
            _ne: _,
        } = present_info;

        let wait_semaphores_vk: SmallVec<[_; 4]> = wait_semaphores
            .iter()
            .map(|semaphore| semaphore.handle())
            .collect();

        let mut swapchains_vk: SmallVec<[_; 4]> = SmallVec::with_capacity(swapchain_infos.len());
        let mut image_indices_vk: SmallVec<[_; 4]> = SmallVec::with_capacity(swapchain_infos.len());
        let mut present_ids_vk: SmallVec<[_; 4]> = SmallVec::with_capacity(swapchain_infos.len());
        let mut present_regions_vk: SmallVec<[_; 4]> =
            SmallVec::with_capacity(swapchain_infos.len());
        let mut rectangles_vk: SmallVec<[_; 4]> = SmallVec::with_capacity(swapchain_infos.len());

        let mut has_present_ids = false;
        let mut has_present_regions = false;

        for swapchain_info in swapchain_infos {
            let &SwapchainPresentInfo {
                ref swapchain,
                image_index,
                present_id,
                ref present_regions,
                _ne: _,
            } = swapchain_info;

            swapchains_vk.push(swapchain.handle());
            image_indices_vk.push(image_index);
            present_ids_vk.push(present_id.map_or(0, u64::from));
            present_regions_vk.push(ash::vk::PresentRegionKHR::default());
            rectangles_vk.push(
                present_regions
                    .iter()
                    .map(ash::vk::RectLayerKHR::from)
                    .collect::<SmallVec<[_; 4]>>(),
            );

            if present_id.is_some() {
                has_present_ids = true;
            }

            if !present_regions.is_empty() {
                has_present_regions = true;
            }
        }

        let mut results = vec![ash::vk::Result::SUCCESS; swapchain_infos.len()];
        let mut info_vk = ash::vk::PresentInfoKHR {
            wait_semaphore_count: wait_semaphores_vk.len() as u32,
            p_wait_semaphores: wait_semaphores_vk.as_ptr(),
            swapchain_count: swapchains_vk.len() as u32,
            p_swapchains: swapchains_vk.as_ptr(),
            p_image_indices: image_indices_vk.as_ptr(),
            p_results: results.as_mut_ptr(),
            ..Default::default()
        };
        let mut present_id_info_vk = None;
        let mut present_region_info_vk = None;

        if has_present_ids {
            let next = present_id_info_vk.insert(ash::vk::PresentIdKHR {
                swapchain_count: present_ids_vk.len() as u32,
                p_present_ids: present_ids_vk.as_ptr(),
                ..Default::default()
            });

            next.p_next = info_vk.p_next;
            info_vk.p_next = next as *const _ as *const _;
        }

        if has_present_regions {
            for (present_regions_vk, rectangles_vk) in
                (present_regions_vk.iter_mut()).zip(rectangles_vk.iter())
            {
                *present_regions_vk = ash::vk::PresentRegionKHR {
                    rectangle_count: rectangles_vk.len() as u32,
                    p_rectangles: rectangles_vk.as_ptr(),
                };
            }

            let next = present_region_info_vk.insert(ash::vk::PresentRegionsKHR {
                swapchain_count: present_regions_vk.len() as u32,
                p_regions: present_regions_vk.as_ptr(),
                ..Default::default()
            });

            next.p_next = info_vk.p_next;
            info_vk.p_next = next as *const _ as *const _;
        }

        let fns = self.queue.device().fns();
        let result = (fns.khr_swapchain.queue_present_khr)(self.queue.handle, &info_vk);

        // Per the documentation of `vkQueuePresentKHR`, certain results indicate that the whole
        // operation has failed, while others only indicate failure of a particular present.
        // If we got a result that is not one of these per-present ones, we return it directly.
        // Otherwise, we consider the present to be enqueued.
        if !matches!(
            result,
            ash::vk::Result::SUCCESS
                | ash::vk::Result::SUBOPTIMAL_KHR
                | ash::vk::Result::ERROR_OUT_OF_DATE_KHR
                | ash::vk::Result::ERROR_SURFACE_LOST_KHR
                | ash::vk::Result::ERROR_FULL_SCREEN_EXCLUSIVE_MODE_LOST_EXT,
        ) {
            return Err(VulkanError::from(result));
        }

        for semaphore in wait_semaphores {
            let state = states.semaphores.get_mut(&semaphore.handle()).unwrap();
            state.add_queue_wait(self.queue);
        }

        self.state
            .operations
            .push_back((present_info.clone().into(), None));

        // If a presentation results in a loss of full-screen exclusive mode,
        // signal that to the relevant swapchain.
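        // Vulkan reports this per-swapchain result in the `results` array rather than in the
        // aggregate return value of `vkQueuePresentKHR`, so each entry is inspected here.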
        for (&result, swapchain_info) in results.iter().zip(&present_info.swapchain_infos) {
            if result == ash::vk::Result::ERROR_FULL_SCREEN_EXCLUSIVE_MODE_LOST_EXT {
                swapchain_info
                    .swapchain
                    .full_screen_exclusive_held()
                    .store(false, Ordering::SeqCst);
            }
        }

        Ok(results.into_iter().map(|result| match result {
            ash::vk::Result::SUCCESS => Ok(false),
            ash::vk::Result::SUBOPTIMAL_KHR => Ok(true),
            err => Err(VulkanError::from(err)),
        }))
    }

    // Temporary function to keep futures working.
    pub(crate) unsafe fn submit_with_future(
        &mut self,
        submit_info: SubmitInfo,
        fence: Option<Arc<Fence>>,
        future: &dyn GpuFuture,
        queue: &Queue,
    ) -> Result<(), FlushError> {
        let submit_infos: SmallVec<[_; 4]> = smallvec![submit_info];
        let mut states = States::from_submit_infos(&submit_infos);

        for submit_info in &submit_infos {
            for command_buffer in &submit_info.command_buffers {
                let state = states
                    .command_buffers
                    .get(&command_buffer.handle())
                    .unwrap();

                match command_buffer.usage() {
                    CommandBufferUsage::OneTimeSubmit => {
                        // VUID-vkQueueSubmit2-commandBuffer-03874
                        if state.has_been_submitted() {
                            return Err(FlushError::OneTimeSubmitAlreadySubmitted);
                        }
                    }
                    CommandBufferUsage::MultipleSubmit => {
                        // VUID-vkQueueSubmit2-commandBuffer-03875
                        if state.is_submit_pending() {
                            return Err(FlushError::ExclusiveAlreadyInUse);
                        }
                    }
                    CommandBufferUsage::SimultaneousUse => (),
                }

                let CommandBufferResourcesUsage {
                    buffers,
                    images,
                    buffer_indices: _,
                    image_indices: _,
                } = command_buffer.resources_usage();

                for usage in buffers {
                    let state = states.buffers.get_mut(&usage.buffer.handle()).unwrap();

                    for (range, range_usage) in usage.ranges.iter() {
                        match future.check_buffer_access(
                            &usage.buffer,
                            range.clone(),
                            range_usage.mutable,
                            queue,
                        ) {
                            Err(AccessCheckError::Denied(error)) => {
                                return Err(FlushError::ResourceAccessError {
                                    error,
                                    use_ref: range_usage.first_use,
                                });
                            }
                            Err(AccessCheckError::Unknown) => {
                                let result = if range_usage.mutable {
                                    state.check_gpu_write(range.clone())
                                } else {
                                    state.check_gpu_read(range.clone())
                                };

                                if let Err(error) = result {
                                    return Err(FlushError::ResourceAccessError {
                                        error,
                                        use_ref: range_usage.first_use,
                                    });
                                }
                            }
                            _ => (),
                        }
                    }
                }

                for usage in images {
                    let state = states.images.get_mut(&usage.image.handle()).unwrap();

                    for (range, range_usage) in usage.ranges.iter() {
                        match future.check_image_access(
                            &usage.image,
                            range.clone(),
                            range_usage.mutable,
                            range_usage.expected_layout,
                            queue,
                        ) {
                            Err(AccessCheckError::Denied(error)) => {
                                return Err(FlushError::ResourceAccessError {
                                    error,
                                    use_ref: range_usage.first_use,
                                });
                            }
                            Err(AccessCheckError::Unknown) => {
                                let result = if range_usage.mutable {
                                    state
                                        .check_gpu_write(range.clone(), range_usage.expected_layout)
                                } else {
                                    state.check_gpu_read(range.clone(), range_usage.expected_layout)
                                };

                                if let Err(error) = result {
                                    return Err(FlushError::ResourceAccessError {
                                        error,
                                        use_ref: range_usage.first_use,
                                    });
                                }
                            }
                            _ => (),
                        };
                    }
                }
            }
        }

        Ok(self.submit_unchecked_locked(
            &submit_infos,
            fence.as_ref().map(|fence| {
                let state = fence.state();
                (fence, state)
            }),
            &mut states,
        )?)
    }

    #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
    pub unsafe fn submit_unchecked(
        &mut self,
        submit_infos: impl IntoIterator<Item = SubmitInfo>,
        fence: Option<Arc<Fence>>,
    ) -> Result<(), VulkanError> {
        let submit_infos: SmallVec<[_; 4]> = submit_infos.into_iter().collect();
        let mut states = States::from_submit_infos(&submit_infos);

        self.submit_unchecked_locked(
            &submit_infos,
            fence.as_ref().map(|fence| {
                let state = fence.state();
                (fence, state)
            }),
            &mut states,
        )
    }

    unsafe fn submit_unchecked_locked(
        &mut self,
        submit_infos: &SmallVec<[SubmitInfo; 4]>,
        fence: Option<(&Arc<Fence>, MutexGuard<'_, FenceState>)>,
        states: &mut States<'_>,
    ) -> Result<(), VulkanError> {
        if self.queue.device.enabled_features().synchronization2 {
            struct PerSubmitInfo {
                wait_semaphore_infos_vk: SmallVec<[ash::vk::SemaphoreSubmitInfo; 4]>,
                command_buffer_infos_vk: SmallVec<[ash::vk::CommandBufferSubmitInfo; 4]>,
                signal_semaphore_infos_vk: SmallVec<[ash::vk::SemaphoreSubmitInfo; 4]>,
            }

            let (mut submit_info_vk, per_submit_vk): (SmallVec<[_; 4]>, SmallVec<[_; 4]>) =
                submit_infos
                    .iter()
                    .map(|submit_info| {
                        let &SubmitInfo {
                            ref wait_semaphores,
                            ref command_buffers,
                            ref signal_semaphores,
                            _ne: _,
                        } = submit_info;

                        let wait_semaphore_infos_vk = wait_semaphores
                            .iter()
                            .map(|semaphore_submit_info| {
                                let &SemaphoreSubmitInfo {
                                    ref semaphore,
                                    stages,
                                    _ne: _,
                                } = semaphore_submit_info;

                                ash::vk::SemaphoreSubmitInfo {
                                    semaphore: semaphore.handle(),
                                    value: 0, // TODO:
                                    stage_mask: stages.into(),
                                    device_index: 0, // TODO:
                                    ..Default::default()
                                }
                            })
                            .collect();

                        let command_buffer_infos_vk = command_buffers
                            .iter()
                            .map(|cb| ash::vk::CommandBufferSubmitInfo {
                                command_buffer: cb.handle(),
                                device_mask: 0, // TODO:
                                ..Default::default()
                            })
                            .collect();

                        let signal_semaphore_infos_vk = signal_semaphores
                            .iter()
                            .map(|semaphore_submit_info| {
                                let &SemaphoreSubmitInfo {
                                    ref semaphore,
                                    stages,
                                    _ne: _,
                                } = semaphore_submit_info;

                                ash::vk::SemaphoreSubmitInfo {
                                    semaphore: semaphore.handle(),
                                    value: 0, // TODO:
                                    stage_mask: stages.into(),
                                    device_index: 0, // TODO:
                                    ..Default::default()
                                }
                            })
                            .collect();

                        (
                            ash::vk::SubmitInfo2 {
                                flags: ash::vk::SubmitFlags::empty(), // TODO:
                                wait_semaphore_info_count: 0,
                                p_wait_semaphore_infos: ptr::null(),
                                command_buffer_info_count: 0,
                                p_command_buffer_infos: ptr::null(),
                                signal_semaphore_info_count: 0,
                                p_signal_semaphore_infos: ptr::null(),
                                ..Default::default()
                            },
                            PerSubmitInfo {
                                wait_semaphore_infos_vk,
                                command_buffer_infos_vk,
                                signal_semaphore_infos_vk,
                            },
                        )
                    })
                    .unzip();

            for (
                submit_info_vk,
                PerSubmitInfo {
                    wait_semaphore_infos_vk,
                    command_buffer_infos_vk,
                    signal_semaphore_infos_vk,
                },
            ) in (submit_info_vk.iter_mut()).zip(per_submit_vk.iter())
            {
                *submit_info_vk = ash::vk::SubmitInfo2 {
                    wait_semaphore_info_count: wait_semaphore_infos_vk.len() as u32,
                    p_wait_semaphore_infos: wait_semaphore_infos_vk.as_ptr(),
                    command_buffer_info_count: command_buffer_infos_vk.len() as u32,
                    p_command_buffer_infos: command_buffer_infos_vk.as_ptr(),
                    signal_semaphore_info_count: signal_semaphore_infos_vk.len() as u32,
                    p_signal_semaphore_infos: signal_semaphore_infos_vk.as_ptr(),
                    ..*submit_info_vk
                };
            }

            let fns = self.queue.device.fns();

            if self.queue.device.api_version() >= Version::V1_3 {
                (fns.v1_3.queue_submit2)(
                    self.queue.handle,
                    submit_info_vk.len() as u32,
                    submit_info_vk.as_ptr(),
                    fence
                        .as_ref()
                        .map_or_else(Default::default, |(fence, _)| fence.handle()),
                )
            } else {
                debug_assert!(self.queue.device.enabled_extensions().khr_synchronization2);
                (fns.khr_synchronization2.queue_submit2_khr)(
                    self.queue.handle,
                    submit_info_vk.len() as u32,
                    submit_info_vk.as_ptr(),
                    fence
                        .as_ref()
                        .map_or_else(Default::default, |(fence, _)| fence.handle()),
                )
            }
            .result()
            .map_err(VulkanError::from)?;
        } else {
            struct PerSubmitInfo {
                wait_semaphores_vk: SmallVec<[ash::vk::Semaphore; 4]>,
                wait_dst_stage_mask_vk: SmallVec<[ash::vk::PipelineStageFlags; 4]>,
                command_buffers_vk: SmallVec<[ash::vk::CommandBuffer; 4]>,
                signal_semaphores_vk: SmallVec<[ash::vk::Semaphore; 4]>,
            }

            let (mut submit_info_vk, per_submit_vk): (SmallVec<[_; 4]>, SmallVec<[_; 4]>) =
                submit_infos
                    .iter()
                    .map(|submit_info| {
                        let &SubmitInfo {
                            ref wait_semaphores,
                            ref command_buffers,
                            ref signal_semaphores,
                            _ne: _,
                        } = submit_info;

                        let (wait_semaphores_vk, wait_dst_stage_mask_vk) = wait_semaphores
                            .iter()
                            .map(|semaphore_submit_info| {
                                let &SemaphoreSubmitInfo {
                                    ref semaphore,
                                    stages,
                                    _ne: _,
                                } = semaphore_submit_info;

                                (semaphore.handle(), stages.into())
                            })
                            .unzip();

                        let command_buffers_vk =
                            command_buffers.iter().map(|cb| cb.handle()).collect();

                        let signal_semaphores_vk = signal_semaphores
                            .iter()
                            .map(|semaphore_submit_info| {
                                let &SemaphoreSubmitInfo {
                                    ref semaphore,
                                    stages: _,
                                    _ne: _,
                                } = semaphore_submit_info;

                                semaphore.handle()
                            })
                            .collect();

                        (
                            ash::vk::SubmitInfo {
                                wait_semaphore_count: 0,
                                p_wait_semaphores: ptr::null(),
                                p_wait_dst_stage_mask: ptr::null(),
                                command_buffer_count: 0,
                                p_command_buffers: ptr::null(),
                                signal_semaphore_count: 0,
                                p_signal_semaphores: ptr::null(),
                                ..Default::default()
                            },
                            PerSubmitInfo {
                                wait_semaphores_vk,
                                wait_dst_stage_mask_vk,
                                command_buffers_vk,
                                signal_semaphores_vk,
                            },
                        )
                    })
                    .unzip();

            for (
                submit_info_vk,
                PerSubmitInfo {
                    wait_semaphores_vk,
                    wait_dst_stage_mask_vk,
                    command_buffers_vk,
                    signal_semaphores_vk,
                },
            ) in (submit_info_vk.iter_mut()).zip(per_submit_vk.iter())
            {
                *submit_info_vk = ash::vk::SubmitInfo {
                    wait_semaphore_count: wait_semaphores_vk.len() as u32,
                    p_wait_semaphores: wait_semaphores_vk.as_ptr(),
                    p_wait_dst_stage_mask: wait_dst_stage_mask_vk.as_ptr(),
                    command_buffer_count: command_buffers_vk.len() as u32,
                    p_command_buffers: command_buffers_vk.as_ptr(),
                    signal_semaphore_count: signal_semaphores_vk.len() as u32,
                    p_signal_semaphores: signal_semaphores_vk.as_ptr(),
                    ..*submit_info_vk
                };
            }

            let fns = self.queue.device.fns();
            (fns.v1_0.queue_submit)(
                self.queue.handle,
                submit_info_vk.len() as u32,
                submit_info_vk.as_ptr(),
                fence
                    .as_ref()
                    .map_or_else(Default::default, |(fence, _)| fence.handle()),
            )
            .result()
            .map_err(VulkanError::from)?;
        }

        for submit_info in submit_infos {
            let SubmitInfo {
                wait_semaphores,
                command_buffers,
                signal_semaphores,
                _ne: _,
            } = submit_info;

            for semaphore_submit_info in wait_semaphores {
                let state = states
                    .semaphores
                    .get_mut(&semaphore_submit_info.semaphore.handle())
                    .unwrap();
                state.add_queue_wait(self.queue);
            }

            for command_buffer in command_buffers {
                let state = states
                    .command_buffers
                    .get_mut(&command_buffer.handle())
                    .unwrap();
                state.add_queue_submit();

                let CommandBufferResourcesUsage {
                    buffers,
                    images,
                    buffer_indices: _,
                    image_indices: _,
                } = command_buffer.resources_usage();

                for usage in buffers {
                    let state = states.buffers.get_mut(&usage.buffer.handle()).unwrap();

                    for (range, range_usage) in usage.ranges.iter() {
                        if range_usage.mutable {
                            state.gpu_write_lock(range.clone());
                        } else {
                            state.gpu_read_lock(range.clone());
                        }
                    }
                }

                for usage in images {
                    let state = states.images.get_mut(&usage.image.handle()).unwrap();

                    for (range, range_usage) in usage.ranges.iter() {
                        if range_usage.mutable {
                            state.gpu_write_lock(range.clone(), range_usage.final_layout);
                        } else {
                            state.gpu_read_lock(range.clone());
                        }
                    }
                }
            }

            for semaphore_submit_info in signal_semaphores {
                let state = states
                    .semaphores
                    .get_mut(&semaphore_submit_info.semaphore.handle())
                    .unwrap();
                state.add_queue_signal(self.queue);
            }
        }

        let fence = fence.map(|(fence, mut state)| {
            state.add_queue_signal(self.queue);
            fence.clone()
        });

        self.state
            .operations
            .push_back((submit_infos.clone().into(), fence));

        Ok(())
    }

    /// Opens a queue debug label region.
    ///
    /// The [`ext_debug_utils`] extension must be enabled on the instance.
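    ///
    /// A minimal sketch, assuming the extension was enabled at instance creation and a
    /// `queue: Arc<Queue>` is at hand; the region is closed with
    /// [`end_debug_utils_label`](Self::end_debug_utils_label):
    ///
    /// ```ignore
    /// use vulkano::instance::debug::DebugUtilsLabel;
    ///
    /// queue.with(|mut guard| {
    ///     guard.begin_debug_utils_label(DebugUtilsLabel {
    ///         label_name: "frame".to_owned(),
    ///         ..Default::default()
    ///     })?;
    ///     // ... work submitted here shows up inside the "frame" region ...
    ///     // Safety: a matching region was begun on this queue above.
    ///     unsafe { guard.end_debug_utils_label() }
    /// })?;
    /// ```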
    ///
    /// [`ext_debug_utils`]: crate::instance::InstanceExtensions::ext_debug_utils
    #[inline]
    pub fn begin_debug_utils_label(
        &mut self,
        label_info: DebugUtilsLabel,
    ) -> Result<(), QueueError> {
        self.validate_begin_debug_utils_label(&label_info)?;

        unsafe {
            self.begin_debug_utils_label_unchecked(label_info);
            Ok(())
        }
    }

    fn validate_begin_debug_utils_label(
        &self,
        _label_info: &DebugUtilsLabel,
    ) -> Result<(), QueueError> {
        if !self
            .queue
            .device
            .instance()
            .enabled_extensions()
            .ext_debug_utils
        {
            return Err(QueueError::RequirementNotMet {
                required_for: "`QueueGuard::begin_debug_utils_label`",
                requires_one_of: RequiresOneOf {
                    instance_extensions: &["ext_debug_utils"],
                    ..Default::default()
                },
            });
        }

        Ok(())
    }

    #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
    #[inline]
    pub unsafe fn begin_debug_utils_label_unchecked(&mut self, label_info: DebugUtilsLabel) {
        let DebugUtilsLabel {
            label_name,
            color,
            _ne: _,
        } = label_info;

        let label_name_vk = CString::new(label_name.as_str()).unwrap();
        let label_info = ash::vk::DebugUtilsLabelEXT {
            p_label_name: label_name_vk.as_ptr(),
            color,
            ..Default::default()
        };

        let fns = self.queue.device.instance().fns();
        (fns.ext_debug_utils.queue_begin_debug_utils_label_ext)(self.queue.handle, &label_info);
    }

    /// Closes a queue debug label region.
    ///
    /// The [`ext_debug_utils`](crate::instance::InstanceExtensions::ext_debug_utils) extension
    /// must be enabled on the instance.
    ///
    /// # Safety
    ///
    /// - There must be an outstanding queue label region begun with `begin_debug_utils_label` in
    ///   the queue.
    #[inline]
    pub unsafe fn end_debug_utils_label(&mut self) -> Result<(), QueueError> {
        self.validate_end_debug_utils_label()?;
        self.end_debug_utils_label_unchecked();

        Ok(())
    }

    fn validate_end_debug_utils_label(&self) -> Result<(), QueueError> {
        if !self
            .queue
            .device
            .instance()
            .enabled_extensions()
            .ext_debug_utils
        {
            return Err(QueueError::RequirementNotMet {
                required_for: "`QueueGuard::end_debug_utils_label`",
                requires_one_of: RequiresOneOf {
                    instance_extensions: &["ext_debug_utils"],
                    ..Default::default()
                },
            });
        }

        // VUID-vkQueueEndDebugUtilsLabelEXT-None-01911
        // TODO: not checked, so unsafe for now

        Ok(())
    }

    #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
    #[inline]
    pub unsafe fn end_debug_utils_label_unchecked(&mut self) {
        let fns = self.queue.device.instance().fns();
        (fns.ext_debug_utils.queue_end_debug_utils_label_ext)(self.queue.handle);
    }

    /// Inserts a queue debug label.
    ///
    /// The [`ext_debug_utils`](crate::instance::InstanceExtensions::ext_debug_utils) extension
    /// must be enabled on the instance.
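    ///
    /// Unlike a `begin`/`end` pair, this marks a single point in the queue's timeline. A
    /// minimal sketch, under the same assumptions as
    /// [`begin_debug_utils_label`](Self::begin_debug_utils_label):
    ///
    /// ```ignore
    /// queue.with(|mut guard| {
    ///     guard.insert_debug_utils_label(DebugUtilsLabel {
    ///         label_name: "frame boundary".to_owned(),
    ///         ..Default::default()
    ///     })
    /// })?;
    /// ```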
    #[inline]
    pub fn insert_debug_utils_label(
        &mut self,
        label_info: DebugUtilsLabel,
    ) -> Result<(), QueueError> {
        self.validate_insert_debug_utils_label(&label_info)?;

        unsafe {
            self.insert_debug_utils_label_unchecked(label_info);
            Ok(())
        }
    }

    fn validate_insert_debug_utils_label(
        &self,
        _label_info: &DebugUtilsLabel,
    ) -> Result<(), QueueError> {
        if !self
            .queue
            .device
            .instance()
            .enabled_extensions()
            .ext_debug_utils
        {
            return Err(QueueError::RequirementNotMet {
                required_for: "`QueueGuard::insert_debug_utils_label`",
                requires_one_of: RequiresOneOf {
                    instance_extensions: &["ext_debug_utils"],
                    ..Default::default()
                },
            });
        }

        Ok(())
    }

    #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
    #[inline]
    pub unsafe fn insert_debug_utils_label_unchecked(&mut self, label_info: DebugUtilsLabel) {
        let DebugUtilsLabel {
            label_name,
            color,
            _ne: _,
        } = label_info;

        let label_name_vk = CString::new(label_name.as_str()).unwrap();
        let label_info = ash::vk::DebugUtilsLabelEXT {
            p_label_name: label_name_vk.as_ptr(),
            color,
            ..Default::default()
        };

        let fns = self.queue.device.instance().fns();
        (fns.ext_debug_utils.queue_insert_debug_utils_label_ext)(self.queue.handle, &label_info);
    }
}

#[derive(Debug, Default)]
struct QueueState {
    operations: VecDeque<(QueueOperation, Option<Arc<Fence>>)>,
}

impl QueueState {
    fn wait_idle(&mut self, device: &Device, handle: ash::vk::Queue) -> Result<(), OomError> {
        unsafe {
            let fns = device.fns();
            (fns.v1_0.queue_wait_idle)(handle)
                .result()
                .map_err(VulkanError::from)?;

            // Since we now know that the queue is finished with all work,
            // we can safely release all resources.
            for (operation, _) in take(&mut self.operations) {
                operation.set_finished();
            }

            Ok(())
        }
    }

    /// Called by `fence` when it finds that it is signaled.
    fn fence_signaled(&mut self, fence: &Fence) {
        // Find the most recent operation that uses `fence`.
        let fence_index = self
            .operations
            .iter()
            .enumerate()
            .rev()
            .find_map(|(index, (_, f))| {
                f.as_ref().map_or(false, |f| **f == *fence).then_some(index)
            });

        if let Some(index) = fence_index {
            // Remove all operations up to this index, and perform cleanup if needed.
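            // The `..index + 1` range includes `index` itself: the operation that signaled
            // this fence has finished, and so has everything queued before it.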
            for (operation, fence) in self.operations.drain(..index + 1) {
                unsafe {
                    operation.set_finished();

                    if let Some(fence) = fence {
                        fence.state().set_signal_finished();
                    }
                }
            }
        }
    }
}

#[derive(Debug)]
enum QueueOperation {
    BindSparse(SmallVec<[BindSparseInfo; 4]>),
    Present(PresentInfo),
    Submit(SmallVec<[SubmitInfo; 4]>),
}

impl QueueOperation {
    unsafe fn set_finished(self) {
        match self {
            QueueOperation::BindSparse(bind_infos) => {
                for bind_info in bind_infos {
                    for semaphore in bind_info.wait_semaphores {
                        semaphore.state().set_wait_finished();
                    }

                    for semaphore in bind_info.signal_semaphores {
                        semaphore.state().set_signal_finished();
                    }
                }

                // TODO: Do we need to unlock buffers and images here?
            }
            QueueOperation::Present(present_info) => {
                for semaphore in present_info.wait_semaphores {
                    semaphore.state().set_wait_finished();
                }
            }
            QueueOperation::Submit(submit_infos) => {
                for submit_info in submit_infos {
                    for semaphore_submit_info in submit_info.wait_semaphores {
                        semaphore_submit_info.semaphore.state().set_wait_finished();
                    }

                    for semaphore_submit_info in submit_info.signal_semaphores {
                        semaphore_submit_info
                            .semaphore
                            .state()
                            .set_signal_finished();
                    }

                    for command_buffer in submit_info.command_buffers {
                        let resource_usage = command_buffer.resources_usage();

                        for usage in &resource_usage.buffers {
                            let mut state = usage.buffer.state();

                            for (range, range_usage) in usage.ranges.iter() {
                                if range_usage.mutable {
                                    state.gpu_write_unlock(range.clone());
                                } else {
                                    state.gpu_read_unlock(range.clone());
                                }
                            }
                        }

                        for usage in &resource_usage.images {
                            let mut state = usage.image.state();

                            for (range, range_usage) in usage.ranges.iter() {
                                if range_usage.mutable {
                                    state.gpu_write_unlock(range.clone());
                                } else {
                                    state.gpu_read_unlock(range.clone());
                                }
                            }
                        }

                        command_buffer.state().set_submit_finished();
                    }
                }
            }
        }
    }
}

impl From<SmallVec<[BindSparseInfo; 4]>> for QueueOperation {
    #[inline]
    fn from(val: SmallVec<[BindSparseInfo; 4]>) -> Self {
        Self::BindSparse(val)
    }
}

impl From<PresentInfo> for QueueOperation {
    #[inline]
    fn from(val: PresentInfo) -> Self {
        Self::Present(val)
    }
}

impl From<SmallVec<[SubmitInfo; 4]>> for QueueOperation {
    #[inline]
    fn from(val: SmallVec<[SubmitInfo; 4]>) -> Self {
        Self::Submit(val)
    }
}

// This struct exists to ensure that every object gets locked exactly once.
// Otherwise we get deadlocks.
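// The same semaphore (or buffer, image, command buffer) can appear several times within one
// submission, e.g. in both the wait and signal lists; taking its mutex more than once would
// self-deadlock. Keying the guards by handle and using `entry().or_insert_with()` guarantees
// a single lock per object.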
#[derive(Debug)]
struct States<'a> {
    buffers: HashMap<ash::vk::Buffer, MutexGuard<'a, BufferState>>,
    command_buffers: HashMap<ash::vk::CommandBuffer, MutexGuard<'a, CommandBufferState>>,
    images: HashMap<ash::vk::Image, MutexGuard<'a, ImageState>>,
    semaphores: HashMap<ash::vk::Semaphore, MutexGuard<'a, SemaphoreState>>,
}

impl<'a> States<'a> {
    fn from_bind_infos(bind_infos: &'a [BindSparseInfo]) -> Self {
        let mut buffers = HashMap::default();
        let mut images = HashMap::default();
        let mut semaphores = HashMap::default();

        for bind_info in bind_infos {
            let BindSparseInfo {
                wait_semaphores,
                buffer_binds,
                image_opaque_binds,
                image_binds,
                signal_semaphores,
                _ne: _,
            } = bind_info;

            for semaphore in wait_semaphores {
                semaphores
                    .entry(semaphore.handle())
                    .or_insert_with(|| semaphore.state());
            }

            for (buffer, _) in buffer_binds {
                let buffer = buffer.buffer();
                buffers
                    .entry(buffer.handle())
                    .or_insert_with(|| buffer.state());
            }

            for (image, _) in image_opaque_binds {
                let image = &image.inner().image;
                images
                    .entry(image.handle())
                    .or_insert_with(|| image.state());
            }

            for (image, _) in image_binds {
                let image = &image.inner().image;
                images
                    .entry(image.handle())
                    .or_insert_with(|| image.state());
            }

            for semaphore in signal_semaphores {
                semaphores
                    .entry(semaphore.handle())
                    .or_insert_with(|| semaphore.state());
            }
        }

        Self {
            buffers,
            command_buffers: HashMap::default(),
            images,
            semaphores,
        }
    }

    fn from_present_info(present_info: &'a PresentInfo) -> Self {
        let mut semaphores = HashMap::default();

        let PresentInfo {
            wait_semaphores,
            swapchain_infos: _,
            _ne: _,
        } = present_info;

        for semaphore in wait_semaphores {
            semaphores
                .entry(semaphore.handle())
                .or_insert_with(|| semaphore.state());
        }

        Self {
            buffers: HashMap::default(),
            command_buffers: HashMap::default(),
            images: HashMap::default(),
            semaphores,
        }
    }

    fn from_submit_infos(submit_infos: &'a [SubmitInfo]) -> Self {
        let mut buffers = HashMap::default();
        let mut command_buffers = HashMap::default();
        let mut images = HashMap::default();
        let mut semaphores = HashMap::default();

        for submit_info in submit_infos {
            let SubmitInfo {
                wait_semaphores,
                command_buffers: info_command_buffers,
                signal_semaphores,
                _ne: _,
            } = submit_info;

            for semaphore_submit_info in wait_semaphores {
                let semaphore = &semaphore_submit_info.semaphore;
                semaphores
                    .entry(semaphore.handle())
                    .or_insert_with(|| semaphore.state());
            }

            for command_buffer in info_command_buffers {
                command_buffers
                    .entry(command_buffer.handle())
                    .or_insert_with(|| command_buffer.state());

                let CommandBufferResourcesUsage {
                    buffers: buffers_usage,
                    images: images_usage,
                    buffer_indices: _,
                    image_indices: _,
                } = command_buffer.resources_usage();

                for usage in buffers_usage {
                    let buffer = &usage.buffer;
                    buffers
                        .entry(buffer.handle())
                        .or_insert_with(|| buffer.state());
                }

                for usage in images_usage {
                    let image = &usage.image;
                    images
                        .entry(image.handle())
                        .or_insert_with(|| image.state());
                }
            }

            for semaphore_submit_info in signal_semaphores {
                let semaphore = &semaphore_submit_info.semaphore;
                semaphores
                    .entry(semaphore.handle())
                    .or_insert_with(|| semaphore.state());
            }
        }

        Self {
            buffers,
            command_buffers,
            images,
            semaphores,
        }
    }
}

/// Properties of a queue family in a physical device.
#[derive(Clone, Debug)]
#[non_exhaustive]
pub struct QueueFamilyProperties {
    /// Attributes of the queue family.
    pub queue_flags: QueueFlags,

    /// The number of queues available in this family.
    ///
    /// This is guaranteed to be at least 1 (or else that family wouldn't exist).
    pub queue_count: u32,

    /// If timestamps are supported, the number of bits supported by timestamp operations.
    /// The returned value will be in the range `36..=64`.
    ///
    /// If timestamps are not supported, this is `None`.
    pub timestamp_valid_bits: Option<u32>,

    /// The minimum granularity supported for image transfers, in terms of
    /// `[width, height, depth]`.
    pub min_image_transfer_granularity: [u32; 3],
}

impl From<ash::vk::QueueFamilyProperties> for QueueFamilyProperties {
    #[inline]
    fn from(val: ash::vk::QueueFamilyProperties) -> Self {
        Self {
            queue_flags: val.queue_flags.into(),
            queue_count: val.queue_count,
            timestamp_valid_bits: (val.timestamp_valid_bits != 0)
                .then_some(val.timestamp_valid_bits),
            min_image_transfer_granularity: [
                val.min_image_transfer_granularity.width,
                val.min_image_transfer_granularity.height,
                val.min_image_transfer_granularity.depth,
            ],
        }
    }
}

vulkan_bitflags! {
    #[non_exhaustive]

    /// Attributes of a queue or queue family.
    QueueFlags = QueueFlags(u32);

    /// Queues of this family can execute graphics operations.
    GRAPHICS = GRAPHICS,

    /// Queues of this family can execute compute operations.
    COMPUTE = COMPUTE,

    /// Queues of this family can execute transfer operations.
    TRANSFER = TRANSFER,

    /// Queues of this family can execute sparse memory management operations.
    SPARSE_BINDING = SPARSE_BINDING,

    /// Queues of this family can be created using the `protected` flag.
    PROTECTED = PROTECTED {
        api_version: V1_1,
    },

    /// Queues of this family can execute video decode operations.
    VIDEO_DECODE = VIDEO_DECODE_KHR {
        device_extensions: [khr_video_decode_queue],
    },

    /// Queues of this family can execute video encode operations.
    VIDEO_ENCODE = VIDEO_ENCODE_KHR {
        device_extensions: [khr_video_encode_queue],
    },

    /// Queues of this family can execute optical flow operations.
    OPTICAL_FLOW = OPTICAL_FLOW_NV {
        device_extensions: [nv_optical_flow],
    },
}

/// Error that can happen when submitting work to a queue.
#[derive(Clone, Debug)]
pub enum QueueError {
    VulkanError(VulkanError),

    RequirementNotMet {
        required_for: &'static str,
        requires_one_of: RequiresOneOf,
    },
}

impl Error for QueueError {
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        match self {
            QueueError::VulkanError(err) => Some(err),
            _ => None,
        }
    }
}

impl Display for QueueError {
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), FmtError> {
        match self {
            Self::VulkanError(_) => write!(f, "a runtime error occurred"),
            Self::RequirementNotMet {
                required_for,
                requires_one_of,
            } => write!(
                f,
                "a requirement was not met for: {}; requires one of: {}",
                required_for, requires_one_of,
            ),
        }
    }
}

impl From<VulkanError> for QueueError {
    fn from(err: VulkanError) -> Self {
        Self::VulkanError(err)
    }
}

impl From<RequirementNotMet> for QueueError {
    fn from(err: RequirementNotMet) -> Self {
        Self::RequirementNotMet {
            required_for: err.required_for,
            requires_one_of: err.requires_one_of,
        }
    }
}

#[cfg(test)]
mod tests {
    use crate::sync::fence::Fence;
    use std::{sync::Arc, time::Duration};

    #[test]
    fn empty_submit() {
        let (_device, queue) = gfx_dev_and_queue!();

        queue
            .with(|mut q| unsafe { q.submit_unchecked([Default::default()], None) })
            .unwrap();
    }

    #[test]
    fn signal_fence() {
        unsafe {
            let (device, queue) = gfx_dev_and_queue!();

            let fence = Arc::new(Fence::new(device, Default::default()).unwrap());
            assert!(!fence.is_signaled().unwrap());

            queue
                .with(|mut q| q.submit_unchecked([Default::default()], Some(fence.clone())))
                .unwrap();

            fence.wait(Some(Duration::from_secs(5))).unwrap();
            assert!(fence.is_signaled().unwrap());
        }
    }
}