// Copyright 2021 The SwiftShader Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef VK_STRUCT_CONVERSION_HPP_
#define VK_STRUCT_CONVERSION_HPP_

#include "VkMemory.hpp"
#include "VkStringify.hpp"

#include "System/Debug.hpp"

#include <cstring>
#include <vector>

namespace vk {
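// The structs below wrap the extended "*Info2" copy/blit structures so that the
// original (non-"2") command parameters can be funneled into the same code path.
// Each wrapper deep-copies the caller's region array into a std::vector of the
// corresponding "*2" region structs and points pRegions at that storage, so the
// converted struct remains valid on its own.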
struct CopyBufferInfo : public VkCopyBufferInfo2
{
    CopyBufferInfo(VkBuffer srcBuffer, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferCopy *pRegions)
        : VkCopyBufferInfo2{
              VK_STRUCTURE_TYPE_COPY_BUFFER_INFO_2,
              nullptr,
              srcBuffer,
              dstBuffer,
              regionCount,
              nullptr
          }
    {
        regions.resize(regionCount);
        for(uint32_t i = 0; i < regionCount; i++)
        {
            regions[i] = {
                VK_STRUCTURE_TYPE_BUFFER_COPY_2,
                nullptr,
                pRegions[i].srcOffset,
                pRegions[i].dstOffset,
                pRegions[i].size
            };
        }

        this->pRegions = &regions.front();
    }

private:
    std::vector<VkBufferCopy2> regions;
};

struct CopyImageInfo : public VkCopyImageInfo2
{
    CopyImageInfo(VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions)
        : VkCopyImageInfo2{
              VK_STRUCTURE_TYPE_COPY_IMAGE_INFO_2,
              nullptr,
              srcImage,
              srcImageLayout,
              dstImage,
              dstImageLayout,
              regionCount,
              nullptr
          }
    {
        regions.resize(regionCount);
        for(uint32_t i = 0; i < regionCount; i++)
        {
            regions[i] = {
                VK_STRUCTURE_TYPE_IMAGE_COPY_2,
                nullptr,
                pRegions[i].srcSubresource,
                pRegions[i].srcOffset,
                pRegions[i].dstSubresource,
                pRegions[i].dstOffset,
                pRegions[i].extent
            };
        }

        this->pRegions = &regions.front();
    }

private:
    std::vector<VkImageCopy2> regions;
};

struct BlitImageInfo : public VkBlitImageInfo2
{
    BlitImageInfo(VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter)
        : VkBlitImageInfo2{
              VK_STRUCTURE_TYPE_BLIT_IMAGE_INFO_2,
              nullptr,
              srcImage,
              srcImageLayout,
              dstImage,
              dstImageLayout,
              regionCount,
              nullptr,
              filter
          }
    {
        regions.resize(regionCount);
        for(uint32_t i = 0; i < regionCount; i++)
        {
            regions[i] = {
                VK_STRUCTURE_TYPE_IMAGE_BLIT_2,
                nullptr,
                pRegions[i].srcSubresource,
                { pRegions[i].srcOffsets[0], pRegions[i].srcOffsets[1] },
                pRegions[i].dstSubresource,
                { pRegions[i].dstOffsets[0], pRegions[i].dstOffsets[1] }
            };
        }

        this->pRegions = &regions.front();
    }

private:
    std::vector<VkImageBlit2> regions;
};

struct CopyBufferToImageInfo : public VkCopyBufferToImageInfo2
{
    CopyBufferToImageInfo(VkBuffer srcBuffer, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkBufferImageCopy *pRegions)
        : VkCopyBufferToImageInfo2{
              VK_STRUCTURE_TYPE_COPY_BUFFER_TO_IMAGE_INFO_2,
              nullptr,
              srcBuffer,
              dstImage,
              dstImageLayout,
              regionCount,
              nullptr
          }
    {
        regions.resize(regionCount);
        for(uint32_t i = 0; i < regionCount; i++)
        {
            regions[i] = {
                VK_STRUCTURE_TYPE_BUFFER_IMAGE_COPY_2,
                nullptr,
                pRegions[i].bufferOffset,
                pRegions[i].bufferRowLength,
                pRegions[i].bufferImageHeight,
                pRegions[i].imageSubresource,
                pRegions[i].imageOffset,
                pRegions[i].imageExtent
            };
        }

        this->pRegions = &regions.front();
    }

private:
    std::vector<VkBufferImageCopy2> regions;
};

struct CopyImageToBufferInfo : public VkCopyImageToBufferInfo2
{
    CopyImageToBufferInfo(VkImage srcImage, VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions)
        : VkCopyImageToBufferInfo2{
              VK_STRUCTURE_TYPE_COPY_IMAGE_TO_BUFFER_INFO_2,
              nullptr,
              srcImage,
              srcImageLayout,
              dstBuffer,
              regionCount,
              nullptr
          }
    {
        regions.resize(regionCount);
        for(uint32_t i = 0; i < regionCount; i++)
        {
            regions[i] = {
                VK_STRUCTURE_TYPE_BUFFER_IMAGE_COPY_2,
                nullptr,
                pRegions[i].bufferOffset,
                pRegions[i].bufferRowLength,
                pRegions[i].bufferImageHeight,
                pRegions[i].imageSubresource,
                pRegions[i].imageOffset,
                pRegions[i].imageExtent
            };
        }

        this->pRegions = &regions.front();
    }

private:
    std::vector<VkBufferImageCopy2> regions;
};

struct ResolveImageInfo : public VkResolveImageInfo2
{
    ResolveImageInfo(VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions)
        : VkResolveImageInfo2{
              VK_STRUCTURE_TYPE_RESOLVE_IMAGE_INFO_2,
              nullptr,
              srcImage,
              srcImageLayout,
              dstImage,
              dstImageLayout,
              regionCount,
              nullptr
          }
    {
        regions.resize(regionCount);
        for(uint32_t i = 0; i < regionCount; i++)
        {
            regions[i] = {
                VK_STRUCTURE_TYPE_IMAGE_RESOLVE_2,
                nullptr,
                pRegions[i].srcSubresource,
                pRegions[i].srcOffset,
                pRegions[i].dstSubresource,
                pRegions[i].dstOffset,
                pRegions[i].extent
            };
        }

        this->pRegions = &regions.front();
    }

private:
    std::vector<VkImageResolve2> regions;
};
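// DependencyInfo converts vkCmdPipelineBarrier-style parameters (global source
// and destination stage masks plus separate barrier arrays) into a
// VkDependencyInfo holding synchronization2-style barrier structs. When no
// barriers are provided, a single VkMemoryBarrier2 is synthesized just to carry
// the stage masks.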
struct DependencyInfo : public VkDependencyInfo
{
    DependencyInfo(VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
                   VkDependencyFlags dependencyFlags,
                   uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                   uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                   uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers)
        : VkDependencyInfo{
              VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
              nullptr,
              dependencyFlags,
              memoryBarrierCount,
              nullptr,
              bufferMemoryBarrierCount,
              nullptr,
              imageMemoryBarrierCount,
              nullptr
          }
    {
        if((memoryBarrierCount == 0) &&
           (bufferMemoryBarrierCount == 0) &&
           (imageMemoryBarrierCount == 0))
        {
            // Create a single memory barrier entry to store the source and destination stage masks
            memoryBarriers.resize(1);
            memoryBarriers[0] = {
                VK_STRUCTURE_TYPE_MEMORY_BARRIER_2,
                nullptr,
                srcStageMask,
                VK_ACCESS_2_NONE,
                dstStageMask,
                VK_ACCESS_2_NONE
            };
        }
        else
        {
            memoryBarriers.resize(memoryBarrierCount);
            for(uint32_t i = 0; i < memoryBarrierCount; i++)
            {
                memoryBarriers[i] = {
                    VK_STRUCTURE_TYPE_MEMORY_BARRIER_2,
                    pMemoryBarriers[i].pNext,
                    srcStageMask,
                    pMemoryBarriers[i].srcAccessMask,
                    dstStageMask,
                    pMemoryBarriers[i].dstAccessMask
                };
            }

            bufferMemoryBarriers.resize(bufferMemoryBarrierCount);
            for(uint32_t i = 0; i < bufferMemoryBarrierCount; i++)
            {
                bufferMemoryBarriers[i] = {
                    VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER_2,
                    pBufferMemoryBarriers[i].pNext,
                    srcStageMask,
                    pBufferMemoryBarriers[i].srcAccessMask,
                    dstStageMask,
                    pBufferMemoryBarriers[i].dstAccessMask,
                    pBufferMemoryBarriers[i].srcQueueFamilyIndex,
                    pBufferMemoryBarriers[i].dstQueueFamilyIndex,
                    pBufferMemoryBarriers[i].buffer,
                    pBufferMemoryBarriers[i].offset,
                    pBufferMemoryBarriers[i].size
                };
            }

            imageMemoryBarriers.resize(imageMemoryBarrierCount);
            for(uint32_t i = 0; i < imageMemoryBarrierCount; i++)
            {
                imageMemoryBarriers[i] = {
                    VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
                    pImageMemoryBarriers[i].pNext,
                    srcStageMask,
                    pImageMemoryBarriers[i].srcAccessMask,
                    dstStageMask,
                    pImageMemoryBarriers[i].dstAccessMask,
                    pImageMemoryBarriers[i].oldLayout,
                    pImageMemoryBarriers[i].newLayout,
                    pImageMemoryBarriers[i].srcQueueFamilyIndex,
                    pImageMemoryBarriers[i].dstQueueFamilyIndex,
                    pImageMemoryBarriers[i].image,
                    pImageMemoryBarriers[i].subresourceRange
                };
            }
        }

        this->pMemoryBarriers = memoryBarriers.empty() ? nullptr : &memoryBarriers.front();
        this->pBufferMemoryBarriers = bufferMemoryBarriers.empty() ? nullptr : &bufferMemoryBarriers.front();
        this->pImageMemoryBarriers = imageMemoryBarriers.empty() ? nullptr : &imageMemoryBarriers.front();
    }

private:
    std::vector<VkMemoryBarrier2> memoryBarriers;
    std::vector<VkBufferMemoryBarrier2> bufferMemoryBarriers;
    std::vector<VkImageMemoryBarrier2> imageMemoryBarriers;
};
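// Small conversion helpers: ImageSubresource and ImageSubresourceRange describe
// the single mip level (and, for the range, its layers) selected by a
// VkImageSubresourceLayers, and Extent2D drops the depth component of a
// VkExtent3D.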
struct ImageSubresource : VkImageSubresource
{
    ImageSubresource(const VkImageSubresourceLayers &subresourceLayers)
        : VkImageSubresource{
              subresourceLayers.aspectMask,
              subresourceLayers.mipLevel,
              subresourceLayers.baseArrayLayer
          }
    {}
};

struct ImageSubresourceRange : VkImageSubresourceRange
{
    ImageSubresourceRange(const VkImageSubresourceLayers &subresourceLayers)
        : VkImageSubresourceRange{
              subresourceLayers.aspectMask,
              subresourceLayers.mipLevel,
              1,
              subresourceLayers.baseArrayLayer,
              subresourceLayers.layerCount
          }
    {}
};

struct Extent2D : VkExtent2D
{
    Extent2D(const VkExtent3D &extent3D)
        : VkExtent2D{ extent3D.width, extent3D.height }
    {}
};
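// SubmitInfo is a self-contained copy of the data passed to vkQueueSubmit or
// vkQueueSubmit2. Allocate() packs the SubmitInfo array and every array it
// references (semaphores, wait stage masks, timeline values, command buffers)
// into a single host allocation, so the submission data stays valid
// independently of the caller's pointers. Release() frees that allocation.
//
// Hypothetical usage (not part of this header):
//   SubmitInfo *submits = SubmitInfo::Allocate(submitCount, pSubmits);
//   // ... process the submission asynchronously ...
//   SubmitInfo::Release(submits);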
struct SubmitInfo
{
    static SubmitInfo *Allocate(uint32_t submitCount, const VkSubmitInfo *pSubmits)
    {
        size_t submitSize = sizeof(SubmitInfo) * submitCount;
        size_t totalSize = Align8(submitSize);
        for(uint32_t i = 0; i < submitCount; i++)
        {
            totalSize += Align8(pSubmits[i].waitSemaphoreCount * sizeof(VkSemaphore));
            totalSize += Align8(pSubmits[i].waitSemaphoreCount * sizeof(VkPipelineStageFlags));
            totalSize += Align8(pSubmits[i].signalSemaphoreCount * sizeof(VkSemaphore));
            totalSize += Align8(pSubmits[i].commandBufferCount * sizeof(VkCommandBuffer));

            for(const auto *extension = reinterpret_cast<const VkBaseInStructure *>(pSubmits[i].pNext);
                extension != nullptr; extension = reinterpret_cast<const VkBaseInStructure *>(extension->pNext))
            {
                switch(extension->sType)
                {
                case VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO:
                {
                    const auto *tlsSubmitInfo = reinterpret_cast<const VkTimelineSemaphoreSubmitInfo *>(extension);
                    totalSize += Align8(tlsSubmitInfo->waitSemaphoreValueCount * sizeof(uint64_t));
                    totalSize += Align8(tlsSubmitInfo->signalSemaphoreValueCount * sizeof(uint64_t));
                }
                break;
                case VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO:
                    // SwiftShader doesn't use device group submit info because it only supports a single physical device.
                    // However, this extension is core in Vulkan 1.1, so we must treat it as a valid structure type.
                    break;
                case VK_STRUCTURE_TYPE_MAX_ENUM:
                    // dEQP tests that this value is ignored.
                    break;
                default:
                    UNSUPPORTED("submitInfo[%d]->pNext sType: %s", i, vk::Stringify(extension->sType).c_str());
                    break;
                }
            }
        }

        uint8_t *buffer = static_cast<uint8_t *>(
            vk::allocateHostMemory(totalSize, vk::HOST_MEMORY_ALLOCATION_ALIGNMENT, vk::NULL_ALLOCATION_CALLBACKS, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT));
        uint8_t *mem = buffer;

        auto submits = new(mem) SubmitInfo[submitCount];
        mem += Align8(submitSize);

        for(uint32_t i = 0; i < submitCount; i++)
        {
            submits[i].commandBufferCount = pSubmits[i].commandBufferCount;
            submits[i].signalSemaphoreCount = pSubmits[i].signalSemaphoreCount;
            submits[i].waitSemaphoreCount = pSubmits[i].waitSemaphoreCount;

            submits[i].pWaitSemaphores = nullptr;
            submits[i].pWaitDstStageMask = nullptr;
            submits[i].pSignalSemaphores = nullptr;
            submits[i].pCommandBuffers = nullptr;

            if(pSubmits[i].waitSemaphoreCount > 0)
            {
                size_t size = pSubmits[i].waitSemaphoreCount * sizeof(VkSemaphore);
                submits[i].pWaitSemaphores = reinterpret_cast<VkSemaphore *>(mem);
                memcpy(mem, pSubmits[i].pWaitSemaphores, size);
                mem += Align8(size);

                size = pSubmits[i].waitSemaphoreCount * sizeof(VkPipelineStageFlags);
                submits[i].pWaitDstStageMask = reinterpret_cast<VkPipelineStageFlags *>(mem);
                memcpy(mem, pSubmits[i].pWaitDstStageMask, size);
                mem += Align8(size);
            }

            if(pSubmits[i].signalSemaphoreCount > 0)
            {
                size_t size = pSubmits[i].signalSemaphoreCount * sizeof(VkSemaphore);
                submits[i].pSignalSemaphores = reinterpret_cast<VkSemaphore *>(mem);
                memcpy(mem, pSubmits[i].pSignalSemaphores, size);
                mem += Align8(size);
            }

            if(pSubmits[i].commandBufferCount > 0)
            {
                size_t size = pSubmits[i].commandBufferCount * sizeof(VkCommandBuffer);
                submits[i].pCommandBuffers = reinterpret_cast<VkCommandBuffer *>(mem);
                memcpy(mem, pSubmits[i].pCommandBuffers, size);
                mem += Align8(size);
            }

            submits[i].waitSemaphoreValueCount = 0;
            submits[i].pWaitSemaphoreValues = nullptr;
            submits[i].signalSemaphoreValueCount = 0;
            submits[i].pSignalSemaphoreValues = nullptr;

            for(const auto *extension = reinterpret_cast<const VkBaseInStructure *>(pSubmits[i].pNext);
                extension != nullptr; extension = reinterpret_cast<const VkBaseInStructure *>(extension->pNext))
            {
                switch(extension->sType)
                {
                case VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO:
                {
                    const VkTimelineSemaphoreSubmitInfo *tlsSubmitInfo = reinterpret_cast<const VkTimelineSemaphoreSubmitInfo *>(extension);

                    if(tlsSubmitInfo->waitSemaphoreValueCount > 0)
                    {
                        submits[i].waitSemaphoreValueCount = tlsSubmitInfo->waitSemaphoreValueCount;
                        size_t size = tlsSubmitInfo->waitSemaphoreValueCount * sizeof(uint64_t);
                        submits[i].pWaitSemaphoreValues = reinterpret_cast<uint64_t *>(mem);
                        memcpy(mem, tlsSubmitInfo->pWaitSemaphoreValues, size);
                        mem += Align8(size);
                    }

                    if(tlsSubmitInfo->signalSemaphoreValueCount > 0)
                    {
                        submits[i].signalSemaphoreValueCount = tlsSubmitInfo->signalSemaphoreValueCount;
                        size_t size = tlsSubmitInfo->signalSemaphoreValueCount * sizeof(uint64_t);
                        submits[i].pSignalSemaphoreValues = reinterpret_cast<uint64_t *>(mem);
                        memcpy(mem, tlsSubmitInfo->pSignalSemaphoreValues, size);
                        mem += Align8(size);
                    }
                }
                break;
                case VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO:
                    // SwiftShader doesn't use device group submit info because it only supports a single physical device.
                    // However, this extension is core in Vulkan 1.1, so we must treat it as a valid structure type.
                    break;
                case VK_STRUCTURE_TYPE_MAX_ENUM:
                    // dEQP tests that this value is ignored.
                    break;
                default:
                    UNSUPPORTED("submitInfo[%d]->pNext sType: %s", i, vk::Stringify(extension->sType).c_str());
                    break;
                }
            }
        }

        ASSERT(static_cast<size_t>(mem - buffer) == totalSize);
        return submits;
    }
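    // The VkSubmitInfo2 overload performs the same flattening for vkQueueSubmit2
    // submissions: semaphores, stage masks and timeline values come from the
    // VkSemaphoreSubmitInfo arrays, and command buffers from the
    // VkCommandBufferSubmitInfo array.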
    static SubmitInfo *Allocate(uint32_t submitCount, const VkSubmitInfo2 *pSubmits)
    {
        size_t submitSize = sizeof(SubmitInfo) * submitCount;
        size_t totalSize = Align8(submitSize);
        for(uint32_t i = 0; i < submitCount; i++)
        {
            totalSize += Align8(pSubmits[i].waitSemaphoreInfoCount * sizeof(VkSemaphore));
            totalSize += Align8(pSubmits[i].waitSemaphoreInfoCount * sizeof(VkPipelineStageFlags));
            totalSize += Align8(pSubmits[i].waitSemaphoreInfoCount * sizeof(uint64_t));
            totalSize += Align8(pSubmits[i].signalSemaphoreInfoCount * sizeof(VkSemaphore));
            totalSize += Align8(pSubmits[i].signalSemaphoreInfoCount * sizeof(uint64_t));
            totalSize += Align8(pSubmits[i].commandBufferInfoCount * sizeof(VkCommandBuffer));

            for(const auto *extension = reinterpret_cast<const VkBaseInStructure *>(pSubmits[i].pNext);
                extension != nullptr; extension = reinterpret_cast<const VkBaseInStructure *>(extension->pNext))
            {
                switch(extension->sType)
                {
                case VK_STRUCTURE_TYPE_MAX_ENUM:
                    // dEQP tests that this value is ignored.
                    break;
                case VK_STRUCTURE_TYPE_PERFORMANCE_QUERY_SUBMIT_INFO_KHR:          // VK_KHR_performance_query
                case VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_KHR:  // VK_KHR_win32_keyed_mutex
                case VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_NV:   // VK_NV_win32_keyed_mutex
                default:
                    UNSUPPORTED("submitInfo[%d]->pNext sType: %s", i, vk::Stringify(extension->sType).c_str());
                    break;
                }
            }
        }

        uint8_t *buffer = static_cast<uint8_t *>(
            vk::allocateHostMemory(totalSize, vk::HOST_MEMORY_ALLOCATION_ALIGNMENT, vk::NULL_ALLOCATION_CALLBACKS, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT));
        uint8_t *mem = buffer;

        auto submits = new(mem) SubmitInfo[submitCount];
        mem += Align8(submitSize);

        for(uint32_t i = 0; i < submitCount; i++)
        {
            submits[i].commandBufferCount = pSubmits[i].commandBufferInfoCount;
            submits[i].signalSemaphoreCount = pSubmits[i].signalSemaphoreInfoCount;
            submits[i].waitSemaphoreCount = pSubmits[i].waitSemaphoreInfoCount;

            submits[i].signalSemaphoreValueCount = pSubmits[i].signalSemaphoreInfoCount;
            submits[i].waitSemaphoreValueCount = pSubmits[i].waitSemaphoreInfoCount;

            submits[i].pWaitSemaphores = nullptr;
            submits[i].pWaitDstStageMask = nullptr;
            submits[i].pSignalSemaphores = nullptr;
            submits[i].pCommandBuffers = nullptr;
            submits[i].pWaitSemaphoreValues = nullptr;
            submits[i].pSignalSemaphoreValues = nullptr;

            if(submits[i].waitSemaphoreCount > 0)
            {
                size_t size = submits[i].waitSemaphoreCount * sizeof(VkSemaphore);
                submits[i].pWaitSemaphores = reinterpret_cast<VkSemaphore *>(mem);
                mem += Align8(size);

                size = submits[i].waitSemaphoreCount * sizeof(VkPipelineStageFlags);
                submits[i].pWaitDstStageMask = reinterpret_cast<VkPipelineStageFlags *>(mem);
                mem += Align8(size);

                size = submits[i].waitSemaphoreCount * sizeof(uint64_t);
                submits[i].pWaitSemaphoreValues = reinterpret_cast<uint64_t *>(mem);
                mem += Align8(size);

                for(uint32_t j = 0; j < submits[i].waitSemaphoreCount; j++)
                {
                    submits[i].pWaitSemaphores[j] = pSubmits[i].pWaitSemaphoreInfos[j].semaphore;
                    submits[i].pWaitDstStageMask[j] = pSubmits[i].pWaitSemaphoreInfos[j].stageMask;
                    submits[i].pWaitSemaphoreValues[j] = pSubmits[i].pWaitSemaphoreInfos[j].value;
                }
            }

            if(submits[i].signalSemaphoreCount > 0)
            {
                size_t size = submits[i].signalSemaphoreCount * sizeof(VkSemaphore);
                submits[i].pSignalSemaphores = reinterpret_cast<VkSemaphore *>(mem);
                mem += Align8(size);

                size = submits[i].signalSemaphoreCount * sizeof(uint64_t);
                submits[i].pSignalSemaphoreValues = reinterpret_cast<uint64_t *>(mem);
                mem += Align8(size);

                for(uint32_t j = 0; j < submits[i].signalSemaphoreCount; j++)
                {
                    submits[i].pSignalSemaphores[j] = pSubmits[i].pSignalSemaphoreInfos[j].semaphore;
                    submits[i].pSignalSemaphoreValues[j] = pSubmits[i].pSignalSemaphoreInfos[j].value;
                }
            }

            if(submits[i].commandBufferCount > 0)
            {
                size_t size = submits[i].commandBufferCount * sizeof(VkCommandBuffer);
                submits[i].pCommandBuffers = reinterpret_cast<VkCommandBuffer *>(mem);
                mem += Align8(size);

                for(uint32_t j = 0; j < submits[i].commandBufferCount; j++)
                {
                    submits[i].pCommandBuffers[j] = pSubmits[i].pCommandBufferInfos[j].commandBuffer;
                }
            }
        }

        ASSERT(static_cast<size_t>(mem - buffer) == totalSize);
        return submits;
    }
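    // Frees the single allocation created by Allocate(). All arrays referenced by
    // the SubmitInfo entries live inside that same allocation.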
    static void Release(SubmitInfo *submitInfo)
    {
        vk::freeHostMemory(submitInfo, NULL_ALLOCATION_CALLBACKS);
    }

    uint32_t waitSemaphoreCount;
    VkSemaphore *pWaitSemaphores;
    VkPipelineStageFlags *pWaitDstStageMask;
    uint32_t commandBufferCount;
    VkCommandBuffer *pCommandBuffers;
    uint32_t signalSemaphoreCount;
    VkSemaphore *pSignalSemaphores;
    uint32_t waitSemaphoreValueCount;
    uint64_t *pWaitSemaphoreValues;
    uint32_t signalSemaphoreValueCount;
    uint64_t *pSignalSemaphoreValues;

private:
    static size_t Align8(size_t size)
    {
        // Keep all arrays 8-byte aligned, so that an odd number of `VkPipelineStageFlags` does not break the
        // alignment of the other fields.
        constexpr size_t align = 8;
        return (size + align - 1) & ~(align - 1);
    }
};

}  // namespace vk

#endif  // VK_STRUCT_CONVERSION_HPP_