/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2016 The Khronos Group Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*
 * \file vktSparseResourcesShaderIntrinsicsBase.cpp
 * \brief Sparse Resources Shader Intrinsics Base Classes
 *//*--------------------------------------------------------------------*/

#include "vktSparseResourcesShaderIntrinsicsBase.hpp"
#include "vkCmdUtil.hpp"
#include "vkBarrierUtil.hpp"

using namespace vk;

namespace vkt
{
namespace sparse
{

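// The helpers below translate TCU / Vulkan format and image type information into
// the SPIR-V assembly fragments used when assembling the test shaders.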
std::string getOpTypeImageComponent(const tcu::TextureFormat &format)
{
    switch (tcu::getTextureChannelClass(format.type))
    {
    case tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER:
        return "OpTypeInt 32 0";
    case tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER:
        return "OpTypeInt 32 1";
    case tcu::TEXTURECHANNELCLASS_UNSIGNED_FIXED_POINT:
    case tcu::TEXTURECHANNELCLASS_SIGNED_FIXED_POINT:
    case tcu::TEXTURECHANNELCLASS_FLOATING_POINT:
        return "OpTypeFloat 32";
    default:
        DE_FATAL("Unexpected channel type");
        return "";
    }
}

std::string getOpTypeImageComponent(const vk::PlanarFormatDescription &description)
{
    switch (description.channels[0].type)
    {
    case tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER:
        return "OpTypeInt 32 0";
    case tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER:
        return "OpTypeInt 32 1";
    case tcu::TEXTURECHANNELCLASS_UNSIGNED_FIXED_POINT:
    case tcu::TEXTURECHANNELCLASS_SIGNED_FIXED_POINT:
    case tcu::TEXTURECHANNELCLASS_FLOATING_POINT:
        return "OpTypeFloat 32";
    default:
        DE_FATAL("Unexpected channel type");
        return "";
    }
}

std::string getImageComponentTypeName(const tcu::TextureFormat &format)
{
    switch (tcu::getTextureChannelClass(format.type))
    {
    case tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER:
        return "%type_uint";
    case tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER:
        return "%type_int";
    case tcu::TEXTURECHANNELCLASS_UNSIGNED_FIXED_POINT:
    case tcu::TEXTURECHANNELCLASS_SIGNED_FIXED_POINT:
    case tcu::TEXTURECHANNELCLASS_FLOATING_POINT:
        return "%type_float";
    default:
        DE_FATAL("Unexpected channel type");
        return "";
    }
}

std::string getImageComponentTypeName(const vk::PlanarFormatDescription &description)
{
    switch (description.channels[0].type)
    {
    case tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER:
        return (formatIsR64(description.planes[0].planeCompatibleFormat) ? "%type_uint64" : "%type_uint");
    case tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER:
        return (formatIsR64(description.planes[0].planeCompatibleFormat) ? "%type_int64" : "%type_int");
    case tcu::TEXTURECHANNELCLASS_UNSIGNED_FIXED_POINT:
    case tcu::TEXTURECHANNELCLASS_SIGNED_FIXED_POINT:
    case tcu::TEXTURECHANNELCLASS_FLOATING_POINT:
        return "%type_float";
    default:
        DE_FATAL("Unexpected channel type");
        return "";
    }
}

std::string getImageComponentVec4TypeName(const tcu::TextureFormat &format)
{
    switch (tcu::getTextureChannelClass(format.type))
    {
    case tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER:
        return "%type_uvec4";
    case tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER:
        return "%type_ivec4";
    case tcu::TEXTURECHANNELCLASS_UNSIGNED_FIXED_POINT:
    case tcu::TEXTURECHANNELCLASS_SIGNED_FIXED_POINT:
    case tcu::TEXTURECHANNELCLASS_FLOATING_POINT:
        return "%type_vec4";
    default:
        DE_FATAL("Unexpected channel type");
        return "";
    }
}

std::string getImageComponentVec4TypeName(const vk::PlanarFormatDescription &description)
{
    switch (description.channels[0].type)
    {
    case tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER:
        return (formatIsR64(description.planes[0].planeCompatibleFormat) ? "%type_u64vec4" : "%type_uvec4");
    case tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER:
        return (formatIsR64(description.planes[0].planeCompatibleFormat) ? "%type_i64vec4" : "%type_ivec4");
    case tcu::TEXTURECHANNELCLASS_UNSIGNED_FIXED_POINT:
    case tcu::TEXTURECHANNELCLASS_SIGNED_FIXED_POINT:
    case tcu::TEXTURECHANNELCLASS_FLOATING_POINT:
        return "%type_vec4";
    default:
        DE_FATAL("Unexpected channel type");
        return "";
    }
}

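// Builds a complete OpTypeImage declaration for the sparse image: dimensionality,
// depth/arrayed/multisample flags, sampled usage (1 = sampled, 2 = storage) and image format.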
std::string getOpTypeImageSparse(const ImageType imageType, const tcu::TextureFormat &format,
                                 const std::string &componentType, const bool requiresSampler)
{
    std::ostringstream src;

    src << "OpTypeImage " << componentType << " ";

    switch (imageType)
    {
    case IMAGE_TYPE_1D:
        src << "1D 0 0 0 ";
        break;
    case IMAGE_TYPE_1D_ARRAY:
        src << "1D 0 1 0 ";
        break;
    case IMAGE_TYPE_2D:
        src << "2D 0 0 0 ";
        break;
    case IMAGE_TYPE_2D_ARRAY:
        src << "2D 0 1 0 ";
        break;
    case IMAGE_TYPE_3D:
        src << "3D 0 0 0 ";
        break;
    case IMAGE_TYPE_CUBE:
        src << "Cube 0 0 0 ";
        break;
    case IMAGE_TYPE_CUBE_ARRAY:
        src << "Cube 0 1 0 ";
        break;
    default:
        DE_FATAL("Unexpected image type");
        break;
    }

    if (requiresSampler)
        src << "1 ";
    else
        src << "2 ";

    switch (format.order)
    {
    case tcu::TextureFormat::R:
        src << "R";
        break;
    case tcu::TextureFormat::RG:
        src << "Rg";
        break;
    case tcu::TextureFormat::RGB:
        src << "Rgb";
        break;
    case tcu::TextureFormat::RGBA:
        src << "Rgba";
        break;
    default:
        DE_FATAL("Unexpected channel order");
        break;
    }

    switch (format.type)
    {
    case tcu::TextureFormat::SIGNED_INT8:
        src << "8i";
        break;
    case tcu::TextureFormat::SIGNED_INT16:
        src << "16i";
        break;
    case tcu::TextureFormat::SIGNED_INT32:
        src << "32i";
        break;
    case tcu::TextureFormat::UNSIGNED_INT8:
        src << "8ui";
        break;
    case tcu::TextureFormat::UNSIGNED_INT16:
        src << "16ui";
        break;
    case tcu::TextureFormat::UNSIGNED_INT32:
        src << "32ui";
        break;
    case tcu::TextureFormat::SNORM_INT8:
        src << "8Snorm";
        break;
    case tcu::TextureFormat::SNORM_INT16:
        src << "16Snorm";
        break;
    case tcu::TextureFormat::SNORM_INT32:
        src << "32Snorm";
        break;
    case tcu::TextureFormat::UNORM_INT8:
        src << "8";
        break;
    case tcu::TextureFormat::UNORM_INT16:
        src << "16";
        break;
    case tcu::TextureFormat::UNORM_INT32:
        src << "32";
        break;
    default:
        DE_FATAL("Unexpected channel type");
        break;
    }

    return src.str();
}

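// Overload keyed on VkFormat so that packed and multi-planar (YCbCr) formats can be
// expressed with a compatible single-plane image format string.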
std::string getOpTypeImageSparse(const ImageType imageType, const VkFormat format, const std::string &componentType,
                                 const bool requiresSampler)
{
    std::ostringstream src;

    src << "OpTypeImage " << componentType << " ";

    switch (imageType)
    {
    case IMAGE_TYPE_1D:
        src << "1D 0 0 0 ";
        break;
    case IMAGE_TYPE_1D_ARRAY:
        src << "1D 0 1 0 ";
        break;
    case IMAGE_TYPE_2D:
        src << "2D 0 0 0 ";
        break;
    case IMAGE_TYPE_2D_ARRAY:
        src << "2D 0 1 0 ";
        break;
    case IMAGE_TYPE_3D:
        src << "3D 0 0 0 ";
        break;
    case IMAGE_TYPE_CUBE:
        src << "Cube 0 0 0 ";
        break;
    case IMAGE_TYPE_CUBE_ARRAY:
        src << "Cube 0 1 0 ";
        break;
    default:
        DE_FATAL("Unexpected image type");
        break;
    }

    if (requiresSampler)
        src << "1 ";
    else
        src << "2 ";

    switch (format)
    {
    case VK_FORMAT_R8_SINT:
        src << "R8i";
        break;
    case VK_FORMAT_R16_SINT:
        src << "R16i";
        break;
    case VK_FORMAT_R32_SINT:
        src << "R32i";
        break;
    case VK_FORMAT_R64_SINT:
        src << "R64i";
        break;
    case VK_FORMAT_R8_UINT:
        src << "R8ui";
        break;
    case VK_FORMAT_R16_UINT:
        src << "R16ui";
        break;
    case VK_FORMAT_R32_UINT:
        src << "R32ui";
        break;
    case VK_FORMAT_R64_UINT:
        src << "R64ui";
        break;
    case VK_FORMAT_R8_SNORM:
        src << "R8Snorm";
        break;
    case VK_FORMAT_R16_SNORM:
        src << "R16Snorm";
        break;
    case VK_FORMAT_R8_UNORM:
        src << "R8";
        break;
    case VK_FORMAT_R16_UNORM:
        src << "R16";
        break;

    case VK_FORMAT_R8G8_SINT:
        src << "Rg8i";
        break;
    case VK_FORMAT_R16G16_SINT:
        src << "Rg16i";
        break;
    case VK_FORMAT_R32G32_SINT:
        src << "Rg32i";
        break;
    case VK_FORMAT_R8G8_UINT:
        src << "Rg8ui";
        break;
    case VK_FORMAT_R16G16_UINT:
        src << "Rg16ui";
        break;
    case VK_FORMAT_R32G32_UINT:
        src << "Rg32ui";
        break;
    case VK_FORMAT_R8G8_SNORM:
        src << "Rg8Snorm";
        break;
    case VK_FORMAT_R16G16_SNORM:
        src << "Rg16Snorm";
        break;
    case VK_FORMAT_R8G8_UNORM:
        src << "Rg8";
        break;
    case VK_FORMAT_R16G16_UNORM:
        src << "Rg16";
        break;

    case VK_FORMAT_R8G8B8A8_SINT:
        src << "Rgba8i";
        break;
    case VK_FORMAT_R16G16B16A16_SINT:
        src << "Rgba16i";
        break;
    case VK_FORMAT_R32G32B32A32_SINT:
        src << "Rgba32i";
        break;
    case VK_FORMAT_R8G8B8A8_UINT:
        src << "Rgba8ui";
        break;
    case VK_FORMAT_R16G16B16A16_UINT:
        src << "Rgba16ui";
        break;
    case VK_FORMAT_R32G32B32A32_UINT:
        src << "Rgba32ui";
        break;
    case VK_FORMAT_R8G8B8A8_SNORM:
        src << "Rgba8Snorm";
        break;
    case VK_FORMAT_R16G16B16A16_SNORM:
        src << "Rgba16Snorm";
        break;
    case VK_FORMAT_R8G8B8A8_UNORM:
        src << "Rgba8";
        break;
    case VK_FORMAT_R16G16B16A16_UNORM:
        src << "Rgba16";
        break;

    case VK_FORMAT_G8B8G8R8_422_UNORM:
        src << "Rgba8";
        break;
    case VK_FORMAT_B8G8R8G8_422_UNORM:
        src << "Rgba8";
        break;
    case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
        src << "Rgba8";
        break;
    case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
        src << "Rgba8";
        break;
    case VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM:
        src << "Rgba8";
        break;
    case VK_FORMAT_G8_B8R8_2PLANE_422_UNORM:
        src << "Rgba8";
        break;
    case VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM:
        src << "Rgba8";
        break;
    case VK_FORMAT_R10X6_UNORM_PACK16:
        src << "R16";
        break;
    case VK_FORMAT_R10X6G10X6_UNORM_2PACK16:
        src << "Rg16";
        break;
    case VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16:
        src << "Rgba16";
        break;
    case VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16:
        src << "Rgba16";
        break;
    case VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16:
        src << "Rgba16";
        break;
    case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16:
        src << "Rgba16";
        break;
    case VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16:
        src << "Rgba16";
        break;
    case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16:
        src << "Rgba16";
        break;
    case VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16:
        src << "Rgba16";
        break;
    case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16:
        src << "Rgba16";
        break;
    case VK_FORMAT_R12X4_UNORM_PACK16:
        src << "R16";
        break;
    case VK_FORMAT_R12X4G12X4_UNORM_2PACK16:
        src << "Rg16";
        break;
    case VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16:
        src << "Rgba16";
        break;
    case VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16:
        src << "Rgba16";
        break;
    case VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16:
        src << "Rgba16";
        break;
    case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16:
        src << "Rgba16";
        break;
    case VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16:
        src << "Rgba16";
        break;
    case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16:
        src << "Rgba16";
        break;
    case VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16:
        src << "Rgba16";
        break;
    case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16:
        src << "Rgba16";
        break;
    case VK_FORMAT_G16B16G16R16_422_UNORM:
        src << "Rgba16";
        break;
    case VK_FORMAT_B16G16R16G16_422_UNORM:
        src << "Rgba16";
        break;
    case VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM:
        src << "Rgba16";
        break;
    case VK_FORMAT_G16_B16R16_2PLANE_420_UNORM:
        src << "Rgba16";
        break;
    case VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM:
        src << "Rgba16";
        break;
    case VK_FORMAT_G16_B16R16_2PLANE_422_UNORM:
        src << "Rgba16";
        break;
    case VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM:
        src << "Rgba16";
        break;
    case VK_FORMAT_G8_B8R8_2PLANE_444_UNORM_EXT:
        src << "Rgba8";
        break;
    case VK_FORMAT_G10X6_B10X6R10X6_2PLANE_444_UNORM_3PACK16_EXT:
        src << "Rgba16";
        break;
    case VK_FORMAT_G12X4_B12X4R12X4_2PLANE_444_UNORM_3PACK16_EXT:
        src << "Rgba16";
        break;
    case VK_FORMAT_G16_B16R16_2PLANE_444_UNORM_EXT:
        src << "Rgba16";
        break;

    default:
        DE_FATAL("Unexpected texture format");
        break;
    }
    return src.str();
}

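// The residency image is always an R32ui storage image with the same dimensionality as the
// sparse image under test; the test shaders write residency information into it.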
std::string getOpTypeImageResidency(const ImageType imageType)
{
    std::ostringstream src;

    src << "OpTypeImage %type_uint ";

    switch (imageType)
    {
    case IMAGE_TYPE_1D:
        src << "1D 0 0 0 2 R32ui";
        break;
    case IMAGE_TYPE_1D_ARRAY:
        src << "1D 0 1 0 2 R32ui";
        break;
    case IMAGE_TYPE_2D:
        src << "2D 0 0 0 2 R32ui";
        break;
    case IMAGE_TYPE_2D_ARRAY:
        src << "2D 0 1 0 2 R32ui";
        break;
    case IMAGE_TYPE_3D:
        src << "3D 0 0 0 2 R32ui";
        break;
    case IMAGE_TYPE_CUBE:
        src << "Cube 0 0 0 2 R32ui";
        break;
    case IMAGE_TYPE_CUBE_ARRAY:
        src << "Cube 0 1 0 2 R32ui";
        break;
    default:
        DE_FATAL("Unexpected image type");
        break;
    }

    return src.str();
}

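// Early checks: 64-bit image formats additionally require VK_EXT_shader_image_atomic_int64
// with both shaderImageInt64Atomics and sparseImageInt64Atomics enabled.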
void SparseShaderIntrinsicsInstanceBase::checkSupport(VkImageCreateInfo imageSparseInfo) const
{
    const InstanceInterface &instance = m_context.getInstanceInterface();
    const VkPhysicalDevice physicalDevice = m_context.getPhysicalDevice();

    if (formatIsR64(m_format))
    {
        m_context.requireDeviceFunctionality("VK_EXT_shader_image_atomic_int64");

        if (m_context.getShaderImageAtomicInt64FeaturesEXT().shaderImageInt64Atomics == VK_FALSE)
        {
            TCU_THROW(NotSupportedError, "shaderImageInt64Atomics is not supported");
        }

        if (m_context.getShaderImageAtomicInt64FeaturesEXT().sparseImageInt64Atomics == VK_FALSE)
        {
            TCU_THROW(NotSupportedError, "sparseImageInt64Atomics is not supported by the device");
        }
    }

    // Check if device supports sparse operations for image format
    if (!checkSparseSupportForImageFormat(instance, physicalDevice, imageSparseInfo))
        TCU_THROW(NotSupportedError, "The image format does not support sparse operations");
}

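// Test outline: bind device memory to a subset of the mip levels (plus the mip tail) of a sparse
// image, upload reference data, let the derived test record shader work that copies texel and
// residency information into two output images, then read both back and compare against the
// expected values.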
tcu::TestStatus SparseShaderIntrinsicsInstanceBase::iterate(void)
{
    const InstanceInterface &instance = m_context.getInstanceInterface();
    const VkPhysicalDevice physicalDevice = m_context.getPhysicalDevice();
    VkImageCreateInfo imageSparseInfo;
    VkImageCreateInfo imageTexelsInfo;
    VkImageCreateInfo imageResidencyInfo;
    std::vector<uint32_t> residencyReferenceData;
    std::vector<DeviceMemorySp> deviceMemUniquePtrVec;
    const PlanarFormatDescription formatDescription = getPlanarFormatDescription(m_format);

    imageSparseInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    imageSparseInfo.pNext = DE_NULL;
    imageSparseInfo.flags = VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT | VK_IMAGE_CREATE_SPARSE_BINDING_BIT;
    imageSparseInfo.imageType = mapImageType(m_imageType);
    imageSparseInfo.format = m_format;
    imageSparseInfo.extent = makeExtent3D(getLayerSize(m_imageType, m_imageSize));
    imageSparseInfo.arrayLayers = getNumLayers(m_imageType, m_imageSize);
    imageSparseInfo.samples = VK_SAMPLE_COUNT_1_BIT;
    imageSparseInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
    imageSparseInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    imageSparseInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | imageSparseUsageFlags();
    imageSparseInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    imageSparseInfo.queueFamilyIndexCount = 0u;
    imageSparseInfo.pQueueFamilyIndices = DE_NULL;

    if (m_imageType == IMAGE_TYPE_CUBE || m_imageType == IMAGE_TYPE_CUBE_ARRAY)
    {
        imageSparseInfo.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
    }

    checkSupport(imageSparseInfo);

    {
        // Assign maximum allowed mipmap levels to image
        VkImageFormatProperties imageFormatProperties;
        if (instance.getPhysicalDeviceImageFormatProperties(
                physicalDevice, imageSparseInfo.format, imageSparseInfo.imageType, imageSparseInfo.tiling,
                imageSparseInfo.usage, imageSparseInfo.flags, &imageFormatProperties) == VK_ERROR_FORMAT_NOT_SUPPORTED)
        {
            TCU_THROW(NotSupportedError, "Image format does not support sparse operations");
        }

        imageSparseInfo.mipLevels =
            getMipmapCount(m_format, formatDescription, imageFormatProperties, imageSparseInfo.extent);
    }

    // Create image to store texels copied from sparse image
    imageTexelsInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    imageTexelsInfo.pNext = DE_NULL;
    imageTexelsInfo.flags = 0u;
    imageTexelsInfo.imageType = imageSparseInfo.imageType;
    imageTexelsInfo.format = imageSparseInfo.format;
    imageTexelsInfo.extent = imageSparseInfo.extent;
    imageTexelsInfo.arrayLayers = imageSparseInfo.arrayLayers;
    imageTexelsInfo.mipLevels = imageSparseInfo.mipLevels;
    imageTexelsInfo.samples = imageSparseInfo.samples;
    imageTexelsInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
    imageTexelsInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    imageTexelsInfo.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | imageOutputUsageFlags();
    imageTexelsInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    imageTexelsInfo.queueFamilyIndexCount = 0u;
    imageTexelsInfo.pQueueFamilyIndices = DE_NULL;

    if (m_imageType == IMAGE_TYPE_CUBE || m_imageType == IMAGE_TYPE_CUBE_ARRAY)
    {
        imageTexelsInfo.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
    }

    checkImageSupport(instance, physicalDevice, imageTexelsInfo);

    {
        // Create logical device supporting both sparse and compute/graphics queues
        QueueRequirementsVec queueRequirements;
        queueRequirements.push_back(QueueRequirements(VK_QUEUE_SPARSE_BINDING_BIT, 1u));
        queueRequirements.push_back(QueueRequirements(getQueueFlags(), 1u));

        createDeviceSupportingQueues(queueRequirements, formatIsR64(m_format));
    }

    // Create queues supporting sparse binding operations and compute/graphics operations
    const DeviceInterface &deviceInterface = getDeviceInterface();
    const Queue &sparseQueue = getQueue(VK_QUEUE_SPARSE_BINDING_BIT, 0);
    const Queue &extractQueue = getQueue(getQueueFlags(), 0);

    // Create sparse image
    const Unique<VkImage> imageSparse(createImage(deviceInterface, getDevice(), &imageSparseInfo));

    // Create sparse image memory bind semaphore
    const Unique<VkSemaphore> memoryBindSemaphore(createSemaphore(deviceInterface, getDevice()));

    std::vector<VkSparseImageMemoryRequirements> sparseMemoryRequirements;

    uint32_t imageSparseSizeInBytes = 0;
    uint32_t imageSizeInPixels = 0;

    for (uint32_t planeNdx = 0; planeNdx < formatDescription.numPlanes; ++planeNdx)
    {
        for (uint32_t mipmapNdx = 0; mipmapNdx < imageSparseInfo.mipLevels; ++mipmapNdx)
        {
            imageSparseSizeInBytes +=
                getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, formatDescription,
                                            planeNdx, mipmapNdx, BUFFER_IMAGE_COPY_OFFSET_GRANULARITY);
            imageSizeInPixels += getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers,
                                                             formatDescription, planeNdx, mipmapNdx) /
                                 formatDescription.planes[planeNdx].elementSizeBytes;
        }
    }

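    // Start with every pixel marked as not resident; pixels belonging to bound mip levels
    // and the mip tail are overwritten with the "bound" marker while binding memory below.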
    residencyReferenceData.assign(imageSizeInPixels, MEMORY_BLOCK_NOT_BOUND_VALUE);

    {
        // Get sparse image general memory requirements
        const VkMemoryRequirements imageMemoryRequirements =
            getImageMemoryRequirements(deviceInterface, getDevice(), *imageSparse);

        // Check if required image memory size does not exceed device limits
        if (imageMemoryRequirements.size >
            getPhysicalDeviceProperties(instance, physicalDevice).limits.sparseAddressSpaceSize)
            TCU_THROW(NotSupportedError, "Required memory size for sparse resource exceeds device limits");

        DE_ASSERT((imageMemoryRequirements.size % imageMemoryRequirements.alignment) == 0);

        const uint32_t memoryType =
            findMatchingMemoryType(instance, physicalDevice, imageMemoryRequirements, MemoryRequirement::Any);

        if (memoryType == NO_MATCH_FOUND)
            return tcu::TestStatus::fail("No matching memory type found");

        // Get sparse image sparse memory requirements
        sparseMemoryRequirements = getImageSparseMemoryRequirements(deviceInterface, getDevice(), *imageSparse);

        DE_ASSERT(sparseMemoryRequirements.size() != 0);

        const uint32_t metadataAspectIndex =
            getSparseAspectRequirementsIndex(sparseMemoryRequirements, VK_IMAGE_ASPECT_METADATA_BIT);
        uint32_t pixelOffset = 0u;
        std::vector<VkSparseImageMemoryBind> imageResidencyMemoryBinds;
        std::vector<VkSparseMemoryBind> imageMipTailBinds;

        for (uint32_t planeNdx = 0; planeNdx < formatDescription.numPlanes; ++planeNdx)
        {
            const VkImageAspectFlags aspect =
                (formatDescription.numPlanes > 1) ? getPlaneAspect(planeNdx) : VK_IMAGE_ASPECT_COLOR_BIT;
            const uint32_t aspectIndex = getSparseAspectRequirementsIndex(sparseMemoryRequirements, aspect);

            if (aspectIndex == NO_MATCH_FOUND)
                TCU_THROW(NotSupportedError, "Image aspect not supported");

            VkSparseImageMemoryRequirements aspectRequirements = sparseMemoryRequirements[aspectIndex];

            DE_ASSERT((aspectRequirements.imageMipTailSize % imageMemoryRequirements.alignment) == 0);

            VkExtent3D imageGranularity = aspectRequirements.formatProperties.imageGranularity;

            // Bind memory for each mipmap level
            for (uint32_t mipmapNdx = 0; mipmapNdx < aspectRequirements.imageMipTailFirstLod; ++mipmapNdx)
            {
                const uint32_t mipLevelSizeInPixels =
                    getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, formatDescription,
                                                planeNdx, mipmapNdx) /
                    formatDescription.planes[planeNdx].elementSizeBytes;

                if (mipmapNdx % MEMORY_BLOCK_TYPE_COUNT == MEMORY_BLOCK_NOT_BOUND)
                {
                    pixelOffset += mipLevelSizeInPixels;
                    continue;
                }

                for (uint32_t pixelNdx = 0u; pixelNdx < mipLevelSizeInPixels; ++pixelNdx)
                {
                    residencyReferenceData[pixelOffset + pixelNdx] = MEMORY_BLOCK_BOUND_VALUE;
                }

                pixelOffset += mipLevelSizeInPixels;

                for (uint32_t layerNdx = 0; layerNdx < imageSparseInfo.arrayLayers; ++layerNdx)
                {
                    const VkExtent3D mipExtent =
                        getPlaneExtent(formatDescription, imageSparseInfo.extent, planeNdx, mipmapNdx);
                    const tcu::UVec3 sparseBlocks = alignedDivide(mipExtent, imageGranularity);
                    const uint32_t numSparseBlocks = sparseBlocks.x() * sparseBlocks.y() * sparseBlocks.z();
                    const VkImageSubresource subresource = {aspect, mipmapNdx, layerNdx};

                    const VkSparseImageMemoryBind imageMemoryBind = makeSparseImageMemoryBind(
                        deviceInterface, getDevice(), imageMemoryRequirements.alignment * numSparseBlocks, memoryType,
                        subresource, makeOffset3D(0u, 0u, 0u), mipExtent);

                    deviceMemUniquePtrVec.push_back(makeVkSharedPtr(
                        Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMemoryBind.memory),
                                             Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));

                    imageResidencyMemoryBinds.push_back(imageMemoryBind);
                }
            }

            if (aspectRequirements.imageMipTailFirstLod < imageSparseInfo.mipLevels)
            {
                if (aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT)
                {
                    const VkSparseMemoryBind imageMipTailMemoryBind =
                        makeSparseMemoryBind(deviceInterface, getDevice(), aspectRequirements.imageMipTailSize,
                                             memoryType, aspectRequirements.imageMipTailOffset);

                    deviceMemUniquePtrVec.push_back(makeVkSharedPtr(
                        Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMipTailMemoryBind.memory),
                                             Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));

                    imageMipTailBinds.push_back(imageMipTailMemoryBind);
                }
                else
                {
                    for (uint32_t layerNdx = 0; layerNdx < imageSparseInfo.arrayLayers; ++layerNdx)
                    {
                        const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(
                            deviceInterface, getDevice(), aspectRequirements.imageMipTailSize, memoryType,
                            aspectRequirements.imageMipTailOffset + layerNdx * aspectRequirements.imageMipTailStride);

                        deviceMemUniquePtrVec.push_back(makeVkSharedPtr(
                            Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMipTailMemoryBind.memory),
                                                 Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));

                        imageMipTailBinds.push_back(imageMipTailMemoryBind);
                    }
                }

                for (uint32_t pixelNdx = pixelOffset; pixelNdx < residencyReferenceData.size(); ++pixelNdx)
                {
                    residencyReferenceData[pixelNdx] = MEMORY_BLOCK_BOUND_VALUE;
                }
            }
        }

        // Metadata
        if (metadataAspectIndex != NO_MATCH_FOUND)
        {
            const VkSparseImageMemoryRequirements metadataAspectRequirements =
                sparseMemoryRequirements[metadataAspectIndex];

            const uint32_t metadataBindCount =
                (metadataAspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT ?
                     1u :
                     imageSparseInfo.arrayLayers);
            for (uint32_t bindNdx = 0u; bindNdx < metadataBindCount; ++bindNdx)
            {
                const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(
                    deviceInterface, getDevice(), metadataAspectRequirements.imageMipTailSize, memoryType,
                    metadataAspectRequirements.imageMipTailOffset +
                        bindNdx * metadataAspectRequirements.imageMipTailStride,
                    VK_SPARSE_MEMORY_BIND_METADATA_BIT);

                deviceMemUniquePtrVec.push_back(makeVkSharedPtr(
                    Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMipTailMemoryBind.memory),
                                         Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));

                imageMipTailBinds.push_back(imageMipTailMemoryBind);
            }
        }

        VkBindSparseInfo bindSparseInfo = {
            VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, //VkStructureType sType;
            DE_NULL, //const void* pNext;
            0u, //uint32_t waitSemaphoreCount;
            DE_NULL, //const VkSemaphore* pWaitSemaphores;
            0u, //uint32_t bufferBindCount;
            DE_NULL, //const VkSparseBufferMemoryBindInfo* pBufferBinds;
            0u, //uint32_t imageOpaqueBindCount;
            DE_NULL, //const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds;
            0u, //uint32_t imageBindCount;
            DE_NULL, //const VkSparseImageMemoryBindInfo* pImageBinds;
            1u, //uint32_t signalSemaphoreCount;
            &memoryBindSemaphore.get() //const VkSemaphore* pSignalSemaphores;
        };

        VkSparseImageMemoryBindInfo imageResidencyBindInfo;
        VkSparseImageOpaqueMemoryBindInfo imageMipTailBindInfo;

        if (imageResidencyMemoryBinds.size() > 0)
        {
            imageResidencyBindInfo.image = *imageSparse;
            imageResidencyBindInfo.bindCount = static_cast<uint32_t>(imageResidencyMemoryBinds.size());
            imageResidencyBindInfo.pBinds = imageResidencyMemoryBinds.data();

            bindSparseInfo.imageBindCount = 1u;
            bindSparseInfo.pImageBinds = &imageResidencyBindInfo;
        }

        if (imageMipTailBinds.size() > 0)
        {
            imageMipTailBindInfo.image = *imageSparse;
            imageMipTailBindInfo.bindCount = static_cast<uint32_t>(imageMipTailBinds.size());
            imageMipTailBindInfo.pBinds = imageMipTailBinds.data();

            bindSparseInfo.imageOpaqueBindCount = 1u;
            bindSparseInfo.pImageOpaqueBinds = &imageMipTailBindInfo;
        }

        // Submit sparse bind commands for execution
        VK_CHECK(deviceInterface.queueBindSparse(sparseQueue.queueHandle, 1u, &bindSparseInfo, DE_NULL));
    }

    const Unique<VkImage> imageTexels(createImage(deviceInterface, getDevice(), &imageTexelsInfo));
    const de::UniquePtr<Allocation> imageTexelsAlloc(
        bindImage(deviceInterface, getDevice(), getAllocator(), *imageTexels, MemoryRequirement::Any));

    // Create image to store residency info copied from sparse image
    imageResidencyInfo = imageTexelsInfo;
    imageResidencyInfo.format = mapTextureFormat(m_residencyFormat);

    {
        VkImageFormatProperties imageFormatProperties;
        if (instance.getPhysicalDeviceImageFormatProperties(physicalDevice, imageResidencyInfo.format,
                                                            imageResidencyInfo.imageType, imageResidencyInfo.tiling,
                                                            imageResidencyInfo.usage, imageResidencyInfo.flags,
                                                            &imageFormatProperties) == VK_ERROR_FORMAT_NOT_SUPPORTED)
        {
            TCU_THROW(NotSupportedError, "Image format not supported for its usage");
        }
    }

    const Unique<VkImage> imageResidency(createImage(deviceInterface, getDevice(), &imageResidencyInfo));
    const de::UniquePtr<Allocation> imageResidencyAlloc(
        bindImage(deviceInterface, getDevice(), getAllocator(), *imageResidency, MemoryRequirement::Any));

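    // One buffer <-> image copy region per plane and mip level, laid out back to back in the staging buffers.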
    std::vector<VkBufferImageCopy> bufferImageSparseCopy(formatDescription.numPlanes * imageSparseInfo.mipLevels);

    {
        uint32_t bufferOffset = 0u;
        for (uint32_t planeNdx = 0; planeNdx < formatDescription.numPlanes; ++planeNdx)
        {
            const VkImageAspectFlags aspect =
                (formatDescription.numPlanes > 1) ? getPlaneAspect(planeNdx) : VK_IMAGE_ASPECT_COLOR_BIT;

            for (uint32_t mipmapNdx = 0; mipmapNdx < imageSparseInfo.mipLevels; ++mipmapNdx)
            {
                bufferImageSparseCopy[planeNdx * imageSparseInfo.mipLevels + mipmapNdx] = {
                    bufferOffset, // VkDeviceSize bufferOffset;
                    0u, // uint32_t bufferRowLength;
                    0u, // uint32_t bufferImageHeight;
                    makeImageSubresourceLayers(
                        aspect, mipmapNdx, 0u,
                        imageSparseInfo.arrayLayers), // VkImageSubresourceLayers imageSubresource;
                    makeOffset3D(0, 0, 0), // VkOffset3D imageOffset;
                    vk::getPlaneExtent(formatDescription, imageSparseInfo.extent, planeNdx,
                                       mipmapNdx) // VkExtent3D imageExtent;
                };
                bufferOffset +=
                    getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, formatDescription,
                                                planeNdx, mipmapNdx, BUFFER_IMAGE_COPY_OFFSET_GRANULARITY);
            }
        }
    }

    // Create command buffer for compute and transfer operations
    const Unique<VkCommandPool> commandPool(
        makeCommandPool(deviceInterface, getDevice(), extractQueue.queueFamilyIndex));
    const Unique<VkCommandBuffer> commandBuffer(
        allocateCommandBuffer(deviceInterface, getDevice(), *commandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));

    // Start recording commands
    beginCommandBuffer(deviceInterface, *commandBuffer);

    // Create input buffer
    const VkBufferCreateInfo inputBufferCreateInfo =
        makeBufferCreateInfo(imageSparseSizeInBytes, VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
    const Unique<VkBuffer> inputBuffer(createBuffer(deviceInterface, getDevice(), &inputBufferCreateInfo));
    const de::UniquePtr<Allocation> inputBufferAlloc(
        bindBuffer(deviceInterface, getDevice(), getAllocator(), *inputBuffer, MemoryRequirement::HostVisible));

    // Fill input buffer with reference data
    std::vector<uint8_t> referenceData(imageSparseSizeInBytes);

    for (uint32_t planeNdx = 0; planeNdx < formatDescription.numPlanes; ++planeNdx)
    {
        for (uint32_t mipmapNdx = 0u; mipmapNdx < imageSparseInfo.mipLevels; ++mipmapNdx)
        {
            const uint32_t mipLevelSizeinBytes = getImageMipLevelSizeInBytes(
                imageSparseInfo.extent, imageSparseInfo.arrayLayers, formatDescription, planeNdx, mipmapNdx);
            const uint32_t bufferOffset = static_cast<uint32_t>(bufferImageSparseCopy[mipmapNdx].bufferOffset);

            if (formatIsR64(m_format) && (m_function == SPARSE_SAMPLE_EXPLICIT_LOD ||
                                          m_function == SPARSE_SAMPLE_IMPLICIT_LOD || m_function == SPARSE_GATHER))
            {
                for (uint32_t byteNdx = 0u; byteNdx < mipLevelSizeinBytes / 8; byteNdx += 8)
                {
                    void *prtData = &referenceData[bufferOffset + byteNdx];
                    *(static_cast<uint64_t *>(prtData)) = (uint64_t)((mipmapNdx + byteNdx) % 0x0FFFFFFF);
                }
            }
            else
            {
                for (uint32_t byteNdx = 0u; byteNdx < mipLevelSizeinBytes; ++byteNdx)
                {
                    referenceData[bufferOffset + byteNdx] = (uint8_t)((mipmapNdx + byteNdx) % 127u);
                }
            }
        }
    }

    deMemcpy(inputBufferAlloc->getHostPtr(), referenceData.data(), imageSparseSizeInBytes);
    flushAlloc(deviceInterface, getDevice(), *inputBufferAlloc);

    {
        // Prepare input buffer for data transfer operation
        const VkBufferMemoryBarrier inputBufferBarrier = makeBufferMemoryBarrier(
            VK_ACCESS_HOST_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT, *inputBuffer, 0u, imageSparseSizeInBytes);

        deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
                                           0u, 0u, DE_NULL, 1u, &inputBufferBarrier, 0u, DE_NULL);
    }

    {
        // Prepare sparse image for data transfer operation
        std::vector<VkImageMemoryBarrier> imageSparseTransferDstBarriers;
        for (uint32_t planeNdx = 0; planeNdx < formatDescription.numPlanes; ++planeNdx)
        {
            const VkImageAspectFlags aspect =
                (formatDescription.numPlanes > 1) ? getPlaneAspect(planeNdx) : VK_IMAGE_ASPECT_COLOR_BIT;

            imageSparseTransferDstBarriers.emplace_back(makeImageMemoryBarrier(
                0u, VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                *imageSparse,
                makeImageSubresourceRange(aspect, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers),
                sparseQueue.queueFamilyIndex != extractQueue.queueFamilyIndex ? sparseQueue.queueFamilyIndex :
                                                                                VK_QUEUE_FAMILY_IGNORED,
                sparseQueue.queueFamilyIndex != extractQueue.queueFamilyIndex ? extractQueue.queueFamilyIndex :
                                                                                VK_QUEUE_FAMILY_IGNORED));
        }
        deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
                                           VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL,
                                           static_cast<uint32_t>(imageSparseTransferDstBarriers.size()),
                                           imageSparseTransferDstBarriers.data());
    }

    // Copy reference data from input buffer to sparse image
    deviceInterface.cmdCopyBufferToImage(
        *commandBuffer, *inputBuffer, *imageSparse, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
        static_cast<uint32_t>(bufferImageSparseCopy.size()), bufferImageSparseCopy.data());

    recordCommands(*commandBuffer, imageSparseInfo, *imageSparse, *imageTexels, *imageResidency);

    const VkBufferCreateInfo bufferTexelsCreateInfo =
        makeBufferCreateInfo(imageSparseSizeInBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
    const Unique<VkBuffer> bufferTexels(createBuffer(deviceInterface, getDevice(), &bufferTexelsCreateInfo));
    const de::UniquePtr<Allocation> bufferTexelsAlloc(
        bindBuffer(deviceInterface, getDevice(), getAllocator(), *bufferTexels, MemoryRequirement::HostVisible));

    // Copy data from texels image to buffer
    deviceInterface.cmdCopyImageToBuffer(*commandBuffer, *imageTexels, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                         *bufferTexels, static_cast<uint32_t>(bufferImageSparseCopy.size()),
                                         bufferImageSparseCopy.data());

    const uint32_t imageResidencySizeInBytes =
        getImageSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_residencyFormat,
                            imageSparseInfo.mipLevels, BUFFER_IMAGE_COPY_OFFSET_GRANULARITY);

    const VkBufferCreateInfo bufferResidencyCreateInfo =
        makeBufferCreateInfo(imageResidencySizeInBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
    const Unique<VkBuffer> bufferResidency(createBuffer(deviceInterface, getDevice(), &bufferResidencyCreateInfo));
    const de::UniquePtr<Allocation> bufferResidencyAlloc(
        bindBuffer(deviceInterface, getDevice(), getAllocator(), *bufferResidency, MemoryRequirement::HostVisible));

    // Copy data from residency image to buffer
    std::vector<VkBufferImageCopy> bufferImageResidencyCopy(formatDescription.numPlanes * imageSparseInfo.mipLevels);

    {
        uint32_t bufferOffset = 0u;
        for (uint32_t planeNdx = 0u; planeNdx < formatDescription.numPlanes; ++planeNdx)
        {
            const VkImageAspectFlags aspect =
                (formatDescription.numPlanes > 1) ? getPlaneAspect(planeNdx) : VK_IMAGE_ASPECT_COLOR_BIT;

            for (uint32_t mipmapNdx = 0u; mipmapNdx < imageSparseInfo.mipLevels; ++mipmapNdx)
            {
                bufferImageResidencyCopy[planeNdx * imageSparseInfo.mipLevels + mipmapNdx] = {
                    bufferOffset, // VkDeviceSize bufferOffset;
                    0u, // uint32_t bufferRowLength;
                    0u, // uint32_t bufferImageHeight;
                    makeImageSubresourceLayers(
                        aspect, mipmapNdx, 0u,
                        imageSparseInfo.arrayLayers), // VkImageSubresourceLayers imageSubresource;
                    makeOffset3D(0, 0, 0), // VkOffset3D imageOffset;
                    vk::getPlaneExtent(formatDescription, imageSparseInfo.extent, planeNdx,
                                       mipmapNdx) // VkExtent3D imageExtent;
                };
                bufferOffset +=
                    getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_residencyFormat,
                                                mipmapNdx, BUFFER_IMAGE_COPY_OFFSET_GRANULARITY);
            }
        }
    }

    deviceInterface.cmdCopyImageToBuffer(*commandBuffer, *imageResidency, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                         *bufferResidency, static_cast<uint32_t>(bufferImageResidencyCopy.size()),
                                         bufferImageResidencyCopy.data());

    {
        VkBufferMemoryBarrier bufferOutputHostReadBarriers[2];

        bufferOutputHostReadBarriers[0] = makeBufferMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT,
                                                                  *bufferTexels, 0u, imageSparseSizeInBytes);

        bufferOutputHostReadBarriers[1] = makeBufferMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT,
                                                                  *bufferResidency, 0u, imageResidencySizeInBytes);

        deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT,
                                           0u, 0u, DE_NULL, 2u, bufferOutputHostReadBarriers, 0u, DE_NULL);
    }

    // End recording commands
    endCommandBuffer(deviceInterface, *commandBuffer);

    const VkPipelineStageFlags stageBits[] = {VK_PIPELINE_STAGE_TRANSFER_BIT};

    // Submit commands for execution and wait for completion
    submitCommandsAndWait(deviceInterface, getDevice(), extractQueue.queueHandle, *commandBuffer, 1u,
                          &memoryBindSemaphore.get(), stageBits);

    // Wait for sparse queue to become idle
    deviceInterface.queueWaitIdle(sparseQueue.queueHandle);

    // Retrieve data from residency buffer to host memory
    invalidateAlloc(deviceInterface, getDevice(), *bufferResidencyAlloc);

    const uint32_t *bufferResidencyData = static_cast<const uint32_t *>(bufferResidencyAlloc->getHostPtr());

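    // Compare the residency values written by the shader against the reference pattern
    // recorded while binding memory (bound vs. not bound mip levels).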
    uint32_t pixelOffsetNotAligned = 0u;
    for (uint32_t planeNdx = 0; planeNdx < formatDescription.numPlanes; ++planeNdx)
    {
        for (uint32_t mipmapNdx = 0; mipmapNdx < imageSparseInfo.mipLevels; ++mipmapNdx)
        {
            const uint32_t mipLevelSizeInBytes = getImageMipLevelSizeInBytes(
                imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_residencyFormat, mipmapNdx);
            const uint32_t pixelOffsetAligned =
                static_cast<uint32_t>(
                    bufferImageResidencyCopy[planeNdx * imageSparseInfo.mipLevels + mipmapNdx].bufferOffset) /
                tcu::getPixelSize(m_residencyFormat);

            if (deMemCmp(&bufferResidencyData[pixelOffsetAligned], &residencyReferenceData[pixelOffsetNotAligned],
                         mipLevelSizeInBytes) != 0)
                return tcu::TestStatus::fail("Failed");

            pixelOffsetNotAligned += mipLevelSizeInBytes / tcu::getPixelSize(m_residencyFormat);
        }
    }
    // Retrieve data from texels buffer to host memory
    invalidateAlloc(deviceInterface, getDevice(), *bufferTexelsAlloc);

    const uint8_t *bufferTexelsData = static_cast<const uint8_t *>(bufferTexelsAlloc->getHostPtr());

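    // Texels in bound mip levels (and the mip tail) must match the uploaded reference data;
    // unbound levels must read back as zero when residencyNonResidentStrict is supported.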
    for (uint32_t planeNdx = 0; planeNdx < formatDescription.numPlanes; ++planeNdx)
    {
        const VkImageAspectFlags aspect =
            (formatDescription.numPlanes > 1) ? getPlaneAspect(planeNdx) : VK_IMAGE_ASPECT_COLOR_BIT;
        const uint32_t aspectIndex = getSparseAspectRequirementsIndex(sparseMemoryRequirements, aspect);

        if (aspectIndex == NO_MATCH_FOUND)
            TCU_THROW(NotSupportedError, "Image aspect not supported");

        VkSparseImageMemoryRequirements aspectRequirements = sparseMemoryRequirements[aspectIndex];

        for (uint32_t mipmapNdx = 0; mipmapNdx < imageSparseInfo.mipLevels; ++mipmapNdx)
        {
            const uint32_t mipLevelSizeInBytes = getImageMipLevelSizeInBytes(
                imageSparseInfo.extent, imageSparseInfo.arrayLayers, formatDescription, planeNdx, mipmapNdx);
            const uint32_t bufferOffset = static_cast<uint32_t>(
                bufferImageSparseCopy[planeNdx * imageSparseInfo.mipLevels + mipmapNdx].bufferOffset);

            if (mipmapNdx < aspectRequirements.imageMipTailFirstLod)
            {
                if (mipmapNdx % MEMORY_BLOCK_TYPE_COUNT == MEMORY_BLOCK_BOUND)
                {
                    if (deMemCmp(&bufferTexelsData[bufferOffset], &referenceData[bufferOffset], mipLevelSizeInBytes) !=
                        0)
                        return tcu::TestStatus::fail("Failed");
                }
                else if (getPhysicalDeviceProperties(instance, physicalDevice)
                             .sparseProperties.residencyNonResidentStrict)
                {
                    std::vector<uint8_t> zeroData;
                    zeroData.assign(mipLevelSizeInBytes, 0u);

                    if (deMemCmp(&bufferTexelsData[bufferOffset], zeroData.data(), mipLevelSizeInBytes) != 0)
                        return tcu::TestStatus::fail("Failed");
                }
            }
            else
            {
                if (deMemCmp(&bufferTexelsData[bufferOffset], &referenceData[bufferOffset], mipLevelSizeInBytes) != 0)
                    return tcu::TestStatus::fail("Failed");
            }
        }
    }

    return tcu::TestStatus::pass("Passed");
}

} // namespace sparse
} // namespace vkt