/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2016 The Khronos Group Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file  vktSparseResourcesMipmapSparseResidency.cpp
 * \brief Sparse partially resident images with mipmaps tests
 *//*--------------------------------------------------------------------*/

#include "vktSparseResourcesMipmapSparseResidency.hpp"
#include "vktSparseResourcesTestsUtil.hpp"
#include "vktSparseResourcesBase.hpp"
#include "vktTestCaseUtil.hpp"

#include "vkDefs.hpp"
#include "vkRef.hpp"
#include "vkRefUtil.hpp"
#include "vkPlatform.hpp"
#include "vkPrograms.hpp"
#include "vkMemUtil.hpp"
#include "vkBarrierUtil.hpp"
#include "vkBuilderUtil.hpp"
#include "vkImageUtil.hpp"
#include "vkQueryUtil.hpp"
#include "vkTypeUtil.hpp"
#include "vkCmdUtil.hpp"

#include "deUniquePtr.hpp"
#include "deStringUtil.hpp"
#include "tcuTextureUtil.hpp"

#include <sstream>
#include <string>
#include <vector>

using namespace vk;

namespace vkt
{
namespace sparse
{
namespace
{

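// Test plan (summary of the code below): create a sparse residency image with a full mip chain,
// bind device memory to every mip level below the mip tail plus the mip tail itself, upload a
// reference pattern through a staging buffer, read the image back, and compare against the reference.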
class MipmapSparseResidencyCase : public TestCase
{
public:
    MipmapSparseResidencyCase(tcu::TestContext &testCtx, const std::string &name, const ImageType imageType,
                              const tcu::UVec3 &imageSize, const VkFormat format, const bool useDeviceGroups);

    TestInstance *createInstance(Context &context) const;
    virtual void checkSupport(Context &context) const;

private:
    const bool m_useDeviceGroups;
    const ImageType m_imageType;
    const tcu::UVec3 m_imageSize;
    const VkFormat m_format;
};

MipmapSparseResidencyCase::MipmapSparseResidencyCase(tcu::TestContext &testCtx, const std::string &name,
                                                     const ImageType imageType, const tcu::UVec3 &imageSize,
                                                     const VkFormat format, const bool useDeviceGroups)
    : TestCase(testCtx, name)
    , m_useDeviceGroups(useDeviceGroups)
    , m_imageType(imageType)
    , m_imageSize(imageSize)
    , m_format(format)
{
}

void MipmapSparseResidencyCase::checkSupport(Context &context) const
{
    const InstanceInterface &instance = context.getInstanceInterface();
    const VkPhysicalDevice physicalDevice = context.getPhysicalDevice();

    // Check if image size does not exceed device limits
    if (!isImageSizeSupported(instance, physicalDevice, m_imageType, m_imageSize))
        TCU_THROW(NotSupportedError, "Image size not supported for device");

    // Check if device supports sparse operations for image type
    if (!checkSparseSupportForImageType(instance, physicalDevice, m_imageType))
        TCU_THROW(NotSupportedError, "Sparse residency for image type is not supported");

    if (formatIsR64(m_format))
    {
        context.requireDeviceFunctionality("VK_EXT_shader_image_atomic_int64");

        if (context.getShaderImageAtomicInt64FeaturesEXT().sparseImageInt64Atomics == VK_FALSE)
        {
            TCU_THROW(NotSupportedError, "sparseImageInt64Atomics is not supported for device");
        }
    }
}

class MipmapSparseResidencyInstance : public SparseResourcesBaseInstance
{
public:
    MipmapSparseResidencyInstance(Context &context, const ImageType imageType, const tcu::UVec3 &imageSize,
                                  const VkFormat format, const bool useDeviceGroups);

    tcu::TestStatus iterate(void);

private:
    const bool m_useDeviceGroups;
    const ImageType m_imageType;
    const tcu::UVec3 m_imageSize;
    const VkFormat m_format;
};

MipmapSparseResidencyInstance::MipmapSparseResidencyInstance(Context &context, const ImageType imageType,
                                                             const tcu::UVec3 &imageSize, const VkFormat format,
                                                             const bool useDeviceGroups)
    : SparseResourcesBaseInstance(context, useDeviceGroups)
    , m_useDeviceGroups(useDeviceGroups)
    , m_imageType(imageType)
    , m_imageSize(imageSize)
    , m_format(format)
{
}

tcu::TestStatus MipmapSparseResidencyInstance::iterate(void)
{
    const InstanceInterface &instance = m_context.getInstanceInterface();
    {
        // Create logical device supporting both sparse and compute operations
        QueueRequirementsVec queueRequirements;
        queueRequirements.push_back(QueueRequirements(VK_QUEUE_SPARSE_BINDING_BIT, 1u));
        queueRequirements.push_back(QueueRequirements(VK_QUEUE_COMPUTE_BIT, 1u));

        createDeviceSupportingQueues(queueRequirements);
    }

    const VkPhysicalDevice physicalDevice = getPhysicalDevice();
    VkImageCreateInfo imageSparseInfo;
    std::vector<DeviceMemorySp> deviceMemUniquePtrVec;

    const DeviceInterface &deviceInterface = getDeviceInterface();
    const Queue &sparseQueue = getQueue(VK_QUEUE_SPARSE_BINDING_BIT, 0);
    const Queue &computeQueue = getQueue(VK_QUEUE_COMPUTE_BIT, 0);
    const PlanarFormatDescription formatDescription = getPlanarFormatDescription(m_format);

    // Go through all physical devices
    for (uint32_t physDevID = 0; physDevID < m_numPhysicalDevices; physDevID++)
    {
        const uint32_t firstDeviceID = physDevID;
        const uint32_t secondDeviceID = (firstDeviceID + 1) % m_numPhysicalDevices;

        imageSparseInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
        imageSparseInfo.pNext = DE_NULL;
        imageSparseInfo.flags = VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT | VK_IMAGE_CREATE_SPARSE_BINDING_BIT;
        imageSparseInfo.imageType = mapImageType(m_imageType);
        imageSparseInfo.format = m_format;
        imageSparseInfo.extent = makeExtent3D(getLayerSize(m_imageType, m_imageSize));
        imageSparseInfo.arrayLayers = getNumLayers(m_imageType, m_imageSize);
        imageSparseInfo.samples = VK_SAMPLE_COUNT_1_BIT;
        imageSparseInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
        imageSparseInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
        imageSparseInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
        imageSparseInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
        imageSparseInfo.queueFamilyIndexCount = 0u;
        imageSparseInfo.pQueueFamilyIndices = DE_NULL;

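        // Cube and cube-array images are created as 2D-array images in Vulkan;
        // VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT additionally allows cube (or cube-array)
        // views to be created from them.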
        if (m_imageType == IMAGE_TYPE_CUBE || m_imageType == IMAGE_TYPE_CUBE_ARRAY)
        {
            imageSparseInfo.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
        }

        // Check if device supports sparse operations for image format
        if (!checkSparseSupportForImageFormat(instance, physicalDevice, imageSparseInfo))
            TCU_THROW(NotSupportedError, "The image format does not support sparse operations");

        {
            VkImageFormatProperties imageFormatProperties;
            if (instance.getPhysicalDeviceImageFormatProperties(
                    physicalDevice, imageSparseInfo.format, imageSparseInfo.imageType, imageSparseInfo.tiling,
                    imageSparseInfo.usage, imageSparseInfo.flags,
                    &imageFormatProperties) == VK_ERROR_FORMAT_NOT_SUPPORTED)
            {
                TCU_THROW(NotSupportedError, "Image format does not support sparse operations");
            }

            imageSparseInfo.mipLevels =
                getMipmapCount(m_format, formatDescription, imageFormatProperties, imageSparseInfo.extent);
        }
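        // The full mip chain is requested. Per the sparse-residency rules, levels from
        // imageMipTailFirstLod upwards cannot be bound per-block and are backed instead
        // through the opaque mip tail binds set up below.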

        // Create sparse image
        const Unique<VkImage> imageSparse(createImage(deviceInterface, getDevice(), &imageSparseInfo));

        // Create sparse image memory bind semaphore
        const Unique<VkSemaphore> imageMemoryBindSemaphore(createSemaphore(deviceInterface, getDevice()));

        std::vector<VkSparseImageMemoryRequirements> sparseMemoryRequirements;

        {
            // Get sparse image general memory requirements
            const VkMemoryRequirements imageMemoryRequirements =
                getImageMemoryRequirements(deviceInterface, getDevice(), *imageSparse);

            // Check if required image memory size does not exceed device limits
            if (imageMemoryRequirements.size >
                getPhysicalDeviceProperties(instance, physicalDevice).limits.sparseAddressSpaceSize)
                TCU_THROW(NotSupportedError, "Required memory size for sparse resource exceeds device limits");

            DE_ASSERT((imageMemoryRequirements.size % imageMemoryRequirements.alignment) == 0);

            const uint32_t memoryType = findMatchingMemoryType(instance, getPhysicalDevice(secondDeviceID),
                                                               imageMemoryRequirements, MemoryRequirement::Any);

            if (memoryType == NO_MATCH_FOUND)
                return tcu::TestStatus::fail("No matching memory type found");

            if (firstDeviceID != secondDeviceID)
            {
                VkPeerMemoryFeatureFlags peerMemoryFeatureFlags = (VkPeerMemoryFeatureFlags)0;
                const uint32_t heapIndex =
                    getHeapIndexForMemoryType(instance, getPhysicalDevice(secondDeviceID), memoryType);
                deviceInterface.getDeviceGroupPeerMemoryFeatures(getDevice(), heapIndex, firstDeviceID, secondDeviceID,
                                                                 &peerMemoryFeatureFlags);

                if (((peerMemoryFeatureFlags & VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT) == 0) ||
                    ((peerMemoryFeatureFlags & VK_PEER_MEMORY_FEATURE_COPY_DST_BIT) == 0))
                {
                    TCU_THROW(NotSupportedError, "Peer memory does not support COPY_SRC and COPY_DST");
                }
            }

            // Get sparse image sparse memory requirements
            sparseMemoryRequirements = getImageSparseMemoryRequirements(deviceInterface, getDevice(), *imageSparse);
            DE_ASSERT(sparseMemoryRequirements.size() != 0);

            const uint32_t metadataAspectIndex =
                getSparseAspectRequirementsIndex(sparseMemoryRequirements, VK_IMAGE_ASPECT_METADATA_BIT);
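            // If the implementation reports a metadata aspect, its contents are implementation-private
            // and its memory must be bound via opaque binds carrying VK_SPARSE_MEMORY_BIND_METADATA_BIT.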

            std::vector<VkSparseImageMemoryBind> imageResidencyMemoryBinds;
            std::vector<VkSparseMemoryBind> imageMipTailMemoryBinds;

            for (uint32_t planeNdx = 0; planeNdx < formatDescription.numPlanes; ++planeNdx)
            {
                const VkImageAspectFlags aspect =
                    (formatDescription.numPlanes > 1) ? getPlaneAspect(planeNdx) : VK_IMAGE_ASPECT_COLOR_BIT;
                const uint32_t aspectIndex = getSparseAspectRequirementsIndex(sparseMemoryRequirements, aspect);

                if (aspectIndex == NO_MATCH_FOUND)
                    TCU_THROW(NotSupportedError, "Not supported image aspect");

                VkSparseImageMemoryRequirements aspectRequirements = sparseMemoryRequirements[aspectIndex];

                DE_ASSERT((aspectRequirements.imageMipTailSize % imageMemoryRequirements.alignment) == 0);

                VkExtent3D imageGranularity = aspectRequirements.formatProperties.imageGranularity;
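                // imageGranularity is the sparse image block size in texels; each block is backed by
                // imageMemoryRequirements.alignment bytes of memory, so a mip level needs one such
                // "page" per block after rounding its extent up to whole blocks (alignedDivide below).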

                // Bind memory for each layer
                for (uint32_t layerNdx = 0; layerNdx < imageSparseInfo.arrayLayers; ++layerNdx)
                {
                    for (uint32_t mipLevelNdx = 0; mipLevelNdx < aspectRequirements.imageMipTailFirstLod; ++mipLevelNdx)
                    {
                        const VkExtent3D mipExtent =
                            getPlaneExtent(formatDescription, imageSparseInfo.extent, planeNdx, mipLevelNdx);
                        const tcu::UVec3 sparseBlocks = alignedDivide(mipExtent, imageGranularity);
                        const uint32_t numSparseBlocks = sparseBlocks.x() * sparseBlocks.y() * sparseBlocks.z();
                        const VkImageSubresource subresource = {aspect, mipLevelNdx, layerNdx};

                        const VkSparseImageMemoryBind imageMemoryBind = makeSparseImageMemoryBind(
                            deviceInterface, getDevice(), imageMemoryRequirements.alignment * numSparseBlocks,
                            memoryType, subresource, makeOffset3D(0u, 0u, 0u), mipExtent);

                        deviceMemUniquePtrVec.push_back(makeVkSharedPtr(
                            Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMemoryBind.memory),
                                                 Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));

                        imageResidencyMemoryBinds.push_back(imageMemoryBind);
                    }

                    if (!(aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) &&
                        aspectRequirements.imageMipTailFirstLod < imageSparseInfo.mipLevels)
                    {
                        const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(
                            deviceInterface, getDevice(), aspectRequirements.imageMipTailSize, memoryType,
                            aspectRequirements.imageMipTailOffset + layerNdx * aspectRequirements.imageMipTailStride);

                        deviceMemUniquePtrVec.push_back(makeVkSharedPtr(
                            Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMipTailMemoryBind.memory),
                                                 Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));

                        imageMipTailMemoryBinds.push_back(imageMipTailMemoryBind);
                    }

                    // Metadata
                    if (metadataAspectIndex != NO_MATCH_FOUND)
                    {
                        const VkSparseImageMemoryRequirements metadataAspectRequirements =
                            sparseMemoryRequirements[metadataAspectIndex];

                        if (!(metadataAspectRequirements.formatProperties.flags &
                              VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT))
                        {
                            const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(
                                deviceInterface, getDevice(), metadataAspectRequirements.imageMipTailSize, memoryType,
                                metadataAspectRequirements.imageMipTailOffset +
                                    layerNdx * metadataAspectRequirements.imageMipTailStride,
                                VK_SPARSE_MEMORY_BIND_METADATA_BIT);

                            deviceMemUniquePtrVec.push_back(makeVkSharedPtr(
                                Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMipTailMemoryBind.memory),
                                                     Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));

                            imageMipTailMemoryBinds.push_back(imageMipTailMemoryBind);
                        }
                    }
                }

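                // With VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT all array layers share one mip tail
                // region, so a single opaque bind at imageMipTailOffset suffices and imageMipTailStride
                // is not used.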
                if ((aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) &&
                    aspectRequirements.imageMipTailFirstLod < imageSparseInfo.mipLevels)
                {
                    const VkSparseMemoryBind imageMipTailMemoryBind =
                        makeSparseMemoryBind(deviceInterface, getDevice(), aspectRequirements.imageMipTailSize,
                                             memoryType, aspectRequirements.imageMipTailOffset);

                    deviceMemUniquePtrVec.push_back(makeVkSharedPtr(
                        Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMipTailMemoryBind.memory),
                                             Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));

                    imageMipTailMemoryBinds.push_back(imageMipTailMemoryBind);
                }
            }

            // Metadata
            if (metadataAspectIndex != NO_MATCH_FOUND)
            {
                const VkSparseImageMemoryRequirements metadataAspectRequirements =
                    sparseMemoryRequirements[metadataAspectIndex];

                if (metadataAspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT)
                {
                    const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(
                        deviceInterface, getDevice(), metadataAspectRequirements.imageMipTailSize, memoryType,
                        metadataAspectRequirements.imageMipTailOffset, VK_SPARSE_MEMORY_BIND_METADATA_BIT);

                    deviceMemUniquePtrVec.push_back(makeVkSharedPtr(
                        Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMipTailMemoryBind.memory),
                                             Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));

                    imageMipTailMemoryBinds.push_back(imageMipTailMemoryBind);
                }
            }

            const VkDeviceGroupBindSparseInfo devGroupBindSparseInfo = {
                VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO, //VkStructureType sType;
                DE_NULL,                                         //const void* pNext;
                firstDeviceID,                                   //uint32_t resourceDeviceIndex;
                secondDeviceID,                                  //uint32_t memoryDeviceIndex;
            };
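            // In device-group mode this struct is chained into VkBindSparseInfo below so that the image
            // instance on firstDeviceID is backed by memory allocated on secondDeviceID, exercising the
            // peer-memory path checked earlier.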

            VkBindSparseInfo bindSparseInfo = {
                VK_STRUCTURE_TYPE_BIND_SPARSE_INFO,                    //VkStructureType sType;
                m_useDeviceGroups ? &devGroupBindSparseInfo : DE_NULL, //const void* pNext;
                0u,                                                    //uint32_t waitSemaphoreCount;
                DE_NULL,                                               //const VkSemaphore* pWaitSemaphores;
                0u,                                                    //uint32_t bufferBindCount;
                DE_NULL,                                               //const VkSparseBufferMemoryBindInfo* pBufferBinds;
                0u,                                                    //uint32_t imageOpaqueBindCount;
                DE_NULL,                                               //const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds;
                0u,                                                    //uint32_t imageBindCount;
                DE_NULL,                                               //const VkSparseImageMemoryBindInfo* pImageBinds;
                1u,                                                    //uint32_t signalSemaphoreCount;
                &imageMemoryBindSemaphore.get()                        //const VkSemaphore* pSignalSemaphores;
            };

            VkSparseImageMemoryBindInfo imageResidencyBindInfo;
            VkSparseImageOpaqueMemoryBindInfo imageMipTailBindInfo;

            if (imageResidencyMemoryBinds.size() > 0)
            {
                imageResidencyBindInfo.image = *imageSparse;
                imageResidencyBindInfo.bindCount = static_cast<uint32_t>(imageResidencyMemoryBinds.size());
                imageResidencyBindInfo.pBinds = imageResidencyMemoryBinds.data();

                bindSparseInfo.imageBindCount = 1u;
                bindSparseInfo.pImageBinds = &imageResidencyBindInfo;
            }

            if (imageMipTailMemoryBinds.size() > 0)
            {
                imageMipTailBindInfo.image = *imageSparse;
                imageMipTailBindInfo.bindCount = static_cast<uint32_t>(imageMipTailMemoryBinds.size());
                imageMipTailBindInfo.pBinds = imageMipTailMemoryBinds.data();

                bindSparseInfo.imageOpaqueBindCount = 1u;
                bindSparseInfo.pImageOpaqueBinds = &imageMipTailBindInfo;
            }

            // Submit sparse bind commands for execution
            VK_CHECK(deviceInterface.queueBindSparse(sparseQueue.queueHandle, 1u, &bindSparseInfo, DE_NULL));
        }
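
        // imageMemoryBindSemaphore is signaled when the sparse binds complete; the transfer submission
        // at the end of this function waits on it, ordering all copies after the binds even though the
        // sparse and compute queues may be different.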

        uint32_t imageSizeInBytes = 0;

        for (uint32_t planeNdx = 0; planeNdx < formatDescription.numPlanes; ++planeNdx)
            for (uint32_t mipmapNdx = 0; mipmapNdx < imageSparseInfo.mipLevels; ++mipmapNdx)
                imageSizeInBytes +=
                    getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, formatDescription,
                                                planeNdx, mipmapNdx, BUFFER_IMAGE_COPY_OFFSET_GRANULARITY);
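
        // The staging buffers pack every (plane, mip level) region back to back; each region covers all
        // array layers and starts at an offset rounded up to BUFFER_IMAGE_COPY_OFFSET_GRANULARITY, matching
        // the copy regions built below.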

        std::vector<VkBufferImageCopy> bufferImageCopy(formatDescription.numPlanes * imageSparseInfo.mipLevels);
        {
            uint32_t bufferOffset = 0;

            for (uint32_t planeNdx = 0; planeNdx < formatDescription.numPlanes; ++planeNdx)
            {
                const VkImageAspectFlags aspect =
                    (formatDescription.numPlanes > 1) ? getPlaneAspect(planeNdx) : VK_IMAGE_ASPECT_COLOR_BIT;

                for (uint32_t mipmapNdx = 0; mipmapNdx < imageSparseInfo.mipLevels; ++mipmapNdx)
                {
                    bufferImageCopy[planeNdx * imageSparseInfo.mipLevels + mipmapNdx] = {
                        bufferOffset, // VkDeviceSize bufferOffset;
                        0u,           // uint32_t bufferRowLength;
                        0u,           // uint32_t bufferImageHeight;
                        makeImageSubresourceLayers(
                            aspect, mipmapNdx, 0u,
                            imageSparseInfo.arrayLayers), // VkImageSubresourceLayers imageSubresource;
                        makeOffset3D(0, 0, 0),            // VkOffset3D imageOffset;
                        vk::getPlaneExtent(formatDescription, imageSparseInfo.extent, planeNdx,
                                           mipmapNdx) // VkExtent3D imageExtent;
                    };
                    bufferOffset += getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers,
                                                                formatDescription, planeNdx, mipmapNdx,
                                                                BUFFER_IMAGE_COPY_OFFSET_GRANULARITY);
                }
            }
        }

        // Create command buffer for compute and transfer operations
        const Unique<VkCommandPool> commandPool(
            makeCommandPool(deviceInterface, getDevice(), computeQueue.queueFamilyIndex));
        const Unique<VkCommandBuffer> commandBuffer(
            allocateCommandBuffer(deviceInterface, getDevice(), *commandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));

        // Start recording commands
        beginCommandBuffer(deviceInterface, *commandBuffer);

        const VkBufferCreateInfo inputBufferCreateInfo =
            makeBufferCreateInfo(imageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
        const Unique<VkBuffer> inputBuffer(createBuffer(deviceInterface, getDevice(), &inputBufferCreateInfo));
        const de::UniquePtr<Allocation> inputBufferAlloc(
            bindBuffer(deviceInterface, getDevice(), getAllocator(), *inputBuffer, MemoryRequirement::HostVisible));

        std::vector<uint8_t> referenceData(imageSizeInBytes);

        const VkMemoryRequirements imageMemoryRequirements =
            getImageMemoryRequirements(deviceInterface, getDevice(), *imageSparse);

        for (uint32_t valueNdx = 0; valueNdx < imageSizeInBytes; ++valueNdx)
        {
            referenceData[valueNdx] = static_cast<uint8_t>((valueNdx % imageMemoryRequirements.alignment) + 1u);
        }
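
        // The reference byte pattern varies with the offset within a sparse memory page
        // (imageMemoryRequirements.alignment bytes), so misplaced or unwritten page contents
        // are unlikely to match the reference during the comparison at the end.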

        {
            deMemcpy(inputBufferAlloc->getHostPtr(), referenceData.data(), imageSizeInBytes);
            flushAlloc(deviceInterface, getDevice(), *inputBufferAlloc);

            const VkBufferMemoryBarrier inputBufferBarrier = makeBufferMemoryBarrier(
                VK_ACCESS_HOST_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT, *inputBuffer, 0u, imageSizeInBytes);

            deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_HOST_BIT,
                                               VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 1u, &inputBufferBarrier,
                                               0u, DE_NULL);
        }

        {
            std::vector<VkImageMemoryBarrier> imageSparseTransferDstBarriers;

            for (uint32_t planeNdx = 0; planeNdx < formatDescription.numPlanes; ++planeNdx)
            {
                const VkImageAspectFlags aspect =
                    (formatDescription.numPlanes > 1) ? getPlaneAspect(planeNdx) : VK_IMAGE_ASPECT_COLOR_BIT;

                imageSparseTransferDstBarriers.emplace_back(makeImageMemoryBarrier(
                    0u, VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                    *imageSparse,
                    makeImageSubresourceRange(aspect, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers),
                    sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex ? sparseQueue.queueFamilyIndex :
                                                                                    VK_QUEUE_FAMILY_IGNORED,
                    sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex ? computeQueue.queueFamilyIndex :
                                                                                    VK_QUEUE_FAMILY_IGNORED));
            }
            deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
                                               VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL,
                                               static_cast<uint32_t>(imageSparseTransferDstBarriers.size()),
                                               imageSparseTransferDstBarriers.data());
        }

        deviceInterface.cmdCopyBufferToImage(*commandBuffer, *inputBuffer, *imageSparse,
                                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                             static_cast<uint32_t>(bufferImageCopy.size()), &bufferImageCopy[0]);

        {
            std::vector<VkImageMemoryBarrier> imageSparseTransferSrcBarriers;

            for (uint32_t planeNdx = 0; planeNdx < formatDescription.numPlanes; ++planeNdx)
            {
                const VkImageAspectFlags aspect =
                    (formatDescription.numPlanes > 1) ? getPlaneAspect(planeNdx) : VK_IMAGE_ASPECT_COLOR_BIT;

                imageSparseTransferSrcBarriers.emplace_back(makeImageMemoryBarrier(
                    VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                    VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *imageSparse,
                    makeImageSubresourceRange(aspect, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers)));
            }

            deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT,
                                               VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL,
                                               static_cast<uint32_t>(imageSparseTransferSrcBarriers.size()),
                                               imageSparseTransferSrcBarriers.data());
        }

        const VkBufferCreateInfo outputBufferCreateInfo =
            makeBufferCreateInfo(imageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
        const Unique<VkBuffer> outputBuffer(createBuffer(deviceInterface, getDevice(), &outputBufferCreateInfo));
        const de::UniquePtr<Allocation> outputBufferAlloc(
            bindBuffer(deviceInterface, getDevice(), getAllocator(), *outputBuffer, MemoryRequirement::HostVisible));

        deviceInterface.cmdCopyImageToBuffer(*commandBuffer, *imageSparse, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                             *outputBuffer, static_cast<uint32_t>(bufferImageCopy.size()),
                                             bufferImageCopy.data());

        {
            const VkBufferMemoryBarrier outputBufferBarrier = makeBufferMemoryBarrier(
                VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, *outputBuffer, 0u, imageSizeInBytes);

            deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT,
                                               VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, DE_NULL, 1u, &outputBufferBarrier,
                                               0u, DE_NULL);
        }

        // End recording commands
        endCommandBuffer(deviceInterface, *commandBuffer);

        const VkPipelineStageFlags stageBits[] = {VK_PIPELINE_STAGE_TRANSFER_BIT};

        // Submit commands for execution and wait for completion
        submitCommandsAndWait(deviceInterface, getDevice(), computeQueue.queueHandle, *commandBuffer, 1u,
                              &imageMemoryBindSemaphore.get(), stageBits, 0, DE_NULL, m_useDeviceGroups, firstDeviceID);

        // Retrieve data from buffer to host memory
        invalidateAlloc(deviceInterface, getDevice(), *outputBufferAlloc);

        const uint8_t *outputData = static_cast<const uint8_t *>(outputBufferAlloc->getHostPtr());

        // Wait for sparse queue to become idle
        deviceInterface.queueWaitIdle(sparseQueue.queueHandle);

        for (uint32_t planeNdx = 0; planeNdx < formatDescription.numPlanes; ++planeNdx)
        {
            for (uint32_t mipmapNdx = 0; mipmapNdx < imageSparseInfo.mipLevels; ++mipmapNdx)
            {
                const uint32_t mipLevelSizeInBytes = getImageMipLevelSizeInBytes(
                    imageSparseInfo.extent, imageSparseInfo.arrayLayers, formatDescription, planeNdx, mipmapNdx);
                const uint32_t bufferOffset = static_cast<uint32_t>(
                    bufferImageCopy[planeNdx * imageSparseInfo.mipLevels + mipmapNdx].bufferOffset);

                if (deMemCmp(outputData + bufferOffset, &referenceData[bufferOffset], mipLevelSizeInBytes) != 0)
                    return tcu::TestStatus::fail("Failed");
            }
        }
    }
    return tcu::TestStatus::pass("Passed");
}

TestInstance *MipmapSparseResidencyCase::createInstance(Context &context) const
{
    return new MipmapSparseResidencyInstance(context, m_imageType, m_imageSize, m_format, m_useDeviceGroups);
}

} // namespace

tcu::TestCaseGroup *createMipmapSparseResidencyTestsCommon(tcu::TestContext &testCtx,
                                                           de::MovePtr<tcu::TestCaseGroup> testGroup,
                                                           const bool useDeviceGroup = false)
{
    const std::vector<TestImageParameters> imageParameters{
        {IMAGE_TYPE_2D,
         {tcu::UVec3(512u, 256u, 1u), tcu::UVec3(1024u, 128u, 1u), tcu::UVec3(11u, 137u, 1u)},
         getTestFormats(IMAGE_TYPE_2D)},
        {IMAGE_TYPE_2D_ARRAY,
         {tcu::UVec3(512u, 256u, 6u), tcu::UVec3(1024u, 128u, 8u), tcu::UVec3(11u, 137u, 3u)},
         getTestFormats(IMAGE_TYPE_2D_ARRAY)},
        {IMAGE_TYPE_CUBE,
         {tcu::UVec3(256u, 256u, 1u), tcu::UVec3(128u, 128u, 1u), tcu::UVec3(137u, 137u, 1u)},
         getTestFormats(IMAGE_TYPE_CUBE)},
        {IMAGE_TYPE_CUBE_ARRAY,
         {tcu::UVec3(256u, 256u, 6u), tcu::UVec3(128u, 128u, 8u), tcu::UVec3(137u, 137u, 3u)},
         getTestFormats(IMAGE_TYPE_CUBE_ARRAY)},
        {IMAGE_TYPE_3D,
         {tcu::UVec3(256u, 256u, 16u), tcu::UVec3(1024u, 128u, 8u), tcu::UVec3(11u, 137u, 3u)},
         getTestFormats(IMAGE_TYPE_3D)}};

    for (size_t imageTypeNdx = 0; imageTypeNdx < imageParameters.size(); ++imageTypeNdx)
    {
        const ImageType imageType = imageParameters[imageTypeNdx].imageType;
        de::MovePtr<tcu::TestCaseGroup> imageTypeGroup(
            new tcu::TestCaseGroup(testCtx, getImageTypeName(imageType).c_str()));

        for (size_t formatNdx = 0; formatNdx < imageParameters[imageTypeNdx].formats.size(); ++formatNdx)
        {
            VkFormat format = imageParameters[imageTypeNdx].formats[formatNdx].format;
            tcu::UVec3 imageSizeAlignment = getImageSizeAlignment(format);
            de::MovePtr<tcu::TestCaseGroup> formatGroup(
                new tcu::TestCaseGroup(testCtx, getImageFormatID(format).c_str()));

            for (size_t imageSizeNdx = 0; imageSizeNdx < imageParameters[imageTypeNdx].imageSizes.size();
                 ++imageSizeNdx)
            {
                const tcu::UVec3 imageSize = imageParameters[imageTypeNdx].imageSizes[imageSizeNdx];

                // Skip test for images with odd sizes for some YCbCr formats
                if ((imageSize.x() % imageSizeAlignment.x()) != 0)
                    continue;
                if ((imageSize.y() % imageSizeAlignment.y()) != 0)
                    continue;

                std::ostringstream stream;
                stream << imageSize.x() << "_" << imageSize.y() << "_" << imageSize.z();

                formatGroup->addChild(
                    new MipmapSparseResidencyCase(testCtx, stream.str(), imageType, imageSize, format, useDeviceGroup));
            }
            imageTypeGroup->addChild(formatGroup.release());
        }
        testGroup->addChild(imageTypeGroup.release());
    }

    return testGroup.release();
}

tcu::TestCaseGroup *createMipmapSparseResidencyTests(tcu::TestContext &testCtx)
{
    de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "mipmap_sparse_residency"));
    return createMipmapSparseResidencyTestsCommon(testCtx, testGroup);
}

tcu::TestCaseGroup *createDeviceGroupMipmapSparseResidencyTests(tcu::TestContext &testCtx)
{
    de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "device_group_mipmap_sparse_residency"));
    return createMipmapSparseResidencyTestsCommon(testCtx, testGroup, true);
}

} // namespace sparse
} // namespace vkt