/*-------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2015 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Simple memory allocation tests.
 *//*--------------------------------------------------------------------*/

#include "vktMemoryAllocationTests.hpp"

#include "vktTestCaseUtil.hpp"
#include "vktCustomInstancesDevices.hpp"

#include "tcuMaybe.hpp"
#include "tcuResultCollector.hpp"
#include "tcuTestLog.hpp"
#include "tcuPlatform.hpp"
#include "tcuCommandLine.hpp"

#include "vkPlatform.hpp"
#include "vkStrUtil.hpp"
#include "vkRef.hpp"
#include "vkDeviceUtil.hpp"
#include "vkQueryUtil.hpp"
#include "vkRefUtil.hpp"
#include "vkAllocationCallbackUtil.hpp"

#include "deUniquePtr.hpp"
#include "deStringUtil.hpp"
#include "deRandom.hpp"

using tcu::Maybe;
using tcu::TestLog;

using std::string;
using std::vector;

using namespace vk;

namespace vkt
{
namespace memory
{
namespace
{

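// Round a up to the nearest multiple of b, e.g. roundUpToMultiple(10, 4) == 12
// and roundUpToMultiple(12, 4) == 12.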
template <typename T>
T roundUpToMultiple(const T &a, const T &b)
{
    return b * (a / b + (a % b != 0 ? 1 : 0));
}

enum
{
    // The spec-required minimum for maxMemoryAllocationCount is 4096. Use 4000 to
    // leave headroom for allocations made by layers, the loader etc.
    MAX_ALLOCATION_COUNT = 4000
};

enum AllocationMode
{
    ALLOCATION_MODE_DEFAULT,
    ALLOCATION_MODE_DEVICE_GROUP,
    ALLOCATION_MODE_PAGEABLE
};

struct TestConfig
{
    enum Order
    {
        ALLOC_FREE,
        ALLOC_REVERSE_FREE,
        MIXED_ALLOC_FREE,
        ORDER_LAST
    };

    Maybe<VkDeviceSize> memorySize;
    Maybe<float> memoryPercentage;
    uint32_t memoryAllocationCount;
    Order order;
    AllocationMode allocationMode;

    TestConfig(void) : memoryAllocationCount((uint32_t)-1), order(ORDER_LAST), allocationMode(ALLOCATION_MODE_DEFAULT)
    {
    }
};

struct TestConfigRandom
{
    const uint32_t seed;
    const AllocationMode allocationMode;

    TestConfigRandom(const uint32_t _seed, const AllocationMode _allocationMode)
        : seed(_seed)
        , allocationMode(_allocationMode)
    {
    }
};

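// Round value up to the next multiple, e.g. roundUpToNextMultiple(100, 64) == 128
// while roundUpToNextMultiple(128, 64) == 128.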
template <typename T>
T roundUpToNextMultiple(T value, T multiple)
{
    if (value % multiple == 0)
        return value;
    else
        return value + multiple - (value % multiple);
}

class BaseAllocateTestInstance : public TestInstance
{
public:
    BaseAllocateTestInstance(Context &context, AllocationMode allocationMode)
        : TestInstance(context)
        , m_allocationMode(allocationMode)
        , m_subsetAllocationAllowed(false)
        , m_numPhysDevices(1)
        , m_memoryProperties(
              getPhysicalDeviceMemoryProperties(context.getInstanceInterface(), context.getPhysicalDevice()))
        , m_deviceCoherentMemSupported(false)
    {
        if (m_allocationMode == ALLOCATION_MODE_DEVICE_GROUP)
            createDeviceGroup();
        else
            createTestDevice();

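        // Pre-fill the allocation flags chain used for device group allocations; the
        // actual deviceMask is filled in per allocation by the test iterations.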
        m_allocFlagsInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO;
        m_allocFlagsInfo.pNext = DE_NULL;
        m_allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT;
        m_allocFlagsInfo.deviceMask = 0;
    }

    void createTestDevice(void);
    void createDeviceGroup(void);
    const vk::DeviceInterface &getDeviceInterface(void)
    {
        return *m_deviceDriver;
    }
    vk::VkDevice getDevice(void)
    {
        return m_logicalDevice.get();
    }

protected:
    AllocationMode m_allocationMode;
    bool m_subsetAllocationAllowed;
    VkMemoryAllocateFlagsInfo m_allocFlagsInfo;
    uint32_t m_numPhysDevices;
    VkPhysicalDeviceMemoryProperties m_memoryProperties;
    bool m_deviceCoherentMemSupported;

private:
    vk::Move<vk::VkDevice> m_logicalDevice;
#ifndef CTS_USES_VULKANSC
    de::MovePtr<vk::DeviceDriver> m_deviceDriver;
#else
    de::MovePtr<vk::DeviceDriverSC, vk::DeinitDeviceDeleter> m_deviceDriver;
#endif // CTS_USES_VULKANSC
};

void BaseAllocateTestInstance::createTestDevice(void)
{
    const auto &instanceDriver = m_context.getInstanceInterface();
    const VkInstance instance = m_context.getInstance();
    const VkPhysicalDevice physicalDevice =
        chooseDevice(instanceDriver, instance, m_context.getTestContext().getCommandLine());
    const VkPhysicalDeviceFeatures deviceFeatures = getPhysicalDeviceFeatures(instanceDriver, physicalDevice);
    const float queuePriority = 1.0f;
    uint32_t queueFamilyIndex = 0;
    bool protMemSupported = false;
    const bool usePageable = m_allocationMode == ALLOCATION_MODE_PAGEABLE;

    void *pNext = DE_NULL;

    if (usePageable && !m_context.isDeviceFunctionalitySupported("VK_EXT_pageable_device_local_memory"))
        TCU_THROW(NotSupportedError, "VK_EXT_pageable_device_local_memory is not supported");

#ifndef CTS_USES_VULKANSC
    VkPhysicalDevicePageableDeviceLocalMemoryFeaturesEXT pageableDeviceLocalMemoryFeature = {
        VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PAGEABLE_DEVICE_LOCAL_MEMORY_FEATURES_EXT, // VkStructureType sType
        pNext, // const void* pNext
        VK_FALSE, // VkBool32 pageableDeviceLocalMemory;
    };
    pNext = (usePageable) ? &pageableDeviceLocalMemoryFeature : DE_NULL;
#endif // CTS_USES_VULKANSC

    VkPhysicalDeviceProtectedMemoryFeatures protectedMemoryFeature = {
        VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES, // VkStructureType sType
        pNext, // const void* pNext
        VK_FALSE // VkBool32 protectedMemory;
    };
    pNext = &protectedMemoryFeature;

#ifndef CTS_USES_VULKANSC
    VkPhysicalDeviceCoherentMemoryFeaturesAMD coherentMemoryFeatures = {
        VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COHERENT_MEMORY_FEATURES_AMD, // VkStructureType sType
        pNext, // const void* pNext
        VK_FALSE // VkBool32 deviceCoherentMemory;
    };
    if (m_context.isDeviceFunctionalitySupported("VK_AMD_device_coherent_memory"))
        pNext = &coherentMemoryFeatures;
#endif // CTS_USES_VULKANSC
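
    // The feature chain hung off features2.pNext below is, from outermost to innermost:
    // coherentMemoryFeatures (if VK_AMD_device_coherent_memory is supported) ->
    // protectedMemoryFeature -> pageableDeviceLocalMemoryFeature (if pageable
    // allocations were requested).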

    VkPhysicalDeviceFeatures features;
    deMemset(&features, 0, sizeof(vk::VkPhysicalDeviceFeatures));

    VkPhysicalDeviceFeatures2 features2 = {
        VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2, // VkStructureType sType
        pNext, // const void* pNext
        features // VkPhysicalDeviceFeatures features
    };

    // Check if the physical device supports the protected memory feature
    m_context.requireInstanceFunctionality("VK_KHR_get_physical_device_properties2");
    instanceDriver.getPhysicalDeviceFeatures2(physicalDevice, &features2);
    protMemSupported = protectedMemoryFeature.protectedMemory;
#ifndef CTS_USES_VULKANSC
    m_deviceCoherentMemSupported = coherentMemoryFeatures.deviceCoherentMemory;
#endif // CTS_USES_VULKANSC

    VkDeviceQueueCreateFlags queueCreateFlags =
        protMemSupported ? (vk::VkDeviceQueueCreateFlags)vk::VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT : 0u;

#ifndef CTS_USES_VULKANSC
    if (usePageable && !pageableDeviceLocalMemoryFeature.pageableDeviceLocalMemory)
        TCU_FAIL("pageableDeviceLocalMemory feature not supported but VK_EXT_pageable_device_local_memory advertised");

    pageableDeviceLocalMemoryFeature.pageableDeviceLocalMemory = usePageable;
#endif // CTS_USES_VULKANSC

    std::vector<const char *> deviceExtensions;
    if (usePageable)
    {
        deviceExtensions.push_back("VK_EXT_memory_priority");
        deviceExtensions.push_back("VK_EXT_pageable_device_local_memory");
    }

    VkDeviceQueueCreateInfo queueInfo = {
        VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // VkStructureType sType;
        DE_NULL, // const void* pNext;
        queueCreateFlags, // VkDeviceQueueCreateFlags flags;
        queueFamilyIndex, // uint32_t queueFamilyIndex;
        1u, // uint32_t queueCount;
        &queuePriority // const float* pQueuePriorities;
    };

    const VkDeviceCreateInfo deviceInfo = {
        VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // VkStructureType sType;
        (protMemSupported || usePageable || m_deviceCoherentMemSupported) ? &features2 : DE_NULL, // const void* pNext;
        (VkDeviceCreateFlags)0, // VkDeviceCreateFlags flags;
        1u, // uint32_t queueCreateInfoCount;
        &queueInfo, // const VkDeviceQueueCreateInfo* pQueueCreateInfos;
        0u, // uint32_t enabledLayerCount;
        DE_NULL, // const char* const* ppEnabledLayerNames;
        uint32_t(deviceExtensions.size()), // uint32_t enabledExtensionCount;
        (deviceExtensions.empty()) ? DE_NULL : deviceExtensions.data(), // const char* const* ppEnabledExtensionNames;
        (protMemSupported || usePageable || m_deviceCoherentMemSupported) ?
            DE_NULL :
            &deviceFeatures // const VkPhysicalDeviceFeatures* pEnabledFeatures;
    };

    m_logicalDevice =
        createCustomDevice(m_context.getTestContext().getCommandLine().isValidationEnabled(),
                           m_context.getPlatformInterface(), instance, instanceDriver, physicalDevice, &deviceInfo);
#ifndef CTS_USES_VULKANSC
    m_deviceDriver = de::MovePtr<DeviceDriver>(new DeviceDriver(m_context.getPlatformInterface(), instance,
                                                                *m_logicalDevice, m_context.getUsedApiVersion(),
                                                                m_context.getTestContext().getCommandLine()));
#else
    m_deviceDriver = de::MovePtr<DeviceDriverSC, DeinitDeviceDeleter>(
        new DeviceDriverSC(m_context.getPlatformInterface(), instance, *m_logicalDevice,
                           m_context.getTestContext().getCommandLine(), m_context.getResourceInterface(),
                           m_context.getDeviceVulkanSC10Properties(), m_context.getDeviceProperties(),
                           m_context.getUsedApiVersion()),
        vk::DeinitDeviceDeleter(m_context.getResourceInterface().get(), *m_logicalDevice));
#endif // CTS_USES_VULKANSC
}

void BaseAllocateTestInstance::createDeviceGroup(void)
{
    const tcu::CommandLine &cmdLine = m_context.getTestContext().getCommandLine();
    const uint32_t devGroupIdx = cmdLine.getVKDeviceGroupId() - 1;
    const uint32_t physDeviceIdx = cmdLine.getVKDeviceId() - 1;
    const float queuePriority = 1.0f;
    uint32_t queueFamilyIndex = 0;
    const InstanceInterface &instanceDriver = m_context.getInstanceInterface();
    const VkInstance instance = m_context.getInstance();
    std::vector<VkPhysicalDeviceGroupProperties> devGroupProperties =
        enumeratePhysicalDeviceGroups(instanceDriver, instance);
    m_numPhysDevices = devGroupProperties[devGroupIdx].physicalDeviceCount;
    m_subsetAllocationAllowed = devGroupProperties[devGroupIdx].subsetAllocation;
    if (m_numPhysDevices < 2)
        TCU_THROW(NotSupportedError, "Device group allocation tests not supported with 1 physical device");
    std::vector<const char *> deviceExtensions;

    if (!isCoreDeviceExtension(m_context.getUsedApiVersion(), "VK_KHR_device_group"))
        deviceExtensions.push_back("VK_KHR_device_group");

    VkDeviceGroupDeviceCreateInfo deviceGroupInfo = {
        VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO, // sType
        DE_NULL, // pNext
        devGroupProperties[devGroupIdx].physicalDeviceCount, // physicalDeviceCount
        devGroupProperties[devGroupIdx].physicalDevices // physicalDevices
    };

    const VkPhysicalDeviceFeatures deviceFeatures =
        getPhysicalDeviceFeatures(instanceDriver, deviceGroupInfo.pPhysicalDevices[physDeviceIdx]);

    const std::vector<VkQueueFamilyProperties> queueProps = getPhysicalDeviceQueueFamilyProperties(
        instanceDriver, devGroupProperties[devGroupIdx].physicalDevices[physDeviceIdx]);
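    // Pick a compute-capable queue family; if several qualify, the last one found is used.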
    for (size_t queueNdx = 0; queueNdx < queueProps.size(); queueNdx++)
    {
        if (queueProps[queueNdx].queueFlags & VK_QUEUE_COMPUTE_BIT)
            queueFamilyIndex = (uint32_t)queueNdx;
    }

    VkDeviceQueueCreateInfo queueInfo = {
        VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // VkStructureType sType;
        DE_NULL, // const void* pNext;
        (VkDeviceQueueCreateFlags)0u, // VkDeviceQueueCreateFlags flags;
        queueFamilyIndex, // uint32_t queueFamilyIndex;
        1u, // uint32_t queueCount;
        &queuePriority // const float* pQueuePriorities;
    };

    const VkDeviceCreateInfo deviceInfo = {
        VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // VkStructureType sType;
        &deviceGroupInfo, // const void* pNext;
        (VkDeviceCreateFlags)0, // VkDeviceCreateFlags flags;
        1u, // uint32_t queueCreateInfoCount;
        &queueInfo, // const VkDeviceQueueCreateInfo* pQueueCreateInfos;
        0u, // uint32_t enabledLayerCount;
        DE_NULL, // const char* const* ppEnabledLayerNames;
        uint32_t(deviceExtensions.size()), // uint32_t enabledExtensionCount;
        deviceExtensions.empty() ? DE_NULL : &deviceExtensions[0], // const char* const* ppEnabledExtensionNames;
        &deviceFeatures, // const VkPhysicalDeviceFeatures* pEnabledFeatures;
    };

    m_logicalDevice = createCustomDevice(m_context.getTestContext().getCommandLine().isValidationEnabled(),
                                         m_context.getPlatformInterface(), instance, instanceDriver,
                                         deviceGroupInfo.pPhysicalDevices[physDeviceIdx], &deviceInfo);
#ifndef CTS_USES_VULKANSC
    m_deviceDriver = de::MovePtr<DeviceDriver>(new DeviceDriver(m_context.getPlatformInterface(), instance,
                                                                *m_logicalDevice, m_context.getUsedApiVersion(),
                                                                m_context.getTestContext().getCommandLine()));
#else
    m_deviceDriver = de::MovePtr<DeviceDriverSC, DeinitDeviceDeleter>(
        new DeviceDriverSC(m_context.getPlatformInterface(), instance, *m_logicalDevice,
                           m_context.getTestContext().getCommandLine(), m_context.getResourceInterface(),
                           m_context.getDeviceVulkanSC10Properties(), m_context.getDeviceProperties(),
                           m_context.getUsedApiVersion()),
        vk::DeinitDeviceDeleter(m_context.getResourceInterface().get(), *m_logicalDevice));
#endif // CTS_USES_VULKANSC

    m_memoryProperties =
        getPhysicalDeviceMemoryProperties(instanceDriver, deviceGroupInfo.pPhysicalDevices[physDeviceIdx]);
}

class AllocateFreeTestInstance : public BaseAllocateTestInstance
{
public:
    AllocateFreeTestInstance(Context &context, const TestConfig config)
        : BaseAllocateTestInstance(context, config.allocationMode)
        , m_config(config)
        , m_result(m_context.getTestContext().getLog())
        , m_memoryTypeIndex(0)
        , m_memoryLimits(tcu::getMemoryLimits(context.getTestContext().getPlatform()))
    {
        DE_ASSERT(!!m_config.memorySize != !!m_config.memoryPercentage);
    }

    tcu::TestStatus iterate(void);

private:
    const TestConfig m_config;
    tcu::ResultCollector m_result;
    uint32_t m_memoryTypeIndex;
    const tcu::PlatformMemoryLimits m_memoryLimits;
};

tcu::TestStatus AllocateFreeTestInstance::iterate(void)
{
    TestLog &log = m_context.getTestContext().getLog();
    const VkDevice device = getDevice();
    const DeviceInterface &vkd = getDeviceInterface();
    VkMemoryRequirements memReqs;
    const uint32_t queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
    VkBufferCreateFlags createFlags = (vk::VkBufferCreateFlagBits)0u;
    VkBufferUsageFlags usageFlags = vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    VkSharingMode sharingMode = vk::VK_SHARING_MODE_EXCLUSIVE;
    Move<VkBuffer> buffer;

    if ((m_memoryProperties.memoryTypes[m_memoryTypeIndex].propertyFlags & vk::VK_MEMORY_PROPERTY_PROTECTED_BIT) ==
        vk::VK_MEMORY_PROPERTY_PROTECTED_BIT)
    {
        createFlags |= vk::VK_BUFFER_CREATE_PROTECTED_BIT;
    }

    DE_ASSERT(m_config.memoryAllocationCount <= MAX_ALLOCATION_COUNT);

    if (m_memoryTypeIndex == 0)
    {
        log << TestLog::Message << "Memory allocation count: " << m_config.memoryAllocationCount << TestLog::EndMessage;
        log << TestLog::Message << "Single allocation size: "
            << (m_config.memorySize ?
                    de::toString(*m_config.memorySize) :
                    de::toString(100.0f * (*m_config.memoryPercentage)) + " percent of the heap size.")
            << TestLog::EndMessage;

        if (m_config.order == TestConfig::ALLOC_REVERSE_FREE)
            log << TestLog::Message << "Memory is freed in reverse order." << TestLog::EndMessage;
        else if (m_config.order == TestConfig::ALLOC_FREE)
            log << TestLog::Message << "Memory is freed in the same order as allocated." << TestLog::EndMessage;
        else if (m_config.order == TestConfig::MIXED_ALLOC_FREE)
            log << TestLog::Message << "Memory is freed right after allocation." << TestLog::EndMessage;
        else
            DE_FATAL("Unknown allocation order");
    }

    bool memoryTypeSupported = true;
#ifndef CTS_USES_VULKANSC
    memoryTypeSupported = !((m_memoryProperties.memoryTypes[m_memoryTypeIndex].propertyFlags &
                             vk::VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD) > 0 &&
                            !m_deviceCoherentMemSupported);
#endif

    if (memoryTypeSupported)
    {
        try
        {
            const VkMemoryType memoryType = m_memoryProperties.memoryTypes[m_memoryTypeIndex];
            const VkMemoryHeap memoryHeap = m_memoryProperties.memoryHeaps[memoryType.heapIndex];

            // Create a buffer to get the required size
            {
                const VkDeviceSize bufferSize = m_config.memorySize ?
                                                    *m_config.memorySize :
                                                    (VkDeviceSize)(*m_config.memoryPercentage * (float)memoryHeap.size);

                VkBufferCreateInfo bufferParams = {
                    VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
                    DE_NULL, // const void* pNext;
                    createFlags, // VkBufferCreateFlags flags;
                    bufferSize, // VkDeviceSize size;
                    usageFlags, // VkBufferUsageFlags usage;
                    sharingMode, // VkSharingMode sharingMode;
                    1u, // uint32_t queueFamilyIndexCount;
                    &queueFamilyIndex, // const uint32_t* pQueueFamilyIndices;
                };

                buffer = createBuffer(vkd, device, &bufferParams);
                vkd.getBufferMemoryRequirements(device, *buffer, &memReqs);
            }

            const VkDeviceSize allocationSize =
                (m_config.memorySize ? memReqs.size :
                                       (VkDeviceSize)(*m_config.memoryPercentage * (float)memoryHeap.size));
            const VkDeviceSize roundedUpAllocationSize =
                roundUpToNextMultiple(allocationSize, m_memoryLimits.deviceMemoryAllocationGranularity);
            vector<VkDeviceMemory> memoryObjects(m_config.memoryAllocationCount, (VkDeviceMemory)0);

            log << TestLog::Message << "Memory type index: " << m_memoryTypeIndex << TestLog::EndMessage;

            if (memoryType.heapIndex >= m_memoryProperties.memoryHeapCount)
                m_result.fail("Invalid heap index defined for memory type.");

            {
                log << TestLog::Message << "Memory type: " << memoryType << TestLog::EndMessage;
                log << TestLog::Message << "Memory heap: " << memoryHeap << TestLog::EndMessage;

                if (roundedUpAllocationSize * m_config.memoryAllocationCount > memoryHeap.size)
                    TCU_THROW(NotSupportedError, "Memory heap doesn't have enough memory.");

#if (DE_PTR_SIZE == 4)
                // For 32-bit binaries we cap the total host visible allocations to 1.5GB to
                // avoid exhausting CPU virtual address space and throwing a false negative result.
                if ((memoryType.propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) &&
                    allocationSize * m_config.memoryAllocationCount *
                            (m_subsetAllocationAllowed ? 1 : m_numPhysDevices) >=
                        1610612736)

                    log << TestLog::Message
                        << " Skipping: Not enough CPU virtual address space for all host visible allocations."
                        << TestLog::EndMessage;
                else
                {
#else
                {
#endif

                    try
                    {
                        const uint32_t totalDeviceMaskCombinations =
                            m_subsetAllocationAllowed ? (1 << m_numPhysDevices) - 1 : 1;
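                        // With subset allocation, every non-empty subset of the physical devices is
                        // a valid device mask; e.g. for two devices the masks 0x1, 0x2 and 0x3 are tested.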
                        for (uint32_t deviceMask = 1; deviceMask <= totalDeviceMaskCombinations; deviceMask++)
                        {
                            // If subset allocation is not allowed, allocate on all physical devices and run only once.
                            if (!m_subsetAllocationAllowed)
                                deviceMask = (1 << m_numPhysDevices) - 1;
                            m_allocFlagsInfo.deviceMask = deviceMask;

                            if (m_config.order == TestConfig::ALLOC_FREE ||
                                m_config.order == TestConfig::ALLOC_REVERSE_FREE)
                            {
                                for (size_t ndx = 0; ndx < m_config.memoryAllocationCount; ndx++)
                                {
                                    VkMemoryAllocateInfo alloc = {
                                        VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // sType
                                        (m_allocationMode == ALLOCATION_MODE_DEVICE_GROUP) ? &m_allocFlagsInfo :
                                                                                             DE_NULL, // pNext
                                        allocationSize, // allocationSize
                                        m_memoryTypeIndex // memoryTypeIndex;
                                    };

                                    VkResult res = vkd.allocateMemory(
                                        device, &alloc, (const VkAllocationCallbacks *)DE_NULL, &memoryObjects[ndx]);

                                    // Some implementations might have limitations on the protected heap that
                                    // don't show up in Vulkan queries. Use a hard-coded threshold after which
                                    // out of memory is allowed.
                                    if (res == VK_ERROR_OUT_OF_DEVICE_MEMORY &&
                                        memoryType.propertyFlags & vk::VK_MEMORY_PROPERTY_PROTECTED_BIT && ndx > 80)
                                        break;

                                    // We don't know the purpose of the memory type; it may have limitations
                                    // not checked in this test.
                                    if (res == VK_ERROR_OUT_OF_DEVICE_MEMORY &&
                                        (memReqs.memoryTypeBits & (1 << m_memoryTypeIndex)) == 0)
                                        break;

                                    VK_CHECK(res);

                                    TCU_CHECK(!!memoryObjects[ndx]);
                                }

                                if (m_config.order == TestConfig::ALLOC_FREE)
                                {
                                    for (size_t ndx = 0; ndx < m_config.memoryAllocationCount; ndx++)
                                    {
                                        const VkDeviceMemory mem = memoryObjects[memoryObjects.size() - 1 - ndx];

                                        if (!!mem)
                                        {
#ifndef CTS_USES_VULKANSC
                                            vkd.freeMemory(device, mem, (const VkAllocationCallbacks *)DE_NULL);
#endif // CTS_USES_VULKANSC
                                            memoryObjects[memoryObjects.size() - 1 - ndx] = (VkDeviceMemory)0;
                                        }
                                    }
                                }
                                else
                                {
                                    for (size_t ndx = 0; ndx < m_config.memoryAllocationCount; ndx++)
                                    {
                                        const VkDeviceMemory mem = memoryObjects[ndx];

                                        if (!!mem)
                                        {
#ifndef CTS_USES_VULKANSC
                                            vkd.freeMemory(device, mem, (const VkAllocationCallbacks *)DE_NULL);
#endif // CTS_USES_VULKANSC
                                            memoryObjects[ndx] = (VkDeviceMemory)0;
                                        }
                                    }
                                }
                            }
                            else
                            {
                                for (size_t ndx = 0; ndx < m_config.memoryAllocationCount; ndx++)
                                {
                                    const VkMemoryAllocateInfo alloc = {
                                        VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // sType
                                        (m_allocationMode == ALLOCATION_MODE_DEVICE_GROUP) ? &m_allocFlagsInfo :
                                                                                             DE_NULL, // pNext
                                        allocationSize, // allocationSize
                                        m_memoryTypeIndex // memoryTypeIndex;
                                    };

                                    VK_CHECK(vkd.allocateMemory(device, &alloc, (const VkAllocationCallbacks *)DE_NULL,
                                                                &memoryObjects[ndx]));
                                    TCU_CHECK(!!memoryObjects[ndx]);
#ifndef CTS_USES_VULKANSC
                                    vkd.freeMemory(device, memoryObjects[ndx], (const VkAllocationCallbacks *)DE_NULL);
#endif // CTS_USES_VULKANSC
                                    memoryObjects[ndx] = (VkDeviceMemory)0;
                                }
                            }
                        }
                    }
                    catch (...)
                    {
                        for (size_t ndx = 0; ndx < m_config.memoryAllocationCount; ndx++)
                        {
                            const VkDeviceMemory mem = memoryObjects[ndx];

                            if (!!mem)
                            {
#ifndef CTS_USES_VULKANSC
                                vkd.freeMemory(device, mem, (const VkAllocationCallbacks *)DE_NULL);
#endif // CTS_USES_VULKANSC
                                memoryObjects[ndx] = (VkDeviceMemory)0;
                            }
                        }

                        throw;
                    }
                }
            }
        }
        catch (const tcu::TestError &error)
        {
            m_result.fail(error.getMessage());
        }
    }

    m_memoryTypeIndex++;

    if (m_memoryTypeIndex < m_memoryProperties.memoryTypeCount)
        return tcu::TestStatus::incomplete();
    else
        return tcu::TestStatus(m_result.getResult(), m_result.getMessage());
}

#ifndef CTS_USES_VULKANSC

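// Measures the host-memory footprint of a single device memory allocation by routing
// one minimal vkAllocateMemory() call through recording allocation callbacks; the
// result is used to budget system memory in the random test below.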
size_t computeDeviceMemorySystemMemFootprint(const DeviceInterface &vk, VkDevice device)
{
    AllocationCallbackRecorder callbackRecorder(getSystemAllocator());

    {
        // 1 B allocation from memory type 0
        const VkMemoryAllocateInfo allocInfo = {
            VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
            DE_NULL,
            1u,
            0u,
        };
        const Unique<VkDeviceMemory> memory(allocateMemory(vk, device, &allocInfo, callbackRecorder.getCallbacks()));
        AllocationCallbackValidationResults validateRes;

        validateAllocationCallbacks(callbackRecorder, &validateRes);

        TCU_CHECK(validateRes.violations.empty());

        return getLiveSystemAllocationTotal(validateRes) +
               sizeof(void *) * validateRes.liveAllocations.size(); // allocation overhead
    }
}

struct MemoryType
{
    uint32_t index;
    VkMemoryType type;
};

struct MemoryObject
{
    VkDeviceMemory memory;
    VkDeviceSize size;
    VkMemoryPropertyFlags propertyFlags;
};

struct Heap
{
    VkMemoryHeap heap;
    VkDeviceSize memoryUsage;
    VkDeviceSize maxMemoryUsage;
    vector<MemoryType> types;
    vector<MemoryObject> objects;
};

class RandomAllocFreeTestInstance : public BaseAllocateTestInstance
{
public:
    RandomAllocFreeTestInstance(Context &context, TestConfigRandom config);
    ~RandomAllocFreeTestInstance(void);

    tcu::TestStatus iterate(void);

private:
    const size_t m_opCount;
    const size_t m_allocSysMemSize;
    const tcu::PlatformMemoryLimits m_memoryLimits;
    const uint32_t m_totalDeviceMaskCombinations;

    uint32_t m_memoryObjectCount;
    uint32_t m_memoryProtectedObjectCount;
    uint32_t m_currentDeviceMask;
    size_t m_opNdx;
    de::Random m_rng;
    vector<Heap> m_heaps;
    VkDeviceSize m_totalSystemMem;
    VkDeviceSize m_totalDeviceMem;
};

RandomAllocFreeTestInstance::RandomAllocFreeTestInstance(Context &context, TestConfigRandom config)
    : BaseAllocateTestInstance(context, config.allocationMode)
    , m_opCount(128)
    , m_allocSysMemSize(computeDeviceMemorySystemMemFootprint(getDeviceInterface(), context.getDevice()) +
                        sizeof(MemoryObject))
    , m_memoryLimits(tcu::getMemoryLimits(context.getTestContext().getPlatform()))
    , m_totalDeviceMaskCombinations(m_subsetAllocationAllowed ? (1 << m_numPhysDevices) - 1 : 1)
    , m_memoryObjectCount(0)
    , m_memoryProtectedObjectCount(0)
    , m_currentDeviceMask(m_subsetAllocationAllowed ? 1 : (1 << m_numPhysDevices) - 1)
    , m_opNdx(0)
    , m_rng(config.seed)
    , m_totalSystemMem(0)
    , m_totalDeviceMem(0)
{
    TCU_CHECK(m_memoryProperties.memoryHeapCount <= 32);
    TCU_CHECK(m_memoryProperties.memoryTypeCount <= 32);

    m_heaps.resize(m_memoryProperties.memoryHeapCount);

    for (uint32_t heapNdx = 0; heapNdx < m_memoryProperties.memoryHeapCount; heapNdx++)
    {
        m_heaps[heapNdx].heap = m_memoryProperties.memoryHeaps[heapNdx];
        m_heaps[heapNdx].memoryUsage = 0;
        m_heaps[heapNdx].maxMemoryUsage = m_heaps[heapNdx].heap.size / 8; /* Use at maximum 12.5% of heap */

        m_heaps[heapNdx].objects.reserve(100);
    }

    for (uint32_t memoryTypeNdx = 0; memoryTypeNdx < m_memoryProperties.memoryTypeCount; memoryTypeNdx++)
    {
        const MemoryType type = {memoryTypeNdx, m_memoryProperties.memoryTypes[memoryTypeNdx]};

        TCU_CHECK(type.type.heapIndex < m_memoryProperties.memoryHeapCount);

        if ((m_memoryProperties.memoryTypes[type.index].propertyFlags &
             vk::VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD) > 0 &&
            !m_deviceCoherentMemSupported)
        {
            continue;
        }

        m_heaps[type.type.heapIndex].types.push_back(type);
    }
}

RandomAllocFreeTestInstance::~RandomAllocFreeTestInstance(void)
{
#ifndef CTS_USES_VULKANSC
    const VkDevice device = getDevice();
    const DeviceInterface &vkd = getDeviceInterface();

    for (uint32_t heapNdx = 0; heapNdx < (uint32_t)m_heaps.size(); heapNdx++)
    {
        const Heap &heap = m_heaps[heapNdx];

        for (size_t objectNdx = 0; objectNdx < heap.objects.size(); objectNdx++)
        {
            if (!!heap.objects[objectNdx].memory)
            {
                vkd.freeMemory(device, heap.objects[objectNdx].memory, (const VkAllocationCallbacks *)DE_NULL);
            }
        }
    }
#endif // CTS_USES_VULKANSC
}

tcu::TestStatus RandomAllocFreeTestInstance::iterate(void)
{
    const VkDevice device = getDevice();
    const DeviceInterface &vkd = getDeviceInterface();
    TestLog &log = m_context.getTestContext().getLog();
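    // A platform reporting no dedicated device-local memory is treated as UMA:
    // device allocations then consume the same pool as system memory.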
    const bool isUMA = m_memoryLimits.totalDeviceLocalMemory == 0;
    const VkDeviceSize usedSysMem = isUMA ? (m_totalDeviceMem + m_totalSystemMem) : m_totalSystemMem;
    const bool canAllocateSys = usedSysMem + m_allocSysMemSize + 1024 <
                                m_memoryLimits.totalSystemMemory; // \note Always leave room for a 1 KiB sys mem alloc
    const bool canAllocateDev =
        isUMA ? canAllocateSys : (m_totalDeviceMem + 16 < m_memoryLimits.totalDeviceLocalMemory);
    vector<size_t> nonFullHeaps;
    vector<size_t> nonEmptyHeaps;
    bool allocateMore;

    if (m_opNdx == 0)
    {
        log << TestLog::Message << "Performing " << m_opCount
            << " random vkAllocateMemory() / vkFreeMemory() calls before freeing all memory." << TestLog::EndMessage;
        log << TestLog::Message << "Using max 1/8 of the memory in each memory heap." << TestLog::EndMessage;
    }

    // Classify heaps based on whether allocations and frees are possible
    for (size_t heapNdx = 0; heapNdx < m_heaps.size(); ++heapNdx)
    {
        const bool isDeviceLocal = (m_heaps[heapNdx].heap.flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0;
        const bool isHeapFull = m_heaps[heapNdx].memoryUsage >= m_heaps[heapNdx].maxMemoryUsage;
        const bool isHeapEmpty = m_heaps[heapNdx].memoryUsage == 0;

        if (!isHeapEmpty)
            nonEmptyHeaps.push_back(heapNdx);

        if (!isHeapFull && ((isUMA && canAllocateSys) || (!isUMA && isDeviceLocal && canAllocateDev) ||
                            (!isUMA && !isDeviceLocal && canAllocateSys)))
            nonFullHeaps.push_back(heapNdx);
    }

    if (m_opNdx >= m_opCount)
    {
        if (nonEmptyHeaps.empty())
        {
            m_currentDeviceMask++;
            if (m_currentDeviceMask > m_totalDeviceMaskCombinations)
                return tcu::TestStatus::pass("Pass");
            else
            {
                m_opNdx = 0;
                return tcu::TestStatus::incomplete();
            }
        }
        else
            allocateMore = false;
    }
    else if (!nonEmptyHeaps.empty() && !nonFullHeaps.empty() && (m_memoryObjectCount < MAX_ALLOCATION_COUNT) &&
             canAllocateSys)
        allocateMore = m_rng.getBool(); // Randomize if both operations are doable.
    else if (nonEmptyHeaps.empty())
    {
        DE_ASSERT(canAllocateSys);
        allocateMore = true; // Allocate more if there are no objects to free.
    }
    else if (nonFullHeaps.empty() || !canAllocateSys)
        allocateMore = false; // Free objects if there is no free space for new objects.
    else
    {
        allocateMore = false;
        DE_FATAL("Fail");
    }

    if (allocateMore)
    {
        const size_t nonFullHeapNdx = (size_t)(m_rng.getUint32() % (uint32_t)nonFullHeaps.size());
        const size_t heapNdx = nonFullHeaps[nonFullHeapNdx];
        Heap &heap = m_heaps[heapNdx];
        const MemoryType &memoryType = m_rng.choose<MemoryType>(heap.types.begin(), heap.types.end());
        const bool isDeviceLocal = (heap.heap.flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0;
        const bool isProtected = memoryType.type.propertyFlags & VK_MEMORY_PROPERTY_PROTECTED_BIT;
        VkDeviceSize maxAllocSize =
            (isDeviceLocal && !isUMA) ?
                de::min(heap.maxMemoryUsage - heap.memoryUsage,
                        (VkDeviceSize)m_memoryLimits.totalDeviceLocalMemory - m_totalDeviceMem) :
                de::min(heap.maxMemoryUsage - heap.memoryUsage,
                        (VkDeviceSize)m_memoryLimits.totalSystemMemory - usedSysMem - m_allocSysMemSize);
        const VkDeviceSize maxProtectedAllocSize = 1 * 1024 * 1024;

        // Some implementations might have limitations on the protected heap that don't
        // show up in Vulkan queries. Use a hard-coded limit of an arbitrarily selected
        // 1 MiB, following the spec's "Device Memory Allocation" note to prefer
        // minimum-size allocations.
        if (isProtected)
            maxAllocSize = (maxAllocSize > maxProtectedAllocSize) ? maxProtectedAllocSize : maxAllocSize;

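        // Pick a random size in [1, maxAllocSize] bytes.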
        const VkDeviceSize allocationSize = 1 + (m_rng.getUint64() % maxAllocSize);

        if ((allocationSize > (uint64_t)(heap.maxMemoryUsage - heap.memoryUsage)) && (allocationSize != 1))
            TCU_THROW(InternalError, "Test Error: trying to allocate more memory than the available heap size.");

        const MemoryObject object = {(VkDeviceMemory)0, allocationSize, memoryType.type.propertyFlags};

        heap.objects.push_back(object);

        m_allocFlagsInfo.deviceMask = m_currentDeviceMask;
        const VkMemoryAllocateInfo alloc = {
            VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // sType
            (m_allocationMode == ALLOCATION_MODE_DEVICE_GROUP) ? &m_allocFlagsInfo : DE_NULL, // pNext
            object.size, // allocationSize
            memoryType.index // memoryTypeIndex;
        };

        VkResult res =
            vkd.allocateMemory(device, &alloc, (const VkAllocationCallbacks *)DE_NULL, &heap.objects.back().memory);

        // Some implementations might have limitations on the protected heap that don't
        // show up in Vulkan queries. Use a hard-coded threshold after which out of memory
        // is allowed, following the spec's "Device Memory Allocation" note to support at
        // least 80 concurrent allocations.
        if (res == VK_ERROR_OUT_OF_DEVICE_MEMORY && isProtected && m_memoryProtectedObjectCount > 80)
        {
            heap.objects.pop_back();
        }
        else
        {
            VK_CHECK(res);

            TCU_CHECK(!!heap.objects.back().memory);
            m_memoryObjectCount++;

            if (isProtected)
                m_memoryProtectedObjectCount++;

            heap.memoryUsage += allocationSize;
            (isDeviceLocal ? m_totalDeviceMem : m_totalSystemMem) += allocationSize;
            m_totalSystemMem += m_allocSysMemSize;
        }
    }
    else
    {
        const size_t nonEmptyHeapNdx = (size_t)(m_rng.getUint32() % (uint32_t)nonEmptyHeaps.size());
        const size_t heapNdx = nonEmptyHeaps[nonEmptyHeapNdx];
        Heap &heap = m_heaps[heapNdx];
        const size_t memoryObjectNdx = m_rng.getUint32() % heap.objects.size();
        MemoryObject &memoryObject = heap.objects[memoryObjectNdx];
        const bool isDeviceLocal = (heap.heap.flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0;

#ifndef CTS_USES_VULKANSC
        vkd.freeMemory(device, memoryObject.memory, (const VkAllocationCallbacks *)DE_NULL);
#endif
        memoryObject.memory = (VkDeviceMemory)0;
        m_memoryObjectCount--;

        if (memoryObject.propertyFlags & VK_MEMORY_PROPERTY_PROTECTED_BIT)
        {
            m_memoryProtectedObjectCount--;
            memoryObject.propertyFlags = (VkMemoryPropertyFlags)0;
        }

        heap.memoryUsage -= memoryObject.size;
        (isDeviceLocal ? m_totalDeviceMem : m_totalSystemMem) -= memoryObject.size;
        m_totalSystemMem -= m_allocSysMemSize;

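        // Swap-and-pop removal keeps the objects vector compact without preserving order.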
        heap.objects[memoryObjectNdx] = heap.objects.back();
        heap.objects.pop_back();

        DE_ASSERT(heap.memoryUsage == 0 || !heap.objects.empty());
    }

    m_opNdx++;
    return tcu::TestStatus::incomplete();
}
#endif // CTS_USES_VULKANSC

} // namespace

tcu::TestCaseGroup *createAllocationTestsCommon(tcu::TestContext &testCtx, AllocationMode allocationMode)
{
    const char *name = [&]
    {
        switch (allocationMode)
        {
        case ALLOCATION_MODE_DEFAULT:
            return "allocation";
        case ALLOCATION_MODE_DEVICE_GROUP:
            return "device_group_allocation";
        case ALLOCATION_MODE_PAGEABLE:
            return "pageable_allocation";
        default:
            TCU_THROW(InternalError, "Unknown allocation mode");
        }
    }();
    de::MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(testCtx, name));

    const VkDeviceSize KiB = 1024;
    const VkDeviceSize MiB = 1024 * KiB;

    const struct
    {
        const char *const str;
        VkDeviceSize size;
    } allocationSizes[] = {{"64", 64},        {"128", 128},      {"256", 256},      {"512", 512},
                           {"1KiB", 1 * KiB}, {"4KiB", 4 * KiB}, {"8KiB", 8 * KiB}, {"1MiB", 1 * MiB}};

    const int allocationPercents[] = {1};

    const int allocationCounts[] = {1, 10, 100, 1000, -1};

    const struct
    {
        const char *const str;
        const TestConfig::Order order;
    } orders[] = {{"forward", TestConfig::ALLOC_FREE},
                  {"reverse", TestConfig::ALLOC_REVERSE_FREE},
                  {"mixed", TestConfig::MIXED_ALLOC_FREE}};

    {
        de::MovePtr<tcu::TestCaseGroup> basicGroup(new tcu::TestCaseGroup(testCtx, "basic"));

        for (size_t allocationSizeNdx = 0; allocationSizeNdx < DE_LENGTH_OF_ARRAY(allocationSizes); allocationSizeNdx++)
        {
            const VkDeviceSize allocationSize = allocationSizes[allocationSizeNdx].size;
            const char *const allocationSizeName = allocationSizes[allocationSizeNdx].str;
            de::MovePtr<tcu::TestCaseGroup> sizeGroup(
                new tcu::TestCaseGroup(testCtx, ("size_" + string(allocationSizeName)).c_str()));

            for (size_t orderNdx = 0; orderNdx < DE_LENGTH_OF_ARRAY(orders); orderNdx++)
            {
                const TestConfig::Order order = orders[orderNdx].order;
                const char *const orderName = orders[orderNdx].str;
                de::MovePtr<tcu::TestCaseGroup> orderGroup(new tcu::TestCaseGroup(testCtx, orderName));

                for (size_t allocationCountNdx = 0; allocationCountNdx < DE_LENGTH_OF_ARRAY(allocationCounts);
                     allocationCountNdx++)
                {
                    const int allocationCount = allocationCounts[allocationCountNdx];

                    if (allocationCount != -1 && allocationCount * allocationSize > 50 * MiB)
                        continue;

                    TestConfig config;

                    config.memorySize = allocationSize;
                    config.order = order;
                    config.allocationMode = allocationMode;
                    if (allocationCount == -1)
                    {
                        if (allocationSize < 4096)
                            continue;

                        config.memoryAllocationCount =
                            de::min((uint32_t)(50 * MiB / allocationSize), (uint32_t)MAX_ALLOCATION_COUNT);

                        if (config.memoryAllocationCount == 0 || config.memoryAllocationCount == 1 ||
                            config.memoryAllocationCount == 10 || config.memoryAllocationCount == 100 ||
                            config.memoryAllocationCount == 1000)
                            continue;
                    }
                    else
                        config.memoryAllocationCount = allocationCount;

                    orderGroup->addChild(new InstanceFactory1<AllocateFreeTestInstance, TestConfig>(
                        testCtx, "count_" + de::toString(config.memoryAllocationCount), config));
                }

                sizeGroup->addChild(orderGroup.release());
            }

            basicGroup->addChild(sizeGroup.release());
        }

        for (size_t allocationPercentNdx = 0; allocationPercentNdx < DE_LENGTH_OF_ARRAY(allocationPercents);
             allocationPercentNdx++)
        {
            const int allocationPercent = allocationPercents[allocationPercentNdx];
            de::MovePtr<tcu::TestCaseGroup> percentGroup(
                new tcu::TestCaseGroup(testCtx, ("percent_" + de::toString(allocationPercent)).c_str()));

            for (size_t orderNdx = 0; orderNdx < DE_LENGTH_OF_ARRAY(orders); orderNdx++)
            {
                const TestConfig::Order order = orders[orderNdx].order;
                const char *const orderName = orders[orderNdx].str;
                de::MovePtr<tcu::TestCaseGroup> orderGroup(new tcu::TestCaseGroup(testCtx, orderName));

                for (size_t allocationCountNdx = 0; allocationCountNdx < DE_LENGTH_OF_ARRAY(allocationCounts);
                     allocationCountNdx++)
                {
                    const int allocationCount = allocationCounts[allocationCountNdx];

                    if ((allocationCount != -1) && ((float)allocationCount * (float)allocationPercent >= 1.00f / 8.00f))
                        continue;

                    TestConfig config;

                    config.memoryPercentage = (float)allocationPercent / 100.0f;
                    config.order = order;
                    config.allocationMode = allocationMode;

                    if (allocationCount == -1)
                    {
                        config.memoryAllocationCount =
                            de::min((uint32_t)((1.00f / 8.00f) / ((float)allocationPercent / 100.0f)),
                                    (uint32_t)MAX_ALLOCATION_COUNT);

                        if (config.memoryAllocationCount == 0 || config.memoryAllocationCount == 1 ||
                            config.memoryAllocationCount == 10 || config.memoryAllocationCount == 100 ||
                            config.memoryAllocationCount == 1000)
                            continue;
                    }
                    else
                        config.memoryAllocationCount = allocationCount;

                    orderGroup->addChild(new InstanceFactory1<AllocateFreeTestInstance, TestConfig>(
                        testCtx, "count_" + de::toString(config.memoryAllocationCount), config));
                }

                percentGroup->addChild(orderGroup.release());
            }

            basicGroup->addChild(percentGroup.release());
        }

        group->addChild(basicGroup.release());
    }

#ifndef CTS_USES_VULKANSC
    // RandomAllocFreeTestInstance uses VkAllocationCallbacks, and in Vulkan SC VkAllocationCallbacks must be NULL
    {
        const uint32_t caseCount = 100;
        de::MovePtr<tcu::TestCaseGroup> randomGroup(new tcu::TestCaseGroup(testCtx, "random"));

        for (uint32_t caseNdx = 0; caseNdx < caseCount; caseNdx++)
        {
            TestConfigRandom config(deInt32Hash(caseNdx ^ 32480), allocationMode);
            // Random case
            randomGroup->addChild(new InstanceFactory1<RandomAllocFreeTestInstance, TestConfigRandom>(
                testCtx, de::toString(caseNdx), config));
        }

        group->addChild(randomGroup.release());
    }
#endif // CTS_USES_VULKANSC

    return group.release();
}

tcu::TestCaseGroup *createAllocationTests(tcu::TestContext &testCtx)
{
    return createAllocationTestsCommon(testCtx, ALLOCATION_MODE_DEFAULT);
}

tcu::TestCaseGroup *createDeviceGroupAllocationTests(tcu::TestContext &testCtx)
{
    return createAllocationTestsCommon(testCtx, ALLOCATION_MODE_DEVICE_GROUP);
}

tcu::TestCaseGroup *createPageableAllocationTests(tcu::TestContext &testCtx)
{
    return createAllocationTestsCommon(testCtx, ALLOCATION_MODE_PAGEABLE);
}

} // namespace memory
} // namespace vkt