//
// Copyright (c) 2017-2022 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//

#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
#define AMD_VULKAN_MEMORY_ALLOCATOR_H

/** \mainpage Vulkan Memory Allocator

<b>Version 3.0.1 (2022-05-26)</b>

Copyright (c) 2017-2022 Advanced Micro Devices, Inc. All rights reserved. \n
License: MIT

<b>API documentation divided into groups:</b> [Modules](modules.html)

\section main_table_of_contents Table of contents

- <b>User guide</b>
  - \subpage quick_start
    - [Project setup](@ref quick_start_project_setup)
    - [Initialization](@ref quick_start_initialization)
    - [Resource allocation](@ref quick_start_resource_allocation)
  - \subpage choosing_memory_type
    - [Usage](@ref choosing_memory_type_usage)
    - [Required and preferred flags](@ref choosing_memory_type_required_preferred_flags)
    - [Explicit memory types](@ref choosing_memory_type_explicit_memory_types)
    - [Custom memory pools](@ref choosing_memory_type_custom_memory_pools)
    - [Dedicated allocations](@ref choosing_memory_type_dedicated_allocations)
  - \subpage memory_mapping
    - [Mapping functions](@ref memory_mapping_mapping_functions)
    - [Persistently mapped memory](@ref memory_mapping_persistently_mapped_memory)
    - [Cache flush and invalidate](@ref memory_mapping_cache_control)
  - \subpage staying_within_budget
    - [Querying for budget](@ref staying_within_budget_querying_for_budget)
    - [Controlling memory usage](@ref staying_within_budget_controlling_memory_usage)
  - \subpage resource_aliasing
  - \subpage custom_memory_pools
    - [Choosing memory type index](@ref custom_memory_pools_MemTypeIndex)
    - [Linear allocation algorithm](@ref linear_algorithm)
      - [Free-at-once](@ref linear_algorithm_free_at_once)
      - [Stack](@ref linear_algorithm_stack)
      - [Double stack](@ref linear_algorithm_double_stack)
      - [Ring buffer](@ref linear_algorithm_ring_buffer)
  - \subpage defragmentation
  - \subpage statistics
    - [Numeric statistics](@ref statistics_numeric_statistics)
    - [JSON dump](@ref statistics_json_dump)
  - \subpage allocation_annotation
    - [Allocation user data](@ref allocation_user_data)
    - [Allocation names](@ref allocation_names)
  - \subpage virtual_allocator
  - \subpage debugging_memory_usage
    - [Memory initialization](@ref debugging_memory_usage_initialization)
    - [Margins](@ref debugging_memory_usage_margins)
    - [Corruption detection](@ref debugging_memory_usage_corruption_detection)
  - \subpage opengl_interop
- \subpage usage_patterns
    - [GPU-only resource](@ref usage_patterns_gpu_only)
    - [Staging copy for upload](@ref usage_patterns_staging_copy_upload)
    - [Readback](@ref usage_patterns_readback)
    - [Advanced data uploading](@ref usage_patterns_advanced_data_uploading)
    - [Other use cases](@ref usage_patterns_other_use_cases)
- \subpage configuration
  - [Pointers to Vulkan functions](@ref config_Vulkan_functions)
  - [Custom host memory allocator](@ref custom_memory_allocator)
  - [Device memory allocation callbacks](@ref allocation_callbacks)
  - [Device heap memory limit](@ref heap_memory_limit)
- <b>Extension support</b>
    - \subpage vk_khr_dedicated_allocation
    - \subpage enabling_buffer_device_address
    - \subpage vk_ext_memory_priority
    - \subpage vk_amd_device_coherent_memory
- \subpage general_considerations
  - [Thread safety](@ref general_considerations_thread_safety)
  - [Versioning and compatibility](@ref general_considerations_versioning_and_compatibility)
  - [Validation layer warnings](@ref general_considerations_validation_layer_warnings)
  - [Allocation algorithm](@ref general_considerations_allocation_algorithm)
  - [Features not supported](@ref general_considerations_features_not_supported)

\section main_see_also See also

- [**Product page on GPUOpen**](https://gpuopen.com/gaming-product/vulkan-memory-allocator/)
- [**Source repository on GitHub**](https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator)

\defgroup group_init Library initialization

\brief API elements related to the initialization and management of the entire library, especially the #VmaAllocator object.

\defgroup group_alloc Memory allocation

\brief API elements related to the allocation, deallocation, and management of Vulkan memory, buffers, and images.
The most basic ones are: vmaCreateBuffer(), vmaCreateImage().

\defgroup group_virtual Virtual allocator

\brief API elements related to the mechanism of \ref virtual_allocator - using the core allocation algorithm
for user-defined purposes, without allocating any real GPU memory.

\defgroup group_stats Statistics

\brief API elements that query the current status of the allocator, from memory usage and budget to a full dump of the internal state in JSON format.
See documentation chapter: \ref statistics.
*/


#ifdef __cplusplus
extern "C" {
#endif

#ifndef VULKAN_H_
    #include <vulkan/vulkan.h>
#endif

// Define this macro to declare maximum supported Vulkan version in format AAABBBCCC,
// where AAA = major, BBB = minor, CCC = patch.
// If you want to use version > 1.0, it still needs to be enabled via VmaAllocatorCreateInfo::vulkanApiVersion.
#if !defined(VMA_VULKAN_VERSION)
    #if defined(VK_VERSION_1_3)
        #define VMA_VULKAN_VERSION 1003000
    #elif defined(VK_VERSION_1_2)
        #define VMA_VULKAN_VERSION 1002000
    #elif defined(VK_VERSION_1_1)
        #define VMA_VULKAN_VERSION 1001000
    #else
        #define VMA_VULKAN_VERSION 1000000
    #endif
#endif
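
// For example, an application compiling against Vulkan 1.3 headers that wants the
// library to assume only Vulkan 1.1 level functionality could define, before
// including this header (a usage sketch; overriding the macro is optional):
//   #define VMA_VULKAN_VERSION 1001000 // Vulkan 1.1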

#if defined(__ANDROID__) && defined(VK_NO_PROTOTYPES) && VMA_STATIC_VULKAN_FUNCTIONS
    extern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;
    extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr;
    extern PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    extern PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    extern PFN_vkAllocateMemory vkAllocateMemory;
    extern PFN_vkFreeMemory vkFreeMemory;
    extern PFN_vkMapMemory vkMapMemory;
    extern PFN_vkUnmapMemory vkUnmapMemory;
    extern PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    extern PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    extern PFN_vkBindBufferMemory vkBindBufferMemory;
    extern PFN_vkBindImageMemory vkBindImageMemory;
    extern PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    extern PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    extern PFN_vkCreateBuffer vkCreateBuffer;
    extern PFN_vkDestroyBuffer vkDestroyBuffer;
    extern PFN_vkCreateImage vkCreateImage;
    extern PFN_vkDestroyImage vkDestroyImage;
    extern PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
    #if VMA_VULKAN_VERSION >= 1001000
        extern PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2;
        extern PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2;
        extern PFN_vkBindBufferMemory2 vkBindBufferMemory2;
        extern PFN_vkBindImageMemory2 vkBindImageMemory2;
        extern PFN_vkGetPhysicalDeviceMemoryProperties2 vkGetPhysicalDeviceMemoryProperties2;
    #endif // #if VMA_VULKAN_VERSION >= 1001000
#endif // #if defined(__ANDROID__) && defined(VK_NO_PROTOTYPES) && VMA_STATIC_VULKAN_FUNCTIONS

#if !defined(VMA_DEDICATED_ALLOCATION)
    #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
        #define VMA_DEDICATED_ALLOCATION 1
    #else
        #define VMA_DEDICATED_ALLOCATION 0
    #endif
#endif

#if !defined(VMA_BIND_MEMORY2)
    #if VK_KHR_bind_memory2
        #define VMA_BIND_MEMORY2 1
    #else
        #define VMA_BIND_MEMORY2 0
    #endif
#endif

#if !defined(VMA_MEMORY_BUDGET)
    #if VK_EXT_memory_budget && (VK_KHR_get_physical_device_properties2 || VMA_VULKAN_VERSION >= 1001000)
        #define VMA_MEMORY_BUDGET 1
    #else
        #define VMA_MEMORY_BUDGET 0
    #endif
#endif

// Defined to 1 when the VK_KHR_buffer_device_address device extension or the equivalent core Vulkan 1.2 feature is defined in its headers.
#if !defined(VMA_BUFFER_DEVICE_ADDRESS)
    #if VK_KHR_buffer_device_address || VMA_VULKAN_VERSION >= 1002000
        #define VMA_BUFFER_DEVICE_ADDRESS 1
    #else
        #define VMA_BUFFER_DEVICE_ADDRESS 0
    #endif
#endif

// Defined to 1 when the VK_EXT_memory_priority device extension is defined in Vulkan headers.
#if !defined(VMA_MEMORY_PRIORITY)
    #if VK_EXT_memory_priority
        #define VMA_MEMORY_PRIORITY 1
    #else
        #define VMA_MEMORY_PRIORITY 0
    #endif
#endif

// Defined to 1 when the VK_KHR_external_memory device extension is defined in Vulkan headers.
#if !defined(VMA_EXTERNAL_MEMORY)
    #if VK_KHR_external_memory
        #define VMA_EXTERNAL_MEMORY 1
    #else
        #define VMA_EXTERNAL_MEMORY 0
    #endif
#endif

// Define these macros to decorate all public functions with additional code,
// before and after the returned type, respectively. This may be useful for
// exporting the functions when compiling VMA as a separate library. Example:
// #define VMA_CALL_PRE  __declspec(dllexport)
// #define VMA_CALL_POST __cdecl
#ifndef VMA_CALL_PRE
    #define VMA_CALL_PRE
#endif
#ifndef VMA_CALL_POST
    #define VMA_CALL_POST
#endif

// Define this macro to decorate pointers with an attribute specifying the
// length of the array they point to if they are not null.
//
// The length may be one of:
// - The name of another parameter in the argument list where the pointer is declared
// - The name of another member in the struct where the pointer is declared
// - The name of a member of a struct type, meaning the value of that member in
//   the context of the call. For example,
//   VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount")
//   means the number of memory heaps available in the device associated
//   with the VmaAllocator being dealt with.
#ifndef VMA_LEN_IF_NOT_NULL
    #define VMA_LEN_IF_NOT_NULL(len)
#endif

// The VMA_NULLABLE macro is defined to be _Nullable when compiling with Clang.
// see: https://clang.llvm.org/docs/AttributeReference.html#nullable
#ifndef VMA_NULLABLE
    #ifdef __clang__
        #define VMA_NULLABLE _Nullable
    #else
        #define VMA_NULLABLE
    #endif
#endif

// The VMA_NOT_NULL macro is defined to be _Nonnull when compiling with Clang.
// see: https://clang.llvm.org/docs/AttributeReference.html#nonnull
#ifndef VMA_NOT_NULL
    #ifdef __clang__
        #define VMA_NOT_NULL _Nonnull
    #else
        #define VMA_NOT_NULL
    #endif
#endif

// If non-dispatchable handles are represented as pointers, then we can give
// them nullability annotations.
#ifndef VMA_NOT_NULL_NON_DISPATCHABLE
    #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
        #define VMA_NOT_NULL_NON_DISPATCHABLE VMA_NOT_NULL
    #else
        #define VMA_NOT_NULL_NON_DISPATCHABLE
    #endif
#endif

#ifndef VMA_NULLABLE_NON_DISPATCHABLE
    #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
        #define VMA_NULLABLE_NON_DISPATCHABLE VMA_NULLABLE
    #else
        #define VMA_NULLABLE_NON_DISPATCHABLE
    #endif
#endif

#ifndef VMA_STATS_STRING_ENABLED
    #define VMA_STATS_STRING_ENABLED 1
#endif

////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//
//    INTERFACE
//
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////

// Sections for managing code placement in the file, only for development purposes, e.g. for convenient folding inside an IDE.
#ifndef _VMA_ENUM_DECLARATIONS

/**
\addtogroup group_init
@{
*/

/// Flags for created #VmaAllocator.
typedef enum VmaAllocatorCreateFlagBits
{
    /** \brief The allocator and all objects created from it will not be synchronized internally, so you must guarantee they are used from only one thread at a time or synchronized externally by you.

    Using this flag may increase performance because internal mutexes are not used.
    */
    VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
    /** \brief Enables usage of the VK_KHR_dedicated_allocation extension.

    The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`.
    When it is `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1.

    Using this extension will automatically allocate dedicated blocks of memory for
    some buffers and images instead of suballocating place for them out of bigger
    memory blocks (as if you explicitly used the #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
    flag) when it is recommended by the driver. It may improve performance on some
    GPUs.

    You may set this flag only if you found out that the following device extensions are
    supported, you enabled them while creating the Vulkan device passed as
    VmaAllocatorCreateInfo::device, and you want them to be used internally by this
    library:

    - VK_KHR_get_memory_requirements2 (device extension)
    - VK_KHR_dedicated_allocation (device extension)

    When this flag is set, you can experience the following warnings reported by the Vulkan
    validation layer. You can ignore them.

    > vkBindBufferMemory(): Binding memory to buffer 0x2d but vkGetBufferMemoryRequirements() has not been called on that buffer.
    */
    VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002,
    /**
    Enables usage of the VK_KHR_bind_memory2 extension.

    The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`.
    When it is `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1.

    You may set this flag only if you found out that this device extension is supported,
    you enabled it while creating the Vulkan device passed as VmaAllocatorCreateInfo::device,
    and you want it to be used internally by this library.

    The extension provides the functions `vkBindBufferMemory2KHR` and `vkBindImageMemory2KHR`,
    which allow passing a chain of `pNext` structures while binding.
    This flag is required if you use the `pNext` parameter in vmaBindBufferMemory2() or vmaBindImageMemory2().
    */
    VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT = 0x00000004,
    /**
    Enables usage of the VK_EXT_memory_budget extension.

    You may set this flag only if you found out that this device extension is supported,
    you enabled it while creating the Vulkan device passed as VmaAllocatorCreateInfo::device,
    and you want it to be used internally by this library, along with the instance extension
    VK_KHR_get_physical_device_properties2, which is required by it (or Vulkan 1.1, where this extension is promoted).

    The extension provides a query for current memory usage and budget, which will probably
    be more accurate than the estimation used by the library otherwise.
    */
    VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT = 0x00000008,
    /**
    Enables usage of the VK_AMD_device_coherent_memory extension.

    You may set this flag only if you:

    - found out that this device extension is supported and enabled it while creating the Vulkan device passed as VmaAllocatorCreateInfo::device,
    - checked that `VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory` is true and set it while creating the Vulkan device,
    - want it to be used internally by this library.

    The extension and accompanying device feature provide access to memory types with
    `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` and `VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` flags.
    They are useful mostly for writing breadcrumb markers - a common method for debugging GPU crashes/hangs/TDRs.

    When the extension is not enabled, such memory types are still enumerated, but their usage is illegal.
    To protect against this error, if you don't create the allocator with this flag, it will refuse to allocate any memory or create a custom pool in such a memory type,
    returning `VK_ERROR_FEATURE_NOT_PRESENT`.
    */
    VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT = 0x00000010,
    /**
    Enables usage of the "buffer device address" feature, which allows you to use the function
    `vkGetBufferDeviceAddress*` to get a raw GPU pointer to a buffer and pass it for usage inside a shader.

    You may set this flag only if you:

    1. (For Vulkan version < 1.2) Found as available and enabled the device extension
    VK_KHR_buffer_device_address.
    This extension is promoted to core Vulkan 1.2.
    2. Found as available and enabled the device feature `VkPhysicalDeviceBufferDeviceAddressFeatures::bufferDeviceAddress`.

    When this flag is set, you can create buffers with `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT` using VMA.
    The library automatically adds `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT` to
    allocated memory blocks wherever it might be needed.

    For more information, see documentation chapter \ref enabling_buffer_device_address.
    */
    VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT = 0x00000020,
    /**
    Enables usage of the VK_EXT_memory_priority extension in the library.

    You may set this flag only if you found this device extension available and enabled it,
    along with `VkPhysicalDeviceMemoryPriorityFeaturesEXT::memoryPriority == VK_TRUE`,
    while creating the Vulkan device passed as VmaAllocatorCreateInfo::device.

    When this flag is used, VmaAllocationCreateInfo::priority and VmaPoolCreateInfo::priority
    are used to set priorities of allocated Vulkan memory. Without it, these variables are ignored.

    A priority must be a floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations.
    Larger values are higher priority. The granularity of the priorities is implementation-dependent.
    It is automatically passed to every call to `vkAllocateMemory` done by the library using the structure `VkMemoryPriorityAllocateInfoEXT`.
    The value to be used for default priority is 0.5.
    For more details, see the documentation of the VK_EXT_memory_priority extension.
    */
    VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT = 0x00000040,

    VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocatorCreateFlagBits;
/// See #VmaAllocatorCreateFlagBits.
typedef VkFlags VmaAllocatorCreateFlags;

/** @} */

/**
\addtogroup group_alloc
@{
*/

/// \brief Intended usage of the allocated memory.
typedef enum VmaMemoryUsage
{
    /** No intended memory usage specified.
    Use other members of VmaAllocationCreateInfo to specify your requirements.
    */
    VMA_MEMORY_USAGE_UNKNOWN = 0,
    /**
    \deprecated Obsolete, preserved for backward compatibility.
    Prefers `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
    */
    VMA_MEMORY_USAGE_GPU_ONLY = 1,
    /**
    \deprecated Obsolete, preserved for backward compatibility.
    Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` and `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT`.
    */
    VMA_MEMORY_USAGE_CPU_ONLY = 2,
    /**
    \deprecated Obsolete, preserved for backward compatibility.
    Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`, prefers `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
    */
    VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
    /**
    \deprecated Obsolete, preserved for backward compatibility.
    Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`, prefers `VK_MEMORY_PROPERTY_HOST_CACHED_BIT`.
    */
    VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
    /**
    \deprecated Obsolete, preserved for backward compatibility.
    Prefers not `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
    */
    VMA_MEMORY_USAGE_CPU_COPY = 5,
    /**
    Lazily allocated GPU memory having `VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT`.
    Exists mostly on mobile platforms. Using it on a desktop PC or other GPUs with no such memory type present will fail the allocation.

    Usage: Memory for transient attachment images (color attachments, depth attachments etc.), created with `VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT`.

    Allocations with this usage are always created as dedicated - it implies #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
    */
    VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED = 6,
    /**
    Selects the best memory type automatically.
    This flag is recommended for most common use cases.

    When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT),
    you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
    in VmaAllocationCreateInfo::flags.

    It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g.
    vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo(),
    and not with generic memory allocation functions.
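
    A minimal sketch (the buffer parameters are illustrative; `allocator` is assumed
    to be an existing #VmaAllocator):

    \code
    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;

    VkBuffer buf;
    VmaAllocation alloc;
    vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, NULL);
    \endcode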
    */
    VMA_MEMORY_USAGE_AUTO = 7,
    /**
    Selects the best memory type automatically with preference for GPU (device) memory.

    When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT),
    you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
    in VmaAllocationCreateInfo::flags.

    It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g.
    vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo(),
    and not with generic memory allocation functions.
    */
    VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE = 8,
    /**
    Selects the best memory type automatically with preference for CPU (host) memory.

    When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT),
    you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
    in VmaAllocationCreateInfo::flags.

    It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g.
    vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo(),
    and not with generic memory allocation functions.
    */
    VMA_MEMORY_USAGE_AUTO_PREFER_HOST = 9,

    VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
} VmaMemoryUsage;

/// Flags to be passed as VmaAllocationCreateInfo::flags.
typedef enum VmaAllocationCreateFlagBits
{
    /** \brief Set this flag if the allocation should have its own memory block.

    Use it for special, big resources, like fullscreen images used as attachments.
    */
    VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,

    /** \brief Set this flag to only try to allocate from existing `VkDeviceMemory` blocks and never create a new such block.

    If the new allocation cannot be placed in any of the existing blocks, allocation
    fails with the `VK_ERROR_OUT_OF_DEVICE_MEMORY` error.

    You should not use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT and
    #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT at the same time. It makes no sense.
    */
    VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
    /** \brief Set this flag to use memory that will be persistently mapped and to retrieve a pointer to it.

    A pointer to the mapped memory will be returned through VmaAllocationInfo::pMappedData.

    It is valid to use this flag for an allocation made from a memory type that is not
    `HOST_VISIBLE`. The flag is then ignored and the memory is not mapped. This is
    useful if you need an allocation that is efficient to use on the GPU
    (`DEVICE_LOCAL`) and still want to map it directly if possible on platforms that
    support it (e.g. Intel GPU).
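
    For example, a persistently mapped staging buffer could be created as in the following
    sketch (`allocator` is assumed to exist; `myData`/`myDataSize` stand for the application's data):

    \code
    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
        VMA_ALLOCATION_CREATE_MAPPED_BIT;

    VkBuffer buf;
    VmaAllocation alloc;
    VmaAllocationInfo allocInfo;
    vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);

    // The memory stays mapped for the lifetime of the allocation.
    memcpy(allocInfo.pMappedData, myData, myDataSize);
    \endcode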
    */
    VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004,
    /** \deprecated Preserved for backward compatibility. Consider using vmaSetAllocationName() instead.

    Set this flag to treat VmaAllocationCreateInfo::pUserData as a pointer to a
    null-terminated string. Instead of copying the pointer value, a local copy of the
    string is made and stored in the allocation's `pName`. The string is automatically
    freed together with the allocation. It is also used in vmaBuildStatsString().
    */
    VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020,
    /** Allocation will be created from the upper stack in a double stack pool.

    This flag is only allowed for custom pools created with the #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT flag.
    */
    VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = 0x00000040,
    /** Create both buffer/image and allocation, but don't bind them together.
    It is useful when you want to perform the binding yourself in order to do some more advanced binding, e.g. using some extensions.
    The flag is meaningful only with functions that bind by default: vmaCreateBuffer(), vmaCreateImage().
    Otherwise it is ignored.

    If you want to make sure the new buffer/image is not tied to the new memory allocation
    through the `VkMemoryDedicatedAllocateInfoKHR` structure in case the allocation ends up in its own memory block,
    also use the flag #VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT.
    */
    VMA_ALLOCATION_CREATE_DONT_BIND_BIT = 0x00000080,
    /** Create the allocation only if the additional device memory required for it, if any, won't exceed
    the memory budget. Otherwise return `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
    */
    VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT = 0x00000100,
    /** \brief Set this flag if the allocated memory will have aliasing resources.

    Usage of this flag prevents supplying `VkMemoryDedicatedAllocateInfoKHR` when #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT is specified.
    Otherwise the created dedicated memory will not be suitable for aliasing resources, resulting in Vulkan Validation Layer errors.
    */
    VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT = 0x00000200,
    /**
    Requests the possibility to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT).

    - If you use #VMA_MEMORY_USAGE_AUTO or another `VMA_MEMORY_USAGE_AUTO*` value,
      you must use this flag to be able to map the allocation. Otherwise, mapping is incorrect.
    - If you use another value of #VmaMemoryUsage, this flag is ignored and mapping is always possible in memory types that are `HOST_VISIBLE`.
      This includes allocations created in \ref custom_memory_pools.

    Declares that mapped memory will only be written sequentially, e.g. using `memcpy()` or a loop writing number-by-number,
    never read or accessed randomly, so a memory type can be selected that is uncached and write-combined.

    \warning Violating this declaration may work correctly, but will likely be very slow.
    Watch out for implicit reads introduced by doing e.g. `pMappedData[i] += x;`.
    Better prepare your data in a local variable and `memcpy()` it to the mapped pointer all at once.
    */
    VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT = 0x00000400,
    /**
    Requests the possibility to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT).

    - If you use #VMA_MEMORY_USAGE_AUTO or another `VMA_MEMORY_USAGE_AUTO*` value,
      you must use this flag to be able to map the allocation. Otherwise, mapping is incorrect.
    - If you use another value of #VmaMemoryUsage, this flag is ignored and mapping is always possible in memory types that are `HOST_VISIBLE`.
      This includes allocations created in \ref custom_memory_pools.

    Declares that mapped memory can be read, written, and accessed in random order,
    so a `HOST_CACHED` memory type is required.
    */
    VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT = 0x00000800,
    /**
    Together with #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT,
    it says that despite the request for host access, a non-`HOST_VISIBLE` memory type can be selected
    if it may improve performance.

    By using this flag, you declare that you will check if the allocation ended up in a `HOST_VISIBLE` memory type
    (e.g. using vmaGetAllocationMemoryProperties()) and if not, you will create some "staging" buffer and
    issue an explicit transfer to write/read your data.
    To prepare for this possibility, don't forget to add appropriate flags like
    `VK_BUFFER_USAGE_TRANSFER_DST_BIT`, `VK_BUFFER_USAGE_TRANSFER_SRC_BIT` to the parameters of the created buffer or image.
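
    A minimal sketch of the check this flag obligates you to perform
    (`allocator` and `alloc` are assumed to exist):

    \code
    VkMemoryPropertyFlags memPropFlags;
    vmaGetAllocationMemoryProperties(allocator, alloc, &memPropFlags);
    if((memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    {
        // The allocation is mappable - data can be written directly.
    }
    else
    {
        // Not HOST_VISIBLE - create a staging buffer and issue an explicit transfer.
    }
    \endcode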
    */
    VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT = 0x00001000,
    /** Allocation strategy that chooses the smallest possible free range for the allocation
    to minimize memory usage and fragmentation, possibly at the expense of allocation time.
    */
    VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = 0x00010000,
    /** Allocation strategy that chooses the first suitable free range for the allocation -
    not necessarily in terms of the smallest offset but the one that is easiest and fastest to find
    to minimize allocation time, possibly at the expense of allocation quality.
    */
    VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = 0x00020000,
    /** Allocation strategy that always chooses the lowest offset in available space.
    This is not the most efficient strategy but achieves highly packed data.
    Used internally by defragmentation, not recommended in typical usage.
    */
    VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT  = 0x00040000,
    /** Alias to #VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT.
    */
    VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT,
    /** Alias to #VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT.
    */
    VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT,
    /** A bit mask to extract only `STRATEGY` bits from the entire set of flags.
    */
    VMA_ALLOCATION_CREATE_STRATEGY_MASK =
        VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT |
        VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT |
        VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,

    VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocationCreateFlagBits;
/// See #VmaAllocationCreateFlagBits.
typedef VkFlags VmaAllocationCreateFlags;

/// Flags to be passed as VmaPoolCreateInfo::flags.
typedef enum VmaPoolCreateFlagBits
{
    /** \brief Use this flag if you always allocate only buffers and linear images or only optimal images out of this pool and so Buffer-Image Granularity can be ignored.

    This is an optional optimization flag.

    If you always allocate using vmaCreateBuffer(), vmaCreateImage(),
    vmaAllocateMemoryForBuffer(), then you don't need to use it because the allocator
    knows the exact type of your allocations so it can handle Buffer-Image Granularity
    in the optimal way.

    If you also allocate using vmaAllocateMemoryForImage() or vmaAllocateMemory(),
    the exact type of such allocations is not known, so the allocator must be conservative
    in handling Buffer-Image Granularity, which can lead to suboptimal allocation
    (wasted memory). In that case, if you can make sure you always allocate only
    buffers and linear images or only optimal images out of this pool, use this flag
    to make the allocator disregard Buffer-Image Granularity and so make allocations
    faster and more optimal.
    */
    VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,

    /** \brief Enables alternative, linear allocation algorithm in this pool.

    Specify this flag to enable the linear allocation algorithm, which always creates
    new allocations after the last one and doesn't reuse space from allocations freed in
    between. It trades memory consumption for a simplified algorithm and data
    structure, which has better performance and uses less memory for metadata.

    By using this flag, you can achieve the behavior of free-at-once, stack,
    ring buffer, and double stack.
    For details, see documentation chapter \ref linear_algorithm.
    */
    VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004,

    /** Bit mask to extract only `ALGORITHM` bits from the entire set of flags.
    */
    VMA_POOL_CREATE_ALGORITHM_MASK =
        VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT,

    VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaPoolCreateFlagBits;
/// Flags to be passed as VmaPoolCreateInfo::flags. See #VmaPoolCreateFlagBits.
typedef VkFlags VmaPoolCreateFlags;

/// Flags to be passed as VmaDefragmentationInfo::flags.
typedef enum VmaDefragmentationFlagBits
{
    /** \brief Use a simple but fast algorithm for defragmentation.
    May not achieve best results but will require the least time to compute and the fewest allocations to copy.
    */
    VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT = 0x1,
    /** \brief Default defragmentation algorithm, applied also when no `ALGORITHM` flag is specified.
    Offers a balance between defragmentation quality and the amount of allocations and bytes that need to be moved.
    */
    VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT = 0x2,
    /** \brief Perform full defragmentation of memory.
    Can result in notably more time to compute and allocations to copy, but will achieve the best memory packing.
    */
    VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT = 0x4,
    /** \brief Use the most robust algorithm at the cost of time to compute and number of copies to make.
    Only available when `bufferImageGranularity` is greater than 1, since it aims to reduce
    alignment issues between different types of resources.
    Otherwise falls back to the same behavior as #VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT.
    */
    VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT = 0x8,

    /// A bit mask to extract only `ALGORITHM` bits from the entire set of flags.
    VMA_DEFRAGMENTATION_FLAG_ALGORITHM_MASK =
        VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT |
        VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT |
        VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT |
        VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT,

    VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaDefragmentationFlagBits;
/// See #VmaDefragmentationFlagBits.
typedef VkFlags VmaDefragmentationFlags;

/// Operation performed on a single defragmentation move. See structure #VmaDefragmentationMove.
typedef enum VmaDefragmentationMoveOperation
{
    /// Buffer/image has been recreated at `dstTmpAllocation`, data has been copied, old buffer/image has been destroyed. `srcAllocation` should be changed to point to the new place. This is the default value set by vmaBeginDefragmentationPass().
    VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY = 0,
    /// Set this value if you cannot move the allocation. The new place reserved at `dstTmpAllocation` will be freed. `srcAllocation` will remain unchanged.
    VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE = 1,
    /// Set this value if you decide to abandon the allocation and you destroyed the buffer/image. The new place reserved at `dstTmpAllocation` will be freed, along with `srcAllocation`, which will be destroyed.
    VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY = 2,
} VmaDefragmentationMoveOperation;

/** @} */

/**
\addtogroup group_virtual
@{
*/

/// Flags to be passed as VmaVirtualBlockCreateInfo::flags.
typedef enum VmaVirtualBlockCreateFlagBits
{
    /** \brief Enables alternative, linear allocation algorithm in this virtual block.

    Specify this flag to enable the linear allocation algorithm, which always creates
    new allocations after the last one and doesn't reuse space from allocations freed in
    between. It trades memory consumption for a simplified algorithm and data
    structure, which has better performance and uses less memory for metadata.

    By using this flag, you can achieve the behavior of free-at-once, stack,
    ring buffer, and double stack.
    For details, see documentation chapter \ref linear_algorithm.
    */
    VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT = 0x00000001,

    /** \brief Bit mask to extract only `ALGORITHM` bits from the entire set of flags.
    */
    VMA_VIRTUAL_BLOCK_CREATE_ALGORITHM_MASK =
        VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT,

    VMA_VIRTUAL_BLOCK_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaVirtualBlockCreateFlagBits;
/// Flags to be passed as VmaVirtualBlockCreateInfo::flags. See #VmaVirtualBlockCreateFlagBits.
typedef VkFlags VmaVirtualBlockCreateFlags;

/// Flags to be passed as VmaVirtualAllocationCreateInfo::flags.
typedef enum VmaVirtualAllocationCreateFlagBits
{
    /** \brief Allocation will be created from the upper stack in a double stack pool.

    This flag is only allowed for virtual blocks created with the #VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT flag.
    */
    VMA_VIRTUAL_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT,
    /** \brief Allocation strategy that tries to minimize memory usage.
    */
    VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT,
    /** \brief Allocation strategy that tries to minimize allocation time.
    */
    VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT,
    /** Allocation strategy that always chooses the lowest offset in available space.
    This is not the most efficient strategy but achieves highly packed data.
    */
    VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,
    /** \brief A bit mask to extract only `STRATEGY` bits from the entire set of flags.

    These strategy flags are binary compatible with equivalent flags in #VmaAllocationCreateFlagBits.
    */
    VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MASK = VMA_ALLOCATION_CREATE_STRATEGY_MASK,

    VMA_VIRTUAL_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaVirtualAllocationCreateFlagBits;
/// Flags to be passed as VmaVirtualAllocationCreateInfo::flags. See #VmaVirtualAllocationCreateFlagBits.
typedef VkFlags VmaVirtualAllocationCreateFlags;

/** @} */

#endif // _VMA_ENUM_DECLARATIONS

#ifndef _VMA_DATA_TYPES_DECLARATIONS

/**
\addtogroup group_init
@{ */

/** \struct VmaAllocator
\brief Represents the main object of this library - an initialized allocator.

Fill structure #VmaAllocatorCreateInfo and call function vmaCreateAllocator() to create it.
Call function vmaDestroyAllocator() to destroy it.

It is recommended to create just one object of this type per `VkDevice` object,
right after Vulkan is initialized, and to keep it alive until just before the Vulkan device is destroyed.
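
A minimal initialization sketch (assumes `instance`, `physicalDevice`, and `device`
were created earlier by the application):

\code
VmaAllocatorCreateInfo allocatorCreateInfo = {};
allocatorCreateInfo.vulkanApiVersion = VK_API_VERSION_1_2;
allocatorCreateInfo.physicalDevice = physicalDevice;
allocatorCreateInfo.device = device;
allocatorCreateInfo.instance = instance;

VmaAllocator allocator;
vmaCreateAllocator(&allocatorCreateInfo, &allocator);
\endcode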
*/
VK_DEFINE_HANDLE(VmaAllocator)

/** @} */

/**
\addtogroup group_alloc
@{
*/

/** \struct VmaPool
\brief Represents a custom memory pool.

Fill structure VmaPoolCreateInfo and call function vmaCreatePool() to create it.
Call function vmaDestroyPool() to destroy it.

For more information see [Custom memory pools](@ref choosing_memory_type_custom_memory_pools).
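
A minimal creation sketch (assumes `allocator` exists and `memTypeIndex` was chosen earlier,
e.g. using vmaFindMemoryTypeIndexForBufferInfo()):

\code
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;

VmaPool pool;
vmaCreatePool(allocator, &poolCreateInfo, &pool);
\endcode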
*/
VK_DEFINE_HANDLE(VmaPool)

/** \struct VmaAllocation
\brief Represents a single memory allocation.

It may be either a dedicated block of `VkDeviceMemory` or a specific region of a bigger block of this type
plus a unique offset.

There are multiple ways to create such an object.
You need to fill structure VmaAllocationCreateInfo.
For more information see [Choosing memory type](@ref choosing_memory_type).

Although the library provides convenience functions that create a Vulkan buffer or image,
allocate memory for it and bind them together,
binding of the allocation to a buffer or an image is out of scope of the allocation itself.
An allocation object can exist without a buffer/image bound,
binding can be done manually by the user, and destruction of the buffer/image can be done
independently of destruction of the allocation.

The object also remembers its size and some other information.
To retrieve this information, use function vmaGetAllocationInfo() and inspect the
returned structure VmaAllocationInfo.
*/
VK_DEFINE_HANDLE(VmaAllocation)

/** \struct VmaDefragmentationContext
\brief An opaque object that represents a started defragmentation process.

Fill structure #VmaDefragmentationInfo and call function vmaBeginDefragmentation() to create it.
Call function vmaEndDefragmentation() to destroy it.
*/
VK_DEFINE_HANDLE(VmaDefragmentationContext)

/** @} */

/**
\addtogroup group_virtual
@{
*/

/** \struct VmaVirtualAllocation
\brief Represents a single memory allocation made inside VmaVirtualBlock.

Use it as a unique identifier of a virtual allocation within a single block.

Use value `VK_NULL_HANDLE` to represent a null/invalid allocation.
*/
VK_DEFINE_NON_DISPATCHABLE_HANDLE(VmaVirtualAllocation);

/** @} */

/**
\addtogroup group_virtual
@{
*/

/** \struct VmaVirtualBlock
\brief Handle to a virtual block object that allows using the core allocation algorithm without allocating any real GPU memory.

Fill in the #VmaVirtualBlockCreateInfo structure and use vmaCreateVirtualBlock() to create it. Use vmaDestroyVirtualBlock() to destroy it.
For more information, see documentation chapter \ref virtual_allocator.

This object is not thread-safe - it should not be used from multiple threads simultaneously and must be synchronized externally.
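
A minimal usage sketch (the sizes below are illustrative):

\code
VmaVirtualBlockCreateInfo blockCreateInfo = {};
blockCreateInfo.size = 1048576; // 1 MB

VmaVirtualBlock block;
VkResult res = vmaCreateVirtualBlock(&blockCreateInfo, &block);

VmaVirtualAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.size = 4096;

VmaVirtualAllocation alloc;
VkDeviceSize offset;
res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, &offset);
\endcode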
*/
VK_DEFINE_HANDLE(VmaVirtualBlock)

/** @} */

/**
\addtogroup group_init
@{
*/

/// Callback function called after successful vkAllocateMemory.
typedef void (VKAPI_PTR* PFN_vmaAllocateDeviceMemoryFunction)(
    VmaAllocator VMA_NOT_NULL                    allocator,
    uint32_t                                     memoryType,
    VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,
    VkDeviceSize                                 size,
    void* VMA_NULLABLE                           pUserData);

/// Callback function called before vkFreeMemory.
typedef void (VKAPI_PTR* PFN_vmaFreeDeviceMemoryFunction)(
    VmaAllocator VMA_NOT_NULL                    allocator,
    uint32_t                                     memoryType,
    VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,
    VkDeviceSize                                 size,
    void* VMA_NULLABLE                           pUserData);

/** \brief Set of callbacks that the library will call for `vkAllocateMemory` and `vkFreeMemory`.

Provided for informative purposes, e.g. to gather statistics about the number of
allocations or the total amount of memory allocated in Vulkan.

Used in VmaAllocatorCreateInfo::pDeviceMemoryCallbacks.
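
A sketch of a matching callback (the function name and its body are illustrative):

\code
static void VKAPI_CALL MyVmaAllocateCallback(VmaAllocator allocator, uint32_t memoryType,
    VkDeviceMemory memory, VkDeviceSize size, void* pUserData)
{
    // E.g. accumulate `size` into a counter reachable through `pUserData`,
    // which comes from VmaDeviceMemoryCallbacks::pUserData.
}
\endcode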
*/
typedef struct VmaDeviceMemoryCallbacks
{
    /// Optional, can be null.
    PFN_vmaAllocateDeviceMemoryFunction VMA_NULLABLE pfnAllocate;
    /// Optional, can be null.
    PFN_vmaFreeDeviceMemoryFunction VMA_NULLABLE pfnFree;
    /// Optional, can be null.
    void* VMA_NULLABLE pUserData;
} VmaDeviceMemoryCallbacks;

/** \brief Pointers to some Vulkan functions - a subset used by the library.

Used in VmaAllocatorCreateInfo::pVulkanFunctions.
*/
typedef struct VmaVulkanFunctions
{
    /// Required when using VMA_DYNAMIC_VULKAN_FUNCTIONS.
    PFN_vkGetInstanceProcAddr VMA_NULLABLE vkGetInstanceProcAddr;
    /// Required when using VMA_DYNAMIC_VULKAN_FUNCTIONS.
    PFN_vkGetDeviceProcAddr VMA_NULLABLE vkGetDeviceProcAddr;
    PFN_vkGetPhysicalDeviceProperties VMA_NULLABLE vkGetPhysicalDeviceProperties;
    PFN_vkGetPhysicalDeviceMemoryProperties VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties;
    PFN_vkAllocateMemory VMA_NULLABLE vkAllocateMemory;
    PFN_vkFreeMemory VMA_NULLABLE vkFreeMemory;
    PFN_vkMapMemory VMA_NULLABLE vkMapMemory;
    PFN_vkUnmapMemory VMA_NULLABLE vkUnmapMemory;
    PFN_vkFlushMappedMemoryRanges VMA_NULLABLE vkFlushMappedMemoryRanges;
    PFN_vkInvalidateMappedMemoryRanges VMA_NULLABLE vkInvalidateMappedMemoryRanges;
    PFN_vkBindBufferMemory VMA_NULLABLE vkBindBufferMemory;
    PFN_vkBindImageMemory VMA_NULLABLE vkBindImageMemory;
    PFN_vkGetBufferMemoryRequirements VMA_NULLABLE vkGetBufferMemoryRequirements;
    PFN_vkGetImageMemoryRequirements VMA_NULLABLE vkGetImageMemoryRequirements;
    PFN_vkCreateBuffer VMA_NULLABLE vkCreateBuffer;
    PFN_vkDestroyBuffer VMA_NULLABLE vkDestroyBuffer;
    PFN_vkCreateImage VMA_NULLABLE vkCreateImage;
    PFN_vkDestroyImage VMA_NULLABLE vkDestroyImage;
    PFN_vkCmdCopyBuffer VMA_NULLABLE vkCmdCopyBuffer;
#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
    /// Fetch "vkGetBufferMemoryRequirements2" on Vulkan >= 1.1, fetch "vkGetBufferMemoryRequirements2KHR" when using VK_KHR_dedicated_allocation extension.
    PFN_vkGetBufferMemoryRequirements2KHR VMA_NULLABLE vkGetBufferMemoryRequirements2KHR;
    /// Fetch "vkGetImageMemoryRequirements2" on Vulkan >= 1.1, fetch "vkGetImageMemoryRequirements2KHR" when using VK_KHR_dedicated_allocation extension.
    PFN_vkGetImageMemoryRequirements2KHR VMA_NULLABLE vkGetImageMemoryRequirements2KHR;
#endif
#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
    /// Fetch "vkBindBufferMemory2" on Vulkan >= 1.1, fetch "vkBindBufferMemory2KHR" when using VK_KHR_bind_memory2 extension.
    PFN_vkBindBufferMemory2KHR VMA_NULLABLE vkBindBufferMemory2KHR;
    /// Fetch "vkBindImageMemory2" on Vulkan >= 1.1, fetch "vkBindImageMemory2KHR" when using VK_KHR_bind_memory2 extension.
    PFN_vkBindImageMemory2KHR VMA_NULLABLE vkBindImageMemory2KHR;
#endif
#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
    PFN_vkGetPhysicalDeviceMemoryProperties2KHR VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties2KHR;
#endif
#if VMA_VULKAN_VERSION >= 1003000
    /// Fetch from "vkGetDeviceBufferMemoryRequirements" on Vulkan >= 1.3, but you can also fetch it from "vkGetDeviceBufferMemoryRequirementsKHR" if you enabled extension VK_KHR_maintenance4.
    PFN_vkGetDeviceBufferMemoryRequirements VMA_NULLABLE vkGetDeviceBufferMemoryRequirements;
    /// Fetch from "vkGetDeviceImageMemoryRequirements" on Vulkan >= 1.3, but you can also fetch it from "vkGetDeviceImageMemoryRequirementsKHR" if you enabled extension VK_KHR_maintenance4.
    PFN_vkGetDeviceImageMemoryRequirements VMA_NULLABLE vkGetDeviceImageMemoryRequirements;
#endif
} VmaVulkanFunctions;

/// Description of an Allocator to be created.
typedef struct VmaAllocatorCreateInfo
{
    /// Flags for created allocator. Use #VmaAllocatorCreateFlagBits enum.
    VmaAllocatorCreateFlags flags;
    /// Vulkan physical device.
    /** It must be valid throughout the whole lifetime of the created allocator. */
    VkPhysicalDevice VMA_NOT_NULL physicalDevice;
    /// Vulkan device.
    /** It must be valid throughout the whole lifetime of the created allocator. */
    VkDevice VMA_NOT_NULL device;
    /// Preferred size of a single `VkDeviceMemory` block to be allocated from large heaps > 1 GiB. Optional.
    /** Set to 0 to use default, which is currently 256 MiB. */
    VkDeviceSize preferredLargeHeapBlockSize;
    /// Custom CPU memory allocation callbacks. Optional.
    /** Optional, can be null. When specified, will also be used for all CPU-side memory allocations. */
    const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks;
    /// Informative callbacks for `vkAllocateMemory`, `vkFreeMemory`. Optional.
    /** Optional, can be null. */
    const VmaDeviceMemoryCallbacks* VMA_NULLABLE pDeviceMemoryCallbacks;
    /** \brief Either null or a pointer to an array of limits on the maximum number of bytes that can be allocated out of particular Vulkan memory heaps.

    If not NULL, it must be a pointer to an array of
    `VkPhysicalDeviceMemoryProperties::memoryHeapCount` elements, defining the limit on the
    maximum number of bytes that can be allocated out of a particular Vulkan memory
    heap.

    Any of the elements may be equal to `VK_WHOLE_SIZE`, which means no limit on that
    heap. This is also the default in case of `pHeapSizeLimit` = NULL.

    If there is a limit defined for a heap:

    - If the user tries to allocate more memory from that heap using this allocator,
      the allocation fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
    - If the limit is smaller than the heap size reported in `VkMemoryHeap::size`, the
      value of this limit will be reported instead when using vmaGetMemoryProperties().

    Warning! Using this feature may not be equivalent to installing a GPU with a
    smaller amount of memory, because the graphics driver doesn't necessarily fail new
    allocations with the `VK_ERROR_OUT_OF_DEVICE_MEMORY` result when memory capacity is
    exceeded. It may return success and just silently migrate some device memory
    blocks to system RAM. This driver behavior can also be controlled using the
    VK_AMD_memory_overallocation_behavior extension.
    */
    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pHeapSizeLimit;

    /** \brief Pointers to Vulkan functions. Can be null.

    For details see [Pointers to Vulkan functions](@ref config_Vulkan_functions).
    */
    const VmaVulkanFunctions* VMA_NULLABLE pVulkanFunctions;
    /** \brief Handle to Vulkan instance object.

    Starting from version 3.0.0 this member is no longer optional, it must be set!
    */
    VkInstance VMA_NOT_NULL instance;
    /** \brief Optional. The highest version of Vulkan that the application is designed to use.

    It must be a value in the format as created by macro `VK_MAKE_VERSION` or a constant like: `VK_API_VERSION_1_1`, `VK_API_VERSION_1_0`.
    The patch version number specified is ignored. Only the major and minor versions are considered.
    It must be less than or equal (preferably equal) to the value passed to `vkCreateInstance` as `VkApplicationInfo::apiVersion`.
    Only versions 1.0, 1.1, 1.2, and 1.3 are supported by the current implementation.
    Leaving it initialized to zero is equivalent to `VK_API_VERSION_1_0`.
    */
    uint32_t vulkanApiVersion;
#if VMA_EXTERNAL_MEMORY
    /** \brief Either null or a pointer to an array of external memory handle types for each Vulkan memory type.

    If not NULL, it must be a pointer to an array of `VkPhysicalDeviceMemoryProperties::memoryTypeCount`
    elements, defining the external memory handle types of particular Vulkan memory types,
    to be passed using `VkExportMemoryAllocateInfoKHR`.

    Any of the elements may be equal to 0, which means not to use `VkExportMemoryAllocateInfoKHR` on this memory type.
    This is also the default in case of `pTypeExternalMemoryHandleTypes` = NULL.
    */
    const VkExternalMemoryHandleTypeFlagsKHR* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryTypeCount") pTypeExternalMemoryHandleTypes;
#endif // #if VMA_EXTERNAL_MEMORY
} VmaAllocatorCreateInfo;

/// Information about an existing #VmaAllocator object.
typedef struct VmaAllocatorInfo
{
    /** \brief Handle to Vulkan instance object.

    This is the same value as has been passed through VmaAllocatorCreateInfo::instance.
    */
    VkInstance VMA_NOT_NULL instance;
    /** \brief Handle to Vulkan physical device object.

    This is the same value as has been passed through VmaAllocatorCreateInfo::physicalDevice.
    */
    VkPhysicalDevice VMA_NOT_NULL physicalDevice;
    /** \brief Handle to Vulkan device object.

    This is the same value as has been passed through VmaAllocatorCreateInfo::device.
    */
    VkDevice VMA_NOT_NULL device;
} VmaAllocatorInfo;

/** @} */

/**
\addtogroup group_stats
@{
*/
1104 
1105 /** \brief Calculated statistics of memory usage e.g. in a specific memory type, heap, custom pool, or total.
1106 
1107 These are fast to calculate.
1108 See functions: vmaGetHeapBudgets(), vmaGetPoolStatistics().
1109 */
1110 typedef struct VmaStatistics
1111 {
1112     /** \brief Number of `VkDeviceMemory` objects - Vulkan memory blocks allocated.
1113     */
1114     uint32_t blockCount;
1115     /** \brief Number of #VmaAllocation objects allocated.
1116 
1117     Dedicated allocations have their own blocks, so each one adds 1 to `allocationCount` as well as `blockCount`.
1118     */
1119     uint32_t allocationCount;
1120     /** \brief Number of bytes allocated in `VkDeviceMemory` blocks.
1121 
1122     \note To avoid confusion, please be aware that what Vulkan calls an "allocation" - a whole `VkDeviceMemory` object
1123     (e.g. as in `VkPhysicalDeviceLimits::maxMemoryAllocationCount`) is called a "block" in VMA, while VMA calls
1124     "allocation" a #VmaAllocation object that represents a memory region sub-allocated from such block, usually for a single buffer or image.
1125     */
1126     VkDeviceSize blockBytes;
1127     /** \brief Total number of bytes occupied by all #VmaAllocation objects.
1128 
    Always less than or equal to `blockBytes`.
1130     Difference `(blockBytes - allocationBytes)` is the amount of memory allocated from Vulkan
1131     but unused by any #VmaAllocation.
1132     */
1133     VkDeviceSize allocationBytes;
1134 } VmaStatistics;
1135 
1136 /** \brief More detailed statistics than #VmaStatistics.
1137 
1138 These are slower to calculate. Use for debugging purposes.
1139 See functions: vmaCalculateStatistics(), vmaCalculatePoolStatistics().
1140 
The previous version of the statistics API provided averages, but they have been removed
because they can be easily calculated as:
1143 
1144 \code
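// Note: guard against division by zero if allocationCount or unusedRangeCount can be 0.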
1145 VkDeviceSize allocationSizeAvg = detailedStats.statistics.allocationBytes / detailedStats.statistics.allocationCount;
1146 VkDeviceSize unusedBytes = detailedStats.statistics.blockBytes - detailedStats.statistics.allocationBytes;
1147 VkDeviceSize unusedRangeSizeAvg = unusedBytes / detailedStats.unusedRangeCount;
1148 \endcode
1149 */
1150 typedef struct VmaDetailedStatistics
1151 {
1152     /// Basic statistics.
1153     VmaStatistics statistics;
1154     /// Number of free ranges of memory between allocations.
1155     uint32_t unusedRangeCount;
1156     /// Smallest allocation size. `VK_WHOLE_SIZE` if there are 0 allocations.
1157     VkDeviceSize allocationSizeMin;
1158     /// Largest allocation size. 0 if there are 0 allocations.
1159     VkDeviceSize allocationSizeMax;
1160     /// Smallest empty range size. `VK_WHOLE_SIZE` if there are 0 empty ranges.
1161     VkDeviceSize unusedRangeSizeMin;
1162     /// Largest empty range size. 0 if there are 0 empty ranges.
1163     VkDeviceSize unusedRangeSizeMax;
1164 } VmaDetailedStatistics;
1165 
/** \brief General statistics from the current state of the Allocator -
1167 total memory usage across all memory heaps and types.
1168 
1169 These are slower to calculate. Use for debugging purposes.
1170 See function vmaCalculateStatistics().
1171 */
1172 typedef struct VmaTotalStatistics
1173 {
1174     VmaDetailedStatistics memoryType[VK_MAX_MEMORY_TYPES];
1175     VmaDetailedStatistics memoryHeap[VK_MAX_MEMORY_HEAPS];
1176     VmaDetailedStatistics total;
1177 } VmaTotalStatistics;
1178 
1179 /** \brief Statistics of current memory usage and available budget for a specific memory heap.
1180 
1181 These are fast to calculate.
1182 See function vmaGetHeapBudgets().
1183 */
1184 typedef struct VmaBudget
1185 {
1186     /** \brief Statistics fetched from the library.
1187     */
1188     VmaStatistics statistics;
1189     /** \brief Estimated current memory usage of the program, in bytes.
1190 
1191     Fetched from system using VK_EXT_memory_budget extension if enabled.
1192 
    It might be different from `statistics.blockBytes` (usually higher) due to additional implicit objects
    also occupying the memory, like swapchains, pipelines, descriptor heaps, command buffers, or
    `VkDeviceMemory` blocks allocated outside of this library, if any.
1196     */
1197     VkDeviceSize usage;
1198     /** \brief Estimated amount of memory available to the program, in bytes.
1199 
1200     Fetched from system using VK_EXT_memory_budget extension if enabled.
1201 
    It might be different (most probably smaller) than `VkPhysicalDeviceMemoryProperties::memoryHeaps[heapIndex].size` due to factors
    external to the program, decided by the operating system.
    The difference `budget - usage` is the amount of additional memory that can probably
    be allocated without problems. Exceeding the budget may result in various problems.
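
    As a sketch, the remaining budget of one heap can be checked before making a large
    allocation like this; `allocator` and `heapIndex` are assumed to be known, e.g. with
    `heapIndex` derived from vmaGetMemoryProperties():

    \code
    VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
    vmaGetHeapBudgets(allocator, budgets);

    VkDeviceSize availableBytes = budgets[heapIndex].budget > budgets[heapIndex].usage ?
        budgets[heapIndex].budget - budgets[heapIndex].usage : 0;
    \endcode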
1206     */
1207     VkDeviceSize budget;
1208 } VmaBudget;
1209 
1210 /** @} */
1211 
1212 /**
1213 \addtogroup group_alloc
1214 @{
1215 */
1216 
1217 /** \brief Parameters of new #VmaAllocation.
1218 
1219 To be used with functions like vmaCreateBuffer(), vmaCreateImage(), and many others.
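
A short sketch of a commonly used combination - a persistently mapped allocation in an
automatically chosen memory type; the flags shown are one typical option, not the only one:

\code
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT |
    VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT;
\endcode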
1220 */
1221 typedef struct VmaAllocationCreateInfo
1222 {
1223     /// Use #VmaAllocationCreateFlagBits enum.
1224     VmaAllocationCreateFlags flags;
1225     /** \brief Intended usage of memory.
1226 
    You can leave #VMA_MEMORY_USAGE_UNKNOWN if you specify memory requirements in another way. \n
    If `pool` is not null, this member is ignored.
1229     */
1230     VmaMemoryUsage usage;
1231     /** \brief Flags that must be set in a Memory Type chosen for an allocation.
1232 
    Leave 0 if you specify memory requirements in another way. \n
    If `pool` is not null, this member is ignored.
    */
1235     VkMemoryPropertyFlags requiredFlags;
1236     /** \brief Flags that preferably should be set in a memory type chosen for an allocation.
1237 
1238     Set to 0 if no additional flags are preferred. \n
1239     If `pool` is not null, this member is ignored. */
1240     VkMemoryPropertyFlags preferredFlags;
1241     /** \brief Bitmask containing one bit set for every memory type acceptable for this allocation.
1242 
1243     Value 0 is equivalent to `UINT32_MAX` - it means any memory type is accepted if
1244     it meets other requirements specified by this structure, with no further
1245     restrictions on memory type index. \n
1246     If `pool` is not null, this member is ignored.
1247     */
1248     uint32_t memoryTypeBits;
1249     /** \brief Pool that this allocation should be created in.
1250 
1251     Leave `VK_NULL_HANDLE` to allocate from default pool. If not null, members:
1252     `usage`, `requiredFlags`, `preferredFlags`, `memoryTypeBits` are ignored.
1253     */
1254     VmaPool VMA_NULLABLE pool;
1255     /** \brief Custom general-purpose pointer that will be stored in #VmaAllocation, can be read as VmaAllocationInfo::pUserData and changed using vmaSetAllocationUserData().
1256 
    If #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is used, it must be either
    null or a pointer to a null-terminated string. The string will then be copied to an
    internal buffer, so it doesn't need to remain valid after the allocation call.
1260     */
1261     void* VMA_NULLABLE pUserData;
1262     /** \brief A floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations.
1263 
1264     It is used only when #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT flag was used during creation of the #VmaAllocator object
1265     and this allocation ends up as dedicated or is explicitly forced as dedicated using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
1266     Otherwise, it has the priority of a memory block where it is placed and this variable is ignored.
1267     */
1268     float priority;
1269 } VmaAllocationCreateInfo;
1270 
1271 /// Describes parameter of created #VmaPool.
1272 typedef struct VmaPoolCreateInfo
1273 {
1274     /** \brief Vulkan memory type index to allocate this pool from.
1275     */
1276     uint32_t memoryTypeIndex;
1277     /** \brief Use combination of #VmaPoolCreateFlagBits.
1278     */
1279     VmaPoolCreateFlags flags;
1280     /** \brief Size of a single `VkDeviceMemory` block to be allocated as part of this pool, in bytes. Optional.
1281 
    Specify a nonzero value to set an explicit, constant size of memory blocks used by this
    pool.

    Leave 0 to use the default and let the library manage block sizes automatically.
    Sizes of particular blocks may vary.
    In this case, the pool will also support dedicated allocations.
1288     */
1289     VkDeviceSize blockSize;
1290     /** \brief Minimum number of blocks to be always allocated in this pool, even if they stay empty.
1291 
    Set to 0 to have no preallocated blocks and allow the pool to be completely empty.
1293     */
1294     size_t minBlockCount;
1295     /** \brief Maximum number of blocks that can be allocated in this pool. Optional.
1296 
    Set to 0 to use the default, which is `SIZE_MAX`, meaning no limit.

    Set to the same value as VmaPoolCreateInfo::minBlockCount to have a fixed amount of memory allocated
    throughout the whole lifetime of this pool.
1301     */
1302     size_t maxBlockCount;
1303     /** \brief A floating-point value between 0 and 1, indicating the priority of the allocations in this pool relative to other memory allocations.
1304 
1305     It is used only when #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT flag was used during creation of the #VmaAllocator object.
1306     Otherwise, this variable is ignored.
1307     */
1308     float priority;
1309     /** \brief Additional minimum alignment to be used for all allocations created from this pool. Can be 0.
1310 
    Leave 0 (default) not to impose any additional alignment. If not 0, it must be a power of two.
    It can be useful in cases where the alignment returned by Vulkan functions like `vkGetBufferMemoryRequirements` is not enough,
    e.g. when doing interop with OpenGL.
1314     */
1315     VkDeviceSize minAllocationAlignment;
1316     /** \brief Additional `pNext` chain to be attached to `VkMemoryAllocateInfo` used for every allocation made by this pool. Optional.
1317 
1318     Optional, can be null. If not null, it must point to a `pNext` chain of structures that can be attached to `VkMemoryAllocateInfo`.
1319     It can be useful for special needs such as adding `VkExportMemoryAllocateInfoKHR`.
1320     Structures pointed by this member must remain alive and unchanged for the whole lifetime of the custom pool.
1321 
    Please note that some structures, e.g. `VkMemoryPriorityAllocateInfoEXT`, `VkMemoryDedicatedAllocateInfoKHR`,
    can be attached automatically by this library when you use other, more convenient features of it.
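
    A hypothetical sketch of attaching `VkExportMemoryAllocateInfoKHR` to a pool; the
    handle type used is only an illustrative assumption:

    \code
    VkExportMemoryAllocateInfoKHR exportInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR };
    exportInfo.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR; // An assumption.

    VmaPoolCreateInfo poolCreateInfo = {};
    // ... fill other members ...
    poolCreateInfo.pMemoryAllocateNext = &exportInfo; // Must stay alive as long as the pool.
    \endcode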
1324     */
1325     void* VMA_NULLABLE pMemoryAllocateNext;
1326 } VmaPoolCreateInfo;
1327 
1328 /** @} */
1329 
1330 /**
1331 \addtogroup group_alloc
1332 @{
1333 */
1334 
/// Parameters of #VmaAllocation objects that can be retrieved using function vmaGetAllocationInfo().
1336 typedef struct VmaAllocationInfo
1337 {
1338     /** \brief Memory type index that this allocation was allocated from.
1339 
1340     It never changes.
1341     */
1342     uint32_t memoryType;
1343     /** \brief Handle to Vulkan memory object.
1344 
1345     Same memory object can be shared by multiple allocations.
1346 
1347     It can change after the allocation is moved during \ref defragmentation.
1348     */
1349     VkDeviceMemory VMA_NULLABLE_NON_DISPATCHABLE deviceMemory;
1350     /** \brief Offset in `VkDeviceMemory` object to the beginning of this allocation, in bytes. `(deviceMemory, offset)` pair is unique to this allocation.
1351 
    You usually don't need to use this offset. If you create a buffer or an image together with the allocation using e.g.
    vmaCreateBuffer() or vmaCreateImage(), functions that operate on those resources refer to the beginning of the buffer or image,
    not to the entire device memory block. Functions like vmaMapMemory() and vmaBindBufferMemory() also refer to the beginning of the allocation
    and apply this offset automatically.
1356 
1357     It can change after the allocation is moved during \ref defragmentation.
1358     */
1359     VkDeviceSize offset;
1360     /** \brief Size of this allocation, in bytes.
1361 
1362     It never changes.
1363 
    \note The allocation size returned in this variable may be greater than the size
    requested for the resource, e.g. as `VkBufferCreateInfo::size`. The whole size of the
    allocation is accessible for operations on memory, e.g. using a pointer after
    mapping with vmaMapMemory(), but operations on the resource, e.g. using
    `vkCmdCopyBuffer`, must be limited to the size of the resource.
1369     */
1370     VkDeviceSize size;
1371     /** \brief Pointer to the beginning of this allocation as mapped data.
1372 
1373     If the allocation hasn't been mapped using vmaMapMemory() and hasn't been
1374     created with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag, this value is null.
1375 
1376     It can change after call to vmaMapMemory(), vmaUnmapMemory().
1377     It can also change after the allocation is moved during \ref defragmentation.
1378     */
1379     void* VMA_NULLABLE pMappedData;
1380     /** \brief Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
1381 
1382     It can change after call to vmaSetAllocationUserData() for this allocation.
1383     */
1384     void* VMA_NULLABLE pUserData;
1385     /** \brief Custom allocation name that was set with vmaSetAllocationName().
1386 
1387     It can change after call to vmaSetAllocationName() for this allocation.
1388 
    Another way to set a custom name is to pass it in VmaAllocationCreateInfo::pUserData with
    the additional flag #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT set [DEPRECATED].
1391     */
1392     const char* VMA_NULLABLE pName;
1393 } VmaAllocationInfo;
1394 
1395 /** \brief Parameters for defragmentation.
1396 
1397 To be used with function vmaBeginDefragmentation().
1398 */
1399 typedef struct VmaDefragmentationInfo
1400 {
1401     /// \brief Use combination of #VmaDefragmentationFlagBits.
1402     VmaDefragmentationFlags flags;
1403     /** \brief Custom pool to be defragmented.
1404 
    If null, default pools will undergo the defragmentation process.
1406     */
1407     VmaPool VMA_NULLABLE pool;
    /** \brief Maximum number of bytes that can be copied during a single pass, while moving allocations to different places.
1409 
1410     `0` means no limit.
1411     */
1412     VkDeviceSize maxBytesPerPass;
    /** \brief Maximum number of allocations that can be moved during a single pass to a different place.
1414 
1415     `0` means no limit.
1416     */
1417     uint32_t maxAllocationsPerPass;
1418 } VmaDefragmentationInfo;
1419 
1420 /// Single move of an allocation to be done for defragmentation.
1421 typedef struct VmaDefragmentationMove
1422 {
1423     /// Operation to be performed on the allocation by vmaEndDefragmentationPass(). Default value is #VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY. You can modify it.
1424     VmaDefragmentationMoveOperation operation;
1425     /// Allocation that should be moved.
1426     VmaAllocation VMA_NOT_NULL srcAllocation;
1427     /** \brief Temporary allocation pointing to destination memory that will replace `srcAllocation`.
1428 
1429     \warning Do not store this allocation in your data structures! It exists only temporarily, for the duration of the defragmentation pass,
1430     to be used for binding new buffer/image to the destination memory using e.g. vmaBindBufferMemory().
1431     vmaEndDefragmentationPass() will destroy it and make `srcAllocation` point to this memory.
1432     */
1433     VmaAllocation VMA_NOT_NULL dstTmpAllocation;
1434 } VmaDefragmentationMove;
1435 
1436 /** \brief Parameters for incremental defragmentation steps.
1437 
1438 To be used with function vmaBeginDefragmentationPass().
1439 */
1440 typedef struct VmaDefragmentationPassMoveInfo
1441 {
1442     /// Number of elements in the `pMoves` array.
1443     uint32_t moveCount;
1444     /** \brief Array of moves to be performed by the user in the current defragmentation pass.
1445 
1446     Pointer to an array of `moveCount` elements, owned by VMA, created in vmaBeginDefragmentationPass(), destroyed in vmaEndDefragmentationPass().
1447 
1448     For each element, you should:
1449 
    1. Create a new buffer/image in the place pointed to by VmaDefragmentationMove::dstTmpAllocation and bind it there using e.g. vmaBindBufferMemory().
    2. Copy data from VmaDefragmentationMove::srcAllocation, e.g. using `vkCmdCopyBuffer` or `vkCmdCopyImage`.
1452     3. Make sure these commands finished executing on the GPU.
1453     4. Destroy the old buffer/image.
1454 
    Only then can you finish the defragmentation pass by calling vmaEndDefragmentationPass().
1456     After this call, the allocation will point to the new place in memory.
1457 
    Alternatively, if you cannot move a specific allocation, you can set VmaDefragmentationMove::operation to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE.
1459 
1460     Alternatively, if you decide you want to completely remove the allocation:
1461 
1462     1. Destroy its buffer/image.
1463     2. Set VmaDefragmentationMove::operation to #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY.
1464 
1465     Then, after vmaEndDefragmentationPass() the allocation will be freed.
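
    A condensed sketch of the whole pass loop follows, assuming `allocator` is an existing
    #VmaAllocator; resource re-creation and the GPU copies are elided as comments, and
    error handling is omitted:

    \code
    VmaDefragmentationInfo defragInfo = {};
    VmaDefragmentationContext defragCtx;
    VkResult res = vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx);

    for(;;)
    {
        VmaDefragmentationPassMoveInfo pass;
        res = vmaBeginDefragmentationPass(allocator, defragCtx, &pass);
        if(res == VK_SUCCESS)
            break; // No more moves are possible.
        // res == VK_INCOMPLETE: for each pass.pMoves[i], create a new buffer/image bound to
        // pass.pMoves[i].dstTmpAllocation, copy the data from srcAllocation on the GPU,
        // wait for the copies to finish, then destroy the old buffer/image.
        res = vmaEndDefragmentationPass(allocator, defragCtx, &pass);
        if(res == VK_SUCCESS)
            break; // Defragmentation is complete.
    }

    vmaEndDefragmentation(allocator, defragCtx, NULL);
    \endcode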
1466     */
1467     VmaDefragmentationMove* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(moveCount) pMoves;
1468 } VmaDefragmentationPassMoveInfo;
1469 
1470 /// Statistics returned for defragmentation process in function vmaEndDefragmentation().
1471 typedef struct VmaDefragmentationStats
1472 {
1473     /// Total number of bytes that have been copied while moving allocations to different places.
1474     VkDeviceSize bytesMoved;
1475     /// Total number of bytes that have been released to the system by freeing empty `VkDeviceMemory` objects.
1476     VkDeviceSize bytesFreed;
1477     /// Number of allocations that have been moved to different places.
1478     uint32_t allocationsMoved;
1479     /// Number of empty `VkDeviceMemory` objects that have been released to the system.
1480     uint32_t deviceMemoryBlocksFreed;
1481 } VmaDefragmentationStats;
1482 
1483 /** @} */
1484 
1485 /**
1486 \addtogroup group_virtual
1487 @{
1488 */
1489 
1490 /// Parameters of created #VmaVirtualBlock object to be passed to vmaCreateVirtualBlock().
1491 typedef struct VmaVirtualBlockCreateInfo
1492 {
1493     /** \brief Total size of the virtual block.
1494 
    Sizes can be expressed in bytes or in any units you want, as long as you are consistent in using them.
    For example, if you allocate from some array of structures, 1 can mean a single instance of the entire structure.
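
    A minimal sketch of creating a virtual block; the size is illustrative:

    \code
    VmaVirtualBlockCreateInfo blockCreateInfo = {};
    blockCreateInfo.size = 1048576; // 1 MB, or 1M custom units.

    VmaVirtualBlock block;
    VkResult res = vmaCreateVirtualBlock(&blockCreateInfo, &block);
    \endcode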
1497     */
1498     VkDeviceSize size;
1499 
1500     /** \brief Use combination of #VmaVirtualBlockCreateFlagBits.
1501     */
1502     VmaVirtualBlockCreateFlags flags;
1503 
1504     /** \brief Custom CPU memory allocation callbacks. Optional.
1505 
    Can be null. When specified, these callbacks will be used for all CPU-side memory allocations.
1507     */
1508     const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks;
1509 } VmaVirtualBlockCreateInfo;
1510 
1511 /// Parameters of created virtual allocation to be passed to vmaVirtualAllocate().
1512 typedef struct VmaVirtualAllocationCreateInfo
1513 {
1514     /** \brief Size of the allocation.
1515 
1516     Cannot be zero.
1517     */
1518     VkDeviceSize size;
1519     /** \brief Required alignment of the allocation. Optional.
1520 
    Must be a power of two. The special value 0 has the same meaning as 1: no special alignment is required, so the allocation can start at any offset.
1522     */
1523     VkDeviceSize alignment;
1524     /** \brief Use combination of #VmaVirtualAllocationCreateFlagBits.
1525     */
1526     VmaVirtualAllocationCreateFlags flags;
1527     /** \brief Custom pointer to be associated with the allocation. Optional.
1528 
1529     It can be any value and can be used for user-defined purposes. It can be fetched or changed later.
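
    Continuing the virtual block sketch above (`block` comes from vmaCreateVirtualBlock()),
    a sub-allocation could be made like this:

    \code
    VmaVirtualAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.size = 4096; // Illustrative size, in your chosen units.

    VmaVirtualAllocation alloc;
    VkDeviceSize offset;
    VkResult res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, &offset);
    \endcode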
1530     */
1531     void* VMA_NULLABLE pUserData;
1532 } VmaVirtualAllocationCreateInfo;
1533 
1534 /// Parameters of an existing virtual allocation, returned by vmaGetVirtualAllocationInfo().
1535 typedef struct VmaVirtualAllocationInfo
1536 {
1537     /** \brief Offset of the allocation.
1538 
1539     Offset at which the allocation was made.
1540     */
1541     VkDeviceSize offset;
1542     /** \brief Size of the allocation.
1543 
1544     Same value as passed in VmaVirtualAllocationCreateInfo::size.
1545     */
1546     VkDeviceSize size;
1547     /** \brief Custom pointer associated with the allocation.
1548 
1549     Same value as passed in VmaVirtualAllocationCreateInfo::pUserData or to vmaSetVirtualAllocationUserData().
1550     */
1551     void* VMA_NULLABLE pUserData;
1552 } VmaVirtualAllocationInfo;
1553 
1554 /** @} */
1555 
1556 #endif // _VMA_DATA_TYPES_DECLARATIONS
1557 
1558 #ifndef _VMA_FUNCTION_HEADERS
1559 
1560 /**
1561 \addtogroup group_init
1562 @{
1563 */
1564 
1565 /// Creates #VmaAllocator object.
1566 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
1567     const VmaAllocatorCreateInfo* VMA_NOT_NULL pCreateInfo,
1568     VmaAllocator VMA_NULLABLE* VMA_NOT_NULL pAllocator);
1569 
1570 /// Destroys allocator object.
1571 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
1572     VmaAllocator VMA_NULLABLE allocator);
1573 
1574 /** \brief Returns information about existing #VmaAllocator object - handle to Vulkan device etc.
1575 
It might be useful if you want to keep just the #VmaAllocator handle and fetch the other required handles to
`VkPhysicalDevice`, `VkDevice` etc. using this function every time they are needed.
1578 */
1579 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(
1580     VmaAllocator VMA_NOT_NULL allocator,
1581     VmaAllocatorInfo* VMA_NOT_NULL pAllocatorInfo);
1582 
1583 /**
PhysicalDeviceProperties are fetched from the physicalDevice by the allocator.
You can access them here, without fetching them again on your own.
1586 */
1587 VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
1588     VmaAllocator VMA_NOT_NULL allocator,
1589     const VkPhysicalDeviceProperties* VMA_NULLABLE* VMA_NOT_NULL ppPhysicalDeviceProperties);
1590 
1591 /**
PhysicalDeviceMemoryProperties are fetched from the physicalDevice by the allocator.
You can access them here, without fetching them again on your own.
1594 */
1595 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
1596     VmaAllocator VMA_NOT_NULL allocator,
1597     const VkPhysicalDeviceMemoryProperties* VMA_NULLABLE* VMA_NOT_NULL ppPhysicalDeviceMemoryProperties);
1598 
1599 /**
1600 \brief Given Memory Type Index, returns Property Flags of this memory type.
1601 
1602 This is just a convenience function. Same information can be obtained using
1603 vmaGetMemoryProperties().
1604 */
1605 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
1606     VmaAllocator VMA_NOT_NULL allocator,
1607     uint32_t memoryTypeIndex,
1608     VkMemoryPropertyFlags* VMA_NOT_NULL pFlags);
1609 
1610 /** \brief Sets index of the current frame.
1611 */
1612 VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
1613     VmaAllocator VMA_NOT_NULL allocator,
1614     uint32_t frameIndex);
1615 
1616 /** @} */
1617 
1618 /**
1619 \addtogroup group_stats
1620 @{
1621 */
1622 
/** \brief Retrieves statistics from the current state of the Allocator.
1624 
1625 This function is called "calculate" not "get" because it has to traverse all
1626 internal data structures, so it may be quite slow. Use it for debugging purposes.
For faster but briefer statistics, suitable to be called every frame or every allocation,
use vmaGetHeapBudgets().
1629 
Note that when using the allocator from multiple threads, the returned information may immediately
become outdated.
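
A debug-time sketch: compute the total number of bytes allocated from Vulkan by this allocator.

\code
VmaTotalStatistics stats;
vmaCalculateStatistics(allocator, &stats);
VkDeviceSize totalBlockBytes = stats.total.statistics.blockBytes;
\endcode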
1632 */
1633 VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStatistics(
1634     VmaAllocator VMA_NOT_NULL allocator,
1635     VmaTotalStatistics* VMA_NOT_NULL pStats);
1636 
1637 /** \brief Retrieves information about current memory usage and budget for all memory heaps.
1638 
1639 \param allocator
\param[out] pBudgets Must point to an array with at least as many elements as the number of memory heaps in the physical device used.
1641 
1642 This function is called "get" not "calculate" because it is very fast, suitable to be called
1643 every frame or every allocation. For more detailed statistics use vmaCalculateStatistics().
1644 
Note that when using the allocator from multiple threads, the returned information may immediately
become outdated.
1647 */
1648 VMA_CALL_PRE void VMA_CALL_POST vmaGetHeapBudgets(
1649     VmaAllocator VMA_NOT_NULL allocator,
1650     VmaBudget* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pBudgets);
1651 
1652 /** @} */
1653 
1654 /**
1655 \addtogroup group_alloc
1656 @{
1657 */
1658 
1659 /**
1660 \brief Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
1661 
1662 This algorithm tries to find a memory type that:
1663 
1664 - Is allowed by memoryTypeBits.
1665 - Contains all the flags from pAllocationCreateInfo->requiredFlags.
1666 - Matches intended usage.
1667 - Has as many flags from pAllocationCreateInfo->preferredFlags as possible.
1668 
\return Returns VK_ERROR_FEATURE_NOT_PRESENT if not found. Receiving such a result
from this function or any other allocating function probably means that your
device doesn't support any memory type with the requested features for the specific
type of resource you want to use it for. Please check the parameters of your
resource, like image layout (OPTIMAL versus LINEAR) or mip level count.
1674 */
1675 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
1676     VmaAllocator VMA_NOT_NULL allocator,
1677     uint32_t memoryTypeBits,
1678     const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
1679     uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
1680 
1681 /**
1682 \brief Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
1683 
It can be useful e.g. to determine the value to be used as VmaPoolCreateInfo::memoryTypeIndex.
1685 It internally creates a temporary, dummy buffer that never has memory bound.
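
A sketch of finding a memory type index for a uniform buffer; the size and usage flags
are illustrative assumptions:

\code
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;

uint32_t memTypeIndex;
VkResult res = vmaFindMemoryTypeIndexForBufferInfo(allocator,
    &bufCreateInfo, &allocCreateInfo, &memTypeIndex);
\endcode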
1686 */
1687 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
1688     VmaAllocator VMA_NOT_NULL allocator,
1689     const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
1690     const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
1691     uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
1692 
1693 /**
1694 \brief Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
1695 
It can be useful e.g. to determine the value to be used as VmaPoolCreateInfo::memoryTypeIndex.
1697 It internally creates a temporary, dummy image that never has memory bound.
1698 */
1699 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
1700     VmaAllocator VMA_NOT_NULL allocator,
1701     const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
1702     const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
1703     uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
1704 
1705 /** \brief Allocates Vulkan device memory and creates #VmaPool object.
1706 
1707 \param allocator Allocator object.
1708 \param pCreateInfo Parameters of pool to create.
1709 \param[out] pPool Handle to created pool.
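
A sketch continuing from the vmaFindMemoryTypeIndexForBufferInfo() example above,
with `memTypeIndex` obtained there:

\code
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;

VmaPool pool;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
\endcode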
1710 */
1711 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
1712     VmaAllocator VMA_NOT_NULL allocator,
1713     const VmaPoolCreateInfo* VMA_NOT_NULL pCreateInfo,
1714     VmaPool VMA_NULLABLE* VMA_NOT_NULL pPool);
1715 
1716 /** \brief Destroys #VmaPool object and frees Vulkan device memory.
1717 */
1718 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
1719     VmaAllocator VMA_NOT_NULL allocator,
1720     VmaPool VMA_NULLABLE pool);
1721 
1722 /** @} */
1723 
1724 /**
1725 \addtogroup group_stats
1726 @{
1727 */
1728 
1729 /** \brief Retrieves statistics of existing #VmaPool object.
1730 
1731 \param allocator Allocator object.
1732 \param pool Pool object.
1733 \param[out] pPoolStats Statistics of specified pool.
1734 */
1735 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStatistics(
1736     VmaAllocator VMA_NOT_NULL allocator,
1737     VmaPool VMA_NOT_NULL pool,
1738     VmaStatistics* VMA_NOT_NULL pPoolStats);
1739 
1740 /** \brief Retrieves detailed statistics of existing #VmaPool object.
1741 
1742 \param allocator Allocator object.
1743 \param pool Pool object.
1744 \param[out] pPoolStats Statistics of specified pool.
1745 */
1746 VMA_CALL_PRE void VMA_CALL_POST vmaCalculatePoolStatistics(
1747     VmaAllocator VMA_NOT_NULL allocator,
1748     VmaPool VMA_NOT_NULL pool,
1749     VmaDetailedStatistics* VMA_NOT_NULL pPoolStats);
1750 
1751 /** @} */
1752 
1753 /**
1754 \addtogroup group_alloc
1755 @{
1756 */
1757 
/** \brief Checks the magic number in margins around all allocations in the given memory pool in search of corruption.

Corruption detection is enabled only when the `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero,
`VMA_DEBUG_MARGIN` is defined to nonzero, and the pool is created in a memory type that is
`HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection).
1763 
1764 Possible return values:
1765 
1766 - `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for specified pool.
1767 - `VK_SUCCESS` - corruption detection has been performed and succeeded.
1768 - `VK_ERROR_UNKNOWN` - corruption detection has been performed and found memory corruptions around one of the allocations.
1769   `VMA_ASSERT` is also fired in that case.
1770 - Other value: Error returned by Vulkan, e.g. memory mapping failure.
1771 */
1772 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(
1773     VmaAllocator VMA_NOT_NULL allocator,
1774     VmaPool VMA_NOT_NULL pool);
1775 
1776 /** \brief Retrieves name of a custom pool.
1777 
After the call, `*ppName` is either null or points to an internally-owned, null-terminated string
containing the name of the pool that was previously set. The pointer becomes invalid when the pool is
destroyed or its name is changed using vmaSetPoolName().
1781 */
1782 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
1783     VmaAllocator VMA_NOT_NULL allocator,
1784     VmaPool VMA_NOT_NULL pool,
1785     const char* VMA_NULLABLE* VMA_NOT_NULL ppName);
1786 
1787 /** \brief Sets name of a custom pool.
1788 
`pName` can be either null or a pointer to a null-terminated string with a new name for the pool.
The function makes an internal copy of the string, so it can be changed or freed immediately after this call.
1791 */
1792 VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
1793     VmaAllocator VMA_NOT_NULL allocator,
1794     VmaPool VMA_NOT_NULL pool,
1795     const char* VMA_NULLABLE pName);
1796 
1797 /** \brief General purpose memory allocation.
1798 
1799 \param allocator
1800 \param pVkMemoryRequirements
1801 \param pCreateInfo
1802 \param[out] pAllocation Handle to allocated memory.
1803 \param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
1804 
1805 You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages().
1806 
1807 It is recommended to use vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage(),
1808 vmaCreateBuffer(), vmaCreateImage() instead whenever possible.
1809 */
1810 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
1811     VmaAllocator VMA_NOT_NULL allocator,
1812     const VkMemoryRequirements* VMA_NOT_NULL pVkMemoryRequirements,
1813     const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
1814     VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
1815     VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
1816 
1817 /** \brief General purpose memory allocation for multiple allocation objects at once.
1818 
1819 \param allocator Allocator object.
1820 \param pVkMemoryRequirements Memory requirements for each allocation.
1821 \param pCreateInfo Creation parameters for each allocation.
1822 \param allocationCount Number of allocations to make.
1823 \param[out] pAllocations Pointer to array that will be filled with handles to created allocations.
1824 \param[out] pAllocationInfo Optional. Pointer to array that will be filled with parameters of created allocations.
1825 
1826 You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages().
1827 
The word "pages" is just a suggestion to use this function to allocate pieces of memory needed for sparse binding.
It is just a general purpose allocation function able to make multiple allocations at once.
It may be internally optimized to be more efficient than calling vmaAllocateMemory() `allocationCount` times.

All allocations are made using the same parameters. All of them are created out of the same memory pool and type.
If any allocation fails, all allocations already made within this function call are also freed, so that when
the returned result is not `VK_SUCCESS`, the `pAllocations` array is always entirely filled with `VK_NULL_HANDLE`.
1835 */
1836 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
1837     VmaAllocator VMA_NOT_NULL allocator,
1838     const VkMemoryRequirements* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pVkMemoryRequirements,
1839     const VmaAllocationCreateInfo* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pCreateInfo,
1840     size_t allocationCount,
1841     VmaAllocation VMA_NULLABLE* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations,
1842     VmaAllocationInfo* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationInfo);
1843 
1844 /** \brief Allocates memory suitable for given `VkBuffer`.
1845 
1846 \param allocator
1847 \param buffer
1848 \param pCreateInfo
1849 \param[out] pAllocation Handle to allocated memory.
1850 \param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
1851 
1852 It only creates #VmaAllocation. To bind the memory to the buffer, use vmaBindBufferMemory().
1853 
1854 This is a special-purpose function. In most cases you should use vmaCreateBuffer().
1855 
1856 You must free the allocation using vmaFreeMemory() when no longer needed.
1857 */
1858 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
1859     VmaAllocator VMA_NOT_NULL allocator,
1860     VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
1861     const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
1862     VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
1863     VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
1864 
1865 /** \brief Allocates memory suitable for given `VkImage`.
1866 
1867 \param allocator
1868 \param image
1869 \param pCreateInfo
1870 \param[out] pAllocation Handle to allocated memory.
1871 \param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
1872 
It only creates #VmaAllocation. To bind the memory to the image, use vmaBindImageMemory().
1874 
1875 This is a special-purpose function. In most cases you should use vmaCreateImage().
1876 
1877 You must free the allocation using vmaFreeMemory() when no longer needed.
1878 */
1879 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
1880     VmaAllocator VMA_NOT_NULL allocator,
1881     VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
1882     const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
1883     VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
1884     VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
1885 
1886 /** \brief Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
1887 
Passing `VK_NULL_HANDLE` as `allocation` is valid. Such a function call is just skipped.
1889 */
1890 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
1891     VmaAllocator VMA_NOT_NULL allocator,
1892     const VmaAllocation VMA_NULLABLE allocation);
1893 
1894 /** \brief Frees memory and destroys multiple allocations.
1895 
The word "pages" is just a suggestion to use this function to free pieces of memory used for sparse binding.
It is just a general purpose function to free memory and destroy allocations made using e.g. vmaAllocateMemory(),
vmaAllocateMemoryPages(), and other functions.
1899 It may be internally optimized to be more efficient than calling vmaFreeMemory() `allocationCount` times.
1900 
1901 Allocations in `pAllocations` array can come from any memory pools and types.
1902 Passing `VK_NULL_HANDLE` as elements of `pAllocations` array is valid. Such entries are just skipped.
1903 */
1904 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
1905     VmaAllocator VMA_NOT_NULL allocator,
1906     size_t allocationCount,
1907     const VmaAllocation VMA_NULLABLE* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations);
1908 
1909 /** \brief Returns current information about specified allocation.
1910 
Current parameters of the given allocation are returned in `pAllocationInfo`.

Although this function doesn't lock any mutex, so it should be quite efficient,
you should still avoid calling it too often.
You can retrieve the same VmaAllocationInfo structure while creating your resource, from
vmaCreateBuffer() or vmaCreateImage(). You can remember it if you are sure the parameters don't change
(e.g. due to defragmentation).
1918 */
1919 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
1920     VmaAllocator VMA_NOT_NULL allocator,
1921     VmaAllocation VMA_NOT_NULL allocation,
1922     VmaAllocationInfo* VMA_NOT_NULL pAllocationInfo);
1923 
1924 /** \brief Sets pUserData in given allocation to new value.
1925 
The value of the pointer `pUserData` is copied to the allocation's `pUserData`.
It is opaque, so you can use it however you want - e.g.
as a pointer, an ordinal number, or some handle to your own data.
1929 */
1930 VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
1931     VmaAllocator VMA_NOT_NULL allocator,
1932     VmaAllocation VMA_NOT_NULL allocation,
1933     void* VMA_NULLABLE pUserData);
1934 
1935 /** \brief Sets pName in given allocation to new value.
1936 
`pName` must be either null or a pointer to a null-terminated string. The function
makes a local copy of the string and sets it as the allocation's `pName`. The string
passed as `pName` doesn't need to remain valid for the whole lifetime of the allocation -
you can free it after this call. The string previously pointed to by the allocation's
`pName` is freed from memory.
1942 */
1943 VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationName(
1944     VmaAllocator VMA_NOT_NULL allocator,
1945     VmaAllocation VMA_NOT_NULL allocation,
1946     const char* VMA_NULLABLE pName);
1947 
1948 /**
1949 \brief Given an allocation, returns Property Flags of its memory type.
1950 
1951 This is just a convenience function. Same information can be obtained using
1952 vmaGetAllocationInfo() + vmaGetMemoryProperties().
1953 */
1954 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationMemoryProperties(
1955     VmaAllocator VMA_NOT_NULL allocator,
1956     VmaAllocation VMA_NOT_NULL allocation,
1957     VkMemoryPropertyFlags* VMA_NOT_NULL pFlags);
1958 
1959 /** \brief Maps memory represented by given allocation and returns pointer to it.
1960 
Maps the memory represented by the given allocation to make it accessible to CPU code.
When it succeeds, `*ppData` contains a pointer to the first byte of this memory.
1963 
1964 \warning
If the allocation is part of a bigger `VkDeviceMemory` block, the returned pointer is
correctly offset to the beginning of the region assigned to this particular allocation.
Unlike the result of `vkMapMemory`, it points to the allocation, not to the beginning of the whole block.
You should not add VmaAllocationInfo::offset to it!
1969 
Mapping is internally reference-counted and synchronized, so even though the raw Vulkan
function `vkMapMemory()` cannot be used to map the same block of `VkDeviceMemory`
multiple times simultaneously, it is safe to call this function on allocations
assigned to the same memory block. The actual Vulkan memory will be mapped on first
mapping and unmapped on last unmapping.
1975 
1976 If the function succeeded, you must call vmaUnmapMemory() to unmap the
1977 allocation when mapping is no longer needed or before freeing the allocation, at
1978 the latest.
1979 
It is also safe to call this function multiple times on the same allocation. You
must call vmaUnmapMemory() the same number of times as you called vmaMapMemory().
1982 
It is also safe to call this function on an allocation created with the
#VMA_ALLOCATION_CREATE_MAPPED_BIT flag. Its memory stays mapped all the time.
You must still call vmaUnmapMemory() the same number of times as you called
vmaMapMemory(). You must not call vmaUnmapMemory() an additional time to free the
"0-th" mapping made automatically due to the #VMA_ALLOCATION_CREATE_MAPPED_BIT flag.
1988 
This function fails when used on an allocation made in a memory type that is not
`HOST_VISIBLE`.
1991 
This function doesn't automatically flush or invalidate caches.
If the allocation is made from a memory type that is not `HOST_COHERENT`,
you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by the Vulkan specification.
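
A sketch of a typical map-write-unmap sequence; `myData` and `myDataSize` are assumptions:

\code
void* mappedData;
VkResult res = vmaMapMemory(allocator, allocation, &mappedData);
if(res == VK_SUCCESS)
{
    memcpy(mappedData, myData, myDataSize);
    vmaUnmapMemory(allocator, allocation);
    // Needed only if the memory type is not HOST_COHERENT:
    vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
}
\endcode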
1995 */
1996 VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
1997     VmaAllocator VMA_NOT_NULL allocator,
1998     VmaAllocation VMA_NOT_NULL allocation,
1999     void* VMA_NULLABLE* VMA_NOT_NULL ppData);
2000 
2001 /** \brief Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
2002 
2003 For details, see description of vmaMapMemory().
2004 
This function doesn't automatically flush or invalidate caches.
If the allocation is made from a memory type that is not `HOST_COHERENT`,
you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by the Vulkan specification.
2008 */
2009 VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
2010     VmaAllocator VMA_NOT_NULL allocator,
2011     VmaAllocation VMA_NOT_NULL allocation);
2012 
2013 /** \brief Flushes memory of given allocation.
2014 
Calls `vkFlushMappedMemoryRanges()` for the memory associated with the given range of the given allocation.
It needs to be called after writing to mapped memory for memory types that are not `HOST_COHERENT`.
The unmap operation doesn't do that automatically.

- `offset` must be relative to the beginning of the allocation.
- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` to the end of the given allocation.
- `offset` and `size` don't have to be aligned.
  They are internally rounded down/up to a multiple of `nonCoherentAtomSize`.
- If `size` is 0, this call is ignored.
- If the memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`,
  this call is ignored.

Warning! `offset` and `size` are relative to the contents of the given `allocation`.
If you mean the whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively.
Do not pass the allocation's offset as `offset`!!!
2030 
2031 This function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is
2032 called, otherwise `VK_SUCCESS`.
2033 */
2034 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(
2035     VmaAllocator VMA_NOT_NULL allocator,
2036     VmaAllocation VMA_NOT_NULL allocation,
2037     VkDeviceSize offset,
2038     VkDeviceSize size);
2039 
2040 /** \brief Invalidates memory of given allocation.
2041 
Calls `vkInvalidateMappedMemoryRanges()` for the memory associated with the given range of the given allocation.
It needs to be called before reading from mapped memory for memory types that are not `HOST_COHERENT`.
The map operation doesn't do that automatically.

- `offset` must be relative to the beginning of the allocation.
- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` to the end of the given allocation.
- `offset` and `size` don't have to be aligned.
  They are internally rounded down/up to a multiple of `nonCoherentAtomSize`.
- If `size` is 0, this call is ignored.
- If the memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`,
  this call is ignored.

Warning! `offset` and `size` are relative to the contents of the given `allocation`.
If you mean the whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively.
Do not pass the allocation's offset as `offset`!!!
2057 
2058 This function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if
2059 it is called, otherwise `VK_SUCCESS`.
2060 */
2061 VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(
2062     VmaAllocator VMA_NOT_NULL allocator,
2063     VmaAllocation VMA_NOT_NULL allocation,
2064     VkDeviceSize offset,
2065     VkDeviceSize size);
2066 
2067 /** \brief Flushes memory of given set of allocations.
2068 
2069 Calls `vkFlushMappedMemoryRanges()` for memory associated with given ranges of given allocations.
2070 For more information, see documentation of vmaFlushAllocation().
2071 
2072 \param allocator
2073 \param allocationCount
2074 \param allocations
\param offsets If not null, it must point to an array of offsets of regions to flush, relative to the beginning of respective allocations. Null means all offsets are zero.
\param sizes If not null, it must point to an array of sizes of regions to flush in respective allocations. Null means `VK_WHOLE_SIZE` for all allocations.
2077 
2078 This function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is
2079 called, otherwise `VK_SUCCESS`.
2080 */
2081 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(
2082     VmaAllocator VMA_NOT_NULL allocator,
2083     uint32_t allocationCount,
2084     const VmaAllocation VMA_NOT_NULL* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
2085     const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
2086     const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);
2087 
2088 /** \brief Invalidates memory of given set of allocations.
2089 
2090 Calls `vkInvalidateMappedMemoryRanges()` for memory associated with given ranges of given allocations.
2091 For more information, see documentation of vmaInvalidateAllocation().
2092 
2093 \param allocator
2094 \param allocationCount
2095 \param allocations
\param offsets If not null, it must point to an array of offsets of regions to invalidate, relative to the beginning of respective allocations. Null means all offsets are zero.
\param sizes If not null, it must point to an array of sizes of regions to invalidate in respective allocations. Null means `VK_WHOLE_SIZE` for all allocations.
2098 
2099 This function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if it is
2100 called, otherwise `VK_SUCCESS`.
2101 */
2102 VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(
2103     VmaAllocator VMA_NOT_NULL allocator,
2104     uint32_t allocationCount,
2105     const VmaAllocation VMA_NOT_NULL* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
2106     const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
2107     const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);
2108 
/** \brief Checks the magic number in margins around all allocations in the given memory types (in both default and custom pools) in search of corruption.
2110 
2111 \param allocator
2112 \param memoryTypeBits Bit mask, where each bit set means that a memory type with that index should be checked.
2113 
Corruption detection is enabled only when the `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero,
`VMA_DEBUG_MARGIN` is defined to nonzero, and only for memory types that are
2116 `HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection).
2117 
2118 Possible return values:
2119 
2120 - `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for any of specified memory types.
2121 - `VK_SUCCESS` - corruption detection has been performed and succeeded.
2122 - `VK_ERROR_UNKNOWN` - corruption detection has been performed and found memory corruptions around one of the allocations.
2123   `VMA_ASSERT` is also fired in that case.
2124 - Other value: Error returned by Vulkan, e.g. memory mapping failure.
2125 */
2126 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(
2127     VmaAllocator VMA_NOT_NULL allocator,
2128     uint32_t memoryTypeBits);
2129 
2130 /** \brief Begins defragmentation process.
2131 
2132 \param allocator Allocator object.
2133 \param pInfo Structure filled with parameters of defragmentation.
2134 \param[out] pContext Context object that must be passed to vmaEndDefragmentation() to finish defragmentation.
2135 \returns
2136 - `VK_SUCCESS` if defragmentation can begin.
2137 - `VK_ERROR_FEATURE_NOT_PRESENT` if defragmentation is not supported.
2138 
2139 For more information about defragmentation, see documentation chapter:
2140 [Defragmentation](@ref defragmentation).
2141 */
2142 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentation(
2143     VmaAllocator VMA_NOT_NULL allocator,
2144     const VmaDefragmentationInfo* VMA_NOT_NULL pInfo,
2145     VmaDefragmentationContext VMA_NULLABLE* VMA_NOT_NULL pContext);
2146 
2147 /** \brief Ends defragmentation process.
2148 
2149 \param allocator Allocator object.
2150 \param context Context object that has been created by vmaBeginDefragmentation().
2151 \param[out] pStats Optional stats for the defragmentation. Can be null.
2152 
2153 Use this function to finish defragmentation started by vmaBeginDefragmentation().
2154 */
2155 VMA_CALL_PRE void VMA_CALL_POST vmaEndDefragmentation(
2156     VmaAllocator VMA_NOT_NULL allocator,
2157     VmaDefragmentationContext VMA_NOT_NULL context,
2158     VmaDefragmentationStats* VMA_NULLABLE pStats);
2159 
2160 /** \brief Starts single defragmentation pass.
2161 
2162 \param allocator Allocator object.
2163 \param context Context object that has been created by vmaBeginDefragmentation().
\param[out] pPassInfo Computed information for the current pass.
2165 \returns
- `VK_SUCCESS` if no more moves are possible. Then you can omit the call to vmaEndDefragmentationPass() and simply end the whole defragmentation.
- `VK_INCOMPLETE` if there are pending moves returned in `pPassInfo`. You need to perform them, call vmaEndDefragmentationPass(),
  and then preferably try another pass with vmaBeginDefragmentationPass().
2169 */
2170 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
2171     VmaAllocator VMA_NOT_NULL allocator,
2172     VmaDefragmentationContext VMA_NOT_NULL context,
2173     VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo);
2174 
2175 /** \brief Ends single defragmentation pass.
2176 
2177 \param allocator Allocator object.
2178 \param context Context object that has been created by vmaBeginDefragmentation().
\param pPassInfo Computed information for the current pass, filled by vmaBeginDefragmentationPass() and possibly modified by you.

Returns `VK_SUCCESS` if no more moves are possible, or `VK_INCOMPLETE` if more defragmentation passes are possible.
2182 
2183 Ends incremental defragmentation pass and commits all defragmentation moves from `pPassInfo`.
2184 After this call:
2185 
- Allocations at `pPassInfo->pMoves[i].srcAllocation` that had `operation ==` #VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY
  (which is the default) will be pointing to the new destination place.
- Allocations at `pPassInfo->pMoves[i].srcAllocation` that had `operation ==` #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY
  will be freed.
2190 
If no more moves are possible, you can end the whole defragmentation.
2192 */
2193 VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
2194     VmaAllocator VMA_NOT_NULL allocator,
2195     VmaDefragmentationContext VMA_NOT_NULL context,
2196     VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo);
2197 
2198 /** \brief Binds buffer to allocation.
2199 
2200 Binds specified buffer to region of memory represented by specified allocation.
2201 Gets `VkDeviceMemory` handle and offset from the allocation.
2202 If you want to create a buffer, allocate memory for it and bind them together separately,
2203 you should use this function for binding instead of standard `vkBindBufferMemory()`,
2204 because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple
2205 allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously
2206 (which is illegal in Vulkan).
2207 
2208 It is recommended to use function vmaCreateBuffer() instead of this one.
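
A sketch of the separate create/allocate/bind workflow; `buf` is assumed to have been
created with `vkCreateBuffer` beforehand:

\code
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;

VmaAllocation alloc;
VkResult res = vmaAllocateMemoryForBuffer(allocator, buf, &allocCreateInfo, &alloc, NULL);
if(res == VK_SUCCESS)
    res = vmaBindBufferMemory(allocator, alloc, buf);
\endcode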
2209 */
2210 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
2211     VmaAllocator VMA_NOT_NULL allocator,
2212     VmaAllocation VMA_NOT_NULL allocation,
2213     VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer);
2214 
2215 /** \brief Binds buffer to allocation with additional parameters.
2216 
2217 \param allocator
2218 \param allocation
2219 \param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0.
2220 \param buffer
2221 \param pNext A chain of structures to be attached to `VkBindBufferMemoryInfoKHR` structure used internally. Normally it should be null.
2222 
2223 This function is similar to vmaBindBufferMemory(), but it provides additional parameters.
2224 
2225 If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag
2226 or with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. Otherwise the call fails.
2227 */
2228 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
2229     VmaAllocator VMA_NOT_NULL allocator,
2230     VmaAllocation VMA_NOT_NULL allocation,
2231     VkDeviceSize allocationLocalOffset,
2232     VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
2233     const void* VMA_NULLABLE pNext);
2234 
2235 /** \brief Binds image to allocation.
2236 
2237 Binds specified image to region of memory represented by specified allocation.
2238 Gets `VkDeviceMemory` handle and offset from the allocation.
2239 If you want to create an image, allocate memory for it and bind them together separately,
2240 you should use this function for binding instead of standard `vkBindImageMemory()`,
2241 because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple
2242 allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously
2243 (which is illegal in Vulkan).
2244 
2245 It is recommended to use function vmaCreateImage() instead of this one.
2246 */
2247 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
2248     VmaAllocator VMA_NOT_NULL allocator,
2249     VmaAllocation VMA_NOT_NULL allocation,
2250     VkImage VMA_NOT_NULL_NON_DISPATCHABLE image);
2251 
2252 /** \brief Binds image to allocation with additional parameters.
2253 
2254 \param allocator
2255 \param allocation
2256 \param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0.
2257 \param image
2258 \param pNext A chain of structures to be attached to `VkBindImageMemoryInfoKHR` structure used internally. Normally it should be null.
2259 
2260 This function is similar to vmaBindImageMemory(), but it provides additional parameters.
2261 
2262 If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag
2263 or with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. Otherwise the call fails.
2264 */
2265 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
2266     VmaAllocator VMA_NOT_NULL allocator,
2267     VmaAllocation VMA_NOT_NULL allocation,
2268     VkDeviceSize allocationLocalOffset,
2269     VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
2270     const void* VMA_NULLABLE pNext);
2271 
2272 /** \brief Creates a new `VkBuffer`, allocates and binds memory for it.
2273 
2274 \param allocator
2275 \param pBufferCreateInfo
2276 \param pAllocationCreateInfo
2277 \param[out] pBuffer Buffer that was created.
2278 \param[out] pAllocation Allocation that was created.
2279 \param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
2280 
2281 This function automatically:
2282 
2283 -# Creates buffer.
2284 -# Allocates appropriate memory for it.
2285 -# Binds the buffer with the memory.
2286 
2287 If any of these operations fail, the buffer and allocation are not created,
2288 the returned value is a negative error code, and `*pBuffer` and `*pAllocation` are null.
2289 
2290 If the function succeeded, you must destroy both buffer and allocation when you
2291 no longer need them using either convenience function vmaDestroyBuffer() or
2292 separately, using `vkDestroyBuffer()` and vmaFreeMemory().
2293 
2294 If #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag was used,
2295 VK_KHR_dedicated_allocation extension is used internally to query the driver whether
2296 it requires or prefers the new buffer to have a dedicated allocation. If yes,
2297 and if a dedicated allocation is possible
2298 (#VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT is not used), it creates a dedicated
2299 allocation for this buffer, just like when using
2300 #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
2301 
2302 \note This function creates a new `VkBuffer`. Sub-allocation of parts of one large buffer,
2303 although recommended as a good practice, is out of scope of this library and could be implemented
2304 by the user as a higher-level logic on top of VMA.
2305 */
2306 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
2307     VmaAllocator VMA_NOT_NULL allocator,
2308     const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
2309     const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
2310     VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer,
2311     VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
2312     VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
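
/*
Typical usage (an illustrative sketch - assumes `allocator` was created earlier; error
handling is omitted):

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;

    VkBuffer buf;
    VmaAllocation alloc;
    vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);

    // ...use the buffer...

    vmaDestroyBuffer(allocator, buf, alloc);
*/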
2313 
2314 /** \brief Creates a buffer with additional minimum alignment.
2315 
2316 Similar to vmaCreateBuffer() but provides additional parameter `minAlignment`, which allows you to specify a custom
2317 minimum alignment to be used when placing the buffer inside a larger memory block. This may be needed e.g.
2318 for interop with OpenGL.
2319 */
2320 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment(
2321     VmaAllocator VMA_NOT_NULL allocator,
2322     const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
2323     const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
2324     VkDeviceSize minAlignment,
2325     VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer,
2326     VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
2327     VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
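
/*
For example, to enforce a 4 KB minimum alignment (an illustrative sketch - the value 4096
stands for a made-up external requirement, e.g. from an interop API; `bufCreateInfo` and
`allocCreateInfo` are filled as for vmaCreateBuffer()):

    vmaCreateBufferWithAlignment(allocator, &bufCreateInfo, &allocCreateInfo,
        4096, // minAlignment
        &buf, &alloc, nullptr);
*/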
2328 
2329 /** \brief Creates a new `VkBuffer`, binds already created memory for it.
2330 
2331 \param allocator
2332 \param allocation Allocation that provides memory to be used for binding new buffer to it.
2333 \param pBufferCreateInfo
2334 \param[out] pBuffer Buffer that was created.
2335 
2336 This function automatically:
2337 
2338 -# Creates buffer.
2339 -# Binds the buffer with the supplied memory.
2340 
2341 If any of these operations fail, the buffer is not created,
2342 the returned value is a negative error code, and `*pBuffer` is null.
2343 
2344 If the function succeeded, you must destroy the buffer when you
2345 no longer need it using `vkDestroyBuffer()`. If you want to also destroy the corresponding
2346 allocation you can use convenience function vmaDestroyBuffer().
2347 */
2348 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer(
2349     VmaAllocator VMA_NOT_NULL allocator,
2350     VmaAllocation VMA_NOT_NULL allocation,
2351     const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
2352     VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer);
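
/*
Example (an illustrative sketch - assumes `alloc` is an existing VmaAllocation large
enough for the new buffer):

    VkBufferCreateInfo aliasBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    aliasBufCreateInfo.size = 4096;
    aliasBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VkBuffer aliasBuf;
    vmaCreateAliasingBuffer(allocator, alloc, &aliasBufCreateInfo, &aliasBuf);

    // ...use the buffer...

    vkDestroyBuffer(device, aliasBuf, nullptr); // The memory remains owned by `alloc`.
*/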
2353 
2354 /** \brief Destroys Vulkan buffer and frees allocated memory.
2355 
2356 This is just a convenience function equivalent to:
2357 
2358 \code
2359 vkDestroyBuffer(device, buffer, allocationCallbacks);
2360 vmaFreeMemory(allocator, allocation);
2361 \endcode
2362 
2363 It is safe to pass null as buffer and/or allocation.
2364 */
2365 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
2366     VmaAllocator VMA_NOT_NULL allocator,
2367     VkBuffer VMA_NULLABLE_NON_DISPATCHABLE buffer,
2368     VmaAllocation VMA_NULLABLE allocation);
2369 
2370 /// Function similar to vmaCreateBuffer().
2371 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
2372     VmaAllocator VMA_NOT_NULL allocator,
2373     const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
2374     const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
2375     VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage,
2376     VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
2377     VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
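
/*
Example (an illustrative sketch - assumes `allocator` was created earlier; error handling
is omitted):

    VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
    imgCreateInfo.extent = { 1024, 1024, 1 };
    imgCreateInfo.mipLevels = 1;
    imgCreateInfo.arrayLayers = 1;
    imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
    imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;

    VkImage img;
    VmaAllocation alloc;
    vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr);

    // ...use the image...

    vmaDestroyImage(allocator, img, alloc);
*/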
2378 
2379 /// Function similar to vmaCreateAliasingBuffer().
2380 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage(
2381     VmaAllocator VMA_NOT_NULL allocator,
2382     VmaAllocation VMA_NOT_NULL allocation,
2383     const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
2384     VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage);
2385 
2386 /** \brief Destroys Vulkan image and frees allocated memory.
2387 
2388 This is just a convenience function equivalent to:
2389 
2390 \code
2391 vkDestroyImage(device, image, allocationCallbacks);
2392 vmaFreeMemory(allocator, allocation);
2393 \endcode
2394 
2395 It is safe to pass null as image and/or allocation.
2396 */
2397 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
2398     VmaAllocator VMA_NOT_NULL allocator,
2399     VkImage VMA_NULLABLE_NON_DISPATCHABLE image,
2400     VmaAllocation VMA_NULLABLE allocation);
2401 
2402 /** @} */
2403 
2404 /**
2405 \addtogroup group_virtual
2406 @{
2407 */
2408 
2409 /** \brief Creates new #VmaVirtualBlock object.
2410 
2411 \param pCreateInfo Parameters for creation.
2412 \param[out] pVirtualBlock Returned virtual block object or `VMA_NULL` if creation failed.
2413 */
2414 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateVirtualBlock(
2415     const VmaVirtualBlockCreateInfo* VMA_NOT_NULL pCreateInfo,
2416     VmaVirtualBlock VMA_NULLABLE* VMA_NOT_NULL pVirtualBlock);
2417 
2418 /** \brief Destroys #VmaVirtualBlock object.
2419 
2420 Please note that you should consciously handle virtual allocations that could remain unfreed in the block.
2421 You should either free them individually using vmaVirtualFree() or call vmaClearVirtualBlock()
2422 if you are sure this is what you want. If you do neither, an assert is called.
2423 
2424 If you keep pointers to some additional metadata associated with your virtual allocations in their `pUserData`,
2425 don't forget to free them.
2426 */
2427 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyVirtualBlock(
2428     VmaVirtualBlock VMA_NULLABLE virtualBlock);
2429 
2430 /** \brief Returns true if the #VmaVirtualBlock is empty - contains 0 virtual allocations and has all its space available for new allocations.
2431 */
2432 VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaIsVirtualBlockEmpty(
2433     VmaVirtualBlock VMA_NOT_NULL virtualBlock);
2434 
2435 /** \brief Returns information about a specific virtual allocation within a virtual block, like its size and `pUserData` pointer.
2436 */
2437 VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualAllocationInfo(
2438     VmaVirtualBlock VMA_NOT_NULL virtualBlock,
2439     VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo);
2440 
2441 /** \brief Allocates new virtual allocation inside given #VmaVirtualBlock.
2442 
2443 If the allocation fails due to not enough free space available, `VK_ERROR_OUT_OF_DEVICE_MEMORY` is returned
2444 (even though the function never allocates any actual GPU memory).
2445 `pAllocation` is then set to `VK_NULL_HANDLE` and `pOffset`, if not null, is set to `UINT64_MAX`.
2446 
2447 \param virtualBlock Virtual block
2448 \param pCreateInfo Parameters for the allocation
2449 \param[out] pAllocation Returned handle of the new allocation
2450 \param[out] pOffset Returned offset of the new allocation. Optional, can be null.
2451 */
2452 VMA_CALL_PRE VkResult VMA_CALL_POST vmaVirtualAllocate(
2453     VmaVirtualBlock VMA_NOT_NULL virtualBlock,
2454     const VmaVirtualAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
2455     VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pAllocation,
2456     VkDeviceSize* VMA_NULLABLE pOffset);
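
/*
Example of the whole virtual block lifecycle (an illustrative sketch - the sizes are
arbitrary; error handling is omitted):

    VmaVirtualBlockCreateInfo blockCreateInfo = {};
    blockCreateInfo.size = 1048576; // 1 MB

    VmaVirtualBlock block;
    vmaCreateVirtualBlock(&blockCreateInfo, &block);

    VmaVirtualAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.size = 4096;

    VmaVirtualAllocation alloc;
    VkDeviceSize offset;
    vmaVirtualAllocate(block, &allocCreateInfo, &alloc, &offset);

    // ...use the range [offset, offset + 4096) in memory managed by you...

    vmaVirtualFree(block, alloc);
    vmaDestroyVirtualBlock(block);
*/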
2457 
2458 /** \brief Frees virtual allocation inside given #VmaVirtualBlock.
2459 
2460 It is correct to call this function with `allocation == VK_NULL_HANDLE` - it does nothing.
2461 */
2462 VMA_CALL_PRE void VMA_CALL_POST vmaVirtualFree(
2463     VmaVirtualBlock VMA_NOT_NULL virtualBlock,
2464     VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE allocation);
2465 
2466 /** \brief Frees all virtual allocations inside given #VmaVirtualBlock.
2467 
2468 You must either call this function or free each virtual allocation individually with vmaVirtualFree()
2469 before destroying a virtual block. Otherwise, an assert is called.
2470 
2471 If you keep a pointer to some additional metadata associated with your virtual allocation in its `pUserData`,
2472 don't forget to free it as well.
2473 */
2474 VMA_CALL_PRE void VMA_CALL_POST vmaClearVirtualBlock(
2475     VmaVirtualBlock VMA_NOT_NULL virtualBlock);
2476 
2477 /** \brief Changes custom pointer associated with given virtual allocation.
2478 */
2479 VMA_CALL_PRE void VMA_CALL_POST vmaSetVirtualAllocationUserData(
2480     VmaVirtualBlock VMA_NOT_NULL virtualBlock,
2481     VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation,
2482     void* VMA_NULLABLE pUserData);
2483 
2484 /** \brief Calculates and returns statistics about virtual allocations and memory usage in given #VmaVirtualBlock.
2485 
2486 This function is fast to call. For more detailed statistics, see vmaCalculateVirtualBlockStatistics().
2487 */
2488 VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualBlockStatistics(
2489     VmaVirtualBlock VMA_NOT_NULL virtualBlock,
2490     VmaStatistics* VMA_NOT_NULL pStats);
2491 
2492 /** \brief Calculates and returns detailed statistics about virtual allocations and memory usage in given #VmaVirtualBlock.
2493 
2494 This function is slow to call. Use for debugging purposes.
2495 For less detailed statistics, see vmaGetVirtualBlockStatistics().
2496 */
2497 VMA_CALL_PRE void VMA_CALL_POST vmaCalculateVirtualBlockStatistics(
2498     VmaVirtualBlock VMA_NOT_NULL virtualBlock,
2499     VmaDetailedStatistics* VMA_NOT_NULL pStats);
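
/*
Querying the fast statistics (an illustrative sketch - assumes `block` is a valid
VmaVirtualBlock and <cstdio> is available):

    VmaStatistics stats;
    vmaGetVirtualBlockStatistics(block, &stats);
    printf("Allocations: %u, bytes in use: %llu\n",
        stats.allocationCount, (unsigned long long)stats.allocationBytes);
*/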
2500 
2501 /** @} */
2502 
2503 #if VMA_STATS_STRING_ENABLED
2504 /**
2505 \addtogroup group_stats
2506 @{
2507 */
2508 
2509 /** \brief Builds and returns a null-terminated string in JSON format with information about given #VmaVirtualBlock.
2510 \param virtualBlock Virtual block.
2511 \param[out] ppStatsString Returned string.
2512 \param detailedMap Pass `VK_FALSE` to only obtain statistics as returned by vmaCalculateVirtualBlockStatistics(). Pass `VK_TRUE` to also obtain full list of allocations and free spaces.
2513 
2514 Returned string must be freed using vmaFreeVirtualBlockStatsString().
2515 */
2516 VMA_CALL_PRE void VMA_CALL_POST vmaBuildVirtualBlockStatsString(
2517     VmaVirtualBlock VMA_NOT_NULL virtualBlock,
2518     char* VMA_NULLABLE* VMA_NOT_NULL ppStatsString,
2519     VkBool32 detailedMap);
2520 
2521 /// Frees a string returned by vmaBuildVirtualBlockStatsString().
2522 VMA_CALL_PRE void VMA_CALL_POST vmaFreeVirtualBlockStatsString(
2523     VmaVirtualBlock VMA_NOT_NULL virtualBlock,
2524     char* VMA_NULLABLE pStatsString);
2525 
2526 /** \brief Builds and returns statistics as a null-terminated string in JSON format.
2527 \param allocator
2528 \param[out] ppStatsString Must be freed using vmaFreeStatsString() function.
2529 \param detailedMap
2530 */
2531 VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
2532     VmaAllocator VMA_NOT_NULL allocator,
2533     char* VMA_NULLABLE* VMA_NOT_NULL ppStatsString,
2534     VkBool32 detailedMap);
2535 
2536 VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
2537     VmaAllocator VMA_NOT_NULL allocator,
2538     char* VMA_NULLABLE pStatsString);
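
/*
Example of dumping the full JSON statistics (an illustrative sketch - what to do with the
string, e.g. saving it to a file, is left to the caller):

    char* statsString = nullptr;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE);
    // ...save or print statsString...
    vmaFreeStatsString(allocator, statsString);
*/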
2539 
2540 /** @} */
2541 
2542 #endif // VMA_STATS_STRING_ENABLED
2543 
2544 #endif // _VMA_FUNCTION_HEADERS
2545 
2546 #ifdef __cplusplus
2547 }
2548 #endif
2549 
2550 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
2551 
2552 ////////////////////////////////////////////////////////////////////////////////
2553 ////////////////////////////////////////////////////////////////////////////////
2554 //
2555 //    IMPLEMENTATION
2556 //
2557 ////////////////////////////////////////////////////////////////////////////////
2558 ////////////////////////////////////////////////////////////////////////////////
2559 
2560 // For Visual Studio IntelliSense.
2561 #if defined(__cplusplus) && defined(__INTELLISENSE__)
2562 #define VMA_IMPLEMENTATION
2563 #endif
2564 
2565 #ifdef VMA_IMPLEMENTATION
2566 #undef VMA_IMPLEMENTATION
2567 
2568 #include <cstdint>
2569 #include <cstdlib>
2570 #include <cstring>
2571 #include <utility>
2572 #include <type_traits>
2573 
2574 #ifdef _MSC_VER
2575     #include <intrin.h> // For functions like __popcnt, _BitScanForward etc.
2576 #endif
2577 #if __cplusplus >= 202002L || _MSVC_LANG >= 202002L // C++20
2578     #include <bit> // For std::popcount
2579 #endif
2580 
2581 /*******************************************************************************
2582 CONFIGURATION SECTION
2583 
2584 Define some of these macros before each #include of this header or change them
2585 here if you need other than the default behavior, depending on your environment.
2586 */
2587 #ifndef _VMA_CONFIGURATION
2588 
2589 /*
2590 Define this macro to 1 to make the library fetch pointers to Vulkan functions
2591 internally, like:
2592 
2593     vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
2594 */
2595 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
2596     #define VMA_STATIC_VULKAN_FUNCTIONS 1
2597 #endif
2598 
2599 /*
2600 Define this macro to 1 to make the library fetch pointers to Vulkan functions
2601 internally, like:
2602 
2603     vulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkGetDeviceProcAddr(device, "vkAllocateMemory");
2604 
2605 To use this feature in new versions of VMA you now have to pass
2606 VmaVulkanFunctions::vkGetInstanceProcAddr and vkGetDeviceProcAddr as
2607 VmaAllocatorCreateInfo::pVulkanFunctions. Other members can be null.
2608 */
2609 #if !defined(VMA_DYNAMIC_VULKAN_FUNCTIONS)
2610     #define VMA_DYNAMIC_VULKAN_FUNCTIONS 1
2611 #endif
2612 
2613 #ifndef VMA_USE_STL_SHARED_MUTEX
2614     // Compiler conforms to C++17.
2615     #if __cplusplus >= 201703L
2616         #define VMA_USE_STL_SHARED_MUTEX 1
2617     // Visual Studio defines __cplusplus properly only when passed the additional parameter /Zc:__cplusplus.
2618     // Otherwise it is always 199711L, even though shared_mutex has worked since Visual Studio 2015 Update 2.
2619     #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
2620         #define VMA_USE_STL_SHARED_MUTEX 1
2621     #else
2622         #define VMA_USE_STL_SHARED_MUTEX 0
2623     #endif
2624 #endif
2625 
2626 /*
2627 Define this macro to include custom header files without having to edit this file directly, e.g.:
2628 
2629     // Inside of "my_vma_configuration_user_includes.h":
2630 
2631     #include "my_custom_assert.h" // for MY_CUSTOM_ASSERT
2632     #include "my_custom_min.h" // for my_custom_min
2633     #include <algorithm>
2634     #include <mutex>
2635 
2636     // Inside a different file, which includes "vk_mem_alloc.h":
2637 
2638     #define VMA_CONFIGURATION_USER_INCLUDES_H "my_vma_configuration_user_includes.h"
2639     #define VMA_ASSERT(expr) MY_CUSTOM_ASSERT(expr)
2640     #define VMA_MIN(v1, v2)  (my_custom_min(v1, v2))
2641     #include "vk_mem_alloc.h"
2642     ...
2643 
2644 The following headers are used in this CONFIGURATION section only, so feel free to
2645 remove them if not needed.
2646 */
2647 #if !defined(VMA_CONFIGURATION_USER_INCLUDES_H)
2648     #include <cassert> // for assert
2649     #include <algorithm> // for min, max
2650     #include <mutex>
2651 #else
2652     #include VMA_CONFIGURATION_USER_INCLUDES_H
2653 #endif
2654 
2655 #ifndef VMA_NULL
2656    // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
2657    #define VMA_NULL   nullptr
2658 #endif
2659 
2660 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
2661 #include <cstdlib>
2662 static void* vma_aligned_alloc(size_t alignment, size_t size)
2663 {
2664     // alignment must be >= sizeof(void*)
2665     if(alignment < sizeof(void*))
2666     {
2667         alignment = sizeof(void*);
2668     }
2669 
2670     return memalign(alignment, size);
2671 }
2672 #elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC))
2673 #include <cstdlib>
2674 
2675 #if defined(__APPLE__)
2676 #include <AvailabilityMacros.h>
2677 #endif
2678 
2679 static void* vma_aligned_alloc(size_t alignment, size_t size)
2680 {
2681     // Unfortunately, aligned_alloc causes VMA to crash because it returns null pointers (at least under macOS 11.4).
2682     // Therefore, for now disable this specific exception until a proper solution is found.
2683     //#if defined(__APPLE__) && (defined(MAC_OS_X_VERSION_10_16) || defined(__IPHONE_14_0))
2684     //#if MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_16 || __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_14_0
2685     //    // For C++14, usr/include/malloc/_malloc.h declares aligned_alloc()) only
2686     //    // with the MacOSX11.0 SDK in Xcode 12 (which is what adds
2687     //    // MAC_OS_X_VERSION_10_16), even though the function is marked
2688     //    // available for 10.15. That is why the preprocessor checks for 10.16 but
2689     //    // the __builtin_available checks for 10.15.
2690     //    // People who use C++17 could call aligned_alloc with the 10.15 SDK already.
2691     //    if (__builtin_available(macOS 10.15, iOS 13, *))
2692     //        return aligned_alloc(alignment, size);
2693     //#endif
2694     //#endif
2695 
2696     // alignment must be >= sizeof(void*)
2697     if(alignment < sizeof(void*))
2698     {
2699         alignment = sizeof(void*);
2700     }
2701 
2702     void *pointer;
2703     if(posix_memalign(&pointer, alignment, size) == 0)
2704         return pointer;
2705     return VMA_NULL;
2706 }
2707 #elif defined(_WIN32)
2708 static void* vma_aligned_alloc(size_t alignment, size_t size)
2709 {
2710     return _aligned_malloc(size, alignment);
2711 }
2712 #else
2713 static void* vma_aligned_alloc(size_t alignment, size_t size)
2714 {
2715     return aligned_alloc(alignment, size);
2716 }
2717 #endif
2718 
2719 #if defined(_WIN32)
2720 static void vma_aligned_free(void* ptr)
2721 {
2722     _aligned_free(ptr);
2723 }
2724 #else
2725 static void vma_aligned_free(void* VMA_NULLABLE ptr)
2726 {
2727     free(ptr);
2728 }
2729 #endif
2730 
2731 // If your compiler is not compatible with C++11 and the definition of the
2732 // aligned_alloc() function is missing, uncommenting the following line may help:
2733 
2734 //#include <malloc.h>
2735 
2736 // Normal assert to check for programmer's errors, especially in Debug configuration.
2737 #ifndef VMA_ASSERT
2738    #ifdef NDEBUG
2739        #define VMA_ASSERT(expr)
2740    #else
2741        #define VMA_ASSERT(expr)         assert(expr)
2742    #endif
2743 #endif
2744 
2745 // Assert that will be called very often, like inside data structures e.g. operator[].
2746 // Making it non-empty can make the program slow.
2747 #ifndef VMA_HEAVY_ASSERT
2748    #ifdef NDEBUG
2749        #define VMA_HEAVY_ASSERT(expr)
2750    #else
2751        #define VMA_HEAVY_ASSERT(expr)   //VMA_ASSERT(expr)
2752    #endif
2753 #endif
2754 
2755 #ifndef VMA_ALIGN_OF
2756    #define VMA_ALIGN_OF(type)       (__alignof(type))
2757 #endif
2758 
2759 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
2760    #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) vma_aligned_alloc((alignment), (size))
2761 #endif
2762 
2763 #ifndef VMA_SYSTEM_ALIGNED_FREE
2764    // VMA_SYSTEM_FREE is the old name, but might have been defined by the user
2765    #if defined(VMA_SYSTEM_FREE)
2766       #define VMA_SYSTEM_ALIGNED_FREE(ptr)     VMA_SYSTEM_FREE(ptr)
2767    #else
2768       #define VMA_SYSTEM_ALIGNED_FREE(ptr)     vma_aligned_free(ptr)
2769     #endif
2770 #endif
2771 
2772 #ifndef VMA_COUNT_BITS_SET
2773     // Returns number of bits set to 1 in (v)
2774     #define VMA_COUNT_BITS_SET(v) VmaCountBitsSet(v)
2775 #endif
2776 
2777 #ifndef VMA_BITSCAN_LSB
2778     // Scans the integer for the index of the first nonzero bit, starting from the Least Significant Bit (LSB). If mask is 0, returns UINT8_MAX.
2779     #define VMA_BITSCAN_LSB(mask) VmaBitScanLSB(mask)
2780 #endif
2781 
2782 #ifndef VMA_BITSCAN_MSB
2783     // Scans the integer for the index of the first nonzero bit, starting from the Most Significant Bit (MSB). If mask is 0, returns UINT8_MAX.
2784     #define VMA_BITSCAN_MSB(mask) VmaBitScanMSB(mask)
2785 #endif
2786 
2787 #ifndef VMA_MIN
2788    #define VMA_MIN(v1, v2)    ((std::min)((v1), (v2)))
2789 #endif
2790 
2791 #ifndef VMA_MAX
2792    #define VMA_MAX(v1, v2)    ((std::max)((v1), (v2)))
2793 #endif
2794 
2795 #ifndef VMA_SWAP
2796    #define VMA_SWAP(v1, v2)   std::swap((v1), (v2))
2797 #endif
2798 
2799 #ifndef VMA_SORT
2800    #define VMA_SORT(beg, end, cmp)  std::sort(beg, end, cmp)
2801 #endif
2802 
2803 #ifndef VMA_DEBUG_LOG
2804    #define VMA_DEBUG_LOG(format, ...)
2805    /*
2806    #define VMA_DEBUG_LOG(format, ...) do { \
2807        printf(format, __VA_ARGS__); \
2808        printf("\n"); \
2809    } while(false)
2810    */
2811 #endif
2812 
2813 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
2814 #if VMA_STATS_STRING_ENABLED
2815     static inline void VmaUint32ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint32_t num)
2816     {
2817         snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
2818     }
2819     static inline void VmaUint64ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint64_t num)
2820     {
2821         snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
2822     }
2823     static inline void VmaPtrToStr(char* VMA_NOT_NULL outStr, size_t strLen, const void* ptr)
2824     {
2825         snprintf(outStr, strLen, "%p", ptr);
2826     }
2827 #endif
2828 
2829 #ifndef VMA_MUTEX
2830     class VmaMutex
2831     {
2832     public:
2833         void Lock() { m_Mutex.lock(); }
2834         void Unlock() { m_Mutex.unlock(); }
2835         bool TryLock() { return m_Mutex.try_lock(); }
2836     private:
2837         std::mutex m_Mutex;
2838     };
2839     #define VMA_MUTEX VmaMutex
2840 #endif
2841 
2842 // Read-write mutex, where "read" is shared access, "write" is exclusive access.
2843 #ifndef VMA_RW_MUTEX
2844     #if VMA_USE_STL_SHARED_MUTEX
2845         // Use std::shared_mutex from C++17.
2846         #include <shared_mutex>
2847         class VmaRWMutex
2848         {
2849         public:
2850             void LockRead() { m_Mutex.lock_shared(); }
2851             void UnlockRead() { m_Mutex.unlock_shared(); }
2852             bool TryLockRead() { return m_Mutex.try_lock_shared(); }
2853             void LockWrite() { m_Mutex.lock(); }
2854             void UnlockWrite() { m_Mutex.unlock(); }
2855             bool TryLockWrite() { return m_Mutex.try_lock(); }
2856         private:
2857             std::shared_mutex m_Mutex;
2858         };
2859         #define VMA_RW_MUTEX VmaRWMutex
2860     #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
2861         // Use SRWLOCK from WinAPI.
2862         // Minimum supported client = Windows Vista, server = Windows Server 2008.
2863         class VmaRWMutex
2864         {
2865         public:
2866             VmaRWMutex() { InitializeSRWLock(&m_Lock); }
2867             void LockRead() { AcquireSRWLockShared(&m_Lock); }
2868             void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
2869             bool TryLockRead() { return TryAcquireSRWLockShared(&m_Lock) != FALSE; }
2870             void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
2871             void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
2872             bool TryLockWrite() { return TryAcquireSRWLockExclusive(&m_Lock) != FALSE; }
2873         private:
2874             SRWLOCK m_Lock;
2875         };
2876         #define VMA_RW_MUTEX VmaRWMutex
2877     #else
2878         // Less efficient fallback: Use normal mutex.
2879         class VmaRWMutex
2880         {
2881         public:
2882             void LockRead() { m_Mutex.Lock(); }
2883             void UnlockRead() { m_Mutex.Unlock(); }
2884             bool TryLockRead() { return m_Mutex.TryLock(); }
2885             void LockWrite() { m_Mutex.Lock(); }
2886             void UnlockWrite() { m_Mutex.Unlock(); }
2887             bool TryLockWrite() { return m_Mutex.TryLock(); }
2888         private:
2889             VMA_MUTEX m_Mutex;
2890         };
2891         #define VMA_RW_MUTEX VmaRWMutex
2892     #endif // #if VMA_USE_STL_SHARED_MUTEX
2893 #endif // #ifndef VMA_RW_MUTEX
2894 
2895 /*
2896 If providing your own implementation, you need to implement a subset of std::atomic.
2897 */
2898 #ifndef VMA_ATOMIC_UINT32
2899     #include <atomic>
2900     #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
2901 #endif
2902 
2903 #ifndef VMA_ATOMIC_UINT64
2904     #include <atomic>
2905     #define VMA_ATOMIC_UINT64 std::atomic<uint64_t>
2906 #endif
2907 
2908 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
2909     /**
2910     Every allocation will have its own memory block.
2911     Define to 1 for debugging purposes only.
2912     */
2913     #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
2914 #endif
2915 
2916 #ifndef VMA_MIN_ALIGNMENT
2917     /**
2918     Minimum alignment of all allocations, in bytes.
2919     Set to more than 1 for debugging purposes. Must be power of two.
2920     */
2921     #ifdef VMA_DEBUG_ALIGNMENT // Old name
2922         #define VMA_MIN_ALIGNMENT VMA_DEBUG_ALIGNMENT
2923     #else
2924         #define VMA_MIN_ALIGNMENT (1)
2925     #endif
2926 #endif
2927 
2928 #ifndef VMA_DEBUG_MARGIN
2929     /**
2930     Minimum margin after every allocation, in bytes.
2931     Set nonzero for debugging purposes only.
2932     */
2933     #define VMA_DEBUG_MARGIN (0)
2934 #endif
2935 
2936 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
2937     /**
2938     Define this macro to 1 to automatically fill new allocations and destroyed
2939     allocations with some bit pattern.
2940     */
2941     #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
2942 #endif
2943 
2944 #ifndef VMA_DEBUG_DETECT_CORRUPTION
2945     /**
2946     Define this macro to 1 together with non-zero value of VMA_DEBUG_MARGIN to
2947     enable writing magic value to the margin after every allocation and
2948     validating it, so that memory corruptions (out-of-bounds writes) are detected.
2949     */
2950     #define VMA_DEBUG_DETECT_CORRUPTION (0)
2951 #endif
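
// For example, to enable corruption detection, define both macros before including this
// header in the compilation unit that defines VMA_IMPLEMENTATION (a sketch):
//
//     #define VMA_DEBUG_MARGIN 16
//     #define VMA_DEBUG_DETECT_CORRUPTION 1
//     #include "vk_mem_alloc.h"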
2952 
2953 #ifndef VMA_DEBUG_GLOBAL_MUTEX
2954     /**
2955     Set this to 1 for debugging purposes only, to enable single mutex protecting all
2956     entry calls to the library. Can be useful for debugging multithreading issues.
2957     */
2958     #define VMA_DEBUG_GLOBAL_MUTEX (0)
2959 #endif
2960 
2961 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
2962     /**
2963     Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity.
2964     Set to more than 1 for debugging purposes only. Must be power of two.
2965     */
2966     #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
2967 #endif
2968 
2969 #ifndef VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT
2970     /*
2971     Set this to 1 to make VMA never exceed VkPhysicalDeviceLimits::maxMemoryAllocationCount
2972     and return an error instead of leaving it up to the Vulkan implementation to decide what to do in such cases.
2973     */
2974     #define VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT (0)
2975 #endif
2976 
2977 #ifndef VMA_SMALL_HEAP_MAX_SIZE
2978    /// Maximum size of a memory heap in Vulkan to consider it "small".
2979    #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
2980 #endif
2981 
2982 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
2983    /// Default size of a block allocated as single VkDeviceMemory from a "large" heap.
2984    #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
2985 #endif
2986 
2987 /*
2988 Mapping hysteresis is logic that kicks in when vmaMapMemory/vmaUnmapMemory is called
2989 or a persistently mapped allocation is created and destroyed several times in a row.
2990 It keeps an additional +1 mapping of a device memory block to avoid calling the actual
2991 vkMapMemory/vkUnmapMemory too many times, which may improve performance and help
2992 tools like RenderDoc.
2993 */
2994 #ifndef VMA_MAPPING_HYSTERESIS_ENABLED
2995     #define VMA_MAPPING_HYSTERESIS_ENABLED 1
2996 #endif
2997 
2998 #ifndef VMA_CLASS_NO_COPY
2999     #define VMA_CLASS_NO_COPY(className) \
3000         private: \
3001             className(const className&) = delete; \
3002             className& operator=(const className&) = delete;
3003 #endif
3004 
3005 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
3006         VMA_ASSERT(0 && "Validation failed: " #cond); \
3007         return false; \
3008     } } while(false)
3009 
3010 /*******************************************************************************
3011 END OF CONFIGURATION
3012 */
3013 #endif // _VMA_CONFIGURATION
3014 
3015 
3016 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
3017 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
3018 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
3019 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
3020 
3021 // Copy of some Vulkan definitions so we don't need to check their existence just to handle a few constants.
3022 static const uint32_t VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY = 0x00000040;
3023 static const uint32_t VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY = 0x00000080;
3024 static const uint32_t VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY = 0x00020000;
3025 static const uint32_t VK_IMAGE_CREATE_DISJOINT_BIT_COPY = 0x00000200;
3026 static const int32_t VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT_COPY = 1000158000;
3027 static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
3028 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
3029 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
3030 
3031 // This one is tricky. The Vulkan specification defines this code as available since
3032 // Vulkan 1.0, but the Vulkan SDK doesn't actually define it in versions earlier than 1.2.131.
3033 // See pull request #207.
3034 #define VK_ERROR_UNKNOWN_COPY ((VkResult)-13)
3035 
3036 
3037 #if VMA_STATS_STRING_ENABLED
3038 // Correspond to values of enum VmaSuballocationType.
3039 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] =
3040 {
3041     "FREE",
3042     "UNKNOWN",
3043     "BUFFER",
3044     "IMAGE_UNKNOWN",
3045     "IMAGE_LINEAR",
3046     "IMAGE_OPTIMAL",
3047 };
3048 #endif
3049 
3050 static VkAllocationCallbacks VmaEmptyAllocationCallbacks =
3051     { VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
3052 
3053 
3054 #ifndef _VMA_ENUM_DECLARATIONS
3055 
3056 enum VmaSuballocationType
3057 {
3058     VMA_SUBALLOCATION_TYPE_FREE = 0,
3059     VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
3060     VMA_SUBALLOCATION_TYPE_BUFFER = 2,
3061     VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
3062     VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
3063     VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
3064     VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
3065 };
3066 
3067 enum VMA_CACHE_OPERATION
3068 {
3069     VMA_CACHE_FLUSH,
3070     VMA_CACHE_INVALIDATE
3071 };
3072 
3073 enum class VmaAllocationRequestType
3074 {
3075     Normal,
3076     TLSF,
3077     // Used by "Linear" algorithm.
3078     UpperAddress,
3079     EndOf1st,
3080     EndOf2nd,
3081 };
3082 
3083 #endif // _VMA_ENUM_DECLARATIONS
3084 
3085 #ifndef _VMA_FORWARD_DECLARATIONS
3086 // Opaque handle used by allocation algorithms to identify a single allocation in any conforming way.
3087 VK_DEFINE_NON_DISPATCHABLE_HANDLE(VmaAllocHandle);
3088 
3089 struct VmaMutexLock;
3090 struct VmaMutexLockRead;
3091 struct VmaMutexLockWrite;
3092 
3093 template<typename T>
3094 struct AtomicTransactionalIncrement;
3095 
3096 template<typename T>
3097 struct VmaStlAllocator;
3098 
3099 template<typename T, typename AllocatorT>
3100 class VmaVector;
3101 
3102 template<typename T, typename AllocatorT, size_t N>
3103 class VmaSmallVector;
3104 
3105 template<typename T>
3106 class VmaPoolAllocator;
3107 
3108 template<typename T>
3109 struct VmaListItem;
3110 
3111 template<typename T>
3112 class VmaRawList;
3113 
3114 template<typename T, typename AllocatorT>
3115 class VmaList;
3116 
3117 template<typename ItemTypeTraits>
3118 class VmaIntrusiveLinkedList;
3119 
3120 // Unused in this version
3121 #if 0
3122 template<typename T1, typename T2>
3123 struct VmaPair;
3124 template<typename FirstT, typename SecondT>
3125 struct VmaPairFirstLess;
3126 
3127 template<typename KeyT, typename ValueT>
3128 class VmaMap;
3129 #endif
3130 
3131 #if VMA_STATS_STRING_ENABLED
3132 class VmaStringBuilder;
3133 class VmaJsonWriter;
3134 #endif
3135 
3136 class VmaDeviceMemoryBlock;
3137 
3138 struct VmaDedicatedAllocationListItemTraits;
3139 class VmaDedicatedAllocationList;
3140 
3141 struct VmaSuballocation;
3142 struct VmaSuballocationOffsetLess;
3143 struct VmaSuballocationOffsetGreater;
3144 struct VmaSuballocationItemSizeLess;
3145 
3146 typedef VmaList<VmaSuballocation, VmaStlAllocator<VmaSuballocation>> VmaSuballocationList;
3147 
3148 struct VmaAllocationRequest;
3149 
3150 class VmaBlockMetadata;
3151 class VmaBlockMetadata_Linear;
3152 class VmaBlockMetadata_TLSF;
3153 
3154 class VmaBlockVector;
3155 
3156 struct VmaPoolListItemTraits;
3157 
3158 struct VmaCurrentBudgetData;
3159 
3160 class VmaAllocationObjectAllocator;
3161 
3162 #endif // _VMA_FORWARD_DECLARATIONS
3163 
3164 
3165 #ifndef _VMA_FUNCTIONS
3166 
3167 /*
3168 Returns number of bits set to 1 in (v).
3169 
3170 On specific platforms and compilers you can use intrinsics like:
3171 
3172 Visual Studio:
3173     return __popcnt(v);
3174 GCC, Clang:
3175     return static_cast<uint32_t>(__builtin_popcount(v));
3176 
3177 Define macro VMA_COUNT_BITS_SET to provide your optimized implementation.
3178 But you need to check at runtime whether the user's CPU supports these, as some old processors don't.
3179 */
3180 static inline uint32_t VmaCountBitsSet(uint32_t v)
3181 {
3182 #if __cplusplus >= 202002L || _MSVC_LANG >= 202002L // C++20
3183     return std::popcount(v);
3184 #else
3185     uint32_t c = v - ((v >> 1) & 0x55555555);
3186     c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
3187     c = ((c >> 4) + c) & 0x0F0F0F0F;
3188     c = ((c >> 8) + c) & 0x00FF00FF;
3189     c = ((c >> 16) + c) & 0x0000FFFF;
3190     return c;
3191 #endif
3192 }
3193 
3194 static inline uint8_t VmaBitScanLSB(uint64_t mask)
3195 {
3196 #if defined(_MSC_VER) && defined(_WIN64)
3197     unsigned long pos;
3198     if (_BitScanForward64(&pos, mask))
3199         return static_cast<uint8_t>(pos);
3200     return UINT8_MAX;
3201 #elif defined __GNUC__ || defined __clang__
3202     return static_cast<uint8_t>(__builtin_ffsll(mask)) - 1U;
3203 #else
3204     uint8_t pos = 0;
3205     uint64_t bit = 1;
3206     do
3207     {
3208         if (mask & bit)
3209             return pos;
3210         bit <<= 1;
3211     } while (pos++ < 63);
3212     return UINT8_MAX;
3213 #endif
3214 }
3215 
3216 static inline uint8_t VmaBitScanLSB(uint32_t mask)
3217 {
3218 #ifdef _MSC_VER
3219     unsigned long pos;
3220     if (_BitScanForward(&pos, mask))
3221         return static_cast<uint8_t>(pos);
3222     return UINT8_MAX;
3223 #elif defined __GNUC__ || defined __clang__
3224     return static_cast<uint8_t>(__builtin_ffs(mask)) - 1U;
3225 #else
3226     uint8_t pos = 0;
3227     uint32_t bit = 1;
3228     do
3229     {
3230         if (mask & bit)
3231             return pos;
3232         bit <<= 1;
3233     } while (pos++ < 31);
3234     return UINT8_MAX;
3235 #endif
3236 }
3237 
3238 static inline uint8_t VmaBitScanMSB(uint64_t mask)
3239 {
3240 #if defined(_MSC_VER) && defined(_WIN64)
3241     unsigned long pos;
3242     if (_BitScanReverse64(&pos, mask))
3243         return static_cast<uint8_t>(pos);
3244 #elif defined __GNUC__ || defined __clang__
3245     if (mask)
3246         return 63 - static_cast<uint8_t>(__builtin_clzll(mask));
3247 #else
3248     uint8_t pos = 63;
3249     uint64_t bit = 1ULL << 63;
3250     do
3251     {
3252         if (mask & bit)
3253             return pos;
3254         bit >>= 1;
3255     } while (pos-- > 0);
3256 #endif
3257     return UINT8_MAX;
3258 }
3259 
3260 static inline uint8_t VmaBitScanMSB(uint32_t mask)
3261 {
3262 #ifdef _MSC_VER
3263     unsigned long pos;
3264     if (_BitScanReverse(&pos, mask))
3265         return static_cast<uint8_t>(pos);
3266 #elif defined __GNUC__ || defined __clang__
3267     if (mask)
3268         return 31 - static_cast<uint8_t>(__builtin_clz(mask));
3269 #else
3270     uint8_t pos = 31;
3271     uint32_t bit = 1UL << 31;
3272     do
3273     {
3274         if (mask & bit)
3275             return pos;
3276         bit >>= 1;
3277     } while (pos-- > 0);
3278 #endif
3279     return UINT8_MAX;
3280 }
3281 
3282 /*
3283 Returns true if the given number is a power of two.
3284 T must be an unsigned integer, or a signed integer that is always nonnegative.
3285 Returns true for 0.
3286 */
3287 template <typename T>
3288 inline bool VmaIsPow2(T x)
3289 {
3290     return (x & (x - 1)) == 0;
3291 }
3292 
3293 // Aligns the given value up to the nearest multiple of the align value. For example: VmaAlignUp(11, 8) = 16.
3294 // Use types like uint32_t, uint64_t as T.
3295 template <typename T>
3296 static inline T VmaAlignUp(T val, T alignment)
3297 {
3298     VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
3299     return (val + alignment - 1) & ~(alignment - 1);
3300 }
3301 
3302 // Aligns the given value down to the nearest multiple of the align value. For example: VmaAlignDown(11, 8) = 8.
3303 // Use types like uint32_t, uint64_t as T.
3304 template <typename T>
3305 static inline T VmaAlignDown(T val, T alignment)
3306 {
3307     VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
3308     return val & ~(alignment - 1);
3309 }
3310 
3311 // Division with mathematical rounding to nearest number.
3312 template <typename T>
3313 static inline T VmaRoundDiv(T x, T y)
3314 {
3315     return (x + (y / (T)2)) / y;
3316 }
3317 
3318 // Divide by 'y' and round up to nearest integer.
3319 template <typename T>
3320 static inline T VmaDivideRoundingUp(T x, T y)
3321 {
3322     return (x + y - (T)1) / y;
3323 }
3324 
3325 // Returns smallest power of 2 greater or equal to v.
3326 static inline uint32_t VmaNextPow2(uint32_t v)
3327 {
3328     v--;
3329     v |= v >> 1;
3330     v |= v >> 2;
3331     v |= v >> 4;
3332     v |= v >> 8;
3333     v |= v >> 16;
3334     v++;
3335     return v;
3336 }
3337 
3338 static inline uint64_t VmaNextPow2(uint64_t v)
3339 {
3340     v--;
3341     v |= v >> 1;
3342     v |= v >> 2;
3343     v |= v >> 4;
3344     v |= v >> 8;
3345     v |= v >> 16;
3346     v |= v >> 32;
3347     v++;
3348     return v;
3349 }
3350 
3351 // Returns largest power of 2 less or equal to v.
3352 static inline uint32_t VmaPrevPow2(uint32_t v)
3353 {
3354     v |= v >> 1;
3355     v |= v >> 2;
3356     v |= v >> 4;
3357     v |= v >> 8;
3358     v |= v >> 16;
3359     v = v ^ (v >> 1);
3360     return v;
3361 }
3362 
3363 static inline uint64_t VmaPrevPow2(uint64_t v)
3364 {
3365     v |= v >> 1;
3366     v |= v >> 2;
3367     v |= v >> 4;
3368     v |= v >> 8;
3369     v |= v >> 16;
3370     v |= v >> 32;
3371     v = v ^ (v >> 1);
3372     return v;
3373 }
3374 
3375 static inline bool VmaStrIsEmpty(const char* pStr)
3376 {
3377     return pStr == VMA_NULL || *pStr == '\0';
3378 }
3379 
3380 /*
3381 Returns true if two memory blocks occupy overlapping pages.
3382 ResourceA must be at a lower memory offset than ResourceB.
3383 
3384 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
3385 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
3386 */
3387 static inline bool VmaBlocksOnSamePage(
3388     VkDeviceSize resourceAOffset,
3389     VkDeviceSize resourceASize,
3390     VkDeviceSize resourceBOffset,
3391     VkDeviceSize pageSize)
3392 {
3393     VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
3394     VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
3395     VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
3396     VkDeviceSize resourceBStart = resourceBOffset;
3397     VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
3398     return resourceAEndPage == resourceBStartPage;
3399 }
3400 
3401 /*
3402 Returns true if given suballocation types could conflict and must respect
3403 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is a buffer
3404 or a linear image and the other is an optimal image. If the type is unknown, behave
3405 conservatively.
3406 */
3407 static inline bool VmaIsBufferImageGranularityConflict(
3408     VmaSuballocationType suballocType1,
3409     VmaSuballocationType suballocType2)
3410 {
3411     if (suballocType1 > suballocType2)
3412     {
3413         VMA_SWAP(suballocType1, suballocType2);
3414     }
3415 
3416     switch (suballocType1)
3417     {
3418     case VMA_SUBALLOCATION_TYPE_FREE:
3419         return false;
3420     case VMA_SUBALLOCATION_TYPE_UNKNOWN:
3421         return true;
3422     case VMA_SUBALLOCATION_TYPE_BUFFER:
3423         return
3424             suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3425             suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3426     case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
3427         return
3428             suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3429             suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
3430             suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3431     case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
3432         return
3433             suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3434     case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
3435         return false;
3436     default:
3437         VMA_ASSERT(0);
3438         return true;
3439     }
3440 }
3441 
3442 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
3443 {
3444 #if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
3445     uint32_t* pDst = (uint32_t*)((char*)pData + offset);
3446     const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3447     for (size_t i = 0; i < numberCount; ++i, ++pDst)
3448     {
3449         *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
3450     }
3451 #else
3452     // no-op
3453 #endif
3454 }
3455 
3456 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
3457 {
3458 #if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
3459     const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
3460     const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3461     for (size_t i = 0; i < numberCount; ++i, ++pSrc)
3462     {
3463         if (*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
3464         {
3465             return false;
3466         }
3467     }
3468 #endif
3469     return true;
3470 }
3471 
3472 /*
3473 Fills structure with parameters of an example buffer to be used for transfers
3474 during GPU memory defragmentation.
3475 */
3476 static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
3477 {
3478     memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
3479     outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
3480     outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
3481     outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
3482 }
3483 
3484 
3485 /*
3486 Performs binary search and returns iterator to first element that is greater or
3487 equal to (key), according to comparison (cmp).
3488 
3489 Cmp should return true if the first argument is less than the second argument.
3490 
3491 The returned value is the found element, if present in the collection, or the place where
3492 a new element with value (key) should be inserted.
3493 */
3494 template <typename CmpLess, typename IterT, typename KeyT>
3495 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT& key, const CmpLess& cmp)
3496 {
3497     size_t down = 0, up = (end - beg);
3498     while (down < up)
3499     {
3500         const size_t mid = down + (up - down) / 2;  // Overflow-safe midpoint calculation
3501         if (cmp(*(beg + mid), key))
3502         {
3503             down = mid + 1;
3504         }
3505         else
3506         {
3507             up = mid;
3508         }
3509     }
3510     return beg + down;
3511 }
3512 
3513 template<typename CmpLess, typename IterT, typename KeyT>
3514 IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
3515 {
3516     IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
3517         beg, end, value, cmp);
3518     if (it == end ||
3519         (!cmp(*it, value) && !cmp(value, *it)))
3520     {
3521         return it;
3522     }
3523     return end;
3524 }
3525 
3526 /*
3527 Returns true if all pointers in the array are non-null and unique.
3528 Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
3529 T must be pointer type, e.g. VmaAllocation, VmaPool.
3530 */
3531 template<typename T>
3532 static bool VmaValidatePointerArray(uint32_t count, const T* arr)
3533 {
3534     for (uint32_t i = 0; i < count; ++i)
3535     {
3536         const T iPtr = arr[i];
3537         if (iPtr == VMA_NULL)
3538         {
3539             return false;
3540         }
3541         for (uint32_t j = i + 1; j < count; ++j)
3542         {
3543             if (iPtr == arr[j])
3544             {
3545                 return false;
3546             }
3547         }
3548     }
3549     return true;
3550 }
3551 
3552 template<typename MainT, typename NewT>
3553 static inline void VmaPnextChainPushFront(MainT* mainStruct, NewT* newStruct)
3554 {
3555     newStruct->pNext = mainStruct->pNext;
3556     mainStruct->pNext = newStruct;
3557 }
3558 
3559 // This is the main algorithm that guides the selection of a memory type best for an allocation -
3560 // converts usage to required/preferred/not preferred flags.
3561 static bool FindMemoryPreferences(
3562     bool isIntegratedGPU,
3563     const VmaAllocationCreateInfo& allocCreateInfo,
3564     VkFlags bufImgUsage, // VkBufferCreateInfo::usage or VkImageCreateInfo::usage. UINT32_MAX if unknown.
3565     VkMemoryPropertyFlags& outRequiredFlags,
3566     VkMemoryPropertyFlags& outPreferredFlags,
3567     VkMemoryPropertyFlags& outNotPreferredFlags)
3568 {
3569     outRequiredFlags = allocCreateInfo.requiredFlags;
3570     outPreferredFlags = allocCreateInfo.preferredFlags;
3571     outNotPreferredFlags = 0;
3572 
3573     switch(allocCreateInfo.usage)
3574     {
3575     case VMA_MEMORY_USAGE_UNKNOWN:
3576         break;
3577     case VMA_MEMORY_USAGE_GPU_ONLY:
3578         if(!isIntegratedGPU || (outPreferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
3579         {
3580             outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3581         }
3582         break;
3583     case VMA_MEMORY_USAGE_CPU_ONLY:
3584         outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
3585         break;
3586     case VMA_MEMORY_USAGE_CPU_TO_GPU:
3587         outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
3588         if(!isIntegratedGPU || (outPreferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
3589         {
3590             outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3591         }
3592         break;
3593     case VMA_MEMORY_USAGE_GPU_TO_CPU:
3594         outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
3595         outPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
3596         break;
3597     case VMA_MEMORY_USAGE_CPU_COPY:
3598         outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3599         break;
3600     case VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED:
3601         outRequiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
3602         break;
3603     case VMA_MEMORY_USAGE_AUTO:
3604     case VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE:
3605     case VMA_MEMORY_USAGE_AUTO_PREFER_HOST:
3606     {
3607         if(bufImgUsage == UINT32_MAX)
3608         {
3609             VMA_ASSERT(0 && "VMA_MEMORY_USAGE_AUTO* values can only be used with functions like vmaCreateBuffer, vmaCreateImage so that the details of the created resource are known.");
3610             return false;
3611         }
3612         // This relies on values of VK_IMAGE_USAGE_TRANSFER* being the same as VK_BUFFER_USAGE_TRANSFER*.
3613         const bool deviceAccess = (bufImgUsage & ~(VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT)) != 0;
3614         const bool hostAccessSequentialWrite = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT) != 0;
3615         const bool hostAccessRandom = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT) != 0;
3616         const bool hostAccessAllowTransferInstead = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT) != 0;
3617         const bool preferDevice = allocCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE;
3618         const bool preferHost = allocCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_HOST;
3619 
3620         // CPU random access - e.g. a buffer written to or transferred from GPU to read back on CPU.
3621         if(hostAccessRandom)
3622         {
3623             if(!isIntegratedGPU && deviceAccess && hostAccessAllowTransferInstead && !preferHost)
3624             {
3625                 // Nice if it will end up in HOST_VISIBLE, but more importantly prefer DEVICE_LOCAL.
3626                 // Omitting HOST_VISIBLE here is intentional.
3627                 // In case there is DEVICE_LOCAL | HOST_VISIBLE | HOST_CACHED, it will pick that one.
3628                 // Otherwise, this will give the same weight to DEVICE_LOCAL as to HOST_VISIBLE | HOST_CACHED and select the former if it occurs first on the list.
3629                 outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
3630             }
3631             else
3632             {
3633                 // Always CPU memory, cached.
3634                 outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
3635             }
3636         }
3637         // CPU sequential write - may be CPU or host-visible GPU memory, uncached and write-combined.
3638         else if(hostAccessSequentialWrite)
3639         {
3640             // Want uncached and write-combined.
3641             outNotPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
3642 
3643             if(!isIntegratedGPU && deviceAccess && hostAccessAllowTransferInstead && !preferHost)
3644             {
3645                 outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
3646             }
3647             else
3648             {
3649                 outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
3650                 // Direct GPU access, CPU sequential write (e.g. a dynamic uniform buffer updated every frame)
3651                 if(deviceAccess)
3652                 {
3653                     // Could go to CPU memory or GPU BAR/unified. Up to the user to decide. If no preference, choose GPU memory.
3654                     if(preferHost)
3655                         outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3656                     else
3657                         outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3658                 }
3659                 // GPU no direct access, CPU sequential write (e.g. an upload buffer to be transferred to the GPU)
3660                 else
3661                 {
3662                     // Could go to CPU memory or GPU BAR/unified. Up to the user to decide. If no preference, choose CPU memory.
3663                     if(preferDevice)
3664                         outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3665                     else
3666                         outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3667                 }
3668             }
3669         }
3670         // No CPU access
3671         else
3672         {
3673             // GPU access, no CPU access (e.g. a color attachment image) - prefer GPU memory
3674             if(deviceAccess)
3675             {
3676                 // ...unless there is a clear preference from the user not to do so.
3677                 if(preferHost)
3678                     outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3679                 else
3680                     outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3681             }
3682             // No direct GPU access, no CPU access, just transfers.
3683             // It may be staging copy intended for e.g. preserving image for next frame (then better GPU memory) or
3684             // a "swap file" copy to free some GPU memory (then better CPU memory).
3685             // Up to the user to decide. If no preference, assume the former and choose GPU memory.
3686             if(preferHost)
3687                 outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3688             else
3689                 outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3690         }
3691         break;
3692     }
3693     default:
3694         VMA_ASSERT(0);
3695     }
3696 
3697     // Avoid DEVICE_COHERENT unless explicitly requested.
3698     if(((allocCreateInfo.requiredFlags | allocCreateInfo.preferredFlags) &
3699         (VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)) == 0)
3700     {
3701         outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY;
3702     }
3703 
3704     return true;
3705 }
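
// A worked example of the selection logic above (illustrative only, assuming
// no user preference flags are set): a CPU-written upload buffer - host
// sequential write access, no direct device access - ends up with:
//     outRequiredFlags     = HOST_VISIBLE
//     outPreferredFlags    = 0
//     outNotPreferredFlags = HOST_CACHED | DEVICE_LOCAL (| DEVICE_UNCACHED_AMD)
// i.e. plain write-combined CPU memory is chosen, and BAR/unified memory is
// only used as a fallback.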

////////////////////////////////////////////////////////////////////////////////
// Memory allocation

static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    void* result = VMA_NULL;
    if ((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        result = (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        result = VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
    VMA_ASSERT(result != VMA_NULL && "CPU memory allocation failed.");
    return result;
}

static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if ((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_ALIGNED_FREE(ptr);
    }
}

template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}

template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}

#define vma_new(allocator, type)   new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count)   new(VmaAllocateArray<type>((allocator), (count)))(type)

template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}

template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if (ptr != VMA_NULL)
    {
        for (size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}

static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr)
{
    if (srcStr != VMA_NULL)
    {
        const size_t len = strlen(srcStr);
        char* const result = vma_new_array(allocs, char, len + 1);
        memcpy(result, srcStr, len + 1);
        return result;
    }
    return VMA_NULL;
}

#if VMA_STATS_STRING_ENABLED
static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr, size_t strLen)
{
    if (srcStr != VMA_NULL)
    {
        char* const result = vma_new_array(allocs, char, strLen + 1);
        memcpy(result, srcStr, strLen);
        result[strLen] = '\0';
        return result;
    }
    return VMA_NULL;
}
#endif // VMA_STATS_STRING_ENABLED

static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str)
{
    if (str != VMA_NULL)
    {
        const size_t len = strlen(str);
        vma_delete_array(allocs, str, len + 1);
    }
}

template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        CmpLess()) - vector.data();
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}

template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    if ((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    {
        size_t indexToRemove = it - vector.begin();
        VmaVectorRemove(vector, indexToRemove);
        return true;
    }
    return false;
}
#endif // _VMA_FUNCTIONS

#ifndef _VMA_STATISTICS_FUNCTIONS

static void VmaClearStatistics(VmaStatistics& outStats)
{
    outStats.blockCount = 0;
    outStats.allocationCount = 0;
    outStats.blockBytes = 0;
    outStats.allocationBytes = 0;
}

static void VmaAddStatistics(VmaStatistics& inoutStats, const VmaStatistics& src)
{
    inoutStats.blockCount += src.blockCount;
    inoutStats.allocationCount += src.allocationCount;
    inoutStats.blockBytes += src.blockBytes;
    inoutStats.allocationBytes += src.allocationBytes;
}

static void VmaClearDetailedStatistics(VmaDetailedStatistics& outStats)
{
    VmaClearStatistics(outStats.statistics);
    outStats.unusedRangeCount = 0;
    outStats.allocationSizeMin = VK_WHOLE_SIZE;
    outStats.allocationSizeMax = 0;
    outStats.unusedRangeSizeMin = VK_WHOLE_SIZE;
    outStats.unusedRangeSizeMax = 0;
}

static void VmaAddDetailedStatisticsAllocation(VmaDetailedStatistics& inoutStats, VkDeviceSize size)
{
    inoutStats.statistics.allocationCount++;
    inoutStats.statistics.allocationBytes += size;
    inoutStats.allocationSizeMin = VMA_MIN(inoutStats.allocationSizeMin, size);
    inoutStats.allocationSizeMax = VMA_MAX(inoutStats.allocationSizeMax, size);
}

static void VmaAddDetailedStatisticsUnusedRange(VmaDetailedStatistics& inoutStats, VkDeviceSize size)
{
    inoutStats.unusedRangeCount++;
    inoutStats.unusedRangeSizeMin = VMA_MIN(inoutStats.unusedRangeSizeMin, size);
    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, size);
}

static void VmaAddDetailedStatistics(VmaDetailedStatistics& inoutStats, const VmaDetailedStatistics& src)
{
    VmaAddStatistics(inoutStats.statistics, src.statistics);
    inoutStats.unusedRangeCount += src.unusedRangeCount;
    inoutStats.allocationSizeMin = VMA_MIN(inoutStats.allocationSizeMin, src.allocationSizeMin);
    inoutStats.allocationSizeMax = VMA_MAX(inoutStats.allocationSizeMax, src.allocationSizeMax);
    inoutStats.unusedRangeSizeMin = VMA_MIN(inoutStats.unusedRangeSizeMin, src.unusedRangeSizeMin);
    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, src.unusedRangeSizeMax);
}

#endif // _VMA_STATISTICS_FUNCTIONS

#ifndef _VMA_MUTEX_LOCK
// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
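// A minimal usage sketch (illustrative only; myMutex is a hypothetical member):
//     {
//         VmaMutexLock lock(myMutex, m_UseMutex);
//         // ... critical section; unlocked automatically at the end of scope.
//     }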
struct VmaMutexLock
{
    VMA_CLASS_NO_COPY(VmaMutexLock)
public:
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    {
        if (m_pMutex) { m_pMutex->Lock(); }
    }
    ~VmaMutexLock() { if (m_pMutex) { m_pMutex->Unlock(); } }

private:
    VMA_MUTEX* m_pMutex;
};

// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
struct VmaMutexLockRead
{
    VMA_CLASS_NO_COPY(VmaMutexLockRead)
public:
    VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    {
        if (m_pMutex) { m_pMutex->LockRead(); }
    }
    ~VmaMutexLockRead() { if (m_pMutex) { m_pMutex->UnlockRead(); } }

private:
    VMA_RW_MUTEX* m_pMutex;
};

// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
struct VmaMutexLockWrite
{
    VMA_CLASS_NO_COPY(VmaMutexLockWrite)
public:
    VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex)
        : m_pMutex(useMutex ? &mutex : VMA_NULL)
    {
        if (m_pMutex) { m_pMutex->LockWrite(); }
    }
    ~VmaMutexLockWrite() { if (m_pMutex) { m_pMutex->UnlockWrite(); } }

private:
    VMA_RW_MUTEX* m_pMutex;
};

#if VMA_DEBUG_GLOBAL_MUTEX
    static VMA_MUTEX gDebugGlobalMutex;
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
#else
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif
#endif // _VMA_MUTEX_LOCK

#ifndef _VMA_ATOMIC_TRANSACTIONAL_INCREMENT
// An object that increments the given atomic but decrements it back in its destructor unless Commit() is called.
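// A minimal usage sketch (illustrative only; m_Counter and DoWork are
// hypothetical):
//     AtomicTransactionalIncrement<uint32_t> transaction;
//     transaction.Increment(&m_Counter); // counter goes up immediately
//     if (!DoWork())
//         return false;      // destructor rolls the increment back
//     transaction.Commit();  // success - the increment is kept
//     return true;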
template<typename T>
struct AtomicTransactionalIncrement
{
public:
    typedef std::atomic<T> AtomicT;

    ~AtomicTransactionalIncrement()
    {
        if(m_Atomic)
            --(*m_Atomic);
    }

    void Commit() { m_Atomic = nullptr; }
    T Increment(AtomicT* atomic)
    {
        m_Atomic = atomic;
        return m_Atomic->fetch_add(1);
    }

private:
    AtomicT* m_Atomic = nullptr;
};
#endif // _VMA_ATOMIC_TRANSACTIONAL_INCREMENT

#ifndef _VMA_STL_ALLOCATOR
// STL-compatible allocator.
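// A minimal usage sketch (illustrative only): the struct provides value_type,
// allocate/deallocate, a rebinding constructor, and equality, which is enough
// for std::allocator_traits to route a standard container's storage through
// VkAllocationCallbacks (pCallbacks below is an assumed pointer, may be null):
//     std::vector<int, VmaStlAllocator<int>> v(VmaStlAllocator<int>(pCallbacks));
//     v.push_back(42);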
template<typename T>
struct VmaStlAllocator
{
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) {}
    template<typename U>
    VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) {}
    VmaStlAllocator(const VmaStlAllocator&) = default;
    VmaStlAllocator& operator=(const VmaStlAllocator&) = delete;

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }
};
#endif // _VMA_STL_ALLOCATOR

#ifndef _VMA_VECTOR
/* Class with an interface compatible with a subset of std::vector.
T must be POD because constructors and destructors are not called and memcpy is
used to move these objects. */
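/* A minimal usage sketch (illustrative only; pCallbacks is an assumed
VkAllocationCallbacks pointer). Note the POD requirement above - elements are
moved with memcpy, so only trivially copyable types are safe here:
    VmaVector<uint32_t, VmaStlAllocator<uint32_t>> indices(
        VmaStlAllocator<uint32_t>(pCallbacks));
    indices.push_back(7);
    for (uint32_t i : indices) { ... }
*/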
template<typename T, typename AllocatorT>
class VmaVector
{
public:
    typedef T value_type;
    typedef T* iterator;
    typedef const T* const_iterator;

    VmaVector(const AllocatorT& allocator);
    VmaVector(size_t count, const AllocatorT& allocator);
    // This version of the constructor is here for compatibility with pre-C++14 std::vector.
    // value is unused.
    VmaVector(size_t count, const T& value, const AllocatorT& allocator) : VmaVector(count, allocator) {}
    VmaVector(const VmaVector<T, AllocatorT>& src);
    VmaVector& operator=(const VmaVector& rhs);
    ~VmaVector() { VmaFree(m_Allocator.m_pCallbacks, m_pArray); }

    bool empty() const { return m_Count == 0; }
    size_t size() const { return m_Count; }
    T* data() { return m_pArray; }
    T& front() { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[0]; }
    T& back() { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[m_Count - 1]; }
    const T* data() const { return m_pArray; }
    const T& front() const { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[0]; }
    const T& back() const { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[m_Count - 1]; }

    iterator begin() { return m_pArray; }
    iterator end() { return m_pArray + m_Count; }
    const_iterator cbegin() const { return m_pArray; }
    const_iterator cend() const { return m_pArray + m_Count; }
    const_iterator begin() const { return cbegin(); }
    const_iterator end() const { return cend(); }

    void pop_front() { VMA_HEAVY_ASSERT(m_Count > 0); remove(0); }
    void pop_back() { VMA_HEAVY_ASSERT(m_Count > 0); resize(size() - 1); }
    void push_front(const T& src) { insert(0, src); }

    void push_back(const T& src);
    void reserve(size_t newCapacity, bool freeMemory = false);
    void resize(size_t newCount);
    void clear() { resize(0); }
    void shrink_to_fit();
    void insert(size_t index, const T& src);
    void remove(size_t index);

    T& operator[](size_t index) { VMA_HEAVY_ASSERT(index < m_Count); return m_pArray[index]; }
    const T& operator[](size_t index) const { VMA_HEAVY_ASSERT(index < m_Count); return m_pArray[index]; }

private:
    AllocatorT m_Allocator;
    T* m_pArray;
    size_t m_Count;
    size_t m_Capacity;
};

#ifndef _VMA_VECTOR_FUNCTIONS
template<typename T, typename AllocatorT>
VmaVector<T, AllocatorT>::VmaVector(const AllocatorT& allocator)
    : m_Allocator(allocator),
    m_pArray(VMA_NULL),
    m_Count(0),
    m_Capacity(0) {}

template<typename T, typename AllocatorT>
VmaVector<T, AllocatorT>::VmaVector(size_t count, const AllocatorT& allocator)
    : m_Allocator(allocator),
    m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    m_Count(count),
    m_Capacity(count) {}

template<typename T, typename AllocatorT>
VmaVector<T, AllocatorT>::VmaVector(const VmaVector& src)
    : m_Allocator(src.m_Allocator),
    m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    m_Count(src.m_Count),
    m_Capacity(src.m_Count)
{
    if (m_Count != 0)
    {
        memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    }
}

template<typename T, typename AllocatorT>
VmaVector<T, AllocatorT>& VmaVector<T, AllocatorT>::operator=(const VmaVector& rhs)
{
    if (&rhs != this)
    {
        resize(rhs.m_Count);
        if (m_Count != 0)
        {
            memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
        }
    }
    return *this;
}

template<typename T, typename AllocatorT>
void VmaVector<T, AllocatorT>::push_back(const T& src)
{
    const size_t newIndex = size();
    resize(newIndex + 1);
    m_pArray[newIndex] = src;
}

template<typename T, typename AllocatorT>
void VmaVector<T, AllocatorT>::reserve(size_t newCapacity, bool freeMemory)
{
    newCapacity = VMA_MAX(newCapacity, m_Count);

    if ((newCapacity < m_Capacity) && !freeMemory)
    {
        newCapacity = m_Capacity;
    }

    if (newCapacity != m_Capacity)
    {
        T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
        if (m_Count != 0)
        {
            memcpy(newArray, m_pArray, m_Count * sizeof(T));
        }
        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
        m_Capacity = newCapacity;
        m_pArray = newArray;
    }
}

template<typename T, typename AllocatorT>
void VmaVector<T, AllocatorT>::resize(size_t newCount)
{
    size_t newCapacity = m_Capacity;
    if (newCount > m_Capacity)
    {
        newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    }

    if (newCapacity != m_Capacity)
    {
        T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
        const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
        if (elementsToCopy != 0)
        {
            memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
        }
        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
        m_Capacity = newCapacity;
        m_pArray = newArray;
    }

    m_Count = newCount;
}

template<typename T, typename AllocatorT>
void VmaVector<T, AllocatorT>::shrink_to_fit()
{
    if (m_Capacity > m_Count)
    {
        T* newArray = VMA_NULL;
        if (m_Count > 0)
        {
            newArray = VmaAllocateArray<T>(m_Allocator.m_pCallbacks, m_Count);
            memcpy(newArray, m_pArray, m_Count * sizeof(T));
        }
        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
        m_Capacity = m_Count;
        m_pArray = newArray;
    }
}

template<typename T, typename AllocatorT>
void VmaVector<T, AllocatorT>::insert(size_t index, const T& src)
{
    VMA_HEAVY_ASSERT(index <= m_Count);
    const size_t oldCount = size();
    resize(oldCount + 1);
    if (index < oldCount)
    {
        memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    }
    m_pArray[index] = src;
}

template<typename T, typename AllocatorT>
void VmaVector<T, AllocatorT>::remove(size_t index)
{
    VMA_HEAVY_ASSERT(index < m_Count);
    const size_t oldCount = size();
    if (index < oldCount - 1)
    {
        memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    }
    resize(oldCount - 1);
}
#endif // _VMA_VECTOR_FUNCTIONS

template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
#endif // _VMA_VECTOR

#ifndef _VMA_SMALL_VECTOR
/*
This is a vector (a variable-sized array), optimized for the case when the array is small.

It contains some number of elements in-place, which allows it to avoid heap allocation
when the actual number of elements is below that threshold. This allows normal "small"
cases to be fast without losing generality for large inputs.
*/
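/* A minimal usage sketch (illustrative only; pCallbacks is an assumed
VkAllocationCallbacks pointer). Up to N elements live in the in-place array,
so no heap allocation happens for small sizes:
    VmaSmallVector<VkDeviceSize, VmaStlAllocator<VkDeviceSize>, 8> offsets(
        VmaStlAllocator<VkDeviceSize>(pCallbacks));
    for (uint32_t i = 0; i < 8; ++i)
        offsets.push_back(i * 256); // still entirely in m_StaticArray
    offsets.push_back(9999);        // 9th element - spills to m_DynamicArray
*/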
template<typename T, typename AllocatorT, size_t N>
class VmaSmallVector
{
public:
    typedef T value_type;
    typedef T* iterator;

    VmaSmallVector(const AllocatorT& allocator);
    VmaSmallVector(size_t count, const AllocatorT& allocator);
    template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
    VmaSmallVector(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>&) = delete;
    template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
    VmaSmallVector<T, AllocatorT, N>& operator=(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>&) = delete;
    ~VmaSmallVector() = default;

    bool empty() const { return m_Count == 0; }
    size_t size() const { return m_Count; }
    T* data() { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
    T& front() { VMA_HEAVY_ASSERT(m_Count > 0); return data()[0]; }
    T& back() { VMA_HEAVY_ASSERT(m_Count > 0); return data()[m_Count - 1]; }
    const T* data() const { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
    const T& front() const { VMA_HEAVY_ASSERT(m_Count > 0); return data()[0]; }
    const T& back() const { VMA_HEAVY_ASSERT(m_Count > 0); return data()[m_Count - 1]; }

    iterator begin() { return data(); }
    iterator end() { return data() + m_Count; }

    void pop_front() { VMA_HEAVY_ASSERT(m_Count > 0); remove(0); }
    void pop_back() { VMA_HEAVY_ASSERT(m_Count > 0); resize(size() - 1); }
    void push_front(const T& src) { insert(0, src); }

    void push_back(const T& src);
    void resize(size_t newCount, bool freeMemory = false);
    void clear(bool freeMemory = false);
    void insert(size_t index, const T& src);
    void remove(size_t index);

    T& operator[](size_t index) { VMA_HEAVY_ASSERT(index < m_Count); return data()[index]; }
    const T& operator[](size_t index) const { VMA_HEAVY_ASSERT(index < m_Count); return data()[index]; }

private:
    size_t m_Count;
    T m_StaticArray[N]; // Used when m_Count <= N
    VmaVector<T, AllocatorT> m_DynamicArray; // Used when m_Count > N
};

#ifndef _VMA_SMALL_VECTOR_FUNCTIONS
template<typename T, typename AllocatorT, size_t N>
VmaSmallVector<T, AllocatorT, N>::VmaSmallVector(const AllocatorT& allocator)
    : m_Count(0),
    m_DynamicArray(allocator) {}

template<typename T, typename AllocatorT, size_t N>
VmaSmallVector<T, AllocatorT, N>::VmaSmallVector(size_t count, const AllocatorT& allocator)
    : m_Count(count),
    m_DynamicArray(count > N ? count : 0, allocator) {}

template<typename T, typename AllocatorT, size_t N>
void VmaSmallVector<T, AllocatorT, N>::push_back(const T& src)
{
    const size_t newIndex = size();
    resize(newIndex + 1);
    data()[newIndex] = src;
}

template<typename T, typename AllocatorT, size_t N>
void VmaSmallVector<T, AllocatorT, N>::resize(size_t newCount, bool freeMemory)
{
    if (newCount > N && m_Count > N)
    {
        // Any direction, staying in m_DynamicArray
        m_DynamicArray.resize(newCount);
        if (freeMemory)
        {
            m_DynamicArray.shrink_to_fit();
        }
    }
    else if (newCount > N && m_Count <= N)
    {
        // Growing, moving from m_StaticArray to m_DynamicArray
        m_DynamicArray.resize(newCount);
        if (m_Count > 0)
        {
            memcpy(m_DynamicArray.data(), m_StaticArray, m_Count * sizeof(T));
        }
    }
    else if (newCount <= N && m_Count > N)
    {
        // Shrinking, moving from m_DynamicArray to m_StaticArray
        if (newCount > 0)
        {
            memcpy(m_StaticArray, m_DynamicArray.data(), newCount * sizeof(T));
        }
        m_DynamicArray.resize(0);
        if (freeMemory)
        {
            m_DynamicArray.shrink_to_fit();
        }
    }
    else
    {
        // Any direction, staying in m_StaticArray - nothing to do here
    }
    m_Count = newCount;
}

template<typename T, typename AllocatorT, size_t N>
void VmaSmallVector<T, AllocatorT, N>::clear(bool freeMemory)
{
    m_DynamicArray.clear();
    if (freeMemory)
    {
        m_DynamicArray.shrink_to_fit();
    }
    m_Count = 0;
}

template<typename T, typename AllocatorT, size_t N>
void VmaSmallVector<T, AllocatorT, N>::insert(size_t index, const T& src)
{
    VMA_HEAVY_ASSERT(index <= m_Count);
    const size_t oldCount = size();
    resize(oldCount + 1);
    T* const dataPtr = data();
    if (index < oldCount)
    {
        // This could be more optimal in the case where the memmove could be a memcpy directly from m_StaticArray to m_DynamicArray.
        memmove(dataPtr + (index + 1), dataPtr + index, (oldCount - index) * sizeof(T));
    }
    dataPtr[index] = src;
}

template<typename T, typename AllocatorT, size_t N>
void VmaSmallVector<T, AllocatorT, N>::remove(size_t index)
{
    VMA_HEAVY_ASSERT(index < m_Count);
    const size_t oldCount = size();
    if (index < oldCount - 1)
    {
        // This could be more optimal in the case where the memmove could be a memcpy directly from m_DynamicArray to m_StaticArray.
        T* const dataPtr = data();
        memmove(dataPtr + index, dataPtr + (index + 1), (oldCount - index - 1) * sizeof(T));
    }
    resize(oldCount - 1);
}
#endif // _VMA_SMALL_VECTOR_FUNCTIONS
#endif // _VMA_SMALL_VECTOR

#ifndef _VMA_POOL_ALLOCATOR
/*
Allocator for objects of type T, using a list of arrays (pools) to speed up
allocation. The number of elements that can be allocated is not bounded because
the allocator can create multiple blocks.
*/
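/* A minimal usage sketch (illustrative only; MyNode and its constructor
arguments are hypothetical):
    VmaPoolAllocator<MyNode> pool(pCallbacks, 32); // first block holds 32 items
    MyNode* node = pool.Alloc(arg1, arg2);         // placement-new in a free slot
    // ...
    pool.Free(node); // runs ~MyNode() and recycles the slot in its block
*/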
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
    ~VmaPoolAllocator();
    template<typename... Types> T* Alloc(Types&&... args);
    void Free(T* ptr);

private:
    union Item
    {
        uint32_t NextFreeIndex;
        alignas(T) char Value[sizeof(T)];
    };
    struct ItemBlock
    {
        Item* pItems;
        uint32_t Capacity;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    const uint32_t m_FirstBlockCapacity;
    VmaVector<ItemBlock, VmaStlAllocator<ItemBlock>> m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};

#ifndef _VMA_POOL_ALLOCATOR_FUNCTIONS
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity)
    : m_pAllocationCallbacks(pAllocationCallbacks),
    m_FirstBlockCapacity(firstBlockCapacity),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(m_FirstBlockCapacity > 1);
}

template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    for (size_t i = m_ItemBlocks.size(); i--;)
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
    m_ItemBlocks.clear();
}

template<typename T>
template<typename... Types> T* VmaPoolAllocator<T>::Alloc(Types&&... args)
{
    for (size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: use the first one.
        if (block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            block.FirstFreeIndex = pItem->NextFreeIndex;
            T* result = (T*)&pItem->Value;
            new(result) T(std::forward<Types>(args)...); // Explicit constructor call.
            return result;
        }
    }

    // No block has a free item: create a new block and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    T* result = (T*)&pItem->Value;
    new(result) T(std::forward<Types>(args)...); // Explicit constructor call.
    return result;
}

template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for (size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in the address range of this block.
        if ((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
        {
            ptr->~T(); // Explicit destructor call.
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}

template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
        m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;

    const ItemBlock newBlock =
    {
        vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
        newBlockCapacity,
        0
    };

    m_ItemBlocks.push_back(newBlock);

    // Set up a singly-linked list of all free items in this block.
    for (uint32_t i = 0; i < newBlockCapacity - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}
#endif // _VMA_POOL_ALLOCATOR_FUNCTIONS
#endif // _VMA_POOL_ALLOCATOR

#ifndef _VMA_RAW_LIST
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};

// Doubly linked list.
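// A minimal usage sketch (illustrative only; pCallbacks is an assumed
// VkAllocationCallbacks pointer). Items are owned by the list's internal
// VmaPoolAllocator, so they are created and destroyed only through the list:
//     VmaRawList<uint32_t> list(pCallbacks);
//     VmaListItem<uint32_t>* item = list.PushBack(42);
//     list.InsertBefore(item, 41); // 41 now precedes 42
//     list.Remove(item);           // returns the item to the pool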
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    // Intentionally not calling Clear, because that would be unnecessary
    // computation just to return all items to m_ItemAllocator as free.
    ~VmaRawList() = default;

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Front() const { return m_pFront; }
    const ItemType* Back() const { return m_pBack; }

    ItemType* PushFront();
    ItemType* PushBack();
    ItemType* PushFront(const T& value);
    ItemType* PushBack(const T& value);
    void PopFront();
    void PopBack();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);
    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Clear();
    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;
};

#ifndef _VMA_RAW_LIST_FUNCTIONS
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks)
    : m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0) {}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pPrev = VMA_NULL;
    if (IsEmpty())
    {
        pNewItem->pNext = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pNext = m_pFront;
        m_pFront->pPrev = pNewItem;
        m_pFront = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pNext = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pPrev = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pPrev = m_pBack;
        m_pBack->pNext = pNewItem;
        m_pBack = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
void VmaRawList<T>::PopFront()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pFrontItem = m_pFront;
    ItemType* const pNextItem = pFrontItem->pNext;
    if (pNextItem != VMA_NULL)
    {
        pNextItem->pPrev = VMA_NULL;
    }
    m_pFront = pNextItem;
    m_ItemAllocator.Free(pFrontItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::PopBack()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pBackItem = m_pBack;
    ItemType* const pPrevItem = pBackItem->pPrev;
    if(pPrevItem != VMA_NULL)
    {
        pPrevItem->pNext = VMA_NULL;
    }
    m_pBack = pPrevItem;
    m_ItemAllocator.Free(pBackItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::Clear()
{
    if (!IsEmpty())
    {
        ItemType* pItem = m_pBack;
        while (pItem != VMA_NULL)
        {
            ItemType* const pPrevItem = pItem->pPrev;
            m_ItemAllocator.Free(pItem);
            pItem = pPrevItem;
        }
        m_pFront = VMA_NULL;
        m_pBack = VMA_NULL;
        m_Count = 0;
    }
}

template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}
#endif // _VMA_RAW_LIST_FUNCTIONS
#endif // _VMA_RAW_LIST

#ifndef _VMA_LIST
template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    class reverse_iterator;
    class const_iterator;
    class const_reverse_iterator;

    class iterator
    {
        friend class const_iterator;
        friend class VmaList<T, AllocatorT>;
    public:
        iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}
        iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}

        T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }
        T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }

        bool operator==(const iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }
        bool operator!=(const iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }

        iterator operator++(int) { iterator result = *this; ++*this; return result; }
        iterator operator--(int) { iterator result = *this; --*this; return result; }

        iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pNext; return *this; }
        iterator& operator--();

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) : m_pList(pList), m_pItem(pItem) {}
    };
    class reverse_iterator
    {
        friend class const_reverse_iterator;
        friend class VmaList<T, AllocatorT>;
    public:
        reverse_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}
        reverse_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}

        T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }
        T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }

        bool operator==(const reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }
        bool operator!=(const reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }

        reverse_iterator operator++(int) { reverse_iterator result = *this; ++*this; return result; }
        reverse_iterator operator--(int) { reverse_iterator result = *this; --*this; return result; }

        reverse_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pPrev; return *this; }
        reverse_iterator& operator--();

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        reverse_iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) : m_pList(pList), m_pItem(pItem) {}
    };
    class const_iterator
    {
        friend class VmaList<T, AllocatorT>;
    public:
        const_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}
        const_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
        const_iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}

        iterator drop_const() { return { const_cast<VmaRawList<T>*>(m_pList), const_cast<VmaListItem<T>*>(m_pItem) }; }

        const T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }
        const T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }

        bool operator==(const const_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }
        bool operator!=(const const_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }

        const_iterator operator++(int) { const_iterator result = *this; ++*this; return result; }
        const_iterator operator--(int) { const_iterator result = *this; --*this; return result; }

        const_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pNext; return *this; }
        const_iterator& operator--();

    private:
        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) : m_pList(pList), m_pItem(pItem) {}
    };
    class const_reverse_iterator
    {
        friend class VmaList<T, AllocatorT>;
    public:
        const_reverse_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}
        const_reverse_iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
        const_reverse_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}

        reverse_iterator drop_const() { return { const_cast<VmaRawList<T>*>(m_pList), const_cast<VmaListItem<T>*>(m_pItem) }; }

        const T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }
        const T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }

        bool operator==(const const_reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }
        bool operator!=(const const_reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }

        const_reverse_iterator operator++(int) { const_reverse_iterator result = *this; ++*this; return result; }
        const_reverse_iterator operator--(int) { const_reverse_iterator result = *this; --*this; return result; }

        const_reverse_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pPrev; return *this; }
        const_reverse_iterator& operator--();

    private:
        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        const_reverse_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) : m_pList(pList), m_pItem(pItem) {}
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) {}

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    const_iterator begin() const { return cbegin(); }
    const_iterator end() const { return cend(); }

    reverse_iterator rbegin() { return reverse_iterator(&m_RawList, m_RawList.Back()); }
    reverse_iterator rend() { return reverse_iterator(&m_RawList, VMA_NULL); }

    const_reverse_iterator crbegin() const { return const_reverse_iterator(&m_RawList, m_RawList.Back()); }
    const_reverse_iterator crend() const { return const_reverse_iterator(&m_RawList, VMA_NULL); }

    const_reverse_iterator rbegin() const { return crbegin(); }
    const_reverse_iterator rend() const { return crend(); }

    void push_back(const T& value) { m_RawList.PushBack(value); }
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

    void clear() { m_RawList.Clear(); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }

private:
    VmaRawList<T> m_RawList;
};

#ifndef _VMA_LIST_FUNCTIONS
template<typename T, typename AllocatorT>
typename VmaList<T, AllocatorT>::iterator& VmaList<T, AllocatorT>::iterator::operator--()
{
    if (m_pItem != VMA_NULL)
    {
        m_pItem = m_pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
        m_pItem = m_pList->Back();
    }
    return *this;
}

template<typename T, typename AllocatorT>
typename VmaList<T, AllocatorT>::reverse_iterator& VmaList<T, AllocatorT>::reverse_iterator::operator--()
{
    if (m_pItem != VMA_NULL)
    {
        m_pItem = m_pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
        m_pItem = m_pList->Front();
    }
    return *this;
}

template<typename T, typename AllocatorT>
typename VmaList<T, AllocatorT>::const_iterator& VmaList<T, AllocatorT>::const_iterator::operator--()
{
    if (m_pItem != VMA_NULL)
    {
        m_pItem = m_pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
        m_pItem = m_pList->Back();
    }
    return *this;
}

template<typename T, typename AllocatorT>
typename VmaList<T, AllocatorT>::const_reverse_iterator& VmaList<T, AllocatorT>::const_reverse_iterator::operator--()
{
    if (m_pItem != VMA_NULL)
    {
        m_pItem = m_pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
        // Stepping back from the rend() position must yield the last element of
        // the reverse sequence, which is the raw list's front item.
        m_pItem = m_pList->Front();
    }
    return *this;
}
#endif // _VMA_LIST_FUNCTIONS
#endif // _VMA_LIST

#ifndef _VMA_INTRUSIVE_LINKED_LIST
/*
Expected interface of ItemTypeTraits:
struct MyItemTypeTraits
{
    typedef MyItem ItemType;
    static ItemType* GetPrev(const ItemType* item) { return item->myPrevPtr; }
    static ItemType* GetNext(const ItemType* item) { return item->myNextPtr; }
    static ItemType*& AccessPrev(ItemType* item) { return item->myPrevPtr; }
    static ItemType*& AccessNext(ItemType* item) { return item->myNextPtr; }
};
*/
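/* A minimal usage sketch matching the traits above (MyItem and
MyItemTypeTraits are the hypothetical types from the comment):
    struct MyItem
    {
        MyItem* myPrevPtr = VMA_NULL;
        MyItem* myNextPtr = VMA_NULL;
    };
    VmaIntrusiveLinkedList<MyItemTypeTraits> list;
    MyItem a, b;
    list.PushBack(&a);
    list.InsertBefore(&a, &b); // b now precedes a
    list.RemoveAll();          // the list never owns the items
*/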
5010 template<typename ItemTypeTraits>
5011 class VmaIntrusiveLinkedList
5012 {
5013 public:
5014     typedef typename ItemTypeTraits::ItemType ItemType;
GetPrev(const ItemType * item)5015     static ItemType* GetPrev(const ItemType* item) { return ItemTypeTraits::GetPrev(item); }
GetNext(const ItemType * item)5016     static ItemType* GetNext(const ItemType* item) { return ItemTypeTraits::GetNext(item); }
5017 
5018     // Movable, not copyable.
5019     VmaIntrusiveLinkedList() = default;
5020     VmaIntrusiveLinkedList(VmaIntrusiveLinkedList && src);
5021     VmaIntrusiveLinkedList(const VmaIntrusiveLinkedList&) = delete;
5022     VmaIntrusiveLinkedList& operator=(VmaIntrusiveLinkedList&& src);
5023     VmaIntrusiveLinkedList& operator=(const VmaIntrusiveLinkedList&) = delete;
~VmaIntrusiveLinkedList()5024     ~VmaIntrusiveLinkedList() { VMA_HEAVY_ASSERT(IsEmpty()); }
5025 
GetCount()5026     size_t GetCount() const { return m_Count; }
IsEmpty()5027     bool IsEmpty() const { return m_Count == 0; }
Front()5028     ItemType* Front() { return m_Front; }
Back()5029     ItemType* Back() { return m_Back; }
Front()5030     const ItemType* Front() const { return m_Front; }
Back()5031     const ItemType* Back() const { return m_Back; }
5032 
5033     void PushBack(ItemType* item);
5034     void PushFront(ItemType* item);
5035     ItemType* PopBack();
5036     ItemType* PopFront();
5037 
5038     // MyItem can be null - it means PushBack.
5039     void InsertBefore(ItemType* existingItem, ItemType* newItem);
5040     // MyItem can be null - it means PushFront.
5041     void InsertAfter(ItemType* existingItem, ItemType* newItem);
    void Remove(ItemType* item);
    void RemoveAll();

private:
    ItemType* m_Front = VMA_NULL;
    ItemType* m_Back = VMA_NULL;
    size_t m_Count = 0;
};

#ifndef _VMA_INTRUSIVE_LINKED_LIST_FUNCTIONS
template<typename ItemTypeTraits>
VmaIntrusiveLinkedList<ItemTypeTraits>::VmaIntrusiveLinkedList(VmaIntrusiveLinkedList&& src)
    : m_Front(src.m_Front), m_Back(src.m_Back), m_Count(src.m_Count)
{
    src.m_Front = src.m_Back = VMA_NULL;
    src.m_Count = 0;
}

template<typename ItemTypeTraits>
VmaIntrusiveLinkedList<ItemTypeTraits>& VmaIntrusiveLinkedList<ItemTypeTraits>::operator=(VmaIntrusiveLinkedList&& src)
{
    if (&src != this)
    {
        VMA_HEAVY_ASSERT(IsEmpty());
        m_Front = src.m_Front;
        m_Back = src.m_Back;
        m_Count = src.m_Count;
        src.m_Front = src.m_Back = VMA_NULL;
        src.m_Count = 0;
    }
    return *this;
}

template<typename ItemTypeTraits>
void VmaIntrusiveLinkedList<ItemTypeTraits>::PushBack(ItemType* item)
{
    VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL);
    if (IsEmpty())
    {
        m_Front = item;
        m_Back = item;
        m_Count = 1;
    }
    else
    {
        ItemTypeTraits::AccessPrev(item) = m_Back;
        ItemTypeTraits::AccessNext(m_Back) = item;
        m_Back = item;
        ++m_Count;
    }
}

template<typename ItemTypeTraits>
void VmaIntrusiveLinkedList<ItemTypeTraits>::PushFront(ItemType* item)
{
    VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL);
    if (IsEmpty())
    {
        m_Front = item;
        m_Back = item;
        m_Count = 1;
    }
    else
    {
        ItemTypeTraits::AccessNext(item) = m_Front;
        ItemTypeTraits::AccessPrev(m_Front) = item;
        m_Front = item;
        ++m_Count;
    }
}

template<typename ItemTypeTraits>
typename VmaIntrusiveLinkedList<ItemTypeTraits>::ItemType* VmaIntrusiveLinkedList<ItemTypeTraits>::PopBack()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const backItem = m_Back;
    ItemType* const prevItem = ItemTypeTraits::GetPrev(backItem);
    if (prevItem != VMA_NULL)
    {
        ItemTypeTraits::AccessNext(prevItem) = VMA_NULL;
    }
    m_Back = prevItem;
    --m_Count;
    ItemTypeTraits::AccessPrev(backItem) = VMA_NULL;
    ItemTypeTraits::AccessNext(backItem) = VMA_NULL;
    return backItem;
}

template<typename ItemTypeTraits>
typename VmaIntrusiveLinkedList<ItemTypeTraits>::ItemType* VmaIntrusiveLinkedList<ItemTypeTraits>::PopFront()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const frontItem = m_Front;
    ItemType* const nextItem = ItemTypeTraits::GetNext(frontItem);
    if (nextItem != VMA_NULL)
    {
        ItemTypeTraits::AccessPrev(nextItem) = VMA_NULL;
    }
    m_Front = nextItem;
    --m_Count;
    ItemTypeTraits::AccessPrev(frontItem) = VMA_NULL;
    ItemTypeTraits::AccessNext(frontItem) = VMA_NULL;
    return frontItem;
}

template<typename ItemTypeTraits>
void VmaIntrusiveLinkedList<ItemTypeTraits>::InsertBefore(ItemType* existingItem, ItemType* newItem)
{
    VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL);
    if (existingItem != VMA_NULL)
    {
        ItemType* const prevItem = ItemTypeTraits::GetPrev(existingItem);
        ItemTypeTraits::AccessPrev(newItem) = prevItem;
        ItemTypeTraits::AccessNext(newItem) = existingItem;
        ItemTypeTraits::AccessPrev(existingItem) = newItem;
        if (prevItem != VMA_NULL)
        {
            ItemTypeTraits::AccessNext(prevItem) = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_Front == existingItem);
            m_Front = newItem;
        }
        ++m_Count;
    }
    else
        PushBack(newItem);
}

template<typename ItemTypeTraits>
void VmaIntrusiveLinkedList<ItemTypeTraits>::InsertAfter(ItemType* existingItem, ItemType* newItem)
{
    VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL);
    if (existingItem != VMA_NULL)
    {
        ItemType* const nextItem = ItemTypeTraits::GetNext(existingItem);
        ItemTypeTraits::AccessNext(newItem) = nextItem;
        ItemTypeTraits::AccessPrev(newItem) = existingItem;
        ItemTypeTraits::AccessNext(existingItem) = newItem;
        if (nextItem != VMA_NULL)
        {
            ItemTypeTraits::AccessPrev(nextItem) = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_Back == existingItem);
            m_Back = newItem;
        }
        ++m_Count;
    }
    else
        PushFront(newItem);
}

template<typename ItemTypeTraits>
void VmaIntrusiveLinkedList<ItemTypeTraits>::Remove(ItemType* item)
{
    VMA_HEAVY_ASSERT(item != VMA_NULL && m_Count > 0);
    if (ItemTypeTraits::GetPrev(item) != VMA_NULL)
    {
        ItemTypeTraits::AccessNext(ItemTypeTraits::AccessPrev(item)) = ItemTypeTraits::GetNext(item);
    }
    else
    {
        VMA_HEAVY_ASSERT(m_Front == item);
        m_Front = ItemTypeTraits::GetNext(item);
    }

    if (ItemTypeTraits::GetNext(item) != VMA_NULL)
    {
        ItemTypeTraits::AccessPrev(ItemTypeTraits::AccessNext(item)) = ItemTypeTraits::GetPrev(item);
    }
    else
    {
        VMA_HEAVY_ASSERT(m_Back == item);
        m_Back = ItemTypeTraits::GetPrev(item);
    }
    ItemTypeTraits::AccessPrev(item) = VMA_NULL;
    ItemTypeTraits::AccessNext(item) = VMA_NULL;
    --m_Count;
}

template<typename ItemTypeTraits>
void VmaIntrusiveLinkedList<ItemTypeTraits>::RemoveAll()
{
    if (!IsEmpty())
    {
        ItemType* item = m_Back;
        while (item != VMA_NULL)
        {
            ItemType* const prevItem = ItemTypeTraits::AccessPrev(item);
            ItemTypeTraits::AccessPrev(item) = VMA_NULL;
            ItemTypeTraits::AccessNext(item) = VMA_NULL;
            item = prevItem;
        }
        m_Front = VMA_NULL;
        m_Back = VMA_NULL;
        m_Count = 0;
    }
}
#endif // _VMA_INTRUSIVE_LINKED_LIST_FUNCTIONS
#endif // _VMA_INTRUSIVE_LINKED_LIST
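
// A minimal usage sketch (illustrative only, not part of the library):
// VmaIntrusiveLinkedList owns no nodes - items embed their own prev/next
// pointers and a traits struct exposes them. `MyItem` and `MyItemTraits`
// below are hypothetical names; VmaDedicatedAllocationListItemTraits further
// down in this file is the real in-tree example of such a traits struct.
//
//    struct MyItem
//    {
//        MyItem* pPrev = VMA_NULL;
//        MyItem* pNext = VMA_NULL;
//    };
//    struct MyItemTraits
//    {
//        typedef MyItem ItemType;
//        static ItemType* GetPrev(const ItemType* item) { return item->pPrev; }
//        static ItemType* GetNext(const ItemType* item) { return item->pNext; }
//        static ItemType*& AccessPrev(ItemType* item) { return item->pPrev; }
//        static ItemType*& AccessNext(ItemType* item) { return item->pNext; }
//    };
//
//    VmaIntrusiveLinkedList<MyItemTraits> list;
//    MyItem a, b;
//    list.PushBack(&a);
//    list.PushBack(&b);  // list is now: a <-> b
//    list.Remove(&a);    // items are only unlinked, never deleted
//    list.RemoveAll();   // the list must be empty before its destructor runs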

// Unused in this version.
#if 0

#ifndef _VMA_PAIR
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() {}
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) {}
};

template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};
#endif // _VMA_PAIR

#ifndef _VMA_MAP
/* Class compatible with a subset of the std::unordered_map interface.
KeyT, ValueT must be POD because they are stored in a VmaVector.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) {}

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }
    size_t size() { return m_Vector.size(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType>> m_Vector;
};

#ifndef _VMA_MAP_FUNCTIONS
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}

template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if ((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}
#endif // _VMA_MAP_FUNCTIONS
#endif // _VMA_MAP

#endif // #if 0

#if !defined(_VMA_STRING_BUILDER) && VMA_STATS_STRING_ENABLED
class VmaStringBuilder
{
public:
    VmaStringBuilder(const VkAllocationCallbacks* allocationCallbacks) : m_Data(VmaStlAllocator<char>(allocationCallbacks)) {}
    ~VmaStringBuilder() = default;

    size_t GetLength() const { return m_Data.size(); }
    const char* GetData() const { return m_Data.data(); }
    void AddNewLine() { Add('\n'); }
    void Add(char ch) { m_Data.push_back(ch); }

    void Add(const char* pStr);
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    void AddPointer(const void* ptr);

private:
    VmaVector<char, VmaStlAllocator<char>> m_Data;
};

#ifndef _VMA_STRING_BUILDER_FUNCTIONS
void VmaStringBuilder::Add(const char* pStr)
{
    const size_t strLen = strlen(pStr);
    if (strLen > 0)
    {
        const size_t oldCount = m_Data.size();
        m_Data.resize(oldCount + strLen);
        memcpy(m_Data.data() + oldCount, pStr, strLen);
    }
}

void VmaStringBuilder::AddNumber(uint32_t num)
{
    char buf[11];
    buf[10] = '\0';
    char* p = &buf[10];
    do
    {
        *--p = '0' + (num % 10);
        num /= 10;
    } while (num);
    Add(p);
}

void VmaStringBuilder::AddNumber(uint64_t num)
{
    char buf[21];
    buf[20] = '\0';
    char* p = &buf[20];
    do
    {
        *--p = '0' + (num % 10);
        num /= 10;
    } while (num);
    Add(p);
}

void VmaStringBuilder::AddPointer(const void* ptr)
{
    char buf[21];
    VmaPtrToStr(buf, sizeof(buf), ptr);
    Add(buf);
}
#endif //_VMA_STRING_BUILDER_FUNCTIONS
#endif // _VMA_STRING_BUILDER
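
// Illustrative note (not part of the library): AddNumber() above formats
// decimal digits from the least significant end into the tail of a small
// stack buffer, so no reversal pass is needed - e.g. AddNumber(305u) writes
// '5', '0', '3' backwards, leaving p pointing at "305". A hypothetical use:
//
//    VmaStringBuilder sb(allocationCallbacks); // some VkAllocationCallbacks* (hypothetical)
//    sb.Add("blockCount = ");
//    sb.AddNumber(305u);
//    sb.AddNewLine();
//    // GetData()/GetLength() now describe "blockCount = 305\n";
//    // note the buffer is not null-terminated.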

#if !defined(_VMA_JSON_WRITER) && VMA_STATS_STRING_ENABLED
/*
Helps to conveniently build a correct JSON document to be written to the
VmaStringBuilder passed to the constructor.
*/
class VmaJsonWriter
{
    VMA_CLASS_NO_COPY(VmaJsonWriter)
public:
    // sb - string builder to write the document to. Must remain alive for the whole lifetime of this object.
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();

    // Begins object by writing "{".
    // Inside an object, you must call pairs of WriteString and a value, e.g.:
    // j.BeginObject(true); j.WriteString("A"); j.WriteNumber(1); j.WriteString("B"); j.WriteNumber(2); j.EndObject();
    // Will write: { "A": 1, "B": 2 }
    void BeginObject(bool singleLine = false);
    // Ends object by writing "}".
    void EndObject();

    // Begins array by writing "[".
    // Inside an array, you can write a sequence of any values.
    void BeginArray(bool singleLine = false);
    // Ends array by writing "]".
    void EndArray();

    // Writes a string value inside "".
    // pStr can contain any ANSI characters, including '"', new line etc. - they will be properly escaped.
    void WriteString(const char* pStr);

    // Begins writing a string value.
    // Call BeginString, ContinueString, ContinueString, ..., EndString instead of
    // WriteString to conveniently build the string content incrementally, made of
    // parts including numbers.
    void BeginString(const char* pStr = VMA_NULL);
    // Posts next part of an open string.
    void ContinueString(const char* pStr);
    // Posts next part of an open string. The number is converted to decimal characters.
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void ContinueString_Size(size_t n);
    // Posts next part of an open string. Pointer value is converted to characters
    // using "%p" formatting - shown as hexadecimal number, e.g.: 000000081276Ad00
    void ContinueString_Pointer(const void* ptr);
    // Ends writing a string value by writing '"'.
    void EndString(const char* pStr = VMA_NULL);

    // Writes a number value.
    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteSize(size_t n);
    // Writes a boolean value - false or true.
    void WriteBool(bool b);
    // Writes a null value.
    void WriteNull();

private:
    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    struct StackItem
    {
        COLLECTION_TYPE type;
        uint32_t valueCount;
        bool singleLineMode;
    };

    static const char* const INDENT;

    VmaStringBuilder& m_SB;
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    bool m_InsideString;

    // Writes size_t when it is less than 64 bits
    void WriteSize(size_t n, std::integral_constant<bool, false>) { m_SB.AddNumber(static_cast<uint32_t>(n)); }
    // Writes size_t when it is 64 bits
    void WriteSize(size_t n, std::integral_constant<bool, true>) { m_SB.AddNumber(static_cast<uint64_t>(n)); }

    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
const char* const VmaJsonWriter::INDENT = "  ";

#ifndef _VMA_JSON_WRITER_FUNCTIONS
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb)
    : m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false) {}

VmaJsonWriter::~VmaJsonWriter()
{
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}

void VmaJsonWriter::BeginObject(bool singleLine)
{
    VMA_ASSERT(!m_InsideString);

    BeginValue(false);
    m_SB.Add('{');

    StackItem item;
    item.type = COLLECTION_TYPE_OBJECT;
    item.valueCount = 0;
    item.singleLineMode = singleLine;
    m_Stack.push_back(item);
}

void VmaJsonWriter::EndObject()
{
    VMA_ASSERT(!m_InsideString);

    WriteIndent(true);
    m_SB.Add('}');

    VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    m_Stack.pop_back();
}

void VmaJsonWriter::BeginArray(bool singleLine)
{
    VMA_ASSERT(!m_InsideString);

    BeginValue(false);
    m_SB.Add('[');

    StackItem item;
    item.type = COLLECTION_TYPE_ARRAY;
    item.valueCount = 0;
    item.singleLineMode = singleLine;
    m_Stack.push_back(item);
}

void VmaJsonWriter::EndArray()
{
    VMA_ASSERT(!m_InsideString);

    WriteIndent(true);
    m_SB.Add(']');

    VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    m_Stack.pop_back();
}

void VmaJsonWriter::WriteString(const char* pStr)
{
    BeginString(pStr);
    EndString();
}

void VmaJsonWriter::BeginString(const char* pStr)
{
    VMA_ASSERT(!m_InsideString);

    BeginValue(true);
    m_SB.Add('"');
    m_InsideString = true;
    if (pStr != VMA_NULL && pStr[0] != '\0')
    {
        ContinueString(pStr);
    }
}

void VmaJsonWriter::ContinueString(const char* pStr)
{
    VMA_ASSERT(m_InsideString);

    const size_t strLen = strlen(pStr);
    for (size_t i = 0; i < strLen; ++i)
    {
        char ch = pStr[i];
        if (ch == '\\')
        {
            m_SB.Add("\\\\");
        }
        else if (ch == '"')
        {
            m_SB.Add("\\\"");
        }
        else if (ch >= 32)
        {
            m_SB.Add(ch);
        }
        else switch (ch)
        {
        case '\b':
            m_SB.Add("\\b");
            break;
        case '\f':
            m_SB.Add("\\f");
            break;
        case '\n':
            m_SB.Add("\\n");
            break;
        case '\r':
            m_SB.Add("\\r");
            break;
        case '\t':
            m_SB.Add("\\t");
            break;
        default:
            VMA_ASSERT(0 && "Character not currently supported.");
            break;
        }
    }
}

void VmaJsonWriter::ContinueString(uint32_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}

void VmaJsonWriter::ContinueString(uint64_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}

void VmaJsonWriter::ContinueString_Size(size_t n)
{
    VMA_ASSERT(m_InsideString);
    // Fix for AppleClang incorrect type casting
    // TODO: Change to if constexpr when C++17 used as minimal standard
    WriteSize(n, std::is_same<size_t, uint64_t>{});
}

void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddPointer(ptr);
}

void VmaJsonWriter::EndString(const char* pStr)
{
    VMA_ASSERT(m_InsideString);
    if (pStr != VMA_NULL && pStr[0] != '\0')
    {
        ContinueString(pStr);
    }
    m_SB.Add('"');
    m_InsideString = false;
}

void VmaJsonWriter::WriteNumber(uint32_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}

void VmaJsonWriter::WriteNumber(uint64_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}

void VmaJsonWriter::WriteSize(size_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    // Fix for AppleClang incorrect type casting
    // TODO: Change to if constexpr when C++17 used as minimal standard
    WriteSize(n, std::is_same<size_t, uint64_t>{});
}

void VmaJsonWriter::WriteBool(bool b)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add(b ? "true" : "false");
}

void VmaJsonWriter::WriteNull()
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add("null");
}

void VmaJsonWriter::BeginValue(bool isString)
{
    if (!m_Stack.empty())
    {
        StackItem& currItem = m_Stack.back();
        if (currItem.type == COLLECTION_TYPE_OBJECT &&
            currItem.valueCount % 2 == 0)
        {
            VMA_ASSERT(isString);
        }

        if (currItem.type == COLLECTION_TYPE_OBJECT &&
            currItem.valueCount % 2 != 0)
        {
            m_SB.Add(": ");
        }
        else if (currItem.valueCount > 0)
        {
            m_SB.Add(", ");
            WriteIndent();
        }
        else
        {
            WriteIndent();
        }
        ++currItem.valueCount;
    }
}

void VmaJsonWriter::WriteIndent(bool oneLess)
{
    if (!m_Stack.empty() && !m_Stack.back().singleLineMode)
    {
        m_SB.AddNewLine();

        size_t count = m_Stack.size();
        if (count > 0 && oneLess)
        {
            --count;
        }
        for (size_t i = 0; i < count; ++i)
        {
            m_SB.Add(INDENT);
        }
    }
}
#endif // _VMA_JSON_WRITER_FUNCTIONS

static void VmaPrintDetailedStatistics(VmaJsonWriter& json, const VmaDetailedStatistics& stat)
{
    json.BeginObject();

    json.WriteString("BlockCount");
    json.WriteNumber(stat.statistics.blockCount);
    json.WriteString("BlockBytes");
    json.WriteNumber(stat.statistics.blockBytes);
    json.WriteString("AllocationCount");
    json.WriteNumber(stat.statistics.allocationCount);
    json.WriteString("AllocationBytes");
    json.WriteNumber(stat.statistics.allocationBytes);
    json.WriteString("UnusedRangeCount");
    json.WriteNumber(stat.unusedRangeCount);

    if (stat.statistics.allocationCount > 1)
    {
        json.WriteString("AllocationSizeMin");
        json.WriteNumber(stat.allocationSizeMin);
        json.WriteString("AllocationSizeMax");
        json.WriteNumber(stat.allocationSizeMax);
    }
    if (stat.unusedRangeCount > 1)
    {
        json.WriteString("UnusedRangeSizeMin");
        json.WriteNumber(stat.unusedRangeSizeMin);
        json.WriteString("UnusedRangeSizeMax");
        json.WriteNumber(stat.unusedRangeSizeMax);
    }
    json.EndObject();
}
#endif // _VMA_JSON_WRITER
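
// Illustrative usage sketch (not part of the library). The writer enforces
// JSON well-formedness with asserts: inside an object every value must be
// preceded by a WriteString() key, and the destructor checks that all
// BeginObject()/BeginArray() calls were matched.
//
//    VmaStringBuilder sb(allocationCallbacks); // hypothetical callbacks pointer
//    VmaJsonWriter json(allocationCallbacks, sb);
//    json.BeginObject();
//    json.WriteString("Blocks");
//    json.BeginArray(true); // single-line array
//    json.WriteNumber(1u);
//    json.WriteNumber(2u);
//    json.EndArray();
//    json.EndObject();
//    // sb now contains:
//    // {
//    //   "Blocks": [1, 2]
//    // }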

#ifndef _VMA_MAPPING_HYSTERESIS

class VmaMappingHysteresis
{
    VMA_CLASS_NO_COPY(VmaMappingHysteresis)
public:
    VmaMappingHysteresis() = default;

    uint32_t GetExtraMapping() const { return m_ExtraMapping; }

    // Call when Map was called.
    // Returns true if switched to extra +1 mapping reference count.
    bool PostMap()
    {
#if VMA_MAPPING_HYSTERESIS_ENABLED
        if(m_ExtraMapping == 0)
        {
            ++m_MajorCounter;
            if(m_MajorCounter >= COUNTER_MIN_EXTRA_MAPPING)
            {
                m_ExtraMapping = 1;
                m_MajorCounter = 0;
                m_MinorCounter = 0;
                return true;
            }
        }
        else // m_ExtraMapping == 1
            PostMinorCounter();
#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED
        return false;
    }

    // Call when Unmap was called.
    void PostUnmap()
    {
#if VMA_MAPPING_HYSTERESIS_ENABLED
        if(m_ExtraMapping == 0)
            ++m_MajorCounter;
        else // m_ExtraMapping == 1
            PostMinorCounter();
#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED
    }

    // Call when allocation was made from the memory block.
    void PostAlloc()
    {
#if VMA_MAPPING_HYSTERESIS_ENABLED
        if(m_ExtraMapping == 1)
            ++m_MajorCounter;
        else // m_ExtraMapping == 0
            PostMinorCounter();
#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED
    }

    // Call when allocation was freed from the memory block.
    // Returns true if switched to extra -1 mapping reference count.
    bool PostFree()
    {
#if VMA_MAPPING_HYSTERESIS_ENABLED
        if(m_ExtraMapping == 1)
        {
            ++m_MajorCounter;
            if(m_MajorCounter >= COUNTER_MIN_EXTRA_MAPPING &&
                m_MajorCounter > m_MinorCounter + 1)
            {
                m_ExtraMapping = 0;
                m_MajorCounter = 0;
                m_MinorCounter = 0;
                return true;
            }
        }
        else // m_ExtraMapping == 0
            PostMinorCounter();
#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED
        return false;
    }

private:
    static const int32_t COUNTER_MIN_EXTRA_MAPPING = 7;

    uint32_t m_MinorCounter = 0;
    uint32_t m_MajorCounter = 0;
    uint32_t m_ExtraMapping = 0; // 0 or 1.

    void PostMinorCounter()
    {
        if(m_MinorCounter < m_MajorCounter)
        {
            ++m_MinorCounter;
        }
        else if(m_MajorCounter > 0)
        {
            --m_MajorCounter;
            --m_MinorCounter;
        }
    }
};

#endif // _VMA_MAPPING_HYSTERESIS
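
// Behavior sketch (illustrative): with COUNTER_MIN_EXTRA_MAPPING == 7, a block
// that keeps getting mapped and unmapped crosses the threshold and then holds
// one extra mapping reference, so the expensive vkMapMemory()/vkUnmapMemory()
// calls stop happening on every Map/Unmap pair:
//
//    VmaMappingHysteresis h;
//    for(int i = 0; i < 7; ++i)
//        h.PostMap(); // the 7th call returns true -> add the extra +1 reference
//    // h.GetExtraMapping() == 1: the block now stays mapped.
//    // Conversely, a sustained run of PostAlloc()/PostFree() with little
//    // mapping activity eventually makes PostFree() return true, dropping
//    // the extra reference again. The minor counter damps oscillation.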

#ifndef _VMA_DEVICE_MEMORY_BLOCK
/*
Represents a single block of device memory (`VkDeviceMemory`) with all the
data about its regions (aka suballocations, #VmaAllocation), assigned and free.

Thread-safety:
- Access to m_pMetadata must be externally synchronized.
- Map, Unmap, Bind* are synchronized internally.
*/
class VmaDeviceMemoryBlock
{
    VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
public:
    VmaBlockMetadata* m_pMetadata;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);
    ~VmaDeviceMemoryBlock();

    // Always call after construction.
    void Init(
        VmaAllocator hAllocator,
        VmaPool hParentPool,
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        uint32_t id,
        uint32_t algorithm,
        VkDeviceSize bufferImageGranularity);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    VmaPool GetParentPool() const { return m_hParentPool; }
    VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    uint32_t GetId() const { return m_Id; }
    void* GetMappedData() const { return m_pMappedData; }
    uint32_t GetMapRefCount() const { return m_MapCount; }

    // Call when allocation/free was made from m_pMetadata.
    // Used for m_MappingHysteresis.
    void PostAlloc() { m_MappingHysteresis.PostAlloc(); }
    void PostFree(VmaAllocator hAllocator);

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;
    VkResult CheckCorruption(VmaAllocator hAllocator);

    // ppData can be null.
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    void Unmap(VmaAllocator hAllocator, uint32_t count);

    VkResult WriteMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    VkResult ValidateMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);

    VkResult BindBufferMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkDeviceSize allocationLocalOffset,
        VkBuffer hBuffer,
        const void* pNext);
    VkResult BindImageMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkDeviceSize allocationLocalOffset,
        VkImage hImage,
        const void* pNext);

private:
    VmaPool m_hParentPool; // VK_NULL_HANDLE if the block does not belong to a custom pool.
    uint32_t m_MemoryTypeIndex;
    uint32_t m_Id;
    VkDeviceMemory m_hMemory;

    /*
    Protects access to m_hMemory so it is not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    Also protects m_MapCount, m_pMappedData.
    Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex.
    */
    VMA_MUTEX m_MapAndBindMutex;
    VmaMappingHysteresis m_MappingHysteresis;
    uint32_t m_MapCount;
    void* m_pMappedData;
};
#endif // _VMA_DEVICE_MEMORY_BLOCK
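
// Usage sketch (illustrative): Map()/Unmap() are reference-counted per block,
// so nested mappings are cheap - only the transition 0 -> 1 actually calls
// vkMapMemory() and (modulo the mapping hysteresis above) only the transition
// back to 0 calls vkUnmapMemory():
//
//    void* pData = VMA_NULL;
//    VkResult res = block.Map(hAllocator, 1, &pData); // maps if not mapped yet
//    if (res == VK_SUCCESS)
//    {
//        // ... read/write through pData ...
//        block.Unmap(hAllocator, 1); // unmaps when the count drops to zero
//    }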

#ifndef _VMA_ALLOCATION_T
struct VmaAllocation_T
{
    friend struct VmaDedicatedAllocationListItemTraits;

    enum FLAGS
    {
        FLAG_PERSISTENT_MAP   = 0x01,
        FLAG_MAPPING_ALLOWED  = 0x02,
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,
        ALLOCATION_TYPE_BLOCK,
        ALLOCATION_TYPE_DEDICATED,
    };

    // This struct is allocated using VmaPoolAllocator.
    VmaAllocation_T(bool mappingAllowed);
    ~VmaAllocation_T();

    void InitBlockAllocation(
        VmaDeviceMemoryBlock* block,
        VmaAllocHandle allocHandle,
        VkDeviceSize alignment,
        VkDeviceSize size,
        uint32_t memoryTypeIndex,
        VmaSuballocationType suballocationType,
        bool mapped);
    // pMappedData not null means allocation is created with MAPPED flag.
    void InitDedicatedAllocation(
        VmaPool hParentPool,
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size);

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    void* GetUserData() const { return m_pUserData; }
    const char* GetName() const { return m_pName; }
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    VmaDeviceMemoryBlock* GetBlock() const { VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK); return m_BlockAllocation.m_Block; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    bool IsPersistentMap() const { return (m_Flags & FLAG_PERSISTENT_MAP) != 0; }
    bool IsMappingAllowed() const { return (m_Flags & FLAG_MAPPING_ALLOWED) != 0; }

    void SetUserData(VmaAllocator hAllocator, void* pUserData) { m_pUserData = pUserData; }
    void SetName(VmaAllocator hAllocator, const char* pName);
    void FreeName(VmaAllocator hAllocator);
    uint8_t SwapBlockAllocation(VmaAllocator hAllocator, VmaAllocation allocation);
    VmaAllocHandle GetAllocHandle() const;
    VkDeviceSize GetOffset() const;
    VmaPool GetParentPool() const;
    VkDeviceMemory GetMemory() const;
    void* GetMappedData() const;

    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

#if VMA_STATS_STRING_ENABLED
    uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }

    void InitBufferImageUsage(uint32_t bufferImageUsage);
    void PrintParameters(class VmaJsonWriter& json) const;
#endif

private:
    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaDeviceMemoryBlock* m_Block;
        VmaAllocHandle m_AllocHandle;
    };
    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        VmaPool m_hParentPool; // VK_NULL_HANDLE if the allocation does not belong to a custom pool.
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
        VmaAllocation_T* m_Prev;
        VmaAllocation_T* m_Next;
    };
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    char* m_pName;
    uint32_t m_MemoryTypeIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS
#if VMA_STATS_STRING_ENABLED
    uint32_t m_BufferImageUsage; // 0 if unknown.
#endif
};
#endif // _VMA_ALLOCATION_T

#ifndef _VMA_DEDICATED_ALLOCATION_LIST_ITEM_TRAITS
struct VmaDedicatedAllocationListItemTraits
{
    typedef VmaAllocation_T ItemType;

    static ItemType* GetPrev(const ItemType* item)
    {
        VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
        return item->m_DedicatedAllocation.m_Prev;
    }
    static ItemType* GetNext(const ItemType* item)
    {
        VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
        return item->m_DedicatedAllocation.m_Next;
    }
    static ItemType*& AccessPrev(ItemType* item)
    {
        VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
        return item->m_DedicatedAllocation.m_Prev;
    }
    static ItemType*& AccessNext(ItemType* item)
    {
        VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
        return item->m_DedicatedAllocation.m_Next;
    }
};
#endif // _VMA_DEDICATED_ALLOCATION_LIST_ITEM_TRAITS

#ifndef _VMA_DEDICATED_ALLOCATION_LIST
/*
Stores linked list of VmaAllocation_T objects.
Thread-safe, synchronized internally.
*/
class VmaDedicatedAllocationList
{
public:
    VmaDedicatedAllocationList() {}
    ~VmaDedicatedAllocationList();

    void Init(bool useMutex) { m_UseMutex = useMutex; }
    bool Validate();

    void AddDetailedStatistics(VmaDetailedStatistics& inoutStats);
    void AddStatistics(VmaStatistics& inoutStats);
#if VMA_STATS_STRING_ENABLED
    // Writes JSON array with the list of allocations.
    void BuildStatsString(VmaJsonWriter& json);
#endif

    bool IsEmpty();
    void Register(VmaAllocation alloc);
    void Unregister(VmaAllocation alloc);

private:
    typedef VmaIntrusiveLinkedList<VmaDedicatedAllocationListItemTraits> DedicatedAllocationLinkedList;

    bool m_UseMutex = true;
    VMA_RW_MUTEX m_Mutex;
    DedicatedAllocationLinkedList m_AllocationList;
};

#ifndef _VMA_DEDICATED_ALLOCATION_LIST_FUNCTIONS

VmaDedicatedAllocationList::~VmaDedicatedAllocationList()
{
    VMA_HEAVY_ASSERT(Validate());

    if (!m_AllocationList.IsEmpty())
    {
        VMA_ASSERT(false && "Unfreed dedicated allocations found!");
    }
}

bool VmaDedicatedAllocationList::Validate()
{
    const size_t declaredCount = m_AllocationList.GetCount();
    size_t actualCount = 0;
    VmaMutexLockRead lock(m_Mutex, m_UseMutex);
    for (VmaAllocation alloc = m_AllocationList.Front();
        alloc != VMA_NULL; alloc = m_AllocationList.GetNext(alloc))
    {
        ++actualCount;
    }
    VMA_VALIDATE(actualCount == declaredCount);

    return true;
}

void VmaDedicatedAllocationList::AddDetailedStatistics(VmaDetailedStatistics& inoutStats)
{
    for(auto* item = m_AllocationList.Front(); item != nullptr; item = DedicatedAllocationLinkedList::GetNext(item))
    {
        const VkDeviceSize size = item->GetSize();
        inoutStats.statistics.blockCount++;
        inoutStats.statistics.blockBytes += size;
        VmaAddDetailedStatisticsAllocation(inoutStats, item->GetSize());
    }
}

void VmaDedicatedAllocationList::AddStatistics(VmaStatistics& inoutStats)
{
    VmaMutexLockRead lock(m_Mutex, m_UseMutex);

    const uint32_t allocCount = (uint32_t)m_AllocationList.GetCount();
    inoutStats.blockCount += allocCount;
    inoutStats.allocationCount += allocCount;

    for(auto* item = m_AllocationList.Front(); item != nullptr; item = DedicatedAllocationLinkedList::GetNext(item))
    {
        const VkDeviceSize size = item->GetSize();
        inoutStats.blockBytes += size;
        inoutStats.allocationBytes += size;
    }
}

#if VMA_STATS_STRING_ENABLED
void VmaDedicatedAllocationList::BuildStatsString(VmaJsonWriter& json)
{
    VmaMutexLockRead lock(m_Mutex, m_UseMutex);
    json.BeginArray();
    for (VmaAllocation alloc = m_AllocationList.Front();
        alloc != VMA_NULL; alloc = m_AllocationList.GetNext(alloc))
    {
        json.BeginObject(true);
        alloc->PrintParameters(json);
        json.EndObject();
    }
    json.EndArray();
}
#endif // VMA_STATS_STRING_ENABLED

bool VmaDedicatedAllocationList::IsEmpty()
{
    VmaMutexLockRead lock(m_Mutex, m_UseMutex);
    return m_AllocationList.IsEmpty();
}

void VmaDedicatedAllocationList::Register(VmaAllocation alloc)
{
    VmaMutexLockWrite lock(m_Mutex, m_UseMutex);
    m_AllocationList.PushBack(alloc);
}

void VmaDedicatedAllocationList::Unregister(VmaAllocation alloc)
{
    VmaMutexLockWrite lock(m_Mutex, m_UseMutex);
    m_AllocationList.Remove(alloc);
}
#endif // _VMA_DEDICATED_ALLOCATION_LIST_FUNCTIONS
#endif // _VMA_DEDICATED_ALLOCATION_LIST

#ifndef _VMA_SUBALLOCATION
/*
Represents a region of a VmaDeviceMemoryBlock that is either assigned and
returned as an allocated memory block, or free.
*/
struct VmaSuballocation
{
    VkDeviceSize offset;
    VkDeviceSize size;
    void* userData;
    VmaSuballocationType type;
};

// Comparator for offsets.
struct VmaSuballocationOffsetLess
{
    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    {
        return lhs.offset < rhs.offset;
    }
};

struct VmaSuballocationOffsetGreater
{
    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    {
        return lhs.offset > rhs.offset;
    }
};

struct VmaSuballocationItemSizeLess
{
    bool operator()(const VmaSuballocationList::iterator lhs,
        const VmaSuballocationList::iterator rhs) const
    {
        return lhs->size < rhs->size;
    }

    bool operator()(const VmaSuballocationList::iterator lhs,
        VkDeviceSize rhsSize) const
    {
        return lhs->size < rhsSize;
    }
};
#endif // _VMA_SUBALLOCATION
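
// Illustrative note (not part of the library): these comparators let the
// metadata implementations keep suballocations sorted and binary-search them,
// e.g. by offset using the VmaBinaryFindFirstNotLess() helper defined earlier
// in this file (suballocations/offsetToFind are hypothetical here):
//
//    VmaSuballocation refSuballoc = {};
//    refSuballoc.offset = offsetToFind;
//    const VmaSuballocation* it = VmaBinaryFindFirstNotLess(
//        suballocations.data(),
//        suballocations.data() + suballocations.size(),
//        refSuballoc,
//        VmaSuballocationOffsetLess());
//    // *it is the first suballocation with offset >= offsetToFind, if any.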

#ifndef _VMA_ALLOCATION_REQUEST
/*
Parameters of planned allocation inside a VmaDeviceMemoryBlock.
item points to a FREE suballocation.
*/
struct VmaAllocationRequest
{
    VmaAllocHandle allocHandle;
    VkDeviceSize size;
    VmaSuballocationList::iterator item;
    void* customData;
    uint64_t algorithmData;
    VmaAllocationRequestType type;
};
#endif // _VMA_ALLOCATION_REQUEST

#ifndef _VMA_BLOCK_METADATA
/*
Data structure used for bookkeeping of allocations and unused ranges of memory
in a single VkDeviceMemory block.
*/
class VmaBlockMetadata
{
public:
    // pAllocationCallbacks, if not null, must be owned externally - alive and unchanged for the whole lifetime of this object.
    VmaBlockMetadata(const VkAllocationCallbacks* pAllocationCallbacks,
        VkDeviceSize bufferImageGranularity, bool isVirtual);
    virtual ~VmaBlockMetadata() = default;

    virtual void Init(VkDeviceSize size) { m_Size = size; }
    bool IsVirtual() const { return m_IsVirtual; }
    VkDeviceSize GetSize() const { return m_Size; }

    // Validates all data structures inside this object. If not valid, returns false.
    virtual bool Validate() const = 0;
    virtual size_t GetAllocationCount() const = 0;
    virtual size_t GetFreeRegionsCount() const = 0;
    virtual VkDeviceSize GetSumFreeSize() const = 0;
    // Returns true if this block is empty - contains only a single free suballocation.
    virtual bool IsEmpty() const = 0;
    virtual void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) = 0;
    virtual VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const = 0;
    virtual void* GetAllocationUserData(VmaAllocHandle allocHandle) const = 0;

    virtual VmaAllocHandle GetAllocationListBegin() const = 0;
    virtual VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const = 0;
    virtual VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const = 0;

    // Shouldn't modify blockCount.
    virtual void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const = 0;
    virtual void AddStatistics(VmaStatistics& inoutStats) const = 0;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
#endif

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    virtual bool CreateAllocationRequest(
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest) = 0;

    virtual VkResult CheckCorruption(const void* pBlockData) = 0;

    // Makes actual allocation based on request. Request must already be checked and valid.
    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        void* userData) = 0;

    // Frees suballocation assigned to given memory region.
    virtual void Free(VmaAllocHandle allocHandle) = 0;

    // Frees all allocations.
    // Careful! Don't call it if there are VmaAllocation objects owned by userData of cleared allocations!
    virtual void Clear() = 0;

    virtual void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) = 0;
    virtual void DebugLogAllAllocations() const = 0;

protected:
    const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    VkDeviceSize GetDebugMargin() const { return IsVirtual() ? 0 : VMA_DEBUG_MARGIN; }

    void DebugLogAllocation(VkDeviceSize offset, VkDeviceSize size, void* userData) const;
#if VMA_STATS_STRING_ENABLED
    // mapRefCount == UINT32_MAX means unspecified.
    void PrintDetailedMap_Begin(class VmaJsonWriter& json,
        VkDeviceSize unusedBytes,
        size_t allocationCount,
        size_t unusedRangeCount) const;
    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
        VkDeviceSize offset, VkDeviceSize size, void* userData) const;
    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VkDeviceSize size) const;
    void PrintDetailedMap_End(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Size;
    const VkAllocationCallbacks* m_pAllocationCallbacks;
    const VkDeviceSize m_BufferImageGranularity;
    const bool m_IsVirtual;
};

#ifndef _VMA_BLOCK_METADATA_FUNCTIONS
VmaBlockMetadata::VmaBlockMetadata(const VkAllocationCallbacks* pAllocationCallbacks,
    VkDeviceSize bufferImageGranularity, bool isVirtual)
    : m_Size(0),
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_BufferImageGranularity(bufferImageGranularity),
    m_IsVirtual(isVirtual) {}

void VmaBlockMetadata::DebugLogAllocation(VkDeviceSize offset, VkDeviceSize size, void* userData) const
{
    if (IsVirtual())
    {
        VMA_DEBUG_LOG("UNFREED VIRTUAL ALLOCATION; Offset: %llu; Size: %llu; UserData: %p", offset, size, userData);
    }
    else
    {
        VMA_ASSERT(userData != VMA_NULL);
        VmaAllocation allocation = reinterpret_cast<VmaAllocation>(userData);

        userData = allocation->GetUserData();
        const char* name = allocation->GetName();

#if VMA_STATS_STRING_ENABLED
        VMA_DEBUG_LOG("UNFREED ALLOCATION; Offset: %llu; Size: %llu; UserData: %p; Name: %s; Type: %s; Usage: %u",
            offset, size, userData, name ? name : "vma_empty",
            VMA_SUBALLOCATION_TYPE_NAMES[allocation->GetSuballocationType()],
            allocation->GetBufferImageUsage());
#else
        VMA_DEBUG_LOG("UNFREED ALLOCATION; Offset: %llu; Size: %llu; UserData: %p; Name: %s; Type: %u",
            offset, size, userData, name ? name : "vma_empty",
            (uint32_t)allocation->GetSuballocationType());
#endif // VMA_STATS_STRING_ENABLED
    }
}

#if VMA_STATS_STRING_ENABLED
void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    VkDeviceSize unusedBytes, size_t allocationCount, size_t unusedRangeCount) const
{
    json.WriteString("TotalBytes");
    json.WriteNumber(GetSize());

    json.WriteString("UnusedBytes");
    json.WriteSize(unusedBytes);

    json.WriteString("Allocations");
    json.WriteSize(allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteSize(unusedRangeCount);

    json.WriteString("Suballocations");
    json.BeginArray();
}

void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    VkDeviceSize offset, VkDeviceSize size, void* userData) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    if (IsVirtual())
    {
        json.WriteString("Size");
        json.WriteNumber(size);
        if (userData)
        {
            json.WriteString("CustomData");
            json.BeginString();
            json.ContinueString_Pointer(userData);
            json.EndString();
        }
    }
    else
    {
        ((VmaAllocation)userData)->PrintParameters(json);
    }

    json.EndObject();
}

void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    VkDeviceSize offset, VkDeviceSize size) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);

    json.WriteString("Size");
    json.WriteNumber(size);

    json.EndObject();
}

void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
{
    json.EndArray();
}
#endif // VMA_STATS_STRING_ENABLED
#endif // _VMA_BLOCK_METADATA_FUNCTIONS
#endif // _VMA_BLOCK_METADATA

#ifndef _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY
// Before deleting an object of this class, remember to call Destroy().
class VmaBlockBufferImageGranularity final
{
public:
    struct ValidationContext
    {
        const VkAllocationCallbacks* allocCallbacks;
        uint16_t* pageAllocs;
    };

    VmaBlockBufferImageGranularity(VkDeviceSize bufferImageGranularity);
    ~VmaBlockBufferImageGranularity();

    bool IsEnabled() const { return m_BufferImageGranularity > MAX_LOW_BUFFER_IMAGE_GRANULARITY; }

    void Init(const VkAllocationCallbacks* pAllocationCallbacks, VkDeviceSize size);
    // Before destroying the object, you must call Destroy() to free its memory.
    void Destroy(const VkAllocationCallbacks* pAllocationCallbacks);

    void RoundupAllocRequest(VmaSuballocationType allocType,
        VkDeviceSize& inOutAllocSize,
        VkDeviceSize& inOutAllocAlignment) const;

    bool CheckConflictAndAlignUp(VkDeviceSize& inOutAllocOffset,
        VkDeviceSize allocSize,
        VkDeviceSize blockOffset,
        VkDeviceSize blockSize,
        VmaSuballocationType allocType) const;

    void AllocPages(uint8_t allocType, VkDeviceSize offset, VkDeviceSize size);
    void FreePages(VkDeviceSize offset, VkDeviceSize size);
    void Clear();

    ValidationContext StartValidation(const VkAllocationCallbacks* pAllocationCallbacks,
        bool isVirtual) const;
    bool Validate(ValidationContext& ctx, VkDeviceSize offset, VkDeviceSize size) const;
    bool FinishValidation(ValidationContext& ctx) const;

private:
    static const uint16_t MAX_LOW_BUFFER_IMAGE_GRANULARITY = 256;

    struct RegionInfo
    {
        uint8_t allocType;
        uint16_t allocCount;
    };

    VkDeviceSize m_BufferImageGranularity;
    uint32_t m_RegionCount;
    RegionInfo* m_RegionInfo;

    uint32_t GetStartPage(VkDeviceSize offset) const { return OffsetToPageIndex(offset & ~(m_BufferImageGranularity - 1)); }
    uint32_t GetEndPage(VkDeviceSize offset, VkDeviceSize size) const { return OffsetToPageIndex((offset + size - 1) & ~(m_BufferImageGranularity - 1)); }

    uint32_t OffsetToPageIndex(VkDeviceSize offset) const;
    void AllocPage(RegionInfo& page, uint8_t allocType);
};

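// Worked example (illustrative): bufferImageGranularity is a power of two, so
// with e.g. m_BufferImageGranularity == 1024 (1 << 10):
//  - GetStartPage(1536)     -> (1536 & ~1023) = 1024, page index 1024 >> 10 = 1
//  - GetEndPage(1536, 4096) -> ((1536 + 4096 - 1) & ~1023) = 5120, page index 5
// Only the first and the last page of each allocation are tracked in
// m_RegionInfo, because a granularity conflict can only occur where two
// allocations of different types share a page.
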
6550 #ifndef _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY_FUNCTIONS
VmaBlockBufferImageGranularity(VkDeviceSize bufferImageGranularity)6551 VmaBlockBufferImageGranularity::VmaBlockBufferImageGranularity(VkDeviceSize bufferImageGranularity)
6552     : m_BufferImageGranularity(bufferImageGranularity),
6553     m_RegionCount(0),
6554     m_RegionInfo(VMA_NULL) {}
6555 
~VmaBlockBufferImageGranularity()6556 VmaBlockBufferImageGranularity::~VmaBlockBufferImageGranularity()
6557 {
6558     VMA_ASSERT(m_RegionInfo == VMA_NULL && "Free not called before destroying object!");
6559 }
6560 
Init(const VkAllocationCallbacks * pAllocationCallbacks,VkDeviceSize size)6561 void VmaBlockBufferImageGranularity::Init(const VkAllocationCallbacks* pAllocationCallbacks, VkDeviceSize size)
6562 {
6563     if (IsEnabled())
6564     {
6565         m_RegionCount = static_cast<uint32_t>(VmaDivideRoundingUp(size, m_BufferImageGranularity));
6566         m_RegionInfo = vma_new_array(pAllocationCallbacks, RegionInfo, m_RegionCount);
6567         memset(m_RegionInfo, 0, m_RegionCount * sizeof(RegionInfo));
6568     }
6569 }
6570 
Destroy(const VkAllocationCallbacks * pAllocationCallbacks)6571 void VmaBlockBufferImageGranularity::Destroy(const VkAllocationCallbacks* pAllocationCallbacks)
6572 {
6573     if (m_RegionInfo)
6574     {
6575         vma_delete_array(pAllocationCallbacks, m_RegionInfo, m_RegionCount);
6576         m_RegionInfo = VMA_NULL;
6577     }
6578 }
6579 
RoundupAllocRequest(VmaSuballocationType allocType,VkDeviceSize & inOutAllocSize,VkDeviceSize & inOutAllocAlignment)6580 void VmaBlockBufferImageGranularity::RoundupAllocRequest(VmaSuballocationType allocType,
6581     VkDeviceSize& inOutAllocSize,
6582     VkDeviceSize& inOutAllocAlignment) const
6583 {
6584     if (m_BufferImageGranularity > 1 &&
6585         m_BufferImageGranularity <= MAX_LOW_BUFFER_IMAGE_GRANULARITY)
6586     {
6587         if (allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
6588             allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
6589             allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
6590         {
6591             inOutAllocAlignment = VMA_MAX(inOutAllocAlignment, m_BufferImageGranularity);
6592             inOutAllocSize = VmaAlignUp(inOutAllocSize, m_BufferImageGranularity);
6593         }
6594     }
6595 }
6596 
CheckConflictAndAlignUp(VkDeviceSize & inOutAllocOffset,VkDeviceSize allocSize,VkDeviceSize blockOffset,VkDeviceSize blockSize,VmaSuballocationType allocType)6597 bool VmaBlockBufferImageGranularity::CheckConflictAndAlignUp(VkDeviceSize& inOutAllocOffset,
6598     VkDeviceSize allocSize,
6599     VkDeviceSize blockOffset,
6600     VkDeviceSize blockSize,
6601     VmaSuballocationType allocType) const
6602 {
6603     if (IsEnabled())
6604     {
6605         uint32_t startPage = GetStartPage(inOutAllocOffset);
6606         if (m_RegionInfo[startPage].allocCount > 0 &&
6607             VmaIsBufferImageGranularityConflict(static_cast<VmaSuballocationType>(m_RegionInfo[startPage].allocType), allocType))
6608         {
6609             inOutAllocOffset = VmaAlignUp(inOutAllocOffset, m_BufferImageGranularity);
6610             if (blockSize < allocSize + inOutAllocOffset - blockOffset)
6611                 return true;
6612             ++startPage;
6613         }
6614         uint32_t endPage = GetEndPage(inOutAllocOffset, allocSize);
6615         if (endPage != startPage &&
6616             m_RegionInfo[endPage].allocCount > 0 &&
6617             VmaIsBufferImageGranularityConflict(static_cast<VmaSuballocationType>(m_RegionInfo[endPage].allocType), allocType))
6618         {
6619             return true;
6620         }
6621     }
6622     return false;
6623 }
6624 
AllocPages(uint8_t allocType,VkDeviceSize offset,VkDeviceSize size)6625 void VmaBlockBufferImageGranularity::AllocPages(uint8_t allocType, VkDeviceSize offset, VkDeviceSize size)
6626 {
6627     if (IsEnabled())
6628     {
6629         uint32_t startPage = GetStartPage(offset);
6630         AllocPage(m_RegionInfo[startPage], allocType);
6631 
6632         uint32_t endPage = GetEndPage(offset, size);
6633         if (startPage != endPage)
6634             AllocPage(m_RegionInfo[endPage], allocType);
6635     }
6636 }
6637 
6638 void VmaBlockBufferImageGranularity::FreePages(VkDeviceSize offset, VkDeviceSize size)
6639 {
6640     if (IsEnabled())
6641     {
6642         uint32_t startPage = GetStartPage(offset);
6643         --m_RegionInfo[startPage].allocCount;
6644         if (m_RegionInfo[startPage].allocCount == 0)
6645             m_RegionInfo[startPage].allocType = VMA_SUBALLOCATION_TYPE_FREE;
6646         uint32_t endPage = GetEndPage(offset, size);
6647         if (startPage != endPage)
6648         {
6649             --m_RegionInfo[endPage].allocCount;
6650             if (m_RegionInfo[endPage].allocCount == 0)
6651                 m_RegionInfo[endPage].allocType = VMA_SUBALLOCATION_TYPE_FREE;
6652         }
6653     }
6654 }
6655 
6656 void VmaBlockBufferImageGranularity::Clear()
6657 {
6658     if (m_RegionInfo)
6659         memset(m_RegionInfo, 0, m_RegionCount * sizeof(RegionInfo));
6660 }
6661 
6662 VmaBlockBufferImageGranularity::ValidationContext VmaBlockBufferImageGranularity::StartValidation(
6663     const VkAllocationCallbacks* pAllocationCallbacks, bool isVirtual) const
6664 {
6665     ValidationContext ctx{ pAllocationCallbacks, VMA_NULL };
6666     if (!isVirtual && IsEnabled())
6667     {
6668         ctx.pageAllocs = vma_new_array(pAllocationCallbacks, uint16_t, m_RegionCount);
6669         memset(ctx.pageAllocs, 0, m_RegionCount * sizeof(uint16_t));
6670     }
6671     return ctx;
6672 }
6673 
6674 bool VmaBlockBufferImageGranularity::Validate(ValidationContext& ctx,
6675     VkDeviceSize offset, VkDeviceSize size) const
6676 {
6677     if (IsEnabled())
6678     {
6679         uint32_t start = GetStartPage(offset);
6680         ++ctx.pageAllocs[start];
6681         VMA_VALIDATE(m_RegionInfo[start].allocCount > 0);
6682 
6683         uint32_t end = GetEndPage(offset, size);
6684         if (start != end)
6685         {
6686             ++ctx.pageAllocs[end];
6687             VMA_VALIDATE(m_RegionInfo[end].allocCount > 0);
6688         }
6689     }
6690     return true;
6691 }
6692 
6693 bool VmaBlockBufferImageGranularity::FinishValidation(ValidationContext& ctx) const
6694 {
6695     // Check proper page structure
6696     if (IsEnabled())
6697     {
6698         VMA_ASSERT(ctx.pageAllocs != VMA_NULL && "Validation context not initialized!");
6699 
6700         for (uint32_t page = 0; page < m_RegionCount; ++page)
6701         {
6702             VMA_VALIDATE(ctx.pageAllocs[page] == m_RegionInfo[page].allocCount);
6703         }
6704         vma_delete_array(ctx.allocCallbacks, ctx.pageAllocs, m_RegionCount);
6705         ctx.pageAllocs = VMA_NULL;
6706     }
6707     return true;
6708 }
6709 
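// OffsetToPageIndex divides the offset by the granularity using a shift. This relies on
// m_BufferImageGranularity being a power of two (VMA_BITSCAN_MSB then yields its log2),
// which the Vulkan spec guarantees for VkPhysicalDeviceLimits::bufferImageGranularity.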
6710 uint32_t VmaBlockBufferImageGranularity::OffsetToPageIndex(VkDeviceSize offset) const
6711 {
6712     return static_cast<uint32_t>(offset >> VMA_BITSCAN_MSB(m_BufferImageGranularity));
6713 }
6714 
6715 void VmaBlockBufferImageGranularity::AllocPage(RegionInfo& page, uint8_t allocType)
6716 {
6717     // When the current alloc type is free, it can be overridden by the new type.
6718     if (page.allocCount == 0 || (page.allocCount > 0 && page.allocType == VMA_SUBALLOCATION_TYPE_FREE))
6719         page.allocType = allocType;
6720 
6721     ++page.allocCount;
6722 }
6723 #endif // _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY_FUNCTIONS
6724 #endif // _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY
6725 
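// The generic (free-list based) block metadata below is compiled out with #if 0 in this
// version of the allocator and is kept in the source for reference only.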
6726 #if 0
6727 #ifndef _VMA_BLOCK_METADATA_GENERIC
6728 class VmaBlockMetadata_Generic : public VmaBlockMetadata
6729 {
6730     friend class VmaDefragmentationAlgorithm_Generic;
6731     friend class VmaDefragmentationAlgorithm_Fast;
6732     VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
6733 public:
6734     VmaBlockMetadata_Generic(const VkAllocationCallbacks* pAllocationCallbacks,
6735         VkDeviceSize bufferImageGranularity, bool isVirtual);
6736     virtual ~VmaBlockMetadata_Generic() = default;
6737 
6738     size_t GetAllocationCount() const override { return m_Suballocations.size() - m_FreeCount; }
6739     VkDeviceSize GetSumFreeSize() const override { return m_SumFreeSize; }
6740     bool IsEmpty() const override { return (m_Suballocations.size() == 1) && (m_FreeCount == 1); }
6741     void Free(VmaAllocHandle allocHandle) override { FreeSuballocation(FindAtOffset((VkDeviceSize)allocHandle - 1)); }
6742     VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return (VkDeviceSize)allocHandle - 1; };
6743 
6744     void Init(VkDeviceSize size) override;
6745     bool Validate() const override;
6746 
6747     void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override;
6748     void AddStatistics(VmaStatistics& inoutStats) const override;
6749 
6750 #if VMA_STATS_STRING_ENABLED
6751     void PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const override;
6752 #endif
6753 
6754     bool CreateAllocationRequest(
6755         VkDeviceSize allocSize,
6756         VkDeviceSize allocAlignment,
6757         bool upperAddress,
6758         VmaSuballocationType allocType,
6759         uint32_t strategy,
6760         VmaAllocationRequest* pAllocationRequest) override;
6761 
6762     VkResult CheckCorruption(const void* pBlockData) override;
6763 
6764     void Alloc(
6765         const VmaAllocationRequest& request,
6766         VmaSuballocationType type,
6767         void* userData) override;
6768 
6769     void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override;
6770     void* GetAllocationUserData(VmaAllocHandle allocHandle) const override;
6771     VmaAllocHandle GetAllocationListBegin() const override;
6772     VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override;
6773     void Clear() override;
6774     void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override;
6775     void DebugLogAllAllocations() const override;
6776 
6777 private:
6778     uint32_t m_FreeCount;
6779     VkDeviceSize m_SumFreeSize;
6780     VmaSuballocationList m_Suballocations;
6781     // Suballocations that are free. Sorted by size, ascending.
6782     VmaVector<VmaSuballocationList::iterator, VmaStlAllocator<VmaSuballocationList::iterator>> m_FreeSuballocationsBySize;
6783 
6784     VkDeviceSize AlignAllocationSize(VkDeviceSize size) const { return IsVirtual() ? size : VmaAlignUp(size, (VkDeviceSize)16); }
6785 
6786     VmaSuballocationList::iterator FindAtOffset(VkDeviceSize offset) const;
6787     bool ValidateFreeSuballocationList() const;
6788 
6789     // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
6790     // If yes, fills pOffset and returns true. If no, returns false.
6791     bool CheckAllocation(
6792         VkDeviceSize allocSize,
6793         VkDeviceSize allocAlignment,
6794         VmaSuballocationType allocType,
6795         VmaSuballocationList::const_iterator suballocItem,
6796         VmaAllocHandle* pAllocHandle) const;
6797 
6798     // Given free suballocation, it merges it with following one, which must also be free.
6799     void MergeFreeWithNext(VmaSuballocationList::iterator item);
6800     // Releases given suballocation, making it free.
6801     // Merges it with adjacent free suballocations if applicable.
6802     // Returns iterator to new free suballocation at this place.
6803     VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
6804     // Given free suballocation, it inserts it into sorted list of
6805     // m_FreeSuballocationsBySize if it is suitable.
6806     void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
6807     // Given free suballocation, it removes it from sorted list of
6808     // m_FreeSuballocationsBySize if it is suitable.
6809     void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
6810 };
6811 
6812 #ifndef _VMA_BLOCK_METADATA_GENERIC_FUNCTIONS
6813 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(const VkAllocationCallbacks* pAllocationCallbacks,
6814     VkDeviceSize bufferImageGranularity, bool isVirtual)
6815     : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual),
6816     m_FreeCount(0),
6817     m_SumFreeSize(0),
6818     m_Suballocations(VmaStlAllocator<VmaSuballocation>(pAllocationCallbacks)),
6819     m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(pAllocationCallbacks)) {}
6820 
6821 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
6822 {
6823     VmaBlockMetadata::Init(size);
6824 
6825     m_FreeCount = 1;
6826     m_SumFreeSize = size;
6827 
6828     VmaSuballocation suballoc = {};
6829     suballoc.offset = 0;
6830     suballoc.size = size;
6831     suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
6832 
6833     m_Suballocations.push_back(suballoc);
6834     m_FreeSuballocationsBySize.push_back(m_Suballocations.begin());
6835 }
6836 
6837 bool VmaBlockMetadata_Generic::Validate() const
6838 {
6839     VMA_VALIDATE(!m_Suballocations.empty());
6840 
6841     // Expected offset of new suballocation as calculated from previous ones.
6842     VkDeviceSize calculatedOffset = 0;
6843     // Expected number of free suballocations as calculated from traversing their list.
6844     uint32_t calculatedFreeCount = 0;
6845     // Expected sum size of free suballocations as calculated from traversing their list.
6846     VkDeviceSize calculatedSumFreeSize = 0;
6847     // Expected number of free suballocations that should be registered in
6848     // m_FreeSuballocationsBySize calculated from traversing their list.
6849     size_t freeSuballocationsToRegister = 0;
6850     // True if previous visited suballocation was free.
6851     bool prevFree = false;
6852 
6853     const VkDeviceSize debugMargin = GetDebugMargin();
6854 
6855     for (const auto& subAlloc : m_Suballocations)
6856     {
6857         // Actual offset of this suballocation doesn't match expected one.
6858         VMA_VALIDATE(subAlloc.offset == calculatedOffset);
6859 
6860         const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
6861         // Two adjacent free suballocations are invalid. They should be merged.
6862         VMA_VALIDATE(!prevFree || !currFree);
6863 
6864         VmaAllocation alloc = (VmaAllocation)subAlloc.userData;
6865         if (!IsVirtual())
6866         {
6867             VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE));
6868         }
6869 
6870         if (currFree)
6871         {
6872             calculatedSumFreeSize += subAlloc.size;
6873             ++calculatedFreeCount;
6874             ++freeSuballocationsToRegister;
6875 
6876             // Margin required between allocations - every free space must be at least that large.
6877             VMA_VALIDATE(subAlloc.size >= debugMargin);
6878         }
6879         else
6880         {
6881             if (!IsVirtual())
6882             {
6883                 VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == subAlloc.offset + 1);
6884                 VMA_VALIDATE(alloc->GetSize() == subAlloc.size);
6885             }
6886 
6887             // Margin required between allocations - previous allocation must be free.
6888             VMA_VALIDATE(debugMargin == 0 || prevFree);
6889         }
6890 
6891         calculatedOffset += subAlloc.size;
6892         prevFree = currFree;
6893     }
6894 
6895     // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
6896     // match expected one.
6897     VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
6898 
6899     VkDeviceSize lastSize = 0;
6900     for (size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
6901     {
6902         VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
6903 
6904         // Only free suballocations can be registered in m_FreeSuballocationsBySize.
6905         VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
6906         // They must be sorted by size ascending.
6907         VMA_VALIDATE(suballocItem->size >= lastSize);
6908 
6909         lastSize = suballocItem->size;
6910     }
6911 
6912     // Check if totals match calculated values.
6913     VMA_VALIDATE(ValidateFreeSuballocationList());
6914     VMA_VALIDATE(calculatedOffset == GetSize());
6915     VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
6916     VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
6917 
6918     return true;
6919 }
6920 
6921 void VmaBlockMetadata_Generic::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const
6922 {
6923     const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
6924     inoutStats.statistics.blockCount++;
6925     inoutStats.statistics.blockBytes += GetSize();
6926 
6927     for (const auto& suballoc : m_Suballocations)
6928     {
6929         if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
6930             VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size);
6931         else
6932             VmaAddDetailedStatisticsUnusedRange(inoutStats, suballoc.size);
6933     }
6934 }
6935 
6936 void VmaBlockMetadata_Generic::AddStatistics(VmaStatistics& inoutStats) const
6937 {
6938     inoutStats.blockCount++;
6939     inoutStats.allocationCount += (uint32_t)m_Suballocations.size() - m_FreeCount;
6940     inoutStats.blockBytes += GetSize();
6941     inoutStats.allocationBytes += GetSize() - m_SumFreeSize;
6942 }
6943 
6944 #if VMA_STATS_STRING_ENABLED
6945 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const
6946 {
6947     PrintDetailedMap_Begin(json,
6948         m_SumFreeSize, // unusedBytes
6949         m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
6950         m_FreeCount, // unusedRangeCount
6951         mapRefCount);
6952 
6953     for (const auto& suballoc : m_Suballocations)
6954     {
6955         if (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
6956         {
6957             PrintDetailedMap_UnusedRange(json, suballoc.offset, suballoc.size);
6958         }
6959         else
6960         {
6961             PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData);
6962         }
6963     }
6964 
6965     PrintDetailedMap_End(json);
6966 }
6967 #endif // VMA_STATS_STRING_ENABLED
6968 
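// Strategy dispatch: with no strategy or MIN_MEMORY_BIT the search is best-fit (a binary
// search in m_FreeSuballocationsBySize for the first free range >= allocSize + debugMargin),
// the internal MIN_OFFSET strategy scans suballocations linearly in address order, and any
// other strategy is treated as worst-fit, trying the largest free ranges first.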
6969 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
6970     VkDeviceSize allocSize,
6971     VkDeviceSize allocAlignment,
6972     bool upperAddress,
6973     VmaSuballocationType allocType,
6974     uint32_t strategy,
6975     VmaAllocationRequest* pAllocationRequest)
6976 {
6977     VMA_ASSERT(allocSize > 0);
6978     VMA_ASSERT(!upperAddress);
6979     VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
6980     VMA_ASSERT(pAllocationRequest != VMA_NULL);
6981     VMA_HEAVY_ASSERT(Validate());
6982 
6983     allocSize = AlignAllocationSize(allocSize);
6984 
6985     pAllocationRequest->type = VmaAllocationRequestType::Normal;
6986     pAllocationRequest->size = allocSize;
6987 
6988     const VkDeviceSize debugMargin = GetDebugMargin();
6989 
6990     // There is not enough total free space in this block to fulfill the request: Early return.
6991     if (m_SumFreeSize < allocSize + debugMargin)
6992     {
6993         return false;
6994     }
6995 
6996     // New algorithm, efficiently searching freeSuballocationsBySize.
6997     const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
6998     if (freeSuballocCount > 0)
6999     {
7000         if (strategy == 0 ||
7001             strategy == VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT)
7002         {
7003             // Find first free suballocation with size not less than allocSize + debugMargin.
7004             VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
7005                 m_FreeSuballocationsBySize.data(),
7006                 m_FreeSuballocationsBySize.data() + freeSuballocCount,
7007                 allocSize + debugMargin,
7008                 VmaSuballocationItemSizeLess());
7009             size_t index = it - m_FreeSuballocationsBySize.data();
7010             for (; index < freeSuballocCount; ++index)
7011             {
7012                 if (CheckAllocation(
7013                     allocSize,
7014                     allocAlignment,
7015                     allocType,
7016                     m_FreeSuballocationsBySize[index],
7017                     &pAllocationRequest->allocHandle))
7018                 {
7019                     pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7020                     return true;
7021                 }
7022             }
7023         }
7024         else if (strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
7025         {
7026             for (VmaSuballocationList::iterator it = m_Suballocations.begin();
7027                 it != m_Suballocations.end();
7028                 ++it)
7029             {
7030                 if (it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
7031                     allocSize,
7032                     allocAlignment,
7033                     allocType,
7034                     it,
7035                     &pAllocationRequest->allocHandle))
7036                 {
7037                     pAllocationRequest->item = it;
7038                     return true;
7039                 }
7040             }
7041         }
7042         else
7043         {
7044             VMA_ASSERT(strategy & (VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT | VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT ));
7045             // Search staring from biggest suballocations.
7046             for (size_t index = freeSuballocCount; index--; )
7047             {
7048                 if (CheckAllocation(
7049                     allocSize,
7050                     allocAlignment,
7051                     allocType,
7052                     m_FreeSuballocationsBySize[index],
7053                     &pAllocationRequest->allocHandle))
7054                 {
7055                     pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7056                     return true;
7057                 }
7058             }
7059         }
7060     }
7061 
7062     return false;
7063 }
7064 
7065 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
7066 {
7067     for (auto& suballoc : m_Suballocations)
7068     {
7069         if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
7070         {
7071             if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
7072             {
7073                 VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
7074                 return VK_ERROR_UNKNOWN_COPY;
7075             }
7076         }
7077     }
7078 
7079     return VK_SUCCESS;
7080 }
7081 
7082 void VmaBlockMetadata_Generic::Alloc(
7083     const VmaAllocationRequest& request,
7084     VmaSuballocationType type,
7085     void* userData)
7086 {
7087     VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
7088     VMA_ASSERT(request.item != m_Suballocations.end());
7089     VmaSuballocation& suballoc = *request.item;
7090     // Given suballocation is a free block.
7091     VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7092 
7093     // Given offset is inside this suballocation.
7094     VMA_ASSERT((VkDeviceSize)request.allocHandle - 1 >= suballoc.offset);
7095     const VkDeviceSize paddingBegin = (VkDeviceSize)request.allocHandle - suballoc.offset - 1;
7096     VMA_ASSERT(suballoc.size >= paddingBegin + request.size);
7097     const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - request.size;
7098 
7099     // Unregister this free suballocation from m_FreeSuballocationsBySize and update
7100     // it to become used.
7101     UnregisterFreeSuballocation(request.item);
7102 
7103     suballoc.offset = (VkDeviceSize)request.allocHandle - 1;
7104     suballoc.size = request.size;
7105     suballoc.type = type;
7106     suballoc.userData = userData;
7107 
7108     // If there are any free bytes remaining at the end, insert new free suballocation after current one.
7109     if (paddingEnd)
7110     {
7111         VmaSuballocation paddingSuballoc = {};
7112         paddingSuballoc.offset = suballoc.offset + suballoc.size;
7113         paddingSuballoc.size = paddingEnd;
7114         paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7115         VmaSuballocationList::iterator next = request.item;
7116         ++next;
7117         const VmaSuballocationList::iterator paddingEndItem =
7118             m_Suballocations.insert(next, paddingSuballoc);
7119         RegisterFreeSuballocation(paddingEndItem);
7120     }
7121 
7122     // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
7123     if (paddingBegin)
7124     {
7125         VmaSuballocation paddingSuballoc = {};
7126         paddingSuballoc.offset = suballoc.offset - paddingBegin;
7127         paddingSuballoc.size = paddingBegin;
7128         paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7129         const VmaSuballocationList::iterator paddingBeginItem =
7130             m_Suballocations.insert(request.item, paddingSuballoc);
7131         RegisterFreeSuballocation(paddingBeginItem);
7132     }
7133 
7134     // Update totals.
7135     m_FreeCount = m_FreeCount - 1;
7136     if (paddingBegin > 0)
7137     {
7138         ++m_FreeCount;
7139     }
7140     if (paddingEnd > 0)
7141     {
7142         ++m_FreeCount;
7143     }
7144     m_SumFreeSize -= request.size;
7145 }
7146 
7147 void VmaBlockMetadata_Generic::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo)
7148 {
7149     outInfo.offset = (VkDeviceSize)allocHandle - 1;
7150     const VmaSuballocation& suballoc = *FindAtOffset(outInfo.offset);
7151     outInfo.size = suballoc.size;
7152     outInfo.pUserData = suballoc.userData;
7153 }
7154 
7155 void* VmaBlockMetadata_Generic::GetAllocationUserData(VmaAllocHandle allocHandle) const
7156 {
7157     return FindAtOffset((VkDeviceSize)allocHandle - 1)->userData;
7158 }
7159 
7160 VmaAllocHandle VmaBlockMetadata_Generic::GetAllocationListBegin() const
7161 {
7162     if (IsEmpty())
7163         return VK_NULL_HANDLE;
7164 
7165     for (const auto& suballoc : m_Suballocations)
7166     {
7167         if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
7168             return (VmaAllocHandle)(suballoc.offset + 1);
7169     }
7170     VMA_ASSERT(false && "Should contain at least 1 allocation!");
7171     return VK_NULL_HANDLE;
7172 }
7173 
7174 VmaAllocHandle VmaBlockMetadata_Generic::GetNextAllocation(VmaAllocHandle prevAlloc) const
7175 {
7176     VmaSuballocationList::const_iterator prev = FindAtOffset((VkDeviceSize)prevAlloc - 1);
7177 
7178     for (VmaSuballocationList::const_iterator it = ++prev; it != m_Suballocations.end(); ++it)
7179     {
7180         if (it->type != VMA_SUBALLOCATION_TYPE_FREE)
7181             return (VmaAllocHandle)(it->offset + 1);
7182     }
7183     return VK_NULL_HANDLE;
7184 }
7185 
7186 void VmaBlockMetadata_Generic::Clear()
7187 {
7188     const VkDeviceSize size = GetSize();
7189 
7190     VMA_ASSERT(IsVirtual());
7191     m_FreeCount = 1;
7192     m_SumFreeSize = size;
7193     m_Suballocations.clear();
7194     m_FreeSuballocationsBySize.clear();
7195 
7196     VmaSuballocation suballoc = {};
7197     suballoc.offset = 0;
7198     suballoc.size = size;
7199     suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7200     m_Suballocations.push_back(suballoc);
7201 
7202     m_FreeSuballocationsBySize.push_back(m_Suballocations.begin());
7203 }
7204 
7205 void VmaBlockMetadata_Generic::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData)
7206 {
7207     VmaSuballocation& suballoc = *FindAtOffset((VkDeviceSize)allocHandle - 1);
7208     suballoc.userData = userData;
7209 }
7210 
7211 void VmaBlockMetadata_Generic::DebugLogAllAllocations() const
7212 {
7213     for (const auto& suballoc : m_Suballocations)
7214     {
7215         if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
7216             DebugLogAllocation(suballoc.offset, suballoc.size, suballoc.userData);
7217     }
7218 }
7219 
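// FindAtOffset exploits the fact that m_Suballocations is sorted by offset: after checking
// the first and last elements as cheap fast paths, it estimates the average suballocation
// size and scans from whichever end of the list is likely closer to the requested offset.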
7220 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FindAtOffset(VkDeviceSize offset) const
7221 {
7222     VMA_HEAVY_ASSERT(!m_Suballocations.empty());
7223     const VkDeviceSize last = m_Suballocations.rbegin()->offset;
7224     if (last == offset)
7225         return m_Suballocations.rbegin().drop_const();
7226     const VkDeviceSize first = m_Suballocations.begin()->offset;
7227     if (first == offset)
7228         return m_Suballocations.begin().drop_const();
7229 
7230     const size_t suballocCount = m_Suballocations.size();
7231     const VkDeviceSize step = (last - first + m_Suballocations.begin()->size) / suballocCount;
7232     auto findSuballocation = [&](auto begin, auto end) -> VmaSuballocationList::iterator
7233     {
7234         for (auto suballocItem = begin;
7235             suballocItem != end;
7236             ++suballocItem)
7237         {
7238             if (suballocItem->offset == offset)
7239                 return suballocItem.drop_const();
7240         }
7241         VMA_ASSERT(false && "Not found!");
7242         return m_Suballocations.end().drop_const();
7243     };
7244     // If requested offset is closer to the end of range, search from the end
7245     if (offset - first > suballocCount * step / 2)
7246     {
7247         return findSuballocation(m_Suballocations.rbegin(), m_Suballocations.rend());
7248     }
7249     return findSuballocation(m_Suballocations.begin(), m_Suballocations.end());
7250 }
7251 
7252 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
7253 {
7254     VkDeviceSize lastSize = 0;
7255     for (size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
7256     {
7257         const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
7258 
7259         VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
7260         VMA_VALIDATE(it->size >= lastSize);
7261         lastSize = it->size;
7262     }
7263     return true;
7264 }
7265 
7266 bool VmaBlockMetadata_Generic::CheckAllocation(
7267     VkDeviceSize allocSize,
7268     VkDeviceSize allocAlignment,
7269     VmaSuballocationType allocType,
7270     VmaSuballocationList::const_iterator suballocItem,
7271     VmaAllocHandle* pAllocHandle) const
7272 {
7273     VMA_ASSERT(allocSize > 0);
7274     VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
7275     VMA_ASSERT(suballocItem != m_Suballocations.cend());
7276     VMA_ASSERT(pAllocHandle != VMA_NULL);
7277 
7278     const VkDeviceSize debugMargin = GetDebugMargin();
7279     const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity();
7280 
7281     const VmaSuballocation& suballoc = *suballocItem;
7282     VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7283 
7284     // Size of this suballocation is too small for this request: Early return.
7285     if (suballoc.size < allocSize)
7286     {
7287         return false;
7288     }
7289 
7290     // Start from offset equal to beginning of this suballocation.
7291     VkDeviceSize offset = suballoc.offset + (suballocItem == m_Suballocations.cbegin() ? 0 : GetDebugMargin());
7292 
7293     // Apply debugMargin from the end of previous alloc.
7294     if (debugMargin > 0)
7295     {
7296         offset += debugMargin;
7297     }
7298 
7299     // Apply alignment.
7300     offset = VmaAlignUp(offset, allocAlignment);
7301 
7302     // Check previous suballocations for BufferImageGranularity conflicts.
7303     // Make bigger alignment if necessary.
7304     if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment)
7305     {
7306         bool bufferImageGranularityConflict = false;
7307         VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
7308         while (prevSuballocItem != m_Suballocations.cbegin())
7309         {
7310             --prevSuballocItem;
7311             const VmaSuballocation& prevSuballoc = *prevSuballocItem;
7312             if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, offset, bufferImageGranularity))
7313             {
7314                 if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
7315                 {
7316                     bufferImageGranularityConflict = true;
7317                     break;
7318                 }
7319             }
7320             else
7321                 // Already on previous page.
7322                 break;
7323         }
7324         if (bufferImageGranularityConflict)
7325         {
7326             offset = VmaAlignUp(offset, bufferImageGranularity);
7327         }
7328     }
7329 
7330     // Calculate padding at the beginning based on current offset.
7331     const VkDeviceSize paddingBegin = offset - suballoc.offset;
7332 
7333     // Fail if requested size plus margin after is bigger than size of this suballocation.
7334     if (paddingBegin + allocSize + debugMargin > suballoc.size)
7335     {
7336         return false;
7337     }
7338 
7339     // Check next suballocations for BufferImageGranularity conflicts.
7340     // If conflict exists, allocation cannot be made here.
7341     if (allocSize % bufferImageGranularity || offset % bufferImageGranularity)
7342     {
7343         VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
7344         ++nextSuballocItem;
7345         while (nextSuballocItem != m_Suballocations.cend())
7346         {
7347             const VmaSuballocation& nextSuballoc = *nextSuballocItem;
7348             if (VmaBlocksOnSamePage(offset, allocSize, nextSuballoc.offset, bufferImageGranularity))
7349             {
7350                 if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
7351                 {
7352                     return false;
7353                 }
7354             }
7355             else
7356             {
7357                 // Already on next page.
7358                 break;
7359             }
7360             ++nextSuballocItem;
7361         }
7362     }
7363 
7364     *pAllocHandle = (VmaAllocHandle)(offset + 1);
7365     // All tests passed: Success. pAllocHandle is already filled.
7366     return true;
7367 }
7368 
7369 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
7370 {
7371     VMA_ASSERT(item != m_Suballocations.end());
7372     VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7373 
7374     VmaSuballocationList::iterator nextItem = item;
7375     ++nextItem;
7376     VMA_ASSERT(nextItem != m_Suballocations.end());
7377     VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
7378 
7379     item->size += nextItem->size;
7380     --m_FreeCount;
7381     m_Suballocations.erase(nextItem);
7382 }
7383 
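// FreeSuballocation marks the item free, updates the totals, and then coalesces it with a
// free successor and/or predecessor so that two adjacent free ranges never coexist, which is
// the invariant Validate() checks above.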
7384 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
7385 {
7386     // Change this suballocation to be marked as free.
7387     VmaSuballocation& suballoc = *suballocItem;
7388     suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7389     suballoc.userData = VMA_NULL;
7390 
7391     // Update totals.
7392     ++m_FreeCount;
7393     m_SumFreeSize += suballoc.size;
7394 
7395     // Merge with previous and/or next suballocation if it's also free.
7396     bool mergeWithNext = false;
7397     bool mergeWithPrev = false;
7398 
7399     VmaSuballocationList::iterator nextItem = suballocItem;
7400     ++nextItem;
7401     if ((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
7402     {
7403         mergeWithNext = true;
7404     }
7405 
7406     VmaSuballocationList::iterator prevItem = suballocItem;
7407     if (suballocItem != m_Suballocations.begin())
7408     {
7409         --prevItem;
7410         if (prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7411         {
7412             mergeWithPrev = true;
7413         }
7414     }
7415 
7416     if (mergeWithNext)
7417     {
7418         UnregisterFreeSuballocation(nextItem);
7419         MergeFreeWithNext(suballocItem);
7420     }
7421 
7422     if (mergeWithPrev)
7423     {
7424         UnregisterFreeSuballocation(prevItem);
7425         MergeFreeWithNext(prevItem);
7426         RegisterFreeSuballocation(prevItem);
7427         return prevItem;
7428     }
7429     else
7430     {
7431         RegisterFreeSuballocation(suballocItem);
7432         return suballocItem;
7433     }
7434 }
7435 
7436 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
7437 {
7438     VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7439     VMA_ASSERT(item->size > 0);
7440 
7441     // You may want to enable this validation at the beginning or at the end of
7442     // this function, depending on what you want to check.
7443     VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7444 
7445     if (m_FreeSuballocationsBySize.empty())
7446     {
7447         m_FreeSuballocationsBySize.push_back(item);
7448     }
7449     else
7450     {
7451         VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
7452     }
7453 
7454     //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7455 }
7456 
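// Unregistering uses a binary search to find the first entry of equal size, then walks the
// run of equal-sized entries linearly until it finds the exact iterator, since the vector is
// ordered by size only.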
7457 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
7458 {
7459     VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7460     VMA_ASSERT(item->size > 0);
7461 
7462     // You may want to enable this validation at the beginning or at the end of
7463     // this function, depending on what you want to check.
7464     VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7465 
7466     VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
7467         m_FreeSuballocationsBySize.data(),
7468         m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
7469         item,
7470         VmaSuballocationItemSizeLess());
7471     for (size_t index = it - m_FreeSuballocationsBySize.data();
7472         index < m_FreeSuballocationsBySize.size();
7473         ++index)
7474     {
7475         if (m_FreeSuballocationsBySize[index] == item)
7476         {
7477             VmaVectorRemove(m_FreeSuballocationsBySize, index);
7478             return;
7479         }
7480         VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
7481     }
7482     VMA_ASSERT(0 && "Not found.");
7483 
7484     //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7485 }
7486 #endif // _VMA_BLOCK_METADATA_GENERIC_FUNCTIONS
7487 #endif // _VMA_BLOCK_METADATA_GENERIC
7488 #endif // #if 0
7489 
7490 #ifndef _VMA_BLOCK_METADATA_LINEAR
7491 /*
7492 Allocations and their references in internal data structure look like this:
7493 
7494 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
7495 
7496         0 +-------+
7497           |       |
7498           |       |
7499           |       |
7500           +-------+
7501           | Alloc |  1st[m_1stNullItemsBeginCount]
7502           +-------+
7503           | Alloc |  1st[m_1stNullItemsBeginCount + 1]
7504           +-------+
7505           |  ...  |
7506           +-------+
7507           | Alloc |  1st[1st.size() - 1]
7508           +-------+
7509           |       |
7510           |       |
7511           |       |
7512 GetSize() +-------+
7513 
7514 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
7515 
7516         0 +-------+
7517           | Alloc |  2nd[0]
7518           +-------+
7519           | Alloc |  2nd[1]
7520           +-------+
7521           |  ...  |
7522           +-------+
7523           | Alloc |  2nd[2nd.size() - 1]
7524           +-------+
7525           |       |
7526           |       |
7527           |       |
7528           +-------+
7529           | Alloc |  1st[m_1stNullItemsBeginCount]
7530           +-------+
7531           | Alloc |  1st[m_1stNullItemsBeginCount + 1]
7532           +-------+
7533           |  ...  |
7534           +-------+
7535           | Alloc |  1st[1st.size() - 1]
7536           +-------+
7537           |       |
7538 GetSize() +-------+
7539 
7540 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
7541 
7542         0 +-------+
7543           |       |
7544           |       |
7545           |       |
7546           +-------+
7547           | Alloc |  1st[m_1stNullItemsBeginCount]
7548           +-------+
7549           | Alloc |  1st[m_1stNullItemsBeginCount + 1]
7550           +-------+
7551           |  ...  |
7552           +-------+
7553           | Alloc |  1st[1st.size() - 1]
7554           +-------+
7555           |       |
7556           |       |
7557           |       |
7558           +-------+
7559           | Alloc |  2nd[2nd.size() - 1]
7560           +-------+
7561           |  ...  |
7562           +-------+
7563           | Alloc |  2nd[1]
7564           +-------+
7565           | Alloc |  2nd[0]
7566 GetSize() +-------+
7567 
7568 */
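// This linear metadata is selected through the public API rather than instantiated directly.
// A minimal sketch, assuming an initialized VmaAllocator named allocator and a memTypeIndex
// chosen as described in the custom memory pools chapter (both names are placeholders):
//
//     VmaPoolCreateInfo poolCreateInfo = {};
//     poolCreateInfo.memoryTypeIndex = memTypeIndex;
//     poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
//     poolCreateInfo.blockSize = 64ull * 1024 * 1024; // example size
//     poolCreateInfo.maxBlockCount = 1; // often used so the pool behaves as one ring buffer
//
//     VmaPool pool;
//     VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);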
7569 class VmaBlockMetadata_Linear : public VmaBlockMetadata
7570 {
7571     VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
7572 public:
7573     VmaBlockMetadata_Linear(const VkAllocationCallbacks* pAllocationCallbacks,
7574         VkDeviceSize bufferImageGranularity, bool isVirtual);
7575     virtual ~VmaBlockMetadata_Linear() = default;
7576 
7577     VkDeviceSize GetSumFreeSize() const override { return m_SumFreeSize; }
7578     bool IsEmpty() const override { return GetAllocationCount() == 0; }
7579     VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return (VkDeviceSize)allocHandle - 1; };
7580 
7581     void Init(VkDeviceSize size) override;
7582     bool Validate() const override;
7583     size_t GetAllocationCount() const override;
7584     size_t GetFreeRegionsCount() const override;
7585 
7586     void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override;
7587     void AddStatistics(VmaStatistics& inoutStats) const override;
7588 
7589 #if VMA_STATS_STRING_ENABLED
7590     void PrintDetailedMap(class VmaJsonWriter& json) const override;
7591 #endif
7592 
7593     bool CreateAllocationRequest(
7594         VkDeviceSize allocSize,
7595         VkDeviceSize allocAlignment,
7596         bool upperAddress,
7597         VmaSuballocationType allocType,
7598         uint32_t strategy,
7599         VmaAllocationRequest* pAllocationRequest) override;
7600 
7601     VkResult CheckCorruption(const void* pBlockData) override;
7602 
7603     void Alloc(
7604         const VmaAllocationRequest& request,
7605         VmaSuballocationType type,
7606         void* userData) override;
7607 
7608     void Free(VmaAllocHandle allocHandle) override;
7609     void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override;
7610     void* GetAllocationUserData(VmaAllocHandle allocHandle) const override;
7611     VmaAllocHandle GetAllocationListBegin() const override;
7612     VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override;
7613     VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const override;
7614     void Clear() override;
7615     void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override;
7616     void DebugLogAllAllocations() const override;
7617 
7618 private:
7619     /*
7620     There are two suballocation vectors, used in ping-pong way.
7621     The one with index m_1stVectorIndex is called 1st.
7622     The one with index (m_1stVectorIndex ^ 1) is called 2nd.
7623     2nd can be non-empty only when 1st is not empty.
7624     When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
7625     */
7626     typedef VmaVector<VmaSuballocation, VmaStlAllocator<VmaSuballocation>> SuballocationVectorType;
7627 
7628     enum SECOND_VECTOR_MODE
7629     {
7630         SECOND_VECTOR_EMPTY,
7631         /*
7632         Suballocations in 2nd vector are created later than the ones in 1st, but they
7633         all have smaller offset.
7634         */
7635         SECOND_VECTOR_RING_BUFFER,
7636         /*
7637         Suballocations in 2nd vector are upper side of double stack.
7638         They all have offsets higher than those in 1st vector.
7639         Top of this stack means smaller offsets, but higher indices in this vector.
7640         */
7641         SECOND_VECTOR_DOUBLE_STACK,
7642     };
7643 
7644     VkDeviceSize m_SumFreeSize;
7645     SuballocationVectorType m_Suballocations0, m_Suballocations1;
7646     uint32_t m_1stVectorIndex;
7647     SECOND_VECTOR_MODE m_2ndVectorMode;
7648     // Number of items in 1st vector with hAllocation = null at the beginning.
7649     size_t m_1stNullItemsBeginCount;
7650     // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
7651     size_t m_1stNullItemsMiddleCount;
7652     // Number of items in 2nd vector with hAllocation = null.
7653     size_t m_2ndNullItemsCount;
7654 
7655     SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
7656     SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
7657     const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
7658     const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
7659 
7660     VmaSuballocation& FindSuballocation(VkDeviceSize offset) const;
7661     bool ShouldCompact1st() const;
7662     void CleanupAfterFree();
7663 
7664     bool CreateAllocationRequest_LowerAddress(
7665         VkDeviceSize allocSize,
7666         VkDeviceSize allocAlignment,
7667         VmaSuballocationType allocType,
7668         uint32_t strategy,
7669         VmaAllocationRequest* pAllocationRequest);
7670     bool CreateAllocationRequest_UpperAddress(
7671         VkDeviceSize allocSize,
7672         VkDeviceSize allocAlignment,
7673         VmaSuballocationType allocType,
7674         uint32_t strategy,
7675         VmaAllocationRequest* pAllocationRequest);
7676 };
7677 
7678 #ifndef _VMA_BLOCK_METADATA_LINEAR_FUNCTIONS
7679 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(const VkAllocationCallbacks* pAllocationCallbacks,
7680     VkDeviceSize bufferImageGranularity, bool isVirtual)
7681     : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual),
7682     m_SumFreeSize(0),
7683     m_Suballocations0(VmaStlAllocator<VmaSuballocation>(pAllocationCallbacks)),
7684     m_Suballocations1(VmaStlAllocator<VmaSuballocation>(pAllocationCallbacks)),
7685     m_1stVectorIndex(0),
7686     m_2ndVectorMode(SECOND_VECTOR_EMPTY),
7687     m_1stNullItemsBeginCount(0),
7688     m_1stNullItemsMiddleCount(0),
7689     m_2ndNullItemsCount(0) {}
7690 
7691 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
7692 {
7693     VmaBlockMetadata::Init(size);
7694     m_SumFreeSize = size;
7695 }
7696 
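// Validation walks the 2nd vector first when it acts as a ring buffer (its suballocations
// live below those of the 1st vector), then the 1st vector, then the 2nd vector again when
// it is the upper side of a double stack, checking that offsets never decrease and that the
// null-item counters and m_SumFreeSize agree with what the walk actually found.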
7697 bool VmaBlockMetadata_Linear::Validate() const
7698 {
7699     const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7700     const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7701 
7702     VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
7703     VMA_VALIDATE(!suballocations1st.empty() ||
7704         suballocations2nd.empty() ||
7705         m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
7706 
7707     if (!suballocations1st.empty())
7708     {
7709         // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
7710         VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].type != VMA_SUBALLOCATION_TYPE_FREE);
7711         // Null item at the end should be just pop_back().
7712         VMA_VALIDATE(suballocations1st.back().type != VMA_SUBALLOCATION_TYPE_FREE);
7713     }
7714     if (!suballocations2nd.empty())
7715     {
7716         // Null item at the end should be just pop_back().
7717         VMA_VALIDATE(suballocations2nd.back().type != VMA_SUBALLOCATION_TYPE_FREE);
7718     }
7719 
7720     VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
7721     VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
7722 
7723     VkDeviceSize sumUsedSize = 0;
7724     const size_t suballoc1stCount = suballocations1st.size();
7725     const VkDeviceSize debugMargin = GetDebugMargin();
7726     VkDeviceSize offset = 0;
7727 
7728     if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
7729     {
7730         const size_t suballoc2ndCount = suballocations2nd.size();
7731         size_t nullItem2ndCount = 0;
7732         for (size_t i = 0; i < suballoc2ndCount; ++i)
7733         {
7734             const VmaSuballocation& suballoc = suballocations2nd[i];
7735             const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7736 
7737             VmaAllocation const alloc = (VmaAllocation)suballoc.userData;
7738             if (!IsVirtual())
7739             {
7740                 VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE));
7741             }
7742             VMA_VALIDATE(suballoc.offset >= offset);
7743 
7744             if (!currFree)
7745             {
7746                 if (!IsVirtual())
7747                 {
7748                     VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1);
7749                     VMA_VALIDATE(alloc->GetSize() == suballoc.size);
7750                 }
7751                 sumUsedSize += suballoc.size;
7752             }
7753             else
7754             {
7755                 ++nullItem2ndCount;
7756             }
7757 
7758             offset = suballoc.offset + suballoc.size + debugMargin;
7759         }
7760 
7761         VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
7762     }
7763 
7764     for (size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
7765     {
7766         const VmaSuballocation& suballoc = suballocations1st[i];
7767         VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
7768             suballoc.userData == VMA_NULL);
7769     }
7770 
7771     size_t nullItem1stCount = m_1stNullItemsBeginCount;
7772 
7773     for (size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
7774     {
7775         const VmaSuballocation& suballoc = suballocations1st[i];
7776         const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7777 
7778         VmaAllocation const alloc = (VmaAllocation)suballoc.userData;
7779         if (!IsVirtual())
7780         {
7781             VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE));
7782         }
7783         VMA_VALIDATE(suballoc.offset >= offset);
7784         VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
7785 
7786         if (!currFree)
7787         {
7788             if (!IsVirtual())
7789             {
7790                 VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1);
7791                 VMA_VALIDATE(alloc->GetSize() == suballoc.size);
7792             }
7793             sumUsedSize += suballoc.size;
7794         }
7795         else
7796         {
7797             ++nullItem1stCount;
7798         }
7799 
7800         offset = suballoc.offset + suballoc.size + debugMargin;
7801     }
7802     VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
7803 
7804     if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
7805     {
7806         const size_t suballoc2ndCount = suballocations2nd.size();
7807         size_t nullItem2ndCount = 0;
7808         for (size_t i = suballoc2ndCount; i--; )
7809         {
7810             const VmaSuballocation& suballoc = suballocations2nd[i];
7811             const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7812 
7813             VmaAllocation const alloc = (VmaAllocation)suballoc.userData;
7814             if (!IsVirtual())
7815             {
7816                 VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE));
7817             }
7818             VMA_VALIDATE(suballoc.offset >= offset);
7819 
7820             if (!currFree)
7821             {
7822                 if (!IsVirtual())
7823                 {
7824                     VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1);
7825                     VMA_VALIDATE(alloc->GetSize() == suballoc.size);
7826                 }
7827                 sumUsedSize += suballoc.size;
7828             }
7829             else
7830             {
7831                 ++nullItem2ndCount;
7832             }
7833 
7834             offset = suballoc.offset + suballoc.size + debugMargin;
7835         }
7836 
7837         VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
7838     }
7839 
7840     VMA_VALIDATE(offset <= GetSize());
7841     VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
7842 
7843     return true;
7844 }
7845 
7846 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
7847 {
7848     return AccessSuballocations1st().size() - m_1stNullItemsBeginCount - m_1stNullItemsMiddleCount +
7849         AccessSuballocations2nd().size() - m_2ndNullItemsCount;
7850 }
7851 
7852 size_t VmaBlockMetadata_Linear::GetFreeRegionsCount() const
7853 {
7854     // Function only used for defragmentation, which is disabled for this algorithm
7855     VMA_ASSERT(0);
7856     return SIZE_MAX;
7857 }
7858 
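// Statistics are gathered with the same three-phase walk as Validate(): the ring-buffer part
// of the 2nd vector, then the 1st vector, then the double-stack part of the 2nd vector; gaps
// between the visited allocations are reported as unused ranges.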
7859 void VmaBlockMetadata_Linear::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const
7860 {
7861     const VkDeviceSize size = GetSize();
7862     const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7863     const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7864     const size_t suballoc1stCount = suballocations1st.size();
7865     const size_t suballoc2ndCount = suballocations2nd.size();
7866 
7867     inoutStats.statistics.blockCount++;
7868     inoutStats.statistics.blockBytes += size;
7869 
7870     VkDeviceSize lastOffset = 0;
7871 
7872     if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
7873     {
7874         const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
7875         size_t nextAlloc2ndIndex = 0;
7876         while (lastOffset < freeSpace2ndTo1stEnd)
7877         {
7878             // Find next non-null allocation or move nextAllocIndex to the end.
7879             while (nextAlloc2ndIndex < suballoc2ndCount &&
7880                 suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
7881             {
7882                 ++nextAlloc2ndIndex;
7883             }
7884 
7885             // Found non-null allocation.
7886             if (nextAlloc2ndIndex < suballoc2ndCount)
7887             {
7888                 const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
7889 
7890                 // 1. Process free space before this allocation.
7891                 if (lastOffset < suballoc.offset)
7892                 {
7893                     // There is free space from lastOffset to suballoc.offset.
7894                     const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
7895                     VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
7896                 }
7897 
7898                 // 2. Process this allocation.
7899                 // There is allocation with suballoc.offset, suballoc.size.
7900                 VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size);
7901 
7902                 // 3. Prepare for next iteration.
7903                 lastOffset = suballoc.offset + suballoc.size;
7904                 ++nextAlloc2ndIndex;
7905             }
7906             // We are at the end.
7907             else
7908             {
7909                 // There is free space from lastOffset to freeSpace2ndTo1stEnd.
7910                 if (lastOffset < freeSpace2ndTo1stEnd)
7911                 {
7912                     const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
7913                     VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
7914                 }
7915 
7916                 // End of loop.
7917                 lastOffset = freeSpace2ndTo1stEnd;
7918             }
7919         }
7920     }
7921 
7922     size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
7923     const VkDeviceSize freeSpace1stTo2ndEnd =
7924         m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
7925     while (lastOffset < freeSpace1stTo2ndEnd)
7926     {
7927         // Find next non-null allocation or move nextAllocIndex to the end.
7928         while (nextAlloc1stIndex < suballoc1stCount &&
7929             suballocations1st[nextAlloc1stIndex].userData == VMA_NULL)
7930         {
7931             ++nextAlloc1stIndex;
7932         }
7933 
7934         // Found non-null allocation.
7935         if (nextAlloc1stIndex < suballoc1stCount)
7936         {
7937             const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
7938 
7939             // 1. Process free space before this allocation.
7940             if (lastOffset < suballoc.offset)
7941             {
7942                 // There is free space from lastOffset to suballoc.offset.
7943                 const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
7944                 VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
7945             }
7946 
7947             // 2. Process this allocation.
7948             // There is allocation with suballoc.offset, suballoc.size.
7949             VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size);
7950 
7951             // 3. Prepare for next iteration.
7952             lastOffset = suballoc.offset + suballoc.size;
7953             ++nextAlloc1stIndex;
7954         }
7955         // We are at the end.
7956         else
7957         {
7958             // There is free space from lastOffset to freeSpace1stTo2ndEnd.
7959             if (lastOffset < freeSpace1stTo2ndEnd)
7960             {
7961                 const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
7962                 VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
7963             }
7964 
7965             // End of loop.
7966             lastOffset = freeSpace1stTo2ndEnd;
7967         }
7968     }
7969 
7970     if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
7971     {
7972         size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
7973         while (lastOffset < size)
7974         {
7975             // Find next non-null allocation or move nextAllocIndex to the end.
7976             while (nextAlloc2ndIndex != SIZE_MAX &&
7977                 suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
7978             {
7979                 --nextAlloc2ndIndex;
7980             }
7981 
7982             // Found non-null allocation.
7983             if (nextAlloc2ndIndex != SIZE_MAX)
7984             {
7985                 const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
7986 
7987                 // 1. Process free space before this allocation.
7988                 if (lastOffset < suballoc.offset)
7989                 {
7990                     // There is free space from lastOffset to suballoc.offset.
7991                     const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
7992                     VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
7993                 }
7994 
7995                 // 2. Process this allocation.
7996                 // There is allocation with suballoc.offset, suballoc.size.
7997                 VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size);
7998 
7999                 // 3. Prepare for next iteration.
8000                 lastOffset = suballoc.offset + suballoc.size;
8001                 --nextAlloc2ndIndex;
8002             }
8003             // We are at the end.
8004             else
8005             {
8006                 // There is free space from lastOffset to size.
8007                 if (lastOffset < size)
8008                 {
8009                     const VkDeviceSize unusedRangeSize = size - lastOffset;
8010                     VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
8011                 }
8012 
8013                 // End of loop.
8014                 lastOffset = size;
8015             }
8016         }
8017     }
8018 }
8019 
8020 void VmaBlockMetadata_Linear::AddStatistics(VmaStatistics& inoutStats) const
8021 {
8022     const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8023     const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8024     const VkDeviceSize size = GetSize();
8025     const size_t suballoc1stCount = suballocations1st.size();
8026     const size_t suballoc2ndCount = suballocations2nd.size();
8027 
8028     inoutStats.blockCount++;
8029     inoutStats.blockBytes += size;
8030     inoutStats.allocationBytes += size - m_SumFreeSize;
8031 
8032     VkDeviceSize lastOffset = 0;
8033 
8034     if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8035     {
8036         const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8037         size_t nextAlloc2ndIndex = 0; // The 2nd vector is always iterated from its beginning.
8038         while (lastOffset < freeSpace2ndTo1stEnd)
8039         {
8040             // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8041             while (nextAlloc2ndIndex < suballoc2ndCount &&
8042                 suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
8043             {
8044                 ++nextAlloc2ndIndex;
8045             }
8046 
8047             // Found non-null allocation.
8048             if (nextAlloc2ndIndex < suballoc2ndCount)
8049             {
8050                 const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8051 
8052                 // 1. Process free space before this allocation.
8053                 // Unused ranges do not contribute to VmaStatistics, so there is nothing to add here.
8058 
8059                 // 2. Process this allocation.
8060                 // There is allocation with suballoc.offset, suballoc.size.
8061                 ++inoutStats.allocationCount;
8062 
8063                 // 3. Prepare for next iteration.
8064                 lastOffset = suballoc.offset + suballoc.size;
8065                 ++nextAlloc2ndIndex;
8066             }
8067             // We are at the end.
8068             else
8069             {
8070                 // Free space from lastOffset to freeSpace2ndTo1stEnd is not tracked in VmaStatistics.
8075 
8076                 // End of loop.
8077                 lastOffset = freeSpace2ndTo1stEnd;
8078             }
8079         }
8080     }
8081 
8082     size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8083     const VkDeviceSize freeSpace1stTo2ndEnd =
8084         m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8085     while (lastOffset < freeSpace1stTo2ndEnd)
8086     {
8087         // Find next non-null allocation or move nextAlloc1stIndex to the end.
8088         while (nextAlloc1stIndex < suballoc1stCount &&
8089             suballocations1st[nextAlloc1stIndex].userData == VMA_NULL)
8090         {
8091             ++nextAlloc1stIndex;
8092         }
8093 
8094         // Found non-null allocation.
8095         if (nextAlloc1stIndex < suballoc1stCount)
8096         {
8097             const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8098 
8099             // 1. Process free space before this allocation.
8100             // Unused ranges do not contribute to VmaStatistics, so there is nothing to add here.
8105 
8106             // 2. Process this allocation.
8107             // There is allocation with suballoc.offset, suballoc.size.
8108             ++inoutStats.allocationCount;
8109 
8110             // 3. Prepare for next iteration.
8111             lastOffset = suballoc.offset + suballoc.size;
8112             ++nextAlloc1stIndex;
8113         }
8114         // We are at the end.
8115         else
8116         {
8117             // Free space from lastOffset to freeSpace1stTo2ndEnd is not tracked in VmaStatistics.
8122 
8123             // End of loop.
8124             lastOffset = freeSpace1stTo2ndEnd;
8125         }
8126     }
8127 
8128     if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8129     {
8130         size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8131         while (lastOffset < size)
8132         {
8133             // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8134             while (nextAlloc2ndIndex != SIZE_MAX &&
8135                 suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
8136             {
8137                 --nextAlloc2ndIndex;
8138             }
8139 
8140             // Found non-null allocation.
8141             if (nextAlloc2ndIndex != SIZE_MAX)
8142             {
8143                 const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8144 
8145                 // 1. Process free space before this allocation.
8146                 // Unused ranges do not contribute to VmaStatistics, so there is nothing to add here.
8151 
8152                 // 2. Process this allocation.
8153                 // There is allocation with suballoc.offset, suballoc.size.
8154                 ++inoutStats.allocationCount;
8155 
8156                 // 3. Prepare for next iteration.
8157                 lastOffset = suballoc.offset + suballoc.size;
8158                 --nextAlloc2ndIndex;
8159             }
8160             // We are at the end.
8161             else
8162             {
8163                 // Free space from lastOffset to size is not tracked in VmaStatistics.
8168 
8169                 // End of loop.
8170                 lastOffset = size;
8171             }
8172         }
8173     }
8174 }
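
/*
The per-block sums above feed the public statistics API; a minimal sketch,
assuming `allocator` and `pool` already exist:

    VmaStatistics stats;
    vmaGetPoolStatistics(allocator, pool, &stats);
    printf("blocks: %u, allocations: %u, used %llu of %llu bytes\n",
        stats.blockCount, stats.allocationCount,
        (unsigned long long)stats.allocationBytes,
        (unsigned long long)stats.blockBytes);
*/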
8175 
8176 #if VMA_STATS_STRING_ENABLED
8177 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
8178 {
8179     const VkDeviceSize size = GetSize();
8180     const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8181     const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8182     const size_t suballoc1stCount = suballocations1st.size();
8183     const size_t suballoc2ndCount = suballocations2nd.size();
8184 
8185     // FIRST PASS
8186 
8187     size_t unusedRangeCount = 0;
8188     VkDeviceSize usedBytes = 0;
8189 
8190     VkDeviceSize lastOffset = 0;
8191 
8192     size_t alloc2ndCount = 0;
8193     if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8194     {
8195         const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8196         size_t nextAlloc2ndIndex = 0;
8197         while (lastOffset < freeSpace2ndTo1stEnd)
8198         {
8199             // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8200             while (nextAlloc2ndIndex < suballoc2ndCount &&
8201                 suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
8202             {
8203                 ++nextAlloc2ndIndex;
8204             }
8205 
8206             // Found non-null allocation.
8207             if (nextAlloc2ndIndex < suballoc2ndCount)
8208             {
8209                 const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8210 
8211                 // 1. Process free space before this allocation.
8212                 if (lastOffset < suballoc.offset)
8213                 {
8214                     // There is free space from lastOffset to suballoc.offset.
8215                     ++unusedRangeCount;
8216                 }
8217 
8218                 // 2. Process this allocation.
8219                 // There is allocation with suballoc.offset, suballoc.size.
8220                 ++alloc2ndCount;
8221                 usedBytes += suballoc.size;
8222 
8223                 // 3. Prepare for next iteration.
8224                 lastOffset = suballoc.offset + suballoc.size;
8225                 ++nextAlloc2ndIndex;
8226             }
8227             // We are at the end.
8228             else
8229             {
8230                 if (lastOffset < freeSpace2ndTo1stEnd)
8231                 {
8232                     // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8233                     ++unusedRangeCount;
8234                 }
8235 
8236                 // End of loop.
8237                 lastOffset = freeSpace2ndTo1stEnd;
8238             }
8239         }
8240     }
8241 
8242     size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8243     size_t alloc1stCount = 0;
8244     const VkDeviceSize freeSpace1stTo2ndEnd =
8245         m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8246     while (lastOffset < freeSpace1stTo2ndEnd)
8247     {
8248         // Find next non-null allocation or move nextAlloc1stIndex to the end.
8249         while (nextAlloc1stIndex < suballoc1stCount &&
8250             suballocations1st[nextAlloc1stIndex].userData == VMA_NULL)
8251         {
8252             ++nextAlloc1stIndex;
8253         }
8254 
8255         // Found non-null allocation.
8256         if (nextAlloc1stIndex < suballoc1stCount)
8257         {
8258             const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8259 
8260             // 1. Process free space before this allocation.
8261             if (lastOffset < suballoc.offset)
8262             {
8263                 // There is free space from lastOffset to suballoc.offset.
8264                 ++unusedRangeCount;
8265             }
8266 
8267             // 2. Process this allocation.
8268             // There is allocation with suballoc.offset, suballoc.size.
8269             ++alloc1stCount;
8270             usedBytes += suballoc.size;
8271 
8272             // 3. Prepare for next iteration.
8273             lastOffset = suballoc.offset + suballoc.size;
8274             ++nextAlloc1stIndex;
8275         }
8276         // We are at the end.
8277         else
8278         {
8279             if (lastOffset < freeSpace1stTo2ndEnd)
8280             {
8281                 // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8282                 ++unusedRangeCount;
8283             }
8284 
8285             // End of loop.
8286             lastOffset = freeSpace1stTo2ndEnd;
8287         }
8288     }
8289 
8290     if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8291     {
8292         size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8293         while (lastOffset < size)
8294         {
8295             // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8296             while (nextAlloc2ndIndex != SIZE_MAX &&
8297                 suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
8298             {
8299                 --nextAlloc2ndIndex;
8300             }
8301 
8302             // Found non-null allocation.
8303             if (nextAlloc2ndIndex != SIZE_MAX)
8304             {
8305                 const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8306 
8307                 // 1. Process free space before this allocation.
8308                 if (lastOffset < suballoc.offset)
8309                 {
8310                     // There is free space from lastOffset to suballoc.offset.
8311                     ++unusedRangeCount;
8312                 }
8313 
8314                 // 2. Process this allocation.
8315                 // There is allocation with suballoc.offset, suballoc.size.
8316                 ++alloc2ndCount;
8317                 usedBytes += suballoc.size;
8318 
8319                 // 3. Prepare for next iteration.
8320                 lastOffset = suballoc.offset + suballoc.size;
8321                 --nextAlloc2ndIndex;
8322             }
8323             // We are at the end.
8324             else
8325             {
8326                 if (lastOffset < size)
8327                 {
8328                     // There is free space from lastOffset to size.
8329                     ++unusedRangeCount;
8330                 }
8331 
8332                 // End of loop.
8333                 lastOffset = size;
8334             }
8335         }
8336     }
8337 
8338     const VkDeviceSize unusedBytes = size - usedBytes;
8339     PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
8340 
8341     // SECOND PASS
8342     lastOffset = 0;
8343 
8344     if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8345     {
8346         const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8347         size_t nextAlloc2ndIndex = 0;
8348         while (lastOffset < freeSpace2ndTo1stEnd)
8349         {
8350             // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8351             while (nextAlloc2ndIndex < suballoc2ndCount &&
8352                 suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
8353             {
8354                 ++nextAlloc2ndIndex;
8355             }
8356 
8357             // Found non-null allocation.
8358             if (nextAlloc2ndIndex < suballoc2ndCount)
8359             {
8360                 const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8361 
8362                 // 1. Process free space before this allocation.
8363                 if (lastOffset < suballoc.offset)
8364                 {
8365                     // There is free space from lastOffset to suballoc.offset.
8366                     const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8367                     PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8368                 }
8369 
8370                 // 2. Process this allocation.
8371                 // There is allocation with suballoc.offset, suballoc.size.
8372                 PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData);
8373 
8374                 // 3. Prepare for next iteration.
8375                 lastOffset = suballoc.offset + suballoc.size;
8376                 ++nextAlloc2ndIndex;
8377             }
8378             // We are at the end.
8379             else
8380             {
8381                 if (lastOffset < freeSpace2ndTo1stEnd)
8382                 {
8383                     // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8384                     const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8385                     PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8386                 }
8387 
8388                 // End of loop.
8389                 lastOffset = freeSpace2ndTo1stEnd;
8390             }
8391         }
8392     }
8393 
8394     nextAlloc1stIndex = m_1stNullItemsBeginCount;
8395     while (lastOffset < freeSpace1stTo2ndEnd)
8396     {
8397         // Find next non-null allocation or move nextAlloc1stIndex to the end.
8398         while (nextAlloc1stIndex < suballoc1stCount &&
8399             suballocations1st[nextAlloc1stIndex].userData == VMA_NULL)
8400         {
8401             ++nextAlloc1stIndex;
8402         }
8403 
8404         // Found non-null allocation.
8405         if (nextAlloc1stIndex < suballoc1stCount)
8406         {
8407             const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8408 
8409             // 1. Process free space before this allocation.
8410             if (lastOffset < suballoc.offset)
8411             {
8412                 // There is free space from lastOffset to suballoc.offset.
8413                 const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8414                 PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8415             }
8416 
8417             // 2. Process this allocation.
8418             // There is allocation with suballoc.offset, suballoc.size.
8419             PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData);
8420 
8421             // 3. Prepare for next iteration.
8422             lastOffset = suballoc.offset + suballoc.size;
8423             ++nextAlloc1stIndex;
8424         }
8425         // We are at the end.
8426         else
8427         {
8428             if (lastOffset < freeSpace1stTo2ndEnd)
8429             {
8430                 // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8431                 const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8432                 PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8433             }
8434 
8435             // End of loop.
8436             lastOffset = freeSpace1stTo2ndEnd;
8437         }
8438     }
8439 
8440     if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8441     {
8442         size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8443         while (lastOffset < size)
8444         {
8445             // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8446             while (nextAlloc2ndIndex != SIZE_MAX &&
8447                 suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
8448             {
8449                 --nextAlloc2ndIndex;
8450             }
8451 
8452             // Found non-null allocation.
8453             if (nextAlloc2ndIndex != SIZE_MAX)
8454             {
8455                 const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8456 
8457                 // 1. Process free space before this allocation.
8458                 if (lastOffset < suballoc.offset)
8459                 {
8460                     // There is free space from lastOffset to suballoc.offset.
8461                     const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8462                     PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8463                 }
8464 
8465                 // 2. Process this allocation.
8466                 // There is allocation with suballoc.offset, suballoc.size.
8467                 PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData);
8468 
8469                 // 3. Prepare for next iteration.
8470                 lastOffset = suballoc.offset + suballoc.size;
8471                 --nextAlloc2ndIndex;
8472             }
8473             // We are at the end.
8474             else
8475             {
8476                 if (lastOffset < size)
8477                 {
8478                     // There is free space from lastOffset to size.
8479                     const VkDeviceSize unusedRangeSize = size - lastOffset;
8480                     PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8481                 }
8482 
8483                 // End of loop.
8484                 lastOffset = size;
8485             }
8486         }
8487     }
8488 
8489     PrintDetailedMap_End(json);
8490 }
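
/*
The JSON produced by PrintDetailedMap() is exposed through vmaBuildStatsString()
(available only when VMA_STATS_STRING_ENABLED is in effect); a minimal sketch:

    char* statsString = VMA_NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include the detailed map
    // ...persist statsString for offline inspection...
    vmaFreeStatsString(allocator, statsString);
*/
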
8491 #endif // VMA_STATS_STRING_ENABLED
8492 
8493 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
8494     VkDeviceSize allocSize,
8495     VkDeviceSize allocAlignment,
8496     bool upperAddress,
8497     VmaSuballocationType allocType,
8498     uint32_t strategy,
8499     VmaAllocationRequest* pAllocationRequest)
8500 {
8501     VMA_ASSERT(allocSize > 0);
8502     VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8503     VMA_ASSERT(pAllocationRequest != VMA_NULL);
8504     VMA_HEAVY_ASSERT(Validate());
8505     pAllocationRequest->size = allocSize;
8506     return upperAddress ?
8507         CreateAllocationRequest_UpperAddress(
8508             allocSize, allocAlignment, allocType, strategy, pAllocationRequest) :
8509         CreateAllocationRequest_LowerAddress(
8510             allocSize, allocAlignment, allocType, strategy, pAllocationRequest);
8511 }
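
/*
A minimal sketch of how the upperAddress path is reached through the public API,
assuming `allocator` and a suitable `memTypeIndex` were chosen earlier; the
double-stack behavior requires a custom pool using the linear algorithm:

    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memTypeIndex;
    poolInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
    VmaPool pool;
    vmaCreatePool(allocator, &poolInfo, &pool);

    VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufInfo.size = 65536; // illustrative size
    bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.pool = pool;
    allocInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT; // upperAddress == true here

    VkBuffer buf;
    VmaAllocation alloc;
    vmaCreateBuffer(allocator, &bufInfo, &allocInfo, &buf, &alloc, VMA_NULL);
*/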
8512 
8513 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
8514 {
8515     VMA_ASSERT(!IsVirtual());
8516     SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8517     for (size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
8518     {
8519         const VmaSuballocation& suballoc = suballocations1st[i];
8520         if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
8521         {
8522             if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
8523             {
8524                 VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
8525                 return VK_ERROR_UNKNOWN_COPY;
8526             }
8527         }
8528     }
8529 
8530     SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8531     for (size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
8532     {
8533         const VmaSuballocation& suballoc = suballocations2nd[i];
8534         if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
8535         {
8536             if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
8537             {
8538                 VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
8539                 return VK_ERROR_UNKNOWN_COPY;
8540             }
8541         }
8542     }
8543 
8544     return VK_SUCCESS;
8545 }
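
/*
This check only has margins to validate when the library is configured with
them; a minimal sketch, using the configuration macros of this header:

    #define VMA_DEBUG_MARGIN 16
    #define VMA_DEBUG_DETECT_CORRUPTION 1
    #include "vk_mem_alloc.h"

    // Later, validate the magic values written after each allocation:
    VkResult res = vmaCheckPoolCorruption(allocator, pool);
    // ...or vmaCheckCorruption(allocator, UINT32_MAX) for all memory types.
*/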
8546 
8547 void VmaBlockMetadata_Linear::Alloc(
8548     const VmaAllocationRequest& request,
8549     VmaSuballocationType type,
8550     void* userData)
8551 {
8552     const VkDeviceSize offset = (VkDeviceSize)request.allocHandle - 1;
8553     const VmaSuballocation newSuballoc = { offset, request.size, userData, type };
8554 
8555     switch (request.type)
8556     {
8557     case VmaAllocationRequestType::UpperAddress:
8558     {
8559         VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
8560             "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
8561         SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8562         suballocations2nd.push_back(newSuballoc);
8563         m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
8564     }
8565     break;
8566     case VmaAllocationRequestType::EndOf1st:
8567     {
8568         SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8569 
8570         VMA_ASSERT(suballocations1st.empty() ||
8571             offset >= suballocations1st.back().offset + suballocations1st.back().size);
8572         // Check if it fits before the end of the block.
8573         VMA_ASSERT(offset + request.size <= GetSize());
8574 
8575         suballocations1st.push_back(newSuballoc);
8576     }
8577     break;
8578     case VmaAllocationRequestType::EndOf2nd:
8579     {
8580         SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8581         // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
8582         VMA_ASSERT(!suballocations1st.empty() &&
8583             offset + request.size <= suballocations1st[m_1stNullItemsBeginCount].offset);
8584         SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8585 
8586         switch (m_2ndVectorMode)
8587         {
8588         case SECOND_VECTOR_EMPTY:
8589             // First allocation from second part ring buffer.
8590             VMA_ASSERT(suballocations2nd.empty());
8591             m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
8592             break;
8593         case SECOND_VECTOR_RING_BUFFER:
8594             // 2-part ring buffer is already started.
8595             VMA_ASSERT(!suballocations2nd.empty());
8596             break;
8597         case SECOND_VECTOR_DOUBLE_STACK:
8598             VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
8599             break;
8600         default:
8601             VMA_ASSERT(0);
8602         }
8603 
8604         suballocations2nd.push_back(newSuballoc);
8605     }
8606     break;
8607     default:
8608         VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
8609     }
8610 
8611     m_SumFreeSize -= newSuballoc.size;
8612 }
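
/*
A worked example of how the three request types drive the mode switches above,
assuming a 100-byte block, zero debug margin, and 30-byte allocations A, B, C, D:
A, B and C land at offsets 0, 30 and 60 in the 1st vector (EndOf1st). After A is
freed, D no longer fits at the end (90 + 30 > 100), so CreateAllocationRequest
places it at offset 0 as EndOf2nd and this function switches m_2ndVectorMode to
SECOND_VECTOR_RING_BUFFER. An UpperAddress request would instead push onto the
2nd vector from the end of the block, switching the mode to
SECOND_VECTOR_DOUBLE_STACK.
*/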
8613 
8614 void VmaBlockMetadata_Linear::Free(VmaAllocHandle allocHandle)
8615 {
8616     SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8617     SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8618     VkDeviceSize offset = (VkDeviceSize)allocHandle - 1;
8619 
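    // Fast paths first: the oldest allocation (start of the 1st vector) and the
    // newest one (back of the 2nd or 1st vector) are freed in O(1); anything else
    // is located by binary search below.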
8620     if (!suballocations1st.empty())
8621     {
8622         // If this is the oldest allocation (first live item in the 1st vector), mark it free and extend the null-item prefix.
8623         VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
8624         if (firstSuballoc.offset == offset)
8625         {
8626             firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8627             firstSuballoc.userData = VMA_NULL;
8628             m_SumFreeSize += firstSuballoc.size;
8629             ++m_1stNullItemsBeginCount;
8630             CleanupAfterFree();
8631             return;
8632         }
8633     }
8634 
8635     // Last allocation in 2-part ring buffer or top of upper stack (same logic).
8636     if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
8637         m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8638     {
8639         VmaSuballocation& lastSuballoc = suballocations2nd.back();
8640         if (lastSuballoc.offset == offset)
8641         {
8642             m_SumFreeSize += lastSuballoc.size;
8643             suballocations2nd.pop_back();
8644             CleanupAfterFree();
8645             return;
8646         }
8647     }
8648     // Last allocation in 1st vector.
8649     else if (m_2ndVectorMode == SECOND_VECTOR_EMPTY)
8650     {
8651         VmaSuballocation& lastSuballoc = suballocations1st.back();
8652         if (lastSuballoc.offset == offset)
8653         {
8654             m_SumFreeSize += lastSuballoc.size;
8655             suballocations1st.pop_back();
8656             CleanupAfterFree();
8657             return;
8658         }
8659     }
8660 
8661     VmaSuballocation refSuballoc;
8662     refSuballoc.offset = offset;
8663     // Rest of members stays uninitialized intentionally for better performance.
8664 
8665     // Item from the middle of 1st vector.
8666     {
8667         const SuballocationVectorType::iterator it = VmaBinaryFindSorted(
8668             suballocations1st.begin() + m_1stNullItemsBeginCount,
8669             suballocations1st.end(),
8670             refSuballoc,
8671             VmaSuballocationOffsetLess());
8672         if (it != suballocations1st.end())
8673         {
8674             it->type = VMA_SUBALLOCATION_TYPE_FREE;
8675             it->userData = VMA_NULL;
8676             ++m_1stNullItemsMiddleCount;
8677             m_SumFreeSize += it->size;
8678             CleanupAfterFree();
8679             return;
8680         }
8681     }
8682 
8683     if (m_2ndVectorMode != SECOND_VECTOR_EMPTY)
8684     {
8685         // Item from the middle of 2nd vector.
8686         const SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
8687             VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
8688             VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
8689         if (it != suballocations2nd.end())
8690         {
8691             it->type = VMA_SUBALLOCATION_TYPE_FREE;
8692             it->userData = VMA_NULL;
8693             ++m_2ndNullItemsCount;
8694             m_SumFreeSize += it->size;
8695             CleanupAfterFree();
8696             return;
8697         }
8698     }
8699 
8700     VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
8701 }
8702 
8703 void VmaBlockMetadata_Linear::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo)
8704 {
8705     outInfo.offset = (VkDeviceSize)allocHandle - 1;
8706     VmaSuballocation& suballoc = FindSuballocation(outInfo.offset);
8707     outInfo.size = suballoc.size;
8708     outInfo.pUserData = suballoc.userData;
8709 }
8710 
8711 void* VmaBlockMetadata_Linear::GetAllocationUserData(VmaAllocHandle allocHandle) const
8712 {
8713     return FindSuballocation((VkDeviceSize)allocHandle - 1).userData;
8714 }
8715 
8716 VmaAllocHandle VmaBlockMetadata_Linear::GetAllocationListBegin() const
8717 {
8718     // Function only used for defragmentation, which is disabled for this algorithm
8719     VMA_ASSERT(0);
8720     return VK_NULL_HANDLE;
8721 }
8722 
8723 VmaAllocHandle VmaBlockMetadata_Linear::GetNextAllocation(VmaAllocHandle prevAlloc) const
8724 {
8725     // Function only used for defragmentation, which is disabled for this algorithm
8726     VMA_ASSERT(0);
8727     return VK_NULL_HANDLE;
8728 }
8729 
8730 VkDeviceSize VmaBlockMetadata_Linear::GetNextFreeRegionSize(VmaAllocHandle alloc) const
8731 {
8732     // Function only used for defragmentation, which is disabled for this algorithm
8733     VMA_ASSERT(0);
8734     return 0;
8735 }
8736 
8737 void VmaBlockMetadata_Linear::Clear()
8738 {
8739     m_SumFreeSize = GetSize();
8740     m_Suballocations0.clear();
8741     m_Suballocations1.clear();
8742     // Leaving m_1stVectorIndex unchanged - it doesn't matter.
8743     m_2ndVectorMode = SECOND_VECTOR_EMPTY;
8744     m_1stNullItemsBeginCount = 0;
8745     m_1stNullItemsMiddleCount = 0;
8746     m_2ndNullItemsCount = 0;
8747 }
8748 
8749 void VmaBlockMetadata_Linear::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData)
8750 {
8751     VmaSuballocation& suballoc = FindSuballocation((VkDeviceSize)allocHandle - 1);
8752     suballoc.userData = userData;
8753 }
8754 
8755 void VmaBlockMetadata_Linear::DebugLogAllAllocations() const
8756 {
8757     const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8758     for (auto it = suballocations1st.begin() + m_1stNullItemsBeginCount; it != suballocations1st.end(); ++it)
8759         if (it->type != VMA_SUBALLOCATION_TYPE_FREE)
8760             DebugLogAllocation(it->offset, it->size, it->userData);
8761 
8762     const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8763     for (auto it = suballocations2nd.begin(); it != suballocations2nd.end(); ++it)
8764         if (it->type != VMA_SUBALLOCATION_TYPE_FREE)
8765             DebugLogAllocation(it->offset, it->size, it->userData);
8766 }
8767 
8768 VmaSuballocation& VmaBlockMetadata_Linear::FindSuballocation(VkDeviceSize offset) const
8769 {
8770     const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8771     const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8772 
8773     VmaSuballocation refSuballoc;
8774     refSuballoc.offset = offset;
8775     // Rest of members stays uninitialized intentionally for better performance.
8776 
8777     // Item from the 1st vector.
8778     {
8779         SuballocationVectorType::const_iterator it = VmaBinaryFindSorted(
8780             suballocations1st.begin() + m_1stNullItemsBeginCount,
8781             suballocations1st.end(),
8782             refSuballoc,
8783             VmaSuballocationOffsetLess());
8784         if (it != suballocations1st.end())
8785         {
8786             return const_cast<VmaSuballocation&>(*it);
8787         }
8788     }
8789 
8790     if (m_2ndVectorMode != SECOND_VECTOR_EMPTY)
8791     {
8792         // Item from the 2nd vector.
8793         SuballocationVectorType::const_iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
8794             VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
8795             VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
8796         if (it != suballocations2nd.end())
8797         {
8798             return const_cast<VmaSuballocation&>(*it);
8799         }
8800     }
8801 
8802     VMA_ASSERT(0 && "Allocation not found in linear allocator!");
8803     return const_cast<VmaSuballocation&>(suballocations1st.back()); // Should never occur.
8804 }
8805 
8806 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
8807 {
8808     const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
8809     const size_t suballocCount = AccessSuballocations1st().size();
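    // Compact only when the vector is non-trivial (> 32 items) and null items are at
    // least 1.5x the live ones, e.g. 100 items with 60 null: 60*2 = 120 >= (100-60)*3
    // = 120, so compaction runs; with 59 null items (118 < 123) it does not.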
8810     return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
8811 }
8812 
8813 void VmaBlockMetadata_Linear::CleanupAfterFree()
8814 {
8815     SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8816     SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8817 
8818     if (IsEmpty())
8819     {
8820         suballocations1st.clear();
8821         suballocations2nd.clear();
8822         m_1stNullItemsBeginCount = 0;
8823         m_1stNullItemsMiddleCount = 0;
8824         m_2ndNullItemsCount = 0;
8825         m_2ndVectorMode = SECOND_VECTOR_EMPTY;
8826     }
8827     else
8828     {
8829         const size_t suballoc1stCount = suballocations1st.size();
8830         const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
8831         VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
8832 
8833         // Find more null items at the beginning of 1st vector.
8834         while (m_1stNullItemsBeginCount < suballoc1stCount &&
8835             suballocations1st[m_1stNullItemsBeginCount].type == VMA_SUBALLOCATION_TYPE_FREE)
8836         {
8837             ++m_1stNullItemsBeginCount;
8838             --m_1stNullItemsMiddleCount;
8839         }
8840 
8841         // Find more null items at the end of 1st vector.
8842         while (m_1stNullItemsMiddleCount > 0 &&
8843             suballocations1st.back().type == VMA_SUBALLOCATION_TYPE_FREE)
8844         {
8845             --m_1stNullItemsMiddleCount;
8846             suballocations1st.pop_back();
8847         }
8848 
8849         // Find more null items at the end of 2nd vector.
8850         while (m_2ndNullItemsCount > 0 &&
8851             suballocations2nd.back().type == VMA_SUBALLOCATION_TYPE_FREE)
8852         {
8853             --m_2ndNullItemsCount;
8854             suballocations2nd.pop_back();
8855         }
8856 
8857         // Find more null items at the beginning of 2nd vector.
8858         while (m_2ndNullItemsCount > 0 &&
8859             suballocations2nd[0].type == VMA_SUBALLOCATION_TYPE_FREE)
8860         {
8861             --m_2ndNullItemsCount;
8862             VmaVectorRemove(suballocations2nd, 0);
8863         }
8864 
8865         if (ShouldCompact1st())
8866         {
8867             const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
8868             size_t srcIndex = m_1stNullItemsBeginCount;
8869             for (size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
8870             {
8871                 while (suballocations1st[srcIndex].type == VMA_SUBALLOCATION_TYPE_FREE)
8872                 {
8873                     ++srcIndex;
8874                 }
8875                 if (dstIndex != srcIndex)
8876                 {
8877                     suballocations1st[dstIndex] = suballocations1st[srcIndex];
8878                 }
8879                 ++srcIndex;
8880             }
8881             suballocations1st.resize(nonNullItemCount);
8882             m_1stNullItemsBeginCount = 0;
8883             m_1stNullItemsMiddleCount = 0;
8884         }
8885 
8886         // 2nd vector became empty.
8887         if (suballocations2nd.empty())
8888         {
8889             m_2ndVectorMode = SECOND_VECTOR_EMPTY;
8890         }
8891 
8892         // 1st vector became empty.
8893         if (suballocations1st.size() - m_1stNullItemsBeginCount == 0)
8894         {
8895             suballocations1st.clear();
8896             m_1stNullItemsBeginCount = 0;
8897 
8898             if (!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8899             {
8900                 // Swap 1st with 2nd. Now 2nd is empty.
8901                 m_2ndVectorMode = SECOND_VECTOR_EMPTY;
8902                 m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
8903                 while (m_1stNullItemsBeginCount < suballocations2nd.size() &&
8904                     suballocations2nd[m_1stNullItemsBeginCount].type == VMA_SUBALLOCATION_TYPE_FREE)
8905                 {
8906                     ++m_1stNullItemsBeginCount;
8907                     --m_1stNullItemsMiddleCount;
8908                 }
8909                 m_2ndNullItemsCount = 0;
8910                 m_1stVectorIndex ^= 1;
8911             }
8912         }
8913     }
8914 
8915     VMA_HEAVY_ASSERT(Validate());
8916 }
8917 
8918 bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
8919     VkDeviceSize allocSize,
8920     VkDeviceSize allocAlignment,
8921     VmaSuballocationType allocType,
8922     uint32_t strategy,
8923     VmaAllocationRequest* pAllocationRequest)
8924 {
8925     const VkDeviceSize blockSize = GetSize();
8926     const VkDeviceSize debugMargin = GetDebugMargin();
8927     const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity();
8928     SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8929     SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8930 
8931     if (m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8932     {
8933         // Try to allocate at the end of 1st vector.
8934 
8935         VkDeviceSize resultBaseOffset = 0;
8936         if (!suballocations1st.empty())
8937         {
8938             const VmaSuballocation& lastSuballoc = suballocations1st.back();
8939             resultBaseOffset = lastSuballoc.offset + lastSuballoc.size + debugMargin;
8940         }
8941 
8942         // Start from offset equal to beginning of free space.
8943         VkDeviceSize resultOffset = resultBaseOffset;
8944 
8945         // Apply alignment.
8946         resultOffset = VmaAlignUp(resultOffset, allocAlignment);
8947 
8948         // Check previous suballocations for BufferImageGranularity conflicts.
8949         // Make bigger alignment if necessary.
8950         if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations1st.empty())
8951         {
8952             bool bufferImageGranularityConflict = false;
8953             for (size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
8954             {
8955                 const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
8956                 if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8957                 {
8958                     if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8959                     {
8960                         bufferImageGranularityConflict = true;
8961                         break;
8962                     }
8963                 }
8964                 else
8965                     // Already on previous page.
8966                     break;
8967             }
8968             if (bufferImageGranularityConflict)
8969             {
8970                 resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
8971             }
8972         }
8973 
8974         const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
8975             suballocations2nd.back().offset : blockSize;
8976 
8977         // There is enough free space at the end after alignment.
8978         if (resultOffset + allocSize + debugMargin <= freeSpaceEnd)
8979         {
8980             // Check next suballocations for BufferImageGranularity conflicts.
8981             // If conflict exists, allocation cannot be made here.
8982             if ((allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity) && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8983             {
8984                 for (size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
8985                 {
8986                     const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
8987                     if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8988                     {
8989                         if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8990                         {
8991                             return false;
8992                         }
8993                     }
8994                     else
8995                     {
8996                         // Already on next page.
8997                         break;
8998                     }
8999                 }
9000             }
9001 
9002             // All tests passed: Success.
9003             pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1);
9004             // pAllocationRequest->item, customData unused.
9005             pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
9006             return true;
9007         }
9008     }
9009 
9010     // Wrap around to the end of the 2nd vector. Try to allocate there, treating
9011     // the beginning of the 1st vector as the end of the free space.
9012     if (m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9013     {
9014         VMA_ASSERT(!suballocations1st.empty());
9015 
9016         VkDeviceSize resultBaseOffset = 0;
9017         if (!suballocations2nd.empty())
9018         {
9019             const VmaSuballocation& lastSuballoc = suballocations2nd.back();
9020             resultBaseOffset = lastSuballoc.offset + lastSuballoc.size + debugMargin;
9021         }
9022 
9023         // Start from offset equal to beginning of free space.
9024         VkDeviceSize resultOffset = resultBaseOffset;
9025 
9026         // Apply alignment.
9027         resultOffset = VmaAlignUp(resultOffset, allocAlignment);
9028 
9029         // Check previous suballocations for BufferImageGranularity conflicts.
9030         // Make bigger alignment if necessary.
9031         if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty())
9032         {
9033             bool bufferImageGranularityConflict = false;
9034             for (size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
9035             {
9036                 const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
9037                 if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9038                 {
9039                     if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9040                     {
9041                         bufferImageGranularityConflict = true;
9042                         break;
9043                     }
9044                 }
9045                 else
9046                     // Already on previous page.
9047                     break;
9048             }
9049             if (bufferImageGranularityConflict)
9050             {
9051                 resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
9052             }
9053         }
9054 
9055         size_t index1st = m_1stNullItemsBeginCount;
9056 
9057         // There is enough free space at the end after alignment.
9058         if ((index1st == suballocations1st.size() && resultOffset + allocSize + debugMargin <= blockSize) ||
9059             (index1st < suballocations1st.size() && resultOffset + allocSize + debugMargin <= suballocations1st[index1st].offset))
9060         {
9061             // Check next suballocations for BufferImageGranularity conflicts.
9062             // If conflict exists, allocation cannot be made here.
9063             if (allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity)
9064             {
9065                 for (size_t nextSuballocIndex = index1st;
9066                     nextSuballocIndex < suballocations1st.size();
9067                     nextSuballocIndex++)
9068                 {
9069                     const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
9070                     if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9071                     {
9072                         if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9073                         {
9074                             return false;
9075                         }
9076                     }
9077                     else
9078                     {
9079                         // Already on next page.
9080                         break;
9081                     }
9082                 }
9083             }
9084 
9085             // All tests passed: Success.
9086             pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1);
9087             pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
9088             // pAllocationRequest->item, customData unused.
9089             return true;
9090         }
9091     }
9092 
9093     return false;
9094 }
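
/*
A worked example of the bufferImageGranularity handling above, assuming a
granularity of 4096: if a previously placed buffer ends at offset 100 and the
incoming request is an optimal-tiling image, an offset of 200 would put both
resources on the same 4096-byte "page", which VmaIsBufferImageGranularityConflict()
treats as a conflict, so resultOffset is aligned up to 4096 instead.
*/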
9095 
9096 bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
9097     VkDeviceSize allocSize,
9098     VkDeviceSize allocAlignment,
9099     VmaSuballocationType allocType,
9100     uint32_t strategy,
9101     VmaAllocationRequest* pAllocationRequest)
9102 {
9103     const VkDeviceSize blockSize = GetSize();
9104     const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity();
9105     SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9106     SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9107 
9108     if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9109     {
9110         VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
9111         return false;
9112     }
9113 
9114     // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
9115     if (allocSize > blockSize)
9116     {
9117         return false;
9118     }
9119     VkDeviceSize resultBaseOffset = blockSize - allocSize;
9120     if (!suballocations2nd.empty())
9121     {
9122         const VmaSuballocation& lastSuballoc = suballocations2nd.back();
9123         resultBaseOffset = lastSuballoc.offset - allocSize;
9124         if (allocSize > lastSuballoc.offset)
9125         {
9126             return false;
9127         }
9128     }
9129 
9130     // Start from offset equal to end of free space.
9131     VkDeviceSize resultOffset = resultBaseOffset;
9132 
9133     const VkDeviceSize debugMargin = GetDebugMargin();
9134 
9135     // Apply debugMargin at the end.
9136     if (debugMargin > 0)
9137     {
9138         if (resultOffset < debugMargin)
9139         {
9140             return false;
9141         }
9142         resultOffset -= debugMargin;
9143     }
9144 
9145     // Apply alignment.
9146     resultOffset = VmaAlignDown(resultOffset, allocAlignment);
9147 
9148     // Check next suballocations from 2nd for BufferImageGranularity conflicts.
9149     // Make bigger alignment if necessary.
9150     if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty())
9151     {
9152         bool bufferImageGranularityConflict = false;
9153         for (size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
9154         {
9155             const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
9156             if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9157             {
9158                 if (VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
9159                 {
9160                     bufferImageGranularityConflict = true;
9161                     break;
9162                 }
9163             }
9164             else
9165                 // Already on next page.
9166                 break;
9167         }
9168         if (bufferImageGranularityConflict)
9169         {
9170             resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
9171         }
9172     }
9173 
9174     // There is enough free space.
9175     const VkDeviceSize endOf1st = !suballocations1st.empty() ?
9176         suballocations1st.back().offset + suballocations1st.back().size :
9177         0;
9178     if (endOf1st + debugMargin <= resultOffset)
9179     {
9180         // Check previous suballocations for BufferImageGranularity conflicts.
9181         // If conflict exists, allocation cannot be made here.
9182         if (bufferImageGranularity > 1)
9183         {
9184             for (size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
9185             {
9186                 const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
9187                 if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9188                 {
9189                     if (VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
9190                     {
9191                         return false;
9192                     }
9193                 }
9194                 else
9195                 {
9196                     // Already on previous page.
9197                     break;
9198                 }
9199             }
9200         }
9201 
9202         // All tests passed: Success.
9203         pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1);
9204         // pAllocationRequest->item unused.
9205         pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
9206         return true;
9207     }
9208 
9209     return false;
9210 }
9211 #endif // _VMA_BLOCK_METADATA_LINEAR_FUNCTIONS
9212 #endif // _VMA_BLOCK_METADATA_LINEAR
9213 
9214 #if 0
9215 #ifndef _VMA_BLOCK_METADATA_BUDDY
9216 /*
9217 - GetSize() is the original size of the allocated memory block.
9218 - m_UsableSize is this size aligned down to a power of two.
9219   All allocations and calculations happen relative to m_UsableSize.
9220 - GetUnusableSize() is the difference between them.
9221   It is reported as a separate, unused range, not available for allocations.
9222 
9223 The node at level 0 has size = m_UsableSize.
9224 Each subsequent level contains nodes half the size of the previous level's.
9225 m_LevelCount is the maximum number of levels to use in the current object.
9226 */
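/*
A worked example of the layout above, assuming Init() is called with size = 1000:
m_UsableSize = 512 (1000 aligned down to a power of two) and GetUnusableSize() =
488. Level 0 then holds one 512-byte node, level 1 two 256-byte nodes, level 2
four 128-byte nodes, and so on down to the minimum node size (16 bytes here,
1 byte for virtual blocks).
*/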
9227 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
9228 {
9229     VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
9230 public:
9231     VmaBlockMetadata_Buddy(const VkAllocationCallbacks* pAllocationCallbacks,
9232         VkDeviceSize bufferImageGranularity, bool isVirtual);
9233     virtual ~VmaBlockMetadata_Buddy();
9234 
9235     size_t GetAllocationCount() const override { return m_AllocationCount; }
9236     VkDeviceSize GetSumFreeSize() const override { return m_SumFreeSize + GetUnusableSize(); }
9237     bool IsEmpty() const override { return m_Root->type == Node::TYPE_FREE; }
9238     VkResult CheckCorruption(const void* pBlockData) override { return VK_ERROR_FEATURE_NOT_PRESENT; }
9239     VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return (VkDeviceSize)allocHandle - 1; }
9240     void DebugLogAllAllocations() const override { DebugLogAllAllocationNode(m_Root, 0); }
9241 
9242     void Init(VkDeviceSize size) override;
9243     bool Validate() const override;
9244 
9245     void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override;
9246     void AddStatistics(VmaStatistics& inoutStats) const override;
9247 
9248 #if VMA_STATS_STRING_ENABLED
9249     void PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const override;
9250 #endif
9251 
9252     bool CreateAllocationRequest(
9253         VkDeviceSize allocSize,
9254         VkDeviceSize allocAlignment,
9255         bool upperAddress,
9256         VmaSuballocationType allocType,
9257         uint32_t strategy,
9258         VmaAllocationRequest* pAllocationRequest) override;
9259 
9260     void Alloc(
9261         const VmaAllocationRequest& request,
9262         VmaSuballocationType type,
9263         void* userData) override;
9264 
9265     void Free(VmaAllocHandle allocHandle) override;
9266     void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override;
9267     void* GetAllocationUserData(VmaAllocHandle allocHandle) const override;
9268     VmaAllocHandle GetAllocationListBegin() const override;
9269     VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override;
9270     void Clear() override;
9271     void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override;
9272 
9273 private:
9274     static const size_t MAX_LEVELS = 48;
9275 
9276     struct ValidationContext
9277     {
9278         size_t calculatedAllocationCount = 0;
9279         size_t calculatedFreeCount = 0;
9280         VkDeviceSize calculatedSumFreeSize = 0;
9281     };
9282     struct Node
9283     {
9284         VkDeviceSize offset;
9285         enum TYPE
9286         {
9287             TYPE_FREE,
9288             TYPE_ALLOCATION,
9289             TYPE_SPLIT,
9290             TYPE_COUNT
9291         } type;
9292         Node* parent;
9293         Node* buddy;
9294 
9295         union
9296         {
9297             struct
9298             {
9299                 Node* prev;
9300                 Node* next;
9301             } free;
9302             struct
9303             {
9304                 void* userData;
9305             } allocation;
9306             struct
9307             {
9308                 Node* leftChild;
9309             } split;
9310         };
9311     };
9312 
9313     // Size of the memory block aligned down to a power of two.
9314     VkDeviceSize m_UsableSize;
9315     uint32_t m_LevelCount;
9316     VmaPoolAllocator<Node> m_NodeAllocator;
9317     Node* m_Root;
9318     struct
9319     {
9320         Node* front;
9321         Node* back;
9322     } m_FreeList[MAX_LEVELS];
9323 
9324     // Number of nodes in the tree with type == TYPE_ALLOCATION.
9325     size_t m_AllocationCount;
9326     // Number of nodes in the tree with type == TYPE_FREE.
9327     size_t m_FreeCount;
9328     // Doesn't include space wasted due to internal fragmentation - allocation sizes are just aligned up to node sizes.
9329     // Doesn't include unusable size.
9330     VkDeviceSize m_SumFreeSize;
9331 
9332     VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
9333     VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
9334 
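    // For illustration, assuming VmaAlignUp rounds up to a multiple and VmaNextPow2 rounds up
    // to the next power of two: a non-virtual request of 20 bytes becomes 32 (20 -> 32 -> 32),
    // and a request of 33 bytes becomes 64 (33 -> 48 -> 64).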
9335     VkDeviceSize AlignAllocationSize(VkDeviceSize size) const
9336     {
9337         if (!IsVirtual())
9338         {
9339             size = VmaAlignUp(size, (VkDeviceSize)16);
9340         }
9341         return VmaNextPow2(size);
9342     }
9343     Node* FindAllocationNode(VkDeviceSize offset, uint32_t& outLevel) const;
9344     void DeleteNodeChildren(Node* node);
9345     bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
9346     uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
9347     void AddNodeToDetailedStatistics(VmaDetailedStatistics& inoutStats, const Node* node, VkDeviceSize levelNodeSize) const;
9348     // Adds node to the front of FreeList at given level.
9349     // node->type must be FREE.
9350     // node->free.prev, next can be undefined.
9351     void AddToFreeListFront(uint32_t level, Node* node);
9352     // Removes node from FreeList at given level.
9353     // node->type must be FREE.
9354     // node->free.prev, next stay untouched.
9355     void RemoveFromFreeList(uint32_t level, Node* node);
9356     void DebugLogAllAllocationNode(Node* node, uint32_t level) const;
9357 
9358 #if VMA_STATS_STRING_ENABLED
9359     void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
9360 #endif
9361 };
9362 
9363 #ifndef _VMA_BLOCK_METADATA_BUDDY_FUNCTIONS
9364 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(const VkAllocationCallbacks* pAllocationCallbacks,
9365     VkDeviceSize bufferImageGranularity, bool isVirtual)
9366     : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual),
9367     m_NodeAllocator(pAllocationCallbacks, 32), // firstBlockCapacity
9368     m_Root(VMA_NULL),
9369     m_AllocationCount(0),
9370     m_FreeCount(1),
9371     m_SumFreeSize(0)
9372 {
9373     memset(m_FreeList, 0, sizeof(m_FreeList));
9374 }
9375 
9376 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
9377 {
9378     DeleteNodeChildren(m_Root);
9379     m_NodeAllocator.Free(m_Root);
9380 }
9381 
9382 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
9383 {
9384     VmaBlockMetadata::Init(size);
9385 
9386     m_UsableSize = VmaPrevPow2(size);
9387     m_SumFreeSize = m_UsableSize;
9388 
9389     // Calculate m_LevelCount.
9390     const VkDeviceSize minNodeSize = IsVirtual() ? 1 : 16;
9391     m_LevelCount = 1;
9392     while (m_LevelCount < MAX_LEVELS &&
9393         LevelToNodeSize(m_LevelCount) >= minNodeSize)
9394     {
9395         ++m_LevelCount;
9396     }
9397 
9398     Node* rootNode = m_NodeAllocator.Alloc();
9399     rootNode->offset = 0;
9400     rootNode->type = Node::TYPE_FREE;
9401     rootNode->parent = VMA_NULL;
9402     rootNode->buddy = VMA_NULL;
9403 
9404     m_Root = rootNode;
9405     AddToFreeListFront(0, rootNode);
9406 }
9407 
9408 bool VmaBlockMetadata_Buddy::Validate() const
9409 {
9410     // Validate tree.
9411     ValidationContext ctx;
9412     if (!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
9413     {
9414         VMA_VALIDATE(false && "ValidateNode failed.");
9415     }
9416     VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
9417     VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
9418 
9419     // Validate free node lists.
9420     for (uint32_t level = 0; level < m_LevelCount; ++level)
9421     {
9422         VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
9423             m_FreeList[level].front->free.prev == VMA_NULL);
9424 
9425         for (Node* node = m_FreeList[level].front;
9426             node != VMA_NULL;
9427             node = node->free.next)
9428         {
9429             VMA_VALIDATE(node->type == Node::TYPE_FREE);
9430 
9431             if (node->free.next == VMA_NULL)
9432             {
9433                 VMA_VALIDATE(m_FreeList[level].back == node);
9434             }
9435             else
9436             {
9437                 VMA_VALIDATE(node->free.next->free.prev == node);
9438             }
9439         }
9440     }
9441 
9442     // Validate that free lists at higher levels are empty.
9443     for (uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
9444     {
9445         VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
9446     }
9447 
9448     return true;
9449 }
9450 
9451 void VmaBlockMetadata_Buddy::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const
9452 {
9453     inoutStats.statistics.blockCount++;
9454     inoutStats.statistics.blockBytes += GetSize();
9455 
9456     AddNodeToDetailedStatistics(inoutStats, m_Root, LevelToNodeSize(0));
9457 
9458     const VkDeviceSize unusableSize = GetUnusableSize();
9459     if (unusableSize > 0)
9460         VmaAddDetailedStatisticsUnusedRange(inoutStats, unusableSize);
9461 }
9462 
9463 void VmaBlockMetadata_Buddy::AddStatistics(VmaStatistics& inoutStats) const
9464 {
9465     inoutStats.blockCount++;
9466     inoutStats.allocationCount += (uint32_t)m_AllocationCount;
9467     inoutStats.blockBytes += GetSize();
9468     inoutStats.allocationBytes += GetSize() - m_SumFreeSize;
9469 }
9470 
9471 #if VMA_STATS_STRING_ENABLED
9472 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const
9473 {
9474     VmaDetailedStatistics stats;
9475     VmaClearDetailedStatistics(stats);
9476     AddDetailedStatistics(stats);
9477 
9478     PrintDetailedMap_Begin(
9479         json,
9480         stats.statistics.blockBytes - stats.statistics.allocationBytes,
9481         stats.statistics.allocationCount,
9482         stats.unusedRangeCount,
9483         mapRefCount);
9484 
9485     PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
9486 
9487     const VkDeviceSize unusableSize = GetUnusableSize();
9488     if (unusableSize > 0)
9489     {
9490         PrintDetailedMap_UnusedRange(json,
9491             m_UsableSize, // offset
9492             unusableSize); // size
9493     }
9494 
9495     PrintDetailedMap_End(json);
9496 }
9497 #endif // VMA_STATS_STRING_ENABLED
9498 
9499 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
9500     VkDeviceSize allocSize,
9501     VkDeviceSize allocAlignment,
9502     bool upperAddress,
9503     VmaSuballocationType allocType,
9504     uint32_t strategy,
9505     VmaAllocationRequest* pAllocationRequest)
9506 {
9507     VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
9508 
9509     allocSize = AlignAllocationSize(allocSize);
9510 
9511     // Simple way to respect bufferImageGranularity; may be optimized some day.
9512     // Applies whenever the allocation might be an OPTIMAL image...
9513     if (allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
9514         allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
9515         allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
9516     {
9517         allocAlignment = VMA_MAX(allocAlignment, GetBufferImageGranularity());
9518         allocSize = VmaAlignUp(allocSize, GetBufferImageGranularity());
9519     }
9520 
9521     if (allocSize > m_UsableSize)
9522     {
9523         return false;
9524     }
9525 
9526     const uint32_t targetLevel = AllocSizeToLevel(allocSize);
9527     for (uint32_t level = targetLevel + 1; level--; ) // check levels targetLevel..0, inclusive
9528     {
9529         for (Node* freeNode = m_FreeList[level].front;
9530             freeNode != VMA_NULL;
9531             freeNode = freeNode->free.next)
9532         {
9533             if (freeNode->offset % allocAlignment == 0)
9534             {
9535                 pAllocationRequest->type = VmaAllocationRequestType::Normal;
9536                 pAllocationRequest->allocHandle = (VmaAllocHandle)(freeNode->offset + 1);
9537                 pAllocationRequest->size = allocSize;
9538                 pAllocationRequest->customData = (void*)(uintptr_t)level;
9539                 return true;
9540             }
9541         }
9542     }
9543 
9544     return false;
9545 }
9546 
9547 void VmaBlockMetadata_Buddy::Alloc(
9548     const VmaAllocationRequest& request,
9549     VmaSuballocationType type,
9550     void* userData)
9551 {
9552     VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
9553 
9554     const uint32_t targetLevel = AllocSizeToLevel(request.size);
9555     uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
9556 
9557     Node* currNode = m_FreeList[currLevel].front;
9558     VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
9559     const VkDeviceSize offset = (VkDeviceSize)request.allocHandle - 1;
9560     while (currNode->offset != offset)
9561     {
9562         currNode = currNode->free.next;
9563         VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
9564     }
9565 
9566     // Go down, splitting free nodes.
9567     while (currLevel < targetLevel)
9568     {
9569         // currNode is already first free node at currLevel.
9570         // Remove it from list of free nodes at this currLevel.
9571         RemoveFromFreeList(currLevel, currNode);
9572 
9573         const uint32_t childrenLevel = currLevel + 1;
9574 
9575         // Create two free sub-nodes.
9576         Node* leftChild = m_NodeAllocator.Alloc();
9577         Node* rightChild = m_NodeAllocator.Alloc();
9578 
9579         leftChild->offset = currNode->offset;
9580         leftChild->type = Node::TYPE_FREE;
9581         leftChild->parent = currNode;
9582         leftChild->buddy = rightChild;
9583 
9584         rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
9585         rightChild->type = Node::TYPE_FREE;
9586         rightChild->parent = currNode;
9587         rightChild->buddy = leftChild;
9588 
9589         // Convert current currNode to split type.
9590         currNode->type = Node::TYPE_SPLIT;
9591         currNode->split.leftChild = leftChild;
9592 
9593         // Add child nodes to free list. Order is important!
9594         AddToFreeListFront(childrenLevel, rightChild);
9595         AddToFreeListFront(childrenLevel, leftChild);
9596 
9597         ++m_FreeCount;
9598         ++currLevel;
9599         currNode = m_FreeList[currLevel].front;
9600 
9601         /*
9602         We can be sure that currNode, as the left child of the previously split node,
9603         also fulfills the alignment requirement.
9604         */
9605     }
9606 
9607     // Remove from free list.
9608     VMA_ASSERT(currLevel == targetLevel &&
9609         currNode != VMA_NULL &&
9610         currNode->type == Node::TYPE_FREE);
9611     RemoveFromFreeList(currLevel, currNode);
9612 
9613     // Convert to allocation node.
9614     currNode->type = Node::TYPE_ALLOCATION;
9615     currNode->allocation.userData = userData;
9616 
9617     ++m_AllocationCount;
9618     --m_FreeCount;
9619     m_SumFreeSize -= request.size;
9620 }
9621 
9622 void VmaBlockMetadata_Buddy::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo)
9623 {
9624     uint32_t level = 0;
9625     outInfo.offset = (VkDeviceSize)allocHandle - 1;
9626     const Node* const node = FindAllocationNode(outInfo.offset, level);
9627     outInfo.size = LevelToNodeSize(level);
9628     outInfo.pUserData = node->allocation.userData;
9629 }
9630 
9631 void* VmaBlockMetadata_Buddy::GetAllocationUserData(VmaAllocHandle allocHandle) const
9632 {
9633     uint32_t level = 0;
9634     const Node* const node = FindAllocationNode((VkDeviceSize)allocHandle - 1, level);
9635     return node->allocation.userData;
9636 }
9637 
9638 VmaAllocHandle VmaBlockMetadata_Buddy::GetAllocationListBegin() const
9639 {
9640     // Function only used for defragmentation, which is disabled for this algorithm
9641     return VK_NULL_HANDLE;
9642 }
9643 
9644 VmaAllocHandle VmaBlockMetadata_Buddy::GetNextAllocation(VmaAllocHandle prevAlloc) const
9645 {
9646     // Function only used for defragmentation, which is disabled for this algorithm
9647     return VK_NULL_HANDLE;
9648 }
9649 
9650 void VmaBlockMetadata_Buddy::DeleteNodeChildren(Node* node)
9651 {
9652     if (node->type == Node::TYPE_SPLIT)
9653     {
9654         DeleteNodeChildren(node->split.leftChild->buddy);
9655         DeleteNodeChildren(node->split.leftChild);
9657         m_NodeAllocator.Free(node->split.leftChild->buddy);
9658         m_NodeAllocator.Free(node->split.leftChild);
9659     }
9660 }
9661 
9662 void VmaBlockMetadata_Buddy::Clear()
9663 {
9664     DeleteNodeChildren(m_Root);
9665     m_Root->type = Node::TYPE_FREE;
9666     m_AllocationCount = 0;
9667     m_FreeCount = 1;
9668     m_SumFreeSize = m_UsableSize;
9669 }
9670 
9671 void VmaBlockMetadata_Buddy::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData)
9672 {
9673     uint32_t level = 0;
9674     Node* const node = FindAllocationNode((VkDeviceSize)allocHandle - 1, level);
9675     node->allocation.userData = userData;
9676 }
9677 
9678 VmaBlockMetadata_Buddy::Node* VmaBlockMetadata_Buddy::FindAllocationNode(VkDeviceSize offset, uint32_t& outLevel) const
9679 {
9680     Node* node = m_Root;
9681     VkDeviceSize nodeOffset = 0;
9682     outLevel = 0;
9683     VkDeviceSize levelNodeSize = LevelToNodeSize(0);
9684     while (node->type == Node::TYPE_SPLIT)
9685     {
9686         const VkDeviceSize nextLevelNodeSize = levelNodeSize >> 1;
9687         if (offset < nodeOffset + nextLevelNodeSize)
9688         {
9689             node = node->split.leftChild;
9690         }
9691         else
9692         {
9693             node = node->split.leftChild->buddy;
9694             nodeOffset += nextLevelNodeSize;
9695         }
9696         ++outLevel;
9697         levelNodeSize = nextLevelNodeSize;
9698     }
9699 
9700     VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
9701     return node;
9702 }
9703 
9704 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
9705 {
9706     VMA_VALIDATE(level < m_LevelCount);
9707     VMA_VALIDATE(curr->parent == parent);
9708     VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
9709     VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
9710     switch (curr->type)
9711     {
9712     case Node::TYPE_FREE:
9713         // curr->free.prev, next are validated separately.
9714         ctx.calculatedSumFreeSize += levelNodeSize;
9715         ++ctx.calculatedFreeCount;
9716         break;
9717     case Node::TYPE_ALLOCATION:
9718         ++ctx.calculatedAllocationCount;
9719         if (!IsVirtual())
9720         {
9721             VMA_VALIDATE(curr->allocation.userData != VMA_NULL);
9722         }
9723         break;
9724     case Node::TYPE_SPLIT:
9725     {
9726         const uint32_t childrenLevel = level + 1;
9727         const VkDeviceSize childrenLevelNodeSize = levelNodeSize >> 1;
9728         const Node* const leftChild = curr->split.leftChild;
9729         VMA_VALIDATE(leftChild != VMA_NULL);
9730         VMA_VALIDATE(leftChild->offset == curr->offset);
9731         if (!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
9732         {
9733             VMA_VALIDATE(false && "ValidateNode for left child failed.");
9734         }
9735         const Node* const rightChild = leftChild->buddy;
9736         VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
9737         if (!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
9738         {
9739             VMA_VALIDATE(false && "ValidateNode for right child failed.");
9740         }
9741     }
9742     break;
9743     default:
9744         return false;
9745     }
9746 
9747     return true;
9748 }
9749 
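// For illustration: with m_UsableSize = 256, an allocSize of 48 yields level 2 (node size 64),
// the smallest level whose nodes still fit the request.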
9750 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
9751 {
9752     // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
9753     uint32_t level = 0;
9754     VkDeviceSize currLevelNodeSize = m_UsableSize;
9755     VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
9756     while (allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
9757     {
9758         ++level;
9759         currLevelNodeSize >>= 1;
9760         nextLevelNodeSize >>= 1;
9761     }
9762     return level;
9763 }
9764 
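// On free, the node is merged with its buddy as long as that buddy is also free, walking up
// one level per merge; the resulting node is put on the free list of the level where merging
// stopped.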
9765 void VmaBlockMetadata_Buddy::Free(VmaAllocHandle allocHandle)
9766 {
9767     uint32_t level = 0;
9768     Node* node = FindAllocationNode((VkDeviceSize)allocHandle - 1, level);
9769 
9770     ++m_FreeCount;
9771     --m_AllocationCount;
9772     m_SumFreeSize += LevelToNodeSize(level);
9773 
9774     node->type = Node::TYPE_FREE;
9775 
9776     // Join free nodes if possible.
9777     while (level > 0 && node->buddy->type == Node::TYPE_FREE)
9778     {
9779         RemoveFromFreeList(level, node->buddy);
9780         Node* const parent = node->parent;
9781 
9782         m_NodeAllocator.Free(node->buddy);
9783         m_NodeAllocator.Free(node);
9784         parent->type = Node::TYPE_FREE;
9785 
9786         node = parent;
9787         --level;
9788         --m_FreeCount;
9789     }
9790 
9791     AddToFreeListFront(level, node);
9792 }
9793 
9794 void VmaBlockMetadata_Buddy::AddNodeToDetailedStatistics(VmaDetailedStatistics& inoutStats, const Node* node, VkDeviceSize levelNodeSize) const
9795 {
9796     switch (node->type)
9797     {
9798     case Node::TYPE_FREE:
9799         VmaAddDetailedStatisticsUnusedRange(inoutStats, levelNodeSize);
9800         break;
9801     case Node::TYPE_ALLOCATION:
9802         VmaAddDetailedStatisticsAllocation(inoutStats, levelNodeSize);
9803         break;
9804     case Node::TYPE_SPLIT:
9805     {
9806         const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
9807         const Node* const leftChild = node->split.leftChild;
9808         AddNodeToDetailedStatistics(inoutStats, leftChild, childrenNodeSize);
9809         const Node* const rightChild = leftChild->buddy;
9810         AddNodeToDetailedStatistics(inoutStats, rightChild, childrenNodeSize);
9811     }
9812     break;
9813     default:
9814         VMA_ASSERT(0);
9815     }
9816 }
9817 
9818 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
9819 {
9820     VMA_ASSERT(node->type == Node::TYPE_FREE);
9821 
9822     // List is empty.
9823     Node* const frontNode = m_FreeList[level].front;
9824     if (frontNode == VMA_NULL)
9825     {
9826         VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
9827         node->free.prev = node->free.next = VMA_NULL;
9828         m_FreeList[level].front = m_FreeList[level].back = node;
9829     }
9830     else
9831     {
9832         VMA_ASSERT(frontNode->free.prev == VMA_NULL);
9833         node->free.prev = VMA_NULL;
9834         node->free.next = frontNode;
9835         frontNode->free.prev = node;
9836         m_FreeList[level].front = node;
9837     }
9838 }
9839 
9840 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
9841 {
9842     VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
9843 
9844     // It is at the front.
9845     if (node->free.prev == VMA_NULL)
9846     {
9847         VMA_ASSERT(m_FreeList[level].front == node);
9848         m_FreeList[level].front = node->free.next;
9849     }
9850     else
9851     {
9852         Node* const prevFreeNode = node->free.prev;
9853         VMA_ASSERT(prevFreeNode->free.next == node);
9854         prevFreeNode->free.next = node->free.next;
9855     }
9856 
9857     // It is at the back.
9858     if (node->free.next == VMA_NULL)
9859     {
9860         VMA_ASSERT(m_FreeList[level].back == node);
9861         m_FreeList[level].back = node->free.prev;
9862     }
9863     else
9864     {
9865         Node* const nextFreeNode = node->free.next;
9866         VMA_ASSERT(nextFreeNode->free.prev == node);
9867         nextFreeNode->free.prev = node->free.prev;
9868     }
9869 }
9870 
9871 void VmaBlockMetadata_Buddy::DebugLogAllAllocationNode(Node* node, uint32_t level) const
9872 {
9873     switch (node->type)
9874     {
9875     case Node::TYPE_FREE:
9876         break;
9877     case Node::TYPE_ALLOCATION:
9878         DebugLogAllocation(node->offset, LevelToNodeSize(level), node->allocation.userData);
9879         break;
9880     case Node::TYPE_SPLIT:
9881     {
9882         ++level;
9883         DebugLogAllAllocationNode(node->split.leftChild, level);
9884         DebugLogAllAllocationNode(node->split.leftChild->buddy, level);
9885     }
9886     break;
9887     default:
9888         VMA_ASSERT(0);
9889     }
9890 }
9891 
9892 #if VMA_STATS_STRING_ENABLED
9893 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
9894 {
9895     switch (node->type)
9896     {
9897     case Node::TYPE_FREE:
9898         PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
9899         break;
9900     case Node::TYPE_ALLOCATION:
9901         PrintDetailedMap_Allocation(json, node->offset, levelNodeSize, node->allocation.userData);
9902         break;
9903     case Node::TYPE_SPLIT:
9904     {
9905         const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
9906         const Node* const leftChild = node->split.leftChild;
9907         PrintDetailedMapNode(json, leftChild, childrenNodeSize);
9908         const Node* const rightChild = leftChild->buddy;
9909         PrintDetailedMapNode(json, rightChild, childrenNodeSize);
9910     }
9911     break;
9912     default:
9913         VMA_ASSERT(0);
9914     }
9915 }
9916 #endif // VMA_STATS_STRING_ENABLED
9917 #endif // _VMA_BLOCK_METADATA_BUDDY_FUNCTIONS
9918 #endif // _VMA_BLOCK_METADATA_BUDDY
9919 #endif // #if 0
9920 
9921 #ifndef _VMA_BLOCK_METADATA_TLSF
9922 // To avoid searching the current, larger region when the first allocation attempt fails
9923 // and to skip straight to a smaller range, pass VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT
9924 // as the strategy to CreateAllocationRequest(). When fragmentation and reuse of previous
9925 // blocks do not matter, use VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT for the fastest possible allocation time.
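// A minimal usage sketch of these strategy flags via the public API (allocator is an existing
// VmaAllocator; bufCreateInfo, buf and alloc are hypothetical caller-side variables, not part
// of this file):
//
//   VmaAllocationCreateInfo allocCreateInfo = {};
//   allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
//   allocCreateInfo.flags = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
//   VkBuffer buf;
//   VmaAllocation alloc;
//   vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);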
9926 class VmaBlockMetadata_TLSF : public VmaBlockMetadata
9927 {
9928     VMA_CLASS_NO_COPY(VmaBlockMetadata_TLSF)
9929 public:
9930     VmaBlockMetadata_TLSF(const VkAllocationCallbacks* pAllocationCallbacks,
9931         VkDeviceSize bufferImageGranularity, bool isVirtual);
9932     virtual ~VmaBlockMetadata_TLSF();
9933 
9934     size_t GetAllocationCount() const override { return m_AllocCount; }
9935     size_t GetFreeRegionsCount() const override { return m_BlocksFreeCount + 1; }
9936     VkDeviceSize GetSumFreeSize() const override { return m_BlocksFreeSize + m_NullBlock->size; }
9937     bool IsEmpty() const override { return m_NullBlock->offset == 0; }
9938     VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return ((Block*)allocHandle)->offset; };
9939 
9940     void Init(VkDeviceSize size) override;
9941     bool Validate() const override;
9942 
9943     void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override;
9944     void AddStatistics(VmaStatistics& inoutStats) const override;
9945 
9946 #if VMA_STATS_STRING_ENABLED
9947     void PrintDetailedMap(class VmaJsonWriter& json) const override;
9948 #endif
9949 
9950     bool CreateAllocationRequest(
9951         VkDeviceSize allocSize,
9952         VkDeviceSize allocAlignment,
9953         bool upperAddress,
9954         VmaSuballocationType allocType,
9955         uint32_t strategy,
9956         VmaAllocationRequest* pAllocationRequest) override;
9957 
9958     VkResult CheckCorruption(const void* pBlockData) override;
9959     void Alloc(
9960         const VmaAllocationRequest& request,
9961         VmaSuballocationType type,
9962         void* userData) override;
9963 
9964     void Free(VmaAllocHandle allocHandle) override;
9965     void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override;
9966     void* GetAllocationUserData(VmaAllocHandle allocHandle) const override;
9967     VmaAllocHandle GetAllocationListBegin() const override;
9968     VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override;
9969     VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const override;
9970     void Clear() override;
9971     void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override;
9972     void DebugLogAllAllocations() const override;
9973 
9974 private:
9975     // According to the original paper, a value of 4 or 5 is preferable:
9976     // M. Masmano, I. Ripoll, A. Crespo, and J. Real "TLSF: a New Dynamic Memory Allocator for Real-Time Systems"
9977     // http://www.gii.upv.es/tlsf/files/ecrts04_tlsf.pdf
9978     static const uint8_t SECOND_LEVEL_INDEX = 5;
9979     static const uint16_t SMALL_BUFFER_SIZE = 256;
9980     static const uint32_t INITIAL_BLOCK_ALLOC_COUNT = 16;
9981     static const uint8_t MEMORY_CLASS_SHIFT = 7;
9982     static const uint8_t MAX_MEMORY_CLASSES = 65 - MEMORY_CLASS_SHIFT;
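    // Note: SMALL_BUFFER_SIZE == 256 == 1 << (MEMORY_CLASS_SHIFT + 1), so SizeToMemoryClass()
    // maps sizes above 256 to class VMA_BITSCAN_MSB(size) - MEMORY_CLASS_SHIFT. With a 64-bit
    // VkDeviceSize the largest class is 63 - 7 = 56, and Init() stores memoryClass + 2, which
    // stays within MAX_MEMORY_CLASSES = 58.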
9983 
9984     class Block
9985     {
9986     public:
9987         VkDeviceSize offset;
9988         VkDeviceSize size;
9989         Block* prevPhysical;
9990         Block* nextPhysical;
9991 
9992         void MarkFree() { prevFree = VMA_NULL; }
9993         void MarkTaken() { prevFree = this; }
9994         bool IsFree() const { return prevFree != this; }
9995         void*& UserData() { VMA_HEAVY_ASSERT(!IsFree()); return userData; }
9996         Block*& PrevFree() { return prevFree; }
9997         Block*& NextFree() { VMA_HEAVY_ASSERT(IsFree()); return nextFree; }
9998 
9999     private:
10000         Block* prevFree; // Address of the same block here indicates that block is taken
10001         union
10002         {
10003             Block* nextFree;
10004             void* userData;
10005         };
10006     };
10007 
10008     size_t m_AllocCount;
10009     // Total number of free blocks besides null block
10010     size_t m_BlocksFreeCount;
10011     // Total size of free blocks excluding null block
10012     VkDeviceSize m_BlocksFreeSize;
10013     uint32_t m_IsFreeBitmap;
10014     uint8_t m_MemoryClasses;
10015     uint32_t m_InnerIsFreeBitmap[MAX_MEMORY_CLASSES];
10016     uint32_t m_ListsCount;
10017     /*
10018     * Class 0: lists 0-3 for small buffers
10019     * Classes 1+: lists 0-(2^SLI-1) each for normal buffers
10020     */
10021     Block** m_FreeList;
10022     VmaPoolAllocator<Block> m_BlockAllocator;
10023     Block* m_NullBlock;
10024     VmaBlockBufferImageGranularity m_GranularityHandler;
10025 
10026     uint8_t SizeToMemoryClass(VkDeviceSize size) const;
10027     uint16_t SizeToSecondIndex(VkDeviceSize size, uint8_t memoryClass) const;
10028     uint32_t GetListIndex(uint8_t memoryClass, uint16_t secondIndex) const;
10029     uint32_t GetListIndex(VkDeviceSize size) const;
10030 
10031     void RemoveFreeBlock(Block* block);
10032     void InsertFreeBlock(Block* block);
10033     void MergeBlock(Block* block, Block* prev);
10034 
10035     Block* FindFreeBlock(VkDeviceSize size, uint32_t& listIndex) const;
10036     bool CheckBlock(
10037         Block& block,
10038         uint32_t listIndex,
10039         VkDeviceSize allocSize,
10040         VkDeviceSize allocAlignment,
10041         VmaSuballocationType allocType,
10042         VmaAllocationRequest* pAllocationRequest);
10043 };
10044 
10045 #ifndef _VMA_BLOCK_METADATA_TLSF_FUNCTIONS
10046 VmaBlockMetadata_TLSF::VmaBlockMetadata_TLSF(const VkAllocationCallbacks* pAllocationCallbacks,
10047     VkDeviceSize bufferImageGranularity, bool isVirtual)
10048     : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual),
10049     m_AllocCount(0),
10050     m_BlocksFreeCount(0),
10051     m_BlocksFreeSize(0),
10052     m_IsFreeBitmap(0),
10053     m_MemoryClasses(0),
10054     m_ListsCount(0),
10055     m_FreeList(VMA_NULL),
10056     m_BlockAllocator(pAllocationCallbacks, INITIAL_BLOCK_ALLOC_COUNT),
10057     m_NullBlock(VMA_NULL),
10058     m_GranularityHandler(bufferImageGranularity) {}
10059 
10060 VmaBlockMetadata_TLSF::~VmaBlockMetadata_TLSF()
10061 {
10062     if (m_FreeList)
10063         vma_delete_array(GetAllocationCallbacks(), m_FreeList, m_ListsCount);
10064     m_GranularityHandler.Destroy(GetAllocationCallbacks());
10065 }
10066 
10067 void VmaBlockMetadata_TLSF::Init(VkDeviceSize size)
10068 {
10069     VmaBlockMetadata::Init(size);
10070 
10071     if (!IsVirtual())
10072         m_GranularityHandler.Init(GetAllocationCallbacks(), size);
10073 
10074     m_NullBlock = m_BlockAllocator.Alloc();
10075     m_NullBlock->size = size;
10076     m_NullBlock->offset = 0;
10077     m_NullBlock->prevPhysical = VMA_NULL;
10078     m_NullBlock->nextPhysical = VMA_NULL;
10079     m_NullBlock->MarkFree();
10080     m_NullBlock->NextFree() = VMA_NULL;
10081     m_NullBlock->PrevFree() = VMA_NULL;
10082     uint8_t memoryClass = SizeToMemoryClass(size);
10083     uint16_t sli = SizeToSecondIndex(size, memoryClass);
10084     m_ListsCount = (memoryClass == 0 ? 0 : (memoryClass - 1) * (1UL << SECOND_LEVEL_INDEX) + sli) + 1;
10085     if (IsVirtual())
10086         m_ListsCount += 1UL << SECOND_LEVEL_INDEX;
10087     else
10088         m_ListsCount += 4;
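    // Worked example (illustration only), size = 1 MiB, non-virtual: memoryClass = 20 - 7 = 13,
    // sli = ((1 << 20) >> 15) ^ 32 = 0, so m_ListsCount = (13 - 1) * 32 + 0 + 1 + 4 = 389.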
10089 
10090     m_MemoryClasses = memoryClass + 2;
10091     memset(m_InnerIsFreeBitmap, 0, MAX_MEMORY_CLASSES * sizeof(uint32_t));
10092 
10093     m_FreeList = vma_new_array(GetAllocationCallbacks(), Block*, m_ListsCount);
10094     memset(m_FreeList, 0, m_ListsCount * sizeof(Block*));
10095 }
10096 
10097 bool VmaBlockMetadata_TLSF::Validate() const
10098 {
10099     VMA_VALIDATE(GetSumFreeSize() <= GetSize());
10100 
10101     VkDeviceSize calculatedSize = m_NullBlock->size;
10102     VkDeviceSize calculatedFreeSize = m_NullBlock->size;
10103     size_t allocCount = 0;
10104     size_t freeCount = 0;
10105 
10106     // Check integrity of free lists
10107     for (uint32_t list = 0; list < m_ListsCount; ++list)
10108     {
10109         Block* block = m_FreeList[list];
10110         if (block != VMA_NULL)
10111         {
10112             VMA_VALIDATE(block->IsFree());
10113             VMA_VALIDATE(block->PrevFree() == VMA_NULL);
10114             while (block->NextFree())
10115             {
10116                 VMA_VALIDATE(block->NextFree()->IsFree());
10117                 VMA_VALIDATE(block->NextFree()->PrevFree() == block);
10118                 block = block->NextFree();
10119             }
10120         }
10121     }
10122 
10123     VkDeviceSize nextOffset = m_NullBlock->offset;
10124     auto validateCtx = m_GranularityHandler.StartValidation(GetAllocationCallbacks(), IsVirtual());
10125 
10126     VMA_VALIDATE(m_NullBlock->nextPhysical == VMA_NULL);
10127     if (m_NullBlock->prevPhysical)
10128     {
10129         VMA_VALIDATE(m_NullBlock->prevPhysical->nextPhysical == m_NullBlock);
10130     }
10131     // Check all blocks
10132     for (Block* prev = m_NullBlock->prevPhysical; prev != VMA_NULL; prev = prev->prevPhysical)
10133     {
10134         VMA_VALIDATE(prev->offset + prev->size == nextOffset);
10135         nextOffset = prev->offset;
10136         calculatedSize += prev->size;
10137 
10138         uint32_t listIndex = GetListIndex(prev->size);
10139         if (prev->IsFree())
10140         {
10141             ++freeCount;
10142             // Check if free block belongs to free list
10143             Block* freeBlock = m_FreeList[listIndex];
10144             VMA_VALIDATE(freeBlock != VMA_NULL);
10145 
10146             bool found = false;
10147             do
10148             {
10149                 if (freeBlock == prev)
10150                     found = true;
10151 
10152                 freeBlock = freeBlock->NextFree();
10153             } while (!found && freeBlock != VMA_NULL);
10154 
10155             VMA_VALIDATE(found);
10156             calculatedFreeSize += prev->size;
10157         }
10158         else
10159         {
10160             ++allocCount;
10161             // Check if taken block is not on a free list
10162             Block* freeBlock = m_FreeList[listIndex];
10163             while (freeBlock)
10164             {
10165                 VMA_VALIDATE(freeBlock != prev);
10166                 freeBlock = freeBlock->NextFree();
10167             }
10168 
10169             if (!IsVirtual())
10170             {
10171                 VMA_VALIDATE(m_GranularityHandler.Validate(validateCtx, prev->offset, prev->size));
10172             }
10173         }
10174 
10175         if (prev->prevPhysical)
10176         {
10177             VMA_VALIDATE(prev->prevPhysical->nextPhysical == prev);
10178         }
10179     }
10180 
10181     if (!IsVirtual())
10182     {
10183         VMA_VALIDATE(m_GranularityHandler.FinishValidation(validateCtx));
10184     }
10185 
10186     VMA_VALIDATE(nextOffset == 0);
10187     VMA_VALIDATE(calculatedSize == GetSize());
10188     VMA_VALIDATE(calculatedFreeSize == GetSumFreeSize());
10189     VMA_VALIDATE(allocCount == m_AllocCount);
10190     VMA_VALIDATE(freeCount == m_BlocksFreeCount);
10191 
10192     return true;
10193 }
10194 
10195 void VmaBlockMetadata_TLSF::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const
10196 {
10197     inoutStats.statistics.blockCount++;
10198     inoutStats.statistics.blockBytes += GetSize();
10199     if (m_NullBlock->size > 0)
10200         VmaAddDetailedStatisticsUnusedRange(inoutStats, m_NullBlock->size);
10201 
10202     for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
10203     {
10204         if (block->IsFree())
10205             VmaAddDetailedStatisticsUnusedRange(inoutStats, block->size);
10206         else
10207             VmaAddDetailedStatisticsAllocation(inoutStats, block->size);
10208     }
10209 }
10210 
10211 void VmaBlockMetadata_TLSF::AddStatistics(VmaStatistics& inoutStats) const
10212 {
10213     inoutStats.blockCount++;
10214     inoutStats.allocationCount += (uint32_t)m_AllocCount;
10215     inoutStats.blockBytes += GetSize();
10216     inoutStats.allocationBytes += GetSize() - GetSumFreeSize();
10217 }
10218 
10219 #if VMA_STATS_STRING_ENABLED
10220 void VmaBlockMetadata_TLSF::PrintDetailedMap(class VmaJsonWriter& json) const
10221 {
10222     size_t blockCount = m_AllocCount + m_BlocksFreeCount;
10223     VmaStlAllocator<Block*> allocator(GetAllocationCallbacks());
10224     VmaVector<Block*, VmaStlAllocator<Block*>> blockList(blockCount, allocator);
10225 
10226     size_t i = blockCount;
10227     for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
10228     {
10229         blockList[--i] = block;
10230     }
10231     VMA_ASSERT(i == 0);
10232 
10233     VmaDetailedStatistics stats;
10234     VmaClearDetailedStatistics(stats);
10235     AddDetailedStatistics(stats);
10236 
10237     PrintDetailedMap_Begin(json,
10238         stats.statistics.blockBytes - stats.statistics.allocationBytes,
10239         stats.statistics.allocationCount,
10240         stats.unusedRangeCount);
10241 
10242     for (; i < blockCount; ++i)
10243     {
10244         Block* block = blockList[i];
10245         if (block->IsFree())
10246             PrintDetailedMap_UnusedRange(json, block->offset, block->size);
10247         else
10248             PrintDetailedMap_Allocation(json, block->offset, block->size, block->UserData());
10249     }
10250     if (m_NullBlock->size > 0)
10251         PrintDetailedMap_UnusedRange(json, m_NullBlock->offset, m_NullBlock->size);
10252 
10253     PrintDetailedMap_End(json);
10254 }
10255 #endif
10256 
10257 bool VmaBlockMetadata_TLSF::CreateAllocationRequest(
10258     VkDeviceSize allocSize,
10259     VkDeviceSize allocAlignment,
10260     bool upperAddress,
10261     VmaSuballocationType allocType,
10262     uint32_t strategy,
10263     VmaAllocationRequest* pAllocationRequest)
10264 {
10265     VMA_ASSERT(allocSize > 0 && "Cannot allocate empty block!");
10266     VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
10267 
10268     // For small granularity round up
10269     if (!IsVirtual())
10270         m_GranularityHandler.RoundupAllocRequest(allocType, allocSize, allocAlignment);
10271 
10272     allocSize += GetDebugMargin();
10273     // Quick check for too small pool
10274     if (allocSize > GetSumFreeSize())
10275         return false;
10276 
10277     // If no free blocks in pool then check only null block
10278     if (m_BlocksFreeCount == 0)
10279         return CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest);
10280 
10281     // Round the size up to the next list bucket
10282     VkDeviceSize sizeForNextList = allocSize;
10283     VkDeviceSize smallSizeStep = SMALL_BUFFER_SIZE / (IsVirtual() ? 1 << SECOND_LEVEL_INDEX : 4);
10284     if (allocSize > SMALL_BUFFER_SIZE)
10285     {
10286         sizeForNextList += (1ULL << (VMA_BITSCAN_MSB(allocSize) - SECOND_LEVEL_INDEX));
10287     }
10288     else if (allocSize > SMALL_BUFFER_SIZE - smallSizeStep)
10289         sizeForNextList = SMALL_BUFFER_SIZE + 1;
10290     else
10291         sizeForNextList += smallSizeStep;
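    // sizeForNextList bumps the request into the next second-level bucket, so the "larger
    // bucket" searches below start one list above the best-fit list.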
10292 
10293     uint32_t nextListIndex = 0;
10294     uint32_t prevListIndex = 0;
10295     Block* nextListBlock = VMA_NULL;
10296     Block* prevListBlock = VMA_NULL;
10297 
10298     // Check blocks according to strategies
10299     if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT)
10300     {
10301         // Quick check for larger block first
10302         nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
10303         if (nextListBlock != VMA_NULL && CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
10304             return true;
10305 
10306         // If it did not fit, try the null block
10307         if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
10308             return true;
10309 
10310         // Null block failed, search larger bucket
10311         while (nextListBlock)
10312         {
10313             if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
10314                 return true;
10315             nextListBlock = nextListBlock->NextFree();
10316         }
10317 
10318         // Failed again, check best fit bucket
10319         prevListBlock = FindFreeBlock(allocSize, prevListIndex);
10320         while (prevListBlock)
10321         {
10322             if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
10323                 return true;
10324             prevListBlock = prevListBlock->NextFree();
10325         }
10326     }
10327     else if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT)
10328     {
10329         // Check best fit bucket
10330         prevListBlock = FindFreeBlock(allocSize, prevListIndex);
10331         while (prevListBlock)
10332         {
10333             if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
10334                 return true;
10335             prevListBlock = prevListBlock->NextFree();
10336         }
10337 
10338         // If failed check null block
10339         if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
10340             return true;
10341 
10342         // Check larger bucket
10343         nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
10344         while (nextListBlock)
10345         {
10346             if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
10347                 return true;
10348             nextListBlock = nextListBlock->NextFree();
10349         }
10350     }
10351     else if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT )
10352     {
10353         // Perform search from the start
10354         VmaStlAllocator<Block*> allocator(GetAllocationCallbacks());
10355         VmaVector<Block*, VmaStlAllocator<Block*>> blockList(m_BlocksFreeCount, allocator);
10356 
10357         size_t i = m_BlocksFreeCount;
10358         for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
10359         {
10360             if (block->IsFree() && block->size >= allocSize)
10361                 blockList[--i] = block;
10362         }
10363 
10364         for (; i < m_BlocksFreeCount; ++i)
10365         {
10366             Block& block = *blockList[i];
10367             if (CheckBlock(block, GetListIndex(block.size), allocSize, allocAlignment, allocType, pAllocationRequest))
10368                 return true;
10369         }
10370 
10371         // If failed check null block
10372         if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
10373             return true;
10374 
10375         // Whole range searched, no more memory
10376         return false;
10377     }
10378     else
10379     {
10380         // Check larger bucket
10381         nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
10382         while (nextListBlock)
10383         {
10384             if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
10385                 return true;
10386             nextListBlock = nextListBlock->NextFree();
10387         }
10388 
10389         // If failed check null block
10390         if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
10391             return true;
10392 
10393         // Check best fit bucket
10394         prevListBlock = FindFreeBlock(allocSize, prevListIndex);
10395         while (prevListBlock)
10396         {
10397             if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
10398                 return true;
10399             prevListBlock = prevListBlock->NextFree();
10400         }
10401     }
10402 
10403     // Worst case, full search has to be done
10404     while (++nextListIndex < m_ListsCount)
10405     {
10406         nextListBlock = m_FreeList[nextListIndex];
10407         while (nextListBlock)
10408         {
10409             if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
10410                 return true;
10411             nextListBlock = nextListBlock->NextFree();
10412         }
10413     }
10414 
10415     // No more memory sadly
10416     return false;
10417 }
10418 
10419 VkResult VmaBlockMetadata_TLSF::CheckCorruption(const void* pBlockData)
10420 {
10421     for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
10422     {
10423         if (!block->IsFree())
10424         {
10425             if (!VmaValidateMagicValue(pBlockData, block->offset + block->size))
10426             {
10427                 VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10428                 return VK_ERROR_UNKNOWN_COPY;
10429             }
10430         }
10431     }
10432 
10433     return VK_SUCCESS;
10434 }
10435 
10436 void VmaBlockMetadata_TLSF::Alloc(
10437     const VmaAllocationRequest& request,
10438     VmaSuballocationType type,
10439     void* userData)
10440 {
10441     VMA_ASSERT(request.type == VmaAllocationRequestType::TLSF);
10442 
10443     // Get block and pop it from the free list
10444     Block* currentBlock = (Block*)request.allocHandle;
10445     VkDeviceSize offset = request.algorithmData;
10446     VMA_ASSERT(currentBlock != VMA_NULL);
10447     VMA_ASSERT(currentBlock->offset <= offset);
10448 
10449     if (currentBlock != m_NullBlock)
10450         RemoveFreeBlock(currentBlock);
10451 
10452     VkDeviceSize debugMargin = GetDebugMargin();
10453     VkDeviceSize missingAlignment = offset - currentBlock->offset;
10454 
10455     // Append missing alignment to prev block or create new one
10456     if (missingAlignment)
10457     {
10458         Block* prevBlock = currentBlock->prevPhysical;
10459         VMA_ASSERT(prevBlock != VMA_NULL && "There should be no missing alignment at offset 0!");
10460 
10461         if (prevBlock->IsFree() && prevBlock->size != debugMargin)
10462         {
10463             uint32_t oldList = GetListIndex(prevBlock->size);
10464             prevBlock->size += missingAlignment;
10465             // Check if new size crosses list bucket
10466             if (oldList != GetListIndex(prevBlock->size))
10467             {
10468                 prevBlock->size -= missingAlignment;
10469                 RemoveFreeBlock(prevBlock);
10470                 prevBlock->size += missingAlignment;
10471                 InsertFreeBlock(prevBlock);
10472             }
10473             else
10474                 m_BlocksFreeSize += missingAlignment;
10475         }
10476         else
10477         {
10478             Block* newBlock = m_BlockAllocator.Alloc();
10479             currentBlock->prevPhysical = newBlock;
10480             prevBlock->nextPhysical = newBlock;
10481             newBlock->prevPhysical = prevBlock;
10482             newBlock->nextPhysical = currentBlock;
10483             newBlock->size = missingAlignment;
10484             newBlock->offset = currentBlock->offset;
10485             newBlock->MarkTaken();
10486 
10487             InsertFreeBlock(newBlock);
10488         }
10489 
10490         currentBlock->size -= missingAlignment;
10491         currentBlock->offset += missingAlignment;
10492     }
10493 
10494     VkDeviceSize size = request.size + debugMargin;
10495     if (currentBlock->size == size)
10496     {
10497         if (currentBlock == m_NullBlock)
10498         {
10499             // Setup new null block
10500             m_NullBlock = m_BlockAllocator.Alloc();
10501             m_NullBlock->size = 0;
10502             m_NullBlock->offset = currentBlock->offset + size;
10503             m_NullBlock->prevPhysical = currentBlock;
10504             m_NullBlock->nextPhysical = VMA_NULL;
10505             m_NullBlock->MarkFree();
10506             m_NullBlock->PrevFree() = VMA_NULL;
10507             m_NullBlock->NextFree() = VMA_NULL;
10508             currentBlock->nextPhysical = m_NullBlock;
10509             currentBlock->MarkTaken();
10510         }
10511     }
10512     else
10513     {
10514         VMA_ASSERT(currentBlock->size > size && "Proper block already found, shouldn't find smaller one!");
10515 
10516         // Create new free block
10517         Block* newBlock = m_BlockAllocator.Alloc();
10518         newBlock->size = currentBlock->size - size;
10519         newBlock->offset = currentBlock->offset + size;
10520         newBlock->prevPhysical = currentBlock;
10521         newBlock->nextPhysical = currentBlock->nextPhysical;
10522         currentBlock->nextPhysical = newBlock;
10523         currentBlock->size = size;
10524 
10525         if (currentBlock == m_NullBlock)
10526         {
10527             m_NullBlock = newBlock;
10528             m_NullBlock->MarkFree();
10529             m_NullBlock->NextFree() = VMA_NULL;
10530             m_NullBlock->PrevFree() = VMA_NULL;
10531             currentBlock->MarkTaken();
10532         }
10533         else
10534         {
10535             newBlock->nextPhysical->prevPhysical = newBlock;
10536             newBlock->MarkTaken();
10537             InsertFreeBlock(newBlock);
10538         }
10539     }
10540     currentBlock->UserData() = userData;
10541 
10542     if (debugMargin > 0)
10543     {
10544         currentBlock->size -= debugMargin;
10545         Block* newBlock = m_BlockAllocator.Alloc();
10546         newBlock->size = debugMargin;
10547         newBlock->offset = currentBlock->offset + currentBlock->size;
10548         newBlock->prevPhysical = currentBlock;
10549         newBlock->nextPhysical = currentBlock->nextPhysical;
10550         newBlock->MarkTaken();
10551         currentBlock->nextPhysical->prevPhysical = newBlock;
10552         currentBlock->nextPhysical = newBlock;
10553         InsertFreeBlock(newBlock);
10554     }
10555 
10556     if (!IsVirtual())
10557         m_GranularityHandler.AllocPages((uint8_t)(uintptr_t)request.customData,
10558             currentBlock->offset, currentBlock->size);
10559     ++m_AllocCount;
10560 }
10561 
10562 void VmaBlockMetadata_TLSF::Free(VmaAllocHandle allocHandle)
10563 {
10564     Block* block = (Block*)allocHandle;
10565     Block* next = block->nextPhysical;
10566     VMA_ASSERT(!block->IsFree() && "Block is already free!");
10567 
10568     if (!IsVirtual())
10569         m_GranularityHandler.FreePages(block->offset, block->size);
10570     --m_AllocCount;
10571 
10572     VkDeviceSize debugMargin = GetDebugMargin();
10573     if (debugMargin > 0)
10574     {
10575         RemoveFreeBlock(next);
10576         MergeBlock(next, block);
10577         block = next;
10578         next = next->nextPhysical;
10579     }
10580 
10581     // Try merging
10582     Block* prev = block->prevPhysical;
10583     if (prev != VMA_NULL && prev->IsFree() && prev->size != debugMargin)
10584     {
10585         RemoveFreeBlock(prev);
10586         MergeBlock(block, prev);
10587     }
10588 
10589     if (!next->IsFree())
10590         InsertFreeBlock(block);
10591     else if (next == m_NullBlock)
10592         MergeBlock(m_NullBlock, block);
10593     else
10594     {
10595         RemoveFreeBlock(next);
10596         MergeBlock(next, block);
10597         InsertFreeBlock(next);
10598     }
10599 }
10600 
10601 void VmaBlockMetadata_TLSF::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo)
10602 {
10603     Block* block = (Block*)allocHandle;
10604     VMA_ASSERT(!block->IsFree() && "Cannot get allocation info for free block!");
10605     outInfo.offset = block->offset;
10606     outInfo.size = block->size;
10607     outInfo.pUserData = block->UserData();
10608 }
10609 
10610 void* VmaBlockMetadata_TLSF::GetAllocationUserData(VmaAllocHandle allocHandle) const
10611 {
10612     Block* block = (Block*)allocHandle;
10613     VMA_ASSERT(!block->IsFree() && "Cannot get user data for free block!");
10614     return block->UserData();
10615 }
10616 
10617 VmaAllocHandle VmaBlockMetadata_TLSF::GetAllocationListBegin() const
10618 {
10619     if (m_AllocCount == 0)
10620         return VK_NULL_HANDLE;
10621 
10622     for (Block* block = m_NullBlock->prevPhysical; block; block = block->prevPhysical)
10623     {
10624         if (!block->IsFree())
10625             return (VmaAllocHandle)block;
10626     }
10627     VMA_ASSERT(false && "If m_AllocCount > 0 then there should be at least one allocation!");
10628     return VK_NULL_HANDLE;
10629 }
10630 
10631 VmaAllocHandle VmaBlockMetadata_TLSF::GetNextAllocation(VmaAllocHandle prevAlloc) const
10632 {
10633     Block* startBlock = (Block*)prevAlloc;
10634     VMA_ASSERT(!startBlock->IsFree() && "Incorrect block!");
10635 
10636     for (Block* block = startBlock->prevPhysical; block; block = block->prevPhysical)
10637     {
10638         if (!block->IsFree())
10639             return (VmaAllocHandle)block;
10640     }
10641     return VK_NULL_HANDLE;
10642 }
10643 
10644 VkDeviceSize VmaBlockMetadata_TLSF::GetNextFreeRegionSize(VmaAllocHandle alloc) const
10645 {
10646     Block* block = (Block*)alloc;
10647     VMA_ASSERT(!block->IsFree() && "Incorrect block!");
10648 
10649     if (block->prevPhysical)
10650         return block->prevPhysical->IsFree() ? block->prevPhysical->size : 0;
10651     return 0;
10652 }
10653 
10654 void VmaBlockMetadata_TLSF::Clear()
10655 {
10656     m_AllocCount = 0;
10657     m_BlocksFreeCount = 0;
10658     m_BlocksFreeSize = 0;
10659     m_IsFreeBitmap = 0;
10660     m_NullBlock->offset = 0;
10661     m_NullBlock->size = GetSize();
10662     Block* block = m_NullBlock->prevPhysical;
10663     m_NullBlock->prevPhysical = VMA_NULL;
10664     while (block)
10665     {
10666         Block* prev = block->prevPhysical;
10667         m_BlockAllocator.Free(block);
10668         block = prev;
10669     }
10670     memset(m_FreeList, 0, m_ListsCount * sizeof(Block*));
10671     memset(m_InnerIsFreeBitmap, 0, m_MemoryClasses * sizeof(uint32_t));
10672     m_GranularityHandler.Clear();
10673 }
10674 
10675 void VmaBlockMetadata_TLSF::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData)
10676 {
10677     Block* block = (Block*)allocHandle;
10678     VMA_ASSERT(!block->IsFree() && "Trying to set user data for not allocated block!");
10679     block->UserData() = userData;
10680 }
10681 
10682 void VmaBlockMetadata_TLSF::DebugLogAllAllocations() const
10683 {
10684     for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
10685         if (!block->IsFree())
10686             DebugLogAllocation(block->offset, block->size, block->UserData());
10687 }
10688 
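// For illustration: sizes up to SMALL_BUFFER_SIZE (256) fall into memory class 0; above that
// the class is the index of the highest set bit minus MEMORY_CLASS_SHIFT, e.g. size 768
// (highest bit 9) maps to class 2.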
10689 uint8_t VmaBlockMetadata_TLSF::SizeToMemoryClass(VkDeviceSize size) const
10690 {
10691     if (size > SMALL_BUFFER_SIZE)
10692         return VMA_BITSCAN_MSB(size) - MEMORY_CLASS_SHIFT;
10693     return 0;
10694 }
10695 
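// For illustration: class 0 is bucketed linearly ((size - 1) / 8 for virtual blocks,
// (size - 1) / 64 otherwise); for higher classes the second index is formed from the
// SECOND_LEVEL_INDEX bits just below the top bit, e.g. size 768 in class 2:
// (768 >> 4) ^ 32 = 48 ^ 32 = 16.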
10696 uint16_t VmaBlockMetadata_TLSF::SizeToSecondIndex(VkDeviceSize size, uint8_t memoryClass) const
10697 {
10698     if (memoryClass == 0)
10699     {
10700         if (IsVirtual())
10701             return static_cast<uint16_t>((size - 1) / 8);
10702         else
10703             return static_cast<uint16_t>((size - 1) / 64);
10704     }
10705     return static_cast<uint16_t>((size >> (memoryClass + MEMORY_CLASS_SHIFT - SECOND_LEVEL_INDEX)) ^ (1U << SECOND_LEVEL_INDEX));
10706 }
10707 
10708 uint32_t VmaBlockMetadata_TLSF::GetListIndex(uint8_t memoryClass, uint16_t secondIndex) const
10709 {
10710     if (memoryClass == 0)
10711         return secondIndex;
10712 
10713     const uint32_t index = static_cast<uint32_t>(memoryClass - 1) * (1 << SECOND_LEVEL_INDEX) + secondIndex;
10714     if (IsVirtual())
10715         return index + (1 << SECOND_LEVEL_INDEX);
10716     else
10717         return index + 4;
10718 }
10719 
10720 uint32_t VmaBlockMetadata_TLSF::GetListIndex(VkDeviceSize size) const
10721 {
10722     uint8_t memoryClass = SizeToMemoryClass(size);
10723     return GetListIndex(memoryClass, SizeToSecondIndex(size, memoryClass));
10724 }
10725 
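// Free-list bookkeeping: m_IsFreeBitmap holds one bit per memory class and
// m_InnerIsFreeBitmap[class] one bit per second-level list. RemoveFreeBlock() clears the inner
// bit when a list becomes empty (and the outer bit when a whole class empties);
// InsertFreeBlock() sets them again.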
10726 void VmaBlockMetadata_TLSF::RemoveFreeBlock(Block* block)
10727 {
10728     VMA_ASSERT(block != m_NullBlock);
10729     VMA_ASSERT(block->IsFree());
10730 
10731     if (block->NextFree() != VMA_NULL)
10732         block->NextFree()->PrevFree() = block->PrevFree();
10733     if (block->PrevFree() != VMA_NULL)
10734         block->PrevFree()->NextFree() = block->NextFree();
10735     else
10736     {
10737         uint8_t memClass = SizeToMemoryClass(block->size);
10738         uint16_t secondIndex = SizeToSecondIndex(block->size, memClass);
10739         uint32_t index = GetListIndex(memClass, secondIndex);
10740         VMA_ASSERT(m_FreeList[index] == block);
10741         m_FreeList[index] = block->NextFree();
10742         if (block->NextFree() == VMA_NULL)
10743         {
10744             m_InnerIsFreeBitmap[memClass] &= ~(1U << secondIndex);
10745             if (m_InnerIsFreeBitmap[memClass] == 0)
10746                 m_IsFreeBitmap &= ~(1UL << memClass);
10747         }
10748     }
10749     block->MarkTaken();
10750     block->UserData() = VMA_NULL;
10751     --m_BlocksFreeCount;
10752     m_BlocksFreeSize -= block->size;
10753 }
10754 
10755 void VmaBlockMetadata_TLSF::InsertFreeBlock(Block* block)
10756 {
10757     VMA_ASSERT(block != m_NullBlock);
10758     VMA_ASSERT(!block->IsFree() && "Cannot insert block twice!");
10759 
10760     uint8_t memClass = SizeToMemoryClass(block->size);
10761     uint16_t secondIndex = SizeToSecondIndex(block->size, memClass);
10762     uint32_t index = GetListIndex(memClass, secondIndex);
10763     VMA_ASSERT(index < m_ListsCount);
10764     block->PrevFree() = VMA_NULL;
10765     block->NextFree() = m_FreeList[index];
10766     m_FreeList[index] = block;
10767     if (block->NextFree() != VMA_NULL)
10768         block->NextFree()->PrevFree() = block;
10769     else
10770     {
10771         m_InnerIsFreeBitmap[memClass] |= 1U << secondIndex;
10772         m_IsFreeBitmap |= 1UL << memClass;
10773     }
10774     ++m_BlocksFreeCount;
10775     m_BlocksFreeSize += block->size;
10776 }
10777 
10778 void VmaBlockMetadata_TLSF::MergeBlock(Block* block, Block* prev)
10779 {
10780     VMA_ASSERT(block->prevPhysical == prev && "Cannot merge separate physical regions!");
10781     VMA_ASSERT(!prev->IsFree() && "Cannot merge block that belongs to free list!");
10782 
10783     block->offset = prev->offset;
10784     block->size += prev->size;
10785     block->prevPhysical = prev->prevPhysical;
10786     if (block->prevPhysical)
10787         block->prevPhysical->nextPhysical = block;
10788     m_BlockAllocator.Free(prev);
10789 }
10790 
10791 VmaBlockMetadata_TLSF::Block* VmaBlockMetadata_TLSF::FindFreeBlock(VkDeviceSize size, uint32_t& listIndex) const
10792 {
10793     uint8_t memoryClass = SizeToMemoryClass(size);
10794     uint32_t innerFreeMap = m_InnerIsFreeBitmap[memoryClass] & (~0U << SizeToSecondIndex(size, memoryClass));
10795     if (!innerFreeMap)
10796     {
10797         // Check higher levels for available blocks
10798         uint32_t freeMap = m_IsFreeBitmap & (~0UL << (memoryClass + 1));
10799         if (!freeMap)
10800             return VMA_NULL; // No more memory available
10801 
10802         // Find lowest free region
10803         memoryClass = VMA_BITSCAN_LSB(freeMap);
10804         innerFreeMap = m_InnerIsFreeBitmap[memoryClass];
10805         VMA_ASSERT(innerFreeMap != 0);
10806     }
10807     // Find lowest free subregion
10808     listIndex = GetListIndex(memoryClass, VMA_BITSCAN_LSB(innerFreeMap));
10809     VMA_ASSERT(m_FreeList[listIndex]);
10810     return m_FreeList[listIndex];
10811 }
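// Note on FindFreeBlock(): the lookup is O(1) thanks to the two bitmaps. The
// first scan masks m_InnerIsFreeBitmap[memoryClass] down to second-level lists
// at or above the one matching the request; if that class has no candidate,
// the scan of m_IsFreeBitmap jumps directly to the lowest strictly larger
// class, where every free block is big enough. Continuing the example above,
// a 1536 B request first probes class 3 at second index >= 16, and otherwise
// falls back to the lowest set bit among classes >= 4. The caller still has to
// verify the candidate with CheckBlock(), since the exactly matching list can
// hold blocks slightly smaller than the request.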
10812 
10813 bool VmaBlockMetadata_TLSF::CheckBlock(
10814     Block& block,
10815     uint32_t listIndex,
10816     VkDeviceSize allocSize,
10817     VkDeviceSize allocAlignment,
10818     VmaSuballocationType allocType,
10819     VmaAllocationRequest* pAllocationRequest)
10820 {
10821     VMA_ASSERT(block.IsFree() && "Block is already taken!");
10822 
10823     VkDeviceSize alignedOffset = VmaAlignUp(block.offset, allocAlignment);
10824     if (block.size < allocSize + alignedOffset - block.offset)
10825         return false;
10826 
10827     // Check for granularity conflicts
10828     if (!IsVirtual() &&
10829         m_GranularityHandler.CheckConflictAndAlignUp(alignedOffset, allocSize, block.offset, block.size, allocType))
10830         return false;
10831 
10832     // Alloc successful
10833     pAllocationRequest->type = VmaAllocationRequestType::TLSF;
10834     pAllocationRequest->allocHandle = (VmaAllocHandle)&block;
10835     pAllocationRequest->size = allocSize - GetDebugMargin();
10836     pAllocationRequest->customData = (void*)allocType;
10837     pAllocationRequest->algorithmData = alignedOffset;
10838 
10839     // Place block at the start of the list if it's a normal block
10840     if (listIndex != m_ListsCount && block.PrevFree())
10841     {
10842         block.PrevFree()->NextFree() = block.NextFree();
10843         if (block.NextFree())
10844             block.NextFree()->PrevFree() = block.PrevFree();
10845         block.PrevFree() = VMA_NULL;
10846         block.NextFree() = m_FreeList[listIndex];
10847         m_FreeList[listIndex] = &block;
10848         if (block.NextFree())
10849             block.NextFree()->PrevFree() = &block;
10850     }
10851 
10852     return true;
10853 }
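// Note on CheckBlock(): a block found by FindFreeBlock() is only a candidate.
// Its offset is first aligned up, and the block must cover allocSize plus the
// alignment padding (alignedOffset - block.offset); on non-virtual blocks the
// granularity handler additionally rules out bufferImageGranularity conflicts
// with neighboring resources. The aligned offset is passed along to Alloc()
// through pAllocationRequest->algorithmData.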
10854 #endif // _VMA_BLOCK_METADATA_TLSF_FUNCTIONS
10855 #endif // _VMA_BLOCK_METADATA_TLSF
10856 
10857 #ifndef _VMA_BLOCK_VECTOR
10858 /*
10859 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
10860 Vulkan memory type.
10861 
10862 Synchronized internally with a mutex.
10863 */
10864 class VmaBlockVector
10865 {
10866     friend struct VmaDefragmentationContext_T;
10867     VMA_CLASS_NO_COPY(VmaBlockVector)
10868 public:
10869     VmaBlockVector(
10870         VmaAllocator hAllocator,
10871         VmaPool hParentPool,
10872         uint32_t memoryTypeIndex,
10873         VkDeviceSize preferredBlockSize,
10874         size_t minBlockCount,
10875         size_t maxBlockCount,
10876         VkDeviceSize bufferImageGranularity,
10877         bool explicitBlockSize,
10878         uint32_t algorithm,
10879         float priority,
10880         VkDeviceSize minAllocationAlignment,
10881         void* pMemoryAllocateNext);
10882     ~VmaBlockVector();
10883 
10884     VmaAllocator GetAllocator() const { return m_hAllocator; }
10885     VmaPool GetParentPool() const { return m_hParentPool; }
10886     bool IsCustomPool() const { return m_hParentPool != VMA_NULL; }
10887     uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
10888     VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
10889     VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
10890     uint32_t GetAlgorithm() const { return m_Algorithm; }
10891     bool HasExplicitBlockSize() const { return m_ExplicitBlockSize; }
10892     float GetPriority() const { return m_Priority; }
10893     const void* GetAllocationNextPtr() const { return m_pMemoryAllocateNext; }
10894     // To be used only while the m_Mutex is locked. Used during defragmentation.
10895     size_t GetBlockCount() const { return m_Blocks.size(); }
10896     // To be used only while the m_Mutex is locked. Used during defragmentation.
10897     VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
10898     VMA_RW_MUTEX &GetMutex() { return m_Mutex; }
10899 
10900     VkResult CreateMinBlocks();
10901     void AddStatistics(VmaStatistics& inoutStats);
10902     void AddDetailedStatistics(VmaDetailedStatistics& inoutStats);
10903     bool IsEmpty();
10904     bool IsCorruptionDetectionEnabled() const;
10905 
10906     VkResult Allocate(
10907         VkDeviceSize size,
10908         VkDeviceSize alignment,
10909         const VmaAllocationCreateInfo& createInfo,
10910         VmaSuballocationType suballocType,
10911         size_t allocationCount,
10912         VmaAllocation* pAllocations);
10913 
10914     void Free(const VmaAllocation hAllocation);
10915 
10916 #if VMA_STATS_STRING_ENABLED
10917     void PrintDetailedMap(class VmaJsonWriter& json);
10918 #endif
10919 
10920     VkResult CheckCorruption();
10921 
10922 private:
10923     const VmaAllocator m_hAllocator;
10924     const VmaPool m_hParentPool;
10925     const uint32_t m_MemoryTypeIndex;
10926     const VkDeviceSize m_PreferredBlockSize;
10927     const size_t m_MinBlockCount;
10928     const size_t m_MaxBlockCount;
10929     const VkDeviceSize m_BufferImageGranularity;
10930     const bool m_ExplicitBlockSize;
10931     const uint32_t m_Algorithm;
10932     const float m_Priority;
10933     const VkDeviceSize m_MinAllocationAlignment;
10934 
10935     void* const m_pMemoryAllocateNext;
10936     VMA_RW_MUTEX m_Mutex;
10937     // Incrementally sorted by sumFreeSize, ascending.
10938     VmaVector<VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*>> m_Blocks;
10939     uint32_t m_NextBlockId;
10940     bool m_IncrementalSort = true;
10941 
10942     void SetIncrementalSort(bool val) { m_IncrementalSort = val; }
10943 
10944     VkDeviceSize CalcMaxBlockSize() const;
10945     // Finds and removes given block from vector.
10946     void Remove(VmaDeviceMemoryBlock* pBlock);
10947     // Performs single step in sorting m_Blocks. They may not be fully sorted
10948     // after this call.
10949     void IncrementallySortBlocks();
10950     void SortByFreeSize();
10951 
10952     VkResult AllocatePage(
10953         VkDeviceSize size,
10954         VkDeviceSize alignment,
10955         const VmaAllocationCreateInfo& createInfo,
10956         VmaSuballocationType suballocType,
10957         VmaAllocation* pAllocation);
10958 
10959     VkResult AllocateFromBlock(
10960         VmaDeviceMemoryBlock* pBlock,
10961         VkDeviceSize size,
10962         VkDeviceSize alignment,
10963         VmaAllocationCreateFlags allocFlags,
10964         void* pUserData,
10965         VmaSuballocationType suballocType,
10966         uint32_t strategy,
10967         VmaAllocation* pAllocation);
10968 
10969     VkResult CommitAllocationRequest(
10970         VmaAllocationRequest& allocRequest,
10971         VmaDeviceMemoryBlock* pBlock,
10972         VkDeviceSize alignment,
10973         VmaAllocationCreateFlags allocFlags,
10974         void* pUserData,
10975         VmaSuballocationType suballocType,
10976         VmaAllocation* pAllocation);
10977 
10978     VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
10979     bool HasEmptyBlock();
10980 };
10981 #endif // _VMA_BLOCK_VECTOR
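// A sketch of the allocation path implied by the declarations above (the
// definitions follow later in this file): Allocate() calls AllocatePage() once
// per requested allocation. AllocatePage() first tries AllocateFromBlock() on
// the existing m_Blocks, which are kept incrementally sorted by free size, and
// only creates a new VkDeviceMemory block via CreateBlock() when none fits and
// m_MaxBlockCount has not been reached. A successful VmaAllocationRequest is
// then turned into a live VmaAllocation by CommitAllocationRequest().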
10982 
10983 #ifndef _VMA_DEFRAGMENTATION_CONTEXT
10984 struct VmaDefragmentationContext_T
10985 {
10986     VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
10987 public:
10988     VmaDefragmentationContext_T(
10989         VmaAllocator hAllocator,
10990         const VmaDefragmentationInfo& info);
10991     ~VmaDefragmentationContext_T();
10992 
10993     void GetStats(VmaDefragmentationStats& outStats) { outStats = m_GlobalStats; }
10994 
10995     VkResult DefragmentPassBegin(VmaDefragmentationPassMoveInfo& moveInfo);
10996     VkResult DefragmentPassEnd(VmaDefragmentationPassMoveInfo& moveInfo);
10997 
10998 private:
10999     // Max number of allocations to ignore due to size constraints before ending a single pass
11000     static const uint8_t MAX_ALLOCS_TO_IGNORE = 16;
11001     enum class CounterStatus { Pass, Ignore, End };
11002 
11003     struct FragmentedBlock
11004     {
11005         uint32_t data;
11006         VmaDeviceMemoryBlock* block;
11007     };
11008     struct StateBalanced
11009     {
11010         VkDeviceSize avgFreeSize = 0;
11011         VkDeviceSize avgAllocSize = UINT64_MAX;
11012     };
11013     struct StateExtensive
11014     {
11015         enum class Operation : uint8_t
11016         {
11017             FindFreeBlockBuffer, FindFreeBlockTexture, FindFreeBlockAll,
11018             MoveBuffers, MoveTextures, MoveAll,
11019             Cleanup, Done
11020         };
11021 
11022         Operation operation = Operation::FindFreeBlockTexture;
11023         size_t firstFreeBlock = SIZE_MAX;
11024     };
11025     struct MoveAllocationData
11026     {
11027         VkDeviceSize size;
11028         VkDeviceSize alignment;
11029         VmaSuballocationType type;
11030         VmaAllocationCreateFlags flags;
11031         VmaDefragmentationMove move = {};
11032     };
11033 
11034     const VkDeviceSize m_MaxPassBytes;
11035     const uint32_t m_MaxPassAllocations;
11036 
11037     VmaStlAllocator<VmaDefragmentationMove> m_MoveAllocator;
11038     VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove>> m_Moves;
11039 
11040     uint8_t m_IgnoredAllocs = 0;
11041     uint32_t m_Algorithm;
11042     uint32_t m_BlockVectorCount;
11043     VmaBlockVector* m_PoolBlockVector;
11044     VmaBlockVector** m_pBlockVectors;
11045     size_t m_ImmovableBlockCount = 0;
11046     VmaDefragmentationStats m_GlobalStats = { 0 };
11047     VmaDefragmentationStats m_PassStats = { 0 };
11048     void* m_AlgorithmState = VMA_NULL;
11049 
11050     static MoveAllocationData GetMoveData(VmaAllocHandle handle, VmaBlockMetadata* metadata);
11051     CounterStatus CheckCounters(VkDeviceSize bytes);
11052     bool IncrementCounters(VkDeviceSize bytes);
11053     bool ReallocWithinBlock(VmaBlockVector& vector, VmaDeviceMemoryBlock* block);
11054     bool AllocInOtherBlock(size_t start, size_t end, MoveAllocationData& data, VmaBlockVector& vector);
11055 
11056     bool ComputeDefragmentation(VmaBlockVector& vector, size_t index);
11057     bool ComputeDefragmentation_Fast(VmaBlockVector& vector);
11058     bool ComputeDefragmentation_Balanced(VmaBlockVector& vector, size_t index, bool update);
11059     bool ComputeDefragmentation_Full(VmaBlockVector& vector);
11060     bool ComputeDefragmentation_Extensive(VmaBlockVector& vector, size_t index);
11061 
11062     void UpdateVectorStatistics(VmaBlockVector& vector, StateBalanced& state);
11063     bool MoveDataToFreeBlocks(VmaSuballocationType currentType,
11064         VmaBlockVector& vector, size_t firstFreeBlock,
11065         bool& texturePresent, bool& bufferPresent, bool& otherPresent);
11066 };
11067 #endif // _VMA_DEFRAGMENTATION_CONTEXT
11068 
11069 #ifndef _VMA_POOL_T
11070 struct VmaPool_T
11071 {
11072     friend struct VmaPoolListItemTraits;
11073     VMA_CLASS_NO_COPY(VmaPool_T)
11074 public:
11075     VmaBlockVector m_BlockVector;
11076     VmaDedicatedAllocationList m_DedicatedAllocations;
11077 
11078     VmaPool_T(
11079         VmaAllocator hAllocator,
11080         const VmaPoolCreateInfo& createInfo,
11081         VkDeviceSize preferredBlockSize);
11082     ~VmaPool_T();
11083 
11084     uint32_t GetId() const { return m_Id; }
11085     void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
11086 
11087     const char* GetName() const { return m_Name; }
11088     void SetName(const char* pName);
11089 
11090 #if VMA_STATS_STRING_ENABLED
11091     //void PrintDetailedMap(class VmaStringBuilder& sb);
11092 #endif
11093 
11094 private:
11095     uint32_t m_Id;
11096     char* m_Name;
11097     VmaPool_T* m_PrevPool = VMA_NULL;
11098     VmaPool_T* m_NextPool = VMA_NULL;
11099 };
11100 
11101 struct VmaPoolListItemTraits
11102 {
11103     typedef VmaPool_T ItemType;
11104 
11105     static ItemType* GetPrev(const ItemType* item) { return item->m_PrevPool; }
11106     static ItemType* GetNext(const ItemType* item) { return item->m_NextPool; }
11107     static ItemType*& AccessPrev(ItemType* item) { return item->m_PrevPool; }
11108     static ItemType*& AccessNext(ItemType* item) { return item->m_NextPool; }
11109 };
11110 #endif // _VMA_POOL_T
11111 
11112 #ifndef _VMA_CURRENT_BUDGET_DATA
11113 struct VmaCurrentBudgetData
11114 {
11115     VMA_ATOMIC_UINT32 m_BlockCount[VK_MAX_MEMORY_HEAPS];
11116     VMA_ATOMIC_UINT32 m_AllocationCount[VK_MAX_MEMORY_HEAPS];
11117     VMA_ATOMIC_UINT64 m_BlockBytes[VK_MAX_MEMORY_HEAPS];
11118     VMA_ATOMIC_UINT64 m_AllocationBytes[VK_MAX_MEMORY_HEAPS];
11119 
11120 #if VMA_MEMORY_BUDGET
11121     VMA_ATOMIC_UINT32 m_OperationsSinceBudgetFetch;
11122     VMA_RW_MUTEX m_BudgetMutex;
11123     uint64_t m_VulkanUsage[VK_MAX_MEMORY_HEAPS];
11124     uint64_t m_VulkanBudget[VK_MAX_MEMORY_HEAPS];
11125     uint64_t m_BlockBytesAtBudgetFetch[VK_MAX_MEMORY_HEAPS];
11126 #endif // VMA_MEMORY_BUDGET
11127 
11128     VmaCurrentBudgetData();
11129 
11130     void AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize);
11131     void RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize);
11132 };
11133 
11134 #ifndef _VMA_CURRENT_BUDGET_DATA_FUNCTIONS
11135 VmaCurrentBudgetData::VmaCurrentBudgetData()
11136 {
11137     for (uint32_t heapIndex = 0; heapIndex < VK_MAX_MEMORY_HEAPS; ++heapIndex)
11138     {
11139         m_BlockCount[heapIndex] = 0;
11140         m_AllocationCount[heapIndex] = 0;
11141         m_BlockBytes[heapIndex] = 0;
11142         m_AllocationBytes[heapIndex] = 0;
11143 #if VMA_MEMORY_BUDGET
11144         m_VulkanUsage[heapIndex] = 0;
11145         m_VulkanBudget[heapIndex] = 0;
11146         m_BlockBytesAtBudgetFetch[heapIndex] = 0;
11147 #endif
11148     }
11149 
11150 #if VMA_MEMORY_BUDGET
11151     m_OperationsSinceBudgetFetch = 0;
11152 #endif
11153 }
11154 
11155 void VmaCurrentBudgetData::AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
11156 {
11157     m_AllocationBytes[heapIndex] += allocationSize;
11158     ++m_AllocationCount[heapIndex];
11159 #if VMA_MEMORY_BUDGET
11160     ++m_OperationsSinceBudgetFetch;
11161 #endif
11162 }
11163 
11164 void VmaCurrentBudgetData::RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
11165 {
11166     VMA_ASSERT(m_AllocationBytes[heapIndex] >= allocationSize);
11167     m_AllocationBytes[heapIndex] -= allocationSize;
11168     VMA_ASSERT(m_AllocationCount[heapIndex] > 0);
11169     --m_AllocationCount[heapIndex];
11170 #if VMA_MEMORY_BUDGET
11171     ++m_OperationsSinceBudgetFetch;
11172 #endif
11173 }
11174 #endif // _VMA_CURRENT_BUDGET_DATA_FUNCTIONS
11175 #endif // _VMA_CURRENT_BUDGET_DATA
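// How this bookkeeping is typically consumed (a sketch; the exact logic lives
// in VmaAllocator_T::GetHeapBudgets() later in this file): with
// VK_EXT_memory_budget available, the usage reported for a heap is estimated
// as the value last fetched from the driver, corrected by what this allocator
// has allocated or freed since that fetch, roughly
//
//   usage = m_VulkanUsage[heap] + m_BlockBytes[heap] - m_BlockBytesAtBudgetFetch[heap]
//
// while m_OperationsSinceBudgetFetch decides when the cached driver values are
// stale enough to be re-queried.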
11176 
11177 #ifndef _VMA_ALLOCATION_OBJECT_ALLOCATOR
11178 /*
11179 Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
11180 */
11181 class VmaAllocationObjectAllocator
11182 {
11183     VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
11184 public:
11185     VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks)
11186         : m_Allocator(pAllocationCallbacks, 1024) {}
11187 
11188     template<typename... Types> VmaAllocation Allocate(Types&&... args);
11189     void Free(VmaAllocation hAlloc);
11190 
11191 private:
11192     VMA_MUTEX m_Mutex;
11193     VmaPoolAllocator<VmaAllocation_T> m_Allocator;
11194 };
11195 
11196 template<typename... Types>
11197 VmaAllocation VmaAllocationObjectAllocator::Allocate(Types&&... args)
11198 {
11199     VmaMutexLock mutexLock(m_Mutex);
11200     return m_Allocator.Alloc<Types...>(std::forward<Types>(args)...);
11201 }
11202 
11203 void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
11204 {
11205     VmaMutexLock mutexLock(m_Mutex);
11206     m_Allocator.Free(hAlloc);
11207 }
11208 #endif // _VMA_ALLOCATION_OBJECT_ALLOCATOR
11209 
11210 #ifndef _VMA_VIRTUAL_BLOCK_T
11211 struct VmaVirtualBlock_T
11212 {
11213     VMA_CLASS_NO_COPY(VmaVirtualBlock_T)
11214 public:
11215     const bool m_AllocationCallbacksSpecified;
11216     const VkAllocationCallbacks m_AllocationCallbacks;
11217 
11218     VmaVirtualBlock_T(const VmaVirtualBlockCreateInfo& createInfo);
11219     ~VmaVirtualBlock_T();
11220 
11221     VkResult Init() { return VK_SUCCESS; }
11222     bool IsEmpty() const { return m_Metadata->IsEmpty(); }
11223     void Free(VmaVirtualAllocation allocation) { m_Metadata->Free((VmaAllocHandle)allocation); }
11224     void SetAllocationUserData(VmaVirtualAllocation allocation, void* userData) { m_Metadata->SetAllocationUserData((VmaAllocHandle)allocation, userData); }
11225     void Clear() { m_Metadata->Clear(); }
11226 
11227     const VkAllocationCallbacks* GetAllocationCallbacks() const;
11228     void GetAllocationInfo(VmaVirtualAllocation allocation, VmaVirtualAllocationInfo& outInfo);
11229     VkResult Allocate(const VmaVirtualAllocationCreateInfo& createInfo, VmaVirtualAllocation& outAllocation,
11230         VkDeviceSize* outOffset);
11231     void GetStatistics(VmaStatistics& outStats) const;
11232     void CalculateDetailedStatistics(VmaDetailedStatistics& outStats) const;
11233 #if VMA_STATS_STRING_ENABLED
11234     void BuildStatsString(bool detailedMap, VmaStringBuilder& sb) const;
11235 #endif
11236 
11237 private:
11238     VmaBlockMetadata* m_Metadata;
11239 };
11240 
11241 #ifndef _VMA_VIRTUAL_BLOCK_T_FUNCTIONS
11242 VmaVirtualBlock_T::VmaVirtualBlock_T(const VmaVirtualBlockCreateInfo& createInfo)
11243     : m_AllocationCallbacksSpecified(createInfo.pAllocationCallbacks != VMA_NULL),
11244     m_AllocationCallbacks(createInfo.pAllocationCallbacks != VMA_NULL ? *createInfo.pAllocationCallbacks : VmaEmptyAllocationCallbacks)
11245 {
11246     const uint32_t algorithm = createInfo.flags & VMA_VIRTUAL_BLOCK_CREATE_ALGORITHM_MASK;
11247     switch (algorithm)
11248     {
11249     default:
11250         VMA_ASSERT(0);
11251     case 0:
11252         m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_TLSF)(VK_NULL_HANDLE, 1, true);
11253         break;
11254     case VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT:
11255         m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_Linear)(VK_NULL_HANDLE, 1, true);
11256         break;
11257     }
11258 
11259     m_Metadata->Init(createInfo.size);
11260 }
11261 
11262 VmaVirtualBlock_T::~VmaVirtualBlock_T()
11263 {
11264     // Define macro VMA_DEBUG_LOG to receive the list of the unfreed allocations
11265     if (!m_Metadata->IsEmpty())
11266         m_Metadata->DebugLogAllAllocations();
11267     // This is the most important assert in the entire library.
11268     // Hitting it means you have some memory leak - unreleased virtual allocations.
11269     VMA_ASSERT(m_Metadata->IsEmpty() && "Some virtual allocations were not freed before destruction of this virtual block!");
11270 
11271     vma_delete(GetAllocationCallbacks(), m_Metadata);
11272 }
11273 
11274 const VkAllocationCallbacks* VmaVirtualBlock_T::GetAllocationCallbacks() const
11275 {
11276     return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : VMA_NULL;
11277 }
11278 
11279 void VmaVirtualBlock_T::GetAllocationInfo(VmaVirtualAllocation allocation, VmaVirtualAllocationInfo& outInfo)
11280 {
11281     m_Metadata->GetAllocationInfo((VmaAllocHandle)allocation, outInfo);
11282 }
11283 
11284 VkResult VmaVirtualBlock_T::Allocate(const VmaVirtualAllocationCreateInfo& createInfo, VmaVirtualAllocation& outAllocation,
11285     VkDeviceSize* outOffset)
11286 {
11287     VmaAllocationRequest request = {};
11288     if (m_Metadata->CreateAllocationRequest(
11289         createInfo.size, // allocSize
11290         VMA_MAX(createInfo.alignment, (VkDeviceSize)1), // allocAlignment
11291         (createInfo.flags & VMA_VIRTUAL_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0, // upperAddress
11292         VMA_SUBALLOCATION_TYPE_UNKNOWN, // allocType - unimportant
11293         createInfo.flags & VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MASK, // strategy
11294         &request))
11295     {
11296         m_Metadata->Alloc(request,
11297             VMA_SUBALLOCATION_TYPE_UNKNOWN, // type - unimportant
11298             createInfo.pUserData);
11299         outAllocation = (VmaVirtualAllocation)request.allocHandle;
11300         if(outOffset)
11301             *outOffset = m_Metadata->GetAllocationOffset(request.allocHandle);
11302         return VK_SUCCESS;
11303     }
11304     outAllocation = (VmaVirtualAllocation)VK_NULL_HANDLE;
11305     if (outOffset)
11306         *outOffset = UINT64_MAX;
11307     return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11308 }
11309 
11310 void VmaVirtualBlock_T::GetStatistics(VmaStatistics& outStats) const
11311 {
11312     VmaClearStatistics(outStats);
11313     m_Metadata->AddStatistics(outStats);
11314 }
11315 
11316 void VmaVirtualBlock_T::CalculateDetailedStatistics(VmaDetailedStatistics& outStats) const
11317 {
11318     VmaClearDetailedStatistics(outStats);
11319     m_Metadata->AddDetailedStatistics(outStats);
11320 }
11321 
11322 #if VMA_STATS_STRING_ENABLED
11323 void VmaVirtualBlock_T::BuildStatsString(bool detailedMap, VmaStringBuilder& sb) const
11324 {
11325     VmaJsonWriter json(GetAllocationCallbacks(), sb);
11326     json.BeginObject();
11327 
11328     VmaDetailedStatistics stats;
11329     CalculateDetailedStatistics(stats);
11330 
11331     json.WriteString("Stats");
11332     VmaPrintDetailedStatistics(json, stats);
11333 
11334     if (detailedMap)
11335     {
11336         json.WriteString("Details");
11337         json.BeginObject();
11338         m_Metadata->PrintDetailedMap(json);
11339         json.EndObject();
11340     }
11341 
11342     json.EndObject();
11343 }
11344 #endif // VMA_STATS_STRING_ENABLED
11345 #endif // _VMA_VIRTUAL_BLOCK_T_FUNCTIONS
11346 #endif // _VMA_VIRTUAL_BLOCK_T
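// Illustrative use of VmaVirtualBlock_T through the public API declared near
// the top of this file (a sketch; error handling omitted):
//
//   VmaVirtualBlockCreateInfo blockCreateInfo = {};
//   blockCreateInfo.size = 1048576; // 1 MiB of abstract space to sub-allocate
//   VmaVirtualBlock block;
//   VkResult res = vmaCreateVirtualBlock(&blockCreateInfo, &block);
//
//   VmaVirtualAllocationCreateInfo allocCreateInfo = {};
//   allocCreateInfo.size = 4096;
//   VmaVirtualAllocation alloc;
//   VkDeviceSize offset;
//   res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, &offset);
//
//   vmaVirtualFree(block, alloc);  // Every allocation must be freed (or the
//   vmaDestroyVirtualBlock(block); // block cleared) before destruction.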
11347 
11348 
11349 // Main allocator object.
11350 struct VmaAllocator_T
11351 {
11352     VMA_CLASS_NO_COPY(VmaAllocator_T)
11353 public:
11354     bool m_UseMutex;
11355     uint32_t m_VulkanApiVersion;
11356     bool m_UseKhrDedicatedAllocation; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
11357     bool m_UseKhrBindMemory2; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
11358     bool m_UseExtMemoryBudget;
11359     bool m_UseAmdDeviceCoherentMemory;
11360     bool m_UseKhrBufferDeviceAddress;
11361     bool m_UseExtMemoryPriority;
11362     VkDevice m_hDevice;
11363     VkInstance m_hInstance;
11364     bool m_AllocationCallbacksSpecified;
11365     VkAllocationCallbacks m_AllocationCallbacks;
11366     VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
11367     VmaAllocationObjectAllocator m_AllocationObjectAllocator;
11368 
11369     // Each bit (1 << i) is set if HeapSizeLimit is enabled for that heap, so cannot allocate more than the heap size.
11370     uint32_t m_HeapSizeLimitMask;
11371 
11372     VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
11373     VkPhysicalDeviceMemoryProperties m_MemProps;
11374 
11375     // Default pools.
11376     VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
11377     VmaDedicatedAllocationList m_DedicatedAllocations[VK_MAX_MEMORY_TYPES];
11378 
11379     VmaCurrentBudgetData m_Budget;
11380     VMA_ATOMIC_UINT32 m_DeviceMemoryCount; // Total number of VkDeviceMemory objects.
11381 
11382     VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
11383     VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
11384     ~VmaAllocator_T();
11385 
11386     const VkAllocationCallbacks* GetAllocationCallbacks() const
11387     {
11388         return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : VMA_NULL;
11389     }
11390     const VmaVulkanFunctions& GetVulkanFunctions() const
11391     {
11392         return m_VulkanFunctions;
11393     }
11394 
11395     VkPhysicalDevice GetPhysicalDevice() const { return m_PhysicalDevice; }
11396 
11397     VkDeviceSize GetBufferImageGranularity() const
11398     {
11399         return VMA_MAX(
11400             static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
11401             m_PhysicalDeviceProperties.limits.bufferImageGranularity);
11402     }
11403 
11404     uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
11405     uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
11406 
11407     uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
11408     {
11409         VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
11410         return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
11411     }
11412     // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
11413     bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
11414     {
11415         return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
11416             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
11417     }
11418     // Minimum alignment for all allocations in specific memory type.
11419     VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
11420     {
11421         return IsMemoryTypeNonCoherent(memTypeIndex) ?
11422             VMA_MAX((VkDeviceSize)VMA_MIN_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
11423             (VkDeviceSize)VMA_MIN_ALIGNMENT;
11424     }
11425 
11426     bool IsIntegratedGpu() const
11427     {
11428         return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
11429     }
11430 
11431     uint32_t GetGlobalMemoryTypeBits() const { return m_GlobalMemoryTypeBits; }
11432 
11433     void GetBufferMemoryRequirements(
11434         VkBuffer hBuffer,
11435         VkMemoryRequirements& memReq,
11436         bool& requiresDedicatedAllocation,
11437         bool& prefersDedicatedAllocation) const;
11438     void GetImageMemoryRequirements(
11439         VkImage hImage,
11440         VkMemoryRequirements& memReq,
11441         bool& requiresDedicatedAllocation,
11442         bool& prefersDedicatedAllocation) const;
11443     VkResult FindMemoryTypeIndex(
11444         uint32_t memoryTypeBits,
11445         const VmaAllocationCreateInfo* pAllocationCreateInfo,
11446         VkFlags bufImgUsage, // VkBufferCreateInfo::usage or VkImageCreateInfo::usage. UINT32_MAX if unknown.
11447         uint32_t* pMemoryTypeIndex) const;
11448 
11449     // Main allocation function.
11450     VkResult AllocateMemory(
11451         const VkMemoryRequirements& vkMemReq,
11452         bool requiresDedicatedAllocation,
11453         bool prefersDedicatedAllocation,
11454         VkBuffer dedicatedBuffer,
11455         VkImage dedicatedImage,
11456         VkFlags dedicatedBufferImageUsage, // UINT32_MAX if unknown.
11457         const VmaAllocationCreateInfo& createInfo,
11458         VmaSuballocationType suballocType,
11459         size_t allocationCount,
11460         VmaAllocation* pAllocations);
11461 
11462     // Main deallocation function.
11463     void FreeMemory(
11464         size_t allocationCount,
11465         const VmaAllocation* pAllocations);
11466 
11467     void CalculateStatistics(VmaTotalStatistics* pStats);
11468 
11469     void GetHeapBudgets(
11470         VmaBudget* outBudgets, uint32_t firstHeap, uint32_t heapCount);
11471 
11472 #if VMA_STATS_STRING_ENABLED
11473     void PrintDetailedMap(class VmaJsonWriter& json);
11474 #endif
11475 
11476     void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
11477 
11478     VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
11479     void DestroyPool(VmaPool pool);
11480     void GetPoolStatistics(VmaPool pool, VmaStatistics* pPoolStats);
11481     void CalculatePoolStatistics(VmaPool pool, VmaDetailedStatistics* pPoolStats);
11482 
11483     void SetCurrentFrameIndex(uint32_t frameIndex);
11484     uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
11485 
11486     VkResult CheckPoolCorruption(VmaPool hPool);
11487     VkResult CheckCorruption(uint32_t memoryTypeBits);
11488 
11489     // Call to Vulkan function vkAllocateMemory with accompanying bookkeeping.
11490     VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
11491     // Call to Vulkan function vkFreeMemory with accompanying bookkeeping.
11492     void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
11493     // Call to Vulkan function vkBindBufferMemory or vkBindBufferMemory2KHR.
11494     VkResult BindVulkanBuffer(
11495         VkDeviceMemory memory,
11496         VkDeviceSize memoryOffset,
11497         VkBuffer buffer,
11498         const void* pNext);
11499     // Call to Vulkan function vkBindImageMemory or vkBindImageMemory2KHR.
11500     VkResult BindVulkanImage(
11501         VkDeviceMemory memory,
11502         VkDeviceSize memoryOffset,
11503         VkImage image,
11504         const void* pNext);
11505 
11506     VkResult Map(VmaAllocation hAllocation, void** ppData);
11507     void Unmap(VmaAllocation hAllocation);
11508 
11509     VkResult BindBufferMemory(
11510         VmaAllocation hAllocation,
11511         VkDeviceSize allocationLocalOffset,
11512         VkBuffer hBuffer,
11513         const void* pNext);
11514     VkResult BindImageMemory(
11515         VmaAllocation hAllocation,
11516         VkDeviceSize allocationLocalOffset,
11517         VkImage hImage,
11518         const void* pNext);
11519 
11520     VkResult FlushOrInvalidateAllocation(
11521         VmaAllocation hAllocation,
11522         VkDeviceSize offset, VkDeviceSize size,
11523         VMA_CACHE_OPERATION op);
11524     VkResult FlushOrInvalidateAllocations(
11525         uint32_t allocationCount,
11526         const VmaAllocation* allocations,
11527         const VkDeviceSize* offsets, const VkDeviceSize* sizes,
11528         VMA_CACHE_OPERATION op);
11529 
11530     void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
11531 
11532     /*
11533     Returns bit mask of memory types that can support defragmentation on GPU as
11534     they support creation of required buffer for copy operations.
11535     */
11536     uint32_t GetGpuDefragmentationMemoryTypeBits();
11537 
11538 #if VMA_EXTERNAL_MEMORY
11539     VkExternalMemoryHandleTypeFlagsKHR GetExternalMemoryHandleTypeFlags(uint32_t memTypeIndex) const
11540     {
11541         return m_TypeExternalMemoryHandleTypes[memTypeIndex];
11542     }
11543 #endif // #if VMA_EXTERNAL_MEMORY
11544 
11545 private:
11546     VkDeviceSize m_PreferredLargeHeapBlockSize;
11547 
11548     VkPhysicalDevice m_PhysicalDevice;
11549     VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
11550     VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.
11551 #if VMA_EXTERNAL_MEMORY
11552     VkExternalMemoryHandleTypeFlagsKHR m_TypeExternalMemoryHandleTypes[VK_MAX_MEMORY_TYPES];
11553 #endif // #if VMA_EXTERNAL_MEMORY
11554 
11555     VMA_RW_MUTEX m_PoolsMutex;
11556     typedef VmaIntrusiveLinkedList<VmaPoolListItemTraits> PoolList;
11557     // Protected by m_PoolsMutex.
11558     PoolList m_Pools;
11559     uint32_t m_NextPoolId;
11560 
11561     VmaVulkanFunctions m_VulkanFunctions;
11562 
11563     // Global bit mask AND-ed with any memoryTypeBits to disallow certain memory types.
11564     uint32_t m_GlobalMemoryTypeBits;
11565 
11566     void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
11567 
11568 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
11569     void ImportVulkanFunctions_Static();
11570 #endif
11571 
11572     void ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions);
11573 
11574 #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
11575     void ImportVulkanFunctions_Dynamic();
11576 #endif
11577 
11578     void ValidateVulkanFunctions();
11579 
11580     VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
11581 
11582     VkResult AllocateMemoryOfType(
11583         VmaPool pool,
11584         VkDeviceSize size,
11585         VkDeviceSize alignment,
11586         bool dedicatedPreferred,
11587         VkBuffer dedicatedBuffer,
11588         VkImage dedicatedImage,
11589         VkFlags dedicatedBufferImageUsage,
11590         const VmaAllocationCreateInfo& createInfo,
11591         uint32_t memTypeIndex,
11592         VmaSuballocationType suballocType,
11593         VmaDedicatedAllocationList& dedicatedAllocations,
11594         VmaBlockVector& blockVector,
11595         size_t allocationCount,
11596         VmaAllocation* pAllocations);
11597 
11598     // Helper function only to be used inside AllocateDedicatedMemory.
11599     VkResult AllocateDedicatedMemoryPage(
11600         VmaPool pool,
11601         VkDeviceSize size,
11602         VmaSuballocationType suballocType,
11603         uint32_t memTypeIndex,
11604         const VkMemoryAllocateInfo& allocInfo,
11605         bool map,
11606         bool isUserDataString,
11607         bool isMappingAllowed,
11608         void* pUserData,
11609         VmaAllocation* pAllocation);
11610 
11611     // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
11612     VkResult AllocateDedicatedMemory(
11613         VmaPool pool,
11614         VkDeviceSize size,
11615         VmaSuballocationType suballocType,
11616         VmaDedicatedAllocationList& dedicatedAllocations,
11617         uint32_t memTypeIndex,
11618         bool map,
11619         bool isUserDataString,
11620         bool isMappingAllowed,
11621         bool canAliasMemory,
11622         void* pUserData,
11623         float priority,
11624         VkBuffer dedicatedBuffer,
11625         VkImage dedicatedImage,
11626         VkFlags dedicatedBufferImageUsage,
11627         size_t allocationCount,
11628         VmaAllocation* pAllocations,
11629         const void* pNextChain = nullptr);
11630 
11631     void FreeDedicatedMemory(const VmaAllocation allocation);
11632 
11633     VkResult CalcMemTypeParams(
11634         VmaAllocationCreateInfo& outCreateInfo,
11635         uint32_t memTypeIndex,
11636         VkDeviceSize size,
11637         size_t allocationCount);
11638     VkResult CalcAllocationParams(
11639         VmaAllocationCreateInfo& outCreateInfo,
11640         bool dedicatedRequired,
11641         bool dedicatedPreferred);
11642 
11643     /*
11644     Calculates and returns bit mask of memory types that can support defragmentation
11645     on GPU as they support creation of required buffer for copy operations.
11646     */
11647     uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
11648     uint32_t CalculateGlobalMemoryTypeBits() const;
11649 
11650     bool GetFlushOrInvalidateRange(
11651         VmaAllocation allocation,
11652         VkDeviceSize offset, VkDeviceSize size,
11653         VkMappedMemoryRange& outRange) const;
11654 
11655 #if VMA_MEMORY_BUDGET
11656     void UpdateVulkanBudget();
11657 #endif // #if VMA_MEMORY_BUDGET
11658 };
11659 
11660 
11661 #ifndef _VMA_MEMORY_FUNCTIONS
11662 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
11663 {
11664     return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
11665 }
11666 
11667 static void VmaFree(VmaAllocator hAllocator, void* ptr)
11668 {
11669     VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
11670 }
11671 
11672 template<typename T>
11673 static T* VmaAllocate(VmaAllocator hAllocator)
11674 {
11675     return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
11676 }
11677 
11678 template<typename T>
11679 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
11680 {
11681     return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
11682 }
11683 
11684 template<typename T>
11685 static void vma_delete(VmaAllocator hAllocator, T* ptr)
11686 {
11687     if(ptr != VMA_NULL)
11688     {
11689         ptr->~T();
11690         VmaFree(hAllocator, ptr);
11691     }
11692 }
11693 
11694 template<typename T>
11695 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
11696 {
11697     if(ptr != VMA_NULL)
11698     {
11699         for(size_t i = count; i--; )
11700             ptr[i].~T();
11701         VmaFree(hAllocator, ptr);
11702     }
11703 }
11704 #endif // _VMA_MEMORY_FUNCTIONS
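// These overloads forward to the VkAllocationCallbacks-based versions defined
// earlier in this file, so every host-side object the allocator creates honors
// the callbacks the user passed in VmaAllocatorCreateInfo::pAllocationCallbacks.
// For example (a sketch), vma_new(hAllocator, VmaBlockMetadata_TLSF)(...) as
// used below expands to VmaAllocate<VmaBlockMetadata_TLSF>(hAllocator)
// followed by placement new, and vma_delete() undoes both steps.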
11705 
11706 #ifndef _VMA_DEVICE_MEMORY_BLOCK_FUNCTIONS
11707 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator)
11708     : m_pMetadata(VMA_NULL),
11709     m_MemoryTypeIndex(UINT32_MAX),
11710     m_Id(0),
11711     m_hMemory(VK_NULL_HANDLE),
11712     m_MapCount(0),
11713     m_pMappedData(VMA_NULL) {}
11714 
11715 VmaDeviceMemoryBlock::~VmaDeviceMemoryBlock()
11716 {
11717     VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
11718     VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
11719 }
11720 
11721 void VmaDeviceMemoryBlock::Init(
11722     VmaAllocator hAllocator,
11723     VmaPool hParentPool,
11724     uint32_t newMemoryTypeIndex,
11725     VkDeviceMemory newMemory,
11726     VkDeviceSize newSize,
11727     uint32_t id,
11728     uint32_t algorithm,
11729     VkDeviceSize bufferImageGranularity)
11730 {
11731     VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
11732 
11733     m_hParentPool = hParentPool;
11734     m_MemoryTypeIndex = newMemoryTypeIndex;
11735     m_Id = id;
11736     m_hMemory = newMemory;
11737 
11738     switch (algorithm)
11739     {
11740     case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
11741         m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator->GetAllocationCallbacks(),
11742             bufferImageGranularity, false); // isVirtual
11743         break;
11744     default:
11745         VMA_ASSERT(0);
11746         // Fall-through.
11747     case 0:
11748         m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_TLSF)(hAllocator->GetAllocationCallbacks(),
11749             bufferImageGranularity, false); // isVirtual
11750     }
11751     m_pMetadata->Init(newSize);
11752 }
11753 
11754 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
11755 {
11756     // Define macro VMA_DEBUG_LOG to receive the list of the unfreed allocations
11757     if (!m_pMetadata->IsEmpty())
11758         m_pMetadata->DebugLogAllAllocations();
11759     // This is the most important assert in the entire library.
11760     // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
11761     VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
11762 
11763     VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
11764     allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
11765     m_hMemory = VK_NULL_HANDLE;
11766 
11767     vma_delete(allocator, m_pMetadata);
11768     m_pMetadata = VMA_NULL;
11769 }
11770 
11771 void VmaDeviceMemoryBlock::PostFree(VmaAllocator hAllocator)
11772 {
11773     if(m_MappingHysteresis.PostFree())
11774     {
11775         VMA_ASSERT(m_MappingHysteresis.GetExtraMapping() == 0);
11776         if (m_MapCount == 0)
11777         {
11778             m_pMappedData = VMA_NULL;
11779             (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
11780         }
11781     }
11782 }
11783 
11784 bool VmaDeviceMemoryBlock::Validate() const
11785 {
11786     VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
11787         (m_pMetadata->GetSize() != 0));
11788 
11789     return m_pMetadata->Validate();
11790 }
11791 
11792 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
11793 {
11794     void* pData = nullptr;
11795     VkResult res = Map(hAllocator, 1, &pData);
11796     if (res != VK_SUCCESS)
11797     {
11798         return res;
11799     }
11800 
11801     res = m_pMetadata->CheckCorruption(pData);
11802 
11803     Unmap(hAllocator, 1);
11804 
11805     return res;
11806 }
11807 
11808 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
11809 {
11810     if (count == 0)
11811     {
11812         return VK_SUCCESS;
11813     }
11814 
11815     VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
11816     const uint32_t oldTotalMapCount = m_MapCount + m_MappingHysteresis.GetExtraMapping();
11817     m_MappingHysteresis.PostMap();
11818     if (oldTotalMapCount != 0)
11819     {
11820         m_MapCount += count;
11821         VMA_ASSERT(m_pMappedData != VMA_NULL);
11822         if (ppData != VMA_NULL)
11823         {
11824             *ppData = m_pMappedData;
11825         }
11826         return VK_SUCCESS;
11827     }
11828     else
11829     {
11830         VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
11831             hAllocator->m_hDevice,
11832             m_hMemory,
11833             0, // offset
11834             VK_WHOLE_SIZE,
11835             0, // flags
11836             &m_pMappedData);
11837         if (result == VK_SUCCESS)
11838         {
11839             if (ppData != VMA_NULL)
11840             {
11841                 *ppData = m_pMappedData;
11842             }
11843             m_MapCount = count;
11844         }
11845         return result;
11846     }
11847 }
11848 
11849 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
11850 {
11851     if (count == 0)
11852     {
11853         return;
11854     }
11855 
11856     VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
11857     if (m_MapCount >= count)
11858     {
11859         m_MapCount -= count;
11860         const uint32_t totalMapCount = m_MapCount + m_MappingHysteresis.GetExtraMapping();
11861         if (totalMapCount == 0)
11862         {
11863             m_pMappedData = VMA_NULL;
11864             (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
11865         }
11866         m_MappingHysteresis.PostUnmap();
11867     }
11868     else
11869     {
11870         VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
11871     }
11872 }
11873 
11874 VkResult VmaDeviceMemoryBlock::WriteMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11875 {
11876     VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11877 
11878     void* pData;
11879     VkResult res = Map(hAllocator, 1, &pData);
11880     if (res != VK_SUCCESS)
11881     {
11882         return res;
11883     }
11884 
11885     VmaWriteMagicValue(pData, allocOffset + allocSize);
11886 
11887     Unmap(hAllocator, 1);
11888     return VK_SUCCESS;
11889 }
11890 
11891 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11892 {
11893     VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11894 
11895     void* pData;
11896     VkResult res = Map(hAllocator, 1, &pData);
11897     if (res != VK_SUCCESS)
11898     {
11899         return res;
11900     }
11901 
11902     if (!VmaValidateMagicValue(pData, allocOffset + allocSize))
11903     {
11904         VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
11905     }
11906 
11907     Unmap(hAllocator, 1);
11908     return VK_SUCCESS;
11909 }
11910 
11911 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
11912     const VmaAllocator hAllocator,
11913     const VmaAllocation hAllocation,
11914     VkDeviceSize allocationLocalOffset,
11915     VkBuffer hBuffer,
11916     const void* pNext)
11917 {
11918     VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11919         hAllocation->GetBlock() == this);
11920     VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
11921         "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
11922     const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
11923     // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11924     VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
11925     return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext);
11926 }
11927 
11928 VkResult VmaDeviceMemoryBlock::BindImageMemory(
11929     const VmaAllocator hAllocator,
11930     const VmaAllocation hAllocation,
11931     VkDeviceSize allocationLocalOffset,
11932     VkImage hImage,
11933     const void* pNext)
11934 {
11935     VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11936         hAllocation->GetBlock() == this);
11937     VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
11938         "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
11939     const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
11940     // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11941     VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
11942     return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext);
11943 }
11944 #endif // _VMA_DEVICE_MEMORY_BLOCK_FUNCTIONS
11945 
11946 #ifndef _VMA_ALLOCATION_T_FUNCTIONS
11947 VmaAllocation_T::VmaAllocation_T(bool mappingAllowed)
11948     : m_Alignment{ 1 },
11949     m_Size{ 0 },
11950     m_pUserData{ VMA_NULL },
11951     m_pName{ VMA_NULL },
11952     m_MemoryTypeIndex{ 0 },
11953     m_Type{ (uint8_t)ALLOCATION_TYPE_NONE },
11954     m_SuballocationType{ (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN },
11955     m_MapCount{ 0 },
11956     m_Flags{ 0 }
11957 {
11958     if(mappingAllowed)
11959         m_Flags |= (uint8_t)FLAG_MAPPING_ALLOWED;
11960 
11961 #if VMA_STATS_STRING_ENABLED
11962     m_BufferImageUsage = 0;
11963 #endif
11964 }
11965 
11966 VmaAllocation_T::~VmaAllocation_T()
11967 {
11968     VMA_ASSERT(m_MapCount == 0 && "Allocation was not unmapped before destruction.");
11969 
11970     // Check if owned string was freed.
11971     VMA_ASSERT(m_pName == VMA_NULL);
11972 }
11973 
11974 void VmaAllocation_T::InitBlockAllocation(
11975     VmaDeviceMemoryBlock* block,
11976     VmaAllocHandle allocHandle,
11977     VkDeviceSize alignment,
11978     VkDeviceSize size,
11979     uint32_t memoryTypeIndex,
11980     VmaSuballocationType suballocationType,
11981     bool mapped)
11982 {
11983     VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
11984     VMA_ASSERT(block != VMA_NULL);
11985     m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
11986     m_Alignment = alignment;
11987     m_Size = size;
11988     m_MemoryTypeIndex = memoryTypeIndex;
11989     if(mapped)
11990     {
11991         VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it.");
11992         m_Flags |= (uint8_t)FLAG_PERSISTENT_MAP;
11993     }
11994     m_SuballocationType = (uint8_t)suballocationType;
11995     m_BlockAllocation.m_Block = block;
11996     m_BlockAllocation.m_AllocHandle = allocHandle;
11997 }
11998 
11999 void VmaAllocation_T::InitDedicatedAllocation(
12000     VmaPool hParentPool,
12001     uint32_t memoryTypeIndex,
12002     VkDeviceMemory hMemory,
12003     VmaSuballocationType suballocationType,
12004     void* pMappedData,
12005     VkDeviceSize size)
12006 {
12007     VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
12008     VMA_ASSERT(hMemory != VK_NULL_HANDLE);
12009     m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
12010     m_Alignment = 0;
12011     m_Size = size;
12012     m_MemoryTypeIndex = memoryTypeIndex;
12013     m_SuballocationType = (uint8_t)suballocationType;
12014     if(pMappedData != VMA_NULL)
12015     {
12016         VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it.");
12017         m_Flags |= (uint8_t)FLAG_PERSISTENT_MAP;
12018     }
12019     m_DedicatedAllocation.m_hParentPool = hParentPool;
12020     m_DedicatedAllocation.m_hMemory = hMemory;
12021     m_DedicatedAllocation.m_pMappedData = pMappedData;
12022     m_DedicatedAllocation.m_Prev = VMA_NULL;
12023     m_DedicatedAllocation.m_Next = VMA_NULL;
12024 }
12025 
12026 void VmaAllocation_T::SetName(VmaAllocator hAllocator, const char* pName)
12027 {
12028     VMA_ASSERT(pName == VMA_NULL || pName != m_pName);
12029 
12030     FreeName(hAllocator);
12031 
12032     if (pName != VMA_NULL)
12033         m_pName = VmaCreateStringCopy(hAllocator->GetAllocationCallbacks(), pName);
12034 }
12035 
12036 uint8_t VmaAllocation_T::SwapBlockAllocation(VmaAllocator hAllocator, VmaAllocation allocation)
12037 {
12038     VMA_ASSERT(allocation != VMA_NULL);
12039     VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
12040     VMA_ASSERT(allocation->m_Type == ALLOCATION_TYPE_BLOCK);
12041 
12042     if (m_MapCount != 0)
12043         m_BlockAllocation.m_Block->Unmap(hAllocator, m_MapCount);
12044 
12045     m_BlockAllocation.m_Block->m_pMetadata->SetAllocationUserData(m_BlockAllocation.m_AllocHandle, allocation);
12046     VMA_SWAP(m_BlockAllocation, allocation->m_BlockAllocation);
12047     m_BlockAllocation.m_Block->m_pMetadata->SetAllocationUserData(m_BlockAllocation.m_AllocHandle, this);
12048 
12049 #if VMA_STATS_STRING_ENABLED
12050     VMA_SWAP(m_BufferImageUsage, allocation->m_BufferImageUsage);
12051 #endif
12052     return m_MapCount;
12053 }
12054 
VmaAllocHandle VmaAllocation_T::GetAllocHandle() const
{
    switch (m_Type)
    {
    case ALLOCATION_TYPE_BLOCK:
        return m_BlockAllocation.m_AllocHandle;
    case ALLOCATION_TYPE_DEDICATED:
        return VK_NULL_HANDLE;
    default:
        VMA_ASSERT(0);
        return VK_NULL_HANDLE;
    }
}

VkDeviceSize VmaAllocation_T::GetOffset() const
{
    switch (m_Type)
    {
    case ALLOCATION_TYPE_BLOCK:
        return m_BlockAllocation.m_Block->m_pMetadata->GetAllocationOffset(m_BlockAllocation.m_AllocHandle);
    case ALLOCATION_TYPE_DEDICATED:
        return 0;
    default:
        VMA_ASSERT(0);
        return 0;
    }
}

VmaPool VmaAllocation_T::GetParentPool() const
{
    switch (m_Type)
    {
    case ALLOCATION_TYPE_BLOCK:
        return m_BlockAllocation.m_Block->GetParentPool();
    case ALLOCATION_TYPE_DEDICATED:
        return m_DedicatedAllocation.m_hParentPool;
    default:
        VMA_ASSERT(0);
        return VK_NULL_HANDLE;
    }
}

VkDeviceMemory VmaAllocation_T::GetMemory() const
{
    switch (m_Type)
    {
    case ALLOCATION_TYPE_BLOCK:
        return m_BlockAllocation.m_Block->GetDeviceMemory();
    case ALLOCATION_TYPE_DEDICATED:
        return m_DedicatedAllocation.m_hMemory;
    default:
        VMA_ASSERT(0);
        return VK_NULL_HANDLE;
    }
}

void* VmaAllocation_T::GetMappedData() const
{
    switch (m_Type)
    {
    case ALLOCATION_TYPE_BLOCK:
        if (m_MapCount != 0 || IsPersistentMap())
        {
            void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
            VMA_ASSERT(pBlockData != VMA_NULL);
            return (char*)pBlockData + GetOffset();
        }
        else
        {
            return VMA_NULL;
        }
        break;
    case ALLOCATION_TYPE_DEDICATED:
        VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0 || IsPersistentMap()));
        return m_DedicatedAllocation.m_pMappedData;
    default:
        VMA_ASSERT(0);
        return VMA_NULL;
    }
}

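// The two helpers below only maintain the 8-bit per-allocation map reference count;
// the actual vkMapMemory/vkUnmapMemory calls happen at the VmaDeviceMemoryBlock
// level, which shares a single mapping among all allocations in the block
// (GetMappedData above returns the block pointer plus this allocation's offset).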
void VmaAllocation_T::BlockAllocMap()
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it.");

    if (m_MapCount < 0xFF)
    {
        ++m_MapCount;
    }
    else
    {
        VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
    }
}

void VmaAllocation_T::BlockAllocUnmap()
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);

    if (m_MapCount > 0)
    {
        --m_MapCount;
    }
    else
    {
        VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
    }
}

VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it.");

    if (m_MapCount != 0 || IsPersistentMap())
    {
        if (m_MapCount < 0xFF)
        {
            VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
            *ppData = m_DedicatedAllocation.m_pMappedData;
            ++m_MapCount;
            return VK_SUCCESS;
        }
        else
        {
            VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
            return VK_ERROR_MEMORY_MAP_FAILED;
        }
    }
    else
    {
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_DedicatedAllocation.m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            ppData);
        if (result == VK_SUCCESS)
        {
            m_DedicatedAllocation.m_pMappedData = *ppData;
            m_MapCount = 1;
        }
        return result;
    }
}

void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if (m_MapCount > 0)
    {
        --m_MapCount;
        if (m_MapCount == 0 && !IsPersistentMap())
        {
            m_DedicatedAllocation.m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
                hAllocator->m_hDevice,
                m_DedicatedAllocation.m_hMemory);
        }
    }
    else
    {
        VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    }
}

#if VMA_STATS_STRING_ENABLED
void VmaAllocation_T::InitBufferImageUsage(uint32_t bufferImageUsage)
{
    VMA_ASSERT(m_BufferImageUsage == 0);
    m_BufferImageUsage = bufferImageUsage;
}

void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
{
    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);

    json.WriteString("Size");
    json.WriteNumber(m_Size);
    json.WriteString("Usage");
    json.WriteNumber(m_BufferImageUsage);

    if (m_pUserData != VMA_NULL)
    {
        json.WriteString("CustomData");
        json.BeginString();
        json.ContinueString_Pointer(m_pUserData);
        json.EndString();
    }
    if (m_pName != VMA_NULL)
    {
        json.WriteString("Name");
        json.WriteString(m_pName);
    }
}
#endif // VMA_STATS_STRING_ENABLED

void VmaAllocation_T::FreeName(VmaAllocator hAllocator)
{
    if(m_pName)
    {
        VmaFreeString(hAllocator->GetAllocationCallbacks(), m_pName);
        m_pName = VMA_NULL;
    }
}
#endif // _VMA_ALLOCATION_T_FUNCTIONS

#ifndef _VMA_BLOCK_VECTOR_FUNCTIONS
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    VmaPool hParentPool,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    bool explicitBlockSize,
    uint32_t algorithm,
    float priority,
    VkDeviceSize minAllocationAlignment,
    void* pMemoryAllocateNext)
    : m_hAllocator(hAllocator),
    m_hParentPool(hParentPool),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_ExplicitBlockSize(explicitBlockSize),
    m_Algorithm(algorithm),
    m_Priority(priority),
    m_MinAllocationAlignment(minAllocationAlignment),
    m_pMemoryAllocateNext(pMemoryAllocateNext),
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_NextBlockId(0) {}

VmaBlockVector::~VmaBlockVector()
{
    for (size_t i = m_Blocks.size(); i--; )
    {
        m_Blocks[i]->Destroy(m_hAllocator);
        vma_delete(m_hAllocator, m_Blocks[i]);
    }
}

VkResult VmaBlockVector::CreateMinBlocks()
{
    for (size_t i = 0; i < m_MinBlockCount; ++i)
    {
        VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
        if (res != VK_SUCCESS)
        {
            return res;
        }
    }
    return VK_SUCCESS;
}

void VmaBlockVector::AddStatistics(VmaStatistics& inoutStats)
{
    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);

    const size_t blockCount = m_Blocks.size();
    for (uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
        VMA_ASSERT(pBlock);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        pBlock->m_pMetadata->AddStatistics(inoutStats);
    }
}

void VmaBlockVector::AddDetailedStatistics(VmaDetailedStatistics& inoutStats)
{
    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);

    const size_t blockCount = m_Blocks.size();
    for (uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
        VMA_ASSERT(pBlock);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        pBlock->m_pMetadata->AddDetailedStatistics(inoutStats);
    }
}

bool VmaBlockVector::IsEmpty()
{
    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    return m_Blocks.empty();
}

bool VmaBlockVector::IsCorruptionDetectionEnabled() const
{
    const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
        (VMA_DEBUG_MARGIN > 0) &&
        (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
        (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
}

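// Allocate (below) creates allocationCount pages under a single mutex hold. On the
// first failure it frees every page created so far and zeroes the output array, so
// the call is all-or-nothing from the caller's perspective.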
VkResult VmaBlockVector::Allocate(
    VkDeviceSize size,
    VkDeviceSize alignment,
    const VmaAllocationCreateInfo& createInfo,
    VmaSuballocationType suballocType,
    size_t allocationCount,
    VmaAllocation* pAllocations)
{
    size_t allocIndex;
    VkResult res = VK_SUCCESS;

    alignment = VMA_MAX(alignment, m_MinAllocationAlignment);

    if (IsCorruptionDetectionEnabled())
    {
        size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
        alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
    }

    {
        VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
        for (allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
        {
            res = AllocatePage(
                size,
                alignment,
                createInfo,
                suballocType,
                pAllocations + allocIndex);
            if (res != VK_SUCCESS)
            {
                break;
            }
        }
    }

    if (res != VK_SUCCESS)
    {
        // Free all already created allocations.
        while (allocIndex--)
            Free(pAllocations[allocIndex]);
        memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    }

    return res;
}

VkResult VmaBlockVector::AllocatePage(
    VkDeviceSize size,
    VkDeviceSize alignment,
    const VmaAllocationCreateInfo& createInfo,
    VmaSuballocationType suballocType,
    VmaAllocation* pAllocation)
{
    const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;

    VkDeviceSize freeMemory;
    {
        const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
        VmaBudget heapBudget = {};
        m_hAllocator->GetHeapBudgets(&heapBudget, heapIndex, 1);
        freeMemory = (heapBudget.usage < heapBudget.budget) ? (heapBudget.budget - heapBudget.usage) : 0;
    }

    const bool canFallbackToDedicated = !HasExplicitBlockSize() &&
        (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0;
    const bool canCreateNewBlock =
        ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
        (m_Blocks.size() < m_MaxBlockCount) &&
        (freeMemory >= size || !canFallbackToDedicated);
    uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;

    // Upper address can only be used with linear allocator and within single memory block.
    if (isUpperAddress &&
        (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
    {
        return VK_ERROR_FEATURE_NOT_PRESENT;
    }

    // Early reject: requested allocation size is larger than the maximum block size for this block vector.
    if (size + VMA_DEBUG_MARGIN > m_PreferredBlockSize)
    {
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }

    // 1. Search existing allocations. Try to allocate.
    if (m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    {
        // Use only last block.
        if (!m_Blocks.empty())
        {
            VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
            VMA_ASSERT(pCurrBlock);
            VkResult res = AllocateFromBlock(
                pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
            if (res == VK_SUCCESS)
            {
                VMA_DEBUG_LOG("    Returned from last block #%u", pCurrBlock->GetId());
                IncrementallySortBlocks();
                return VK_SUCCESS;
            }
        }
    }
    else
    {
        if (strategy != VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT) // MIN_MEMORY or default
        {
            const bool isHostVisible =
                (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
            if(isHostVisible)
            {
                const bool isMappingAllowed = (createInfo.flags &
                    (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0;
                /*
                For non-mappable allocations, check blocks that are not mapped first.
                For mappable allocations, check blocks that are already mapped first.
                This way, having many blocks, we will separate mappable and non-mappable allocations,
                hopefully limiting the number of blocks that are mapped, which will help tools like RenderDoc.
                */
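                // Decoding the predicate below: pass 0 (mappingI == 0) visits blocks
                // whose current mapped state matches this allocation's mappability;
                // pass 1 visits the remaining blocks.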
                for(size_t mappingI = 0; mappingI < 2; ++mappingI)
                {
                    // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
                    for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
                    {
                        VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
                        VMA_ASSERT(pCurrBlock);
                        const bool isBlockMapped = pCurrBlock->GetMappedData() != VMA_NULL;
                        if((mappingI == 0) == (isMappingAllowed == isBlockMapped))
                        {
                            VkResult res = AllocateFromBlock(
                                pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
                            if (res == VK_SUCCESS)
                            {
                                VMA_DEBUG_LOG("    Returned from existing block #%u", pCurrBlock->GetId());
                                IncrementallySortBlocks();
                                return VK_SUCCESS;
                            }
                        }
                    }
                }
            }
            else
            {
                // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
                for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
                {
                    VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
                    VMA_ASSERT(pCurrBlock);
                    VkResult res = AllocateFromBlock(
                        pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
                    if (res == VK_SUCCESS)
                    {
                        VMA_DEBUG_LOG("    Returned from existing block #%u", pCurrBlock->GetId());
                        IncrementallySortBlocks();
                        return VK_SUCCESS;
                    }
                }
            }
        }
        else // VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT
        {
            // Backward order in m_Blocks - prefer blocks with largest amount of free space.
            for (size_t blockIndex = m_Blocks.size(); blockIndex--; )
            {
                VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
                VMA_ASSERT(pCurrBlock);
                VkResult res = AllocateFromBlock(pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
                if (res == VK_SUCCESS)
                {
                    VMA_DEBUG_LOG("    Returned from existing block #%u", pCurrBlock->GetId());
                    IncrementallySortBlocks();
                    return VK_SUCCESS;
                }
            }
        }
    }

    // 2. Try to create new block.
    if (canCreateNewBlock)
    {
        // Calculate optimal size for new block.
        VkDeviceSize newBlockSize = m_PreferredBlockSize;
        uint32_t newBlockSizeShift = 0;
        const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;

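        // Illustrative example (assuming the commonly used 256 MiB default preferred
        // block size and a small request): the loop below halves 256 -> 128 -> 64 -> 32 MiB,
        // so successive new blocks grow as 32, 64, 128, then 256 MiB as the pool fills up.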
        if (!m_ExplicitBlockSize)
        {
            // Allocate 1/8, 1/4, 1/2 as first blocks.
            const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
            for (uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
            {
                const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
                if (smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
                {
                    newBlockSize = smallerNewBlockSize;
                    ++newBlockSizeShift;
                }
                else
                {
                    break;
                }
            }
        }

        size_t newBlockIndex = 0;
        VkResult res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
            CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
        // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
        if (!m_ExplicitBlockSize)
        {
            while (res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
            {
                const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
                if (smallerNewBlockSize >= size)
                {
                    newBlockSize = smallerNewBlockSize;
                    ++newBlockSizeShift;
                    res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
                        CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
                }
                else
                {
                    break;
                }
            }
        }

        if (res == VK_SUCCESS)
        {
            VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
            VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);

            res = AllocateFromBlock(
                pBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
            if (res == VK_SUCCESS)
            {
                VMA_DEBUG_LOG("    Created new block #%u Size=%llu", pBlock->GetId(), newBlockSize);
                IncrementallySortBlocks();
                return VK_SUCCESS;
            }
            else
            {
                // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
                return VK_ERROR_OUT_OF_DEVICE_MEMORY;
            }
        }
    }

    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}

void VmaBlockVector::Free(const VmaAllocation hAllocation)
{
    VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;

    bool budgetExceeded = false;
    {
        const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
        VmaBudget heapBudget = {};
        m_hAllocator->GetHeapBudgets(&heapBudget, heapIndex, 1);
        budgetExceeded = heapBudget.usage >= heapBudget.budget;
    }

    // Scope for lock.
    {
        VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);

        VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();

        if (IsCorruptionDetectionEnabled())
        {
            VkResult res = pBlock->ValidateMagicValueAfterAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
        }

        if (hAllocation->IsPersistentMap())
        {
            pBlock->Unmap(m_hAllocator, 1);
        }

        const bool hadEmptyBlockBeforeFree = HasEmptyBlock();
        pBlock->m_pMetadata->Free(hAllocation->GetAllocHandle());
        pBlock->PostFree(m_hAllocator);
        VMA_HEAVY_ASSERT(pBlock->Validate());

        VMA_DEBUG_LOG("  Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);

        const bool canDeleteBlock = m_Blocks.size() > m_MinBlockCount;
        // pBlock became empty after this deallocation.
        if (pBlock->m_pMetadata->IsEmpty())
        {
            // There was already an empty block. We don't want two, so delete this one.
            if ((hadEmptyBlockBeforeFree || budgetExceeded) && canDeleteBlock)
            {
                pBlockToDelete = pBlock;
                Remove(pBlock);
            }
            // else: Keep the single empty block - a hysteresis that avoids freeing and
            // reallocating a whole block back and forth.
        }
        // pBlock didn't become empty, but we have another empty block - find and free that one.
        // (This is optional, heuristics.)
        else if (hadEmptyBlockBeforeFree && canDeleteBlock)
        {
            VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
            if (pLastBlock->m_pMetadata->IsEmpty())
            {
                pBlockToDelete = pLastBlock;
                m_Blocks.pop_back();
            }
        }

        IncrementallySortBlocks();
    }

    // Destruction of a free block. Deferred until this point, outside of the mutex
    // lock, for performance reasons.
    if (pBlockToDelete != VMA_NULL)
    {
        VMA_DEBUG_LOG("    Deleted empty block #%u", pBlockToDelete->GetId());
        pBlockToDelete->Destroy(m_hAllocator);
        vma_delete(m_hAllocator, pBlockToDelete);
    }

    m_hAllocator->m_Budget.RemoveAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), hAllocation->GetSize());
    m_hAllocator->m_AllocationObjectAllocator.Free(hAllocation);
}

VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
{
    VkDeviceSize result = 0;
    for (size_t i = m_Blocks.size(); i--; )
    {
        result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
        if (result >= m_PreferredBlockSize)
        {
            break;
        }
    }
    return result;
}

void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
{
    for (uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    {
        if (m_Blocks[blockIndex] == pBlock)
        {
            VmaVectorRemove(m_Blocks, blockIndex);
            return;
        }
    }
    VMA_ASSERT(0);
}

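// A single bubble-sort step that stops at the first swap keeps m_Blocks roughly
// ordered by ascending free space at O(n) worst case per call, amortizing the
// sorting work across many allocations instead of fully re-sorting each time.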
void VmaBlockVector::IncrementallySortBlocks()
{
    if (!m_IncrementalSort)
        return;
    if (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    {
        // Bubble sort only until first swap.
        for (size_t i = 1; i < m_Blocks.size(); ++i)
        {
            if (m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
            {
                VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
                return;
            }
        }
    }
}

void VmaBlockVector::SortByFreeSize()
{
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(),
        [](VmaDeviceMemoryBlock* b1, VmaDeviceMemoryBlock* b2) -> bool
        {
            return b1->m_pMetadata->GetSumFreeSize() < b2->m_pMetadata->GetSumFreeSize();
        });
}

VkResult VmaBlockVector::AllocateFromBlock(
    VmaDeviceMemoryBlock* pBlock,
    VkDeviceSize size,
    VkDeviceSize alignment,
    VmaAllocationCreateFlags allocFlags,
    void* pUserData,
    VmaSuballocationType suballocType,
    uint32_t strategy,
    VmaAllocation* pAllocation)
{
    const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;

    VmaAllocationRequest currRequest = {};
    if (pBlock->m_pMetadata->CreateAllocationRequest(
        size,
        alignment,
        isUpperAddress,
        suballocType,
        strategy,
        &currRequest))
    {
        return CommitAllocationRequest(currRequest, pBlock, alignment, allocFlags, pUserData, suballocType, pAllocation);
    }
    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}

VkResult VmaBlockVector::CommitAllocationRequest(
    VmaAllocationRequest& allocRequest,
    VmaDeviceMemoryBlock* pBlock,
    VkDeviceSize alignment,
    VmaAllocationCreateFlags allocFlags,
    void* pUserData,
    VmaSuballocationType suballocType,
    VmaAllocation* pAllocation)
{
    const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    const bool isMappingAllowed = (allocFlags &
        (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0;

    pBlock->PostAlloc();
    // Allocate from pBlock.
    if (mapped)
    {
        VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
        if (res != VK_SUCCESS)
        {
            return res;
        }
    }

    *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(isMappingAllowed);
    pBlock->m_pMetadata->Alloc(allocRequest, suballocType, *pAllocation);
    (*pAllocation)->InitBlockAllocation(
        pBlock,
        allocRequest.allocHandle,
        alignment,
        allocRequest.size, // Not size, as the actual allocation size may be larger than requested!
        m_MemoryTypeIndex,
        suballocType,
        mapped);
    VMA_HEAVY_ASSERT(pBlock->Validate());
    if (isUserDataString)
        (*pAllocation)->SetName(m_hAllocator, (const char*)pUserData);
    else
        (*pAllocation)->SetUserData(m_hAllocator, pUserData);
    m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), allocRequest.size);
    if (VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    {
        m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    }
    if (IsCorruptionDetectionEnabled())
    {
        VkResult res = pBlock->WriteMagicValueAfterAllocation(m_hAllocator, (*pAllocation)->GetOffset(), allocRequest.size);
        VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    }
    return VK_SUCCESS;
}

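// CreateBlock (below) assembles the VkMemoryAllocateInfo::pNext chain with
// VmaPnextChainPushFront, so each optional struct (device-address flags, memory
// priority, external-memory export info) is prepended ahead of any user-supplied
// m_pMemoryAllocateNext chain.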
VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
{
    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.pNext = m_pMemoryAllocateNext;
    allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    allocInfo.allocationSize = blockSize;

#if VMA_BUFFER_DEVICE_ADDRESS
    // Every standalone block can potentially contain a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT - always enable the feature.
    VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
    if (m_hAllocator->m_UseKhrBufferDeviceAddress)
    {
        allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
        VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
    }
#endif // VMA_BUFFER_DEVICE_ADDRESS

#if VMA_MEMORY_PRIORITY
    VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT };
    if (m_hAllocator->m_UseExtMemoryPriority)
    {
        VMA_ASSERT(m_Priority >= 0.f && m_Priority <= 1.f);
        priorityInfo.priority = m_Priority;
        VmaPnextChainPushFront(&allocInfo, &priorityInfo);
    }
#endif // VMA_MEMORY_PRIORITY

#if VMA_EXTERNAL_MEMORY
    // Attach VkExportMemoryAllocateInfoKHR if necessary.
    VkExportMemoryAllocateInfoKHR exportMemoryAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR };
    exportMemoryAllocInfo.handleTypes = m_hAllocator->GetExternalMemoryHandleTypeFlags(m_MemoryTypeIndex);
    if (exportMemoryAllocInfo.handleTypes != 0)
    {
        VmaPnextChainPushFront(&allocInfo, &exportMemoryAllocInfo);
    }
#endif // VMA_EXTERNAL_MEMORY

    VkDeviceMemory mem = VK_NULL_HANDLE;
    VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    if (res < 0)
    {
        return res;
    }

    // New VkDeviceMemory successfully created.

    // Create a new block object for it.
    VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    pBlock->Init(
        m_hAllocator,
        m_hParentPool,
        m_MemoryTypeIndex,
        mem,
        allocInfo.allocationSize,
        m_NextBlockId++,
        m_Algorithm,
        m_BufferImageGranularity);

    m_Blocks.push_back(pBlock);
    if (pNewBlockIndex != VMA_NULL)
    {
        *pNewBlockIndex = m_Blocks.size() - 1;
    }

    return VK_SUCCESS;
}

bool VmaBlockVector::HasEmptyBlock()
{
    for (size_t index = 0, count = m_Blocks.size(); index < count; ++index)
    {
        VmaDeviceMemoryBlock* const pBlock = m_Blocks[index];
        if (pBlock->m_pMetadata->IsEmpty())
        {
            return true;
        }
    }
    return false;
}

#if VMA_STATS_STRING_ENABLED
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);

    json.BeginObject();
    for (size_t i = 0; i < m_Blocks.size(); ++i)
    {
        json.BeginString();
        json.ContinueString(m_Blocks[i]->GetId());
        json.EndString();

        json.BeginObject();
        json.WriteString("MapRefCount");
        json.WriteNumber(m_Blocks[i]->GetMapRefCount());

        m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
        json.EndObject();
    }
    json.EndObject();
}
#endif // VMA_STATS_STRING_ENABLED

VkResult VmaBlockVector::CheckCorruption()
{
    if (!IsCorruptionDetectionEnabled())
    {
        return VK_ERROR_FEATURE_NOT_PRESENT;
    }

    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    for (uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    {
        VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
        VMA_ASSERT(pBlock);
        VkResult res = pBlock->CheckCorruption(m_hAllocator);
        if (res != VK_SUCCESS)
        {
            return res;
        }
    }
    return VK_SUCCESS;
}

#endif // _VMA_BLOCK_VECTOR_FUNCTIONS

#ifndef _VMA_DEFRAGMENTATION_CONTEXT_FUNCTIONS
VmaDefragmentationContext_T::VmaDefragmentationContext_T(
    VmaAllocator hAllocator,
    const VmaDefragmentationInfo& info)
    : m_MaxPassBytes(info.maxBytesPerPass == 0 ? VK_WHOLE_SIZE : info.maxBytesPerPass),
    m_MaxPassAllocations(info.maxAllocationsPerPass == 0 ? UINT32_MAX : info.maxAllocationsPerPass),
    m_MoveAllocator(hAllocator->GetAllocationCallbacks()),
    m_Moves(m_MoveAllocator)
{
    m_Algorithm = info.flags & VMA_DEFRAGMENTATION_FLAG_ALGORITHM_MASK;

    if (info.pool != VMA_NULL)
    {
        m_BlockVectorCount = 1;
        m_PoolBlockVector = &info.pool->m_BlockVector;
        m_pBlockVectors = &m_PoolBlockVector;
        m_PoolBlockVector->SetIncrementalSort(false);
        m_PoolBlockVector->SortByFreeSize();
    }
    else
    {
        m_BlockVectorCount = hAllocator->GetMemoryTypeCount();
        m_PoolBlockVector = VMA_NULL;
        m_pBlockVectors = hAllocator->m_pBlockVectors;
        for (uint32_t i = 0; i < m_BlockVectorCount; ++i)
        {
            VmaBlockVector* vector = m_pBlockVectors[i];
            if (vector != VMA_NULL)
            {
                vector->SetIncrementalSort(false);
                vector->SortByFreeSize();
            }
        }
    }

    switch (m_Algorithm)
    {
    case 0: // Default algorithm
        m_Algorithm = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT;
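        // Intentional fallthrough: the default (0) is handled as BALANCED.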
    case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT:
    {
        m_AlgorithmState = vma_new_array(hAllocator, StateBalanced, m_BlockVectorCount);
        break;
    }
    case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT:
    {
        if (hAllocator->GetBufferImageGranularity() > 1)
        {
            m_AlgorithmState = vma_new_array(hAllocator, StateExtensive, m_BlockVectorCount);
        }
        break;
    }
    }
}

VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
{
    if (m_PoolBlockVector != VMA_NULL)
    {
        m_PoolBlockVector->SetIncrementalSort(true);
    }
    else
    {
        for (uint32_t i = 0; i < m_BlockVectorCount; ++i)
        {
            VmaBlockVector* vector = m_pBlockVectors[i];
            if (vector != VMA_NULL)
                vector->SetIncrementalSort(true);
        }
    }

    if (m_AlgorithmState)
    {
        switch (m_Algorithm)
        {
        case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT:
            vma_delete_array(m_MoveAllocator.m_pCallbacks, reinterpret_cast<StateBalanced*>(m_AlgorithmState), m_BlockVectorCount);
            break;
        case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT:
            vma_delete_array(m_MoveAllocator.m_pCallbacks, reinterpret_cast<StateExtensive*>(m_AlgorithmState), m_BlockVectorCount);
            break;
        default:
            VMA_ASSERT(0);
        }
    }
}

VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPassMoveInfo& moveInfo)
{
    if (m_PoolBlockVector != VMA_NULL)
    {
        VmaMutexLockWrite lock(m_PoolBlockVector->GetMutex(), m_PoolBlockVector->GetAllocator()->m_UseMutex);

        if (m_PoolBlockVector->GetBlockCount() > 1)
            ComputeDefragmentation(*m_PoolBlockVector, 0);
        else if (m_PoolBlockVector->GetBlockCount() == 1)
            ReallocWithinBlock(*m_PoolBlockVector, m_PoolBlockVector->GetBlock(0));
    }
    else
    {
        for (uint32_t i = 0; i < m_BlockVectorCount; ++i)
        {
            if (m_pBlockVectors[i] != VMA_NULL)
            {
                VmaMutexLockWrite lock(m_pBlockVectors[i]->GetMutex(), m_pBlockVectors[i]->GetAllocator()->m_UseMutex);

                if (m_pBlockVectors[i]->GetBlockCount() > 1)
                {
                    if (ComputeDefragmentation(*m_pBlockVectors[i], i))
                        break;
                }
                else if (m_pBlockVectors[i]->GetBlockCount() == 1)
                {
                    if (ReallocWithinBlock(*m_pBlockVectors[i], m_pBlockVectors[i]->GetBlock(0)))
                        break;
                }
            }
        }
    }

    moveInfo.moveCount = static_cast<uint32_t>(m_Moves.size());
    if (moveInfo.moveCount > 0)
    {
        moveInfo.pMoves = m_Moves.data();
        return VK_INCOMPLETE;
    }

    moveInfo.pMoves = VMA_NULL;
    return VK_SUCCESS;
}

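// DefragmentPassEnd (below) applies the user's verdict recorded in each move:
// - COPY (default): keep the move - swap this allocation into the destination block
//   and free the temporary destination allocation;
// - IGNORE: roll back the pass statistics and remember the source block as immovable;
// - DESTROY: the user destroyed the resource, so free both the source allocation and
//   the temporary destination allocation.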
VkResult VmaDefragmentationContext_T::DefragmentPassEnd(VmaDefragmentationPassMoveInfo& moveInfo)
{
    VMA_ASSERT(moveInfo.moveCount > 0 ? moveInfo.pMoves != VMA_NULL : true);

    VkResult result = VK_SUCCESS;
    VmaStlAllocator<FragmentedBlock> blockAllocator(m_MoveAllocator.m_pCallbacks);
    VmaVector<FragmentedBlock, VmaStlAllocator<FragmentedBlock>> immovableBlocks(blockAllocator);
    VmaVector<FragmentedBlock, VmaStlAllocator<FragmentedBlock>> mappedBlocks(blockAllocator);

    VmaAllocator allocator = VMA_NULL;
    for (uint32_t i = 0; i < moveInfo.moveCount; ++i)
    {
        VmaDefragmentationMove& move = moveInfo.pMoves[i];
        size_t prevCount = 0, currentCount = 0;
        VkDeviceSize freedBlockSize = 0;

        uint32_t vectorIndex;
        VmaBlockVector* vector;
        if (m_PoolBlockVector != VMA_NULL)
        {
            vectorIndex = 0;
            vector = m_PoolBlockVector;
        }
        else
        {
            vectorIndex = move.srcAllocation->GetMemoryTypeIndex();
            vector = m_pBlockVectors[vectorIndex];
            VMA_ASSERT(vector != VMA_NULL);
        }

        switch (move.operation)
        {
        case VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY:
        {
            uint8_t mapCount = move.srcAllocation->SwapBlockAllocation(vector->m_hAllocator, move.dstTmpAllocation);
            if (mapCount > 0)
            {
                allocator = vector->m_hAllocator;
                VmaDeviceMemoryBlock* newMapBlock = move.srcAllocation->GetBlock();
                bool notPresent = true;
                for (FragmentedBlock& block : mappedBlocks)
                {
                    if (block.block == newMapBlock)
                    {
                        notPresent = false;
                        block.data += mapCount;
                        break;
                    }
                }
                if (notPresent)
                    mappedBlocks.push_back({ mapCount, newMapBlock });
            }

            // Scope for locks; Free has its own lock.
            {
                VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
                prevCount = vector->GetBlockCount();
                freedBlockSize = move.dstTmpAllocation->GetBlock()->m_pMetadata->GetSize();
            }
            vector->Free(move.dstTmpAllocation);
            {
                VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
                currentCount = vector->GetBlockCount();
            }

            result = VK_INCOMPLETE;
            break;
        }
        case VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE:
        {
            m_PassStats.bytesMoved -= move.srcAllocation->GetSize();
            --m_PassStats.allocationsMoved;
            vector->Free(move.dstTmpAllocation);

            VmaDeviceMemoryBlock* newBlock = move.srcAllocation->GetBlock();
            bool notPresent = true;
            for (const FragmentedBlock& block : immovableBlocks)
            {
                if (block.block == newBlock)
                {
                    notPresent = false;
                    break;
                }
            }
            if (notPresent)
                immovableBlocks.push_back({ vectorIndex, newBlock });
            break;
        }
        case VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY:
        {
            m_PassStats.bytesMoved -= move.srcAllocation->GetSize();
            --m_PassStats.allocationsMoved;
            // Scope for locks; Free has its own lock.
            {
                VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
                prevCount = vector->GetBlockCount();
                freedBlockSize = move.srcAllocation->GetBlock()->m_pMetadata->GetSize();
            }
            vector->Free(move.srcAllocation);
            {
                VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
                currentCount = vector->GetBlockCount();
            }
            freedBlockSize *= prevCount - currentCount;

            VkDeviceSize dstBlockSize;
            {
                VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
                dstBlockSize = move.dstTmpAllocation->GetBlock()->m_pMetadata->GetSize();
            }
            vector->Free(move.dstTmpAllocation);
            {
                VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
                freedBlockSize += dstBlockSize * (currentCount - vector->GetBlockCount());
                currentCount = vector->GetBlockCount();
            }

            result = VK_INCOMPLETE;
            break;
        }
        default:
            VMA_ASSERT(0);
        }

        if (prevCount > currentCount)
        {
            size_t freedBlocks = prevCount - currentCount;
            m_PassStats.deviceMemoryBlocksFreed += static_cast<uint32_t>(freedBlocks);
            m_PassStats.bytesFreed += freedBlockSize;
        }

        switch (m_Algorithm)
        {
        case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT:
        {
            if (m_AlgorithmState != VMA_NULL)
            {
                // Avoid unnecessary tries to allocate when a new free block is available.
                StateExtensive& state = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[vectorIndex];
                if (state.firstFreeBlock != SIZE_MAX)
                {
                    const size_t diff = prevCount - currentCount;
                    if (state.firstFreeBlock >= diff)
                    {
                        state.firstFreeBlock -= diff;
                        if (state.firstFreeBlock != 0)
                            state.firstFreeBlock -= vector->GetBlock(state.firstFreeBlock - 1)->m_pMetadata->IsEmpty();
                    }
                    else
                        state.firstFreeBlock = 0;
                }
            }
        }
        }
    }
    moveInfo.moveCount = 0;
    moveInfo.pMoves = VMA_NULL;
    m_Moves.clear();

    // Update stats.
    m_GlobalStats.allocationsMoved += m_PassStats.allocationsMoved;
    m_GlobalStats.bytesFreed += m_PassStats.bytesFreed;
    m_GlobalStats.bytesMoved += m_PassStats.bytesMoved;
    m_GlobalStats.deviceMemoryBlocksFreed += m_PassStats.deviceMemoryBlocksFreed;
    m_PassStats = { 0 };

    // Move blocks with immovable allocations according to the algorithm.
    if (immovableBlocks.size() > 0)
    {
        switch (m_Algorithm)
        {
        case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT:
        {
            if (m_AlgorithmState != VMA_NULL)
            {
                bool swapped = false;
                // Move to the start of the free-blocks range.
                for (const FragmentedBlock& block : immovableBlocks)
                {
                    StateExtensive& state = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[block.data];
                    if (state.operation != StateExtensive::Operation::Cleanup)
                    {
                        VmaBlockVector* vector = m_pBlockVectors[block.data];
                        VmaMutexLockWrite lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);

                        for (size_t i = 0, count = vector->GetBlockCount() - m_ImmovableBlockCount; i < count; ++i)
                        {
                            if (vector->GetBlock(i) == block.block)
                            {
                                VMA_SWAP(vector->m_Blocks[i], vector->m_Blocks[vector->GetBlockCount() - ++m_ImmovableBlockCount]);
                                if (state.firstFreeBlock != SIZE_MAX)
                                {
                                    if (i + 1 < state.firstFreeBlock)
                                    {
                                        if (state.firstFreeBlock > 1)
                                            VMA_SWAP(vector->m_Blocks[i], vector->m_Blocks[--state.firstFreeBlock]);
                                        else
                                            --state.firstFreeBlock;
                                    }
                                }
                                swapped = true;
                                break;
                            }
                        }
                    }
                }
                if (swapped)
                    result = VK_INCOMPLETE;
                break;
            }
        }
        default:
        {
            // Move to the beginning.
            for (const FragmentedBlock& block : immovableBlocks)
            {
                VmaBlockVector* vector = m_pBlockVectors[block.data];
                VmaMutexLockWrite lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);

                for (size_t i = m_ImmovableBlockCount; i < vector->GetBlockCount(); ++i)
                {
                    if (vector->GetBlock(i) == block.block)
                    {
                        VMA_SWAP(vector->m_Blocks[i], vector->m_Blocks[m_ImmovableBlockCount++]);
                        break;
                    }
                }
            }
            break;
        }
        }
    }

    // Bulk-map destination blocks.
    for (const FragmentedBlock& block : mappedBlocks)
    {
        VkResult res = block.block->Map(allocator, block.data, VMA_NULL);
        VMA_ASSERT(res == VK_SUCCESS);
    }
    return result;
}

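// In the dispatch below, an unknown algorithm value asserts and then deliberately
// falls through to the BALANCED strategy as a safe default.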
bool VmaDefragmentationContext_T::ComputeDefragmentation(VmaBlockVector& vector, size_t index)
{
    switch (m_Algorithm)
    {
    case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT:
        return ComputeDefragmentation_Fast(vector);
    default:
        VMA_ASSERT(0);
    case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT:
        return ComputeDefragmentation_Balanced(vector, index, true);
    case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT:
        return ComputeDefragmentation_Full(vector);
    case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT:
        return ComputeDefragmentation_Extensive(vector, index);
    }
}

VmaDefragmentationContext_T::MoveAllocationData VmaDefragmentationContext_T::GetMoveData(
    VmaAllocHandle handle, VmaBlockMetadata* metadata)
{
    MoveAllocationData moveData;
    moveData.move.srcAllocation = (VmaAllocation)metadata->GetAllocationUserData(handle);
    moveData.size = moveData.move.srcAllocation->GetSize();
    moveData.alignment = moveData.move.srcAllocation->GetAlignment();
    moveData.type = moveData.move.srcAllocation->GetSuballocationType();
    moveData.flags = 0;

    if (moveData.move.srcAllocation->IsPersistentMap())
        moveData.flags |= VMA_ALLOCATION_CREATE_MAPPED_BIT;
    if (moveData.move.srcAllocation->IsMappingAllowed())
        moveData.flags |= VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT;

    return moveData;
}

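// CheckCounters (below) asks whether moving `bytes` more would exceed the per-pass
// byte budget; up to MAX_ALLOCS_TO_IGNORE oversized candidates are skipped (Ignore)
// before the pass is cut short entirely (End).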
VmaDefragmentationContext_T::CounterStatus VmaDefragmentationContext_T::CheckCounters(VkDeviceSize bytes)
{
    // Ignore the allocation if moving it would exceed the maximum bytes per pass.
    if (m_PassStats.bytesMoved + bytes > m_MaxPassBytes)
    {
        if (++m_IgnoredAllocs < MAX_ALLOCS_TO_IGNORE)
            return CounterStatus::Ignore;
        else
            return CounterStatus::End;
    }
    return CounterStatus::Pass;
}

bool VmaDefragmentationContext_T::IncrementCounters(VkDeviceSize bytes)
{
    m_PassStats.bytesMoved += bytes;
    // Early return when a per-pass maximum is reached.
    if (++m_PassStats.allocationsMoved >= m_MaxPassAllocations || m_PassStats.bytesMoved >= m_MaxPassBytes)
    {
        VMA_ASSERT((m_PassStats.allocationsMoved == m_MaxPassAllocations ||
            m_PassStats.bytesMoved == m_MaxPassBytes) && "Exceeded maximum pass threshold!");
        return true;
    }
    return false;
}

bool VmaDefragmentationContext_T::ReallocWithinBlock(VmaBlockVector& vector, VmaDeviceMemoryBlock* block)
{
    VmaBlockMetadata* metadata = block->m_pMetadata;

    for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
        handle != VK_NULL_HANDLE;
        handle = metadata->GetNextAllocation(handle))
    {
        MoveAllocationData moveData = GetMoveData(handle, metadata);
        // Ignore allocations newly created by the defragmentation algorithm itself.
        if (moveData.move.srcAllocation->GetUserData() == this)
            continue;
        switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
        {
        case CounterStatus::Ignore:
            continue;
        case CounterStatus::End:
            return true;
        default:
            VMA_ASSERT(0);
        case CounterStatus::Pass:
            break;
        }

        VkDeviceSize offset = moveData.move.srcAllocation->GetOffset();
        if (offset != 0 && metadata->GetSumFreeSize() >= moveData.size)
        {
            VmaAllocationRequest request = {};
            if (metadata->CreateAllocationRequest(
                moveData.size,
                moveData.alignment,
                false,
                moveData.type,
                VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,
                &request))
            {
                if (metadata->GetAllocationOffset(request.allocHandle) < offset)
                {
                    if (vector.CommitAllocationRequest(
                        request,
                        block,
                        moveData.alignment,
                        moveData.flags,
                        this,
                        moveData.type,
                        &moveData.move.dstTmpAllocation) == VK_SUCCESS)
                    {
                        m_Moves.push_back(moveData.move);
                        if (IncrementCounters(moveData.size))
                            return true;
                    }
                }
            }
        }
    }
    return false;
}

AllocInOtherBlock(size_t start,size_t end,MoveAllocationData & data,VmaBlockVector & vector)13439 bool VmaDefragmentationContext_T::AllocInOtherBlock(size_t start, size_t end, MoveAllocationData& data, VmaBlockVector& vector)
13440 {
13441     for (; start < end; ++start)
13442     {
13443         VmaDeviceMemoryBlock* dstBlock = vector.GetBlock(start);
13444         if (dstBlock->m_pMetadata->GetSumFreeSize() >= data.size)
13445         {
13446             if (vector.AllocateFromBlock(dstBlock,
13447                 data.size,
13448                 data.alignment,
13449                 data.flags,
13450                 this,
13451                 data.type,
13452                 0,
13453                 &data.move.dstTmpAllocation) == VK_SUCCESS)
13454             {
13455                 m_Moves.push_back(data.move);
13456                 if (IncrementCounters(data.size))
13457                     return true;
13458                 break;
13459             }
13460         }
13461     }
13462     return false;
13463 }
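
// AllocInOtherBlock() is a plain first-fit scan: it walks blocks [start, end)
// in order and commits the move into the first block whose total free size
// can hold the allocation. A true result propagates "pass budget exhausted"
// from IncrementCounters() up to the caller.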

bool VmaDefragmentationContext_T::ComputeDefragmentation_Fast(VmaBlockVector& vector)
{
    // Move only between blocks

    // Go through allocations in last blocks and try to fit them inside first ones
    for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i)
    {
        VmaBlockMetadata* metadata = vector.GetBlock(i)->m_pMetadata;

        for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
            handle != VK_NULL_HANDLE;
            handle = metadata->GetNextAllocation(handle))
        {
            MoveAllocationData moveData = GetMoveData(handle, metadata);
            // Ignore allocations newly created by the defragmentation algorithm itself
            if (moveData.move.srcAllocation->GetUserData() == this)
                continue;
            switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
            {
            case CounterStatus::Ignore:
                continue;
            case CounterStatus::End:
                return true;
            default:
                VMA_ASSERT(0);
            case CounterStatus::Pass:
                break;
            }

            // Check all previous blocks for free space
            if (AllocInOtherBlock(0, i, moveData, vector))
                return true;
        }
    }
    return false;
}

bool VmaDefragmentationContext_T::ComputeDefragmentation_Balanced(VmaBlockVector& vector, size_t index, bool update)
{
    // Go over every allocation and try to fit it in previous blocks at lowest offsets,
    // if not possible: realloc within single block to minimize offset (exclude offset == 0),
    // but only if there are noticeable gaps between them (some heuristic, ex. average size of allocation in block)
    VMA_ASSERT(m_AlgorithmState != VMA_NULL);

    StateBalanced& vectorState = reinterpret_cast<StateBalanced*>(m_AlgorithmState)[index];
    if (update && vectorState.avgAllocSize == UINT64_MAX)
        UpdateVectorStatistics(vector, vectorState);

    const size_t startMoveCount = m_Moves.size();
    VkDeviceSize minimalFreeRegion = vectorState.avgFreeSize / 2;
    for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i)
    {
        VmaDeviceMemoryBlock* block = vector.GetBlock(i);
        VmaBlockMetadata* metadata = block->m_pMetadata;
        VkDeviceSize prevFreeRegionSize = 0;

        for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
            handle != VK_NULL_HANDLE;
            handle = metadata->GetNextAllocation(handle))
        {
            MoveAllocationData moveData = GetMoveData(handle, metadata);
            // Ignore allocations newly created by the defragmentation algorithm itself
            if (moveData.move.srcAllocation->GetUserData() == this)
                continue;
            switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
            {
            case CounterStatus::Ignore:
                continue;
            case CounterStatus::End:
                return true;
            default:
                VMA_ASSERT(0);
            case CounterStatus::Pass:
                break;
            }

            // Check all previous blocks for free space
            const size_t prevMoveCount = m_Moves.size();
            if (AllocInOtherBlock(0, i, moveData, vector))
                return true;

            VkDeviceSize nextFreeRegionSize = metadata->GetNextFreeRegionSize(handle);
            // If no room found then realloc within block for lower offset
            VkDeviceSize offset = moveData.move.srcAllocation->GetOffset();
            if (prevMoveCount == m_Moves.size() && offset != 0 && metadata->GetSumFreeSize() >= moveData.size)
            {
                // Check if realloc will make sense
                if (prevFreeRegionSize >= minimalFreeRegion ||
                    nextFreeRegionSize >= minimalFreeRegion ||
                    moveData.size <= vectorState.avgFreeSize ||
                    moveData.size <= vectorState.avgAllocSize)
                {
                    VmaAllocationRequest request = {};
                    if (metadata->CreateAllocationRequest(
                        moveData.size,
                        moveData.alignment,
                        false,
                        moveData.type,
                        VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,
                        &request))
                    {
                        if (metadata->GetAllocationOffset(request.allocHandle) < offset)
                        {
                            if (vector.CommitAllocationRequest(
                                request,
                                block,
                                moveData.alignment,
                                moveData.flags,
                                this,
                                moveData.type,
                                &moveData.move.dstTmpAllocation) == VK_SUCCESS)
                            {
                                m_Moves.push_back(moveData.move);
                                if (IncrementCounters(moveData.size))
                                    return true;
                            }
                        }
                    }
                }
            }
            prevFreeRegionSize = nextFreeRegionSize;
        }
    }

    // No moves performed, update statistics to current vector state
    if (startMoveCount == m_Moves.size() && !update)
    {
        vectorState.avgAllocSize = UINT64_MAX;
        return ComputeDefragmentation_Balanced(vector, index, false);
    }
    return false;
}

bool VmaDefragmentationContext_T::ComputeDefragmentation_Full(VmaBlockVector& vector)
{
    // Go over every allocation and try to fit it in previous blocks at lowest offsets,
    // if not possible: realloc within single block to minimize offset (exclude offset == 0)

    for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i)
    {
        VmaDeviceMemoryBlock* block = vector.GetBlock(i);
        VmaBlockMetadata* metadata = block->m_pMetadata;

        for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
            handle != VK_NULL_HANDLE;
            handle = metadata->GetNextAllocation(handle))
        {
            MoveAllocationData moveData = GetMoveData(handle, metadata);
            // Ignore allocations newly created by the defragmentation algorithm itself
            if (moveData.move.srcAllocation->GetUserData() == this)
                continue;
            switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
            {
            case CounterStatus::Ignore:
                continue;
            case CounterStatus::End:
                return true;
            default:
                VMA_ASSERT(0);
            case CounterStatus::Pass:
                break;
            }

            // Check all previous blocks for free space
            const size_t prevMoveCount = m_Moves.size();
            if (AllocInOtherBlock(0, i, moveData, vector))
                return true;

            // If no room found then realloc within block for lower offset
            VkDeviceSize offset = moveData.move.srcAllocation->GetOffset();
            if (prevMoveCount == m_Moves.size() && offset != 0 && metadata->GetSumFreeSize() >= moveData.size)
            {
                VmaAllocationRequest request = {};
                if (metadata->CreateAllocationRequest(
                    moveData.size,
                    moveData.alignment,
                    false,
                    moveData.type,
                    VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,
                    &request))
                {
                    if (metadata->GetAllocationOffset(request.allocHandle) < offset)
                    {
                        if (vector.CommitAllocationRequest(
                            request,
                            block,
                            moveData.alignment,
                            moveData.flags,
                            this,
                            moveData.type,
                            &moveData.move.dstTmpAllocation) == VK_SUCCESS)
                        {
                            m_Moves.push_back(moveData.move);
                            if (IncrementCounters(moveData.size))
                                return true;
                        }
                    }
                }
            }
        }
    }
    return false;
}

bool VmaDefragmentationContext_T::ComputeDefragmentation_Extensive(VmaBlockVector& vector, size_t index)
{
    // First free single block, then populate it to the brim, then free another block, and so on

    // Fall back to the full algorithm, since without granularity conflicts it can achieve maximal packing
    if (vector.m_BufferImageGranularity == 1)
        return ComputeDefragmentation_Full(vector);

    VMA_ASSERT(m_AlgorithmState != VMA_NULL);

    StateExtensive& vectorState = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[index];

    bool texturePresent = false, bufferPresent = false, otherPresent = false;
    switch (vectorState.operation)
    {
    case StateExtensive::Operation::Done: // Vector defragmented
        return false;
    case StateExtensive::Operation::FindFreeBlockBuffer:
    case StateExtensive::Operation::FindFreeBlockTexture:
    case StateExtensive::Operation::FindFreeBlockAll:
    {
        // No more blocks to free, just perform fast realloc and move to cleanup
        if (vectorState.firstFreeBlock == 0)
        {
            vectorState.operation = StateExtensive::Operation::Cleanup;
            return ComputeDefragmentation_Fast(vector);
        }

        // No free blocks, have to clear the last one
        size_t last = (vectorState.firstFreeBlock == SIZE_MAX ? vector.GetBlockCount() : vectorState.firstFreeBlock) - 1;
        VmaBlockMetadata* freeMetadata = vector.GetBlock(last)->m_pMetadata;

        const size_t prevMoveCount = m_Moves.size();
        for (VmaAllocHandle handle = freeMetadata->GetAllocationListBegin();
            handle != VK_NULL_HANDLE;
            handle = freeMetadata->GetNextAllocation(handle))
        {
            MoveAllocationData moveData = GetMoveData(handle, freeMetadata);
            switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
            {
            case CounterStatus::Ignore:
                continue;
            case CounterStatus::End:
                return true;
            default:
                VMA_ASSERT(0);
            case CounterStatus::Pass:
                break;
            }

            // Check all previous blocks for free space
            if (AllocInOtherBlock(0, last, moveData, vector))
            {
                // Full clear performed already
                if (prevMoveCount != m_Moves.size() && freeMetadata->GetNextAllocation(handle) == VK_NULL_HANDLE)
                    reinterpret_cast<size_t*>(m_AlgorithmState)[index] = last;
                return true;
            }
        }

        if (prevMoveCount == m_Moves.size())
        {
            // Cannot perform full clear, have to move data in other blocks around
            if (last != 0)
            {
                for (size_t i = last - 1; i; --i)
                {
                    if (ReallocWithinBlock(vector, vector.GetBlock(i)))
                        return true;
                }
            }

            if (prevMoveCount == m_Moves.size())
            {
                // No possible reallocs within blocks, try to move them around fast
                return ComputeDefragmentation_Fast(vector);
            }
        }
        else
        {
            switch (vectorState.operation)
            {
            case StateExtensive::Operation::FindFreeBlockBuffer:
                vectorState.operation = StateExtensive::Operation::MoveBuffers;
                break;
            default:
                VMA_ASSERT(0);
            case StateExtensive::Operation::FindFreeBlockTexture:
                vectorState.operation = StateExtensive::Operation::MoveTextures;
                break;
            case StateExtensive::Operation::FindFreeBlockAll:
                vectorState.operation = StateExtensive::Operation::MoveAll;
                break;
            }
            vectorState.firstFreeBlock = last;
            // Nothing done, block found without reallocations, can perform more reallocations in the same pass
            return ComputeDefragmentation_Extensive(vector, index);
        }
        break;
    }
    case StateExtensive::Operation::MoveTextures:
    {
        if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL, vector,
            vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent))
        {
            if (texturePresent)
            {
                vectorState.operation = StateExtensive::Operation::FindFreeBlockTexture;
                return ComputeDefragmentation_Extensive(vector, index);
            }

            if (!bufferPresent && !otherPresent)
            {
                vectorState.operation = StateExtensive::Operation::Cleanup;
                break;
            }

            // No more textures to move, check buffers
            vectorState.operation = StateExtensive::Operation::MoveBuffers;
            bufferPresent = false;
            otherPresent = false;
        }
        else
            break;
    }
    case StateExtensive::Operation::MoveBuffers:
    {
        if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_BUFFER, vector,
            vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent))
        {
            if (bufferPresent)
            {
                vectorState.operation = StateExtensive::Operation::FindFreeBlockBuffer;
                return ComputeDefragmentation_Extensive(vector, index);
            }

            if (!otherPresent)
            {
                vectorState.operation = StateExtensive::Operation::Cleanup;
                break;
            }

            // No more buffers to move, check all others
            vectorState.operation = StateExtensive::Operation::MoveAll;
            otherPresent = false;
        }
        else
            break;
    }
    case StateExtensive::Operation::MoveAll:
    {
        if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_FREE, vector,
            vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent))
        {
            if (otherPresent)
            {
                vectorState.operation = StateExtensive::Operation::FindFreeBlockBuffer;
                return ComputeDefragmentation_Extensive(vector, index);
            }
            // Everything moved
            vectorState.operation = StateExtensive::Operation::Cleanup;
        }
        break;
    }
    case StateExtensive::Operation::Cleanup:
        // Cleanup is handled below so that other operations may reuse the cleanup code. This case is here to prevent the unhandled enum value warning (C4062).
        break;
    }

    if (vectorState.operation == StateExtensive::Operation::Cleanup)
    {
        // All other work done, pack data in blocks even tighter if possible
        const size_t prevMoveCount = m_Moves.size();
        for (size_t i = 0; i < vector.GetBlockCount(); ++i)
        {
            if (ReallocWithinBlock(vector, vector.GetBlock(i)))
                return true;
        }

        if (prevMoveCount == m_Moves.size())
            vectorState.operation = StateExtensive::Operation::Done;
    }
    return false;
}
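
// The extensive algorithm above is a small per-vector state machine:
// FindFreeBlock* empties the highest-index block, the Move* states repack one
// resource class at a time (optimal-tiling images, then buffers, then
// everything else) so that bufferImageGranularity conflicts do not reopen
// gaps, and Cleanup runs a final ReallocWithinBlock() sweep before the vector
// is marked Done. The missing `break`s between the Move* cases are
// intentional fallthroughs.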

void VmaDefragmentationContext_T::UpdateVectorStatistics(VmaBlockVector& vector, StateBalanced& state)
{
    size_t allocCount = 0;
    size_t freeCount = 0;
    state.avgFreeSize = 0;
    state.avgAllocSize = 0;

    for (size_t i = 0; i < vector.GetBlockCount(); ++i)
    {
        VmaBlockMetadata* metadata = vector.GetBlock(i)->m_pMetadata;

        allocCount += metadata->GetAllocationCount();
        freeCount += metadata->GetFreeRegionsCount();
        state.avgFreeSize += metadata->GetSumFreeSize();
        state.avgAllocSize += metadata->GetSize();
    }

    state.avgAllocSize = (state.avgAllocSize - state.avgFreeSize) / allocCount;
    state.avgFreeSize /= freeCount;
}
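
// The averages above accumulate total block size and total free size first,
// so avgAllocSize = (totalSize - totalFreeSize) / allocCount. The unguarded
// divisions implicitly assume the vector holds at least one allocation and
// one free region when the balanced algorithm requests fresh statistics.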

bool VmaDefragmentationContext_T::MoveDataToFreeBlocks(VmaSuballocationType currentType,
    VmaBlockVector& vector, size_t firstFreeBlock,
    bool& texturePresent, bool& bufferPresent, bool& otherPresent)
{
    const size_t prevMoveCount = m_Moves.size();
    for (size_t i = firstFreeBlock; i;)
    {
        VmaDeviceMemoryBlock* block = vector.GetBlock(--i);
        VmaBlockMetadata* metadata = block->m_pMetadata;

        for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
            handle != VK_NULL_HANDLE;
            handle = metadata->GetNextAllocation(handle))
        {
            MoveAllocationData moveData = GetMoveData(handle, metadata);
            // Ignore allocations newly created by the defragmentation algorithm itself
            if (moveData.move.srcAllocation->GetUserData() == this)
                continue;
            switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
            {
            case CounterStatus::Ignore:
                continue;
            case CounterStatus::End:
                return true;
            default:
                VMA_ASSERT(0);
            case CounterStatus::Pass:
                break;
            }

            // Move only a single type of resource at once
            if (!VmaIsBufferImageGranularityConflict(moveData.type, currentType))
            {
                // Try to fit the allocation into the free blocks
                if (AllocInOtherBlock(firstFreeBlock, vector.GetBlockCount(), moveData, vector))
                    return false;
            }

            if (!VmaIsBufferImageGranularityConflict(moveData.type, VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL))
                texturePresent = true;
            else if (!VmaIsBufferImageGranularityConflict(moveData.type, VMA_SUBALLOCATION_TYPE_BUFFER))
                bufferPresent = true;
            else
                otherPresent = true;
        }
    }
    return prevMoveCount == m_Moves.size();
}
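
// Return-value convention above: the fall-through returns true only when no
// new moves were queued, which ComputeDefragmentation_Extensive() reads as
// "this resource type is fully placed, advance the state machine"; hitting
// the pass budget mid-scan instead returns early (true from
// CounterStatus::End, false after a successful AllocInOtherBlock() that
// exhausted the budget).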
#endif // _VMA_DEFRAGMENTATION_CONTEXT_FUNCTIONS

#ifndef _VMA_POOL_T_FUNCTIONS
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo,
    VkDeviceSize preferredBlockSize)
    : m_BlockVector(
        hAllocator,
        this, // hParentPool
        createInfo.memoryTypeIndex,
        createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.blockSize != 0, // explicitBlockSize
        createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK, // algorithm
        createInfo.priority,
        VMA_MAX(hAllocator->GetMemoryTypeMinAlignment(createInfo.memoryTypeIndex), createInfo.minAllocationAlignment),
        createInfo.pMemoryAllocateNext),
    m_Id(0),
    m_Name(VMA_NULL) {}
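
// A minimal usage sketch of the pool path this constructor serves, assuming a
// valid `allocator` and a `memTypeIndex` picked e.g. with
// vmaFindMemoryTypeIndex() (the local variable names are illustrative):
//
//     VmaPoolCreateInfo poolCreateInfo = {};
//     poolCreateInfo.memoryTypeIndex = memTypeIndex;
//     poolCreateInfo.blockSize = 0; // 0 = use the allocator's preferred block size
//     VmaPool pool;
//     VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
//     // ... allocate with VmaAllocationCreateInfo::pool = pool ...
//     vmaDestroyPool(allocator, pool);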

VmaPool_T::~VmaPool_T()
{
    VMA_ASSERT(m_PrevPool == VMA_NULL && m_NextPool == VMA_NULL);
}

void VmaPool_T::SetName(const char* pName)
{
    const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks();
    VmaFreeString(allocs, m_Name);

    if (pName != VMA_NULL)
    {
        m_Name = VmaCreateStringCopy(allocs, pName);
    }
    else
    {
        m_Name = VMA_NULL;
    }
}
#endif // _VMA_POOL_T_FUNCTIONS

#ifndef _VMA_ALLOCATOR_T_FUNCTIONS
VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ? pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0),
    m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0),
    m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0),
    m_UseAmdDeviceCoherentMemory((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT) != 0),
    m_UseKhrBufferDeviceAddress((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT) != 0),
    m_UseExtMemoryPriority((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT) != 0),
    m_hDevice(pCreateInfo->device),
    m_hInstance(pCreateInfo->instance),
    m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
        *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    m_AllocationObjectAllocator(&m_AllocationCallbacks),
    m_HeapSizeLimitMask(0),
    m_DeviceMemoryCount(0),
    m_PreferredLargeHeapBlockSize(0),
    m_PhysicalDevice(pCreateInfo->physicalDevice),
    m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
    m_NextPoolId(0),
    m_GlobalMemoryTypeBits(UINT32_MAX)
{
    if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
    {
        m_UseKhrDedicatedAllocation = false;
        m_UseKhrBindMemory2 = false;
    }

    if(VMA_DEBUG_DETECT_CORRUPTION)
    {
        // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
        VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
    }

    VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device && pCreateInfo->instance);

    if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
    {
#if !(VMA_DEDICATED_ALLOCATION)
        if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
        {
            VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
        }
#endif
#if !(VMA_BIND_MEMORY2)
        if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0)
        {
            VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros.");
        }
#endif
    }
#if !(VMA_MEMORY_BUDGET)
    if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0)
    {
        VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros.");
    }
#endif
#if !(VMA_BUFFER_DEVICE_ADDRESS)
    if(m_UseKhrBufferDeviceAddress)
    {
        VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT is set but required extension or Vulkan 1.2 is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
    }
#endif
#if VMA_VULKAN_VERSION < 1002000
    if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 2, 0))
    {
        VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_2 but required Vulkan version is disabled by preprocessor macros.");
    }
#endif
#if VMA_VULKAN_VERSION < 1001000
    if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
    {
        VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan version is disabled by preprocessor macros.");
    }
#endif
#if !(VMA_MEMORY_PRIORITY)
    if(m_UseExtMemoryPriority)
    {
        VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
    }
#endif

    memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
    memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    memset(&m_MemProps, 0, sizeof(m_MemProps));

    memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions));

#if VMA_EXTERNAL_MEMORY
    memset(&m_TypeExternalMemoryHandleTypes, 0, sizeof(m_TypeExternalMemoryHandleTypes));
#endif // #if VMA_EXTERNAL_MEMORY

    if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    {
        m_DeviceMemoryCallbacks.pUserData = pCreateInfo->pDeviceMemoryCallbacks->pUserData;
        m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
        m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    }

    ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);

    (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);

    VMA_ASSERT(VmaIsPow2(VMA_MIN_ALIGNMENT));
    VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
    VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
    VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));

    m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
        pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);

    m_GlobalMemoryTypeBits = CalculateGlobalMemoryTypeBits();

#if VMA_EXTERNAL_MEMORY
    if(pCreateInfo->pTypeExternalMemoryHandleTypes != VMA_NULL)
    {
        memcpy(m_TypeExternalMemoryHandleTypes, pCreateInfo->pTypeExternalMemoryHandleTypes,
            sizeof(VkExternalMemoryHandleTypeFlagsKHR) * GetMemoryTypeCount());
    }
#endif // #if VMA_EXTERNAL_MEMORY

    if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    {
        for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
        {
            const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
            if(limit != VK_WHOLE_SIZE)
            {
                m_HeapSizeLimitMask |= 1u << heapIndex;
                if(limit < m_MemProps.memoryHeaps[heapIndex].size)
                {
                    m_MemProps.memoryHeaps[heapIndex].size = limit;
                }
            }
        }
    }

    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        // Create only supported types
        if((m_GlobalMemoryTypeBits & (1u << memTypeIndex)) != 0)
        {
            const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
            m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
                this,
                VK_NULL_HANDLE, // hParentPool
                memTypeIndex,
                preferredBlockSize,
                0,
                SIZE_MAX,
                GetBufferImageGranularity(),
                false, // explicitBlockSize
                0, // algorithm
                0.5f, // priority (0.5 is the default per Vulkan spec)
                GetMemoryTypeMinAlignment(memTypeIndex), // minAllocationAlignment
                VMA_NULL); // pMemoryAllocateNext
            // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
            // because minBlockCount is 0.
        }
    }
}
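
// Construction order matters above: Vulkan function pointers are imported
// first, physical-device and memory properties are queried through them, and
// only then is one default block vector created per supported memory type
// (with minBlockCount == 0, so no device memory is allocated up front).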

VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
{
    VkResult res = VK_SUCCESS;

#if VMA_MEMORY_BUDGET
    if(m_UseExtMemoryBudget)
    {
        UpdateVulkanBudget();
    }
#endif // #if VMA_MEMORY_BUDGET

    return res;
}

VmaAllocator_T::~VmaAllocator_T()
{
    VMA_ASSERT(m_Pools.IsEmpty());

    for(size_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
    {
        vma_delete(this, m_pBlockVectors[memTypeIndex]);
    }
}

void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    ImportVulkanFunctions_Static();
#endif

    if(pVulkanFunctions != VMA_NULL)
    {
        ImportVulkanFunctions_Custom(pVulkanFunctions);
    }

#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
    ImportVulkanFunctions_Dynamic();
#endif

    ValidateVulkanFunctions();
}
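
// Import order above: statically linked entry points first (when
// VMA_STATIC_VULKAN_FUNCTIONS is enabled), then any non-null user-provided
// pointers override them, and finally VMA_DYNAMIC_VULKAN_FUNCTIONS fills only
// the members that are still null via vkGetInstanceProcAddr/
// vkGetDeviceProcAddr before everything is validated.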

#if VMA_STATIC_VULKAN_FUNCTIONS == 1

void VmaAllocator_T::ImportVulkanFunctions_Static()
{
    // Vulkan 1.0
    m_VulkanFunctions.vkGetInstanceProcAddr = (PFN_vkGetInstanceProcAddr)vkGetInstanceProcAddr;
    m_VulkanFunctions.vkGetDeviceProcAddr = (PFN_vkGetDeviceProcAddr)vkGetDeviceProcAddr;
    m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
    m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
    m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
    m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
    m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
    m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
    m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
    m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
    m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
    m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
    m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
    m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
    m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
    m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
    m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
    m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;

    // Vulkan 1.1
#if VMA_VULKAN_VERSION >= 1001000
    if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
    {
        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2)vkGetBufferMemoryRequirements2;
        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2)vkGetImageMemoryRequirements2;
        m_VulkanFunctions.vkBindBufferMemory2KHR = (PFN_vkBindBufferMemory2)vkBindBufferMemory2;
        m_VulkanFunctions.vkBindImageMemory2KHR = (PFN_vkBindImageMemory2)vkBindImageMemory2;
        m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR = (PFN_vkGetPhysicalDeviceMemoryProperties2)vkGetPhysicalDeviceMemoryProperties2;
    }
#endif

#if VMA_VULKAN_VERSION >= 1003000
    if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0))
    {
        m_VulkanFunctions.vkGetDeviceBufferMemoryRequirements = (PFN_vkGetDeviceBufferMemoryRequirements)vkGetDeviceBufferMemoryRequirements;
        m_VulkanFunctions.vkGetDeviceImageMemoryRequirements = (PFN_vkGetDeviceImageMemoryRequirements)vkGetDeviceImageMemoryRequirements;
    }
#endif
}

#endif // VMA_STATIC_VULKAN_FUNCTIONS == 1

void VmaAllocator_T::ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions)
{
    VMA_ASSERT(pVulkanFunctions != VMA_NULL);

#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    VMA_COPY_IF_NOT_NULL(vkGetInstanceProcAddr);
    VMA_COPY_IF_NOT_NULL(vkGetDeviceProcAddr);
    VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
    VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
    VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
    VMA_COPY_IF_NOT_NULL(vkFreeMemory);
    VMA_COPY_IF_NOT_NULL(vkMapMemory);
    VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
    VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
    VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
    VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
    VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
    VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
    VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
    VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
    VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
    VMA_COPY_IF_NOT_NULL(vkCreateImage);
    VMA_COPY_IF_NOT_NULL(vkDestroyImage);
    VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);

#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
    VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
    VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
#endif

#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
    VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR);
    VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR);
#endif

#if VMA_MEMORY_BUDGET
    VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR);
#endif

#if VMA_VULKAN_VERSION >= 1003000
    VMA_COPY_IF_NOT_NULL(vkGetDeviceBufferMemoryRequirements);
    VMA_COPY_IF_NOT_NULL(vkGetDeviceImageMemoryRequirements);
#endif

#undef VMA_COPY_IF_NOT_NULL
}

#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1

void VmaAllocator_T::ImportVulkanFunctions_Dynamic()
{
    VMA_ASSERT(m_VulkanFunctions.vkGetInstanceProcAddr && m_VulkanFunctions.vkGetDeviceProcAddr &&
        "To use VMA_DYNAMIC_VULKAN_FUNCTIONS in new versions of VMA you now have to pass "
        "VmaVulkanFunctions::vkGetInstanceProcAddr and vkGetDeviceProcAddr as VmaAllocatorCreateInfo::pVulkanFunctions. "
        "Other members can be null.");

#define VMA_FETCH_INSTANCE_FUNC(memberName, functionPointerType, functionNameString) \
    if(m_VulkanFunctions.memberName == VMA_NULL) \
        m_VulkanFunctions.memberName = \
            (functionPointerType)m_VulkanFunctions.vkGetInstanceProcAddr(m_hInstance, functionNameString);
#define VMA_FETCH_DEVICE_FUNC(memberName, functionPointerType, functionNameString) \
    if(m_VulkanFunctions.memberName == VMA_NULL) \
        m_VulkanFunctions.memberName = \
            (functionPointerType)m_VulkanFunctions.vkGetDeviceProcAddr(m_hDevice, functionNameString);

    VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceProperties, PFN_vkGetPhysicalDeviceProperties, "vkGetPhysicalDeviceProperties");
    VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties, PFN_vkGetPhysicalDeviceMemoryProperties, "vkGetPhysicalDeviceMemoryProperties");
    VMA_FETCH_DEVICE_FUNC(vkAllocateMemory, PFN_vkAllocateMemory, "vkAllocateMemory");
    VMA_FETCH_DEVICE_FUNC(vkFreeMemory, PFN_vkFreeMemory, "vkFreeMemory");
    VMA_FETCH_DEVICE_FUNC(vkMapMemory, PFN_vkMapMemory, "vkMapMemory");
    VMA_FETCH_DEVICE_FUNC(vkUnmapMemory, PFN_vkUnmapMemory, "vkUnmapMemory");
    VMA_FETCH_DEVICE_FUNC(vkFlushMappedMemoryRanges, PFN_vkFlushMappedMemoryRanges, "vkFlushMappedMemoryRanges");
    VMA_FETCH_DEVICE_FUNC(vkInvalidateMappedMemoryRanges, PFN_vkInvalidateMappedMemoryRanges, "vkInvalidateMappedMemoryRanges");
    VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory, PFN_vkBindBufferMemory, "vkBindBufferMemory");
    VMA_FETCH_DEVICE_FUNC(vkBindImageMemory, PFN_vkBindImageMemory, "vkBindImageMemory");
    VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements, PFN_vkGetBufferMemoryRequirements, "vkGetBufferMemoryRequirements");
    VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements, PFN_vkGetImageMemoryRequirements, "vkGetImageMemoryRequirements");
    VMA_FETCH_DEVICE_FUNC(vkCreateBuffer, PFN_vkCreateBuffer, "vkCreateBuffer");
    VMA_FETCH_DEVICE_FUNC(vkDestroyBuffer, PFN_vkDestroyBuffer, "vkDestroyBuffer");
    VMA_FETCH_DEVICE_FUNC(vkCreateImage, PFN_vkCreateImage, "vkCreateImage");
    VMA_FETCH_DEVICE_FUNC(vkDestroyImage, PFN_vkDestroyImage, "vkDestroyImage");
    VMA_FETCH_DEVICE_FUNC(vkCmdCopyBuffer, PFN_vkCmdCopyBuffer, "vkCmdCopyBuffer");

#if VMA_VULKAN_VERSION >= 1001000
    if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
    {
        VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2, "vkGetBufferMemoryRequirements2");
        VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2, "vkGetImageMemoryRequirements2");
        VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2, "vkBindBufferMemory2");
        VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2, "vkBindImageMemory2");
        VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2, "vkGetPhysicalDeviceMemoryProperties2");
    }
#endif

#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2KHR, "vkGetBufferMemoryRequirements2KHR");
        VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2KHR, "vkGetImageMemoryRequirements2KHR");
    }
#endif

#if VMA_BIND_MEMORY2
    if(m_UseKhrBindMemory2)
    {
        VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2KHR, "vkBindBufferMemory2KHR");
        VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2KHR, "vkBindImageMemory2KHR");
    }
#endif // #if VMA_BIND_MEMORY2

#if VMA_MEMORY_BUDGET
    if(m_UseExtMemoryBudget)
    {
        VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2KHR");
    }
#endif // #if VMA_MEMORY_BUDGET

#if VMA_VULKAN_VERSION >= 1003000
    if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0))
    {
        VMA_FETCH_DEVICE_FUNC(vkGetDeviceBufferMemoryRequirements, PFN_vkGetDeviceBufferMemoryRequirements, "vkGetDeviceBufferMemoryRequirements");
        VMA_FETCH_DEVICE_FUNC(vkGetDeviceImageMemoryRequirements, PFN_vkGetDeviceImageMemoryRequirements, "vkGetDeviceImageMemoryRequirements");
    }
#endif

#undef VMA_FETCH_DEVICE_FUNC
#undef VMA_FETCH_INSTANCE_FUNC
}

#endif // VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
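
// A minimal setup sketch satisfying the assert at the top of
// ImportVulkanFunctions_Dynamic(), assuming the application links the Vulkan
// loader and compiles with VMA_DYNAMIC_VULKAN_FUNCTIONS == 1 (`instance`,
// `physicalDevice`, and `device` are the application's own handles):
//
//     VmaVulkanFunctions vulkanFunctions = {};
//     vulkanFunctions.vkGetInstanceProcAddr = vkGetInstanceProcAddr;
//     vulkanFunctions.vkGetDeviceProcAddr = vkGetDeviceProcAddr;
//
//     VmaAllocatorCreateInfo allocatorCreateInfo = {};
//     allocatorCreateInfo.instance = instance;
//     allocatorCreateInfo.physicalDevice = physicalDevice;
//     allocatorCreateInfo.device = device;
//     allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;
//     VmaAllocator allocator;
//     VkResult res = vmaCreateAllocator(&allocatorCreateInfo, &allocator);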

void VmaAllocator_T::ValidateVulkanFunctions()
{
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);

#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
    if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation)
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    }
#endif

#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
    if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2)
    {
        VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL);
    }
#endif

#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
    if(m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL);
    }
#endif

#if VMA_VULKAN_VERSION >= 1003000
    if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0))
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetDeviceBufferMemoryRequirements != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetDeviceImageMemoryRequirements != VMA_NULL);
    }
#endif
}

VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
{
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
    return VmaAlignUp(isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize, (VkDeviceSize)32);
}
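
// Worked example for CalcPreferredBlockSize(), assuming the library defaults
// of VMA_SMALL_HEAP_MAX_SIZE = 1 GiB and VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE =
// 256 MiB defined earlier in this header: a 512 MiB heap counts as small, so
// its preferred block size is 512 MiB / 8 = 64 MiB; an 8 GiB heap gets the
// 256 MiB default. Either result is then aligned up to 32 bytes.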

VkResult VmaAllocator_T::AllocateMemoryOfType(
    VmaPool pool,
    VkDeviceSize size,
    VkDeviceSize alignment,
    bool dedicatedPreferred,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    VkFlags dedicatedBufferImageUsage,
    const VmaAllocationCreateInfo& createInfo,
    uint32_t memTypeIndex,
    VmaSuballocationType suballocType,
    VmaDedicatedAllocationList& dedicatedAllocations,
    VmaBlockVector& blockVector,
    size_t allocationCount,
    VmaAllocation* pAllocations)
{
    VMA_ASSERT(pAllocations != VMA_NULL);
    VMA_DEBUG_LOG("  AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);

    VmaAllocationCreateInfo finalCreateInfo = createInfo;
    VkResult res = CalcMemTypeParams(
        finalCreateInfo,
        memTypeIndex,
        size,
        allocationCount);
    if(res != VK_SUCCESS)
        return res;

    if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    {
        return AllocateDedicatedMemory(
            pool,
            size,
            suballocType,
            dedicatedAllocations,
            memTypeIndex,
            (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
            (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
            (finalCreateInfo.flags &
                (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0,
            (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0,
            finalCreateInfo.pUserData,
            finalCreateInfo.priority,
            dedicatedBuffer,
            dedicatedImage,
            dedicatedBufferImageUsage,
            allocationCount,
            pAllocations,
            blockVector.GetAllocationNextPtr());
    }
    else
    {
        const bool canAllocateDedicated =
            (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
            (pool == VK_NULL_HANDLE || !blockVector.HasExplicitBlockSize());

        if(canAllocateDedicated)
        {
            // Heuristic: Prefer dedicated memory if the requested size is greater than half of the preferred block size.
            if(size > blockVector.GetPreferredBlockSize() / 2)
            {
                dedicatedPreferred = true;
            }
            // Protection against creating each allocation as dedicated when we reach or exceed heap size/budget,
            // which can quickly deplete maxMemoryAllocationCount: Don't prefer dedicated allocations when above
            // 3/4 of the maximum allocation count.
            if(m_DeviceMemoryCount.load() > m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount * 3 / 4)
            {
                dedicatedPreferred = false;
            }

            if(dedicatedPreferred)
            {
                res = AllocateDedicatedMemory(
                    pool,
                    size,
                    suballocType,
                    dedicatedAllocations,
                    memTypeIndex,
                    (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
                    (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
                    (finalCreateInfo.flags &
                        (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0,
                    (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0,
                    finalCreateInfo.pUserData,
                    finalCreateInfo.priority,
                    dedicatedBuffer,
                    dedicatedImage,
                    dedicatedBufferImageUsage,
                    allocationCount,
                    pAllocations,
                    blockVector.GetAllocationNextPtr());
                if(res == VK_SUCCESS)
                {
                    // Succeeded: AllocateDedicatedMemory function already filled pAllocations, nothing more to do here.
                    VMA_DEBUG_LOG("    Allocated as DedicatedMemory");
                    return VK_SUCCESS;
                }
            }
        }

        res = blockVector.Allocate(
            size,
            alignment,
            finalCreateInfo,
            suballocType,
            allocationCount,
            pAllocations);
        if(res == VK_SUCCESS)
            return VK_SUCCESS;

        // Try dedicated memory.
        if(canAllocateDedicated && !dedicatedPreferred)
        {
            res = AllocateDedicatedMemory(
                pool,
                size,
                suballocType,
                dedicatedAllocations,
                memTypeIndex,
                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
                (finalCreateInfo.flags &
                    (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0,
                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0,
                finalCreateInfo.pUserData,
                finalCreateInfo.priority,
                dedicatedBuffer,
                dedicatedImage,
                dedicatedBufferImageUsage,
                allocationCount,
                pAllocations,
                blockVector.GetAllocationNextPtr());
            if(res == VK_SUCCESS)
            {
                // Succeeded: AllocateDedicatedMemory function already filled pAllocations, nothing more to do here.
                VMA_DEBUG_LOG("    Allocated as DedicatedMemory");
                return VK_SUCCESS;
            }
        }
        // Everything failed: Return error code.
        VMA_DEBUG_LOG("    vkAllocateMemory FAILED");
        return res;
    }
}
14561 
14562 VkResult VmaAllocator_T::AllocateDedicatedMemory(
14563     VmaPool pool,
14564     VkDeviceSize size,
14565     VmaSuballocationType suballocType,
14566     VmaDedicatedAllocationList& dedicatedAllocations,
14567     uint32_t memTypeIndex,
14568     bool map,
14569     bool isUserDataString,
14570     bool isMappingAllowed,
14571     bool canAliasMemory,
14572     void* pUserData,
14573     float priority,
14574     VkBuffer dedicatedBuffer,
14575     VkImage dedicatedImage,
14576     VkFlags dedicatedBufferImageUsage,
14577     size_t allocationCount,
14578     VmaAllocation* pAllocations,
14579     const void* pNextChain)
14580 {
14581     VMA_ASSERT(allocationCount > 0 && pAllocations);
14582 
14583     VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
14584     allocInfo.memoryTypeIndex = memTypeIndex;
14585     allocInfo.allocationSize = size;
14586     allocInfo.pNext = pNextChain;
14587 
14588 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
14589     VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
14590     if(!canAliasMemory)
14591     {
14592         if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
14593         {
14594             if(dedicatedBuffer != VK_NULL_HANDLE)
14595             {
14596                 VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
14597                 dedicatedAllocInfo.buffer = dedicatedBuffer;
14598                 VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
14599             }
14600             else if(dedicatedImage != VK_NULL_HANDLE)
14601             {
14602                 dedicatedAllocInfo.image = dedicatedImage;
14603                 VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
14604             }
14605         }
14606     }
14607 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
14608 
14609 #if VMA_BUFFER_DEVICE_ADDRESS
14610     VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
14611     if(m_UseKhrBufferDeviceAddress)
14612     {
14613         bool canContainBufferWithDeviceAddress = true;
14614         if(dedicatedBuffer != VK_NULL_HANDLE)
14615         {
14616             canContainBufferWithDeviceAddress = dedicatedBufferImageUsage == UINT32_MAX || // Usage flags unknown
14617                 (dedicatedBufferImageUsage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT) != 0;
14618         }
14619         else if(dedicatedImage != VK_NULL_HANDLE)
14620         {
14621             canContainBufferWithDeviceAddress = false;
14622         }
14623         if(canContainBufferWithDeviceAddress)
14624         {
14625             allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
14626             VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
14627         }
14628     }
14629 #endif // #if VMA_BUFFER_DEVICE_ADDRESS
14630 
14631 #if VMA_MEMORY_PRIORITY
14632     VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT };
14633     if(m_UseExtMemoryPriority)
14634     {
14635         VMA_ASSERT(priority >= 0.f && priority <= 1.f);
14636         priorityInfo.priority = priority;
14637         VmaPnextChainPushFront(&allocInfo, &priorityInfo);
14638     }
14639 #endif // #if VMA_MEMORY_PRIORITY
14640 
14641 #if VMA_EXTERNAL_MEMORY
14642     // Attach VkExportMemoryAllocateInfoKHR if necessary.
14643     VkExportMemoryAllocateInfoKHR exportMemoryAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR };
14644     exportMemoryAllocInfo.handleTypes = GetExternalMemoryHandleTypeFlags(memTypeIndex);
14645     if(exportMemoryAllocInfo.handleTypes != 0)
14646     {
14647         VmaPnextChainPushFront(&allocInfo, &exportMemoryAllocInfo);
14648     }
14649 #endif // #if VMA_EXTERNAL_MEMORY
14650 
14651     size_t allocIndex;
14652     VkResult res = VK_SUCCESS;
14653     for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14654     {
14655         res = AllocateDedicatedMemoryPage(
14656             pool,
14657             size,
14658             suballocType,
14659             memTypeIndex,
14660             allocInfo,
14661             map,
14662             isUserDataString,
14663             isMappingAllowed,
14664             pUserData,
14665             pAllocations + allocIndex);
14666         if(res != VK_SUCCESS)
14667         {
14668             break;
14669         }
14670     }
14671 
14672     if(res == VK_SUCCESS)
14673     {
14674         for (allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14675         {
14676             dedicatedAllocations.Register(pAllocations[allocIndex]);
14677         }
14678         VMA_DEBUG_LOG("    Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
14679     }
14680     else
14681     {
14682         // Free all already created allocations.
14683         while(allocIndex--)
14684         {
14685             VmaAllocation currAlloc = pAllocations[allocIndex];
14686             VkDeviceMemory hMemory = currAlloc->GetMemory();
14687 
14688             /*
14689             There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
14690             There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
14691 
14692             if(currAlloc->GetMappedData() != VMA_NULL)
14693             {
14694                 (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
14695             }
14696             */
14697 
14698             FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
14699             m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize());
14700             m_AllocationObjectAllocator.Free(currAlloc);
14701         }
14702 
14703         memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
14704     }
14705 
14706     return res;
14707 }
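
// Illustrative sketch (not part of the implementation): from the public API, the
// dedicated path above is typically reached by requesting
// VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. `allocator` is an assumed,
// already-created VmaAllocator.
/*
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 64ull * 1024 * 1024; // Large resources often benefit from dedicated memory.
bufCreateInfo.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;

VkBuffer buf;
VmaAllocation alloc;
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, VMA_NULL);
*/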
14708 
14709 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
14710     VmaPool pool,
14711     VkDeviceSize size,
14712     VmaSuballocationType suballocType,
14713     uint32_t memTypeIndex,
14714     const VkMemoryAllocateInfo& allocInfo,
14715     bool map,
14716     bool isUserDataString,
14717     bool isMappingAllowed,
14718     void* pUserData,
14719     VmaAllocation* pAllocation)
14720 {
14721     VkDeviceMemory hMemory = VK_NULL_HANDLE;
14722     VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
14723     if(res < 0)
14724     {
14725         VMA_DEBUG_LOG("    vkAllocateMemory FAILED");
14726         return res;
14727     }
14728 
14729     void* pMappedData = VMA_NULL;
14730     if(map)
14731     {
14732         res = (*m_VulkanFunctions.vkMapMemory)(
14733             m_hDevice,
14734             hMemory,
14735             0,
14736             VK_WHOLE_SIZE,
14737             0,
14738             &pMappedData);
14739         if(res < 0)
14740         {
14741             VMA_DEBUG_LOG("    vkMapMemory FAILED");
14742             FreeVulkanMemory(memTypeIndex, size, hMemory);
14743             return res;
14744         }
14745     }
14746 
14747     *pAllocation = m_AllocationObjectAllocator.Allocate(isMappingAllowed);
14748     (*pAllocation)->InitDedicatedAllocation(pool, memTypeIndex, hMemory, suballocType, pMappedData, size);
14749     if (isUserDataString)
14750         (*pAllocation)->SetName(this, (const char*)pUserData);
14751     else
14752         (*pAllocation)->SetUserData(this, pUserData);
14753     m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size);
14754     if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14755     {
14756         FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
14757     }
14758 
14759     return VK_SUCCESS;
14760 }
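
// Sketch of how the `map` parameter above surfaces in the public API: passing
// VMA_ALLOCATION_CREATE_MAPPED_BIT yields a persistently mapped allocation whose
// pointer is returned in VmaAllocationInfo::pMappedData. `allocator`, `bufCreateInfo`,
// `srcData`, and `srcSize` are assumed to exist on the caller's side.
/*
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
    VMA_ALLOCATION_CREATE_MAPPED_BIT;

VkBuffer buf;
VmaAllocation alloc;
VmaAllocationInfo allocInfo;
vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
memcpy(allocInfo.pMappedData, srcData, srcSize); // Pointer stays valid for the allocation's lifetime.
*/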
14761 
14762 void VmaAllocator_T::GetBufferMemoryRequirements(
14763     VkBuffer hBuffer,
14764     VkMemoryRequirements& memReq,
14765     bool& requiresDedicatedAllocation,
14766     bool& prefersDedicatedAllocation) const
14767 {
14768 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
14769     if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
14770     {
14771         VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
14772         memReqInfo.buffer = hBuffer;
14773 
14774         VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14775 
14776         VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14777         VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
14778 
14779         (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14780 
14781         memReq = memReq2.memoryRequirements;
14782         requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14783         prefersDedicatedAllocation  = (memDedicatedReq.prefersDedicatedAllocation  != VK_FALSE);
14784     }
14785     else
14786 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
14787     {
14788         (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
14789         requiresDedicatedAllocation = false;
14790         prefersDedicatedAllocation  = false;
14791     }
14792 }
14793 
14794 void VmaAllocator_T::GetImageMemoryRequirements(
14795     VkImage hImage,
14796     VkMemoryRequirements& memReq,
14797     bool& requiresDedicatedAllocation,
14798     bool& prefersDedicatedAllocation) const
14799 {
14800 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
14801     if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
14802     {
14803         VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
14804         memReqInfo.image = hImage;
14805 
14806         VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14807 
14808         VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14809         VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
14810 
14811         (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14812 
14813         memReq = memReq2.memoryRequirements;
14814         requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14815         prefersDedicatedAllocation  = (memDedicatedReq.prefersDedicatedAllocation  != VK_FALSE);
14816     }
14817     else
14818 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
14819     {
14820         (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
14821         requiresDedicatedAllocation = false;
14822         prefersDedicatedAllocation  = false;
14823     }
14824 }
14825 
14826 VkResult VmaAllocator_T::FindMemoryTypeIndex(
14827     uint32_t memoryTypeBits,
14828     const VmaAllocationCreateInfo* pAllocationCreateInfo,
14829     VkFlags bufImgUsage,
14830     uint32_t* pMemoryTypeIndex) const
14831 {
14832     memoryTypeBits &= GetGlobalMemoryTypeBits();
14833 
14834     if(pAllocationCreateInfo->memoryTypeBits != 0)
14835     {
14836         memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
14837     }
14838 
14839     VkMemoryPropertyFlags requiredFlags = 0, preferredFlags = 0, notPreferredFlags = 0;
14840     if(!FindMemoryPreferences(
14841         IsIntegratedGpu(),
14842         *pAllocationCreateInfo,
14843         bufImgUsage,
14844         requiredFlags, preferredFlags, notPreferredFlags))
14845     {
14846         return VK_ERROR_FEATURE_NOT_PRESENT;
14847     }
14848 
14849     *pMemoryTypeIndex = UINT32_MAX;
14850     uint32_t minCost = UINT32_MAX;
14851     for(uint32_t memTypeIndex = 0, memTypeBit = 1;
14852         memTypeIndex < GetMemoryTypeCount();
14853         ++memTypeIndex, memTypeBit <<= 1)
14854     {
14855         // This memory type is acceptable according to memoryTypeBits bitmask.
14856         if((memTypeBit & memoryTypeBits) != 0)
14857         {
14858             const VkMemoryPropertyFlags currFlags =
14859                 m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
14860             // This memory type contains requiredFlags.
14861             if((requiredFlags & ~currFlags) == 0)
14862             {
14863                 // Calculate cost as number of bits from preferredFlags not present in this memory type.
14864                 uint32_t currCost = VMA_COUNT_BITS_SET(preferredFlags & ~currFlags) +
14865                     VMA_COUNT_BITS_SET(currFlags & notPreferredFlags);
14866                 // Remember memory type with lowest cost.
14867                 if(currCost < minCost)
14868                 {
14869                     *pMemoryTypeIndex = memTypeIndex;
14870                     if(currCost == 0)
14871                     {
14872                         return VK_SUCCESS;
14873                     }
14874                     minCost = currCost;
14875                 }
14876             }
14877         }
14878     }
14879     return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
14880 }
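
// Sketch of the public wrapper around the cost-based search above. For example, if
// preferredFlags = HOST_VISIBLE | HOST_CACHED, a memory type offering only HOST_VISIBLE
// has cost 1 and loses to a type offering both (cost 0). `allocator` and `memReq`
// (e.g. from vkGetBufferMemoryRequirements) are assumed.
/*
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO_PREFER_HOST;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT;

uint32_t memTypeIndex;
VkResult res = vmaFindMemoryTypeIndex(allocator, memReq.memoryTypeBits, &allocCreateInfo, &memTypeIndex);
*/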
14881 
14882 VkResult VmaAllocator_T::CalcMemTypeParams(
14883     VmaAllocationCreateInfo& inoutCreateInfo,
14884     uint32_t memTypeIndex,
14885     VkDeviceSize size,
14886     size_t allocationCount)
14887 {
14888     // If memory type is not HOST_VISIBLE, disable MAPPED.
14889     if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14890         (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
14891     {
14892         inoutCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
14893     }
14894 
14895     if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
14896         (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0)
14897     {
14898         const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14899         VmaBudget heapBudget = {};
14900         GetHeapBudgets(&heapBudget, heapIndex, 1);
14901         if(heapBudget.usage + size * allocationCount > heapBudget.budget)
14902         {
14903             return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14904         }
14905     }
14906     return VK_SUCCESS;
14907 }
14908 
14909 VkResult VmaAllocator_T::CalcAllocationParams(
14910     VmaAllocationCreateInfo& inoutCreateInfo,
14911     bool dedicatedRequired,
14912     bool dedicatedPreferred)
14913 {
14914     VMA_ASSERT((inoutCreateInfo.flags &
14915         (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) !=
14916         (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT) &&
14917         "Specifying both flags VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT and VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT is incorrect.");
14918     VMA_ASSERT((((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT) == 0 ||
14919         (inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0)) &&
14920         "Specifying VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT requires also VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.");
14921     if(inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO || inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE || inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_HOST)
14922     {
14923         if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0)
14924         {
14925             VMA_ASSERT((inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0 &&
14926                 "When using VMA_ALLOCATION_CREATE_MAPPED_BIT and usage = VMA_MEMORY_USAGE_AUTO*, you must also specify VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.");
14927         }
14928     }
14929 
14930     // If memory is lazily allocated, it should be always dedicated.
14931     if(dedicatedRequired ||
14932         inoutCreateInfo.usage == VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED)
14933     {
14934         inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
14935     }
14936 
14937     if(inoutCreateInfo.pool != VK_NULL_HANDLE)
14938     {
14939         if(inoutCreateInfo.pool->m_BlockVector.HasExplicitBlockSize() &&
14940             (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
14941         {
14942             VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT while current custom pool doesn't support dedicated allocations.");
14943             return VK_ERROR_FEATURE_NOT_PRESENT;
14944         }
14945         inoutCreateInfo.priority = inoutCreateInfo.pool->m_BlockVector.GetPriority();
14946     }
14947 
14948     if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
14949         (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14950     {
14951         VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
14952         return VK_ERROR_FEATURE_NOT_PRESENT;
14953     }
14954 
14955     if(VMA_DEBUG_ALWAYS_DEDICATED_MEMORY &&
14956         (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14957     {
14958         inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
14959     }
14960 
14961     // Non-auto USAGE values imply HOST_ACCESS flags, and so does VMA_MEMORY_USAGE_UNKNOWN,
14962     // because it is used with custom pools. Which specific flag is used doesn't matter:
14963     // the flags change behavior only when combined with VMA_MEMORY_USAGE_AUTO*;
14964     // otherwise they just prevent an assert on mapping.
14965     if(inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO &&
14966         inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE &&
14967         inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO_PREFER_HOST)
14968     {
14969         if((inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) == 0)
14970         {
14971             inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT;
14972         }
14973     }
14974 
14975     return VK_SUCCESS;
14976 }
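
// Example flag combinations against the validation above (a sketch, assuming AUTO usage):
/*
VmaAllocationCreateInfo ci = {};
ci.usage = VMA_MEMORY_USAGE_AUTO;

// OK: MAPPED together with a HOST_ACCESS flag.
ci.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT;

// Would trip the asserts above:
// ci.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;      // MAPPED + AUTO usage without HOST_ACCESS.
// ci.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
//     VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT; // Both HOST_ACCESS flags at once.
*/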
14977 
14978 VkResult VmaAllocator_T::AllocateMemory(
14979     const VkMemoryRequirements& vkMemReq,
14980     bool requiresDedicatedAllocation,
14981     bool prefersDedicatedAllocation,
14982     VkBuffer dedicatedBuffer,
14983     VkImage dedicatedImage,
14984     VkFlags dedicatedBufferImageUsage,
14985     const VmaAllocationCreateInfo& createInfo,
14986     VmaSuballocationType suballocType,
14987     size_t allocationCount,
14988     VmaAllocation* pAllocations)
14989 {
14990     memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
14991 
14992     VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
14993 
14994     if(vkMemReq.size == 0)
14995     {
14996         return VK_ERROR_INITIALIZATION_FAILED;
14997     }
14998 
14999     VmaAllocationCreateInfo createInfoFinal = createInfo;
15000     VkResult res = CalcAllocationParams(createInfoFinal, requiresDedicatedAllocation, prefersDedicatedAllocation);
15001     if(res != VK_SUCCESS)
15002         return res;
15003 
15004     if(createInfoFinal.pool != VK_NULL_HANDLE)
15005     {
15006         VmaBlockVector& blockVector = createInfoFinal.pool->m_BlockVector;
15007         return AllocateMemoryOfType(
15008             createInfoFinal.pool,
15009             vkMemReq.size,
15010             vkMemReq.alignment,
15011             prefersDedicatedAllocation,
15012             dedicatedBuffer,
15013             dedicatedImage,
15014             dedicatedBufferImageUsage,
15015             createInfoFinal,
15016             blockVector.GetMemoryTypeIndex(),
15017             suballocType,
15018             createInfoFinal.pool->m_DedicatedAllocations,
15019             blockVector,
15020             allocationCount,
15021             pAllocations);
15022     }
15023     else
15024     {
15025         // Bit mask of Vulkan memory types acceptable for this allocation.
15026         uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
15027         uint32_t memTypeIndex = UINT32_MAX;
15028         res = FindMemoryTypeIndex(memoryTypeBits, &createInfoFinal, dedicatedBufferImageUsage, &memTypeIndex);
15029         // If we can't find any single memory type matching the requirements, res is VK_ERROR_FEATURE_NOT_PRESENT.
15030         if(res != VK_SUCCESS)
15031             return res;
15032         do
15033         {
15034             VmaBlockVector* blockVector = m_pBlockVectors[memTypeIndex];
15035             VMA_ASSERT(blockVector && "Trying to use unsupported memory type!");
15036             res = AllocateMemoryOfType(
15037                 VK_NULL_HANDLE,
15038                 vkMemReq.size,
15039                 vkMemReq.alignment,
15040                 requiresDedicatedAllocation || prefersDedicatedAllocation,
15041                 dedicatedBuffer,
15042                 dedicatedImage,
15043                 dedicatedBufferImageUsage,
15044                 createInfoFinal,
15045                 memTypeIndex,
15046                 suballocType,
15047                 m_DedicatedAllocations[memTypeIndex],
15048                 *blockVector,
15049                 allocationCount,
15050                 pAllocations);
15051             // Allocation succeeded
15052             if(res == VK_SUCCESS)
15053                 return VK_SUCCESS;
15054 
15055             // Remove old memTypeIndex from list of possibilities.
15056             memoryTypeBits &= ~(1u << memTypeIndex);
15057             // Find alternative memTypeIndex.
15058             res = FindMemoryTypeIndex(memoryTypeBits, &createInfoFinal, dedicatedBufferImageUsage, &memTypeIndex);
15059         } while(res == VK_SUCCESS);
15060 
15061         // No other matching memory type index could be found.
15062         // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
15063         return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15064     }
15065 }
15066 
15067 void VmaAllocator_T::FreeMemory(
15068     size_t allocationCount,
15069     const VmaAllocation* pAllocations)
15070 {
15071     VMA_ASSERT(pAllocations);
15072 
15073     for(size_t allocIndex = allocationCount; allocIndex--; )
15074     {
15075         VmaAllocation allocation = pAllocations[allocIndex];
15076 
15077         if(allocation != VK_NULL_HANDLE)
15078         {
15079             if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
15080             {
15081                 FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
15082             }
15083 
15084             allocation->FreeName(this);
15085 
15086             switch(allocation->GetType())
15087             {
15088             case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15089                 {
15090                     VmaBlockVector* pBlockVector = VMA_NULL;
15091                     VmaPool hPool = allocation->GetParentPool();
15092                     if(hPool != VK_NULL_HANDLE)
15093                     {
15094                         pBlockVector = &hPool->m_BlockVector;
15095                     }
15096                     else
15097                     {
15098                         const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
15099                         pBlockVector = m_pBlockVectors[memTypeIndex];
15100                         VMA_ASSERT(pBlockVector && "Trying to free memory of unsupported type!");
15101                     }
15102                     pBlockVector->Free(allocation);
15103                 }
15104                 break;
15105             case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15106                 FreeDedicatedMemory(allocation);
15107                 break;
15108             default:
15109                 VMA_ASSERT(0);
15110             }
15111         }
15112     }
15113 }
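
// The public counterpart of the loop above frees several allocations in one call
// (a sketch; `allocator`, `allocs`, and `allocCount` are assumed):
/*
vmaFreeMemoryPages(allocator, allocCount, allocs);
*/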
15114 
15115 void VmaAllocator_T::CalculateStatistics(VmaTotalStatistics* pStats)
15116 {
15117     // Initialize.
15118     VmaClearDetailedStatistics(pStats->total);
15119     for(uint32_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
15120         VmaClearDetailedStatistics(pStats->memoryType[i]);
15121     for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
15122         VmaClearDetailedStatistics(pStats->memoryHeap[i]);
15123 
15124     // Process default pools.
15125     for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15126     {
15127         VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
15128         if (pBlockVector != VMA_NULL)
15129             pBlockVector->AddDetailedStatistics(pStats->memoryType[memTypeIndex]);
15130     }
15131 
15132     // Process custom pools.
15133     {
15134         VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15135         for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
15136         {
15137             VmaBlockVector& blockVector = pool->m_BlockVector;
15138             const uint32_t memTypeIndex = blockVector.GetMemoryTypeIndex();
15139             blockVector.AddDetailedStatistics(pStats->memoryType[memTypeIndex]);
15140             pool->m_DedicatedAllocations.AddDetailedStatistics(pStats->memoryType[memTypeIndex]);
15141         }
15142     }
15143 
15144     // Process dedicated allocations.
15145     for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15146     {
15147         m_DedicatedAllocations[memTypeIndex].AddDetailedStatistics(pStats->memoryType[memTypeIndex]);
15148     }
15149 
15150     // Sum from memory types to memory heaps.
15151     for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15152     {
15153         const uint32_t memHeapIndex = m_MemProps.memoryTypes[memTypeIndex].heapIndex;
15154         VmaAddDetailedStatistics(pStats->memoryHeap[memHeapIndex], pStats->memoryType[memTypeIndex]);
15155     }
15156 
15157     // Sum from memory heaps to total.
15158     for(uint32_t memHeapIndex = 0; memHeapIndex < GetMemoryHeapCount(); ++memHeapIndex)
15159         VmaAddDetailedStatistics(pStats->total, pStats->memoryHeap[memHeapIndex]);
15160 
15161     VMA_ASSERT(pStats->total.statistics.allocationCount == 0 ||
15162         pStats->total.allocationSizeMax >= pStats->total.allocationSizeMin);
15163     VMA_ASSERT(pStats->total.unusedRangeCount == 0 ||
15164         pStats->total.unusedRangeSizeMax >= pStats->total.unusedRangeSizeMin);
15165 }
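
// Sketch of querying the aggregated numbers computed above through the public API:
/*
VmaTotalStatistics stats;
vmaCalculateStatistics(allocator, &stats);
printf("Allocations: %u, allocated bytes: %llu\n",
    stats.total.statistics.allocationCount,
    (unsigned long long)stats.total.statistics.allocationBytes);
*/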
15166 
15167 void VmaAllocator_T::GetHeapBudgets(VmaBudget* outBudgets, uint32_t firstHeap, uint32_t heapCount)
15168 {
15169 #if VMA_MEMORY_BUDGET
15170     if(m_UseExtMemoryBudget)
15171     {
15172         if(m_Budget.m_OperationsSinceBudgetFetch < 30)
15173         {
15174             VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex);
15175             for(uint32_t i = 0; i < heapCount; ++i, ++outBudgets)
15176             {
15177                 const uint32_t heapIndex = firstHeap + i;
15178 
15179                 outBudgets->statistics.blockCount = m_Budget.m_BlockCount[heapIndex];
15180                 outBudgets->statistics.allocationCount = m_Budget.m_AllocationCount[heapIndex];
15181                 outBudgets->statistics.blockBytes = m_Budget.m_BlockBytes[heapIndex];
15182                 outBudgets->statistics.allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
15183 
15184                 if(m_Budget.m_VulkanUsage[heapIndex] + outBudgets->statistics.blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex])
15185                 {
15186                     outBudgets->usage = m_Budget.m_VulkanUsage[heapIndex] +
15187                         outBudgets->statistics.blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
15188                 }
15189                 else
15190                 {
15191                     outBudgets->usage = 0;
15192                 }
15193 
15194                 // Have to take the MIN with the heap size because an explicit HeapSizeLimit is included in it.
15195                 outBudgets->budget = VMA_MIN(
15196                     m_Budget.m_VulkanBudget[heapIndex], m_MemProps.memoryHeaps[heapIndex].size);
15197             }
15198         }
15199         else
15200         {
15201             UpdateVulkanBudget(); // Outside of mutex lock
15202             GetHeapBudgets(outBudgets, firstHeap, heapCount); // Recursion
15203         }
15204     }
15205     else
15206 #endif
15207     {
15208         for(uint32_t i = 0; i < heapCount; ++i, ++outBudgets)
15209         {
15210             const uint32_t heapIndex = firstHeap + i;
15211 
15212             outBudgets->statistics.blockCount = m_Budget.m_BlockCount[heapIndex];
15213             outBudgets->statistics.allocationCount = m_Budget.m_AllocationCount[heapIndex];
15214             outBudgets->statistics.blockBytes = m_Budget.m_BlockBytes[heapIndex];
15215             outBudgets->statistics.allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
15216 
15217             outBudgets->usage = outBudgets->statistics.blockBytes;
15218             outBudgets->budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristic.
15219         }
15220     }
15221 }
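
// Sketch of budget-aware allocation using the public entry point for the code above.
// `allocator` and `memProps` (VkPhysicalDeviceMemoryProperties) are assumed.
/*
VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
vmaGetHeapBudgets(allocator, budgets);
for(uint32_t heapIndex = 0; heapIndex < memProps.memoryHeapCount; ++heapIndex)
{
    if(budgets[heapIndex].usage > budgets[heapIndex].budget * 9 / 10)
    {
        // Over ~90% of budget: consider deferring or skipping non-essential allocations.
    }
}
*/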
15222 
15223 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
15224 {
15225     pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
15226     pAllocationInfo->deviceMemory = hAllocation->GetMemory();
15227     pAllocationInfo->offset = hAllocation->GetOffset();
15228     pAllocationInfo->size = hAllocation->GetSize();
15229     pAllocationInfo->pMappedData = hAllocation->GetMappedData();
15230     pAllocationInfo->pUserData = hAllocation->GetUserData();
15231     pAllocationInfo->pName = hAllocation->GetName();
15232 }
15233 
15234 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
15235 {
15236     VMA_DEBUG_LOG("  CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
15237 
15238     VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
15239 
15240     // Protection against an uninitialized new structure member: if garbage data were left there, this pointer dereference would crash.
15241     if(pCreateInfo->pMemoryAllocateNext)
15242     {
15243         VMA_ASSERT(((const VkBaseInStructure*)pCreateInfo->pMemoryAllocateNext)->sType != 0);
15244     }
15245 
15246     if(newCreateInfo.maxBlockCount == 0)
15247     {
15248         newCreateInfo.maxBlockCount = SIZE_MAX;
15249     }
15250     if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
15251     {
15252         return VK_ERROR_INITIALIZATION_FAILED;
15253     }
15254     // Memory type index out of range or forbidden.
15255     if(pCreateInfo->memoryTypeIndex >= GetMemoryTypeCount() ||
15256         ((1u << pCreateInfo->memoryTypeIndex) & m_GlobalMemoryTypeBits) == 0)
15257     {
15258         return VK_ERROR_FEATURE_NOT_PRESENT;
15259     }
15260     if(newCreateInfo.minAllocationAlignment > 0)
15261     {
15262         VMA_ASSERT(VmaIsPow2(newCreateInfo.minAllocationAlignment));
15263     }
15264 
15265     const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
15266 
15267     *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
15268 
15269     VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
15270     if(res != VK_SUCCESS)
15271     {
15272         vma_delete(this, *pPool);
15273         *pPool = VMA_NULL;
15274         return res;
15275     }
15276 
15277     // Add to m_Pools.
15278     {
15279         VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15280         (*pPool)->SetId(m_NextPoolId++);
15281         m_Pools.PushBack(*pPool);
15282     }
15283 
15284     return VK_SUCCESS;
15285 }
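
// Sketch of creating and using a custom pool via the public API; the block sizes and
// counts here are arbitrary example values.
/*
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex; // E.g. from vmaFindMemoryTypeIndex().
poolCreateInfo.blockSize = 128ull * 1024 * 1024;
poolCreateInfo.minBlockCount = 1;
poolCreateInfo.maxBlockCount = 4;

VmaPool pool;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.pool = pool; // Subsequent allocations come from this pool.
// ... allocate buffers/images from the pool, then free them ...
vmaDestroyPool(allocator, pool);
*/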
15286 
15287 void VmaAllocator_T::DestroyPool(VmaPool pool)
15288 {
15289     // Remove from m_Pools.
15290     {
15291         VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15292         m_Pools.Remove(pool);
15293     }
15294 
15295     vma_delete(this, pool);
15296 }
15297 
15298 void VmaAllocator_T::GetPoolStatistics(VmaPool pool, VmaStatistics* pPoolStats)
15299 {
15300     VmaClearStatistics(*pPoolStats);
15301     pool->m_BlockVector.AddStatistics(*pPoolStats);
15302     pool->m_DedicatedAllocations.AddStatistics(*pPoolStats);
15303 }
15304 
15305 void VmaAllocator_T::CalculatePoolStatistics(VmaPool pool, VmaDetailedStatistics* pPoolStats)
15306 {
15307     VmaClearDetailedStatistics(*pPoolStats);
15308     pool->m_BlockVector.AddDetailedStatistics(*pPoolStats);
15309     pool->m_DedicatedAllocations.AddDetailedStatistics(*pPoolStats);
15310 }
15311 
15312 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
15313 {
15314     m_CurrentFrameIndex.store(frameIndex);
15315 
15316 #if VMA_MEMORY_BUDGET
15317     if(m_UseExtMemoryBudget)
15318     {
15319         UpdateVulkanBudget();
15320     }
15321 #endif // #if VMA_MEMORY_BUDGET
15322 }
15323 
15324 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
15325 {
15326     return hPool->m_BlockVector.CheckCorruption();
15327 }
15328 
15329 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
15330 {
15331     VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
15332 
15333     // Process default pools.
15334     for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15335     {
15336         VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
15337         if(pBlockVector != VMA_NULL)
15338         {
15339             VkResult localRes = pBlockVector->CheckCorruption();
15340             switch(localRes)
15341             {
15342             case VK_ERROR_FEATURE_NOT_PRESENT:
15343                 break;
15344             case VK_SUCCESS:
15345                 finalRes = VK_SUCCESS;
15346                 break;
15347             default:
15348                 return localRes;
15349             }
15350         }
15351     }
15352 
15353     // Process custom pools.
15354     {
15355         VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15356         for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
15357         {
15358             if(((1u << pool->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
15359             {
15360                 VkResult localRes = pool->m_BlockVector.CheckCorruption();
15361                 switch(localRes)
15362                 {
15363                 case VK_ERROR_FEATURE_NOT_PRESENT:
15364                     break;
15365                 case VK_SUCCESS:
15366                     finalRes = VK_SUCCESS;
15367                     break;
15368                 default:
15369                     return localRes;
15370                 }
15371             }
15372         }
15373     }
15374 
15375     return finalRes;
15376 }
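
// Sketch of the public entry point for the scan above. It reports
// VK_ERROR_FEATURE_NOT_PRESENT when no block vector supports corruption detection
// (which requires margins and VMA_DEBUG_DETECT_CORRUPTION to be enabled at compile time).
/*
VkResult res = vmaCheckCorruption(allocator, UINT32_MAX); // Check all memory types.
*/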
15377 
15378 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
15379 {
15380     AtomicTransactionalIncrement<uint32_t> deviceMemoryCountIncrement;
15381     const uint64_t prevDeviceMemoryCount = deviceMemoryCountIncrement.Increment(&m_DeviceMemoryCount);
15382 #if VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT
15383     if(prevDeviceMemoryCount >= m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount)
15384     {
15385         return VK_ERROR_TOO_MANY_OBJECTS;
15386     }
15387 #endif
15388 
15389     const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
15390 
15391     // HeapSizeLimit is in effect for this heap.
15392     if((m_HeapSizeLimitMask & (1u << heapIndex)) != 0)
15393     {
15394         const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
15395         VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex];
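        // Reserve the new bytes optimistically with a CAS loop: when
        // compare_exchange_strong fails, it reloads `blockBytes` with the current
        // value and the heap-limit check is repeated against it.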
15396         for(;;)
15397         {
15398             const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize;
15399             if(blockBytesAfterAllocation > heapSize)
15400             {
15401                 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15402             }
15403             if(m_Budget.m_BlockBytes[heapIndex].compare_exchange_strong(blockBytes, blockBytesAfterAllocation))
15404             {
15405                 break;
15406             }
15407         }
15408     }
15409     else
15410     {
15411         m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize;
15412     }
15413     ++m_Budget.m_BlockCount[heapIndex];
15414 
15415     // VULKAN CALL vkAllocateMemory.
15416     VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
15417 
15418     if(res == VK_SUCCESS)
15419     {
15420 #if VMA_MEMORY_BUDGET
15421         ++m_Budget.m_OperationsSinceBudgetFetch;
15422 #endif
15423 
15424         // Informative callback.
15425         if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
15426         {
15427             (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize, m_DeviceMemoryCallbacks.pUserData);
15428         }
15429 
15430         deviceMemoryCountIncrement.Commit();
15431     }
15432     else
15433     {
15434         --m_Budget.m_BlockCount[heapIndex];
15435         m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize;
15436     }
15437 
15438     return res;
15439 }
15440 
15441 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
15442 {
15443     // Informative callback.
15444     if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
15445     {
15446         (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size, m_DeviceMemoryCallbacks.pUserData);
15447     }
15448 
15449     // VULKAN CALL vkFreeMemory.
15450     (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
15451 
15452     const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
15453     --m_Budget.m_BlockCount[heapIndex];
15454     m_Budget.m_BlockBytes[heapIndex] -= size;
15455 
15456     --m_DeviceMemoryCount;
15457 }
15458 
15459 VkResult VmaAllocator_T::BindVulkanBuffer(
15460     VkDeviceMemory memory,
15461     VkDeviceSize memoryOffset,
15462     VkBuffer buffer,
15463     const void* pNext)
15464 {
15465     if(pNext != VMA_NULL)
15466     {
15467 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
15468         if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
15469             m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL)
15470         {
15471             VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR };
15472             bindBufferMemoryInfo.pNext = pNext;
15473             bindBufferMemoryInfo.buffer = buffer;
15474             bindBufferMemoryInfo.memory = memory;
15475             bindBufferMemoryInfo.memoryOffset = memoryOffset;
15476             return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
15477         }
15478         else
15479 #endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
15480         {
15481             return VK_ERROR_EXTENSION_NOT_PRESENT;
15482         }
15483     }
15484     else
15485     {
15486         return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset);
15487     }
15488 }
15489 
15490 VkResult VmaAllocator_T::BindVulkanImage(
15491     VkDeviceMemory memory,
15492     VkDeviceSize memoryOffset,
15493     VkImage image,
15494     const void* pNext)
15495 {
15496     if(pNext != VMA_NULL)
15497     {
15498 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
15499         if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
15500             m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL)
15501         {
15502             VkBindImageMemoryInfoKHR bindImageMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR };
15503             bindImageMemoryInfo.pNext = pNext;
15504             bindImageMemoryInfo.image = image;
15505             bindImageMemoryInfo.memory = memory;
15506             bindImageMemoryInfo.memoryOffset = memoryOffset;
15507             return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindImageMemoryInfo);
15508         }
15509         else
15510 #endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
15511         {
15512             return VK_ERROR_EXTENSION_NOT_PRESENT;
15513         }
15514     }
15515     else
15516     {
15517         return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset);
15518     }
15519 }
15520 
15521 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
15522 {
15523     switch(hAllocation->GetType())
15524     {
15525     case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15526         {
15527             VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15528             char *pBytes = VMA_NULL;
15529             VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
15530             if(res == VK_SUCCESS)
15531             {
15532                 *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
15533                 hAllocation->BlockAllocMap();
15534             }
15535             return res;
15536         }
15537     case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15538         return hAllocation->DedicatedAllocMap(this, ppData);
15539     default:
15540         VMA_ASSERT(0);
15541         return VK_ERROR_MEMORY_MAP_FAILED;
15542     }
15543 }
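
// Sketch of the public mapping API that routes into Map()/Unmap() above; `allocator`,
// `alloc`, `srcData`, and `srcSize` are assumed.
/*
void* mapped = VMA_NULL;
if(vmaMapMemory(allocator, alloc, &mapped) == VK_SUCCESS)
{
    memcpy(mapped, srcData, srcSize);
    vmaUnmapMemory(allocator, alloc); // Must balance every successful vmaMapMemory().
}
*/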
15544 
15545 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
15546 {
15547     switch(hAllocation->GetType())
15548     {
15549     case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15550         {
15551             VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15552             hAllocation->BlockAllocUnmap();
15553             pBlock->Unmap(this, 1);
15554         }
15555         break;
15556     case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15557         hAllocation->DedicatedAllocUnmap(this);
15558         break;
15559     default:
15560         VMA_ASSERT(0);
15561     }
15562 }
15563 
15564 VkResult VmaAllocator_T::BindBufferMemory(
15565     VmaAllocation hAllocation,
15566     VkDeviceSize allocationLocalOffset,
15567     VkBuffer hBuffer,
15568     const void* pNext)
15569 {
15570     VkResult res = VK_SUCCESS;
15571     switch(hAllocation->GetType())
15572     {
15573     case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15574         res = BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext);
15575         break;
15576     case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15577     {
15578         VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15579         VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block.");
15580         res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext);
15581         break;
15582     }
15583     default:
15584         VMA_ASSERT(0);
15585     }
15586     return res;
15587 }
15588 
15589 VkResult VmaAllocator_T::BindImageMemory(
15590     VmaAllocation hAllocation,
15591     VkDeviceSize allocationLocalOffset,
15592     VkImage hImage,
15593     const void* pNext)
15594 {
15595     VkResult res = VK_SUCCESS;
15596     switch(hAllocation->GetType())
15597     {
15598     case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15599         res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext);
15600         break;
15601     case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15602     {
15603         VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
15604         VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block.");
15605         res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext);
15606         break;
15607     }
15608     default:
15609         VMA_ASSERT(0);
15610     }
15611     return res;
15612 }
15613 
15614 VkResult VmaAllocator_T::FlushOrInvalidateAllocation(
15615     VmaAllocation hAllocation,
15616     VkDeviceSize offset, VkDeviceSize size,
15617     VMA_CACHE_OPERATION op)
15618 {
15619     VkResult res = VK_SUCCESS;
15620 
15621     VkMappedMemoryRange memRange = {};
15622     if(GetFlushOrInvalidateRange(hAllocation, offset, size, memRange))
15623     {
15624         switch(op)
15625         {
15626         case VMA_CACHE_FLUSH:
15627             res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
15628             break;
15629         case VMA_CACHE_INVALIDATE:
15630             res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
15631             break;
15632         default:
15633             VMA_ASSERT(0);
15634         }
15635     }
15636     // else: Just ignore this call.
15637     return res;
15638 }
15639 
15640 VkResult VmaAllocator_T::FlushOrInvalidateAllocations(
15641     uint32_t allocationCount,
15642     const VmaAllocation* allocations,
15643     const VkDeviceSize* offsets, const VkDeviceSize* sizes,
15644     VMA_CACHE_OPERATION op)
15645 {
15646     typedef VmaStlAllocator<VkMappedMemoryRange> RangeAllocator;
15647     typedef VmaSmallVector<VkMappedMemoryRange, RangeAllocator, 16> RangeVector;
15648     RangeVector ranges = RangeVector(RangeAllocator(GetAllocationCallbacks()));
15649 
15650     for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
15651     {
15652         const VmaAllocation alloc = allocations[allocIndex];
15653         const VkDeviceSize offset = offsets != VMA_NULL ? offsets[allocIndex] : 0;
15654         const VkDeviceSize size = sizes != VMA_NULL ? sizes[allocIndex] : VK_WHOLE_SIZE;
15655         VkMappedMemoryRange newRange;
15656         if(GetFlushOrInvalidateRange(alloc, offset, size, newRange))
15657         {
15658             ranges.push_back(newRange);
15659         }
15660     }
15661 
15662     VkResult res = VK_SUCCESS;
15663     if(!ranges.empty())
15664     {
15665         switch(op)
15666         {
15667         case VMA_CACHE_FLUSH:
15668             res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
15669             break;
15670         case VMA_CACHE_INVALIDATE:
15671             res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
15672             break;
15673         default:
15674             VMA_ASSERT(0);
15675         }
15676     }
15677     // else: Just ignore this call.
15678     return res;
15679 }
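
// Sketch of the public flush/invalidate calls that drive the two functions above.
// These matter only for HOST_VISIBLE memory that is not HOST_COHERENT:
/*
vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);      // After a CPU write.
vmaInvalidateAllocation(allocator, alloc, 0, VK_WHOLE_SIZE); // Before a CPU read of GPU-written data.
*/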
15680 
15681 void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation)
15682 {
15683     VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
15684 
15685     const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
15686     VmaPool parentPool = allocation->GetParentPool();
15687     if(parentPool == VK_NULL_HANDLE)
15688     {
15689         // Default pool
15690         m_DedicatedAllocations[memTypeIndex].Unregister(allocation);
15691     }
15692     else
15693     {
15694         // Custom pool
15695         parentPool->m_DedicatedAllocations.Unregister(allocation);
15696     }
15697 
15698     VkDeviceMemory hMemory = allocation->GetMemory();
15699 
15700     /*
15701     There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
15702     before vkFreeMemory.
15703 
15704     if(allocation->GetMappedData() != VMA_NULL)
15705     {
15706         (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
15707     }
15708     */
15709 
15710     FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
15711 
15712     m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize());
15713     m_AllocationObjectAllocator.Free(allocation);
15714 
15715     VMA_DEBUG_LOG("    Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
15716 }
15717 
15718 uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
15719 {
15720     VkBufferCreateInfo dummyBufCreateInfo;
15721     VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
15722 
15723     uint32_t memoryTypeBits = 0;
15724 
15725     // Create buffer.
15726     VkBuffer buf = VK_NULL_HANDLE;
15727     VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
15728         m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
15729     if(res == VK_SUCCESS)
15730     {
15731         // Query for supported memory types.
15732         VkMemoryRequirements memReq;
15733         (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
15734         memoryTypeBits = memReq.memoryTypeBits;
15735 
15736         // Destroy buffer.
15737         (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
15738     }
15739 
15740     return memoryTypeBits;
15741 }
15742 
15743 uint32_t VmaAllocator_T::CalculateGlobalMemoryTypeBits() const
15744 {
15745     // Make sure memory information is already fetched.
15746     VMA_ASSERT(GetMemoryTypeCount() > 0);
15747 
15748     uint32_t memoryTypeBits = UINT32_MAX;
15749 
15750     if(!m_UseAmdDeviceCoherentMemory)
15751     {
15752         // Exclude memory types that have VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD.
15753         for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15754         {
15755             if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
15756             {
15757                 memoryTypeBits &= ~(1u << memTypeIndex);
15758             }
15759         }
15760     }
15761 
15762     return memoryTypeBits;
15763 }
15764 
15765 bool VmaAllocator_T::GetFlushOrInvalidateRange(
15766     VmaAllocation allocation,
15767     VkDeviceSize offset, VkDeviceSize size,
15768     VkMappedMemoryRange& outRange) const
15769 {
15770     const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
15771     if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
15772     {
15773         const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
15774         const VkDeviceSize allocationSize = allocation->GetSize();
15775         VMA_ASSERT(offset <= allocationSize);
15776 
15777         outRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
15778         outRange.pNext = VMA_NULL;
15779         outRange.memory = allocation->GetMemory();
15780 
15781         switch(allocation->GetType())
15782         {
15783         case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15784             outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
15785             if(size == VK_WHOLE_SIZE)
15786             {
15787                 outRange.size = allocationSize - outRange.offset;
15788             }
15789             else
15790             {
15791                 VMA_ASSERT(offset + size <= allocationSize);
15792                 outRange.size = VMA_MIN(
15793                     VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize),
15794                     allocationSize - outRange.offset);
15795             }
15796             break;
15797         case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15798         {
15799             // 1. Still within this allocation.
15800             outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
15801             if(size == VK_WHOLE_SIZE)
15802             {
15803                 size = allocationSize - offset;
15804             }
15805             else
15806             {
15807                 VMA_ASSERT(offset + size <= allocationSize);
15808             }
15809             outRange.size = VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize);
15810 
15811             // 2. Adjust to whole block.
15812             const VkDeviceSize allocationOffset = allocation->GetOffset();
15813             VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
15814             const VkDeviceSize blockSize = allocation->GetBlock()->m_pMetadata->GetSize();
15815             outRange.offset += allocationOffset;
15816             outRange.size = VMA_MIN(outRange.size, blockSize - outRange.offset);
15817 
15818             break;
15819         }
15820         default:
15821             VMA_ASSERT(0);
15822         }
15823         return true;
15824     }
15825     return false;
15826 }
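
/*
Worked example (illustrative numbers, not from the library): with
nonCoherentAtomSize = 64, offset = 100, size = 50, the code above produces
outRange.offset = VmaAlignDown(100, 64) = 64 and
outRange.size = VmaAlignUp(50 + (100 - 64), 64) = VmaAlignUp(86, 64) = 128,
then clamps the size so the range never extends past the allocation (or, for
block allocations, past the owning VkDeviceMemory block).
*/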

#if VMA_MEMORY_BUDGET
void VmaAllocator_T::UpdateVulkanBudget()
{
    VMA_ASSERT(m_UseExtMemoryBudget);

    VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR };

    VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT };
    VmaPnextChainPushFront(&memProps, &budgetProps);

    GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps);

    {
        VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex);

        for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
        {
            m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex];
            m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex];
            m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load();

            // Some buggy drivers return the budget incorrectly, e.g. 0 or much bigger than the heap size.
            if(m_Budget.m_VulkanBudget[heapIndex] == 0)
            {
                m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristic.
            }
            else if(m_Budget.m_VulkanBudget[heapIndex] > m_MemProps.memoryHeaps[heapIndex].size)
            {
                m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size;
            }
            if(m_Budget.m_VulkanUsage[heapIndex] == 0 && m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] > 0)
            {
                m_Budget.m_VulkanUsage[heapIndex] = m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
            }
        }
        m_Budget.m_OperationsSinceBudgetFetch = 0;
    }
}
#endif // VMA_MEMORY_BUDGET

void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
{
    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
        hAllocation->IsMappingAllowed() &&
        (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    {
        void* pData = VMA_NULL;
        VkResult res = Map(hAllocation, &pData);
        if(res == VK_SUCCESS)
        {
            memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
            FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
            Unmap(hAllocation);
        }
        else
        {
            VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
        }
    }
}

uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()
{
    uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
    if(memoryTypeBits == UINT32_MAX)
    {
        memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
        m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
    }
    return memoryTypeBits;
}

#if VMA_STATS_STRING_ENABLED
void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    json.WriteString("DefaultPools");
    json.BeginObject();
    {
        for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex];
            VmaDedicatedAllocationList& dedicatedAllocList = m_DedicatedAllocations[memTypeIndex];
            if (pBlockVector != VMA_NULL)
            {
                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                json.EndString();
                json.BeginObject();
                {
                    json.WriteString("PreferredBlockSize");
                    json.WriteNumber(pBlockVector->GetPreferredBlockSize());

                    json.WriteString("Blocks");
                    pBlockVector->PrintDetailedMap(json);

                    json.WriteString("DedicatedAllocations");
                    dedicatedAllocList.BuildStatsString(json);
                }
                json.EndObject();
            }
        }
    }
    json.EndObject();

    json.WriteString("CustomPools");
    json.BeginObject();
    {
        VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
        if (!m_Pools.IsEmpty())
        {
            for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
            {
                bool displayType = true;
                size_t index = 0;
                for (VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
                {
                    VmaBlockVector& blockVector = pool->m_BlockVector;
                    if (blockVector.GetMemoryTypeIndex() == memTypeIndex)
                    {
                        if (displayType)
                        {
                            json.BeginString("Type ");
                            json.ContinueString(memTypeIndex);
                            json.EndString();
                            json.BeginArray();
                            displayType = false;
                        }

                        json.BeginObject();
                        {
                            json.WriteString("Name");
                            json.BeginString();
                            json.ContinueString_Size(index++);
                            if (pool->GetName())
                            {
                                json.ContinueString(" - ");
                                json.ContinueString(pool->GetName());
                            }
                            json.EndString();

                            json.WriteString("PreferredBlockSize");
                            json.WriteNumber(blockVector.GetPreferredBlockSize());

                            json.WriteString("Blocks");
                            blockVector.PrintDetailedMap(json);

                            json.WriteString("DedicatedAllocations");
                            pool->m_DedicatedAllocations.BuildStatsString(json);
                        }
                        json.EndObject();
                    }
                }

                if (!displayType)
                    json.EndArray();
            }
        }
    }
    json.EndObject();
}
#endif // VMA_STATS_STRING_ENABLED
#endif // _VMA_ALLOCATOR_T_FUNCTIONS


#ifndef _VMA_PUBLIC_INTERFACE
VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator)
{
    VMA_ASSERT(pCreateInfo && pAllocator);
    VMA_ASSERT(pCreateInfo->vulkanApiVersion == 0 ||
        (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 3));
    VMA_DEBUG_LOG("vmaCreateAllocator");
    *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    VkResult result = (*pAllocator)->Init(pCreateInfo);
    if(result < 0)
    {
        vma_delete(pCreateInfo->pAllocationCallbacks, *pAllocator);
        *pAllocator = VK_NULL_HANDLE;
    }
    return result;
}
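
/*
Illustrative usage sketch (assumes the caller already owns a valid VkInstance,
VkPhysicalDevice, and VkDevice - the instance/physicalDevice/device handles
below are placeholders, not part of this file):

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.vulkanApiVersion = VK_API_VERSION_1_2;
    allocatorInfo.instance = instance;             // caller's VkInstance
    allocatorInfo.physicalDevice = physicalDevice; // caller's VkPhysicalDevice
    allocatorInfo.device = device;                 // caller's VkDevice

    VmaAllocator allocator = VK_NULL_HANDLE;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... use the allocator ...
    vmaDestroyAllocator(allocator);
*/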

VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
    VmaAllocator allocator)
{
    if(allocator != VK_NULL_HANDLE)
    {
        VMA_DEBUG_LOG("vmaDestroyAllocator");
        VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks; // Have to copy the callbacks when destroying.
        vma_delete(&allocationCallbacks, allocator);
    }
}

VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo* pAllocatorInfo)
{
    VMA_ASSERT(allocator && pAllocatorInfo);
    pAllocatorInfo->instance = allocator->m_hInstance;
    pAllocatorInfo->physicalDevice = allocator->GetPhysicalDevice();
    pAllocatorInfo->device = allocator->m_hDevice;
}

VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties)
{
    VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
}

VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
{
    VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
}

VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
    VmaAllocator allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* pFlags)
{
    VMA_ASSERT(allocator && pFlags);
    VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
}

VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
    VmaAllocator allocator,
    uint32_t frameIndex)
{
    VMA_ASSERT(allocator);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->SetCurrentFrameIndex(frameIndex);
}

VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStatistics(
    VmaAllocator allocator,
    VmaTotalStatistics* pStats)
{
    VMA_ASSERT(allocator && pStats);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK
    allocator->CalculateStatistics(pStats);
}

VMA_CALL_PRE void VMA_CALL_POST vmaGetHeapBudgets(
    VmaAllocator allocator,
    VmaBudget* pBudgets)
{
    VMA_ASSERT(allocator && pBudgets);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK
    allocator->GetHeapBudgets(pBudgets, 0, allocator->GetMemoryHeapCount());
}
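
/*
Illustrative usage sketch (the heap count is capped by VK_MAX_MEMORY_HEAPS, so
a stack array is sufficient; the printf is a placeholder for the caller's
logging):

    const VkPhysicalDeviceMemoryProperties* memProps = VMA_NULL;
    vmaGetMemoryProperties(allocator, &memProps);

    VmaBudget budgets[VK_MAX_MEMORY_HEAPS] = {};
    vmaGetHeapBudgets(allocator, budgets);
    for(uint32_t i = 0; i < memProps->memoryHeapCount; ++i)
    {
        // budgets[i].usage is the current usage estimate; budgets[i].budget is
        // how much this process can use without oversubscribing the heap.
        printf("Heap %u: %llu / %llu bytes\n", i,
            (unsigned long long)budgets[i].usage,
            (unsigned long long)budgets[i].budget);
    }
*/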

#if VMA_STATS_STRING_ENABLED

VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap)
{
    VMA_ASSERT(allocator && ppStatsString);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VmaStringBuilder sb(allocator->GetAllocationCallbacks());
    {
        VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
        allocator->GetHeapBudgets(budgets, 0, allocator->GetMemoryHeapCount());

        VmaTotalStatistics stats;
        allocator->CalculateStatistics(&stats);

        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();
        {
            json.WriteString("General");
            json.BeginObject();
            {
                const VkPhysicalDeviceProperties& deviceProperties = allocator->m_PhysicalDeviceProperties;
                const VkPhysicalDeviceMemoryProperties& memoryProperties = allocator->m_MemProps;

                json.WriteString("API");
                json.WriteString("Vulkan");

                json.WriteString("apiVersion");
                json.BeginString();
                json.ContinueString(VK_API_VERSION_MAJOR(deviceProperties.apiVersion));
                json.ContinueString(".");
                json.ContinueString(VK_API_VERSION_MINOR(deviceProperties.apiVersion));
                json.ContinueString(".");
                json.ContinueString(VK_API_VERSION_PATCH(deviceProperties.apiVersion));
                json.EndString();

                json.WriteString("GPU");
                json.WriteString(deviceProperties.deviceName);
                json.WriteString("deviceType");
                json.WriteNumber(static_cast<uint32_t>(deviceProperties.deviceType));

                json.WriteString("maxMemoryAllocationCount");
                json.WriteNumber(deviceProperties.limits.maxMemoryAllocationCount);
                json.WriteString("bufferImageGranularity");
                json.WriteNumber(deviceProperties.limits.bufferImageGranularity);
                json.WriteString("nonCoherentAtomSize");
                json.WriteNumber(deviceProperties.limits.nonCoherentAtomSize);

                json.WriteString("memoryHeapCount");
                json.WriteNumber(memoryProperties.memoryHeapCount);
                json.WriteString("memoryTypeCount");
                json.WriteNumber(memoryProperties.memoryTypeCount);
            }
            json.EndObject();
        }
        {
            json.WriteString("Total");
            VmaPrintDetailedStatistics(json, stats.total);
        }
        {
            json.WriteString("MemoryInfo");
            json.BeginObject();
            {
                for (uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
                {
                    json.BeginString("Heap ");
                    json.ContinueString(heapIndex);
                    json.EndString();
                    json.BeginObject();
                    {
                        const VkMemoryHeap& heapInfo = allocator->m_MemProps.memoryHeaps[heapIndex];
                        json.WriteString("Flags");
                        json.BeginArray(true);
                        {
                            if (heapInfo.flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT)
                                json.WriteString("DEVICE_LOCAL");
                        #if VMA_VULKAN_VERSION >= 1001000
                            if (heapInfo.flags & VK_MEMORY_HEAP_MULTI_INSTANCE_BIT)
                                json.WriteString("MULTI_INSTANCE");
                        #endif

                            VkMemoryHeapFlags flags = heapInfo.flags &
                                ~(VK_MEMORY_HEAP_DEVICE_LOCAL_BIT
                        #if VMA_VULKAN_VERSION >= 1001000
                                    | VK_MEMORY_HEAP_MULTI_INSTANCE_BIT
                        #endif
                                    );
                            if (flags != 0)
                                json.WriteNumber(flags);
                        }
                        json.EndArray();

                        json.WriteString("Size");
                        json.WriteNumber(heapInfo.size);

                        json.WriteString("Budget");
                        json.BeginObject();
                        {
                            json.WriteString("BudgetBytes");
                            json.WriteNumber(budgets[heapIndex].budget);
                            json.WriteString("UsageBytes");
                            json.WriteNumber(budgets[heapIndex].usage);
                        }
                        json.EndObject();

                        json.WriteString("Stats");
                        VmaPrintDetailedStatistics(json, stats.memoryHeap[heapIndex]);

                        json.WriteString("MemoryPools");
                        json.BeginObject();
                        {
                            for (uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
                            {
                                if (allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
                                {
                                    json.BeginString("Type ");
                                    json.ContinueString(typeIndex);
                                    json.EndString();
                                    json.BeginObject();
                                    {
                                        json.WriteString("Flags");
                                        json.BeginArray(true);
                                        {
                                            VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
                                            if (flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)
                                                json.WriteString("DEVICE_LOCAL");
                                            if (flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
                                                json.WriteString("HOST_VISIBLE");
                                            if (flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)
                                                json.WriteString("HOST_COHERENT");
                                            if (flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT)
                                                json.WriteString("HOST_CACHED");
                                            if (flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT)
                                                json.WriteString("LAZILY_ALLOCATED");
                                        #if VMA_VULKAN_VERSION >= 1001000
                                            if (flags & VK_MEMORY_PROPERTY_PROTECTED_BIT)
                                                json.WriteString("PROTECTED");
                                        #endif
                                        #if VK_AMD_device_coherent_memory
                                            if (flags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY)
                                                json.WriteString("DEVICE_COHERENT_AMD");
                                            if (flags & VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)
                                                json.WriteString("DEVICE_UNCACHED_AMD");
                                        #endif

                                            flags &= ~(VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT
                                        #if VMA_VULKAN_VERSION >= 1001000
                                                | VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT
                                        #endif
                                        #if VK_AMD_device_coherent_memory
                                                | VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY
                                                | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY
                                        #endif
                                                | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT
                                                | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT
                                                | VK_MEMORY_PROPERTY_HOST_CACHED_BIT);
                                            if (flags != 0)
                                                json.WriteNumber(flags);
                                        }
                                        json.EndArray();

                                        json.WriteString("Stats");
                                        VmaPrintDetailedStatistics(json, stats.memoryType[typeIndex]);
                                    }
                                    json.EndObject();
                                }
                            }
                        }
                        json.EndObject();
                    }
                    json.EndObject();
                }
            }
            json.EndObject();
        }

        if (detailedMap == VK_TRUE)
            allocator->PrintDetailedMap(json);

        json.EndObject();
    }

    *ppStatsString = VmaCreateStringCopy(allocator->GetAllocationCallbacks(), sb.GetData(), sb.GetLength());
}

VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
    VmaAllocator allocator,
    char* pStatsString)
{
    if(pStatsString != VMA_NULL)
    {
        VMA_ASSERT(allocator);
        VmaFreeString(allocator->GetAllocationCallbacks(), pStatsString);
    }
}

#endif // VMA_STATS_STRING_ENABLED
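
/*
Illustrative usage sketch (only available when VMA_STATS_STRING_ENABLED is
nonzero; the printf is a placeholder for the caller's logging):

    char* statsString = VMA_NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE: include detailed map
    printf("%s\n", statsString); // JSON document describing heaps, types, and pools
    vmaFreeStatsString(allocator, statsString);
*/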

/*
This function is not protected by any mutex because it just reads immutable data.
*/
VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
    VmaAllocator allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex)
{
    VMA_ASSERT(allocator != VK_NULL_HANDLE);
    VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);

    return allocator->FindMemoryTypeIndex(memoryTypeBits, pAllocationCreateInfo, UINT32_MAX, pMemoryTypeIndex);
}
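
/*
Illustrative usage sketch (memoryTypeBits would normally come from the
VkMemoryRequirements of a real resource; note that VMA_MEMORY_USAGE_AUTO*
without a known buffer/image usage requires a host-access flag, since this
entry point forwards UINT32_MAX as the resource usage):

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT;

    uint32_t memTypeIndex = 0;
    VkResult res = vmaFindMemoryTypeIndex(
        allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex);
    // On success, memTypeIndex can be used e.g. in VmaPoolCreateInfo::memoryTypeIndex.
*/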

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex)
{
    VMA_ASSERT(allocator != VK_NULL_HANDLE);
    VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
    VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);

    const VkDevice hDev = allocator->m_hDevice;
    const VmaVulkanFunctions* funcs = &allocator->GetVulkanFunctions();
    VkResult res;

#if VMA_VULKAN_VERSION >= 1003000
    if(funcs->vkGetDeviceBufferMemoryRequirements)
    {
        // Can query straight from VkBufferCreateInfo :)
        VkDeviceBufferMemoryRequirements devBufMemReq = {VK_STRUCTURE_TYPE_DEVICE_BUFFER_MEMORY_REQUIREMENTS};
        devBufMemReq.pCreateInfo = pBufferCreateInfo;

        VkMemoryRequirements2 memReq = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2};
        (*funcs->vkGetDeviceBufferMemoryRequirements)(hDev, &devBufMemReq, &memReq);

        res = allocator->FindMemoryTypeIndex(
            memReq.memoryRequirements.memoryTypeBits, pAllocationCreateInfo, pBufferCreateInfo->usage, pMemoryTypeIndex);
    }
    else
#endif // #if VMA_VULKAN_VERSION >= 1003000
    {
        // Must create a dummy buffer to query :(
        VkBuffer hBuffer = VK_NULL_HANDLE;
        res = funcs->vkCreateBuffer(
            hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
        if(res == VK_SUCCESS)
        {
            VkMemoryRequirements memReq = {};
            funcs->vkGetBufferMemoryRequirements(hDev, hBuffer, &memReq);

            res = allocator->FindMemoryTypeIndex(
                memReq.memoryTypeBits, pAllocationCreateInfo, pBufferCreateInfo->usage, pMemoryTypeIndex);

            funcs->vkDestroyBuffer(
                hDev, hBuffer, allocator->GetAllocationCallbacks());
        }
    }
    return res;
}

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex)
{
    VMA_ASSERT(allocator != VK_NULL_HANDLE);
    VMA_ASSERT(pImageCreateInfo != VMA_NULL);
    VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);

    const VkDevice hDev = allocator->m_hDevice;
    const VmaVulkanFunctions* funcs = &allocator->GetVulkanFunctions();
    VkResult res;

#if VMA_VULKAN_VERSION >= 1003000
    if(funcs->vkGetDeviceImageMemoryRequirements)
    {
        // Can query straight from VkImageCreateInfo :)
        VkDeviceImageMemoryRequirements devImgMemReq = {VK_STRUCTURE_TYPE_DEVICE_IMAGE_MEMORY_REQUIREMENTS};
        devImgMemReq.pCreateInfo = pImageCreateInfo;
        VMA_ASSERT(pImageCreateInfo->tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT_COPY && (pImageCreateInfo->flags & VK_IMAGE_CREATE_DISJOINT_BIT_COPY) == 0 &&
            "Cannot use this VkImageCreateInfo with vmaFindMemoryTypeIndexForImageInfo as I don't know what to pass as VkDeviceImageMemoryRequirements::planeAspect.");

        VkMemoryRequirements2 memReq = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2};
        (*funcs->vkGetDeviceImageMemoryRequirements)(hDev, &devImgMemReq, &memReq);

        res = allocator->FindMemoryTypeIndex(
            memReq.memoryRequirements.memoryTypeBits, pAllocationCreateInfo, pImageCreateInfo->usage, pMemoryTypeIndex);
    }
    else
#endif // #if VMA_VULKAN_VERSION >= 1003000
    {
        // Must create a dummy image to query :(
        VkImage hImage = VK_NULL_HANDLE;
        res = funcs->vkCreateImage(
            hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
        if(res == VK_SUCCESS)
        {
            VkMemoryRequirements memReq = {};
            funcs->vkGetImageMemoryRequirements(hDev, hImage, &memReq);

            res = allocator->FindMemoryTypeIndex(
                memReq.memoryTypeBits, pAllocationCreateInfo, pImageCreateInfo->usage, pMemoryTypeIndex);

            funcs->vkDestroyImage(
                hDev, hImage, allocator->GetAllocationCallbacks());
        }
    }
    return res;
}

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool)
{
    VMA_ASSERT(allocator && pCreateInfo && pPool);

    VMA_DEBUG_LOG("vmaCreatePool");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->CreatePool(pCreateInfo, pPool);
}
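
/*
Illustrative usage sketch (a custom pool for small uniform buffers; the sizes
and usage flags are arbitrary example values):

    VkBufferCreateInfo sampleBufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    sampleBufInfo.size = 1024;
    sampleBufInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;

    VmaAllocationCreateInfo sampleAllocInfo = {};
    sampleAllocInfo.usage = VMA_MEMORY_USAGE_AUTO;

    uint32_t memTypeIndex = 0;
    vmaFindMemoryTypeIndexForBufferInfo(
        allocator, &sampleBufInfo, &sampleAllocInfo, &memTypeIndex);

    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memTypeIndex;
    poolInfo.blockSize = 16ull * 1024 * 1024; // optional: fixed 16 MiB blocks

    VmaPool pool = VK_NULL_HANDLE;
    VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);
    // ... allocate from it via VmaAllocationCreateInfo::pool ...
    vmaDestroyPool(allocator, pool);
*/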

VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool)
{
    VMA_ASSERT(allocator);

    if(pool == VK_NULL_HANDLE)
    {
        return;
    }

    VMA_DEBUG_LOG("vmaDestroyPool");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->DestroyPool(pool);
}

VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStatistics(
    VmaAllocator allocator,
    VmaPool pool,
    VmaStatistics* pPoolStats)
{
    VMA_ASSERT(allocator && pool && pPoolStats);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->GetPoolStatistics(pool, pPoolStats);
}

VMA_CALL_PRE void VMA_CALL_POST vmaCalculatePoolStatistics(
    VmaAllocator allocator,
    VmaPool pool,
    VmaDetailedStatistics* pPoolStats)
{
    VMA_ASSERT(allocator && pool && pPoolStats);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->CalculatePoolStatistics(pool, pPoolStats);
}

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
{
    VMA_ASSERT(allocator && pool);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VMA_DEBUG_LOG("vmaCheckPoolCorruption");

    return allocator->CheckPoolCorruption(pool);
}

VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
    VmaAllocator allocator,
    VmaPool pool,
    const char** ppName)
{
    VMA_ASSERT(allocator && pool && ppName);

    VMA_DEBUG_LOG("vmaGetPoolName");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    *ppName = pool->GetName();
}

VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
    VmaAllocator allocator,
    VmaPool pool,
    const char* pName)
{
    VMA_ASSERT(allocator && pool);

    VMA_DEBUG_LOG("vmaSetPoolName");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    pool->SetName(pName);
}

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);

    VMA_DEBUG_LOG("vmaAllocateMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkResult result = allocator->AllocateMemory(
        *pVkMemoryRequirements,
        false, // requiresDedicatedAllocation
        false, // prefersDedicatedAllocation
        VK_NULL_HANDLE, // dedicatedBuffer
        VK_NULL_HANDLE, // dedicatedImage
        UINT32_MAX, // dedicatedBufferImageUsage
        *pCreateInfo,
        VMA_SUBALLOCATION_TYPE_UNKNOWN,
        1, // allocationCount
        pAllocation);

    if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    {
        allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    }

    return result;
}
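
/*
Illustrative usage sketch (allocating raw memory for requirements queried by
the caller, then binding it to an existing buffer; device and buffer are the
caller's handles, and requiredFlags is used to sidestep the VMA_MEMORY_USAGE_AUTO*
restriction described above vmaFindMemoryTypeIndex):

    VkMemoryRequirements memReq = {};
    vkGetBufferMemoryRequirements(device, buffer, &memReq);

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.requiredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

    VmaAllocation allocation = VK_NULL_HANDLE;
    VkResult res = vmaAllocateMemory(
        allocator, &memReq, &allocCreateInfo, &allocation, VMA_NULL);
    if(res == VK_SUCCESS)
        res = vmaBindBufferMemory(allocator, allocation, buffer);
*/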

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    size_t allocationCount,
    VmaAllocation* pAllocations,
    VmaAllocationInfo* pAllocationInfo)
{
    if(allocationCount == 0)
    {
        return VK_SUCCESS;
    }

    VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);

    VMA_DEBUG_LOG("vmaAllocateMemoryPages");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkResult result = allocator->AllocateMemory(
        *pVkMemoryRequirements,
        false, // requiresDedicatedAllocation
        false, // prefersDedicatedAllocation
        VK_NULL_HANDLE, // dedicatedBuffer
        VK_NULL_HANDLE, // dedicatedImage
        UINT32_MAX, // dedicatedBufferImageUsage
        *pCreateInfo,
        VMA_SUBALLOCATION_TYPE_UNKNOWN,
        allocationCount,
        pAllocations);

    if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    {
        for(size_t i = 0; i < allocationCount; ++i)
        {
            allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
        }
    }

    return result;
}

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);

    VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkMemoryRequirements vkMemReq = {};
    bool requiresDedicatedAllocation = false;
    bool prefersDedicatedAllocation = false;
    allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
        requiresDedicatedAllocation,
        prefersDedicatedAllocation);

    VkResult result = allocator->AllocateMemory(
        vkMemReq,
        requiresDedicatedAllocation,
        prefersDedicatedAllocation,
        buffer, // dedicatedBuffer
        VK_NULL_HANDLE, // dedicatedImage
        UINT32_MAX, // dedicatedBufferImageUsage
        *pCreateInfo,
        VMA_SUBALLOCATION_TYPE_BUFFER,
        1, // allocationCount
        pAllocation);

    if(pAllocationInfo && result == VK_SUCCESS)
    {
        allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    }

    return result;
}

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
    VmaAllocator allocator,
    VkImage image,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);

    VMA_DEBUG_LOG("vmaAllocateMemoryForImage");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkMemoryRequirements vkMemReq = {};
    bool requiresDedicatedAllocation = false;
    bool prefersDedicatedAllocation  = false;
    allocator->GetImageMemoryRequirements(image, vkMemReq,
        requiresDedicatedAllocation, prefersDedicatedAllocation);

    VkResult result = allocator->AllocateMemory(
        vkMemReq,
        requiresDedicatedAllocation,
        prefersDedicatedAllocation,
        VK_NULL_HANDLE, // dedicatedBuffer
        image, // dedicatedImage
        UINT32_MAX, // dedicatedBufferImageUsage
        *pCreateInfo,
        VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
        1, // allocationCount
        pAllocation);

    if(pAllocationInfo && result == VK_SUCCESS)
    {
        allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    }

    return result;
}

VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator);

    if(allocation == VK_NULL_HANDLE)
    {
        return;
    }

    VMA_DEBUG_LOG("vmaFreeMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->FreeMemory(
        1, // allocationCount
        &allocation);
}

VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
    VmaAllocator allocator,
    size_t allocationCount,
    const VmaAllocation* pAllocations)
{
    if(allocationCount == 0)
    {
        return;
    }

    VMA_ASSERT(allocator);

    VMA_DEBUG_LOG("vmaFreeMemoryPages");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->FreeMemory(allocationCount, pAllocations);
}

VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && allocation && pAllocationInfo);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->GetAllocationInfo(allocation, pAllocationInfo);
}

VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void* pUserData)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocation->SetUserData(allocator, pUserData);
}

VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationName(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    const char* VMA_NULLABLE pName)
{
    allocation->SetName(allocator, pName);
}

VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationMemoryProperties(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkMemoryPropertyFlags* VMA_NOT_NULL pFlags)
{
    VMA_ASSERT(allocator && allocation && pFlags);
    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    *pFlags = allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
}

VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData)
{
    VMA_ASSERT(allocator && allocation && ppData);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->Map(allocation, ppData);
}

VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->Unmap(allocation);
}
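
/*
Illustrative usage sketch (uploading data to a host-visible allocation;
srcData/srcSize are caller-provided. The flush is a no-op on HOST_COHERENT
memory but required on non-coherent memory types, as GetFlushOrInvalidateRange
above shows):

    void* mapped = VMA_NULL;
    VkResult res = vmaMapMemory(allocator, allocation, &mapped);
    if(res == VK_SUCCESS)
    {
        memcpy(mapped, srcData, srcSize);
        vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
        vmaUnmapMemory(allocator, allocation);
    }
*/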

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkDeviceSize offset,
    VkDeviceSize size)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_LOG("vmaFlushAllocation");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);

    return res;
}

VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkDeviceSize offset,
    VkDeviceSize size)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_LOG("vmaInvalidateAllocation");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);

    return res;
}

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(
    VmaAllocator allocator,
    uint32_t allocationCount,
    const VmaAllocation* allocations,
    const VkDeviceSize* offsets,
    const VkDeviceSize* sizes)
{
    VMA_ASSERT(allocator);

    if(allocationCount == 0)
    {
        return VK_SUCCESS;
    }

    VMA_ASSERT(allocations);

    VMA_DEBUG_LOG("vmaFlushAllocations");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_FLUSH);

    return res;
}

VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(
    VmaAllocator allocator,
    uint32_t allocationCount,
    const VmaAllocation* allocations,
    const VkDeviceSize* offsets,
    const VkDeviceSize* sizes)
{
    VMA_ASSERT(allocator);

    if(allocationCount == 0)
    {
        return VK_SUCCESS;
    }

    VMA_ASSERT(allocations);

    VMA_DEBUG_LOG("vmaInvalidateAllocations");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_INVALIDATE);

    return res;
}

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(
    VmaAllocator allocator,
    uint32_t memoryTypeBits)
{
    VMA_ASSERT(allocator);

    VMA_DEBUG_LOG("vmaCheckCorruption");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->CheckCorruption(memoryTypeBits);
}

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentation(
    VmaAllocator allocator,
    const VmaDefragmentationInfo* pInfo,
    VmaDefragmentationContext* pContext)
{
    VMA_ASSERT(allocator && pInfo && pContext);

    VMA_DEBUG_LOG("vmaBeginDefragmentation");

    if (pInfo->pool != VMA_NULL)
    {
        // Defragmentation is not supported for pools created with the linear algorithm.
        if (pInfo->pool->m_BlockVector.GetAlgorithm() & VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
            return VK_ERROR_FEATURE_NOT_PRESENT;
    }

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    *pContext = vma_new(allocator, VmaDefragmentationContext_T)(allocator, *pInfo);
    return VK_SUCCESS;
}

VMA_CALL_PRE void VMA_CALL_POST vmaEndDefragmentation(
    VmaAllocator allocator,
    VmaDefragmentationContext context,
    VmaDefragmentationStats* pStats)
{
    VMA_ASSERT(allocator && context);

    VMA_DEBUG_LOG("vmaEndDefragmentation");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    if (pStats)
        context->GetStats(*pStats);
    vma_delete(allocator, context);
}

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaDefragmentationContext VMA_NOT_NULL context,
    VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo)
{
    VMA_ASSERT(context && pPassInfo);

    VMA_DEBUG_LOG("vmaBeginDefragmentationPass");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return context->DefragmentPassBegin(*pPassInfo);
}

VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaDefragmentationContext VMA_NOT_NULL context,
    VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo)
{
    VMA_ASSERT(context && pPassInfo);

    VMA_DEBUG_LOG("vmaEndDefragmentationPass");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return context->DefragmentPassEnd(*pPassInfo);
}
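
/*
Illustrative usage sketch of the begin/pass/end protocol above (a sketch under
the assumption that the caller performs the data copies between passes;
VK_SUCCESS from either pass function means defragmentation is finished, while
VK_INCOMPLETE means there are moves to process or more passes to run):

    VmaDefragmentationInfo defragInfo = {};
    VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
    vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx);

    for(;;)
    {
        VmaDefragmentationPassMoveInfo pass = {};
        VkResult res = vmaBeginDefragmentationPass(allocator, defragCtx, &pass);
        if(res == VK_SUCCESS)
            break; // nothing left to defragment
        // res == VK_INCOMPLETE: copy data for pass.pMoves[0..pass.moveCount)
        // (e.g. record and submit GPU copies, then wait), then:
        res = vmaEndDefragmentationPass(allocator, defragCtx, &pass);
        if(res == VK_SUCCESS)
            break;
    }

    VmaDefragmentationStats stats = {};
    vmaEndDefragmentation(allocator, defragCtx, &stats);
*/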

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkBuffer buffer)
{
    VMA_ASSERT(allocator && allocation && buffer);

    VMA_DEBUG_LOG("vmaBindBufferMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL);
}

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkDeviceSize allocationLocalOffset,
    VkBuffer buffer,
    const void* pNext)
{
    VMA_ASSERT(allocator && allocation && buffer);

    VMA_DEBUG_LOG("vmaBindBufferMemory2");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext);
}

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkImage image)
{
    VMA_ASSERT(allocator && allocation && image);

    VMA_DEBUG_LOG("vmaBindImageMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->BindImageMemory(allocation, 0, image, VMA_NULL);
}

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkDeviceSize allocationLocalOffset,
    VkImage image,
    const void* pNext)
{
    VMA_ASSERT(allocator && allocation && image);

    VMA_DEBUG_LOG("vmaBindImageMemory2");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext);
}

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);

    if(pBufferCreateInfo->size == 0)
    {
        return VK_ERROR_INITIALIZATION_FAILED;
    }
    if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&
        !allocator->m_UseKhrBufferDeviceAddress)
    {
        VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.");
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    VMA_DEBUG_LOG("vmaCreateBuffer");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    *pBuffer = VK_NULL_HANDLE;
    *pAllocation = VK_NULL_HANDLE;

    // 1. Create VkBuffer.
    VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
        allocator->m_hDevice,
        pBufferCreateInfo,
        allocator->GetAllocationCallbacks(),
        pBuffer);
    if(res >= 0)
    {
        // 2. vkGetBufferMemoryRequirements.
        VkMemoryRequirements vkMemReq = {};
        bool requiresDedicatedAllocation = false;
        bool prefersDedicatedAllocation  = false;
        allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
            requiresDedicatedAllocation, prefersDedicatedAllocation);

        // 3. Allocate memory using allocator.
        res = allocator->AllocateMemory(
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            *pBuffer, // dedicatedBuffer
            VK_NULL_HANDLE, // dedicatedImage
            pBufferCreateInfo->usage, // dedicatedBufferImageUsage
            *pAllocationCreateInfo,
            VMA_SUBALLOCATION_TYPE_BUFFER,
            1, // allocationCount
            pAllocation);

        if(res >= 0)
        {
            // 4. Bind buffer with memory.
            if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
            {
                res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);
            }
            if(res >= 0)
            {
                // All steps succeeded.
                #if VMA_STATS_STRING_ENABLED
                    (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
                #endif
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }

                return VK_SUCCESS;
            }
            allocator->FreeMemory(
                1, // allocationCount
                pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
            *pBuffer = VK_NULL_HANDLE;
            return res;
        }
        (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
        *pBuffer = VK_NULL_HANDLE;
        return res;
    }
    return res;
}
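
/*
Illustrative usage sketch (the usual create-buffer path; size and usage are
example values):

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;

    VkBuffer buf = VK_NULL_HANDLE;
    VmaAllocation alloc = VK_NULL_HANDLE;
    VkResult res = vmaCreateBuffer(
        allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, VMA_NULL);
    // ... use buf ...
    vmaDestroyBuffer(allocator, buf, alloc);
*/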
17071 
vmaCreateBufferWithAlignment(VmaAllocator allocator,const VkBufferCreateInfo * pBufferCreateInfo,const VmaAllocationCreateInfo * pAllocationCreateInfo,VkDeviceSize minAlignment,VkBuffer * pBuffer,VmaAllocation * pAllocation,VmaAllocationInfo * pAllocationInfo)17072 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment(
17073     VmaAllocator allocator,
17074     const VkBufferCreateInfo* pBufferCreateInfo,
17075     const VmaAllocationCreateInfo* pAllocationCreateInfo,
17076     VkDeviceSize minAlignment,
17077     VkBuffer* pBuffer,
17078     VmaAllocation* pAllocation,
17079     VmaAllocationInfo* pAllocationInfo)
17080 {
17081     VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && VmaIsPow2(minAlignment) && pBuffer && pAllocation);
17082 
17083     if(pBufferCreateInfo->size == 0)
17084     {
17085         return VK_ERROR_INITIALIZATION_FAILED;
17086     }
17087     if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&
17088         !allocator->m_UseKhrBufferDeviceAddress)
17089     {
17090         VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.");
17091         return VK_ERROR_INITIALIZATION_FAILED;
17092     }
17093 
17094     VMA_DEBUG_LOG("vmaCreateBufferWithAlignment");
17095 
17096     VMA_DEBUG_GLOBAL_MUTEX_LOCK
17097 
17098     *pBuffer = VK_NULL_HANDLE;
17099     *pAllocation = VK_NULL_HANDLE;
17100 
17101     // 1. Create VkBuffer.
17102     VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
17103         allocator->m_hDevice,
17104         pBufferCreateInfo,
17105         allocator->GetAllocationCallbacks(),
17106         pBuffer);
17107     if(res >= 0)
17108     {
17109         // 2. vkGetBufferMemoryRequirements.
17110         VkMemoryRequirements vkMemReq = {};
17111         bool requiresDedicatedAllocation = false;
17112         bool prefersDedicatedAllocation  = false;
17113         allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
17114             requiresDedicatedAllocation, prefersDedicatedAllocation);
17115 
17116         // 2a. Include minAlignment
17117         vkMemReq.alignment = VMA_MAX(vkMemReq.alignment, minAlignment);
17118 
17119         // 3. Allocate memory using allocator.
17120         res = allocator->AllocateMemory(
17121             vkMemReq,
17122             requiresDedicatedAllocation,
17123             prefersDedicatedAllocation,
17124             *pBuffer, // dedicatedBuffer
17125             VK_NULL_HANDLE, // dedicatedImage
17126             pBufferCreateInfo->usage, // dedicatedBufferImageUsage
17127             *pAllocationCreateInfo,
17128             VMA_SUBALLOCATION_TYPE_BUFFER,
17129             1, // allocationCount
17130             pAllocation);
17131 
17132         if(res >= 0)
17133         {
17134             // 4. Bind buffer with memory.
17135             if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
17136             {
17137                 res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);
17138             }
17139             if(res >= 0)
17140             {
17141                 // All steps succeeded.
17142                 #if VMA_STATS_STRING_ENABLED
17143                     (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
17144                 #endif
17145                 if(pAllocationInfo != VMA_NULL)
17146                 {
17147                     allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
17148                 }
17149 
17150                 return VK_SUCCESS;
17151             }
17152             allocator->FreeMemory(
17153                 1, // allocationCount
17154                 pAllocation);
17155             *pAllocation = VK_NULL_HANDLE;
17156             (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
17157             *pBuffer = VK_NULL_HANDLE;
17158             return res;
17159         }
17160         (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
17161         *pBuffer = VK_NULL_HANDLE;
17162         return res;
17163     }
17164     return res;
17165 }
17166 
17167 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer(
17168     VmaAllocator VMA_NOT_NULL allocator,
17169     VmaAllocation VMA_NOT_NULL allocation,
17170     const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
17171     VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer)
17172 {
17173     VMA_ASSERT(allocator && pBufferCreateInfo && pBuffer && allocation);
17174 
17175     VMA_DEBUG_LOG("vmaCreateAliasingBuffer");
17176 
17177     *pBuffer = VK_NULL_HANDLE;
17178 
17179     if (pBufferCreateInfo->size == 0)
17180     {
17181         return VK_ERROR_INITIALIZATION_FAILED;
17182     }
17183     if ((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&
17184         !allocator->m_UseKhrBufferDeviceAddress)
17185     {
17186         VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.");
17187         return VK_ERROR_INITIALIZATION_FAILED;
17188     }
17189 
17190     VMA_DEBUG_GLOBAL_MUTEX_LOCK
17191 
17192     // 1. Create VkBuffer.
17193     VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
17194         allocator->m_hDevice,
17195         pBufferCreateInfo,
17196         allocator->GetAllocationCallbacks(),
17197         pBuffer);
17198     if (res >= 0)
17199     {
17200         // 2. Bind buffer with memory.
17201         res = allocator->BindBufferMemory(allocation, 0, *pBuffer, VMA_NULL);
17202         if (res >= 0)
17203         {
17204             return VK_SUCCESS;
17205         }
17206         (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
17207     }
17208     return res;
17209 }
17210 
17211 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
17212     VmaAllocator allocator,
17213     VkBuffer buffer,
17214     VmaAllocation allocation)
17215 {
17216     VMA_ASSERT(allocator);
17217 
17218     if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
17219     {
17220         return;
17221     }
17222 
17223     VMA_DEBUG_LOG("vmaDestroyBuffer");
17224 
17225     VMA_DEBUG_GLOBAL_MUTEX_LOCK
17226 
17227     if(buffer != VK_NULL_HANDLE)
17228     {
17229         (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
17230     }
17231 
17232     if(allocation != VK_NULL_HANDLE)
17233     {
17234         allocator->FreeMemory(
17235             1, // allocationCount
17236             &allocation);
17237     }
17238 }
17239 
17240 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
17241     VmaAllocator allocator,
17242     const VkImageCreateInfo* pImageCreateInfo,
17243     const VmaAllocationCreateInfo* pAllocationCreateInfo,
17244     VkImage* pImage,
17245     VmaAllocation* pAllocation,
17246     VmaAllocationInfo* pAllocationInfo)
17247 {
17248     VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
17249 
17250     if(pImageCreateInfo->extent.width == 0 ||
17251         pImageCreateInfo->extent.height == 0 ||
17252         pImageCreateInfo->extent.depth == 0 ||
17253         pImageCreateInfo->mipLevels == 0 ||
17254         pImageCreateInfo->arrayLayers == 0)
17255     {
17256         return VK_ERROR_INITIALIZATION_FAILED;
17257     }
17258 
17259     VMA_DEBUG_LOG("vmaCreateImage");
17260 
17261     VMA_DEBUG_GLOBAL_MUTEX_LOCK
17262 
17263     *pImage = VK_NULL_HANDLE;
17264     *pAllocation = VK_NULL_HANDLE;
17265 
17266     // 1. Create VkImage.
17267     VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
17268         allocator->m_hDevice,
17269         pImageCreateInfo,
17270         allocator->GetAllocationCallbacks(),
17271         pImage);
17272     if(res >= 0)
17273     {
17274         VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
17275             VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
17276             VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
17277 
17278         // 2. Allocate memory using allocator.
17279         VkMemoryRequirements vkMemReq = {};
17280         bool requiresDedicatedAllocation = false;
17281         bool prefersDedicatedAllocation  = false;
17282         allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
17283             requiresDedicatedAllocation, prefersDedicatedAllocation);
17284 
17285         res = allocator->AllocateMemory(
17286             vkMemReq,
17287             requiresDedicatedAllocation,
17288             prefersDedicatedAllocation,
17289             VK_NULL_HANDLE, // dedicatedBuffer
17290             *pImage, // dedicatedImage
17291             pImageCreateInfo->usage, // dedicatedBufferImageUsage
17292             *pAllocationCreateInfo,
17293             suballocType,
17294             1, // allocationCount
17295             pAllocation);
17296 
17297         if(res >= 0)
17298         {
17299             // 3. Bind image with memory.
17300             if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
17301             {
17302                 res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL);
17303             }
17304             if(res >= 0)
17305             {
17306                 // All steps succeeded.
17307                 #if VMA_STATS_STRING_ENABLED
17308                     (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
17309                 #endif
17310                 if(pAllocationInfo != VMA_NULL)
17311                 {
17312                     allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
17313                 }
17314 
17315                 return VK_SUCCESS;
17316             }
17317             allocator->FreeMemory(
17318                 1, // allocationCount
17319                 pAllocation);
17320             *pAllocation = VK_NULL_HANDLE;
17321             (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
17322             *pImage = VK_NULL_HANDLE;
17323             return res;
17324         }
17325         (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
17326         *pImage = VK_NULL_HANDLE;
17327         return res;
17328     }
17329     return res;
17330 }
17331 
17332 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage(
17333     VmaAllocator VMA_NOT_NULL allocator,
17334     VmaAllocation VMA_NOT_NULL allocation,
17335     const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
17336     VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage)
17337 {
17338     VMA_ASSERT(allocator && pImageCreateInfo && pImage && allocation);
17339 
17340     *pImage = VK_NULL_HANDLE;
17341 
17342     VMA_DEBUG_LOG("vmaCreateAliasingImage");
17343 
17344     if (pImageCreateInfo->extent.width == 0 ||
17345         pImageCreateInfo->extent.height == 0 ||
17346         pImageCreateInfo->extent.depth == 0 ||
17347         pImageCreateInfo->mipLevels == 0 ||
17348         pImageCreateInfo->arrayLayers == 0)
17349     {
17350         return VK_ERROR_INITIALIZATION_FAILED;
17351     }
17352 
17353     VMA_DEBUG_GLOBAL_MUTEX_LOCK
17354 
17355     // 1. Create VkImage.
17356     VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
17357         allocator->m_hDevice,
17358         pImageCreateInfo,
17359         allocator->GetAllocationCallbacks(),
17360         pImage);
17361     if (res >= 0)
17362     {
17363         // 2. Bind image with memory.
17364         res = allocator->BindImageMemory(allocation, 0, *pImage, VMA_NULL);
17365         if (res >= 0)
17366         {
17367             return VK_SUCCESS;
17368         }
17369         (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
17370     }
17371     return res;
17372 }
17373 
17374 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
17375     VmaAllocator VMA_NOT_NULL allocator,
17376     VkImage VMA_NULLABLE_NON_DISPATCHABLE image,
17377     VmaAllocation VMA_NULLABLE allocation)
17378 {
17379     VMA_ASSERT(allocator);
17380 
17381     if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
17382     {
17383         return;
17384     }
17385 
17386     VMA_DEBUG_LOG("vmaDestroyImage");
17387 
17388     VMA_DEBUG_GLOBAL_MUTEX_LOCK
17389 
17390     if(image != VK_NULL_HANDLE)
17391     {
17392         (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
17393     }
17394     if(allocation != VK_NULL_HANDLE)
17395     {
17396         allocator->FreeMemory(
17397             1, // allocationCount
17398             &allocation);
17399     }
17400 }
17401 
17402 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateVirtualBlock(
17403     const VmaVirtualBlockCreateInfo* VMA_NOT_NULL pCreateInfo,
17404     VmaVirtualBlock VMA_NULLABLE * VMA_NOT_NULL pVirtualBlock)
17405 {
17406     VMA_ASSERT(pCreateInfo && pVirtualBlock);
17407     VMA_ASSERT(pCreateInfo->size > 0);
17408     VMA_DEBUG_LOG("vmaCreateVirtualBlock");
17409     VMA_DEBUG_GLOBAL_MUTEX_LOCK;
17410     *pVirtualBlock = vma_new(pCreateInfo->pAllocationCallbacks, VmaVirtualBlock_T)(*pCreateInfo);
17411     VkResult res = (*pVirtualBlock)->Init();
17412     if(res < 0)
17413     {
17414         vma_delete(pCreateInfo->pAllocationCallbacks, *pVirtualBlock);
17415         *pVirtualBlock = VK_NULL_HANDLE;
17416     }
17417     return res;
17418 }
17419 
17420 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyVirtualBlock(VmaVirtualBlock VMA_NULLABLE virtualBlock)
17421 {
17422     if(virtualBlock != VK_NULL_HANDLE)
17423     {
17424         VMA_DEBUG_LOG("vmaDestroyVirtualBlock");
17425         VMA_DEBUG_GLOBAL_MUTEX_LOCK;
17426         VkAllocationCallbacks allocationCallbacks = virtualBlock->m_AllocationCallbacks; // Have to copy the callbacks when destroying.
17427         vma_delete(&allocationCallbacks, virtualBlock);
17428     }
17429 }
17430 
17431 VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaIsVirtualBlockEmpty(VmaVirtualBlock VMA_NOT_NULL virtualBlock)
17432 {
17433     VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);
17434     VMA_DEBUG_LOG("vmaIsVirtualBlockEmpty");
17435     VMA_DEBUG_GLOBAL_MUTEX_LOCK;
17436     return virtualBlock->IsEmpty() ? VK_TRUE : VK_FALSE;
17437 }
17438 
17439 VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualAllocationInfo(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
17440     VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo)
17441 {
17442     VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pVirtualAllocInfo != VMA_NULL);
17443     VMA_DEBUG_LOG("vmaGetVirtualAllocationInfo");
17444     VMA_DEBUG_GLOBAL_MUTEX_LOCK;
17445     virtualBlock->GetAllocationInfo(allocation, *pVirtualAllocInfo);
17446 }
17447 
17448 VMA_CALL_PRE VkResult VMA_CALL_POST vmaVirtualAllocate(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
17449     const VmaVirtualAllocationCreateInfo* VMA_NOT_NULL pCreateInfo, VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pAllocation,
17450     VkDeviceSize* VMA_NULLABLE pOffset)
17451 {
17452     VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pCreateInfo != VMA_NULL && pAllocation != VMA_NULL);
17453     VMA_DEBUG_LOG("vmaVirtualAllocate");
17454     VMA_DEBUG_GLOBAL_MUTEX_LOCK;
17455     return virtualBlock->Allocate(*pCreateInfo, *pAllocation, pOffset);
17456 }
17457 
17458 VMA_CALL_PRE void VMA_CALL_POST vmaVirtualFree(VmaVirtualBlock VMA_NOT_NULL virtualBlock, VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE allocation)
17459 {
17460     if(allocation != VK_NULL_HANDLE)
17461     {
17462         VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);
17463         VMA_DEBUG_LOG("vmaVirtualFree");
17464         VMA_DEBUG_GLOBAL_MUTEX_LOCK;
17465         virtualBlock->Free(allocation);
17466     }
17467 }
17468 
17469 VMA_CALL_PRE void VMA_CALL_POST vmaClearVirtualBlock(VmaVirtualBlock VMA_NOT_NULL virtualBlock)
17470 {
17471     VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);
17472     VMA_DEBUG_LOG("vmaClearVirtualBlock");
17473     VMA_DEBUG_GLOBAL_MUTEX_LOCK;
17474     virtualBlock->Clear();
17475 }
17476 
17477 VMA_CALL_PRE void VMA_CALL_POST vmaSetVirtualAllocationUserData(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
17478     VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, void* VMA_NULLABLE pUserData)
17479 {
17480     VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);
17481     VMA_DEBUG_LOG("vmaSetVirtualAllocationUserData");
17482     VMA_DEBUG_GLOBAL_MUTEX_LOCK;
17483     virtualBlock->SetAllocationUserData(allocation, pUserData);
17484 }
17485 
17486 VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualBlockStatistics(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
17487     VmaStatistics* VMA_NOT_NULL pStats)
17488 {
17489     VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pStats != VMA_NULL);
17490     VMA_DEBUG_LOG("vmaGetVirtualBlockStatistics");
17491     VMA_DEBUG_GLOBAL_MUTEX_LOCK;
17492     virtualBlock->GetStatistics(*pStats);
17493 }
17494 
17495 VMA_CALL_PRE void VMA_CALL_POST vmaCalculateVirtualBlockStatistics(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
17496     VmaDetailedStatistics* VMA_NOT_NULL pStats)
17497 {
17498     VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pStats != VMA_NULL);
17499     VMA_DEBUG_LOG("vmaCalculateVirtualBlockStatistics");
17500     VMA_DEBUG_GLOBAL_MUTEX_LOCK;
17501     virtualBlock->CalculateDetailedStatistics(*pStats);
17502 }
17503 
17504 #if VMA_STATS_STRING_ENABLED
17505 
17506 VMA_CALL_PRE void VMA_CALL_POST vmaBuildVirtualBlockStatsString(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
17507     char* VMA_NULLABLE * VMA_NOT_NULL ppStatsString, VkBool32 detailedMap)
17508 {
17509     VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && ppStatsString != VMA_NULL);
17510     VMA_DEBUG_GLOBAL_MUTEX_LOCK;
17511     const VkAllocationCallbacks* allocationCallbacks = virtualBlock->GetAllocationCallbacks();
17512     VmaStringBuilder sb(allocationCallbacks);
17513     virtualBlock->BuildStatsString(detailedMap != VK_FALSE, sb);
17514     *ppStatsString = VmaCreateStringCopy(allocationCallbacks, sb.GetData(), sb.GetLength());
17515 }
17516 
17517 VMA_CALL_PRE void VMA_CALL_POST vmaFreeVirtualBlockStatsString(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
17518     char* VMA_NULLABLE pStatsString)
17519 {
17520     if(pStatsString != VMA_NULL)
17521     {
17522         VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);
17523         VMA_DEBUG_GLOBAL_MUTEX_LOCK;
17524         VmaFreeString(virtualBlock->GetAllocationCallbacks(), pStatsString);
17525     }
17526 }
17527 #endif // VMA_STATS_STRING_ENABLED
17528 #endif // _VMA_PUBLIC_INTERFACE
17529 #endif // VMA_IMPLEMENTATION
17530 
17531 /**
17532 \page quick_start Quick start
17533 
17534 \section quick_start_project_setup Project setup
17535 
17536 Vulkan Memory Allocator comes in the form of an "stb-style" single header file.
17537 You don't need to build it as a separate library project.
17538 You can add this file directly to your project and commit it to your code repository next to your other source files.
17539 
17540 "Single header" doesn't mean that everything is contained in C/C++ declarations,
17541 like it tends to be in case of inline functions or C++ templates.
17542 It means that implementation is bundled with interface in a single file and needs to be extracted using preprocessor macro.
17543 If you don't do it properly, you will get linker errors.
17544 
17545 To do it properly:
17546 
17547 -# Include the "vk_mem_alloc.h" file in each CPP file where you want to use the library.
17548    This includes declarations of all members of the library.
17549 -# In exactly one CPP file, define the following macro before this include.
17550    It also enables internal definitions.
17551 
17552 \code
17553 #define VMA_IMPLEMENTATION
17554 #include "vk_mem_alloc.h"
17555 \endcode
17556 
17557 It may be a good idea to create a dedicated CPP file just for this purpose.
17558 
17559 This library includes header `<vulkan/vulkan.h>`, which in turn
17560 includes `<windows.h>` on Windows. If you need some specific macros defined
17561 before including these headers (like `WIN32_LEAN_AND_MEAN` or
17562 `WINVER` for Windows, `VK_USE_PLATFORM_WIN32_KHR` for Vulkan), you must define
17563 them before every `#include` of this library.
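
For example, a minimal sketch for Windows (assuming you actually want these particular macros):

\code
#define WIN32_LEAN_AND_MEAN
#define VK_USE_PLATFORM_WIN32_KHR
#include "vk_mem_alloc.h"
\endcode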
17564 
17565 This library is written in C++, but has a C-compatible interface.
17566 Thus you can include and use vk_mem_alloc.h in C or C++ code, but the full
17567 implementation with the `VMA_IMPLEMENTATION` macro must be compiled as C++, NOT as C.
17568 Some features of C++14 are used. STL containers, RTTI, and C++ exceptions are not used.
17569 
17570 
17571 \section quick_start_initialization Initialization
17572 
17573 At program startup:
17574 
17575 -# Initialize Vulkan to have `VkPhysicalDevice`, `VkDevice` and `VkInstance` objects.
17576 -# Fill VmaAllocatorCreateInfo structure and create #VmaAllocator object by
17577    calling vmaCreateAllocator().
17578 
17579 Only members `physicalDevice`, `device`, and `instance` are required.
17580 However, you should inform the library which Vulkan version you use by setting
17581 VmaAllocatorCreateInfo::vulkanApiVersion and which extensions you enabled
17582 by setting VmaAllocatorCreateInfo::flags (like #VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT for VK_KHR_buffer_device_address).
17583 Otherwise, VMA uses only the features of Vulkan 1.0 core, with no extensions.
17584 
17585 You may need to configure importing Vulkan functions. There are 3 ways to do this:
17586 
17587 -# **If you link with Vulkan static library** (e.g. "vulkan-1.lib" on Windows):
17588    - You don't need to do anything.
17589    - VMA will use these, as macro `VMA_STATIC_VULKAN_FUNCTIONS` is defined to 1 by default.
17590 -# **If you want VMA to fetch pointers to Vulkan functions dynamically** using `vkGetInstanceProcAddr`,
17591    `vkGetDeviceProcAddr` (this is the option presented in the example below):
17592    - Define `VMA_STATIC_VULKAN_FUNCTIONS` to 0, `VMA_DYNAMIC_VULKAN_FUNCTIONS` to 1.
17593    - Provide pointers to these two functions via VmaVulkanFunctions::vkGetInstanceProcAddr,
17594      VmaVulkanFunctions::vkGetDeviceProcAddr.
17595    - The library will fetch pointers to all other functions it needs internally.
17596 -# **If you fetch pointers to all Vulkan functions in a custom way**, e.g. using some loader like
17597    [Volk](https://github.com/zeux/volk):
17598    - Define `VMA_STATIC_VULKAN_FUNCTIONS` and `VMA_DYNAMIC_VULKAN_FUNCTIONS` to 0.
17599    - Pass these pointers via structure #VmaVulkanFunctions.
17600 
17601 \code
17602 VmaVulkanFunctions vulkanFunctions = {};
17603 vulkanFunctions.vkGetInstanceProcAddr = &vkGetInstanceProcAddr;
17604 vulkanFunctions.vkGetDeviceProcAddr = &vkGetDeviceProcAddr;
17605 
17606 VmaAllocatorCreateInfo allocatorCreateInfo = {};
17607 allocatorCreateInfo.vulkanApiVersion = VK_API_VERSION_1_2;
17608 allocatorCreateInfo.physicalDevice = physicalDevice;
17609 allocatorCreateInfo.device = device;
17610 allocatorCreateInfo.instance = instance;
17611 allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;
17612 
17613 VmaAllocator allocator;
17614 vmaCreateAllocator(&allocatorCreateInfo, &allocator);
17615 \endcode
17616 
17617 
17618 \section quick_start_resource_allocation Resource allocation
17619 
17620 When you want to create a buffer or image:
17621 
17622 -# Fill `VkBufferCreateInfo` / `VkImageCreateInfo` structure.
17623 -# Fill VmaAllocationCreateInfo structure.
17624 -# Call vmaCreateBuffer() / vmaCreateImage() to get `VkBuffer`/`VkImage` with memory
17625    already allocated and bound to it, plus a #VmaAllocation object that represents its underlying memory.
17626 
17627 \code
17628 VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
17629 bufferInfo.size = 65536;
17630 bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
17631 
17632 VmaAllocationCreateInfo allocInfo = {};
17633 allocInfo.usage = VMA_MEMORY_USAGE_AUTO;
17634 
17635 VkBuffer buffer;
17636 VmaAllocation allocation;
17637 vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
17638 \endcode
17639 
17640 Don't forget to destroy your objects when no longer needed:
17641 
17642 \code
17643 vmaDestroyBuffer(allocator, buffer, allocation);
17644 vmaDestroyAllocator(allocator);
17645 \endcode
17646 
17647 
17648 \page choosing_memory_type Choosing memory type
17649 
17650 Physical devices in Vulkan support various combinations of memory heaps and
17651 types. Help with choosing the correct and optimal memory type for your specific
17652 resource is one of the key features of this library. You can use it by filling
17653 appropriate members of the VmaAllocationCreateInfo structure, as described below.
17654 You can also combine multiple methods.
17655 
17656 -# If you just want to find a memory type index that meets your requirements, you
17657    can use one of the functions vmaFindMemoryTypeIndexForBufferInfo(),
17658    vmaFindMemoryTypeIndexForImageInfo(), vmaFindMemoryTypeIndex().
17659 -# If you want to allocate a region of device memory without association with any
17660    specific image or buffer, you can use function vmaAllocateMemory(). Usage of
17661    this function is not recommended and usually not needed.
17662    The vmaAllocateMemoryPages() function is also provided for creating multiple allocations at once,
17663    which may be useful for sparse binding.
17664 -# If you already have a buffer or an image created, want to allocate memory
17665    for it, and will then bind it yourself, you can use function
17666    vmaAllocateMemoryForBuffer() or vmaAllocateMemoryForImage(), as shown in the sketch below.
17667    For binding, you should use functions vmaBindBufferMemory(), vmaBindImageMemory()
17668    or their extended versions vmaBindBufferMemory2(), vmaBindImageMemory2().
17669 -# **This is the easiest and recommended way to use this library:**
17670    If you want to create a buffer or an image, allocate memory for it, and bind
17671    them together, all in one call, you can use function vmaCreateBuffer() or
17672    vmaCreateImage().
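
A minimal sketch of option 3, assuming `device`, `allocator`, and a filled `bufCreateInfo` structure
already exist. Required flags are used instead of `VMA_MEMORY_USAGE_AUTO*`, because the `AUTO` values
need the full create info (see \ref choosing_memory_type_usage):

\code
VkBuffer buf;
vkCreateBuffer(device, &bufCreateInfo, nullptr, &buf);

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.requiredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

// Allocate memory suitable for this buffer, then bind them together.
VmaAllocation alloc;
vmaAllocateMemoryForBuffer(allocator, buf, &allocCreateInfo, &alloc, nullptr);
vmaBindBufferMemory(allocator, alloc, buf);
\endcode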
17673 
17674 When using option 3 or 4, the library internally queries Vulkan for the memory types
17675 supported for that buffer or image (using a function like `vkGetBufferMemoryRequirements()`)
17676 and uses only one of these types.
17677 
17678 If no memory type can be found that meets all the requirements, these functions
17679 return `VK_ERROR_FEATURE_NOT_PRESENT`.
17680 
17681 You can leave the VmaAllocationCreateInfo structure completely filled with zeros.
17682 It means no requirements are specified for the memory type.
17683 It is valid, although not very useful.
17684 
17685 \section choosing_memory_type_usage Usage
17686 
17687 The easiest way to specify memory requirements is to fill member
17688 VmaAllocationCreateInfo::usage using one of the values of enum #VmaMemoryUsage.
17689 It defines high-level, common usage types.
17690 Since version 3 of the library, it is recommended to use #VMA_MEMORY_USAGE_AUTO to let it select the best memory type for your resource automatically.
17691 
17692 For example, if you want to create a uniform buffer that will be filled using
17693 transfer only once or infrequently and then used for rendering every frame, you can
17694 do it using the following code. The buffer will most likely end up in a memory type with
17695 `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`, which is fast to access by the GPU.
17696 
17697 \code
17698 VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
17699 bufferInfo.size = 65536;
17700 bufferInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
17701 
17702 VmaAllocationCreateInfo allocInfo = {};
17703 allocInfo.usage = VMA_MEMORY_USAGE_AUTO;
17704 
17705 VkBuffer buffer;
17706 VmaAllocation allocation;
17707 vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
17708 \endcode
17709 
17710 If you have a preference for putting the resource in GPU (device) memory or CPU (host) memory
17711 on systems with a discrete graphics card where these memories are separate, you can use
17712 #VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE or #VMA_MEMORY_USAGE_AUTO_PREFER_HOST.
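
For example, a minimal sketch of preferring device-local placement:

\code
VmaAllocationCreateInfo allocInfo = {};
allocInfo.usage = VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE;
\endcode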
17713 
17714 When using `VMA_MEMORY_USAGE_AUTO*` and you want to map the allocated memory,
17715 you also need to specify one of the host access flags:
17716 #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
17717 This helps the library decide on a preferred memory type and ensure it has `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`
17718 so you can map it.
17719 
17720 For example, a staging buffer that will be filled via a mapped pointer and then
17721 used as a source of transfer to the buffer described previously can be created like this.
17722 It will likely end up in a memory type that is `HOST_VISIBLE` and `HOST_COHERENT`
17723 but not `HOST_CACHED` (meaning uncached, write-combined) and not `DEVICE_LOCAL` (meaning system RAM).
17724 
17725 \code
17726 VkBufferCreateInfo stagingBufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
17727 stagingBufferInfo.size = 65536;
17728 stagingBufferInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
17729 
17730 VmaAllocationCreateInfo stagingAllocInfo = {};
17731 stagingAllocInfo.usage = VMA_MEMORY_USAGE_AUTO;
17732 stagingAllocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT;
17733 
17734 VkBuffer stagingBuffer;
17735 VmaAllocation stagingAllocation;
17736 vmaCreateBuffer(allocator, &stagingBufferInfo, &stagingAllocInfo, &stagingBuffer, &stagingAllocation, nullptr);
17737 \endcode
17738 
17739 For more examples of creating different kinds of resources, see chapter \ref usage_patterns.
17740 
17741 Usage values `VMA_MEMORY_USAGE_AUTO*` are legal to use only when the library knows
17742 about the resource being created by having `VkBufferCreateInfo` / `VkImageCreateInfo` passed,
17743 so they work with functions like vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), etc.
17744 If you allocate raw memory using function vmaAllocateMemory(), you have to use other means of selecting
17745 the memory type, as described below.
17746 
17747 \note
17748 Old usage values (`VMA_MEMORY_USAGE_GPU_ONLY`, `VMA_MEMORY_USAGE_CPU_ONLY`,
17749 `VMA_MEMORY_USAGE_CPU_TO_GPU`, `VMA_MEMORY_USAGE_GPU_TO_CPU`, `VMA_MEMORY_USAGE_CPU_COPY`)
17750 are still available and work the same way as in previous versions of the library
17751 for backward compatibility, but they are not recommended.
17752 
17753 \section choosing_memory_type_required_preferred_flags Required and preferred flags
17754 
17755 You can specify more detailed requirements by filling members
17756 VmaAllocationCreateInfo::requiredFlags and VmaAllocationCreateInfo::preferredFlags
17757 with a combination of bits from enum `VkMemoryPropertyFlags`. For example,
17758 if you want to create a buffer that will be persistently mapped on host (so it
17759 must be `HOST_VISIBLE`) and preferably will also be `HOST_COHERENT` and `HOST_CACHED`,
17760 use the following code:
17761 
17762 \code
17763 VmaAllocationCreateInfo allocInfo = {};
17764 allocInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
17765 allocInfo.preferredFlags = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
17766 allocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT;
17767 
17768 VkBuffer buffer;
17769 VmaAllocation allocation;
17770 vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
17771 \endcode
17772 
17773 A memory type is chosen that has all the required flags and as many preferred
17774 flags set as possible.
17775 
17776 Value passed in VmaAllocationCreateInfo::usage is internally converted to a set of required and preferred flags,
17777 plus some extra "magic" (heuristics).
17778 
17779 \section choosing_memory_type_explicit_memory_types Explicit memory types
17780 
17781 If you inspected memory types available on the physical device and you have
17782 a preference for memory types that you want to use, you can fill member
17783 VmaAllocationCreateInfo::memoryTypeBits. It is a bit mask, where each bit set
17784 means that a memory type with that index is allowed to be used for the
17785 allocation. Special value 0, just like `UINT32_MAX`, means there are no
17786 restrictions to memory type index.
17787 
17788 Please note that this member is NOT just a memory type index.
17789 Still, you can use it to choose just one specific memory type.
17790 For example, if you already determined that your buffer should be created in
17791 memory type 2, use the following code:
17792 
17793 \code
17794 uint32_t memoryTypeIndex = 2;
17795 
17796 VmaAllocationCreateInfo allocInfo = {};
17797 allocInfo.memoryTypeBits = 1u << memoryTypeIndex;
17798 
17799 VkBuffer buffer;
17800 VmaAllocation allocation;
17801 vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
17802 \endcode
17803 
17804 
17805 \section choosing_memory_type_custom_memory_pools Custom memory pools
17806 
17807 If you allocate from a custom memory pool, all the ways of specifying memory
17808 requirements described above are not applicable, and the aforementioned members
17809 of the VmaAllocationCreateInfo structure are ignored. The memory type is selected
17810 explicitly when creating the pool and then used to make all the allocations from
17811 that pool. For further details, see \ref custom_memory_pools.
17812 
17813 \section choosing_memory_type_dedicated_allocations Dedicated allocations
17814 
17815 Memory for allocations is reserved out of a larger block of `VkDeviceMemory`
17816 allocated from Vulkan internally. That is the main feature of this whole library.
17817 You can still request a separate memory block to be created for an allocation,
17818 just like you would do in a trivial solution without using any allocator.
17819 In that case, a buffer or image is always bound to that memory at offset 0.
17820 This is called a "dedicated allocation".
17821 You can explicitly request it by using flag #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT, as shown in the example after the list below.
17822 The library can also internally decide to use a dedicated allocation in some cases, e.g.:
17823 
17824 - When the size of the allocation is large.
17825 - When [VK_KHR_dedicated_allocation](@ref vk_khr_dedicated_allocation) extension is enabled
17826   and it reports that dedicated allocation is required or recommended for the resource.
17827 - When allocation of the next big memory block fails due to insufficient device memory,
17828   but an allocation with the exact requested size succeeds.
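
For example, a minimal sketch of explicitly requesting a dedicated allocation (the buffer parameters are hypothetical):

\code
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 64ull * 1024 * 1024;
bufCreateInfo.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
// Request a separate VkDeviceMemory block just for this resource.
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;

VkBuffer buf;
VmaAllocation alloc;
vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
\endcode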
17829 
17830 
17831 \page memory_mapping Memory mapping
17832 
17833 To "map memory" in Vulkan means to obtain a CPU pointer to `VkDeviceMemory`,
17834 to be able to read from it or write to it in CPU code.
17835 Mapping is possible only for memory allocated from a memory type that has
17836 the `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` flag.
17837 Functions `vkMapMemory()`, `vkUnmapMemory()` are designed for this purpose.
17838 You can use them directly with memory allocated by this library,
17839 but it is not recommended because of the following issue:
17840 Mapping the same `VkDeviceMemory` block multiple times is illegal - only one mapping at a time is allowed.
17841 This includes mapping disjoint regions. Mapping is not reference-counted internally by Vulkan.
17842 Because of this, Vulkan Memory Allocator provides the following facilities:
17843 
17844 \note If you want to be able to map an allocation, you need to specify one of the flags
17845 #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
17846 in VmaAllocationCreateInfo::flags. These flags are required for an allocation to be mappable
17847 when using #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` enum values.
17848 For other usage values they are ignored and every such allocation made in `HOST_VISIBLE` memory type is mappable,
17849 but they can still be used for consistency.
17850 
17851 \section memory_mapping_mapping_functions Mapping functions
17852 
17853 The library provides the following functions for mapping of a specific #VmaAllocation: vmaMapMemory(), vmaUnmapMemory().
17854 They are safer and more convenient to use than the standard Vulkan functions.
17855 You can map an allocation multiple times simultaneously - mapping is reference-counted internally.
17856 You can also map different allocations simultaneously regardless of whether they use the same `VkDeviceMemory` block.
17857 The way it is implemented is that the library always maps the entire memory block, not just the region of the allocation.
17858 For further details, see the description of the vmaMapMemory() function.
17859 Example:
17860 
17861 \code
17862 // Having these objects initialized:
17863 struct ConstantBuffer
17864 {
17865     ...
17866 };
17867 ConstantBuffer constantBufferData = ...
17868 
17869 VmaAllocator allocator = ...
17870 VkBuffer constantBuffer = ...
17871 VmaAllocation constantBufferAllocation = ...
17872 
17873 // You can map and fill your buffer using following code:
17874 
17875 void* mappedData;
17876 vmaMapMemory(allocator, constantBufferAllocation, &mappedData);
17877 memcpy(mappedData, &constantBufferData, sizeof(constantBufferData));
17878 vmaUnmapMemory(allocator, constantBufferAllocation);
17879 \endcode
17880 
17881 When mapping, you may see a warning from Vulkan validation layer similar to this one:
17882 
17883 <i>Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. Only GENERAL or PREINITIALIZED should be used.</i>
17884 
17885 It happens because the library maps the entire `VkDeviceMemory` block, where different
17886 types of images and buffers may end up together, especially on GPUs with unified memory like Intel.
17887 You can safely ignore it if you are sure you access only the memory of the intended
17888 object that you wanted to map.
17889 
17890 
17891 \section memory_mapping_persistently_mapped_memory Persistently mapped memory
17892 
17893 Keeping your memory persistently mapped is generally OK in Vulkan.
17894 You don't need to unmap it before using its data on the GPU.
17895 The library provides a special feature designed for that:
17896 Allocations made with the #VMA_ALLOCATION_CREATE_MAPPED_BIT flag set in
17897 VmaAllocationCreateInfo::flags stay mapped all the time,
17898 so you can just access the CPU pointer to it at any time
17899 without a need to call any "map" or "unmap" function.
17900 Example:
17901 
17902 \code
17903 VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
17904 bufCreateInfo.size = sizeof(ConstantBuffer);
17905 bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
17906 
17907 VmaAllocationCreateInfo allocCreateInfo = {};
17908 allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
17909 allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
17910     VMA_ALLOCATION_CREATE_MAPPED_BIT;
17911 
17912 VkBuffer buf;
17913 VmaAllocation alloc;
17914 VmaAllocationInfo allocInfo;
17915 vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
17916 
17917 // Buffer is already mapped. You can access its memory.
17918 memcpy(allocInfo.pMappedData, &constantBufferData, sizeof(constantBufferData));
17919 \endcode
17920 
17921 \note #VMA_ALLOCATION_CREATE_MAPPED_BIT by itself doesn't guarantee that the allocation will end up
17922 in a mappable memory type.
17923 For this, you need to also specify #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or
17924 #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
17925 #VMA_ALLOCATION_CREATE_MAPPED_BIT only guarantees that if the memory is `HOST_VISIBLE`, the allocation will be mapped on creation.
17926 For an example of how to make use of this fact, see section \ref usage_patterns_advanced_data_uploading.
17927 
17928 \section memory_mapping_cache_control Cache flush and invalidate
17929 
17930 Memory in Vulkan doesn't need to be unmapped before using it on the GPU,
17931 but unless a memory type has the `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT` flag set,
17932 you need to manually **invalidate** the cache before reading from a mapped pointer
17933 and **flush** the cache after writing to a mapped pointer.
17934 Map/unmap operations don't do that automatically.
17935 Vulkan provides the functions `vkFlushMappedMemoryRanges()` and
17936 `vkInvalidateMappedMemoryRanges()` for this purpose, but this library provides more convenient
17937 functions that refer to a given allocation object: vmaFlushAllocation(),
17938 vmaInvalidateAllocation(),
17939 or multiple objects at once: vmaFlushAllocations(), vmaInvalidateAllocations().
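
For example, a minimal sketch, assuming `alloc` is a mapped allocation in a `HOST_VISIBLE` but non-`HOST_COHERENT` memory type:

\code
// After writing through the mapped pointer, flush the written range.
vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);

// Before reading through the mapped pointer (e.g. after a GPU write), invalidate it.
vmaInvalidateAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
\endcode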
17940 
17941 Regions of memory specified for flush/invalidate must be aligned to
17942 `VkPhysicalDeviceLimits::nonCoherentAtomSize`. This is automatically ensured by the library.
17943 In any memory type that is `HOST_VISIBLE` but not `HOST_COHERENT`, all allocations
17944 within blocks are aligned to this value, so their offsets are always a multiple of
17945 `nonCoherentAtomSize` and two different allocations never share the same "line" of this size.
17946 
17947 Also, Windows drivers from all 3 PC GPU vendors (AMD, Intel, NVIDIA)
17948 currently provide the `HOST_COHERENT` flag on all memory types that are
17949 `HOST_VISIBLE`, so on PC you may not need to bother.
17950 
17951 
17952 \page staying_within_budget Staying within budget
17953 
17954 When developing a graphics-intensive game or program, it is important to avoid allocating
17955 more GPU memory than is physically available. When memory is over-committed,
17956 various bad things can happen, depending on the specific GPU, graphics driver, and
17957 operating system:
17958 
17959 - It may just work without any problems.
17960 - The application may slow down because some memory blocks are moved to system RAM
17961   and the GPU has to access them through the PCI Express bus.
17962 - A new allocation may take a very long time to complete, even a few seconds, and possibly
17963   freeze the entire system.
17964 - The new allocation may fail with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
17965 - It may even result in a GPU crash (TDR), observed as `VK_ERROR_DEVICE_LOST`
17966   returned somewhere later.
17967 
17968 \section staying_within_budget_querying_for_budget Querying for budget
17969 
17970 To query for the current memory usage and available budget, use function vmaGetHeapBudgets().
17971 The returned structure #VmaBudget contains quantities expressed in bytes, per Vulkan memory heap.
17972 
17973 Please note that this function returns different information and works faster than
17974 vmaCalculateStatistics(). vmaGetHeapBudgets() can be called every frame or even before every
17975 allocation, while vmaCalculateStatistics() is intended to be used rarely,
17976 only to obtain statistical information, e.g. for debugging purposes.
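
For example, a minimal sketch of querying the budget (heap 0 is just an example index):

\code
VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
vmaGetHeapBudgets(allocator, budgets);

if(budgets[0].usage >= budgets[0].budget)
{
    // This heap is at or over budget - consider releasing resources
    // or postponing non-essential allocations.
}
\endcode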
17977 
17978 It is recommended to use the <b>VK_EXT_memory_budget</b> device extension to obtain information
17979 about the budget from the Vulkan device. VMA is able to use this extension automatically.
17980 When it is not enabled, the allocator behaves the same way, but then it estimates current usage
17981 and available budget based on its internal information and Vulkan memory heap sizes,
17982 which may be less precise. In order to use this extension:
17983 
17984 1. Make sure the extensions VK_EXT_memory_budget and VK_KHR_get_physical_device_properties2
17985    required by it are available, and enable them. Please note that the first is a device
17986    extension and the second is an instance extension!
17987 2. Use flag #VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT when creating #VmaAllocator object.
17988 3. Make sure to call vmaSetCurrentFrameIndex() every frame, as in the sketch below. The budget is queried from
17989    Vulkan inside of it to avoid the overhead of querying it with every allocation.
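
A minimal sketch of steps 2 and 3 (assuming the extensions were enabled and `frameIndex` is your frame counter):

\code
// 2. When creating the allocator:
allocatorCreateInfo.flags |= VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT;

// 3. Once per frame:
vmaSetCurrentFrameIndex(allocator, frameIndex);
\endcode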
17990 
17991 \section staying_within_budget_controlling_memory_usage Controlling memory usage
17992 
17993 There are many ways in which you can try to stay within the budget.
17994 
17995 First, when making a new allocation requires allocating a new memory block, the library
17996 automatically tries not to exceed the budget. If a block with the default recommended size
17997 (e.g. 256 MB) would go over budget, a smaller block is allocated, possibly even
17998 dedicated memory for just this resource.
17999 
18000 If the size of the requested resource plus current memory usage is more than the
18001 budget, by default the library still tries to create it, leaving it to the Vulkan
18002 implementation whether the allocation succeeds or fails. You can change this behavior
18003 by using the #VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT flag. With it, the allocation is
18004 not made if it would exceed the budget or if the budget is already exceeded.
18005 VMA then tries to make the allocation from the next eligible Vulkan memory type.
18006 If all of them fail, the call fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
18007 An example usage pattern may be to pass the #VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT flag
18008 when creating resources that are not essential for the application (e.g. the texture
18009 of a specific object) and not to pass it when creating critically important resources
18010 (e.g. render targets), as sketched below.
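
A sketch of passing the flag for a non-essential resource:

\code
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
// Fail early instead of going over budget.
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT;
\endcode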
18011 
18012 On AMD graphics cards there is a custom vendor extension available: <b>VK_AMD_memory_overallocation_behavior</b>,
18013 which allows you to control the behavior of the Vulkan implementation in out-of-memory cases -
18014 whether it should fail with an error code or still allow the allocation.
18015 Usage of this extension involves only passing an extra structure at Vulkan device creation,
18016 so it is out of the scope of this library.
18017 
18018 Finally, you can also use the #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT flag to make sure
18019 a new allocation is created only when it fits inside one of the existing memory blocks.
18020 If it would require allocating a new block, it fails instead with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
18021 This also ensures that the function call is very fast because it never goes to Vulkan
18022 to obtain a new block.
18023 
18024 \note Creating \ref custom_memory_pools with VmaPoolCreateInfo::minBlockCount
18025 set to more than 0 will currently try to allocate memory blocks without checking whether they
18026 fit within budget.
18027 
18028 
18029 \page resource_aliasing Resource aliasing (overlap)
18030 
18031 New explicit graphics APIs (Vulkan and Direct3D 12), thanks to manual memory
18032 management, give an opportunity to alias (overlap) multiple resources in the
18033 same region of memory - a feature not available in the old APIs (Direct3D 11, OpenGL).
18034 It can be useful to save video memory, but it must be used with caution.
18035 
18036 For example, if you know the flow of your whole render frame in advance, you
18037 are going to use some intermediate textures or buffers only during a small range of render passes,
18038 and you know these ranges don't overlap in time, you can bind these resources to
18039 the same place in memory, even if they have completely different parameters (width, height, format etc.).
18040 
18041 ![Resource aliasing (overlap)](../gfx/Aliasing.png)
18042 
18043 Such a scenario is possible using VMA, but you need to create your images manually.
18044 Then you need to calculate the parameters of the allocation to be made using this formula:
18045 
18046 - allocation size = max(size of each image)
18047 - allocation alignment = max(alignment of each image)
18048 - allocation memoryTypeBits = bitwise AND(memoryTypeBits of each image)
18049 
18050 The following example shows two different images bound to the same place in memory,
18051 allocated to fit the largest of them.
18052 
18053 \code
18054 // A 512x512 texture to be sampled.
18055 VkImageCreateInfo img1CreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
18056 img1CreateInfo.imageType = VK_IMAGE_TYPE_2D;
18057 img1CreateInfo.extent.width = 512;
18058 img1CreateInfo.extent.height = 512;
18059 img1CreateInfo.extent.depth = 1;
18060 img1CreateInfo.mipLevels = 10;
18061 img1CreateInfo.arrayLayers = 1;
18062 img1CreateInfo.format = VK_FORMAT_R8G8B8A8_SRGB;
18063 img1CreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
18064 img1CreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
18065 img1CreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
18066 img1CreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
18067 
18068 // A full screen texture to be used as color attachment.
18069 VkImageCreateInfo img2CreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
18070 img2CreateInfo.imageType = VK_IMAGE_TYPE_2D;
18071 img2CreateInfo.extent.width = 1920;
18072 img2CreateInfo.extent.height = 1080;
18073 img2CreateInfo.extent.depth = 1;
18074 img2CreateInfo.mipLevels = 1;
18075 img2CreateInfo.arrayLayers = 1;
18076 img2CreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
18077 img2CreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
18078 img2CreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
18079 img2CreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
18080 img2CreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
18081 
18082 VkImage img1;
18083 res = vkCreateImage(device, &img1CreateInfo, nullptr, &img1);
18084 VkImage img2;
18085 res = vkCreateImage(device, &img2CreateInfo, nullptr, &img2);
18086 
18087 VkMemoryRequirements img1MemReq;
18088 vkGetImageMemoryRequirements(device, img1, &img1MemReq);
18089 VkMemoryRequirements img2MemReq;
18090 vkGetImageMemoryRequirements(device, img2, &img2MemReq);
18091 
18092 VkMemoryRequirements finalMemReq = {};
18093 finalMemReq.size = std::max(img1MemReq.size, img2MemReq.size);
18094 finalMemReq.alignment = std::max(img1MemReq.alignment, img2MemReq.alignment);
18095 finalMemReq.memoryTypeBits = img1MemReq.memoryTypeBits & img2MemReq.memoryTypeBits;
18096 // Validate if(finalMemReq.memoryTypeBits != 0)
18097 
18098 VmaAllocationCreateInfo allocCreateInfo = {};
18099 allocCreateInfo.preferredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
18100 
18101 VmaAllocation alloc;
18102 res = vmaAllocateMemory(allocator, &finalMemReq, &allocCreateInfo, &alloc, nullptr);
18103 
18104 res = vmaBindImageMemory(allocator, alloc, img1);
18105 res = vmaBindImageMemory(allocator, alloc, img2);
18106 
18107 // You can use img1, img2 here, but not at the same time!
18108 
18109 vmaFreeMemory(allocator, alloc);
18110 vkDestroyImage(device, img2, nullptr);
18111 vkDestroyImage(device, img1, nullptr);
18112 \endcode
18113 
18114 Remember that using resources that alias in memory requires proper synchronization.
18115 You need to issue a memory barrier to make sure commands that use `img1` and `img2`
18116 don't overlap on the GPU timeline.
18117 You also need to treat a resource after aliasing as uninitialized - containing garbage data.
18118 For example, if you use `img1` and then want to use `img2`, you need to issue
18119 an image memory barrier for `img2` with `oldLayout` = `VK_IMAGE_LAYOUT_UNDEFINED`.
18120 
18121 Additional considerations:
18122 
18123 - Vulkan also allows you to interpret the contents of memory between aliasing resources consistently in some cases.
18124 See chapter 11.8 "Memory Aliasing" of the Vulkan specification or the `VK_IMAGE_CREATE_ALIAS_BIT` flag.
18125 - You can create a more complex layout where different images and buffers are bound
18126 at different offsets inside one large allocation. For example, one can imagine
18127 a big texture used in some render passes, aliasing with a set of many small buffers
18128 used in some further passes. To bind a resource at a non-zero offset in an allocation,
18129 use vmaBindBufferMemory2() / vmaBindImageMemory2().
18130 - Before allocating memory for the resources you want to alias, check `memoryTypeBits`
18131 returned in memory requirements of each resource to make sure the bits overlap.
18132 Some GPUs may expose multiple memory types suitable e.g. only for buffers or
18133 images with `COLOR_ATTACHMENT` usage, so the sets of memory types supported by your
18134 resources may be disjoint. Aliasing them is not possible in that case.
18135 
18136 
18137 \page custom_memory_pools Custom memory pools
18138 
18139 A memory pool contains a number of `VkDeviceMemory` blocks.
18140 The library automatically creates and manages a default pool for each memory type available on the device.
18141 Default memory pools automatically grow in size.
18142 The size of allocated blocks is also variable and managed automatically.
18143 
18144 You can create a custom pool and allocate memory out of it.
18145 It can be useful if you want to:
18146 
18147 - Keep a certain kind of allocations separate from others.
18148 - Enforce a particular, fixed size of Vulkan memory blocks.
18149 - Limit the maximum amount of Vulkan memory allocated for that pool.
18150 - Reserve a minimum or fixed amount of Vulkan memory always preallocated for that pool.
18151 - Use extra parameters for a set of your allocations that are available in #VmaPoolCreateInfo but not in
18152   #VmaAllocationCreateInfo - e.g., custom minimum alignment, custom `pNext` chain.
18153 - Perform defragmentation on a specific subset of your allocations.
18154 
18155 To use custom memory pools:
18156 
18157 -# Fill VmaPoolCreateInfo structure.
18158 -# Call vmaCreatePool() to obtain #VmaPool handle.
18159 -# When making an allocation, set VmaAllocationCreateInfo::pool to this handle.
18160    You don't need to specify any other parameters of this structure, like `usage`.
18161 
18162 Example:
18163 
18164 \code
18165 // Find memoryTypeIndex for the pool.
18166 VkBufferCreateInfo sampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
18167 sampleBufCreateInfo.size = 0x10000; // Doesn't matter.
18168 sampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
18169 
18170 VmaAllocationCreateInfo sampleAllocCreateInfo = {};
18171 sampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
18172 
18173 uint32_t memTypeIndex;
18174 VkResult res = vmaFindMemoryTypeIndexForBufferInfo(allocator,
18175     &sampleBufCreateInfo, &sampleAllocCreateInfo, &memTypeIndex);
18176 // Check res...
18177 
18178 // Create a pool that can have at most 2 blocks, 128 MiB each.
18179 VmaPoolCreateInfo poolCreateInfo = {};
18180 poolCreateInfo.memoryTypeIndex = memTypeIndex;
18181 poolCreateInfo.blockSize = 128ull * 1024 * 1024;
18182 poolCreateInfo.maxBlockCount = 2;
18183 
18184 VmaPool pool;
18185 res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
18186 // Check res...
18187 
18188 // Allocate a buffer out of it.
18189 VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
18190 bufCreateInfo.size = 1024;
18191 bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
18192 
18193 VmaAllocationCreateInfo allocCreateInfo = {};
18194 allocCreateInfo.pool = pool;
18195 
18196 VkBuffer buf;
18197 VmaAllocation alloc;
18198 res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
18199 // Check res...
18200 \endcode
18201 
18202 You have to free all allocations made from this pool before destroying it.
18203 
18204 \code
18205 vmaDestroyBuffer(allocator, buf, alloc);
18206 vmaDestroyPool(allocator, pool);
18207 \endcode
18208 
18209 New versions of this library support creating dedicated allocations in custom pools.
18210 It is supported only when VmaPoolCreateInfo::blockSize = 0.
18211 To use this feature, set VmaAllocationCreateInfo::pool to your custom pool and
18212 VmaAllocationCreateInfo::flags to #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT, for example:
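
\code
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.pool = pool; // A sketch, assuming a pool created with blockSize = 0.
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
\endcode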
18213 
18214 \note Excessive use of custom pools is a common mistake when using this library.
18215 Custom pools may be useful for special purposes - when you want to
18216 keep a certain type of resources separate, e.g. to reserve a minimum amount of memory
18217 for them or limit the maximum amount of memory they can occupy. For most
18218 resources this is not needed, and so it is not recommended to create #VmaPool
18219 objects and allocations out of them. Allocating from the default pool is sufficient.
18220 
18221 
18222 \section custom_memory_pools_MemTypeIndex Choosing memory type index
18223 
When creating a pool, you must explicitly specify the memory type index.
To find the one suitable for your buffers or images, you can use the helper functions
vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo().
18227 You need to provide structures with example parameters of buffers or images
18228 that you are going to create in that pool.
18229 
18230 \code
18231 VkBufferCreateInfo exampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
18232 exampleBufCreateInfo.size = 1024; // Doesn't matter
18233 exampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
18234 
18235 VmaAllocationCreateInfo allocCreateInfo = {};
18236 allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
18237 
18238 uint32_t memTypeIndex;
18239 vmaFindMemoryTypeIndexForBufferInfo(allocator, &exampleBufCreateInfo, &allocCreateInfo, &memTypeIndex);
18240 
18241 VmaPoolCreateInfo poolCreateInfo = {};
18242 poolCreateInfo.memoryTypeIndex = memTypeIndex;
18243 // ...
18244 \endcode
18245 
When creating buffers/images allocated in that pool, provide the following parameters:
18247 
- `VkBufferCreateInfo`: Prefer to pass the same parameters as above.
  Otherwise you risk creating resources in a memory type that is not suitable for them, which may result in undefined behavior.
  Using different `VK_BUFFER_USAGE_` flags may work, but you shouldn't create images in a pool intended for buffers
  or the other way around.
- VmaAllocationCreateInfo: You don't need to pass the same parameters. Fill only the `pool` member.
18253   Other members are ignored anyway.
18254 
18255 \section linear_algorithm Linear allocation algorithm
18256 
Each Vulkan memory block managed by this library has accompanying metadata that
keeps track of used and unused regions. By default, the metadata structure and
algorithm try to find the best place for new allocations among free regions to
optimize memory usage. This way you can allocate and free objects in any order.
18261 
18262 ![Default allocation algorithm](../gfx/Linear_allocator_1_algo_default.png)
18263 
Sometimes there is a need for a simpler, linear allocation algorithm. You can
create a custom pool that uses such an algorithm by adding flag
#VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT to VmaPoolCreateInfo::flags while creating
the #VmaPool object. An alternative metadata management is then used. It always
creates new allocations after the last one and doesn't reuse free regions left
after allocations freed in the middle. This results in better allocation
performance and less memory consumed by metadata.
18271 
18272 ![Linear allocation algorithm](../gfx/Linear_allocator_2_algo_linear.png)
18273 
18274 With this one flag, you can create a custom pool that can be used in many ways:
18275 free-at-once, stack, double stack, and ring buffer. See below for details.
18276 You don't need to specify explicitly which of these options you are going to use - it is detected automatically.
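
For example, such a pool could be created like this (a sketch; `memTypeIndex` is assumed to be
found as described in \ref custom_memory_pools_MemTypeIndex):

\code
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
poolCreateInfo.blockSize = 64ull * 1024 * 1024; // Example fixed block size: 64 MiB.
poolCreateInfo.maxBlockCount = 1; // Required later for double stack or ring buffer usage.

VmaPool pool;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
// Check res...
\endcode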
18277 
18278 \subsection linear_algorithm_free_at_once Free-at-once
18279 
18280 In a pool that uses linear algorithm, you still need to free all the allocations
18281 individually, e.g. by using vmaFreeMemory() or vmaDestroyBuffer(). You can free
them in any order. New allocations are always made after the last one - free space
in the middle is not reused. However, when you release all the allocations and
the pool becomes empty, allocation starts from the beginning again. This way you
18285 can use linear algorithm to speed up creation of allocations that you are going
18286 to release all at once.
18287 
18288 ![Free-at-once](../gfx/Linear_allocator_3_free_at_once.png)
18289 
18290 This mode is also available for pools created with VmaPoolCreateInfo::maxBlockCount
18291 value that allows multiple memory blocks.
18292 
18293 \subsection linear_algorithm_stack Stack
18294 
18295 When you free an allocation that was created last, its space can be reused.
18296 Thanks to this, if you always release allocations in the order opposite to their
18297 creation (LIFO - Last In First Out), you can achieve behavior of a stack.
18298 
18299 ![Stack](../gfx/Linear_allocator_4_stack.png)
18300 
18301 This mode is also available for pools created with VmaPoolCreateInfo::maxBlockCount
18302 value that allows multiple memory blocks.
18303 
18304 \subsection linear_algorithm_double_stack Double stack
18305 
18306 The space reserved by a custom pool with linear algorithm may be used by two
18307 stacks:
18308 
18309 - First, default one, growing up from offset 0.
18310 - Second, "upper" one, growing down from the end towards lower offsets.
18311 
To make an allocation from the upper stack, add flag #VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT
to VmaAllocationCreateInfo::flags.
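
For example (a sketch; `pool` is assumed to be created with the linear algorithm and
`bufCreateInfo` filled as usual):

\code
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.pool = pool; // Pool created with VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT.
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT; // Allocate from the upper stack.

VkBuffer buf;
VmaAllocation alloc;
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
\endcode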
18314 
18315 ![Double stack](../gfx/Linear_allocator_7_double_stack.png)
18316 
18317 Double stack is available only in pools with one memory block -
18318 VmaPoolCreateInfo::maxBlockCount must be 1. Otherwise behavior is undefined.
18319 
When the ends of the two stacks meet so that there is not enough space between them for a
new allocation, such an allocation fails with the usual
`VK_ERROR_OUT_OF_DEVICE_MEMORY` error.
18323 
18324 \subsection linear_algorithm_ring_buffer Ring buffer
18325 
18326 When you free some allocations from the beginning and there is not enough free space
18327 for a new one at the end of a pool, allocator's "cursor" wraps around to the
18328 beginning and starts allocation there. Thanks to this, if you always release
18329 allocations in the same order as you created them (FIFO - First In First Out),
18330 you can achieve behavior of a ring buffer / queue.
18331 
18332 ![Ring buffer](../gfx/Linear_allocator_5_ring_buffer.png)
18333 
18334 Ring buffer is available only in pools with one memory block -
18335 VmaPoolCreateInfo::maxBlockCount must be 1. Otherwise behavior is undefined.
18336 
18337 \note \ref defragmentation is not supported in custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT.
18338 
18339 
18340 \page defragmentation Defragmentation
18341 
18342 Interleaved allocations and deallocations of many objects of varying size can
cause fragmentation over time, which can lead to a situation where the library is unable
to find a continuous range of free memory for a new allocation, even though there is
enough free space in total, just scattered across many small free ranges between existing
allocations.
18347 
To mitigate this problem, you can use the defragmentation feature.
It doesn't happen automatically, though, and needs your cooperation,
because VMA is a low-level library that only allocates memory.
18351 It cannot recreate buffers and images in a new place as it doesn't remember the contents of `VkBufferCreateInfo` / `VkImageCreateInfo` structures.
18352 It cannot copy their contents as it doesn't record any commands to a command buffer.
18353 
18354 Example:
18355 
18356 \code
18357 VmaDefragmentationInfo defragInfo = {};
18358 defragInfo.pool = myPool;
18359 defragInfo.flags = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT;
18360 
18361 VmaDefragmentationContext defragCtx;
18362 VkResult res = vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx);
18363 // Check res...
18364 
18365 for(;;)
18366 {
18367     VmaDefragmentationPassMoveInfo pass;
18368     res = vmaBeginDefragmentationPass(allocator, defragCtx, &pass);
18369     if(res == VK_SUCCESS)
18370         break;
    else if(res != VK_INCOMPLETE)
    {
        // Handle error...
    }
18373 
18374     for(uint32_t i = 0; i < pass.moveCount; ++i)
18375     {
18376         // Inspect pass.pMoves[i].srcAllocation, identify what buffer/image it represents.
18377         VmaAllocationInfo allocInfo;
        vmaGetAllocationInfo(allocator, pass.pMoves[i].srcAllocation, &allocInfo);
18379         MyEngineResourceData* resData = (MyEngineResourceData*)allocInfo.pUserData;
18380 
18381         // Recreate and bind this buffer/image at: pass.pMoves[i].dstMemory, pass.pMoves[i].dstOffset.
18382         VkImageCreateInfo imgCreateInfo = ...
18383         VkImage newImg;
18384         res = vkCreateImage(device, &imgCreateInfo, nullptr, &newImg);
18385         // Check res...
        res = vmaBindImageMemory(allocator, pass.pMoves[i].dstTmpAllocation, newImg);
18387         // Check res...
18388 
18389         // Issue a vkCmdCopyBuffer/vkCmdCopyImage to copy its content to the new place.
18390         vkCmdCopyImage(cmdBuf, resData->img, ..., newImg, ...);
18391     }
18392 
18393     // Make sure the copy commands finished executing.
18394     vkWaitForFences(...);
18395 
18396     // Destroy old buffers/images bound with pass.pMoves[i].srcAllocation.
18397     for(uint32_t i = 0; i < pass.moveCount; ++i)
18398     {
18399         // ...
18400         vkDestroyImage(device, resData->img, nullptr);
18401     }
18402 
18403     // Update appropriate descriptors to point to the new places...
18404 
18405     res = vmaEndDefragmentationPass(allocator, defragCtx, &pass);
18406     if(res == VK_SUCCESS)
18407         break;
    else if(res != VK_INCOMPLETE)
    {
        // Handle error...
    }
18410 }
18411 
18412 vmaEndDefragmentation(allocator, defragCtx, nullptr);
18413 \endcode
18414 
18415 Although functions like vmaCreateBuffer(), vmaCreateImage(), vmaDestroyBuffer(), vmaDestroyImage()
18416 create/destroy an allocation and a buffer/image at once, these are just a shortcut for
18417 creating the resource, allocating memory, and binding them together.
18418 Defragmentation works on memory allocations only. You must handle the rest manually.
Defragmentation is an iterative process that should repeat "passes" as long as related functions
return `VK_INCOMPLETE`, not `VK_SUCCESS`.
18421 In each pass:
18422 
18423 1. vmaBeginDefragmentationPass() function call:
18424    - Calculates and returns the list of allocations to be moved in this pass.
18425      Note this can be a time-consuming process.
18426    - Reserves destination memory for them by creating temporary destination allocations
18427      that you can query for their `VkDeviceMemory` + offset using vmaGetAllocationInfo().
18428 2. Inside the pass, **you should**:
18429    - Inspect the returned list of allocations to be moved.
18430    - Create new buffers/images and bind them at the returned destination temporary allocations.
18431    - Copy data from source to destination resources if necessary.
18432    - Destroy the source buffers/images, but NOT their allocations.
18433 3. vmaEndDefragmentationPass() function call:
18434    - Frees the source memory reserved for the allocations that are moved.
18435    - Modifies source #VmaAllocation objects that are moved to point to the destination reserved memory.
18436    - Frees `VkDeviceMemory` blocks that became empty.
18437 
18438 Unlike in previous iterations of the defragmentation API, there is no list of "movable" allocations passed as a parameter.
18439 Defragmentation algorithm tries to move all suitable allocations.
18440 You can, however, refuse to move some of them inside a defragmentation pass, by setting
18441 `pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE.
18442 This is not recommended and may result in suboptimal packing of the allocations after defragmentation.
18443 If you cannot ensure any allocation can be moved, it is better to keep movable allocations separate in a custom pool.
18444 
18445 Inside a pass, for each allocation that should be moved:
18446 
18447 - You should copy its data from the source to the destination place by calling e.g. `vkCmdCopyBuffer()`, `vkCmdCopyImage()`.
18448   - You need to make sure these commands finished executing before destroying the source buffers/images and before calling vmaEndDefragmentationPass().
18449 - If a resource doesn't contain any meaningful data, e.g. it is a transient color attachment image to be cleared,
18450   filled, and used temporarily in each rendering frame, you can just recreate this image
18451   without copying its data.
18452 - If the resource is in `HOST_VISIBLE` and `HOST_CACHED` memory, you can copy its data on the CPU
18453   using `memcpy()`.
- If you cannot move the allocation, you can set `pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE,
  as shown in the sketch after this list. This will cancel the move.
  - vmaEndDefragmentationPass() will then free the destination memory,
    not the source memory of the allocation, leaving it unchanged.
18458 - If you decide the allocation is unimportant and can be destroyed instead of moved (e.g. it wasn't used for long time),
18459   you can set `pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY.
18460   - vmaEndDefragmentationPass() will then free both source and destination memory, and will destroy the source #VmaAllocation object.
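
A sketch of overriding the operation inside a pass (reusing the loop from the example above;
`CanMoveMyResource()` is a hypothetical engine-side check):

\code
for(uint32_t i = 0; i < pass.moveCount; ++i)
{
    if(!CanMoveMyResource(pass.pMoves[i].srcAllocation)) // Hypothetical helper.
        pass.pMoves[i].operation = VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE;
}
\endcode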
18461 
18462 You can defragment a specific custom pool by setting VmaDefragmentationInfo::pool
18463 (like in the example above) or all the default pools by setting this member to null.
18464 
18465 Defragmentation is always performed in each pool separately.
18466 Allocations are never moved between different Vulkan memory types.
18467 The size of the destination memory reserved for a moved allocation is the same as the original one.
18468 Alignment of an allocation as it was determined using `vkGetBufferMemoryRequirements()` etc. is also respected after defragmentation.
18469 Buffers/images should be recreated with the same `VkBufferCreateInfo` / `VkImageCreateInfo` parameters as the original ones.
18470 
You can perform the defragmentation incrementally to limit the number of allocations and bytes to be moved
in each pass, e.g. calling it in sync with render frames so you don't experience too long hitches.
18473 See members: VmaDefragmentationInfo::maxBytesPerPass, VmaDefragmentationInfo::maxAllocationsPerPass.
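
For example, per-pass limits could be set like this (a sketch; the specific limits are
assumptions to be tuned for your frame budget):

\code
VmaDefragmentationInfo defragInfo = {};
defragInfo.pool = myPool;
defragInfo.maxBytesPerPass = 16ull * 1024 * 1024; // Move at most 16 MiB per pass.
defragInfo.maxAllocationsPerPass = 64; // Move at most 64 allocations per pass.
\endcode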
18474 
18475 It is also safe to perform the defragmentation asynchronously to render frames and other Vulkan and VMA
18476 usage, possibly from multiple threads, with the exception that allocations
18477 returned in VmaDefragmentationPassMoveInfo::pMoves shouldn't be destroyed until the defragmentation pass is ended.
18478 
18479 <b>Mapping</b> is preserved on allocations that are moved during defragmentation.
Whether mapped through #VMA_ALLOCATION_CREATE_MAPPED_BIT or vmaMapMemory(), the allocations
are mapped at their new place. Of course, the pointer to the mapped data changes, so it needs to be queried
using VmaAllocationInfo::pMappedData.
18483 
18484 \note Defragmentation is not supported in custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT.
18485 
18486 
18487 \page statistics Statistics
18488 
18489 This library contains several functions that return information about its internal state,
18490 especially the amount of memory allocated from Vulkan.
18491 
18492 \section statistics_numeric_statistics Numeric statistics
18493 
18494 If you need to obtain basic statistics about memory usage per heap, together with current budget,
18495 you can call function vmaGetHeapBudgets() and inspect structure #VmaBudget.
This is useful to keep track of memory usage and stay within budget
18497 (see also \ref staying_within_budget).
18498 Example:
18499 
18500 \code
18501 uint32_t heapIndex = ...
18502 
18503 VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
18504 vmaGetHeapBudgets(allocator, budgets);
18505 
18506 printf("My heap currently has %u allocations taking %llu B,\n",
18507     budgets[heapIndex].statistics.allocationCount,
18508     budgets[heapIndex].statistics.allocationBytes);
18509 printf("allocated out of %u Vulkan device memory blocks taking %llu B,\n",
18510     budgets[heapIndex].statistics.blockCount,
18511     budgets[heapIndex].statistics.blockBytes);
18512 printf("Vulkan reports total usage %llu B with budget %llu B.\n",
18513     budgets[heapIndex].usage,
18514     budgets[heapIndex].budget);
18515 \endcode
18516 
18517 You can query for more detailed statistics per memory heap, type, and totals,
18518 including minimum and maximum allocation size and unused range size,
18519 by calling function vmaCalculateStatistics() and inspecting structure #VmaTotalStatistics.
18520 This function is slower though, as it has to traverse all the internal data structures,
18521 so it should be used only for debugging purposes.
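
A minimal sketch:

\code
VmaTotalStatistics stats;
vmaCalculateStatistics(allocator, &stats);
// Inspect e.g. stats.total.statistics.allocationBytes, stats.total.allocationSizeMax,
// stats.memoryHeap[heapIndex], stats.memoryType[memTypeIndex]...
\endcode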
18522 
18523 You can query for statistics of a custom pool using function vmaGetPoolStatistics()
18524 or vmaCalculatePoolStatistics().
18525 
You can query for information about a specific allocation using function vmaGetAllocationInfo().
It fills structure #VmaAllocationInfo.
18528 
18529 \section statistics_json_dump JSON dump
18530 
18531 You can dump internal state of the allocator to a string in JSON format using function vmaBuildStatsString().
18532 The result is guaranteed to be correct JSON.
18533 It uses ANSI encoding.
Any strings provided by the user (see [Allocation names](@ref allocation_names))
are copied as-is and properly escaped for JSON, so if they use UTF-8, ISO-8859-2, or any other encoding,
this JSON string can be treated as using the same encoding.
18537 It must be freed using function vmaFreeStatsString().
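
A minimal sketch of building and freeing such a string:

\code
char* statsString = nullptr;
vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map of blocks.
// Save or log statsString...
vmaFreeStatsString(allocator, statsString);
\endcode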
18538 
The format of this JSON string is not part of the official documentation of the library,
but it will not change in a backward-incompatible way without increasing the library's major version number
and an appropriate mention in the changelog.
18542 
18543 The JSON string contains all the data that can be obtained using vmaCalculateStatistics().
18544 It can also contain detailed map of allocated memory blocks and their regions -
18545 free and occupied by allocations.
18546 This allows e.g. to visualize the memory or assess fragmentation.
18547 
18548 
18549 \page allocation_annotation Allocation names and user data
18550 
18551 \section allocation_user_data Allocation user data
18552 
18553 You can annotate allocations with your own information, e.g. for debugging purposes.
18554 To do that, fill VmaAllocationCreateInfo::pUserData field when creating
18555 an allocation. It is an opaque `void*` pointer. You can use it e.g. as a pointer,
18556 some handle, index, key, ordinal number or any other value that would associate
18557 the allocation with your custom metadata.
It is useful to identify appropriate data structures in your engine given #VmaAllocation,
18559 e.g. when doing \ref defragmentation.
18560 
18561 \code
18562 VkBufferCreateInfo bufCreateInfo = ...
18563 
18564 MyBufferMetadata* pMetadata = CreateBufferMetadata();
18565 
18566 VmaAllocationCreateInfo allocCreateInfo = {};
18567 allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
18568 allocCreateInfo.pUserData = pMetadata;
18569 
18570 VkBuffer buffer;
18571 VmaAllocation allocation;
18572 vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buffer, &allocation, nullptr);
18573 \endcode
18574 
18575 The pointer may be later retrieved as VmaAllocationInfo::pUserData:
18576 
18577 \code
18578 VmaAllocationInfo allocInfo;
18579 vmaGetAllocationInfo(allocator, allocation, &allocInfo);
18580 MyBufferMetadata* pMetadata = (MyBufferMetadata*)allocInfo.pUserData;
18581 \endcode
18582 
18583 It can also be changed using function vmaSetAllocationUserData().
18584 
Values of (non-zero) allocations' `pUserData` are printed in the JSON report created by
vmaBuildStatsString(), in hexadecimal form.
18587 
18588 \section allocation_names Allocation names
18589 
18590 An allocation can also carry a null-terminated string, giving a name to the allocation.
18591 To set it, call vmaSetAllocationName().
The library creates an internal copy of the string, so the pointer you pass doesn't need
to be valid for the whole lifetime of the allocation. You can free it after the call.
18594 
18595 \code
18596 std::string imageName = "Texture: ";
18597 imageName += fileName;
18598 vmaSetAllocationName(allocator, allocation, imageName.c_str());
18599 \endcode
18600 
18601 The string can be later retrieved by inspecting VmaAllocationInfo::pName.
18602 It is also printed in JSON report created by vmaBuildStatsString().
18603 
\note Setting a string name on a VMA allocation doesn't automatically set it on the Vulkan buffer or image created with it.
You must do it manually using an extension like VK_EXT_debug_utils, which is independent of this library.
18606 
18607 
18608 \page virtual_allocator Virtual allocator
18609 
18610 As an extra feature, the core allocation algorithm of the library is exposed through a simple and convenient API of "virtual allocator".
18611 It doesn't allocate any real GPU memory. It just keeps track of used and free regions of a "virtual block".
18612 You can use it to allocate your own memory or other objects, even completely unrelated to Vulkan.
18613 A common use case is sub-allocation of pieces of one large GPU buffer.
18614 
18615 \section virtual_allocator_creating_virtual_block Creating virtual block
18616 
18617 To use this functionality, there is no main "allocator" object.
18618 You don't need to have #VmaAllocator object created.
18619 All you need to do is to create a separate #VmaVirtualBlock object for each block of memory you want to be managed by the allocator:
18620 
18621 -# Fill in #VmaVirtualBlockCreateInfo structure.
18622 -# Call vmaCreateVirtualBlock(). Get new #VmaVirtualBlock object.
18623 
18624 Example:
18625 
18626 \code
18627 VmaVirtualBlockCreateInfo blockCreateInfo = {};
18628 blockCreateInfo.size = 1048576; // 1 MB
18629 
18630 VmaVirtualBlock block;
18631 VkResult res = vmaCreateVirtualBlock(&blockCreateInfo, &block);
18632 \endcode
18633 
18634 \section virtual_allocator_making_virtual_allocations Making virtual allocations
18635 
18636 #VmaVirtualBlock object contains internal data structure that keeps track of free and occupied regions
18637 using the same code as the main Vulkan memory allocator.
18638 Similarly to #VmaAllocation for standard GPU allocations, there is #VmaVirtualAllocation type
that represents an opaque handle to an allocation within the virtual block.
18640 
18641 In order to make such allocation:
18642 
18643 -# Fill in #VmaVirtualAllocationCreateInfo structure.
18644 -# Call vmaVirtualAllocate(). Get new #VmaVirtualAllocation object that represents the allocation.
18645    You can also receive `VkDeviceSize offset` that was assigned to the allocation.
18646 
18647 Example:
18648 
18649 \code
18650 VmaVirtualAllocationCreateInfo allocCreateInfo = {};
18651 allocCreateInfo.size = 4096; // 4 KB
18652 
18653 VmaVirtualAllocation alloc;
18654 VkDeviceSize offset;
18655 res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, &offset);
18656 if(res == VK_SUCCESS)
18657 {
18658     // Use the 4 KB of your memory starting at offset.
18659 }
18660 else
18661 {
18662     // Allocation failed - no space for it could be found. Handle this error!
18663 }
18664 \endcode
18665 
18666 \section virtual_allocator_deallocation Deallocation
18667 
18668 When no longer needed, an allocation can be freed by calling vmaVirtualFree().
18669 You can only pass to this function an allocation that was previously returned by vmaVirtualAllocate()
18670 called for the same #VmaVirtualBlock.
18671 
When the whole block is no longer needed, the block object can be released by calling vmaDestroyVirtualBlock().
All allocations must be freed before the block is destroyed, which is checked internally by an assert.
18674 However, if you don't want to call vmaVirtualFree() for each allocation, you can use vmaClearVirtualBlock() to free them all at once -
18675 a feature not available in normal Vulkan memory allocator. Example:
18676 
18677 \code
18678 vmaVirtualFree(block, alloc);
18679 vmaDestroyVirtualBlock(block);
18680 \endcode
18681 
18682 \section virtual_allocator_allocation_parameters Allocation parameters
18683 
18684 You can attach a custom pointer to each allocation by using vmaSetVirtualAllocationUserData().
18685 Its default value is null.
18686 It can be used to store any data that needs to be associated with that allocation - e.g. an index, a handle, or a pointer to some
18687 larger data structure containing more information. Example:
18688 
18689 \code
18690 struct CustomAllocData
18691 {
18692     std::string m_AllocName;
18693 };
18694 CustomAllocData* allocData = new CustomAllocData();
18695 allocData->m_AllocName = "My allocation 1";
18696 vmaSetVirtualAllocationUserData(block, alloc, allocData);
18697 \endcode
18698 
18699 The pointer can later be fetched, along with allocation offset and size, by passing the allocation handle to function
18700 vmaGetVirtualAllocationInfo() and inspecting returned structure #VmaVirtualAllocationInfo.
18701 If you allocated a new object to be used as the custom pointer, don't forget to delete that object before freeing the allocation!
18702 Example:
18703 
18704 \code
18705 VmaVirtualAllocationInfo allocInfo;
18706 vmaGetVirtualAllocationInfo(block, alloc, &allocInfo);
18707 delete (CustomAllocData*)allocInfo.pUserData;
18708 
18709 vmaVirtualFree(block, alloc);
18710 \endcode
18711 
18712 \section virtual_allocator_alignment_and_units Alignment and units
18713 
It feels natural to express sizes and offsets in bytes.
If an offset of an allocation needs to be aligned to a multiple of some number (e.g. 4 bytes), you can fill the optional member
VmaVirtualAllocationCreateInfo::alignment to request it. Example:
18717 
18718 \code
18719 VmaVirtualAllocationCreateInfo allocCreateInfo = {};
18720 allocCreateInfo.size = 4096; // 4 KB
allocCreateInfo.alignment = 4; // Returned offset must be a multiple of 4 B
18722 
18723 VmaVirtualAllocation alloc;
18724 res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, nullptr);
18725 \endcode
18726 
Alignments of different allocations made from one block may vary.
However, if all alignments and sizes are always a multiple of some size, e.g. 4 B or `sizeof(MyDataStruct)`,
you can express all sizes, alignments, and offsets in multiples of that size instead of individual bytes.
It might be more convenient, but you need to make sure to use this new unit consistently in all the places:
18731 
18732 - VmaVirtualBlockCreateInfo::size
18733 - VmaVirtualAllocationCreateInfo::size and VmaVirtualAllocationCreateInfo::alignment
18734 - Using offset returned by vmaVirtualAllocate() or in VmaVirtualAllocationInfo::offset
18735 
18736 \section virtual_allocator_statistics Statistics
18737 
18738 You can obtain statistics of a virtual block using vmaGetVirtualBlockStatistics()
18739 (to get brief statistics that are fast to calculate)
18740 or vmaCalculateVirtualBlockStatistics() (to get more detailed statistics, slower to calculate).
18741 The functions fill structures #VmaStatistics, #VmaDetailedStatistics respectively - same as used by the normal Vulkan memory allocator.
18742 Example:
18743 
18744 \code
18745 VmaStatistics stats;
18746 vmaGetVirtualBlockStatistics(block, &stats);
18747 printf("My virtual block has %llu bytes used by %u virtual allocations\n",
18748     stats.allocationBytes, stats.allocationCount);
18749 \endcode
18750 
18751 You can also request a full list of allocations and free regions as a string in JSON format by calling
18752 vmaBuildVirtualBlockStatsString().
18753 Returned string must be later freed using vmaFreeVirtualBlockStatsString().
18754 The format of this string differs from the one returned by the main Vulkan allocator, but it is similar.
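
A minimal sketch:

\code
char* statsString = nullptr;
vmaBuildVirtualBlockStatsString(block, &statsString, VK_TRUE); // VK_TRUE = include detailed map.
// Save or log statsString...
vmaFreeVirtualBlockStatsString(block, statsString);
\endcode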
18755 
18756 \section virtual_allocator_additional_considerations Additional considerations
18757 
18758 The "virtual allocator" functionality is implemented on a level of individual memory blocks.
18759 Keeping track of a whole collection of blocks, allocating new ones when out of free space,
18760 deleting empty ones, and deciding which one to try first for a new allocation must be implemented by the user.
18761 
18762 Alternative allocation algorithms are supported, just like in custom pools of the real GPU memory.
18763 See enum #VmaVirtualBlockCreateFlagBits to learn how to specify them (e.g. #VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT).
18764 You can find their description in chapter \ref custom_memory_pools.
18765 Allocation strategies are also supported.
18766 See enum #VmaVirtualAllocationCreateFlagBits to learn how to specify them (e.g. #VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT).
18767 
The following features are supported only by the allocator of the real GPU memory and not by virtual allocations:
18769 buffer-image granularity, `VMA_DEBUG_MARGIN`, `VMA_MIN_ALIGNMENT`.
18770 
18771 
18772 \page debugging_memory_usage Debugging incorrect memory usage
18773 
18774 If you suspect a bug with memory usage, like usage of uninitialized memory or
18775 memory being overwritten out of bounds of an allocation,
18776 you can use debug features of this library to verify this.
18777 
18778 \section debugging_memory_usage_initialization Memory initialization
18779 
18780 If you experience a bug with incorrect and nondeterministic data in your program and you suspect uninitialized memory to be used,
18781 you can enable automatic memory initialization to verify this.
18782 To do it, define macro `VMA_DEBUG_INITIALIZE_ALLOCATIONS` to 1.
18783 
18784 \code
18785 #define VMA_DEBUG_INITIALIZE_ALLOCATIONS 1
18786 #include "vk_mem_alloc.h"
18787 \endcode
18788 
18789 It makes memory of new allocations initialized to bit pattern `0xDCDCDCDC`.
18790 Before an allocation is destroyed, its memory is filled with bit pattern `0xEFEFEFEF`.
18791 Memory is automatically mapped and unmapped if necessary.
18792 
If you find these values while debugging your program, chances are good that you incorrectly
read Vulkan memory that is allocated but not initialized, or already freed, respectively.
18795 
18796 Memory initialization works only with memory types that are `HOST_VISIBLE` and with allocations that can be mapped.
18797 It works also with dedicated allocations.
18798 
18799 \section debugging_memory_usage_margins Margins
18800 
18801 By default, allocations are laid out in memory blocks next to each other if possible
18802 (considering required alignment, `bufferImageGranularity`, and `nonCoherentAtomSize`).
18803 
18804 ![Allocations without margin](../gfx/Margins_1.png)
18805 
Define macro `VMA_DEBUG_MARGIN` to some non-zero value (e.g. 16) to enforce the specified
number of bytes as a margin after every allocation.
18808 
18809 \code
18810 #define VMA_DEBUG_MARGIN 16
18811 #include "vk_mem_alloc.h"
18812 \endcode
18813 
18814 ![Allocations with margin](../gfx/Margins_2.png)
18815 
If your bug goes away after enabling margins, it means it may be caused by memory
being overwritten outside of allocation boundaries. It is not 100% certain though.
A change in application behavior may also be caused by a different order and distribution
of allocations across memory blocks after margins are applied.
18820 
18821 Margins work with all types of memory.
18822 
Margin is applied only to allocations made out of memory blocks and not to dedicated
allocations, which have their own memory block of a specific size.
It is thus not applied to allocations made using the #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT flag
or those automatically placed in dedicated allocations, e.g. due to their
large size or as recommended by the VK_KHR_dedicated_allocation extension.
18828 
18829 Margins appear in [JSON dump](@ref statistics_json_dump) as part of free space.
18830 
18831 Note that enabling margins increases memory usage and fragmentation.
18832 
18833 Margins do not apply to \ref virtual_allocator.
18834 
18835 \section debugging_memory_usage_corruption_detection Corruption detection
18836 
18837 You can additionally define macro `VMA_DEBUG_DETECT_CORRUPTION` to 1 to enable validation
18838 of contents of the margins.
18839 
18840 \code
18841 #define VMA_DEBUG_MARGIN 16
18842 #define VMA_DEBUG_DETECT_CORRUPTION 1
18843 #include "vk_mem_alloc.h"
18844 \endcode
18845 
When this feature is enabled, the number of bytes specified as `VMA_DEBUG_MARGIN`
(it must be a multiple of 4) after every allocation is filled with a magic number.
This idea is also known as a "canary".
18849 Memory is automatically mapped and unmapped if necessary.
18850 
18851 This number is validated automatically when the allocation is destroyed.
18852 If it is not equal to the expected value, `VMA_ASSERT()` is executed.
It clearly means that either the CPU or the GPU overwrote the memory outside the boundaries of the allocation,
which indicates a serious bug.
18855 
You can also explicitly request checking margins of all allocations in all memory blocks
that belong to specified memory types by using function vmaCheckCorruption(),
or in memory blocks that belong to a specified custom pool, by using function
18859 vmaCheckPoolCorruption().
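
A minimal sketch of both calls:

\code
// Check all allocations in all memory blocks of all memory types:
VkResult res = vmaCheckCorruption(allocator, UINT32_MAX);
// Check res...

// Or only allocations in a specific custom pool:
res = vmaCheckPoolCorruption(allocator, pool);
// Check res...
\endcode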
18860 
18861 Margin validation (corruption detection) works only for memory types that are
18862 `HOST_VISIBLE` and `HOST_COHERENT`.
18863 
18864 
18865 \page opengl_interop OpenGL Interop
18866 
18867 VMA provides some features that help with interoperability with OpenGL.
18868 
18869 \section opengl_interop_exporting_memory Exporting memory
18870 
18871 If you want to attach `VkExportMemoryAllocateInfoKHR` structure to `pNext` chain of memory allocations made by the library:
18872 
18873 It is recommended to create \ref custom_memory_pools for such allocations.
18874 Define and fill in your `VkExportMemoryAllocateInfoKHR` structure and attach it to VmaPoolCreateInfo::pMemoryAllocateNext
18875 while creating the custom pool.
18876 Please note that the structure must remain alive and unchanged for the whole lifetime of the #VmaPool,
18877 not only while creating it, as no copy of the structure is made,
18878 but its original pointer is used for each allocation instead.
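
A sketch of such a pool setup (the handle type is an assumption - e.g. opaque FD on Linux,
opaque Win32 handle on Windows; `memTypeIndex` is found as described in \ref custom_memory_pools):

\code
// Must remain alive and unchanged for the whole lifetime of the pool!
VkExportMemoryAllocateInfoKHR exportMemAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR };
exportMemAllocInfo.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;

VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.pMemoryAllocateNext = &exportMemAllocInfo;

VmaPool pool;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
// Check res...
\endcode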
18879 
18880 If you want to export all memory allocated by the library from certain memory types,
18881 also dedicated allocations or other allocations made from default pools,
18882 an alternative solution is to fill in VmaAllocatorCreateInfo::pTypeExternalMemoryHandleTypes.
18883 It should point to an array with `VkExternalMemoryHandleTypeFlagsKHR` to be automatically passed by the library
18884 through `VkExportMemoryAllocateInfoKHR` on each allocation made from a specific memory type.
18885 Please note that new versions of the library also support dedicated allocations created in custom pools.
18886 
You should not mix these two methods in a way that would apply both to the same memory type.
Otherwise, the `VkExportMemoryAllocateInfoKHR` structure would be attached twice to the `pNext` chain of `VkMemoryAllocateInfo`.
18889 
18890 
18891 \section opengl_interop_custom_alignment Custom alignment
18892 
18893 Buffers or images exported to a different API like OpenGL may require a different alignment,
18894 higher than the one used by the library automatically, queried from functions like `vkGetBufferMemoryRequirements`.
18895 To impose such alignment:
18896 
18897 It is recommended to create \ref custom_memory_pools for such allocations.
18898 Set VmaPoolCreateInfo::minAllocationAlignment member to the minimum alignment required for each allocation
18899 to be made out of this pool.
18900 The alignment actually used will be the maximum of this member and the alignment returned for the specific buffer or image
18901 from a function like `vkGetBufferMemoryRequirements`, which is called by VMA automatically.
18902 
18903 If you want to create a buffer with a specific minimum alignment out of default pools,
18904 use special function vmaCreateBufferWithAlignment(), which takes additional parameter `minAlignment`.
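
A sketch of the latter (the 4096 B alignment is an example value assumed to be imposed by the external API):

\code
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;

VkBuffer buf;
VmaAllocation alloc;
VkResult res = vmaCreateBufferWithAlignment(allocator, &bufCreateInfo, &allocCreateInfo,
    4096, // minAlignment
    &buf, &alloc, nullptr);
// Check res...
\endcode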
18905 
Note that the problem of alignment affects only resources placed inside bigger `VkDeviceMemory` blocks and not dedicated
allocations, as these, by definition, always have alignment = 0 because the resource is bound to the beginning of its dedicated block.
Contrary to Direct3D 12, Vulkan doesn't have a concept of alignment of the entire memory block passed on its allocation.
18909 
18910 
18911 \page usage_patterns Recommended usage patterns
18912 
18913 Vulkan gives great flexibility in memory allocation.
18914 This chapter shows the most common patterns.
18915 
18916 See also slides from talk:
18917 [Sawicki, Adam. Advanced Graphics Techniques Tutorial: Memory management in Vulkan and DX12. Game Developers Conference, 2018](https://www.gdcvault.com/play/1025458/Advanced-Graphics-Techniques-Tutorial-New)
18918 
18919 
18920 \section usage_patterns_gpu_only GPU-only resource
18921 
18922 <b>When:</b>
18923 Any resources that you frequently write and read on GPU,
18924 e.g. images used as color attachments (aka "render targets"), depth-stencil attachments,
18925 images/buffers used as storage image/buffer (aka "Unordered Access View (UAV)").
18926 
18927 <b>What to do:</b>
18928 Let the library select the optimal memory type, which will likely have `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
18929 
18930 \code
18931 VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
18932 imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
18933 imgCreateInfo.extent.width = 3840;
18934 imgCreateInfo.extent.height = 2160;
18935 imgCreateInfo.extent.depth = 1;
18936 imgCreateInfo.mipLevels = 1;
18937 imgCreateInfo.arrayLayers = 1;
18938 imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
18939 imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
18940 imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
18941 imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
18942 imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
18943 
18944 VmaAllocationCreateInfo allocCreateInfo = {};
18945 allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
18946 allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
18947 allocCreateInfo.priority = 1.0f;
18948 
18949 VkImage img;
18950 VmaAllocation alloc;
18951 vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr);
18952 \endcode
18953 
18954 <b>Also consider:</b>
18955 Consider creating them as dedicated allocations using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT,
18956 especially if they are large or if you plan to destroy and recreate them with different sizes
18957 e.g. when display resolution changes.
18958 Prefer to create such resources first and all other GPU resources (like textures and vertex buffers) later.
When the VK_EXT_memory_priority extension is enabled, it is also worth setting a high priority on such allocations
to decrease the chances of them being evicted to system memory by the operating system.
18961 
18962 \section usage_patterns_staging_copy_upload Staging copy for upload
18963 
18964 <b>When:</b>
18965 A "staging" buffer than you want to map and fill from CPU code, then use as a source od transfer
18966 to some GPU resource.
18967 
18968 <b>What to do:</b>
18969 Use flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT.
18970 Let the library select the optimal memory type, which will always have `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`.
18971 
18972 \code
18973 VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
18974 bufCreateInfo.size = 65536;
18975 bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
18976 
18977 VmaAllocationCreateInfo allocCreateInfo = {};
18978 allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
18979 allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
18980     VMA_ALLOCATION_CREATE_MAPPED_BIT;
18981 
18982 VkBuffer buf;
18983 VmaAllocation alloc;
18984 VmaAllocationInfo allocInfo;
18985 vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
18986 
18987 ...
18988 
18989 memcpy(allocInfo.pMappedData, myData, myDataSize);
18990 \endcode
18991 
18992 <b>Also consider:</b>
You can map the allocation using vmaMapMemory() or you can create it as persistently mapped
18994 using #VMA_ALLOCATION_CREATE_MAPPED_BIT, as in the example above.
18995 
18996 
18997 \section usage_patterns_readback Readback
18998 
18999 <b>When:</b>
19000 Buffers for data written by or transferred from the GPU that you want to read back on the CPU,
19001 e.g. results of some computations.
19002 
19003 <b>What to do:</b>
19004 Use flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
19005 Let the library select the optimal memory type, which will always have `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`
19006 and `VK_MEMORY_PROPERTY_HOST_CACHED_BIT`.
19007 
19008 \code
19009 VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
19010 bufCreateInfo.size = 65536;
19011 bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
19012 
19013 VmaAllocationCreateInfo allocCreateInfo = {};
19014 allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
19015 allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT |
19016     VMA_ALLOCATION_CREATE_MAPPED_BIT;
19017 
19018 VkBuffer buf;
19019 VmaAllocation alloc;
19020 VmaAllocationInfo allocInfo;
19021 vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
19022 
19023 ...
19024 
19025 const float* downloadedData = (const float*)allocInfo.pMappedData;
19026 \endcode
19027 
19028 
19029 \section usage_patterns_advanced_data_uploading Advanced data uploading
19030 
For resources that you frequently write on CPU via a mapped pointer and
frequently read on GPU, e.g. as a uniform buffer (also called "dynamic"), multiple options are possible:
19033 
19034 -# Easiest solution is to have one copy of the resource in `HOST_VISIBLE` memory,
19035    even if it means system RAM (not `DEVICE_LOCAL`) on systems with a discrete graphics card,
19036    and make the device reach out to that resource directly.
19037    - Reads performed by the device will then go through PCI Express bus.
     The performance of this access may be limited, but it may be fine depending on the size
19039      of this resource (whether it is small enough to quickly end up in GPU cache) and the sparsity
19040      of access.
19041 -# On systems with unified memory (e.g. AMD APU or Intel integrated graphics, mobile chips),
19042    a memory type may be available that is both `HOST_VISIBLE` (available for mapping) and `DEVICE_LOCAL`
19043    (fast to access from the GPU). Then, it is likely the best choice for such type of resource.
19044 -# Systems with a discrete graphics card and separate video memory may or may not expose
19045    a memory type that is both `HOST_VISIBLE` and `DEVICE_LOCAL`, also known as Base Address Register (BAR).
19046    If they do, it represents a piece of VRAM (or entire VRAM, if ReBAR is enabled in the motherboard BIOS)
19047    that is available to CPU for mapping.
19048    - Writes performed by the host to that memory go through PCI Express bus.
19049      The performance of these writes may be limited, but it may be fine, especially on PCIe 4.0,
19050      as long as rules of using uncached and write-combined memory are followed - only sequential writes and no reads.
19051 -# Finally, you may need or prefer to create a separate copy of the resource in `DEVICE_LOCAL` memory,
19052    a separate "staging" copy in `HOST_VISIBLE` memory and perform an explicit transfer command between them.
19053 
Thankfully, VMA offers an aid to create and use such resources in the way optimal
for the current Vulkan device. To help the library make the best choice,
19056 use flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT together with
19057 #VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT.
19058 It will then prefer a memory type that is both `DEVICE_LOCAL` and `HOST_VISIBLE` (integrated memory or BAR),
19059 but if no such memory type is available or allocation from it fails
19060 (PC graphics cards have only 256 MB of BAR by default, unless ReBAR is supported and enabled in BIOS),
19061 it will fall back to `DEVICE_LOCAL` memory for fast GPU access.
19062 It is then up to you to detect that the allocation ended up in a memory type that is not `HOST_VISIBLE`,
19063 so you need to create another "staging" allocation and perform explicit transfers.
19064 
19065 \code
19066 VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
19067 bufCreateInfo.size = 65536;
19068 bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
19069 
19070 VmaAllocationCreateInfo allocCreateInfo = {};
19071 allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
19072 allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
19073     VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT |
19074     VMA_ALLOCATION_CREATE_MAPPED_BIT;
19075 
19076 VkBuffer buf;
19077 VmaAllocation alloc;
19078 VmaAllocationInfo allocInfo;
19079 vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
19080 
19081 VkMemoryPropertyFlags memPropFlags;
19082 vmaGetAllocationMemoryProperties(allocator, alloc, &memPropFlags);
19083 
19084 if(memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
19085 {
19086     // Allocation ended up in a mappable memory and is already mapped - write to it directly.
19087 
19088     // [Executed in runtime]:
19089     memcpy(allocInfo.pMappedData, myData, myDataSize);
19090 }
19091 else
19092 {
19093     // Allocation ended up in a non-mappable memory - need to transfer.
19094     VkBufferCreateInfo stagingBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
19095     stagingBufCreateInfo.size = 65536;
19096     stagingBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
19097 
19098     VmaAllocationCreateInfo stagingAllocCreateInfo = {};
19099     stagingAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
19100     stagingAllocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
19101         VMA_ALLOCATION_CREATE_MAPPED_BIT;
19102 
19103     VkBuffer stagingBuf;
19104     VmaAllocation stagingAlloc;
19105     VmaAllocationInfo stagingAllocInfo;
19106     vmaCreateBuffer(allocator, &stagingBufCreateInfo, &stagingAllocCreateInfo,
        &stagingBuf, &stagingAlloc, &stagingAllocInfo);
19108 
19109     // [Executed in runtime]:
19110     memcpy(stagingAllocInfo.pMappedData, myData, myDataSize);
19111     //vkCmdPipelineBarrier: VK_ACCESS_HOST_WRITE_BIT --> VK_ACCESS_TRANSFER_READ_BIT
    VkBufferCopy bufCopy = {
        0, // srcOffset
        0, // dstOffset
        myDataSize }; // size
19116     vkCmdCopyBuffer(cmdBuf, stagingBuf, buf, 1, &bufCopy);
19117 }
19118 \endcode
19119 
19120 \section usage_patterns_other_use_cases Other use cases
19121 
19122 Here are some other, less obvious use cases and their recommended settings:
19123 
19124 - An image that is used only as transfer source and destination, but it should stay on the device,
19125   as it is used to temporarily store a copy of some texture, e.g. from the current to the next frame,
19126   for temporal antialiasing or other temporal effects.
19127   - Use `VkImageCreateInfo::usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT`
19128   - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO
- An image that is used only as transfer source and destination, but it should be placed
  in the system RAM even though it doesn't need to be mapped, because it serves as a "swap" copy to evict
  least recently used textures from VRAM.
19132   - Use `VkImageCreateInfo::usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT`
19133   - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO_PREFER_HOST,
19134     as VMA needs a hint here to differentiate from the previous case.
19135 - A buffer that you want to map and write from the CPU, directly read from the GPU
19136   (e.g. as a uniform or vertex buffer), but you have a clear preference to place it in device or
19137   host memory due to its large size.
19138   - Use `VkBufferCreateInfo::usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT`
19139   - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE or #VMA_MEMORY_USAGE_AUTO_PREFER_HOST
19140   - Use VmaAllocationCreateInfo::flags = #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT
19141 
19142 
19143 \page configuration Configuration
19144 
19145 Please check "CONFIGURATION SECTION" in the code to find macros that you can define
19146 before each include of this file or change directly in this file to provide
19147 your own implementation of basic facilities like assert, `min()` and `max()` functions,
19148 mutex, atomic etc.
19149 The library uses its own implementation of containers by default, but you can switch to using
19150 STL containers instead.
19151 
19152 For example, define `VMA_ASSERT(expr)` before including the library to provide
19153 custom implementation of the assertion, compatible with your project.
19154 By default it is defined to standard C `assert(expr)` in `_DEBUG` configuration
19155 and empty otherwise.
19156 
19157 \section config_Vulkan_functions Pointers to Vulkan functions
19158 
19159 There are multiple ways to import pointers to Vulkan functions in the library.
19160 In the simplest case you don't need to do anything.
19161 If the compilation or linking of your program or the initialization of the #VmaAllocator
19162 doesn't work for you, you can try to reconfigure it.
19163 
19164 First, the allocator tries to fetch pointers to Vulkan functions linked statically,
19165 like this:
19166 
19167 \code
19168 m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
19169 \endcode
19170 
19171 If you want to disable this feature, set configuration macro: `#define VMA_STATIC_VULKAN_FUNCTIONS 0`.
19172 
19173 Second, you can provide the pointers yourself by setting member VmaAllocatorCreateInfo::pVulkanFunctions.
19174 You can fetch them e.g. using functions `vkGetInstanceProcAddr` and `vkGetDeviceProcAddr` or
19175 by using a helper library like [volk](https://github.com/zeux/volk).
19176 
19177 Third, VMA tries to fetch remaining pointers that are still null by calling
19178 `vkGetInstanceProcAddr` and `vkGetDeviceProcAddr` on its own.
You only need to fill in VmaVulkanFunctions::vkGetInstanceProcAddr and VmaVulkanFunctions::vkGetDeviceProcAddr.
19180 Other pointers will be fetched automatically.
19181 If you want to disable this feature, set configuration macro: `#define VMA_DYNAMIC_VULKAN_FUNCTIONS 0`.
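
For example, you could fill in only the two entry points like this (a sketch; `instance`,
`physicalDevice`, and `device` are assumed to be created earlier):

\code
VmaVulkanFunctions vulkanFunctions = {};
vulkanFunctions.vkGetInstanceProcAddr = vkGetInstanceProcAddr;
vulkanFunctions.vkGetDeviceProcAddr = vkGetDeviceProcAddr;

VmaAllocatorCreateInfo allocatorCreateInfo = {};
allocatorCreateInfo.instance = instance;
allocatorCreateInfo.physicalDevice = physicalDevice;
allocatorCreateInfo.device = device;
allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;

VmaAllocator allocator;
vmaCreateAllocator(&allocatorCreateInfo, &allocator);
\endcode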
19182 
19183 Finally, all the function pointers required by the library (considering selected
19184 Vulkan version and enabled extensions) are checked with `VMA_ASSERT` if they are not null.
19185 
19186 
19187 \section custom_memory_allocator Custom host memory allocator
19188 
If you use a custom allocator for CPU memory rather than the default C++ operator `new`
and `delete`, you can make this library use your allocator as well
by filling the optional member VmaAllocatorCreateInfo::pAllocationCallbacks. These
19192 functions will be passed to Vulkan, as well as used by the library itself to
19193 make any CPU-side allocations.
19194 
19195 \section allocation_callbacks Device memory allocation callbacks
19196 
19197 The library makes calls to `vkAllocateMemory()` and `vkFreeMemory()` internally.
You can set up callbacks to be informed about these calls, e.g. for the purpose
19199 of gathering some statistics. To do it, fill optional member
19200 VmaAllocatorCreateInfo::pDeviceMemoryCallbacks.
19201 
19202 \section heap_memory_limit Device heap memory limit
19203 
When device memory of a certain heap runs out of free space, new allocations may
fail (returning an error code) or they may succeed, silently pushing some existing
memory blocks from GPU VRAM to system RAM (which degrades performance). This
19207 behavior is implementation-dependent - it depends on GPU vendor and graphics
19208 driver.
19209 
19210 On AMD cards it can be controlled while creating Vulkan device object by using
19211 VK_AMD_memory_overallocation_behavior extension, if available.
19212 
Alternatively, if you want to test how your program behaves with a limited amount of Vulkan device
memory available, without switching your graphics card to one that really has
smaller VRAM, you can use a feature of this library intended for this purpose.
19216 To do it, fill optional member VmaAllocatorCreateInfo::pHeapSizeLimit.
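
A sketch of limiting heap 0 to 256 MiB (`VK_WHOLE_SIZE` means no limit; the array is indexed by memory heap):

\code
VkDeviceSize heapSizeLimit[VK_MAX_MEMORY_HEAPS];
for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    heapSizeLimit[i] = VK_WHOLE_SIZE; // No limit.
heapSizeLimit[0] = 256ull * 1024 * 1024; // Limit heap 0 to 256 MiB.

VmaAllocatorCreateInfo allocatorCreateInfo = {};
// ... fill other members ...
allocatorCreateInfo.pHeapSizeLimit = heapSizeLimit;
\endcode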
19217 
19218 
19219 
19220 \page vk_khr_dedicated_allocation VK_KHR_dedicated_allocation
19221 
VK_KHR_dedicated_allocation is a Vulkan extension which can be used to improve
performance on some GPUs. It augments the Vulkan API with the possibility to query
the driver whether it prefers a particular buffer or image to have its own, dedicated
allocation (separate `VkDeviceMemory` block) for better efficiency - to be able
to do some internal optimizations. The extension is supported by this library.
19227 It will be used automatically when enabled.
19228 
19229 It has been promoted to core Vulkan 1.1, so if you use eligible Vulkan version
19230 and inform VMA about it by setting VmaAllocatorCreateInfo::vulkanApiVersion,
19231 you are all set.
19232 
19233 Otherwise, if you want to use it as an extension:
19234 
1. When creating a Vulkan device, check if the following 2 device extensions are
supported (call `vkEnumerateDeviceExtensionProperties()`).
If yes, enable them (fill `VkDeviceCreateInfo::ppEnabledExtensionNames`).
19238 
19239 - VK_KHR_get_memory_requirements2
19240 - VK_KHR_dedicated_allocation
19241 
19242 If you enabled these extensions:
19243 
2. Use #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag when creating
19245 your #VmaAllocator to inform the library that you enabled required extensions
19246 and you want the library to use them.
19247 
19248 \code
19249 allocatorInfo.flags |= VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT;
19250 
19251 vmaCreateAllocator(&allocatorInfo, &allocator);
19252 \endcode
19253 
19254 That is all. The extension will be automatically used whenever you create a
19255 buffer using vmaCreateBuffer() or image using vmaCreateImage().
19256 
19257 When using the extension together with Vulkan Validation Layer, you will receive
19258 warnings like this:
19259 
19260 _vkBindBufferMemory(): Binding memory to buffer 0x33 but vkGetBufferMemoryRequirements() has not been called on that buffer._
19261 
19262 It is OK, you should just ignore it. It happens because you use function
19263 `vkGetBufferMemoryRequirements2KHR()` instead of standard
19264 `vkGetBufferMemoryRequirements()`, while the validation layer seems to be
19265 unaware of it.
19266 
19267 To learn more about this extension, see:
19268 
19269 - [VK_KHR_dedicated_allocation in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap50.html#VK_KHR_dedicated_allocation)
19270 - [VK_KHR_dedicated_allocation unofficial manual](http://asawicki.info/articles/VK_KHR_dedicated_allocation.php5)
19271 
19272 
19273 
19274 \page vk_ext_memory_priority VK_EXT_memory_priority
19275 
VK_EXT_memory_priority is a device extension that allows passing an additional "priority"
value to Vulkan memory allocations, which the implementation may use to prefer certain
buffers and images that are critical for performance to stay in device-local memory
in cases when the memory is over-subscribed, while some others may be moved to the system memory.
19280 
19281 VMA offers convenient usage of this extension.
19282 If you enable it, you can pass "priority" parameter when creating allocations or custom pools
19283 and the library automatically passes the value to Vulkan using this extension.

If you want to use this extension in connection with VMA, follow these steps:

\section vk_ext_memory_priority_initialization Initialization

1) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
Check if the extension is supported - whether the returned array of `VkExtensionProperties` contains "VK_EXT_memory_priority".

2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of the old `vkGetPhysicalDeviceFeatures`.
Attach an additional structure `VkPhysicalDeviceMemoryPriorityFeaturesEXT` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
Check if the device feature is really supported - whether `VkPhysicalDeviceMemoryPriorityFeaturesEXT::memoryPriority` is true.

3) While creating the device with `vkCreateDevice`, enable this extension - add "VK_EXT_memory_priority"
to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`.

4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`.
Fill in the `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`.
Enable this device feature - attach an additional structure `VkPhysicalDeviceMemoryPriorityFeaturesEXT` to the
`VkPhysicalDeviceFeatures2::pNext` chain and set its member `memoryPriority` to `VK_TRUE`.

5) While creating the #VmaAllocator with vmaCreateAllocator(), inform VMA that you
have enabled this extension and feature - add #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT
to VmaAllocatorCreateInfo::flags. These steps are put together in the sketch below.
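
A minimal sketch of this sequence (error checking omitted; the `physicalDevice`, `device`,
and `allocator` variables, as well as the queue setup, are assumed to exist in your code):

\code
// 1) + 2) Query the feature (extension enumeration omitted for brevity).
VkPhysicalDeviceMemoryPriorityFeaturesEXT memoryPriorityFeatures = {
    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PRIORITY_FEATURES_EXT };
VkPhysicalDeviceFeatures2 features2 = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2 };
features2.pNext = &memoryPriorityFeatures;
vkGetPhysicalDeviceFeatures2(physicalDevice, &features2);
// Proceed only if memoryPriorityFeatures.memoryPriority == VK_TRUE.

// 3) + 4) Enable the extension and the feature when creating the device.
const char* enabledExtensionNames[] = { VK_EXT_MEMORY_PRIORITY_EXTENSION_NAME };
VkDeviceCreateInfo deviceCreateInfo = { VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO };
deviceCreateInfo.pNext = &features2; // pEnabledFeatures is left null.
deviceCreateInfo.enabledExtensionCount = 1;
deviceCreateInfo.ppEnabledExtensionNames = enabledExtensionNames;
// Fill queue create infos etc. as usual...
vkCreateDevice(physicalDevice, &deviceCreateInfo, nullptr, &device);

// 5) Inform VMA.
VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.flags |= VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT;
// Fill physicalDevice, device, instance etc. as usual...
vmaCreateAllocator(&allocatorInfo, &allocator);
\endcode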

\section vk_ext_memory_priority_usage Usage

When using this extension, you should initialize the following members:

- VmaAllocationCreateInfo::priority when creating a dedicated allocation with #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
- VmaPoolCreateInfo::priority when creating a custom pool.

It should be a floating-point value between `0.0f` and `1.0f`, where the recommended default is `0.5f`.
Memory allocated with a higher value can be treated by the Vulkan implementation as higher priority,
so it has a lower chance of being pushed out to system memory, which would degrade its performance.

It might be a good idea to create performance-critical resources, like color-attachment
or depth-stencil images, as dedicated allocations and set a high priority on them. For example:
\code
VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
imgCreateInfo.extent.width = 3840;
imgCreateInfo.extent.height = 2160;
imgCreateInfo.extent.depth = 1;
imgCreateInfo.mipLevels = 1;
imgCreateInfo.arrayLayers = 1;
imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
allocCreateInfo.priority = 1.0f;

VkImage img;
VmaAllocation alloc;
vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr);
\endcode

The `priority` member is ignored in the following situations:

- Allocations created in custom pools: They inherit the priority, along with all other
  allocation parameters, from the parameters passed in #VmaPoolCreateInfo when the pool was created.
- Allocations created in default pools: They inherit the priority from the parameters
  VMA used when creating the default pools, which means `priority == 0.5f`.


\page vk_amd_device_coherent_memory VK_AMD_device_coherent_memory

VK_AMD_device_coherent_memory is a device extension that enables access to
additional memory types with the `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` and
`VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` flags. It is useful mostly for
allocation of buffers intended for writing "breadcrumb markers" in between passes
or draw calls, which in turn are useful for debugging GPU crash/hang/TDR cases.

When the extension is available but has not been enabled, the Vulkan physical device
still exposes those memory types, but their usage is forbidden. VMA automatically
takes care of that - it returns `VK_ERROR_FEATURE_NOT_PRESENT` when an attempt
to allocate memory of such a type is made.

If you want to use this extension in connection with VMA, follow these steps:

\section vk_amd_device_coherent_memory_initialization Initialization

1) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
Check if the extension is supported - whether the returned array of `VkExtensionProperties` contains "VK_AMD_device_coherent_memory".

2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of the old `vkGetPhysicalDeviceFeatures`.
Attach an additional structure `VkPhysicalDeviceCoherentMemoryFeaturesAMD` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
Check if the device feature is really supported - whether `VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory` is true.

3) While creating the device with `vkCreateDevice`, enable this extension - add "VK_AMD_device_coherent_memory"
to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`.

4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`.
Fill in the `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`.
Enable this device feature - attach an additional structure `VkPhysicalDeviceCoherentMemoryFeaturesAMD` to
`VkPhysicalDeviceFeatures2::pNext` and set its member `deviceCoherentMemory` to `VK_TRUE`.

5) While creating the #VmaAllocator with vmaCreateAllocator(), inform VMA that you
have enabled this extension and feature - add #VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT
to VmaAllocatorCreateInfo::flags.

\section vk_amd_device_coherent_memory_usage Usage

After following the steps described above, you can create VMA allocations and custom pools
out of the special `DEVICE_COHERENT` and `DEVICE_UNCACHED` memory types on eligible
devices. There are multiple ways to do it, for example:

- You can request or prefer to allocate out of such memory types by adding
  `VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` to VmaAllocationCreateInfo::requiredFlags
  or VmaAllocationCreateInfo::preferredFlags, as shown in the sketch after this list.
  Those flags can be freely mixed with other ways of \ref choosing_memory_type,
  like setting VmaAllocationCreateInfo::usage.
- If you have manually found the memory type index to use for this purpose, force allocation
  from this specific index by setting VmaAllocationCreateInfo::memoryTypeBits `= 1u << index`.
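
A minimal sketch of the first way (the buffer size and usage are just an example):

\code
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 4096;
bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
// Require the allocation to end up in an uncached, device-coherent memory type.
allocCreateInfo.requiredFlags = VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD;

VkBuffer buf;
VmaAllocation alloc;
vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
\endcode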

\section vk_amd_device_coherent_memory_more_information More information

To learn more about this extension, see [VK_AMD_device_coherent_memory in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VK_AMD_device_coherent_memory.html)

Example use of this extension can be found in the code of the sample and test suite
accompanying this library.


\page enabling_buffer_device_address Enabling buffer device address

The device extension VK_KHR_buffer_device_address
allows fetching a raw GPU pointer to a buffer and passing it for usage in shader code.
It has been promoted to core Vulkan 1.2.

If you want to use this feature in connection with VMA, follow these steps:

\section enabling_buffer_device_address_initialization Initialization

1) (For Vulkan version < 1.2) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
Check if the extension is supported - whether the returned array of `VkExtensionProperties` contains
"VK_KHR_buffer_device_address".

2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of the old `vkGetPhysicalDeviceFeatures`.
Attach an additional structure `VkPhysicalDeviceBufferDeviceAddressFeatures*` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
Check if the device feature is really supported - whether `VkPhysicalDeviceBufferDeviceAddressFeatures::bufferDeviceAddress` is true.

3) (For Vulkan version < 1.2) While creating the device with `vkCreateDevice`, enable this extension - add
"VK_KHR_buffer_device_address" to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`.

4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`.
Fill in the `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`.
Enable this device feature - attach an additional structure `VkPhysicalDeviceBufferDeviceAddressFeatures*` to
`VkPhysicalDeviceFeatures2::pNext` and set its member `bufferDeviceAddress` to `VK_TRUE`.

5) While creating the #VmaAllocator with vmaCreateAllocator(), inform VMA that you
have enabled this feature - add #VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT
to VmaAllocatorCreateInfo::flags.

\section enabling_buffer_device_address_usage Usage

After following the steps described above, you can create buffers with `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT*` using VMA.
The library automatically adds `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT*` to
allocated memory blocks wherever it might be needed.

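For example, a minimal sketch (the buffer size and additional usage flags are just an example;
`allocator` and `device` are assumed to exist in your code):

\code
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT |
    VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;

VkBuffer buf;
VmaAllocation alloc;
vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);

// Fetch the raw GPU pointer. On Vulkan < 1.2 with the extension,
// use vkGetBufferDeviceAddressKHR instead.
VkBufferDeviceAddressInfo addressInfo = { VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO };
addressInfo.buffer = buf;
VkDeviceAddress address = vkGetBufferDeviceAddress(device, &addressInfo);
\endcode
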
Please note that the library supports only `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT*`.
The second part of this functionality, related to "capture and replay", is not supported,
as it is intended for usage in debugging tools like RenderDoc, not in everyday Vulkan usage.

\section enabling_buffer_device_address_more_information More information

To learn more about this extension, see [VK_KHR_buffer_device_address in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap46.html#VK_KHR_buffer_device_address)

Example use of this extension can be found in the code of the sample and test suite
accompanying this library.

\page general_considerations General considerations

\section general_considerations_thread_safety Thread safety

- The library has no global state, so separate #VmaAllocator objects can be used
  independently.
  There should be no need to create multiple such objects though - one per `VkDevice` is enough.
- By default, all calls to functions that take #VmaAllocator as the first parameter
  are safe to call from multiple threads simultaneously because they are
  synchronized internally when needed.
  This includes allocation and deallocation from the default memory pool, as well as from custom #VmaPool objects.
- When the allocator is created with the #VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT
  flag, calls to functions that take such a #VmaAllocator object must be
  synchronized externally.
- Access to a #VmaAllocation object must be externally synchronized. For example,
  you must not call vmaGetAllocationInfo() and vmaMapMemory() from different
  threads at the same time if you pass the same #VmaAllocation object to these
  functions.
- A #VmaVirtualBlock is not safe to be used from multiple threads simultaneously.
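
For example, when the allocator was created with #VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT,
you could guard the calls with your own mutex (a minimal sketch - `bufCreateInfo` and
`allocCreateInfo` are assumed to be filled as in earlier examples; whether this is preferable
to the default internal synchronization depends on your application):

\code
std::mutex allocatorMutex; // Guards every call into the externally synchronized allocator.

VkBuffer buf;
VmaAllocation alloc;
{
    std::lock_guard<std::mutex> lock(allocatorMutex);
    vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
}
\endcode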

\section general_considerations_versioning_and_compatibility Versioning and compatibility

The library uses [**Semantic Versioning**](https://semver.org/),
which means version numbers follow the convention: Major.Minor.Patch (e.g. 2.3.0), where:

- An incremented Patch version means a release is backward- and forward-compatible,
  introducing only some internal improvements, bug fixes, optimizations etc.
  or changes that are out of scope of the official API described in this documentation.
- An incremented Minor version means a release is backward-compatible,
  so existing code that uses the library should continue to work, while some new
  symbols could have been added: new structures, functions, new values in existing
  enums and bit flags, new structure members, but not new function parameters.
- An incremented Major version means a release could break some backward compatibility.

All changes between official releases are documented in the file "CHANGELOG.md".

\warning Backward compatibility is considered on the level of C++ source code, not binary linkage.
Adding new members to existing structures is treated as backward compatible if initializing
the new members to binary zero results in the old behavior.
You should always fully initialize all library structures to zeros and not rely on their
exact binary size.
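
For example:

\code
// Zero-initialize the whole structure first, then set only the members you need.
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
\endcode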

\section general_considerations_validation_layer_warnings Validation layer warnings

When using this library, you may encounter the following types of warnings issued by the
Vulkan validation layer. They don't necessarily indicate a bug, so you may want
to just ignore them.

- *vkBindBufferMemory(): Binding memory to buffer 0xeb8e4 but vkGetBufferMemoryRequirements() has not been called on that buffer.*
  - It happens when the VK_KHR_dedicated_allocation extension is enabled.
    The `vkGetBufferMemoryRequirements2KHR` function is used instead, while the validation layer seems to be unaware of it.
- *Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. Only GENERAL or PREINITIALIZED should be used.*
  - It happens when you map a buffer or image, because the library maps the entire
    `VkDeviceMemory` block, where different types of images and buffers may end
    up together, especially on GPUs with unified memory like Intel.
- *Non-linear image 0xebc91 is aliased with linear buffer 0xeb8e4 which may indicate a bug.*
  - It may happen when you use [defragmentation](@ref defragmentation).

\section general_considerations_allocation_algorithm Allocation algorithm

The library uses the following algorithm for allocation, in order:

-# Try to find a free range of memory in existing blocks.
-# If failed, try to create a new block of `VkDeviceMemory`, with the preferred block size.
-# If failed, try to create such a block with size / 2, size / 4, size / 8.
-# If failed, try to allocate separate `VkDeviceMemory` for this allocation,
   just like when you use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
-# If failed, choose another memory type that meets the requirements specified in
   VmaAllocationCreateInfo and go to point 1.
-# If failed, return `VK_ERROR_OUT_OF_DEVICE_MEMORY`.

\section general_considerations_features_not_supported Features not supported

Features deliberately excluded from the scope of this library:

-# **Data transfer.** Uploading (streaming) and downloading data of buffers and images
   between CPU and GPU memory and the related synchronization is the responsibility of the user.
   Defining some "texture" object that would automatically stream its data from a
   staging copy in CPU memory to GPU memory would rather be a feature of another,
   higher-level library implemented on top of VMA.
   VMA doesn't record any commands to a `VkCommandBuffer`. It just allocates memory.
-# **Recreation of buffers and images.** Although the library has functions for
   buffer and image creation: vmaCreateBuffer(), vmaCreateImage(), you need to
   recreate these objects yourself after defragmentation. That is because the big
   structures `VkBufferCreateInfo`, `VkImageCreateInfo` are not stored in the
   #VmaAllocation object.
-# **Handling CPU memory allocation failures.** When dynamically creating small C++
   objects in CPU memory (not Vulkan memory), allocation failures are not checked
   and handled gracefully, because that would complicate the code significantly and
   is usually not needed in desktop PC applications anyway.
   Success of an allocation is just checked with an assert.
-# **Code free of any compiler warnings.** Maintaining the library to compile and
   work correctly on so many different platforms is hard enough. Being free of
   any warnings, on any version of any compiler, is simply not feasible.
   There are many preprocessor macros that make some variables unused, function parameters unreferenced,
   or conditional expressions constant in some configurations.
   The code of this library should not be bigger or more complicated just to silence these warnings.
   It is recommended to disable such warnings instead.
-# This is a C++ library with a C interface. **Bindings or ports to any other programming languages** are welcome as external projects but
   are not going to be included into this repository.
*/