//
// Copyright (c) 2017-2022 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//

#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
#define AMD_VULKAN_MEMORY_ALLOCATOR_H

/** \mainpage Vulkan Memory Allocator

<b>Version 3.1.0-development</b>

Copyright (c) 2017-2022 Advanced Micro Devices, Inc. All rights reserved. \n
License: MIT

<b>API documentation divided into groups:</b> [Modules](modules.html)

\section main_table_of_contents Table of contents

- <b>User guide</b>
  - \subpage quick_start
    - [Project setup](@ref quick_start_project_setup)
    - [Initialization](@ref quick_start_initialization)
    - [Resource allocation](@ref quick_start_resource_allocation)
  - \subpage choosing_memory_type
    - [Usage](@ref choosing_memory_type_usage)
    - [Required and preferred flags](@ref choosing_memory_type_required_preferred_flags)
    - [Explicit memory types](@ref choosing_memory_type_explicit_memory_types)
    - [Custom memory pools](@ref choosing_memory_type_custom_memory_pools)
    - [Dedicated allocations](@ref choosing_memory_type_dedicated_allocations)
  - \subpage memory_mapping
    - [Mapping functions](@ref memory_mapping_mapping_functions)
    - [Persistently mapped memory](@ref memory_mapping_persistently_mapped_memory)
    - [Cache flush and invalidate](@ref memory_mapping_cache_control)
  - \subpage staying_within_budget
    - [Querying for budget](@ref staying_within_budget_querying_for_budget)
    - [Controlling memory usage](@ref staying_within_budget_controlling_memory_usage)
  - \subpage resource_aliasing
  - \subpage custom_memory_pools
    - [Choosing memory type index](@ref custom_memory_pools_MemTypeIndex)
    - [Linear allocation algorithm](@ref linear_algorithm)
      - [Free-at-once](@ref linear_algorithm_free_at_once)
      - [Stack](@ref linear_algorithm_stack)
      - [Double stack](@ref linear_algorithm_double_stack)
      - [Ring buffer](@ref linear_algorithm_ring_buffer)
  - \subpage defragmentation
  - \subpage statistics
    - [Numeric statistics](@ref statistics_numeric_statistics)
    - [JSON dump](@ref statistics_json_dump)
  - \subpage allocation_annotation
    - [Allocation user data](@ref allocation_user_data)
    - [Allocation names](@ref allocation_names)
  - \subpage virtual_allocator
  - \subpage debugging_memory_usage
    - [Memory initialization](@ref debugging_memory_usage_initialization)
    - [Margins](@ref debugging_memory_usage_margins)
    - [Corruption detection](@ref debugging_memory_usage_corruption_detection)
  - \subpage opengl_interop
- \subpage usage_patterns
    - [GPU-only resource](@ref usage_patterns_gpu_only)
    - [Staging copy for upload](@ref usage_patterns_staging_copy_upload)
    - [Readback](@ref usage_patterns_readback)
    - [Advanced data uploading](@ref usage_patterns_advanced_data_uploading)
    - [Other use cases](@ref usage_patterns_other_use_cases)
- \subpage configuration
  - [Pointers to Vulkan functions](@ref config_Vulkan_functions)
  - [Custom host memory allocator](@ref custom_memory_allocator)
  - [Device memory allocation callbacks](@ref allocation_callbacks)
  - [Device heap memory limit](@ref heap_memory_limit)
- <b>Extension support</b>
    - \subpage vk_khr_dedicated_allocation
    - \subpage enabling_buffer_device_address
    - \subpage vk_ext_memory_priority
    - \subpage vk_amd_device_coherent_memory
- \subpage general_considerations
  - [Thread safety](@ref general_considerations_thread_safety)
  - [Versioning and compatibility](@ref general_considerations_versioning_and_compatibility)
  - [Validation layer warnings](@ref general_considerations_validation_layer_warnings)
  - [Allocation algorithm](@ref general_considerations_allocation_algorithm)
  - [Features not supported](@ref general_considerations_features_not_supported)

\section main_see_also See also

- [**Product page on GPUOpen**](https://gpuopen.com/gaming-product/vulkan-memory-allocator/)
- [**Source repository on GitHub**](https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator)

\defgroup group_init Library initialization

\brief API elements related to the initialization and management of the entire library, especially #VmaAllocator object.

\defgroup group_alloc Memory allocation

\brief API elements related to the allocation, deallocation, and management of Vulkan memory, buffers, and images.
The most basic ones are vmaCreateBuffer() and vmaCreateImage().

\defgroup group_virtual Virtual allocator

\brief API elements related to the mechanism of \ref virtual_allocator - using the core allocation algorithm
for a user-defined purpose without allocating any real GPU memory.

\defgroup group_stats Statistics

\brief API elements that query the current status of the allocator, from memory usage and budget to a full dump of the internal state in JSON format.
See documentation chapter: \ref statistics.
*/


#ifdef __cplusplus
extern "C" {
#endif

#include <vulkan/vulkan.h>

#if !defined(VMA_VULKAN_VERSION)
    #if defined(VK_VERSION_1_3)
        #define VMA_VULKAN_VERSION 1003000
    #elif defined(VK_VERSION_1_2)
        #define VMA_VULKAN_VERSION 1002000
    #elif defined(VK_VERSION_1_1)
        #define VMA_VULKAN_VERSION 1001000
    #else
        #define VMA_VULKAN_VERSION 1000000
    #endif
#endif

#if defined(__ANDROID__) && defined(VK_NO_PROTOTYPES) && VMA_STATIC_VULKAN_FUNCTIONS
    extern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;
    extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr;
    extern PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    extern PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    extern PFN_vkAllocateMemory vkAllocateMemory;
    extern PFN_vkFreeMemory vkFreeMemory;
    extern PFN_vkMapMemory vkMapMemory;
    extern PFN_vkUnmapMemory vkUnmapMemory;
    extern PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    extern PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    extern PFN_vkBindBufferMemory vkBindBufferMemory;
    extern PFN_vkBindImageMemory vkBindImageMemory;
    extern PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    extern PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    extern PFN_vkCreateBuffer vkCreateBuffer;
    extern PFN_vkDestroyBuffer vkDestroyBuffer;
    extern PFN_vkCreateImage vkCreateImage;
    extern PFN_vkDestroyImage vkDestroyImage;
    extern PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
    #if VMA_VULKAN_VERSION >= 1001000
        extern PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2;
        extern PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2;
        extern PFN_vkBindBufferMemory2 vkBindBufferMemory2;
        extern PFN_vkBindImageMemory2 vkBindImageMemory2;
        extern PFN_vkGetPhysicalDeviceMemoryProperties2 vkGetPhysicalDeviceMemoryProperties2;
    #endif // #if VMA_VULKAN_VERSION >= 1001000
#endif // #if defined(__ANDROID__) && defined(VK_NO_PROTOTYPES) && VMA_STATIC_VULKAN_FUNCTIONS

#if !defined(VMA_DEDICATED_ALLOCATION)
    #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
        #define VMA_DEDICATED_ALLOCATION 1
    #else
        #define VMA_DEDICATED_ALLOCATION 0
    #endif
#endif

#if !defined(VMA_BIND_MEMORY2)
    #if VK_KHR_bind_memory2
        #define VMA_BIND_MEMORY2 1
    #else
        #define VMA_BIND_MEMORY2 0
    #endif
#endif

#if !defined(VMA_MEMORY_BUDGET)
    #if VK_EXT_memory_budget && (VK_KHR_get_physical_device_properties2 || VMA_VULKAN_VERSION >= 1001000)
        #define VMA_MEMORY_BUDGET 1
    #else
        #define VMA_MEMORY_BUDGET 0
    #endif
#endif

// Defined to 1 when VK_KHR_buffer_device_address device extension or equivalent core Vulkan 1.2 feature is defined in its headers.
#if !defined(VMA_BUFFER_DEVICE_ADDRESS)
    #if VK_KHR_buffer_device_address || VMA_VULKAN_VERSION >= 1002000
        #define VMA_BUFFER_DEVICE_ADDRESS 1
    #else
        #define VMA_BUFFER_DEVICE_ADDRESS 0
    #endif
#endif

// Defined to 1 when VK_EXT_memory_priority device extension is defined in Vulkan headers.
#if !defined(VMA_MEMORY_PRIORITY)
    #if VK_EXT_memory_priority
        #define VMA_MEMORY_PRIORITY 1
    #else
        #define VMA_MEMORY_PRIORITY 0
    #endif
#endif

// Defined to 1 when VK_KHR_external_memory device extension is defined in Vulkan headers.
#if !defined(VMA_EXTERNAL_MEMORY)
    #if VK_KHR_external_memory
        #define VMA_EXTERNAL_MEMORY 1
    #else
        #define VMA_EXTERNAL_MEMORY 0
    #endif
#endif

// Define these macros to decorate all public functions with additional code,
// before and after the returned type, respectively. This may be useful for
// exporting the functions when compiling VMA as a separate library. Example:
// #define VMA_CALL_PRE  __declspec(dllexport)
// #define VMA_CALL_POST __cdecl
#ifndef VMA_CALL_PRE
    #define VMA_CALL_PRE
#endif
#ifndef VMA_CALL_POST
    #define VMA_CALL_POST
#endif

// Define this macro to decorate pNext pointers with an attribute specifying the Vulkan
// structure that will be extended via the pNext chain.
#ifndef VMA_EXTENDS_VK_STRUCT
    #define VMA_EXTENDS_VK_STRUCT(vkStruct)
#endif

// Define this macro to decorate pointers with an attribute specifying the
// length of the array they point to if they are not null.
//
// The length may be one of:
// - The name of another parameter in the argument list where the pointer is declared
// - The name of another member in the struct where the pointer is declared
// - The name of a member of a struct type, meaning the value of that member in
//   the context of the call. For example,
//   VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount")
//   means the number of memory heaps available in the device associated
//   with the VmaAllocator being dealt with.
#ifndef VMA_LEN_IF_NOT_NULL
    #define VMA_LEN_IF_NOT_NULL(len)
#endif

// The VMA_NULLABLE macro is defined to be _Nullable when compiling with Clang.
// see: https://clang.llvm.org/docs/AttributeReference.html#nullable
#ifndef VMA_NULLABLE
    #ifdef __clang__
        #define VMA_NULLABLE _Nullable
    #else
        #define VMA_NULLABLE
    #endif
#endif

// The VMA_NOT_NULL macro is defined to be _Nonnull when compiling with Clang.
// see: https://clang.llvm.org/docs/AttributeReference.html#nonnull
#ifndef VMA_NOT_NULL
    #ifdef __clang__
        #define VMA_NOT_NULL _Nonnull
    #else
        #define VMA_NOT_NULL
    #endif
#endif

// If non-dispatchable handles are represented as pointers then we can give
// them nullability annotations.
#ifndef VMA_NOT_NULL_NON_DISPATCHABLE
    #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
        #define VMA_NOT_NULL_NON_DISPATCHABLE VMA_NOT_NULL
    #else
        #define VMA_NOT_NULL_NON_DISPATCHABLE
    #endif
#endif

#ifndef VMA_NULLABLE_NON_DISPATCHABLE
    #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
        #define VMA_NULLABLE_NON_DISPATCHABLE VMA_NULLABLE
    #else
        #define VMA_NULLABLE_NON_DISPATCHABLE
    #endif
#endif

#ifndef VMA_STATS_STRING_ENABLED
    #define VMA_STATS_STRING_ENABLED 1
#endif

////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//
//    INTERFACE
//
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////

// Sections for managing code placement in file, only for development purposes e.g. for convenient folding inside an IDE.
#ifndef _VMA_ENUM_DECLARATIONS

/**
\addtogroup group_init
@{
*/

/// Flags for created #VmaAllocator.
typedef enum VmaAllocatorCreateFlagBits
{
    /** \brief Allocator and all objects created from it will not be synchronized internally, so you must guarantee they are used from only one thread at a time or synchronized externally by you.

    Using this flag may increase performance because internal mutexes are not used.
    */
    VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
    /** \brief Enables usage of VK_KHR_dedicated_allocation extension.

    The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`.
    When it is `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1.

    Using this extension will automatically allocate dedicated blocks of memory for
    some buffers and images instead of suballocating place for them out of bigger
    memory blocks (as if you explicitly used #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
    flag) when it is recommended by the driver. It may improve performance on some
    GPUs.

    You may set this flag only if you found out that the following device extensions are
    supported, you enabled them while creating the Vulkan device passed as
    VmaAllocatorCreateInfo::device, and you want them to be used internally by this
    library:

    - VK_KHR_get_memory_requirements2 (device extension)
    - VK_KHR_dedicated_allocation (device extension)

    When this flag is set, you may see the following warnings reported by the Vulkan
    validation layer. You can ignore them.

    > vkBindBufferMemory(): Binding memory to buffer 0x2d but vkGetBufferMemoryRequirements() has not been called on that buffer.
    */
    VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002,
    /**
    Enables usage of VK_KHR_bind_memory2 extension.

    The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`.
    When it is `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1.

    You may set this flag only if you found out that this device extension is supported,
    you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device,
    and you want it to be used internally by this library.

    The extension provides functions `vkBindBufferMemory2KHR` and `vkBindImageMemory2KHR`,
    which allow passing a chain of `pNext` structures while binding.
    This flag is required if you use `pNext` parameter in vmaBindBufferMemory2() or vmaBindImageMemory2().
    */
    VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT = 0x00000004,
    /**
    Enables usage of VK_EXT_memory_budget extension.

    You may set this flag only if you found out that this device extension is supported,
    you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device,
    and you want it to be used internally by this library, along with another instance extension
    VK_KHR_get_physical_device_properties2, which is required by it (or Vulkan 1.1, where this extension is promoted).

    The extension provides a query for current memory usage and budget, which will probably
    be more accurate than the estimation the library would otherwise use.
    */
    VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT = 0x00000008,
    /**
    Enables usage of VK_AMD_device_coherent_memory extension.

    You may set this flag only if you:

    - found out that this device extension is supported and enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device,
    - checked that `VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory` is true and set it while creating the Vulkan device,
    - want it to be used internally by this library.

    The extension and accompanying device feature provide access to memory types with
    `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` and `VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` flags.
    They are useful mostly for writing breadcrumb markers - a common method for debugging GPU crash/hang/TDR.

    When the extension is not enabled, such memory types are still enumerated, but their usage is illegal.
    To protect against this error, if you don't create the allocator with this flag, it will refuse to allocate any memory or create a custom pool in such a memory type,
    returning `VK_ERROR_FEATURE_NOT_PRESENT`.
    */
    VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT = 0x00000010,
    /**
    Enables usage of the "buffer device address" feature, which allows you to use the function
    `vkGetBufferDeviceAddress*` to get a raw GPU pointer to a buffer and pass it for usage inside a shader.

    You may set this flag only if you:

    1. (For Vulkan version < 1.2) Found as available and enabled device extension
    VK_KHR_buffer_device_address.
    This extension is promoted to core Vulkan 1.2.
    2. Found as available and enabled device feature `VkPhysicalDeviceBufferDeviceAddressFeatures::bufferDeviceAddress`.

    When this flag is set, you can create buffers with `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT` using VMA.
    The library automatically adds `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT` to
    allocated memory blocks wherever it might be needed.

    For more information, see documentation chapter \ref enabling_buffer_device_address.
    */
    VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT = 0x00000020,
    /**
    Enables usage of VK_EXT_memory_priority extension in the library.

    You may set this flag only if you found this device extension available and enabled it,
    along with `VkPhysicalDeviceMemoryPriorityFeaturesEXT::memoryPriority == VK_TRUE`,
    while creating the Vulkan device passed as VmaAllocatorCreateInfo::device.

    When this flag is used, VmaAllocationCreateInfo::priority and VmaPoolCreateInfo::priority
    are used to set priorities of allocated Vulkan memory. Without it, these variables are ignored.

    A priority must be a floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations.
    Larger values are higher priority. The granularity of the priorities is implementation-dependent.
    It is automatically passed to every call to `vkAllocateMemory` done by the library using structure `VkMemoryPriorityAllocateInfoEXT`.
    The value to be used for default priority is 0.5.
    For more details, see the documentation of the VK_EXT_memory_priority extension.
    */
    VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT = 0x00000040,

    VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocatorCreateFlagBits;
/// See #VmaAllocatorCreateFlagBits.
typedef VkFlags VmaAllocatorCreateFlags;
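/*
Example (a minimal sketch, not part of the original header): creating an
allocator with one of the flags described above. `physicalDevice`, `device`,
and `instance` are assumed to be valid Vulkan handles created elsewhere, with
VK_EXT_memory_budget and its required instance extension enabled.

\code
VmaAllocatorCreateInfo allocatorCreateInfo = {};
allocatorCreateInfo.flags = VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT;
allocatorCreateInfo.vulkanApiVersion = VK_API_VERSION_1_1;
allocatorCreateInfo.physicalDevice = physicalDevice; // Assumed to exist.
allocatorCreateInfo.device = device;                 // Assumed to exist.
allocatorCreateInfo.instance = instance;             // Assumed to exist.

VmaAllocator allocator;
VkResult res = vmaCreateAllocator(&allocatorCreateInfo, &allocator);
// ... use the allocator ...
vmaDestroyAllocator(allocator);
\endcode
*/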

/** @} */

/**
\addtogroup group_alloc
@{
*/

/// \brief Intended usage of the allocated memory.
typedef enum VmaMemoryUsage
{
    /** No intended memory usage specified.
    Use other members of VmaAllocationCreateInfo to specify your requirements.
    */
    VMA_MEMORY_USAGE_UNKNOWN = 0,
    /**
    \deprecated Obsolete, preserved for backward compatibility.
    Prefers `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
    */
    VMA_MEMORY_USAGE_GPU_ONLY = 1,
    /**
    \deprecated Obsolete, preserved for backward compatibility.
    Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` and `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT`.
    */
    VMA_MEMORY_USAGE_CPU_ONLY = 2,
    /**
    \deprecated Obsolete, preserved for backward compatibility.
    Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`, prefers `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
    */
    VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
    /**
    \deprecated Obsolete, preserved for backward compatibility.
    Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`, prefers `VK_MEMORY_PROPERTY_HOST_CACHED_BIT`.
    */
    VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
    /**
    \deprecated Obsolete, preserved for backward compatibility.
    Prefers not `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
    */
    VMA_MEMORY_USAGE_CPU_COPY = 5,
    /**
    Lazily allocated GPU memory having `VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT`.
    Exists mostly on mobile platforms. Using it on a desktop PC or other GPUs with no such memory type present will cause the allocation to fail.

    Usage: Memory for transient attachment images (color attachments, depth attachments etc.), created with `VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT`.

    Allocations with this usage are always created as dedicated - it implies #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
    */
    VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED = 6,
    /**
    Selects best memory type automatically.
    This flag is recommended for most common use cases.

    When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT),
    you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
    in VmaAllocationCreateInfo::flags.

    It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g.
    vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo()
    and not with generic memory allocation functions.
    */
    VMA_MEMORY_USAGE_AUTO = 7,
    /**
    Selects best memory type automatically with preference for GPU (device) memory.

    When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT),
    you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
    in VmaAllocationCreateInfo::flags.

    It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g.
    vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo()
    and not with generic memory allocation functions.
    */
    VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE = 8,
    /**
    Selects best memory type automatically with preference for CPU (host) memory.

    When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT),
    you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
    in VmaAllocationCreateInfo::flags.

    It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g.
    vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo()
    and not with generic memory allocation functions.
    */
    VMA_MEMORY_USAGE_AUTO_PREFER_HOST = 9,

    VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
} VmaMemoryUsage;
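/*
Example (a minimal sketch): creating a GPU-only buffer using
VMA_MEMORY_USAGE_AUTO, which is the recommended choice for most common use
cases. `allocator` is assumed to be an existing #VmaAllocator.

\code
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; // Library picks the memory type.

VkBuffer buffer;
VmaAllocation allocation;
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
    &buffer, &allocation, NULL);
// ...
vmaDestroyBuffer(allocator, buffer, allocation);
\endcode
*/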

/// Flags to be passed as VmaAllocationCreateInfo::flags.
typedef enum VmaAllocationCreateFlagBits
{
    /** \brief Set this flag if the allocation should have its own memory block.

    Use it for special, big resources, like fullscreen images used as attachments.
    */
    VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,

    /** \brief Set this flag to only try to allocate from existing `VkDeviceMemory` blocks and never create a new such block.

    If the new allocation cannot be placed in any of the existing blocks, the allocation
    fails with the `VK_ERROR_OUT_OF_DEVICE_MEMORY` error.

    You should not use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT and
    #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT at the same time. It makes no sense.
    */
    VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
    /** \brief Set this flag to use memory that will be persistently mapped and retrieve a pointer to it.

    A pointer to the mapped memory will be returned through VmaAllocationInfo::pMappedData.

    It is valid to use this flag for an allocation made from a memory type that is not
    `HOST_VISIBLE`. The flag is then ignored and the memory is not mapped. This is
    useful if you need an allocation that is efficient to use on the GPU
    (`DEVICE_LOCAL`) and still want to map it directly if possible on platforms that
    support it (e.g. Intel GPUs).
    */
    VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004,
    /** \deprecated Preserved for backward compatibility. Consider using vmaSetAllocationName() instead.

    Set this flag to treat VmaAllocationCreateInfo::pUserData as a pointer to a
    null-terminated string. Instead of copying the pointer value, a local copy of the
    string is made and stored in the allocation's `pName`. The string is automatically
    freed together with the allocation. It is also used in vmaBuildStatsString().
    */
    VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020,
    /** Allocation will be created from upper stack in a double stack pool.

    This flag is only allowed for custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT flag.
    */
    VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = 0x00000040,
    /** Create both buffer/image and allocation, but don't bind them together.
    It is useful when you want to perform the binding yourself in order to do some more advanced binding, e.g. using some extensions.
    The flag is meaningful only with functions that bind by default: vmaCreateBuffer(), vmaCreateImage().
    Otherwise it is ignored.

    If you want to make sure the new buffer/image is not tied to the new memory allocation
    through `VkMemoryDedicatedAllocateInfoKHR` structure in case the allocation ends up in its own memory block,
    use also flag #VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT.
    */
    VMA_ALLOCATION_CREATE_DONT_BIND_BIT = 0x00000080,
    /** Create the allocation only if the additional device memory required for it, if any, won't exceed
    the memory budget. Otherwise returns `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
    */
    VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT = 0x00000100,
    /** \brief Set this flag if the allocated memory will have aliasing resources.

    Usage of this flag prevents supplying `VkMemoryDedicatedAllocateInfoKHR` when #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT is specified.
    Otherwise created dedicated memory will not be suitable for aliasing resources, resulting in Vulkan Validation Layer errors.
    */
    VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT = 0x00000200,
    /**
    Requests possibility to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT).

    - If you use #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` value,
      you must use this flag to be able to map the allocation. Otherwise, mapping is incorrect.
    - If you use other value of #VmaMemoryUsage, this flag is ignored and mapping is always possible in memory types that are `HOST_VISIBLE`.
      This includes allocations created in \ref custom_memory_pools.

    Declares that mapped memory will only be written sequentially, e.g. using `memcpy()` or a loop writing number-by-number,
    never read or accessed randomly, so a memory type can be selected that is uncached and write-combined.

    \warning Violating this declaration may work correctly, but will likely be very slow.
    Watch out for implicit reads introduced by doing e.g. `pMappedData[i] += x;`
    Better prepare your data in a local variable and `memcpy()` it to the mapped pointer all at once.
    */
    VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT = 0x00000400,
    /**
    Requests possibility to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT).

    - If you use #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` value,
      you must use this flag to be able to map the allocation. Otherwise, mapping is incorrect.
    - If you use other value of #VmaMemoryUsage, this flag is ignored and mapping is always possible in memory types that are `HOST_VISIBLE`.
      This includes allocations created in \ref custom_memory_pools.

    Declares that mapped memory can be read, written, and accessed in random order,
    so a `HOST_CACHED` memory type is required.
    */
    VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT = 0x00000800,
    /**
    Together with #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT,
    it says that despite the request for host access, a non-`HOST_VISIBLE` memory type can be selected
    if it may improve performance.

    By using this flag, you declare that you will check if the allocation ended up in a `HOST_VISIBLE` memory type
    (e.g. using vmaGetAllocationMemoryProperties()) and if not, you will create some "staging" buffer and
    issue an explicit transfer to write/read your data.
    To prepare for this possibility, don't forget to add appropriate flags like
    `VK_BUFFER_USAGE_TRANSFER_DST_BIT`, `VK_BUFFER_USAGE_TRANSFER_SRC_BIT` to the parameters of created buffer or image.
    */
    VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT = 0x00001000,
    /** Allocation strategy that chooses smallest possible free range for the allocation
    to minimize memory usage and fragmentation, possibly at the expense of allocation time.
    */
    VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = 0x00010000,
    /** Allocation strategy that chooses first suitable free range for the allocation -
    not necessarily in terms of the smallest offset but the one that is easiest and fastest to find
    to minimize allocation time, possibly at the expense of allocation quality.
    */
    VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = 0x00020000,
    /** Allocation strategy that always chooses the lowest offset in available space.
    This is not the most efficient strategy, but it achieves highly packed data.
    Used internally by defragmentation, not recommended in typical usage.
    */
    VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT  = 0x00040000,
    /** Alias to #VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT.
    */
    VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT,
    /** Alias to #VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT.
    */
    VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT,
    /** A bit mask to extract only `STRATEGY` bits from entire set of flags.
    */
    VMA_ALLOCATION_CREATE_STRATEGY_MASK =
        VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT |
        VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT |
        VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,

    VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocationCreateFlagBits;
/// See #VmaAllocationCreateFlagBits.
typedef VkFlags VmaAllocationCreateFlags;
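/*
Example (a minimal sketch): a persistently mapped staging buffer created with
#VMA_ALLOCATION_CREATE_MAPPED_BIT and
#VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT. `allocator`, `myData`,
and `myDataSize` are assumed to exist.

\code
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = myDataSize;
bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
    VMA_ALLOCATION_CREATE_MAPPED_BIT;

VkBuffer stagingBuf;
VmaAllocation stagingAlloc;
VmaAllocationInfo stagingAllocInfo;
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
    &stagingBuf, &stagingAlloc, &stagingAllocInfo);

// Write sequentially through the persistently mapped pointer, as declared.
memcpy(stagingAllocInfo.pMappedData, myData, myDataSize);
\endcode
*/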

/// Flags to be passed as VmaPoolCreateInfo::flags.
typedef enum VmaPoolCreateFlagBits
{
    /** \brief Use this flag if you always allocate only buffers and linear images or only optimal images out of this pool and so Buffer-Image Granularity can be ignored.

    This is an optional optimization flag.

    If you always allocate using vmaCreateBuffer(), vmaCreateImage(),
    vmaAllocateMemoryForBuffer(), then you don't need to use it because the allocator
    knows the exact type of your allocations, so it can handle Buffer-Image Granularity
    in the optimal way.

    If you also allocate using vmaAllocateMemoryForImage() or vmaAllocateMemory(),
    the exact type of such allocations is not known, so the allocator must be conservative
    in handling Buffer-Image Granularity, which can lead to suboptimal allocation
    (wasted memory). In that case, if you can make sure you always allocate only
    buffers and linear images or only optimal images out of this pool, use this flag
    to make the allocator disregard Buffer-Image Granularity, making allocations
    faster and more optimal.
    */
    VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,

    /** \brief Enables alternative, linear allocation algorithm in this pool.

    Specify this flag to enable the linear allocation algorithm, which always creates
    new allocations after the last one and doesn't reuse space from allocations freed in
    between. It trades memory consumption for a simplified algorithm and data
    structure, which has better performance and uses less memory for metadata.

    By using this flag, you can achieve behavior of free-at-once, stack,
    ring buffer, and double stack.
    For details, see documentation chapter \ref linear_algorithm.
    */
    VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004,

    /** Bit mask to extract only `ALGORITHM` bits from entire set of flags.
    */
    VMA_POOL_CREATE_ALGORITHM_MASK =
        VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT,

    VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaPoolCreateFlagBits;
/// Flags to be passed as VmaPoolCreateInfo::flags. See #VmaPoolCreateFlagBits.
typedef VkFlags VmaPoolCreateFlags;
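/*
Example (a minimal sketch): creating a custom pool that uses the linear
allocation algorithm, e.g. as a ring buffer of staging buffers. `allocator`
is assumed to exist; a suitable memory type index is queried first.

\code
VkBufferCreateInfo sampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
sampleBufCreateInfo.size = 0x10000; // Example size; exact value doesn't matter here.
sampleBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

VmaAllocationCreateInfo sampleAllocCreateInfo = {};
sampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
sampleAllocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT;

uint32_t memTypeIndex;
VkResult res = vmaFindMemoryTypeIndexForBufferInfo(allocator,
    &sampleBufCreateInfo, &sampleAllocCreateInfo, &memTypeIndex);

VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;

VmaPool pool;
res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
// ... allocate with VmaAllocationCreateInfo::pool = pool ...
vmaDestroyPool(allocator, pool);
\endcode
*/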

/// Flags to be passed as VmaDefragmentationInfo::flags.
typedef enum VmaDefragmentationFlagBits
{
    /** \brief Use simple but fast algorithm for defragmentation.
    May not achieve best results but will require least time to compute and least allocations to copy.
    */
    VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT = 0x1,
    /** \brief Default defragmentation algorithm, applied also when no `ALGORITHM` flag is specified.
    Offers a balance between defragmentation quality and the amount of allocations and bytes that need to be moved.
    */
    VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT = 0x2,
    /** \brief Perform full defragmentation of memory.
    Can result in notably more time to compute and allocations to copy, but will achieve best memory packing.
    */
    VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT = 0x4,
    /** \brief Use the most robust algorithm at the cost of time to compute and the number of copies to make.
    Only available when bufferImageGranularity is greater than 1, since it aims to reduce
    alignment issues between different types of resources.
    Otherwise falls back to the same behavior as #VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT.
    */
    VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT = 0x8,

    /// A bit mask to extract only `ALGORITHM` bits from entire set of flags.
    VMA_DEFRAGMENTATION_FLAG_ALGORITHM_MASK =
        VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT |
        VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT |
        VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT |
        VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT,

    VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaDefragmentationFlagBits;
/// See #VmaDefragmentationFlagBits.
typedef VkFlags VmaDefragmentationFlags;

/// Operation performed on single defragmentation move. See structure #VmaDefragmentationMove.
typedef enum VmaDefragmentationMoveOperation
{
    /// Buffer/image has been recreated at `dstTmpAllocation`, data has been copied, old buffer/image has been destroyed. `srcAllocation` should be changed to point to the new place. This is the default value set by vmaBeginDefragmentationPass().
    VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY = 0,
    /// Set this value if you cannot move the allocation. New place reserved at `dstTmpAllocation` will be freed. `srcAllocation` will remain unchanged.
    VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE = 1,
    /// Set this value if you decide to abandon the allocation and you destroyed the buffer/image. New place reserved at `dstTmpAllocation` will be freed, along with `srcAllocation`, which will be destroyed.
    VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY = 2,
} VmaDefragmentationMoveOperation;
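/*
Example (a simplified sketch): the general shape of a defragmentation loop
that uses the flags and move operations above. Recreating buffers/images and
copying their data (marked below) is the application's responsibility and is
omitted. `allocator` is assumed to exist.

\code
VmaDefragmentationInfo defragInfo = {};
defragInfo.flags = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT;

VmaDefragmentationContext defragCtx;
VkResult res = vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx);

for(;;)
{
    VmaDefragmentationPassMoveInfo pass;
    res = vmaBeginDefragmentationPass(allocator, defragCtx, &pass);
    if(res == VK_SUCCESS)
        break; // Nothing more to defragment.
    // res == VK_INCOMPLETE: process pass.pMoves[0..pass.moveCount-1] here -
    // recreate each buffer/image at its `dstTmpAllocation` and copy the data,
    // or set VmaDefragmentationMove::operation to IGNORE/DESTROY.
    res = vmaEndDefragmentationPass(allocator, defragCtx, &pass);
    if(res == VK_SUCCESS)
        break;
}

vmaEndDefragmentation(allocator, defragCtx, NULL);
\endcode
*/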

/** @} */

/**
\addtogroup group_virtual
@{
*/

/// Flags to be passed as VmaVirtualBlockCreateInfo::flags.
typedef enum VmaVirtualBlockCreateFlagBits
{
    /** \brief Enables alternative, linear allocation algorithm in this virtual block.

    Specify this flag to enable the linear allocation algorithm, which always creates
    new allocations after the last one and doesn't reuse space from allocations freed in
    between. It trades memory consumption for a simplified algorithm and data
    structure, which has better performance and uses less memory for metadata.

    By using this flag, you can achieve behavior of free-at-once, stack,
    ring buffer, and double stack.
    For details, see documentation chapter \ref linear_algorithm.
    */
    VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT = 0x00000001,

    /** \brief Bit mask to extract only `ALGORITHM` bits from entire set of flags.
    */
    VMA_VIRTUAL_BLOCK_CREATE_ALGORITHM_MASK =
        VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT,

    VMA_VIRTUAL_BLOCK_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaVirtualBlockCreateFlagBits;
/// Flags to be passed as VmaVirtualBlockCreateInfo::flags. See #VmaVirtualBlockCreateFlagBits.
typedef VkFlags VmaVirtualBlockCreateFlags;

/// Flags to be passed as VmaVirtualAllocationCreateInfo::flags.
typedef enum VmaVirtualAllocationCreateFlagBits
{
    /** \brief Allocation will be created from upper stack in a double stack pool.

    This flag is only allowed for virtual blocks created with #VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT flag.
    */
    VMA_VIRTUAL_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT,
    /** \brief Allocation strategy that tries to minimize memory usage.
    */
    VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT,
    /** \brief Allocation strategy that tries to minimize allocation time.
    */
    VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT,
    /** Allocation strategy that always chooses the lowest offset in available space.
    This is not the most efficient strategy, but it achieves highly packed data.
    */
    VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,
    /** \brief A bit mask to extract only `STRATEGY` bits from entire set of flags.

    These strategy flags are binary compatible with equivalent flags in #VmaAllocationCreateFlagBits.
    */
    VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MASK = VMA_ALLOCATION_CREATE_STRATEGY_MASK,

    VMA_VIRTUAL_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaVirtualAllocationCreateFlagBits;
/// Flags to be passed as VmaVirtualAllocationCreateInfo::flags. See #VmaVirtualAllocationCreateFlagBits.
typedef VkFlags VmaVirtualAllocationCreateFlags;
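/*
Example (a minimal sketch): basic usage of the virtual allocator with the
flags above. No real GPU memory is allocated - the block only manages offsets
within a user-defined space.

\code
VmaVirtualBlockCreateInfo blockCreateInfo = {};
blockCreateInfo.size = 1048576; // 1 MiB of virtual space.

VmaVirtualBlock block;
VkResult res = vmaCreateVirtualBlock(&blockCreateInfo, &block);

VmaVirtualAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.size = 4096;

VmaVirtualAllocation alloc;
VkDeviceSize offset;
res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, &offset);
// ... use the returned offset inside your own resource ...
vmaVirtualFree(block, alloc);
vmaDestroyVirtualBlock(block);
\endcode
*/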

/** @} */

#endif // _VMA_ENUM_DECLARATIONS

#ifndef _VMA_DATA_TYPES_DECLARATIONS

/**
\addtogroup group_init
@{ */

/** \struct VmaAllocator
\brief Represents the main object of this library, once initialized.

Fill structure #VmaAllocatorCreateInfo and call function vmaCreateAllocator() to create it.
Call function vmaDestroyAllocator() to destroy it.

It is recommended to create just one object of this type per `VkDevice` object,
right after Vulkan is initialized, and keep it alive until just before the Vulkan device is destroyed.
*/
VK_DEFINE_HANDLE(VmaAllocator)

/** @} */

/**
\addtogroup group_alloc
@{
*/

/** \struct VmaPool
\brief Represents a custom memory pool.

Fill structure VmaPoolCreateInfo and call function vmaCreatePool() to create it.
Call function vmaDestroyPool() to destroy it.

For more information see [Custom memory pools](@ref choosing_memory_type_custom_memory_pools).
*/
VK_DEFINE_HANDLE(VmaPool)

/** \struct VmaAllocation
\brief Represents a single memory allocation.

It may be either a dedicated block of `VkDeviceMemory` or a specific region of a bigger block of this type,
plus a unique offset.

There are multiple ways to create such an object.
You need to fill structure VmaAllocationCreateInfo.
For more information see [Choosing memory type](@ref choosing_memory_type).

Although the library provides convenience functions that create a Vulkan buffer or image,
allocate memory for it and bind them together,
binding of the allocation to a buffer or an image is out of scope of the allocation itself.
An allocation object can exist without a buffer/image bound to it;
binding can be done manually by the user, and the buffer/image can be destroyed
independently of the destruction of the allocation.

The object also remembers its size and some other information.
To retrieve this information, use function vmaGetAllocationInfo() and inspect
returned structure VmaAllocationInfo.
*/
VK_DEFINE_HANDLE(VmaAllocation)

/** \struct VmaDefragmentationContext
\brief An opaque object that represents a started defragmentation process.

Fill structure #VmaDefragmentationInfo and call function vmaBeginDefragmentation() to create it.
Call function vmaEndDefragmentation() to destroy it.
*/
VK_DEFINE_HANDLE(VmaDefragmentationContext)

/** @} */

/**
\addtogroup group_virtual
@{
*/

/** \struct VmaVirtualAllocation
\brief Represents a single memory allocation done inside VmaVirtualBlock.

Use it as a unique identifier of a virtual allocation within a single block.

Use value `VK_NULL_HANDLE` to represent a null/invalid allocation.
*/
VK_DEFINE_NON_DISPATCHABLE_HANDLE(VmaVirtualAllocation)

/** @} */

/**
\addtogroup group_virtual
@{
*/

/** \struct VmaVirtualBlock
\brief Handle to a virtual block object that allows using the core allocation algorithm without allocating any real GPU memory.

Fill in #VmaVirtualBlockCreateInfo structure and use vmaCreateVirtualBlock() to create it. Use vmaDestroyVirtualBlock() to destroy it.
For more information, see documentation chapter \ref virtual_allocator.

This object is not thread-safe: it should not be used from multiple threads simultaneously and must be synchronized externally.
*/
VK_DEFINE_HANDLE(VmaVirtualBlock)

/** @} */

/**
\addtogroup group_init
@{
*/

/// Callback function called after successful vkAllocateMemory.
typedef void (VKAPI_PTR* PFN_vmaAllocateDeviceMemoryFunction)(
    VmaAllocator VMA_NOT_NULL                    allocator,
    uint32_t                                     memoryType,
    VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,
    VkDeviceSize                                 size,
    void* VMA_NULLABLE                           pUserData);

/// Callback function called before vkFreeMemory.
typedef void (VKAPI_PTR* PFN_vmaFreeDeviceMemoryFunction)(
    VmaAllocator VMA_NOT_NULL                    allocator,
    uint32_t                                     memoryType,
    VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,
    VkDeviceSize                                 size,
    void* VMA_NULLABLE                           pUserData);

/** \brief Set of callbacks that the library will call for `vkAllocateMemory` and `vkFreeMemory`.

Provided for informative purpose, e.g. to gather statistics about number of
allocations or total amount of memory allocated in Vulkan.

Used in VmaAllocatorCreateInfo::pDeviceMemoryCallbacks.
*/
typedef struct VmaDeviceMemoryCallbacks
{
    /// Optional, can be null.
    PFN_vmaAllocateDeviceMemoryFunction VMA_NULLABLE pfnAllocate;
    /// Optional, can be null.
    PFN_vmaFreeDeviceMemoryFunction VMA_NULLABLE pfnFree;
    /// Optional, can be null.
    void* VMA_NULLABLE pUserData;
} VmaDeviceMemoryCallbacks;
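/*
Example (a minimal sketch): informative callbacks that track the total amount
of device memory allocated. The function names and the `myTotalBytes` counter
are hypothetical.

\code
static void VKAPI_CALL MyVmaAllocateCallback(VmaAllocator allocator,
    uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size, void* pUserData)
{
    // Called right after a successful vkAllocateMemory.
    *(VkDeviceSize*)pUserData += size;
}

static void VKAPI_CALL MyVmaFreeCallback(VmaAllocator allocator,
    uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size, void* pUserData)
{
    // Called right before vkFreeMemory.
    *(VkDeviceSize*)pUserData -= size;
}

// When creating the allocator:
//     VmaDeviceMemoryCallbacks callbacks = { MyVmaAllocateCallback, MyVmaFreeCallback, &myTotalBytes };
//     allocatorCreateInfo.pDeviceMemoryCallbacks = &callbacks;
\endcode
*/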

/** \brief Pointers to some Vulkan functions - a subset used by the library.

Used in VmaAllocatorCreateInfo::pVulkanFunctions.
*/
typedef struct VmaVulkanFunctions
{
    /// Required when using VMA_DYNAMIC_VULKAN_FUNCTIONS.
    PFN_vkGetInstanceProcAddr VMA_NULLABLE vkGetInstanceProcAddr;
    /// Required when using VMA_DYNAMIC_VULKAN_FUNCTIONS.
    PFN_vkGetDeviceProcAddr VMA_NULLABLE vkGetDeviceProcAddr;
    PFN_vkGetPhysicalDeviceProperties VMA_NULLABLE vkGetPhysicalDeviceProperties;
    PFN_vkGetPhysicalDeviceMemoryProperties VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties;
    PFN_vkAllocateMemory VMA_NULLABLE vkAllocateMemory;
    PFN_vkFreeMemory VMA_NULLABLE vkFreeMemory;
    PFN_vkMapMemory VMA_NULLABLE vkMapMemory;
    PFN_vkUnmapMemory VMA_NULLABLE vkUnmapMemory;
    PFN_vkFlushMappedMemoryRanges VMA_NULLABLE vkFlushMappedMemoryRanges;
    PFN_vkInvalidateMappedMemoryRanges VMA_NULLABLE vkInvalidateMappedMemoryRanges;
    PFN_vkBindBufferMemory VMA_NULLABLE vkBindBufferMemory;
    PFN_vkBindImageMemory VMA_NULLABLE vkBindImageMemory;
    PFN_vkGetBufferMemoryRequirements VMA_NULLABLE vkGetBufferMemoryRequirements;
    PFN_vkGetImageMemoryRequirements VMA_NULLABLE vkGetImageMemoryRequirements;
    PFN_vkCreateBuffer VMA_NULLABLE vkCreateBuffer;
    PFN_vkDestroyBuffer VMA_NULLABLE vkDestroyBuffer;
    PFN_vkCreateImage VMA_NULLABLE vkCreateImage;
    PFN_vkDestroyImage VMA_NULLABLE vkDestroyImage;
    PFN_vkCmdCopyBuffer VMA_NULLABLE vkCmdCopyBuffer;
#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
    /// Fetch "vkGetBufferMemoryRequirements2" on Vulkan >= 1.1, fetch "vkGetBufferMemoryRequirements2KHR" when using VK_KHR_dedicated_allocation extension.
    PFN_vkGetBufferMemoryRequirements2KHR VMA_NULLABLE vkGetBufferMemoryRequirements2KHR;
    /// Fetch "vkGetImageMemoryRequirements2" on Vulkan >= 1.1, fetch "vkGetImageMemoryRequirements2KHR" when using VK_KHR_dedicated_allocation extension.
    PFN_vkGetImageMemoryRequirements2KHR VMA_NULLABLE vkGetImageMemoryRequirements2KHR;
#endif
#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
    /// Fetch "vkBindBufferMemory2" on Vulkan >= 1.1, fetch "vkBindBufferMemory2KHR" when using VK_KHR_bind_memory2 extension.
    PFN_vkBindBufferMemory2KHR VMA_NULLABLE vkBindBufferMemory2KHR;
    /// Fetch "vkBindImageMemory2" on Vulkan >= 1.1, fetch "vkBindImageMemory2KHR" when using VK_KHR_bind_memory2 extension.
    PFN_vkBindImageMemory2KHR VMA_NULLABLE vkBindImageMemory2KHR;
#endif
#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
    PFN_vkGetPhysicalDeviceMemoryProperties2KHR VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties2KHR;
#endif
#if VMA_VULKAN_VERSION >= 1003000
    /// Fetch from "vkGetDeviceBufferMemoryRequirements" on Vulkan >= 1.3, but you can also fetch it from "vkGetDeviceBufferMemoryRequirementsKHR" if you enabled extension VK_KHR_maintenance4.
    PFN_vkGetDeviceBufferMemoryRequirements VMA_NULLABLE vkGetDeviceBufferMemoryRequirements;
    /// Fetch from "vkGetDeviceImageMemoryRequirements" on Vulkan >= 1.3, but you can also fetch it from "vkGetDeviceImageMemoryRequirementsKHR" if you enabled extension VK_KHR_maintenance4.
    PFN_vkGetDeviceImageMemoryRequirements VMA_NULLABLE vkGetDeviceImageMemoryRequirements;
#endif
} VmaVulkanFunctions;
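/*
Example (a minimal sketch, assuming the library was compiled with
VMA_DYNAMIC_VULKAN_FUNCTIONS set to 1 and the loader's `vkGetInstanceProcAddr`
and `vkGetDeviceProcAddr` symbols are available): providing only the two
"ProcAddr" entry points and letting VMA fetch the remaining pointers itself.

\code
VmaVulkanFunctions vulkanFunctions = {};
vulkanFunctions.vkGetInstanceProcAddr = vkGetInstanceProcAddr;
vulkanFunctions.vkGetDeviceProcAddr = vkGetDeviceProcAddr;

VmaAllocatorCreateInfo allocatorCreateInfo = {};
// ... fill other members ...
allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;
\endcode
*/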
/// Description of an Allocator to be created.
typedef struct VmaAllocatorCreateInfo
{
    /// Flags for created allocator. Use #VmaAllocatorCreateFlagBits enum.
    VmaAllocatorCreateFlags flags;
    /// Vulkan physical device.
    /** It must be valid throughout whole lifetime of created allocator. */
    VkPhysicalDevice VMA_NOT_NULL physicalDevice;
    /// Vulkan device.
    /** It must be valid throughout whole lifetime of created allocator. */
    VkDevice VMA_NOT_NULL device;
    /// Preferred size of a single `VkDeviceMemory` block to be allocated from large heaps > 1 GiB. Optional.
    /** Set to 0 to use default, which is currently 256 MiB. */
    VkDeviceSize preferredLargeHeapBlockSize;
    /// Custom CPU memory allocation callbacks. Optional.
    /** Optional, can be null. When specified, will also be used for all CPU-side memory allocations. */
    const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks;
    /// Informative callbacks for `vkAllocateMemory`, `vkFreeMemory`. Optional.
    /** Optional, can be null. */
    const VmaDeviceMemoryCallbacks* VMA_NULLABLE pDeviceMemoryCallbacks;
    /** \brief Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out of particular Vulkan memory heap.

    If not NULL, it must be a pointer to an array of
    `VkPhysicalDeviceMemoryProperties::memoryHeapCount` elements, defining limit on
    maximum number of bytes that can be allocated out of particular Vulkan memory
    heap.

    Any of the elements may be equal to `VK_WHOLE_SIZE`, which means no limit on that
    heap. This is also the default in case of `pHeapSizeLimit` = NULL.

    If there is a limit defined for a heap:

    - If user tries to allocate more memory from that heap using this allocator,
      the allocation fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
    - If the limit is smaller than heap size reported in `VkMemoryHeap::size`, the
      value of this limit will be reported instead when using vmaGetMemoryProperties().

    Warning! Using this feature may not be equivalent to installing a GPU with
    a smaller amount of memory, because the graphics driver doesn't necessarily fail new
    allocations with the `VK_ERROR_OUT_OF_DEVICE_MEMORY` result when memory capacity is
    exceeded. It may return success and just silently migrate some device memory
    blocks to system RAM. This driver behavior can also be controlled using
    the VK_AMD_memory_overallocation_behavior extension.
    */
    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pHeapSizeLimit;

    /** \brief Pointers to Vulkan functions. Can be null.

    For details see [Pointers to Vulkan functions](@ref config_Vulkan_functions).
    */
    const VmaVulkanFunctions* VMA_NULLABLE pVulkanFunctions;
    /** \brief Handle to Vulkan instance object.

    Starting from version 3.0.0 this member is no longer optional, it must be set!
    */
    VkInstance VMA_NOT_NULL instance;
    /** \brief Optional. The highest version of Vulkan that the application is designed to use.

    It must be a value in the format created by the macro `VK_MAKE_VERSION`, or a constant like `VK_API_VERSION_1_1` or `VK_API_VERSION_1_0`.
    The patch version number specified is ignored. Only the major and minor versions are considered.
    It must be less than or equal (preferably equal) to the value passed to `vkCreateInstance` as `VkApplicationInfo::apiVersion`.
    Only versions 1.0, 1.1, 1.2, 1.3 are supported by the current implementation.
    Leaving it initialized to zero is equivalent to `VK_API_VERSION_1_0`.
    */
    uint32_t vulkanApiVersion;
#if VMA_EXTERNAL_MEMORY
    /** \brief Either null or a pointer to an array of external memory handle types for each Vulkan memory type.

    If not NULL, it must be a pointer to an array of `VkPhysicalDeviceMemoryProperties::memoryTypeCount`
    elements, defining external memory handle types of particular Vulkan memory type,
    to be passed using `VkExportMemoryAllocateInfoKHR`.

    Any of the elements may be equal to 0, which means not to use `VkExportMemoryAllocateInfoKHR` on this memory type.
    This is also the default in case of `pTypeExternalMemoryHandleTypes` = NULL.
    */
    const VkExternalMemoryHandleTypeFlagsKHR* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryTypeCount") pTypeExternalMemoryHandleTypes;
#endif // #if VMA_EXTERNAL_MEMORY
} VmaAllocatorCreateInfo;
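/*
Example (a minimal sketch): limiting the usable size of one memory heap via
`pHeapSizeLimit`, e.g. to test application behavior under memory pressure.
The heap index 0 and the 256 MiB limit are arbitrary illustration values.

\code
VkDeviceSize heapSizeLimits[VK_MAX_MEMORY_HEAPS];
for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    heapSizeLimits[i] = VK_WHOLE_SIZE; // No limit by default.
heapSizeLimits[0] = 256ull * 1024 * 1024; // Limit heap 0 to 256 MiB.

VmaAllocatorCreateInfo allocatorCreateInfo = {};
// ... fill other members ...
allocatorCreateInfo.pHeapSizeLimit = heapSizeLimits;
\endcode
*/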

/// Information about existing #VmaAllocator object.
typedef struct VmaAllocatorInfo
{
    /** \brief Handle to Vulkan instance object.

    This is the same value as has been passed through VmaAllocatorCreateInfo::instance.
    */
    VkInstance VMA_NOT_NULL instance;
    /** \brief Handle to Vulkan physical device object.

    This is the same value as has been passed through VmaAllocatorCreateInfo::physicalDevice.
    */
    VkPhysicalDevice VMA_NOT_NULL physicalDevice;
    /** \brief Handle to Vulkan device object.

    This is the same value as has been passed through VmaAllocatorCreateInfo::device.
    */
    VkDevice VMA_NOT_NULL device;
} VmaAllocatorInfo;

/** @} */

/**
\addtogroup group_stats
@{
*/

/** \brief Calculated statistics of memory usage e.g. in a specific memory type, heap, custom pool, or total.

These are fast to calculate.
See functions: vmaGetHeapBudgets(), vmaGetPoolStatistics().
*/
typedef struct VmaStatistics
{
    /** \brief Number of `VkDeviceMemory` objects - Vulkan memory blocks allocated.
    */
    uint32_t blockCount;
    /** \brief Number of #VmaAllocation objects allocated.

    Dedicated allocations have their own blocks, so each one adds 1 to `allocationCount` as well as `blockCount`.
    */
    uint32_t allocationCount;
    /** \brief Number of bytes allocated in `VkDeviceMemory` blocks.

    \note To avoid confusion, please be aware that what Vulkan calls an "allocation" - a whole `VkDeviceMemory` object
    (e.g. as in `VkPhysicalDeviceLimits::maxMemoryAllocationCount`) is called a "block" in VMA, while VMA calls
    "allocation" a #VmaAllocation object that represents a memory region sub-allocated from such block, usually for a single buffer or image.
    */
    VkDeviceSize blockBytes;
    /** \brief Total number of bytes occupied by all #VmaAllocation objects.

    Always less than or equal to `blockBytes`.
1131     Difference `(blockBytes - allocationBytes)` is the amount of memory allocated from Vulkan
1132     but unused by any #VmaAllocation.
1133     */
1134     VkDeviceSize allocationBytes;
1135 } VmaStatistics;
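
/*
A brief example of consuming #VmaStatistics (an illustrative sketch; `allocator` and
`pool` are assumed to be valid objects created earlier by the application):

\code
VmaStatistics stats;
vmaGetPoolStatistics(allocator, pool, &stats);
// Memory allocated from Vulkan but not used by any VmaAllocation:
VkDeviceSize unusedBytes = stats.blockBytes - stats.allocationBytes;
\endcode
*/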
1136 
1137 /** \brief More detailed statistics than #VmaStatistics.
1138 
1139 These are slower to calculate. Use for debugging purposes.
1140 See functions: vmaCalculateStatistics(), vmaCalculatePoolStatistics().
1141 
The previous version of the statistics API provided averages, but they have been removed
because they can be easily calculated as:

\code
// Note: guard against division by zero when allocationCount or unusedRangeCount is 0.
VkDeviceSize allocationSizeAvg = detailedStats.statistics.allocationBytes / detailedStats.statistics.allocationCount;
VkDeviceSize unusedBytes = detailedStats.statistics.blockBytes - detailedStats.statistics.allocationBytes;
VkDeviceSize unusedRangeSizeAvg = unusedBytes / detailedStats.unusedRangeCount;
\endcode
1150 */
1151 typedef struct VmaDetailedStatistics
1152 {
1153     /// Basic statistics.
1154     VmaStatistics statistics;
1155     /// Number of free ranges of memory between allocations.
1156     uint32_t unusedRangeCount;
1157     /// Smallest allocation size. `VK_WHOLE_SIZE` if there are 0 allocations.
1158     VkDeviceSize allocationSizeMin;
1159     /// Largest allocation size. 0 if there are 0 allocations.
1160     VkDeviceSize allocationSizeMax;
1161     /// Smallest empty range size. `VK_WHOLE_SIZE` if there are 0 empty ranges.
1162     VkDeviceSize unusedRangeSizeMin;
1163     /// Largest empty range size. 0 if there are 0 empty ranges.
1164     VkDeviceSize unusedRangeSizeMax;
1165 } VmaDetailedStatistics;
1166 
/** \brief General statistics from the current state of the Allocator -
total memory usage across all memory heaps and types.
1169 
1170 These are slower to calculate. Use for debugging purposes.
1171 See function vmaCalculateStatistics().
1172 */
1173 typedef struct VmaTotalStatistics
1174 {
1175     VmaDetailedStatistics memoryType[VK_MAX_MEMORY_TYPES];
1176     VmaDetailedStatistics memoryHeap[VK_MAX_MEMORY_HEAPS];
1177     VmaDetailedStatistics total;
1178 } VmaTotalStatistics;
1179 
1180 /** \brief Statistics of current memory usage and available budget for a specific memory heap.
1181 
1182 These are fast to calculate.
1183 See function vmaGetHeapBudgets().
1184 */
1185 typedef struct VmaBudget
1186 {
1187     /** \brief Statistics fetched from the library.
1188     */
1189     VmaStatistics statistics;
1190     /** \brief Estimated current memory usage of the program, in bytes.
1191 
1192     Fetched from system using VK_EXT_memory_budget extension if enabled.
1193 
    It might be different from `statistics.blockBytes` (usually higher) due to additional implicit objects
    also occupying the memory, like swapchain, pipelines, descriptor pools, command buffers, or
    `VkDeviceMemory` blocks allocated outside of this library, if any.
1197     */
1198     VkDeviceSize usage;
1199     /** \brief Estimated amount of memory available to the program, in bytes.
1200 
1201     Fetched from system using VK_EXT_memory_budget extension if enabled.
1202 
    It might be different (most probably smaller) than the heap size reported in
    `VkPhysicalDeviceMemoryProperties::memoryHeaps[heapIndex].size` due to factors
    external to the program, decided by the operating system.
    Difference `budget - usage` is the amount of additional memory that can probably
    be allocated without issues. Exceeding the budget may result in various problems.
1207     */
1208     VkDeviceSize budget;
1209 } VmaBudget;
1210 
1211 /** @} */
1212 
1213 /**
1214 \addtogroup group_alloc
1215 @{
1216 */
1217 
1218 /** \brief Parameters of new #VmaAllocation.
1219 
1220 To be used with functions like vmaCreateBuffer(), vmaCreateImage(), and many others.
1221 */
1222 typedef struct VmaAllocationCreateInfo
1223 {
1224     /// Use #VmaAllocationCreateFlagBits enum.
1225     VmaAllocationCreateFlags flags;
1226     /** \brief Intended usage of memory.
1227 
    You can leave it as #VMA_MEMORY_USAGE_UNKNOWN if you specify memory requirements in another way. \n
    If `pool` is not null, this member is ignored.
1230     */
1231     VmaMemoryUsage usage;
1232     /** \brief Flags that must be set in a Memory Type chosen for an allocation.
1233 
    Leave 0 if you specify memory requirements in another way. \n
    If `pool` is not null, this member is ignored.
    */
1236     VkMemoryPropertyFlags requiredFlags;
1237     /** \brief Flags that preferably should be set in a memory type chosen for an allocation.
1238 
1239     Set to 0 if no additional flags are preferred. \n
1240     If `pool` is not null, this member is ignored. */
1241     VkMemoryPropertyFlags preferredFlags;
1242     /** \brief Bitmask containing one bit set for every memory type acceptable for this allocation.
1243 
1244     Value 0 is equivalent to `UINT32_MAX` - it means any memory type is accepted if
1245     it meets other requirements specified by this structure, with no further
1246     restrictions on memory type index. \n
1247     If `pool` is not null, this member is ignored.
1248     */
1249     uint32_t memoryTypeBits;
1250     /** \brief Pool that this allocation should be created in.
1251 
1252     Leave `VK_NULL_HANDLE` to allocate from default pool. If not null, members:
1253     `usage`, `requiredFlags`, `preferredFlags`, `memoryTypeBits` are ignored.
1254     */
1255     VmaPool VMA_NULLABLE pool;
1256     /** \brief Custom general-purpose pointer that will be stored in #VmaAllocation, can be read as VmaAllocationInfo::pUserData and changed using vmaSetAllocationUserData().
1257 
    If #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is used, it must be either
    null or a pointer to a null-terminated string. The string will then be copied to
    an internal buffer, so it doesn't need to remain valid after the allocation call.
1261     */
1262     void* VMA_NULLABLE pUserData;
1263     /** \brief A floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations.
1264 
1265     It is used only when #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT flag was used during creation of the #VmaAllocator object
1266     and this allocation ends up as dedicated or is explicitly forced as dedicated using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
1267     Otherwise, it has the priority of a memory block where it is placed and this variable is ignored.
1268     */
1269     float priority;
1270 } VmaAllocationCreateInfo;
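
/*
A typical way to fill #VmaAllocationCreateInfo when creating a buffer (an illustrative
sketch; `allocator` is assumed to be a valid #VmaAllocator, and the buffer size and
usage flags are sample values):

\code
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;

VkBuffer buf;
VmaAllocation alloc;
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, NULL);
\endcode
*/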
1271 
/// Describes parameters of created #VmaPool.
1273 typedef struct VmaPoolCreateInfo
1274 {
1275     /** \brief Vulkan memory type index to allocate this pool from.
1276     */
1277     uint32_t memoryTypeIndex;
1278     /** \brief Use combination of #VmaPoolCreateFlagBits.
1279     */
1280     VmaPoolCreateFlags flags;
1281     /** \brief Size of a single `VkDeviceMemory` block to be allocated as part of this pool, in bytes. Optional.
1282 
1283     Specify nonzero to set explicit, constant size of memory blocks used by this
1284     pool.
1285 
    Leave 0 to use the default and let the library manage block sizes automatically.
1287     Sizes of particular blocks may vary.
1288     In this case, the pool will also support dedicated allocations.
1289     */
1290     VkDeviceSize blockSize;
1291     /** \brief Minimum number of blocks to be always allocated in this pool, even if they stay empty.
1292 
    Set to 0 to have no preallocated blocks and allow the pool to be completely empty.
1294     */
1295     size_t minBlockCount;
1296     /** \brief Maximum number of blocks that can be allocated in this pool. Optional.
1297 
    Set to 0 to use the default, which is `SIZE_MAX`, meaning no limit.

    Set to the same value as VmaPoolCreateInfo::minBlockCount to have a fixed amount of memory allocated
    throughout the whole lifetime of this pool.
1302     */
1303     size_t maxBlockCount;
1304     /** \brief A floating-point value between 0 and 1, indicating the priority of the allocations in this pool relative to other memory allocations.
1305 
1306     It is used only when #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT flag was used during creation of the #VmaAllocator object.
1307     Otherwise, this variable is ignored.
1308     */
1309     float priority;
1310     /** \brief Additional minimum alignment to be used for all allocations created from this pool. Can be 0.
1311 
    Leave 0 (default) not to impose any additional alignment. If not 0, it must be a power of two.
    It can be useful in cases where the alignment returned by Vulkan functions like `vkGetBufferMemoryRequirements` is not enough,
    e.g. when doing interop with OpenGL.
1315     */
1316     VkDeviceSize minAllocationAlignment;
    /** \brief Additional `pNext` chain to be attached to `VkMemoryAllocateInfo` used for every allocation made by this pool. Optional.

    Can be null. If not null, it must point to a `pNext` chain of structures that can be attached to `VkMemoryAllocateInfo`.
    It can be useful for special needs such as adding `VkExportMemoryAllocateInfoKHR`.
    Structures pointed to by this member must remain alive and unchanged for the whole lifetime of the custom pool.

    Please note that some structures, e.g. `VkMemoryPriorityAllocateInfoEXT`, `VkMemoryDedicatedAllocateInfoKHR`,
    can be attached automatically by this library when using other, more convenient features of it.
1325     */
1326     void* VMA_NULLABLE VMA_EXTENDS_VK_STRUCT(VkMemoryAllocateInfo) pMemoryAllocateNext;
1327 } VmaPoolCreateInfo;
1328 
1329 /** @} */
1330 
1331 /**
1332 \addtogroup group_alloc
1333 @{
1334 */
1335 
/// Parameters of #VmaAllocation objects that can be retrieved using function vmaGetAllocationInfo().
1337 typedef struct VmaAllocationInfo
1338 {
1339     /** \brief Memory type index that this allocation was allocated from.
1340 
1341     It never changes.
1342     */
1343     uint32_t memoryType;
1344     /** \brief Handle to Vulkan memory object.
1345 
1346     Same memory object can be shared by multiple allocations.
1347 
1348     It can change after the allocation is moved during \ref defragmentation.
1349     */
1350     VkDeviceMemory VMA_NULLABLE_NON_DISPATCHABLE deviceMemory;
1351     /** \brief Offset in `VkDeviceMemory` object to the beginning of this allocation, in bytes. `(deviceMemory, offset)` pair is unique to this allocation.
1352 
    You usually don't need to use this offset. If you create a buffer or an image together with the allocation using e.g.
    vmaCreateBuffer() or vmaCreateImage(), functions that operate on those resources refer to the beginning of the buffer or image,
    not the entire device memory block. Functions like vmaMapMemory() and vmaBindBufferMemory() also refer to the beginning of the allocation
    and apply this offset automatically.
1357 
1358     It can change after the allocation is moved during \ref defragmentation.
1359     */
1360     VkDeviceSize offset;
1361     /** \brief Size of this allocation, in bytes.
1362 
1363     It never changes.
1364 
    \note The allocation size returned in this variable may be greater than the size
    requested for the resource e.g. as `VkBufferCreateInfo::size`. The whole size of the
    allocation is accessible for operations on memory e.g. using a pointer after
    mapping with vmaMapMemory(), but operations on the resource e.g. using
    `vkCmdCopyBuffer` must be limited to the size of the resource.
1370     */
1371     VkDeviceSize size;
1372     /** \brief Pointer to the beginning of this allocation as mapped data.
1373 
1374     If the allocation hasn't been mapped using vmaMapMemory() and hasn't been
1375     created with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag, this value is null.
1376 
1377     It can change after call to vmaMapMemory(), vmaUnmapMemory().
1378     It can also change after the allocation is moved during \ref defragmentation.
1379     */
1380     void* VMA_NULLABLE pMappedData;
1381     /** \brief Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
1382 
1383     It can change after call to vmaSetAllocationUserData() for this allocation.
1384     */
1385     void* VMA_NULLABLE pUserData;
1386     /** \brief Custom allocation name that was set with vmaSetAllocationName().
1387 
1388     It can change after call to vmaSetAllocationName() for this allocation.
1389 
1390     Another way to set custom name is to pass it in VmaAllocationCreateInfo::pUserData with
1391     additional flag #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT set [DEPRECATED].
1392     */
1393     const char* VMA_NULLABLE pName;
1394 } VmaAllocationInfo;
1395 
1396 /** Callback function called during vmaBeginDefragmentation() to check custom criterion about ending current defragmentation pass.
1397 
Should return true if the defragmentation needs to stop the current pass.
1399 */
1400 typedef VkBool32 (VKAPI_PTR* PFN_vmaCheckDefragmentationBreakFunction)(void* VMA_NULLABLE pUserData);
1401 
1402 /** \brief Parameters for defragmentation.
1403 
1404 To be used with function vmaBeginDefragmentation().
1405 */
1406 typedef struct VmaDefragmentationInfo
1407 {
1408     /// \brief Use combination of #VmaDefragmentationFlagBits.
1409     VmaDefragmentationFlags flags;
1410     /** \brief Custom pool to be defragmented.
1411 
1412     If null then default pools will undergo defragmentation process.
1413     */
1414     VmaPool VMA_NULLABLE pool;
    /** \brief Maximum number of bytes that can be copied during a single pass, while moving allocations to different places.
1416 
1417     `0` means no limit.
1418     */
1419     VkDeviceSize maxBytesPerPass;
    /** \brief Maximum number of allocations that can be moved during a single pass to a different place.
1421 
1422     `0` means no limit.
1423     */
1424     uint32_t maxAllocationsPerPass;
    /** \brief Optional custom callback for stopping the defragmentation started with vmaBeginDefragmentation().

    It has to return true to break the current defragmentation pass.
1428     */
1429     PFN_vmaCheckDefragmentationBreakFunction VMA_NULLABLE pfnBreakCallback;
    /// \brief Optional data to pass to the custom callback for stopping a defragmentation pass.
1431     void* VMA_NULLABLE pBreakCallbackUserData;
1432 } VmaDefragmentationInfo;
1433 
1434 /// Single move of an allocation to be done for defragmentation.
1435 typedef struct VmaDefragmentationMove
1436 {
1437     /// Operation to be performed on the allocation by vmaEndDefragmentationPass(). Default value is #VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY. You can modify it.
1438     VmaDefragmentationMoveOperation operation;
1439     /// Allocation that should be moved.
1440     VmaAllocation VMA_NOT_NULL srcAllocation;
1441     /** \brief Temporary allocation pointing to destination memory that will replace `srcAllocation`.
1442 
1443     \warning Do not store this allocation in your data structures! It exists only temporarily, for the duration of the defragmentation pass,
1444     to be used for binding new buffer/image to the destination memory using e.g. vmaBindBufferMemory().
1445     vmaEndDefragmentationPass() will destroy it and make `srcAllocation` point to this memory.
1446     */
1447     VmaAllocation VMA_NOT_NULL dstTmpAllocation;
1448 } VmaDefragmentationMove;
1449 
1450 /** \brief Parameters for incremental defragmentation steps.
1451 
1452 To be used with function vmaBeginDefragmentationPass().
1453 */
1454 typedef struct VmaDefragmentationPassMoveInfo
1455 {
1456     /// Number of elements in the `pMoves` array.
1457     uint32_t moveCount;
1458     /** \brief Array of moves to be performed by the user in the current defragmentation pass.
1459 
1460     Pointer to an array of `moveCount` elements, owned by VMA, created in vmaBeginDefragmentationPass(), destroyed in vmaEndDefragmentationPass().
1461 
1462     For each element, you should:
1463 
    1. Create a new buffer/image in the place pointed to by VmaDefragmentationMove::dstTmpAllocation, e.g. binding it using vmaBindBufferMemory() or vmaBindImageMemory().
    2. Copy data from the VmaDefragmentationMove::srcAllocation e.g. using `vkCmdCopyBuffer`, `vkCmdCopyImage`.
    3. Make sure these commands finished executing on the GPU.
    4. Destroy the old buffer/image.
1468 
    Only then can you finish the defragmentation pass by calling vmaEndDefragmentationPass().
1470     After this call, the allocation will point to the new place in memory.
1471 
    Alternatively, if you cannot move a specific allocation, you can set VmaDefragmentationMove::operation to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE.
1473 
1474     Alternatively, if you decide you want to completely remove the allocation:
1475 
1476     1. Destroy its buffer/image.
1477     2. Set VmaDefragmentationMove::operation to #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY.
1478 
1479     Then, after vmaEndDefragmentationPass() the allocation will be freed.
1480     */
1481     VmaDefragmentationMove* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(moveCount) pMoves;
1482 } VmaDefragmentationPassMoveInfo;
1483 
1484 /// Statistics returned for defragmentation process in function vmaEndDefragmentation().
1485 typedef struct VmaDefragmentationStats
1486 {
1487     /// Total number of bytes that have been copied while moving allocations to different places.
1488     VkDeviceSize bytesMoved;
1489     /// Total number of bytes that have been released to the system by freeing empty `VkDeviceMemory` objects.
1490     VkDeviceSize bytesFreed;
1491     /// Number of allocations that have been moved to different places.
1492     uint32_t allocationsMoved;
1493     /// Number of empty `VkDeviceMemory` objects that have been released to the system.
1494     uint32_t deviceMemoryBlocksFreed;
1495 } VmaDefragmentationStats;
1496 
1497 /** @} */
1498 
1499 /**
1500 \addtogroup group_virtual
1501 @{
1502 */
1503 
1504 /// Parameters of created #VmaVirtualBlock object to be passed to vmaCreateVirtualBlock().
1505 typedef struct VmaVirtualBlockCreateInfo
1506 {
1507     /** \brief Total size of the virtual block.
1508 
    Sizes can be expressed in bytes or any units you want, as long as you are consistent in using them.
    For example, if you allocate from some array of structures, 1 can mean a single instance of an entire structure.
1511     */
1512     VkDeviceSize size;
1513 
1514     /** \brief Use combination of #VmaVirtualBlockCreateFlagBits.
1515     */
1516     VmaVirtualBlockCreateFlags flags;
1517 
    /** \brief Custom CPU memory allocation callbacks. Optional.

    Can be null. When specified, they will be used for all CPU-side memory allocations.
1521     */
1522     const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks;
1523 } VmaVirtualBlockCreateInfo;
1524 
1525 /// Parameters of created virtual allocation to be passed to vmaVirtualAllocate().
1526 typedef struct VmaVirtualAllocationCreateInfo
1527 {
1528     /** \brief Size of the allocation.
1529 
1530     Cannot be zero.
1531     */
1532     VkDeviceSize size;
1533     /** \brief Required alignment of the allocation. Optional.
1534 
    Must be a power of two. The special value 0 has the same meaning as 1 - no special alignment is required, so the allocation can start at any offset.
1536     */
1537     VkDeviceSize alignment;
1538     /** \brief Use combination of #VmaVirtualAllocationCreateFlagBits.
1539     */
1540     VmaVirtualAllocationCreateFlags flags;
1541     /** \brief Custom pointer to be associated with the allocation. Optional.
1542 
1543     It can be any value and can be used for user-defined purposes. It can be fetched or changed later.
1544     */
1545     void* VMA_NULLABLE pUserData;
1546 } VmaVirtualAllocationCreateInfo;
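
/*
A minimal sketch of using the virtual allocator with these structures (illustrative only;
error handling is abbreviated and sizes are sample values):

\code
VmaVirtualBlockCreateInfo blockCreateInfo = {};
blockCreateInfo.size = 1048576; // 1 MB, or any unit you choose to work in.

VmaVirtualBlock block;
VkResult res = vmaCreateVirtualBlock(&blockCreateInfo, &block);

VmaVirtualAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.size = 4096;

VmaVirtualAllocation alloc;
VkDeviceSize offset;
res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, &offset);
// On success, `offset` is the place of the allocation within the block.

vmaVirtualFree(block, alloc);
vmaDestroyVirtualBlock(block);
\endcode
*/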
1547 
1548 /// Parameters of an existing virtual allocation, returned by vmaGetVirtualAllocationInfo().
1549 typedef struct VmaVirtualAllocationInfo
1550 {
1551     /** \brief Offset of the allocation.
1552 
    The offset at which the allocation was made within the virtual block.
1554     */
1555     VkDeviceSize offset;
1556     /** \brief Size of the allocation.
1557 
1558     Same value as passed in VmaVirtualAllocationCreateInfo::size.
1559     */
1560     VkDeviceSize size;
1561     /** \brief Custom pointer associated with the allocation.
1562 
1563     Same value as passed in VmaVirtualAllocationCreateInfo::pUserData or to vmaSetVirtualAllocationUserData().
1564     */
1565     void* VMA_NULLABLE pUserData;
1566 } VmaVirtualAllocationInfo;
1567 
1568 /** @} */
1569 
1570 #endif // _VMA_DATA_TYPES_DECLARATIONS
1571 
1572 #ifndef _VMA_FUNCTION_HEADERS
1573 
1574 /**
1575 \addtogroup group_init
1576 @{
1577 */
1578 
1579 /// Creates #VmaAllocator object.
1580 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
1581     const VmaAllocatorCreateInfo* VMA_NOT_NULL pCreateInfo,
1582     VmaAllocator VMA_NULLABLE* VMA_NOT_NULL pAllocator);
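
/*
A minimal initialization sketch (illustrative; `instance`, `physicalDevice`, and `device`
are assumed to be valid Vulkan handles created earlier by the application):

\code
VmaAllocatorCreateInfo allocatorCreateInfo = {};
allocatorCreateInfo.vulkanApiVersion = VK_API_VERSION_1_2;
allocatorCreateInfo.instance = instance;
allocatorCreateInfo.physicalDevice = physicalDevice;
allocatorCreateInfo.device = device;

VmaAllocator allocator;
VkResult res = vmaCreateAllocator(&allocatorCreateInfo, &allocator);
// ... use the allocator ...
vmaDestroyAllocator(allocator);
\endcode
*/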
1583 
1584 /// Destroys allocator object.
1585 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
1586     VmaAllocator VMA_NULLABLE allocator);
1587 
1588 /** \brief Returns information about existing #VmaAllocator object - handle to Vulkan device etc.
1589 
It might be useful if you want to keep just the #VmaAllocator handle and fetch the other required handles to
`VkPhysicalDevice`, `VkDevice`, etc. every time you need them, using this function.
1592 */
1593 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(
1594     VmaAllocator VMA_NOT_NULL allocator,
1595     VmaAllocatorInfo* VMA_NOT_NULL pAllocatorInfo);
1596 
1597 /**
PhysicalDeviceProperties are fetched from physicalDevice by the allocator.
You can access them here, without fetching them again on your own.
1600 */
1601 VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
1602     VmaAllocator VMA_NOT_NULL allocator,
1603     const VkPhysicalDeviceProperties* VMA_NULLABLE* VMA_NOT_NULL ppPhysicalDeviceProperties);
1604 
1605 /**
PhysicalDeviceMemoryProperties are fetched from physicalDevice by the allocator.
You can access them here, without fetching them again on your own.
1608 */
1609 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
1610     VmaAllocator VMA_NOT_NULL allocator,
1611     const VkPhysicalDeviceMemoryProperties* VMA_NULLABLE* VMA_NOT_NULL ppPhysicalDeviceMemoryProperties);
1612 
1613 /**
1614 \brief Given Memory Type Index, returns Property Flags of this memory type.
1615 
1616 This is just a convenience function. Same information can be obtained using
1617 vmaGetMemoryProperties().
1618 */
1619 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
1620     VmaAllocator VMA_NOT_NULL allocator,
1621     uint32_t memoryTypeIndex,
1622     VkMemoryPropertyFlags* VMA_NOT_NULL pFlags);
1623 
1624 /** \brief Sets index of the current frame.
1625 */
1626 VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
1627     VmaAllocator VMA_NOT_NULL allocator,
1628     uint32_t frameIndex);
1629 
1630 /** @} */
1631 
1632 /**
1633 \addtogroup group_stats
1634 @{
1635 */
1636 
1637 /** \brief Retrieves statistics from current state of the Allocator.
1638 
1639 This function is called "calculate" not "get" because it has to traverse all
1640 internal data structures, so it may be quite slow. Use it for debugging purposes.
For faster but less detailed statistics, suitable to be called every frame or with every allocation,
use vmaGetHeapBudgets().
1643 
1644 Note that when using allocator from multiple threads, returned information may immediately
1645 become outdated.
1646 */
1647 VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStatistics(
1648     VmaAllocator VMA_NOT_NULL allocator,
1649     VmaTotalStatistics* VMA_NOT_NULL pStats);
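
/*
A brief usage sketch (illustrative; `allocator` is assumed to be a valid #VmaAllocator):

\code
VmaTotalStatistics stats;
vmaCalculateStatistics(allocator, &stats);
// Total bytes allocated from Vulkan across all memory heaps and types:
VkDeviceSize totalBlockBytes = stats.total.statistics.blockBytes;
\endcode
*/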
1650 
1651 /** \brief Retrieves information about current memory usage and budget for all memory heaps.
1652 
1653 \param allocator
\param[out] pBudgets Must point to an array with at least as many elements as the number of memory heaps in the physical device used.
1655 
1656 This function is called "get" not "calculate" because it is very fast, suitable to be called
1657 every frame or every allocation. For more detailed statistics use vmaCalculateStatistics().
1658 
1659 Note that when using allocator from multiple threads, returned information may immediately
1660 become outdated.
1661 */
1662 VMA_CALL_PRE void VMA_CALL_POST vmaGetHeapBudgets(
1663     VmaAllocator VMA_NOT_NULL allocator,
1664     VmaBudget* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pBudgets);
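
/*
A brief usage sketch (illustrative; `allocator` is assumed to be a valid #VmaAllocator):

\code
const VkPhysicalDeviceMemoryProperties* memProps = NULL;
vmaGetMemoryProperties(allocator, &memProps);

VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
vmaGetHeapBudgets(allocator, budgets);

for(uint32_t heapIndex = 0; heapIndex < memProps->memoryHeapCount; ++heapIndex)
{
    // `budget - usage` approximates how much more can be safely allocated from this heap.
    VkDeviceSize headroom = budgets[heapIndex].budget - budgets[heapIndex].usage;
}
\endcode
*/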
1665 
1666 /** @} */
1667 
1668 /**
1669 \addtogroup group_alloc
1670 @{
1671 */
1672 
1673 /**
1674 \brief Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
1675 
1676 This algorithm tries to find a memory type that:
1677 
1678 - Is allowed by memoryTypeBits.
1679 - Contains all the flags from pAllocationCreateInfo->requiredFlags.
1680 - Matches intended usage.
1681 - Has as many flags from pAllocationCreateInfo->preferredFlags as possible.
1682 
\return Returns VK_ERROR_FEATURE_NOT_PRESENT if a matching memory type is not found. Receiving such a result
from this function or any other allocating function probably means that your
device doesn't support any memory type with the requested features for the specific
type of resource you want to use it for. Please check the parameters of your
resource, like image layout (OPTIMAL versus LINEAR) or mip level count.
1688 */
1689 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
1690     VmaAllocator VMA_NOT_NULL allocator,
1691     uint32_t memoryTypeBits,
1692     const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
1693     uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
1694 
1695 /**
1696 \brief Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
1697 
It can be useful e.g. to determine the value to be used as VmaPoolCreateInfo::memoryTypeIndex.
1699 It internally creates a temporary, dummy buffer that never has memory bound.
1700 */
1701 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
1702     VmaAllocator VMA_NOT_NULL allocator,
1703     const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
1704     const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
1705     uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
1706 
1707 /**
1708 \brief Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
1709 
It can be useful e.g. to determine the value to be used as VmaPoolCreateInfo::memoryTypeIndex.
1711 It internally creates a temporary, dummy image that never has memory bound.
1712 */
1713 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
1714     VmaAllocator VMA_NOT_NULL allocator,
1715     const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
1716     const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
1717     uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
1718 
1719 /** \brief Allocates Vulkan device memory and creates #VmaPool object.
1720 
1721 \param allocator Allocator object.
1722 \param pCreateInfo Parameters of pool to create.
1723 \param[out] pPool Handle to created pool.
1724 */
1725 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
1726     VmaAllocator VMA_NOT_NULL allocator,
1727     const VmaPoolCreateInfo* VMA_NOT_NULL pCreateInfo,
1728     VmaPool VMA_NULLABLE* VMA_NOT_NULL pPool);
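
/*
A sketch of creating a custom pool for buffers of a certain kind (illustrative;
`allocator` is assumed to be a valid #VmaAllocator and error handling is abbreviated):

\code
// Sample buffer parameters - only usage and flags influence the memory type choice.
VkBufferCreateInfo sampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
sampleBufCreateInfo.size = 1024;
sampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo sampleAllocCreateInfo = {};
sampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;

uint32_t memTypeIndex;
VkResult res = vmaFindMemoryTypeIndexForBufferInfo(allocator,
    &sampleBufCreateInfo, &sampleAllocCreateInfo, &memTypeIndex);
// Check res...

VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;

VmaPool pool;
res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
\endcode
*/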
1729 
1730 /** \brief Destroys #VmaPool object and frees Vulkan device memory.
1731 */
1732 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
1733     VmaAllocator VMA_NOT_NULL allocator,
1734     VmaPool VMA_NULLABLE pool);
1735 
1736 /** @} */
1737 
1738 /**
1739 \addtogroup group_stats
1740 @{
1741 */
1742 
1743 /** \brief Retrieves statistics of existing #VmaPool object.
1744 
1745 \param allocator Allocator object.
1746 \param pool Pool object.
1747 \param[out] pPoolStats Statistics of specified pool.
1748 */
1749 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStatistics(
1750     VmaAllocator VMA_NOT_NULL allocator,
1751     VmaPool VMA_NOT_NULL pool,
1752     VmaStatistics* VMA_NOT_NULL pPoolStats);
1753 
1754 /** \brief Retrieves detailed statistics of existing #VmaPool object.
1755 
1756 \param allocator Allocator object.
1757 \param pool Pool object.
1758 \param[out] pPoolStats Statistics of specified pool.
1759 */
1760 VMA_CALL_PRE void VMA_CALL_POST vmaCalculatePoolStatistics(
1761     VmaAllocator VMA_NOT_NULL allocator,
1762     VmaPool VMA_NOT_NULL pool,
1763     VmaDetailedStatistics* VMA_NOT_NULL pPoolStats);
1764 
1765 /** @} */
1766 
1767 /**
1768 \addtogroup group_alloc
1769 @{
1770 */
1771 
1772 /** \brief Checks magic number in margins around all allocations in given memory pool in search for corruptions.
1773 
Corruption detection is enabled only when the `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero,
`VMA_DEBUG_MARGIN` is defined to nonzero, and the pool is created in a memory type that is
`HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection).
1777 
1778 Possible return values:
1779 
1780 - `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for specified pool.
1781 - `VK_SUCCESS` - corruption detection has been performed and succeeded.
1782 - `VK_ERROR_UNKNOWN` - corruption detection has been performed and found memory corruptions around one of the allocations.
1783   `VMA_ASSERT` is also fired in that case.
1784 - Other value: Error returned by Vulkan, e.g. memory mapping failure.
1785 */
1786 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(
1787     VmaAllocator VMA_NOT_NULL allocator,
1788     VmaPool VMA_NOT_NULL pool);
1789 
1790 /** \brief Retrieves name of a custom pool.
1791 
After the call, `*ppName` is either null or points to an internally-owned null-terminated string
containing the name of the pool that was previously set. The pointer becomes invalid when the pool is
destroyed or its name is changed using vmaSetPoolName().
1795 */
1796 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
1797     VmaAllocator VMA_NOT_NULL allocator,
1798     VmaPool VMA_NOT_NULL pool,
1799     const char* VMA_NULLABLE* VMA_NOT_NULL ppName);
1800 
1801 /** \brief Sets name of a custom pool.
1802 
`pName` can be either null or a pointer to a null-terminated string with a new name for the pool.
The function makes an internal copy of the string, so it can be changed or freed immediately after this call.
1805 */
1806 VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
1807     VmaAllocator VMA_NOT_NULL allocator,
1808     VmaPool VMA_NOT_NULL pool,
1809     const char* VMA_NULLABLE pName);
1810 
1811 /** \brief General purpose memory allocation.
1812 
1813 \param allocator
1814 \param pVkMemoryRequirements
1815 \param pCreateInfo
1816 \param[out] pAllocation Handle to allocated memory.
1817 \param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
1818 
1819 You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages().
1820 
1821 It is recommended to use vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage(),
1822 vmaCreateBuffer(), vmaCreateImage() instead whenever possible.
1823 */
1824 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
1825     VmaAllocator VMA_NOT_NULL allocator,
1826     const VkMemoryRequirements* VMA_NOT_NULL pVkMemoryRequirements,
1827     const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
1828     VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
1829     VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
1830 
1831 /** \brief General purpose memory allocation for multiple allocation objects at once.
1832 
1833 \param allocator Allocator object.
1834 \param pVkMemoryRequirements Memory requirements for each allocation.
1835 \param pCreateInfo Creation parameters for each allocation.
1836 \param allocationCount Number of allocations to make.
1837 \param[out] pAllocations Pointer to array that will be filled with handles to created allocations.
1838 \param[out] pAllocationInfo Optional. Pointer to array that will be filled with parameters of created allocations.
1839 
1840 You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages().
1841 
1842 Word "pages" is just a suggestion to use this function to allocate pieces of memory needed for sparse binding.
1843 It is just a general purpose allocation function able to make multiple allocations at once.
1844 It may be internally optimized to be more efficient than calling vmaAllocateMemory() `allocationCount` times.
1845 
All allocations are made using the same parameters. All of them are created out of the same memory pool and type.
If any allocation fails, all allocations already made within this function call are also freed, so that when the
returned result is not `VK_SUCCESS`, the `pAllocations` array is always entirely filled with `VK_NULL_HANDLE`.
1849 */
1850 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
1851     VmaAllocator VMA_NOT_NULL allocator,
1852     const VkMemoryRequirements* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pVkMemoryRequirements,
1853     const VmaAllocationCreateInfo* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pCreateInfo,
1854     size_t allocationCount,
1855     VmaAllocation VMA_NULLABLE* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations,
1856     VmaAllocationInfo* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationInfo);
1857 
1858 /** \brief Allocates memory suitable for given `VkBuffer`.
1859 
1860 \param allocator
1861 \param buffer
1862 \param pCreateInfo
1863 \param[out] pAllocation Handle to allocated memory.
1864 \param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
1865 
1866 It only creates #VmaAllocation. To bind the memory to the buffer, use vmaBindBufferMemory().
1867 
1868 This is a special-purpose function. In most cases you should use vmaCreateBuffer().
1869 
1870 You must free the allocation using vmaFreeMemory() when no longer needed.
1871 */
1872 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
1873     VmaAllocator VMA_NOT_NULL allocator,
1874     VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
1875     const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
1876     VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
1877     VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
1878 
1879 /** \brief Allocates memory suitable for given `VkImage`.
1880 
1881 \param allocator
1882 \param image
1883 \param pCreateInfo
1884 \param[out] pAllocation Handle to allocated memory.
1885 \param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
1886 
It only creates #VmaAllocation. To bind the memory to the image, use vmaBindImageMemory().
1888 
1889 This is a special-purpose function. In most cases you should use vmaCreateImage().
1890 
1891 You must free the allocation using vmaFreeMemory() when no longer needed.
1892 */
1893 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
1894     VmaAllocator VMA_NOT_NULL allocator,
1895     VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
1896     const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
1897     VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
1898     VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
1899 
1900 /** \brief Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
1901 
Passing `VK_NULL_HANDLE` as `allocation` is valid. Such a function call is just skipped.
1903 */
1904 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
1905     VmaAllocator VMA_NOT_NULL allocator,
1906     const VmaAllocation VMA_NULLABLE allocation);
1907 
1908 /** \brief Frees memory and destroys multiple allocations.
1909 
1910 Word "pages" is just a suggestion to use this function to free pieces of memory used for sparse binding.
1911 It is just a general purpose function to free memory and destroy allocations made using e.g. vmaAllocateMemory(),
1912 vmaAllocateMemoryPages() and other functions.
1913 It may be internally optimized to be more efficient than calling vmaFreeMemory() `allocationCount` times.
1914 
1915 Allocations in `pAllocations` array can come from any memory pools and types.
1916 Passing `VK_NULL_HANDLE` as elements of `pAllocations` array is valid. Such entries are just skipped.
1917 */
1918 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
1919     VmaAllocator VMA_NOT_NULL allocator,
1920     size_t allocationCount,
1921     const VmaAllocation VMA_NULLABLE* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations);
1922 
1923 /** \brief Returns current information about specified allocation.
1924 
1925 Current parameters of given allocation are returned in `pAllocationInfo`.
1926 
This function doesn't lock any mutex, so it should be quite efficient, but you
should still avoid calling it too often.
You can retrieve the same VmaAllocationInfo structure while creating your resource, from functions
vmaCreateBuffer() and vmaCreateImage(). You can remember it if you are sure its parameters don't change
(e.g. due to defragmentation).
1932 */
1933 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
1934     VmaAllocator VMA_NOT_NULL allocator,
1935     VmaAllocation VMA_NOT_NULL allocation,
1936     VmaAllocationInfo* VMA_NOT_NULL pAllocationInfo);
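
/*
A brief usage sketch (illustrative; `allocator` and `allocation` are assumed to be valid objects):

\code
VmaAllocationInfo allocInfo;
vmaGetAllocationInfo(allocator, allocation, &allocInfo);
// The (deviceMemory, offset) pair identifies this allocation's memory region.
VkDeviceMemory memory = allocInfo.deviceMemory;
VkDeviceSize offset = allocInfo.offset;
\endcode
*/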
1937 
1938 /** \brief Sets pUserData in given allocation to new value.
1939 
The value of pointer `pUserData` is copied to the allocation's `pUserData`.
It is opaque, so you can use it however you want - e.g.
as a pointer, an ordinal number, or some handle to your own data.
1943 */
1944 VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
1945     VmaAllocator VMA_NOT_NULL allocator,
1946     VmaAllocation VMA_NOT_NULL allocation,
1947     void* VMA_NULLABLE pUserData);
1948 
1949 /** \brief Sets pName in given allocation to new value.
1950 
`pName` must be either null or a pointer to a null-terminated string. The function
makes a local copy of the string and sets it as the allocation's `pName`. The string
passed as pName doesn't need to be valid for the whole lifetime of the allocation -
you can free it after this call. The string previously pointed to by the allocation's
`pName` is freed from memory.
1956 */
1957 VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationName(
1958     VmaAllocator VMA_NOT_NULL allocator,
1959     VmaAllocation VMA_NOT_NULL allocation,
1960     const char* VMA_NULLABLE pName);
1961 
1962 /**
1963 \brief Given an allocation, returns Property Flags of its memory type.
1964 
1965 This is just a convenience function. Same information can be obtained using
1966 vmaGetAllocationInfo() + vmaGetMemoryProperties().
1967 */
1968 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationMemoryProperties(
1969     VmaAllocator VMA_NOT_NULL allocator,
1970     VmaAllocation VMA_NOT_NULL allocation,
1971     VkMemoryPropertyFlags* VMA_NOT_NULL pFlags);
1972 
1973 /** \brief Maps memory represented by given allocation and returns pointer to it.
1974 
1975 Maps memory represented by given allocation to make it accessible to CPU code.
1976 When succeeded, `*ppData` contains pointer to first byte of this memory.
1977 
1978 \warning
If the allocation is part of a bigger `VkDeviceMemory` block, the returned pointer is
correctly offset to the beginning of the region assigned to this particular allocation.
Unlike the result of `vkMapMemory`, it points to the allocation, not to the beginning of the whole block.
You should not add VmaAllocationInfo::offset to it!
1983 
Mapping is internally reference-counted and synchronized, so although the raw Vulkan
function `vkMapMemory()` cannot be used to map the same block of `VkDeviceMemory`
multiple times simultaneously, it is safe to call this function on allocations
assigned to the same memory block. Actual Vulkan memory will be mapped on the first
mapping and unmapped on the last unmapping.
1989 
1990 If the function succeeded, you must call vmaUnmapMemory() to unmap the
1991 allocation when mapping is no longer needed or before freeing the allocation, at
1992 the latest.
1993 
It is also safe to call this function multiple times on the same allocation. You
must call vmaUnmapMemory() the same number of times as you called vmaMapMemory().
1996 
It is also safe to call this function on an allocation created with the
#VMA_ALLOCATION_CREATE_MAPPED_BIT flag. Its memory stays mapped all the time.
You must still call vmaUnmapMemory() the same number of times as you called
vmaMapMemory(). You must not call vmaUnmapMemory() an additional time to free the
"0-th" mapping made automatically due to the #VMA_ALLOCATION_CREATE_MAPPED_BIT flag.
2002 
This function fails when used on an allocation made in a memory type that is not
`HOST_VISIBLE`.

This function doesn't automatically flush or invalidate caches.
If the allocation is made from a memory type that is not `HOST_COHERENT`,
you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by the Vulkan specification.
2009 */
2010 VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
2011     VmaAllocator VMA_NOT_NULL allocator,
2012     VmaAllocation VMA_NOT_NULL allocation,
2013     void* VMA_NULLABLE* VMA_NOT_NULL ppData);
2014 
2015 /** \brief Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
2016 
2017 For details, see description of vmaMapMemory().
2018 
This function doesn't automatically flush or invalidate caches.
If the allocation is made from a memory type that is not `HOST_COHERENT`,
you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by the Vulkan specification.
2022 */
2023 VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
2024     VmaAllocator VMA_NOT_NULL allocator,
2025     VmaAllocation VMA_NOT_NULL allocation);
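
/*
A typical map-write-unmap sketch (illustrative; `allocator` and `allocation` are assumed
to be valid, with the allocation made in a HOST_VISIBLE memory type; `myData` and
`myDataSize` are placeholders for the application's source data):

\code
void* mappedData = NULL;
VkResult res = vmaMapMemory(allocator, allocation, &mappedData);
if(res == VK_SUCCESS)
{
    memcpy(mappedData, myData, myDataSize);
    // If the memory type is not HOST_COHERENT, also flush before unmapping:
    // vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
    vmaUnmapMemory(allocator, allocation);
}
\endcode
*/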
2026 
2027 /** \brief Flushes memory of given allocation.
2028 
2029 Calls `vkFlushMappedMemoryRanges()` for memory associated with given range of given allocation.
2030 It needs to be called after writing to a mapped memory for memory types that are not `HOST_COHERENT`.
2031 Unmap operation doesn't do that automatically.
2032 
- `offset` must be relative to the beginning of the allocation.
- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` to the end of the given allocation.
- `offset` and `size` don't have to be aligned.
  They are internally rounded down/up to a multiple of `nonCoherentAtomSize`.
- If `size` is 0, this call is ignored.
- If the memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`,
  this call is ignored.

Warning! `offset` and `size` are relative to the contents of the given `allocation`.
If you mean the whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively.
Do not pass the allocation's offset as `offset`!
2044 
2045 This function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is
2046 called, otherwise `VK_SUCCESS`.
2047 */
2048 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(
2049     VmaAllocator VMA_NOT_NULL allocator,
2050     VmaAllocation VMA_NOT_NULL allocation,
2051     VkDeviceSize offset,
2052     VkDeviceSize size);
2053 
2054 /** \brief Invalidates memory of given allocation.
2055 
2056 Calls `vkInvalidateMappedMemoryRanges()` for memory associated with given range of given allocation.
2057 It needs to be called before reading from a mapped memory for memory types that are not `HOST_COHERENT`.
2058 Map operation doesn't do that automatically.
2059 
- `offset` must be relative to the beginning of the allocation.
- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` to the end of the given allocation.
- `offset` and `size` don't have to be aligned.
  They are internally rounded down/up to a multiple of `nonCoherentAtomSize`.
- If `size` is 0, this call is ignored.
- If the memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`,
  this call is ignored.

Warning! `offset` and `size` are relative to the contents of the given `allocation`.
If you mean the whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively.
Do not pass the allocation's offset as `offset`!
2071 
2072 This function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if
2073 it is called, otherwise `VK_SUCCESS`.
2074 */
2075 VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(
2076     VmaAllocator VMA_NOT_NULL allocator,
2077     VmaAllocation VMA_NOT_NULL allocation,
2078     VkDeviceSize offset,
2079     VkDeviceSize size);
2080 
2081 /** \brief Flushes memory of given set of allocations.
2082 
2083 Calls `vkFlushMappedMemoryRanges()` for memory associated with given ranges of given allocations.
2084 For more information, see documentation of vmaFlushAllocation().
2085 
2086 \param allocator
2087 \param allocationCount
2088 \param allocations
\param offsets If not null, it must point to an array of offsets of regions to flush, relative to the beginning of respective allocations. Null means all offsets are zero.
\param sizes If not null, it must point to an array of sizes of regions to flush in respective allocations. Null means `VK_WHOLE_SIZE` for all allocations.
2091 
2092 This function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is
2093 called, otherwise `VK_SUCCESS`.
2094 */
2095 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(
2096     VmaAllocator VMA_NOT_NULL allocator,
2097     uint32_t allocationCount,
2098     const VmaAllocation VMA_NOT_NULL* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
2099     const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
2100     const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);
2101 
2102 /** \brief Invalidates memory of given set of allocations.
2103 
2104 Calls `vkInvalidateMappedMemoryRanges()` for memory associated with given ranges of given allocations.
2105 For more information, see documentation of vmaInvalidateAllocation().
2106 
2107 \param allocator
2108 \param allocationCount
2109 \param allocations
\param offsets If not null, it must point to an array of offsets of regions to invalidate, relative to the beginning of respective allocations. Null means all offsets are zero.
\param sizes If not null, it must point to an array of sizes of regions to invalidate in respective allocations. Null means `VK_WHOLE_SIZE` for all allocations.
2112 
2113 This function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if it is
2114 called, otherwise `VK_SUCCESS`.
2115 */
2116 VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(
2117     VmaAllocator VMA_NOT_NULL allocator,
2118     uint32_t allocationCount,
2119     const VmaAllocation VMA_NOT_NULL* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
2120     const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
2121     const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);
2122 
2123 /** \brief Checks magic number in margins around all allocations in given memory types (in both default and custom pools) in search for corruptions.
2124 
2125 \param allocator
2126 \param memoryTypeBits Bit mask, where each bit set means that a memory type with that index should be checked.
2127 
Corruption detection is enabled only when the `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero,
`VMA_DEBUG_MARGIN` is defined to nonzero, and only for memory types that are
`HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection).
2131 
2132 Possible return values:
2133 
2134 - `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for any of specified memory types.
2135 - `VK_SUCCESS` - corruption detection has been performed and succeeded.
2136 - `VK_ERROR_UNKNOWN` - corruption detection has been performed and found memory corruptions around one of the allocations.
2137   `VMA_ASSERT` is also fired in that case.
2138 - Other value: Error returned by Vulkan, e.g. memory mapping failure.
2139 */
2140 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(
2141     VmaAllocator VMA_NOT_NULL allocator,
2142     uint32_t memoryTypeBits);
2143 
2144 /** \brief Begins defragmentation process.
2145 
2146 \param allocator Allocator object.
2147 \param pInfo Structure filled with parameters of defragmentation.
2148 \param[out] pContext Context object that must be passed to vmaEndDefragmentation() to finish defragmentation.
2149 \returns
2150 - `VK_SUCCESS` if defragmentation can begin.
2151 - `VK_ERROR_FEATURE_NOT_PRESENT` if defragmentation is not supported.
2152 
2153 For more information about defragmentation, see documentation chapter:
2154 [Defragmentation](@ref defragmentation).
2155 */
2156 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentation(
2157     VmaAllocator VMA_NOT_NULL allocator,
2158     const VmaDefragmentationInfo* VMA_NOT_NULL pInfo,
2159     VmaDefragmentationContext VMA_NULLABLE* VMA_NOT_NULL pContext);
2160 
2161 /** \brief Ends defragmentation process.
2162 
2163 \param allocator Allocator object.
2164 \param context Context object that has been created by vmaBeginDefragmentation().
2165 \param[out] pStats Optional stats for the defragmentation. Can be null.
2166 
2167 Use this function to finish defragmentation started by vmaBeginDefragmentation().
2168 */
2169 VMA_CALL_PRE void VMA_CALL_POST vmaEndDefragmentation(
2170     VmaAllocator VMA_NOT_NULL allocator,
2171     VmaDefragmentationContext VMA_NOT_NULL context,
2172     VmaDefragmentationStats* VMA_NULLABLE pStats);
2173 
2174 /** \brief Starts single defragmentation pass.
2175 
2176 \param allocator Allocator object.
2177 \param context Context object that has been created by vmaBeginDefragmentation().
2178 \param[out] pPassInfo Computed information for current pass.
2179 \returns
- `VK_SUCCESS` if no more moves are possible. Then you can omit the call to vmaEndDefragmentationPass() and simply end the whole defragmentation.
- `VK_INCOMPLETE` if there are pending moves returned in `pPassInfo`. You need to perform them, call vmaEndDefragmentationPass(),
  and then preferably try another pass with vmaBeginDefragmentationPass().
2183 */
2184 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
2185     VmaAllocator VMA_NOT_NULL allocator,
2186     VmaDefragmentationContext VMA_NOT_NULL context,
2187     VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo);
2188 
2189 /** \brief Ends single defragmentation pass.
2190 
2191 \param allocator Allocator object.
2192 \param context Context object that has been created by vmaBeginDefragmentation().
2193 \param pPassInfo Computed information for current pass filled by vmaBeginDefragmentationPass() and possibly modified by you.
2194 
Returns `VK_SUCCESS` if no more moves are possible, or `VK_INCOMPLETE` if more defragmentation passes are possible.
2196 
2197 Ends incremental defragmentation pass and commits all defragmentation moves from `pPassInfo`.
2198 After this call:
2199 
- Allocations at `pPassInfo->pMoves[i].srcAllocation` that had `pPassInfo->pMoves[i].operation ==` #VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY
  (which is the default) will be pointing to the new destination place.
- Allocations at `pPassInfo->pMoves[i].srcAllocation` that had `pPassInfo->pMoves[i].operation ==` #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY
  will be freed.
2204 
If no more moves are possible you can end the whole defragmentation.
2206 */
2207 VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
2208     VmaAllocator VMA_NOT_NULL allocator,
2209     VmaDefragmentationContext VMA_NOT_NULL context,
2210     VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo);
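
/*
A sketch of the incremental defragmentation loop built from the functions above
(illustrative; `allocator` is assumed to be a valid #VmaAllocator, and error handling
and the actual GPU copies are abbreviated):

\code
VmaDefragmentationInfo defragInfo = {};
defragInfo.pool = VK_NULL_HANDLE; // Null means default pools are defragmented.

VmaDefragmentationContext defragCtx;
VkResult res = vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx);
// Check res...

for(;;)
{
    VmaDefragmentationPassMoveInfo pass;
    res = vmaBeginDefragmentationPass(allocator, defragCtx, &pass);
    if(res == VK_SUCCESS)
        break; // No more moves are possible.
    // res == VK_INCOMPLETE: process pass.pMoves[0..pass.moveCount):
    // create new buffers/images bound to dstTmpAllocation, copy the data on
    // the GPU, wait for completion, then destroy the old buffers/images.

    res = vmaEndDefragmentationPass(allocator, defragCtx, &pass);
    if(res == VK_SUCCESS)
        break;
}

VmaDefragmentationStats stats;
vmaEndDefragmentation(allocator, defragCtx, &stats);
\endcode
*/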
2211 
2212 /** \brief Binds buffer to allocation.
2213 
2214 Binds specified buffer to region of memory represented by specified allocation.
2215 Gets `VkDeviceMemory` handle and offset from the allocation.
2216 If you want to create a buffer, allocate memory for it and bind them together separately,
2217 you should use this function for binding instead of standard `vkBindBufferMemory()`,
2218 because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple
2219 allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously
2220 (which is illegal in Vulkan).
2221 
2222 It is recommended to use function vmaCreateBuffer() instead of this one.
2223 */
2224 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
2225     VmaAllocator VMA_NOT_NULL allocator,
2226     VmaAllocation VMA_NOT_NULL allocation,
2227     VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer);
2228 
2229 /** \brief Binds buffer to allocation with additional parameters.
2230 
2231 \param allocator
2232 \param allocation
2233 \param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0.
2234 \param buffer
2235 \param pNext A chain of structures to be attached to `VkBindBufferMemoryInfoKHR` structure used internally. Normally it should be null.
2236 
2237 This function is similar to vmaBindBufferMemory(), but it provides additional parameters.
2238 
2239 If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag
2240 or with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. Otherwise the call fails.
2241 */
2242 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
2243     VmaAllocator VMA_NOT_NULL allocator,
2244     VmaAllocation VMA_NOT_NULL allocation,
2245     VkDeviceSize allocationLocalOffset,
2246     VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
2247     const void* VMA_NULLABLE VMA_EXTENDS_VK_STRUCT(VkBindBufferMemoryInfoKHR) pNext);
2248 
2249 /** \brief Binds image to allocation.
2250 
2251 Binds specified image to region of memory represented by specified allocation.
2252 Gets `VkDeviceMemory` handle and offset from the allocation.
2253 If you want to create an image, allocate memory for it and bind them together separately,
2254 you should use this function for binding instead of standard `vkBindImageMemory()`,
2255 because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple
2256 allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously
2257 (which is illegal in Vulkan).
2258 
2259 It is recommended to use function vmaCreateImage() instead of this one.
2260 */
2261 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
2262     VmaAllocator VMA_NOT_NULL allocator,
2263     VmaAllocation VMA_NOT_NULL allocation,
2264     VkImage VMA_NOT_NULL_NON_DISPATCHABLE image);
2265 
2266 /** \brief Binds image to allocation with additional parameters.
2267 
2268 \param allocator
2269 \param allocation
2270 \param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0.
2271 \param image
2272 \param pNext A chain of structures to be attached to `VkBindImageMemoryInfoKHR` structure used internally. Normally it should be null.
2273 
2274 This function is similar to vmaBindImageMemory(), but it provides additional parameters.
2275 
2276 If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag
2277 or with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. Otherwise the call fails.
2278 */
2279 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
2280     VmaAllocator VMA_NOT_NULL allocator,
2281     VmaAllocation VMA_NOT_NULL allocation,
2282     VkDeviceSize allocationLocalOffset,
2283     VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
2284     const void* VMA_NULLABLE VMA_EXTENDS_VK_STRUCT(VkBindImageMemoryInfoKHR) pNext);
2285 
2286 /** \brief Creates a new `VkBuffer`, allocates and binds memory for it.
2287 
2288 \param allocator
2289 \param pBufferCreateInfo
2290 \param pAllocationCreateInfo
2291 \param[out] pBuffer Buffer that was created.
2292 \param[out] pAllocation Allocation that was created.
2293 \param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
2294 
2295 This function automatically:
2296 
2297 -# Creates buffer.
2298 -# Allocates appropriate memory for it.
2299 -# Binds the buffer with the memory.
2300 
2301 If any of these operations fail, the buffer and allocation are not created,
2302 the returned value is a negative error code, and `*pBuffer` and `*pAllocation` are null.
2303 
2304 If the function succeeded, you must destroy both the buffer and the allocation when you
2305 no longer need them, using either the convenience function vmaDestroyBuffer() or,
2306 separately, `vkDestroyBuffer()` and vmaFreeMemory().
2307 
2308 If #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag was used,
2309 the VK_KHR_dedicated_allocation extension is used internally to query the driver whether
2310 it requires or prefers the new buffer to have a dedicated allocation. If yes,
2311 and if a dedicated allocation is possible
2312 (#VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT is not used), it creates a dedicated
2313 allocation for this buffer, just like when using
2314 #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
2315 
2316 \note This function creates a new `VkBuffer`. Sub-allocation of parts of one large buffer,
2317 although recommended as a good practice, is out of scope of this library and could be implemented
2318 by the user as higher-level logic on top of VMA.
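
A minimal usage sketch (hypothetical size and usage flags; error handling omitted):

\code
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;

VkBuffer buf;
VmaAllocation alloc;
vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);

// ...

vmaDestroyBuffer(allocator, buf, alloc);
\endcode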
2319 */
2320 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
2321     VmaAllocator VMA_NOT_NULL allocator,
2322     const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
2323     const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
2324     VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer,
2325     VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
2326     VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
2327 
2328 /** \brief Creates a buffer with additional minimum alignment.
2329 
2330 Similar to vmaCreateBuffer(), but provides the additional parameter `minAlignment`, which allows you to specify
2331 a custom minimum alignment to be used when placing the buffer inside a larger memory block. This may be needed e.g.
2332 for interop with OpenGL.
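
A minimal sketch, assuming `bufCreateInfo` and `allocCreateInfo` are filled as for vmaCreateBuffer()
and 4096 is just a hypothetical alignment required by the external API:

\code
VkBuffer buf;
VmaAllocation alloc;
vmaCreateBufferWithAlignment(allocator, &bufCreateInfo, &allocCreateInfo,
    4096, // minAlignment
    &buf, &alloc, nullptr);
\endcode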
2333 */
2334 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment(
2335     VmaAllocator VMA_NOT_NULL allocator,
2336     const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
2337     const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
2338     VkDeviceSize minAlignment,
2339     VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer,
2340     VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
2341     VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
2342 
2343 /** \brief Creates a new `VkBuffer`, binds already created memory to it.
2344 
2345 \param allocator
2346 \param allocation Allocation that provides memory to be used for binding new buffer to it.
2347 \param pBufferCreateInfo
2348 \param[out] pBuffer Buffer that was created.
2349 
2350 This function automatically:
2351 
2352 -# Creates buffer.
2353 -# Binds the buffer with the supplied memory.
2354 
2355 If any of these operations fail, the buffer is not created,
2356 the returned value is a negative error code, and `*pBuffer` is null.
2357 
2358 If the function succeeded, you must destroy the buffer when you
2359 no longer need it using `vkDestroyBuffer()`. If you want to also destroy the corresponding
2360 allocation you can use convenience function vmaDestroyBuffer().
2361 
2362 \note There is a new version of this function augmented with parameter `allocationLocalOffset` - see vmaCreateAliasingBuffer2().
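
A minimal sketch (assumes `allocation` was created earlier, e.g. with vmaAllocateMemory(),
and that its memory type and size are suitable for the new buffer):

\code
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;

VkBuffer aliasingBuf;
vmaCreateAliasingBuffer(allocator, allocation, &bufCreateInfo, &aliasingBuf);

// ...

vkDestroyBuffer(device, aliasingBuf, nullptr); // The allocation itself stays alive.
\endcode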
2363 */
2364 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer(
2365     VmaAllocator VMA_NOT_NULL allocator,
2366     VmaAllocation VMA_NOT_NULL allocation,
2367     const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
2368     VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer);
2369 
2370 /** \brief Creates a new `VkBuffer`, binds already created memory to it.
2371 
2372 \param allocator
2373 \param allocation Allocation that provides memory to be used for binding new buffer to it.
2374 \param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the allocation. Normally it should be 0.
2375 \param pBufferCreateInfo
2376 \param[out] pBuffer Buffer that was created.
2377 
2378 This function automatically:
2379 
2380 -# Creates buffer.
2381 -# Binds the buffer with the supplied memory.
2382 
2383 If any of these operations fail, the buffer is not created,
2384 the returned value is a negative error code, and `*pBuffer` is null.
2385 
2386 If the function succeeded, you must destroy the buffer when you
2387 no longer need it using `vkDestroyBuffer()`. If you want to also destroy the corresponding
2388 allocation you can use convenience function vmaDestroyBuffer().
2389 
2390 \note This is a new version of the function augmented with parameter `allocationLocalOffset`.
2391 */
2392 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer2(
2393     VmaAllocator VMA_NOT_NULL allocator,
2394     VmaAllocation VMA_NOT_NULL allocation,
2395     VkDeviceSize allocationLocalOffset,
2396     const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
2397     VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer);
2398 
2399 /** \brief Destroys Vulkan buffer and frees allocated memory.
2400 
2401 This is just a convenience function equivalent to:
2402 
2403 \code
2404 vkDestroyBuffer(device, buffer, allocationCallbacks);
2405 vmaFreeMemory(allocator, allocation);
2406 \endcode
2407 
2408 It is safe to pass null as buffer and/or allocation.
2409 */
2410 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
2411     VmaAllocator VMA_NOT_NULL allocator,
2412     VkBuffer VMA_NULLABLE_NON_DISPATCHABLE buffer,
2413     VmaAllocation VMA_NULLABLE allocation);
2414 
2415 /// Function similar to vmaCreateBuffer().
2416 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
2417     VmaAllocator VMA_NOT_NULL allocator,
2418     const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
2419     const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
2420     VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage,
2421     VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
2422     VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
2423 
2424 /// Function similar to vmaCreateAliasingBuffer() but for images.
2425 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage(
2426     VmaAllocator VMA_NOT_NULL allocator,
2427     VmaAllocation VMA_NOT_NULL allocation,
2428     const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
2429     VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage);
2430 
2431 /// Function similar to vmaCreateAliasingBuffer2() but for images.
2432 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage2(
2433     VmaAllocator VMA_NOT_NULL allocator,
2434     VmaAllocation VMA_NOT_NULL allocation,
2435     VkDeviceSize allocationLocalOffset,
2436     const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
2437     VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage);
2438 
2439 /** \brief Destroys Vulkan image and frees allocated memory.
2440 
2441 This is just a convenience function equivalent to:
2442 
2443 \code
2444 vkDestroyImage(device, image, allocationCallbacks);
2445 vmaFreeMemory(allocator, allocation);
2446 \endcode
2447 
2448 It is safe to pass null as image and/or allocation.
2449 */
2450 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
2451     VmaAllocator VMA_NOT_NULL allocator,
2452     VkImage VMA_NULLABLE_NON_DISPATCHABLE image,
2453     VmaAllocation VMA_NULLABLE allocation);
2454 
2455 /** @} */
2456 
2457 /**
2458 \addtogroup group_virtual
2459 @{
2460 */
2461 
2462 /** \brief Creates new #VmaVirtualBlock object.
2463 
2464 \param pCreateInfo Parameters for creation.
2465 \param[out] pVirtualBlock Returned virtual block object or `VMA_NULL` if creation failed.
2466 */
2467 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateVirtualBlock(
2468     const VmaVirtualBlockCreateInfo* VMA_NOT_NULL pCreateInfo,
2469     VmaVirtualBlock VMA_NULLABLE* VMA_NOT_NULL pVirtualBlock);
2470 
2471 /** \brief Destroys #VmaVirtualBlock object.
2472 
2473 Please note that you should consciously handle virtual allocations that could remain unfreed in the block.
2474 You should either free them individually using vmaVirtualFree() or call vmaClearVirtualBlock()
2475 if you are sure this is what you want. If you do neither, an assert is triggered.
2476 
2477 If you keep pointers to some additional metadata associated with your virtual allocations in their `pUserData`,
2478 don't forget to free them.
2479 */
2480 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyVirtualBlock(
2481     VmaVirtualBlock VMA_NULLABLE virtualBlock);
2482 
2483 /** \brief Returns true if the #VmaVirtualBlock is empty - contains 0 virtual allocations and has all its space available for new allocations.
2484 */
2485 VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaIsVirtualBlockEmpty(
2486     VmaVirtualBlock VMA_NOT_NULL virtualBlock);
2487 
2488 /** \brief Returns information about a specific virtual allocation within a virtual block, like its size and `pUserData` pointer.
2489 */
2490 VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualAllocationInfo(
2491     VmaVirtualBlock VMA_NOT_NULL virtualBlock,
2492     VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo);
2493 
2494 /** \brief Allocates new virtual allocation inside given #VmaVirtualBlock.
2495 
2496 If the allocation fails due to not enough free space available, `VK_ERROR_OUT_OF_DEVICE_MEMORY` is returned
2497 (even though the function never allocates actual GPU memory).
2498 `pAllocation` is then set to `VK_NULL_HANDLE` and `pOffset`, if not null, is set to `UINT64_MAX`.
2499 
2500 \param virtualBlock Virtual block
2501 \param pCreateInfo Parameters for the allocation
2502 \param[out] pAllocation Returned handle of the new allocation
2503 \param[out] pOffset Returned offset of the new allocation. Optional, can be null.
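
A minimal sketch of the whole life cycle of a virtual allocation (hypothetical sizes):

\code
VmaVirtualBlockCreateInfo blockCreateInfo = {};
blockCreateInfo.size = 1048576; // 1 MB

VmaVirtualBlock block;
vmaCreateVirtualBlock(&blockCreateInfo, &block);

VmaVirtualAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.size = 4096;

VmaVirtualAllocation alloc;
VkDeviceSize offset;
if(vmaVirtualAllocate(block, &allocCreateInfo, &alloc, &offset) == VK_SUCCESS)
{
    // Use range [offset, offset + 4096) of your own resource, then:
    vmaVirtualFree(block, alloc);
}

vmaDestroyVirtualBlock(block);
\endcode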
2504 */
2505 VMA_CALL_PRE VkResult VMA_CALL_POST vmaVirtualAllocate(
2506     VmaVirtualBlock VMA_NOT_NULL virtualBlock,
2507     const VmaVirtualAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
2508     VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pAllocation,
2509     VkDeviceSize* VMA_NULLABLE pOffset);
2510 
2511 /** \brief Frees virtual allocation inside given #VmaVirtualBlock.
2512 
2513 It is correct to call this function with `allocation == VK_NULL_HANDLE` - it does nothing.
2514 */
2515 VMA_CALL_PRE void VMA_CALL_POST vmaVirtualFree(
2516     VmaVirtualBlock VMA_NOT_NULL virtualBlock,
2517     VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE allocation);
2518 
2519 /** \brief Frees all virtual allocations inside given #VmaVirtualBlock.
2520 
2521 You must either call this function or free each virtual allocation individually with vmaVirtualFree()
2522 before destroying a virtual block. Otherwise, an assert is triggered.
2523 
2524 If you keep a pointer to some additional metadata associated with your virtual allocation in its `pUserData`,
2525 don't forget to free it as well.
2526 */
2527 VMA_CALL_PRE void VMA_CALL_POST vmaClearVirtualBlock(
2528     VmaVirtualBlock VMA_NOT_NULL virtualBlock);
2529 
2530 /** \brief Changes custom pointer associated with given virtual allocation.
2531 */
2532 VMA_CALL_PRE void VMA_CALL_POST vmaSetVirtualAllocationUserData(
2533     VmaVirtualBlock VMA_NOT_NULL virtualBlock,
2534     VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation,
2535     void* VMA_NULLABLE pUserData);
2536 
2537 /** \brief Calculates and returns statistics about virtual allocations and memory usage in given #VmaVirtualBlock.
2538 
2539 This function is fast to call. For more detailed statistics, see vmaCalculateVirtualBlockStatistics().
2540 */
2541 VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualBlockStatistics(
2542     VmaVirtualBlock VMA_NOT_NULL virtualBlock,
2543     VmaStatistics* VMA_NOT_NULL pStats);
2544 
2545 /** \brief Calculates and returns detailed statistics about virtual allocations and memory usage in given #VmaVirtualBlock.
2546 
2547 This function is slow to call. Use for debugging purposes.
2548 For less detailed statistics, see vmaGetVirtualBlockStatistics().
2549 */
2550 VMA_CALL_PRE void VMA_CALL_POST vmaCalculateVirtualBlockStatistics(
2551     VmaVirtualBlock VMA_NOT_NULL virtualBlock,
2552     VmaDetailedStatistics* VMA_NOT_NULL pStats);
2553 
2554 /** @} */
2555 
2556 #if VMA_STATS_STRING_ENABLED
2557 /**
2558 \addtogroup group_stats
2559 @{
2560 */
2561 
2562 /** \brief Builds and returns a null-terminated string in JSON format with information about given #VmaVirtualBlock.
2563 \param virtualBlock Virtual block.
2564 \param[out] ppStatsString Returned string.
2565 \param detailedMap Pass `VK_FALSE` to only obtain statistics as returned by vmaCalculateVirtualBlockStatistics(). Pass `VK_TRUE` to also obtain full list of allocations and free spaces.
2566 
2567 Returned string must be freed using vmaFreeVirtualBlockStatsString().
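
A typical call sequence might look like this (a sketch):

\code
char* statsJson = nullptr;
vmaBuildVirtualBlockStatsString(virtualBlock, &statsJson, VK_TRUE);
// ... log or inspect statsJson ...
vmaFreeVirtualBlockStatsString(virtualBlock, statsJson);
\endcode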
2568 */
2569 VMA_CALL_PRE void VMA_CALL_POST vmaBuildVirtualBlockStatsString(
2570     VmaVirtualBlock VMA_NOT_NULL virtualBlock,
2571     char* VMA_NULLABLE* VMA_NOT_NULL ppStatsString,
2572     VkBool32 detailedMap);
2573 
2574 /// Frees a string returned by vmaBuildVirtualBlockStatsString().
2575 VMA_CALL_PRE void VMA_CALL_POST vmaFreeVirtualBlockStatsString(
2576     VmaVirtualBlock VMA_NOT_NULL virtualBlock,
2577     char* VMA_NULLABLE pStatsString);
2578 
2579 /** \brief Builds and returns statistics as a null-terminated string in JSON format.
2580 \param allocator
2581 \param[out] ppStatsString Must be freed using vmaFreeStatsString() function.
2582 \param detailedMap
2583 */
2584 VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
2585     VmaAllocator VMA_NOT_NULL allocator,
2586     char* VMA_NULLABLE* VMA_NOT_NULL ppStatsString,
2587     VkBool32 detailedMap);
2588 
2589 VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
2590     VmaAllocator VMA_NOT_NULL allocator,
2591     char* VMA_NULLABLE pStatsString);
2592 
2593 /** @} */
2594 
2595 #endif // VMA_STATS_STRING_ENABLED
2596 
2597 #endif // _VMA_FUNCTION_HEADERS
2598 
2599 #ifdef __cplusplus
2600 }
2601 #endif
2602 
2603 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
2604 
2605 ////////////////////////////////////////////////////////////////////////////////
2606 ////////////////////////////////////////////////////////////////////////////////
2607 //
2608 //    IMPLEMENTATION
2609 //
2610 ////////////////////////////////////////////////////////////////////////////////
2611 ////////////////////////////////////////////////////////////////////////////////
2612 
2613 // For Visual Studio IntelliSense.
2614 #if defined(__cplusplus) && defined(__INTELLISENSE__)
2615 #define VMA_IMPLEMENTATION
2616 #endif
2617 
2618 #ifdef VMA_IMPLEMENTATION
2619 #undef VMA_IMPLEMENTATION
2620 
2621 #include <cstdint>
2622 #include <cstdlib>
2623 #include <cstring>
2624 #include <utility>
2625 #include <type_traits>
2626 
2627 #ifdef _MSC_VER
2628     #include <intrin.h> // For functions like __popcnt, _BitScanForward etc.
2629 #endif
2630 #if __cplusplus >= 202002L || _MSVC_LANG >= 202002L // C++20
2631     #include <bit> // For std::popcount
2632 #endif
2633 
2634 #if VMA_STATS_STRING_ENABLED
2635     #include <cstdio> // For snprintf
2636 #endif
2637 
2638 /*******************************************************************************
2639 CONFIGURATION SECTION
2640 
2641 Define some of these macros before each #include of this header or change them
2642 here if you need other than the default behavior, depending on your environment.
2643 */
2644 #ifndef _VMA_CONFIGURATION
2645 
2646 /*
2647 Define this macro to 1 to make the library fetch pointers to Vulkan functions
2648 internally, like:
2649 
2650     vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
2651 */
2652 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
2653     #define VMA_STATIC_VULKAN_FUNCTIONS 1
2654 #endif
2655 
2656 /*
2657 Define this macro to 1 to make the library fetch pointers to Vulkan functions
2658 internally, like:
2659 
2660     vulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkGetDeviceProcAddr(device, "vkAllocateMemory");
2661 
2662 To use this feature in new versions of VMA you now have to pass
2663 VmaVulkanFunctions::vkGetInstanceProcAddr and vkGetDeviceProcAddr as
2664 VmaAllocatorCreateInfo::pVulkanFunctions. Other members can be null.
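
For example, a minimal setup could look like this (a sketch, assuming the loader entry points
vkGetInstanceProcAddr/vkGetDeviceProcAddr are statically linked):

    VmaVulkanFunctions vulkanFunctions = {};
    vulkanFunctions.vkGetInstanceProcAddr = &vkGetInstanceProcAddr;
    vulkanFunctions.vkGetDeviceProcAddr = &vkGetDeviceProcAddr;

    VmaAllocatorCreateInfo allocatorCreateInfo = {};
    // ... other members ...
    allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;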
2665 */
2666 #if !defined(VMA_DYNAMIC_VULKAN_FUNCTIONS)
2667     #define VMA_DYNAMIC_VULKAN_FUNCTIONS 1
2668 #endif
2669 
2670 #ifndef VMA_USE_STL_SHARED_MUTEX
2671     #if __cplusplus >= 201703L || _MSVC_LANG >= 201703L // C++17
2672         #define VMA_USE_STL_SHARED_MUTEX 1
2673     // Visual Studio defines __cplusplus properly only when passed the additional parameter /Zc:__cplusplus.
2674     // Otherwise it is always 199711L, even though shared_mutex has worked since Visual Studio 2015 Update 2.
2675     #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
2676         #define VMA_USE_STL_SHARED_MUTEX 1
2677     #else
2678         #define VMA_USE_STL_SHARED_MUTEX 0
2679     #endif
2680 #endif
2681 
2682 /*
2683 Define this macro to include custom header files without having to edit this file directly, e.g.:
2684 
2685     // Inside of "my_vma_configuration_user_includes.h":
2686 
2687     #include "my_custom_assert.h" // for MY_CUSTOM_ASSERT
2688     #include "my_custom_min.h" // for my_custom_min
2689     #include <algorithm>
2690     #include <mutex>
2691 
2692     // Inside a different file, which includes "vk_mem_alloc.h":
2693 
2694     #define VMA_CONFIGURATION_USER_INCLUDES_H "my_vma_configuration_user_includes.h"
2695     #define VMA_ASSERT(expr) MY_CUSTOM_ASSERT(expr)
2696     #define VMA_MIN(v1, v2)  (my_custom_min(v1, v2))
2697     #include "vk_mem_alloc.h"
2698     ...
2699 
2700 The following headers are used in this CONFIGURATION section only, so feel free to
2701 remove them if not needed.
2702 */
2703 #if !defined(VMA_CONFIGURATION_USER_INCLUDES_H)
2704     #include <cassert> // for assert
2705     #include <algorithm> // for min, max
2706     #include <mutex>
2707 #else
2708     #include VMA_CONFIGURATION_USER_INCLUDES_H
2709 #endif
2710 
2711 #ifndef VMA_NULL
2712    // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
2713    #define VMA_NULL   nullptr
2714 #endif
2715 
2716 // Used to silence warnings for implicit fallthrough.
2717 #ifndef VMA_FALLTHROUGH
2718     #if __has_cpp_attribute(clang::fallthrough)
2719         #define VMA_FALLTHROUGH [[clang::fallthrough]];
2720     #elif __cplusplus >= 201703L || _MSVC_LANG >= 201703L // C++17
2721         #define VMA_FALLTHROUGH [[fallthrough]]
2722     #else
2723         #define VMA_FALLTHROUGH
2724     #endif
2725 #endif
2726 
2727 // Normal assert to check for programmer's errors, especially in Debug configuration.
2728 #ifndef VMA_ASSERT
2729    #ifdef NDEBUG
2730        #define VMA_ASSERT(expr)
2731    #else
2732        #define VMA_ASSERT(expr)         assert(expr)
2733    #endif
2734 #endif
2735 
2736 // Assert that will be called very often, like inside data structures e.g. operator[].
2737 // Making it non-empty can make the program slow.
2738 #ifndef VMA_HEAVY_ASSERT
2739    #ifdef NDEBUG
2740        #define VMA_HEAVY_ASSERT(expr)
2741    #else
2742        #define VMA_HEAVY_ASSERT(expr)   //VMA_ASSERT(expr)
2743    #endif
2744 #endif
2745 
2746 // If your compiler is not compatible with C++17 and the definition of the
2747 // aligned_alloc() function is missing, uncommenting the following line may help:
2748 
2749 //#include <malloc.h>
2750 
2751 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
2752 #include <cstdlib>
2753 void* vma_aligned_alloc(size_t alignment, size_t size)
2754 {
2755     // alignment must be >= sizeof(void*)
2756     if(alignment < sizeof(void*))
2757     {
2758         alignment = sizeof(void*);
2759     }
2760 
2761     return memalign(alignment, size);
2762 }
2763 #elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC))
2764 #include <cstdlib>
2765 
2766 #if defined(__APPLE__)
2767 #include <AvailabilityMacros.h>
2768 #endif
2769 
2770 void *vma_aligned_alloc(size_t alignment, size_t size)
2771 {
2772     // Unfortunately, aligned_alloc causes VMA to crash because it returns null pointers (at least under macOS 11.4).
2773     // Therefore, this specific code path is disabled for now until a proper solution is found.
2774     //#if defined(__APPLE__) && (defined(MAC_OS_X_VERSION_10_16) || defined(__IPHONE_14_0))
2775     //#if MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_16 || __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_14_0
2776     //    // For C++14, usr/include/malloc/_malloc.h declares aligned_alloc()) only
2777     //    // with the MacOSX11.0 SDK in Xcode 12 (which is what adds
2778     //    // MAC_OS_X_VERSION_10_16), even though the function is marked
2779     //    // available for 10.15. That is why the preprocessor checks for 10.16 but
2780     //    // the __builtin_available checks for 10.15.
2781     //    // People who use C++17 could call aligned_alloc with the 10.15 SDK already.
2782     //    if (__builtin_available(macOS 10.15, iOS 13, *))
2783     //        return aligned_alloc(alignment, size);
2784     //#endif
2785     //#endif
2786 
2787     // alignment must be >= sizeof(void*)
2788     if(alignment < sizeof(void*))
2789     {
2790         alignment = sizeof(void*);
2791     }
2792 
2793     void *pointer;
2794     if(posix_memalign(&pointer, alignment, size) == 0)
2795         return pointer;
2796     return VMA_NULL;
2797 }
2798 #elif defined(_WIN32)
2799 void* vma_aligned_alloc(size_t alignment, size_t size)
2800 {
2801     return _aligned_malloc(size, alignment);
2802 }
2803 #elif __cplusplus >= 201703L || _MSVC_LANG >= 201703L // C++17
2804 void* vma_aligned_alloc(size_t alignment, size_t size)
2805 {
2806     return aligned_alloc(alignment, size);
2807 }
2808 #else
2809 void* vma_aligned_alloc(size_t alignment, size_t size)
2810 {
2811     VMA_ASSERT(0 && "Could not implement aligned_alloc automatically. Please enable C++17 or later in your compiler or provide custom implementation of macro VMA_SYSTEM_ALIGNED_MALLOC (and VMA_SYSTEM_ALIGNED_FREE if needed) using the API of your system.");
2812     return VMA_NULL;
2813 }
2814 #endif
2815 
2816 #if defined(_WIN32)
2817 static void vma_aligned_free(void* ptr)
2818 {
2819     _aligned_free(ptr);
2820 }
2821 #else
2822 static void vma_aligned_free(void* VMA_NULLABLE ptr)
2823 {
2824     free(ptr);
2825 }
2826 #endif
2827 
2828 #ifndef VMA_ALIGN_OF
2829    #define VMA_ALIGN_OF(type)       (alignof(type))
2830 #endif
2831 
2832 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
2833    #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) vma_aligned_alloc((alignment), (size))
2834 #endif
2835 
2836 #ifndef VMA_SYSTEM_ALIGNED_FREE
2837    // VMA_SYSTEM_FREE is the old name, but might have been defined by the user
2838    #if defined(VMA_SYSTEM_FREE)
2839       #define VMA_SYSTEM_ALIGNED_FREE(ptr)     VMA_SYSTEM_FREE(ptr)
2840    #else
2841       #define VMA_SYSTEM_ALIGNED_FREE(ptr)     vma_aligned_free(ptr)
2842     #endif
2843 #endif
2844 
2845 #ifndef VMA_COUNT_BITS_SET
2846     // Returns number of bits set to 1 in (v)
2847     #define VMA_COUNT_BITS_SET(v) VmaCountBitsSet(v)
2848 #endif
2849 
2850 #ifndef VMA_BITSCAN_LSB
2851     // Scans the integer for the index of the first nonzero bit, starting from the Least Significant Bit (LSB). If mask is 0, returns UINT8_MAX
2852     #define VMA_BITSCAN_LSB(mask) VmaBitScanLSB(mask)
2853 #endif
2854 
2855 #ifndef VMA_BITSCAN_MSB
2856     // Scans the integer for the index of the first nonzero bit, starting from the Most Significant Bit (MSB). If mask is 0, returns UINT8_MAX
2857     #define VMA_BITSCAN_MSB(mask) VmaBitScanMSB(mask)
2858 #endif
2859 
2860 #ifndef VMA_MIN
2861    #define VMA_MIN(v1, v2)    ((std::min)((v1), (v2)))
2862 #endif
2863 
2864 #ifndef VMA_MAX
2865    #define VMA_MAX(v1, v2)    ((std::max)((v1), (v2)))
2866 #endif
2867 
2868 #ifndef VMA_SWAP
2869    #define VMA_SWAP(v1, v2)   std::swap((v1), (v2))
2870 #endif
2871 
2872 #ifndef VMA_SORT
2873    #define VMA_SORT(beg, end, cmp)  std::sort(beg, end, cmp)
2874 #endif
2875 
2876 #ifndef VMA_DEBUG_LOG_FORMAT
2877    #define VMA_DEBUG_LOG_FORMAT(format, ...)
2878    /*
2879    #define VMA_DEBUG_LOG_FORMAT(format, ...) do { \
2880        printf((format), __VA_ARGS__); \
2881        printf("\n"); \
2882    } while(false)
2883    */
2884 #endif
2885 
2886 #ifndef VMA_DEBUG_LOG
2887     #define VMA_DEBUG_LOG(str)   VMA_DEBUG_LOG_FORMAT("%s", (str))
2888 #endif
2889 
2890 #ifndef VMA_CLASS_NO_COPY
2891     #define VMA_CLASS_NO_COPY(className) \
2892         private: \
2893             className(const className&) = delete; \
2894             className& operator=(const className&) = delete;
2895 #endif
2896 #ifndef VMA_CLASS_NO_COPY_NO_MOVE
2897     #define VMA_CLASS_NO_COPY_NO_MOVE(className) \
2898         private: \
2899             className(const className&) = delete; \
2900             className(className&&) = delete; \
2901             className& operator=(const className&) = delete; \
2902             className& operator=(className&&) = delete;
2903 #endif
2904 
2905 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
2906 #if VMA_STATS_STRING_ENABLED
2907     static inline void VmaUint32ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint32_t num)
2908     {
2909         snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
2910     }
2911     static inline void VmaUint64ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint64_t num)
2912     {
2913         snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
2914     }
2915     static inline void VmaPtrToStr(char* VMA_NOT_NULL outStr, size_t strLen, const void* ptr)
2916     {
2917         snprintf(outStr, strLen, "%p", ptr);
2918     }
2919 #endif
2920 
2921 #ifndef VMA_MUTEX
2922     class VmaMutex
2923     {
2924     VMA_CLASS_NO_COPY_NO_MOVE(VmaMutex)
2925     public:
2926         VmaMutex() { }
2927         void Lock() { m_Mutex.lock(); }
2928         void Unlock() { m_Mutex.unlock(); }
2929         bool TryLock() { return m_Mutex.try_lock(); }
2930     private:
2931         std::mutex m_Mutex;
2932     };
2933     #define VMA_MUTEX VmaMutex
2934 #endif
2935 
2936 // Read-write mutex, where "read" is shared access, "write" is exclusive access.
2937 #ifndef VMA_RW_MUTEX
2938     #if VMA_USE_STL_SHARED_MUTEX
2939         // Use std::shared_mutex from C++17.
2940         #include <shared_mutex>
2941         class VmaRWMutex
2942         {
2943         public:
2944             void LockRead() { m_Mutex.lock_shared(); }
2945             void UnlockRead() { m_Mutex.unlock_shared(); }
2946             bool TryLockRead() { return m_Mutex.try_lock_shared(); }
2947             void LockWrite() { m_Mutex.lock(); }
2948             void UnlockWrite() { m_Mutex.unlock(); }
2949             bool TryLockWrite() { return m_Mutex.try_lock(); }
2950         private:
2951             std::shared_mutex m_Mutex;
2952         };
2953         #define VMA_RW_MUTEX VmaRWMutex
2954     #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
2955         // Use SRWLOCK from WinAPI.
2956         // Minimum supported client = Windows Vista, server = Windows Server 2008.
2957         class VmaRWMutex
2958         {
2959         public:
2960             VmaRWMutex() { InitializeSRWLock(&m_Lock); }
2961             void LockRead() { AcquireSRWLockShared(&m_Lock); }
2962             void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
2963             bool TryLockRead() { return TryAcquireSRWLockShared(&m_Lock) != FALSE; }
2964             void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
2965             void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
2966             bool TryLockWrite() { return TryAcquireSRWLockExclusive(&m_Lock) != FALSE; }
2967         private:
2968             SRWLOCK m_Lock;
2969         };
2970         #define VMA_RW_MUTEX VmaRWMutex
2971     #else
2972         // Less efficient fallback: Use normal mutex.
2973         class VmaRWMutex
2974         {
2975         public:
2976             void LockRead() { m_Mutex.Lock(); }
2977             void UnlockRead() { m_Mutex.Unlock(); }
2978             bool TryLockRead() { return m_Mutex.TryLock(); }
2979             void LockWrite() { m_Mutex.Lock(); }
2980             void UnlockWrite() { m_Mutex.Unlock(); }
2981             bool TryLockWrite() { return m_Mutex.TryLock(); }
2982         private:
2983             VMA_MUTEX m_Mutex;
2984         };
2985         #define VMA_RW_MUTEX VmaRWMutex
2986     #endif // #if VMA_USE_STL_SHARED_MUTEX
2987 #endif // #ifndef VMA_RW_MUTEX
2988 
2989 /*
2990 If providing your own implementation, you need to implement a subset of std::atomic.
2991 */
2992 #ifndef VMA_ATOMIC_UINT32
2993     #include <atomic>
2994     #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
2995 #endif
2996 
2997 #ifndef VMA_ATOMIC_UINT64
2998     #include <atomic>
2999     #define VMA_ATOMIC_UINT64 std::atomic<uint64_t>
3000 #endif
3001 
3002 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
3003     /**
3004     Every allocation will have its own memory block.
3005     Define to 1 for debugging purposes only.
3006     */
3007     #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
3008 #endif
3009 
3010 #ifndef VMA_MIN_ALIGNMENT
3011     /**
3012     Minimum alignment of all allocations, in bytes.
3013     Set to more than 1 for debugging purposes. Must be power of two.
3014     */
3015     #ifdef VMA_DEBUG_ALIGNMENT // Old name
3016         #define VMA_MIN_ALIGNMENT VMA_DEBUG_ALIGNMENT
3017     #else
3018         #define VMA_MIN_ALIGNMENT (1)
3019     #endif
3020 #endif
3021 
3022 #ifndef VMA_DEBUG_MARGIN
3023     /**
3024     Minimum margin after every allocation, in bytes.
3025     Set nonzero for debugging purposes only.
3026     */
3027     #define VMA_DEBUG_MARGIN (0)
3028 #endif
3029 
3030 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
3031     /**
3032     Define this macro to 1 to automatically fill new allocations and destroyed
3033     allocations with some bit pattern.
3034     */
3035     #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
3036 #endif
3037 
3038 #ifndef VMA_DEBUG_DETECT_CORRUPTION
3039     /**
3040     Define this macro to 1 together with non-zero value of VMA_DEBUG_MARGIN to
3041     enable writing magic value to the margin after every allocation and
3042     validating it, so that memory corruptions (out-of-bounds writes) are detected.
3043     */
3044     #define VMA_DEBUG_DETECT_CORRUPTION (0)
3045 #endif
3046 
3047 #ifndef VMA_DEBUG_GLOBAL_MUTEX
3048     /**
3049     Set this to 1 for debugging purposes only, to enable single mutex protecting all
3050     entry calls to the library. Can be useful for debugging multithreading issues.
3051     */
3052     #define VMA_DEBUG_GLOBAL_MUTEX (0)
3053 #endif
3054 
3055 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
3056     /**
3057     Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity.
3058     Set to more than 1 for debugging purposes only. Must be power of two.
3059     */
3060     #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
3061 #endif
3062 
3063 #ifndef VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT
3064     /*
3065     Set this to 1 to make VMA never exceed VkPhysicalDeviceLimits::maxMemoryAllocationCount
3066     and return an error instead of leaving it up to the Vulkan implementation what to do in such cases.
3067     */
3068     #define VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT (0)
3069 #endif
3070 
3071 #ifndef VMA_SMALL_HEAP_MAX_SIZE
3072    /// Maximum size of a memory heap in Vulkan to consider it "small".
3073    #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
3074 #endif
3075 
3076 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
3077    /// Default size of a block allocated as single VkDeviceMemory from a "large" heap.
3078    #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
3079 #endif
3080 
3081 /*
3082 Mapping hysteresis is logic that kicks in when vmaMapMemory/vmaUnmapMemory is called
3083 or a persistently mapped allocation is created and destroyed several times in a row.
3084 It keeps an additional +1 mapping of a device memory block to avoid calling the actual
3085 vkMapMemory/vkUnmapMemory too many times, which may improve performance and help
3086 tools like RenderDoc.
3087 */
3088 #ifndef VMA_MAPPING_HYSTERESIS_ENABLED
3089     #define VMA_MAPPING_HYSTERESIS_ENABLED 1
3090 #endif
3091 
3092 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
3093         VMA_ASSERT(0 && "Validation failed: " #cond); \
3094         return false; \
3095     } } while(false)
3096 
3097 /*******************************************************************************
3098 END OF CONFIGURATION
3099 */
3100 #endif // _VMA_CONFIGURATION
3101 
3102 
3103 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
3104 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
3105 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
3106 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
3107 
3108 // Copy of some Vulkan definitions so we don't need to check their existence just to handle a few constants.
3109 static const uint32_t VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY = 0x00000040;
3110 static const uint32_t VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY = 0x00000080;
3111 static const uint32_t VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY = 0x00020000;
3112 static const uint32_t VK_IMAGE_CREATE_DISJOINT_BIT_COPY = 0x00000200;
3113 static const int32_t VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT_COPY = 1000158000;
3114 static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
3115 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
3116 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
3117 
3118 // This one is tricky. The Vulkan specification defines this code as available since
3119 // Vulkan 1.0, but the Vulkan SDK doesn't actually define it earlier than 1.2.131.
3120 // See pull request #207.
3121 #define VK_ERROR_UNKNOWN_COPY ((VkResult)-13)
3122 
3123 
3124 #if VMA_STATS_STRING_ENABLED
3125 // Correspond to values of enum VmaSuballocationType.
3126 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] =
3127 {
3128     "FREE",
3129     "UNKNOWN",
3130     "BUFFER",
3131     "IMAGE_UNKNOWN",
3132     "IMAGE_LINEAR",
3133     "IMAGE_OPTIMAL",
3134 };
3135 #endif
3136 
3137 static VkAllocationCallbacks VmaEmptyAllocationCallbacks =
3138     { VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
3139 
3140 
3141 #ifndef _VMA_ENUM_DECLARATIONS
3142 
3143 enum VmaSuballocationType
3144 {
3145     VMA_SUBALLOCATION_TYPE_FREE = 0,
3146     VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
3147     VMA_SUBALLOCATION_TYPE_BUFFER = 2,
3148     VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
3149     VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
3150     VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
3151     VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
3152 };
3153 
3154 enum VMA_CACHE_OPERATION
3155 {
3156     VMA_CACHE_FLUSH,
3157     VMA_CACHE_INVALIDATE
3158 };
3159 
3160 enum class VmaAllocationRequestType
3161 {
3162     Normal,
3163     TLSF,
3164     // Used by "Linear" algorithm.
3165     UpperAddress,
3166     EndOf1st,
3167     EndOf2nd,
3168 };
3169 
3170 #endif // _VMA_ENUM_DECLARATIONS
3171 
3172 #ifndef _VMA_FORWARD_DECLARATIONS
3173 // Opaque handle used by allocation algorithms to identify a single allocation in any conforming way.
3174 VK_DEFINE_NON_DISPATCHABLE_HANDLE(VmaAllocHandle)
3175 
3176 struct VmaMutexLock;
3177 struct VmaMutexLockRead;
3178 struct VmaMutexLockWrite;
3179 
3180 template<typename T>
3181 struct AtomicTransactionalIncrement;
3182 
3183 template<typename T>
3184 struct VmaStlAllocator;
3185 
3186 template<typename T, typename AllocatorT>
3187 class VmaVector;
3188 
3189 template<typename T, typename AllocatorT, size_t N>
3190 class VmaSmallVector;
3191 
3192 template<typename T>
3193 class VmaPoolAllocator;
3194 
3195 template<typename T>
3196 struct VmaListItem;
3197 
3198 template<typename T>
3199 class VmaRawList;
3200 
3201 template<typename T, typename AllocatorT>
3202 class VmaList;
3203 
3204 template<typename ItemTypeTraits>
3205 class VmaIntrusiveLinkedList;
3206 
3207 // Unused in this version
3208 #if 0
3209 template<typename T1, typename T2>
3210 struct VmaPair;
3211 template<typename FirstT, typename SecondT>
3212 struct VmaPairFirstLess;
3213 
3214 template<typename KeyT, typename ValueT>
3215 class VmaMap;
3216 #endif
3217 
3218 #if VMA_STATS_STRING_ENABLED
3219 class VmaStringBuilder;
3220 class VmaJsonWriter;
3221 #endif
3222 
3223 class VmaDeviceMemoryBlock;
3224 
3225 struct VmaDedicatedAllocationListItemTraits;
3226 class VmaDedicatedAllocationList;
3227 
3228 struct VmaSuballocation;
3229 struct VmaSuballocationOffsetLess;
3230 struct VmaSuballocationOffsetGreater;
3231 struct VmaSuballocationItemSizeLess;
3232 
3233 typedef VmaList<VmaSuballocation, VmaStlAllocator<VmaSuballocation>> VmaSuballocationList;
3234 
3235 struct VmaAllocationRequest;
3236 
3237 class VmaBlockMetadata;
3238 class VmaBlockMetadata_Linear;
3239 class VmaBlockMetadata_TLSF;
3240 
3241 class VmaBlockVector;
3242 
3243 struct VmaPoolListItemTraits;
3244 
3245 struct VmaCurrentBudgetData;
3246 
3247 class VmaAllocationObjectAllocator;
3248 
3249 #endif // _VMA_FORWARD_DECLARATIONS
3250 
3251 
3252 #ifndef _VMA_FUNCTIONS
3253 
3254 /*
3255 Returns number of bits set to 1 in (v).
3256 
3257 On specific platforms and compilers you can use intrinsics like:
3258 
3259 Visual Studio:
3260     return __popcnt(v);
3261 GCC, Clang:
3262     return static_cast<uint32_t>(__builtin_popcount(v));
3263 
3264 Define macro VMA_COUNT_BITS_SET to provide your optimized implementation.
3265 But you need to check at runtime whether the user's CPU supports these, as some old processors don't.
3266 */
3267 static inline uint32_t VmaCountBitsSet(uint32_t v)
3268 {
3269 #if __cplusplus >= 202002L || _MSVC_LANG >= 202002L // C++20
3270     return std::popcount(v);
3271 #else
3272     uint32_t c = v - ((v >> 1) & 0x55555555);
3273     c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
3274     c = ((c >> 4) + c) & 0x0F0F0F0F;
3275     c = ((c >> 8) + c) & 0x00FF00FF;
3276     c = ((c >> 16) + c) & 0x0000FFFF;
3277     return c;
3278 #endif
3279 }
3280 
3281 static inline uint8_t VmaBitScanLSB(uint64_t mask)
3282 {
3283 #if defined(_MSC_VER) && defined(_WIN64)
3284     unsigned long pos;
3285     if (_BitScanForward64(&pos, mask))
3286         return static_cast<uint8_t>(pos);
3287     return UINT8_MAX;
3288 #elif defined __GNUC__ || defined __clang__
3289     return static_cast<uint8_t>(__builtin_ffsll(mask)) - 1U;
3290 #else
3291     uint8_t pos = 0;
3292     uint64_t bit = 1;
3293     do
3294     {
3295         if (mask & bit)
3296             return pos;
3297         bit <<= 1;
3298     } while (pos++ < 63);
3299     return UINT8_MAX;
3300 #endif
3301 }
3302 
3303 static inline uint8_t VmaBitScanLSB(uint32_t mask)
3304 {
3305 #ifdef _MSC_VER
3306     unsigned long pos;
3307     if (_BitScanForward(&pos, mask))
3308         return static_cast<uint8_t>(pos);
3309     return UINT8_MAX;
3310 #elif defined __GNUC__ || defined __clang__
3311     return static_cast<uint8_t>(__builtin_ffs(mask)) - 1U;
3312 #else
3313     uint8_t pos = 0;
3314     uint32_t bit = 1;
3315     do
3316     {
3317         if (mask & bit)
3318             return pos;
3319         bit <<= 1;
3320     } while (pos++ < 31);
3321     return UINT8_MAX;
3322 #endif
3323 }
3324 
3325 static inline uint8_t VmaBitScanMSB(uint64_t mask)
3326 {
3327 #if defined(_MSC_VER) && defined(_WIN64)
3328     unsigned long pos;
3329     if (_BitScanReverse64(&pos, mask))
3330         return static_cast<uint8_t>(pos);
3331 #elif defined __GNUC__ || defined __clang__
3332     if (mask)
3333         return 63 - static_cast<uint8_t>(__builtin_clzll(mask));
3334 #else
3335     uint8_t pos = 63;
3336     uint64_t bit = 1ULL << 63;
3337     do
3338     {
3339         if (mask & bit)
3340             return pos;
3341         bit >>= 1;
3342     } while (pos-- > 0);
3343 #endif
3344     return UINT8_MAX;
3345 }
3346 
3347 static inline uint8_t VmaBitScanMSB(uint32_t mask)
3348 {
3349 #ifdef _MSC_VER
3350     unsigned long pos;
3351     if (_BitScanReverse(&pos, mask))
3352         return static_cast<uint8_t>(pos);
3353 #elif defined __GNUC__ || defined __clang__
3354     if (mask)
3355         return 31 - static_cast<uint8_t>(__builtin_clz(mask));
3356 #else
3357     uint8_t pos = 31;
3358     uint32_t bit = 1UL << 31;
3359     do
3360     {
3361         if (mask & bit)
3362             return pos;
3363         bit >>= 1;
3364     } while (pos-- > 0);
3365 #endif
3366     return UINT8_MAX;
3367 }
3368 
3369 /*
3370 Returns true if the given number is a power of two.
3371 T must be an unsigned integer, or a signed integer that is always nonnegative.
3372 For 0, returns true.
3373 */
3374 template <typename T>
3375 inline bool VmaIsPow2(T x)
3376 {
3377     return (x & (x - 1)) == 0;
3378 }
3379 
3380 // Aligns the given value up to the nearest multiple of the align value. For example: VmaAlignUp(11, 8) = 16.
3381 // Use types like uint32_t, uint64_t as T.
3382 template <typename T>
3383 static inline T VmaAlignUp(T val, T alignment)
3384 {
3385     VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
3386     return (val + alignment - 1) & ~(alignment - 1);
3387 }
3388 
3389 // Aligns the given value down to the nearest multiple of the align value. For example: VmaAlignDown(11, 8) = 8.
3390 // Use types like uint32_t, uint64_t as T.
3391 template <typename T>
3392 static inline T VmaAlignDown(T val, T alignment)
3393 {
3394     VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
3395     return val & ~(alignment - 1);
3396 }
3397 
3398 // Division with mathematical rounding to the nearest integer.
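// For example: VmaRoundDiv(10, 4) = 3.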
3399 template <typename T>
3400 static inline T VmaRoundDiv(T x, T y)
3401 {
3402     return (x + (y / (T)2)) / y;
3403 }
3404 
3405 // Divide by 'y' and round up to nearest integer.
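// For example: VmaDivideRoundingUp(10, 4) = 3.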
3406 template <typename T>
3407 static inline T VmaDivideRoundingUp(T x, T y)
3408 {
3409     return (x + y - (T)1) / y;
3410 }
3411 
3412 // Returns smallest power of 2 greater or equal to v.
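// For example: VmaNextPow2(11) = 16.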
3413 static inline uint32_t VmaNextPow2(uint32_t v)
3414 {
3415     v--;
3416     v |= v >> 1;
3417     v |= v >> 2;
3418     v |= v >> 4;
3419     v |= v >> 8;
3420     v |= v >> 16;
3421     v++;
3422     return v;
3423 }
3424 
3425 static inline uint64_t VmaNextPow2(uint64_t v)
3426 {
3427     v--;
3428     v |= v >> 1;
3429     v |= v >> 2;
3430     v |= v >> 4;
3431     v |= v >> 8;
3432     v |= v >> 16;
3433     v |= v >> 32;
3434     v++;
3435     return v;
3436 }
3437 
3438 // Returns largest power of 2 less or equal to v.
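// For example: VmaPrevPow2(11) = 8.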
3439 static inline uint32_t VmaPrevPow2(uint32_t v)
3440 {
3441     v |= v >> 1;
3442     v |= v >> 2;
3443     v |= v >> 4;
3444     v |= v >> 8;
3445     v |= v >> 16;
3446     v = v ^ (v >> 1);
3447     return v;
3448 }
3449 
3450 static inline uint64_t VmaPrevPow2(uint64_t v)
3451 {
3452     v |= v >> 1;
3453     v |= v >> 2;
3454     v |= v >> 4;
3455     v |= v >> 8;
3456     v |= v >> 16;
3457     v |= v >> 32;
3458     v = v ^ (v >> 1);
3459     return v;
3460 }
3461 
3462 static inline bool VmaStrIsEmpty(const char* pStr)
3463 {
3464     return pStr == VMA_NULL || *pStr == '\0';
3465 }
3466 
3467 /*
3468 Returns true if two memory blocks occupy overlapping pages.
3469 ResourceA must be at a lower memory offset than ResourceB.
3470 
3471 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
3472 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
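For example, with pageSize = 0x10000: a resource occupying bytes [0, 0xEFFF] ends on page 0,
so a second resource starting at offset 0xF000 shares that page (the function returns true),
while one starting at offset 0x10000 begins on page 1 (the function returns false).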
3473 */
3474 static inline bool VmaBlocksOnSamePage(
3475     VkDeviceSize resourceAOffset,
3476     VkDeviceSize resourceASize,
3477     VkDeviceSize resourceBOffset,
3478     VkDeviceSize pageSize)
3479 {
3480     VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
3481     VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
3482     VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
3483     VkDeviceSize resourceBStart = resourceBOffset;
3484     VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
3485     return resourceAEndPage == resourceBStartPage;
3486 }
3487 
3488 /*
3489 Returns true if given suballocation types could conflict and must respect
3490 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is a buffer
3491 or linear image and the other is an optimal image. If the type is unknown, behave
3492 conservatively.
3493 */
3494 static inline bool VmaIsBufferImageGranularityConflict(
3495     VmaSuballocationType suballocType1,
3496     VmaSuballocationType suballocType2)
3497 {
3498     if (suballocType1 > suballocType2)
3499     {
3500         VMA_SWAP(suballocType1, suballocType2);
3501     }
3502 
3503     switch (suballocType1)
3504     {
3505     case VMA_SUBALLOCATION_TYPE_FREE:
3506         return false;
3507     case VMA_SUBALLOCATION_TYPE_UNKNOWN:
3508         return true;
3509     case VMA_SUBALLOCATION_TYPE_BUFFER:
3510         return
3511             suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3512             suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3513     case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
3514         return
3515             suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3516             suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
3517             suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3518     case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
3519         return
3520             suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3521     case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
3522         return false;
3523     default:
3524         VMA_ASSERT(0);
3525         return true;
3526     }
3527 }
3528 
3529 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
3530 {
3531 #if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
3532     uint32_t* pDst = (uint32_t*)((char*)pData + offset);
3533     const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3534     for (size_t i = 0; i < numberCount; ++i, ++pDst)
3535     {
3536         *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
3537     }
3538 #else
3539     // no-op
3540 #endif
3541 }
3542 
3543 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
3544 {
3545 #if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
3546     const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
3547     const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3548     for (size_t i = 0; i < numberCount; ++i, ++pSrc)
3549     {
3550         if (*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
3551         {
3552             return false;
3553         }
3554     }
3555 #endif
3556     return true;
3557 }
3558 
3559 /*
3560 Fills structure with parameters of an example buffer to be used for transfers
3561 during GPU memory defragmentation.
3562 */
3563 static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
3564 {
3565     memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
3566     outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
3567     outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
3568     outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
3569 }
3570 
3571 
3572 /*
3573 Performs a binary search and returns an iterator to the first element that is greater
3574 than or equal to (key), according to the comparison (cmp).
3575 
3576 Cmp should return true if its first argument is less than its second argument.
3577 
3578 The returned iterator points to the found element, if present in the collection,
3579 or to the place where a new element with value (key) should be inserted.
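
A small usage sketch (hypothetical data):

    int arr[] = { 1, 3, 3, 7 };
    // Returns iterator to the first 3 (index 1):
    int* it = VmaBinaryFindFirstNotLess(arr, arr + 4, 3,
        [](int a, int b) { return a < b; });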
3580 */
3581 template <typename CmpLess, typename IterT, typename KeyT>
3582 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT& key, const CmpLess& cmp)
3583 {
3584     size_t down = 0, up = size_t(end - beg);
3585     while (down < up)
3586     {
3587         const size_t mid = down + (up - down) / 2;  // Overflow-safe midpoint calculation
3588         if (cmp(*(beg + mid), key))
3589         {
3590             down = mid + 1;
3591         }
3592         else
3593         {
3594             up = mid;
3595         }
3596     }
3597     return beg + down;
3598 }
3599 
3600 template<typename CmpLess, typename IterT, typename KeyT>
3601 IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
3602 {
3603     IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
3604         beg, end, value, cmp);
3605     if (it == end ||
3606         (!cmp(*it, value) && !cmp(value, *it)))
3607     {
3608         return it;
3609     }
3610     return end;
3611 }
3612 
3613 /*
3614 Returns true if all pointers in the array are non-null and unique.
3615 Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
3616 T must be pointer type, e.g. VmaAllocation, VmaPool.
3617 */
3618 template<typename T>
3619 static bool VmaValidatePointerArray(uint32_t count, const T* arr)
3620 {
3621     for (uint32_t i = 0; i < count; ++i)
3622     {
3623         const T iPtr = arr[i];
3624         if (iPtr == VMA_NULL)
3625         {
3626             return false;
3627         }
3628         for (uint32_t j = i + 1; j < count; ++j)
3629         {
3630             if (iPtr == arr[j])
3631             {
3632                 return false;
3633             }
3634         }
3635     }
3636     return true;
3637 }
3638 
3639 template<typename MainT, typename NewT>
3640 static inline void VmaPnextChainPushFront(MainT* mainStruct, NewT* newStruct)
3641 {
3642     newStruct->pNext = mainStruct->pNext;
3643     mainStruct->pNext = newStruct;
3644 }
3645 
3646 // This is the main algorithm that guides the selection of the memory type best suited for an allocation -
3647 // converts usage to required/preferred/not preferred flags.
3648 static bool FindMemoryPreferences(
3649     bool isIntegratedGPU,
3650     const VmaAllocationCreateInfo& allocCreateInfo,
3651     VkFlags bufImgUsage, // VkBufferCreateInfo::usage or VkImageCreateInfo::usage. UINT32_MAX if unknown.
3652     VkMemoryPropertyFlags& outRequiredFlags,
3653     VkMemoryPropertyFlags& outPreferredFlags,
3654     VkMemoryPropertyFlags& outNotPreferredFlags)
3655 {
3656     outRequiredFlags = allocCreateInfo.requiredFlags;
3657     outPreferredFlags = allocCreateInfo.preferredFlags;
3658     outNotPreferredFlags = 0;
3659 
3660     switch(allocCreateInfo.usage)
3661     {
3662     case VMA_MEMORY_USAGE_UNKNOWN:
3663         break;
3664     case VMA_MEMORY_USAGE_GPU_ONLY:
3665         if(!isIntegratedGPU || (outPreferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
3666         {
3667             outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3668         }
3669         break;
3670     case VMA_MEMORY_USAGE_CPU_ONLY:
3671         outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
3672         break;
3673     case VMA_MEMORY_USAGE_CPU_TO_GPU:
3674         outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
3675         if(!isIntegratedGPU || (outPreferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
3676         {
3677             outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3678         }
3679         break;
3680     case VMA_MEMORY_USAGE_GPU_TO_CPU:
3681         outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
3682         outPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
3683         break;
3684     case VMA_MEMORY_USAGE_CPU_COPY:
3685         outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3686         break;
3687     case VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED:
3688         outRequiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
3689         break;
3690     case VMA_MEMORY_USAGE_AUTO:
3691     case VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE:
3692     case VMA_MEMORY_USAGE_AUTO_PREFER_HOST:
3693     {
3694         if(bufImgUsage == UINT32_MAX)
3695         {
3696             VMA_ASSERT(0 && "VMA_MEMORY_USAGE_AUTO* values can only be used with functions like vmaCreateBuffer, vmaCreateImage so that the details of the created resource are known.");
3697             return false;
3698         }
3699         // This relies on the values of VK_IMAGE_USAGE_TRANSFER* being the same as VK_BUFFER_USAGE_TRANSFER*.
3700         const bool deviceAccess = (bufImgUsage & ~(VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT)) != 0;
3701         const bool hostAccessSequentialWrite = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT) != 0;
3702         const bool hostAccessRandom = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT) != 0;
3703         const bool hostAccessAllowTransferInstead = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT) != 0;
3704         const bool preferDevice = allocCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE;
3705         const bool preferHost = allocCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_HOST;
3706 
3707         // CPU random access - e.g. a buffer written to or transferred from GPU to read back on CPU.
3708         if(hostAccessRandom)
3709         {
3710             if(!isIntegratedGPU && deviceAccess && hostAccessAllowTransferInstead && !preferHost)
3711             {
3712                 // Nice if it will end up in HOST_VISIBLE, but more importantly prefer DEVICE_LOCAL.
3713                 // Omitting HOST_VISIBLE here is intentional.
3714                 // In case there is DEVICE_LOCAL | HOST_VISIBLE | HOST_CACHED, it will pick that one.
3715                 // Otherwise, this will give same weight to DEVICE_LOCAL as HOST_VISIBLE | HOST_CACHED and select the former if occurs first on the list.
3716                 outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
3717             }
3718             else
3719             {
3720                 // Always CPU memory, cached.
3721                 outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
3722             }
3723         }
3724         // CPU sequential write - may be CPU or host-visible GPU memory, uncached and write-combined.
3725         else if(hostAccessSequentialWrite)
3726         {
3727             // Want uncached and write-combined.
3728             outNotPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
3729 
3730             if(!isIntegratedGPU && deviceAccess && hostAccessAllowTransferInstead && !preferHost)
3731             {
3732                 outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
3733             }
3734             else
3735             {
3736                 outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
3737                 // Direct GPU access, CPU sequential write (e.g. a dynamic uniform buffer updated every frame)
3738                 if(deviceAccess)
3739                 {
3740                     // Could go to CPU memory or GPU BAR/unified. Up to the user to decide. If no preference, choose GPU memory.
3741                     if(preferHost)
3742                         outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3743                     else
3744                         outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3745                 }
3746                 // GPU no direct access, CPU sequential write (e.g. an upload buffer to be transferred to the GPU)
3747                 else
3748                 {
3749                     // Could go to CPU memory or GPU BAR/unified. Up to the user to decide. If no preference, choose CPU memory.
3750                     if(preferDevice)
3751                         outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3752                     else
3753                         outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3754                 }
3755             }
3756         }
3757         // No CPU access
3758         else
3759         {
3760             // if(deviceAccess)
3761             //
3762             // GPU access, no CPU access (e.g. a color attachment image) - prefer GPU memory,
3763             // unless there is a clear preference from the user not to do so.
3764             //
3765             // else:
3766             //
3767             // No direct GPU access, no CPU access, just transfers.
3768             // It may be staging copy intended for e.g. preserving image for next frame (then better GPU memory) or
3769             // a "swap file" copy to free some GPU memory (then better CPU memory).
3770             // Up to the user to decide. If no preferece, assume the former and choose GPU memory.
3771 
3772             if(preferHost)
3773                 outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3774             else
3775                 outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3776         }
3777         break;
3778     }
3779     default:
3780         VMA_ASSERT(0);
3781     }
3782 
3783     // Avoid DEVICE_COHERENT unless explicitly requested.
3784     if(((allocCreateInfo.requiredFlags | allocCreateInfo.preferredFlags) &
3785         (VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)) == 0)
3786     {
3787         outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY;
3788     }
3789 
3790     return true;
3791 }
3792 
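// Illustrative sketch (not part of the library): how the VMA_MEMORY_USAGE_AUTO branch above
// resolves a typical staging buffer. Variable names here are hypothetical.
#if 0
VmaAllocationCreateInfo stagingCreateInfo = {};
stagingCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
stagingCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT;

VkMemoryPropertyFlags required = 0, preferred = 0, notPreferred = 0;
// A pure staging buffer has only TRANSFER_SRC usage, so deviceAccess == false and the
// "CPU sequential write, no direct GPU access" branch applies: required gets HOST_VISIBLE,
// notPreferred gets HOST_CACHED | DEVICE_LOCAL (write-combined CPU memory wins).
FindMemoryPreferences(
    /*isIntegratedGPU=*/false,
    stagingCreateInfo,
    VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
    required, preferred, notPreferred);
#endif
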
////////////////////////////////////////////////////////////////////////////////
// Memory allocation

static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    void* result = VMA_NULL;
    if ((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        result = (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        result = VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
    VMA_ASSERT(result != VMA_NULL && "CPU memory allocation failed.");
    return result;
}

static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if ((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_ALIGNED_FREE(ptr);
    }
}

template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}

template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}

#define vma_new(allocator, type)   new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count)   new(VmaAllocateArray<type>((allocator), (count)))(type)

template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}

template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if (ptr != VMA_NULL)
    {
        for (size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}

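// Illustrative sketch (not part of the library): vma_new/vma_delete pair callback-aware
// allocation with explicit construction/destruction via placement new. MyHelper is hypothetical.
#if 0
struct MyHelper { int value; };

const VkAllocationCallbacks* allocs = VMA_NULL; // VMA_NULL falls back to system aligned malloc/free.
MyHelper* p = vma_new(allocs, MyHelper)();      // Allocate raw memory, then construct in place.
vma_delete(allocs, p);                          // Destroy, then free through the same callbacks.
#endif
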
static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr)
{
    if (srcStr != VMA_NULL)
    {
        const size_t len = strlen(srcStr);
        char* const result = vma_new_array(allocs, char, len + 1);
        memcpy(result, srcStr, len + 1);
        return result;
    }
    return VMA_NULL;
}

#if VMA_STATS_STRING_ENABLED
static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr, size_t strLen)
{
    if (srcStr != VMA_NULL)
    {
        char* const result = vma_new_array(allocs, char, strLen + 1);
        memcpy(result, srcStr, strLen);
        result[strLen] = '\0';
        return result;
    }
    return VMA_NULL;
}
#endif // VMA_STATS_STRING_ENABLED

static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str)
{
    if (str != VMA_NULL)
    {
        const size_t len = strlen(str);
        vma_delete_array(allocs, str, len + 1);
    }
}

template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        CmpLess()) - vector.data();
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}

template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    if ((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    {
        size_t indexToRemove = it - vector.begin();
        VmaVectorRemove(vector, indexToRemove);
        return true;
    }
    return false;
}
#endif // _VMA_FUNCTIONS

#ifndef _VMA_STATISTICS_FUNCTIONS

static void VmaClearStatistics(VmaStatistics& outStats)
{
    outStats.blockCount = 0;
    outStats.allocationCount = 0;
    outStats.blockBytes = 0;
    outStats.allocationBytes = 0;
}

static void VmaAddStatistics(VmaStatistics& inoutStats, const VmaStatistics& src)
{
    inoutStats.blockCount += src.blockCount;
    inoutStats.allocationCount += src.allocationCount;
    inoutStats.blockBytes += src.blockBytes;
    inoutStats.allocationBytes += src.allocationBytes;
}

static void VmaClearDetailedStatistics(VmaDetailedStatistics& outStats)
{
    VmaClearStatistics(outStats.statistics);
    outStats.unusedRangeCount = 0;
    outStats.allocationSizeMin = VK_WHOLE_SIZE;
    outStats.allocationSizeMax = 0;
    outStats.unusedRangeSizeMin = VK_WHOLE_SIZE;
    outStats.unusedRangeSizeMax = 0;
}

static void VmaAddDetailedStatisticsAllocation(VmaDetailedStatistics& inoutStats, VkDeviceSize size)
{
    inoutStats.statistics.allocationCount++;
    inoutStats.statistics.allocationBytes += size;
    inoutStats.allocationSizeMin = VMA_MIN(inoutStats.allocationSizeMin, size);
    inoutStats.allocationSizeMax = VMA_MAX(inoutStats.allocationSizeMax, size);
}

static void VmaAddDetailedStatisticsUnusedRange(VmaDetailedStatistics& inoutStats, VkDeviceSize size)
{
    inoutStats.unusedRangeCount++;
    inoutStats.unusedRangeSizeMin = VMA_MIN(inoutStats.unusedRangeSizeMin, size);
    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, size);
}

static void VmaAddDetailedStatistics(VmaDetailedStatistics& inoutStats, const VmaDetailedStatistics& src)
{
    VmaAddStatistics(inoutStats.statistics, src.statistics);
    inoutStats.unusedRangeCount += src.unusedRangeCount;
    inoutStats.allocationSizeMin = VMA_MIN(inoutStats.allocationSizeMin, src.allocationSizeMin);
    inoutStats.allocationSizeMax = VMA_MAX(inoutStats.allocationSizeMax, src.allocationSizeMax);
    inoutStats.unusedRangeSizeMin = VMA_MIN(inoutStats.unusedRangeSizeMin, src.unusedRangeSizeMin);
    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, src.unusedRangeSizeMax);
}

#endif // _VMA_STATISTICS_FUNCTIONS
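
// Illustrative sketch (not part of the library): detailed statistics are designed to be merged
// bottom-up - clear, accumulate per allocation/unused range, then fold partial results into a total.
#if 0
VmaDetailedStatistics blockStats;
VmaClearDetailedStatistics(blockStats); // Min fields start at VK_WHOLE_SIZE so VMA_MIN works.
VmaAddDetailedStatisticsAllocation(blockStats, 256);
VmaAddDetailedStatisticsAllocation(blockStats, 1024);
VmaAddDetailedStatisticsUnusedRange(blockStats, 512);

VmaDetailedStatistics totalStats;
VmaClearDetailedStatistics(totalStats);
VmaAddDetailedStatistics(totalStats, blockStats); // allocationCount == 2, allocationBytes == 1280.
#endif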

#ifndef _VMA_MUTEX_LOCK
// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
    VMA_CLASS_NO_COPY_NO_MOVE(VmaMutexLock)
public:
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    {
        if (m_pMutex) { m_pMutex->Lock(); }
    }
    ~VmaMutexLock() { if (m_pMutex) { m_pMutex->Unlock(); } }

private:
    VMA_MUTEX* m_pMutex;
};

// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
struct VmaMutexLockRead
{
    VMA_CLASS_NO_COPY_NO_MOVE(VmaMutexLockRead)
public:
    VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    {
        if (m_pMutex) { m_pMutex->LockRead(); }
    }
    ~VmaMutexLockRead() { if (m_pMutex) { m_pMutex->UnlockRead(); } }

private:
    VMA_RW_MUTEX* m_pMutex;
};

// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
struct VmaMutexLockWrite
{
    VMA_CLASS_NO_COPY_NO_MOVE(VmaMutexLockWrite)
public:
    VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex)
        : m_pMutex(useMutex ? &mutex : VMA_NULL)
    {
        if (m_pMutex) { m_pMutex->LockWrite(); }
    }
    ~VmaMutexLockWrite() { if (m_pMutex) { m_pMutex->UnlockWrite(); } }

private:
    VMA_RW_MUTEX* m_pMutex;
};

#if VMA_DEBUG_GLOBAL_MUTEX
    static VMA_MUTEX gDebugGlobalMutex;
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
#else
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif
#endif // _VMA_MUTEX_LOCK
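
// Illustrative sketch (not part of the library): the useMutex flag lets the same RAII guard
// compile down to a no-op when synchronization is disabled (e.g. a single-threaded allocator).
// The function and its body are hypothetical.
#if 0
void UpdateSharedState(VMA_RW_MUTEX& rwMutex, bool useMutex)
{
    VmaMutexLockWrite lock(rwMutex, useMutex); // Locks only if useMutex == true.
    // ... mutate state; the destructor unlocks on every path out of this scope.
}
#endif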

#ifndef _VMA_ATOMIC_TRANSACTIONAL_INCREMENT
// An object that increments the given atomic but decrements it back in the destructor unless Commit() is called.
template<typename AtomicT>
struct AtomicTransactionalIncrement
{
public:
    using T = decltype(AtomicT().load());

    ~AtomicTransactionalIncrement()
    {
        if(m_Atomic)
            --(*m_Atomic);
    }

    void Commit() { m_Atomic = nullptr; }
    T Increment(AtomicT* atomic)
    {
        m_Atomic = atomic;
        return m_Atomic->fetch_add(1);
    }

private:
    AtomicT* m_Atomic = nullptr;
};
#endif // _VMA_ATOMIC_TRANSACTIONAL_INCREMENT
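
// Illustrative sketch (not part of the library): the transactional increment guards a counter
// across a fallible operation - on failure the destructor rolls the counter back automatically.
// g_objectCount and TryCreateObject are hypothetical.
#if 0
std::atomic<uint32_t> g_objectCount{0};

bool TryCreateObject()
{
    AtomicTransactionalIncrement<std::atomic<uint32_t>> increment;
    increment.Increment(&g_objectCount);
    if (/* creation failed */ false)
        return false;   // Destructor decrements g_objectCount back.
    increment.Commit(); // Keep the increment; destructor becomes a no-op.
    return true;
}
#endif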

#ifndef _VMA_STL_ALLOCATOR
// STL-compatible allocator.
template<typename T>
struct VmaStlAllocator
{
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) {}
    template<typename U>
    VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) {}
    VmaStlAllocator(const VmaStlAllocator&) = default;
    VmaStlAllocator& operator=(const VmaStlAllocator&) = delete;

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }
};
#endif // _VMA_STL_ALLOCATOR
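
// Illustrative sketch (not part of the library): because VmaStlAllocator models the minimal
// standard allocator requirements (value_type, allocate, deallocate, rebinding constructor),
// it should also be usable with standard containers, not only the VMA containers below.
#if 0
#include <vector>
const VkAllocationCallbacks* allocs = VMA_NULL;
std::vector<uint32_t, VmaStlAllocator<uint32_t>> indices{VmaStlAllocator<uint32_t>(allocs)};
indices.push_back(7); // All heap traffic goes through VmaMalloc/VmaFree.
#endif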

#ifndef _VMA_VECTOR
/* Class with interface compatible with a subset of std::vector.
T must be POD because constructors and destructors are not called and memcpy is
used for these objects. */
template<typename T, typename AllocatorT>
class VmaVector
{
public:
    typedef T value_type;
    typedef T* iterator;
    typedef const T* const_iterator;

    VmaVector(const AllocatorT& allocator);
    VmaVector(size_t count, const AllocatorT& allocator);
    // This version of the constructor is here for compatibility with pre-C++14 std::vector.
    // value is unused.
    VmaVector(size_t count, const T& value, const AllocatorT& allocator) : VmaVector(count, allocator) {}
    VmaVector(const VmaVector<T, AllocatorT>& src);
    VmaVector& operator=(const VmaVector& rhs);
    ~VmaVector() { VmaFree(m_Allocator.m_pCallbacks, m_pArray); }

    bool empty() const { return m_Count == 0; }
    size_t size() const { return m_Count; }
    T* data() { return m_pArray; }
    T& front() { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[0]; }
    T& back() { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[m_Count - 1]; }
    const T* data() const { return m_pArray; }
    const T& front() const { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[0]; }
    const T& back() const { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[m_Count - 1]; }

    iterator begin() { return m_pArray; }
    iterator end() { return m_pArray + m_Count; }
    const_iterator cbegin() const { return m_pArray; }
    const_iterator cend() const { return m_pArray + m_Count; }
    const_iterator begin() const { return cbegin(); }
    const_iterator end() const { return cend(); }

    void pop_front() { VMA_HEAVY_ASSERT(m_Count > 0); remove(0); }
    void pop_back() { VMA_HEAVY_ASSERT(m_Count > 0); resize(size() - 1); }
    void push_front(const T& src) { insert(0, src); }

    void push_back(const T& src);
    void reserve(size_t newCapacity, bool freeMemory = false);
    void resize(size_t newCount);
    void clear() { resize(0); }
    void shrink_to_fit();
    void insert(size_t index, const T& src);
    void remove(size_t index);

    T& operator[](size_t index) { VMA_HEAVY_ASSERT(index < m_Count); return m_pArray[index]; }
    const T& operator[](size_t index) const { VMA_HEAVY_ASSERT(index < m_Count); return m_pArray[index]; }

private:
    AllocatorT m_Allocator;
    T* m_pArray;
    size_t m_Count;
    size_t m_Capacity;
};

#ifndef _VMA_VECTOR_FUNCTIONS
template<typename T, typename AllocatorT>
VmaVector<T, AllocatorT>::VmaVector(const AllocatorT& allocator)
    : m_Allocator(allocator),
    m_pArray(VMA_NULL),
    m_Count(0),
    m_Capacity(0) {}

template<typename T, typename AllocatorT>
VmaVector<T, AllocatorT>::VmaVector(size_t count, const AllocatorT& allocator)
    : m_Allocator(allocator),
    m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    m_Count(count),
    m_Capacity(count) {}

template<typename T, typename AllocatorT>
VmaVector<T, AllocatorT>::VmaVector(const VmaVector& src)
    : m_Allocator(src.m_Allocator),
    m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    m_Count(src.m_Count),
    m_Capacity(src.m_Count)
{
    if (m_Count != 0)
    {
        memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    }
}

template<typename T, typename AllocatorT>
VmaVector<T, AllocatorT>& VmaVector<T, AllocatorT>::operator=(const VmaVector& rhs)
{
    if (&rhs != this)
    {
        resize(rhs.m_Count);
        if (m_Count != 0)
        {
            memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
        }
    }
    return *this;
}

template<typename T, typename AllocatorT>
void VmaVector<T, AllocatorT>::push_back(const T& src)
{
    const size_t newIndex = size();
    resize(newIndex + 1);
    m_pArray[newIndex] = src;
}

template<typename T, typename AllocatorT>
void VmaVector<T, AllocatorT>::reserve(size_t newCapacity, bool freeMemory)
{
    newCapacity = VMA_MAX(newCapacity, m_Count);

    if ((newCapacity < m_Capacity) && !freeMemory)
    {
        newCapacity = m_Capacity;
    }

    if (newCapacity != m_Capacity)
    {
        T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
        if (m_Count != 0)
        {
            memcpy(newArray, m_pArray, m_Count * sizeof(T));
        }
        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
        m_Capacity = newCapacity;
        m_pArray = newArray;
    }
}

template<typename T, typename AllocatorT>
void VmaVector<T, AllocatorT>::resize(size_t newCount)
{
    size_t newCapacity = m_Capacity;
    if (newCount > m_Capacity)
    {
        newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    }

    if (newCapacity != m_Capacity)
    {
        T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
        const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
        if (elementsToCopy != 0)
        {
            memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
        }
        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
        m_Capacity = newCapacity;
        m_pArray = newArray;
    }

    m_Count = newCount;
}

template<typename T, typename AllocatorT>
void VmaVector<T, AllocatorT>::shrink_to_fit()
{
    if (m_Capacity > m_Count)
    {
        T* newArray = VMA_NULL;
        if (m_Count > 0)
        {
            newArray = VmaAllocateArray<T>(m_Allocator.m_pCallbacks, m_Count);
            memcpy(newArray, m_pArray, m_Count * sizeof(T));
        }
        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
        m_Capacity = m_Count;
        m_pArray = newArray;
    }
}

template<typename T, typename AllocatorT>
void VmaVector<T, AllocatorT>::insert(size_t index, const T& src)
{
    VMA_HEAVY_ASSERT(index <= m_Count);
    const size_t oldCount = size();
    resize(oldCount + 1);
    if (index < oldCount)
    {
        memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    }
    m_pArray[index] = src;
}

template<typename T, typename AllocatorT>
void VmaVector<T, AllocatorT>::remove(size_t index)
{
    VMA_HEAVY_ASSERT(index < m_Count);
    const size_t oldCount = size();
    if (index < oldCount - 1)
    {
        memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    }
    resize(oldCount - 1);
}
#endif // _VMA_VECTOR_FUNCTIONS

template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
#endif // _VMA_VECTOR
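
// Illustrative sketch (not part of the library): VmaVector requires trivially copyable (POD)
// element types because resize/insert move elements with memcpy/memmove and never run
// constructors or destructors.
#if 0
VmaVector<uint32_t, VmaStlAllocator<uint32_t>> v{VmaStlAllocator<uint32_t>(VMA_NULL)};
v.push_back(3);
v.insert(0, 1);     // Shifts existing elements with memmove.
v.remove(0);        // Same, in the other direction.
v.shrink_to_fit();  // Reallocates so capacity == size.
#endif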

#ifndef _VMA_SMALL_VECTOR
/*
This is a vector (a variable-sized array), optimized for the case when the array is small.

It contains some number of elements in-place, which allows it to avoid heap allocation
when the actual number of elements is below that threshold. This allows normal "small"
cases to be fast without losing generality for large inputs.
*/
template<typename T, typename AllocatorT, size_t N>
class VmaSmallVector
{
public:
    typedef T value_type;
    typedef T* iterator;

    VmaSmallVector(const AllocatorT& allocator);
    VmaSmallVector(size_t count, const AllocatorT& allocator);
    template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
    VmaSmallVector(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>&) = delete;
    template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
    VmaSmallVector<T, AllocatorT, N>& operator=(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>&) = delete;
    ~VmaSmallVector() = default;

    bool empty() const { return m_Count == 0; }
    size_t size() const { return m_Count; }
    T* data() { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
    T& front() { VMA_HEAVY_ASSERT(m_Count > 0); return data()[0]; }
    T& back() { VMA_HEAVY_ASSERT(m_Count > 0); return data()[m_Count - 1]; }
    const T* data() const { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
    const T& front() const { VMA_HEAVY_ASSERT(m_Count > 0); return data()[0]; }
    const T& back() const { VMA_HEAVY_ASSERT(m_Count > 0); return data()[m_Count - 1]; }

    iterator begin() { return data(); }
    iterator end() { return data() + m_Count; }

    void pop_front() { VMA_HEAVY_ASSERT(m_Count > 0); remove(0); }
    void pop_back() { VMA_HEAVY_ASSERT(m_Count > 0); resize(size() - 1); }
    void push_front(const T& src) { insert(0, src); }

    void push_back(const T& src);
    void resize(size_t newCount, bool freeMemory = false);
    void clear(bool freeMemory = false);
    void insert(size_t index, const T& src);
    void remove(size_t index);

    T& operator[](size_t index) { VMA_HEAVY_ASSERT(index < m_Count); return data()[index]; }
    const T& operator[](size_t index) const { VMA_HEAVY_ASSERT(index < m_Count); return data()[index]; }

private:
    size_t m_Count;
    T m_StaticArray[N]; // Used when m_Count <= N
    VmaVector<T, AllocatorT> m_DynamicArray; // Used when m_Count > N
};

#ifndef _VMA_SMALL_VECTOR_FUNCTIONS
template<typename T, typename AllocatorT, size_t N>
VmaSmallVector<T, AllocatorT, N>::VmaSmallVector(const AllocatorT& allocator)
    : m_Count(0),
    m_DynamicArray(allocator) {}

template<typename T, typename AllocatorT, size_t N>
VmaSmallVector<T, AllocatorT, N>::VmaSmallVector(size_t count, const AllocatorT& allocator)
    : m_Count(count),
    m_DynamicArray(count > N ? count : 0, allocator) {}

template<typename T, typename AllocatorT, size_t N>
void VmaSmallVector<T, AllocatorT, N>::push_back(const T& src)
{
    const size_t newIndex = size();
    resize(newIndex + 1);
    data()[newIndex] = src;
}

template<typename T, typename AllocatorT, size_t N>
void VmaSmallVector<T, AllocatorT, N>::resize(size_t newCount, bool freeMemory)
{
    if (newCount > N && m_Count > N)
    {
        // Any direction, staying in m_DynamicArray
        m_DynamicArray.resize(newCount);
        if (freeMemory)
        {
            m_DynamicArray.shrink_to_fit();
        }
    }
    else if (newCount > N && m_Count <= N)
    {
        // Growing, moving from m_StaticArray to m_DynamicArray
        m_DynamicArray.resize(newCount);
        if (m_Count > 0)
        {
            memcpy(m_DynamicArray.data(), m_StaticArray, m_Count * sizeof(T));
        }
    }
    else if (newCount <= N && m_Count > N)
    {
        // Shrinking, moving from m_DynamicArray to m_StaticArray
        if (newCount > 0)
        {
            memcpy(m_StaticArray, m_DynamicArray.data(), newCount * sizeof(T));
        }
        m_DynamicArray.resize(0);
        if (freeMemory)
        {
            m_DynamicArray.shrink_to_fit();
        }
    }
    else
    {
        // Any direction, staying in m_StaticArray - nothing to do here
    }
    m_Count = newCount;
}

template<typename T, typename AllocatorT, size_t N>
void VmaSmallVector<T, AllocatorT, N>::clear(bool freeMemory)
{
    m_DynamicArray.clear();
    if (freeMemory)
    {
        m_DynamicArray.shrink_to_fit();
    }
    m_Count = 0;
}

template<typename T, typename AllocatorT, size_t N>
void VmaSmallVector<T, AllocatorT, N>::insert(size_t index, const T& src)
{
    VMA_HEAVY_ASSERT(index <= m_Count);
    const size_t oldCount = size();
    resize(oldCount + 1);
    T* const dataPtr = data();
    if (index < oldCount)
    {
        // This could be further optimized to memcpy directly from m_StaticArray to m_DynamicArray.
        memmove(dataPtr + (index + 1), dataPtr + index, (oldCount - index) * sizeof(T));
    }
    dataPtr[index] = src;
}

template<typename T, typename AllocatorT, size_t N>
void VmaSmallVector<T, AllocatorT, N>::remove(size_t index)
{
    VMA_HEAVY_ASSERT(index < m_Count);
    const size_t oldCount = size();
    if (index < oldCount - 1)
    {
        // This could be further optimized to memcpy directly from m_DynamicArray to m_StaticArray.
        T* const dataPtr = data();
        memmove(dataPtr + index, dataPtr + (index + 1), (oldCount - index - 1) * sizeof(T));
    }
    resize(oldCount - 1);
}
#endif // _VMA_SMALL_VECTOR_FUNCTIONS
#endif // _VMA_SMALL_VECTOR
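
// Illustrative sketch (not part of the library): with N in-place slots, a VmaSmallVector avoids
// heap allocation until the element count exceeds N, then spills into its inner VmaVector.
#if 0
VmaSmallVector<uint32_t, VmaStlAllocator<uint32_t>, 4> sv{VmaStlAllocator<uint32_t>(VMA_NULL)};
for (uint32_t i = 0; i < 4; ++i)
    sv.push_back(i);  // Still entirely in m_StaticArray.
sv.push_back(4);      // Count > N: elements are memcpy'd into m_DynamicArray.
sv.resize(2, true);   // Back into m_StaticArray; true also releases the heap block.
#endif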

#ifndef _VMA_POOL_ALLOCATOR
/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. The number of elements that can be allocated is not bounded because
the allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY_NO_MOVE(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
    ~VmaPoolAllocator();
    template<typename... Types> T* Alloc(Types&&... args);
    void Free(T* ptr);

private:
    union Item
    {
        uint32_t NextFreeIndex;
        alignas(T) char Value[sizeof(T)];
    };
    struct ItemBlock
    {
        Item* pItems;
        uint32_t Capacity;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    const uint32_t m_FirstBlockCapacity;
    VmaVector<ItemBlock, VmaStlAllocator<ItemBlock>> m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};

#ifndef _VMA_POOL_ALLOCATOR_FUNCTIONS
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity)
    : m_pAllocationCallbacks(pAllocationCallbacks),
    m_FirstBlockCapacity(firstBlockCapacity),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(m_FirstBlockCapacity > 1);
}

template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    for (size_t i = m_ItemBlocks.size(); i--;)
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
    m_ItemBlocks.clear();
}

template<typename T>
template<typename... Types> T* VmaPoolAllocator<T>::Alloc(Types&&... args)
{
    for (size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if (block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            block.FirstFreeIndex = pItem->NextFreeIndex;
            T* result = (T*)&pItem->Value;
            new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
            return result;
        }
    }

    // No block has a free item: Create a new block and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    T* result = (T*)&pItem->Value;
    new(result) T(std::forward<Types>(args)...); // Explicit constructor call.
    return result;
}

template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for (size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if ((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
        {
            ptr->~T(); // Explicit destructor call.
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}

template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
        m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;

    const ItemBlock newBlock =
    {
        vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
        newBlockCapacity,
        0
    };

    m_ItemBlocks.push_back(newBlock);

    // Set up a singly-linked list of all free items in this block.
    for (uint32_t i = 0; i < newBlockCapacity - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}
#endif // _VMA_POOL_ALLOCATOR_FUNCTIONS
#endif // _VMA_POOL_ALLOCATOR
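
// Illustrative sketch (not part of the library): the pool allocator recycles fixed-size slots
// through a free list embedded in the slots themselves (the Item union), so Alloc/Free never
// touch the system heap once a block exists. Node is a hypothetical payload type.
#if 0
struct Node { int key; Node(int k) : key(k) {} };

VmaPoolAllocator<Node> pool(VMA_NULL, 32); // First block holds 32 items; later blocks grow 1.5x.
Node* a = pool.Alloc(1);                   // Placement-new with forwarded constructor args.
Node* b = pool.Alloc(2);
pool.Free(a);                              // Slot returns to the owning block's free list.
Node* c = pool.Alloc(3);                   // Likely reuses a's slot.
pool.Free(b);
pool.Free(c);
#endif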

#ifndef _VMA_RAW_LIST
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};

// Doubly linked list.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY_NO_MOVE(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
    ~VmaRawList() = default;

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Front() const { return m_pFront; }
    const ItemType* Back() const { return m_pBack; }

    ItemType* PushFront();
    ItemType* PushBack();
    ItemType* PushFront(const T& value);
    ItemType* PushBack(const T& value);
    void PopFront();
    void PopBack();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);
    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Clear();
    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;
};

#ifndef _VMA_RAW_LIST_FUNCTIONS
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks)
    : m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0) {}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pPrev = VMA_NULL;
    if (IsEmpty())
    {
        pNewItem->pNext = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pNext = m_pFront;
        m_pFront->pPrev = pNewItem;
        m_pFront = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pNext = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pPrev = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pPrev = m_pBack;
        m_pBack->pNext = pNewItem;
        m_pBack = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
void VmaRawList<T>::PopFront()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pFrontItem = m_pFront;
    ItemType* const pNextItem = pFrontItem->pNext;
    if (pNextItem != VMA_NULL)
    {
        pNextItem->pPrev = VMA_NULL;
    }
    m_pFront = pNextItem;
    m_ItemAllocator.Free(pFrontItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::PopBack()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pBackItem = m_pBack;
    ItemType* const pPrevItem = pBackItem->pPrev;
    if(pPrevItem != VMA_NULL)
    {
        pPrevItem->pNext = VMA_NULL;
    }
    m_pBack = pPrevItem;
    m_ItemAllocator.Free(pBackItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::Clear()
{
    if (IsEmpty() == false)
    {
        ItemType* pItem = m_pBack;
        while (pItem != VMA_NULL)
        {
            ItemType* const pPrevItem = pItem->pPrev;
            m_ItemAllocator.Free(pItem);
            pItem = pPrevItem;
        }
        m_pFront = VMA_NULL;
        m_pBack = VMA_NULL;
        m_Count = 0;
    }
}

template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}
#endif // _VMA_RAW_LIST_FUNCTIONS
#endif // _VMA_RAW_LIST
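
// Illustrative sketch (not part of the library): VmaRawList hands out stable VmaListItem<T>
// pointers, so a caller can keep an item handle and later remove it in O(1).
#if 0
VmaRawList<uint32_t> list(VMA_NULL);
VmaListItem<uint32_t>* first = list.PushBack(10);
list.PushBack(30);
list.InsertAfter(first, 20); // 10, 20, 30
list.Remove(first);          // 20, 30 - no traversal needed.
list.Clear();                // Returns the remaining items to the pool allocator.
#endif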

#ifndef _VMA_LIST
template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY_NO_MOVE(VmaList)
public:
    class reverse_iterator;
    class const_iterator;
    class const_reverse_iterator;

    class iterator
    {
        friend class const_iterator;
        friend class VmaList<T, AllocatorT>;
    public:
        iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}
        iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}

        T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }
        T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }

        bool operator==(const iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }
        bool operator!=(const iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }

        iterator operator++(int) { iterator result = *this; ++*this; return result; }
        iterator operator--(int) { iterator result = *this; --*this; return result; }

        iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pNext; return *this; }
        iterator& operator--();

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) : m_pList(pList), m_pItem(pItem) {}
    };
    class reverse_iterator
    {
        friend class const_reverse_iterator;
        friend class VmaList<T, AllocatorT>;
    public:
        reverse_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}
        reverse_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}

        T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }
        T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }

        bool operator==(const reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }
        bool operator!=(const reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }

        reverse_iterator operator++(int) { reverse_iterator result = *this; ++*this; return result; }
        reverse_iterator operator--(int) { reverse_iterator result = *this; --*this; return result; }

        reverse_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pPrev; return *this; }
        reverse_iterator& operator--();

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        reverse_iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) : m_pList(pList), m_pItem(pItem) {}
    };
    class const_iterator
    {
        friend class VmaList<T, AllocatorT>;
    public:
        const_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}
        const_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
        const_iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}

        iterator drop_const() { return { const_cast<VmaRawList<T>*>(m_pList), const_cast<VmaListItem<T>*>(m_pItem) }; }

        const T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }
        const T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }

        bool operator==(const const_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }
        bool operator!=(const const_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }

        const_iterator operator++(int) { const_iterator result = *this; ++*this; return result; }
        const_iterator operator--(int) { const_iterator result = *this; --*this; return result; }

        const_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pNext; return *this; }
        const_iterator& operator--();

    private:
        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) : m_pList(pList), m_pItem(pItem) {}
    };
    class const_reverse_iterator
    {
        friend class VmaList<T, AllocatorT>;
    public:
        const_reverse_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}
        const_reverse_iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
        const_reverse_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}

        reverse_iterator drop_const() { return { const_cast<VmaRawList<T>*>(m_pList), const_cast<VmaListItem<T>*>(m_pItem) }; }

        const T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }
        const T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }

        bool operator==(const const_reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }
        bool operator!=(const const_reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }

        const_reverse_iterator operator++(int) { const_reverse_iterator result = *this; ++*this; return result; }
        const_reverse_iterator operator--(int) { const_reverse_iterator result = *this; --*this; return result; }

        const_reverse_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pPrev; return *this; }
        const_reverse_iterator& operator--();

    private:
        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        const_reverse_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) : m_pList(pList), m_pItem(pItem) {}
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) {}

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    const_iterator begin() const { return cbegin(); }
    const_iterator end() const { return cend(); }

    reverse_iterator rbegin() { return reverse_iterator(&m_RawList, m_RawList.Back()); }
    reverse_iterator rend() { return reverse_iterator(&m_RawList, VMA_NULL); }

    const_reverse_iterator crbegin() const { return const_reverse_iterator(&m_RawList, m_RawList.Back()); }
    const_reverse_iterator crend() const { return const_reverse_iterator(&m_RawList, VMA_NULL); }

    const_reverse_iterator rbegin() const { return crbegin(); }
    const_reverse_iterator rend() const { return crend(); }

    void push_back(const T& value) { m_RawList.PushBack(value); }
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

    void clear() { m_RawList.Clear(); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }

private:
    VmaRawList<T> m_RawList;
};

#ifndef _VMA_LIST_FUNCTIONS
template<typename T, typename AllocatorT>
typename VmaList<T, AllocatorT>::iterator& VmaList<T, AllocatorT>::iterator::operator--()
{
    if (m_pItem != VMA_NULL)
    {
        m_pItem = m_pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
        m_pItem = m_pList->Back();
    }
    return *this;
}

template<typename T, typename AllocatorT>
typename VmaList<T, AllocatorT>::reverse_iterator& VmaList<T, AllocatorT>::reverse_iterator::operator--()
{
    if (m_pItem != VMA_NULL)
    {
        m_pItem = m_pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
        m_pItem = m_pList->Front();
    }
    return *this;
}

template<typename T, typename AllocatorT>
typename VmaList<T, AllocatorT>::const_iterator& VmaList<T, AllocatorT>::const_iterator::operator--()
{
    if (m_pItem != VMA_NULL)
    {
        m_pItem = m_pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
        m_pItem = m_pList->Back();
    }
    return *this;
}

template<typename T, typename AllocatorT>
typename VmaList<T, AllocatorT>::const_reverse_iterator& VmaList<T, AllocatorT>::const_reverse_iterator::operator--()
{
    if (m_pItem != VMA_NULL)
    {
        m_pItem = m_pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
        // Decrementing rend() must yield the last position in reverse order, which is the
        // front of the underlying list (mirrors the non-const reverse_iterator above).
        m_pItem = m_pList->Front();
    }
    return *this;
}
#endif // _VMA_LIST_FUNCTIONS
#endif // _VMA_LIST
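
// Illustrative sketch (not part of the library): VmaList is the std::list-style facade over
// VmaRawList; its iterators also carry the list pointer so mismatched begin()/end() pairs can
// be caught by VMA_HEAVY_ASSERT in debug builds.
#if 0
VmaList<uint32_t, VmaStlAllocator<uint32_t>> list{VmaStlAllocator<uint32_t>(VMA_NULL)};
list.push_back(1);
list.push_back(2);
for (VmaList<uint32_t, VmaStlAllocator<uint32_t>>::iterator it = list.begin(); it != list.end(); ++it)
{
    // *it visits 1 then 2; list.erase(it) would unlink the node in O(1).
}
list.clear();
#endif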

#ifndef _VMA_INTRUSIVE_LINKED_LIST
/*
Expected interface of ItemTypeTraits:
struct MyItemTypeTraits
{
    typedef MyItem ItemType;
    static ItemType* GetPrev(const ItemType* item) { return item->myPrevPtr; }
    static ItemType* GetNext(const ItemType* item) { return item->myNextPtr; }
    static ItemType*& AccessPrev(ItemType* item) { return item->myPrevPtr; }
    static ItemType*& AccessNext(ItemType* item) { return item->myNextPtr; }
};
*/
template<typename ItemTypeTraits>
class VmaIntrusiveLinkedList
{
public:
    typedef typename ItemTypeTraits::ItemType ItemType;
    static ItemType* GetPrev(const ItemType* item) { return ItemTypeTraits::GetPrev(item); }
    static ItemType* GetNext(const ItemType* item) { return ItemTypeTraits::GetNext(item); }

    // Movable, not copyable.
    VmaIntrusiveLinkedList() = default;
    VmaIntrusiveLinkedList(VmaIntrusiveLinkedList&& src);
    VmaIntrusiveLinkedList(const VmaIntrusiveLinkedList&) = delete;
    VmaIntrusiveLinkedList& operator=(VmaIntrusiveLinkedList&& src);
    VmaIntrusiveLinkedList& operator=(const VmaIntrusiveLinkedList&) = delete;
    ~VmaIntrusiveLinkedList() { VMA_HEAVY_ASSERT(IsEmpty()); }

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }
    ItemType* Front() { return m_Front; }
    ItemType* Back() { return m_Back; }
    const ItemType* Front() const { return m_Front; }
    const ItemType* Back() const { return m_Back; }

    void PushBack(ItemType* item);
    void PushFront(ItemType* item);
    ItemType* PopBack();
    ItemType* PopFront();

    // existingItem can be null - then this is equivalent to PushBack.
    void InsertBefore(ItemType* existingItem, ItemType* newItem);
    // existingItem can be null - then this is equivalent to PushFront.
    void InsertAfter(ItemType* existingItem, ItemType* newItem);
    void Remove(ItemType* item);
    void RemoveAll();

private:
    ItemType* m_Front = VMA_NULL;
    ItemType* m_Back = VMA_NULL;
    size_t m_Count = 0;
};

#ifndef _VMA_INTRUSIVE_LINKED_LIST_FUNCTIONS
template<typename ItemTypeTraits>
VmaIntrusiveLinkedList<ItemTypeTraits>::VmaIntrusiveLinkedList(VmaIntrusiveLinkedList&& src)
    : m_Front(src.m_Front), m_Back(src.m_Back), m_Count(src.m_Count)
{
    src.m_Front = src.m_Back = VMA_NULL;
    src.m_Count = 0;
}

template<typename ItemTypeTraits>
VmaIntrusiveLinkedList<ItemTypeTraits>& VmaIntrusiveLinkedList<ItemTypeTraits>::operator=(VmaIntrusiveLinkedList&& src)
{
    if (&src != this)
    {
        VMA_HEAVY_ASSERT(IsEmpty());
        m_Front = src.m_Front;
        m_Back = src.m_Back;
        m_Count = src.m_Count;
        src.m_Front = src.m_Back = VMA_NULL;
        src.m_Count = 0;
    }
    return *this;
}

template<typename ItemTypeTraits>
void VmaIntrusiveLinkedList<ItemTypeTraits>::PushBack(ItemType* item)
{
    VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL);
    if (IsEmpty())
    {
        m_Front = item;
        m_Back = item;
        m_Count = 1;
    }
    else
    {
        ItemTypeTraits::AccessPrev(item) = m_Back;
        ItemTypeTraits::AccessNext(m_Back) = item;
        m_Back = item;
        ++m_Count;
    }
}

template<typename ItemTypeTraits>
void VmaIntrusiveLinkedList<ItemTypeTraits>::PushFront(ItemType* item)
{
    VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL);
    if (IsEmpty())
    {
        m_Front = item;
        m_Back = item;
        m_Count = 1;
    }
    else
    {
        ItemTypeTraits::AccessNext(item) = m_Front;
        ItemTypeTraits::AccessPrev(m_Front) = item;
        m_Front = item;
        ++m_Count;
    }
}

template<typename ItemTypeTraits>
typename VmaIntrusiveLinkedList<ItemTypeTraits>::ItemType* VmaIntrusiveLinkedList<ItemTypeTraits>::PopBack()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const backItem = m_Back;
    ItemType* const prevItem = ItemTypeTraits::GetPrev(backItem);
    if (prevItem != VMA_NULL)
    {
        ItemTypeTraits::AccessNext(prevItem) = VMA_NULL;
    }
    m_Back = prevItem;
    --m_Count;
    ItemTypeTraits::AccessPrev(backItem) = VMA_NULL;
    ItemTypeTraits::AccessNext(backItem) = VMA_NULL;
    return backItem;
}

template<typename ItemTypeTraits>
typename VmaIntrusiveLinkedList<ItemTypeTraits>::ItemType* VmaIntrusiveLinkedList<ItemTypeTraits>::PopFront()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const frontItem = m_Front;
    ItemType* const nextItem = ItemTypeTraits::GetNext(frontItem);
    if (nextItem != VMA_NULL)
    {
        ItemTypeTraits::AccessPrev(nextItem) = VMA_NULL;
    }
    m_Front = nextItem;
    --m_Count;
    ItemTypeTraits::AccessPrev(frontItem) = VMA_NULL;
    ItemTypeTraits::AccessNext(frontItem) = VMA_NULL;
    return frontItem;
}

template<typename ItemTypeTraits>
void VmaIntrusiveLinkedList<ItemTypeTraits>::InsertBefore(ItemType* existingItem, ItemType* newItem)
{
    VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL);
    if (existingItem != VMA_NULL)
    {
        ItemType* const prevItem = ItemTypeTraits::GetPrev(existingItem);
        ItemTypeTraits::AccessPrev(newItem) = prevItem;
        ItemTypeTraits::AccessNext(newItem) = existingItem;
        ItemTypeTraits::AccessPrev(existingItem) = newItem;
        if (prevItem != VMA_NULL)
        {
            ItemTypeTraits::AccessNext(prevItem) = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_Front == existingItem);
            m_Front = newItem;
        }
        ++m_Count;
    }
    else
        PushBack(newItem);
}

template<typename ItemTypeTraits>
void VmaIntrusiveLinkedList<ItemTypeTraits>::InsertAfter(ItemType* existingItem, ItemType* newItem)
{
    VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL);
    if (existingItem != VMA_NULL)
    {
        ItemType* const nextItem = ItemTypeTraits::GetNext(existingItem);
        ItemTypeTraits::AccessNext(newItem) = nextItem;
        ItemTypeTraits::AccessPrev(newItem) = existingItem;
        ItemTypeTraits::AccessNext(existingItem) = newItem;
        if (nextItem != VMA_NULL)
        {
            ItemTypeTraits::AccessPrev(nextItem) = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_Back == existingItem);
            m_Back = newItem;
        }
        ++m_Count;
    }
    else
        PushFront(newItem);
}

template<typename ItemTypeTraits>
void VmaIntrusiveLinkedList<ItemTypeTraits>::Remove(ItemType* item)
{
    VMA_HEAVY_ASSERT(item != VMA_NULL && m_Count > 0);
    if (ItemTypeTraits::GetPrev(item) != VMA_NULL)
    {
        ItemTypeTraits::AccessNext(ItemTypeTraits::AccessPrev(item)) = ItemTypeTraits::GetNext(item);
    }
    else
    {
        VMA_HEAVY_ASSERT(m_Front == item);
        m_Front = ItemTypeTraits::GetNext(item);
    }

    if (ItemTypeTraits::GetNext(item) != VMA_NULL)
    {
        ItemTypeTraits::AccessPrev(ItemTypeTraits::AccessNext(item)) = ItemTypeTraits::GetPrev(item);
    }
    else
    {
        VMA_HEAVY_ASSERT(m_Back == item);
        m_Back = ItemTypeTraits::GetPrev(item);
    }
    ItemTypeTraits::AccessPrev(item) = VMA_NULL;
    ItemTypeTraits::AccessNext(item) = VMA_NULL;
    --m_Count;
}

template<typename ItemTypeTraits>
void VmaIntrusiveLinkedList<ItemTypeTraits>::RemoveAll()
{
    if (!IsEmpty())
    {
        ItemType* item = m_Back;
        while (item != VMA_NULL)
        {
            ItemType* const prevItem = ItemTypeTraits::AccessPrev(item);
            ItemTypeTraits::AccessPrev(item) = VMA_NULL;
            ItemTypeTraits::AccessNext(item) = VMA_NULL;
            item = prevItem;
        }
        m_Front = VMA_NULL;
        m_Back = VMA_NULL;
        m_Count = 0;
    }
}
#endif // _VMA_INTRUSIVE_LINKED_LIST_FUNCTIONS
#endif // _VMA_INTRUSIVE_LINKED_LIST
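
// Illustrative sketch (not part of the library): a minimal item type plus
// traits matching the expected interface documented above. All names are
// hypothetical. Note the destructor asserts emptiness, so the list must be
// emptied (e.g. via RemoveAll()) before it is destroyed.
//
//     struct MyItem
//     {
//         MyItem* pPrev = VMA_NULL;
//         MyItem* pNext = VMA_NULL;
//     };
//     struct MyItemTraits
//     {
//         typedef MyItem ItemType;
//         static ItemType* GetPrev(const ItemType* item) { return item->pPrev; }
//         static ItemType* GetNext(const ItemType* item) { return item->pNext; }
//         static ItemType*& AccessPrev(ItemType* item) { return item->pPrev; }
//         static ItemType*& AccessNext(ItemType* item) { return item->pNext; }
//     };
//
//     VmaIntrusiveLinkedList<MyItemTraits> list;
//     MyItem a, b;
//     list.PushBack(&a);
//     list.InsertAfter(&a, &b); // List is now: a, b.
//     list.RemoveAll();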

// Unused in this version.
#if 0

#ifndef _VMA_PAIR
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() {}
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) {}
};

template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};
#endif // _VMA_PAIR

#ifndef _VMA_MAP
/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) {}

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }
    size_t size() { return m_Vector.size(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType>> m_Vector;
};

#ifndef _VMA_MAP_FUNCTIONS
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}

template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if ((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}
#endif // _VMA_MAP_FUNCTIONS
#endif // _VMA_MAP

#endif // #if 0

#if !defined(_VMA_STRING_BUILDER) && VMA_STATS_STRING_ENABLED
class VmaStringBuilder
{
public:
    VmaStringBuilder(const VkAllocationCallbacks* allocationCallbacks) : m_Data(VmaStlAllocator<char>(allocationCallbacks)) {}
    ~VmaStringBuilder() = default;

    size_t GetLength() const { return m_Data.size(); }
    const char* GetData() const { return m_Data.data(); }
    void AddNewLine() { Add('\n'); }
    void Add(char ch) { m_Data.push_back(ch); }

    void Add(const char* pStr);
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    void AddPointer(const void* ptr);

private:
    VmaVector<char, VmaStlAllocator<char>> m_Data;
};

#ifndef _VMA_STRING_BUILDER_FUNCTIONS
void VmaStringBuilder::Add(const char* pStr)
{
    const size_t strLen = strlen(pStr);
    if (strLen > 0)
    {
        const size_t oldCount = m_Data.size();
        m_Data.resize(oldCount + strLen);
        memcpy(m_Data.data() + oldCount, pStr, strLen);
    }
}

void VmaStringBuilder::AddNumber(uint32_t num)
{
    char buf[11];
    buf[10] = '\0';
    char* p = &buf[10];
    do
    {
        *--p = '0' + (char)(num % 10);
        num /= 10;
    } while (num);
    Add(p);
}

void VmaStringBuilder::AddNumber(uint64_t num)
{
    char buf[21];
    buf[20] = '\0';
    char* p = &buf[20];
    do
    {
        *--p = '0' + (char)(num % 10);
        num /= 10;
    } while (num);
    Add(p);
}

void VmaStringBuilder::AddPointer(const void* ptr)
{
    char buf[21];
    VmaPtrToStr(buf, sizeof(buf), ptr);
    Add(buf);
}
#endif //_VMA_STRING_BUILDER_FUNCTIONS
#endif // _VMA_STRING_BUILDER
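
// Usage note (sketch, not part of the library): m_Data holds raw characters
// with no terminating '\0' appended, so consume GetData() together with
// GetLength(). pAllocationCallbacks below is a hypothetical name.
//
//     VmaStringBuilder sb(pAllocationCallbacks);
//     sb.Add("Allocations: ");
//     sb.AddNumber(42u); // uint32_t overload
//     sb.AddNewLine();
//     // Write sb.GetLength() bytes starting at sb.GetData().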

#if !defined(_VMA_JSON_WRITER) && VMA_STATS_STRING_ENABLED
/*
Helps to build a syntactically correct JSON document that is written to the
VmaStringBuilder passed to the constructor.
*/
class VmaJsonWriter
{
    VMA_CLASS_NO_COPY_NO_MOVE(VmaJsonWriter)
public:
    // sb - string builder to write the document to. Must remain alive for the whole lifetime of this object.
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();

    // Begins object by writing "{".
    // Inside an object, you must call pairs of WriteString and a value, e.g.:
    // j.BeginObject(true); j.WriteString("A"); j.WriteNumber(1); j.WriteString("B"); j.WriteNumber(2); j.EndObject();
    // Will write: { "A": 1, "B": 2 }
    void BeginObject(bool singleLine = false);
    // Ends object by writing "}".
    void EndObject();

    // Begins array by writing "[".
    // Inside an array, you can write a sequence of any values.
    void BeginArray(bool singleLine = false);
    // Ends array by writing "]".
    void EndArray();

    // Writes a string value inside "".
    // pStr can contain any ANSI characters, including '"', new line etc. - they will be properly escaped.
    void WriteString(const char* pStr);

    // Begins writing a string value.
    // Call BeginString, ContinueString, ContinueString, ..., EndString instead of
    // WriteString to conveniently build the string content incrementally, made of
    // parts including numbers.
    void BeginString(const char* pStr = VMA_NULL);
    // Posts next part of an open string.
    void ContinueString(const char* pStr);
    // Posts next part of an open string. The number is converted to decimal characters.
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    // Posts next part of an open string. Pointer value is converted to characters
    // using "%p" formatting - shown as hexadecimal number, e.g.: 000000081276Ad00
    void ContinueString_Pointer(const void* ptr);
    // Ends writing a string value by writing '"'.
    void EndString(const char* pStr = VMA_NULL);

    // Writes a number value.
    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    // Writes a boolean value - false or true.
    void WriteBool(bool b);
    // Writes a null value.
    void WriteNull();

private:
    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    struct StackItem
    {
        COLLECTION_TYPE type;
        uint32_t valueCount;
        bool singleLineMode;
    };

    static const char* const INDENT;

    VmaStringBuilder& m_SB;
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    bool m_InsideString;

    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
const char* const VmaJsonWriter::INDENT = "  ";

#ifndef _VMA_JSON_WRITER_FUNCTIONS
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb)
    : m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false) {}

VmaJsonWriter::~VmaJsonWriter()
{
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}

void VmaJsonWriter::BeginObject(bool singleLine)
{
    VMA_ASSERT(!m_InsideString);

    BeginValue(false);
    m_SB.Add('{');

    StackItem item;
    item.type = COLLECTION_TYPE_OBJECT;
    item.valueCount = 0;
    item.singleLineMode = singleLine;
    m_Stack.push_back(item);
}

void VmaJsonWriter::EndObject()
{
    VMA_ASSERT(!m_InsideString);

    WriteIndent(true);
    m_SB.Add('}');

    VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    m_Stack.pop_back();
}

void VmaJsonWriter::BeginArray(bool singleLine)
{
    VMA_ASSERT(!m_InsideString);

    BeginValue(false);
    m_SB.Add('[');

    StackItem item;
    item.type = COLLECTION_TYPE_ARRAY;
    item.valueCount = 0;
    item.singleLineMode = singleLine;
    m_Stack.push_back(item);
}

void VmaJsonWriter::EndArray()
{
    VMA_ASSERT(!m_InsideString);

    WriteIndent(true);
    m_SB.Add(']');

    VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    m_Stack.pop_back();
}

void VmaJsonWriter::WriteString(const char* pStr)
{
    BeginString(pStr);
    EndString();
}

void VmaJsonWriter::BeginString(const char* pStr)
{
    VMA_ASSERT(!m_InsideString);

    BeginValue(true);
    m_SB.Add('"');
    m_InsideString = true;
    if (pStr != VMA_NULL && pStr[0] != '\0')
    {
        ContinueString(pStr);
    }
}

void VmaJsonWriter::ContinueString(const char* pStr)
{
    VMA_ASSERT(m_InsideString);

    const size_t strLen = strlen(pStr);
    for (size_t i = 0; i < strLen; ++i)
    {
        char ch = pStr[i];
        if (ch == '\\')
        {
            m_SB.Add("\\\\");
        }
        else if (ch == '"')
        {
            m_SB.Add("\\\"");
        }
        else if (ch >= 32)
        {
            m_SB.Add(ch);
        }
        else switch (ch)
        {
        case '\b':
            m_SB.Add("\\b");
            break;
        case '\f':
            m_SB.Add("\\f");
            break;
        case '\n':
            m_SB.Add("\\n");
            break;
        case '\r':
            m_SB.Add("\\r");
            break;
        case '\t':
            m_SB.Add("\\t");
            break;
        default:
            VMA_ASSERT(0 && "Character not currently supported.");
        }
    }
}

void VmaJsonWriter::ContinueString(uint32_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}

void VmaJsonWriter::ContinueString(uint64_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}

void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddPointer(ptr);
}

void VmaJsonWriter::EndString(const char* pStr)
{
    VMA_ASSERT(m_InsideString);
    if (pStr != VMA_NULL && pStr[0] != '\0')
    {
        ContinueString(pStr);
    }
    m_SB.Add('"');
    m_InsideString = false;
}

void VmaJsonWriter::WriteNumber(uint32_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}

void VmaJsonWriter::WriteNumber(uint64_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}

void VmaJsonWriter::WriteBool(bool b)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add(b ? "true" : "false");
}

void VmaJsonWriter::WriteNull()
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add("null");
}

void VmaJsonWriter::BeginValue(bool isString)
{
    if (!m_Stack.empty())
    {
        StackItem& currItem = m_Stack.back();
        if (currItem.type == COLLECTION_TYPE_OBJECT &&
            currItem.valueCount % 2 == 0)
        {
            VMA_ASSERT(isString);
        }

        if (currItem.type == COLLECTION_TYPE_OBJECT &&
            currItem.valueCount % 2 != 0)
        {
            m_SB.Add(": ");
        }
        else if (currItem.valueCount > 0)
        {
            m_SB.Add(", ");
            WriteIndent();
        }
        else
        {
            WriteIndent();
        }
        ++currItem.valueCount;
    }
}

void VmaJsonWriter::WriteIndent(bool oneLess)
{
    if (!m_Stack.empty() && !m_Stack.back().singleLineMode)
    {
        m_SB.AddNewLine();

        size_t count = m_Stack.size();
        if (count > 0 && oneLess)
        {
            --count;
        }
        for (size_t i = 0; i < count; ++i)
        {
            m_SB.Add(INDENT);
        }
    }
}
#endif // _VMA_JSON_WRITER_FUNCTIONS

static void VmaPrintDetailedStatistics(VmaJsonWriter& json, const VmaDetailedStatistics& stat)
{
    json.BeginObject();

    json.WriteString("BlockCount");
    json.WriteNumber(stat.statistics.blockCount);
    json.WriteString("BlockBytes");
    json.WriteNumber(stat.statistics.blockBytes);
    json.WriteString("AllocationCount");
    json.WriteNumber(stat.statistics.allocationCount);
    json.WriteString("AllocationBytes");
    json.WriteNumber(stat.statistics.allocationBytes);
    json.WriteString("UnusedRangeCount");
    json.WriteNumber(stat.unusedRangeCount);

    if (stat.statistics.allocationCount > 1)
    {
        json.WriteString("AllocationSizeMin");
        json.WriteNumber(stat.allocationSizeMin);
        json.WriteString("AllocationSizeMax");
        json.WriteNumber(stat.allocationSizeMax);
    }
    if (stat.unusedRangeCount > 1)
    {
        json.WriteString("UnusedRangeSizeMin");
        json.WriteNumber(stat.unusedRangeSizeMin);
        json.WriteString("UnusedRangeSizeMax");
        json.WriteNumber(stat.unusedRangeSizeMax);
    }
    json.EndObject();
}
#endif // _VMA_JSON_WRITER
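
// Illustrative sketch (not part of the library): emitting a small document.
// Inside an object, values must come in WriteString/value pairs, which
// BeginValue() asserts. pAllocationCallbacks is a hypothetical name.
//
//     VmaStringBuilder sb(pAllocationCallbacks);
//     VmaJsonWriter json(pAllocationCallbacks, sb);
//     json.BeginObject();
//     json.WriteString("Count");
//     json.WriteNumber(2u);
//     json.WriteString("Items");
//     json.BeginArray(true); // single line
//     json.WriteNumber(10u);
//     json.WriteNumber(20u);
//     json.EndArray();
//     json.EndObject();
//     // sb now holds roughly: { "Count": 2, "Items": [10, 20] } (plus indentation).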

#ifndef _VMA_MAPPING_HYSTERESIS

class VmaMappingHysteresis
{
    VMA_CLASS_NO_COPY_NO_MOVE(VmaMappingHysteresis)
public:
    VmaMappingHysteresis() = default;

    uint32_t GetExtraMapping() const { return m_ExtraMapping; }

    // Call when Map was called.
    // Returns true if switched to extra +1 mapping reference count.
    bool PostMap()
    {
#if VMA_MAPPING_HYSTERESIS_ENABLED
        if(m_ExtraMapping == 0)
        {
            ++m_MajorCounter;
            if(m_MajorCounter >= COUNTER_MIN_EXTRA_MAPPING)
            {
                m_ExtraMapping = 1;
                m_MajorCounter = 0;
                m_MinorCounter = 0;
                return true;
            }
        }
        else // m_ExtraMapping == 1
            PostMinorCounter();
#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED
        return false;
    }

    // Call when Unmap was called.
    void PostUnmap()
    {
#if VMA_MAPPING_HYSTERESIS_ENABLED
        if(m_ExtraMapping == 0)
            ++m_MajorCounter;
        else // m_ExtraMapping == 1
            PostMinorCounter();
#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED
    }

    // Call when allocation was made from the memory block.
    void PostAlloc()
    {
#if VMA_MAPPING_HYSTERESIS_ENABLED
        if(m_ExtraMapping == 1)
            ++m_MajorCounter;
        else // m_ExtraMapping == 0
            PostMinorCounter();
#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED
    }

    // Call when allocation was freed from the memory block.
    // Returns true if switched to extra -1 mapping reference count.
    bool PostFree()
    {
#if VMA_MAPPING_HYSTERESIS_ENABLED
        if(m_ExtraMapping == 1)
        {
            ++m_MajorCounter;
            if(m_MajorCounter >= COUNTER_MIN_EXTRA_MAPPING &&
                m_MajorCounter > m_MinorCounter + 1)
            {
                m_ExtraMapping = 0;
                m_MajorCounter = 0;
                m_MinorCounter = 0;
                return true;
            }
        }
        else // m_ExtraMapping == 0
            PostMinorCounter();
#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED
        return false;
    }

private:
    static const int32_t COUNTER_MIN_EXTRA_MAPPING = 7;

    uint32_t m_MinorCounter = 0;
    uint32_t m_MajorCounter = 0;
    uint32_t m_ExtraMapping = 0; // 0 or 1.

    void PostMinorCounter()
    {
        if(m_MinorCounter < m_MajorCounter)
        {
            ++m_MinorCounter;
        }
        else if(m_MajorCounter > 0)
        {
            --m_MajorCounter;
            --m_MinorCounter;
        }
    }
};

#endif // _VMA_MAPPING_HYSTERESIS
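
// Illustrative sketch (not part of the library): how a memory block is expected
// to drive VmaMappingHysteresis, based on the member comments above. When
// PostMap() returns true, the caller keeps one extra mapping reference so that
// frequently re-mapped memory stays mapped; when PostFree() returns true, the
// extra reference is dropped again.
//
//     VmaMappingHysteresis hysteresis;
//     // On Map():
//     const uint32_t extraRefs = hysteresis.PostMap() ? 1 : 0;
//     // ... add extraRefs to the block's map reference count ...
//     // On Unmap():
//     hysteresis.PostUnmap();
//     // On allocation from the block:
//     hysteresis.PostAlloc();
//     // On free from the block:
//     if (hysteresis.PostFree())
//     {
//         // Release the extra mapping reference (may trigger vkUnmapMemory
//         // once the total reference count reaches zero).
//     }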

#ifndef _VMA_DEVICE_MEMORY_BLOCK
/*
Represents a single block of device memory (`VkDeviceMemory`) with all the
data about its regions (aka suballocations, #VmaAllocation), assigned and free.

Thread-safety:
- Access to m_pMetadata must be externally synchronized.
- Map, Unmap, Bind* are synchronized internally.
*/
class VmaDeviceMemoryBlock
{
    VMA_CLASS_NO_COPY_NO_MOVE(VmaDeviceMemoryBlock)
public:
    VmaBlockMetadata* m_pMetadata;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);
    ~VmaDeviceMemoryBlock();

    // Always call after construction.
    void Init(
        VmaAllocator hAllocator,
        VmaPool hParentPool,
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        uint32_t id,
        uint32_t algorithm,
        VkDeviceSize bufferImageGranularity);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    VmaPool GetParentPool() const { return m_hParentPool; }
    VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    uint32_t GetId() const { return m_Id; }
    void* GetMappedData() const { return m_pMappedData; }
    uint32_t GetMapRefCount() const { return m_MapCount; }

    // Call when allocation/free was made from m_pMetadata.
    // Used for m_MappingHysteresis.
    void PostAlloc(VmaAllocator hAllocator);
    void PostFree(VmaAllocator hAllocator);

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;
    VkResult CheckCorruption(VmaAllocator hAllocator);

    // ppData can be null.
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    void Unmap(VmaAllocator hAllocator, uint32_t count);

    VkResult WriteMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    VkResult ValidateMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);

    VkResult BindBufferMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkDeviceSize allocationLocalOffset,
        VkBuffer hBuffer,
        const void* pNext);
    VkResult BindImageMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkDeviceSize allocationLocalOffset,
        VkImage hImage,
        const void* pNext);

private:
    VmaPool m_hParentPool; // VK_NULL_HANDLE if it doesn't belong to a custom pool.
    uint32_t m_MemoryTypeIndex;
    uint32_t m_Id;
    VkDeviceMemory m_hMemory;

    /*
    Protects access to m_hMemory so it is not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    Also protects m_MapCount, m_pMappedData.
    Allocations, deallocations, and any change in m_pMetadata are protected by the parent VmaBlockVector::m_Mutex.
    */
    VMA_MUTEX m_MapAndBindMutex;
    VmaMappingHysteresis m_MappingHysteresis;
    uint32_t m_MapCount;
    void* m_pMappedData;
};
#endif // _VMA_DEVICE_MEMORY_BLOCK

#ifndef _VMA_ALLOCATION_T
struct VmaAllocation_T
{
    friend struct VmaDedicatedAllocationListItemTraits;

    enum FLAGS
    {
        FLAG_PERSISTENT_MAP   = 0x01,
        FLAG_MAPPING_ALLOWED  = 0x02,
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,
        ALLOCATION_TYPE_BLOCK,
        ALLOCATION_TYPE_DEDICATED,
    };

    // This struct is allocated using VmaPoolAllocator.
    VmaAllocation_T(bool mappingAllowed);
    ~VmaAllocation_T();

    void InitBlockAllocation(
        VmaDeviceMemoryBlock* block,
        VmaAllocHandle allocHandle,
        VkDeviceSize alignment,
        VkDeviceSize size,
        uint32_t memoryTypeIndex,
        VmaSuballocationType suballocationType,
        bool mapped);
    // pMappedData not null means allocation is created with MAPPED flag.
    void InitDedicatedAllocation(
        VmaPool hParentPool,
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size);

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    void* GetUserData() const { return m_pUserData; }
    const char* GetName() const { return m_pName; }
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    VmaDeviceMemoryBlock* GetBlock() const { VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK); return m_BlockAllocation.m_Block; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    bool IsPersistentMap() const { return (m_Flags & FLAG_PERSISTENT_MAP) != 0; }
    bool IsMappingAllowed() const { return (m_Flags & FLAG_MAPPING_ALLOWED) != 0; }

    void SetUserData(VmaAllocator hAllocator, void* pUserData) { m_pUserData = pUserData; }
    void SetName(VmaAllocator hAllocator, const char* pName);
    void FreeName(VmaAllocator hAllocator);
    uint8_t SwapBlockAllocation(VmaAllocator hAllocator, VmaAllocation allocation);
    VmaAllocHandle GetAllocHandle() const;
    VkDeviceSize GetOffset() const;
    VmaPool GetParentPool() const;
    VkDeviceMemory GetMemory() const;
    void* GetMappedData() const;

    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

#if VMA_STATS_STRING_ENABLED
    uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }

    void InitBufferImageUsage(uint32_t bufferImageUsage);
    void PrintParameters(class VmaJsonWriter& json) const;
#endif

private:
    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaDeviceMemoryBlock* m_Block;
        VmaAllocHandle m_AllocHandle;
    };
    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        VmaPool m_hParentPool; // VK_NULL_HANDLE if it doesn't belong to a custom pool.
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
        VmaAllocation_T* m_Prev;
        VmaAllocation_T* m_Next;
    };
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    char* m_pName;
    uint32_t m_MemoryTypeIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS
#if VMA_STATS_STRING_ENABLED
    uint32_t m_BufferImageUsage; // 0 if unknown.
#endif
};
#endif // _VMA_ALLOCATION_T

#ifndef _VMA_DEDICATED_ALLOCATION_LIST_ITEM_TRAITS
struct VmaDedicatedAllocationListItemTraits
{
    typedef VmaAllocation_T ItemType;

    static ItemType* GetPrev(const ItemType* item)
    {
        VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
        return item->m_DedicatedAllocation.m_Prev;
    }
    static ItemType* GetNext(const ItemType* item)
    {
        VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
        return item->m_DedicatedAllocation.m_Next;
    }
    static ItemType*& AccessPrev(ItemType* item)
    {
        VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
        return item->m_DedicatedAllocation.m_Prev;
    }
    static ItemType*& AccessNext(ItemType* item)
    {
        VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
        return item->m_DedicatedAllocation.m_Next;
    }
};
#endif // _VMA_DEDICATED_ALLOCATION_LIST_ITEM_TRAITS

#ifndef _VMA_DEDICATED_ALLOCATION_LIST
/*
Stores linked list of VmaAllocation_T objects.
Thread-safe, synchronized internally.
*/
class VmaDedicatedAllocationList
{
    VMA_CLASS_NO_COPY_NO_MOVE(VmaDedicatedAllocationList)
public:
    VmaDedicatedAllocationList() {}
    ~VmaDedicatedAllocationList();

    void Init(bool useMutex) { m_UseMutex = useMutex; }
    bool Validate();

    void AddDetailedStatistics(VmaDetailedStatistics& inoutStats);
    void AddStatistics(VmaStatistics& inoutStats);
#if VMA_STATS_STRING_ENABLED
    // Writes JSON array with the list of allocations.
    void BuildStatsString(VmaJsonWriter& json);
#endif

    bool IsEmpty();
    void Register(VmaAllocation alloc);
    void Unregister(VmaAllocation alloc);

private:
    typedef VmaIntrusiveLinkedList<VmaDedicatedAllocationListItemTraits> DedicatedAllocationLinkedList;

    bool m_UseMutex = true;
    VMA_RW_MUTEX m_Mutex;
    DedicatedAllocationLinkedList m_AllocationList;
};

#ifndef _VMA_DEDICATED_ALLOCATION_LIST_FUNCTIONS

VmaDedicatedAllocationList::~VmaDedicatedAllocationList()
{
    VMA_HEAVY_ASSERT(Validate());

    if (!m_AllocationList.IsEmpty())
    {
        VMA_ASSERT(false && "Unfreed dedicated allocations found!");
    }
}

bool VmaDedicatedAllocationList::Validate()
{
    // Take the lock before reading the declared count, so it cannot diverge
    // from the actual count due to a concurrent Register()/Unregister().
    VmaMutexLockRead lock(m_Mutex, m_UseMutex);
    const size_t declaredCount = m_AllocationList.GetCount();
    size_t actualCount = 0;
    for (VmaAllocation alloc = m_AllocationList.Front();
        alloc != VMA_NULL; alloc = m_AllocationList.GetNext(alloc))
    {
        ++actualCount;
    }
    VMA_VALIDATE(actualCount == declaredCount);

    return true;
}
6221 
void VmaDedicatedAllocationList::AddDetailedStatistics(VmaDetailedStatistics& inoutStats)
{
    // Synchronize internally, like the other methods of this class promise.
    VmaMutexLockRead lock(m_Mutex, m_UseMutex);
    for(auto* item = m_AllocationList.Front(); item != nullptr; item = DedicatedAllocationLinkedList::GetNext(item))
    {
        const VkDeviceSize size = item->GetSize();
        inoutStats.statistics.blockCount++;
        inoutStats.statistics.blockBytes += size;
        VmaAddDetailedStatisticsAllocation(inoutStats, size);
    }
}

void VmaDedicatedAllocationList::AddStatistics(VmaStatistics& inoutStats)
{
    VmaMutexLockRead lock(m_Mutex, m_UseMutex);

    const uint32_t allocCount = (uint32_t)m_AllocationList.GetCount();
    inoutStats.blockCount += allocCount;
    inoutStats.allocationCount += allocCount;

    for(auto* item = m_AllocationList.Front(); item != nullptr; item = DedicatedAllocationLinkedList::GetNext(item))
    {
        const VkDeviceSize size = item->GetSize();
        inoutStats.blockBytes += size;
        inoutStats.allocationBytes += size;
    }
}

#if VMA_STATS_STRING_ENABLED
void VmaDedicatedAllocationList::BuildStatsString(VmaJsonWriter& json)
{
    VmaMutexLockRead lock(m_Mutex, m_UseMutex);
    json.BeginArray();
    for (VmaAllocation alloc = m_AllocationList.Front();
        alloc != VMA_NULL; alloc = m_AllocationList.GetNext(alloc))
    {
        json.BeginObject(true);
        alloc->PrintParameters(json);
        json.EndObject();
    }
    json.EndArray();
}
#endif // VMA_STATS_STRING_ENABLED

bool VmaDedicatedAllocationList::IsEmpty()
{
    VmaMutexLockRead lock(m_Mutex, m_UseMutex);
    return m_AllocationList.IsEmpty();
}

void VmaDedicatedAllocationList::Register(VmaAllocation alloc)
{
    VmaMutexLockWrite lock(m_Mutex, m_UseMutex);
    m_AllocationList.PushBack(alloc);
}

void VmaDedicatedAllocationList::Unregister(VmaAllocation alloc)
{
    VmaMutexLockWrite lock(m_Mutex, m_UseMutex);
    m_AllocationList.Remove(alloc);
}
#endif // _VMA_DEDICATED_ALLOCATION_LIST_FUNCTIONS
#endif // _VMA_DEDICATED_ALLOCATION_LIST

#ifndef _VMA_SUBALLOCATION
/*
Represents a region of a VmaDeviceMemoryBlock that is either assigned and
returned as an allocated memory block, or free.
*/
struct VmaSuballocation
{
    VkDeviceSize offset;
    VkDeviceSize size;
    void* userData;
    VmaSuballocationType type;
};

// Comparator for offsets.
struct VmaSuballocationOffsetLess
{
    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    {
        return lhs.offset < rhs.offset;
    }
};

struct VmaSuballocationOffsetGreater
{
    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    {
        return lhs.offset > rhs.offset;
    }
};

struct VmaSuballocationItemSizeLess
{
    bool operator()(const VmaSuballocationList::iterator lhs,
        const VmaSuballocationList::iterator rhs) const
    {
        return lhs->size < rhs->size;
    }

    bool operator()(const VmaSuballocationList::iterator lhs,
        VkDeviceSize rhsSize) const
    {
        return lhs->size < rhsSize;
    }
};
#endif // _VMA_SUBALLOCATION

#ifndef _VMA_ALLOCATION_REQUEST
/*
Parameters of planned allocation inside a VmaDeviceMemoryBlock.
item points to a FREE suballocation.
*/
struct VmaAllocationRequest
{
    VmaAllocHandle allocHandle;
    VkDeviceSize size;
    VmaSuballocationList::iterator item;
    void* customData;
    uint64_t algorithmData;
    VmaAllocationRequestType type;
};
#endif // _VMA_ALLOCATION_REQUEST

#ifndef _VMA_BLOCK_METADATA
/*
Data structure used for bookkeeping of allocations and unused ranges of memory
in a single VkDeviceMemory block.
*/
class VmaBlockMetadata
{
    VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockMetadata)
public:
    // pAllocationCallbacks, if not null, must be owned externally - alive and unchanged for the whole lifetime of this object.
    VmaBlockMetadata(const VkAllocationCallbacks* pAllocationCallbacks,
        VkDeviceSize bufferImageGranularity, bool isVirtual);
    virtual ~VmaBlockMetadata() = default;

    virtual void Init(VkDeviceSize size) { m_Size = size; }
    bool IsVirtual() const { return m_IsVirtual; }
    VkDeviceSize GetSize() const { return m_Size; }

    // Validates all data structures inside this object. If not valid, returns false.
    virtual bool Validate() const = 0;
    virtual size_t GetAllocationCount() const = 0;
    virtual size_t GetFreeRegionsCount() const = 0;
    virtual VkDeviceSize GetSumFreeSize() const = 0;
    // Returns true if this block is empty - contains only a single free suballocation.
    virtual bool IsEmpty() const = 0;
    virtual void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) = 0;
    virtual VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const = 0;
    virtual void* GetAllocationUserData(VmaAllocHandle allocHandle) const = 0;

    virtual VmaAllocHandle GetAllocationListBegin() const = 0;
    virtual VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const = 0;
    virtual VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const = 0;

    // Shouldn't modify blockCount.
    virtual void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const = 0;
    virtual void AddStatistics(VmaStatistics& inoutStats) const = 0;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
#endif

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    virtual bool CreateAllocationRequest(
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest) = 0;

    virtual VkResult CheckCorruption(const void* pBlockData) = 0;

    // Makes actual allocation based on request. Request must already be checked and valid.
    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        void* userData) = 0;

    // Frees suballocation assigned to given memory region.
    virtual void Free(VmaAllocHandle allocHandle) = 0;

    // Frees all allocations.
    // Careful! Don't call it if there are VmaAllocation objects owned by userData of cleared allocations!
    virtual void Clear() = 0;

    virtual void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) = 0;
    virtual void DebugLogAllAllocations() const = 0;

protected:
    const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    VkDeviceSize GetDebugMargin() const { return VkDeviceSize(IsVirtual() ? 0 : VMA_DEBUG_MARGIN); }

    void DebugLogAllocation(VkDeviceSize offset, VkDeviceSize size, void* userData) const;
#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap_Begin(class VmaJsonWriter& json,
        VkDeviceSize unusedBytes,
        size_t allocationCount,
        size_t unusedRangeCount) const;
    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
        VkDeviceSize offset, VkDeviceSize size, void* userData) const;
    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VkDeviceSize size) const;
    void PrintDetailedMap_End(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Size;
    const VkAllocationCallbacks* m_pAllocationCallbacks;
    const VkDeviceSize m_BufferImageGranularity;
    const bool m_IsVirtual;
};

#ifndef _VMA_BLOCK_METADATA_FUNCTIONS
VmaBlockMetadata::VmaBlockMetadata(const VkAllocationCallbacks* pAllocationCallbacks,
    VkDeviceSize bufferImageGranularity, bool isVirtual)
    : m_Size(0),
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_BufferImageGranularity(bufferImageGranularity),
    m_IsVirtual(isVirtual) {}

void VmaBlockMetadata::DebugLogAllocation(VkDeviceSize offset, VkDeviceSize size, void* userData) const
{
    if (IsVirtual())
    {
        VMA_DEBUG_LOG_FORMAT("UNFREED VIRTUAL ALLOCATION; Offset: %llu; Size: %llu; UserData: %p", offset, size, userData);
    }
    else
    {
        VMA_ASSERT(userData != VMA_NULL);
        VmaAllocation allocation = reinterpret_cast<VmaAllocation>(userData);

        userData = allocation->GetUserData();
        const char* name = allocation->GetName();

#if VMA_STATS_STRING_ENABLED
        VMA_DEBUG_LOG_FORMAT("UNFREED ALLOCATION; Offset: %llu; Size: %llu; UserData: %p; Name: %s; Type: %s; Usage: %u",
            offset, size, userData, name ? name : "vma_empty",
            VMA_SUBALLOCATION_TYPE_NAMES[allocation->GetSuballocationType()],
            allocation->GetBufferImageUsage());
#else
        VMA_DEBUG_LOG_FORMAT("UNFREED ALLOCATION; Offset: %llu; Size: %llu; UserData: %p; Name: %s; Type: %u",
            offset, size, userData, name ? name : "vma_empty",
            (uint32_t)allocation->GetSuballocationType());
#endif // VMA_STATS_STRING_ENABLED
    }
}

#if VMA_STATS_STRING_ENABLED
void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    VkDeviceSize unusedBytes, size_t allocationCount, size_t unusedRangeCount) const
{
    json.WriteString("TotalBytes");
    json.WriteNumber(GetSize());

    json.WriteString("UnusedBytes");
    json.WriteNumber(unusedBytes);

    json.WriteString("Allocations");
    json.WriteNumber((uint64_t)allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber((uint64_t)unusedRangeCount);

    json.WriteString("Suballocations");
    json.BeginArray();
}

void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    VkDeviceSize offset, VkDeviceSize size, void* userData) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    if (IsVirtual())
    {
        json.WriteString("Size");
        json.WriteNumber(size);
        if (userData)
        {
            json.WriteString("CustomData");
            json.BeginString();
            json.ContinueString_Pointer(userData);
            json.EndString();
        }
    }
    else
    {
        ((VmaAllocation)userData)->PrintParameters(json);
    }

    json.EndObject();
}

void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    VkDeviceSize offset, VkDeviceSize size) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);

    json.WriteString("Size");
    json.WriteNumber(size);

    json.EndObject();
}

void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
{
    json.EndArray();
}
#endif // VMA_STATS_STRING_ENABLED
#endif // _VMA_BLOCK_METADATA_FUNCTIONS
#endif // _VMA_BLOCK_METADATA

#ifndef _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY
// Before deleting an object of this class, remember to call Destroy().
class VmaBlockBufferImageGranularity final
{
public:
    struct ValidationContext
    {
        const VkAllocationCallbacks* allocCallbacks;
        uint16_t* pageAllocs;
    };

    VmaBlockBufferImageGranularity(VkDeviceSize bufferImageGranularity);
    ~VmaBlockBufferImageGranularity();

    bool IsEnabled() const { return m_BufferImageGranularity > MAX_LOW_BUFFER_IMAGE_GRANULARITY; }

    void Init(const VkAllocationCallbacks* pAllocationCallbacks, VkDeviceSize size);
    // Before destroying the object, you must free its memory by calling Destroy().
    void Destroy(const VkAllocationCallbacks* pAllocationCallbacks);

    void RoundupAllocRequest(VmaSuballocationType allocType,
        VkDeviceSize& inOutAllocSize,
        VkDeviceSize& inOutAllocAlignment) const;

    bool CheckConflictAndAlignUp(VkDeviceSize& inOutAllocOffset,
        VkDeviceSize allocSize,
        VkDeviceSize blockOffset,
        VkDeviceSize blockSize,
        VmaSuballocationType allocType) const;

    void AllocPages(uint8_t allocType, VkDeviceSize offset, VkDeviceSize size);
    void FreePages(VkDeviceSize offset, VkDeviceSize size);
    void Clear();

    ValidationContext StartValidation(const VkAllocationCallbacks* pAllocationCallbacks,
        bool isVirtual) const;
    bool Validate(ValidationContext& ctx, VkDeviceSize offset, VkDeviceSize size) const;
    bool FinishValidation(ValidationContext& ctx) const;

private:
    static const uint16_t MAX_LOW_BUFFER_IMAGE_GRANULARITY = 256;

    struct RegionInfo
    {
        uint8_t allocType;
        uint16_t allocCount;
    };

    VkDeviceSize m_BufferImageGranularity;
    uint32_t m_RegionCount;
    RegionInfo* m_RegionInfo;

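    // Note: the page helpers below assume m_BufferImageGranularity is a power
    // of two (which Vulkan guarantees for the bufferImageGranularity limit),
    // so masking with ~(m_BufferImageGranularity - 1) rounds an offset down to
    // a page boundary.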
GetStartPage(VkDeviceSize offset)6606     uint32_t GetStartPage(VkDeviceSize offset) const { return OffsetToPageIndex(offset & ~(m_BufferImageGranularity - 1)); }
GetEndPage(VkDeviceSize offset,VkDeviceSize size)6607     uint32_t GetEndPage(VkDeviceSize offset, VkDeviceSize size) const { return OffsetToPageIndex((offset + size - 1) & ~(m_BufferImageGranularity - 1)); }
6608 
6609     uint32_t OffsetToPageIndex(VkDeviceSize offset) const;
6610     void AllocPage(RegionInfo& page, uint8_t allocType);
6611 };
6612 
6613 #ifndef _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY_FUNCTIONS
VmaBlockBufferImageGranularity(VkDeviceSize bufferImageGranularity)6614 VmaBlockBufferImageGranularity::VmaBlockBufferImageGranularity(VkDeviceSize bufferImageGranularity)
6615     : m_BufferImageGranularity(bufferImageGranularity),
6616     m_RegionCount(0),
6617     m_RegionInfo(VMA_NULL) {}
6618 
~VmaBlockBufferImageGranularity()6619 VmaBlockBufferImageGranularity::~VmaBlockBufferImageGranularity()
6620 {
6621     VMA_ASSERT(m_RegionInfo == VMA_NULL && "Free not called before destroying object!");
6622 }
6623 
Init(const VkAllocationCallbacks * pAllocationCallbacks,VkDeviceSize size)6624 void VmaBlockBufferImageGranularity::Init(const VkAllocationCallbacks* pAllocationCallbacks, VkDeviceSize size)
6625 {
6626     if (IsEnabled())
6627     {
6628         m_RegionCount = static_cast<uint32_t>(VmaDivideRoundingUp(size, m_BufferImageGranularity));
6629         m_RegionInfo = vma_new_array(pAllocationCallbacks, RegionInfo, m_RegionCount);
6630         memset(m_RegionInfo, 0, m_RegionCount * sizeof(RegionInfo));
6631     }
6632 }
6633 
6634 void VmaBlockBufferImageGranularity::Destroy(const VkAllocationCallbacks* pAllocationCallbacks)
6635 {
6636     if (m_RegionInfo)
6637     {
6638         vma_delete_array(pAllocationCallbacks, m_RegionInfo, m_RegionCount);
6639         m_RegionInfo = VMA_NULL;
6640     }
6641 }
6642 
6643 void VmaBlockBufferImageGranularity::RoundupAllocRequest(VmaSuballocationType allocType,
6644     VkDeviceSize& inOutAllocSize,
6645     VkDeviceSize& inOutAllocAlignment) const
6646 {
6647     if (m_BufferImageGranularity > 1 &&
6648         m_BufferImageGranularity <= MAX_LOW_BUFFER_IMAGE_GRANULARITY)
6649     {
6650         if (allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
6651             allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
6652             allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
6653         {
6654             inOutAllocAlignment = VMA_MAX(inOutAllocAlignment, m_BufferImageGranularity);
6655             inOutAllocSize = VmaAlignUp(inOutAllocSize, m_BufferImageGranularity);
6656         }
6657     }
6658 }
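
// Illustrative example (not part of the library): with bufferImageGranularity
// = 256 (i.e. within the "low granularity" fast path above), an image request
// of size 1500 and alignment 16 is rounded up to size 1536 and alignment 256,
// so small images can never share a granularity page with buffers.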
6659 
6660 bool VmaBlockBufferImageGranularity::CheckConflictAndAlignUp(VkDeviceSize& inOutAllocOffset,
6661     VkDeviceSize allocSize,
6662     VkDeviceSize blockOffset,
6663     VkDeviceSize blockSize,
6664     VmaSuballocationType allocType) const
6665 {
6666     if (IsEnabled())
6667     {
6668         uint32_t startPage = GetStartPage(inOutAllocOffset);
6669         if (m_RegionInfo[startPage].allocCount > 0 &&
6670             VmaIsBufferImageGranularityConflict(static_cast<VmaSuballocationType>(m_RegionInfo[startPage].allocType), allocType))
6671         {
6672             inOutAllocOffset = VmaAlignUp(inOutAllocOffset, m_BufferImageGranularity);
6673             if (blockSize < allocSize + inOutAllocOffset - blockOffset)
6674                 return true;
6675             ++startPage;
6676         }
6677         uint32_t endPage = GetEndPage(inOutAllocOffset, allocSize);
6678         if (endPage != startPage &&
6679             m_RegionInfo[endPage].allocCount > 0 &&
6680             VmaIsBufferImageGranularityConflict(static_cast<VmaSuballocationType>(m_RegionInfo[endPage].allocType), allocType))
6681         {
6682             return true;
6683         }
6684     }
6685     return false;
6686 }
6687 
6688 void VmaBlockBufferImageGranularity::AllocPages(uint8_t allocType, VkDeviceSize offset, VkDeviceSize size)
6689 {
6690     if (IsEnabled())
6691     {
6692         uint32_t startPage = GetStartPage(offset);
6693         AllocPage(m_RegionInfo[startPage], allocType);
6694 
6695         uint32_t endPage = GetEndPage(offset, size);
6696         if (startPage != endPage)
6697             AllocPage(m_RegionInfo[endPage], allocType);
6698     }
6699 }
6700 
6701 void VmaBlockBufferImageGranularity::FreePages(VkDeviceSize offset, VkDeviceSize size)
6702 {
6703     if (IsEnabled())
6704     {
6705         uint32_t startPage = GetStartPage(offset);
6706         --m_RegionInfo[startPage].allocCount;
6707         if (m_RegionInfo[startPage].allocCount == 0)
6708             m_RegionInfo[startPage].allocType = VMA_SUBALLOCATION_TYPE_FREE;
6709         uint32_t endPage = GetEndPage(offset, size);
6710         if (startPage != endPage)
6711         {
6712             --m_RegionInfo[endPage].allocCount;
6713             if (m_RegionInfo[endPage].allocCount == 0)
6714                 m_RegionInfo[endPage].allocType = VMA_SUBALLOCATION_TYPE_FREE;
6715         }
6716     }
6717 }
6718 
6719 void VmaBlockBufferImageGranularity::Clear()
6720 {
6721     if (m_RegionInfo)
6722         memset(m_RegionInfo, 0, m_RegionCount * sizeof(RegionInfo));
6723 }
6724 
6725 VmaBlockBufferImageGranularity::ValidationContext VmaBlockBufferImageGranularity::StartValidation(
6726     const VkAllocationCallbacks* pAllocationCallbacks, bool isVirtual) const
6727 {
6728     ValidationContext ctx{ pAllocationCallbacks, VMA_NULL };
6729     if (!isVirtual && IsEnabled())
6730     {
6731         ctx.pageAllocs = vma_new_array(pAllocationCallbacks, uint16_t, m_RegionCount);
6732         memset(ctx.pageAllocs, 0, m_RegionCount * sizeof(uint16_t));
6733     }
6734     return ctx;
6735 }
6736 
6737 bool VmaBlockBufferImageGranularity::Validate(ValidationContext& ctx,
6738     VkDeviceSize offset, VkDeviceSize size) const
6739 {
6740     if (IsEnabled())
6741     {
6742         uint32_t start = GetStartPage(offset);
6743         ++ctx.pageAllocs[start];
6744         VMA_VALIDATE(m_RegionInfo[start].allocCount > 0);
6745 
6746         uint32_t end = GetEndPage(offset, size);
6747         if (start != end)
6748         {
6749             ++ctx.pageAllocs[end];
6750             VMA_VALIDATE(m_RegionInfo[end].allocCount > 0);
6751         }
6752     }
6753     return true;
6754 }
6755 
6756 bool VmaBlockBufferImageGranularity::FinishValidation(ValidationContext& ctx) const
6757 {
6758     // Check proper page structure
6759     if (IsEnabled())
6760     {
6761         VMA_ASSERT(ctx.pageAllocs != VMA_NULL && "Validation context not initialized!");
6762 
6763         for (uint32_t page = 0; page < m_RegionCount; ++page)
6764         {
6765             VMA_VALIDATE(ctx.pageAllocs[page] == m_RegionInfo[page].allocCount);
6766         }
6767         vma_delete_array(ctx.allocCallbacks, ctx.pageAllocs, m_RegionCount);
6768         ctx.pageAllocs = VMA_NULL;
6769     }
6770     return true;
6771 }
6772 
6773 uint32_t VmaBlockBufferImageGranularity::OffsetToPageIndex(VkDeviceSize offset) const
6774 {
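    // m_BufferImageGranularity is a power of two, so dividing by it reduces to
    // shifting right by the index of its most significant bit.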
6775     return static_cast<uint32_t>(offset >> VMA_BITSCAN_MSB(m_BufferImageGranularity));
6776 }
6777 
6778 void VmaBlockBufferImageGranularity::AllocPage(RegionInfo& page, uint8_t allocType)
6779 {
6780     // When the current alloc type is free, it can be overridden by the new type.
6781     if (page.allocCount == 0 || (page.allocCount > 0 && page.allocType == VMA_SUBALLOCATION_TYPE_FREE))
6782         page.allocType = allocType;
6783 
6784     ++page.allocCount;
6785 }
6786 #endif // _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY_FUNCTIONS
6787 #endif // _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY
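
/*
A minimal illustrative sketch (not part of the library) of the page bookkeeping
implemented above, assuming a hypothetical granularity of 1024 bytes (page
tracking is enabled because 1024 > MAX_LOW_BUFFER_IMAGE_GRANULARITY) and a
1 MiB block. pAllocationCallbacks stands for the caller's VkAllocationCallbacks
pointer:
*/
#if 0
VmaBlockBufferImageGranularity gran(1024);
gran.Init(pAllocationCallbacks, 1024 * 1024);            // 1024 regions of 1 KiB each
gran.AllocPages(VMA_SUBALLOCATION_TYPE_BUFFER, 0, 1500); // marks pages 0 and 1 as BUFFER
// An optimal image placed at offset 1600 would start on page 1, which is
// already marked as BUFFER, so CheckConflictAndAlignUp() would align the
// offset up to the next 1024-byte boundary (page 2) to avoid the conflict.
gran.FreePages(0, 1500);                                 // pages 0 and 1 become free again
gran.Destroy(pAllocationCallbacks);
#endif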
6788 
6789 #if 0
6790 #ifndef _VMA_BLOCK_METADATA_GENERIC
6791 class VmaBlockMetadata_Generic : public VmaBlockMetadata
6792 {
6793     friend class VmaDefragmentationAlgorithm_Generic;
6794     friend class VmaDefragmentationAlgorithm_Fast;
6795     VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockMetadata_Generic)
6796 public:
6797     VmaBlockMetadata_Generic(const VkAllocationCallbacks* pAllocationCallbacks,
6798         VkDeviceSize bufferImageGranularity, bool isVirtual);
6799     virtual ~VmaBlockMetadata_Generic() = default;
6800 
6801     size_t GetAllocationCount() const override { return m_Suballocations.size() - m_FreeCount; }
6802     VkDeviceSize GetSumFreeSize() const override { return m_SumFreeSize; }
6803     bool IsEmpty() const override { return (m_Suballocations.size() == 1) && (m_FreeCount == 1); }
6804     void Free(VmaAllocHandle allocHandle) override { FreeSuballocation(FindAtOffset((VkDeviceSize)allocHandle - 1)); }
6805     VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return (VkDeviceSize)allocHandle - 1; }
6806 
6807     void Init(VkDeviceSize size) override;
6808     bool Validate() const override;
6809 
6810     void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override;
6811     void AddStatistics(VmaStatistics& inoutStats) const override;
6812 
6813 #if VMA_STATS_STRING_ENABLED
6814     void PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const override;
6815 #endif
6816 
6817     bool CreateAllocationRequest(
6818         VkDeviceSize allocSize,
6819         VkDeviceSize allocAlignment,
6820         bool upperAddress,
6821         VmaSuballocationType allocType,
6822         uint32_t strategy,
6823         VmaAllocationRequest* pAllocationRequest) override;
6824 
6825     VkResult CheckCorruption(const void* pBlockData) override;
6826 
6827     void Alloc(
6828         const VmaAllocationRequest& request,
6829         VmaSuballocationType type,
6830         void* userData) override;
6831 
6832     void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override;
6833     void* GetAllocationUserData(VmaAllocHandle allocHandle) const override;
6834     VmaAllocHandle GetAllocationListBegin() const override;
6835     VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override;
6836     void Clear() override;
6837     void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override;
6838     void DebugLogAllAllocations() const override;
6839 
6840 private:
6841     uint32_t m_FreeCount;
6842     VkDeviceSize m_SumFreeSize;
6843     VmaSuballocationList m_Suballocations;
6844     // Suballocations that are free. Sorted by size, ascending.
6845     VmaVector<VmaSuballocationList::iterator, VmaStlAllocator<VmaSuballocationList::iterator>> m_FreeSuballocationsBySize;
6846 
6847     VkDeviceSize AlignAllocationSize(VkDeviceSize size) const { return IsVirtual() ? size : VmaAlignUp(size, (VkDeviceSize)16); }
6848 
6849     VmaSuballocationList::iterator FindAtOffset(VkDeviceSize offset) const;
6850     bool ValidateFreeSuballocationList() const;
6851 
6852     // Checks if requested suballocation with given parameters can be placed in given suballocItem.
6853     // If yes, fills pAllocHandle and returns true. If no, returns false.
6854     bool CheckAllocation(
6855         VkDeviceSize allocSize,
6856         VkDeviceSize allocAlignment,
6857         VmaSuballocationType allocType,
6858         VmaSuballocationList::const_iterator suballocItem,
6859         VmaAllocHandle* pAllocHandle) const;
6860 
6861     // Given free suballocation, it merges it with following one, which must also be free.
6862     void MergeFreeWithNext(VmaSuballocationList::iterator item);
6863     // Releases given suballocation, making it free.
6864     // Merges it with adjacent free suballocations if applicable.
6865     // Returns iterator to new free suballocation at this place.
6866     VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
6867     // Given free suballocation, it inserts it into sorted list of
6868     // m_FreeSuballocationsBySize if it is suitable.
6869     void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
6870     // Given free suballocation, it removes it from sorted list of
6871     // m_FreeSuballocationsBySize if it is suitable.
6872     void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
6873 };
6874 
6875 #ifndef _VMA_BLOCK_METADATA_GENERIC_FUNCTIONS
6876 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(const VkAllocationCallbacks* pAllocationCallbacks,
6877     VkDeviceSize bufferImageGranularity, bool isVirtual)
6878     : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual),
6879     m_FreeCount(0),
6880     m_SumFreeSize(0),
6881     m_Suballocations(VmaStlAllocator<VmaSuballocation>(pAllocationCallbacks)),
6882     m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(pAllocationCallbacks)) {}
6883 
6884 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
6885 {
6886     VmaBlockMetadata::Init(size);
6887 
6888     m_FreeCount = 1;
6889     m_SumFreeSize = size;
6890 
6891     VmaSuballocation suballoc = {};
6892     suballoc.offset = 0;
6893     suballoc.size = size;
6894     suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
6895 
6896     m_Suballocations.push_back(suballoc);
6897     m_FreeSuballocationsBySize.push_back(m_Suballocations.begin());
6898 }
6899 
6900 bool VmaBlockMetadata_Generic::Validate() const
6901 {
6902     VMA_VALIDATE(!m_Suballocations.empty());
6903 
6904     // Expected offset of new suballocation as calculated from previous ones.
6905     VkDeviceSize calculatedOffset = 0;
6906     // Expected number of free suballocations as calculated from traversing their list.
6907     uint32_t calculatedFreeCount = 0;
6908     // Expected sum size of free suballocations as calculated from traversing their list.
6909     VkDeviceSize calculatedSumFreeSize = 0;
6910     // Expected number of free suballocations that should be registered in
6911     // m_FreeSuballocationsBySize calculated from traversing their list.
6912     size_t freeSuballocationsToRegister = 0;
6913     // True if previous visited suballocation was free.
6914     bool prevFree = false;
6915 
6916     const VkDeviceSize debugMargin = GetDebugMargin();
6917 
6918     for (const auto& subAlloc : m_Suballocations)
6919     {
6920         // Actual offset of this suballocation doesn't match expected one.
6921         VMA_VALIDATE(subAlloc.offset == calculatedOffset);
6922 
6923         const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
6924         // Two adjacent free suballocations are invalid. They should be merged.
6925         VMA_VALIDATE(!prevFree || !currFree);
6926 
6927         VmaAllocation alloc = (VmaAllocation)subAlloc.userData;
6928         if (!IsVirtual())
6929         {
6930             VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE));
6931         }
6932 
6933         if (currFree)
6934         {
6935             calculatedSumFreeSize += subAlloc.size;
6936             ++calculatedFreeCount;
6937             ++freeSuballocationsToRegister;
6938 
6939             // Margin required between allocations - every free space must be at least that large.
6940             VMA_VALIDATE(subAlloc.size >= debugMargin);
6941         }
6942         else
6943         {
6944             if (!IsVirtual())
6945             {
6946                 VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == subAlloc.offset + 1);
6947                 VMA_VALIDATE(alloc->GetSize() == subAlloc.size);
6948             }
6949 
6950             // Margin required between allocations - previous allocation must be free.
6951             VMA_VALIDATE(debugMargin == 0 || prevFree);
6952         }
6953 
6954         calculatedOffset += subAlloc.size;
6955         prevFree = currFree;
6956     }
6957 
6958     // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
6959     // match expected one.
6960     VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
6961 
6962     VkDeviceSize lastSize = 0;
6963     for (size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
6964     {
6965         VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
6966 
6967         // Only free suballocations can be registered in m_FreeSuballocationsBySize.
6968         VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
6969         // They must be sorted by size ascending.
6970         VMA_VALIDATE(suballocItem->size >= lastSize);
6971 
6972         lastSize = suballocItem->size;
6973     }
6974 
6975     // Check if totals match calculated values.
6976     VMA_VALIDATE(ValidateFreeSuballocationList());
6977     VMA_VALIDATE(calculatedOffset == GetSize());
6978     VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
6979     VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
6980 
6981     return true;
6982 }
6983 
6984 void VmaBlockMetadata_Generic::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const
6985 {
6986     const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
6987     inoutStats.statistics.blockCount++;
6988     inoutStats.statistics.blockBytes += GetSize();
6989 
6990     for (const auto& suballoc : m_Suballocations)
6991     {
6992         if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
6993             VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size);
6994         else
6995             VmaAddDetailedStatisticsUnusedRange(inoutStats, suballoc.size);
6996     }
6997 }
6998 
6999 void VmaBlockMetadata_Generic::AddStatistics(VmaStatistics& inoutStats) const
7000 {
7001     inoutStats.blockCount++;
7002     inoutStats.allocationCount += (uint32_t)m_Suballocations.size() - m_FreeCount;
7003     inoutStats.blockBytes += GetSize();
7004     inoutStats.allocationBytes += GetSize() - m_SumFreeSize;
7005 }
7006 
7007 #if VMA_STATS_STRING_ENABLED
7008 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const
7009 {
7010     PrintDetailedMap_Begin(json,
7011         m_SumFreeSize, // unusedBytes
7012         m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
7013         m_FreeCount, // unusedRangeCount
7014         mapRefCount);
7015 
7016     for (const auto& suballoc : m_Suballocations)
7017     {
7018         if (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
7019         {
7020             PrintDetailedMap_UnusedRange(json, suballoc.offset, suballoc.size);
7021         }
7022         else
7023         {
7024             PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData);
7025         }
7026     }
7027 
7028     PrintDetailedMap_End(json);
7029 }
7030 #endif // VMA_STATS_STRING_ENABLED
7031 
7032 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
7033     VkDeviceSize allocSize,
7034     VkDeviceSize allocAlignment,
7035     bool upperAddress,
7036     VmaSuballocationType allocType,
7037     uint32_t strategy,
7038     VmaAllocationRequest* pAllocationRequest)
7039 {
7040     VMA_ASSERT(allocSize > 0);
7041     VMA_ASSERT(!upperAddress);
7042     VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
7043     VMA_ASSERT(pAllocationRequest != VMA_NULL);
7044     VMA_HEAVY_ASSERT(Validate());
7045 
7046     allocSize = AlignAllocationSize(allocSize);
7047 
7048     pAllocationRequest->type = VmaAllocationRequestType::Normal;
7049     pAllocationRequest->size = allocSize;
7050 
7051     const VkDeviceSize debugMargin = GetDebugMargin();
7052 
7053     // There is not enough total free space in this block to fulfill the request: Early return.
7054     if (m_SumFreeSize < allocSize + debugMargin)
7055     {
7056         return false;
7057     }
7058 
7059     // New algorithm, efficiently searching freeSuballocationsBySize.
7060     const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
7061     if (freeSuballocCount > 0)
7062     {
7063         if (strategy == 0 ||
7064             strategy == VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT)
7065         {
7066             // Find first free suballocation with size not less than allocSize + debugMargin.
7067             VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
7068                 m_FreeSuballocationsBySize.data(),
7069                 m_FreeSuballocationsBySize.data() + freeSuballocCount,
7070                 allocSize + debugMargin,
7071                 VmaSuballocationItemSizeLess());
7072             size_t index = it - m_FreeSuballocationsBySize.data();
7073             for (; index < freeSuballocCount; ++index)
7074             {
7075                 if (CheckAllocation(
7076                     allocSize,
7077                     allocAlignment,
7078                     allocType,
7079                     m_FreeSuballocationsBySize[index],
7080                     &pAllocationRequest->allocHandle))
7081                 {
7082                     pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7083                     return true;
7084                 }
7085             }
7086         }
7087         else if (strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
7088         {
7089             for (VmaSuballocationList::iterator it = m_Suballocations.begin();
7090                 it != m_Suballocations.end();
7091                 ++it)
7092             {
7093                 if (it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
7094                     allocSize,
7095                     allocAlignment,
7096                     allocType,
7097                     it,
7098                     &pAllocationRequest->allocHandle))
7099                 {
7100                     pAllocationRequest->item = it;
7101                     return true;
7102                 }
7103             }
7104         }
7105         else
7106         {
7107             VMA_ASSERT(strategy & (VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT | VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT));
7108             // Search starting from biggest suballocations.
7109             for (size_t index = freeSuballocCount; index--; )
7110             {
7111                 if (CheckAllocation(
7112                     allocSize,
7113                     allocAlignment,
7114                     allocType,
7115                     m_FreeSuballocationsBySize[index],
7116                     &pAllocationRequest->allocHandle))
7117                 {
7118                     pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7119                     return true;
7120                 }
7121             }
7122         }
7123     }
7124 
7125     return false;
7126 }
7127 
7128 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
7129 {
7130     for (auto& suballoc : m_Suballocations)
7131     {
7132         if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
7133         {
7134             if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
7135             {
7136                 VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
7137                 return VK_ERROR_UNKNOWN;
7138             }
7139         }
7140     }
7141 
7142     return VK_SUCCESS;
7143 }
7144 
7145 void VmaBlockMetadata_Generic::Alloc(
7146     const VmaAllocationRequest& request,
7147     VmaSuballocationType type,
7148     void* userData)
7149 {
7150     VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
7151     VMA_ASSERT(request.item != m_Suballocations.end());
7152     VmaSuballocation& suballoc = *request.item;
7153     // Given suballocation is a free block.
7154     VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7155 
7156     // Given offset is inside this suballocation.
7157     VMA_ASSERT((VkDeviceSize)request.allocHandle - 1 >= suballoc.offset);
7158     const VkDeviceSize paddingBegin = (VkDeviceSize)request.allocHandle - suballoc.offset - 1;
7159     VMA_ASSERT(suballoc.size >= paddingBegin + request.size);
7160     const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - request.size;
7161 
7162     // Unregister this free suballocation from m_FreeSuballocationsBySize and update
7163     // it to become used.
7164     UnregisterFreeSuballocation(request.item);
7165 
7166     suballoc.offset = (VkDeviceSize)request.allocHandle - 1;
7167     suballoc.size = request.size;
7168     suballoc.type = type;
7169     suballoc.userData = userData;
7170 
7171     // If there are any free bytes remaining at the end, insert new free suballocation after current one.
7172     if (paddingEnd)
7173     {
7174         VmaSuballocation paddingSuballoc = {};
7175         paddingSuballoc.offset = suballoc.offset + suballoc.size;
7176         paddingSuballoc.size = paddingEnd;
7177         paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7178         VmaSuballocationList::iterator next = request.item;
7179         ++next;
7180         const VmaSuballocationList::iterator paddingEndItem =
7181             m_Suballocations.insert(next, paddingSuballoc);
7182         RegisterFreeSuballocation(paddingEndItem);
7183     }
7184 
7185     // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
7186     if (paddingBegin)
7187     {
7188         VmaSuballocation paddingSuballoc = {};
7189         paddingSuballoc.offset = suballoc.offset - paddingBegin;
7190         paddingSuballoc.size = paddingBegin;
7191         paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7192         const VmaSuballocationList::iterator paddingBeginItem =
7193             m_Suballocations.insert(request.item, paddingSuballoc);
7194         RegisterFreeSuballocation(paddingBeginItem);
7195     }
7196 
7197     // Update totals.
7198     m_FreeCount = m_FreeCount - 1;
7199     if (paddingBegin > 0)
7200     {
7201         ++m_FreeCount;
7202     }
7203     if (paddingEnd > 0)
7204     {
7205         ++m_FreeCount;
7206     }
7207     m_SumFreeSize -= request.size;
7208 }
7209 
7210 void VmaBlockMetadata_Generic::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo)
7211 {
7212     outInfo.offset = (VkDeviceSize)allocHandle - 1;
7213     const VmaSuballocation& suballoc = *FindAtOffset(outInfo.offset);
7214     outInfo.size = suballoc.size;
7215     outInfo.pUserData = suballoc.userData;
7216 }
7217 
7218 void* VmaBlockMetadata_Generic::GetAllocationUserData(VmaAllocHandle allocHandle) const
7219 {
7220     return FindAtOffset((VkDeviceSize)allocHandle - 1)->userData;
7221 }
7222 
7223 VmaAllocHandle VmaBlockMetadata_Generic::GetAllocationListBegin() const
7224 {
7225     if (IsEmpty())
7226         return VK_NULL_HANDLE;
7227 
7228     for (const auto& suballoc : m_Suballocations)
7229     {
7230         if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
7231             return (VmaAllocHandle)(suballoc.offset + 1);
7232     }
7233     VMA_ASSERT(false && "Should contain at least 1 allocation!");
7234     return VK_NULL_HANDLE;
7235 }
7236 
7237 VmaAllocHandle VmaBlockMetadata_Generic::GetNextAllocation(VmaAllocHandle prevAlloc) const
7238 {
7239     VmaSuballocationList::const_iterator prev = FindAtOffset((VkDeviceSize)prevAlloc - 1);
7240 
7241     for (VmaSuballocationList::const_iterator it = ++prev; it != m_Suballocations.end(); ++it)
7242     {
7243         if (it->type != VMA_SUBALLOCATION_TYPE_FREE)
7244             return (VmaAllocHandle)(it->offset + 1);
7245     }
7246     return VK_NULL_HANDLE;
7247 }
7248 
7249 void VmaBlockMetadata_Generic::Clear()
7250 {
7251     const VkDeviceSize size = GetSize();
7252 
7253     VMA_ASSERT(IsVirtual());
7254     m_FreeCount = 1;
7255     m_SumFreeSize = size;
7256     m_Suballocations.clear();
7257     m_FreeSuballocationsBySize.clear();
7258 
7259     VmaSuballocation suballoc = {};
7260     suballoc.offset = 0;
7261     suballoc.size = size;
7262     suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7263     m_Suballocations.push_back(suballoc);
7264 
7265     m_FreeSuballocationsBySize.push_back(m_Suballocations.begin());
7266 }
7267 
7268 void VmaBlockMetadata_Generic::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData)
7269 {
7270     VmaSuballocation& suballoc = *FindAtOffset((VkDeviceSize)allocHandle - 1);
7271     suballoc.userData = userData;
7272 }
7273 
7274 void VmaBlockMetadata_Generic::DebugLogAllAllocations() const
7275 {
7276     for (const auto& suballoc : m_Suballocations)
7277     {
7278         if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
7279             DebugLogAllocation(suballoc.offset, suballoc.size, suballoc.userData);
7280     }
7281 }
7282 
7283 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FindAtOffset(VkDeviceSize offset) const
7284 {
7285     VMA_HEAVY_ASSERT(!m_Suballocations.empty());
7286     const VkDeviceSize last = m_Suballocations.rbegin()->offset;
7287     if (last == offset)
7288         return m_Suballocations.rbegin().drop_const();
7289     const VkDeviceSize first = m_Suballocations.begin()->offset;
7290     if (first == offset)
7291         return m_Suballocations.begin().drop_const();
7292 
7293     const size_t suballocCount = m_Suballocations.size();
7294     const VkDeviceSize step = (last - first + m_Suballocations.begin()->size) / suballocCount;
7295     auto findSuballocation = [&](auto begin, auto end) -> VmaSuballocationList::iterator
7296     {
7297         for (auto suballocItem = begin;
7298             suballocItem != end;
7299             ++suballocItem)
7300         {
7301             if (suballocItem->offset == offset)
7302                 return suballocItem.drop_const();
7303         }
7304         VMA_ASSERT(false && "Not found!");
7305         return m_Suballocations.end().drop_const();
7306     };
7307     // If requested offset is closer to the end of range, search from the end
7308     if (offset - first > suballocCount * step / 2)
7309     {
7310         return findSuballocation(m_Suballocations.rbegin(), m_Suballocations.rend());
7311     }
7312     return findSuballocation(m_Suballocations.begin(), m_Suballocations.end());
7313 }
7314 
7315 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
7316 {
7317     VkDeviceSize lastSize = 0;
7318     for (size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
7319     {
7320         const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
7321 
7322         VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
7323         VMA_VALIDATE(it->size >= lastSize);
7324         lastSize = it->size;
7325     }
7326     return true;
7327 }
7328 
7329 bool VmaBlockMetadata_Generic::CheckAllocation(
7330     VkDeviceSize allocSize,
7331     VkDeviceSize allocAlignment,
7332     VmaSuballocationType allocType,
7333     VmaSuballocationList::const_iterator suballocItem,
7334     VmaAllocHandle* pAllocHandle) const
7335 {
7336     VMA_ASSERT(allocSize > 0);
7337     VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
7338     VMA_ASSERT(suballocItem != m_Suballocations.cend());
7339     VMA_ASSERT(pAllocHandle != VMA_NULL);
7340 
7341     const VkDeviceSize debugMargin = GetDebugMargin();
7342     const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity();
7343 
7344     const VmaSuballocation& suballoc = *suballocItem;
7345     VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7346 
7347     // Size of this suballocation is too small for this request: Early return.
7348     if (suballoc.size < allocSize)
7349     {
7350         return false;
7351     }
7352 
7353     // Start from offset equal to beginning of this suballocation.
7354     VkDeviceSize offset = suballoc.offset + (suballocItem == m_Suballocations.cbegin() ? 0 : GetDebugMargin());
7355 
7356     // Apply debugMargin from the end of previous alloc.
7357     if (debugMargin > 0)
7358     {
7359         offset += debugMargin;
7360     }
7361 
7362     // Apply alignment.
7363     offset = VmaAlignUp(offset, allocAlignment);
7364 
7365     // Check previous suballocations for BufferImageGranularity conflicts.
7366     // Make bigger alignment if necessary.
7367     if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment)
7368     {
7369         bool bufferImageGranularityConflict = false;
7370         VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
7371         while (prevSuballocItem != m_Suballocations.cbegin())
7372         {
7373             --prevSuballocItem;
7374             const VmaSuballocation& prevSuballoc = *prevSuballocItem;
7375             if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, offset, bufferImageGranularity))
7376             {
7377                 if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
7378                 {
7379                     bufferImageGranularityConflict = true;
7380                     break;
7381                 }
7382             }
7383             else
7384                 // Already on previous page.
7385                 break;
7386         }
7387         if (bufferImageGranularityConflict)
7388         {
7389             offset = VmaAlignUp(offset, bufferImageGranularity);
7390         }
7391     }
7392 
7393     // Calculate padding at the beginning based on current offset.
7394     const VkDeviceSize paddingBegin = offset - suballoc.offset;
7395 
7396     // Fail if requested size plus margin after is bigger than size of this suballocation.
7397     if (paddingBegin + allocSize + debugMargin > suballoc.size)
7398     {
7399         return false;
7400     }
7401 
7402     // Check next suballocations for BufferImageGranularity conflicts.
7403     // If conflict exists, allocation cannot be made here.
7404     if (allocSize % bufferImageGranularity || offset % bufferImageGranularity)
7405     {
7406         VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
7407         ++nextSuballocItem;
7408         while (nextSuballocItem != m_Suballocations.cend())
7409         {
7410             const VmaSuballocation& nextSuballoc = *nextSuballocItem;
7411             if (VmaBlocksOnSamePage(offset, allocSize, nextSuballoc.offset, bufferImageGranularity))
7412             {
7413                 if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
7414                 {
7415                     return false;
7416                 }
7417             }
7418             else
7419             {
7420                 // Already on next page.
7421                 break;
7422             }
7423             ++nextSuballocItem;
7424         }
7425     }
7426 
7427     *pAllocHandle = (VmaAllocHandle)(offset + 1);
7428     // All tests passed: Success. pAllocHandle is already filled.
7429     return true;
7430 }
7431 
7432 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
7433 {
7434     VMA_ASSERT(item != m_Suballocations.end());
7435     VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7436 
7437     VmaSuballocationList::iterator nextItem = item;
7438     ++nextItem;
7439     VMA_ASSERT(nextItem != m_Suballocations.end());
7440     VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
7441 
7442     item->size += nextItem->size;
7443     --m_FreeCount;
7444     m_Suballocations.erase(nextItem);
7445 }
7446 
7447 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
7448 {
7449     // Change this suballocation to be marked as free.
7450     VmaSuballocation& suballoc = *suballocItem;
7451     suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7452     suballoc.userData = VMA_NULL;
7453 
7454     // Update totals.
7455     ++m_FreeCount;
7456     m_SumFreeSize += suballoc.size;
7457 
7458     // Merge with previous and/or next suballocation if it's also free.
7459     bool mergeWithNext = false;
7460     bool mergeWithPrev = false;
7461 
7462     VmaSuballocationList::iterator nextItem = suballocItem;
7463     ++nextItem;
7464     if ((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
7465     {
7466         mergeWithNext = true;
7467     }
7468 
7469     VmaSuballocationList::iterator prevItem = suballocItem;
7470     if (suballocItem != m_Suballocations.begin())
7471     {
7472         --prevItem;
7473         if (prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7474         {
7475             mergeWithPrev = true;
7476         }
7477     }
7478 
7479     if (mergeWithNext)
7480     {
7481         UnregisterFreeSuballocation(nextItem);
7482         MergeFreeWithNext(suballocItem);
7483     }
7484 
7485     if (mergeWithPrev)
7486     {
7487         UnregisterFreeSuballocation(prevItem);
7488         MergeFreeWithNext(prevItem);
7489         RegisterFreeSuballocation(prevItem);
7490         return prevItem;
7491     }
7492     else
7493     {
7494         RegisterFreeSuballocation(suballocItem);
7495         return suballocItem;
7496     }
7497 }
7498 
7499 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
7500 {
7501     VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7502     VMA_ASSERT(item->size > 0);
7503 
7504     // You may want to enable this validation at the beginning or at the end of
7505     // this function, depending on what you want to check.
7506     VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7507 
7508     if (m_FreeSuballocationsBySize.empty())
7509     {
7510         m_FreeSuballocationsBySize.push_back(item);
7511     }
7512     else
7513     {
7514         VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
7515     }
7516 
7517     //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7518 }
7519 
7520 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
7521 {
7522     VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7523     VMA_ASSERT(item->size > 0);
7524 
7525     // You may want to enable this validation at the beginning or at the end of
7526     // this function, depending on what you want to check.
7527     VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7528 
7529     VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
7530         m_FreeSuballocationsBySize.data(),
7531         m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
7532         item,
7533         VmaSuballocationItemSizeLess());
7534     for (size_t index = it - m_FreeSuballocationsBySize.data();
7535         index < m_FreeSuballocationsBySize.size();
7536         ++index)
7537     {
7538         if (m_FreeSuballocationsBySize[index] == item)
7539         {
7540             VmaVectorRemove(m_FreeSuballocationsBySize, index);
7541             return;
7542         }
7543         VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
7544     }
7545     VMA_ASSERT(0 && "Not found.");
7546 
7547     //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7548 }
7549 #endif // _VMA_BLOCK_METADATA_GENERIC_FUNCTIONS
7550 #endif // _VMA_BLOCK_METADATA_GENERIC
7551 #endif // #if 0
7552 
7553 #ifndef _VMA_BLOCK_METADATA_LINEAR
7554 /*
7555 Allocations and their references in internal data structure look like this:
7556 
7557 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
7558 
7559         0 +-------+
7560           |       |
7561           |       |
7562           |       |
7563           +-------+
7564           | Alloc |  1st[m_1stNullItemsBeginCount]
7565           +-------+
7566           | Alloc |  1st[m_1stNullItemsBeginCount + 1]
7567           +-------+
7568           |  ...  |
7569           +-------+
7570           | Alloc |  1st[1st.size() - 1]
7571           +-------+
7572           |       |
7573           |       |
7574           |       |
7575 GetSize() +-------+
7576 
7577 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
7578 
7579         0 +-------+
7580           | Alloc |  2nd[0]
7581           +-------+
7582           | Alloc |  2nd[1]
7583           +-------+
7584           |  ...  |
7585           +-------+
7586           | Alloc |  2nd[2nd.size() - 1]
7587           +-------+
7588           |       |
7589           |       |
7590           |       |
7591           +-------+
7592           | Alloc |  1st[m_1stNullItemsBeginCount]
7593           +-------+
7594           | Alloc |  1st[m_1stNullItemsBeginCount + 1]
7595           +-------+
7596           |  ...  |
7597           +-------+
7598           | Alloc |  1st[1st.size() - 1]
7599           +-------+
7600           |       |
7601 GetSize() +-------+
7602 
7603 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
7604 
7605         0 +-------+
7606           |       |
7607           |       |
7608           |       |
7609           +-------+
7610           | Alloc |  1st[m_1stNullItemsBeginCount]
7611           +-------+
7612           | Alloc |  1st[m_1stNullItemsBeginCount + 1]
7613           +-------+
7614           |  ...  |
7615           +-------+
7616           | Alloc |  1st[1st.size() - 1]
7617           +-------+
7618           |       |
7619           |       |
7620           |       |
7621           +-------+
7622           | Alloc |  2nd[2nd.size() - 1]
7623           +-------+
7624           |  ...  |
7625           +-------+
7626           | Alloc |  2nd[1]
7627           +-------+
7628           | Alloc |  2nd[0]
7629 GetSize() +-------+
7630 
7631 */
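/*
A minimal sketch (not part of the library) of how the double-stack layout shown
above is typically driven through the public API: a custom pool created with
VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT uses this metadata class, and allocations
requested with VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT fill the 2nd vector from
the top of the block. The variables allocator, memTypeIndex, and bufCreateInfo
are assumed to exist in the caller's code.
*/
#if 0
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex; // e.g. from vmaFindMemoryTypeIndex()
poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
poolCreateInfo.blockSize = 64ull * 1024 * 1024;
poolCreateInfo.maxBlockCount = 1;

VmaPool pool;
vmaCreatePool(allocator, &poolCreateInfo, &pool);

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.pool = pool;

// Lower stack: grows upward from offset 0 (items land in the 1st vector).
VkBuffer buf1; VmaAllocation alloc1;
vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf1, &alloc1, VMA_NULL);

// Upper stack: grows downward from GetSize() (items land in the 2nd vector).
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
VkBuffer buf2; VmaAllocation alloc2;
vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf2, &alloc2, VMA_NULL);
#endif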
7632 class VmaBlockMetadata_Linear : public VmaBlockMetadata
7633 {
7634     VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockMetadata_Linear)
7635 public:
7636     VmaBlockMetadata_Linear(const VkAllocationCallbacks* pAllocationCallbacks,
7637         VkDeviceSize bufferImageGranularity, bool isVirtual);
7638     virtual ~VmaBlockMetadata_Linear() = default;
7639 
7640     VkDeviceSize GetSumFreeSize() const override { return m_SumFreeSize; }
7641     bool IsEmpty() const override { return GetAllocationCount() == 0; }
7642     VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return (VkDeviceSize)allocHandle - 1; }
7643 
7644     void Init(VkDeviceSize size) override;
7645     bool Validate() const override;
7646     size_t GetAllocationCount() const override;
7647     size_t GetFreeRegionsCount() const override;
7648 
7649     void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override;
7650     void AddStatistics(VmaStatistics& inoutStats) const override;
7651 
7652 #if VMA_STATS_STRING_ENABLED
7653     void PrintDetailedMap(class VmaJsonWriter& json) const override;
7654 #endif
7655 
7656     bool CreateAllocationRequest(
7657         VkDeviceSize allocSize,
7658         VkDeviceSize allocAlignment,
7659         bool upperAddress,
7660         VmaSuballocationType allocType,
7661         uint32_t strategy,
7662         VmaAllocationRequest* pAllocationRequest) override;
7663 
7664     VkResult CheckCorruption(const void* pBlockData) override;
7665 
7666     void Alloc(
7667         const VmaAllocationRequest& request,
7668         VmaSuballocationType type,
7669         void* userData) override;
7670 
7671     void Free(VmaAllocHandle allocHandle) override;
7672     void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override;
7673     void* GetAllocationUserData(VmaAllocHandle allocHandle) const override;
7674     VmaAllocHandle GetAllocationListBegin() const override;
7675     VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override;
7676     VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const override;
7677     void Clear() override;
7678     void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override;
7679     void DebugLogAllAllocations() const override;
7680 
7681 private:
7682     /*
7683     There are two suballocation vectors, used in ping-pong way.
7684     The one with index m_1stVectorIndex is called 1st.
7685     The one with index (m_1stVectorIndex ^ 1) is called 2nd.
7686     2nd can be non-empty only when 1st is not empty.
7687     When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
7688     */
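    // For example, if m_1stVectorIndex == 0, then 1st == m_Suballocations0 and
    // 2nd == m_Suballocations1. When 1st becomes empty in ring-buffer mode, the
    // roles can be swapped simply by flipping m_1stVectorIndex
    // (see CleanupAfterFree()).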
7689     typedef VmaVector<VmaSuballocation, VmaStlAllocator<VmaSuballocation>> SuballocationVectorType;
7690 
7691     enum SECOND_VECTOR_MODE
7692     {
7693         SECOND_VECTOR_EMPTY,
7694         /*
7695         Suballocations in 2nd vector are created later than the ones in 1st, but they
7696         all have smaller offset.
7697         */
7698         SECOND_VECTOR_RING_BUFFER,
7699         /*
7700         Suballocations in 2nd vector are upper side of double stack.
7701         They all have offsets higher than those in 1st vector.
7702         Top of this stack means smaller offsets, but higher indices in this vector.
7703         */
7704         SECOND_VECTOR_DOUBLE_STACK,
7705     };
7706 
7707     VkDeviceSize m_SumFreeSize;
7708     SuballocationVectorType m_Suballocations0, m_Suballocations1;
7709     uint32_t m_1stVectorIndex;
7710     SECOND_VECTOR_MODE m_2ndVectorMode;
7711     // Number of items in 1st vector with hAllocation = null at the beginning.
7712     size_t m_1stNullItemsBeginCount;
7713     // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
7714     size_t m_1stNullItemsMiddleCount;
7715     // Number of items in 2nd vector with hAllocation = null.
7716     size_t m_2ndNullItemsCount;
7717 
7718     SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
7719     SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
7720     const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
7721     const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
7722 
7723     VmaSuballocation& FindSuballocation(VkDeviceSize offset) const;
7724     bool ShouldCompact1st() const;
7725     void CleanupAfterFree();
7726 
7727     bool CreateAllocationRequest_LowerAddress(
7728         VkDeviceSize allocSize,
7729         VkDeviceSize allocAlignment,
7730         VmaSuballocationType allocType,
7731         uint32_t strategy,
7732         VmaAllocationRequest* pAllocationRequest);
7733     bool CreateAllocationRequest_UpperAddress(
7734         VkDeviceSize allocSize,
7735         VkDeviceSize allocAlignment,
7736         VmaSuballocationType allocType,
7737         uint32_t strategy,
7738         VmaAllocationRequest* pAllocationRequest);
7739 };
7740 
7741 #ifndef _VMA_BLOCK_METADATA_LINEAR_FUNCTIONS
7742 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(const VkAllocationCallbacks* pAllocationCallbacks,
7743     VkDeviceSize bufferImageGranularity, bool isVirtual)
7744     : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual),
7745     m_SumFreeSize(0),
7746     m_Suballocations0(VmaStlAllocator<VmaSuballocation>(pAllocationCallbacks)),
7747     m_Suballocations1(VmaStlAllocator<VmaSuballocation>(pAllocationCallbacks)),
7748     m_1stVectorIndex(0),
7749     m_2ndVectorMode(SECOND_VECTOR_EMPTY),
7750     m_1stNullItemsBeginCount(0),
7751     m_1stNullItemsMiddleCount(0),
7752     m_2ndNullItemsCount(0) {}
7753 
7754 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
7755 {
7756     VmaBlockMetadata::Init(size);
7757     m_SumFreeSize = size;
7758 }
7759 
7760 bool VmaBlockMetadata_Linear::Validate() const
7761 {
7762     const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7763     const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7764 
7765     VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
7766     VMA_VALIDATE(!suballocations1st.empty() ||
7767         suballocations2nd.empty() ||
7768         m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
7769 
7770     if (!suballocations1st.empty())
7771     {
7772         // Null items at the beginning should be accounted for in m_1stNullItemsBeginCount.
7773         VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].type != VMA_SUBALLOCATION_TYPE_FREE);
7774         // Null item at the end should be just pop_back().
7775         VMA_VALIDATE(suballocations1st.back().type != VMA_SUBALLOCATION_TYPE_FREE);
7776     }
7777     if (!suballocations2nd.empty())
7778     {
7779         // Null item at the end should be just pop_back().
7780         VMA_VALIDATE(suballocations2nd.back().type != VMA_SUBALLOCATION_TYPE_FREE);
7781     }
7782 
7783     VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
7784     VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
7785 
7786     VkDeviceSize sumUsedSize = 0;
7787     const size_t suballoc1stCount = suballocations1st.size();
7788     const VkDeviceSize debugMargin = GetDebugMargin();
7789     VkDeviceSize offset = 0;
7790 
7791     if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
7792     {
7793         const size_t suballoc2ndCount = suballocations2nd.size();
7794         size_t nullItem2ndCount = 0;
7795         for (size_t i = 0; i < suballoc2ndCount; ++i)
7796         {
7797             const VmaSuballocation& suballoc = suballocations2nd[i];
7798             const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7799 
7800             VmaAllocation const alloc = (VmaAllocation)suballoc.userData;
7801             if (!IsVirtual())
7802             {
7803                 VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE));
7804             }
7805             VMA_VALIDATE(suballoc.offset >= offset);
7806 
7807             if (!currFree)
7808             {
7809                 if (!IsVirtual())
7810                 {
7811                     VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1);
7812                     VMA_VALIDATE(alloc->GetSize() == suballoc.size);
7813                 }
7814                 sumUsedSize += suballoc.size;
7815             }
7816             else
7817             {
7818                 ++nullItem2ndCount;
7819             }
7820 
7821             offset = suballoc.offset + suballoc.size + debugMargin;
7822         }
7823 
7824         VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
7825     }
7826 
7827     for (size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
7828     {
7829         const VmaSuballocation& suballoc = suballocations1st[i];
7830         VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
7831             suballoc.userData == VMA_NULL);
7832     }
7833 
7834     size_t nullItem1stCount = m_1stNullItemsBeginCount;
7835 
7836     for (size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
7837     {
7838         const VmaSuballocation& suballoc = suballocations1st[i];
7839         const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7840 
7841         VmaAllocation const alloc = (VmaAllocation)suballoc.userData;
7842         if (!IsVirtual())
7843         {
7844             VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE));
7845         }
7846         VMA_VALIDATE(suballoc.offset >= offset);
7847         VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
7848 
7849         if (!currFree)
7850         {
7851             if (!IsVirtual())
7852             {
7853                 VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1);
7854                 VMA_VALIDATE(alloc->GetSize() == suballoc.size);
7855             }
7856             sumUsedSize += suballoc.size;
7857         }
7858         else
7859         {
7860             ++nullItem1stCount;
7861         }
7862 
7863         offset = suballoc.offset + suballoc.size + debugMargin;
7864     }
7865     VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
7866 
7867     if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
7868     {
7869         const size_t suballoc2ndCount = suballocations2nd.size();
7870         size_t nullItem2ndCount = 0;
7871         for (size_t i = suballoc2ndCount; i--; )
7872         {
7873             const VmaSuballocation& suballoc = suballocations2nd[i];
7874             const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7875 
7876             VmaAllocation const alloc = (VmaAllocation)suballoc.userData;
7877             if (!IsVirtual())
7878             {
7879                 VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE));
7880             }
7881             VMA_VALIDATE(suballoc.offset >= offset);
7882 
7883             if (!currFree)
7884             {
7885                 if (!IsVirtual())
7886                 {
7887                     VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1);
7888                     VMA_VALIDATE(alloc->GetSize() == suballoc.size);
7889                 }
7890                 sumUsedSize += suballoc.size;
7891             }
7892             else
7893             {
7894                 ++nullItem2ndCount;
7895             }
7896 
7897             offset = suballoc.offset + suballoc.size + debugMargin;
7898         }
7899 
7900         VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
7901     }
7902 
7903     VMA_VALIDATE(offset <= GetSize());
7904     VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
7905 
7906     return true;
7907 }
7908 
7909 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
7910 {
7911     return AccessSuballocations1st().size() - m_1stNullItemsBeginCount - m_1stNullItemsMiddleCount +
7912         AccessSuballocations2nd().size() - m_2ndNullItemsCount;
7913 }
7914 
7915 size_t VmaBlockMetadata_Linear::GetFreeRegionsCount() const
7916 {
7917     // Function only used for defragmentation, which is disabled for this algorithm
7918     VMA_ASSERT(0);
7919     return SIZE_MAX;
7920 }
7921 
7922 void VmaBlockMetadata_Linear::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const
7923 {
7924     const VkDeviceSize size = GetSize();
7925     const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7926     const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7927     const size_t suballoc1stCount = suballocations1st.size();
7928     const size_t suballoc2ndCount = suballocations2nd.size();
7929 
7930     inoutStats.statistics.blockCount++;
7931     inoutStats.statistics.blockBytes += size;
7932 
7933     VkDeviceSize lastOffset = 0;
7934 
7935     if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
7936     {
7937         const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
7938         size_t nextAlloc2ndIndex = 0;
7939         while (lastOffset < freeSpace2ndTo1stEnd)
7940         {
7941             // Find next non-null allocation or move nextAllocIndex to the end.
7942             while (nextAlloc2ndIndex < suballoc2ndCount &&
7943                 suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
7944             {
7945                 ++nextAlloc2ndIndex;
7946             }
7947 
7948             // Found non-null allocation.
7949             if (nextAlloc2ndIndex < suballoc2ndCount)
7950             {
7951                 const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
7952 
7953                 // 1. Process free space before this allocation.
7954                 if (lastOffset < suballoc.offset)
7955                 {
7956                     // There is free space from lastOffset to suballoc.offset.
7957                     const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
7958                     VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
7959                 }
7960 
7961                 // 2. Process this allocation.
7962                 // There is allocation with suballoc.offset, suballoc.size.
7963                 VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size);
7964 
7965                 // 3. Prepare for next iteration.
7966                 lastOffset = suballoc.offset + suballoc.size;
7967                 ++nextAlloc2ndIndex;
7968             }
7969             // We are at the end.
7970             else
7971             {
7972                 // There is free space from lastOffset to freeSpace2ndTo1stEnd.
7973                 if (lastOffset < freeSpace2ndTo1stEnd)
7974                 {
7975                     const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
7976                     VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
7977                 }
7978 
7979                 // End of loop.
7980                 lastOffset = freeSpace2ndTo1stEnd;
7981             }
7982         }
7983     }
7984 
7985     size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
7986     const VkDeviceSize freeSpace1stTo2ndEnd =
7987         m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
7988     while (lastOffset < freeSpace1stTo2ndEnd)
7989     {
7990         // Find next non-null allocation or move nextAllocIndex to the end.
7991         while (nextAlloc1stIndex < suballoc1stCount &&
7992             suballocations1st[nextAlloc1stIndex].userData == VMA_NULL)
7993         {
7994             ++nextAlloc1stIndex;
7995         }
7996 
7997         // Found non-null allocation.
7998         if (nextAlloc1stIndex < suballoc1stCount)
7999         {
8000             const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8001 
8002             // 1. Process free space before this allocation.
8003             if (lastOffset < suballoc.offset)
8004             {
8005                 // There is free space from lastOffset to suballoc.offset.
8006                 const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8007                 VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
8008             }
8009 
8010             // 2. Process this allocation.
8011             // There is allocation with suballoc.offset, suballoc.size.
8012             VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size);
8013 
8014             // 3. Prepare for next iteration.
8015             lastOffset = suballoc.offset + suballoc.size;
8016             ++nextAlloc1stIndex;
8017         }
8018         // We are at the end.
8019         else
8020         {
8021             // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8022             if (lastOffset < freeSpace1stTo2ndEnd)
8023             {
8024                 const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8025                 VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
8026             }
8027 
8028             // End of loop.
8029             lastOffset = freeSpace1stTo2ndEnd;
8030         }
8031     }
8032 
8033     if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8034     {
8035         size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8036         while (lastOffset < size)
8037         {
8038             // Find next non-null allocation or move nextAllocIndex to the end.
8039             while (nextAlloc2ndIndex != SIZE_MAX &&
8040                 suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
8041             {
8042                 --nextAlloc2ndIndex;
8043             }
8044 
8045             // Found non-null allocation.
8046             if (nextAlloc2ndIndex != SIZE_MAX)
8047             {
8048                 const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8049 
8050                 // 1. Process free space before this allocation.
8051                 if (lastOffset < suballoc.offset)
8052                 {
8053                     // There is free space from lastOffset to suballoc.offset.
8054                     const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8055                     VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
8056                 }
8057 
8058                 // 2. Process this allocation.
8059                 // There is allocation with suballoc.offset, suballoc.size.
8060                 VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size);
8061 
8062                 // 3. Prepare for next iteration.
8063                 lastOffset = suballoc.offset + suballoc.size;
8064                 --nextAlloc2ndIndex;
8065             }
8066             // We are at the end.
8067             else
8068             {
8069                 // There is free space from lastOffset to size.
8070                 if (lastOffset < size)
8071                 {
8072                     const VkDeviceSize unusedRangeSize = size - lastOffset;
8073                     VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
8074                 }
8075 
8076                 // End of loop.
8077                 lastOffset = size;
8078             }
8079         }
8080     }
8081 }
8082 
void VmaBlockMetadata_Linear::AddStatistics(VmaStatistics& inoutStats) const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    const VkDeviceSize size = GetSize();
    const size_t suballoc1stCount = suballocations1st.size();
    const size_t suballoc2ndCount = suballocations2nd.size();

    inoutStats.blockCount++;
    inoutStats.blockBytes += size;
    inoutStats.allocationBytes += size - m_SumFreeSize;

    VkDeviceSize lastOffset = 0;

    if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
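        // Scan the 2nd vector from its beginning, matching AddDetailedStatistics()
        // and PrintDetailedMap(); its null items are skipped by the
        // userData == VMA_NULL check below.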
        size_t nextAlloc2ndIndex = 0;
        while (lastOffset < freeSpace2ndTo1stEnd)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while (nextAlloc2ndIndex < suballoc2ndCount &&
                suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
            {
                ++nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if (nextAlloc2ndIndex < suballoc2ndCount)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                ++inoutStats.allocationCount;

                // Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                ++nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                // End of loop.
                lastOffset = freeSpace2ndTo1stEnd;
            }
        }
    }

    size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    const VkDeviceSize freeSpace1stTo2ndEnd =
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    while (lastOffset < freeSpace1stTo2ndEnd)
    {
        // Find next non-null allocation or move nextAlloc1stIndex to the end.
        while (nextAlloc1stIndex < suballoc1stCount &&
            suballocations1st[nextAlloc1stIndex].userData == VMA_NULL)
        {
            ++nextAlloc1stIndex;
        }

        // Found non-null allocation.
        if (nextAlloc1stIndex < suballoc1stCount)
        {
            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];

            // Process this allocation.
            // There is allocation with suballoc.offset, suballoc.size.
            ++inoutStats.allocationCount;

            // Prepare for next iteration.
            lastOffset = suballoc.offset + suballoc.size;
            ++nextAlloc1stIndex;
        }
        // We are at the end.
        else
        {
            // End of loop.
            lastOffset = freeSpace1stTo2ndEnd;
        }
    }

    if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
        while (lastOffset < size)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while (nextAlloc2ndIndex != SIZE_MAX &&
                suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
            {
                --nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if (nextAlloc2ndIndex != SIZE_MAX)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                ++inoutStats.allocationCount;

                // Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                --nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                // End of loop.
                lastOffset = size;
            }
        }
    }
}

#if VMA_STATS_STRING_ENABLED
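// Printing is done in two passes: the first pass only counts allocations, used
// bytes and unused ranges (PrintDetailedMap_Begin() needs those totals up front),
// the second pass walks the same layout again and emits each item to the JSON writer.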
void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
{
    const VkDeviceSize size = GetSize();
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    const size_t suballoc1stCount = suballocations1st.size();
    const size_t suballoc2ndCount = suballocations2nd.size();

    // FIRST PASS

    size_t unusedRangeCount = 0;
    VkDeviceSize usedBytes = 0;

    VkDeviceSize lastOffset = 0;

    size_t alloc2ndCount = 0;
    if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
        size_t nextAlloc2ndIndex = 0;
        while (lastOffset < freeSpace2ndTo1stEnd)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while (nextAlloc2ndIndex < suballoc2ndCount &&
                suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
            {
                ++nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if (nextAlloc2ndIndex < suballoc2ndCount)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if (lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    ++unusedRangeCount;
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                ++alloc2ndCount;
                usedBytes += suballoc.size;

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                ++nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if (lastOffset < freeSpace2ndTo1stEnd)
                {
                    // There is free space from lastOffset to freeSpace2ndTo1stEnd.
                    ++unusedRangeCount;
                }

                // End of loop.
                lastOffset = freeSpace2ndTo1stEnd;
            }
        }
    }

    size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    size_t alloc1stCount = 0;
    const VkDeviceSize freeSpace1stTo2ndEnd =
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    while (lastOffset < freeSpace1stTo2ndEnd)
    {
        // Find next non-null allocation or move nextAlloc1stIndex to the end.
        while (nextAlloc1stIndex < suballoc1stCount &&
            suballocations1st[nextAlloc1stIndex].userData == VMA_NULL)
        {
            ++nextAlloc1stIndex;
        }

        // Found non-null allocation.
        if (nextAlloc1stIndex < suballoc1stCount)
        {
            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];

            // 1. Process free space before this allocation.
            if (lastOffset < suballoc.offset)
            {
                // There is free space from lastOffset to suballoc.offset.
                ++unusedRangeCount;
            }

            // 2. Process this allocation.
            // There is allocation with suballoc.offset, suballoc.size.
            ++alloc1stCount;
            usedBytes += suballoc.size;

            // 3. Prepare for next iteration.
            lastOffset = suballoc.offset + suballoc.size;
            ++nextAlloc1stIndex;
        }
        // We are at the end.
        else
        {
            if (lastOffset < size)
            {
                // There is free space from lastOffset to freeSpace1stTo2ndEnd.
                ++unusedRangeCount;
            }

            // End of loop.
            lastOffset = freeSpace1stTo2ndEnd;
        }
    }

    if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
        while (lastOffset < size)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while (nextAlloc2ndIndex != SIZE_MAX &&
                suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
            {
                --nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if (nextAlloc2ndIndex != SIZE_MAX)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if (lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    ++unusedRangeCount;
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                ++alloc2ndCount;
                usedBytes += suballoc.size;

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                --nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if (lastOffset < size)
                {
                    // There is free space from lastOffset to size.
                    ++unusedRangeCount;
                }

                // End of loop.
                lastOffset = size;
            }
        }
    }

    const VkDeviceSize unusedBytes = size - usedBytes;
    PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);

    // SECOND PASS
    lastOffset = 0;

    if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
        size_t nextAlloc2ndIndex = 0;
        while (lastOffset < freeSpace2ndTo1stEnd)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while (nextAlloc2ndIndex < suballoc2ndCount &&
                suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
            {
                ++nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if (nextAlloc2ndIndex < suballoc2ndCount)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if (lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData);

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                ++nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if (lastOffset < freeSpace2ndTo1stEnd)
                {
                    // There is free space from lastOffset to freeSpace2ndTo1stEnd.
                    const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // End of loop.
                lastOffset = freeSpace2ndTo1stEnd;
            }
        }
    }

    nextAlloc1stIndex = m_1stNullItemsBeginCount;
    while (lastOffset < freeSpace1stTo2ndEnd)
    {
        // Find next non-null allocation or move nextAlloc1stIndex to the end.
        while (nextAlloc1stIndex < suballoc1stCount &&
            suballocations1st[nextAlloc1stIndex].userData == VMA_NULL)
        {
            ++nextAlloc1stIndex;
        }

        // Found non-null allocation.
        if (nextAlloc1stIndex < suballoc1stCount)
        {
            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];

            // 1. Process free space before this allocation.
            if (lastOffset < suballoc.offset)
            {
                // There is free space from lastOffset to suballoc.offset.
                const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
            }

            // 2. Process this allocation.
            // There is allocation with suballoc.offset, suballoc.size.
            PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData);

            // 3. Prepare for next iteration.
            lastOffset = suballoc.offset + suballoc.size;
            ++nextAlloc1stIndex;
        }
        // We are at the end.
        else
        {
            if (lastOffset < freeSpace1stTo2ndEnd)
            {
                // There is free space from lastOffset to freeSpace1stTo2ndEnd.
                const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
                PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
            }

            // End of loop.
            lastOffset = freeSpace1stTo2ndEnd;
        }
    }

    if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
        while (lastOffset < size)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while (nextAlloc2ndIndex != SIZE_MAX &&
                suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
            {
                --nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if (nextAlloc2ndIndex != SIZE_MAX)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if (lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData);

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                --nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if (lastOffset < size)
                {
                    // There is free space from lastOffset to size.
                    const VkDeviceSize unusedRangeSize = size - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // End of loop.
                lastOffset = size;
            }
        }
    }

    PrintDetailedMap_End(json);
}
#endif // VMA_STATS_STRING_ENABLED

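// Dispatches to the lower- or upper-address variant. Upper-address requests are
// only meaningful in double-stack mode, where the 2nd vector grows down from the
// end of the block.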
bool VmaBlockMetadata_Linear::CreateAllocationRequest(
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(pAllocationRequest != VMA_NULL);
    VMA_HEAVY_ASSERT(Validate());
    pAllocationRequest->size = allocSize;
    return upperAddress ?
        CreateAllocationRequest_UpperAddress(
            allocSize, allocAlignment, allocType, strategy, pAllocationRequest) :
        CreateAllocationRequest_LowerAddress(
            allocSize, allocAlignment, allocType, strategy, pAllocationRequest);
}

VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
{
    VMA_ASSERT(!IsVirtual());
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    for (size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
            {
                VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
                return VK_ERROR_UNKNOWN_COPY;
            }
        }
    }

    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    for (size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    {
        const VmaSuballocation& suballoc = suballocations2nd[i];
        if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
            {
                VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
                return VK_ERROR_UNKNOWN_COPY;
            }
        }
    }

    return VK_SUCCESS;
}

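// Note on handles: a VmaAllocHandle stores offset + 1, so that the value 0 stays
// available as the null handle; the decrement below decodes it back to a byte
// offset within the block.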
void VmaBlockMetadata_Linear::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    void* userData)
{
    const VkDeviceSize offset = (VkDeviceSize)request.allocHandle - 1;
    const VmaSuballocation newSuballoc = { offset, request.size, userData, type };

    switch (request.type)
    {
    case VmaAllocationRequestType::UpperAddress:
    {
        VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
            "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
        SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
        suballocations2nd.push_back(newSuballoc);
        m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    }
    break;
    case VmaAllocationRequestType::EndOf1st:
    {
        SuballocationVectorType& suballocations1st = AccessSuballocations1st();

        VMA_ASSERT(suballocations1st.empty() ||
            offset >= suballocations1st.back().offset + suballocations1st.back().size);
        // Check if it fits before the end of the block.
        VMA_ASSERT(offset + request.size <= GetSize());

        suballocations1st.push_back(newSuballoc);
    }
    break;
    case VmaAllocationRequestType::EndOf2nd:
    {
        SuballocationVectorType& suballocations1st = AccessSuballocations1st();
        // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
        VMA_ASSERT(!suballocations1st.empty() &&
            offset + request.size <= suballocations1st[m_1stNullItemsBeginCount].offset);
        SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

        switch (m_2ndVectorMode)
        {
        case SECOND_VECTOR_EMPTY:
            // First allocation in the second part of the ring buffer.
            VMA_ASSERT(suballocations2nd.empty());
            m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
            break;
        case SECOND_VECTOR_RING_BUFFER:
            // 2-part ring buffer is already started.
            VMA_ASSERT(!suballocations2nd.empty());
            break;
        case SECOND_VECTOR_DOUBLE_STACK:
            VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
            break;
        default:
            VMA_ASSERT(0);
        }

        suballocations2nd.push_back(newSuballoc);
    }
    break;
    default:
        VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
    }

    m_SumFreeSize -= newSuballoc.size;
}

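// Freeing tries the O(1) fast paths first: the oldest allocation at the front of
// the 1st vector, then the most recently added allocation at the back of the 2nd
// or 1st vector. Only then does it fall back to a binary search by offset, which
// marks the found suballocation as a null item for later cleanup.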
void VmaBlockMetadata_Linear::Free(VmaAllocHandle allocHandle)
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    VkDeviceSize offset = (VkDeviceSize)allocHandle - 1;

    if (!suballocations1st.empty())
    {
        // First (oldest) allocation in 1st vector: just mark it as the next empty item at the beginning.
        VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
        if (firstSuballoc.offset == offset)
        {
            firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            firstSuballoc.userData = VMA_NULL;
            m_SumFreeSize += firstSuballoc.size;
            ++m_1stNullItemsBeginCount;
            CleanupAfterFree();
            return;
        }
    }

    // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        VmaSuballocation& lastSuballoc = suballocations2nd.back();
        if (lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations2nd.pop_back();
            CleanupAfterFree();
            return;
        }
    }
    // Last allocation in 1st vector.
    else if (m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    {
        VmaSuballocation& lastSuballoc = suballocations1st.back();
        if (lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations1st.pop_back();
            CleanupAfterFree();
            return;
        }
    }

    VmaSuballocation refSuballoc;
    refSuballoc.offset = offset;
    // Rest of members stays uninitialized intentionally for better performance.

    // Item from the middle of 1st vector.
    {
        const SuballocationVectorType::iterator it = VmaBinaryFindSorted(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc,
            VmaSuballocationOffsetLess());
        if (it != suballocations1st.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->userData = VMA_NULL;
            ++m_1stNullItemsMiddleCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    if (m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the middle of 2nd vector.
        const SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
            VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
        if (it != suballocations2nd.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->userData = VMA_NULL;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
}

void VmaBlockMetadata_Linear::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo)
{
    outInfo.offset = (VkDeviceSize)allocHandle - 1;
    VmaSuballocation& suballoc = FindSuballocation(outInfo.offset);
    outInfo.size = suballoc.size;
    outInfo.pUserData = suballoc.userData;
}

void* VmaBlockMetadata_Linear::GetAllocationUserData(VmaAllocHandle allocHandle) const
{
    return FindSuballocation((VkDeviceSize)allocHandle - 1).userData;
}

VmaAllocHandle VmaBlockMetadata_Linear::GetAllocationListBegin() const
{
    // Function only used for defragmentation, which is disabled for this algorithm
    VMA_ASSERT(0);
    return VK_NULL_HANDLE;
}

VmaAllocHandle VmaBlockMetadata_Linear::GetNextAllocation(VmaAllocHandle prevAlloc) const
{
    // Function only used for defragmentation, which is disabled for this algorithm
    VMA_ASSERT(0);
    return VK_NULL_HANDLE;
}

VkDeviceSize VmaBlockMetadata_Linear::GetNextFreeRegionSize(VmaAllocHandle alloc) const
{
    // Function only used for defragmentation, which is disabled for this algorithm
    VMA_ASSERT(0);
    return 0;
}

void VmaBlockMetadata_Linear::Clear()
{
    m_SumFreeSize = GetSize();
    m_Suballocations0.clear();
    m_Suballocations1.clear();
    // Leaving m_1stVectorIndex unchanged - it doesn't matter.
    m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    m_1stNullItemsBeginCount = 0;
    m_1stNullItemsMiddleCount = 0;
    m_2ndNullItemsCount = 0;
}

void VmaBlockMetadata_Linear::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData)
{
    VmaSuballocation& suballoc = FindSuballocation((VkDeviceSize)allocHandle - 1);
    suballoc.userData = userData;
}

void VmaBlockMetadata_Linear::DebugLogAllAllocations() const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    for (auto it = suballocations1st.begin() + m_1stNullItemsBeginCount; it != suballocations1st.end(); ++it)
        if (it->type != VMA_SUBALLOCATION_TYPE_FREE)
            DebugLogAllocation(it->offset, it->size, it->userData);

    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    for (auto it = suballocations2nd.begin(); it != suballocations2nd.end(); ++it)
        if (it->type != VMA_SUBALLOCATION_TYPE_FREE)
            DebugLogAllocation(it->offset, it->size, it->userData);
}

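// The 1st vector is always sorted by ascending offset. The 2nd vector is also
// ascending in ring-buffer mode but descending in double-stack mode, hence the
// two different comparators below.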
VmaSuballocation& VmaBlockMetadata_Linear::FindSuballocation(VkDeviceSize offset) const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    VmaSuballocation refSuballoc;
    refSuballoc.offset = offset;
    // Rest of members stays uninitialized intentionally for better performance.

    // Item from the 1st vector.
    {
        SuballocationVectorType::const_iterator it = VmaBinaryFindSorted(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc,
            VmaSuballocationOffsetLess());
        if (it != suballocations1st.end())
        {
            return const_cast<VmaSuballocation&>(*it);
        }
    }

    if (m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the 2nd vector.
        SuballocationVectorType::const_iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
            VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
        if (it != suballocations2nd.end())
        {
            return const_cast<VmaSuballocation&>(*it);
        }
    }

    VMA_ASSERT(0 && "Allocation not found in linear allocator!");
    return const_cast<VmaSuballocation&>(suballocations1st.back()); // Should never occur.
}

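// Compaction heuristic: rewrite the 1st vector when it holds more than 32 items
// and null items make up at least 60% of them (nullCount * 2 >= nonNullCount * 3).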
bool VmaBlockMetadata_Linear::ShouldCompact1st() const
{
    const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    const size_t suballocCount = AccessSuballocations1st().size();
    return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
}

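// Trims null items from both ends of the two vectors, optionally compacts the
// 1st vector (see ShouldCompact1st()), and if the 1st vector has drained while a
// ring buffer is active, promotes the 2nd vector to become the new 1st one by
// flipping m_1stVectorIndex.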
void VmaBlockMetadata_Linear::CleanupAfterFree()
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if (IsEmpty())
    {
        suballocations1st.clear();
        suballocations2nd.clear();
        m_1stNullItemsBeginCount = 0;
        m_1stNullItemsMiddleCount = 0;
        m_2ndNullItemsCount = 0;
        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    }
    else
    {
        const size_t suballoc1stCount = suballocations1st.size();
        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);

        // Find more null items at the beginning of 1st vector.
        while (m_1stNullItemsBeginCount < suballoc1stCount &&
            suballocations1st[m_1stNullItemsBeginCount].type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            ++m_1stNullItemsBeginCount;
            --m_1stNullItemsMiddleCount;
        }

        // Find more null items at the end of 1st vector.
        while (m_1stNullItemsMiddleCount > 0 &&
            suballocations1st.back().type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            --m_1stNullItemsMiddleCount;
            suballocations1st.pop_back();
        }

        // Find more null items at the end of 2nd vector.
        while (m_2ndNullItemsCount > 0 &&
            suballocations2nd.back().type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            --m_2ndNullItemsCount;
            suballocations2nd.pop_back();
        }

        // Find more null items at the beginning of 2nd vector.
        while (m_2ndNullItemsCount > 0 &&
            suballocations2nd[0].type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            --m_2ndNullItemsCount;
            VmaVectorRemove(suballocations2nd, 0);
        }

        if (ShouldCompact1st())
        {
            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
            size_t srcIndex = m_1stNullItemsBeginCount;
            for (size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
            {
                while (suballocations1st[srcIndex].type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    ++srcIndex;
                }
                if (dstIndex != srcIndex)
                {
                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
                }
                ++srcIndex;
            }
            suballocations1st.resize(nonNullItemCount);
            m_1stNullItemsBeginCount = 0;
            m_1stNullItemsMiddleCount = 0;
        }

        // 2nd vector became empty.
        if (suballocations2nd.empty())
        {
            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
        }

        // 1st vector became empty.
        if (suballocations1st.size() - m_1stNullItemsBeginCount == 0)
        {
            suballocations1st.clear();
            m_1stNullItemsBeginCount = 0;

            if (!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                // Swap 1st with 2nd. Now 2nd is empty.
                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
                while (m_1stNullItemsBeginCount < suballocations2nd.size() &&
                    suballocations2nd[m_1stNullItemsBeginCount].type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    ++m_1stNullItemsBeginCount;
                    --m_1stNullItemsMiddleCount;
                }
                m_2ndNullItemsCount = 0;
                m_1stVectorIndex ^= 1;
            }
        }
    }

    VMA_HEAVY_ASSERT(Validate());
}

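// Lower-address strategy: first try to append after the last item of the 1st
// vector (bounded by the bottom of the 2nd stack in double-stack mode, or by the
// block end otherwise). If that fails, wrap around ring-buffer style and append
// at the end of the 2nd vector, bounded by the first non-null item of the 1st.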
bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    const VkDeviceSize blockSize = GetSize();
    const VkDeviceSize debugMargin = GetDebugMargin();
    const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity();
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if (m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        // Try to allocate at the end of 1st vector.

        VkDeviceSize resultBaseOffset = 0;
        if (!suballocations1st.empty())
        {
            const VmaSuballocation& lastSuballoc = suballocations1st.back();
            resultBaseOffset = lastSuballoc.offset + lastSuballoc.size + debugMargin;
        }

        // Start from offset equal to beginning of free space.
        VkDeviceSize resultOffset = resultBaseOffset;

        // Apply alignment.
        resultOffset = VmaAlignUp(resultOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations1st.empty())
        {
            bool bufferImageGranularityConflict = false;
            for (size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
            {
                const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                {
                    if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if (bufferImageGranularityConflict)
            {
                resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
            }
        }

        const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
            suballocations2nd.back().offset : blockSize;

        // There is enough free space at the end after alignment.
        if (resultOffset + allocSize + debugMargin <= freeSpaceEnd)
        {
            // Check next suballocations for BufferImageGranularity conflicts.
            // If conflict exists, allocation cannot be made here.
            if ((allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity) && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
            {
                for (size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
                {
                    const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                    if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                    {
                        if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                        {
                            return false;
                        }
                    }
                    else
                    {
                        // Already on previous page.
                        break;
                    }
                }
            }

            // All tests passed: Success.
            pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1);
            // pAllocationRequest->item, customData unused.
            pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
            return true;
        }
    }

    // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
    // beginning of 1st vector as the end of free space.
    if (m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        VMA_ASSERT(!suballocations1st.empty());

        VkDeviceSize resultBaseOffset = 0;
        if (!suballocations2nd.empty())
        {
            const VmaSuballocation& lastSuballoc = suballocations2nd.back();
            resultBaseOffset = lastSuballoc.offset + lastSuballoc.size + debugMargin;
        }

        // Start from offset equal to beginning of free space.
        VkDeviceSize resultOffset = resultBaseOffset;

        // Apply alignment.
        resultOffset = VmaAlignUp(resultOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty())
        {
            bool bufferImageGranularityConflict = false;
            for (size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
            {
                const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
                if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                {
                    if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if (bufferImageGranularityConflict)
            {
                resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
            }
        }

        size_t index1st = m_1stNullItemsBeginCount;

        // There is enough free space at the end after alignment.
        if ((index1st == suballocations1st.size() && resultOffset + allocSize + debugMargin <= blockSize) ||
            (index1st < suballocations1st.size() && resultOffset + allocSize + debugMargin <= suballocations1st[index1st].offset))
        {
            // Check next suballocations for BufferImageGranularity conflicts.
            // If conflict exists, allocation cannot be made here.
            if (allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity)
            {
                for (size_t nextSuballocIndex = index1st;
                    nextSuballocIndex < suballocations1st.size();
                    nextSuballocIndex++)
                {
                    const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
                    if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                    {
                        if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                        {
                            return false;
                        }
                    }
                    else
                    {
                        // Already on next page.
                        break;
                    }
                }
            }

            // All tests passed: Success.
            pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1);
            pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
            // pAllocationRequest->item, customData unused.
            return true;
        }
    }

    return false;
}

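// Upper-address strategy (double stack only): the new allocation is placed below
// the current top of the 2nd stack (or below the block end), so alignment uses
// VmaAlignDown() and the debug margin is kept between this allocation and the
// one above it.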
bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    const VkDeviceSize blockSize = GetSize();
    const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity();
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
        return false;
    }

    // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
    if (allocSize > blockSize)
    {
        return false;
    }
    VkDeviceSize resultBaseOffset = blockSize - allocSize;
    if (!suballocations2nd.empty())
    {
        const VmaSuballocation& lastSuballoc = suballocations2nd.back();
        resultBaseOffset = lastSuballoc.offset - allocSize;
        if (allocSize > lastSuballoc.offset)
        {
            return false;
        }
    }

    // Start from offset equal to end of free space.
    VkDeviceSize resultOffset = resultBaseOffset;

    const VkDeviceSize debugMargin = GetDebugMargin();

    // Apply debugMargin at the end.
    if (debugMargin > 0)
    {
        if (resultOffset < debugMargin)
        {
            return false;
        }
        resultOffset -= debugMargin;
    }

    // Apply alignment.
    resultOffset = VmaAlignDown(resultOffset, allocAlignment);

    // Check next suballocations from 2nd for BufferImageGranularity conflicts.
    // Make bigger alignment if necessary.
    if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty())
    {
        bool bufferImageGranularityConflict = false;
        for (size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
        {
            const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
            if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
            {
                if (VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
                {
                    bufferImageGranularityConflict = true;
                    break;
                }
            }
            else
                // Already on previous page.
                break;
        }
        if (bufferImageGranularityConflict)
        {
            resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
        }
    }

    // There is enough free space.
    const VkDeviceSize endOf1st = !suballocations1st.empty() ?
        suballocations1st.back().offset + suballocations1st.back().size :
        0;
    if (endOf1st + debugMargin <= resultOffset)
    {
        // Check previous suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if (bufferImageGranularity > 1)
        {
            for (size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
            {
                const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                {
                    if (VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
            }
        }

        // All tests passed: Success.
        pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1);
        // pAllocationRequest->item unused.
        pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
        return true;
    }

    return false;
}
#endif // _VMA_BLOCK_METADATA_LINEAR_FUNCTIONS
#endif // _VMA_BLOCK_METADATA_LINEAR

#if 0
#ifndef _VMA_BLOCK_METADATA_BUDDY
/*
- GetSize() is the original size of allocated memory block.
- m_UsableSize is this size aligned down to a power of two.
  All allocations and calculations happen relative to m_UsableSize.
- GetUnusableSize() is the difference between them.
  It is reported as separate, unused range, not available for allocations.

Node at level 0 has size = m_UsableSize.
Each next level contains nodes with size 2 times smaller than current level.
m_LevelCount is the maximum number of levels to use in the current object.
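Example (hypothetical numbers): for a 100 MB block, m_UsableSize is 64 MB and
the remaining 36 MB is reported as one unusable range. Level 0 then holds a
single 64 MB node, level 1 two 32 MB nodes, and so on; an allocation rounded up
to 1 MB would be served from level 6, since LevelToNodeSize(6) = 64 MB >> 6 = 1 MB.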
*/
class VmaBlockMetadata_Buddy : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockMetadata_Buddy)
public:
    VmaBlockMetadata_Buddy(const VkAllocationCallbacks* pAllocationCallbacks,
        VkDeviceSize bufferImageGranularity, bool isVirtual);
    virtual ~VmaBlockMetadata_Buddy();

    size_t GetAllocationCount() const override { return m_AllocationCount; }
    VkDeviceSize GetSumFreeSize() const override { return m_SumFreeSize + GetUnusableSize(); }
    bool IsEmpty() const override { return m_Root->type == Node::TYPE_FREE; }
    VkResult CheckCorruption(const void* pBlockData) override { return VK_ERROR_FEATURE_NOT_PRESENT; }
    VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return (VkDeviceSize)allocHandle - 1; }
    void DebugLogAllAllocations() const override { DebugLogAllAllocationNode(m_Root, 0); }

    void Init(VkDeviceSize size) override;
    bool Validate() const override;

    void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override;
    void AddStatistics(VmaStatistics& inoutStats) const override;

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const override;
#endif

    bool CreateAllocationRequest(
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest) override;

    void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        void* userData) override;

    void Free(VmaAllocHandle allocHandle) override;
    void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override;
    void* GetAllocationUserData(VmaAllocHandle allocHandle) const override;
    VmaAllocHandle GetAllocationListBegin() const override;
    VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override;
    void Clear() override;
    void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override;

private:
    static const size_t MAX_LEVELS = 48;

    struct ValidationContext
    {
        size_t calculatedAllocationCount = 0;
        size_t calculatedFreeCount = 0;
        VkDeviceSize calculatedSumFreeSize = 0;
    };
    struct Node
    {
        VkDeviceSize offset;
        enum TYPE
        {
            TYPE_FREE,
            TYPE_ALLOCATION,
            TYPE_SPLIT,
            TYPE_COUNT
        } type;
        Node* parent;
        Node* buddy;

        union
        {
            struct
            {
                Node* prev;
                Node* next;
            } free;
            struct
            {
                void* userData;
            } allocation;
            struct
            {
                Node* leftChild;
            } split;
        };
    };

    // Size of the memory block aligned down to a power of two.
    VkDeviceSize m_UsableSize;
    uint32_t m_LevelCount;
    VmaPoolAllocator<Node> m_NodeAllocator;
    Node* m_Root;
    struct
    {
        Node* front;
        Node* back;
    } m_FreeList[MAX_LEVELS];

    // Number of nodes in the tree with type == TYPE_ALLOCATION.
    size_t m_AllocationCount;
    // Number of nodes in the tree with type == TYPE_FREE.
    size_t m_FreeCount;
    // Doesn't include space wasted due to internal fragmentation - allocation sizes are just aligned up to node sizes.
    // Doesn't include unusable size.
    VkDeviceSize m_SumFreeSize;

    VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }

9359     VkDeviceSize AlignAllocationSize(VkDeviceSize size) const
9360     {
9361         if (!IsVirtual())
9362         {
9363             size = VmaAlignUp(size, (VkDeviceSize)16);
9364         }
9365         return VmaNextPow2(size);
9366     }
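    // E.g. for AlignAllocationSize() above (illustrative): a non-virtual
    // request of 100 bytes becomes VmaAlignUp(100, 16) == 112, then
    // VmaNextPow2(112) == 128.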
9367     Node* FindAllocationNode(VkDeviceSize offset, uint32_t& outLevel) const;
9368     void DeleteNodeChildren(Node* node);
9369     bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
9370     uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
9371     void AddNodeToDetailedStatistics(VmaDetailedStatistics& inoutStats, const Node* node, VkDeviceSize levelNodeSize) const;
9372     // Adds node to the front of FreeList at given level.
9373     // node->type must be FREE.
9374     // node->free.prev, next can be undefined.
9375     void AddToFreeListFront(uint32_t level, Node* node);
9376     // Removes node from FreeList at given level.
9377     // node->type must be FREE.
9378     // node->free.prev, next stay untouched.
9379     void RemoveFromFreeList(uint32_t level, Node* node);
9380     void DebugLogAllAllocationNode(Node* node, uint32_t level) const;
9381 
9382 #if VMA_STATS_STRING_ENABLED
9383     void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
9384 #endif
9385 };
9386 
9387 #ifndef _VMA_BLOCK_METADATA_BUDDY_FUNCTIONS
9388 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(const VkAllocationCallbacks* pAllocationCallbacks,
9389     VkDeviceSize bufferImageGranularity, bool isVirtual)
9390     : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual),
9391     m_NodeAllocator(pAllocationCallbacks, 32), // firstBlockCapacity
9392     m_Root(VMA_NULL),
9393     m_AllocationCount(0),
9394     m_FreeCount(1),
9395     m_SumFreeSize(0)
9396 {
9397     memset(m_FreeList, 0, sizeof(m_FreeList));
9398 }
9399 
9400 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
9401 {
9402     DeleteNodeChildren(m_Root);
9403     m_NodeAllocator.Free(m_Root);
9404 }
9405 
9406 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
9407 {
9408     VmaBlockMetadata::Init(size);
9409 
9410     m_UsableSize = VmaPrevPow2(size);
9411     m_SumFreeSize = m_UsableSize;
9412 
9413     // Calculate m_LevelCount.
9414     const VkDeviceSize minNodeSize = IsVirtual() ? 1 : 16;
9415     m_LevelCount = 1;
9416     while (m_LevelCount < MAX_LEVELS &&
9417         LevelToNodeSize(m_LevelCount) >= minNodeSize)
9418     {
9419         ++m_LevelCount;
9420     }
9421 
9422     Node* rootNode = m_NodeAllocator.Alloc();
9423     rootNode->offset = 0;
9424     rootNode->type = Node::TYPE_FREE;
9425     rootNode->parent = VMA_NULL;
9426     rootNode->buddy = VMA_NULL;
9427 
9428     m_Root = rootNode;
9429     AddToFreeListFront(0, rootNode);
9430 }
9431 
9432 bool VmaBlockMetadata_Buddy::Validate() const
9433 {
9434     // Validate tree.
9435     ValidationContext ctx;
9436     if (!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
9437     {
9438         VMA_VALIDATE(false && "ValidateNode failed.");
9439     }
9440     VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
9441     VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
9442 
9443     // Validate free node lists.
9444     for (uint32_t level = 0; level < m_LevelCount; ++level)
9445     {
9446         VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
9447             m_FreeList[level].front->free.prev == VMA_NULL);
9448 
9449         for (Node* node = m_FreeList[level].front;
9450             node != VMA_NULL;
9451             node = node->free.next)
9452         {
9453             VMA_VALIDATE(node->type == Node::TYPE_FREE);
9454 
9455             if (node->free.next == VMA_NULL)
9456             {
9457                 VMA_VALIDATE(m_FreeList[level].back == node);
9458             }
9459             else
9460             {
9461                 VMA_VALIDATE(node->free.next->free.prev == node);
9462             }
9463         }
9464     }
9465 
9466     // Validate that free lists at higher levels are empty.
9467     for (uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
9468     {
9469         VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
9470     }
9471 
9472     return true;
9473 }
9474 
9475 void VmaBlockMetadata_Buddy::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const
9476 {
9477     inoutStats.statistics.blockCount++;
9478     inoutStats.statistics.blockBytes += GetSize();
9479 
9480     AddNodeToDetailedStatistics(inoutStats, m_Root, LevelToNodeSize(0));
9481 
9482     const VkDeviceSize unusableSize = GetUnusableSize();
9483     if (unusableSize > 0)
9484         VmaAddDetailedStatisticsUnusedRange(inoutStats, unusableSize);
9485 }
9486 
9487 void VmaBlockMetadata_Buddy::AddStatistics(VmaStatistics& inoutStats) const
9488 {
9489     inoutStats.blockCount++;
9490     inoutStats.allocationCount += (uint32_t)m_AllocationCount;
9491     inoutStats.blockBytes += GetSize();
9492     inoutStats.allocationBytes += GetSize() - m_SumFreeSize;
9493 }
9494 
9495 #if VMA_STATS_STRING_ENABLED
9496 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const
9497 {
9498     VmaDetailedStatistics stats;
9499     VmaClearDetailedStatistics(stats);
9500     AddDetailedStatistics(stats);
9501 
9502     PrintDetailedMap_Begin(
9503         json,
9504         stats.statistics.blockBytes - stats.statistics.allocationBytes,
9505         stats.statistics.allocationCount,
9506         stats.unusedRangeCount,
9507         mapRefCount);
9508 
9509     PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
9510 
9511     const VkDeviceSize unusableSize = GetUnusableSize();
9512     if (unusableSize > 0)
9513     {
9514         PrintDetailedMap_UnusedRange(json,
9515             m_UsableSize, // offset
9516             unusableSize); // size
9517     }
9518 
9519     PrintDetailedMap_End(json);
9520 }
9521 #endif // VMA_STATS_STRING_ENABLED
9522 
9523 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
9524     VkDeviceSize allocSize,
9525     VkDeviceSize allocAlignment,
9526     bool upperAddress,
9527     VmaSuballocationType allocType,
9528     uint32_t strategy,
9529     VmaAllocationRequest* pAllocationRequest)
9530 {
9531     VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
9532 
9533     allocSize = AlignAllocationSize(allocSize);
9534 
9535     // Simple way to respect bufferImageGranularity. May be optimized some day.
9536     // Applied whenever the allocation might be an OPTIMAL image...
9537     if (allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
9538         allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
9539         allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
9540     {
9541         allocAlignment = VMA_MAX(allocAlignment, GetBufferImageGranularity());
9542         allocSize = VmaAlignUp(allocSize, GetBufferImageGranularity());
9543     }
9544 
9545     if (allocSize > m_UsableSize)
9546     {
9547         return false;
9548     }
9549 
9550     const uint32_t targetLevel = AllocSizeToLevel(allocSize);
9551     for (uint32_t level = targetLevel; level--; )
9552     {
9553         for (Node* freeNode = m_FreeList[level].front;
9554             freeNode != VMA_NULL;
9555             freeNode = freeNode->free.next)
9556         {
9557             if (freeNode->offset % allocAlignment == 0)
9558             {
9559                 pAllocationRequest->type = VmaAllocationRequestType::Normal;
9560                 pAllocationRequest->allocHandle = (VmaAllocHandle)(freeNode->offset + 1);
9561                 pAllocationRequest->size = allocSize;
9562                 pAllocationRequest->customData = (void*)(uintptr_t)level;
9563                 return true;
9564             }
9565         }
9566     }
9567 
9568     return false;
9569 }
9570 
9571 void VmaBlockMetadata_Buddy::Alloc(
9572     const VmaAllocationRequest& request,
9573     VmaSuballocationType type,
9574     void* userData)
9575 {
9576     VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
9577 
9578     const uint32_t targetLevel = AllocSizeToLevel(request.size);
9579     uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
9580 
9581     Node* currNode = m_FreeList[currLevel].front;
9582     VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
9583     const VkDeviceSize offset = (VkDeviceSize)request.allocHandle - 1;
9584     while (currNode->offset != offset)
9585     {
9586         currNode = currNode->free.next;
9587         VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
9588     }
9589 
9590     // Go down, splitting free nodes.
9591     while (currLevel < targetLevel)
9592     {
9593         // currNode is already first free node at currLevel.
9594         // Remove it from list of free nodes at this currLevel.
9595         RemoveFromFreeList(currLevel, currNode);
9596 
9597         const uint32_t childrenLevel = currLevel + 1;
9598 
9599         // Create two free sub-nodes.
9600         Node* leftChild = m_NodeAllocator.Alloc();
9601         Node* rightChild = m_NodeAllocator.Alloc();
9602 
9603         leftChild->offset = currNode->offset;
9604         leftChild->type = Node::TYPE_FREE;
9605         leftChild->parent = currNode;
9606         leftChild->buddy = rightChild;
9607 
9608         rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
9609         rightChild->type = Node::TYPE_FREE;
9610         rightChild->parent = currNode;
9611         rightChild->buddy = leftChild;
9612 
9613         // Convert current currNode to split type.
9614         currNode->type = Node::TYPE_SPLIT;
9615         currNode->split.leftChild = leftChild;
9616 
9617         // Add child nodes to free list. Order is important!
9618         AddToFreeListFront(childrenLevel, rightChild);
9619         AddToFreeListFront(childrenLevel, leftChild);
9620 
9621         ++m_FreeCount;
9622         ++currLevel;
9623         currNode = m_FreeList[currLevel].front;
9624 
9625         /*
9626         We can be sure that currNode, as left child of node previously split,
9627         also fulfills the alignment requirement.
9628         */
9629     }
9630 
9631     // Remove from free list.
9632     VMA_ASSERT(currLevel == targetLevel &&
9633         currNode != VMA_NULL &&
9634         currNode->type == Node::TYPE_FREE);
9635     RemoveFromFreeList(currLevel, currNode);
9636 
9637     // Convert to allocation node.
9638     currNode->type = Node::TYPE_ALLOCATION;
9639     currNode->allocation.userData = userData;
9640 
9641     ++m_AllocationCount;
9642     --m_FreeCount;
9643     m_SumFreeSize -= request.size;
9644 }
9645 
9646 void VmaBlockMetadata_Buddy::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo)
9647 {
9648     uint32_t level = 0;
9649     outInfo.offset = (VkDeviceSize)allocHandle - 1;
9650     const Node* const node = FindAllocationNode(outInfo.offset, level);
9651     outInfo.size = LevelToNodeSize(level);
9652     outInfo.pUserData = node->allocation.userData;
9653 }
9654 
9655 void* VmaBlockMetadata_Buddy::GetAllocationUserData(VmaAllocHandle allocHandle) const
9656 {
9657     uint32_t level = 0;
9658     const Node* const node = FindAllocationNode((VkDeviceSize)allocHandle - 1, level);
9659     return node->allocation.userData;
9660 }
9661 
9662 VmaAllocHandle VmaBlockMetadata_Buddy::GetAllocationListBegin() const
9663 {
9664     // Function only used for defragmentation, which is disabled for this algorithm
9665     return VK_NULL_HANDLE;
9666 }
9667 
9668 VmaAllocHandle VmaBlockMetadata_Buddy::GetNextAllocation(VmaAllocHandle prevAlloc) const
9669 {
9670     // Function only used for defragmentation, which is disabled for this algorithm
9671     return VK_NULL_HANDLE;
9672 }
9673 
9674 void VmaBlockMetadata_Buddy::DeleteNodeChildren(Node* node)
9675 {
9676     if (node->type == Node::TYPE_SPLIT)
9677     {
9678         DeleteNodeChildren(node->split.leftChild->buddy);
9679         DeleteNodeChildren(node->split.leftChild);
9680         const VkAllocationCallbacks* allocationCallbacks = GetAllocationCallbacks();
9681         m_NodeAllocator.Free(node->split.leftChild->buddy);
9682         m_NodeAllocator.Free(node->split.leftChild);
9683     }
9684 }
9685 
9686 void VmaBlockMetadata_Buddy::Clear()
9687 {
9688     DeleteNodeChildren(m_Root);
9689     m_Root->type = Node::TYPE_FREE;
9690     m_AllocationCount = 0;
9691     m_FreeCount = 1;
9692     m_SumFreeSize = m_UsableSize;
9693 }
9694 
9695 void VmaBlockMetadata_Buddy::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData)
9696 {
9697     uint32_t level = 0;
9698     Node* const node = FindAllocationNode((VkDeviceSize)allocHandle - 1, level);
9699     node->allocation.userData = userData;
9700 }
9701 
9702 VmaBlockMetadata_Buddy::Node* VmaBlockMetadata_Buddy::FindAllocationNode(VkDeviceSize offset, uint32_t& outLevel) const
9703 {
9704     Node* node = m_Root;
9705     VkDeviceSize nodeOffset = 0;
9706     outLevel = 0;
9707     VkDeviceSize levelNodeSize = LevelToNodeSize(0);
9708     while (node->type == Node::TYPE_SPLIT)
9709     {
9710         const VkDeviceSize nextLevelNodeSize = levelNodeSize >> 1;
9711         if (offset < nodeOffset + nextLevelNodeSize)
9712         {
9713             node = node->split.leftChild;
9714         }
9715         else
9716         {
9717             node = node->split.leftChild->buddy;
9718             nodeOffset += nextLevelNodeSize;
9719         }
9720         ++outLevel;
9721         levelNodeSize = nextLevelNodeSize;
9722     }
9723 
9724     VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
9725     return node;
9726 }
9727 
9728 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
9729 {
9730     VMA_VALIDATE(level < m_LevelCount);
9731     VMA_VALIDATE(curr->parent == parent);
9732     VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
9733     VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
9734     switch (curr->type)
9735     {
9736     case Node::TYPE_FREE:
9737         // curr->free.prev, next are validated separately.
9738         ctx.calculatedSumFreeSize += levelNodeSize;
9739         ++ctx.calculatedFreeCount;
9740         break;
9741     case Node::TYPE_ALLOCATION:
9742         ++ctx.calculatedAllocationCount;
9743         if (!IsVirtual())
9744         {
9745             VMA_VALIDATE(curr->allocation.userData != VMA_NULL);
9746         }
9747         break;
9748     case Node::TYPE_SPLIT:
9749     {
9750         const uint32_t childrenLevel = level + 1;
9751         const VkDeviceSize childrenLevelNodeSize = levelNodeSize >> 1;
9752         const Node* const leftChild = curr->split.leftChild;
9753         VMA_VALIDATE(leftChild != VMA_NULL);
9754         VMA_VALIDATE(leftChild->offset == curr->offset);
9755         if (!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
9756         {
9757             VMA_VALIDATE(false && "ValidateNode for left child failed.");
9758         }
9759         const Node* const rightChild = leftChild->buddy;
9760         VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
9761         if (!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
9762         {
9763             VMA_VALIDATE(false && "ValidateNode for right child failed.");
9764         }
9765     }
9766     break;
9767     default:
9768         return false;
9769     }
9770 
9771     return true;
9772 }
9773 
9774 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
9775 {
9776     // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
9777     uint32_t level = 0;
9778     VkDeviceSize currLevelNodeSize = m_UsableSize;
9779     VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
9780     while (allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
9781     {
9782         ++level;
9783         currLevelNodeSize >>= 1;
9784         nextLevelNodeSize >>= 1;
9785     }
9786     return level;
9787 }
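
// E.g. (illustrative): with m_UsableSize == 512, AllocSizeToLevel(128) steps
// 512 -> 256 -> 128 and returns level 2, the deepest level whose node size
// (here 128) still holds the aligned allocation size.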
9788 
9789 void VmaBlockMetadata_Buddy::Free(VmaAllocHandle allocHandle)
9790 {
9791     uint32_t level = 0;
9792     Node* node = FindAllocationNode((VkDeviceSize)allocHandle - 1, level);
9793 
9794     ++m_FreeCount;
9795     --m_AllocationCount;
9796     m_SumFreeSize += LevelToNodeSize(level);
9797 
9798     node->type = Node::TYPE_FREE;
9799 
9800     // Join free nodes if possible.
9801     while (level > 0 && node->buddy->type == Node::TYPE_FREE)
9802     {
9803         RemoveFromFreeList(level, node->buddy);
9804         Node* const parent = node->parent;
9805 
9806         m_NodeAllocator.Free(node->buddy);
9807         m_NodeAllocator.Free(node);
9808         parent->type = Node::TYPE_FREE;
9809 
9810         node = parent;
9811         --level;
9812         --m_FreeCount;
9813     }
9814 
9815     AddToFreeListFront(level, node);
9816 }
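
// Illustrative merge: freeing a 128-byte node whose buddy at the same level is
// already free removes that buddy from its free list, releases both child
// nodes, and marks their 256-byte parent free; the loop repeats up the tree as
// long as the parent's buddy is free as well.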
9817 
9818 void VmaBlockMetadata_Buddy::AddNodeToDetailedStatistics(VmaDetailedStatistics& inoutStats, const Node* node, VkDeviceSize levelNodeSize) const
9819 {
9820     switch (node->type)
9821     {
9822     case Node::TYPE_FREE:
9823         VmaAddDetailedStatisticsUnusedRange(inoutStats, levelNodeSize);
9824         break;
9825     case Node::TYPE_ALLOCATION:
9826         VmaAddDetailedStatisticsAllocation(inoutStats, levelNodeSize);
9827         break;
9828     case Node::TYPE_SPLIT:
9829     {
9830         const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
9831         const Node* const leftChild = node->split.leftChild;
9832         AddNodeToDetailedStatistics(inoutStats, leftChild, childrenNodeSize);
9833         const Node* const rightChild = leftChild->buddy;
9834         AddNodeToDetailedStatistics(inoutStats, rightChild, childrenNodeSize);
9835     }
9836     break;
9837     default:
9838         VMA_ASSERT(0);
9839     }
9840 }
9841 
9842 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
9843 {
9844     VMA_ASSERT(node->type == Node::TYPE_FREE);
9845 
9846     // List is empty.
9847     Node* const frontNode = m_FreeList[level].front;
9848     if (frontNode == VMA_NULL)
9849     {
9850         VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
9851         node->free.prev = node->free.next = VMA_NULL;
9852         m_FreeList[level].front = m_FreeList[level].back = node;
9853     }
9854     else
9855     {
9856         VMA_ASSERT(frontNode->free.prev == VMA_NULL);
9857         node->free.prev = VMA_NULL;
9858         node->free.next = frontNode;
9859         frontNode->free.prev = node;
9860         m_FreeList[level].front = node;
9861     }
9862 }
9863 
9864 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
9865 {
9866     VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
9867 
9868     // It is at the front.
9869     if (node->free.prev == VMA_NULL)
9870     {
9871         VMA_ASSERT(m_FreeList[level].front == node);
9872         m_FreeList[level].front = node->free.next;
9873     }
9874     else
9875     {
9876         Node* const prevFreeNode = node->free.prev;
9877         VMA_ASSERT(prevFreeNode->free.next == node);
9878         prevFreeNode->free.next = node->free.next;
9879     }
9880 
9881     // It is at the back.
9882     if (node->free.next == VMA_NULL)
9883     {
9884         VMA_ASSERT(m_FreeList[level].back == node);
9885         m_FreeList[level].back = node->free.prev;
9886     }
9887     else
9888     {
9889         Node* const nextFreeNode = node->free.next;
9890         VMA_ASSERT(nextFreeNode->free.prev == node);
9891         nextFreeNode->free.prev = node->free.prev;
9892     }
9893 }
9894 
9895 void VmaBlockMetadata_Buddy::DebugLogAllAllocationNode(Node* node, uint32_t level) const
9896 {
9897     switch (node->type)
9898     {
9899     case Node::TYPE_FREE:
9900         break;
9901     case Node::TYPE_ALLOCATION:
9902         DebugLogAllocation(node->offset, LevelToNodeSize(level), node->allocation.userData);
9903         break;
9904     case Node::TYPE_SPLIT:
9905     {
9906         ++level;
9907         DebugLogAllAllocationNode(node->split.leftChild, level);
9908         DebugLogAllAllocationNode(node->split.leftChild->buddy, level);
9909     }
9910     break;
9911     default:
9912         VMA_ASSERT(0);
9913     }
9914 }
9915 
9916 #if VMA_STATS_STRING_ENABLED
9917 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
9918 {
9919     switch (node->type)
9920     {
9921     case Node::TYPE_FREE:
9922         PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
9923         break;
9924     case Node::TYPE_ALLOCATION:
9925         PrintDetailedMap_Allocation(json, node->offset, levelNodeSize, node->allocation.userData);
9926         break;
9927     case Node::TYPE_SPLIT:
9928     {
9929         const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
9930         const Node* const leftChild = node->split.leftChild;
9931         PrintDetailedMapNode(json, leftChild, childrenNodeSize);
9932         const Node* const rightChild = leftChild->buddy;
9933         PrintDetailedMapNode(json, rightChild, childrenNodeSize);
9934     }
9935     break;
9936     default:
9937         VMA_ASSERT(0);
9938     }
9939 }
9940 #endif // VMA_STATS_STRING_ENABLED
9941 #endif // _VMA_BLOCK_METADATA_BUDDY_FUNCTIONS
9942 #endif // _VMA_BLOCK_METADATA_BUDDY
9943 #endif // #if 0
9944 
9945 #ifndef _VMA_BLOCK_METADATA_TLSF
9946 // To avoid searching the current larger region when the first allocation attempt fails,
9947 // and instead skip straight to a smaller range, use VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT as the strategy in CreateAllocationRequest().
9948 // When fragmentation and reuse of previously freed blocks does not matter, use
9949 // VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT for the fastest possible allocation time.
9950 class VmaBlockMetadata_TLSF : public VmaBlockMetadata
9951 {
9952     VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockMetadata_TLSF)
9953 public:
9954     VmaBlockMetadata_TLSF(const VkAllocationCallbacks* pAllocationCallbacks,
9955         VkDeviceSize bufferImageGranularity, bool isVirtual);
9956     virtual ~VmaBlockMetadata_TLSF();
9957 
9958     size_t GetAllocationCount() const override { return m_AllocCount; }
9959     size_t GetFreeRegionsCount() const override { return m_BlocksFreeCount + 1; }
9960     VkDeviceSize GetSumFreeSize() const override { return m_BlocksFreeSize + m_NullBlock->size; }
9961     bool IsEmpty() const override { return m_NullBlock->offset == 0; }
9962     VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return ((Block*)allocHandle)->offset; }
9963 
9964     void Init(VkDeviceSize size) override;
9965     bool Validate() const override;
9966 
9967     void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override;
9968     void AddStatistics(VmaStatistics& inoutStats) const override;
9969 
9970 #if VMA_STATS_STRING_ENABLED
9971     void PrintDetailedMap(class VmaJsonWriter& json) const override;
9972 #endif
9973 
9974     bool CreateAllocationRequest(
9975         VkDeviceSize allocSize,
9976         VkDeviceSize allocAlignment,
9977         bool upperAddress,
9978         VmaSuballocationType allocType,
9979         uint32_t strategy,
9980         VmaAllocationRequest* pAllocationRequest) override;
9981 
9982     VkResult CheckCorruption(const void* pBlockData) override;
9983     void Alloc(
9984         const VmaAllocationRequest& request,
9985         VmaSuballocationType type,
9986         void* userData) override;
9987 
9988     void Free(VmaAllocHandle allocHandle) override;
9989     void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override;
9990     void* GetAllocationUserData(VmaAllocHandle allocHandle) const override;
9991     VmaAllocHandle GetAllocationListBegin() const override;
9992     VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override;
9993     VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const override;
9994     void Clear() override;
9995     void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override;
9996     void DebugLogAllAllocations() const override;
9997 
9998 private:
9999     // According to the original paper, 4 or 5 is preferable:
10000     // M. Masmano, I. Ripoll, A. Crespo, and J. Real "TLSF: a New Dynamic Memory Allocator for Real-Time Systems"
10001     // http://www.gii.upv.es/tlsf/files/ecrts04_tlsf.pdf
10002     static const uint8_t SECOND_LEVEL_INDEX = 5;
10003     static const uint16_t SMALL_BUFFER_SIZE = 256;
10004     static const uint32_t INITIAL_BLOCK_ALLOC_COUNT = 16;
10005     static const uint8_t MEMORY_CLASS_SHIFT = 7;
10006     static const uint8_t MAX_MEMORY_CLASSES = 65 - MEMORY_CLASS_SHIFT;
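
    // Illustrative: SECOND_LEVEL_INDEX == 5 splits each memory class into
    // 2^5 == 32 second-level ranges, e.g. sizes [1024, 2048) all map to
    // memory class 3, subdivided into second indices 0..31 in 32-byte steps.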
10007 
10008     class Block
10009     {
10010     public:
10011         VkDeviceSize offset;
10012         VkDeviceSize size;
10013         Block* prevPhysical;
10014         Block* nextPhysical;
10015 
10016         void MarkFree() { prevFree = VMA_NULL; }
10017         void MarkTaken() { prevFree = this; }
10018         bool IsFree() const { return prevFree != this; }
10019         void*& UserData() { VMA_HEAVY_ASSERT(!IsFree()); return userData; }
10020         Block*& PrevFree() { return prevFree; }
10021         Block*& NextFree() { VMA_HEAVY_ASSERT(IsFree()); return nextFree; }
10022 
10023     private:
10024         Block* prevFree; // Address of the same block here indicates that block is taken
10025         union
10026         {
10027             Block* nextFree;
10028             void* userData;
10029         };
10030     };
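
    // Note on the encoding above: MarkTaken() stores the block's own address
    // in prevFree, so IsFree() is a single pointer comparison and no separate
    // flag is needed; the union then reuses the same storage for nextFree
    // (free blocks) or userData (taken blocks).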
10031 
10032     size_t m_AllocCount;
10033     // Total number of free blocks besides null block
10034     size_t m_BlocksFreeCount;
10035     // Total size of free blocks excluding null block
10036     VkDeviceSize m_BlocksFreeSize;
10037     uint32_t m_IsFreeBitmap;
10038     uint8_t m_MemoryClasses;
10039     uint32_t m_InnerIsFreeBitmap[MAX_MEMORY_CLASSES];
10040     uint32_t m_ListsCount;
10041     /*
10042     * Class 0: lists 0-3 for small buffers
10043     * Classes 1+: one list per second index 0..(2^SLI-1) for normal buffers
10044     */
10045     Block** m_FreeList;
10046     VmaPoolAllocator<Block> m_BlockAllocator;
10047     Block* m_NullBlock;
10048     VmaBlockBufferImageGranularity m_GranularityHandler;
10049 
10050     uint8_t SizeToMemoryClass(VkDeviceSize size) const;
10051     uint16_t SizeToSecondIndex(VkDeviceSize size, uint8_t memoryClass) const;
10052     uint32_t GetListIndex(uint8_t memoryClass, uint16_t secondIndex) const;
10053     uint32_t GetListIndex(VkDeviceSize size) const;
10054 
10055     void RemoveFreeBlock(Block* block);
10056     void InsertFreeBlock(Block* block);
10057     void MergeBlock(Block* block, Block* prev);
10058 
10059     Block* FindFreeBlock(VkDeviceSize size, uint32_t& listIndex) const;
10060     bool CheckBlock(
10061         Block& block,
10062         uint32_t listIndex,
10063         VkDeviceSize allocSize,
10064         VkDeviceSize allocAlignment,
10065         VmaSuballocationType allocType,
10066         VmaAllocationRequest* pAllocationRequest);
10067 };
10068 
10069 #ifndef _VMA_BLOCK_METADATA_TLSF_FUNCTIONS
10070 VmaBlockMetadata_TLSF::VmaBlockMetadata_TLSF(const VkAllocationCallbacks* pAllocationCallbacks,
10071     VkDeviceSize bufferImageGranularity, bool isVirtual)
10072     : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual),
10073     m_AllocCount(0),
10074     m_BlocksFreeCount(0),
10075     m_BlocksFreeSize(0),
10076     m_IsFreeBitmap(0),
10077     m_MemoryClasses(0),
10078     m_ListsCount(0),
10079     m_FreeList(VMA_NULL),
10080     m_BlockAllocator(pAllocationCallbacks, INITIAL_BLOCK_ALLOC_COUNT),
10081     m_NullBlock(VMA_NULL),
10082     m_GranularityHandler(bufferImageGranularity) {}
10083 
10084 VmaBlockMetadata_TLSF::~VmaBlockMetadata_TLSF()
10085 {
10086     if (m_FreeList)
10087         vma_delete_array(GetAllocationCallbacks(), m_FreeList, m_ListsCount);
10088     m_GranularityHandler.Destroy(GetAllocationCallbacks());
10089 }
10090 
10091 void VmaBlockMetadata_TLSF::Init(VkDeviceSize size)
10092 {
10093     VmaBlockMetadata::Init(size);
10094 
10095     if (!IsVirtual())
10096         m_GranularityHandler.Init(GetAllocationCallbacks(), size);
10097 
10098     m_NullBlock = m_BlockAllocator.Alloc();
10099     m_NullBlock->size = size;
10100     m_NullBlock->offset = 0;
10101     m_NullBlock->prevPhysical = VMA_NULL;
10102     m_NullBlock->nextPhysical = VMA_NULL;
10103     m_NullBlock->MarkFree();
10104     m_NullBlock->NextFree() = VMA_NULL;
10105     m_NullBlock->PrevFree() = VMA_NULL;
10106     uint8_t memoryClass = SizeToMemoryClass(size);
10107     uint16_t sli = SizeToSecondIndex(size, memoryClass);
10108     m_ListsCount = (memoryClass == 0 ? 0 : (memoryClass - 1) * (1UL << SECOND_LEVEL_INDEX) + sli) + 1;
10109     if (IsVirtual())
10110         m_ListsCount += 1UL << SECOND_LEVEL_INDEX;
10111     else
10112         m_ListsCount += 4;
10113 
10114     m_MemoryClasses = memoryClass + uint8_t(2);
10115     memset(m_InnerIsFreeBitmap, 0, MAX_MEMORY_CLASSES * sizeof(uint32_t));
10116 
10117     m_FreeList = vma_new_array(GetAllocationCallbacks(), Block*, m_ListsCount);
10118     memset(m_FreeList, 0, m_ListsCount * sizeof(Block*));
10119 }
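
// Worked example for Init() (illustrative): for size == 1 MiB (2^20) on a
// non-virtual block, memoryClass == 20 - MEMORY_CLASS_SHIFT == 13 and
// sli == ((1 << 20) >> 15) ^ (1 << 5) == 0, so m_ListsCount == (13 - 1) * 32
// + 0 + 1 + 4 == 389 and m_MemoryClasses == 13 + 2 == 15.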
10120 
10121 bool VmaBlockMetadata_TLSF::Validate() const
10122 {
10123     VMA_VALIDATE(GetSumFreeSize() <= GetSize());
10124 
10125     VkDeviceSize calculatedSize = m_NullBlock->size;
10126     VkDeviceSize calculatedFreeSize = m_NullBlock->size;
10127     size_t allocCount = 0;
10128     size_t freeCount = 0;
10129 
10130     // Check integrity of free lists
10131     for (uint32_t list = 0; list < m_ListsCount; ++list)
10132     {
10133         Block* block = m_FreeList[list];
10134         if (block != VMA_NULL)
10135         {
10136             VMA_VALIDATE(block->IsFree());
10137             VMA_VALIDATE(block->PrevFree() == VMA_NULL);
10138             while (block->NextFree())
10139             {
10140                 VMA_VALIDATE(block->NextFree()->IsFree());
10141                 VMA_VALIDATE(block->NextFree()->PrevFree() == block);
10142                 block = block->NextFree();
10143             }
10144         }
10145     }
10146 
10147     VkDeviceSize nextOffset = m_NullBlock->offset;
10148     auto validateCtx = m_GranularityHandler.StartValidation(GetAllocationCallbacks(), IsVirtual());
10149 
10150     VMA_VALIDATE(m_NullBlock->nextPhysical == VMA_NULL);
10151     if (m_NullBlock->prevPhysical)
10152     {
10153         VMA_VALIDATE(m_NullBlock->prevPhysical->nextPhysical == m_NullBlock);
10154     }
10155     // Check all blocks
10156     for (Block* prev = m_NullBlock->prevPhysical; prev != VMA_NULL; prev = prev->prevPhysical)
10157     {
10158         VMA_VALIDATE(prev->offset + prev->size == nextOffset);
10159         nextOffset = prev->offset;
10160         calculatedSize += prev->size;
10161 
10162         uint32_t listIndex = GetListIndex(prev->size);
10163         if (prev->IsFree())
10164         {
10165             ++freeCount;
10166             // Check if free block belongs to free list
10167             Block* freeBlock = m_FreeList[listIndex];
10168             VMA_VALIDATE(freeBlock != VMA_NULL);
10169 
10170             bool found = false;
10171             do
10172             {
10173                 if (freeBlock == prev)
10174                     found = true;
10175 
10176                 freeBlock = freeBlock->NextFree();
10177             } while (!found && freeBlock != VMA_NULL);
10178 
10179             VMA_VALIDATE(found);
10180             calculatedFreeSize += prev->size;
10181         }
10182         else
10183         {
10184             ++allocCount;
10185             // Check if taken block is not on a free list
10186             Block* freeBlock = m_FreeList[listIndex];
10187             while (freeBlock)
10188             {
10189                 VMA_VALIDATE(freeBlock != prev);
10190                 freeBlock = freeBlock->NextFree();
10191             }
10192 
10193             if (!IsVirtual())
10194             {
10195                 VMA_VALIDATE(m_GranularityHandler.Validate(validateCtx, prev->offset, prev->size));
10196             }
10197         }
10198 
10199         if (prev->prevPhysical)
10200         {
10201             VMA_VALIDATE(prev->prevPhysical->nextPhysical == prev);
10202         }
10203     }
10204 
10205     if (!IsVirtual())
10206     {
10207         VMA_VALIDATE(m_GranularityHandler.FinishValidation(validateCtx));
10208     }
10209 
10210     VMA_VALIDATE(nextOffset == 0);
10211     VMA_VALIDATE(calculatedSize == GetSize());
10212     VMA_VALIDATE(calculatedFreeSize == GetSumFreeSize());
10213     VMA_VALIDATE(allocCount == m_AllocCount);
10214     VMA_VALIDATE(freeCount == m_BlocksFreeCount);
10215 
10216     return true;
10217 }
10218 
10219 void VmaBlockMetadata_TLSF::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const
10220 {
10221     inoutStats.statistics.blockCount++;
10222     inoutStats.statistics.blockBytes += GetSize();
10223     if (m_NullBlock->size > 0)
10224         VmaAddDetailedStatisticsUnusedRange(inoutStats, m_NullBlock->size);
10225 
10226     for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
10227     {
10228         if (block->IsFree())
10229             VmaAddDetailedStatisticsUnusedRange(inoutStats, block->size);
10230         else
10231             VmaAddDetailedStatisticsAllocation(inoutStats, block->size);
10232     }
10233 }
10234 
10235 void VmaBlockMetadata_TLSF::AddStatistics(VmaStatistics& inoutStats) const
10236 {
10237     inoutStats.blockCount++;
10238     inoutStats.allocationCount += (uint32_t)m_AllocCount;
10239     inoutStats.blockBytes += GetSize();
10240     inoutStats.allocationBytes += GetSize() - GetSumFreeSize();
10241 }
10242 
10243 #if VMA_STATS_STRING_ENABLED
10244 void VmaBlockMetadata_TLSF::PrintDetailedMap(class VmaJsonWriter& json) const
10245 {
10246     size_t blockCount = m_AllocCount + m_BlocksFreeCount;
10247     VmaStlAllocator<Block*> allocator(GetAllocationCallbacks());
10248     VmaVector<Block*, VmaStlAllocator<Block*>> blockList(blockCount, allocator);
10249 
10250     size_t i = blockCount;
10251     for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
10252     {
10253         blockList[--i] = block;
10254     }
10255     VMA_ASSERT(i == 0);
10256 
10257     VmaDetailedStatistics stats;
10258     VmaClearDetailedStatistics(stats);
10259     AddDetailedStatistics(stats);
10260 
10261     PrintDetailedMap_Begin(json,
10262         stats.statistics.blockBytes - stats.statistics.allocationBytes,
10263         stats.statistics.allocationCount,
10264         stats.unusedRangeCount);
10265 
10266     for (; i < blockCount; ++i)
10267     {
10268         Block* block = blockList[i];
10269         if (block->IsFree())
10270             PrintDetailedMap_UnusedRange(json, block->offset, block->size);
10271         else
10272             PrintDetailedMap_Allocation(json, block->offset, block->size, block->UserData());
10273     }
10274     if (m_NullBlock->size > 0)
10275         PrintDetailedMap_UnusedRange(json, m_NullBlock->offset, m_NullBlock->size);
10276 
10277     PrintDetailedMap_End(json);
10278 }
10279 #endif
10280 
10281 bool VmaBlockMetadata_TLSF::CreateAllocationRequest(
10282     VkDeviceSize allocSize,
10283     VkDeviceSize allocAlignment,
10284     bool upperAddress,
10285     VmaSuballocationType allocType,
10286     uint32_t strategy,
10287     VmaAllocationRequest* pAllocationRequest)
10288 {
10289     VMA_ASSERT(allocSize > 0 && "Cannot allocate empty block!");
10290     VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
10291 
10292     // For small granularity round up
10293     if (!IsVirtual())
10294         m_GranularityHandler.RoundupAllocRequest(allocType, allocSize, allocAlignment);
10295 
10296     allocSize += GetDebugMargin();
10297     // Quick check for too small pool
10298     if (allocSize > GetSumFreeSize())
10299         return false;
10300 
10301     // If no free blocks in pool then check only null block
10302     if (m_BlocksFreeCount == 0)
10303         return CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest);
10304 
10305     // Round up to the next free-list bucket
10306     VkDeviceSize sizeForNextList = allocSize;
10307     VkDeviceSize smallSizeStep = VkDeviceSize(SMALL_BUFFER_SIZE / (IsVirtual() ? 1 << SECOND_LEVEL_INDEX : 4));
10308     if (allocSize > SMALL_BUFFER_SIZE)
10309     {
10310         sizeForNextList += (1ULL << (VMA_BITSCAN_MSB(allocSize) - SECOND_LEVEL_INDEX));
10311     }
10312     else if (allocSize > SMALL_BUFFER_SIZE - smallSizeStep)
10313         sizeForNextList = SMALL_BUFFER_SIZE + 1;
10314     else
10315         sizeForNextList += smallSizeStep;
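
    // E.g. (illustrative): for allocSize == 1000, sizeForNextList becomes
    // 1000 + (1ULL << (VMA_BITSCAN_MSB(1000) - SECOND_LEVEL_INDEX)) == 1016,
    // which falls into the free-list bucket one past the one holding 1000.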
10316 
10317     uint32_t nextListIndex = m_ListsCount;
10318     uint32_t prevListIndex = m_ListsCount;
10319     Block* nextListBlock = VMA_NULL;
10320     Block* prevListBlock = VMA_NULL;
10321 
10322     // Check blocks according to strategies
10323     if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT)
10324     {
10325         // Quick check for larger block first
10326         nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
10327         if (nextListBlock != VMA_NULL && CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
10328             return true;
10329 
10330         // If it did not fit, try the null block
10331         if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
10332             return true;
10333 
10334         // Null block failed, search larger bucket
10335         while (nextListBlock)
10336         {
10337             if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
10338                 return true;
10339             nextListBlock = nextListBlock->NextFree();
10340         }
10341 
10342         // Failed again, check best fit bucket
10343         prevListBlock = FindFreeBlock(allocSize, prevListIndex);
10344         while (prevListBlock)
10345         {
10346             if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
10347                 return true;
10348             prevListBlock = prevListBlock->NextFree();
10349         }
10350     }
10351     else if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT)
10352     {
10353         // Check best fit bucket
10354         prevListBlock = FindFreeBlock(allocSize, prevListIndex);
10355         while (prevListBlock)
10356         {
10357             if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
10358                 return true;
10359             prevListBlock = prevListBlock->NextFree();
10360         }
10361 
10362         // If failed check null block
10363         if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
10364             return true;
10365 
10366         // Check larger bucket
10367         nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
10368         while (nextListBlock)
10369         {
10370             if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
10371                 return true;
10372             nextListBlock = nextListBlock->NextFree();
10373         }
10374     }
10375     else if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT )
10376     {
10377         // Perform search from the start
10378         VmaStlAllocator<Block*> allocator(GetAllocationCallbacks());
10379         VmaVector<Block*, VmaStlAllocator<Block*>> blockList(m_BlocksFreeCount, allocator);
10380 
10381         size_t i = m_BlocksFreeCount;
10382         for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
10383         {
10384             if (block->IsFree() && block->size >= allocSize)
10385                 blockList[--i] = block;
10386         }
10387 
10388         for (; i < m_BlocksFreeCount; ++i)
10389         {
10390             Block& block = *blockList[i];
10391             if (CheckBlock(block, GetListIndex(block.size), allocSize, allocAlignment, allocType, pAllocationRequest))
10392                 return true;
10393         }
10394 
10395         // If failed check null block
10396         if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
10397             return true;
10398 
10399         // Whole range searched, no more memory
10400         return false;
10401     }
10402     else
10403     {
10404         // Check larger bucket
10405         nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
10406         while (nextListBlock)
10407         {
10408             if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
10409                 return true;
10410             nextListBlock = nextListBlock->NextFree();
10411         }
10412 
10413         // If failed check null block
10414         if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
10415             return true;
10416 
10417         // Check best fit bucket
10418         prevListBlock = FindFreeBlock(allocSize, prevListIndex);
10419         while (prevListBlock)
10420         {
10421             if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
10422                 return true;
10423             prevListBlock = prevListBlock->NextFree();
10424         }
10425     }
10426 
10427     // Worst case, full search has to be done
10428     while (++nextListIndex < m_ListsCount)
10429     {
10430         nextListBlock = m_FreeList[nextListIndex];
10431         while (nextListBlock)
10432         {
10433             if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
10434                 return true;
10435             nextListBlock = nextListBlock->NextFree();
10436         }
10437     }
10438 
10439     // No more memory sadly
10440     return false;
10441 }
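
// A minimal sketch (illustrative, not part of the library) of how the strategy
// bits tested above arrive through the public virtual-allocator API, where
// TLSF is the default algorithm. Kept inactive like other reference code here:
#if 0
static void ExampleTlsfAllocationStrategy()
{
    VmaVirtualBlockCreateInfo blockCreateInfo = {};
    blockCreateInfo.size = 1048576; // 1 MiB of virtual space to sub-allocate.
    VmaVirtualBlock block = VK_NULL_HANDLE;
    if (vmaCreateVirtualBlock(&blockCreateInfo, &block) == VK_SUCCESS)
    {
        VmaVirtualAllocationCreateInfo allocCreateInfo = {};
        allocCreateInfo.size = 4096;
        // Takes the VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT path above:
        // check a larger bucket first, then the null block, then best fit.
        allocCreateInfo.flags = VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
        VmaVirtualAllocation alloc = VK_NULL_HANDLE;
        VkDeviceSize offset = 0;
        if (vmaVirtualAllocate(block, &allocCreateInfo, &alloc, &offset) == VK_SUCCESS)
            vmaVirtualFree(block, alloc);
        vmaDestroyVirtualBlock(block);
    }
}
#endif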
10442 
10443 VkResult VmaBlockMetadata_TLSF::CheckCorruption(const void* pBlockData)
10444 {
10445     for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
10446     {
10447         if (!block->IsFree())
10448         {
10449             if (!VmaValidateMagicValue(pBlockData, block->offset + block->size))
10450             {
10451                 VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10452                 return VK_ERROR_UNKNOWN_COPY;
10453             }
10454         }
10455     }
10456 
10457     return VK_SUCCESS;
10458 }
10459 
10460 void VmaBlockMetadata_TLSF::Alloc(
10461     const VmaAllocationRequest& request,
10462     VmaSuballocationType type,
10463     void* userData)
10464 {
10465     VMA_ASSERT(request.type == VmaAllocationRequestType::TLSF);
10466 
10467     // Get block and pop it from the free list
10468     Block* currentBlock = (Block*)request.allocHandle;
10469     VkDeviceSize offset = request.algorithmData;
10470     VMA_ASSERT(currentBlock != VMA_NULL);
10471     VMA_ASSERT(currentBlock->offset <= offset);
10472 
10473     if (currentBlock != m_NullBlock)
10474         RemoveFreeBlock(currentBlock);
10475 
10476     VkDeviceSize debugMargin = GetDebugMargin();
10477     VkDeviceSize missingAlignment = offset - currentBlock->offset;
10478 
10479     // Append missing alignment to prev block or create new one
10480     if (missingAlignment)
10481     {
10482         Block* prevBlock = currentBlock->prevPhysical;
10483         VMA_ASSERT(prevBlock != VMA_NULL && "There should be no missing alignment at offset 0!");
10484 
10485         if (prevBlock->IsFree() && prevBlock->size != debugMargin)
10486         {
10487             uint32_t oldList = GetListIndex(prevBlock->size);
10488             prevBlock->size += missingAlignment;
10489             // Check if new size crosses list bucket
10490             if (oldList != GetListIndex(prevBlock->size))
10491             {
10492                 prevBlock->size -= missingAlignment;
10493                 RemoveFreeBlock(prevBlock);
10494                 prevBlock->size += missingAlignment;
10495                 InsertFreeBlock(prevBlock);
10496             }
10497             else
10498                 m_BlocksFreeSize += missingAlignment;
10499         }
10500         else
10501         {
10502             Block* newBlock = m_BlockAllocator.Alloc();
10503             currentBlock->prevPhysical = newBlock;
10504             prevBlock->nextPhysical = newBlock;
10505             newBlock->prevPhysical = prevBlock;
10506             newBlock->nextPhysical = currentBlock;
10507             newBlock->size = missingAlignment;
10508             newBlock->offset = currentBlock->offset;
10509             newBlock->MarkTaken();
10510 
10511             InsertFreeBlock(newBlock);
10512         }
10513 
10514         currentBlock->size -= missingAlignment;
10515         currentBlock->offset += missingAlignment;
10516     }
10517 
10518     VkDeviceSize size = request.size + debugMargin;
10519     if (currentBlock->size == size)
10520     {
10521         if (currentBlock == m_NullBlock)
10522         {
10523             // Set up a new null block
10524             m_NullBlock = m_BlockAllocator.Alloc();
10525             m_NullBlock->size = 0;
10526             m_NullBlock->offset = currentBlock->offset + size;
10527             m_NullBlock->prevPhysical = currentBlock;
10528             m_NullBlock->nextPhysical = VMA_NULL;
10529             m_NullBlock->MarkFree();
10530             m_NullBlock->PrevFree() = VMA_NULL;
10531             m_NullBlock->NextFree() = VMA_NULL;
10532             currentBlock->nextPhysical = m_NullBlock;
10533             currentBlock->MarkTaken();
10534         }
10535     }
10536     else
10537     {
10538         VMA_ASSERT(currentBlock->size > size && "Proper block already found, shouldn't find smaller one!");
10539 
10540         // Create new free block
10541         Block* newBlock = m_BlockAllocator.Alloc();
10542         newBlock->size = currentBlock->size - size;
10543         newBlock->offset = currentBlock->offset + size;
10544         newBlock->prevPhysical = currentBlock;
10545         newBlock->nextPhysical = currentBlock->nextPhysical;
10546         currentBlock->nextPhysical = newBlock;
10547         currentBlock->size = size;
10548 
10549         if (currentBlock == m_NullBlock)
10550         {
10551             m_NullBlock = newBlock;
10552             m_NullBlock->MarkFree();
10553             m_NullBlock->NextFree() = VMA_NULL;
10554             m_NullBlock->PrevFree() = VMA_NULL;
10555             currentBlock->MarkTaken();
10556         }
10557         else
10558         {
10559             newBlock->nextPhysical->prevPhysical = newBlock;
10560             newBlock->MarkTaken();
10561             InsertFreeBlock(newBlock);
10562         }
10563     }
10564     currentBlock->UserData() = userData;
10565 
10566     if (debugMargin > 0)
10567     {
10568         currentBlock->size -= debugMargin;
10569         Block* newBlock = m_BlockAllocator.Alloc();
10570         newBlock->size = debugMargin;
10571         newBlock->offset = currentBlock->offset + currentBlock->size;
10572         newBlock->prevPhysical = currentBlock;
10573         newBlock->nextPhysical = currentBlock->nextPhysical;
10574         newBlock->MarkTaken();
10575         currentBlock->nextPhysical->prevPhysical = newBlock;
10576         currentBlock->nextPhysical = newBlock;
10577         InsertFreeBlock(newBlock);
10578     }
10579 
10580     if (!IsVirtual())
10581         m_GranularityHandler.AllocPages((uint8_t)(uintptr_t)request.customData,
10582             currentBlock->offset, currentBlock->size);
10583     ++m_AllocCount;
10584 }
10585 
10586 void VmaBlockMetadata_TLSF::Free(VmaAllocHandle allocHandle)
10587 {
10588     Block* block = (Block*)allocHandle;
10589     Block* next = block->nextPhysical;
10590     VMA_ASSERT(!block->IsFree() && "Block is already free!");
10591 
10592     if (!IsVirtual())
10593         m_GranularityHandler.FreePages(block->offset, block->size);
10594     --m_AllocCount;
10595 
10596     VkDeviceSize debugMargin = GetDebugMargin();
10597     if (debugMargin > 0)
10598     {
10599         RemoveFreeBlock(next);
10600         MergeBlock(next, block);
10601         block = next;
10602         next = next->nextPhysical;
10603     }
10604 
10605     // Try merging
10606     Block* prev = block->prevPhysical;
10607     if (prev != VMA_NULL && prev->IsFree() && prev->size != debugMargin)
10608     {
10609         RemoveFreeBlock(prev);
10610         MergeBlock(block, prev);
10611     }
10612 
10613     if (!next->IsFree())
10614         InsertFreeBlock(block);
10615     else if (next == m_NullBlock)
10616         MergeBlock(m_NullBlock, block);
10617     else
10618     {
10619         RemoveFreeBlock(next);
10620         MergeBlock(next, block);
10621         InsertFreeBlock(next);
10622     }
10623 }
10624 
10625 void VmaBlockMetadata_TLSF::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo)
10626 {
10627     Block* block = (Block*)allocHandle;
10628     VMA_ASSERT(!block->IsFree() && "Cannot get allocation info for free block!");
10629     outInfo.offset = block->offset;
10630     outInfo.size = block->size;
10631     outInfo.pUserData = block->UserData();
10632 }
10633 
10634 void* VmaBlockMetadata_TLSF::GetAllocationUserData(VmaAllocHandle allocHandle) const
10635 {
10636     Block* block = (Block*)allocHandle;
10637     VMA_ASSERT(!block->IsFree() && "Cannot get user data for free block!");
10638     return block->UserData();
10639 }
10640 
10641 VmaAllocHandle VmaBlockMetadata_TLSF::GetAllocationListBegin() const
10642 {
10643     if (m_AllocCount == 0)
10644         return VK_NULL_HANDLE;
10645 
10646     for (Block* block = m_NullBlock->prevPhysical; block; block = block->prevPhysical)
10647     {
10648         if (!block->IsFree())
10649             return (VmaAllocHandle)block;
10650     }
10651     VMA_ASSERT(false && "If m_AllocCount > 0 then at least one allocation should be found!");
10652     return VK_NULL_HANDLE;
10653 }
10654 
10655 VmaAllocHandle VmaBlockMetadata_TLSF::GetNextAllocation(VmaAllocHandle prevAlloc) const
10656 {
10657     Block* startBlock = (Block*)prevAlloc;
10658     VMA_ASSERT(!startBlock->IsFree() && "Incorrect block!");
10659 
10660     for (Block* block = startBlock->prevPhysical; block; block = block->prevPhysical)
10661     {
10662         if (!block->IsFree())
10663             return (VmaAllocHandle)block;
10664     }
10665     return VK_NULL_HANDLE;
10666 }
10667 
10668 VkDeviceSize VmaBlockMetadata_TLSF::GetNextFreeRegionSize(VmaAllocHandle alloc) const
10669 {
10670     Block* block = (Block*)alloc;
10671     VMA_ASSERT(!block->IsFree() && "Incorrect block!");
10672 
10673     if (block->prevPhysical)
10674         return block->prevPhysical->IsFree() ? block->prevPhysical->size : 0;
10675     return 0;
10676 }
10677 
10678 void VmaBlockMetadata_TLSF::Clear()
10679 {
10680     m_AllocCount = 0;
10681     m_BlocksFreeCount = 0;
10682     m_BlocksFreeSize = 0;
10683     m_IsFreeBitmap = 0;
10684     m_NullBlock->offset = 0;
10685     m_NullBlock->size = GetSize();
10686     Block* block = m_NullBlock->prevPhysical;
10687     m_NullBlock->prevPhysical = VMA_NULL;
10688     while (block)
10689     {
10690         Block* prev = block->prevPhysical;
10691         m_BlockAllocator.Free(block);
10692         block = prev;
10693     }
10694     memset(m_FreeList, 0, m_ListsCount * sizeof(Block*));
10695     memset(m_InnerIsFreeBitmap, 0, m_MemoryClasses * sizeof(uint32_t));
10696     m_GranularityHandler.Clear();
10697 }
10698 
10699 void VmaBlockMetadata_TLSF::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData)
10700 {
10701     Block* block = (Block*)allocHandle;
10702     VMA_ASSERT(!block->IsFree() && "Trying to set user data for a block that is not allocated!");
10703     block->UserData() = userData;
10704 }
10705 
10706 void VmaBlockMetadata_TLSF::DebugLogAllAllocations() const
10707 {
10708     for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
10709         if (!block->IsFree())
10710             DebugLogAllocation(block->offset, block->size, block->UserData());
10711 }
10712 
10713 uint8_t VmaBlockMetadata_TLSF::SizeToMemoryClass(VkDeviceSize size) const
10714 {
10715     if (size > SMALL_BUFFER_SIZE)
10716         return uint8_t(VMA_BITSCAN_MSB(size) - MEMORY_CLASS_SHIFT);
10717     return 0;
10718 }
10719 
10720 uint16_t VmaBlockMetadata_TLSF::SizeToSecondIndex(VkDeviceSize size, uint8_t memoryClass) const
10721 {
10722     if (memoryClass == 0)
10723     {
10724         if (IsVirtual())
10725             return static_cast<uint16_t>((size - 1) / 8);
10726         else
10727             return static_cast<uint16_t>((size - 1) / 64);
10728     }
10729     return static_cast<uint16_t>((size >> (memoryClass + MEMORY_CLASS_SHIFT - SECOND_LEVEL_INDEX)) ^ (1U << SECOND_LEVEL_INDEX));
10730 }
10731 
GetListIndex(uint8_t memoryClass,uint16_t secondIndex)10732 uint32_t VmaBlockMetadata_TLSF::GetListIndex(uint8_t memoryClass, uint16_t secondIndex) const
10733 {
10734     if (memoryClass == 0)
10735         return secondIndex;
10736 
10737     const uint32_t index = static_cast<uint32_t>(memoryClass - 1) * (1 << SECOND_LEVEL_INDEX) + secondIndex;
10738     if (IsVirtual())
10739         return index + (1 << SECOND_LEVEL_INDEX);
10740     else
10741         return index + 4;
10742 }
10743 
GetListIndex(VkDeviceSize size)10744 uint32_t VmaBlockMetadata_TLSF::GetListIndex(VkDeviceSize size) const
10745 {
10746     uint8_t memoryClass = SizeToMemoryClass(size);
10747     return GetListIndex(memoryClass, SizeToSecondIndex(size, memoryClass));
10748 }
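
// The three functions above implement the classic TLSF two-level index:
// SizeToMemoryClass() picks the first-level bucket (the power-of-two range
// containing the size), SizeToSecondIndex() picks the linear subdivision
// inside that range, and GetListIndex() flattens both into one free-list
// index. A worked example for a non-virtual block, assuming the library's
// default constants SMALL_BUFFER_SIZE = 256, MEMORY_CLASS_SHIFT = 7 and
// SECOND_LEVEL_INDEX = 5 (illustrative only - check the actual constants):
//
//     VkDeviceSize size = 1000;
//     // memoryClass = BitScanMSB(1000) - 7 = 9 - 7 = 2   (512 <= 1000 < 1024)
//     // secondIndex = (1000 >> (2 + 7 - 5)) ^ (1 << 5)
//     //             = 62 ^ 32 = 30
//     // listIndex   = (2 - 1) * 32 + 30 + 4 = 66
//
// So a 1000-byte request starts its search at free list 66, which holds free
// blocks with sizes in [992, 1008).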

void VmaBlockMetadata_TLSF::RemoveFreeBlock(Block* block)
{
    VMA_ASSERT(block != m_NullBlock);
    VMA_ASSERT(block->IsFree());

    if (block->NextFree() != VMA_NULL)
        block->NextFree()->PrevFree() = block->PrevFree();
    if (block->PrevFree() != VMA_NULL)
        block->PrevFree()->NextFree() = block->NextFree();
    else
    {
        uint8_t memClass = SizeToMemoryClass(block->size);
        uint16_t secondIndex = SizeToSecondIndex(block->size, memClass);
        uint32_t index = GetListIndex(memClass, secondIndex);
        VMA_ASSERT(m_FreeList[index] == block);
        m_FreeList[index] = block->NextFree();
        if (block->NextFree() == VMA_NULL)
        {
            m_InnerIsFreeBitmap[memClass] &= ~(1U << secondIndex);
            if (m_InnerIsFreeBitmap[memClass] == 0)
                m_IsFreeBitmap &= ~(1UL << memClass);
        }
    }
    block->MarkTaken();
    block->UserData() = VMA_NULL;
    --m_BlocksFreeCount;
    m_BlocksFreeSize -= block->size;
}

void VmaBlockMetadata_TLSF::InsertFreeBlock(Block* block)
{
    VMA_ASSERT(block != m_NullBlock);
    VMA_ASSERT(!block->IsFree() && "Cannot insert block twice!");

    uint8_t memClass = SizeToMemoryClass(block->size);
    uint16_t secondIndex = SizeToSecondIndex(block->size, memClass);
    uint32_t index = GetListIndex(memClass, secondIndex);
    VMA_ASSERT(index < m_ListsCount);
    block->PrevFree() = VMA_NULL;
    block->NextFree() = m_FreeList[index];
    m_FreeList[index] = block;
    if (block->NextFree() != VMA_NULL)
        block->NextFree()->PrevFree() = block;
    else
    {
        m_InnerIsFreeBitmap[memClass] |= 1U << secondIndex;
        m_IsFreeBitmap |= 1UL << memClass;
    }
    ++m_BlocksFreeCount;
    m_BlocksFreeSize += block->size;
}

void VmaBlockMetadata_TLSF::MergeBlock(Block* block, Block* prev)
{
    VMA_ASSERT(block->prevPhysical == prev && "Cannot merge separate physical regions!");
    VMA_ASSERT(!prev->IsFree() && "Cannot merge block that belongs to free list!");

    block->offset = prev->offset;
    block->size += prev->size;
    block->prevPhysical = prev->prevPhysical;
    if (block->prevPhysical)
        block->prevPhysical->nextPhysical = block;
    m_BlockAllocator.Free(prev);
}

VmaBlockMetadata_TLSF::Block* VmaBlockMetadata_TLSF::FindFreeBlock(VkDeviceSize size, uint32_t& listIndex) const
{
    uint8_t memoryClass = SizeToMemoryClass(size);
    uint32_t innerFreeMap = m_InnerIsFreeBitmap[memoryClass] & (~0U << SizeToSecondIndex(size, memoryClass));
    if (!innerFreeMap)
    {
        // Check higher levels for available blocks
        uint32_t freeMap = m_IsFreeBitmap & (~0UL << (memoryClass + 1));
        if (!freeMap)
            return VMA_NULL; // No more memory available

        // Find lowest free region
        memoryClass = VMA_BITSCAN_LSB(freeMap);
        innerFreeMap = m_InnerIsFreeBitmap[memoryClass];
        VMA_ASSERT(innerFreeMap != 0);
    }
    // Find lowest free subregion
    listIndex = GetListIndex(memoryClass, VMA_BITSCAN_LSB(innerFreeMap));
    VMA_ASSERT(m_FreeList[listIndex]);
    return m_FreeList[listIndex];
}
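
// FindFreeBlock() is the O(1) "good fit" search at the heart of TLSF: the
// first mask keeps only the free lists of the requested class whose sizes are
// at least as large as the request; if that mask is empty, the top-level
// bitmap is scanned for the next non-empty memory class above it. Two bit
// scans and two array lookups bound the whole search, independent of how many
// free blocks currently exist.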

bool VmaBlockMetadata_TLSF::CheckBlock(
    Block& block,
    uint32_t listIndex,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(block.IsFree() && "Block is already taken!");

    VkDeviceSize alignedOffset = VmaAlignUp(block.offset, allocAlignment);
    if (block.size < allocSize + alignedOffset - block.offset)
        return false;

    // Check for granularity conflicts
    if (!IsVirtual() &&
        m_GranularityHandler.CheckConflictAndAlignUp(alignedOffset, allocSize, block.offset, block.size, allocType))
        return false;

    // Alloc successful
    pAllocationRequest->type = VmaAllocationRequestType::TLSF;
    pAllocationRequest->allocHandle = (VmaAllocHandle)&block;
    pAllocationRequest->size = allocSize - GetDebugMargin();
    pAllocationRequest->customData = (void*)allocType;
    pAllocationRequest->algorithmData = alignedOffset;

    // Place the block at the start of the list if it's a normal block
    if (listIndex != m_ListsCount && block.PrevFree())
    {
        block.PrevFree()->NextFree() = block.NextFree();
        if (block.NextFree())
            block.NextFree()->PrevFree() = block.PrevFree();
        block.PrevFree() = VMA_NULL;
        block.NextFree() = m_FreeList[listIndex];
        m_FreeList[listIndex] = &block;
        if (block.NextFree())
            block.NextFree()->PrevFree() = &block;
    }

    return true;
}
#endif // _VMA_BLOCK_METADATA_TLSF_FUNCTIONS
#endif // _VMA_BLOCK_METADATA_TLSF

#ifndef _VMA_BLOCK_VECTOR
/*
Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
Vulkan memory type.

Synchronized internally with a mutex.
*/
class VmaBlockVector
{
    friend struct VmaDefragmentationContext_T;
    VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockVector)
public:
    VmaBlockVector(
        VmaAllocator hAllocator,
        VmaPool hParentPool,
        uint32_t memoryTypeIndex,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        bool explicitBlockSize,
        uint32_t algorithm,
        float priority,
        VkDeviceSize minAllocationAlignment,
        void* pMemoryAllocateNext);
    ~VmaBlockVector();

    VmaAllocator GetAllocator() const { return m_hAllocator; }
    VmaPool GetParentPool() const { return m_hParentPool; }
    bool IsCustomPool() const { return m_hParentPool != VMA_NULL; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetAlgorithm() const { return m_Algorithm; }
    bool HasExplicitBlockSize() const { return m_ExplicitBlockSize; }
    float GetPriority() const { return m_Priority; }
    const void* GetAllocationNextPtr() const { return m_pMemoryAllocateNext; }
    // To be used only while the m_Mutex is locked. Used during defragmentation.
    size_t GetBlockCount() const { return m_Blocks.size(); }
    // To be used only while the m_Mutex is locked. Used during defragmentation.
    VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
    VMA_RW_MUTEX& GetMutex() { return m_Mutex; }

    VkResult CreateMinBlocks();
    void AddStatistics(VmaStatistics& inoutStats);
    void AddDetailedStatistics(VmaDetailedStatistics& inoutStats);
    bool IsEmpty();
    bool IsCorruptionDetectionEnabled() const;

    VkResult Allocate(
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        size_t allocationCount,
        VmaAllocation* pAllocations);

    void Free(const VmaAllocation hAllocation);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    VkResult CheckCorruption();

private:
    const VmaAllocator m_hAllocator;
    const VmaPool m_hParentPool;
    const uint32_t m_MemoryTypeIndex;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const bool m_ExplicitBlockSize;
    const uint32_t m_Algorithm;
    const float m_Priority;
    const VkDeviceSize m_MinAllocationAlignment;

    void* const m_pMemoryAllocateNext;
    VMA_RW_MUTEX m_Mutex;
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector<VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*>> m_Blocks;
    uint32_t m_NextBlockId;
    bool m_IncrementalSort = true;

    void SetIncrementalSort(bool val) { m_IncrementalSort = val; }

    VkDeviceSize CalcMaxBlockSize() const;
    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);
    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();
    void SortByFreeSize();

    VkResult AllocatePage(
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    VkResult AllocateFromBlock(
        VmaDeviceMemoryBlock* pBlock,
        VkDeviceSize size,
        VkDeviceSize alignment,
        VmaAllocationCreateFlags allocFlags,
        void* pUserData,
        VmaSuballocationType suballocType,
        uint32_t strategy,
        VmaAllocation* pAllocation);

    VkResult CommitAllocationRequest(
        VmaAllocationRequest& allocRequest,
        VmaDeviceMemoryBlock* pBlock,
        VkDeviceSize alignment,
        VmaAllocationCreateFlags allocFlags,
        void* pUserData,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
    bool HasEmptyBlock();
};
#endif // _VMA_BLOCK_VECTOR

#ifndef _VMA_DEFRAGMENTATION_CONTEXT
struct VmaDefragmentationContext_T
{
    VMA_CLASS_NO_COPY_NO_MOVE(VmaDefragmentationContext_T)
public:
    VmaDefragmentationContext_T(
        VmaAllocator hAllocator,
        const VmaDefragmentationInfo& info);
    ~VmaDefragmentationContext_T();

    void GetStats(VmaDefragmentationStats& outStats) { outStats = m_GlobalStats; }

    VkResult DefragmentPassBegin(VmaDefragmentationPassMoveInfo& moveInfo);
    VkResult DefragmentPassEnd(VmaDefragmentationPassMoveInfo& moveInfo);

private:
    // Max number of allocations to ignore due to size constraints before ending a single pass
    static const uint8_t MAX_ALLOCS_TO_IGNORE = 16;
    enum class CounterStatus { Pass, Ignore, End };

    struct FragmentedBlock
    {
        uint32_t data;
        VmaDeviceMemoryBlock* block;
    };
    struct StateBalanced
    {
        VkDeviceSize avgFreeSize = 0;
        VkDeviceSize avgAllocSize = UINT64_MAX;
    };
    struct StateExtensive
    {
        enum class Operation : uint8_t
        {
            FindFreeBlockBuffer, FindFreeBlockTexture, FindFreeBlockAll,
            MoveBuffers, MoveTextures, MoveAll,
            Cleanup, Done
        };

        Operation operation = Operation::FindFreeBlockTexture;
        size_t firstFreeBlock = SIZE_MAX;
    };
    struct MoveAllocationData
    {
        VkDeviceSize size;
        VkDeviceSize alignment;
        VmaSuballocationType type;
        VmaAllocationCreateFlags flags;
        VmaDefragmentationMove move = {};
    };

    const VkDeviceSize m_MaxPassBytes;
    const uint32_t m_MaxPassAllocations;
    const PFN_vmaCheckDefragmentationBreakFunction m_BreakCallback;
    void* m_BreakCallbackUserData;

    VmaStlAllocator<VmaDefragmentationMove> m_MoveAllocator;
    VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove>> m_Moves;

    uint8_t m_IgnoredAllocs = 0;
    uint32_t m_Algorithm;
    uint32_t m_BlockVectorCount;
    VmaBlockVector* m_PoolBlockVector;
    VmaBlockVector** m_pBlockVectors;
    size_t m_ImmovableBlockCount = 0;
    VmaDefragmentationStats m_GlobalStats = { 0 };
    VmaDefragmentationStats m_PassStats = { 0 };
    void* m_AlgorithmState = VMA_NULL;

    static MoveAllocationData GetMoveData(VmaAllocHandle handle, VmaBlockMetadata* metadata);
    CounterStatus CheckCounters(VkDeviceSize bytes);
    bool IncrementCounters(VkDeviceSize bytes);
    bool ReallocWithinBlock(VmaBlockVector& vector, VmaDeviceMemoryBlock* block);
    bool AllocInOtherBlock(size_t start, size_t end, MoveAllocationData& data, VmaBlockVector& vector);

    bool ComputeDefragmentation(VmaBlockVector& vector, size_t index);
    bool ComputeDefragmentation_Fast(VmaBlockVector& vector);
    bool ComputeDefragmentation_Balanced(VmaBlockVector& vector, size_t index, bool update);
    bool ComputeDefragmentation_Full(VmaBlockVector& vector);
    bool ComputeDefragmentation_Extensive(VmaBlockVector& vector, size_t index);

    void UpdateVectorStatistics(VmaBlockVector& vector, StateBalanced& state);
    bool MoveDataToFreeBlocks(VmaSuballocationType currentType,
        VmaBlockVector& vector, size_t firstFreeBlock,
        bool& texturePresent, bool& bufferPresent, bool& otherPresent);
};
#endif // _VMA_DEFRAGMENTATION_CONTEXT
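
/*
VmaDefragmentationContext_T backs the public incremental defragmentation API.
A minimal sketch of the intended call sequence, assuming `allocator` is a
valid VmaAllocator and all data copies are handled by the caller:

    VmaDefragmentationInfo defragInfo = {};
    defragInfo.flags = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT;

    VmaDefragmentationContext defragCtx;
    VkResult res = vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx);

    for (;;)
    {
        VmaDefragmentationPassMoveInfo pass;
        res = vmaBeginDefragmentationPass(allocator, defragCtx, &pass);
        if (res == VK_SUCCESS)
            break; // Nothing (more) to defragment.
        // res == VK_INCOMPLETE: for each pass.pMoves[i], copy the data to the
        // new location and recreate the buffers/images bound to it, then:
        res = vmaEndDefragmentationPass(allocator, defragCtx, &pass);
        if (res == VK_SUCCESS)
            break;
    }

    VmaDefragmentationStats stats;
    vmaEndDefragmentation(allocator, defragCtx, &stats);
*/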

#ifndef _VMA_POOL_T
struct VmaPool_T
{
    friend struct VmaPoolListItemTraits;
    VMA_CLASS_NO_COPY_NO_MOVE(VmaPool_T)
public:
    VmaBlockVector m_BlockVector;
    VmaDedicatedAllocationList m_DedicatedAllocations;

    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo,
        VkDeviceSize preferredBlockSize);
    ~VmaPool_T();

    uint32_t GetId() const { return m_Id; }
    void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }

    const char* GetName() const { return m_Name; }
    void SetName(const char* pName);

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif

private:
    uint32_t m_Id;
    char* m_Name;
    VmaPool_T* m_PrevPool = VMA_NULL;
    VmaPool_T* m_NextPool = VMA_NULL;
};

struct VmaPoolListItemTraits
{
    typedef VmaPool_T ItemType;

    static ItemType* GetPrev(const ItemType* item) { return item->m_PrevPool; }
    static ItemType* GetNext(const ItemType* item) { return item->m_NextPool; }
    static ItemType*& AccessPrev(ItemType* item) { return item->m_PrevPool; }
    static ItemType*& AccessNext(ItemType* item) { return item->m_NextPool; }
};
#endif // _VMA_POOL_T
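
/*
Each VmaPool_T owns one VmaBlockVector plus a list of dedicated allocations.
A minimal sketch of creating a custom pool through the public API; the
memoryTypeIndex shown stands in for the result of a query such as
vmaFindMemoryTypeIndexForBufferInfo():

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memoryTypeIndex; // from a prior query
    poolCreateInfo.blockSize = 0;     // 0 = let the library choose
    poolCreateInfo.maxBlockCount = 0; // 0 = unlimited

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // ... allocate with VmaAllocationCreateInfo::pool = pool ...
    vmaDestroyPool(allocator, pool);
*/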

#ifndef _VMA_CURRENT_BUDGET_DATA
struct VmaCurrentBudgetData
{
    VMA_CLASS_NO_COPY_NO_MOVE(VmaCurrentBudgetData)
public:

    VMA_ATOMIC_UINT32 m_BlockCount[VK_MAX_MEMORY_HEAPS];
    VMA_ATOMIC_UINT32 m_AllocationCount[VK_MAX_MEMORY_HEAPS];
    VMA_ATOMIC_UINT64 m_BlockBytes[VK_MAX_MEMORY_HEAPS];
    VMA_ATOMIC_UINT64 m_AllocationBytes[VK_MAX_MEMORY_HEAPS];

#if VMA_MEMORY_BUDGET
    VMA_ATOMIC_UINT32 m_OperationsSinceBudgetFetch;
    VMA_RW_MUTEX m_BudgetMutex;
    uint64_t m_VulkanUsage[VK_MAX_MEMORY_HEAPS];
    uint64_t m_VulkanBudget[VK_MAX_MEMORY_HEAPS];
    uint64_t m_BlockBytesAtBudgetFetch[VK_MAX_MEMORY_HEAPS];
#endif // VMA_MEMORY_BUDGET

    VmaCurrentBudgetData();

    void AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize);
    void RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize);
};

#ifndef _VMA_CURRENT_BUDGET_DATA_FUNCTIONS
VmaCurrentBudgetData::VmaCurrentBudgetData()
{
    for (uint32_t heapIndex = 0; heapIndex < VK_MAX_MEMORY_HEAPS; ++heapIndex)
    {
        m_BlockCount[heapIndex] = 0;
        m_AllocationCount[heapIndex] = 0;
        m_BlockBytes[heapIndex] = 0;
        m_AllocationBytes[heapIndex] = 0;
#if VMA_MEMORY_BUDGET
        m_VulkanUsage[heapIndex] = 0;
        m_VulkanBudget[heapIndex] = 0;
        m_BlockBytesAtBudgetFetch[heapIndex] = 0;
#endif
    }

#if VMA_MEMORY_BUDGET
    m_OperationsSinceBudgetFetch = 0;
#endif
}

void VmaCurrentBudgetData::AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
{
    m_AllocationBytes[heapIndex] += allocationSize;
    ++m_AllocationCount[heapIndex];
#if VMA_MEMORY_BUDGET
    ++m_OperationsSinceBudgetFetch;
#endif
}

void VmaCurrentBudgetData::RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
{
    VMA_ASSERT(m_AllocationBytes[heapIndex] >= allocationSize);
    m_AllocationBytes[heapIndex] -= allocationSize;
    VMA_ASSERT(m_AllocationCount[heapIndex] > 0);
    --m_AllocationCount[heapIndex];
#if VMA_MEMORY_BUDGET
    ++m_OperationsSinceBudgetFetch;
#endif
}
#endif // _VMA_CURRENT_BUDGET_DATA_FUNCTIONS
#endif // _VMA_CURRENT_BUDGET_DATA
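
/*
The per-heap counters above feed the public budget query. A minimal sketch of
reading the budget before allocating; heap 0 and `newAllocSize` are
placeholders chosen for illustration:

    VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
    vmaGetHeapBudgets(allocator, budgets);

    if (budgets[0].usage + newAllocSize > budgets[0].budget)
    {
        // Over budget: free something first, or allocate with
        // VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT and handle failure.
    }
*/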

#ifndef _VMA_ALLOCATION_OBJECT_ALLOCATOR
/*
Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
*/
class VmaAllocationObjectAllocator
{
    VMA_CLASS_NO_COPY_NO_MOVE(VmaAllocationObjectAllocator)
public:
    VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks)
        : m_Allocator(pAllocationCallbacks, 1024) {}

    template<typename... Types> VmaAllocation Allocate(Types&&... args);
    void Free(VmaAllocation hAlloc);

private:
    VMA_MUTEX m_Mutex;
    VmaPoolAllocator<VmaAllocation_T> m_Allocator;
};

template<typename... Types>
VmaAllocation VmaAllocationObjectAllocator::Allocate(Types&&... args)
{
    VmaMutexLock mutexLock(m_Mutex);
    return m_Allocator.Alloc<Types...>(std::forward<Types>(args)...);
}

void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
{
    VmaMutexLock mutexLock(m_Mutex);
    m_Allocator.Free(hAlloc);
}
#endif // _VMA_ALLOCATION_OBJECT_ALLOCATOR

#ifndef _VMA_VIRTUAL_BLOCK_T
struct VmaVirtualBlock_T
{
    VMA_CLASS_NO_COPY_NO_MOVE(VmaVirtualBlock_T)
public:
    const bool m_AllocationCallbacksSpecified;
    const VkAllocationCallbacks m_AllocationCallbacks;

    VmaVirtualBlock_T(const VmaVirtualBlockCreateInfo& createInfo);
    ~VmaVirtualBlock_T();

    VkResult Init() { return VK_SUCCESS; }
    bool IsEmpty() const { return m_Metadata->IsEmpty(); }
    void Free(VmaVirtualAllocation allocation) { m_Metadata->Free((VmaAllocHandle)allocation); }
    void SetAllocationUserData(VmaVirtualAllocation allocation, void* userData) { m_Metadata->SetAllocationUserData((VmaAllocHandle)allocation, userData); }
    void Clear() { m_Metadata->Clear(); }

    const VkAllocationCallbacks* GetAllocationCallbacks() const;
    void GetAllocationInfo(VmaVirtualAllocation allocation, VmaVirtualAllocationInfo& outInfo);
    VkResult Allocate(const VmaVirtualAllocationCreateInfo& createInfo, VmaVirtualAllocation& outAllocation,
        VkDeviceSize* outOffset);
    void GetStatistics(VmaStatistics& outStats) const;
    void CalculateDetailedStatistics(VmaDetailedStatistics& outStats) const;
#if VMA_STATS_STRING_ENABLED
    void BuildStatsString(bool detailedMap, VmaStringBuilder& sb) const;
#endif

private:
    VmaBlockMetadata* m_Metadata;
};

#ifndef _VMA_VIRTUAL_BLOCK_T_FUNCTIONS
VmaVirtualBlock_T::VmaVirtualBlock_T(const VmaVirtualBlockCreateInfo& createInfo)
    : m_AllocationCallbacksSpecified(createInfo.pAllocationCallbacks != VMA_NULL),
    m_AllocationCallbacks(createInfo.pAllocationCallbacks != VMA_NULL ? *createInfo.pAllocationCallbacks : VmaEmptyAllocationCallbacks)
{
    const uint32_t algorithm = createInfo.flags & VMA_VIRTUAL_BLOCK_CREATE_ALGORITHM_MASK;
    switch (algorithm)
    {
    case 0:
        m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_TLSF)(VK_NULL_HANDLE, 1, true);
        break;
    case VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT:
        m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_Linear)(VK_NULL_HANDLE, 1, true);
        break;
    default:
        VMA_ASSERT(0);
        m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_TLSF)(VK_NULL_HANDLE, 1, true);
    }

    m_Metadata->Init(createInfo.size);
}

VmaVirtualBlock_T::~VmaVirtualBlock_T()
{
    // Define the macro VMA_DEBUG_LOG_FORMAT to receive the list of unfreed allocations.
    if (!m_Metadata->IsEmpty())
        m_Metadata->DebugLogAllAllocations();
    // This is the most important assert in the entire library.
    // Hitting it means you have some memory leak - unreleased virtual allocations.
    VMA_ASSERT(m_Metadata->IsEmpty() && "Some virtual allocations were not freed before destruction of this virtual block!");

    vma_delete(GetAllocationCallbacks(), m_Metadata);
}

const VkAllocationCallbacks* VmaVirtualBlock_T::GetAllocationCallbacks() const
{
    return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : VMA_NULL;
}

void VmaVirtualBlock_T::GetAllocationInfo(VmaVirtualAllocation allocation, VmaVirtualAllocationInfo& outInfo)
{
    m_Metadata->GetAllocationInfo((VmaAllocHandle)allocation, outInfo);
}

VkResult VmaVirtualBlock_T::Allocate(const VmaVirtualAllocationCreateInfo& createInfo, VmaVirtualAllocation& outAllocation,
    VkDeviceSize* outOffset)
{
    VmaAllocationRequest request = {};
    if (m_Metadata->CreateAllocationRequest(
        createInfo.size, // allocSize
        VMA_MAX(createInfo.alignment, (VkDeviceSize)1), // allocAlignment
        (createInfo.flags & VMA_VIRTUAL_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0, // upperAddress
        VMA_SUBALLOCATION_TYPE_UNKNOWN, // allocType - unimportant
        createInfo.flags & VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MASK, // strategy
        &request))
    {
        m_Metadata->Alloc(request,
            VMA_SUBALLOCATION_TYPE_UNKNOWN, // type - unimportant
            createInfo.pUserData);
        outAllocation = (VmaVirtualAllocation)request.allocHandle;
        if(outOffset)
            *outOffset = m_Metadata->GetAllocationOffset(request.allocHandle);
        return VK_SUCCESS;
    }
    outAllocation = (VmaVirtualAllocation)VK_NULL_HANDLE;
    if (outOffset)
        *outOffset = UINT64_MAX;
    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}

void VmaVirtualBlock_T::GetStatistics(VmaStatistics& outStats) const
{
    VmaClearStatistics(outStats);
    m_Metadata->AddStatistics(outStats);
}

void VmaVirtualBlock_T::CalculateDetailedStatistics(VmaDetailedStatistics& outStats) const
{
    VmaClearDetailedStatistics(outStats);
    m_Metadata->AddDetailedStatistics(outStats);
}

#if VMA_STATS_STRING_ENABLED
void VmaVirtualBlock_T::BuildStatsString(bool detailedMap, VmaStringBuilder& sb) const
{
    VmaJsonWriter json(GetAllocationCallbacks(), sb);
    json.BeginObject();

    VmaDetailedStatistics stats;
    CalculateDetailedStatistics(stats);

    json.WriteString("Stats");
    VmaPrintDetailedStatistics(json, stats);

    if (detailedMap)
    {
        json.WriteString("Details");
        json.BeginObject();
        m_Metadata->PrintDetailedMap(json);
        json.EndObject();
    }

    json.EndObject();
}
#endif // VMA_STATS_STRING_ENABLED
#endif // _VMA_VIRTUAL_BLOCK_T_FUNCTIONS
#endif // _VMA_VIRTUAL_BLOCK_T
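
/*
VmaVirtualBlock_T implements the "virtual allocator": the same suballocation
algorithms, but with no VkDeviceMemory behind them, so the returned offsets
can subdivide any resource of the caller's choosing. A minimal sketch of the
public API (sizes are arbitrary):

    VmaVirtualBlockCreateInfo blockCreateInfo = {};
    blockCreateInfo.size = 1024 * 1024; // 1 MiB of virtual space

    VmaVirtualBlock block;
    VkResult res = vmaCreateVirtualBlock(&blockCreateInfo, &block);

    VmaVirtualAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.size = 4096;

    VmaVirtualAllocation alloc;
    VkDeviceSize offset;
    res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, &offset);
    // On success, [offset, offset + 4096) is reserved inside the block.

    vmaVirtualFree(block, alloc);
    vmaDestroyVirtualBlock(block);
*/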


// Main allocator object.
struct VmaAllocator_T
{
    VMA_CLASS_NO_COPY_NO_MOVE(VmaAllocator_T)
public:
    bool m_UseMutex;
    uint32_t m_VulkanApiVersion;
    bool m_UseKhrDedicatedAllocation; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
    bool m_UseKhrBindMemory2; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
    bool m_UseExtMemoryBudget;
    bool m_UseAmdDeviceCoherentMemory;
    bool m_UseKhrBufferDeviceAddress;
    bool m_UseExtMemoryPriority;
    VkDevice m_hDevice;
    VkInstance m_hInstance;
    bool m_AllocationCallbacksSpecified;
    VkAllocationCallbacks m_AllocationCallbacks;
    VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
    VmaAllocationObjectAllocator m_AllocationObjectAllocator;

    // Each bit (1 << i) is set if HeapSizeLimit is enabled for that heap, so we cannot allocate more than the heap size.
    uint32_t m_HeapSizeLimitMask;

    VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    VkPhysicalDeviceMemoryProperties m_MemProps;

    // Default pools.
    VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
    VmaDedicatedAllocationList m_DedicatedAllocations[VK_MAX_MEMORY_TYPES];

    VmaCurrentBudgetData m_Budget;
    VMA_ATOMIC_UINT32 m_DeviceMemoryCount; // Total number of VkDeviceMemory objects.

    VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
    ~VmaAllocator_T();

    const VkAllocationCallbacks* GetAllocationCallbacks() const
    {
        return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : VMA_NULL;
    }
    const VmaVulkanFunctions& GetVulkanFunctions() const
    {
        return m_VulkanFunctions;
    }

    VkPhysicalDevice GetPhysicalDevice() const { return m_PhysicalDevice; }

    VkDeviceSize GetBufferImageGranularity() const
    {
        return VMA_MAX(
            static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
            m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    }

    uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }

    uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    {
        VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
        return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    }
    // True when the specific memory type is HOST_VISIBLE but not HOST_COHERENT.
    bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    {
        return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
            VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    }
    // Minimum alignment for all allocations in a specific memory type.
    VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    {
        return IsMemoryTypeNonCoherent(memTypeIndex) ?
            VMA_MAX((VkDeviceSize)VMA_MIN_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
            (VkDeviceSize)VMA_MIN_ALIGNMENT;
    }

    bool IsIntegratedGpu() const
    {
        return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    }

    uint32_t GetGlobalMemoryTypeBits() const { return m_GlobalMemoryTypeBits; }

    void GetBufferMemoryRequirements(
        VkBuffer hBuffer,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;
    void GetImageMemoryRequirements(
        VkImage hImage,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;
    VkResult FindMemoryTypeIndex(
        uint32_t memoryTypeBits,
        const VmaAllocationCreateInfo* pAllocationCreateInfo,
        VkFlags bufImgUsage, // VkBufferCreateInfo::usage or VkImageCreateInfo::usage. UINT32_MAX if unknown.
        uint32_t* pMemoryTypeIndex) const;

    // Main allocation function.
    VkResult AllocateMemory(
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        VkFlags dedicatedBufferImageUsage, // UINT32_MAX if unknown.
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        size_t allocationCount,
        VmaAllocation* pAllocations);

    // Main deallocation function.
    void FreeMemory(
        size_t allocationCount,
        const VmaAllocation* pAllocations);

    void CalculateStatistics(VmaTotalStatistics* pStats);

    void GetHeapBudgets(
        VmaBudget* outBudgets, uint32_t firstHeap, uint32_t heapCount);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);

    VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    void DestroyPool(VmaPool pool);
    void GetPoolStatistics(VmaPool pool, VmaStatistics* pPoolStats);
    void CalculatePoolStatistics(VmaPool pool, VmaDetailedStatistics* pPoolStats);

    void SetCurrentFrameIndex(uint32_t frameIndex);
    uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }

    VkResult CheckPoolCorruption(VmaPool hPool);
    VkResult CheckCorruption(uint32_t memoryTypeBits);

    // Call to Vulkan function vkAllocateMemory with accompanying bookkeeping.
    VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    // Call to Vulkan function vkFreeMemory with accompanying bookkeeping.
    void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
    // Call to Vulkan function vkBindBufferMemory or vkBindBufferMemory2KHR.
    VkResult BindVulkanBuffer(
        VkDeviceMemory memory,
        VkDeviceSize memoryOffset,
        VkBuffer buffer,
        const void* pNext);
    // Call to Vulkan function vkBindImageMemory or vkBindImageMemory2KHR.
    VkResult BindVulkanImage(
        VkDeviceMemory memory,
        VkDeviceSize memoryOffset,
        VkImage image,
        const void* pNext);

    VkResult Map(VmaAllocation hAllocation, void** ppData);
    void Unmap(VmaAllocation hAllocation);

    VkResult BindBufferMemory(
        VmaAllocation hAllocation,
        VkDeviceSize allocationLocalOffset,
        VkBuffer hBuffer,
        const void* pNext);
    VkResult BindImageMemory(
        VmaAllocation hAllocation,
        VkDeviceSize allocationLocalOffset,
        VkImage hImage,
        const void* pNext);

    VkResult FlushOrInvalidateAllocation(
        VmaAllocation hAllocation,
        VkDeviceSize offset, VkDeviceSize size,
        VMA_CACHE_OPERATION op);
    VkResult FlushOrInvalidateAllocations(
        uint32_t allocationCount,
        const VmaAllocation* allocations,
        const VkDeviceSize* offsets, const VkDeviceSize* sizes,
        VMA_CACHE_OPERATION op);

    void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);

    /*
    Returns a bit mask of memory types that can support defragmentation on the GPU,
    as they support creation of the required buffer for copy operations.
    */
    uint32_t GetGpuDefragmentationMemoryTypeBits();

#if VMA_EXTERNAL_MEMORY
    VkExternalMemoryHandleTypeFlagsKHR GetExternalMemoryHandleTypeFlags(uint32_t memTypeIndex) const
    {
        return m_TypeExternalMemoryHandleTypes[memTypeIndex];
    }
#endif // #if VMA_EXTERNAL_MEMORY

private:
    VkDeviceSize m_PreferredLargeHeapBlockSize;

    VkPhysicalDevice m_PhysicalDevice;
    VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
    VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.
#if VMA_EXTERNAL_MEMORY
    VkExternalMemoryHandleTypeFlagsKHR m_TypeExternalMemoryHandleTypes[VK_MAX_MEMORY_TYPES];
#endif // #if VMA_EXTERNAL_MEMORY

    VMA_RW_MUTEX m_PoolsMutex;
    typedef VmaIntrusiveLinkedList<VmaPoolListItemTraits> PoolList;
    // Protected by m_PoolsMutex.
    PoolList m_Pools;
    uint32_t m_NextPoolId;

    VmaVulkanFunctions m_VulkanFunctions;

    // Global bit mask AND-ed with any memoryTypeBits to disallow certain memory types.
    uint32_t m_GlobalMemoryTypeBits;

    void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);

#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    void ImportVulkanFunctions_Static();
#endif

    void ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions);

#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
    void ImportVulkanFunctions_Dynamic();
#endif

    void ValidateVulkanFunctions();

    VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);

    VkResult AllocateMemoryOfType(
        VmaPool pool,
        VkDeviceSize size,
        VkDeviceSize alignment,
        bool dedicatedPreferred,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        VkFlags dedicatedBufferImageUsage,
        const VmaAllocationCreateInfo& createInfo,
        uint32_t memTypeIndex,
        VmaSuballocationType suballocType,
        VmaDedicatedAllocationList& dedicatedAllocations,
        VmaBlockVector& blockVector,
        size_t allocationCount,
        VmaAllocation* pAllocations);

    // Helper function only to be used inside AllocateDedicatedMemory.
    VkResult AllocateDedicatedMemoryPage(
        VmaPool pool,
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        const VkMemoryAllocateInfo& allocInfo,
        bool map,
        bool isUserDataString,
        bool isMappingAllowed,
        void* pUserData,
        VmaAllocation* pAllocation);

    // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
    VkResult AllocateDedicatedMemory(
        VmaPool pool,
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        VmaDedicatedAllocationList& dedicatedAllocations,
        uint32_t memTypeIndex,
        bool map,
        bool isUserDataString,
        bool isMappingAllowed,
        bool canAliasMemory,
        void* pUserData,
        float priority,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        VkFlags dedicatedBufferImageUsage,
        size_t allocationCount,
        VmaAllocation* pAllocations,
        const void* pNextChain = nullptr);

    void FreeDedicatedMemory(const VmaAllocation allocation);

    VkResult CalcMemTypeParams(
        VmaAllocationCreateInfo& outCreateInfo,
        uint32_t memTypeIndex,
        VkDeviceSize size,
        size_t allocationCount);
    VkResult CalcAllocationParams(
        VmaAllocationCreateInfo& outCreateInfo,
        bool dedicatedRequired,
        bool dedicatedPreferred);

    /*
    Calculates and returns a bit mask of memory types that can support defragmentation
    on the GPU, as they support creation of the required buffer for copy operations.
    */
    uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
    uint32_t CalculateGlobalMemoryTypeBits() const;

    bool GetFlushOrInvalidateRange(
        VmaAllocation allocation,
        VkDeviceSize offset, VkDeviceSize size,
        VkMappedMemoryRange& outRange) const;

#if VMA_MEMORY_BUDGET
    void UpdateVulkanBudget();
#endif // #if VMA_MEMORY_BUDGET
};
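
/*
A minimal sketch of creating the allocator object declared above through the
public API; `physicalDevice`, `device` and `instance` are assumed to be valid
handles created by the application beforehand:

    VmaAllocatorCreateInfo allocatorCreateInfo = {};
    allocatorCreateInfo.vulkanApiVersion = VK_API_VERSION_1_2;
    allocatorCreateInfo.physicalDevice = physicalDevice;
    allocatorCreateInfo.device = device;
    allocatorCreateInfo.instance = instance;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorCreateInfo, &allocator);
    // ...
    vmaDestroyAllocator(allocator);
*/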


#ifndef _VMA_MEMORY_FUNCTIONS
static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
{
    return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
}

static void VmaFree(VmaAllocator hAllocator, void* ptr)
{
    VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
}

template<typename T>
static T* VmaAllocate(VmaAllocator hAllocator)
{
    return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
}

template<typename T>
static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
{
    return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
}

template<typename T>
static void vma_delete(VmaAllocator hAllocator, T* ptr)
{
    if(ptr != VMA_NULL)
    {
        ptr->~T();
        VmaFree(hAllocator, ptr);
    }
}

template<typename T>
static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        for(size_t i = count; i--; )
            ptr[i].~T();
        VmaFree(hAllocator, ptr);
    }
}
#endif // _VMA_MEMORY_FUNCTIONS

#ifndef _VMA_DEVICE_MEMORY_BLOCK_FUNCTIONS
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator)
    : m_pMetadata(VMA_NULL),
    m_MemoryTypeIndex(UINT32_MAX),
    m_Id(0),
    m_hMemory(VK_NULL_HANDLE),
    m_MapCount(0),
    m_pMappedData(VMA_NULL) {}

VmaDeviceMemoryBlock::~VmaDeviceMemoryBlock()
{
    VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
    VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
}

void VmaDeviceMemoryBlock::Init(
    VmaAllocator hAllocator,
    VmaPool hParentPool,
    uint32_t newMemoryTypeIndex,
    VkDeviceMemory newMemory,
    VkDeviceSize newSize,
    uint32_t id,
    uint32_t algorithm,
    VkDeviceSize bufferImageGranularity)
{
    VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);

    m_hParentPool = hParentPool;
    m_MemoryTypeIndex = newMemoryTypeIndex;
    m_Id = id;
    m_hMemory = newMemory;

    switch (algorithm)
    {
    case 0:
        m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_TLSF)(hAllocator->GetAllocationCallbacks(),
            bufferImageGranularity, false); // isVirtual
        break;
    case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
        m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator->GetAllocationCallbacks(),
            bufferImageGranularity, false); // isVirtual
        break;
    default:
        VMA_ASSERT(0);
        m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_TLSF)(hAllocator->GetAllocationCallbacks(),
            bufferImageGranularity, false); // isVirtual
    }
    m_pMetadata->Init(newSize);
}

void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
{
    // Define the macro VMA_DEBUG_LOG_FORMAT to receive the list of unfreed allocations.
    if (!m_pMetadata->IsEmpty())
        m_pMetadata->DebugLogAllAllocations();
    // This is the most important assert in the entire library.
    // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");

    VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    m_hMemory = VK_NULL_HANDLE;

    vma_delete(allocator, m_pMetadata);
    m_pMetadata = VMA_NULL;
}

void VmaDeviceMemoryBlock::PostAlloc(VmaAllocator hAllocator)
{
    VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
    m_MappingHysteresis.PostAlloc();
}

void VmaDeviceMemoryBlock::PostFree(VmaAllocator hAllocator)
{
    VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
    if(m_MappingHysteresis.PostFree())
    {
        VMA_ASSERT(m_MappingHysteresis.GetExtraMapping() == 0);
        if (m_MapCount == 0)
        {
            m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
        }
    }
}

bool VmaDeviceMemoryBlock::Validate() const
{
    VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
        (m_pMetadata->GetSize() != 0));

    return m_pMetadata->Validate();
}

VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
{
    void* pData = nullptr;
    VkResult res = Map(hAllocator, 1, &pData);
    if (res != VK_SUCCESS)
    {
        return res;
    }

    res = m_pMetadata->CheckCorruption(pData);

    Unmap(hAllocator, 1);

    return res;
}

VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
{
    if (count == 0)
    {
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
    const uint32_t oldTotalMapCount = m_MapCount + m_MappingHysteresis.GetExtraMapping();
    m_MappingHysteresis.PostMap();
    if (oldTotalMapCount != 0)
    {
        m_MapCount += count;
        VMA_ASSERT(m_pMappedData != VMA_NULL);
        if (ppData != VMA_NULL)
        {
            *ppData = m_pMappedData;
        }
        return VK_SUCCESS;
    }
    else
    {
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            &m_pMappedData);
        if (result == VK_SUCCESS)
        {
            if (ppData != VMA_NULL)
            {
                *ppData = m_pMappedData;
            }
            m_MapCount = count;
        }
        return result;
    }
}

void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
{
    if (count == 0)
    {
        return;
    }

    VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
    if (m_MapCount >= count)
    {
        m_MapCount -= count;
        const uint32_t totalMapCount = m_MapCount + m_MappingHysteresis.GetExtraMapping();
        if (totalMapCount == 0)
        {
            m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
        }
        m_MappingHysteresis.PostUnmap();
    }
    else
    {
        VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    }
}
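
/*
Map()/Unmap() reference-count mappings of the whole VkDeviceMemory block, so
several allocations within one block can be mapped concurrently, and
m_MappingHysteresis can keep the block mapped across unmap/map cycles. On the
application side this surfaces as the public pair sketched below; `allocator`,
`allocation`, `srcData` and `dataSize` are assumed to exist in the caller:

    void* mappedData;
    VkResult res = vmaMapMemory(allocator, allocation, &mappedData);
    if (res == VK_SUCCESS)
    {
        memcpy(mappedData, srcData, (size_t)dataSize);
        vmaUnmapMemory(allocator, allocation);
    }
*/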

VkResult VmaDeviceMemoryBlock::WriteMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
{
    VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);

    void* pData;
    VkResult res = Map(hAllocator, 1, &pData);
    if (res != VK_SUCCESS)
    {
        return res;
    }

    VmaWriteMagicValue(pData, allocOffset + allocSize);

    Unmap(hAllocator, 1);
    return VK_SUCCESS;
}

VkResult VmaDeviceMemoryBlock::ValidateMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
{
    VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);

    void* pData;
    VkResult res = Map(hAllocator, 1, &pData);
    if (res != VK_SUCCESS)
    {
        return res;
    }

    if (!VmaValidateMagicValue(pData, allocOffset + allocSize))
    {
        VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
    }

    Unmap(hAllocator, 1);
    return VK_SUCCESS;
}
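
/*
The two functions above implement margin-based corruption detection: a magic
value is written just past the end of each allocation and re-checked when the
allocation is freed. A minimal sketch of enabling it before including this
header (the margin value is an example; it must be a multiple of 4):

    #define VMA_DEBUG_MARGIN 16
    #define VMA_DEBUG_DETECT_CORRUPTION 1
    #include "vk_mem_alloc.h"

    // Later, validate the margins of all allocations in the memory types
    // selected by the bit mask (UINT32_MAX = all types):
    VkResult res = vmaCheckCorruption(allocator, UINT32_MAX);
*/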
11949 
BindBufferMemory(const VmaAllocator hAllocator,const VmaAllocation hAllocation,VkDeviceSize allocationLocalOffset,VkBuffer hBuffer,const void * pNext)11950 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
11951     const VmaAllocator hAllocator,
11952     const VmaAllocation hAllocation,
11953     VkDeviceSize allocationLocalOffset,
11954     VkBuffer hBuffer,
11955     const void* pNext)
11956 {
11957     VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11958         hAllocation->GetBlock() == this);
11959     VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
11960         "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
11961     const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
11962     // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11963     VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
11964     return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext);
11965 }
11966 
BindImageMemory(const VmaAllocator hAllocator,const VmaAllocation hAllocation,VkDeviceSize allocationLocalOffset,VkImage hImage,const void * pNext)11967 VkResult VmaDeviceMemoryBlock::BindImageMemory(
11968     const VmaAllocator hAllocator,
11969     const VmaAllocation hAllocation,
11970     VkDeviceSize allocationLocalOffset,
11971     VkImage hImage,
11972     const void* pNext)
11973 {
11974     VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11975         hAllocation->GetBlock() == this);
11976     VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
11977         "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
11978     const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
11979     // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11980     VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
11981     return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext);
11982 }
11983 #endif // _VMA_DEVICE_MEMORY_BLOCK_FUNCTIONS
11984 
11985 #ifndef _VMA_ALLOCATION_T_FUNCTIONS
VmaAllocation_T(bool mappingAllowed)11986 VmaAllocation_T::VmaAllocation_T(bool mappingAllowed)
11987     : m_Alignment{ 1 },
11988     m_Size{ 0 },
11989     m_pUserData{ VMA_NULL },
11990     m_pName{ VMA_NULL },
11991     m_MemoryTypeIndex{ 0 },
11992     m_Type{ (uint8_t)ALLOCATION_TYPE_NONE },
11993     m_SuballocationType{ (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN },
11994     m_MapCount{ 0 },
11995     m_Flags{ 0 }
11996 {
11997     if(mappingAllowed)
11998         m_Flags |= (uint8_t)FLAG_MAPPING_ALLOWED;
11999 
12000 #if VMA_STATS_STRING_ENABLED
12001     m_BufferImageUsage = 0;
12002 #endif
12003 }
12004 
~VmaAllocation_T()12005 VmaAllocation_T::~VmaAllocation_T()
12006 {
12007     VMA_ASSERT(m_MapCount == 0 && "Allocation was not unmapped before destruction.");
12008 
12009     // Check if owned string was freed.
12010     VMA_ASSERT(m_pName == VMA_NULL);
12011 }
12012 
InitBlockAllocation(VmaDeviceMemoryBlock * block,VmaAllocHandle allocHandle,VkDeviceSize alignment,VkDeviceSize size,uint32_t memoryTypeIndex,VmaSuballocationType suballocationType,bool mapped)12013 void VmaAllocation_T::InitBlockAllocation(
12014     VmaDeviceMemoryBlock* block,
12015     VmaAllocHandle allocHandle,
12016     VkDeviceSize alignment,
12017     VkDeviceSize size,
12018     uint32_t memoryTypeIndex,
12019     VmaSuballocationType suballocationType,
12020     bool mapped)
12021 {
12022     VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
12023     VMA_ASSERT(block != VMA_NULL);
12024     m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
12025     m_Alignment = alignment;
12026     m_Size = size;
12027     m_MemoryTypeIndex = memoryTypeIndex;
12028     if(mapped)
12029     {
12030         VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it.");
12031         m_Flags |= (uint8_t)FLAG_PERSISTENT_MAP;
12032     }
12033     m_SuballocationType = (uint8_t)suballocationType;
12034     m_BlockAllocation.m_Block = block;
12035     m_BlockAllocation.m_AllocHandle = allocHandle;
12036 }
12037 
InitDedicatedAllocation(VmaPool hParentPool,uint32_t memoryTypeIndex,VkDeviceMemory hMemory,VmaSuballocationType suballocationType,void * pMappedData,VkDeviceSize size)12038 void VmaAllocation_T::InitDedicatedAllocation(
12039     VmaPool hParentPool,
12040     uint32_t memoryTypeIndex,
12041     VkDeviceMemory hMemory,
12042     VmaSuballocationType suballocationType,
12043     void* pMappedData,
12044     VkDeviceSize size)
12045 {
12046     VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
12047     VMA_ASSERT(hMemory != VK_NULL_HANDLE);
12048     m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
12049     m_Alignment = 0;
12050     m_Size = size;
12051     m_MemoryTypeIndex = memoryTypeIndex;
12052     m_SuballocationType = (uint8_t)suballocationType;
12053     if(pMappedData != VMA_NULL)
12054     {
12055         VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it.");
12056         m_Flags |= (uint8_t)FLAG_PERSISTENT_MAP;
12057     }
12058     m_DedicatedAllocation.m_hParentPool = hParentPool;
12059     m_DedicatedAllocation.m_hMemory = hMemory;
12060     m_DedicatedAllocation.m_pMappedData = pMappedData;
12061     m_DedicatedAllocation.m_Prev = VMA_NULL;
12062     m_DedicatedAllocation.m_Next = VMA_NULL;
12063 }
12064 
12065 void VmaAllocation_T::SetName(VmaAllocator hAllocator, const char* pName)
12066 {
12067     VMA_ASSERT(pName == VMA_NULL || pName != m_pName);
12068 
12069     FreeName(hAllocator);
12070 
12071     if (pName != VMA_NULL)
12072         m_pName = VmaCreateStringCopy(hAllocator->GetAllocationCallbacks(), pName);
12073 }
12074 
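// Illustrative sketch (not part of the library): SetName() above is the
// backend of the public vmaSetAllocationName() entry point. The string is
// copied using the allocator's allocation callbacks, so the caller's buffer
// need not outlive the call. The name used below is hypothetical.
#if 0 // Example only - excluded from compilation.
static void ExampleNameAllocation(VmaAllocator allocator, VmaAllocation allocation)
{
    const char szName[] = "MyTexture_Staging"; // Copied internally by VMA.
    vmaSetAllocationName(allocator, allocation, szName);
}
#endif
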
12075 uint8_t VmaAllocation_T::SwapBlockAllocation(VmaAllocator hAllocator, VmaAllocation allocation)
12076 {
12077     VMA_ASSERT(allocation != VMA_NULL);
12078     VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
12079     VMA_ASSERT(allocation->m_Type == ALLOCATION_TYPE_BLOCK);
12080 
12081     if (m_MapCount != 0)
12082         m_BlockAllocation.m_Block->Unmap(hAllocator, m_MapCount);
12083 
12084     m_BlockAllocation.m_Block->m_pMetadata->SetAllocationUserData(m_BlockAllocation.m_AllocHandle, allocation);
12085     VMA_SWAP(m_BlockAllocation, allocation->m_BlockAllocation);
12086     m_BlockAllocation.m_Block->m_pMetadata->SetAllocationUserData(m_BlockAllocation.m_AllocHandle, this);
12087 
12088 #if VMA_STATS_STRING_ENABLED
12089     VMA_SWAP(m_BufferImageUsage, allocation->m_BufferImageUsage);
12090 #endif
12091     return m_MapCount;
12092 }
12093 
12094 VmaAllocHandle VmaAllocation_T::GetAllocHandle() const
12095 {
12096     switch (m_Type)
12097     {
12098     case ALLOCATION_TYPE_BLOCK:
12099         return m_BlockAllocation.m_AllocHandle;
12100     case ALLOCATION_TYPE_DEDICATED:
12101         return VK_NULL_HANDLE;
12102     default:
12103         VMA_ASSERT(0);
12104         return VK_NULL_HANDLE;
12105     }
12106 }
12107 
12108 VkDeviceSize VmaAllocation_T::GetOffset() const
12109 {
12110     switch (m_Type)
12111     {
12112     case ALLOCATION_TYPE_BLOCK:
12113         return m_BlockAllocation.m_Block->m_pMetadata->GetAllocationOffset(m_BlockAllocation.m_AllocHandle);
12114     case ALLOCATION_TYPE_DEDICATED:
12115         return 0;
12116     default:
12117         VMA_ASSERT(0);
12118         return 0;
12119     }
12120 }
12121 
12122 VmaPool VmaAllocation_T::GetParentPool() const
12123 {
12124     switch (m_Type)
12125     {
12126     case ALLOCATION_TYPE_BLOCK:
12127         return m_BlockAllocation.m_Block->GetParentPool();
12128     case ALLOCATION_TYPE_DEDICATED:
12129         return m_DedicatedAllocation.m_hParentPool;
12130     default:
12131         VMA_ASSERT(0);
12132         return VK_NULL_HANDLE;
12133     }
12134 }
12135 
12136 VkDeviceMemory VmaAllocation_T::GetMemory() const
12137 {
12138     switch (m_Type)
12139     {
12140     case ALLOCATION_TYPE_BLOCK:
12141         return m_BlockAllocation.m_Block->GetDeviceMemory();
12142     case ALLOCATION_TYPE_DEDICATED:
12143         return m_DedicatedAllocation.m_hMemory;
12144     default:
12145         VMA_ASSERT(0);
12146         return VK_NULL_HANDLE;
12147     }
12148 }
12149 
12150 void* VmaAllocation_T::GetMappedData() const
12151 {
12152     switch (m_Type)
12153     {
12154     case ALLOCATION_TYPE_BLOCK:
12155         if (m_MapCount != 0 || IsPersistentMap())
12156         {
12157             void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
12158             VMA_ASSERT(pBlockData != VMA_NULL);
12159             return (char*)pBlockData + GetOffset();
12160         }
12161         else
12162         {
12163             return VMA_NULL;
12164         }
12165         break;
12166     case ALLOCATION_TYPE_DEDICATED:
12167         VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0 || IsPersistentMap()));
12168         return m_DedicatedAllocation.m_pMappedData;
12169     default:
12170         VMA_ASSERT(0);
12171         return VMA_NULL;
12172     }
12173 }
12174 
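// Illustrative sketch (not part of the library): the persistent-map branch of
// GetMappedData() above is what a caller observes after creating an allocation
// with VMA_ALLOCATION_CREATE_MAPPED_BIT - VmaAllocationInfo::pMappedData then
// stays valid for the allocation's whole lifetime. Buffer parameters below are
// hypothetical.
#if 0 // Example only - excluded from compilation.
static void ExamplePersistentlyMappedBuffer(VmaAllocator allocator)
{
    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
        VMA_ALLOCATION_CREATE_MAPPED_BIT;

    VkBuffer buf;
    VmaAllocation alloc;
    VmaAllocationInfo allocInfo;
    if (vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo) == VK_SUCCESS)
    {
        // allocInfo.pMappedData is already valid - no vmaMapMemory() call needed.
        memset(allocInfo.pMappedData, 0, (size_t)allocInfo.size);
    }
}
#endif
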
12175 void VmaAllocation_T::BlockAllocMap()
12176 {
12177     VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
12178     VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it.");
12179 
12180     if (m_MapCount < 0xFF)
12181     {
12182         ++m_MapCount;
12183     }
12184     else
12185     {
12186         VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
12187     }
12188 }
12189 
12190 void VmaAllocation_T::BlockAllocUnmap()
12191 {
12192     VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
12193 
12194     if (m_MapCount > 0)
12195     {
12196         --m_MapCount;
12197     }
12198     else
12199     {
12200         VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
12201     }
12202 }
12203 
12204 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
12205 {
12206     VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
12207     VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it.");
12208 
12209     if (m_MapCount != 0 || IsPersistentMap())
12210     {
12211         if (m_MapCount < 0xFF)
12212         {
12213             VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
12214             *ppData = m_DedicatedAllocation.m_pMappedData;
12215             ++m_MapCount;
12216             return VK_SUCCESS;
12217         }
12218         else
12219         {
12220             VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
12221             return VK_ERROR_MEMORY_MAP_FAILED;
12222         }
12223     }
12224     else
12225     {
12226         VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
12227             hAllocator->m_hDevice,
12228             m_DedicatedAllocation.m_hMemory,
12229             0, // offset
12230             VK_WHOLE_SIZE,
12231             0, // flags
12232             ppData);
12233         if (result == VK_SUCCESS)
12234         {
12235             m_DedicatedAllocation.m_pMappedData = *ppData;
12236             m_MapCount = 1;
12237         }
12238         return result;
12239     }
12240 }
12241 
12242 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
12243 {
12244     VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
12245 
12246     if (m_MapCount > 0)
12247     {
12248         --m_MapCount;
12249         if (m_MapCount == 0 && !IsPersistentMap())
12250         {
12251             m_DedicatedAllocation.m_pMappedData = VMA_NULL;
12252             (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
12253                 hAllocator->m_hDevice,
12254                 m_DedicatedAllocation.m_hMemory);
12255         }
12256     }
12257     else
12258     {
12259         VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
12260     }
12261 }
12262 
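// Illustrative sketch (not part of the library): the map-count bookkeeping in
// the Map/Unmap functions above is what makes the public
// vmaMapMemory()/vmaUnmapMemory() pair reference-counted - nested calls on the
// same allocation are legal as long as they are balanced.
#if 0 // Example only - excluded from compilation.
static void ExampleMapWrite(VmaAllocator allocator, VmaAllocation allocation,
    const void* pSrcData, size_t srcDataSize)
{
    void* pData = VMA_NULL;
    if (vmaMapMemory(allocator, allocation, &pData) == VK_SUCCESS) // Increments the map count.
    {
        memcpy(pData, pSrcData, srcDataSize);
        vmaUnmapMemory(allocator, allocation); // Decrements the map count.
    }
}
#endif
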
12263 #if VMA_STATS_STRING_ENABLED
12264 void VmaAllocation_T::InitBufferImageUsage(uint32_t bufferImageUsage)
12265 {
12266     VMA_ASSERT(m_BufferImageUsage == 0);
12267     m_BufferImageUsage = bufferImageUsage;
12268 }
12269 
12270 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
12271 {
12272     json.WriteString("Type");
12273     json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
12274 
12275     json.WriteString("Size");
12276     json.WriteNumber(m_Size);
12277     json.WriteString("Usage");
12278     json.WriteNumber(m_BufferImageUsage);
12279 
12280     if (m_pUserData != VMA_NULL)
12281     {
12282         json.WriteString("CustomData");
12283         json.BeginString();
12284         json.ContinueString_Pointer(m_pUserData);
12285         json.EndString();
12286     }
12287     if (m_pName != VMA_NULL)
12288     {
12289         json.WriteString("Name");
12290         json.WriteString(m_pName);
12291     }
12292 }
12293 #endif // VMA_STATS_STRING_ENABLED
12294 
12295 void VmaAllocation_T::FreeName(VmaAllocator hAllocator)
12296 {
12297     if(m_pName)
12298     {
12299         VmaFreeString(hAllocator->GetAllocationCallbacks(), m_pName);
12300         m_pName = VMA_NULL;
12301     }
12302 }
12303 #endif // _VMA_ALLOCATION_T_FUNCTIONS
12304 
12305 #ifndef _VMA_BLOCK_VECTOR_FUNCTIONS
12306 VmaBlockVector::VmaBlockVector(
12307     VmaAllocator hAllocator,
12308     VmaPool hParentPool,
12309     uint32_t memoryTypeIndex,
12310     VkDeviceSize preferredBlockSize,
12311     size_t minBlockCount,
12312     size_t maxBlockCount,
12313     VkDeviceSize bufferImageGranularity,
12314     bool explicitBlockSize,
12315     uint32_t algorithm,
12316     float priority,
12317     VkDeviceSize minAllocationAlignment,
12318     void* pMemoryAllocateNext)
12319     : m_hAllocator(hAllocator),
12320     m_hParentPool(hParentPool),
12321     m_MemoryTypeIndex(memoryTypeIndex),
12322     m_PreferredBlockSize(preferredBlockSize),
12323     m_MinBlockCount(minBlockCount),
12324     m_MaxBlockCount(maxBlockCount),
12325     m_BufferImageGranularity(bufferImageGranularity),
12326     m_ExplicitBlockSize(explicitBlockSize),
12327     m_Algorithm(algorithm),
12328     m_Priority(priority),
12329     m_MinAllocationAlignment(minAllocationAlignment),
12330     m_pMemoryAllocateNext(pMemoryAllocateNext),
12331     m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
12332     m_NextBlockId(0) {}
12333 
12334 VmaBlockVector::~VmaBlockVector()
12335 {
12336     for (size_t i = m_Blocks.size(); i--; )
12337     {
12338         m_Blocks[i]->Destroy(m_hAllocator);
12339         vma_delete(m_hAllocator, m_Blocks[i]);
12340     }
12341 }
12342 
12343 VkResult VmaBlockVector::CreateMinBlocks()
12344 {
12345     for (size_t i = 0; i < m_MinBlockCount; ++i)
12346     {
12347         VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
12348         if (res != VK_SUCCESS)
12349         {
12350             return res;
12351         }
12352     }
12353     return VK_SUCCESS;
12354 }
12355 
12356 void VmaBlockVector::AddStatistics(VmaStatistics& inoutStats)
12357 {
12358     VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12359 
12360     const size_t blockCount = m_Blocks.size();
12361     for (uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12362     {
12363         const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12364         VMA_ASSERT(pBlock);
12365         VMA_HEAVY_ASSERT(pBlock->Validate());
12366         pBlock->m_pMetadata->AddStatistics(inoutStats);
12367     }
12368 }
12369 
12370 void VmaBlockVector::AddDetailedStatistics(VmaDetailedStatistics& inoutStats)
12371 {
12372     VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12373 
12374     const size_t blockCount = m_Blocks.size();
12375     for (uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12376     {
12377         const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12378         VMA_ASSERT(pBlock);
12379         VMA_HEAVY_ASSERT(pBlock->Validate());
12380         pBlock->m_pMetadata->AddDetailedStatistics(inoutStats);
12381     }
12382 }
12383 
12384 bool VmaBlockVector::IsEmpty()
12385 {
12386     VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12387     return m_Blocks.empty();
12388 }
12389 
12390 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
12391 {
12392     const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
12393     return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
12394         (VMA_DEBUG_MARGIN > 0) &&
12395         (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
12396         (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
12397 }
12398 
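// Illustrative sketch: corruption detection as tested above is opt-in. A user
// typically enables it by defining these macros before including this header
// in the implementation file (the values shown are an example configuration,
// not defaults), then calling vmaCheckCorruption() periodically:
#if 0 // Example only - excluded from compilation.
#define VMA_DEBUG_MARGIN 16
#define VMA_DEBUG_DETECT_CORRUPTION 1
#define VMA_IMPLEMENTATION
#include "vk_mem_alloc.h"
// Later, e.g. once per frame in debug builds:
//     VkResult res = vmaCheckCorruption(allocator, UINT32_MAX); // All memory types.
#endif
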
12399 VkResult VmaBlockVector::Allocate(
12400     VkDeviceSize size,
12401     VkDeviceSize alignment,
12402     const VmaAllocationCreateInfo& createInfo,
12403     VmaSuballocationType suballocType,
12404     size_t allocationCount,
12405     VmaAllocation* pAllocations)
12406 {
12407     size_t allocIndex;
12408     VkResult res = VK_SUCCESS;
12409 
12410     alignment = VMA_MAX(alignment, m_MinAllocationAlignment);
12411 
12412     if (IsCorruptionDetectionEnabled())
12413     {
12414         size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
12415         alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
12416     }
12417 
12418     {
12419         VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12420         for (allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
12421         {
12422             res = AllocatePage(
12423                 size,
12424                 alignment,
12425                 createInfo,
12426                 suballocType,
12427                 pAllocations + allocIndex);
12428             if (res != VK_SUCCESS)
12429             {
12430                 break;
12431             }
12432         }
12433     }
12434 
12435     if (res != VK_SUCCESS)
12436     {
12437         // Free all already created allocations.
12438         while (allocIndex--)
12439             Free(pAllocations[allocIndex]);
12440         memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
12441     }
12442 
12443     return res;
12444 }
12445 
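// Illustrative sketch (not part of the library): Allocate() above implements
// the all-or-nothing contract behind the public vmaAllocateMemoryPages() - if
// any page fails, the pages already created are freed and the output array is
// zeroed. The memory requirements below are hypothetical example values.
#if 0 // Example only - excluded from compilation.
static VkResult ExampleAllocatePages(VmaAllocator allocator, VmaAllocation outAllocs[8])
{
    VkMemoryRequirements memReq = {};
    memReq.size = 1ull << 20; // 1 MiB per page.
    memReq.alignment = 256;
    memReq.memoryTypeBits = UINT32_MAX;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;

    // Either all 8 allocations succeed, or none are returned.
    return vmaAllocateMemoryPages(allocator, &memReq, &allocCreateInfo, 8, outAllocs, VMA_NULL);
}
#endif
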
12446 VkResult VmaBlockVector::AllocatePage(
12447     VkDeviceSize size,
12448     VkDeviceSize alignment,
12449     const VmaAllocationCreateInfo& createInfo,
12450     VmaSuballocationType suballocType,
12451     VmaAllocation* pAllocation)
12452 {
12453     const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
12454 
12455     VkDeviceSize freeMemory;
12456     {
12457         const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
12458         VmaBudget heapBudget = {};
12459         m_hAllocator->GetHeapBudgets(&heapBudget, heapIndex, 1);
12460         freeMemory = (heapBudget.usage < heapBudget.budget) ? (heapBudget.budget - heapBudget.usage) : 0;
12461     }
12462 
12463     const bool canFallbackToDedicated = !HasExplicitBlockSize() &&
12464         (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0;
12465     const bool canCreateNewBlock =
12466         ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
12467         (m_Blocks.size() < m_MaxBlockCount) &&
12468         (freeMemory >= size || !canFallbackToDedicated);
12469     uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
12470 
12471     // Upper address can only be used with the linear allocator and within a single memory block.
12472     if (isUpperAddress &&
12473         (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
12474     {
12475         return VK_ERROR_FEATURE_NOT_PRESENT;
12476     }
12477 
12478     // Early reject: requested allocation size is larger than the maximum block size for this block vector.
12479     if (size + VMA_DEBUG_MARGIN > m_PreferredBlockSize)
12480     {
12481         return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12482     }
12483 
12484     // 1. Search existing allocations. Try to allocate.
12485     if (m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
12486     {
12487         // Use only last block.
12488         if (!m_Blocks.empty())
12489         {
12490             VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
12491             VMA_ASSERT(pCurrBlock);
12492             VkResult res = AllocateFromBlock(
12493                 pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
12494             if (res == VK_SUCCESS)
12495             {
12496                 VMA_DEBUG_LOG_FORMAT("    Returned from last block #%u", pCurrBlock->GetId());
12497                 IncrementallySortBlocks();
12498                 return VK_SUCCESS;
12499             }
12500         }
12501     }
12502     else
12503     {
12504         if (strategy != VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT) // MIN_MEMORY or default
12505         {
12506             const bool isHostVisible =
12507                 (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
12508             if(isHostVisible)
12509             {
12510                 const bool isMappingAllowed = (createInfo.flags &
12511                     (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0;
12512                 /*
12513                 For non-mappable allocations, check blocks that are not mapped first.
12514                 For mappable allocations, check blocks that are already mapped first.
12515                 This way, having many blocks, we will separate mappable and non-mappable allocations,
12516                 hopefully limiting the number of blocks that are mapped, which will help tools like RenderDoc.
12517                 */
12518                 for(size_t mappingI = 0; mappingI < 2; ++mappingI)
12519                 {
12520                     // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
12521                     for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12522                     {
12523                         VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12524                         VMA_ASSERT(pCurrBlock);
12525                         const bool isBlockMapped = pCurrBlock->GetMappedData() != VMA_NULL;
12526                         if((mappingI == 0) == (isMappingAllowed == isBlockMapped))
12527                         {
12528                             VkResult res = AllocateFromBlock(
12529                                 pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
12530                             if (res == VK_SUCCESS)
12531                             {
12532                                 VMA_DEBUG_LOG_FORMAT("    Returned from existing block #%u", pCurrBlock->GetId());
12533                                 IncrementallySortBlocks();
12534                                 return VK_SUCCESS;
12535                             }
12536                         }
12537                     }
12538                 }
12539             }
12540             else
12541             {
12542                 // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
12543                 for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12544                 {
12545                     VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12546                     VMA_ASSERT(pCurrBlock);
12547                     VkResult res = AllocateFromBlock(
12548                         pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
12549                     if (res == VK_SUCCESS)
12550                     {
12551                         VMA_DEBUG_LOG_FORMAT("    Returned from existing block #%u", pCurrBlock->GetId());
12552                         IncrementallySortBlocks();
12553                         return VK_SUCCESS;
12554                     }
12555                 }
12556             }
12557         }
12558         else // VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT
12559         {
12560             // Backward order in m_Blocks - prefer blocks with largest amount of free space.
12561             for (size_t blockIndex = m_Blocks.size(); blockIndex--; )
12562             {
12563                 VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12564                 VMA_ASSERT(pCurrBlock);
12565                 VkResult res = AllocateFromBlock(pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
12566                 if (res == VK_SUCCESS)
12567                 {
12568                     VMA_DEBUG_LOG_FORMAT("    Returned from existing block #%u", pCurrBlock->GetId());
12569                     IncrementallySortBlocks();
12570                     return VK_SUCCESS;
12571                 }
12572             }
12573         }
12574     }
12575 
12576     // 2. Try to create new block.
12577     if (canCreateNewBlock)
12578     {
12579         // Calculate optimal size for new block.
12580         VkDeviceSize newBlockSize = m_PreferredBlockSize;
12581         uint32_t newBlockSizeShift = 0;
12582         const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
12583 
12584         if (!m_ExplicitBlockSize)
12585         {
12586             // Allocate 1/8, 1/4, 1/2 as first blocks.
12587             const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
12588             for (uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
12589             {
12590                 const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
12591                 if (smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
12592                 {
12593                     newBlockSize = smallerNewBlockSize;
12594                     ++newBlockSizeShift;
12595                 }
12596                 else
12597                 {
12598                     break;
12599                 }
12600             }
12601         }
12602 
12603         size_t newBlockIndex = 0;
12604         VkResult res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
12605             CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
12606         // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
12607         if (!m_ExplicitBlockSize)
12608         {
12609             while (res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
12610             {
12611                 const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
12612                 if (smallerNewBlockSize >= size)
12613                 {
12614                     newBlockSize = smallerNewBlockSize;
12615                     ++newBlockSizeShift;
12616                     res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
12617                         CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
12618                 }
12619                 else
12620                 {
12621                     break;
12622                 }
12623             }
12624         }
12625 
12626         if (res == VK_SUCCESS)
12627         {
12628             VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
12629             VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
12630 
12631             res = AllocateFromBlock(
12632                 pBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
12633             if (res == VK_SUCCESS)
12634             {
12635                 VMA_DEBUG_LOG_FORMAT("    Created new block #%u Size=%llu", pBlock->GetId(), newBlockSize);
12636                 IncrementallySortBlocks();
12637                 return VK_SUCCESS;
12638             }
12639             else
12640             {
12641                 // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
12642                 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12643             }
12644         }
12645     }
12646 
12647     return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12648 }
12649 
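// Illustrative sketch of the new-block sizing heuristic above (pure
// arithmetic, no VMA calls): for a non-explicit block size, the first blocks
// grow as 1/8, 1/4, 1/2 of the preferred size, and after a failed
// vkAllocateMemory the size is halved up to NEW_BLOCK_SIZE_SHIFT_MAX times
// while the request still fits.
#if 0 // Example only - excluded from compilation.
static VkDeviceSize ExampleInitialBlockSize(
    VkDeviceSize preferredBlockSize,     // e.g. 256 MiB by default.
    VkDeviceSize maxExistingBlockSize,   // Largest block already in the vector.
    VkDeviceSize requestSize)
{
    VkDeviceSize newBlockSize = preferredBlockSize;
    for (uint32_t i = 0; i < 3; ++i) // NEW_BLOCK_SIZE_SHIFT_MAX
    {
        const VkDeviceSize smaller = newBlockSize / 2;
        if (smaller > maxExistingBlockSize && smaller >= requestSize * 2)
            newBlockSize = smaller;
        else
            break;
    }
    // First block for a small request in an empty vector: 256 MiB / 8 = 32 MiB.
    return newBlockSize;
}
#endif
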
12650 void VmaBlockVector::Free(const VmaAllocation hAllocation)
12651 {
12652     VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
12653 
12654     bool budgetExceeded = false;
12655     {
12656         const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
12657         VmaBudget heapBudget = {};
12658         m_hAllocator->GetHeapBudgets(&heapBudget, heapIndex, 1);
12659         budgetExceeded = heapBudget.usage >= heapBudget.budget;
12660     }
12661 
12662     // Scope for lock.
12663     {
12664         VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12665 
12666         VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
12667 
12668         if (IsCorruptionDetectionEnabled())
12669         {
12670             VkResult res = pBlock->ValidateMagicValueAfterAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
12671             VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
12672         }
12673 
12674         if (hAllocation->IsPersistentMap())
12675         {
12676             pBlock->Unmap(m_hAllocator, 1);
12677         }
12678 
12679         const bool hadEmptyBlockBeforeFree = HasEmptyBlock();
12680         pBlock->m_pMetadata->Free(hAllocation->GetAllocHandle());
12681         pBlock->PostFree(m_hAllocator);
12682         VMA_HEAVY_ASSERT(pBlock->Validate());
12683 
12684         VMA_DEBUG_LOG_FORMAT("  Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
12685 
12686         const bool canDeleteBlock = m_Blocks.size() > m_MinBlockCount;
12687         // pBlock became empty after this deallocation.
12688         if (pBlock->m_pMetadata->IsEmpty())
12689         {
12690             // Already had empty block. We don't want to have two, so delete this one.
12691             if ((hadEmptyBlockBeforeFree || budgetExceeded) && canDeleteBlock)
12692             {
12693                 pBlockToDelete = pBlock;
12694                 Remove(pBlock);
12695             }
12696             // else: We now have one empty block - leave it. A hysteresis to avoid allocating whole block back and forth.
12697             // else: We now have one empty block - leave it. Hysteresis to avoid freeing and reallocating a whole block back and forth.
12698         // pBlock didn't become empty, but we have another empty block - find and free that one.
12699         // (This is optional - just a heuristic.)
12700         else if (hadEmptyBlockBeforeFree && canDeleteBlock)
12701         {
12702             VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
12703             if (pLastBlock->m_pMetadata->IsEmpty())
12704             {
12705                 pBlockToDelete = pLastBlock;
12706                 m_Blocks.pop_back();
12707             }
12708         }
12709 
12710         IncrementallySortBlocks();
12711     }
12712 
12713     // Destruction of a free block. Deferred until this point, outside of mutex
12714     // lock, for performance reasons.
12715     if (pBlockToDelete != VMA_NULL)
12716     {
12717         VMA_DEBUG_LOG_FORMAT("    Deleted empty block #%u", pBlockToDelete->GetId());
12718         pBlockToDelete->Destroy(m_hAllocator);
12719         vma_delete(m_hAllocator, pBlockToDelete);
12720     }
12721 
12722     m_hAllocator->m_Budget.RemoveAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), hAllocation->GetSize());
12723     m_hAllocator->m_AllocationObjectAllocator.Free(hAllocation);
12724 }
12725 
12726 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
12727 {
12728     VkDeviceSize result = 0;
12729     for (size_t i = m_Blocks.size(); i--; )
12730     {
12731         result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
12732         if (result >= m_PreferredBlockSize)
12733         {
12734             break;
12735         }
12736     }
12737     return result;
12738 }
12739 
12740 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
12741 {
12742     for (uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12743     {
12744         if (m_Blocks[blockIndex] == pBlock)
12745         {
12746             VmaVectorRemove(m_Blocks, blockIndex);
12747             return;
12748         }
12749     }
12750     VMA_ASSERT(0);
12751 }
12752 
12753 void VmaBlockVector::IncrementallySortBlocks()
12754 {
12755     if (!m_IncrementalSort)
12756         return;
12757     if (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
12758     {
12759         // Bubble sort only until first swap.
12760         for (size_t i = 1; i < m_Blocks.size(); ++i)
12761         {
12762             if (m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
12763             {
12764                 VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
12765                 return;
12766             }
12767         }
12768     }
12769 }
12770 
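// Illustrative sketch: the incremental sort above performs at most one swap
// per call, amortizing the cost of keeping m_Blocks ordered by free size
// across many allocations and frees instead of doing a full sort each time.
// A generic form of the same idea (assumes <vector> and <utility>):
#if 0 // Example only - excluded from compilation.
template<typename T, typename Less>
static void IncrementalSortStep(std::vector<T>& v, Less less)
{
    for (size_t i = 1; i < v.size(); ++i)
    {
        if (less(v[i], v[i - 1]))
        {
            std::swap(v[i - 1], v[i]); // Fix one inversion, then stop.
            return;
        }
    }
}
#endif
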
12771 void VmaBlockVector::SortByFreeSize()
12772 {
12773     VMA_SORT(m_Blocks.begin(), m_Blocks.end(),
12774         [](VmaDeviceMemoryBlock* b1, VmaDeviceMemoryBlock* b2) -> bool
12775         {
12776             return b1->m_pMetadata->GetSumFreeSize() < b2->m_pMetadata->GetSumFreeSize();
12777         });
12778 }
12779 
12780 VkResult VmaBlockVector::AllocateFromBlock(
12781     VmaDeviceMemoryBlock* pBlock,
12782     VkDeviceSize size,
12783     VkDeviceSize alignment,
12784     VmaAllocationCreateFlags allocFlags,
12785     void* pUserData,
12786     VmaSuballocationType suballocType,
12787     uint32_t strategy,
12788     VmaAllocation* pAllocation)
12789 {
12790     const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
12791 
12792     VmaAllocationRequest currRequest = {};
12793     if (pBlock->m_pMetadata->CreateAllocationRequest(
12794         size,
12795         alignment,
12796         isUpperAddress,
12797         suballocType,
12798         strategy,
12799         &currRequest))
12800     {
12801         return CommitAllocationRequest(currRequest, pBlock, alignment, allocFlags, pUserData, suballocType, pAllocation);
12802     }
12803     return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12804 }
12805 
12806 VkResult VmaBlockVector::CommitAllocationRequest(
12807     VmaAllocationRequest& allocRequest,
12808     VmaDeviceMemoryBlock* pBlock,
12809     VkDeviceSize alignment,
12810     VmaAllocationCreateFlags allocFlags,
12811     void* pUserData,
12812     VmaSuballocationType suballocType,
12813     VmaAllocation* pAllocation)
12814 {
12815     const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
12816     const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
12817     const bool isMappingAllowed = (allocFlags &
12818         (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0;
12819 
12820     pBlock->PostAlloc(m_hAllocator);
12821     // Allocate from pBlock.
12822     if (mapped)
12823     {
12824         VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
12825         if (res != VK_SUCCESS)
12826         {
12827             return res;
12828         }
12829     }
12830 
12831     *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(isMappingAllowed);
12832     pBlock->m_pMetadata->Alloc(allocRequest, suballocType, *pAllocation);
12833     (*pAllocation)->InitBlockAllocation(
12834         pBlock,
12835         allocRequest.allocHandle,
12836         alignment,
12837         allocRequest.size, // Not size, as actual allocation size may be larger than requested!
12838         m_MemoryTypeIndex,
12839         suballocType,
12840         mapped);
12841     VMA_HEAVY_ASSERT(pBlock->Validate());
12842     if (isUserDataString)
12843         (*pAllocation)->SetName(m_hAllocator, (const char*)pUserData);
12844     else
12845         (*pAllocation)->SetUserData(m_hAllocator, pUserData);
12846     m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), allocRequest.size);
12847     if (VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12848     {
12849         m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
12850     }
12851     if (IsCorruptionDetectionEnabled())
12852     {
12853         VkResult res = pBlock->WriteMagicValueAfterAllocation(m_hAllocator, (*pAllocation)->GetOffset(), allocRequest.size);
12854         VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
12855     }
12856     return VK_SUCCESS;
12857 }
12858 
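// Illustrative sketch: CommitAllocationRequest() above routes pUserData either
// to SetName() or to SetUserData() depending on
// VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT, so with that flag set the
// pointer is treated as a null-terminated string and copied:
#if 0 // Example only - excluded from compilation.
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT;
allocCreateInfo.pUserData = (void*)"Level1_Terrain"; // Copied; appears in JSON dumps as the name.
#endif
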
12859 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
12860 {
12861     VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
12862     allocInfo.pNext = m_pMemoryAllocateNext;
12863     allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
12864     allocInfo.allocationSize = blockSize;
12865 
12866 #if VMA_BUFFER_DEVICE_ADDRESS
12867     // Every standalone block can potentially contain a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT - always enable the feature.
12868     VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
12869     if (m_hAllocator->m_UseKhrBufferDeviceAddress)
12870     {
12871         allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
12872         VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
12873     }
12874 #endif // VMA_BUFFER_DEVICE_ADDRESS
12875 
12876 #if VMA_MEMORY_PRIORITY
12877     VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT };
12878     if (m_hAllocator->m_UseExtMemoryPriority)
12879     {
12880         VMA_ASSERT(m_Priority >= 0.f && m_Priority <= 1.f);
12881         priorityInfo.priority = m_Priority;
12882         VmaPnextChainPushFront(&allocInfo, &priorityInfo);
12883     }
12884 #endif // VMA_MEMORY_PRIORITY
12885 
12886 #if VMA_EXTERNAL_MEMORY
12887     // Attach VkExportMemoryAllocateInfoKHR if necessary.
12888     VkExportMemoryAllocateInfoKHR exportMemoryAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR };
12889     exportMemoryAllocInfo.handleTypes = m_hAllocator->GetExternalMemoryHandleTypeFlags(m_MemoryTypeIndex);
12890     if (exportMemoryAllocInfo.handleTypes != 0)
12891     {
12892         VmaPnextChainPushFront(&allocInfo, &exportMemoryAllocInfo);
12893     }
12894 #endif // VMA_EXTERNAL_MEMORY
12895 
12896     VkDeviceMemory mem = VK_NULL_HANDLE;
12897     VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
12898     if (res < 0)
12899     {
12900         return res;
12901     }
12902 
12903     // New VkDeviceMemory successfully created.
12904 
12905     // Create a new block object for it.
12906     VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
12907     pBlock->Init(
12908         m_hAllocator,
12909         m_hParentPool,
12910         m_MemoryTypeIndex,
12911         mem,
12912         allocInfo.allocationSize,
12913         m_NextBlockId++,
12914         m_Algorithm,
12915         m_BufferImageGranularity);
12916 
12917     m_Blocks.push_back(pBlock);
12918     if (pNewBlockIndex != VMA_NULL)
12919     {
12920         *pNewBlockIndex = m_Blocks.size() - 1;
12921     }
12922 
12923     return VK_SUCCESS;
12924 }
12925 
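// Illustrative sketch: the pNext chain assembled in CreateBlock() above is fed
// from pool/allocator configuration - e.g. VmaPoolCreateInfo::priority ends up
// in VkMemoryPriorityAllocateInfoEXT when VK_EXT_memory_priority is enabled.
// memoryTypeIndex is assumed to have been chosen beforehand.
#if 0 // Example only - excluded from compilation.
static VkResult ExampleCreateHighPriorityPool(
    VmaAllocator allocator, uint32_t memoryTypeIndex, VmaPool* pPool)
{
    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memoryTypeIndex;
    // Requires the allocator to be created with VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT.
    poolCreateInfo.priority = 1.0f;
    return vmaCreatePool(allocator, &poolCreateInfo, pPool);
}
#endif
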
12926 bool VmaBlockVector::HasEmptyBlock()
12927 {
12928     for (size_t index = 0, count = m_Blocks.size(); index < count; ++index)
12929     {
12930         VmaDeviceMemoryBlock* const pBlock = m_Blocks[index];
12931         if (pBlock->m_pMetadata->IsEmpty())
12932         {
12933             return true;
12934         }
12935     }
12936     return false;
12937 }
12938 
12939 #if VMA_STATS_STRING_ENABLED
12940 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
12941 {
12942     VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12943 
12944 
12945     json.BeginObject();
12946     for (size_t i = 0; i < m_Blocks.size(); ++i)
12947     {
12948         json.BeginString();
12949         json.ContinueString(m_Blocks[i]->GetId());
12950         json.EndString();
12951 
12952         json.BeginObject();
12953         json.WriteString("MapRefCount");
12954         json.WriteNumber(m_Blocks[i]->GetMapRefCount());
12955 
12956         m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
12957         json.EndObject();
12958     }
12959     json.EndObject();
12960 }
12961 #endif // VMA_STATS_STRING_ENABLED
12962 
12963 VkResult VmaBlockVector::CheckCorruption()
12964 {
12965     if (!IsCorruptionDetectionEnabled())
12966     {
12967         return VK_ERROR_FEATURE_NOT_PRESENT;
12968     }
12969 
12970     VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12971     for (uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12972     {
12973         VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12974         VMA_ASSERT(pBlock);
12975         VkResult res = pBlock->CheckCorruption(m_hAllocator);
12976         if (res != VK_SUCCESS)
12977         {
12978             return res;
12979         }
12980     }
12981     return VK_SUCCESS;
12982 }
12983 
12984 #endif // _VMA_BLOCK_VECTOR_FUNCTIONS
12985 
12986 #ifndef _VMA_DEFRAGMENTATION_CONTEXT_FUNCTIONS
12987 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
12988     VmaAllocator hAllocator,
12989     const VmaDefragmentationInfo& info)
12990     : m_MaxPassBytes(info.maxBytesPerPass == 0 ? VK_WHOLE_SIZE : info.maxBytesPerPass),
12991     m_MaxPassAllocations(info.maxAllocationsPerPass == 0 ? UINT32_MAX : info.maxAllocationsPerPass),
12992     m_BreakCallback(info.pfnBreakCallback),
12993     m_BreakCallbackUserData(info.pBreakCallbackUserData),
12994     m_MoveAllocator(hAllocator->GetAllocationCallbacks()),
12995     m_Moves(m_MoveAllocator)
12996 {
12997     m_Algorithm = info.flags & VMA_DEFRAGMENTATION_FLAG_ALGORITHM_MASK;
12998 
12999     if (info.pool != VMA_NULL)
13000     {
13001         m_BlockVectorCount = 1;
13002         m_PoolBlockVector = &info.pool->m_BlockVector;
13003         m_pBlockVectors = &m_PoolBlockVector;
13004         m_PoolBlockVector->SetIncrementalSort(false);
13005         m_PoolBlockVector->SortByFreeSize();
13006     }
13007     else
13008     {
13009         m_BlockVectorCount = hAllocator->GetMemoryTypeCount();
13010         m_PoolBlockVector = VMA_NULL;
13011         m_pBlockVectors = hAllocator->m_pBlockVectors;
13012         for (uint32_t i = 0; i < m_BlockVectorCount; ++i)
13013         {
13014             VmaBlockVector* vector = m_pBlockVectors[i];
13015             if (vector != VMA_NULL)
13016             {
13017                 vector->SetIncrementalSort(false);
13018                 vector->SortByFreeSize();
13019             }
13020         }
13021     }
13022 
13023     switch (m_Algorithm)
13024     {
13025     case 0: // Default algorithm
13026         m_Algorithm = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT;
13027         m_AlgorithmState = vma_new_array(hAllocator, StateBalanced, m_BlockVectorCount);
13028         break;
13029     case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT:
13030         m_AlgorithmState = vma_new_array(hAllocator, StateBalanced, m_BlockVectorCount);
13031         break;
13032     case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT:
13033         if (hAllocator->GetBufferImageGranularity() > 1)
13034         {
13035             m_AlgorithmState = vma_new_array(hAllocator, StateExtensive, m_BlockVectorCount);
13036         }
13037         break;
13038     }
13039 }
13040 
13041 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
13042 {
13043     if (m_PoolBlockVector != VMA_NULL)
13044     {
13045         m_PoolBlockVector->SetIncrementalSort(true);
13046     }
13047     else
13048     {
13049         for (uint32_t i = 0; i < m_BlockVectorCount; ++i)
13050         {
13051             VmaBlockVector* vector = m_pBlockVectors[i];
13052             if (vector != VMA_NULL)
13053                 vector->SetIncrementalSort(true);
13054         }
13055     }
13056 
13057     if (m_AlgorithmState)
13058     {
13059         switch (m_Algorithm)
13060         {
13061         case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT:
13062             vma_delete_array(m_MoveAllocator.m_pCallbacks, reinterpret_cast<StateBalanced*>(m_AlgorithmState), m_BlockVectorCount);
13063             break;
13064         case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT:
13065             vma_delete_array(m_MoveAllocator.m_pCallbacks, reinterpret_cast<StateExtensive*>(m_AlgorithmState), m_BlockVectorCount);
13066             break;
13067         default:
13068             VMA_ASSERT(0);
13069         }
13070     }
13071 }
13072 
13073 VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPassMoveInfo& moveInfo)
13074 {
13075     if (m_PoolBlockVector != VMA_NULL)
13076     {
13077         VmaMutexLockWrite lock(m_PoolBlockVector->GetMutex(), m_PoolBlockVector->GetAllocator()->m_UseMutex);
13078 
13079         if (m_PoolBlockVector->GetBlockCount() > 1)
13080             ComputeDefragmentation(*m_PoolBlockVector, 0);
13081         else if (m_PoolBlockVector->GetBlockCount() == 1)
13082             ReallocWithinBlock(*m_PoolBlockVector, m_PoolBlockVector->GetBlock(0));
13083     }
13084     else
13085     {
13086         for (uint32_t i = 0; i < m_BlockVectorCount; ++i)
13087         {
13088             if (m_pBlockVectors[i] != VMA_NULL)
13089             {
13090                 VmaMutexLockWrite lock(m_pBlockVectors[i]->GetMutex(), m_pBlockVectors[i]->GetAllocator()->m_UseMutex);
13091 
13092                 if (m_pBlockVectors[i]->GetBlockCount() > 1)
13093                 {
13094                     if (ComputeDefragmentation(*m_pBlockVectors[i], i))
13095                         break;
13096                 }
13097                 else if (m_pBlockVectors[i]->GetBlockCount() == 1)
13098                 {
13099                     if (ReallocWithinBlock(*m_pBlockVectors[i], m_pBlockVectors[i]->GetBlock(0)))
13100                         break;
13101                 }
13102             }
13103         }
13104     }
13105 
13106     moveInfo.moveCount = static_cast<uint32_t>(m_Moves.size());
13107     if (moveInfo.moveCount > 0)
13108     {
13109         moveInfo.pMoves = m_Moves.data();
13110         return VK_INCOMPLETE;
13111     }
13112 
13113     moveInfo.pMoves = VMA_NULL;
13114     return VK_SUCCESS;
13115 }
13116 
13117 VkResult VmaDefragmentationContext_T::DefragmentPassEnd(VmaDefragmentationPassMoveInfo& moveInfo)
13118 {
13119     VMA_ASSERT(moveInfo.moveCount > 0 ? moveInfo.pMoves != VMA_NULL : true);
13120 
13121     VkResult result = VK_SUCCESS;
13122     VmaStlAllocator<FragmentedBlock> blockAllocator(m_MoveAllocator.m_pCallbacks);
13123     VmaVector<FragmentedBlock, VmaStlAllocator<FragmentedBlock>> immovableBlocks(blockAllocator);
13124     VmaVector<FragmentedBlock, VmaStlAllocator<FragmentedBlock>> mappedBlocks(blockAllocator);
13125 
13126     VmaAllocator allocator = VMA_NULL;
13127     for (uint32_t i = 0; i < moveInfo.moveCount; ++i)
13128     {
13129         VmaDefragmentationMove& move = moveInfo.pMoves[i];
13130         size_t prevCount = 0, currentCount = 0;
13131         VkDeviceSize freedBlockSize = 0;
13132 
13133         uint32_t vectorIndex;
13134         VmaBlockVector* vector;
13135         if (m_PoolBlockVector != VMA_NULL)
13136         {
13137             vectorIndex = 0;
13138             vector = m_PoolBlockVector;
13139         }
13140         else
13141         {
13142             vectorIndex = move.srcAllocation->GetMemoryTypeIndex();
13143             vector = m_pBlockVectors[vectorIndex];
13144             VMA_ASSERT(vector != VMA_NULL);
13145         }
13146 
13147         switch (move.operation)
13148         {
13149         case VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY:
13150         {
13151             uint8_t mapCount = move.srcAllocation->SwapBlockAllocation(vector->m_hAllocator, move.dstTmpAllocation);
13152             if (mapCount > 0)
13153             {
13154                 allocator = vector->m_hAllocator;
13155                 VmaDeviceMemoryBlock* newMapBlock = move.srcAllocation->GetBlock();
13156                 bool notPresent = true;
13157                 for (FragmentedBlock& block : mappedBlocks)
13158                 {
13159                     if (block.block == newMapBlock)
13160                     {
13161                         notPresent = false;
13162                         block.data += mapCount;
13163                         break;
13164                     }
13165                 }
13166                 if (notPresent)
13167                     mappedBlocks.push_back({ mapCount, newMapBlock });
13168             }
13169 
13170             // Scope for locks; Free has its own lock.
13171             {
13172                 VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
13173                 prevCount = vector->GetBlockCount();
13174                 freedBlockSize = move.dstTmpAllocation->GetBlock()->m_pMetadata->GetSize();
13175             }
13176             vector->Free(move.dstTmpAllocation);
13177             {
13178                 VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
13179                 currentCount = vector->GetBlockCount();
13180             }
13181 
13182             result = VK_INCOMPLETE;
13183             break;
13184         }
13185         case VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE:
13186         {
13187             m_PassStats.bytesMoved -= move.srcAllocation->GetSize();
13188             --m_PassStats.allocationsMoved;
13189             vector->Free(move.dstTmpAllocation);
13190 
13191             VmaDeviceMemoryBlock* newBlock = move.srcAllocation->GetBlock();
13192             bool notPresent = true;
13193             for (const FragmentedBlock& block : immovableBlocks)
13194             {
13195                 if (block.block == newBlock)
13196                 {
13197                     notPresent = false;
13198                     break;
13199                 }
13200             }
13201             if (notPresent)
13202                 immovableBlocks.push_back({ vectorIndex, newBlock });
13203             break;
13204         }
13205         case VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY:
13206         {
13207             m_PassStats.bytesMoved -= move.srcAllocation->GetSize();
13208             --m_PassStats.allocationsMoved;
13209             // Scope for locks; Free has its own lock.
13210             {
13211                 VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
13212                 prevCount = vector->GetBlockCount();
13213                 freedBlockSize = move.srcAllocation->GetBlock()->m_pMetadata->GetSize();
13214             }
13215             vector->Free(move.srcAllocation);
13216             {
13217                 VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
13218                 currentCount = vector->GetBlockCount();
13219             }
13220             freedBlockSize *= prevCount - currentCount;
13221 
13222             VkDeviceSize dstBlockSize;
13223             {
13224                 VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
13225                 dstBlockSize = move.dstTmpAllocation->GetBlock()->m_pMetadata->GetSize();
13226             }
13227             vector->Free(move.dstTmpAllocation);
13228             {
13229                 VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
13230                 freedBlockSize += dstBlockSize * (currentCount - vector->GetBlockCount());
13231                 currentCount = vector->GetBlockCount();
13232             }
13233 
13234             result = VK_INCOMPLETE;
13235             break;
13236         }
13237         default:
13238             VMA_ASSERT(0);
13239         }
13240 
13241         if (prevCount > currentCount)
13242         {
13243             size_t freedBlocks = prevCount - currentCount;
13244             m_PassStats.deviceMemoryBlocksFreed += static_cast<uint32_t>(freedBlocks);
13245             m_PassStats.bytesFreed += freedBlockSize;
13246         }
13247 
13248         if(m_Algorithm == VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT &&
13249             m_AlgorithmState != VMA_NULL)
13250         {
13251             // Avoid unnecessary allocation attempts when a new free block is available.
13252             StateExtensive& state = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[vectorIndex];
13253             if (state.firstFreeBlock != SIZE_MAX)
13254             {
13255                 const size_t diff = prevCount - currentCount;
13256                 if (state.firstFreeBlock >= diff)
13257                 {
13258                     state.firstFreeBlock -= diff;
13259                     if (state.firstFreeBlock != 0)
13260                         state.firstFreeBlock -= vector->GetBlock(state.firstFreeBlock - 1)->m_pMetadata->IsEmpty();
13261                 }
13262                 else
13263                     state.firstFreeBlock = 0;
13264             }
13265         }
13266     }
13267     moveInfo.moveCount = 0;
13268     moveInfo.pMoves = VMA_NULL;
13269     m_Moves.clear();
13270 
13271     // Update stats
13272     m_GlobalStats.allocationsMoved += m_PassStats.allocationsMoved;
13273     m_GlobalStats.bytesFreed += m_PassStats.bytesFreed;
13274     m_GlobalStats.bytesMoved += m_PassStats.bytesMoved;
13275     m_GlobalStats.deviceMemoryBlocksFreed += m_PassStats.deviceMemoryBlocksFreed;
13276     m_PassStats = { 0 };
13277 
13278     // Move blocks with immovable allocations according to algorithm
13279     if (immovableBlocks.size() > 0)
13280     {
13281         do
13282         {
13283             if(m_Algorithm == VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT)
13284             {
13285                 if (m_AlgorithmState != VMA_NULL)
13286                 {
13287                     bool swapped = false;
13288                     // Move to the start of the free-block range.
13289                     for (const FragmentedBlock& block : immovableBlocks)
13290                     {
13291                         StateExtensive& state = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[block.data];
13292                         if (state.operation != StateExtensive::Operation::Cleanup)
13293                         {
13294                             VmaBlockVector* vector = m_pBlockVectors[block.data];
13295                             VmaMutexLockWrite lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
13296 
13297                             for (size_t i = 0, count = vector->GetBlockCount() - m_ImmovableBlockCount; i < count; ++i)
13298                             {
13299                                 if (vector->GetBlock(i) == block.block)
13300                                 {
13301                                     VMA_SWAP(vector->m_Blocks[i], vector->m_Blocks[vector->GetBlockCount() - ++m_ImmovableBlockCount]);
13302                                     if (state.firstFreeBlock != SIZE_MAX)
13303                                     {
13304                                         if (i + 1 < state.firstFreeBlock)
13305                                         {
13306                                             if (state.firstFreeBlock > 1)
13307                                                 VMA_SWAP(vector->m_Blocks[i], vector->m_Blocks[--state.firstFreeBlock]);
13308                                             else
13309                                                 --state.firstFreeBlock;
13310                                         }
13311                                     }
13312                                     swapped = true;
13313                                     break;
13314                                 }
13315                             }
13316                         }
13317                     }
13318                     if (swapped)
13319                         result = VK_INCOMPLETE;
13320                     break;
13321                 }
13322             }
13323 
13324             // Move to the beginning
13325             for (const FragmentedBlock& block : immovableBlocks)
13326             {
13327                 VmaBlockVector* vector = m_pBlockVectors[block.data];
13328                 VmaMutexLockWrite lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
13329 
13330                 for (size_t i = m_ImmovableBlockCount; i < vector->GetBlockCount(); ++i)
13331                 {
13332                     if (vector->GetBlock(i) == block.block)
13333                     {
13334                         VMA_SWAP(vector->m_Blocks[i], vector->m_Blocks[m_ImmovableBlockCount++]);
13335                         break;
13336                     }
13337                 }
13338             }
13339         } while (false);
13340     }
13341 
13342     // Bulk-map destination blocks
13343     for (const FragmentedBlock& block : mappedBlocks)
13344     {
13345         VkResult res = block.block->Map(allocator, block.data, VMA_NULL);
13346         VMA_ASSERT(res == VK_SUCCESS);
13347     }
13348     return result;
13349 }
13350 
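// Illustrative sketch of the pass protocol implemented by
// DefragmentPassBegin()/DefragmentPassEnd() above, as seen through the public
// API. The GPU/CPU copies that must happen between beginning and ending a pass
// are application-specific and elided here.
#if 0 // Example only - excluded from compilation.
static void ExampleDefragment(VmaAllocator allocator)
{
    VmaDefragmentationInfo defragInfo = {};
    defragInfo.flags = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT;

    VmaDefragmentationContext defragCtx;
    if (vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx) != VK_SUCCESS)
        return;

    VmaDefragmentationPassMoveInfo pass;
    while (vmaBeginDefragmentationPass(allocator, defragCtx, &pass) == VK_INCOMPLETE)
    {
        // For each pass.pMoves[i]: copy data from srcAllocation to dstTmpAllocation,
        // or set the move's operation to IGNORE/DESTROY (handled in DefragmentPassEnd above).
        if (vmaEndDefragmentationPass(allocator, defragCtx, &pass) == VK_SUCCESS)
            break; // Defragmentation complete.
    }

    VmaDefragmentationStats stats;
    vmaEndDefragmentation(allocator, defragCtx, &stats);
}
#endif
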
13351 bool VmaDefragmentationContext_T::ComputeDefragmentation(VmaBlockVector& vector, size_t index)
13352 {
13353     switch (m_Algorithm)
13354     {
13355     case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT:
13356         return ComputeDefragmentation_Fast(vector);
13357     case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT:
13358         return ComputeDefragmentation_Balanced(vector, index, true);
13359     case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT:
13360         return ComputeDefragmentation_Full(vector);
13361     case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT:
13362         return ComputeDefragmentation_Extensive(vector, index);
13363     default:
13364         VMA_ASSERT(0);
13365         return ComputeDefragmentation_Balanced(vector, index, true);
13366     }
13367 }
13368 
13369 VmaDefragmentationContext_T::MoveAllocationData VmaDefragmentationContext_T::GetMoveData(
13370     VmaAllocHandle handle, VmaBlockMetadata* metadata)
13371 {
13372     MoveAllocationData moveData;
13373     moveData.move.srcAllocation = (VmaAllocation)metadata->GetAllocationUserData(handle);
13374     moveData.size = moveData.move.srcAllocation->GetSize();
13375     moveData.alignment = moveData.move.srcAllocation->GetAlignment();
13376     moveData.type = moveData.move.srcAllocation->GetSuballocationType();
13377     moveData.flags = 0;
13378 
13379     if (moveData.move.srcAllocation->IsPersistentMap())
13380         moveData.flags |= VMA_ALLOCATION_CREATE_MAPPED_BIT;
13381     if (moveData.move.srcAllocation->IsMappingAllowed())
13382         moveData.flags |= VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT;
13383 
13384     return moveData;
13385 }
13386 
13387 VmaDefragmentationContext_T::CounterStatus VmaDefragmentationContext_T::CheckCounters(VkDeviceSize bytes)
13388 {
13389     // Check custom break criteria if one exists
13390     if (m_BreakCallback && m_BreakCallback(m_BreakCallbackUserData))
13391         return CounterStatus::End;
13392 
13393     // Ignore the allocation if it would exceed the max size for copy
13394     if (m_PassStats.bytesMoved + bytes > m_MaxPassBytes)
13395     {
13396         if (++m_IgnoredAllocs < MAX_ALLOCS_TO_IGNORE)
13397             return CounterStatus::Ignore;
13398         else
13399             return CounterStatus::End;
13400     }
13401     else
13402         m_IgnoredAllocs = 0;
13403     return CounterStatus::Pass;
13404 }
13405 
13406 bool VmaDefragmentationContext_T::IncrementCounters(VkDeviceSize bytes)
13407 {
13408     m_PassStats.bytesMoved += bytes;
13409     // Early return when a maximum is reached
13410     if (++m_PassStats.allocationsMoved >= m_MaxPassAllocations || m_PassStats.bytesMoved >= m_MaxPassBytes)
13411     {
13412         VMA_ASSERT((m_PassStats.allocationsMoved == m_MaxPassAllocations ||
13413             m_PassStats.bytesMoved == m_MaxPassBytes) && "Exceeded maximal pass threshold!");
13414         return true;
13415     }
13416     return false;
13417 }
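
// Editor's note: the per-pass limits enforced by CheckCounters()/IncrementCounters()
// come from VmaDefragmentationInfo. A hedged sketch of how a caller would bound a
// pass; the limit values, `myBreakCallback`, and `myState` are hypothetical:
//
//     VmaDefragmentationInfo defragInfo = {};
//     defragInfo.maxBytesPerPass = 64ull * 1024 * 1024; // stop a pass after ~64 MiB moved
//     defragInfo.maxAllocationsPerPass = 128;           // or after 128 moved allocations
//     defragInfo.pfnBreakCallback = myBreakCallback;    // optional custom break criterion (m_BreakCallback above)
//     defragInfo.pBreakCallbackUserData = &myState;
//     VmaDefragmentationContext defragCtx;
//     vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx);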
13418 
13419 bool VmaDefragmentationContext_T::ReallocWithinBlock(VmaBlockVector& vector, VmaDeviceMemoryBlock* block)
13420 {
13421     VmaBlockMetadata* metadata = block->m_pMetadata;
13422 
13423     for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
13424         handle != VK_NULL_HANDLE;
13425         handle = metadata->GetNextAllocation(handle))
13426     {
13427         MoveAllocationData moveData = GetMoveData(handle, metadata);
13428         // Ignore allocations newly created by the defragmentation algorithm
13429         if (moveData.move.srcAllocation->GetUserData() == this)
13430             continue;
13431         switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
13432         {
13433         case CounterStatus::Ignore:
13434             continue;
13435         case CounterStatus::End:
13436             return true;
13437         case CounterStatus::Pass:
13438             break;
13439         default:
13440             VMA_ASSERT(0);
13441         }
13442 
13443         VkDeviceSize offset = moveData.move.srcAllocation->GetOffset();
13444         if (offset != 0 && metadata->GetSumFreeSize() >= moveData.size)
13445         {
13446             VmaAllocationRequest request = {};
13447             if (metadata->CreateAllocationRequest(
13448                 moveData.size,
13449                 moveData.alignment,
13450                 false,
13451                 moveData.type,
13452                 VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,
13453                 &request))
13454             {
13455                 if (metadata->GetAllocationOffset(request.allocHandle) < offset)
13456                 {
13457                     if (vector.CommitAllocationRequest(
13458                         request,
13459                         block,
13460                         moveData.alignment,
13461                         moveData.flags,
13462                         this,
13463                         moveData.type,
13464                         &moveData.move.dstTmpAllocation) == VK_SUCCESS)
13465                     {
13466                         m_Moves.push_back(moveData.move);
13467                         if (IncrementCounters(moveData.size))
13468                             return true;
13469                     }
13470                 }
13471             }
13472         }
13473     }
13474     return false;
13475 }
13476 
13477 bool VmaDefragmentationContext_T::AllocInOtherBlock(size_t start, size_t end, MoveAllocationData& data, VmaBlockVector& vector)
13478 {
13479     for (; start < end; ++start)
13480     {
13481         VmaDeviceMemoryBlock* dstBlock = vector.GetBlock(start);
13482         if (dstBlock->m_pMetadata->GetSumFreeSize() >= data.size)
13483         {
13484             if (vector.AllocateFromBlock(dstBlock,
13485                 data.size,
13486                 data.alignment,
13487                 data.flags,
13488                 this,
13489                 data.type,
13490                 0,
13491                 &data.move.dstTmpAllocation) == VK_SUCCESS)
13492             {
13493                 m_Moves.push_back(data.move);
13494                 if (IncrementCounters(data.size))
13495                     return true;
13496                 break;
13497             }
13498         }
13499     }
13500     return false;
13501 }
13502 
13503 bool VmaDefragmentationContext_T::ComputeDefragmentation_Fast(VmaBlockVector& vector)
13504 {
13505     // Move only between blocks
13506 
13507     // Go through allocations in last blocks and try to fit them inside first ones
13508     for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i)
13509     {
13510         VmaBlockMetadata* metadata = vector.GetBlock(i)->m_pMetadata;
13511 
13512         for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
13513             handle != VK_NULL_HANDLE;
13514             handle = metadata->GetNextAllocation(handle))
13515         {
13516             MoveAllocationData moveData = GetMoveData(handle, metadata);
13517             // Ignore allocations newly created by the defragmentation algorithm
13518             if (moveData.move.srcAllocation->GetUserData() == this)
13519                 continue;
13520             switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
13521             {
13522             case CounterStatus::Ignore:
13523                 continue;
13524             case CounterStatus::End:
13525                 return true;
13526             case CounterStatus::Pass:
13527                 break;
13528             default:
13529                 VMA_ASSERT(0);
13530             }
13531 
13532             // Check all previous blocks for free space
13533             if (AllocInOtherBlock(0, i, moveData, vector))
13534                 return true;
13535         }
13536     }
13537     return false;
13538 }
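
// Editor's note: the moves pushed into m_Moves above are consumed by the caller
// through the pass API. A hedged sketch of that loop (assuming a valid `allocator`
// and a `defragCtx` from vmaBeginDefragmentation; the data copy itself is elided):
//
//     for (;;)
//     {
//         VmaDefragmentationPassMoveInfo pass;
//         VkResult res = vmaBeginDefragmentationPass(allocator, defragCtx, &pass);
//         if (res == VK_SUCCESS)
//             break; // nothing left to move
//         // res == VK_INCOMPLETE: copy pass.pMoves[i].srcAllocation's contents to
//         // pass.pMoves[i].dstTmpAllocation (e.g. vkCmdCopyBuffer), then:
//         res = vmaEndDefragmentationPass(allocator, defragCtx, &pass);
//         if (res == VK_SUCCESS)
//             break;
//     }
//     vmaEndDefragmentation(allocator, defragCtx, VMA_NULL); // optional VmaDefragmentationStats*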
13539 
13540 bool VmaDefragmentationContext_T::ComputeDefragmentation_Balanced(VmaBlockVector& vector, size_t index, bool update)
13541 {
13542     // Go over every allocation and try to fit it into previous blocks at the lowest offsets;
13543     // if that is not possible, realloc within a single block to minimize the offset (excluding offset == 0),
13544     // but only if there are noticeable gaps between allocations (a heuristic, e.g. the average allocation size in the block)
13545     VMA_ASSERT(m_AlgorithmState != VMA_NULL);
13546 
13547     StateBalanced& vectorState = reinterpret_cast<StateBalanced*>(m_AlgorithmState)[index];
13548     if (update && vectorState.avgAllocSize == UINT64_MAX)
13549         UpdateVectorStatistics(vector, vectorState);
13550 
13551     const size_t startMoveCount = m_Moves.size();
13552     VkDeviceSize minimalFreeRegion = vectorState.avgFreeSize / 2;
13553     for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i)
13554     {
13555         VmaDeviceMemoryBlock* block = vector.GetBlock(i);
13556         VmaBlockMetadata* metadata = block->m_pMetadata;
13557         VkDeviceSize prevFreeRegionSize = 0;
13558 
13559         for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
13560             handle != VK_NULL_HANDLE;
13561             handle = metadata->GetNextAllocation(handle))
13562         {
13563             MoveAllocationData moveData = GetMoveData(handle, metadata);
13564             // Ignore allocations newly created by the defragmentation algorithm
13565             if (moveData.move.srcAllocation->GetUserData() == this)
13566                 continue;
13567             switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
13568             {
13569             case CounterStatus::Ignore:
13570                 continue;
13571             case CounterStatus::End:
13572                 return true;
13573             case CounterStatus::Pass:
13574                 break;
13575             default:
13576                 VMA_ASSERT(0);
13577             }
13578 
13579             // Check all previous blocks for free space
13580             const size_t prevMoveCount = m_Moves.size();
13581             if (AllocInOtherBlock(0, i, moveData, vector))
13582                 return true;
13583 
13584             VkDeviceSize nextFreeRegionSize = metadata->GetNextFreeRegionSize(handle);
13585             // If no room found then realloc within block for lower offset
13586             VkDeviceSize offset = moveData.move.srcAllocation->GetOffset();
13587             if (prevMoveCount == m_Moves.size() && offset != 0 && metadata->GetSumFreeSize() >= moveData.size)
13588             {
13589                 // Check if realloc will make sense
13590                 if (prevFreeRegionSize >= minimalFreeRegion ||
13591                     nextFreeRegionSize >= minimalFreeRegion ||
13592                     moveData.size <= vectorState.avgFreeSize ||
13593                     moveData.size <= vectorState.avgAllocSize)
13594                 {
13595                     VmaAllocationRequest request = {};
13596                     if (metadata->CreateAllocationRequest(
13597                         moveData.size,
13598                         moveData.alignment,
13599                         false,
13600                         moveData.type,
13601                         VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,
13602                         &request))
13603                     {
13604                         if (metadata->GetAllocationOffset(request.allocHandle) < offset)
13605                         {
13606                             if (vector.CommitAllocationRequest(
13607                                 request,
13608                                 block,
13609                                 moveData.alignment,
13610                                 moveData.flags,
13611                                 this,
13612                                 moveData.type,
13613                                 &moveData.move.dstTmpAllocation) == VK_SUCCESS)
13614                             {
13615                                 m_Moves.push_back(moveData.move);
13616                                 if (IncrementCounters(moveData.size))
13617                                     return true;
13618                             }
13619                         }
13620                     }
13621                 }
13622             }
13623             prevFreeRegionSize = nextFreeRegionSize;
13624         }
13625     }
13626 
13627     // No moves performed, update statistics to current vector state
13628     if (startMoveCount == m_Moves.size() && !update)
13629     {
13630         vectorState.avgAllocSize = UINT64_MAX;
13631         return ComputeDefragmentation_Balanced(vector, index, false);
13632     }
13633     return false;
13634 }
13635 
13636 bool VmaDefragmentationContext_T::ComputeDefragmentation_Full(VmaBlockVector& vector)
13637 {
13638     // Go over every allocation and try to fit it into previous blocks at the lowest offsets;
13639     // if that is not possible, realloc within a single block to minimize the offset (excluding offset == 0)
13640 
13641     for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i)
13642     {
13643         VmaDeviceMemoryBlock* block = vector.GetBlock(i);
13644         VmaBlockMetadata* metadata = block->m_pMetadata;
13645 
13646         for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
13647             handle != VK_NULL_HANDLE;
13648             handle = metadata->GetNextAllocation(handle))
13649         {
13650             MoveAllocationData moveData = GetMoveData(handle, metadata);
13651             // Ignore allocations newly created by the defragmentation algorithm
13652             if (moveData.move.srcAllocation->GetUserData() == this)
13653                 continue;
13654             switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
13655             {
13656             case CounterStatus::Ignore:
13657                 continue;
13658             case CounterStatus::End:
13659                 return true;
13660             case CounterStatus::Pass:
13661                 break;
13662             default:
13663                 VMA_ASSERT(0);
13664             }
13665 
13666             // Check all previous blocks for free space
13667             const size_t prevMoveCount = m_Moves.size();
13668             if (AllocInOtherBlock(0, i, moveData, vector))
13669                 return true;
13670 
13671             // If no room found then realloc within block for lower offset
13672             VkDeviceSize offset = moveData.move.srcAllocation->GetOffset();
13673             if (prevMoveCount == m_Moves.size() && offset != 0 && metadata->GetSumFreeSize() >= moveData.size)
13674             {
13675                 VmaAllocationRequest request = {};
13676                 if (metadata->CreateAllocationRequest(
13677                     moveData.size,
13678                     moveData.alignment,
13679                     false,
13680                     moveData.type,
13681                     VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,
13682                     &request))
13683                 {
13684                     if (metadata->GetAllocationOffset(request.allocHandle) < offset)
13685                     {
13686                         if (vector.CommitAllocationRequest(
13687                             request,
13688                             block,
13689                             moveData.alignment,
13690                             moveData.flags,
13691                             this,
13692                             moveData.type,
13693                             &moveData.move.dstTmpAllocation) == VK_SUCCESS)
13694                         {
13695                             m_Moves.push_back(moveData.move);
13696                             if (IncrementCounters(moveData.size))
13697                                 return true;
13698                         }
13699                     }
13700                 }
13701             }
13702         }
13703     }
13704     return false;
13705 }
13706 
13707 bool VmaDefragmentationContext_T::ComputeDefragmentation_Extensive(VmaBlockVector& vector, size_t index)
13708 {
13709     // First free a single block, then populate it to the brim, then free another block, and so on
13710 
13711     // Fall back to the previous algorithm, since without granularity conflicts it can achieve maximal packing
13712     if (vector.m_BufferImageGranularity == 1)
13713         return ComputeDefragmentation_Full(vector);
13714 
13715     VMA_ASSERT(m_AlgorithmState != VMA_NULL);
13716 
13717     StateExtensive& vectorState = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[index];
13718 
13719     bool texturePresent = false, bufferPresent = false, otherPresent = false;
13720     switch (vectorState.operation)
13721     {
13722     case StateExtensive::Operation::Done: // Vector defragmented
13723         return false;
13724     case StateExtensive::Operation::FindFreeBlockBuffer:
13725     case StateExtensive::Operation::FindFreeBlockTexture:
13726     case StateExtensive::Operation::FindFreeBlockAll:
13727     {
13728         // No more blocks to free, just perform fast realloc and move to cleanup
13729         if (vectorState.firstFreeBlock == 0)
13730         {
13731             vectorState.operation = StateExtensive::Operation::Cleanup;
13732             return ComputeDefragmentation_Fast(vector);
13733         }
13734 
13735         // No free blocks, have to clear last one
13736         size_t last = (vectorState.firstFreeBlock == SIZE_MAX ? vector.GetBlockCount() : vectorState.firstFreeBlock) - 1;
13737         VmaBlockMetadata* freeMetadata = vector.GetBlock(last)->m_pMetadata;
13738 
13739         const size_t prevMoveCount = m_Moves.size();
13740         for (VmaAllocHandle handle = freeMetadata->GetAllocationListBegin();
13741             handle != VK_NULL_HANDLE;
13742             handle = freeMetadata->GetNextAllocation(handle))
13743         {
13744             MoveAllocationData moveData = GetMoveData(handle, freeMetadata);
13745             switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
13746             {
13747             case CounterStatus::Ignore:
13748                 continue;
13749             case CounterStatus::End:
13750                 return true;
13751             case CounterStatus::Pass:
13752                 break;
13753             default:
13754                 VMA_ASSERT(0);
13755             }
13756 
13757             // Check all previous blocks for free space
13758             if (AllocInOtherBlock(0, last, moveData, vector))
13759             {
13760                 // Full clear performed already
13761                 if (prevMoveCount != m_Moves.size() && freeMetadata->GetNextAllocation(handle) == VK_NULL_HANDLE)
13762                     vectorState.firstFreeBlock = last;
13763                 return true;
13764             }
13765         }
13766 
13767         if (prevMoveCount == m_Moves.size())
13768         {
13769             // Cannot perform full clear, have to move data in other blocks around
13770             if (last != 0)
13771             {
13772                 for (size_t i = last - 1; i; --i)
13773                 {
13774                     if (ReallocWithinBlock(vector, vector.GetBlock(i)))
13775                         return true;
13776                 }
13777             }
13778 
13779             if (prevMoveCount == m_Moves.size())
13780             {
13781                 // No possible reallocs within blocks, try to move them around fast
13782                 return ComputeDefragmentation_Fast(vector);
13783             }
13784         }
13785         else
13786         {
13787             switch (vectorState.operation)
13788             {
13789             case StateExtensive::Operation::FindFreeBlockBuffer:
13790                 vectorState.operation = StateExtensive::Operation::MoveBuffers;
13791                 break;
13792             case StateExtensive::Operation::FindFreeBlockTexture:
13793                 vectorState.operation = StateExtensive::Operation::MoveTextures;
13794                 break;
13795             case StateExtensive::Operation::FindFreeBlockAll:
13796                 vectorState.operation = StateExtensive::Operation::MoveAll;
13797                 break;
13798             default:
13799                 VMA_ASSERT(0);
13800                 vectorState.operation = StateExtensive::Operation::MoveTextures;
13801             }
13802             vectorState.firstFreeBlock = last;
13803             // Nothing done, block found without reallocations, so more reallocs can be performed in the same pass
13804             return ComputeDefragmentation_Extensive(vector, index);
13805         }
13806         break;
13807     }
13808     case StateExtensive::Operation::MoveTextures:
13809     {
13810         if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL, vector,
13811             vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent))
13812         {
13813             if (texturePresent)
13814             {
13815                 vectorState.operation = StateExtensive::Operation::FindFreeBlockTexture;
13816                 return ComputeDefragmentation_Extensive(vector, index);
13817             }
13818 
13819             if (!bufferPresent && !otherPresent)
13820             {
13821                 vectorState.operation = StateExtensive::Operation::Cleanup;
13822                 break;
13823             }
13824 
13825             // No more textures to move, check buffers
13826             vectorState.operation = StateExtensive::Operation::MoveBuffers;
13827             bufferPresent = false;
13828             otherPresent = false;
13829         }
13830         else
13831             break;
13832         VMA_FALLTHROUGH; // Fallthrough
13833     }
13834     case StateExtensive::Operation::MoveBuffers:
13835     {
13836         if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_BUFFER, vector,
13837             vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent))
13838         {
13839             if (bufferPresent)
13840             {
13841                 vectorState.operation = StateExtensive::Operation::FindFreeBlockBuffer;
13842                 return ComputeDefragmentation_Extensive(vector, index);
13843             }
13844 
13845             if (!otherPresent)
13846             {
13847                 vectorState.operation = StateExtensive::Operation::Cleanup;
13848                 break;
13849             }
13850 
13851             // No more buffers to move, check all others
13852             vectorState.operation = StateExtensive::Operation::MoveAll;
13853             otherPresent = false;
13854         }
13855         else
13856             break;
13857         VMA_FALLTHROUGH; // Fallthrough
13858     }
13859     case StateExtensive::Operation::MoveAll:
13860     {
13861         if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_FREE, vector,
13862             vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent))
13863         {
13864             if (otherPresent)
13865             {
13866                 vectorState.operation = StateExtensive::Operation::FindFreeBlockBuffer;
13867                 return ComputeDefragmentation_Extensive(vector, index);
13868             }
13869             // Everything moved
13870             vectorState.operation = StateExtensive::Operation::Cleanup;
13871         }
13872         break;
13873     }
13874     case StateExtensive::Operation::Cleanup:
13875         // Cleanup is handled below so that other operations may reuse the cleanup code. This case is here to prevent the unhandled enum value warning (C4062).
13876         break;
13877     }
13878 
13879     if (vectorState.operation == StateExtensive::Operation::Cleanup)
13880     {
13881         // All other work done, pack data in blocks even tighter if possible
13882         const size_t prevMoveCount = m_Moves.size();
13883         for (size_t i = 0; i < vector.GetBlockCount(); ++i)
13884         {
13885             if (ReallocWithinBlock(vector, vector.GetBlock(i)))
13886                 return true;
13887         }
13888 
13889         if (prevMoveCount == m_Moves.size())
13890             vectorState.operation = StateExtensive::Operation::Done;
13891     }
13892     return false;
13893 }
13894 
13895 void VmaDefragmentationContext_T::UpdateVectorStatistics(VmaBlockVector& vector, StateBalanced& state)
13896 {
13897     size_t allocCount = 0;
13898     size_t freeCount = 0;
13899     state.avgFreeSize = 0;
13900     state.avgAllocSize = 0;
13901 
13902     for (size_t i = 0; i < vector.GetBlockCount(); ++i)
13903     {
13904         VmaBlockMetadata* metadata = vector.GetBlock(i)->m_pMetadata;
13905 
13906         allocCount += metadata->GetAllocationCount();
13907         freeCount += metadata->GetFreeRegionsCount();
13908         state.avgFreeSize += metadata->GetSumFreeSize();
13909         state.avgAllocSize += metadata->GetSize();
13910     }
13911 
13912     state.avgAllocSize = (state.avgAllocSize - state.avgFreeSize) / allocCount;
13913     state.avgFreeSize /= freeCount;
13914 }
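
// Editor's note: a worked example of the averages above, using assumed numbers only.
// With 2 blocks of 64 MiB each (GetSize() sums to 128 MiB), 96 MiB of that free
// across 6 free regions, and 8 allocations:
//     avgAllocSize = (128 MiB - 96 MiB) / 8 = 4 MiB
//     avgFreeSize  = 96 MiB / 6            = 16 MiB
// These feed the Balanced heuristic above (minimalFreeRegion = avgFreeSize / 2).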
13915 
13916 bool VmaDefragmentationContext_T::MoveDataToFreeBlocks(VmaSuballocationType currentType,
13917     VmaBlockVector& vector, size_t firstFreeBlock,
13918     bool& texturePresent, bool& bufferPresent, bool& otherPresent)
13919 {
13920     const size_t prevMoveCount = m_Moves.size();
13921     for (size_t i = firstFreeBlock; i;)
13922     {
13923         VmaDeviceMemoryBlock* block = vector.GetBlock(--i);
13924         VmaBlockMetadata* metadata = block->m_pMetadata;
13925 
13926         for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
13927             handle != VK_NULL_HANDLE;
13928             handle = metadata->GetNextAllocation(handle))
13929         {
13930             MoveAllocationData moveData = GetMoveData(handle, metadata);
13931             // Ignore allocations newly created by the defragmentation algorithm
13932             if (moveData.move.srcAllocation->GetUserData() == this)
13933                 continue;
13934             switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
13935             {
13936             case CounterStatus::Ignore:
13937                 continue;
13938             case CounterStatus::End:
13939                 return true;
13940             case CounterStatus::Pass:
13941                 break;
13942             default:
13943                 VMA_ASSERT(0);
13944             }
13945 
13946             // Move only a single type of resource at once
13947             if (!VmaIsBufferImageGranularityConflict(moveData.type, currentType))
13948             {
13949                 // Try to fit allocation into free blocks
13950                 if (AllocInOtherBlock(firstFreeBlock, vector.GetBlockCount(), moveData, vector))
13951                     return false;
13952             }
13953 
13954             if (!VmaIsBufferImageGranularityConflict(moveData.type, VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL))
13955                 texturePresent = true;
13956             else if (!VmaIsBufferImageGranularityConflict(moveData.type, VMA_SUBALLOCATION_TYPE_BUFFER))
13957                 bufferPresent = true;
13958             else
13959                 otherPresent = true;
13960         }
13961     }
13962     return prevMoveCount == m_Moves.size();
13963 }
13964 #endif // _VMA_DEFRAGMENTATION_CONTEXT_FUNCTIONS
13965 
13966 #ifndef _VMA_POOL_T_FUNCTIONS
13967 VmaPool_T::VmaPool_T(
13968     VmaAllocator hAllocator,
13969     const VmaPoolCreateInfo& createInfo,
13970     VkDeviceSize preferredBlockSize)
13971     : m_BlockVector(
13972         hAllocator,
13973         this, // hParentPool
13974         createInfo.memoryTypeIndex,
13975         createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
13976         createInfo.minBlockCount,
13977         createInfo.maxBlockCount,
13978         (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
13979         createInfo.blockSize != 0, // explicitBlockSize
13980         createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK, // algorithm
13981         createInfo.priority,
13982         VMA_MAX(hAllocator->GetMemoryTypeMinAlignment(createInfo.memoryTypeIndex), createInfo.minAllocationAlignment),
13983         createInfo.pMemoryAllocateNext),
13984     m_Id(0),
13985     m_Name(VMA_NULL) {}
13986 
13987 VmaPool_T::~VmaPool_T()
13988 {
13989     VMA_ASSERT(m_PrevPool == VMA_NULL && m_NextPool == VMA_NULL);
13990 }
13991 
13992 void VmaPool_T::SetName(const char* pName)
13993 {
13994     const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks();
13995     VmaFreeString(allocs, m_Name);
13996 
13997     if (pName != VMA_NULL)
13998     {
13999         m_Name = VmaCreateStringCopy(allocs, pName);
14000     }
14001     else
14002     {
14003         m_Name = VMA_NULL;
14004     }
14005 }
14006 #endif // _VMA_POOL_T_FUNCTIONS
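
// Editor's note: a minimal sketch of creating and naming a pool through the public
// API, which routes into VmaPool_T above (assuming a valid `allocator` and a
// previously chosen `memTypeIndex`; the pool name is illustrative):
//
//     VmaPoolCreateInfo poolInfo = {};
//     poolInfo.memoryTypeIndex = memTypeIndex;
//     poolInfo.blockSize = 0;     // 0 = use the allocator's preferred block size
//     poolInfo.maxBlockCount = 0; // 0 = no limit
//     VmaPool pool;
//     VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);
//     if (res == VK_SUCCESS)
//     {
//         vmaSetPoolName(allocator, pool, "Example pool"); // ends up in VmaPool_T::SetName
//         // ... allocate from the pool via VmaAllocationCreateInfo::pool ...
//         vmaDestroyPool(allocator, pool);
//     }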
14007 
14008 #ifndef _VMA_ALLOCATOR_T_FUNCTIONS
14009 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
14010     m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
14011     m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ? pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0),
14012     m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
14013     m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0),
14014     m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0),
14015     m_UseAmdDeviceCoherentMemory((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT) != 0),
14016     m_UseKhrBufferDeviceAddress((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT) != 0),
14017     m_UseExtMemoryPriority((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT) != 0),
14018     m_hDevice(pCreateInfo->device),
14019     m_hInstance(pCreateInfo->instance),
14020     m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
14021     m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
14022         *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
14023     m_AllocationObjectAllocator(&m_AllocationCallbacks),
14024     m_HeapSizeLimitMask(0),
14025     m_DeviceMemoryCount(0),
14026     m_PreferredLargeHeapBlockSize(0),
14027     m_PhysicalDevice(pCreateInfo->physicalDevice),
14028     m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
14029     m_NextPoolId(0),
14030     m_GlobalMemoryTypeBits(UINT32_MAX)
14031 {
14032     if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
14033     {
14034         m_UseKhrDedicatedAllocation = false;
14035         m_UseKhrBindMemory2 = false;
14036     }
14037 
14038     if(VMA_DEBUG_DETECT_CORRUPTION)
14039     {
14040         // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
14041         VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
14042     }
14043 
14044     VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device && pCreateInfo->instance);
14045 
14046     if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
14047     {
14048 #if !(VMA_DEDICATED_ALLOCATION)
14049         if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
14050         {
14051             VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
14052         }
14053 #endif
14054 #if !(VMA_BIND_MEMORY2)
14055         if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0)
14056         {
14057             VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros.");
14058         }
14059 #endif
14060     }
14061 #if !(VMA_MEMORY_BUDGET)
14062     if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0)
14063     {
14064         VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros.");
14065     }
14066 #endif
14067 #if !(VMA_BUFFER_DEVICE_ADDRESS)
14068     if(m_UseKhrBufferDeviceAddress)
14069     {
14070         VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT is set but required extension or Vulkan 1.2 is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
14071     }
14072 #endif
14073 #if VMA_VULKAN_VERSION < 1003000
14074     if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0))
14075     {
14076         VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_3 but required Vulkan version is disabled by preprocessor macros.");
14077     }
14078 #endif
14079 #if VMA_VULKAN_VERSION < 1002000
14080     if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 2, 0))
14081     {
14082         VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_2 but required Vulkan version is disabled by preprocessor macros.");
14083     }
14084 #endif
14085 #if VMA_VULKAN_VERSION < 1001000
14086     if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
14087     {
14088         VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan version is disabled by preprocessor macros.");
14089     }
14090 #endif
14091 #if !(VMA_MEMORY_PRIORITY)
14092     if(m_UseExtMemoryPriority)
14093     {
14094         VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
14095     }
14096 #endif
14097 
14098     memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
14099     memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
14100     memset(&m_MemProps, 0, sizeof(m_MemProps));
14101 
14102     memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
14103     memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions));
14104 
14105 #if VMA_EXTERNAL_MEMORY
14106     memset(&m_TypeExternalMemoryHandleTypes, 0, sizeof(m_TypeExternalMemoryHandleTypes));
14107 #endif // #if VMA_EXTERNAL_MEMORY
14108 
14109     if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
14110     {
14111         m_DeviceMemoryCallbacks.pUserData = pCreateInfo->pDeviceMemoryCallbacks->pUserData;
14112         m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
14113         m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
14114     }
14115 
14116     ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
14117 
14118     (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
14119     (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
14120 
14121     VMA_ASSERT(VmaIsPow2(VMA_MIN_ALIGNMENT));
14122     VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
14123     VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
14124     VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
14125 
14126     m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
14127         pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
14128 
14129     m_GlobalMemoryTypeBits = CalculateGlobalMemoryTypeBits();
14130 
14131 #if VMA_EXTERNAL_MEMORY
14132     if(pCreateInfo->pTypeExternalMemoryHandleTypes != VMA_NULL)
14133     {
14134         memcpy(m_TypeExternalMemoryHandleTypes, pCreateInfo->pTypeExternalMemoryHandleTypes,
14135             sizeof(VkExternalMemoryHandleTypeFlagsKHR) * GetMemoryTypeCount());
14136     }
14137 #endif // #if VMA_EXTERNAL_MEMORY
14138 
14139     if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
14140     {
14141         for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
14142         {
14143             const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
14144             if(limit != VK_WHOLE_SIZE)
14145             {
14146                 m_HeapSizeLimitMask |= 1u << heapIndex;
14147                 if(limit < m_MemProps.memoryHeaps[heapIndex].size)
14148                 {
14149                     m_MemProps.memoryHeaps[heapIndex].size = limit;
14150                 }
14151             }
14152         }
14153     }
14154 
14155     for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14156     {
14157         // Create only supported types
14158         if((m_GlobalMemoryTypeBits & (1u << memTypeIndex)) != 0)
14159         {
14160             const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
14161             m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
14162                 this,
14163                 VK_NULL_HANDLE, // hParentPool
14164                 memTypeIndex,
14165                 preferredBlockSize,
14166                 0,
14167                 SIZE_MAX,
14168                 GetBufferImageGranularity(),
14169                 false, // explicitBlockSize
14170                 0, // algorithm
14171                 0.5f, // priority (0.5 is the default per Vulkan spec)
14172                 GetMemoryTypeMinAlignment(memTypeIndex), // minAllocationAlignment
14173                 VMA_NULL); // pMemoryAllocateNext
14174             // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
14175             // because minBlockCount is 0.
14176         }
14177     }
14178 }
14179 
14180 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
14181 {
14182     VkResult res = VK_SUCCESS;
14183 
14184 #if VMA_MEMORY_BUDGET
14185     if(m_UseExtMemoryBudget)
14186     {
14187         UpdateVulkanBudget();
14188     }
14189 #endif // #if VMA_MEMORY_BUDGET
14190 
14191     return res;
14192 }
14193 
14194 VmaAllocator_T::~VmaAllocator_T()
14195 {
14196     VMA_ASSERT(m_Pools.IsEmpty());
14197 
14198     for(size_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
14199     {
14200         vma_delete(this, m_pBlockVectors[memTypeIndex]);
14201     }
14202 }
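
// Editor's note: a minimal sketch of constructing the allocator wrapped by the class
// above, via the public entry point. The handles `instance`, `physicalDevice`, and
// `device` are assumed to exist; the flag shown is valid only if the corresponding
// extension was enabled on the device:
//
//     VmaAllocatorCreateInfo allocatorInfo = {};
//     allocatorInfo.vulkanApiVersion = VK_API_VERSION_1_2;
//     allocatorInfo.instance = instance;
//     allocatorInfo.physicalDevice = physicalDevice;
//     allocatorInfo.device = device;
//     allocatorInfo.flags = VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT;
//     VmaAllocator allocator;
//     VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
//     // ...
//     vmaDestroyAllocator(allocator);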
14203 
14204 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
14205 {
14206 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
14207     ImportVulkanFunctions_Static();
14208 #endif
14209 
14210     if(pVulkanFunctions != VMA_NULL)
14211     {
14212         ImportVulkanFunctions_Custom(pVulkanFunctions);
14213     }
14214 
14215 #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
14216     ImportVulkanFunctions_Dynamic();
14217 #endif
14218 
14219     ValidateVulkanFunctions();
14220 }
14221 
14222 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
14223 
14224 void VmaAllocator_T::ImportVulkanFunctions_Static()
14225 {
14226     // Vulkan 1.0
14227     m_VulkanFunctions.vkGetInstanceProcAddr = (PFN_vkGetInstanceProcAddr)vkGetInstanceProcAddr;
14228     m_VulkanFunctions.vkGetDeviceProcAddr = (PFN_vkGetDeviceProcAddr)vkGetDeviceProcAddr;
14229     m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
14230     m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
14231     m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
14232     m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
14233     m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
14234     m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
14235     m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
14236     m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
14237     m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
14238     m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
14239     m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
14240     m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
14241     m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
14242     m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
14243     m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
14244     m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
14245     m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
14246 
14247     // Vulkan 1.1
14248 #if VMA_VULKAN_VERSION >= 1001000
14249     if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
14250     {
14251         m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2)vkGetBufferMemoryRequirements2;
14252         m_VulkanFunctions.vkGetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2)vkGetImageMemoryRequirements2;
14253         m_VulkanFunctions.vkBindBufferMemory2KHR = (PFN_vkBindBufferMemory2)vkBindBufferMemory2;
14254         m_VulkanFunctions.vkBindImageMemory2KHR = (PFN_vkBindImageMemory2)vkBindImageMemory2;
14255     }
14256 #endif
14257 
14258 #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
14259     if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
14260     {
14261         m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR = (PFN_vkGetPhysicalDeviceMemoryProperties2)vkGetPhysicalDeviceMemoryProperties2;
14262     }
14263 #endif
14264 
14265 #if VMA_VULKAN_VERSION >= 1003000
14266     if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0))
14267     {
14268         m_VulkanFunctions.vkGetDeviceBufferMemoryRequirements = (PFN_vkGetDeviceBufferMemoryRequirements)vkGetDeviceBufferMemoryRequirements;
14269         m_VulkanFunctions.vkGetDeviceImageMemoryRequirements = (PFN_vkGetDeviceImageMemoryRequirements)vkGetDeviceImageMemoryRequirements;
14270     }
14271 #endif
14272 }
14273 
14274 #endif // VMA_STATIC_VULKAN_FUNCTIONS == 1
14275 
14276 void VmaAllocator_T::ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions)
14277 {
14278     VMA_ASSERT(pVulkanFunctions != VMA_NULL);
14279 
14280 #define VMA_COPY_IF_NOT_NULL(funcName) \
14281     if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
14282 
14283     VMA_COPY_IF_NOT_NULL(vkGetInstanceProcAddr);
14284     VMA_COPY_IF_NOT_NULL(vkGetDeviceProcAddr);
14285     VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
14286     VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
14287     VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
14288     VMA_COPY_IF_NOT_NULL(vkFreeMemory);
14289     VMA_COPY_IF_NOT_NULL(vkMapMemory);
14290     VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
14291     VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
14292     VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
14293     VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
14294     VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
14295     VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
14296     VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
14297     VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
14298     VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
14299     VMA_COPY_IF_NOT_NULL(vkCreateImage);
14300     VMA_COPY_IF_NOT_NULL(vkDestroyImage);
14301     VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
14302 
14303 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
14304     VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
14305     VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
14306 #endif
14307 
14308 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
14309     VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR);
14310     VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR);
14311 #endif
14312 
14313 #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
14314     VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR);
14315 #endif
14316 
14317 #if VMA_VULKAN_VERSION >= 1003000
14318     VMA_COPY_IF_NOT_NULL(vkGetDeviceBufferMemoryRequirements);
14319     VMA_COPY_IF_NOT_NULL(vkGetDeviceImageMemoryRequirements);
14320 #endif
14321 
14322 #undef VMA_COPY_IF_NOT_NULL
14323 }
14324 
14325 #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
14326 
14327 void VmaAllocator_T::ImportVulkanFunctions_Dynamic()
14328 {
14329     VMA_ASSERT(m_VulkanFunctions.vkGetInstanceProcAddr && m_VulkanFunctions.vkGetDeviceProcAddr &&
14330         "To use VMA_DYNAMIC_VULKAN_FUNCTIONS in new versions of VMA you now have to pass "
14331         "VmaVulkanFunctions::vkGetInstanceProcAddr and vkGetDeviceProcAddr as VmaAllocatorCreateInfo::pVulkanFunctions. "
14332         "Other members can be null.");
14333 
14334 #define VMA_FETCH_INSTANCE_FUNC(memberName, functionPointerType, functionNameString) \
14335     if(m_VulkanFunctions.memberName == VMA_NULL) \
14336         m_VulkanFunctions.memberName = \
14337             (functionPointerType)m_VulkanFunctions.vkGetInstanceProcAddr(m_hInstance, functionNameString);
14338 #define VMA_FETCH_DEVICE_FUNC(memberName, functionPointerType, functionNameString) \
14339     if(m_VulkanFunctions.memberName == VMA_NULL) \
14340         m_VulkanFunctions.memberName = \
14341             (functionPointerType)m_VulkanFunctions.vkGetDeviceProcAddr(m_hDevice, functionNameString);
14342 
14343     VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceProperties, PFN_vkGetPhysicalDeviceProperties, "vkGetPhysicalDeviceProperties");
14344     VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties, PFN_vkGetPhysicalDeviceMemoryProperties, "vkGetPhysicalDeviceMemoryProperties");
14345     VMA_FETCH_DEVICE_FUNC(vkAllocateMemory, PFN_vkAllocateMemory, "vkAllocateMemory");
14346     VMA_FETCH_DEVICE_FUNC(vkFreeMemory, PFN_vkFreeMemory, "vkFreeMemory");
14347     VMA_FETCH_DEVICE_FUNC(vkMapMemory, PFN_vkMapMemory, "vkMapMemory");
14348     VMA_FETCH_DEVICE_FUNC(vkUnmapMemory, PFN_vkUnmapMemory, "vkUnmapMemory");
14349     VMA_FETCH_DEVICE_FUNC(vkFlushMappedMemoryRanges, PFN_vkFlushMappedMemoryRanges, "vkFlushMappedMemoryRanges");
14350     VMA_FETCH_DEVICE_FUNC(vkInvalidateMappedMemoryRanges, PFN_vkInvalidateMappedMemoryRanges, "vkInvalidateMappedMemoryRanges");
14351     VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory, PFN_vkBindBufferMemory, "vkBindBufferMemory");
14352     VMA_FETCH_DEVICE_FUNC(vkBindImageMemory, PFN_vkBindImageMemory, "vkBindImageMemory");
14353     VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements, PFN_vkGetBufferMemoryRequirements, "vkGetBufferMemoryRequirements");
14354     VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements, PFN_vkGetImageMemoryRequirements, "vkGetImageMemoryRequirements");
14355     VMA_FETCH_DEVICE_FUNC(vkCreateBuffer, PFN_vkCreateBuffer, "vkCreateBuffer");
14356     VMA_FETCH_DEVICE_FUNC(vkDestroyBuffer, PFN_vkDestroyBuffer, "vkDestroyBuffer");
14357     VMA_FETCH_DEVICE_FUNC(vkCreateImage, PFN_vkCreateImage, "vkCreateImage");
14358     VMA_FETCH_DEVICE_FUNC(vkDestroyImage, PFN_vkDestroyImage, "vkDestroyImage");
14359     VMA_FETCH_DEVICE_FUNC(vkCmdCopyBuffer, PFN_vkCmdCopyBuffer, "vkCmdCopyBuffer");
14360 
14361 #if VMA_VULKAN_VERSION >= 1001000
14362     if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
14363     {
14364         VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2, "vkGetBufferMemoryRequirements2");
14365         VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2, "vkGetImageMemoryRequirements2");
14366         VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2, "vkBindBufferMemory2");
14367         VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2, "vkBindImageMemory2");
14368     }
14369 #endif
14370 
14371 #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
14372     if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
14373     {
14374         VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2, "vkGetPhysicalDeviceMemoryProperties2");
14375     }
14376     else if(m_UseExtMemoryBudget)
14377     {
14378         VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2, "vkGetPhysicalDeviceMemoryProperties2KHR");
14379     }
14380 #endif
14381 
14382 #if VMA_DEDICATED_ALLOCATION
14383     if(m_UseKhrDedicatedAllocation)
14384     {
14385         VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2KHR, "vkGetBufferMemoryRequirements2KHR");
14386         VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2KHR, "vkGetImageMemoryRequirements2KHR");
14387     }
14388 #endif
14389 
14390 #if VMA_BIND_MEMORY2
14391     if(m_UseKhrBindMemory2)
14392     {
14393         VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2KHR, "vkBindBufferMemory2KHR");
14394         VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2KHR, "vkBindImageMemory2KHR");
14395     }
14396 #endif // #if VMA_BIND_MEMORY2
14397 
14398 #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
14399     if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
14400     {
14401         VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2");
14402     }
14403     else if(m_UseExtMemoryBudget)
14404     {
14405         VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2KHR");
14406     }
14407 #endif // #if VMA_MEMORY_BUDGET
14408 
14409 #if VMA_VULKAN_VERSION >= 1003000
14410     if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0))
14411     {
14412         VMA_FETCH_DEVICE_FUNC(vkGetDeviceBufferMemoryRequirements, PFN_vkGetDeviceBufferMemoryRequirements, "vkGetDeviceBufferMemoryRequirements");
14413         VMA_FETCH_DEVICE_FUNC(vkGetDeviceImageMemoryRequirements, PFN_vkGetDeviceImageMemoryRequirements, "vkGetDeviceImageMemoryRequirements");
14414     }
14415 #endif
14416 
14417 #undef VMA_FETCH_DEVICE_FUNC
14418 #undef VMA_FETCH_INSTANCE_FUNC
14419 }
14420 
14421 #endif // VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
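
// Editor's note: with VMA_DYNAMIC_VULKAN_FUNCTIONS == 1, only the two loader entry
// points need to be supplied by the caller; everything else is fetched above. A
// hedged sketch (`allocatorInfo` refers to the VmaAllocatorCreateInfo sketch earlier):
//
//     VmaVulkanFunctions vulkanFunctions = {};
//     vulkanFunctions.vkGetInstanceProcAddr = vkGetInstanceProcAddr;
//     vulkanFunctions.vkGetDeviceProcAddr = vkGetDeviceProcAddr;
//     allocatorInfo.pVulkanFunctions = &vulkanFunctions;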
14422 
14423 void VmaAllocator_T::ValidateVulkanFunctions()
14424 {
14425     VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
14426     VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
14427     VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
14428     VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
14429     VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
14430     VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
14431     VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
14432     VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
14433     VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
14434     VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
14435     VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
14436     VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
14437     VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
14438     VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
14439     VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
14440     VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
14441     VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
14442 
14443 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
14444     if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation)
14445     {
14446         VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
14447         VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
14448     }
14449 #endif
14450 
14451 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
14452     if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2)
14453     {
14454         VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL);
14455         VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL);
14456     }
14457 #endif
14458 
14459 #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
14460     if(m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
14461     {
14462         VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL);
14463     }
14464 #endif
14465 
14466 #if VMA_VULKAN_VERSION >= 1003000
14467     if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0))
14468     {
14469         VMA_ASSERT(m_VulkanFunctions.vkGetDeviceBufferMemoryRequirements != VMA_NULL);
14470         VMA_ASSERT(m_VulkanFunctions.vkGetDeviceImageMemoryRequirements != VMA_NULL);
14471     }
14472 #endif
14473 }
14474 
14475 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
14476 {
14477     const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14478     const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
14479     const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
14480     return VmaAlignUp(isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize, (VkDeviceSize)32);
14481 }
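
// Editor's note: a worked example of the computation above, assuming the library's
// default thresholds (VMA_SMALL_HEAP_MAX_SIZE = 1 GiB, VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE = 256 MiB):
//     - 256 MiB heap (small):  preferred block size = 256 MiB / 8 = 32 MiB
//     - 8 GiB heap (large):    preferred block size = 256 MiB
// Both results are already multiples of 32, so VmaAlignUp leaves them unchanged.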
14482 
14483 VkResult VmaAllocator_T::AllocateMemoryOfType(
14484     VmaPool pool,
14485     VkDeviceSize size,
14486     VkDeviceSize alignment,
14487     bool dedicatedPreferred,
14488     VkBuffer dedicatedBuffer,
14489     VkImage dedicatedImage,
14490     VkFlags dedicatedBufferImageUsage,
14491     const VmaAllocationCreateInfo& createInfo,
14492     uint32_t memTypeIndex,
14493     VmaSuballocationType suballocType,
14494     VmaDedicatedAllocationList& dedicatedAllocations,
14495     VmaBlockVector& blockVector,
14496     size_t allocationCount,
14497     VmaAllocation* pAllocations)
14498 {
14499     VMA_ASSERT(pAllocations != VMA_NULL);
14500     VMA_DEBUG_LOG_FORMAT("  AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
14501 
14502     VmaAllocationCreateInfo finalCreateInfo = createInfo;
14503     VkResult res = CalcMemTypeParams(
14504         finalCreateInfo,
14505         memTypeIndex,
14506         size,
14507         allocationCount);
14508     if(res != VK_SUCCESS)
14509         return res;
14510 
14511     if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
14512     {
14513         return AllocateDedicatedMemory(
14514             pool,
14515             size,
14516             suballocType,
14517             dedicatedAllocations,
14518             memTypeIndex,
14519             (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14520             (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14521             (finalCreateInfo.flags &
14522                 (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0,
14523             (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0,
14524             finalCreateInfo.pUserData,
14525             finalCreateInfo.priority,
14526             dedicatedBuffer,
14527             dedicatedImage,
14528             dedicatedBufferImageUsage,
14529             allocationCount,
14530             pAllocations,
14531             blockVector.GetAllocationNextPtr());
14532     }
14533     else
14534     {
14535         const bool canAllocateDedicated =
14536             (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
14537             (pool == VK_NULL_HANDLE || !blockVector.HasExplicitBlockSize());
14538 
14539         if(canAllocateDedicated)
14540         {
14541             // Heuristic: Allocate dedicated memory if the requested size is greater than half of the preferred block size.
14542             if(size > blockVector.GetPreferredBlockSize() / 2)
14543             {
14544                 dedicatedPreferred = true;
14545             }
14546             // Protection against creating each allocation as dedicated when we reach or exceed heap size/budget,
14547             // which can quickly deplete maxMemoryAllocationCount: Don't prefer dedicated allocations when above
14548             // 3/4 of the maximum allocation count.
14549             if(m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount < UINT32_MAX / 4 &&
14550                 m_DeviceMemoryCount.load() > m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount * 3 / 4)
14551             {
14552                 dedicatedPreferred = false;
14553             }
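            // Worked example of these heuristics (illustrative numbers): with a
            // preferred block size of 256 MiB, a 200 MiB request exceeds half the
            // block size, so a dedicated allocation is preferred rather than leaving
            // most of a fresh block unused. But with maxMemoryAllocationCount = 4096
            // and more than 3072 live VkDeviceMemory objects, the preference is
            // dropped again so that suballocation keeps the allocation count low.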
14554 
14555             if(dedicatedPreferred)
14556             {
14557                 res = AllocateDedicatedMemory(
14558                     pool,
14559                     size,
14560                     suballocType,
14561                     dedicatedAllocations,
14562                     memTypeIndex,
14563                     (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14564                     (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14565                     (finalCreateInfo.flags &
14566                         (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0,
14567                     (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0,
14568                     finalCreateInfo.pUserData,
14569                     finalCreateInfo.priority,
14570                     dedicatedBuffer,
14571                     dedicatedImage,
14572                     dedicatedBufferImageUsage,
14573                     allocationCount,
14574                     pAllocations,
14575                     blockVector.GetAllocationNextPtr());
14576                 if(res == VK_SUCCESS)
14577                 {
14578                     // Succeeded: AllocateDedicatedMemory already filled pAllocations, nothing more to do here.
14579                     VMA_DEBUG_LOG("    Allocated as DedicatedMemory");
14580                     return VK_SUCCESS;
14581                 }
14582             }
14583         }
14584 
14585         res = blockVector.Allocate(
14586             size,
14587             alignment,
14588             finalCreateInfo,
14589             suballocType,
14590             allocationCount,
14591             pAllocations);
14592         if(res == VK_SUCCESS)
14593             return VK_SUCCESS;
14594 
14595         // Try dedicated memory.
14596         if(canAllocateDedicated && !dedicatedPreferred)
14597         {
14598             res = AllocateDedicatedMemory(
14599                 pool,
14600                 size,
14601                 suballocType,
14602                 dedicatedAllocations,
14603                 memTypeIndex,
14604                 (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14605                 (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14606                 (finalCreateInfo.flags &
14607                     (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0,
14608                 (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0,
14609                 finalCreateInfo.pUserData,
14610                 finalCreateInfo.priority,
14611                 dedicatedBuffer,
14612                 dedicatedImage,
14613                 dedicatedBufferImageUsage,
14614                 allocationCount,
14615                 pAllocations,
14616                 blockVector.GetAllocationNextPtr());
14617             if(res == VK_SUCCESS)
14618             {
14619                 // Succeeded: AllocateDedicatedMemory already filled pAllocations, nothing more to do here.
14620                 VMA_DEBUG_LOG("    Allocated as DedicatedMemory");
14621                 return VK_SUCCESS;
14622             }
14623         }
14624         // Everything failed: Return error code.
14625         VMA_DEBUG_LOG("    vkAllocateMemory FAILED");
14626         return res;
14627     }
14628 }
14629 
14630 VkResult VmaAllocator_T::AllocateDedicatedMemory(
14631     VmaPool pool,
14632     VkDeviceSize size,
14633     VmaSuballocationType suballocType,
14634     VmaDedicatedAllocationList& dedicatedAllocations,
14635     uint32_t memTypeIndex,
14636     bool map,
14637     bool isUserDataString,
14638     bool isMappingAllowed,
14639     bool canAliasMemory,
14640     void* pUserData,
14641     float priority,
14642     VkBuffer dedicatedBuffer,
14643     VkImage dedicatedImage,
14644     VkFlags dedicatedBufferImageUsage,
14645     size_t allocationCount,
14646     VmaAllocation* pAllocations,
14647     const void* pNextChain)
14648 {
14649     VMA_ASSERT(allocationCount > 0 && pAllocations);
14650 
14651     VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
14652     allocInfo.memoryTypeIndex = memTypeIndex;
14653     allocInfo.allocationSize = size;
14654     allocInfo.pNext = pNextChain;
14655 
14656 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
14657     VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
14658     if(!canAliasMemory)
14659     {
14660         if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
14661         {
14662             if(dedicatedBuffer != VK_NULL_HANDLE)
14663             {
14664                 VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
14665                 dedicatedAllocInfo.buffer = dedicatedBuffer;
14666                 VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
14667             }
14668             else if(dedicatedImage != VK_NULL_HANDLE)
14669             {
14670                 dedicatedAllocInfo.image = dedicatedImage;
14671                 VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
14672             }
14673         }
14674     }
14675 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
14676 
14677 #if VMA_BUFFER_DEVICE_ADDRESS
14678     VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
14679     if(m_UseKhrBufferDeviceAddress)
14680     {
14681         bool canContainBufferWithDeviceAddress = true;
14682         if(dedicatedBuffer != VK_NULL_HANDLE)
14683         {
14684             canContainBufferWithDeviceAddress = dedicatedBufferImageUsage == UINT32_MAX || // Usage flags unknown
14685                 (dedicatedBufferImageUsage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT) != 0;
14686         }
14687         else if(dedicatedImage != VK_NULL_HANDLE)
14688         {
14689             canContainBufferWithDeviceAddress = false;
14690         }
14691         if(canContainBufferWithDeviceAddress)
14692         {
14693             allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
14694             VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
14695         }
14696     }
14697 #endif // #if VMA_BUFFER_DEVICE_ADDRESS
14698 
14699 #if VMA_MEMORY_PRIORITY
14700     VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT };
14701     if(m_UseExtMemoryPriority)
14702     {
14703         VMA_ASSERT(priority >= 0.f && priority <= 1.f);
14704         priorityInfo.priority = priority;
14705         VmaPnextChainPushFront(&allocInfo, &priorityInfo);
14706     }
14707 #endif // #if VMA_MEMORY_PRIORITY
14708 
14709 #if VMA_EXTERNAL_MEMORY
14710     // Attach VkExportMemoryAllocateInfoKHR if necessary.
14711     VkExportMemoryAllocateInfoKHR exportMemoryAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR };
14712     exportMemoryAllocInfo.handleTypes = GetExternalMemoryHandleTypeFlags(memTypeIndex);
14713     if(exportMemoryAllocInfo.handleTypes != 0)
14714     {
14715         VmaPnextChainPushFront(&allocInfo, &exportMemoryAllocInfo);
14716     }
14717 #endif // #if VMA_EXTERNAL_MEMORY
14718 
14719     size_t allocIndex;
14720     VkResult res = VK_SUCCESS;
14721     for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14722     {
14723         res = AllocateDedicatedMemoryPage(
14724             pool,
14725             size,
14726             suballocType,
14727             memTypeIndex,
14728             allocInfo,
14729             map,
14730             isUserDataString,
14731             isMappingAllowed,
14732             pUserData,
14733             pAllocations + allocIndex);
14734         if(res != VK_SUCCESS)
14735         {
14736             break;
14737         }
14738     }
14739 
14740     if(res == VK_SUCCESS)
14741     {
14742         for (allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14743         {
14744             dedicatedAllocations.Register(pAllocations[allocIndex]);
14745         }
14746         VMA_DEBUG_LOG_FORMAT("    Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
14747     }
14748     else
14749     {
14750         // Free all already created allocations.
14751         while(allocIndex--)
14752         {
14753             VmaAllocation currAlloc = pAllocations[allocIndex];
14754             VkDeviceMemory hMemory = currAlloc->GetMemory();
14755 
14756             /*
14757             There is no need to call this, because the Vulkan spec allows skipping
14758             vkUnmapMemory before vkFreeMemory.
14759 
14760             if(currAlloc->GetMappedData() != VMA_NULL)
14761             {
14762                 (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
14763             }
14764             */
14765 
14766             FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
14767             m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize());
14768             m_AllocationObjectAllocator.Free(currAlloc);
14769         }
14770 
14771         memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
14772     }
14773 
14774     return res;
14775 }
14776 
14777 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
14778     VmaPool pool,
14779     VkDeviceSize size,
14780     VmaSuballocationType suballocType,
14781     uint32_t memTypeIndex,
14782     const VkMemoryAllocateInfo& allocInfo,
14783     bool map,
14784     bool isUserDataString,
14785     bool isMappingAllowed,
14786     void* pUserData,
14787     VmaAllocation* pAllocation)
14788 {
14789     VkDeviceMemory hMemory = VK_NULL_HANDLE;
14790     VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
14791     if(res < 0)
14792     {
14793         VMA_DEBUG_LOG("    vkAllocateMemory FAILED");
14794         return res;
14795     }
14796 
14797     void* pMappedData = VMA_NULL;
14798     if(map)
14799     {
14800         res = (*m_VulkanFunctions.vkMapMemory)(
14801             m_hDevice,
14802             hMemory,
14803             0,
14804             VK_WHOLE_SIZE,
14805             0,
14806             &pMappedData);
14807         if(res < 0)
14808         {
14809             VMA_DEBUG_LOG("    vkMapMemory FAILED");
14810             FreeVulkanMemory(memTypeIndex, size, hMemory);
14811             return res;
14812         }
14813     }
14814 
14815     *pAllocation = m_AllocationObjectAllocator.Allocate(isMappingAllowed);
14816     (*pAllocation)->InitDedicatedAllocation(pool, memTypeIndex, hMemory, suballocType, pMappedData, size);
14817     if (isUserDataString)
14818         (*pAllocation)->SetName(this, (const char*)pUserData);
14819     else
14820         (*pAllocation)->SetUserData(this, pUserData);
14821     m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size);
14822     if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14823     {
14824         FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
14825     }
14826 
14827     return VK_SUCCESS;
14828 }
14829 
14830 void VmaAllocator_T::GetBufferMemoryRequirements(
14831     VkBuffer hBuffer,
14832     VkMemoryRequirements& memReq,
14833     bool& requiresDedicatedAllocation,
14834     bool& prefersDedicatedAllocation) const
14835 {
14836 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
14837     if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
14838     {
14839         VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
14840         memReqInfo.buffer = hBuffer;
14841 
14842         VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14843 
14844         VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14845         VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
14846 
14847         (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14848 
14849         memReq = memReq2.memoryRequirements;
14850         requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14851         prefersDedicatedAllocation  = (memDedicatedReq.prefersDedicatedAllocation  != VK_FALSE);
14852     }
14853     else
14854 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
14855     {
14856         (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
14857         requiresDedicatedAllocation = false;
14858         prefersDedicatedAllocation  = false;
14859     }
14860 }
14861 
14862 void VmaAllocator_T::GetImageMemoryRequirements(
14863     VkImage hImage,
14864     VkMemoryRequirements& memReq,
14865     bool& requiresDedicatedAllocation,
14866     bool& prefersDedicatedAllocation) const
14867 {
14868 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
14869     if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
14870     {
14871         VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
14872         memReqInfo.image = hImage;
14873 
14874         VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14875 
14876         VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14877         VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
14878 
14879         (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14880 
14881         memReq = memReq2.memoryRequirements;
14882         requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14883         prefersDedicatedAllocation  = (memDedicatedReq.prefersDedicatedAllocation  != VK_FALSE);
14884     }
14885     else
14886 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
14887     {
14888         (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
14889         requiresDedicatedAllocation = false;
14890         prefersDedicatedAllocation  = false;
14891     }
14892 }
14893 
14894 VkResult VmaAllocator_T::FindMemoryTypeIndex(
14895     uint32_t memoryTypeBits,
14896     const VmaAllocationCreateInfo* pAllocationCreateInfo,
14897     VkFlags bufImgUsage,
14898     uint32_t* pMemoryTypeIndex) const
14899 {
14900     memoryTypeBits &= GetGlobalMemoryTypeBits();
14901 
14902     if(pAllocationCreateInfo->memoryTypeBits != 0)
14903     {
14904         memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
14905     }
14906 
14907     VkMemoryPropertyFlags requiredFlags = 0, preferredFlags = 0, notPreferredFlags = 0;
14908     if(!FindMemoryPreferences(
14909         IsIntegratedGpu(),
14910         *pAllocationCreateInfo,
14911         bufImgUsage,
14912         requiredFlags, preferredFlags, notPreferredFlags))
14913     {
14914         return VK_ERROR_FEATURE_NOT_PRESENT;
14915     }
14916 
14917     *pMemoryTypeIndex = UINT32_MAX;
14918     uint32_t minCost = UINT32_MAX;
14919     for(uint32_t memTypeIndex = 0, memTypeBit = 1;
14920         memTypeIndex < GetMemoryTypeCount();
14921         ++memTypeIndex, memTypeBit <<= 1)
14922     {
14923         // This memory type is acceptable according to memoryTypeBits bitmask.
14924         if((memTypeBit & memoryTypeBits) != 0)
14925         {
14926             const VkMemoryPropertyFlags currFlags =
14927                 m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
14928             // This memory type contains requiredFlags.
14929             if((requiredFlags & ~currFlags) == 0)
14930             {
14931                 // Calculate cost as the number of preferredFlags bits missing from this memory type plus the number of notPreferredFlags bits it has.
14932                 uint32_t currCost = VMA_COUNT_BITS_SET(preferredFlags & ~currFlags) +
14933                     VMA_COUNT_BITS_SET(currFlags & notPreferredFlags);
14934                 // Remember memory type with lowest cost.
14935                 if(currCost < minCost)
14936                 {
14937                     *pMemoryTypeIndex = memTypeIndex;
14938                     if(currCost == 0)
14939                     {
14940                         return VK_SUCCESS;
14941                     }
14942                     minCost = currCost;
14943                 }
14944             }
14945         }
14946     }
14947     return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
14948 }
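/*
Worked example of the cost metric above (flag sets are illustrative):

    preferredFlags    = HOST_COHERENT | HOST_CACHED
    notPreferredFlags = DEVICE_LOCAL

    type A = HOST_VISIBLE | HOST_COHERENT                -> cost 1 (HOST_CACHED missing)
    type B = HOST_VISIBLE | HOST_COHERENT | DEVICE_LOCAL -> cost 2 (HOST_CACHED missing, DEVICE_LOCAL present)

Type A wins. A candidate with cost 0 returns immediately without scanning the
remaining memory types.
*/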
14949 
14950 VkResult VmaAllocator_T::CalcMemTypeParams(
14951     VmaAllocationCreateInfo& inoutCreateInfo,
14952     uint32_t memTypeIndex,
14953     VkDeviceSize size,
14954     size_t allocationCount)
14955 {
14956     // If memory type is not HOST_VISIBLE, disable MAPPED.
14957     if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14958         (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
14959     {
14960         inoutCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
14961     }
14962 
14963     if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
14964         (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0)
14965     {
14966         const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14967         VmaBudget heapBudget = {};
14968         GetHeapBudgets(&heapBudget, heapIndex, 1);
14969         if(heapBudget.usage + size * allocationCount > heapBudget.budget)
14970         {
14971             return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14972         }
14973     }
14974     return VK_SUCCESS;
14975 }
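/*
A minimal sketch of opting into this budget check from user code (assuming an
allocator created with VK_EXT_memory_budget support; the variable name is
hypothetical):

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT |
        VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT;
    // An allocation that would push the heap past its budget now fails early
    // with VK_ERROR_OUT_OF_DEVICE_MEMORY instead of being attempted.
*/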
14976 
14977 VkResult VmaAllocator_T::CalcAllocationParams(
14978     VmaAllocationCreateInfo& inoutCreateInfo,
14979     bool dedicatedRequired,
14980     bool dedicatedPreferred)
14981 {
14982     VMA_ASSERT((inoutCreateInfo.flags &
14983         (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) !=
14984         (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT) &&
14985         "Specifying both flags VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT and VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT is incorrect.");
14986     VMA_ASSERT((((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT) == 0 ||
14987         (inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0)) &&
14988         "Specifying VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT requires also VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.");
14989     if(inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO || inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE || inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_HOST)
14990     {
14991         if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0)
14992         {
14993             VMA_ASSERT((inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0 &&
14994                 "When using VMA_ALLOCATION_CREATE_MAPPED_BIT and usage = VMA_MEMORY_USAGE_AUTO*, you must also specify VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.");
14995         }
14996     }
14997 
14998     // If memory is lazily allocated, it should be always dedicated.
14999     if(dedicatedRequired ||
15000         inoutCreateInfo.usage == VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED)
15001     {
15002         inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
15003     }
15004 
15005     if(inoutCreateInfo.pool != VK_NULL_HANDLE)
15006     {
15007         if(inoutCreateInfo.pool->m_BlockVector.HasExplicitBlockSize() &&
15008             (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
15009         {
15010             VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT while current custom pool doesn't support dedicated allocations.");
15011             return VK_ERROR_FEATURE_NOT_PRESENT;
15012         }
15013         inoutCreateInfo.priority = inoutCreateInfo.pool->m_BlockVector.GetPriority();
15014     }
15015 
15016     if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
15017         (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
15018     {
15019         VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
15020         return VK_ERROR_FEATURE_NOT_PRESENT;
15021     }
15022 
15023     if(VMA_DEBUG_ALWAYS_DEDICATED_MEMORY &&
15024         (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
15025     {
15026         inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
15027     }
15028 
15029     // Non-auto USAGE values imply HOST_ACCESS flags, and so does VMA_MEMORY_USAGE_UNKNOWN
15030     // because it is used with custom pools. Which specific flag is used doesn't matter:
15031     // the flags change behavior only with VMA_MEMORY_USAGE_AUTO*; otherwise they merely
15032     // prevent an assert when the allocation is mapped.
15033     if(inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO &&
15034         inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE &&
15035         inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO_PREFER_HOST)
15036     {
15037         if((inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) == 0)
15038         {
15039             inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT;
15040         }
15041     }
15042 
15043     return VK_SUCCESS;
15044 }
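/*
A minimal sketch of a createInfo that satisfies the assertions above when
passed to vmaCreateBuffer() (buffer size and variable names are illustrative):

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
    // MAPPED combined with usage = AUTO requires exactly one HOST_ACCESS flag:
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
        VMA_ALLOCATION_CREATE_MAPPED_BIT;

    VkBuffer buf;
    VmaAllocation alloc;
    VmaAllocationInfo allocInfo;
    vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
    // allocInfo.pMappedData remains valid for the allocation's lifetime.
*/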
15045 
15046 VkResult VmaAllocator_T::AllocateMemory(
15047     const VkMemoryRequirements& vkMemReq,
15048     bool requiresDedicatedAllocation,
15049     bool prefersDedicatedAllocation,
15050     VkBuffer dedicatedBuffer,
15051     VkImage dedicatedImage,
15052     VkFlags dedicatedBufferImageUsage,
15053     const VmaAllocationCreateInfo& createInfo,
15054     VmaSuballocationType suballocType,
15055     size_t allocationCount,
15056     VmaAllocation* pAllocations)
15057 {
15058     memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
15059 
15060     VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
15061 
15062     if(vkMemReq.size == 0)
15063     {
15064         return VK_ERROR_INITIALIZATION_FAILED;
15065     }
15066 
15067     VmaAllocationCreateInfo createInfoFinal = createInfo;
15068     VkResult res = CalcAllocationParams(createInfoFinal, requiresDedicatedAllocation, prefersDedicatedAllocation);
15069     if(res != VK_SUCCESS)
15070         return res;
15071 
15072     if(createInfoFinal.pool != VK_NULL_HANDLE)
15073     {
15074         VmaBlockVector& blockVector = createInfoFinal.pool->m_BlockVector;
15075         return AllocateMemoryOfType(
15076             createInfoFinal.pool,
15077             vkMemReq.size,
15078             vkMemReq.alignment,
15079             prefersDedicatedAllocation,
15080             dedicatedBuffer,
15081             dedicatedImage,
15082             dedicatedBufferImageUsage,
15083             createInfoFinal,
15084             blockVector.GetMemoryTypeIndex(),
15085             suballocType,
15086             createInfoFinal.pool->m_DedicatedAllocations,
15087             blockVector,
15088             allocationCount,
15089             pAllocations);
15090     }
15091     else
15092     {
15093         // Bit mask of Vulkan memory types acceptable for this allocation.
15094         uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
15095         uint32_t memTypeIndex = UINT32_MAX;
15096         res = FindMemoryTypeIndex(memoryTypeBits, &createInfoFinal, dedicatedBufferImageUsage, &memTypeIndex);
15097         // If no single memory type matched the requirements, res is VK_ERROR_FEATURE_NOT_PRESENT.
15098         if(res != VK_SUCCESS)
15099             return res;
15100         do
15101         {
15102             VmaBlockVector* blockVector = m_pBlockVectors[memTypeIndex];
15103             VMA_ASSERT(blockVector && "Trying to use unsupported memory type!");
15104             res = AllocateMemoryOfType(
15105                 VK_NULL_HANDLE,
15106                 vkMemReq.size,
15107                 vkMemReq.alignment,
15108                 requiresDedicatedAllocation || prefersDedicatedAllocation,
15109                 dedicatedBuffer,
15110                 dedicatedImage,
15111                 dedicatedBufferImageUsage,
15112                 createInfoFinal,
15113                 memTypeIndex,
15114                 suballocType,
15115                 m_DedicatedAllocations[memTypeIndex],
15116                 *blockVector,
15117                 allocationCount,
15118                 pAllocations);
15119             // Allocation succeeded
15120             if(res == VK_SUCCESS)
15121                 return VK_SUCCESS;
15122 
15123             // Remove old memTypeIndex from list of possibilities.
15124             memoryTypeBits &= ~(1u << memTypeIndex);
15125             // Find alternative memTypeIndex.
15126             res = FindMemoryTypeIndex(memoryTypeBits, &createInfoFinal, dedicatedBufferImageUsage, &memTypeIndex);
15127         } while(res == VK_SUCCESS);
15128 
15129         // No other matching memory type index could be found.
15130         // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
15131         return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15132     }
15133 }
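/*
Example of the fallback loop above (illustrative): if memory types {1, 4, 7}
match the requirements and type 4 has the lowest cost but its heap is
exhausted, the first AllocateMemoryOfType() call fails, bit 4 is cleared from
memoryTypeBits, and FindMemoryTypeIndex() then picks the cheaper of types 1
and 7. Only when every matching type has failed does the function return
VK_ERROR_OUT_OF_DEVICE_MEMORY.
*/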
15134 
15135 void VmaAllocator_T::FreeMemory(
15136     size_t allocationCount,
15137     const VmaAllocation* pAllocations)
15138 {
15139     VMA_ASSERT(pAllocations);
15140 
15141     for(size_t allocIndex = allocationCount; allocIndex--; )
15142     {
15143         VmaAllocation allocation = pAllocations[allocIndex];
15144 
15145         if(allocation != VK_NULL_HANDLE)
15146         {
15147             if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
15148             {
15149                 FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
15150             }
15151 
15152             allocation->FreeName(this);
15153 
15154             switch(allocation->GetType())
15155             {
15156             case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15157                 {
15158                     VmaBlockVector* pBlockVector = VMA_NULL;
15159                     VmaPool hPool = allocation->GetParentPool();
15160                     if(hPool != VK_NULL_HANDLE)
15161                     {
15162                         pBlockVector = &hPool->m_BlockVector;
15163                     }
15164                     else
15165                     {
15166                         const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
15167                         pBlockVector = m_pBlockVectors[memTypeIndex];
15168                         VMA_ASSERT(pBlockVector && "Trying to free memory of unsupported type!");
15169                     }
15170                     pBlockVector->Free(allocation);
15171                 }
15172                 break;
15173             case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15174                 FreeDedicatedMemory(allocation);
15175                 break;
15176             default:
15177                 VMA_ASSERT(0);
15178             }
15179         }
15180     }
15181 }
15182 
15183 void VmaAllocator_T::CalculateStatistics(VmaTotalStatistics* pStats)
15184 {
15185     // Initialize.
15186     VmaClearDetailedStatistics(pStats->total);
15187     for(uint32_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
15188         VmaClearDetailedStatistics(pStats->memoryType[i]);
15189     for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
15190         VmaClearDetailedStatistics(pStats->memoryHeap[i]);
15191 
15192     // Process default pools.
15193     for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15194     {
15195         VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
15196         if (pBlockVector != VMA_NULL)
15197             pBlockVector->AddDetailedStatistics(pStats->memoryType[memTypeIndex]);
15198     }
15199 
15200     // Process custom pools.
15201     {
15202         VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15203         for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
15204         {
15205             VmaBlockVector& blockVector = pool->m_BlockVector;
15206             const uint32_t memTypeIndex = blockVector.GetMemoryTypeIndex();
15207             blockVector.AddDetailedStatistics(pStats->memoryType[memTypeIndex]);
15208             pool->m_DedicatedAllocations.AddDetailedStatistics(pStats->memoryType[memTypeIndex]);
15209         }
15210     }
15211 
15212     // Process dedicated allocations.
15213     for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15214     {
15215         m_DedicatedAllocations[memTypeIndex].AddDetailedStatistics(pStats->memoryType[memTypeIndex]);
15216     }
15217 
15218     // Sum from memory types to memory heaps.
15219     for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15220     {
15221         const uint32_t memHeapIndex = m_MemProps.memoryTypes[memTypeIndex].heapIndex;
15222         VmaAddDetailedStatistics(pStats->memoryHeap[memHeapIndex], pStats->memoryType[memTypeIndex]);
15223     }
15224 
15225     // Sum from memory heaps to total.
15226     for(uint32_t memHeapIndex = 0; memHeapIndex < GetMemoryHeapCount(); ++memHeapIndex)
15227         VmaAddDetailedStatistics(pStats->total, pStats->memoryHeap[memHeapIndex]);
15228 
15229     VMA_ASSERT(pStats->total.statistics.allocationCount == 0 ||
15230         pStats->total.allocationSizeMax >= pStats->total.allocationSizeMin);
15231     VMA_ASSERT(pStats->total.unusedRangeCount == 0 ||
15232         pStats->total.unusedRangeSizeMax >= pStats->total.unusedRangeSizeMin);
15233 }
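/*
A minimal sketch of querying these statistics through the public API:

    VmaTotalStatistics stats;
    vmaCalculateStatistics(allocator, &stats);
    printf("Allocated: %llu B in %u allocations\n",
        (unsigned long long)stats.total.statistics.allocationBytes,
        stats.total.statistics.allocationCount);

This walks every block and pool under locks, so it is intended for debugging
or occasional use; per-frame code should prefer vmaGetHeapBudgets().
*/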
15234 
15235 void VmaAllocator_T::GetHeapBudgets(VmaBudget* outBudgets, uint32_t firstHeap, uint32_t heapCount)
15236 {
15237 #if VMA_MEMORY_BUDGET
15238     if(m_UseExtMemoryBudget)
15239     {
15240         if(m_Budget.m_OperationsSinceBudgetFetch < 30)
15241         {
15242             VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex);
15243             for(uint32_t i = 0; i < heapCount; ++i, ++outBudgets)
15244             {
15245                 const uint32_t heapIndex = firstHeap + i;
15246 
15247                 outBudgets->statistics.blockCount = m_Budget.m_BlockCount[heapIndex];
15248                 outBudgets->statistics.allocationCount = m_Budget.m_AllocationCount[heapIndex];
15249                 outBudgets->statistics.blockBytes = m_Budget.m_BlockBytes[heapIndex];
15250                 outBudgets->statistics.allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
15251 
15252                 if(m_Budget.m_VulkanUsage[heapIndex] + outBudgets->statistics.blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex])
15253                 {
15254                     outBudgets->usage = m_Budget.m_VulkanUsage[heapIndex] +
15255                         outBudgets->statistics.blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
15256                 }
15257                 else
15258                 {
15259                     outBudgets->usage = 0;
15260                 }
15261 
15262                 // Have to take MIN with the heap size, because an explicit HeapSizeLimit is already reflected in it.
15263                 outBudgets->budget = VMA_MIN(
15264                     m_Budget.m_VulkanBudget[heapIndex], m_MemProps.memoryHeaps[heapIndex].size);
15265             }
15266         }
15267         else
15268         {
15269             UpdateVulkanBudget(); // Outside of mutex lock
15270             GetHeapBudgets(outBudgets, firstHeap, heapCount); // Recursion
15271         }
15272     }
15273     else
15274 #endif
15275     {
15276         for(uint32_t i = 0; i < heapCount; ++i, ++outBudgets)
15277         {
15278             const uint32_t heapIndex = firstHeap + i;
15279 
15280             outBudgets->statistics.blockCount = m_Budget.m_BlockCount[heapIndex];
15281             outBudgets->statistics.allocationCount = m_Budget.m_AllocationCount[heapIndex];
15282             outBudgets->statistics.blockBytes = m_Budget.m_BlockBytes[heapIndex];
15283             outBudgets->statistics.allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
15284 
15285             outBudgets->usage = outBudgets->statistics.blockBytes;
15286             outBudgets->budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics.
15287         }
15288     }
15289 }
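/*
A minimal sketch of the corresponding public query, which fills one VmaBudget
per memory heap:

    VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
    vmaGetHeapBudgets(allocator, budgets);
    // Comparing budgets[heapIndex].usage against budgets[heapIndex].budget
    // tells how close the heap is to its limit.

Without VK_EXT_memory_budget, `usage` falls back to the bytes in allocated
blocks and `budget` to the 80%-of-heap-size heuristic computed above.
*/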
15290 
15291 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
15292 {
15293     pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
15294     pAllocationInfo->deviceMemory = hAllocation->GetMemory();
15295     pAllocationInfo->offset = hAllocation->GetOffset();
15296     pAllocationInfo->size = hAllocation->GetSize();
15297     pAllocationInfo->pMappedData = hAllocation->GetMappedData();
15298     pAllocationInfo->pUserData = hAllocation->GetUserData();
15299     pAllocationInfo->pName = hAllocation->GetName();
15300 }
15301 
15302 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
15303 {
15304     VMA_DEBUG_LOG_FORMAT("  CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
15305 
15306     VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
15307 
15308     // Protection against an uninitialized new structure member. If garbage data is left there, dereferencing this pointer would crash.
15309     if(pCreateInfo->pMemoryAllocateNext)
15310     {
15311         VMA_ASSERT(((const VkBaseInStructure*)pCreateInfo->pMemoryAllocateNext)->sType != 0);
15312     }
15313 
15314     if(newCreateInfo.maxBlockCount == 0)
15315     {
15316         newCreateInfo.maxBlockCount = SIZE_MAX;
15317     }
15318     if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
15319     {
15320         return VK_ERROR_INITIALIZATION_FAILED;
15321     }
15322     // Memory type index out of range or forbidden.
15323     if(pCreateInfo->memoryTypeIndex >= GetMemoryTypeCount() ||
15324         ((1u << pCreateInfo->memoryTypeIndex) & m_GlobalMemoryTypeBits) == 0)
15325     {
15326         return VK_ERROR_FEATURE_NOT_PRESENT;
15327     }
15328     if(newCreateInfo.minAllocationAlignment > 0)
15329     {
15330         VMA_ASSERT(VmaIsPow2(newCreateInfo.minAllocationAlignment));
15331     }
15332 
15333     const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
15334 
15335     *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
15336 
15337     VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
15338     if(res != VK_SUCCESS)
15339     {
15340         vma_delete(this, *pPool);
15341         *pPool = VMA_NULL;
15342         return res;
15343     }
15344 
15345     // Add to m_Pools.
15346     {
15347         VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15348         (*pPool)->SetId(m_NextPoolId++);
15349         m_Pools.PushBack(*pPool);
15350     }
15351 
15352     return VK_SUCCESS;
15353 }
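/*
A minimal sketch of creating a custom pool through the public API
(sampleBufCreateInfo, sampleAllocCreateInfo, and the sizes are hypothetical):

    VmaPoolCreateInfo poolCreateInfo = {};
    vmaFindMemoryTypeIndexForBufferInfo(allocator,
        &sampleBufCreateInfo, &sampleAllocCreateInfo, &poolCreateInfo.memoryTypeIndex);
    poolCreateInfo.blockSize = 128ull * 1024 * 1024; // Optional fixed block size.
    poolCreateInfo.maxBlockCount = 8; // 0 means unlimited (SIZE_MAX above).

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // Allocate from it via allocCreateInfo.pool = pool; destroy with vmaDestroyPool().
*/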
15354 
15355 void VmaAllocator_T::DestroyPool(VmaPool pool)
15356 {
15357     // Remove from m_Pools.
15358     {
15359         VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15360         m_Pools.Remove(pool);
15361     }
15362 
15363     vma_delete(this, pool);
15364 }
15365 
15366 void VmaAllocator_T::GetPoolStatistics(VmaPool pool, VmaStatistics* pPoolStats)
15367 {
15368     VmaClearStatistics(*pPoolStats);
15369     pool->m_BlockVector.AddStatistics(*pPoolStats);
15370     pool->m_DedicatedAllocations.AddStatistics(*pPoolStats);
15371 }
15372 
15373 void VmaAllocator_T::CalculatePoolStatistics(VmaPool pool, VmaDetailedStatistics* pPoolStats)
15374 {
15375     VmaClearDetailedStatistics(*pPoolStats);
15376     pool->m_BlockVector.AddDetailedStatistics(*pPoolStats);
15377     pool->m_DedicatedAllocations.AddDetailedStatistics(*pPoolStats);
15378 }
15379 
15380 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
15381 {
15382     m_CurrentFrameIndex.store(frameIndex);
15383 
15384 #if VMA_MEMORY_BUDGET
15385     if(m_UseExtMemoryBudget)
15386     {
15387         UpdateVulkanBudget();
15388     }
15389 #endif // #if VMA_MEMORY_BUDGET
15390 }
15391 
15392 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
15393 {
15394     return hPool->m_BlockVector.CheckCorruption();
15395 }
15396 
15397 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
15398 {
15399     VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
15400 
15401     // Process default pools.
15402     for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15403     {
15404         VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
15405         if(pBlockVector != VMA_NULL)
15406         {
15407             VkResult localRes = pBlockVector->CheckCorruption();
15408             switch(localRes)
15409             {
15410             case VK_ERROR_FEATURE_NOT_PRESENT:
15411                 break;
15412             case VK_SUCCESS:
15413                 finalRes = VK_SUCCESS;
15414                 break;
15415             default:
15416                 return localRes;
15417             }
15418         }
15419     }
15420 
15421     // Process custom pools.
15422     {
15423         VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15424         for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
15425         {
15426             if(((1u << pool->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
15427             {
15428                 VkResult localRes = pool->m_BlockVector.CheckCorruption();
15429                 switch(localRes)
15430                 {
15431                 case VK_ERROR_FEATURE_NOT_PRESENT:
15432                     break;
15433                 case VK_SUCCESS:
15434                     finalRes = VK_SUCCESS;
15435                     break;
15436                 default:
15437                     return localRes;
15438                 }
15439             }
15440         }
15441     }
15442 
15443     return finalRes;
15444 }
15445 
15446 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
15447 {
15448     AtomicTransactionalIncrement<VMA_ATOMIC_UINT32> deviceMemoryCountIncrement;
15449     const uint64_t prevDeviceMemoryCount = deviceMemoryCountIncrement.Increment(&m_DeviceMemoryCount);
15450 #if VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT
15451     if(prevDeviceMemoryCount >= m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount)
15452     {
15453         return VK_ERROR_TOO_MANY_OBJECTS;
15454     }
15455 #endif
15456 
15457     const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
15458 
15459     // HeapSizeLimit is in effect for this heap.
15460     if((m_HeapSizeLimitMask & (1u << heapIndex)) != 0)
15461     {
15462         const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
15463         VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex];
15464         for(;;)
15465         {
15466             const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize;
15467             if(blockBytesAfterAllocation > heapSize)
15468             {
15469                 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15470             }
15471             if(m_Budget.m_BlockBytes[heapIndex].compare_exchange_strong(blockBytes, blockBytesAfterAllocation))
15472             {
15473                 break;
15474             }
15475         }
15476     }
15477     else
15478     {
15479         m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize;
15480     }
15481     ++m_Budget.m_BlockCount[heapIndex];
15482 
15483     // VULKAN CALL vkAllocateMemory.
15484     VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
15485 
15486     if(res == VK_SUCCESS)
15487     {
15488 #if VMA_MEMORY_BUDGET
15489         ++m_Budget.m_OperationsSinceBudgetFetch;
15490 #endif
15491 
15492         // Informative callback.
15493         if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
15494         {
15495             (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize, m_DeviceMemoryCallbacks.pUserData);
15496         }
15497 
15498         deviceMemoryCountIncrement.Commit();
15499     }
15500     else
15501     {
15502         --m_Budget.m_BlockCount[heapIndex];
15503         m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize;
15504     }
15505 
15506     return res;
15507 }
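/*
Note on the HeapSizeLimit path above: it is a classic compare-and-swap retry
loop. The prospective total is recomputed against the latest value of
m_BlockBytes[heapIndex] until no other thread has modified it in between, so
the limit cannot be exceeded under concurrent allocation.
compare_exchange_strong() reloads `blockBytes` with the current value on
failure, which is why the loop body needs no explicit re-read.
*/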
15508 
15509 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
15510 {
15511     // Informative callback.
15512     if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
15513     {
15514         (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size, m_DeviceMemoryCallbacks.pUserData);
15515     }
15516 
15517     // VULKAN CALL vkFreeMemory.
15518     (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
15519 
15520     const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
15521     --m_Budget.m_BlockCount[heapIndex];
15522     m_Budget.m_BlockBytes[heapIndex] -= size;
15523 
15524     --m_DeviceMemoryCount;
15525 }
15526 
15527 VkResult VmaAllocator_T::BindVulkanBuffer(
15528     VkDeviceMemory memory,
15529     VkDeviceSize memoryOffset,
15530     VkBuffer buffer,
15531     const void* pNext)
15532 {
15533     if(pNext != VMA_NULL)
15534     {
15535 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
15536         if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
15537             m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL)
15538         {
15539             VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR };
15540             bindBufferMemoryInfo.pNext = pNext;
15541             bindBufferMemoryInfo.buffer = buffer;
15542             bindBufferMemoryInfo.memory = memory;
15543             bindBufferMemoryInfo.memoryOffset = memoryOffset;
15544             return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
15545         }
15546         else
15547 #endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
15548         {
15549             return VK_ERROR_EXTENSION_NOT_PRESENT;
15550         }
15551     }
15552     else
15553     {
15554         return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset);
15555     }
15556 }
15557 
15558 VkResult VmaAllocator_T::BindVulkanImage(
15559     VkDeviceMemory memory,
15560     VkDeviceSize memoryOffset,
15561     VkImage image,
15562     const void* pNext)
15563 {
15564     if(pNext != VMA_NULL)
15565     {
15566 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
15567         if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
15568             m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL)
15569         {
15570             VkBindImageMemoryInfoKHR bindImageMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR };
15571             bindImageMemoryInfo.pNext = pNext;
15572             bindImageMemoryInfo.image = image;
15573             bindImageMemoryInfo.memory = memory;
15574             bindImageMemoryInfo.memoryOffset = memoryOffset;
15575             return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindImageMemoryInfo);
15576         }
15577         else
15578 #endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
15579         {
15580             return VK_ERROR_EXTENSION_NOT_PRESENT;
15581         }
15582     }
15583     else
15584     {
15585         return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset);
15586     }
15587 }
15588 
15589 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
15590 {
15591     switch(hAllocation->GetType())
15592     {
15593     case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15594         {
15595             VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15596             char *pBytes = VMA_NULL;
15597             VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
15598             if(res == VK_SUCCESS)
15599             {
15600                 *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
15601                 hAllocation->BlockAllocMap();
15602             }
15603             return res;
15604         }
15605     case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15606         return hAllocation->DedicatedAllocMap(this, ppData);
15607     default:
15608         VMA_ASSERT(0);
15609         return VK_ERROR_MEMORY_MAP_FAILED;
15610     }
15611 }
15612 
15613 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
15614 {
15615     switch(hAllocation->GetType())
15616     {
15617     case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15618         {
15619             VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15620             hAllocation->BlockAllocUnmap();
15621             pBlock->Unmap(this, 1);
15622         }
15623         break;
15624     case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15625         hAllocation->DedicatedAllocUnmap(this);
15626         break;
15627     default:
15628         VMA_ASSERT(0);
15629     }
15630 }
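/*
A minimal sketch of the public mapping API. Mapping is reference-counted for
block allocations, so nested map/unmap pairs on allocations sharing one
VkDeviceMemory block are legal (srcData/srcSize are hypothetical):

    void* data = VMA_NULL;
    if(vmaMapMemory(allocator, alloc, &data) == VK_SUCCESS)
    {
        memcpy(data, srcData, (size_t)srcSize);
        vmaUnmapMemory(allocator, alloc);
    }
*/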
15631 
15632 VkResult VmaAllocator_T::BindBufferMemory(
15633     VmaAllocation hAllocation,
15634     VkDeviceSize allocationLocalOffset,
15635     VkBuffer hBuffer,
15636     const void* pNext)
15637 {
15638     VkResult res = VK_ERROR_UNKNOWN;
15639     switch(hAllocation->GetType())
15640     {
15641     case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15642         res = BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext);
15643         break;
15644     case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15645     {
15646         VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15647         VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block.");
15648         res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext);
15649         break;
15650     }
15651     default:
15652         VMA_ASSERT(0);
15653     }
15654     return res;
15655 }
15656 
15657 VkResult VmaAllocator_T::BindImageMemory(
15658     VmaAllocation hAllocation,
15659     VkDeviceSize allocationLocalOffset,
15660     VkImage hImage,
15661     const void* pNext)
15662 {
15663     VkResult res = VK_ERROR_UNKNOWN;
15664     switch(hAllocation->GetType())
15665     {
15666     case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15667         res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext);
15668         break;
15669     case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15670     {
15671         VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
15672         VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block.");
15673         res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext);
15674         break;
15675     }
15676     default:
15677         VMA_ASSERT(0);
15678     }
15679     return res;
15680 }
15681 
15682 VkResult VmaAllocator_T::FlushOrInvalidateAllocation(
15683     VmaAllocation hAllocation,
15684     VkDeviceSize offset, VkDeviceSize size,
15685     VMA_CACHE_OPERATION op)
15686 {
15687     VkResult res = VK_SUCCESS;
15688 
15689     VkMappedMemoryRange memRange = {};
15690     if(GetFlushOrInvalidateRange(hAllocation, offset, size, memRange))
15691     {
15692         switch(op)
15693         {
15694         case VMA_CACHE_FLUSH:
15695             res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
15696             break;
15697         case VMA_CACHE_INVALIDATE:
15698             res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
15699             break;
15700         default:
15701             VMA_ASSERT(0);
15702         }
15703     }
15704     // else: Just ignore this call.
15705     return res;
15706 }
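/*
A minimal sketch of the public wrappers over this function. The calls matter
only for memory types without HOST_COHERENT; on coherent types
GetFlushOrInvalidateRange() returns false and the call is a no-op returning
VK_SUCCESS (allocInfo, srcData, and srcSize are hypothetical):

    memcpy(allocInfo.pMappedData, srcData, (size_t)srcSize);
    vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);

    // And before the CPU reads data written by the GPU:
    vmaInvalidateAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
*/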
15707 
15708 VkResult VmaAllocator_T::FlushOrInvalidateAllocations(
15709     uint32_t allocationCount,
15710     const VmaAllocation* allocations,
15711     const VkDeviceSize* offsets, const VkDeviceSize* sizes,
15712     VMA_CACHE_OPERATION op)
15713 {
15714     typedef VmaStlAllocator<VkMappedMemoryRange> RangeAllocator;
15715     typedef VmaSmallVector<VkMappedMemoryRange, RangeAllocator, 16> RangeVector;
15716     RangeVector ranges = RangeVector(RangeAllocator(GetAllocationCallbacks()));
15717 
15718     for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
15719     {
15720         const VmaAllocation alloc = allocations[allocIndex];
15721         const VkDeviceSize offset = offsets != VMA_NULL ? offsets[allocIndex] : 0;
15722         const VkDeviceSize size = sizes != VMA_NULL ? sizes[allocIndex] : VK_WHOLE_SIZE;
15723         VkMappedMemoryRange newRange;
15724         if(GetFlushOrInvalidateRange(alloc, offset, size, newRange))
15725         {
15726             ranges.push_back(newRange);
15727         }
15728     }
15729 
15730     VkResult res = VK_SUCCESS;
15731     if(!ranges.empty())
15732     {
15733         switch(op)
15734         {
15735         case VMA_CACHE_FLUSH:
15736             res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
15737             break;
15738         case VMA_CACHE_INVALIDATE:
15739             res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
15740             break;
15741         default:
15742             VMA_ASSERT(0);
15743         }
15744     }
15745     // else: Just ignore this call.
15746     return res;
15747 }
15748 
15749 void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation)
15750 {
15751     VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
15752 
15753     const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
15754     VmaPool parentPool = allocation->GetParentPool();
15755     if(parentPool == VK_NULL_HANDLE)
15756     {
15757         // Default pool
15758         m_DedicatedAllocations[memTypeIndex].Unregister(allocation);
15759     }
15760     else
15761     {
15762         // Custom pool
15763         parentPool->m_DedicatedAllocations.Unregister(allocation);
15764     }
15765 
15766     VkDeviceMemory hMemory = allocation->GetMemory();
15767 
15768     /*
15769     There is no need to call this, because the Vulkan spec allows skipping
15770     vkUnmapMemory before vkFreeMemory.
15771 
15772     if(allocation->GetMappedData() != VMA_NULL)
15773     {
15774         (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
15775     }
15776     */
15777 
15778     FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
15779 
15780     m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize());
15781     m_AllocationObjectAllocator.Free(allocation);
15782 
15783     VMA_DEBUG_LOG_FORMAT("    Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
15784 }
15785 
15786 uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
15787 {
15788     VkBufferCreateInfo dummyBufCreateInfo;
15789     VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
15790 
15791     uint32_t memoryTypeBits = 0;
15792 
15793     // Create buffer.
15794     VkBuffer buf = VK_NULL_HANDLE;
15795     VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
15796         m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
15797     if(res == VK_SUCCESS)
15798     {
15799         // Query for supported memory types.
15800         VkMemoryRequirements memReq;
15801         (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
15802         memoryTypeBits = memReq.memoryTypeBits;
15803 
15804         // Destroy buffer.
15805         (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
15806     }
15807 
15808     return memoryTypeBits;
15809 }
15810 
uint32_t VmaAllocator_T::CalculateGlobalMemoryTypeBits() const
{
    // Make sure memory information is already fetched.
    VMA_ASSERT(GetMemoryTypeCount() > 0);

    uint32_t memoryTypeBits = UINT32_MAX;

    if(!m_UseAmdDeviceCoherentMemory)
    {
        // Exclude memory types that have VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD.
        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
            {
                memoryTypeBits &= ~(1u << memTypeIndex);
            }
        }
    }

    return memoryTypeBits;
}

bool VmaAllocator_T::GetFlushOrInvalidateRange(
    VmaAllocation allocation,
    VkDeviceSize offset, VkDeviceSize size,
    VkMappedMemoryRange& outRange) const
{
    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    {
        const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
        const VkDeviceSize allocationSize = allocation->GetSize();
        VMA_ASSERT(offset <= allocationSize);

        outRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
        outRange.pNext = VMA_NULL;
        outRange.memory = allocation->GetMemory();

        switch(allocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                outRange.size = allocationSize - outRange.offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
                outRange.size = VMA_MIN(
                    VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize),
                    allocationSize - outRange.offset);
            }
            break;
        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            // 1. Still within this allocation.
            outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                size = allocationSize - offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
            }
            outRange.size = VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize);

            // 2. Adjust to whole block.
            const VkDeviceSize allocationOffset = allocation->GetOffset();
            VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
            const VkDeviceSize blockSize = allocation->GetBlock()->m_pMetadata->GetSize();
            outRange.offset += allocationOffset;
            outRange.size = VMA_MIN(outRange.size, blockSize - outRange.offset);

            break;
        }
        default:
            VMA_ASSERT(0);
        }
        return true;
    }
    return false;
}

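/*
Worked example of the rounding above (illustrative numbers only, not from any real
device): with nonCoherentAtomSize = 64, a request to flush offset = 70, size = 20
inside a dedicated allocation of 256 bytes becomes:

    outRange.offset = VmaAlignDown(70, 64)           = 64
    outRange.size   = VmaAlignUp(20 + (70 - 64), 64) = 64  (clamped to 256 - 64)

so the flushed range [64, 128) fully covers the requested [70, 90) while respecting
VkPhysicalDeviceLimits::nonCoherentAtomSize, as vkFlushMappedMemoryRanges requires.
*/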
#if VMA_MEMORY_BUDGET
void VmaAllocator_T::UpdateVulkanBudget()
{
    VMA_ASSERT(m_UseExtMemoryBudget);

    VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR };

    VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT };
    VmaPnextChainPushFront(&memProps, &budgetProps);

    GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps);

    {
        VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex);

        for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
        {
            m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex];
            m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex];
            m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load();

            // Some buggy drivers return the budget incorrectly, e.g. 0 or much bigger than the heap size.
            if(m_Budget.m_VulkanBudget[heapIndex] == 0)
            {
                m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristic.
            }
            else if(m_Budget.m_VulkanBudget[heapIndex] > m_MemProps.memoryHeaps[heapIndex].size)
            {
                m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size;
            }
            if(m_Budget.m_VulkanUsage[heapIndex] == 0 && m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] > 0)
            {
                m_Budget.m_VulkanUsage[heapIndex] = m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
            }
        }
        m_Budget.m_OperationsSinceBudgetFetch = 0;
    }
}
#endif // VMA_MEMORY_BUDGET

void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
{
    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
        hAllocation->IsMappingAllowed() &&
        (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    {
        void* pData = VMA_NULL;
        VkResult res = Map(hAllocation, &pData);
        if(res == VK_SUCCESS)
        {
            memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
            FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
            Unmap(hAllocation);
        }
        else
        {
            VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
        }
    }
}

uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()
{
    uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
    if(memoryTypeBits == UINT32_MAX)
    {
        memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
        m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
    }
    return memoryTypeBits;
}

#if VMA_STATS_STRING_ENABLED
void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    json.WriteString("DefaultPools");
    json.BeginObject();
    {
        for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex];
            VmaDedicatedAllocationList& dedicatedAllocList = m_DedicatedAllocations[memTypeIndex];
            if (pBlockVector != VMA_NULL)
            {
                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                json.EndString();
                json.BeginObject();
                {
                    json.WriteString("PreferredBlockSize");
                    json.WriteNumber(pBlockVector->GetPreferredBlockSize());

                    json.WriteString("Blocks");
                    pBlockVector->PrintDetailedMap(json);

                    json.WriteString("DedicatedAllocations");
                    dedicatedAllocList.BuildStatsString(json);
                }
                json.EndObject();
            }
        }
    }
    json.EndObject();

    json.WriteString("CustomPools");
    json.BeginObject();
    {
        VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
        if (!m_Pools.IsEmpty())
        {
            for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
            {
                bool displayType = true;
                size_t index = 0;
                for (VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
                {
                    VmaBlockVector& blockVector = pool->m_BlockVector;
                    if (blockVector.GetMemoryTypeIndex() == memTypeIndex)
                    {
                        if (displayType)
                        {
                            json.BeginString("Type ");
                            json.ContinueString(memTypeIndex);
                            json.EndString();
                            json.BeginArray();
                            displayType = false;
                        }

                        json.BeginObject();
                        {
                            json.WriteString("Name");
                            json.BeginString();
                            json.ContinueString((uint64_t)index++);
                            if (pool->GetName())
                            {
                                json.ContinueString(" - ");
                                json.ContinueString(pool->GetName());
                            }
                            json.EndString();

                            json.WriteString("PreferredBlockSize");
                            json.WriteNumber(blockVector.GetPreferredBlockSize());

                            json.WriteString("Blocks");
                            blockVector.PrintDetailedMap(json);

                            json.WriteString("DedicatedAllocations");
                            pool->m_DedicatedAllocations.BuildStatsString(json);
                        }
                        json.EndObject();
                    }
                }

                if (!displayType)
                    json.EndArray();
            }
        }
    }
    json.EndObject();
}
#endif // VMA_STATS_STRING_ENABLED
#endif // _VMA_ALLOCATOR_T_FUNCTIONS


#ifndef _VMA_PUBLIC_INTERFACE
VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator)
{
    VMA_ASSERT(pCreateInfo && pAllocator);
    VMA_ASSERT(pCreateInfo->vulkanApiVersion == 0 ||
        (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 3));
    VMA_DEBUG_LOG("vmaCreateAllocator");
    *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    VkResult result = (*pAllocator)->Init(pCreateInfo);
    if(result < 0)
    {
        vma_delete(pCreateInfo->pAllocationCallbacks, *pAllocator);
        *pAllocator = VK_NULL_HANDLE;
    }
    return result;
}

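/*
Minimal usage sketch (illustrative only; `instance`, `physicalDevice`, and `device`
are assumed to be valid handles the application created earlier):

    VmaAllocatorCreateInfo allocatorCreateInfo = {};
    allocatorCreateInfo.vulkanApiVersion = VK_API_VERSION_1_2;
    allocatorCreateInfo.instance = instance;
    allocatorCreateInfo.physicalDevice = physicalDevice;
    allocatorCreateInfo.device = device;

    VmaAllocator vmaAllocator = VK_NULL_HANDLE;
    VkResult res = vmaCreateAllocator(&allocatorCreateInfo, &vmaAllocator);
    // ... use the allocator ...
    vmaDestroyAllocator(vmaAllocator);
*/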
VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
    VmaAllocator allocator)
{
    if(allocator != VK_NULL_HANDLE)
    {
        VMA_DEBUG_LOG("vmaDestroyAllocator");
        VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks; // Have to copy the callbacks when destroying.
        vma_delete(&allocationCallbacks, allocator);
    }
}

VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo* pAllocatorInfo)
{
    VMA_ASSERT(allocator && pAllocatorInfo);
    pAllocatorInfo->instance = allocator->m_hInstance;
    pAllocatorInfo->physicalDevice = allocator->GetPhysicalDevice();
    pAllocatorInfo->device = allocator->m_hDevice;
}

VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
{
    VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
}

VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
{
    VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
}

VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
    VmaAllocator allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* pFlags)
{
    VMA_ASSERT(allocator && pFlags);
    VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
}

VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
    VmaAllocator allocator,
    uint32_t frameIndex)
{
    VMA_ASSERT(allocator);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->SetCurrentFrameIndex(frameIndex);
}

VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStatistics(
    VmaAllocator allocator,
    VmaTotalStatistics* pStats)
{
    VMA_ASSERT(allocator && pStats);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK
    allocator->CalculateStatistics(pStats);
}

VMA_CALL_PRE void VMA_CALL_POST vmaGetHeapBudgets(
    VmaAllocator allocator,
    VmaBudget* pBudgets)
{
    VMA_ASSERT(allocator && pBudgets);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK
    allocator->GetHeapBudgets(pBudgets, 0, allocator->GetMemoryHeapCount());
}

#if VMA_STATS_STRING_ENABLED

VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap)
{
    VMA_ASSERT(allocator && ppStatsString);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VmaStringBuilder sb(allocator->GetAllocationCallbacks());
    {
        VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
        allocator->GetHeapBudgets(budgets, 0, allocator->GetMemoryHeapCount());

        VmaTotalStatistics stats;
        allocator->CalculateStatistics(&stats);

        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();
        {
            json.WriteString("General");
            json.BeginObject();
            {
                const VkPhysicalDeviceProperties& deviceProperties = allocator->m_PhysicalDeviceProperties;
                const VkPhysicalDeviceMemoryProperties& memoryProperties = allocator->m_MemProps;

                json.WriteString("API");
                json.WriteString("Vulkan");

                json.WriteString("apiVersion");
                json.BeginString();
                json.ContinueString(VK_VERSION_MAJOR(deviceProperties.apiVersion));
                json.ContinueString(".");
                json.ContinueString(VK_VERSION_MINOR(deviceProperties.apiVersion));
                json.ContinueString(".");
                json.ContinueString(VK_VERSION_PATCH(deviceProperties.apiVersion));
                json.EndString();

                json.WriteString("GPU");
                json.WriteString(deviceProperties.deviceName);
                json.WriteString("deviceType");
                json.WriteNumber(static_cast<uint32_t>(deviceProperties.deviceType));

                json.WriteString("maxMemoryAllocationCount");
                json.WriteNumber(deviceProperties.limits.maxMemoryAllocationCount);
                json.WriteString("bufferImageGranularity");
                json.WriteNumber(deviceProperties.limits.bufferImageGranularity);
                json.WriteString("nonCoherentAtomSize");
                json.WriteNumber(deviceProperties.limits.nonCoherentAtomSize);

                json.WriteString("memoryHeapCount");
                json.WriteNumber(memoryProperties.memoryHeapCount);
                json.WriteString("memoryTypeCount");
                json.WriteNumber(memoryProperties.memoryTypeCount);
            }
            json.EndObject();
        }
        {
            json.WriteString("Total");
            VmaPrintDetailedStatistics(json, stats.total);
        }
        {
            json.WriteString("MemoryInfo");
            json.BeginObject();
            {
                for (uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
                {
                    json.BeginString("Heap ");
                    json.ContinueString(heapIndex);
                    json.EndString();
                    json.BeginObject();
                    {
                        const VkMemoryHeap& heapInfo = allocator->m_MemProps.memoryHeaps[heapIndex];
                        json.WriteString("Flags");
                        json.BeginArray(true);
                        {
                            if (heapInfo.flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT)
                                json.WriteString("DEVICE_LOCAL");
                        #if VMA_VULKAN_VERSION >= 1001000
                            if (heapInfo.flags & VK_MEMORY_HEAP_MULTI_INSTANCE_BIT)
                                json.WriteString("MULTI_INSTANCE");
                        #endif

                            VkMemoryHeapFlags flags = heapInfo.flags &
                                ~(VK_MEMORY_HEAP_DEVICE_LOCAL_BIT
                        #if VMA_VULKAN_VERSION >= 1001000
                                    | VK_MEMORY_HEAP_MULTI_INSTANCE_BIT
                        #endif
                                    );
                            if (flags != 0)
                                json.WriteNumber(flags);
                        }
                        json.EndArray();

                        json.WriteString("Size");
                        json.WriteNumber(heapInfo.size);

                        json.WriteString("Budget");
                        json.BeginObject();
                        {
                            json.WriteString("BudgetBytes");
                            json.WriteNumber(budgets[heapIndex].budget);
                            json.WriteString("UsageBytes");
                            json.WriteNumber(budgets[heapIndex].usage);
                        }
                        json.EndObject();

                        json.WriteString("Stats");
                        VmaPrintDetailedStatistics(json, stats.memoryHeap[heapIndex]);

                        json.WriteString("MemoryPools");
                        json.BeginObject();
                        {
                            for (uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
                            {
                                if (allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
                                {
                                    json.BeginString("Type ");
                                    json.ContinueString(typeIndex);
                                    json.EndString();
                                    json.BeginObject();
                                    {
                                        json.WriteString("Flags");
                                        json.BeginArray(true);
                                        {
                                            VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
                                            if (flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)
                                                json.WriteString("DEVICE_LOCAL");
                                            if (flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
                                                json.WriteString("HOST_VISIBLE");
                                            if (flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)
                                                json.WriteString("HOST_COHERENT");
                                            if (flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT)
                                                json.WriteString("HOST_CACHED");
                                            if (flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT)
                                                json.WriteString("LAZILY_ALLOCATED");
                                        #if VMA_VULKAN_VERSION >= 1001000
                                            if (flags & VK_MEMORY_PROPERTY_PROTECTED_BIT)
                                                json.WriteString("PROTECTED");
                                        #endif
                                        #if VK_AMD_device_coherent_memory
                                            if (flags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY)
                                                json.WriteString("DEVICE_COHERENT_AMD");
                                            if (flags & VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)
                                                json.WriteString("DEVICE_UNCACHED_AMD");
                                        #endif

                                            flags &= ~(VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT
                                        #if VMA_VULKAN_VERSION >= 1001000
                                                | VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT
                                        #endif
                                        #if VK_AMD_device_coherent_memory
                                                | VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY
                                                | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY
                                        #endif
                                                | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT
                                                | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT
                                                | VK_MEMORY_PROPERTY_HOST_CACHED_BIT);
                                            if (flags != 0)
                                                json.WriteNumber(flags);
                                        }
                                        json.EndArray();

                                        json.WriteString("Stats");
                                        VmaPrintDetailedStatistics(json, stats.memoryType[typeIndex]);
                                    }
                                    json.EndObject();
                                }
                            }

                        }
                        json.EndObject();
                    }
                    json.EndObject();
                }
            }
            json.EndObject();
        }

        if (detailedMap == VK_TRUE)
            allocator->PrintDetailedMap(json);

        json.EndObject();
    }

    *ppStatsString = VmaCreateStringCopy(allocator->GetAllocationCallbacks(), sb.GetData(), sb.GetLength());
}

VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
    VmaAllocator allocator,
    char* pStatsString)
{
    if(pStatsString != VMA_NULL)
    {
        VMA_ASSERT(allocator);
        VmaFreeString(allocator->GetAllocationCallbacks(), pStatsString);
    }
}
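
/*
Usage sketch for the pair above (illustrative; `vmaAllocator` is assumed to be a
valid allocator created with vmaCreateAllocator). The returned string must always
be released with vmaFreeStatsString:

    char* statsString = VMA_NULL;
    vmaBuildStatsString(vmaAllocator, &statsString, VK_TRUE);
    // e.g. write statsString to a .json file for offline inspection
    vmaFreeStatsString(vmaAllocator, statsString);
*/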

#endif // VMA_STATS_STRING_ENABLED

/*
This function is not protected by any mutex because it just reads immutable data.
*/
VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
    VmaAllocator allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex)
{
    VMA_ASSERT(allocator != VK_NULL_HANDLE);
    VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);

    return allocator->FindMemoryTypeIndex(memoryTypeBits, pAllocationCreateInfo, UINT32_MAX, pMemoryTypeIndex);
}

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex)
{
    VMA_ASSERT(allocator != VK_NULL_HANDLE);
    VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
    VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);

    const VkDevice hDev = allocator->m_hDevice;
    const VmaVulkanFunctions* funcs = &allocator->GetVulkanFunctions();
    VkResult res;

#if VMA_VULKAN_VERSION >= 1003000
    if(funcs->vkGetDeviceBufferMemoryRequirements)
    {
        // Can query straight from VkBufferCreateInfo :)
        VkDeviceBufferMemoryRequirements devBufMemReq = {VK_STRUCTURE_TYPE_DEVICE_BUFFER_MEMORY_REQUIREMENTS};
        devBufMemReq.pCreateInfo = pBufferCreateInfo;

        VkMemoryRequirements2 memReq = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2};
        (*funcs->vkGetDeviceBufferMemoryRequirements)(hDev, &devBufMemReq, &memReq);

        res = allocator->FindMemoryTypeIndex(
            memReq.memoryRequirements.memoryTypeBits, pAllocationCreateInfo, pBufferCreateInfo->usage, pMemoryTypeIndex);
    }
    else
#endif // #if VMA_VULKAN_VERSION >= 1003000
    {
        // Must create a dummy buffer to query :(
        VkBuffer hBuffer = VK_NULL_HANDLE;
        res = funcs->vkCreateBuffer(
            hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
        if(res == VK_SUCCESS)
        {
            VkMemoryRequirements memReq = {};
            funcs->vkGetBufferMemoryRequirements(hDev, hBuffer, &memReq);

            res = allocator->FindMemoryTypeIndex(
                memReq.memoryTypeBits, pAllocationCreateInfo, pBufferCreateInfo->usage, pMemoryTypeIndex);

            funcs->vkDestroyBuffer(
                hDev, hBuffer, allocator->GetAllocationCallbacks());
        }
    }
    return res;
}

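/*
Usage sketch (illustrative; `vmaAllocator` is assumed valid and error handling is
omitted). The returned index can be fed into VmaPoolCreateInfo::memoryTypeIndex:

    VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufInfo.size = 65536;
    bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_AUTO;

    uint32_t memTypeIndex = 0;
    VkResult res = vmaFindMemoryTypeIndexForBufferInfo(
        vmaAllocator, &bufInfo, &allocInfo, &memTypeIndex);
*/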
VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex)
{
    VMA_ASSERT(allocator != VK_NULL_HANDLE);
    VMA_ASSERT(pImageCreateInfo != VMA_NULL);
    VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);

    const VkDevice hDev = allocator->m_hDevice;
    const VmaVulkanFunctions* funcs = &allocator->GetVulkanFunctions();
    VkResult res;

#if VMA_VULKAN_VERSION >= 1003000
    if(funcs->vkGetDeviceImageMemoryRequirements)
    {
        // Can query straight from VkImageCreateInfo :)
        VkDeviceImageMemoryRequirements devImgMemReq = {VK_STRUCTURE_TYPE_DEVICE_IMAGE_MEMORY_REQUIREMENTS};
        devImgMemReq.pCreateInfo = pImageCreateInfo;
        VMA_ASSERT(pImageCreateInfo->tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT_COPY && (pImageCreateInfo->flags & VK_IMAGE_CREATE_DISJOINT_BIT_COPY) == 0 &&
            "Cannot use this VkImageCreateInfo with vmaFindMemoryTypeIndexForImageInfo as I don't know what to pass as VkDeviceImageMemoryRequirements::planeAspect.");

        VkMemoryRequirements2 memReq = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2};
        (*funcs->vkGetDeviceImageMemoryRequirements)(hDev, &devImgMemReq, &memReq);

        res = allocator->FindMemoryTypeIndex(
            memReq.memoryRequirements.memoryTypeBits, pAllocationCreateInfo, pImageCreateInfo->usage, pMemoryTypeIndex);
    }
    else
#endif // #if VMA_VULKAN_VERSION >= 1003000
    {
        // Must create a dummy image to query :(
        VkImage hImage = VK_NULL_HANDLE;
        res = funcs->vkCreateImage(
            hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
        if(res == VK_SUCCESS)
        {
            VkMemoryRequirements memReq = {};
            funcs->vkGetImageMemoryRequirements(hDev, hImage, &memReq);

            res = allocator->FindMemoryTypeIndex(
                memReq.memoryTypeBits, pAllocationCreateInfo, pImageCreateInfo->usage, pMemoryTypeIndex);

            funcs->vkDestroyImage(
                hDev, hImage, allocator->GetAllocationCallbacks());
        }
    }
    return res;
}

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool)
{
    VMA_ASSERT(allocator && pCreateInfo && pPool);

    VMA_DEBUG_LOG("vmaCreatePool");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->CreatePool(pCreateInfo, pPool);
}

VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool)
{
    VMA_ASSERT(allocator);

    if(pool == VK_NULL_HANDLE)
    {
        return;
    }

    VMA_DEBUG_LOG("vmaDestroyPool");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->DestroyPool(pool);
}

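/*
Usage sketch for custom pools (illustrative; `vmaAllocator` is assumed valid, and
`memTypeIndex` is assumed to come from a vmaFindMemoryTypeIndex* query like the
ones above):

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 16ull * 1024 * 1024; // optional: fixed 16 MiB blocks

    VmaPool pool = VK_NULL_HANDLE;
    VkResult res = vmaCreatePool(vmaAllocator, &poolCreateInfo, &pool);
    // ... allocate from it by setting VmaAllocationCreateInfo::pool ...
    vmaDestroyPool(vmaAllocator, pool);
*/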
VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStatistics(
    VmaAllocator allocator,
    VmaPool pool,
    VmaStatistics* pPoolStats)
{
    VMA_ASSERT(allocator && pool && pPoolStats);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->GetPoolStatistics(pool, pPoolStats);
}

VMA_CALL_PRE void VMA_CALL_POST vmaCalculatePoolStatistics(
    VmaAllocator allocator,
    VmaPool pool,
    VmaDetailedStatistics* pPoolStats)
{
    VMA_ASSERT(allocator && pool && pPoolStats);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->CalculatePoolStatistics(pool, pPoolStats);
}

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
{
    VMA_ASSERT(allocator && pool);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VMA_DEBUG_LOG("vmaCheckPoolCorruption");

    return allocator->CheckPoolCorruption(pool);
}

VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
    VmaAllocator allocator,
    VmaPool pool,
    const char** ppName)
{
    VMA_ASSERT(allocator && pool && ppName);

    VMA_DEBUG_LOG("vmaGetPoolName");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    *ppName = pool->GetName();
}

VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
    VmaAllocator allocator,
    VmaPool pool,
    const char* pName)
{
    VMA_ASSERT(allocator && pool);

    VMA_DEBUG_LOG("vmaSetPoolName");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    pool->SetName(pName);
}

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);

    VMA_DEBUG_LOG("vmaAllocateMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkResult result = allocator->AllocateMemory(
        *pVkMemoryRequirements,
        false, // requiresDedicatedAllocation
        false, // prefersDedicatedAllocation
        VK_NULL_HANDLE, // dedicatedBuffer
        VK_NULL_HANDLE, // dedicatedImage
        UINT32_MAX, // dedicatedBufferImageUsage
        *pCreateInfo,
        VMA_SUBALLOCATION_TYPE_UNKNOWN,
        1, // allocationCount
        pAllocation);

    if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    {
        allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    }

    return result;
}

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    size_t allocationCount,
    VmaAllocation* pAllocations,
    VmaAllocationInfo* pAllocationInfo)
{
    if(allocationCount == 0)
    {
        return VK_SUCCESS;
    }

    VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);

    VMA_DEBUG_LOG("vmaAllocateMemoryPages");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkResult result = allocator->AllocateMemory(
        *pVkMemoryRequirements,
        false, // requiresDedicatedAllocation
        false, // prefersDedicatedAllocation
        VK_NULL_HANDLE, // dedicatedBuffer
        VK_NULL_HANDLE, // dedicatedImage
        UINT32_MAX, // dedicatedBufferImageUsage
        *pCreateInfo,
        VMA_SUBALLOCATION_TYPE_UNKNOWN,
        allocationCount,
        pAllocations);

    if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    {
        for(size_t i = 0; i < allocationCount; ++i)
        {
            allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
        }
    }

    return result;
}

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);

    VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkMemoryRequirements vkMemReq = {};
    bool requiresDedicatedAllocation = false;
    bool prefersDedicatedAllocation = false;
    allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
        requiresDedicatedAllocation,
        prefersDedicatedAllocation);

    VkResult result = allocator->AllocateMemory(
        vkMemReq,
        requiresDedicatedAllocation,
        prefersDedicatedAllocation,
        buffer, // dedicatedBuffer
        VK_NULL_HANDLE, // dedicatedImage
        UINT32_MAX, // dedicatedBufferImageUsage
        *pCreateInfo,
        VMA_SUBALLOCATION_TYPE_BUFFER,
        1, // allocationCount
        pAllocation);

    if(pAllocationInfo && result == VK_SUCCESS)
    {
        allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    }

    return result;
}

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
    VmaAllocator allocator,
    VkImage image,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);

    VMA_DEBUG_LOG("vmaAllocateMemoryForImage");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkMemoryRequirements vkMemReq = {};
    bool requiresDedicatedAllocation = false;
    bool prefersDedicatedAllocation  = false;
    allocator->GetImageMemoryRequirements(image, vkMemReq,
        requiresDedicatedAllocation, prefersDedicatedAllocation);

    VkResult result = allocator->AllocateMemory(
        vkMemReq,
        requiresDedicatedAllocation,
        prefersDedicatedAllocation,
        VK_NULL_HANDLE, // dedicatedBuffer
        image, // dedicatedImage
        UINT32_MAX, // dedicatedBufferImageUsage
        *pCreateInfo,
        VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
        1, // allocationCount
        pAllocation);

    if(pAllocationInfo && result == VK_SUCCESS)
    {
        allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    }

    return result;
}

VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator);

    if(allocation == VK_NULL_HANDLE)
    {
        return;
    }

    VMA_DEBUG_LOG("vmaFreeMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->FreeMemory(
        1, // allocationCount
        &allocation);
}

VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
    VmaAllocator allocator,
    size_t allocationCount,
    const VmaAllocation* pAllocations)
{
    if(allocationCount == 0)
    {
        return;
    }

    VMA_ASSERT(allocator);

    VMA_DEBUG_LOG("vmaFreeMemoryPages");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->FreeMemory(allocationCount, pAllocations);
}

VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && allocation && pAllocationInfo);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->GetAllocationInfo(allocation, pAllocationInfo);
}

VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void* pUserData)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocation->SetUserData(allocator, pUserData);
}

VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationName(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    const char* VMA_NULLABLE pName)
{
    allocation->SetName(allocator, pName);
}

VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationMemoryProperties(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkMemoryPropertyFlags* VMA_NOT_NULL pFlags)
{
    VMA_ASSERT(allocator && allocation && pFlags);
    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    *pFlags = allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
}

VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData)
{
    VMA_ASSERT(allocator && allocation && ppData);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->Map(allocation, ppData);
}

VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->Unmap(allocation);
}

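/*
Typical map/write/unmap sequence (illustrative sketch; `vmaAllocator`, `allocation`,
`srcData`, and `srcSize` are assumed to exist in the calling code, and the
allocation is assumed to have been created with a host-access flag):

    void* mapped = VMA_NULL;
    if(vmaMapMemory(vmaAllocator, allocation, &mapped) == VK_SUCCESS)
    {
        memcpy(mapped, srcData, srcSize);
        // Needed only for non-HOST_COHERENT memory; harmless otherwise.
        vmaFlushAllocation(vmaAllocator, allocation, 0, VK_WHOLE_SIZE);
        vmaUnmapMemory(vmaAllocator, allocation);
    }
*/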
VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkDeviceSize offset,
    VkDeviceSize size)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_LOG("vmaFlushAllocation");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);

    return res;
}

VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkDeviceSize offset,
    VkDeviceSize size)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_LOG("vmaInvalidateAllocation");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);

    return res;
}

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(
    VmaAllocator allocator,
    uint32_t allocationCount,
    const VmaAllocation* allocations,
    const VkDeviceSize* offsets,
    const VkDeviceSize* sizes)
{
    VMA_ASSERT(allocator);

    if(allocationCount == 0)
    {
        return VK_SUCCESS;
    }

    VMA_ASSERT(allocations);

    VMA_DEBUG_LOG("vmaFlushAllocations");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_FLUSH);

    return res;
}

VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(
    VmaAllocator allocator,
    uint32_t allocationCount,
    const VmaAllocation* allocations,
    const VkDeviceSize* offsets,
    const VkDeviceSize* sizes)
{
    VMA_ASSERT(allocator);

    if(allocationCount == 0)
    {
        return VK_SUCCESS;
    }

    VMA_ASSERT(allocations);

    VMA_DEBUG_LOG("vmaInvalidateAllocations");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_INVALIDATE);

    return res;
}

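/*
Batch-flush sketch (illustrative; `vmaAllocator`, `alloc0`, and `alloc1` are assumed
to exist). As FlushOrInvalidateAllocations above shows, null `offsets`/`sizes`
arrays are treated as all-zeros and all-VK_WHOLE_SIZE, so each allocation is
flushed in full with a single Vulkan call:

    VmaAllocation allocs[2] = { alloc0, alloc1 };
    VkResult res = vmaFlushAllocations(vmaAllocator, 2, allocs, VMA_NULL, VMA_NULL);
*/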
VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(
    VmaAllocator allocator,
    uint32_t memoryTypeBits)
{
    VMA_ASSERT(allocator);

    VMA_DEBUG_LOG("vmaCheckCorruption");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->CheckCorruption(memoryTypeBits);
}

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentation(
    VmaAllocator allocator,
    const VmaDefragmentationInfo* pInfo,
    VmaDefragmentationContext* pContext)
{
    VMA_ASSERT(allocator && pInfo && pContext);

    VMA_DEBUG_LOG("vmaBeginDefragmentation");

    if (pInfo->pool != VMA_NULL)
    {
        // Check that the pool uses an algorithm that supports defragmentation.
        if (pInfo->pool->m_BlockVector.GetAlgorithm() & VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
            return VK_ERROR_FEATURE_NOT_PRESENT;
    }

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    *pContext = vma_new(allocator, VmaDefragmentationContext_T)(allocator, *pInfo);
    return VK_SUCCESS;
}

VMA_CALL_PRE void VMA_CALL_POST vmaEndDefragmentation(
    VmaAllocator allocator,
    VmaDefragmentationContext context,
    VmaDefragmentationStats* pStats)
{
    VMA_ASSERT(allocator && context);

    VMA_DEBUG_LOG("vmaEndDefragmentation");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    if (pStats)
        context->GetStats(*pStats);
    vma_delete(allocator, context);
}

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaDefragmentationContext VMA_NOT_NULL context,
    VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo)
{
    VMA_ASSERT(context && pPassInfo);

    VMA_DEBUG_LOG("vmaBeginDefragmentationPass");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return context->DefragmentPassBegin(*pPassInfo);
}

VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaDefragmentationContext VMA_NOT_NULL context,
    VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo)
{
    VMA_ASSERT(context && pPassInfo);

    VMA_DEBUG_LOG("vmaEndDefragmentationPass");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return context->DefragmentPassEnd(*pPassInfo);
}

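/*
Sketch of the full defragmentation loop built from the four functions above
(illustrative only; `vmaAllocator` is assumed valid and error handling is reduced
to the VK_SUCCESS/VK_INCOMPLETE distinction). VK_SUCCESS from either pass function
means defragmentation is finished; VK_INCOMPLETE means more work remains:

    VmaDefragmentationInfo defragInfo = {};
    VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
    if(vmaBeginDefragmentation(vmaAllocator, &defragInfo, &defragCtx) == VK_SUCCESS)
    {
        for(;;)
        {
            VmaDefragmentationPassMoveInfo pass = {};
            if(vmaBeginDefragmentationPass(vmaAllocator, defragCtx, &pass) == VK_SUCCESS)
                break;
            // Record and submit GPU copies for pass.pMoves[0 .. pass.moveCount),
            // then wait for them to complete before ending the pass.
            if(vmaEndDefragmentationPass(vmaAllocator, defragCtx, &pass) == VK_SUCCESS)
                break;
        }
        vmaEndDefragmentation(vmaAllocator, defragCtx, VMA_NULL);
    }
*/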
VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkBuffer buffer)
{
    VMA_ASSERT(allocator && allocation && buffer);

    VMA_DEBUG_LOG("vmaBindBufferMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL);
}

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkDeviceSize allocationLocalOffset,
    VkBuffer buffer,
    const void* pNext)
{
    VMA_ASSERT(allocator && allocation && buffer);

    VMA_DEBUG_LOG("vmaBindBufferMemory2");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext);
}

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkImage image)
{
    VMA_ASSERT(allocator && allocation && image);

    VMA_DEBUG_LOG("vmaBindImageMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->BindImageMemory(allocation, 0, image, VMA_NULL);
}

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkDeviceSize allocationLocalOffset,
    VkImage image,
    const void* pNext)
{
    VMA_ASSERT(allocator && allocation && image);

    VMA_DEBUG_LOG("vmaBindImageMemory2");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext);
}

vmaCreateBuffer(VmaAllocator allocator,const VkBufferCreateInfo * pBufferCreateInfo,const VmaAllocationCreateInfo * pAllocationCreateInfo,VkBuffer * pBuffer,VmaAllocation * pAllocation,VmaAllocationInfo * pAllocationInfo)17049 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
17050     VmaAllocator allocator,
17051     const VkBufferCreateInfo* pBufferCreateInfo,
17052     const VmaAllocationCreateInfo* pAllocationCreateInfo,
17053     VkBuffer* pBuffer,
17054     VmaAllocation* pAllocation,
17055     VmaAllocationInfo* pAllocationInfo)
17056 {
17057     VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
17058 
17059     if(pBufferCreateInfo->size == 0)
17060     {
17061         return VK_ERROR_INITIALIZATION_FAILED;
17062     }
17063     if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&
17064         !allocator->m_UseKhrBufferDeviceAddress)
17065     {
17066         VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.");
17067         return VK_ERROR_INITIALIZATION_FAILED;
17068     }
17069 
17070     VMA_DEBUG_LOG("vmaCreateBuffer");
17071 
17072     VMA_DEBUG_GLOBAL_MUTEX_LOCK
17073 
17074     *pBuffer = VK_NULL_HANDLE;
17075     *pAllocation = VK_NULL_HANDLE;
17076 
17077     // 1. Create VkBuffer.
17078     VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
17079         allocator->m_hDevice,
17080         pBufferCreateInfo,
17081         allocator->GetAllocationCallbacks(),
17082         pBuffer);
17083     if(res >= 0)
17084     {
17085         // 2. vkGetBufferMemoryRequirements.
17086         VkMemoryRequirements vkMemReq = {};
17087         bool requiresDedicatedAllocation = false;
17088         bool prefersDedicatedAllocation  = false;
17089         allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
17090             requiresDedicatedAllocation, prefersDedicatedAllocation);
17091 
17092         // 3. Allocate memory using allocator.
17093         res = allocator->AllocateMemory(
17094             vkMemReq,
17095             requiresDedicatedAllocation,
17096             prefersDedicatedAllocation,
17097             *pBuffer, // dedicatedBuffer
17098             VK_NULL_HANDLE, // dedicatedImage
17099             pBufferCreateInfo->usage, // dedicatedBufferImageUsage
17100             *pAllocationCreateInfo,
17101             VMA_SUBALLOCATION_TYPE_BUFFER,
17102             1, // allocationCount
17103             pAllocation);
17104 
17105         if(res >= 0)
17106         {
17107             // 4. Bind buffer with memory.
17108             if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
17109             {
17110                 res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);
17111             }
17112             if(res >= 0)
17113             {
17114                 // All steps succeeded.
17115                 #if VMA_STATS_STRING_ENABLED
17116                     (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
17117                 #endif
17118                 if(pAllocationInfo != VMA_NULL)
17119                 {
17120                     allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
17121                 }
17122 
17123                 return VK_SUCCESS;
17124             }
17125             allocator->FreeMemory(
17126                 1, // allocationCount
17127                 pAllocation);
17128             *pAllocation = VK_NULL_HANDLE;
17129             (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
17130             *pBuffer = VK_NULL_HANDLE;
17131             return res;
17132         }
17133         (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
17134         *pBuffer = VK_NULL_HANDLE;
17135         return res;
17136     }
17137     return res;
17138 }
17139 
17140 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment(
17141     VmaAllocator allocator,
17142     const VkBufferCreateInfo* pBufferCreateInfo,
17143     const VmaAllocationCreateInfo* pAllocationCreateInfo,
17144     VkDeviceSize minAlignment,
17145     VkBuffer* pBuffer,
17146     VmaAllocation* pAllocation,
17147     VmaAllocationInfo* pAllocationInfo)
17148 {
17149     VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && VmaIsPow2(minAlignment) && pBuffer && pAllocation);
17150 
17151     if(pBufferCreateInfo->size == 0)
17152     {
17153         return VK_ERROR_INITIALIZATION_FAILED;
17154     }
17155     if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&
17156         !allocator->m_UseKhrBufferDeviceAddress)
17157     {
17158         VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.");
17159         return VK_ERROR_INITIALIZATION_FAILED;
17160     }
17161 
17162     VMA_DEBUG_LOG("vmaCreateBufferWithAlignment");
17163 
17164     VMA_DEBUG_GLOBAL_MUTEX_LOCK
17165 
17166     *pBuffer = VK_NULL_HANDLE;
17167     *pAllocation = VK_NULL_HANDLE;
17168 
17169     // 1. Create VkBuffer.
17170     VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
17171         allocator->m_hDevice,
17172         pBufferCreateInfo,
17173         allocator->GetAllocationCallbacks(),
17174         pBuffer);
17175     if(res >= 0)
17176     {
17177         // 2. vkGetBufferMemoryRequirements.
17178         VkMemoryRequirements vkMemReq = {};
17179         bool requiresDedicatedAllocation = false;
17180         bool prefersDedicatedAllocation  = false;
17181         allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
17182             requiresDedicatedAllocation, prefersDedicatedAllocation);
17183 
17184         // 2a. Include minAlignment
17185         vkMemReq.alignment = VMA_MAX(vkMemReq.alignment, minAlignment);
17186 
17187         // 3. Allocate memory using allocator.
17188         res = allocator->AllocateMemory(
17189             vkMemReq,
17190             requiresDedicatedAllocation,
17191             prefersDedicatedAllocation,
17192             *pBuffer, // dedicatedBuffer
17193             VK_NULL_HANDLE, // dedicatedImage
17194             pBufferCreateInfo->usage, // dedicatedBufferImageUsage
17195             *pAllocationCreateInfo,
17196             VMA_SUBALLOCATION_TYPE_BUFFER,
17197             1, // allocationCount
17198             pAllocation);
17199 
17200         if(res >= 0)
17201         {
17202             // 4. Bind buffer with memory.
17203             if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
17204             {
17205                 res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);
17206             }
17207             if(res >= 0)
17208             {
17209                 // All steps succeeded.
17210                 #if VMA_STATS_STRING_ENABLED
17211                     (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
17212                 #endif
17213                 if(pAllocationInfo != VMA_NULL)
17214                 {
17215                     allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
17216                 }
17217 
17218                 return VK_SUCCESS;
17219             }
17220             allocator->FreeMemory(
17221                 1, // allocationCount
17222                 pAllocation);
17223             *pAllocation = VK_NULL_HANDLE;
17224             (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
17225             *pBuffer = VK_NULL_HANDLE;
17226             return res;
17227         }
17228         (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
17229         *pBuffer = VK_NULL_HANDLE;
17230         return res;
17231     }
17232     return res;
17233 }
17234 
17235 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer(
17236     VmaAllocator VMA_NOT_NULL allocator,
17237     VmaAllocation VMA_NOT_NULL allocation,
17238     const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
17239     VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer)
17240 {
17241     return vmaCreateAliasingBuffer2(allocator, allocation, 0, pBufferCreateInfo, pBuffer);
17242 }
17243 
17244 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer2(
17245     VmaAllocator VMA_NOT_NULL allocator,
17246     VmaAllocation VMA_NOT_NULL allocation,
17247     VkDeviceSize allocationLocalOffset,
17248     const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
17249     VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer)
17250 {
17251     VMA_ASSERT(allocator && pBufferCreateInfo && pBuffer && allocation);
17252     VMA_ASSERT(allocationLocalOffset + pBufferCreateInfo->size <= allocation->GetSize());
17253 
17254     VMA_DEBUG_LOG("vmaCreateAliasingBuffer2");
17255 
17256     *pBuffer = VK_NULL_HANDLE;
17257 
17258     if (pBufferCreateInfo->size == 0)
17259     {
17260         return VK_ERROR_INITIALIZATION_FAILED;
17261     }
17262     if ((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&
17263         !allocator->m_UseKhrBufferDeviceAddress)
17264     {
17265         VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.");
17266         return VK_ERROR_INITIALIZATION_FAILED;
17267     }
17268 
17269     VMA_DEBUG_GLOBAL_MUTEX_LOCK
17270 
17271     // 1. Create VkBuffer.
17272     VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
17273         allocator->m_hDevice,
17274         pBufferCreateInfo,
17275         allocator->GetAllocationCallbacks(),
17276         pBuffer);
17277     if (res >= 0)
17278     {
17279         // 2. Bind buffer with memory.
17280         res = allocator->BindBufferMemory(allocation, allocationLocalOffset, *pBuffer, VMA_NULL);
17281         if (res >= 0)
17282         {
17283             return VK_SUCCESS;
17284         }
17285         (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
17286     }
17287     return res;
17288 }
17289 
17290 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
17291     VmaAllocator allocator,
17292     VkBuffer buffer,
17293     VmaAllocation allocation)
17294 {
17295     VMA_ASSERT(allocator);
17296 
17297     if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
17298     {
17299         return;
17300     }
17301 
17302     VMA_DEBUG_LOG("vmaDestroyBuffer");
17303 
17304     VMA_DEBUG_GLOBAL_MUTEX_LOCK
17305 
17306     if(buffer != VK_NULL_HANDLE)
17307     {
17308         (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
17309     }
17310 
17311     if(allocation != VK_NULL_HANDLE)
17312     {
17313         allocator->FreeMemory(
17314             1, // allocationCount
17315             &allocation);
17316     }
17317 }
17318 
17319 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
17320     VmaAllocator allocator,
17321     const VkImageCreateInfo* pImageCreateInfo,
17322     const VmaAllocationCreateInfo* pAllocationCreateInfo,
17323     VkImage* pImage,
17324     VmaAllocation* pAllocation,
17325     VmaAllocationInfo* pAllocationInfo)
17326 {
17327     VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
17328 
17329     if(pImageCreateInfo->extent.width == 0 ||
17330         pImageCreateInfo->extent.height == 0 ||
17331         pImageCreateInfo->extent.depth == 0 ||
17332         pImageCreateInfo->mipLevels == 0 ||
17333         pImageCreateInfo->arrayLayers == 0)
17334     {
17335         return VK_ERROR_INITIALIZATION_FAILED;
17336     }
17337 
17338     VMA_DEBUG_LOG("vmaCreateImage");
17339 
17340     VMA_DEBUG_GLOBAL_MUTEX_LOCK
17341 
17342     *pImage = VK_NULL_HANDLE;
17343     *pAllocation = VK_NULL_HANDLE;
17344 
17345     // 1. Create VkImage.
17346     VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
17347         allocator->m_hDevice,
17348         pImageCreateInfo,
17349         allocator->GetAllocationCallbacks(),
17350         pImage);
17351     if(res >= 0)
17352     {
17353         VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
17354             VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
17355             VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
17356 
17357         // 2. Allocate memory using allocator.
17358         VkMemoryRequirements vkMemReq = {};
17359         bool requiresDedicatedAllocation = false;
17360         bool prefersDedicatedAllocation  = false;
17361         allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
17362             requiresDedicatedAllocation, prefersDedicatedAllocation);
17363 
17364         res = allocator->AllocateMemory(
17365             vkMemReq,
17366             requiresDedicatedAllocation,
17367             prefersDedicatedAllocation,
17368             VK_NULL_HANDLE, // dedicatedBuffer
17369             *pImage, // dedicatedImage
17370             pImageCreateInfo->usage, // dedicatedBufferImageUsage
17371             *pAllocationCreateInfo,
17372             suballocType,
17373             1, // allocationCount
17374             pAllocation);
17375 
17376         if(res >= 0)
17377         {
17378             // 3. Bind image with memory.
17379             if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
17380             {
17381                 res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL);
17382             }
17383             if(res >= 0)
17384             {
17385                 // All steps succeeded.
17386                 #if VMA_STATS_STRING_ENABLED
17387                     (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
17388                 #endif
17389                 if(pAllocationInfo != VMA_NULL)
17390                 {
17391                     allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
17392                 }
17393 
17394                 return VK_SUCCESS;
17395             }
17396             allocator->FreeMemory(
17397                 1, // allocationCount
17398                 pAllocation);
17399             *pAllocation = VK_NULL_HANDLE;
17400             (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
17401             *pImage = VK_NULL_HANDLE;
17402             return res;
17403         }
17404         (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
17405         *pImage = VK_NULL_HANDLE;
17406         return res;
17407     }
17408     return res;
17409 }
17410 
17411 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage(
17412     VmaAllocator VMA_NOT_NULL allocator,
17413     VmaAllocation VMA_NOT_NULL allocation,
17414     const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
17415     VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage)
17416 {
17417     return vmaCreateAliasingImage2(allocator, allocation, 0, pImageCreateInfo, pImage);
17418 }
17419 
17420 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage2(
17421     VmaAllocator VMA_NOT_NULL allocator,
17422     VmaAllocation VMA_NOT_NULL allocation,
17423     VkDeviceSize allocationLocalOffset,
17424     const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
17425     VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage)
17426 {
17427     VMA_ASSERT(allocator && pImageCreateInfo && pImage && allocation);
17428 
17429     *pImage = VK_NULL_HANDLE;
17430 
17431     VMA_DEBUG_LOG("vmaCreateAliasingImage2");
17432 
17433     if (pImageCreateInfo->extent.width == 0 ||
17434         pImageCreateInfo->extent.height == 0 ||
17435         pImageCreateInfo->extent.depth == 0 ||
17436         pImageCreateInfo->mipLevels == 0 ||
17437         pImageCreateInfo->arrayLayers == 0)
17438     {
17439         return VK_ERROR_INITIALIZATION_FAILED;
17440     }
17441 
17442     VMA_DEBUG_GLOBAL_MUTEX_LOCK
17443 
17444     // 1. Create VkImage.
17445     VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
17446         allocator->m_hDevice,
17447         pImageCreateInfo,
17448         allocator->GetAllocationCallbacks(),
17449         pImage);
17450     if (res >= 0)
17451     {
17452         // 2. Bind image with memory.
17453         res = allocator->BindImageMemory(allocation, allocationLocalOffset, *pImage, VMA_NULL);
17454         if (res >= 0)
17455         {
17456             return VK_SUCCESS;
17457         }
17458         (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
17459     }
17460     return res;
17461 }
17462 
17463 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
17464     VmaAllocator VMA_NOT_NULL allocator,
17465     VkImage VMA_NULLABLE_NON_DISPATCHABLE image,
17466     VmaAllocation VMA_NULLABLE allocation)
17467 {
17468     VMA_ASSERT(allocator);
17469 
17470     if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
17471     {
17472         return;
17473     }
17474 
17475     VMA_DEBUG_LOG("vmaDestroyImage");
17476 
17477     VMA_DEBUG_GLOBAL_MUTEX_LOCK
17478 
17479     if(image != VK_NULL_HANDLE)
17480     {
17481         (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
17482     }
17483     if(allocation != VK_NULL_HANDLE)
17484     {
17485         allocator->FreeMemory(
17486             1, // allocationCount
17487             &allocation);
17488     }
17489 }
17490 
17491 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateVirtualBlock(
17492     const VmaVirtualBlockCreateInfo* VMA_NOT_NULL pCreateInfo,
17493     VmaVirtualBlock VMA_NULLABLE * VMA_NOT_NULL pVirtualBlock)
17494 {
17495     VMA_ASSERT(pCreateInfo && pVirtualBlock);
17496     VMA_ASSERT(pCreateInfo->size > 0);
17497     VMA_DEBUG_LOG("vmaCreateVirtualBlock");
17498     VMA_DEBUG_GLOBAL_MUTEX_LOCK;
17499     *pVirtualBlock = vma_new(pCreateInfo->pAllocationCallbacks, VmaVirtualBlock_T)(*pCreateInfo);
17500     VkResult res = (*pVirtualBlock)->Init();
17501     if(res < 0)
17502     {
17503         vma_delete(pCreateInfo->pAllocationCallbacks, *pVirtualBlock);
17504         *pVirtualBlock = VK_NULL_HANDLE;
17505     }
17506     return res;
17507 }
17508 
17509 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyVirtualBlock(VmaVirtualBlock VMA_NULLABLE virtualBlock)
17510 {
17511     if(virtualBlock != VK_NULL_HANDLE)
17512     {
17513         VMA_DEBUG_LOG("vmaDestroyVirtualBlock");
17514         VMA_DEBUG_GLOBAL_MUTEX_LOCK;
17515         VkAllocationCallbacks allocationCallbacks = virtualBlock->m_AllocationCallbacks; // Have to copy the callbacks when destroying.
17516         vma_delete(&allocationCallbacks, virtualBlock);
17517     }
17518 }
17519 
17520 VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaIsVirtualBlockEmpty(VmaVirtualBlock VMA_NOT_NULL virtualBlock)
17521 {
17522     VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);
17523     VMA_DEBUG_LOG("vmaIsVirtualBlockEmpty");
17524     VMA_DEBUG_GLOBAL_MUTEX_LOCK;
17525     return virtualBlock->IsEmpty() ? VK_TRUE : VK_FALSE;
17526 }
17527 
17528 VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualAllocationInfo(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
17529     VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo)
17530 {
17531     VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pVirtualAllocInfo != VMA_NULL);
17532     VMA_DEBUG_LOG("vmaGetVirtualAllocationInfo");
17533     VMA_DEBUG_GLOBAL_MUTEX_LOCK;
17534     virtualBlock->GetAllocationInfo(allocation, *pVirtualAllocInfo);
17535 }
17536 
17537 VMA_CALL_PRE VkResult VMA_CALL_POST vmaVirtualAllocate(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
17538     const VmaVirtualAllocationCreateInfo* VMA_NOT_NULL pCreateInfo, VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pAllocation,
17539     VkDeviceSize* VMA_NULLABLE pOffset)
17540 {
17541     VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pCreateInfo != VMA_NULL && pAllocation != VMA_NULL);
17542     VMA_DEBUG_LOG("vmaVirtualAllocate");
17543     VMA_DEBUG_GLOBAL_MUTEX_LOCK;
17544     return virtualBlock->Allocate(*pCreateInfo, *pAllocation, pOffset);
17545 }
17546 
17547 VMA_CALL_PRE void VMA_CALL_POST vmaVirtualFree(VmaVirtualBlock VMA_NOT_NULL virtualBlock, VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE allocation)
17548 {
17549     if(allocation != VK_NULL_HANDLE)
17550     {
17551         VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);
17552         VMA_DEBUG_LOG("vmaVirtualFree");
17553         VMA_DEBUG_GLOBAL_MUTEX_LOCK;
17554         virtualBlock->Free(allocation);
17555     }
17556 }
17557 
17558 VMA_CALL_PRE void VMA_CALL_POST vmaClearVirtualBlock(VmaVirtualBlock VMA_NOT_NULL virtualBlock)
17559 {
17560     VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);
17561     VMA_DEBUG_LOG("vmaClearVirtualBlock");
17562     VMA_DEBUG_GLOBAL_MUTEX_LOCK;
17563     virtualBlock->Clear();
17564 }
17565 
17566 VMA_CALL_PRE void VMA_CALL_POST vmaSetVirtualAllocationUserData(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
17567     VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, void* VMA_NULLABLE pUserData)
17568 {
17569     VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);
17570     VMA_DEBUG_LOG("vmaSetVirtualAllocationUserData");
17571     VMA_DEBUG_GLOBAL_MUTEX_LOCK;
17572     virtualBlock->SetAllocationUserData(allocation, pUserData);
17573 }
17574 
17575 VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualBlockStatistics(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
17576     VmaStatistics* VMA_NOT_NULL pStats)
17577 {
17578     VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pStats != VMA_NULL);
17579     VMA_DEBUG_LOG("vmaGetVirtualBlockStatistics");
17580     VMA_DEBUG_GLOBAL_MUTEX_LOCK;
17581     virtualBlock->GetStatistics(*pStats);
17582 }
17583 
17584 VMA_CALL_PRE void VMA_CALL_POST vmaCalculateVirtualBlockStatistics(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
17585     VmaDetailedStatistics* VMA_NOT_NULL pStats)
17586 {
17587     VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pStats != VMA_NULL);
17588     VMA_DEBUG_LOG("vmaCalculateVirtualBlockStatistics");
17589     VMA_DEBUG_GLOBAL_MUTEX_LOCK;
17590     virtualBlock->CalculateDetailedStatistics(*pStats);
17591 }
17592 
17593 #if VMA_STATS_STRING_ENABLED
17594 
17595 VMA_CALL_PRE void VMA_CALL_POST vmaBuildVirtualBlockStatsString(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
17596     char* VMA_NULLABLE * VMA_NOT_NULL ppStatsString, VkBool32 detailedMap)
17597 {
17598     VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && ppStatsString != VMA_NULL);
17599     VMA_DEBUG_GLOBAL_MUTEX_LOCK;
17600     const VkAllocationCallbacks* allocationCallbacks = virtualBlock->GetAllocationCallbacks();
17601     VmaStringBuilder sb(allocationCallbacks);
17602     virtualBlock->BuildStatsString(detailedMap != VK_FALSE, sb);
17603     *ppStatsString = VmaCreateStringCopy(allocationCallbacks, sb.GetData(), sb.GetLength());
17604 }
17605 
17606 VMA_CALL_PRE void VMA_CALL_POST vmaFreeVirtualBlockStatsString(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
17607     char* VMA_NULLABLE pStatsString)
17608 {
17609     if(pStatsString != VMA_NULL)
17610     {
17611         VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);
17612         VMA_DEBUG_GLOBAL_MUTEX_LOCK;
17613         VmaFreeString(virtualBlock->GetAllocationCallbacks(), pStatsString);
17614     }
17615 }
17616 #endif // VMA_STATS_STRING_ENABLED
17617 #endif // _VMA_PUBLIC_INTERFACE
17618 #endif // VMA_IMPLEMENTATION
17619 
17620 /**
17621 \page quick_start Quick start
17622 
17623 \section quick_start_project_setup Project setup
17624 
17625 Vulkan Memory Allocator comes in the form of an "stb-style" single header file.
17626 You don't need to build it as a separate library project.
17627 You can add this file directly to your project and submit it to your code repository next to your other source files.
17628 
17629 "Single header" doesn't mean that everything is contained in C/C++ declarations,
17630 like it tends to be in the case of inline functions or C++ templates.
17631 It means that the implementation is bundled with the interface in a single file and needs to be extracted using a preprocessor macro.
17632 If you don't do this properly, you will get linker errors.
17633 
17634 To do it properly:
17635 
17636 -# Include "vk_mem_alloc.h" file in each CPP file where you want to use the library.
17637    This includes declarations of all members of the library.
17638 -# In exactly one CPP file, define the following macro before this include.
17639    It also enables the internal definitions.
17640 
17641 \code
17642 #define VMA_IMPLEMENTATION
17643 #include "vk_mem_alloc.h"
17644 \endcode
17645 
17646 It may be a good idea to create a dedicated CPP file just for this purpose.
17647 
17648 This library includes header `<vulkan/vulkan.h>`, which in turn
17649 includes `<windows.h>` on Windows. If you need some specific macros defined
17650 before including these headers (like `WIN32_LEAN_AND_MEAN` or
17651 `WINVER` for Windows, `VK_USE_PLATFORM_WIN32_KHR` for Vulkan), you must define
17652 them before every `#include` of this library.
17653 
17654 This library is written in C++, but has a C-compatible interface.
17655 Thus you can include and use vk_mem_alloc.h in C or C++ code, but the full
17656 implementation with the `VMA_IMPLEMENTATION` macro must be compiled as C++, NOT as C.
17657 Some features of C++14 are used; STL containers, RTTI, and C++ exceptions are not.
17658 
17659 
17660 \section quick_start_initialization Initialization
17661 
17662 At program startup:
17663 
17664 -# Initialize Vulkan to have `VkPhysicalDevice`, `VkDevice` and `VkInstance` objects.
17665 -# Fill VmaAllocatorCreateInfo structure and create #VmaAllocator object by
17666    calling vmaCreateAllocator().
17667 
17668 Only the members `physicalDevice`, `device`, and `instance` are required.
17669 However, you should inform the library which Vulkan version you use by setting
17670 VmaAllocatorCreateInfo::vulkanApiVersion and which extensions you enabled
17671 by setting VmaAllocatorCreateInfo::flags (like #VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT for VK_KHR_buffer_device_address).
17672 Otherwise, VMA uses only features of Vulkan 1.0 core with no extensions.
17673 
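A minimal sketch of this initialization, assuming `physicalDevice`, `device`, and `instance`
were created earlier and you link statically with the Vulkan loader (case 1 in the section
about importing Vulkan functions below), could look like this:

\code
VmaAllocatorCreateInfo allocatorCreateInfo = {};
allocatorCreateInfo.vulkanApiVersion = VK_API_VERSION_1_0; // Or higher, if you use it.
allocatorCreateInfo.physicalDevice = physicalDevice;
allocatorCreateInfo.device = device;
allocatorCreateInfo.instance = instance;

VmaAllocator allocator;
vmaCreateAllocator(&allocatorCreateInfo, &allocator);

// ...

// At program shutdown:
vmaDestroyAllocator(allocator);
\endcode
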
17674 \subsection quick_start_initialization_selecting_vulkan_version Selecting Vulkan version
17675 
17676 VMA supports Vulkan versions down to 1.0, for backward compatibility.
17677 If you want to use a higher version, you need to inform the library about it.
17678 This is a two-step process.
17679 
17680 <b>Step 1: Compile time.</b> By default, VMA compiles with code supporting the highest
17681 Vulkan version found in the included `<vulkan/vulkan.h>` that is also supported by the library.
17682 If this is OK, you don't need to do anything.
17683 However, if you want to compile VMA as if only some lower Vulkan version were available,
17684 define the macro `VMA_VULKAN_VERSION` before every `#include "vk_mem_alloc.h"`.
17685 It should have a decimal numeric value of the form ABBBCCC, where A = major, BBB = minor, CCC = patch Vulkan version.
17686 For example, to compile against Vulkan 1.2:
17687 
17688 \code
17689 #define VMA_VULKAN_VERSION 1002000 // Vulkan 1.2
17690 #include "vk_mem_alloc.h"
17691 \endcode
17692 
17693 <b>Step 2: Runtime.</b> Even when compiled with a higher Vulkan version available,
17694 VMA can use only features of a lower version, which is configurable during creation of the #VmaAllocator object.
17695 By default, only Vulkan 1.0 is used.
17696 To initialize the allocator with support for higher Vulkan version, you need to set member
17697 VmaAllocatorCreateInfo::vulkanApiVersion to an appropriate value, e.g. using constants like `VK_API_VERSION_1_2`.
17698 See code sample below.
17699 
17700 \subsection quick_start_initialization_importing_vulkan_functions Importing Vulkan functions
17701 
17702 You may need to configure the way Vulkan functions are imported. There are 3 ways to do this:
17703 
17704 -# **If you link with Vulkan static library** (e.g. "vulkan-1.lib" on Windows):
17705    - You don't need to do anything.
17706    - VMA will use these, as macro `VMA_STATIC_VULKAN_FUNCTIONS` is defined to 1 by default.
17707 -# **If you want VMA to fetch pointers to Vulkan functions dynamically** using `vkGetInstanceProcAddr`,
17708    `vkGetDeviceProcAddr` (this is the option presented in the example below):
17709    - Define `VMA_STATIC_VULKAN_FUNCTIONS` to 0, `VMA_DYNAMIC_VULKAN_FUNCTIONS` to 1.
17710    - Provide pointers to these two functions via VmaVulkanFunctions::vkGetInstanceProcAddr,
17711      VmaVulkanFunctions::vkGetDeviceProcAddr.
17712    - The library will fetch pointers to all other functions it needs internally.
17713 -# **If you fetch pointers to all Vulkan functions in a custom way**, e.g. using some loader like
17714    [Volk](https://github.com/zeux/volk):
17715    - Define `VMA_STATIC_VULKAN_FUNCTIONS` and `VMA_DYNAMIC_VULKAN_FUNCTIONS` to 0.
17716    - Pass these pointers via structure #VmaVulkanFunctions.
17717 
17718 Example for case 2:
17719 
17720 \code
17721 #define VMA_STATIC_VULKAN_FUNCTIONS 0
17722 #define VMA_DYNAMIC_VULKAN_FUNCTIONS 1
17723 #include "vk_mem_alloc.h"
17724 
17725 ...
17726 
17727 VmaVulkanFunctions vulkanFunctions = {};
17728 vulkanFunctions.vkGetInstanceProcAddr = &vkGetInstanceProcAddr;
17729 vulkanFunctions.vkGetDeviceProcAddr = &vkGetDeviceProcAddr;
17730 
17731 VmaAllocatorCreateInfo allocatorCreateInfo = {};
17732 allocatorCreateInfo.vulkanApiVersion = VK_API_VERSION_1_2;
17733 allocatorCreateInfo.physicalDevice = physicalDevice;
17734 allocatorCreateInfo.device = device;
17735 allocatorCreateInfo.instance = instance;
17736 allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;
17737 
17738 VmaAllocator allocator;
17739 vmaCreateAllocator(&allocatorCreateInfo, &allocator);
17740 \endcode
17741 
17742 
17743 \section quick_start_resource_allocation Resource allocation
17744 
17745 When you want to create a buffer or image:
17746 
17747 -# Fill `VkBufferCreateInfo` / `VkImageCreateInfo` structure.
17748 -# Fill VmaAllocationCreateInfo structure.
17749 -# Call vmaCreateBuffer() / vmaCreateImage() to get `VkBuffer`/`VkImage` with memory
17750    already allocated and bound to it, plus a #VmaAllocation object that represents its underlying memory.
17751 
17752 \code
17753 VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
17754 bufferInfo.size = 65536;
17755 bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
17756 
17757 VmaAllocationCreateInfo allocInfo = {};
17758 allocInfo.usage = VMA_MEMORY_USAGE_AUTO;
17759 
17760 VkBuffer buffer;
17761 VmaAllocation allocation;
17762 vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
17763 \endcode
17764 
17765 Don't forget to destroy your objects when no longer needed:
17766 
17767 \code
17768 vmaDestroyBuffer(allocator, buffer, allocation);
17769 vmaDestroyAllocator(allocator);
17770 \endcode
17771 
17772 
17773 \page choosing_memory_type Choosing memory type
17774 
17775 Physical devices in Vulkan support various combinations of memory heaps and
17776 types. Help with choosing a correct and optimal memory type for your specific
17777 resource is one of the key features of this library. You can use it by filling
17778 appropriate members of the VmaAllocationCreateInfo structure, as described below.
17779 You can also combine multiple methods.
17780 
17781 -# If you just want to find a memory type index that meets your requirements, you
17782    can use one of the functions: vmaFindMemoryTypeIndexForBufferInfo(),
17783    vmaFindMemoryTypeIndexForImageInfo(), vmaFindMemoryTypeIndex().
17784 -# If you want to allocate a region of device memory without association with any
17785    specific image or buffer, you can use the function vmaAllocateMemory(). Usage of
17786    this function is not recommended and usually not needed.
17787    The vmaAllocateMemoryPages() function is also provided for creating multiple allocations at once,
17788    which may be useful for sparse binding.
17789 -# If you already have a buffer or an image created, want to allocate memory
17790    for it, and will then bind it yourself, you can use
17791    vmaAllocateMemoryForBuffer() or vmaAllocateMemoryForImage().
17792    For binding, use vmaBindBufferMemory() or vmaBindImageMemory(),
17793    or their extended versions vmaBindBufferMemory2() and vmaBindImageMemory2(). See the sketch after this list.
17794 -# **This is the easiest and recommended way to use this library:**
17795    If you want to create a buffer or an image, allocate memory for it, and bind
17796    them together, all in one call, you can use vmaCreateBuffer() or
17797    vmaCreateImage().
17798 
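A minimal sketch of case 3 (error checking omitted; `device` and `allocator` are assumed
to exist, and `preferredFlags` is used instead of #VMA_MEMORY_USAGE_AUTO because the
`VMA_MEMORY_USAGE_AUTO*` values need the resource's create info - see the note later on this page):

\code
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;

VkBuffer buf;
vkCreateBuffer(device, &bufCreateInfo, nullptr, &buf);

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.preferredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

// Allocate memory suitable for this buffer, then bind it yourself.
VmaAllocation alloc;
vmaAllocateMemoryForBuffer(allocator, buf, &allocCreateInfo, &alloc, nullptr);
vmaBindBufferMemory(allocator, alloc, buf);
\endcode
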
17799 When using 3. or 4., the library internally queries Vulkan for memory types
17800 supported for that buffer or image (function `vkGetBufferMemoryRequirements()`)
17801 and uses only one of these types.
17802 
17803 If no memory type can be found that meets all the requirements, these functions
17804 return `VK_ERROR_FEATURE_NOT_PRESENT`.
17805 
17806 You can leave the VmaAllocationCreateInfo structure completely filled with zeros.
17807 It means no requirements are specified for the memory type.
17808 It is valid, although not very useful.
17809 
17810 \section choosing_memory_type_usage Usage
17811 
17812 The easiest way to specify memory requirements is to fill the member
17813 VmaAllocationCreateInfo::usage using one of the values of the enum #VmaMemoryUsage.
17814 It defines high-level, common usage types.
17815 Since version 3 of the library, it is recommended to use #VMA_MEMORY_USAGE_AUTO to let it select the best memory type for your resource automatically.
17816 
17817 For example, if you want to create a uniform buffer that will be filled via
17818 transfer only once or infrequently and then used for rendering every frame, you can
17819 do it using the following code. The buffer will most likely end up in a memory type with
17820 `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`, so it is fast to access by the GPU.
17821 
17822 \code
17823 VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
17824 bufferInfo.size = 65536;
17825 bufferInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
17826 
17827 VmaAllocationCreateInfo allocInfo = {};
17828 allocInfo.usage = VMA_MEMORY_USAGE_AUTO;
17829 
17830 VkBuffer buffer;
17831 VmaAllocation allocation;
17832 vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
17833 \endcode
17834 
17835 If you have a preference for putting the resource in GPU (device) memory or CPU (host) memory
17836 on systems with a discrete graphics card that has separate memories, you can use
17837 #VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE or #VMA_MEMORY_USAGE_AUTO_PREFER_HOST.
17838 
17839 When using `VMA_MEMORY_USAGE_AUTO*` and you want to map the allocated memory,
17840 you also need to specify one of the host access flags:
17841 #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
17842 This helps the library decide on a preferred memory type and ensures it has `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`
17843 so you can map it.
17844 
17845 For example, a staging buffer that will be filled via a mapped pointer and then
17846 used as a source of transfer to the buffer described previously can be created like this.
17847 It will likely end up in a memory type that is `HOST_VISIBLE` and `HOST_COHERENT`
17848 but not `HOST_CACHED` (meaning uncached, write-combined) and not `DEVICE_LOCAL` (meaning system RAM).
17849 
17850 \code
17851 VkBufferCreateInfo stagingBufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
17852 stagingBufferInfo.size = 65536;
17853 stagingBufferInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
17854 
17855 VmaAllocationCreateInfo stagingAllocInfo = {};
17856 stagingAllocInfo.usage = VMA_MEMORY_USAGE_AUTO;
17857 stagingAllocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT;
17858 
17859 VkBuffer stagingBuffer;
17860 VmaAllocation stagingAllocation;
17861 vmaCreateBuffer(allocator, &stagingBufferInfo, &stagingAllocInfo, &stagingBuffer, &stagingAllocation, nullptr);
17862 \endcode
17863 
17864 For more examples of creating different kinds of resources, see chapter \ref usage_patterns.
17865 
17866 Usage values `VMA_MEMORY_USAGE_AUTO*` are legal to use only when the library knows
17867 about the resource being created by having `VkBufferCreateInfo` / `VkImageCreateInfo` passed,
17868 so they work with functions like vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), etc.
17869 If you allocate raw memory using the function vmaAllocateMemory(), you have to use other means of selecting
17870 the memory type, as described below.
17871 
17872 \note
17873 Old usage values (`VMA_MEMORY_USAGE_GPU_ONLY`, `VMA_MEMORY_USAGE_CPU_ONLY`,
17874 `VMA_MEMORY_USAGE_CPU_TO_GPU`, `VMA_MEMORY_USAGE_GPU_TO_CPU`, `VMA_MEMORY_USAGE_CPU_COPY`)
17875 are still available and work the same way as in previous versions of the library
17876 for backward compatibility, but they are not recommended.
17877 
17878 \section choosing_memory_type_required_preferred_flags Required and preferred flags
17879 
17880 You can specify more detailed requirements by filling members
17881 VmaAllocationCreateInfo::requiredFlags and VmaAllocationCreateInfo::preferredFlags
17882 with a combination of bits from the enum `VkMemoryPropertyFlags`. For example,
17883 if you want to create a buffer that will be persistently mapped on the host (so it
17884 must be `HOST_VISIBLE`) and preferably will also be `HOST_COHERENT` and `HOST_CACHED`,
17885 use the following code:
17886 
17887 \code
17888 VmaAllocationCreateInfo allocInfo = {};
17889 allocInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
17890 allocInfo.preferredFlags = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
17891 allocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT;
17892 
17893 VkBuffer buffer;
17894 VmaAllocation allocation;
17895 vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
17896 \endcode
17897 
17898 A memory type is chosen that has all the required flags and as many preferred
17899 flags set as possible.
17900 
17901 The value passed in VmaAllocationCreateInfo::usage is internally converted to a set of required and preferred flags,
17902 plus some extra "magic" (heuristics).
17903 
17904 \section choosing_memory_type_explicit_memory_types Explicit memory types
17905 
17906 If you inspected memory types available on the physical device and you have
17907 a preference for memory types that you want to use, you can fill member
17908 VmaAllocationCreateInfo::memoryTypeBits. It is a bit mask, where each bit set
17909 means that a memory type with that index is allowed to be used for the
17910 allocation. Special value 0, just like `UINT32_MAX`, means there are no
17911 restrictions to memory type index.
17912 
17913 Please note that this member is NOT just a memory type index.
17914 Still, you can use it to choose just one specific memory type.
17915 For example, if you already determined that your buffer should be created in
17916 memory type 2, use the following code:
17917 
17918 \code
17919 uint32_t memoryTypeIndex = 2;
17920 
17921 VmaAllocationCreateInfo allocInfo = {};
17922 allocInfo.memoryTypeBits = 1u << memoryTypeIndex;
17923 
17924 VkBuffer buffer;
17925 VmaAllocation allocation;
17926 vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
17927 \endcode
17928 
17929 
17930 \section choosing_memory_type_custom_memory_pools Custom memory pools
17931 
17932 If you allocate from a custom memory pool, none of the ways of specifying memory
17933 requirements described above are applicable, and the aforementioned members
17934 of the VmaAllocationCreateInfo structure are ignored. The memory type is selected
17935 explicitly when creating the pool and is then used for all the allocations made from
17936 that pool, as the sketch below shows. For further details, see \ref custom_memory_pools.
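
A brief, hypothetical sketch (`memoryTypeIndex` = 2 is just an example value, e.g. found
with vmaFindMemoryTypeIndex(); `bufCreateInfo` is a buffer create info like in the earlier examples):

\code
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = 2;

VmaPool pool;
vmaCreatePool(allocator, &poolCreateInfo, &pool);

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.pool = pool; // Memory type selection members are ignored.

VkBuffer buf;
VmaAllocation alloc;
vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
\endcode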
17937 
17938 \section choosing_memory_type_dedicated_allocations Dedicated allocations
17939 
17940 Memory for allocations is reserved out of a larger block of `VkDeviceMemory`
17941 allocated from Vulkan internally. That is the main feature of this whole library.
17942 You can still request a separate memory block to be created for an allocation,
17943 just like you would do in a trivial solution without using any allocator.
17944 In that case, a buffer or image is always bound to that memory at offset 0.
17945 This is called a "dedicated allocation".
17946 You can explicitly request it by using the flag #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT, as shown in the sketch after this list.
17947 The library can also internally decide to use a dedicated allocation in some cases, e.g.:
17948 
17949 - When the size of the allocation is large.
17950 - When [VK_KHR_dedicated_allocation](@ref vk_khr_dedicated_allocation) extension is enabled
17951   and it reports that dedicated allocation is required or recommended for the resource.
17952 - When allocation of the next big memory block fails due to insufficient device memory,
17953   but an allocation of the exact requested size succeeds.
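
A minimal sketch of requesting a dedicated allocation explicitly (reusing `bufCreateInfo`
from the earlier examples):

\code
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; // Own VkDeviceMemory block.

VkBuffer buf;
VmaAllocation alloc;
vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
\endcode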
17954 
17955 
17956 \page memory_mapping Memory mapping
17957 
17958 To "map memory" in Vulkan means to obtain a CPU pointer to `VkDeviceMemory`,
17959 to be able to read from it or write to it in CPU code.
17960 Mapping is possible only for memory allocated from a memory type that has
17961 the `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` flag.
17962 The functions `vkMapMemory()`, `vkUnmapMemory()` are designed for this purpose.
17963 You can use them directly with memory allocated by this library,
17964 but it is not recommended because of the following issue:
17965 mapping the same `VkDeviceMemory` block multiple times is illegal - only one mapping at a time is allowed.
17966 This includes mapping disjoint regions. Mapping is not reference-counted internally by Vulkan.
17967 Because of this, Vulkan Memory Allocator provides the following facilities:
17968 
17969 \note If you want to be able to map an allocation, you need to specify one of the flags
17970 #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
17971 in VmaAllocationCreateInfo::flags. These flags are required for an allocation to be mappable
17972 when using #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` enum values.
17973 For other usage values they are ignored, and every such allocation made in a `HOST_VISIBLE` memory type is mappable,
17974 but the flags can still be used for consistency.
17975 
17976 \section memory_mapping_mapping_functions Mapping functions
17977 
17978 The library provides the following functions for mapping of a specific #VmaAllocation: vmaMapMemory(), vmaUnmapMemory().
17979 They are safer and more convenient to use than standard Vulkan functions.
17980 You can map an allocation multiple times simultaneously - mapping is reference-counted internally.
17981 You can also map different allocations simultaneously regardless of whether they use the same `VkDeviceMemory` block.
17982 The way it is implemented is that the library always maps the entire memory block, not just the region of the allocation.
17983 For further details, see the description of the vmaMapMemory() function.
17984 Example:
17985 
17986 \code
17987 // Having these objects initialized:
17988 struct ConstantBuffer
17989 {
17990     ...
17991 };
17992 ConstantBuffer constantBufferData = ...
17993 
17994 VmaAllocator allocator = ...
17995 VkBuffer constantBuffer = ...
17996 VmaAllocation constantBufferAllocation = ...
17997 
17998 // You can map and fill your buffer using following code:
17999 
18000 void* mappedData;
18001 vmaMapMemory(allocator, constantBufferAllocation, &mappedData);
18002 memcpy(mappedData, &constantBufferData, sizeof(constantBufferData));
18003 vmaUnmapMemory(allocator, constantBufferAllocation);
18004 \endcode
18005 
18006 When mapping, you may see a warning from Vulkan validation layer similar to this one:
18007 
18008 <i>Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. Only GENERAL or PREINITIALIZED should be used.</i>
18009 
18010 It happens because the library maps the entire `VkDeviceMemory` block, where different
18011 types of images and buffers may end up together, especially on GPUs with unified memory like Intel.
18012 You can safely ignore it if you are sure that you access only the memory of the intended
18013 object that you mapped.
18014 
18015 
18016 \section memory_mapping_persistently_mapped_memory Persistently mapped memory
18017 
18018 Keeping your memory persistently mapped is generally OK in Vulkan.
18019 You don't need to unmap it before using its data on the GPU.
18020 The library provides a special feature designed for that:
18021 Allocations made with the #VMA_ALLOCATION_CREATE_MAPPED_BIT flag set in
18022 VmaAllocationCreateInfo::flags stay mapped all the time,
18023 so you can just access the CPU pointer to the memory at any time
18024 without needing to call any "map" or "unmap" function.
18025 Example:
18026 
18027 \code
18028 VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
18029 bufCreateInfo.size = sizeof(ConstantBuffer);
18030 bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
18031 
18032 VmaAllocationCreateInfo allocCreateInfo = {};
18033 allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
18034 allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
18035     VMA_ALLOCATION_CREATE_MAPPED_BIT;
18036 
18037 VkBuffer buf;
18038 VmaAllocation alloc;
18039 VmaAllocationInfo allocInfo;
18040 vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
18041 
18042 // Buffer is already mapped. You can access its memory.
18043 memcpy(allocInfo.pMappedData, &constantBufferData, sizeof(constantBufferData));
18044 \endcode
18045 
18046 \note #VMA_ALLOCATION_CREATE_MAPPED_BIT by itself doesn't guarantee that the allocation will end up
18047 in a mappable memory type.
18048 For this, you need to also specify #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or
18049 #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
18050 #VMA_ALLOCATION_CREATE_MAPPED_BIT only guarantees that if the memory is `HOST_VISIBLE`, the allocation will be mapped on creation.
18051 For an example of how to make use of this fact, see section \ref usage_patterns_advanced_data_uploading.
18052 
18053 \section memory_mapping_cache_control Cache flush and invalidate
18054 
18055 Memory in Vulkan doesn't need to be unmapped before using it on the GPU,
18056 but unless a memory type has the `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT` flag set,
18057 you need to manually **invalidate** the cache before reading from a mapped pointer
18058 and **flush** the cache after writing to it.
18059 Map/unmap operations don't do that automatically.
18060 Vulkan provides the functions `vkFlushMappedMemoryRanges()` and
18061 `vkInvalidateMappedMemoryRanges()` for this purpose, but this library provides more convenient
18062 functions that refer to a given allocation object: vmaFlushAllocation() and
18063 vmaInvalidateAllocation(),
18064 or, for multiple objects at once: vmaFlushAllocations() and vmaInvalidateAllocations().
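
For example, flushing after a write through a mapped pointer might look like this
(a sketch, assuming `alloc` and `allocInfo` come from a buffer created with
#VMA_ALLOCATION_CREATE_MAPPED_BIT, as shown in the previous section):

\code
memcpy(allocInfo.pMappedData, &constantBufferData, sizeof(constantBufferData));
// Flush the whole allocation. This is a no-op if the memory type is HOST_COHERENT.
vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
\endcode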
18065 
18066 Regions of memory specified for flush/invalidate must be aligned to
18067 `VkPhysicalDeviceLimits::nonCoherentAtomSize`. This is automatically ensured by the library.
18068 In any memory type that is `HOST_VISIBLE` but not `HOST_COHERENT`, all allocations
18069 within blocks are aligned to this value, so their offsets are always a multiple of
18070 `nonCoherentAtomSize` and two different allocations never share the same "line" of this size.
18071 
18072 Also, Windows drivers from all 3 PC GPU vendors (AMD, Intel, NVIDIA)
18073 currently provide the `HOST_COHERENT` flag on all memory types that are
18074 `HOST_VISIBLE`, so on PC you may not need to bother.
18075 
18076 
18077 \page staying_within_budget Staying within budget
18078 
18079 When developing a graphics-intensive game or program, it is important to avoid allocating
18080 more GPU memory than is physically available. When the memory is over-committed,
18081 various bad things can happen, depending on the specific GPU, graphics driver, and
18082 operating system:
18083 
18084 - It may just work without any problems.
18085 - The application may slow down because some memory blocks are moved to system RAM
18086   and the GPU has to access them through PCI Express bus.
18087 - A new allocation may take a very long time to complete, even a few seconds, and may possibly
18088   freeze the entire system.
18089 - The new allocation may fail with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
18090 - It may even result in a GPU crash (TDR), observed as `VK_ERROR_DEVICE_LOST`
18091   returned somewhere later.
18092 
18093 \section staying_within_budget_querying_for_budget Querying for budget
18094 
18095 To query for current memory usage and available budget, use the function vmaGetHeapBudgets().
18096 The returned structure #VmaBudget contains quantities expressed in bytes, per Vulkan memory heap.
18097 
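A sketch of iterating over all heaps (assuming an existing `allocator`):

\code
const VkPhysicalDeviceMemoryProperties* memProps = nullptr;
vmaGetMemoryProperties(allocator, &memProps);

std::vector<VmaBudget> budgets(memProps->memoryHeapCount);
vmaGetHeapBudgets(allocator, budgets.data());

for(uint32_t heapIndex = 0; heapIndex < memProps->memoryHeapCount; ++heapIndex)
{
    printf("Heap %u: %llu B used out of a budget of %llu B.\n", heapIndex,
        (unsigned long long)budgets[heapIndex].usage,
        (unsigned long long)budgets[heapIndex].budget);
}
\endcode
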
18098 Please note that this function returns different information and works faster than
18099 vmaCalculateStatistics(). vmaGetHeapBudgets() can be called every frame or even before every
18100 allocation, while vmaCalculateStatistics() is intended to be used rarely,
18101 only to obtain statistical information, e.g. for debugging purposes.
18102 
18103 It is recommended to use the <b>VK_EXT_memory_budget</b> device extension to obtain information
18104 about the budget from the Vulkan device. VMA is able to use this extension automatically.
18105 When it is not enabled, the allocator behaves the same way, but it then estimates current usage
18106 and available budget based on its internal information and Vulkan memory heap sizes,
18107 which may be less precise. In order to use this extension:
18108 
18109 1. Make sure the extensions VK_EXT_memory_budget and VK_KHR_get_physical_device_properties2
18110    required by it are available and enable them. Please note that the first is a device
18111    extension and the second is an instance extension!
18112 2. Use the flag #VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT when creating the #VmaAllocator object.
18113 3. Make sure to call vmaSetCurrentFrameIndex() every frame. The budget is queried from
18114    Vulkan inside of it to avoid the overhead of querying it with every allocation. See the sketch after this list.
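
A sketch of steps 2 and 3, assuming the extensions from step 1 were already enabled
and `frameIndex` is a hypothetical counter you increment every frame:

\code
VmaAllocatorCreateInfo allocatorCreateInfo = {};
// ... other members filled as usual ...
allocatorCreateInfo.flags |= VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT;

VmaAllocator allocator;
vmaCreateAllocator(&allocatorCreateInfo, &allocator);

// Later, once per frame:
vmaSetCurrentFrameIndex(allocator, frameIndex);
\endcode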
18115 
18116 \section staying_within_budget_controlling_memory_usage Controlling memory usage
18117 
18118 There are many ways in which you can try to stay within the budget.
18119 
18120 First, when making a new allocation requires allocating a new memory block, the library
18121 automatically tries not to exceed the budget. If a block with the default recommended size
18122 (e.g. 256 MB) would go over budget, a smaller block is allocated, possibly even
18123 dedicated memory for just this resource.
18124 
18125 If the size of the requested resource plus current memory usage is more than the
18126 budget, by default the library still tries to create it, leaving it to the Vulkan
18127 implementation whether the allocation succeeds or fails. You can change this behavior
18128 by using the #VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT flag. With it, the allocation is
18129 not made if it would exceed the budget or if the budget is already exceeded.
18130 VMA then tries to make the allocation from the next eligible Vulkan memory type.
18131 If all of them fail, the call fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
18132 An example usage pattern may be to pass the #VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT flag
18133 when creating resources that are not essential for the application (e.g. the texture
18134 of a specific object) and not to pass it when creating critically important resources
18135 (e.g. render targets), as in the sketch below.
18136 
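A sketch (`texCreateInfo` is a hypothetical image create info for such a non-essential texture):

\code
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT; // Fail rather than exceed the budget.

VkImage tex;
VmaAllocation texAlloc;
VkResult res = vmaCreateImage(allocator, &texCreateInfo, &allocCreateInfo, &tex, &texAlloc, nullptr);
if(res != VK_SUCCESS)
{
    // Over budget - e.g. fall back to a lower-resolution texture.
}
\endcode
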
18137 On AMD graphics cards there is a custom vendor extension available: <b>VK_AMD_memory_overallocation_behavior</b>
18138 that allows controlling the behavior of the Vulkan implementation in out-of-memory cases -
18139 whether it should fail with an error code or still allow the allocation.
18140 Usage of this extension involves only passing an extra structure on Vulkan device creation,
18141 so it is out of the scope of this library.
18142 
18143 Finally, you can also use the #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT flag to make sure
18144 a new allocation is created only when it fits inside one of the existing memory blocks.
18145 If it would require allocating a new block, the call fails instead with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
18146 This also ensures that the function call is very fast because it never goes to Vulkan
18147 to obtain a new block.
18148 
18149 \note Creating \ref custom_memory_pools with VmaPoolCreateInfo::minBlockCount
18150 set to more than 0 will currently try to allocate memory blocks without checking whether they
18151 fit within budget.
18152 
18153 
18154 \page resource_aliasing Resource aliasing (overlap)
18155 
18156 New explicit graphics APIs (Vulkan and Direct3D 12), thanks to manual memory
18157 management, give an opportunity to alias (overlap) multiple resources in the
18158 same region of memory - a feature not available in the old APIs (Direct3D 11, OpenGL).
18159 It can be useful to save video memory, but it must be used with caution.
18160 
18161 For example, if you know the flow of your whole render frame in advance, you
18162 are going to use some intermediate textures or buffers only during a small range of render passes,
18163 and you know these ranges don't overlap in time, you can bind these resources to
18164 the same place in memory, even if they have completely different parameters (width, height, format etc.).
18165 
18166 ![Resource aliasing (overlap)](../gfx/Aliasing.png)
18167 
18168 Such a scenario is possible using VMA, but you need to create your images manually.
18169 Then you need to calculate the parameters of the allocation to be made using these formulas:
18170 
18171 - allocation size = max(size of each image)
18172 - allocation alignment = max(alignment of each image)
18173 - allocation memoryTypeBits = bitwise AND(memoryTypeBits of each image)
18174 
18175 The following example shows two different images bound to the same place in memory,
18176 allocated to fit the largest of them.

\code
// A 512x512 texture to be sampled.
VkImageCreateInfo img1CreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
img1CreateInfo.imageType = VK_IMAGE_TYPE_2D;
img1CreateInfo.extent.width = 512;
img1CreateInfo.extent.height = 512;
img1CreateInfo.extent.depth = 1;
img1CreateInfo.mipLevels = 10;
img1CreateInfo.arrayLayers = 1;
img1CreateInfo.format = VK_FORMAT_R8G8B8A8_SRGB;
img1CreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
img1CreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
img1CreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
img1CreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;

// A full screen texture to be used as color attachment.
VkImageCreateInfo img2CreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
img2CreateInfo.imageType = VK_IMAGE_TYPE_2D;
img2CreateInfo.extent.width = 1920;
img2CreateInfo.extent.height = 1080;
img2CreateInfo.extent.depth = 1;
img2CreateInfo.mipLevels = 1;
img2CreateInfo.arrayLayers = 1;
img2CreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
img2CreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
img2CreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
img2CreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
img2CreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;

VkImage img1;
res = vkCreateImage(device, &img1CreateInfo, nullptr, &img1);
VkImage img2;
res = vkCreateImage(device, &img2CreateInfo, nullptr, &img2);

VkMemoryRequirements img1MemReq;
vkGetImageMemoryRequirements(device, img1, &img1MemReq);
VkMemoryRequirements img2MemReq;
vkGetImageMemoryRequirements(device, img2, &img2MemReq);

VkMemoryRequirements finalMemReq = {};
finalMemReq.size = std::max(img1MemReq.size, img2MemReq.size);
finalMemReq.alignment = std::max(img1MemReq.alignment, img2MemReq.alignment);
finalMemReq.memoryTypeBits = img1MemReq.memoryTypeBits & img2MemReq.memoryTypeBits;
// Validate that finalMemReq.memoryTypeBits != 0 - otherwise the images cannot alias.

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.preferredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

VmaAllocation alloc;
res = vmaAllocateMemory(allocator, &finalMemReq, &allocCreateInfo, &alloc, nullptr);

res = vmaBindImageMemory(allocator, alloc, img1);
res = vmaBindImageMemory(allocator, alloc, img2);

// You can use img1, img2 here, but not at the same time!

vmaFreeMemory(allocator, alloc);
vkDestroyImage(device, img2, nullptr);
vkDestroyImage(device, img1, nullptr);
\endcode

VMA also provides convenience functions that create a buffer or image and bind it to memory
represented by an existing #VmaAllocation:
vmaCreateAliasingBuffer(), vmaCreateAliasingBuffer2(),
vmaCreateAliasingImage(), vmaCreateAliasingImage2().
Versions with "2" offer an additional parameter `allocationLocalOffset`.
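
For example, a third image aliasing the same memory could be created with one call instead of separate
vkCreateImage() + vmaBindImageMemory() (a sketch reusing `alloc` and `img2CreateInfo` from the example above):

\code
VkImage img3;
res = vmaCreateAliasingImage2(allocator, alloc,
    0, // allocationLocalOffset
    &img2CreateInfo, &img3);
// ...
vkDestroyImage(device, img3, nullptr); // The image is not destroyed by vmaFreeMemory().
\endcode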

Remember that using resources that alias in memory requires proper synchronization.
You need to issue a memory barrier to make sure commands that use `img1` and `img2`
don't overlap on the GPU timeline.
You also need to treat a resource after aliasing as uninitialized - containing garbage data.
For example, if you use `img1` and then want to use `img2`, you need to issue
an image memory barrier for `img2` with `oldLayout` = `VK_IMAGE_LAYOUT_UNDEFINED`.
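
A sketch of such a barrier follows; the pipeline stages and access masks are placeholders that depend
on how your frame actually uses the two images:

\code
VkImageMemoryBarrier barrier = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER };
barrier.srcAccessMask = 0;
barrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED; // Previous content of img2 is garbage - discard it.
barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.image = img2;
barrier.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 };
vkCmdPipelineBarrier(cmdBuf,
    VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,         // Example: last stage that read img1.
    VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, // Example: first stage that writes img2.
    0, 0, nullptr, 0, nullptr, 1, &barrier);
\endcode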

Additional considerations:

- Vulkan also allows interpreting contents of memory between aliasing resources consistently in some cases.
See chapter 11.8. "Memory Aliasing" of the Vulkan specification or the `VK_IMAGE_CREATE_ALIAS_BIT` flag.
- You can create a more complex layout where different images and buffers are bound
at different offsets inside one large allocation. For example, one can imagine
a big texture used in some render passes, aliasing with a set of many small buffers
used in some further passes. To bind a resource at non-zero offset in an allocation,
use vmaBindBufferMemory2() / vmaBindImageMemory2().
- Before allocating memory for the resources you want to alias, check `memoryTypeBits`
returned in memory requirements of each resource to make sure the bits overlap.
Some GPUs may expose multiple memory types suitable e.g. only for buffers or
images with `COLOR_ATTACHMENT` usage, so the sets of memory types supported by your
resources may be disjoint. Aliasing them is not possible in that case.


\page custom_memory_pools Custom memory pools

A memory pool contains a number of `VkDeviceMemory` blocks.
The library automatically creates and manages a default pool for each memory type available on the device.
A default memory pool automatically grows in size.
The size of allocated blocks is also variable and managed automatically.

You can create a custom pool and allocate memory out of it.
It can be useful if you want to:

- Keep certain kinds of allocations separate from others.
- Enforce a particular, fixed size of Vulkan memory blocks.
- Limit the maximum amount of Vulkan memory allocated for that pool.
- Reserve a minimum or fixed amount of Vulkan memory always preallocated for that pool.
- Use extra parameters for a set of your allocations that are available in #VmaPoolCreateInfo but not in
  #VmaAllocationCreateInfo - e.g., custom minimum alignment, custom `pNext` chain.
- Perform defragmentation on a specific subset of your allocations.

To use custom memory pools:

-# Fill VmaPoolCreateInfo structure.
-# Call vmaCreatePool() to obtain #VmaPool handle.
-# When making an allocation, set VmaAllocationCreateInfo::pool to this handle.
   You don't need to specify any other parameters of this structure, like `usage`.

Example:

\code
// Find memoryTypeIndex for the pool.
VkBufferCreateInfo sampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
sampleBufCreateInfo.size = 0x10000; // Doesn't matter.
sampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo sampleAllocCreateInfo = {};
sampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;

uint32_t memTypeIndex;
VkResult res = vmaFindMemoryTypeIndexForBufferInfo(allocator,
    &sampleBufCreateInfo, &sampleAllocCreateInfo, &memTypeIndex);
// Check res...

// Create a pool that can have at most 2 blocks, 128 MiB each.
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.blockSize = 128ull * 1024 * 1024;
poolCreateInfo.maxBlockCount = 2;

VmaPool pool;
res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
// Check res...

// Allocate a buffer out of it.
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 1024;
bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.pool = pool;

VkBuffer buf;
VmaAllocation alloc;
res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
// Check res...
\endcode

You have to free all allocations made from this pool before destroying it.

\code
vmaDestroyBuffer(allocator, buf, alloc);
vmaDestroyPool(allocator, pool);
\endcode

New versions of this library support creating dedicated allocations in custom pools.
It is supported only when VmaPoolCreateInfo::blockSize = 0.
To use this feature, set VmaAllocationCreateInfo::pool to your custom pool and
VmaAllocationCreateInfo::flags to #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
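
A minimal sketch, assuming `memTypeIndex` was found as described in \ref custom_memory_pools_MemTypeIndex:

\code
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.blockSize = 0; // Required for dedicated allocations in a custom pool.

VmaPool pool;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
// Check res...

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.pool = pool;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
\endcode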

\note Excessive use of custom pools is a common mistake when using this library.
Custom pools may be useful for special purposes - when you want to
keep certain types of resources separate, e.g. to reserve a minimum amount of memory
for them or limit the maximum amount of memory they can occupy. For most
resources this is not needed and so it is not recommended to create #VmaPool
objects and allocations out of them. Allocating from the default pool is sufficient.


\section custom_memory_pools_MemTypeIndex Choosing memory type index

When creating a pool, you must explicitly specify a memory type index.
To find the one suitable for your buffers or images, you can use the helper functions
vmaFindMemoryTypeIndexForBufferInfo() and vmaFindMemoryTypeIndexForImageInfo().
You need to provide structures with example parameters of buffers or images
that you are going to create in that pool.

\code
VkBufferCreateInfo exampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
exampleBufCreateInfo.size = 1024; // Doesn't matter
exampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;

uint32_t memTypeIndex;
vmaFindMemoryTypeIndexForBufferInfo(allocator, &exampleBufCreateInfo, &allocCreateInfo, &memTypeIndex);

VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
// ...
\endcode

When creating buffers/images allocated in that pool, provide the following parameters:

- `VkBufferCreateInfo`: Prefer to pass the same parameters as above.
  Otherwise you risk creating resources in a memory type that is not suitable for them, which may result in undefined behavior.
  Using different `VK_BUFFER_USAGE_` flags may work, but you shouldn't create images in a pool intended for buffers
  or the other way around.
- VmaAllocationCreateInfo: You don't need to pass the same parameters. Fill only the `pool` member.
  Other members are ignored anyway.

\section linear_algorithm Linear allocation algorithm

Each Vulkan memory block managed by this library has accompanying metadata that
keeps track of used and unused regions. By default, the metadata structure and
algorithm try to find the best place for new allocations among free regions to
optimize memory usage. This way you can allocate and free objects in any order.

![Default allocation algorithm](../gfx/Linear_allocator_1_algo_default.png)

Sometimes there is a need for a simpler, linear allocation algorithm. You can
create a custom pool that uses such an algorithm by adding flag
#VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT to VmaPoolCreateInfo::flags while creating
the #VmaPool object. Then an alternative metadata management is used. It always
creates new allocations after the last one and doesn't reuse free regions left by
allocations freed in the middle. This results in better allocation performance and
less memory consumed by metadata.

![Linear allocation algorithm](../gfx/Linear_allocator_2_algo_linear.png)
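
Creating such a pool might look like this (a minimal sketch; `memTypeIndex` found as described in
\ref custom_memory_pools_MemTypeIndex, block size chosen arbitrarily):

\code
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
poolCreateInfo.blockSize = 16ull * 1024 * 1024; // Example: one fixed 16 MiB block.
poolCreateInfo.maxBlockCount = 1;

VmaPool pool;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
\endcode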

With this one flag, you can create a custom pool that can be used in many ways:
free-at-once, stack, double stack, and ring buffer. See below for details.
You don't need to specify explicitly which of these options you are going to use - it is detected automatically.

\subsection linear_algorithm_free_at_once Free-at-once

In a pool that uses the linear algorithm, you still need to free all the allocations
individually, e.g. by using vmaFreeMemory() or vmaDestroyBuffer(). You can free
them in any order. New allocations are always made after the last one - free space
in the middle is not reused. However, when you release all the allocations and
the pool becomes empty, allocation starts from the beginning again. This way you
can use the linear algorithm to speed up creation of allocations that you are going
to release all at once.

![Free-at-once](../gfx/Linear_allocator_3_free_at_once.png)

This mode is also available for pools created with a VmaPoolCreateInfo::maxBlockCount
value that allows multiple memory blocks.

\subsection linear_algorithm_stack Stack

When you free an allocation that was created last, its space can be reused.
Thanks to this, if you always release allocations in the order opposite to their
creation (LIFO - Last In First Out), you can achieve behavior of a stack.

![Stack](../gfx/Linear_allocator_4_stack.png)

This mode is also available for pools created with a VmaPoolCreateInfo::maxBlockCount
value that allows multiple memory blocks.

\subsection linear_algorithm_double_stack Double stack

The space reserved by a custom pool with linear algorithm may be used by two
stacks:

- First, default one, growing up from offset 0.
- Second, "upper" one, growing down from the end towards lower offsets.

To make an allocation from the upper stack, add flag #VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT
to VmaAllocationCreateInfo::flags, as shown in the sketch below.

![Double stack](../gfx/Linear_allocator_7_double_stack.png)

Double stack is available only in pools with one memory block -
VmaPoolCreateInfo::maxBlockCount must be 1. Otherwise behavior is undefined.

When the two stacks' ends meet so there is not enough space between them for a
new allocation, such an allocation fails with the usual
`VK_ERROR_OUT_OF_DEVICE_MEMORY` error.
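
A sketch of an allocation from the upper stack (`myLinearPool` is a hypothetical pool created with
#VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT and `maxBlockCount` = 1; `bufCreateInfo` filled as usual):

\code
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.pool = myLinearPool;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT; // Allocate from the upper stack.

VkBuffer buf;
VmaAllocation alloc;
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
\endcode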

\subsection linear_algorithm_ring_buffer Ring buffer

When you free some allocations from the beginning and there is not enough free space
for a new one at the end of a pool, the allocator's "cursor" wraps around to the
beginning and starts allocating there. Thanks to this, if you always release
allocations in the same order as you created them (FIFO - First In First Out),
you can achieve behavior of a ring buffer / queue.

![Ring buffer](../gfx/Linear_allocator_5_ring_buffer.png)

Ring buffer is available only in pools with one memory block -
VmaPoolCreateInfo::maxBlockCount must be 1. Otherwise behavior is undefined.

\note \ref defragmentation is not supported in custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT.


\page defragmentation Defragmentation

Interleaved allocations and deallocations of many objects of varying size can
cause fragmentation over time, which can lead to a situation where the library is unable
to find a continuous range of free memory for a new allocation, even though there is
enough free space in total, just scattered across many small free ranges between existing
allocations.

To mitigate this problem, you can use the defragmentation feature.
It doesn't happen automatically though and needs your cooperation,
because VMA is a low level library that only allocates memory.
It cannot recreate buffers and images in a new place as it doesn't remember the contents of `VkBufferCreateInfo` / `VkImageCreateInfo` structures.
It cannot copy their contents as it doesn't record any commands to a command buffer.

Example:

\code
VmaDefragmentationInfo defragInfo = {};
defragInfo.pool = myPool;
defragInfo.flags = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT;

VmaDefragmentationContext defragCtx;
VkResult res = vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx);
// Check res...

for(;;)
{
    VmaDefragmentationPassMoveInfo pass;
    res = vmaBeginDefragmentationPass(allocator, defragCtx, &pass);
    if(res == VK_SUCCESS)
        break;
    else if(res != VK_INCOMPLETE)
        // Handle error...

    for(uint32_t i = 0; i < pass.moveCount; ++i)
    {
        // Inspect pass.pMoves[i].srcAllocation, identify what buffer/image it represents.
        VmaAllocationInfo allocInfo;
        vmaGetAllocationInfo(allocator, pass.pMoves[i].srcAllocation, &allocInfo);
        MyEngineResourceData* resData = (MyEngineResourceData*)allocInfo.pUserData;

        // Recreate and bind this buffer/image at: pass.pMoves[i].dstMemory, pass.pMoves[i].dstOffset.
        VkImageCreateInfo imgCreateInfo = ...
        VkImage newImg;
        res = vkCreateImage(device, &imgCreateInfo, nullptr, &newImg);
        // Check res...
        res = vmaBindImageMemory(allocator, pass.pMoves[i].dstTmpAllocation, newImg);
        // Check res...

        // Issue a vkCmdCopyBuffer/vkCmdCopyImage to copy its content to the new place.
        vkCmdCopyImage(cmdBuf, resData->img, ..., newImg, ...);
    }

    // Make sure the copy commands finished executing.
    vkWaitForFences(...);

    // Destroy old buffers/images bound with pass.pMoves[i].srcAllocation.
    for(uint32_t i = 0; i < pass.moveCount; ++i)
    {
        // ...
        vkDestroyImage(device, resData->img, nullptr);
    }

    // Update appropriate descriptors to point to the new places...

    res = vmaEndDefragmentationPass(allocator, defragCtx, &pass);
    if(res == VK_SUCCESS)
        break;
    else if(res != VK_INCOMPLETE)
        // Handle error...
}

vmaEndDefragmentation(allocator, defragCtx, nullptr);
\endcode

Although functions like vmaCreateBuffer(), vmaCreateImage(), vmaDestroyBuffer(), vmaDestroyImage()
create/destroy an allocation and a buffer/image at once, these are just a shortcut for
creating the resource, allocating memory, and binding them together.
Defragmentation works on memory allocations only. You must handle the rest manually.
Defragmentation is an iterative process that should repeat "passes" as long as related functions
return `VK_INCOMPLETE` rather than `VK_SUCCESS`.
In each pass:

1. vmaBeginDefragmentationPass() function call:
   - Calculates and returns the list of allocations to be moved in this pass.
     Note this can be a time-consuming process.
   - Reserves destination memory for them by creating temporary destination allocations
     that you can query for their `VkDeviceMemory` + offset using vmaGetAllocationInfo().
2. Inside the pass, **you should**:
   - Inspect the returned list of allocations to be moved.
   - Create new buffers/images and bind them at the returned destination temporary allocations.
   - Copy data from source to destination resources if necessary.
   - Destroy the source buffers/images, but NOT their allocations.
3. vmaEndDefragmentationPass() function call:
   - Frees the source memory reserved for the allocations that are moved.
   - Modifies source #VmaAllocation objects that are moved to point to the destination reserved memory.
   - Frees `VkDeviceMemory` blocks that became empty.

Unlike in previous iterations of the defragmentation API, there is no list of "movable" allocations passed as a parameter.
The defragmentation algorithm tries to move all suitable allocations.
You can, however, refuse to move some of them inside a defragmentation pass, by setting
`pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE.
This is not recommended and may result in suboptimal packing of the allocations after defragmentation.
If you cannot ensure any allocation can be moved, it is better to keep movable allocations separate in a custom pool.

Inside a pass, for each allocation that should be moved:

- You should copy its data from the source to the destination place by calling e.g. `vkCmdCopyBuffer()`, `vkCmdCopyImage()`.
  - You need to make sure these commands finished executing before destroying the source buffers/images and before calling vmaEndDefragmentationPass().
- If a resource doesn't contain any meaningful data, e.g. it is a transient color attachment image to be cleared,
  filled, and used temporarily in each rendering frame, you can just recreate this image
  without copying its data.
- If the resource is in `HOST_VISIBLE` and `HOST_CACHED` memory, you can copy its data on the CPU
  using `memcpy()`.
- If you cannot move the allocation, you can set `pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE.
  This will cancel the move.
  - vmaEndDefragmentationPass() will then free the destination memory
    not the source memory of the allocation, leaving it unchanged.
- If you decide the allocation is unimportant and can be destroyed instead of moved (e.g. it wasn't used for a long time),
  you can set `pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY.
  - vmaEndDefragmentationPass() will then free both source and destination memory, and will destroy the source #VmaAllocation object.

You can defragment a specific custom pool by setting VmaDefragmentationInfo::pool
(like in the example above) or all the default pools by setting this member to null.

Defragmentation is always performed in each pool separately.
Allocations are never moved between different Vulkan memory types.
The size of the destination memory reserved for a moved allocation is the same as the original one.
Alignment of an allocation as it was determined using `vkGetBufferMemoryRequirements()` etc. is also respected after defragmentation.
Buffers/images should be recreated with the same `VkBufferCreateInfo` / `VkImageCreateInfo` parameters as the original ones.

You can perform the defragmentation incrementally to limit the number of allocations and bytes to be moved
in each pass, e.g. to call it in sync with render frames and avoid big hitches.
See members: VmaDefragmentationInfo::maxBytesPerPass, VmaDefragmentationInfo::maxAllocationsPerPass.
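
For example (the limit values are arbitrary):

\code
VmaDefragmentationInfo defragInfo = {};
defragInfo.pool = myPool; // Or null to defragment the default pools.
defragInfo.flags = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT;
// Limit the work done in a single pass, e.g. to spread it across render frames.
defragInfo.maxBytesPerPass = 16ull * 1024 * 1024; // Move at most 16 MiB per pass.
defragInfo.maxAllocationsPerPass = 32;            // Move at most 32 allocations per pass.
\endcode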

It is also safe to perform the defragmentation asynchronously to render frames and other Vulkan and VMA
usage, possibly from multiple threads, with the exception that allocations
returned in VmaDefragmentationPassMoveInfo::pMoves shouldn't be destroyed until the defragmentation pass is ended.

<b>Mapping</b> is preserved on allocations that are moved during defragmentation.
Whether mapped through #VMA_ALLOCATION_CREATE_MAPPED_BIT or vmaMapMemory(), the allocations
are mapped at their new place. Of course, the pointer to the mapped data changes, so it needs to be queried
using VmaAllocationInfo::pMappedData.

\note Defragmentation is not supported in custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT.


\page statistics Statistics

This library contains several functions that return information about its internal state,
especially the amount of memory allocated from Vulkan.

\section statistics_numeric_statistics Numeric statistics

If you need to obtain basic statistics about memory usage per heap, together with current budget,
you can call function vmaGetHeapBudgets() and inspect structure #VmaBudget.
This is useful to keep track of memory usage and stay within budget
(see also \ref staying_within_budget).
Example:

\code
uint32_t heapIndex = ...

VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
vmaGetHeapBudgets(allocator, budgets);

printf("My heap currently has %u allocations taking %llu B,\n",
    budgets[heapIndex].statistics.allocationCount,
    budgets[heapIndex].statistics.allocationBytes);
printf("allocated out of %u Vulkan device memory blocks taking %llu B,\n",
    budgets[heapIndex].statistics.blockCount,
    budgets[heapIndex].statistics.blockBytes);
printf("Vulkan reports total usage %llu B with budget %llu B.\n",
    budgets[heapIndex].usage,
    budgets[heapIndex].budget);
\endcode

You can query for more detailed statistics per memory heap, type, and totals,
including minimum and maximum allocation size and unused range size,
by calling function vmaCalculateStatistics() and inspecting structure #VmaTotalStatistics.
This function is slower though, as it has to traverse all the internal data structures,
so it should be used only for debugging purposes.

You can query for statistics of a custom pool using function vmaGetPoolStatistics()
or vmaCalculatePoolStatistics().
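
For example:

\code
VmaStatistics poolStats;
vmaGetPoolStatistics(allocator, pool, &poolStats);
// Inspect poolStats.blockCount, poolStats.blockBytes,
// poolStats.allocationCount, poolStats.allocationBytes.
\endcode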

You can query for information about a specific allocation using function vmaGetAllocationInfo().
It fills in structure #VmaAllocationInfo.

\section statistics_json_dump JSON dump

You can dump the internal state of the allocator to a string in JSON format using function vmaBuildStatsString().
The result is guaranteed to be correct JSON.
It uses ANSI encoding.
Any strings provided by the user (see [Allocation names](@ref allocation_names))
are copied as-is and properly escaped for JSON, so if they use UTF-8, ISO-8859-2 or any other encoding,
this JSON string can be treated as using this encoding.
It must be freed using function vmaFreeStatsString().
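
For example:

\code
char* statsString = nullptr;
vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map of blocks.
// Write statsString to a file or log...
vmaFreeStatsString(allocator, statsString);
\endcode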

The format of this JSON string is not part of the official documentation of the library,
but it will not change in a backward-incompatible way without increasing the library major version number
and an appropriate mention in the changelog.

The JSON string contains all the data that can be obtained using vmaCalculateStatistics().
It can also contain a detailed map of allocated memory blocks and their regions -
free and occupied by allocations.
This allows e.g. visualizing the memory or assessing fragmentation.


\page allocation_annotation Allocation names and user data

\section allocation_user_data Allocation user data

You can annotate allocations with your own information, e.g. for debugging purposes.
To do that, fill the VmaAllocationCreateInfo::pUserData field when creating
an allocation. It is an opaque `void*` pointer. You can use it e.g. as a pointer,
some handle, index, key, ordinal number or any other value that would associate
the allocation with your custom metadata.
It is useful to identify appropriate data structures in your engine given a #VmaAllocation,
e.g. when doing \ref defragmentation.

\code
VkBufferCreateInfo bufCreateInfo = ...

MyBufferMetadata* pMetadata = CreateBufferMetadata();

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
allocCreateInfo.pUserData = pMetadata;

VkBuffer buffer;
VmaAllocation allocation;
vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buffer, &allocation, nullptr);
\endcode

The pointer may be later retrieved as VmaAllocationInfo::pUserData:

\code
VmaAllocationInfo allocInfo;
vmaGetAllocationInfo(allocator, allocation, &allocInfo);
MyBufferMetadata* pMetadata = (MyBufferMetadata*)allocInfo.pUserData;
\endcode

It can also be changed using function vmaSetAllocationUserData().
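
For example (`pNewMetadata` being any pointer of your choosing):

\code
vmaSetAllocationUserData(allocator, allocation, pNewMetadata);
\endcode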

Values of (non-zero) allocations' `pUserData` are printed in the JSON report created by
vmaBuildStatsString() in hexadecimal form.

\section allocation_names Allocation names

An allocation can also carry a null-terminated string, giving a name to the allocation.
To set it, call vmaSetAllocationName().
The library creates an internal copy of the string, so the pointer you pass doesn't need
to be valid for the whole lifetime of the allocation. You can free it after the call.

\code
std::string imageName = "Texture: ";
imageName += fileName;
vmaSetAllocationName(allocator, allocation, imageName.c_str());
\endcode

The string can be later retrieved by inspecting VmaAllocationInfo::pName.
It is also printed in the JSON report created by vmaBuildStatsString().

\note Setting a string name on a VMA allocation doesn't automatically set it on the Vulkan buffer or image created with it.
You must do it manually using an extension like VK_EXT_debug_utils, which is independent of this library.


\page virtual_allocator Virtual allocator

As an extra feature, the core allocation algorithm of the library is exposed through a simple and convenient API of a "virtual allocator".
It doesn't allocate any real GPU memory. It just keeps track of used and free regions of a "virtual block".
You can use it to allocate your own memory or other objects, even completely unrelated to Vulkan.
A common use case is sub-allocation of pieces of one large GPU buffer.

\section virtual_allocator_creating_virtual_block Creating virtual block

To use this functionality, there is no main "allocator" object.
You don't need to have a #VmaAllocator object created.
All you need to do is to create a separate #VmaVirtualBlock object for each block of memory you want to be managed by the allocator:

-# Fill in #VmaVirtualBlockCreateInfo structure.
-# Call vmaCreateVirtualBlock(). Get new #VmaVirtualBlock object.

Example:

\code
VmaVirtualBlockCreateInfo blockCreateInfo = {};
blockCreateInfo.size = 1048576; // 1 MiB

VmaVirtualBlock block;
VkResult res = vmaCreateVirtualBlock(&blockCreateInfo, &block);
\endcode

\section virtual_allocator_making_virtual_allocations Making virtual allocations

A #VmaVirtualBlock object contains an internal data structure that keeps track of free and occupied regions
using the same code as the main Vulkan memory allocator.
Similarly to #VmaAllocation for standard GPU allocations, there is the #VmaVirtualAllocation type
that represents an opaque handle to an allocation within the virtual block.

In order to make such an allocation:

-# Fill in #VmaVirtualAllocationCreateInfo structure.
-# Call vmaVirtualAllocate(). Get new #VmaVirtualAllocation object that represents the allocation.
   You can also receive `VkDeviceSize offset` that was assigned to the allocation.

Example:

\code
VmaVirtualAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.size = 4096; // 4 KiB

VmaVirtualAllocation alloc;
VkDeviceSize offset;
res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, &offset);
if(res == VK_SUCCESS)
{
    // Use the 4 KiB of your memory starting at offset.
}
else
{
    // Allocation failed - no space for it could be found. Handle this error!
}
\endcode

\section virtual_allocator_deallocation Deallocation

When no longer needed, an allocation can be freed by calling vmaVirtualFree().
You can only pass to this function an allocation that was previously returned by vmaVirtualAllocate()
called for the same #VmaVirtualBlock.

When the whole block is no longer needed, the block object can be released by calling vmaDestroyVirtualBlock().
All allocations must be freed before the block is destroyed, which is checked internally by an assert.
However, if you don't want to call vmaVirtualFree() for each allocation, you can use vmaClearVirtualBlock() to free them all at once -
a feature not available in the normal Vulkan memory allocator. Example:

\code
vmaVirtualFree(block, alloc);
vmaDestroyVirtualBlock(block);
\endcode

\section virtual_allocator_allocation_parameters Allocation parameters

You can attach a custom pointer to each allocation by using vmaSetVirtualAllocationUserData().
Its default value is null.
It can be used to store any data that needs to be associated with that allocation - e.g. an index, a handle, or a pointer to some
larger data structure containing more information. Example:

\code
struct CustomAllocData
{
    std::string m_AllocName;
};
CustomAllocData* allocData = new CustomAllocData();
allocData->m_AllocName = "My allocation 1";
vmaSetVirtualAllocationUserData(block, alloc, allocData);
\endcode

The pointer can later be fetched, along with allocation offset and size, by passing the allocation handle to function
vmaGetVirtualAllocationInfo() and inspecting the returned structure #VmaVirtualAllocationInfo.
If you allocated a new object to be used as the custom pointer, don't forget to delete that object before freeing the allocation!
Example:

\code
VmaVirtualAllocationInfo allocInfo;
vmaGetVirtualAllocationInfo(block, alloc, &allocInfo);
delete (CustomAllocData*)allocInfo.pUserData;

vmaVirtualFree(block, alloc);
\endcode

\section virtual_allocator_alignment_and_units Alignment and units

It feels natural to express sizes and offsets in bytes.
If an offset of an allocation needs to be aligned to a multiple of some number (e.g. 4 bytes), you can fill the optional member
VmaVirtualAllocationCreateInfo::alignment to request it. Example:

\code
VmaVirtualAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.size = 4096; // 4 KiB
allocCreateInfo.alignment = 4; // Returned offset must be a multiple of 4 B

VmaVirtualAllocation alloc;
res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, nullptr);
\endcode

Alignments of different allocations made from one block may vary.
However, if all alignments and sizes are always a multiple of some size, e.g. 4 B or `sizeof(MyDataStruct)`,
you can express all sizes, alignments, and offsets in multiples of that size instead of individual bytes.
It might be more convenient, but you need to make sure to use this new unit consistently in all the places:

- VmaVirtualBlockCreateInfo::size
- VmaVirtualAllocationCreateInfo::size and VmaVirtualAllocationCreateInfo::alignment
- Using offset returned by vmaVirtualAllocate() or in VmaVirtualAllocationInfo::offset

\section virtual_allocator_statistics Statistics

You can obtain statistics of a virtual block using vmaGetVirtualBlockStatistics()
(to get brief statistics that are fast to calculate)
or vmaCalculateVirtualBlockStatistics() (to get more detailed statistics, slower to calculate).
The functions fill structures #VmaStatistics and #VmaDetailedStatistics respectively - the same as used by the normal Vulkan memory allocator.
Example:

\code
VmaStatistics stats;
vmaGetVirtualBlockStatistics(block, &stats);
printf("My virtual block has %llu bytes used by %u virtual allocations\n",
    stats.allocationBytes, stats.allocationCount);
\endcode

You can also request a full list of allocations and free regions as a string in JSON format by calling
vmaBuildVirtualBlockStatsString().
The returned string must be later freed using vmaFreeVirtualBlockStatsString().
The format of this string differs from the one returned by the main Vulkan allocator, but it is similar.

\section virtual_allocator_additional_considerations Additional considerations

The "virtual allocator" functionality is implemented on a level of individual memory blocks.
Keeping track of a whole collection of blocks, allocating new ones when out of free space,
deleting empty ones, and deciding which one to try first for a new allocation must be implemented by the user.

Alternative allocation algorithms are supported, just like in custom pools of the real GPU memory.
See enum #VmaVirtualBlockCreateFlagBits to learn how to specify them (e.g. #VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT).
You can find their description in chapter \ref custom_memory_pools.
Allocation strategies are also supported.
See enum #VmaVirtualAllocationCreateFlagBits to learn how to specify them (e.g. #VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT).

The following features are supported only by the allocator of the real GPU memory and not by virtual allocations:
buffer-image granularity, `VMA_DEBUG_MARGIN`, `VMA_MIN_ALIGNMENT`.


\page debugging_memory_usage Debugging incorrect memory usage

If you suspect a bug with memory usage, like usage of uninitialized memory or
memory being overwritten out of bounds of an allocation,
you can use debug features of this library to verify this.

\section debugging_memory_usage_initialization Memory initialization

If you experience a bug with incorrect and nondeterministic data in your program and you suspect uninitialized memory to be used,
you can enable automatic memory initialization to verify this.
To do it, define macro `VMA_DEBUG_INITIALIZE_ALLOCATIONS` to 1.

\code
#define VMA_DEBUG_INITIALIZE_ALLOCATIONS 1
#include "vk_mem_alloc.h"
\endcode

It makes the memory of new allocations initialized to bit pattern `0xDCDCDCDC`.
Before an allocation is destroyed, its memory is filled with bit pattern `0xEFEFEFEF`.
Memory is automatically mapped and unmapped if necessary.

If you find these values while debugging your program, chances are good that you incorrectly
read Vulkan memory that is allocated but not initialized, or already freed, respectively.

Memory initialization works only with memory types that are `HOST_VISIBLE` and with allocations that can be mapped.
It also works with dedicated allocations.

\section debugging_memory_usage_margins Margins

By default, allocations are laid out in memory blocks next to each other if possible
(considering required alignment, `bufferImageGranularity`, and `nonCoherentAtomSize`).

![Allocations without margin](../gfx/Margins_1.png)

Define macro `VMA_DEBUG_MARGIN` to some non-zero value (e.g. 16) to enforce the specified
number of bytes as a margin after every allocation.

\code
#define VMA_DEBUG_MARGIN 16
#include "vk_mem_alloc.h"
\endcode

![Allocations with margin](../gfx/Margins_2.png)

If your bug goes away after enabling margins, it means it may be caused by memory
being overwritten outside of allocation boundaries. It is not 100% certain though.
A change in application behavior may also be caused by different order and distribution
of allocations across memory blocks after margins are applied.

Margins work with all types of memory.

Margin is applied only to allocations made out of memory blocks and not to dedicated
allocations, which have their own memory block of specific size.
It is thus not applied to allocations made using the #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT flag
or those automatically decided to be put into dedicated allocations, e.g. due to their
large size or as recommended by the VK_KHR_dedicated_allocation extension.

Margins appear in [JSON dump](@ref statistics_json_dump) as part of free space.

Note that enabling margins increases memory usage and fragmentation.

Margins do not apply to \ref virtual_allocator.

\section debugging_memory_usage_corruption_detection Corruption detection

You can additionally define macro `VMA_DEBUG_DETECT_CORRUPTION` to 1 to enable validation
of contents of the margins.

\code
#define VMA_DEBUG_MARGIN 16
#define VMA_DEBUG_DETECT_CORRUPTION 1
#include "vk_mem_alloc.h"
\endcode

When this feature is enabled, the number of bytes specified as `VMA_DEBUG_MARGIN`
(it must be a multiple of 4) after every allocation is filled with a magic number.
This idea is also known as a "canary".
Memory is automatically mapped and unmapped if necessary.

This number is validated automatically when the allocation is destroyed.
If it is not equal to the expected value, `VMA_ASSERT()` is executed.
It clearly means that either the CPU or the GPU has overwritten the memory outside the boundaries of the allocation,
which indicates a serious bug.

You can also explicitly request checking margins of all allocations in all memory blocks
that belong to specified memory types by using function vmaCheckCorruption(),
or in memory blocks that belong to a specified custom pool, by using function
vmaCheckPoolCorruption(), as in the sketch below.
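
A sketch (`myPool` is a hypothetical custom pool; the memory type bitmask is an example value):

\code
// Check all allocations in memory types 0 and 2:
VkResult res = vmaCheckCorruption(allocator, (1u << 0) | (1u << 2));
// Or check a single custom pool:
res = vmaCheckPoolCorruption(allocator, myPool);
// VK_SUCCESS means the margins were validated and found intact.
\endcode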

Margin validation (corruption detection) works only for memory types that are
`HOST_VISIBLE` and `HOST_COHERENT`.


\page opengl_interop OpenGL Interop

VMA provides some features that help with interoperability with OpenGL.

\section opengl_interop_exporting_memory Exporting memory

If you want to attach `VkExportMemoryAllocateInfoKHR` structure to the `pNext` chain of memory allocations made by the library:

It is recommended to create \ref custom_memory_pools for such allocations.
Define and fill in your `VkExportMemoryAllocateInfoKHR` structure and attach it to VmaPoolCreateInfo::pMemoryAllocateNext
while creating the custom pool.
Please note that the structure must remain alive and unchanged for the whole lifetime of the #VmaPool,
not only while creating it, as no copy of the structure is made,
but its original pointer is used for each allocation instead.
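
A sketch of such a pool setup (the handle type and `memTypeIndex` are examples; keep the export info
structure alive e.g. as a member of a long-lived object):

\code
// Must remain alive and unchanged for the whole lifetime of the pool!
VkExportMemoryAllocateInfoKHR exportMemAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR };
exportMemAllocInfo.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR; // Or a Win32 handle type.

VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex; // Found e.g. with vmaFindMemoryTypeIndexForBufferInfo().
poolCreateInfo.pMemoryAllocateNext = &exportMemAllocInfo;

VmaPool pool;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
\endcode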

If you want to export all memory allocated by the library from certain memory types,
also dedicated allocations or other allocations made from default pools,
an alternative solution is to fill in VmaAllocatorCreateInfo::pTypeExternalMemoryHandleTypes.
It should point to an array with `VkExternalMemoryHandleTypeFlagsKHR` to be automatically passed by the library
through `VkExportMemoryAllocateInfoKHR` on each allocation made from a specific memory type.
Please note that new versions of the library also support dedicated allocations created in custom pools.

You should not mix these two methods in a way that would apply both to the same memory type.
Otherwise, the `VkExportMemoryAllocateInfoKHR` structure would be attached twice to the `pNext` chain of `VkMemoryAllocateInfo`.


\section opengl_interop_custom_alignment Custom alignment

Buffers or images exported to a different API like OpenGL may require a different alignment,
higher than the one used by the library automatically, queried from functions like `vkGetBufferMemoryRequirements`.
To impose such alignment:

It is recommended to create \ref custom_memory_pools for such allocations.
Set the VmaPoolCreateInfo::minAllocationAlignment member to the minimum alignment required for each allocation
to be made out of this pool.
The alignment actually used will be the maximum of this member and the alignment returned for the specific buffer or image
from a function like `vkGetBufferMemoryRequirements`, which is called by VMA automatically.

If you want to create a buffer with a specific minimum alignment out of default pools,
use the special function vmaCreateBufferWithAlignment(), which takes an additional parameter `minAlignment`.
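
Both approaches sketched (the alignment value of 256 B is an example; `memTypeIndex`, `bufCreateInfo`,
and `allocCreateInfo` filled as in earlier examples):

\code
// A) A custom pool where every allocation gets at least the requested alignment:
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.minAllocationAlignment = 256;

// B) A single buffer from the default pools:
VkBuffer buf;
VmaAllocation alloc;
VkResult res = vmaCreateBufferWithAlignment(allocator, &bufCreateInfo, &allocCreateInfo,
    256, // minAlignment
    &buf, &alloc, nullptr);
\endcode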

Note that the problem of alignment affects only resources placed inside bigger `VkDeviceMemory` blocks and not dedicated
allocations, as these, by definition, are always bound to the beginning of their dedicated block,
which satisfies any alignment.
Contrary to Direct3D 12, Vulkan doesn't have a concept of alignment of the entire memory block passed on its allocation.


\page usage_patterns Recommended usage patterns

Vulkan gives great flexibility in memory allocation.
This chapter shows the most common patterns.

See also slides from talk:
[Sawicki, Adam. Advanced Graphics Techniques Tutorial: Memory management in Vulkan and DX12. Game Developers Conference, 2018](https://www.gdcvault.com/play/1025458/Advanced-Graphics-Techniques-Tutorial-New)


\section usage_patterns_gpu_only GPU-only resource

<b>When:</b>
Any resources that you frequently write and read on GPU,
e.g. images used as color attachments (aka "render targets"), depth-stencil attachments,
images/buffers used as storage image/buffer (aka "Unordered Access View (UAV)").

<b>What to do:</b>
Let the library select the optimal memory type, which will likely have `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.

\code
VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
imgCreateInfo.extent.width = 3840;
imgCreateInfo.extent.height = 2160;
imgCreateInfo.extent.depth = 1;
imgCreateInfo.mipLevels = 1;
imgCreateInfo.arrayLayers = 1;
imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
allocCreateInfo.priority = 1.0f;

VkImage img;
VmaAllocation alloc;
vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr);
\endcode

<b>Also consider:</b>
Creating such resources as dedicated allocations using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT,
especially if they are large or if you plan to destroy and recreate them with different sizes
e.g. when display resolution changes.
Prefer to create such resources first and all other GPU resources (like textures and vertex buffers) later.
When the VK_EXT_memory_priority extension is enabled, it is also worth setting high priority to such allocations
to decrease chances of being evicted to system memory by the operating system.

\section usage_patterns_staging_copy_upload Staging copy for upload

<b>When:</b>
A "staging" buffer that you want to map and fill from CPU code, then use as a source of transfer
to some GPU resource.

<b>What to do:</b>
Use flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT.
Let the library select the optimal memory type, which will always have `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`.

\code
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
    VMA_ALLOCATION_CREATE_MAPPED_BIT;

VkBuffer buf;
VmaAllocation alloc;
VmaAllocationInfo allocInfo;
vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);

...

memcpy(allocInfo.pMappedData, myData, myDataSize);
\endcode

<b>Also consider:</b>
You can map the allocation using vmaMapMemory() or you can create it as persistently mapped
using #VMA_ALLOCATION_CREATE_MAPPED_BIT, as in the example above.


\section usage_patterns_readback Readback

<b>When:</b>
Buffers for data written by or transferred from the GPU that you want to read back on the CPU,
e.g. results of some computations.

<b>What to do:</b>
Use flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
Let the library select the optimal memory type, which will always have `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`
and `VK_MEMORY_PROPERTY_HOST_CACHED_BIT`.

\code
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT |
    VMA_ALLOCATION_CREATE_MAPPED_BIT;

VkBuffer buf;
VmaAllocation alloc;
VmaAllocationInfo allocInfo;
vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);

...

const float* downloadedData = (const float*)allocInfo.pMappedData;
\endcode


\section usage_patterns_advanced_data_uploading Advanced data uploading

For resources that you frequently write on CPU via mapped pointer and
frequently read on GPU e.g. as a uniform buffer (also called "dynamic"), multiple options are possible:

-# The easiest solution is to have one copy of the resource in `HOST_VISIBLE` memory,
   even if it means system RAM (not `DEVICE_LOCAL`) on systems with a discrete graphics card,
   and make the device reach out to that resource directly.
   - Reads performed by the device will then go through PCI Express bus.
     The performance of this access may be limited, but it may be fine depending on the size
     of this resource (whether it is small enough to quickly end up in GPU cache) and the sparsity
     of access.
-# On systems with unified memory (e.g. AMD APU or Intel integrated graphics, mobile chips),
   a memory type may be available that is both `HOST_VISIBLE` (available for mapping) and `DEVICE_LOCAL`
   (fast to access from the GPU). Then, it is likely the best choice for such type of resource.
-# Systems with a discrete graphics card and separate video memory may or may not expose
   a memory type that is both `HOST_VISIBLE` and `DEVICE_LOCAL`, also known as Base Address Register (BAR).
   If they do, it represents a piece of VRAM (or entire VRAM, if ReBAR is enabled in the motherboard BIOS)
   that is available to CPU for mapping.
   - Writes performed by the host to that memory go through PCI Express bus.
     The performance of these writes may be limited, but it may be fine, especially on PCIe 4.0,
     as long as rules of using uncached and write-combined memory are followed - only sequential writes and no reads.
-# Finally, you may need or prefer to create a separate copy of the resource in `DEVICE_LOCAL` memory,
   a separate "staging" copy in `HOST_VISIBLE` memory and perform an explicit transfer command between them.

Thankfully, VMA offers an aid to create and use such resources in the way optimal
for the current Vulkan device. To help the library make the best choice,
use flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT together with
#VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT.
It will then prefer a memory type that is both `DEVICE_LOCAL` and `HOST_VISIBLE` (integrated memory or BAR),
but if no such memory type is available or allocation from it fails
(PC graphics cards have only 256 MB of BAR by default, unless ReBAR is supported and enabled in BIOS),
it will fall back to `DEVICE_LOCAL` memory for fast GPU access.
It is then up to you to detect that the allocation ended up in a memory type that is not `HOST_VISIBLE`,
so you need to create another "staging" allocation and perform explicit transfers.

\code
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
    VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT |
    VMA_ALLOCATION_CREATE_MAPPED_BIT;

VkBuffer buf;
VmaAllocation alloc;
VmaAllocationInfo allocInfo;
vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);

VkMemoryPropertyFlags memPropFlags;
vmaGetAllocationMemoryProperties(allocator, alloc, &memPropFlags);

if(memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
{
    // Allocation ended up in a mappable memory and is already mapped - write to it directly.

    // [Executed in runtime]:
    memcpy(allocInfo.pMappedData, myData, myDataSize);
}
else
{
    // Allocation ended up in a non-mappable memory - need to transfer.
    VkBufferCreateInfo stagingBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    stagingBufCreateInfo.size = 65536;
    stagingBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

    VmaAllocationCreateInfo stagingAllocCreateInfo = {};
    stagingAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
    stagingAllocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
        VMA_ALLOCATION_CREATE_MAPPED_BIT;

    VkBuffer stagingBuf;
    VmaAllocation stagingAlloc;
    VmaAllocationInfo stagingAllocInfo;
    vmaCreateBuffer(allocator, &stagingBufCreateInfo, &stagingAllocCreateInfo,
        &stagingBuf, &stagingAlloc, &stagingAllocInfo);

    // [Executed in runtime]:
    memcpy(stagingAllocInfo.pMappedData, myData, myDataSize);
    vmaFlushAllocation(allocator, stagingAlloc, 0, VK_WHOLE_SIZE);
    //vkCmdPipelineBarrier: VK_ACCESS_HOST_WRITE_BIT --> VK_ACCESS_TRANSFER_READ_BIT
    VkBufferCopy bufCopy = {
        0, // srcOffset
        0, // dstOffset
        myDataSize }; // size
    vkCmdCopyBuffer(cmdBuf, stagingBuf, buf, 1, &bufCopy);
}
\endcode
19251 
19252 \section usage_patterns_other_use_cases Other use cases
19253 
19254 Here are some other, less obvious use cases and their recommended settings:
19255 
19256 - An image that is used only as transfer source and destination, but it should stay on the device,
19257   as it is used to temporarily store a copy of some texture, e.g. from the current to the next frame,
19258   for temporal antialiasing or other temporal effects.
19259   - Use `VkImageCreateInfo::usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT`
19260   - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO
19261 - An image that is used only as transfer source and destination, but it should be placed
19262   in the system RAM despite it doesn't need to be mapped, because it serves as a "swap" copy to evict
19263   least recently used textures from VRAM.
19264   - Use `VkImageCreateInfo::usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT`
19265   - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO_PREFER_HOST,
19266     as VMA needs a hint here to differentiate from the previous case.
19267 - A buffer that you want to map and write from the CPU, directly read from the GPU
19268   (e.g. as a uniform or vertex buffer), but you have a clear preference to place it in device or
19269   host memory due to its large size.
19270   - Use `VkBufferCreateInfo::usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT`
19271   - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE or #VMA_MEMORY_USAGE_AUTO_PREFER_HOST
19272   - Use VmaAllocationCreateInfo::flags = #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT
19273 
19274 
19275 \page configuration Configuration
19276 
Please check "CONFIGURATION SECTION" in the code to find macros that you can define
before each include of this file, or change directly in this file, to provide
your own implementation of basic facilities like assert, `min()` and `max()` functions,
mutex, atomic, etc.
The library uses its own implementation of containers by default, but you can switch to using
STL containers instead.
19283 
19284 For example, define `VMA_ASSERT(expr)` before including the library to provide
19285 custom implementation of the assertion, compatible with your project.
19286 By default it is defined to standard C `assert(expr)` in `_DEBUG` configuration
19287 and empty otherwise.
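
For instance, a minimal sketch that routes assertions to a hypothetical,
project-specific handler (`MyAssertHandler` is not part of VMA):

\code
// MyAssertHandler is an assumed user function:
// void MyAssertHandler(const char* expr, const char* file, int line);
// The definition matters especially in the file that defines VMA_IMPLEMENTATION.
#define VMA_ASSERT(expr) do { if(!(expr)) MyAssertHandler(#expr, __FILE__, __LINE__); } while(false)

#include "vk_mem_alloc.h"
\endcode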
19288 
19289 \section config_Vulkan_functions Pointers to Vulkan functions
19290 
There are multiple ways to import pointers to Vulkan functions into the library.
In the simplest case you don't need to do anything.
If the compilation or linking of your program, or the initialization of the #VmaAllocator,
doesn't work for you, you can try to reconfigure this behavior.
19295 
19296 First, the allocator tries to fetch pointers to Vulkan functions linked statically,
19297 like this:
19298 
19299 \code
19300 m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
19301 \endcode
19302 
19303 If you want to disable this feature, set configuration macro: `#define VMA_STATIC_VULKAN_FUNCTIONS 0`.
19304 
19305 Second, you can provide the pointers yourself by setting member VmaAllocatorCreateInfo::pVulkanFunctions.
19306 You can fetch them e.g. using functions `vkGetInstanceProcAddr` and `vkGetDeviceProcAddr` or
19307 by using a helper library like [volk](https://github.com/zeux/volk).
19308 
Third, VMA tries to fetch the remaining pointers that are still null by calling
`vkGetInstanceProcAddr` and `vkGetDeviceProcAddr` on its own.
You only need to fill in VmaVulkanFunctions::vkGetInstanceProcAddr and VmaVulkanFunctions::vkGetDeviceProcAddr;
other pointers will be fetched automatically.
If you want to disable this feature, set configuration macro: `#define VMA_DYNAMIC_VULKAN_FUNCTIONS 0`.
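
A minimal sketch of that third option; the other VmaAllocatorCreateInfo members
(instance, physical device, device etc.) are omitted here:

\code
VmaVulkanFunctions vulkanFunctions = {};
vulkanFunctions.vkGetInstanceProcAddr = &vkGetInstanceProcAddr;
vulkanFunctions.vkGetDeviceProcAddr = &vkGetDeviceProcAddr;

VmaAllocatorCreateInfo allocatorCreateInfo = {};
allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;
// ... also fill instance, physicalDevice, device, vulkanApiVersion ...

VmaAllocator allocator;
vmaCreateAllocator(&allocatorCreateInfo, &allocator);
\endcode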
19314 
Finally, all the function pointers required by the library (considering the selected
Vulkan version and enabled extensions) are checked with `VMA_ASSERT` to ensure they are not null.
19317 
19318 
19319 \section custom_memory_allocator Custom host memory allocator
19320 
If you use a custom allocator for CPU memory rather than the default C++ `new`
and `delete` operators, you can make this library use your allocator as well
by filling the optional member VmaAllocatorCreateInfo::pAllocationCallbacks. These
functions will be passed to Vulkan, as well as used by the library itself to
make any CPU-side allocations.
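
A sketch, assuming hypothetical user functions `MyAlloc`, `MyRealloc` and `MyFree`
with signatures matching `PFN_vkAllocationFunction`, `PFN_vkReallocationFunction`
and `PFN_vkFreeFunction`:

\code
VkAllocationCallbacks cpuAllocationCallbacks = {};
cpuAllocationCallbacks.pfnAllocation = &MyAlloc;     // assumed user function
cpuAllocationCallbacks.pfnReallocation = &MyRealloc; // assumed user function
cpuAllocationCallbacks.pfnFree = &MyFree;            // assumed user function

VmaAllocatorCreateInfo allocatorCreateInfo = {};
allocatorCreateInfo.pAllocationCallbacks = &cpuAllocationCallbacks;
\endcode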
19326 
19327 \section allocation_callbacks Device memory allocation callbacks
19328 
The library makes calls to `vkAllocateMemory()` and `vkFreeMemory()` internally.
You can set up callbacks to be informed about these calls, e.g. for the purpose
of gathering some statistics. To do it, fill the optional member
VmaAllocatorCreateInfo::pDeviceMemoryCallbacks.
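
A sketch that counts live `VkDeviceMemory` blocks; the callback bodies and the
counter are assumptions made for the example:

\code
static void VKAPI_CALL MyVmaAllocateCallback(VmaAllocator allocator, uint32_t memoryType,
    VkDeviceMemory memory, VkDeviceSize size, void* pUserData)
{
    ++*static_cast<uint32_t*>(pUserData); // pUserData assumed to point to a counter
}
static void VKAPI_CALL MyVmaFreeCallback(VmaAllocator allocator, uint32_t memoryType,
    VkDeviceMemory memory, VkDeviceSize size, void* pUserData)
{
    --*static_cast<uint32_t*>(pUserData);
}

// ...

uint32_t liveBlockCount = 0;
VmaDeviceMemoryCallbacks deviceMemoryCallbacks = {};
deviceMemoryCallbacks.pfnAllocate = &MyVmaAllocateCallback;
deviceMemoryCallbacks.pfnFree = &MyVmaFreeCallback;
deviceMemoryCallbacks.pUserData = &liveBlockCount;

VmaAllocatorCreateInfo allocatorCreateInfo = {};
allocatorCreateInfo.pDeviceMemoryCallbacks = &deviceMemoryCallbacks;
\endcode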
19333 
19334 \section heap_memory_limit Device heap memory limit
19335 
When device memory of a certain heap runs out of free space, new allocations may
fail (returning an error code) or they may succeed, silently pushing some existing
memory blocks from GPU VRAM to system RAM (which degrades performance). This
behavior is implementation-dependent - it depends on the GPU vendor and graphics
driver.
19341 
On AMD cards it can be controlled while creating the Vulkan device object by using
the VK_AMD_memory_overallocation_behavior extension, if available.

Alternatively, if you want to test how your program behaves with a limited amount of Vulkan device
memory available, without switching your graphics card to one that really has
smaller VRAM, you can use a feature of this library intended for this purpose.
To do it, fill the optional member VmaAllocatorCreateInfo::pHeapSizeLimit.
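
A sketch that caps heap 0 at 1 GiB and leaves the other heaps unlimited; which
heap to restrict is an assumption here - inspect `VkPhysicalDeviceMemoryProperties`
to pick the right one:

\code
VkDeviceSize heapSizeLimit[VK_MAX_MEMORY_HEAPS];
for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    heapSizeLimit[i] = VK_WHOLE_SIZE; // VK_WHOLE_SIZE means no limit
heapSizeLimit[0] = 1ull << 30; // 1 GiB

VmaAllocatorCreateInfo allocatorCreateInfo = {};
allocatorCreateInfo.pHeapSizeLimit = heapSizeLimit;
\endcode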
19349 
19350 
19351 
19352 \page vk_khr_dedicated_allocation VK_KHR_dedicated_allocation
19353 
VK_KHR_dedicated_allocation is a Vulkan extension which can be used to improve
performance on some GPUs. It augments the Vulkan API with the possibility to query
the driver whether it prefers a particular buffer or image to have its own, dedicated
allocation (separate `VkDeviceMemory` block) for better efficiency - to be able
to do some internal optimizations. The extension is supported by this library.
It will be used automatically when enabled.

It has been promoted to core Vulkan 1.1, so if you use an eligible Vulkan version
and inform VMA about it by setting VmaAllocatorCreateInfo::vulkanApiVersion,
you are all set.
19364 
19365 Otherwise, if you want to use it as an extension:
19366 
1 . When creating a Vulkan device, check if the following 2 device extensions are
supported (call `vkEnumerateDeviceExtensionProperties()`), as sketched below.
If yes, enable them (fill `VkDeviceCreateInfo::ppEnabledExtensionNames`).
19370 
19371 - VK_KHR_get_memory_requirements2
19372 - VK_KHR_dedicated_allocation
19373 
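A sketch of this check, assuming `physicalDevice` has already been chosen:

\code
uint32_t extCount = 0;
vkEnumerateDeviceExtensionProperties(physicalDevice, nullptr, &extCount, nullptr);
std::vector<VkExtensionProperties> extProps(extCount);
vkEnumerateDeviceExtensionProperties(physicalDevice, nullptr, &extCount, extProps.data());

bool getMemReq2Supported = false, dedicatedAllocSupported = false;
for(const VkExtensionProperties& p : extProps)
{
    if(strcmp(p.extensionName, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME) == 0)
        getMemReq2Supported = true;
    else if(strcmp(p.extensionName, VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME) == 0)
        dedicatedAllocSupported = true;
}
\endcode
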
19374 If you enabled these extensions:
19375 
19376 2 . Use #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag when creating
19377 your #VmaAllocator to inform the library that you enabled required extensions
19378 and you want the library to use them.
19379 
19380 \code
19381 allocatorInfo.flags |= VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT;
19382 
19383 vmaCreateAllocator(&allocatorInfo, &allocator);
19384 \endcode
19385 
19386 That is all. The extension will be automatically used whenever you create a
19387 buffer using vmaCreateBuffer() or image using vmaCreateImage().
19388 
19389 When using the extension together with Vulkan Validation Layer, you will receive
19390 warnings like this:
19391 
19392 _vkBindBufferMemory(): Binding memory to buffer 0x33 but vkGetBufferMemoryRequirements() has not been called on that buffer._
19393 
This is OK - you can safely ignore it. It happens because you use the function
`vkGetBufferMemoryRequirements2KHR()` instead of the standard
`vkGetBufferMemoryRequirements()`, while the validation layer seems to be
unaware of it.
19398 
19399 To learn more about this extension, see:
19400 
19401 - [VK_KHR_dedicated_allocation in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap50.html#VK_KHR_dedicated_allocation)
19402 - [VK_KHR_dedicated_allocation unofficial manual](http://asawicki.info/articles/VK_KHR_dedicated_allocation.php5)
19403 
19404 
19405 
19406 \page vk_ext_memory_priority VK_EXT_memory_priority
19407 
VK_EXT_memory_priority is a device extension that allows passing an additional "priority"
value with Vulkan memory allocations. The implementation may use it to prefer certain
buffers and images that are critical for performance to stay in device-local memory
in cases when the memory is over-subscribed, while some others may be moved to system memory.
19412 
VMA offers convenient usage of this extension.
If you enable it, you can pass a "priority" parameter when creating allocations or custom pools
and the library automatically passes the value to Vulkan using this extension.
19416 
19417 If you want to use this extension in connection with VMA, follow these steps:
19418 
19419 \section vk_ext_memory_priority_initialization Initialization
19420 
1) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
Check if the extension is supported - whether the returned array of `VkExtensionProperties` contains "VK_EXT_memory_priority".
19423 
19424 2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of old `vkGetPhysicalDeviceFeatures`.
19425 Attach additional structure `VkPhysicalDeviceMemoryPriorityFeaturesEXT` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
19426 Check if the device feature is really supported - check if `VkPhysicalDeviceMemoryPriorityFeaturesEXT::memoryPriority` is true.
19427 
19428 3) While creating device with `vkCreateDevice`, enable this extension - add "VK_EXT_memory_priority"
19429 to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`.
19430 
19431 4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`.
19432 Fill in `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`.
19433 Enable this device feature - attach additional structure `VkPhysicalDeviceMemoryPriorityFeaturesEXT` to
19434 `VkPhysicalDeviceFeatures2::pNext` chain and set its member `memoryPriority` to `VK_TRUE`.
19435 
19436 5) While creating #VmaAllocator with vmaCreateAllocator() inform VMA that you
19437 have enabled this extension and feature - add #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT
19438 to VmaAllocatorCreateInfo::flags.
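
A sketch of steps 2) and 4) combined; `physicalDevice` is assumed to be already
selected, and queue setup plus the extension list are omitted:

\code
VkPhysicalDeviceMemoryPriorityFeaturesEXT memoryPriorityFeatures =
    { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PRIORITY_FEATURES_EXT };
VkPhysicalDeviceFeatures2 features2 = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2 };
features2.pNext = &memoryPriorityFeatures;
vkGetPhysicalDeviceFeatures2(physicalDevice, &features2);

if(memoryPriorityFeatures.memoryPriority == VK_TRUE)
{
    // Reuse the same chain at device creation; pEnabledFeatures stays null.
    VkDeviceCreateInfo deviceCreateInfo = { VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO };
    deviceCreateInfo.pNext = &features2;
    // ... queues, and "VK_EXT_memory_priority" in ppEnabledExtensionNames ...
}
\endcode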
19439 
19440 \section vk_ext_memory_priority_usage Usage
19441 
When using this extension, you should initialize the following members:
19443 
19444 - VmaAllocationCreateInfo::priority when creating a dedicated allocation with #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
19445 - VmaPoolCreateInfo::priority when creating a custom pool.
19446 
It should be a floating-point value between `0.0f` and `1.0f`, where the recommended default is `0.5f`.
Memory allocated with a higher value can be treated by the Vulkan implementation as higher priority,
so it has a lower chance of being pushed out to system memory and suffering degraded performance.

It might be a good idea to create performance-critical resources, like color-attachment or depth-stencil images,
as dedicated allocations and set a high priority on them. For example:
19453 
19454 \code
19455 VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
19456 imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
19457 imgCreateInfo.extent.width = 3840;
19458 imgCreateInfo.extent.height = 2160;
19459 imgCreateInfo.extent.depth = 1;
19460 imgCreateInfo.mipLevels = 1;
19461 imgCreateInfo.arrayLayers = 1;
19462 imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
19463 imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
19464 imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
19465 imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
19466 imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
19467 
19468 VmaAllocationCreateInfo allocCreateInfo = {};
19469 allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
19470 allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
19471 allocCreateInfo.priority = 1.0f;
19472 
19473 VkImage img;
19474 VmaAllocation alloc;
19475 vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr);
19476 \endcode
19477 
The `priority` member is ignored in the following situations:

- Allocations created in custom pools: They inherit the priority, along with all other allocation parameters,
  from the parameters passed in #VmaPoolCreateInfo when the pool was created.
- Allocations created in default pools: They inherit the priority from the parameters
  VMA used when creating the default pools, which means `priority == 0.5f`.
19484 
19485 
19486 \page vk_amd_device_coherent_memory VK_AMD_device_coherent_memory
19487 
VK_AMD_device_coherent_memory is a device extension that enables access to
additional memory types with the `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` and
`VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` flags. It is useful mostly for
allocation of buffers intended for writing "breadcrumb markers" in between passes
or draw calls, which in turn are useful for debugging GPU crash/hang/TDR cases.
19493 
When the extension is available but has not been enabled, the Vulkan physical device
still exposes those memory types, but their usage is forbidden. VMA automatically
takes care of that - it returns `VK_ERROR_FEATURE_NOT_PRESENT` when an attempt
is made to allocate memory of such a type.
19498 
19499 If you want to use this extension in connection with VMA, follow these steps:
19500 
19501 \section vk_amd_device_coherent_memory_initialization Initialization
19502 
1) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
Check if the extension is supported - whether the returned array of `VkExtensionProperties` contains "VK_AMD_device_coherent_memory".
19505 
19506 2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of old `vkGetPhysicalDeviceFeatures`.
19507 Attach additional structure `VkPhysicalDeviceCoherentMemoryFeaturesAMD` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
19508 Check if the device feature is really supported - check if `VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory` is true.
19509 
19510 3) While creating device with `vkCreateDevice`, enable this extension - add "VK_AMD_device_coherent_memory"
19511 to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`.
19512 
19513 4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`.
19514 Fill in `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`.
19515 Enable this device feature - attach additional structure `VkPhysicalDeviceCoherentMemoryFeaturesAMD` to
19516 `VkPhysicalDeviceFeatures2::pNext` and set its member `deviceCoherentMemory` to `VK_TRUE`.
19517 
19518 5) While creating #VmaAllocator with vmaCreateAllocator() inform VMA that you
19519 have enabled this extension and feature - add #VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT
19520 to VmaAllocatorCreateInfo::flags.
19521 
19522 \section vk_amd_device_coherent_memory_usage Usage
19523 
After following the steps described above, you can create VMA allocations and custom pools
out of the special `DEVICE_COHERENT` and `DEVICE_UNCACHED` memory types on eligible
devices. There are multiple ways to do it, for example:
19527 
19528 - You can request or prefer to allocate out of such memory types by adding
19529   `VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` to VmaAllocationCreateInfo::requiredFlags
19530   or VmaAllocationCreateInfo::preferredFlags. Those flags can be freely mixed with
19531   other ways of \ref choosing_memory_type, like setting VmaAllocationCreateInfo::usage.
- If you have manually found the memory type index to use for this purpose, force allocation
  from this specific index by setting VmaAllocationCreateInfo::memoryTypeBits `= 1u << index`.
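
For example, a sketch of the first approach; the buffer size and usage here are
assumptions for a small breadcrumb-marker buffer:

\code
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 4096; // assumed size
bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.requiredFlags = VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD |
    VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD;

VkBuffer buf;
VmaAllocation alloc;
vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
\endcode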
19534 
19535 \section vk_amd_device_coherent_memory_more_information More information
19536 
19537 To learn more about this extension, see [VK_AMD_device_coherent_memory in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VK_AMD_device_coherent_memory.html)
19538 
19539 Example use of this extension can be found in the code of the sample and test suite
19540 accompanying this library.
19541 
19542 
19543 \page enabling_buffer_device_address Enabling buffer device address
19544 
The device extension VK_KHR_buffer_device_address
allows fetching a raw GPU pointer to a buffer and passing it for use in shader code.
It has been promoted to core Vulkan 1.2.
19548 
19549 If you want to use this feature in connection with VMA, follow these steps:
19550 
19551 \section enabling_buffer_device_address_initialization Initialization
19552 
1) (For Vulkan version < 1.2) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
Check if the extension is supported - whether the returned array of `VkExtensionProperties` contains
"VK_KHR_buffer_device_address".
19556 
19557 2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of old `vkGetPhysicalDeviceFeatures`.
19558 Attach additional structure `VkPhysicalDeviceBufferDeviceAddressFeatures*` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
19559 Check if the device feature is really supported - check if `VkPhysicalDeviceBufferDeviceAddressFeatures::bufferDeviceAddress` is true.
19560 
19561 3) (For Vulkan version < 1.2) While creating device with `vkCreateDevice`, enable this extension - add
19562 "VK_KHR_buffer_device_address" to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`.
19563 
19564 4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`.
19565 Fill in `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`.
19566 Enable this device feature - attach additional structure `VkPhysicalDeviceBufferDeviceAddressFeatures*` to
19567 `VkPhysicalDeviceFeatures2::pNext` and set its member `bufferDeviceAddress` to `VK_TRUE`.
19568 
19569 5) While creating #VmaAllocator with vmaCreateAllocator() inform VMA that you
19570 have enabled this feature - add #VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT
19571 to VmaAllocatorCreateInfo::flags.
19572 
19573 \section enabling_buffer_device_address_usage Usage
19574 
After following the steps described above, you can create buffers with `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT*` using VMA.
The library automatically adds `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT*` to
allocated memory blocks wherever it might be needed.
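
A sketch; the storage-buffer usage and size are assumptions, and `device` and
`allocator` are assumed to have been created with the feature enabled:

\code
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
    VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;

VkBuffer buf;
VmaAllocation alloc;
vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);

VkBufferDeviceAddressInfo addrInfo = { VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO };
addrInfo.buffer = buf;
VkDeviceAddress gpuAddress = vkGetBufferDeviceAddress(device, &addrInfo);
\endcode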
19578 
Please note that the library supports only `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT*`.
The second part of this functionality, related to "capture and replay", is not supported,
as it is intended for use in debugging tools like RenderDoc, not in everyday Vulkan usage.
19582 
19583 \section enabling_buffer_device_address_more_information More information
19584 
19585 To learn more about this extension, see [VK_KHR_buffer_device_address in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap46.html#VK_KHR_buffer_device_address)
19586 
19587 Example use of this extension can be found in the code of the sample and test suite
19588 accompanying this library.
19589 
19590 \page general_considerations General considerations
19591 
19592 \section general_considerations_thread_safety Thread safety
19593 
19594 - The library has no global state, so separate #VmaAllocator objects can be used
19595   independently.
19596   There should be no need to create multiple such objects though - one per `VkDevice` is enough.
19597 - By default, all calls to functions that take #VmaAllocator as first parameter
19598   are safe to call from multiple threads simultaneously because they are
19599   synchronized internally when needed.
19600   This includes allocation and deallocation from default memory pool, as well as custom #VmaPool.
19601 - When the allocator is created with #VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT
19602   flag, calls to functions that take such #VmaAllocator object must be
19603   synchronized externally.
19604 - Access to a #VmaAllocation object must be externally synchronized. For example,
19605   you must not call vmaGetAllocationInfo() and vmaMapMemory() from different
19606   threads at the same time if you pass the same #VmaAllocation object to these
19607   functions.
19608 - #VmaVirtualBlock is not safe to be used from multiple threads simultaneously.
19609 
19610 \section general_considerations_versioning_and_compatibility Versioning and compatibility
19611 
The library uses [**Semantic Versioning**](https://semver.org/),
which means version numbers follow the convention Major.Minor.Patch (e.g. 2.3.0), where:
19614 
19615 - Incremented Patch version means a release is backward- and forward-compatible,
19616   introducing only some internal improvements, bug fixes, optimizations etc.
19617   or changes that are out of scope of the official API described in this documentation.
19618 - Incremented Minor version means a release is backward-compatible,
19619   so existing code that uses the library should continue to work, while some new
19620   symbols could have been added: new structures, functions, new values in existing
19621   enums and bit flags, new structure members, but not new function parameters.
- Incremented Major version means a release could break some backward compatibility.
19623 
19624 All changes between official releases are documented in file "CHANGELOG.md".
19625 
19626 \warning Backward compatibility is considered on the level of C++ source code, not binary linkage.
19627 Adding new members to existing structures is treated as backward compatible if initializing
19628 the new members to binary zero results in the old behavior.
19629 You should always fully initialize all library structures to zeros and not rely on their
19630 exact binary size.
19631 
19632 \section general_considerations_validation_layer_warnings Validation layer warnings
19633 
When using this library, you may encounter the following types of warnings issued by
the Vulkan validation layer. They don't necessarily indicate a bug, so you may want
to just ignore them.
19637 
19638 - *vkBindBufferMemory(): Binding memory to buffer 0xeb8e4 but vkGetBufferMemoryRequirements() has not been called on that buffer.*
19639   - It happens when VK_KHR_dedicated_allocation extension is enabled.
19640     `vkGetBufferMemoryRequirements2KHR` function is used instead, while validation layer seems to be unaware of it.
19641 - *Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. Only GENERAL or PREINITIALIZED should be used.*
  - It happens when you map a buffer or image, because the library maps the entire
    `VkDeviceMemory` block, where different types of images and buffers may end
    up together, especially on GPUs with unified memory like Intel.
19645 - *Non-linear image 0xebc91 is aliased with linear buffer 0xeb8e4 which may indicate a bug.*
19646   - It may happen when you use [defragmentation](@ref defragmentation).
19647 
19648 \section general_considerations_allocation_algorithm Allocation algorithm
19649 
The library uses the following algorithm for allocation, in order:

-# Try to find a free range of memory in existing blocks.
-# If failed, try to create a new block of `VkDeviceMemory`, with the preferred block size.
-# If failed, try to create such a block with size / 2, size / 4, size / 8.
-# If failed, try to allocate separate `VkDeviceMemory` for this allocation,
   just like when you use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
-# If failed, choose another memory type that meets the requirements specified in
   VmaAllocationCreateInfo and go to point 1.
-# If failed, return `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
19660 
19661 \section general_considerations_features_not_supported Features not supported
19662 
19663 Features deliberately excluded from the scope of this library:
19664 
-# **Data transfer.** Uploading (streaming) and downloading data of buffers and images
   between CPU and GPU memory and the related synchronization is the responsibility of the user.
19667    Defining some "texture" object that would automatically stream its data from a
19668    staging copy in CPU memory to GPU memory would rather be a feature of another,
19669    higher-level library implemented on top of VMA.
19670    VMA doesn't record any commands to a `VkCommandBuffer`. It just allocates memory.
19671 -# **Recreation of buffers and images.** Although the library has functions for
19672    buffer and image creation: vmaCreateBuffer(), vmaCreateImage(), you need to
   recreate these objects yourself after defragmentation. That is because the big
   structures `VkBufferCreateInfo`, `VkImageCreateInfo` are not stored in the
   #VmaAllocation object.
19676 -# **Handling CPU memory allocation failures.** When dynamically creating small C++
19677    objects in CPU memory (not Vulkan memory), allocation failures are not checked
19678    and handled gracefully, because that would complicate code significantly and
19679    is usually not needed in desktop PC applications anyway.
19680    Success of an allocation is just checked with an assert.
19681 -# **Code free of any compiler warnings.** Maintaining the library to compile and
19682    work correctly on so many different platforms is hard enough. Being free of
19683    any warnings, on any version of any compiler, is simply not feasible.
19684    There are many preprocessor macros that make some variables unused, function parameters unreferenced,
19685    or conditional expressions constant in some configurations.
19686    The code of this library should not be bigger or more complicated just to silence these warnings.
19687    It is recommended to disable such warnings instead.
-# This is a C++ library with a C interface. **Bindings or ports to any other programming languages** are welcome as external projects but
   are not going to be included in this repository.
19690 */
19691