/*
 * Copyright 2022 Google LLC.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef skgpu_VulkanMemoryAllocator_DEFINED
#define skgpu_VulkanMemoryAllocator_DEFINED

#include "include/core/SkRefCnt.h"
#include "include/gpu/vk/VulkanTypes.h"
#include "include/private/gpu/vk/SkiaVulkan.h"

#include <cstdint>
#include <utility>

namespace skgpu {

/**
 * Ref-counted abstract interface that clients implement to provide Vulkan device-memory
 * management (allocation, mapping, flushing, and freeing) for images and buffers. All methods
 * operate on opaque skgpu::VulkanBackendMemory handles produced by the allocate* calls.
 */
class VulkanMemoryAllocator : public SkRefCnt {
public:
    // Bitmask hints passed (OR'd together into a uint32_t) to the allocate* calls below.
    enum AllocationPropertyFlags {
        kNone_AllocationPropertyFlag = 0b0000,
        // Allocation will be placed in its own VkDeviceMemory and not suballocated from some larger
        // block.
        kDedicatedAllocation_AllocationPropertyFlag = 0b0001,
        // Says that the backing memory can only be accessed by the device. Additionally the device
        // may lazily allocate the memory. This cannot be used with buffers that will be host
        // visible. Setting this flag does not guarantee that we will allocate memory that respects
        // it, but we will try to prefer memory that can respect it.
        kLazyAllocation_AllocationPropertyFlag = 0b0010,
        // The allocation will be mapped immediately and stay mapped until it is destroyed. This
        // flag is only valid for buffers which are host visible (i.e. must have a usage other than
        // BufferUsage::kGpuOnly).
        kPersistentlyMapped_AllocationPropertyFlag = 0b0100,
        // Allocation can only be accessed by the device using a protected context.
        kProtected_AllocationPropertyFlag = 0b1000,
    };

    // Describes the intended host/device access pattern for a buffer so the implementation can
    // pick an appropriate memory type.
    enum class BufferUsage {
        // Buffers that will only be accessed from the device (large const buffers) will always be
        // in device local memory.
        kGpuOnly,
        // Buffers that typically will be updated multiple times by the host and read on the gpu
        // (e.g. uniform or vertex buffers). CPU writes will generally be sequential in the buffer
        // and will try to take advantage of the write-combined nature of the gpu buffers. Thus this
        // will always be mappable and coherent memory, and it will prefer to be in device local
        // memory.
        kCpuWritesGpuReads,
        // Buffers that will be accessed on the host and copied to another GPU resource (transfer
        // buffers). Will always be mappable and coherent memory.
        kTransfersFromCpuToGpu,
        // Buffers which are typically written to by the GPU and then read on the host. Will always
        // be mappable memory, and will prefer cached memory.
        kTransfersFromGpuToCpu,
    };

    // Allocates (or suballocates) device memory for the given VkImage and, on success, returns a
    // handle to it in `memory`. `allocationPropertyFlags` is a mask of AllocationPropertyFlags.
    // The caller is responsible for binding the memory to the image.
    virtual VkResult allocateImageMemory(VkImage image,
                                         uint32_t allocationPropertyFlags,
                                         skgpu::VulkanBackendMemory* memory) = 0;

    // Allocates (or suballocates) device memory for the given VkBuffer, chosen according to
    // `usage`, and on success returns a handle to it in `memory`.
    virtual VkResult allocateBufferMemory(VkBuffer buffer,
                                          BufferUsage usage,
                                          uint32_t allocationPropertyFlags,
                                          skgpu::VulkanBackendMemory* memory) = 0;

    // Fills out the passed in skgpu::VulkanAlloc struct for the passed in
    // skgpu::VulkanBackendMemory.
    virtual void getAllocInfo(const skgpu::VulkanBackendMemory&, skgpu::VulkanAlloc*) const = 0;

    // Maps the entire allocation and returns a pointer to the start of the allocation. The
    // implementation may map more memory than just the allocation, but the returned pointer must
    // point at the start of the memory for the requested allocation.
    // Deprecated overload: new clients should implement the VkResult overload below instead.
    virtual void* mapMemory(const skgpu::VulkanBackendMemory&) { return nullptr; }
    virtual VkResult mapMemory(const skgpu::VulkanBackendMemory& memory, void** data) {
        *data = this->mapMemory(memory);
        // VK_ERROR_INITIALIZATION_FAILED is a bogus result to return from this function, but it is
        // just something to return that is not VK_SUCCESS and can't be interpreted by a caller to
        // mean something specific happened like device lost or oom. This will be removed once we
        // update clients to implement this virtual.
        return *data ? VK_SUCCESS : VK_ERROR_INITIALIZATION_FAILED;
    }

    // Unmaps memory previously mapped with one of the mapMemory overloads above.
    virtual void unmapMemory(const skgpu::VulkanBackendMemory&) = 0;

    // The following two calls are used for managing non-coherent memory. The offset is relative to
    // the start of the allocation and not the underlying VkDeviceMemory. Additionally the client
    // must make sure that the offset + size passed in is less than or equal to the allocation size.
    // It is the responsibility of the implementation to make sure all alignment requirements are
    // followed. The client should not have to deal with any sort of alignment issues.
    // Deprecated overload: new clients should implement flushMemory below instead.
    virtual void flushMappedMemory(const skgpu::VulkanBackendMemory&, VkDeviceSize, VkDeviceSize) {}
    virtual VkResult flushMemory(const skgpu::VulkanBackendMemory& memory,
                                 VkDeviceSize offset,
                                 VkDeviceSize size) {
        // Default shim forwards to the legacy void overload and assumes success.
        this->flushMappedMemory(memory, offset, size);
        return VK_SUCCESS;
    }

    // Deprecated overload: new clients should implement invalidateMemory below instead.
    virtual void invalidateMappedMemory(const skgpu::VulkanBackendMemory&,
                                        VkDeviceSize,
                                        VkDeviceSize) {}
    virtual VkResult invalidateMemory(const skgpu::VulkanBackendMemory& memory,
                                      VkDeviceSize offset,
                                      VkDeviceSize size) {
        // Default shim forwards to the legacy void overload and assumes success.
        this->invalidateMappedMemory(memory, offset, size);
        return VK_SUCCESS;
    }

    // Frees memory previously returned by one of the allocate* calls. The handle must not be used
    // after this call.
    virtual void freeMemory(const skgpu::VulkanBackendMemory&) = 0;

    // Returns the total amount of memory that is allocated as well as total
    // amount of memory in use by an allocation from this allocator.
    // Return 1st param is total allocated memory, 2nd is total used memory.
    virtual std::pair<uint64_t, uint64_t> totalAllocatedAndUsedMemory() const = 0;
};

} // namespace skgpu

#endif // skgpu_VulkanMemoryAllocator_DEFINED