/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "include/core/SkTypes.h"
#include "include/gpu/GpuTypes.h"
#include "include/gpu/vk/VulkanMemoryAllocator.h"
#include "include/gpu/vk/VulkanTypes.h"
#include "src/gpu/vk/VulkanMemory.h"

#include <cstdint>
#include <cstring>

namespace skgpu {

using BufferUsage = VulkanMemoryAllocator::BufferUsage;

bool VulkanMemory::AllocBufferMemory(VulkanMemoryAllocator* allocator,
                                     VkBuffer buffer,
                                     skgpu::Protected isProtected,
                                     BufferUsage usage,
                                     bool shouldPersistentlyMapCpuToGpu,
                                     const std::function<CheckResult>& checkResult,
                                     VulkanAlloc* alloc) {
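    // The allocator hands back an opaque VulkanBackendMemory handle; the concrete
    // VkDeviceMemory/offset pair is filled into 'alloc' later via getAllocInfo().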
    VulkanBackendMemory memory = 0;
    uint32_t propFlags;
    if (usage == BufferUsage::kTransfersFromCpuToGpu ||
        (usage == BufferUsage::kCpuWritesGpuReads && shouldPersistentlyMapCpuToGpu)) {
        // In general it is fine (and often better) to keep buffers that we are writing
        // to from the CPU persistently mapped.
        propFlags = VulkanMemoryAllocator::kPersistentlyMapped_AllocationPropertyFlag;
    } else {
        propFlags = VulkanMemoryAllocator::kNone_AllocationPropertyFlag;
    }

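    // Protected content must be backed by protected device memory
    // (VK_MEMORY_PROPERTY_PROTECTED_BIT).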
    if (isProtected == Protected::kYes) {
        propFlags = propFlags | VulkanMemoryAllocator::kProtected_AllocationPropertyFlag;
    }

    VkResult result = allocator->allocateBufferMemory(buffer, usage, propFlags, &memory);
    if (!checkResult(result)) {
        return false;
    }
    allocator->getAllocInfo(memory, alloc);
    return true;
}

void VulkanMemory::FreeBufferMemory(VulkanMemoryAllocator* allocator, const VulkanAlloc& alloc) {
    SkASSERT(alloc.fBackendMemory);
    allocator->freeMemory(alloc.fBackendMemory);
}

bool VulkanMemory::AllocImageMemory(VulkanMemoryAllocator* allocator,
                                    VkImage image,
                                    Protected isProtected,
                                    bool forceDedicatedMemory,
                                    bool useLazyAllocation,
                                    const std::function<CheckResult>& checkResult,
                                    VulkanAlloc* alloc) {
    VulkanBackendMemory memory = 0;

    uint32_t propFlags;
    // If we ever find that our allocator is not aggressive enough in using dedicated image
    // memory, we can add a size check here to force the use of dedicated memory. For now,
    // we let the allocator decide. The allocator can query the GPU for each image to see if
    // the GPU recommends or requires the use of dedicated memory.
    if (forceDedicatedMemory) {
        propFlags = VulkanMemoryAllocator::kDedicatedAllocation_AllocationPropertyFlag;
    } else {
        propFlags = VulkanMemoryAllocator::kNone_AllocationPropertyFlag;
    }

    if (isProtected == Protected::kYes) {
        propFlags = propFlags | VulkanMemoryAllocator::kProtected_AllocationPropertyFlag;
    }

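    // Lazily allocated memory (VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) lets tiler GPUs
    // back transient attachments, e.g. MSAA render targets, without ever committing real
    // memory for them.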
    if (useLazyAllocation) {
        propFlags = propFlags | VulkanMemoryAllocator::kLazyAllocation_AllocationPropertyFlag;
    }

    VkResult result = allocator->allocateImageMemory(image, propFlags, &memory);
    if (!checkResult(result)) {
        return false;
    }

    allocator->getAllocInfo(memory, alloc);
    return true;
}

void VulkanMemory::FreeImageMemory(VulkanMemoryAllocator* allocator,
                                   const VulkanAlloc& alloc) {
    SkASSERT(alloc.fBackendMemory);
    allocator->freeMemory(alloc.fBackendMemory);
}

void* VulkanMemory::MapAlloc(VulkanMemoryAllocator* allocator,
                             const VulkanAlloc& alloc,
                             const std::function<CheckResult>& checkResult) {
    SkASSERT(VulkanAlloc::kMappable_Flag & alloc.fFlags);
    SkASSERT(alloc.fBackendMemory);
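    // mapMemory() returns a CPU-visible pointer into the allocation; UnmapAlloc()
    // releases it. For persistently mapped allocations the allocator may simply hand
    // back its existing mapping.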
    void* mapPtr;
    VkResult result = allocator->mapMemory(alloc.fBackendMemory, &mapPtr);
    if (!checkResult(result)) {
        return nullptr;
    }
    return mapPtr;
}

void VulkanMemory::UnmapAlloc(VulkanMemoryAllocator* allocator,
                              const VulkanAlloc& alloc) {
    SkASSERT(alloc.fBackendMemory);
    allocator->unmapMemory(alloc.fBackendMemory);
}

void VulkanMemory::GetNonCoherentMappedMemoryRange(const VulkanAlloc& alloc,
                                                   VkDeviceSize offset,
                                                   VkDeviceSize size,
                                                   VkDeviceSize alignment,
                                                   VkMappedMemoryRange* range) {
    SkASSERT(alloc.fFlags & VulkanAlloc::kNoncoherent_Flag);
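    // vkFlushMappedMemoryRanges and vkInvalidateMappedMemoryRanges require ranges
    // aligned to the device's nonCoherentAtomSize, so round the offset down and the
    // size up to the given alignment.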
    offset = offset + alloc.fOffset;
    VkDeviceSize offsetDiff = offset & (alignment - 1);
    offset = offset - offsetDiff;
    size = (size + alignment - 1) & ~(alignment - 1);
#ifdef SK_DEBUG
    SkASSERT(offset >= alloc.fOffset);
    SkASSERT(offset + size <= alloc.fOffset + alloc.fSize);
    SkASSERT(0 == (offset & (alignment - 1)));
    SkASSERT(size > 0);
    SkASSERT(0 == (size & (alignment - 1)));
#endif

    std::memset(range, 0, sizeof(VkMappedMemoryRange));
    range->sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
    range->memory = alloc.fMemory;
    range->offset = offset;
    range->size = size;
}

void VulkanMemory::FlushMappedAlloc(VulkanMemoryAllocator* allocator,
                                    const VulkanAlloc& alloc,
                                    VkDeviceSize offset,
                                    VkDeviceSize size,
                                    const std::function<CheckResult>& checkResult) {
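    // Host writes to coherent memory are visible to the device without an explicit
    // flush, so only noncoherent allocations need vkFlushMappedMemoryRanges.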
    if (alloc.fFlags & VulkanAlloc::kNoncoherent_Flag) {
        SkASSERT(offset == 0);
        SkASSERT(size <= alloc.fSize);
        SkASSERT(alloc.fBackendMemory);
        VkResult result = allocator->flushMemory(alloc.fBackendMemory, offset, size);
        checkResult(result);
    }
}

void VulkanMemory::InvalidateMappedAlloc(VulkanMemoryAllocator* allocator,
                                         const VulkanAlloc& alloc,
                                         VkDeviceSize offset,
                                         VkDeviceSize size,
                                         const std::function<CheckResult>& checkResult) {
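    // Likewise, only noncoherent allocations need an explicit invalidate before the
    // host reads data written by the device.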
    if (alloc.fFlags & VulkanAlloc::kNoncoherent_Flag) {
        SkASSERT(offset == 0);
        SkASSERT(size <= alloc.fSize);
        SkASSERT(alloc.fBackendMemory);
        VkResult result = allocator->invalidateMemory(alloc.fBackendMemory, offset, size);
        checkResult(result);
    }
}

}  // namespace skgpu