/*
 * Copyright © 2022 Collabora Ltd. and Red Hat Inc.
 * SPDX-License-Identifier: MIT
 */
#include "nvk_buffer.h"

#include "nvk_entrypoints.h"
#include "nvk_device.h"
#include "nvk_device_memory.h"
#include "nvk_physical_device.h"
#include "nvk_queue.h"
#include "nvkmd/nvkmd.h"

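/* Returns the required base alignment for a buffer with the given usage and
 * create flags.  Sparse and capture/replay buffers additionally have to be
 * aligned to the kernel's bind granularity so they can get their own VA
 * range.
 */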
static uint32_t
nvk_get_buffer_alignment(const struct nvk_physical_device *pdev,
                         VkBufferUsageFlags2KHR usage_flags,
                         VkBufferCreateFlags create_flags)
{
   uint32_t alignment = 16;

   if (usage_flags & VK_BUFFER_USAGE_2_UNIFORM_BUFFER_BIT_KHR)
      alignment = MAX2(alignment, nvk_min_cbuf_alignment(&pdev->info));

   if (usage_flags & VK_BUFFER_USAGE_2_STORAGE_BUFFER_BIT_KHR)
      alignment = MAX2(alignment, NVK_MIN_SSBO_ALIGNMENT);

   if (usage_flags & (VK_BUFFER_USAGE_2_UNIFORM_TEXEL_BUFFER_BIT_KHR |
                      VK_BUFFER_USAGE_2_STORAGE_TEXEL_BUFFER_BIT_KHR))
      alignment = MAX2(alignment, NVK_MIN_TEXEL_BUFFER_ALIGNMENT);

   if (create_flags & (VK_BUFFER_CREATE_SPARSE_BINDING_BIT |
                       VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT))
      alignment = MAX2(alignment, pdev->nvkmd->bind_align_B);

   return alignment;
}

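/* Scans the pNext chain for an opaque capture address provided by the app.
 * Both the core VkBufferOpaqueCaptureAddressCreateInfo struct and the older
 * VkBufferDeviceAddressCreateInfoEXT struct are honored.  In debug builds,
 * we keep scanning and assert that all provided addresses agree.
 */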
static uint64_t
nvk_get_bda_replay_addr(const VkBufferCreateInfo *pCreateInfo)
{
   uint64_t addr = 0;
   vk_foreach_struct_const(ext, pCreateInfo->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO: {
         const VkBufferOpaqueCaptureAddressCreateInfo *bda = (void *)ext;
         if (bda->opaqueCaptureAddress != 0) {
#ifdef NDEBUG
            return bda->opaqueCaptureAddress;
#else
            assert(addr == 0 || bda->opaqueCaptureAddress == addr);
            addr = bda->opaqueCaptureAddress;
#endif
         }
         break;
      }

      case VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_CREATE_INFO_EXT: {
         const VkBufferDeviceAddressCreateInfoEXT *bda = (void *)ext;
         if (bda->deviceAddress != 0) {
#ifdef NDEBUG
            return bda->deviceAddress;
#else
            assert(addr == 0 || bda->deviceAddress == addr);
            addr = bda->deviceAddress;
#endif
         }
         break;
      }

      default:
         break;
      }
   }

   return addr;
}

VKAPI_ATTR VkResult VKAPI_CALL
nvk_CreateBuffer(VkDevice device,
                 const VkBufferCreateInfo *pCreateInfo,
                 const VkAllocationCallbacks *pAllocator,
                 VkBuffer *pBuffer)
{
   VK_FROM_HANDLE(nvk_device, dev, device);
   struct nvk_buffer *buffer;
   VkResult result;

   if (pCreateInfo->size > NVK_MAX_BUFFER_SIZE)
      return vk_error(dev, VK_ERROR_OUT_OF_DEVICE_MEMORY);

   buffer = vk_buffer_create(&dev->vk, pCreateInfo, pAllocator,
                             sizeof(*buffer));
   if (!buffer)
      return vk_error(dev, VK_ERROR_OUT_OF_HOST_MEMORY);

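   /* Sparse and capture/replay buffers get their own VA range up front and
    * memory is bound into that range later.  All other buffers simply
    * inherit the address of whatever memory gets bound to them.
    */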
   if (buffer->vk.size > 0 &&
       (buffer->vk.create_flags & (VK_BUFFER_CREATE_SPARSE_BINDING_BIT |
                                   VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT))) {
      const uint32_t alignment =
         nvk_get_buffer_alignment(nvk_device_physical(dev),
                                  buffer->vk.usage,
                                  buffer->vk.create_flags);
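      /* The bind alignment is at least a page, so VA ranges allocated here
       * are always page-aligned.
       */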
      assert(alignment >= 4096);
      const uint64_t va_size_B = align64(buffer->vk.size, alignment);

      enum nvkmd_va_flags va_flags = 0;
      if (buffer->vk.create_flags & VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT)
         va_flags |= NVKMD_VA_SPARSE;

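      /* For capture/replay, if the app handed us the address it captured on
       * the original run, allocate the VA at exactly that address.
       */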
      uint64_t fixed_addr = 0;
      if (buffer->vk.create_flags & VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT) {
         va_flags |= NVKMD_VA_REPLAY;

         fixed_addr = nvk_get_bda_replay_addr(pCreateInfo);
         if (fixed_addr != 0)
            va_flags |= NVKMD_VA_ALLOC_FIXED;
      }

      result = nvkmd_dev_alloc_va(dev->nvkmd, &dev->vk.base,
                                  va_flags, 0 /* pte_kind */,
                                  va_size_B, alignment, fixed_addr,
                                  &buffer->va);
      if (result != VK_SUCCESS) {
         vk_buffer_destroy(&dev->vk, pAllocator, &buffer->vk);
         return result;
      }

      buffer->addr = buffer->va->addr;
   }

   *pBuffer = nvk_buffer_to_handle(buffer);

   return VK_SUCCESS;
}

VKAPI_ATTR void VKAPI_CALL
nvk_DestroyBuffer(VkDevice device,
                  VkBuffer _buffer,
                  const VkAllocationCallbacks *pAllocator)
{
   VK_FROM_HANDLE(nvk_device, dev, device);
   VK_FROM_HANDLE(nvk_buffer, buffer, _buffer);

   if (!buffer)
      return;

   if (buffer->va != NULL)
      nvkmd_va_free(buffer->va);

   vk_buffer_destroy(&dev->vk, pAllocator, &buffer->vk);
}

VKAPI_ATTR void VKAPI_CALL
nvk_GetDeviceBufferMemoryRequirements(
   VkDevice device,
   const VkDeviceBufferMemoryRequirements *pInfo,
   VkMemoryRequirements2 *pMemoryRequirements)
{
   VK_FROM_HANDLE(nvk_device, dev, device);
   struct nvk_physical_device *pdev = nvk_device_physical(dev);

   const uint32_t alignment =
      nvk_get_buffer_alignment(pdev,
                               pInfo->pCreateInfo->usage,
                               pInfo->pCreateInfo->flags);

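   /* Buffers can live in any memory type.  The size is padded to the
    * alignment so the entire VA range can be bound to memory.
    */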
   pMemoryRequirements->memoryRequirements = (VkMemoryRequirements) {
      .size = align64(pInfo->pCreateInfo->size, alignment),
      .alignment = alignment,
      .memoryTypeBits = BITFIELD_MASK(pdev->mem_type_count),
   };

   vk_foreach_struct_const(ext, pMemoryRequirements->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS: {
         VkMemoryDedicatedRequirements *dedicated = (void *)ext;
         dedicated->prefersDedicatedAllocation = false;
         dedicated->requiresDedicatedAllocation = false;
         break;
      }
      default:
         vk_debug_ignored_stype(ext->sType);
         break;
      }
   }
}

VKAPI_ATTR void VKAPI_CALL
nvk_GetPhysicalDeviceExternalBufferProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalBufferInfo *pExternalBufferInfo,
   VkExternalBufferProperties *pExternalBufferProperties)
{
   /* The Vulkan 1.3.256 spec says:
    *
    *    VUID-VkPhysicalDeviceExternalBufferInfo-handleType-parameter
    *
    *    "handleType must be a valid VkExternalMemoryHandleTypeFlagBits
    *    value"
    *
    * This differs from VkPhysicalDeviceExternalImageFormatInfo, which
    * surprisingly permits handleType == 0.
    */
   assert(pExternalBufferInfo->handleType != 0);

   /* All of the current flags are for sparse binding and, even though we
    * support sparse buffers, doing sparse on external memory sounds
    * sketchy.  Disallowing all flags is the safe option.
    */
   if (pExternalBufferInfo->flags)
      goto unsupported;

   switch (pExternalBufferInfo->handleType) {
   case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT:
   case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT:
      pExternalBufferProperties->externalMemoryProperties =
         nvk_dma_buf_mem_props;
      return;
   default:
      goto unsupported;
   }

unsupported:
   /* From the Vulkan 1.3.256 spec:
    *
    *    compatibleHandleTypes must include at least handleType.
    */
   pExternalBufferProperties->externalMemoryProperties =
      (VkExternalMemoryProperties) {
         .compatibleHandleTypes = pExternalBufferInfo->handleType,
      };
}

static VkResult
nvk_bind_buffer_memory(struct nvk_device *dev,
                       const VkBindBufferMemoryInfo *info)
{
   VK_FROM_HANDLE(nvk_device_memory, mem, info->memory);
   VK_FROM_HANDLE(nvk_buffer, buffer, info->buffer);
   VkResult result = VK_SUCCESS;

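   /* Buffers that own a VA range (sparse or capture/replay) bind the memory
    * into that range.  Everything else just takes the memory's address plus
    * the bind offset.
    */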
   if (buffer->va != NULL) {
      result = nvkmd_va_bind_mem(buffer->va, &buffer->vk.base,
                                 0 /* va_offset */,
                                 mem->mem, info->memoryOffset,
                                 buffer->va->size_B);
   } else {
      buffer->addr = mem->mem->va->addr + info->memoryOffset;
   }

   return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
nvk_BindBufferMemory2(VkDevice device,
                      uint32_t bindInfoCount,
                      const VkBindBufferMemoryInfo *pBindInfos)
{
   VK_FROM_HANDLE(nvk_device, dev, device);
   VkResult first_error_or_success = VK_SUCCESS;

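   /* Per VK_KHR_maintenance6, report per-bind status through
    * VkBindMemoryStatusKHR when provided and return the first failure, if
    * any, while still attempting every bind.
    */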
   for (uint32_t i = 0; i < bindInfoCount; ++i) {
      VkResult result = nvk_bind_buffer_memory(dev, &pBindInfos[i]);

      const VkBindMemoryStatusKHR *status =
         vk_find_struct_const(pBindInfos[i].pNext, BIND_MEMORY_STATUS_KHR);
      if (status != NULL && status->pResult != NULL)
         *status->pResult = result;

      if (first_error_or_success == VK_SUCCESS)
         first_error_or_success = result;
   }

   return first_error_or_success;
}

VKAPI_ATTR VkDeviceAddress VKAPI_CALL
nvk_GetBufferDeviceAddress(UNUSED VkDevice device,
                           const VkBufferDeviceAddressInfo *pInfo)
{
   VK_FROM_HANDLE(nvk_buffer, buffer, pInfo->buffer);

   return nvk_buffer_address(buffer, 0);
}

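/* The opaque capture address is the same as the device address.  Apps pass
 * it back through VkBufferOpaqueCaptureAddressCreateInfo on replay and we
 * allocate the VA at that fixed address in nvk_CreateBuffer.
 */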
VKAPI_ATTR uint64_t VKAPI_CALL
nvk_GetBufferOpaqueCaptureAddress(UNUSED VkDevice device,
                                  const VkBufferDeviceAddressInfo *pInfo)
{
   VK_FROM_HANDLE(nvk_buffer, buffer, pInfo->buffer);

   return nvk_buffer_address(buffer, 0);
}

VkResult
nvk_queue_buffer_bind(struct nvk_queue *queue,
                      const VkSparseBufferMemoryBindInfo *bind_info)
{
   VK_FROM_HANDLE(nvk_buffer, buffer, bind_info->buffer);
   VkResult result;

   const uint32_t bind_count = bind_info->bindCount;
   if (bind_count == 0)
      return VK_SUCCESS;

   STACK_ARRAY(struct nvkmd_ctx_bind, binds, bind_count);

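   /* Translate each VkSparseMemoryBind into an nvkmd_ctx_bind.  A NULL
    * memory handle means the range gets unbound.
    */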
   for (unsigned i = 0; i < bind_count; i++) {
      const VkSparseMemoryBind *bind = &bind_info->pBinds[i];
      VK_FROM_HANDLE(nvk_device_memory, mem, bind->memory);

      binds[i] = (struct nvkmd_ctx_bind) {
         .op = mem ? NVKMD_BIND_OP_BIND : NVKMD_BIND_OP_UNBIND,
         .va = buffer->va,
         .va_offset_B = bind->resourceOffset,
         .mem = mem ? mem->mem : NULL,
         .mem_offset_B = mem ? bind->memoryOffset : 0,
         .range_B = bind->size,
      };
   }

   result = nvkmd_ctx_bind(queue->bind_ctx, &queue->vk.base,
                           bind_count, binds);

   STACK_ARRAY_FINISH(binds);

   return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
nvk_GetBufferOpaqueCaptureDescriptorDataEXT(
   VkDevice device,
   const VkBufferCaptureDescriptorDataInfoEXT *pInfo,
   void *pData)
{
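   /* We report no buffer capture descriptor data; buffer address
    * capture/replay is handled through the BDA capture/replay path, so
    * there is nothing to write to pData here.
    */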
   return VK_SUCCESS;
}