/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 *
 * based in part on anv and radv which are:
 * Copyright © 2015 Intel Corporation
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 */

#include "vn_device.h"

#include "util/disk_cache.h"
#include "util/hex.h"
#include "venus-protocol/vn_protocol_driver_device.h"

#include "vn_android.h"
#include "vn_instance.h"
#include "vn_physical_device.h"
#include "vn_queue.h"

/* device commands */

static void
vn_queue_fini(struct vn_queue *queue)
{
   VkDevice dev_handle = vk_device_to_handle(queue->base.base.base.device);

   if (queue->wait_fence != VK_NULL_HANDLE) {
      vn_DestroyFence(dev_handle, queue->wait_fence, NULL);
   }
   if (queue->sparse_semaphore != VK_NULL_HANDLE) {
      vn_DestroySemaphore(dev_handle, queue->sparse_semaphore, NULL);
   }
   vn_cached_storage_fini(&queue->storage);
   vn_queue_base_fini(&queue->base);
}

static VkResult
vn_queue_init(struct vn_device *dev,
              struct vn_queue *queue,
              const VkDeviceQueueCreateInfo *queue_info,
              uint32_t queue_index)
{
   VkResult result =
      vn_queue_base_init(&queue->base, &dev->base, queue_info, queue_index);
   if (result != VK_SUCCESS)
      return result;

   vn_cached_storage_init(&queue->storage, &dev->base.base.alloc);

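   /* Bind this VkQueue to a dedicated renderer ring (timeline) index; the
    * index is handed to the renderer below via VkDeviceQueueTimelineInfoMESA.
    */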
   const int ring_idx = vn_instance_acquire_ring_idx(dev->instance);
   if (ring_idx < 0) {
      vn_log(dev->instance, "failed binding VkQueue to renderer timeline");
      return VK_ERROR_INITIALIZATION_FAILED;
   }
   queue->ring_idx = (uint32_t)ring_idx;

   const VkDeviceQueueTimelineInfoMESA timeline_info = {
      .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_TIMELINE_INFO_MESA,
      .ringIdx = queue->ring_idx,
   };
   const VkDeviceQueueInfo2 device_queue_info = {
      .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
      .pNext = &timeline_info,
      .flags = queue_info->flags,
      .queueFamilyIndex = queue_info->queueFamilyIndex,
      .queueIndex = queue_index,
   };

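   /* Inform the renderer of the queue binding; vn_async_* encodes the
    * command on the ring without waiting for a reply.
    */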
   VkQueue queue_handle = vn_queue_to_handle(queue);
   vn_async_vkGetDeviceQueue2(dev->primary_ring, vn_device_to_handle(dev),
                              &device_queue_info, &queue_handle);

   return VK_SUCCESS;
}

static VkResult
vn_device_init_queues(struct vn_device *dev,
                      const VkDeviceCreateInfo *create_info)
{
   const VkAllocationCallbacks *alloc = &dev->base.base.alloc;

   uint32_t count = 0;
   for (uint32_t i = 0; i < create_info->queueCreateInfoCount; i++)
      count += create_info->pQueueCreateInfos[i].queueCount;

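   /* Allocate one vn_queue for every queue requested across all of the
    * VkDeviceQueueCreateInfo entries.
    */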
   struct vn_queue *queues =
      vk_zalloc(alloc, sizeof(*queues) * count, VN_DEFAULT_ALIGN,
                VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!queues)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

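   /* Initialize the queues in order; on failure, finalize the ones already
    * initialized before bailing out.
    */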
   count = 0;
   for (uint32_t i = 0; i < create_info->queueCreateInfoCount; i++) {
      VkResult result;

      const VkDeviceQueueCreateInfo *queue_info =
         &create_info->pQueueCreateInfos[i];
      for (uint32_t j = 0; j < queue_info->queueCount; j++) {
         result = vn_queue_init(dev, &queues[count], queue_info, j);
         if (result != VK_SUCCESS) {
            for (uint32_t k = 0; k < count; k++)
               vn_queue_fini(&queues[k]);
            vk_free(alloc, queues);

            return result;
         }

         count++;
      }
   }

   dev->queues = queues;
   dev->queue_count = count;

   return VK_SUCCESS;
}

static bool
vn_device_queue_family_init(struct vn_device *dev,
                            const VkDeviceCreateInfo *create_info)
{
   const VkAllocationCallbacks *alloc = &dev->base.base.alloc;
   uint32_t *queue_families = NULL;
   uint32_t count = 0;

   queue_families = vk_zalloc(
      alloc, sizeof(*queue_families) * create_info->queueCreateInfoCount,
      VN_DEFAULT_ALIGN, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!queue_families)
      return false;

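   /* Collect the distinct queue family indices referenced by the create
    * info; duplicates across VkDeviceQueueCreateInfo entries are skipped.
    */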
   for (uint32_t i = 0; i < create_info->queueCreateInfoCount; i++) {
      const uint32_t index =
         create_info->pQueueCreateInfos[i].queueFamilyIndex;
      bool new_index = true;

      for (uint32_t j = 0; j < count; j++) {
         if (queue_families[j] == index) {
            new_index = false;
            break;
         }
      }
      if (new_index)
         queue_families[count++] = index;
   }

   dev->queue_families = queue_families;
   dev->queue_family_count = count;

   return true;
}

static inline void
vn_device_queue_family_fini(struct vn_device *dev)
{
   vk_free(&dev->base.base.alloc, dev->queue_families);
}

static VkResult
vn_device_memory_report_init(struct vn_device *dev,
                             const VkDeviceCreateInfo *create_info)
{
   const struct vk_features *app_feats = &dev->base.base.enabled_features;
   if (!app_feats->deviceMemoryReport)
      return VK_SUCCESS;

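   /* First pass: count the VkDeviceDeviceMemoryReportCreateInfoEXT structs
    * chained by the app.
    */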
   uint32_t count = 0;
   vk_foreach_struct_const(pnext, create_info->pNext) {
      if (pnext->sType ==
          VK_STRUCTURE_TYPE_DEVICE_DEVICE_MEMORY_REPORT_CREATE_INFO_EXT)
         count++;
   }

   struct vn_device_memory_report *mem_reports = NULL;
   if (count) {
      mem_reports =
         vk_alloc(&dev->base.base.alloc, sizeof(*mem_reports) * count,
                  VN_DEFAULT_ALIGN, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
      if (!mem_reports)
         return VK_ERROR_OUT_OF_HOST_MEMORY;
   }

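   /* Second pass: record each callback and its user data pointer. */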
   count = 0;
   vk_foreach_struct_const(pnext, create_info->pNext) {
      if (pnext->sType ==
          VK_STRUCTURE_TYPE_DEVICE_DEVICE_MEMORY_REPORT_CREATE_INFO_EXT) {
         const struct VkDeviceDeviceMemoryReportCreateInfoEXT *report =
            (void *)pnext;
         mem_reports[count].callback = report->pfnUserCallback;
         mem_reports[count].data = report->pUserData;
         count++;
      }
   }

   dev->memory_report_count = count;
   dev->memory_reports = mem_reports;

   return VK_SUCCESS;
}

static inline void
vn_device_memory_report_fini(struct vn_device *dev)
{
   vk_free(&dev->base.base.alloc, dev->memory_reports);
}

static bool
find_extension_names(const char *const *exts,
                     uint32_t ext_count,
                     const char *name)
{
   for (uint32_t i = 0; i < ext_count; i++) {
      if (!strcmp(exts[i], name))
         return true;
   }
   return false;
}

static bool
merge_extension_names(const char *const *exts,
                      uint32_t ext_count,
                      const char *const *extra_exts,
                      uint32_t extra_count,
                      const char *const *block_exts,
                      uint32_t block_count,
                      const VkAllocationCallbacks *alloc,
                      const char *const **out_exts,
                      uint32_t *out_count)
{
   const char **merged =
      vk_alloc(alloc, sizeof(*merged) * (ext_count + extra_count),
               VN_DEFAULT_ALIGN, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!merged)
      return false;

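   /* Copy the app extensions that are not blocked, then append the extra
    * extensions the app did not already request. merged is sized for the
    * worst case of no overlap between the two lists.
    */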
   uint32_t count = 0;
   for (uint32_t i = 0; i < ext_count; i++) {
      if (!find_extension_names(block_exts, block_count, exts[i]))
         merged[count++] = exts[i];
   }
   for (uint32_t i = 0; i < extra_count; i++) {
      if (!find_extension_names(exts, ext_count, extra_exts[i]))
         merged[count++] = extra_exts[i];
   }

   *out_exts = merged;
   *out_count = count;
   return true;
}

static const VkDeviceCreateInfo *
vn_device_fix_create_info(const struct vn_device *dev,
                          const VkDeviceCreateInfo *dev_info,
                          const VkAllocationCallbacks *alloc,
                          VkDeviceCreateInfo *local_info)
{
   const struct vn_physical_device *physical_dev = dev->physical_device;
   const struct vk_device_extension_table *app_exts =
      &dev->base.base.enabled_extensions;
   /* extra_exts and block_exts must not overlap */
   const char *extra_exts[16];
   const char *block_exts[16];
   uint32_t extra_count = 0;
   uint32_t block_count = 0;

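   /* Fix up the app's create info before passing it to the renderer: append
    * extensions the driver needs internally (e.g. for WSI and external
    * memory support) and block extensions that are implemented natively on
    * the driver side.
    */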
   /* fix for WSI (treat AHB as WSI extension for simplicity) */
   const bool has_wsi =
      app_exts->KHR_swapchain || app_exts->ANDROID_native_buffer ||
      app_exts->ANDROID_external_memory_android_hardware_buffer;
   if (has_wsi) {
      if (!app_exts->EXT_image_drm_format_modifier) {
         extra_exts[extra_count++] =
            VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_EXTENSION_NAME;

         if (physical_dev->renderer_version < VK_API_VERSION_1_2 &&
             !app_exts->KHR_image_format_list) {
            extra_exts[extra_count++] =
               VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME;
         }
      }

      if (!app_exts->EXT_queue_family_foreign) {
         extra_exts[extra_count++] =
            VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME;
      }

      if (app_exts->KHR_swapchain) {
         /* see vn_physical_device_get_native_extensions */
         block_exts[block_count++] = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
         block_exts[block_count++] =
            VK_KHR_SWAPCHAIN_MUTABLE_FORMAT_EXTENSION_NAME;
         block_exts[block_count++] =
            VK_KHR_INCREMENTAL_PRESENT_EXTENSION_NAME;
      }

      if (app_exts->ANDROID_native_buffer) {
         /* see vn_QueueSignalReleaseImageANDROID */
         if (!app_exts->KHR_external_fence_fd) {
            assert(physical_dev->renderer_sync_fd.fence_exportable);
            extra_exts[extra_count++] =
               VK_KHR_EXTERNAL_FENCE_FD_EXTENSION_NAME;
         }

         block_exts[block_count++] = VK_ANDROID_NATIVE_BUFFER_EXTENSION_NAME;
      }

      if (app_exts->ANDROID_external_memory_android_hardware_buffer) {
         block_exts[block_count++] =
            VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME;
      }
   }

   if (app_exts->KHR_external_memory_fd ||
       app_exts->EXT_external_memory_dma_buf || has_wsi) {
      if (physical_dev->external_memory.renderer_handle_type ==
          VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT) {
         if (!app_exts->EXT_external_memory_dma_buf) {
            extra_exts[extra_count++] =
               VK_EXT_EXTERNAL_MEMORY_DMA_BUF_EXTENSION_NAME;
         }
         if (!app_exts->KHR_external_memory_fd) {
            extra_exts[extra_count++] =
               VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME;
         }
      }
   }

   /* see vn_queue_submission_count_batch_semaphores */
   if (!app_exts->KHR_external_semaphore_fd && has_wsi) {
      assert(physical_dev->renderer_sync_fd.semaphore_importable);
      extra_exts[extra_count++] = VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME;
   }

   if (app_exts->EXT_device_memory_report) {
      /* see vn_physical_device_get_native_extensions */
      block_exts[block_count++] = VK_EXT_DEVICE_MEMORY_REPORT_EXTENSION_NAME;
   }

   if (app_exts->EXT_physical_device_drm) {
      /* see vn_physical_device_get_native_extensions */
      block_exts[block_count++] = VK_EXT_PHYSICAL_DEVICE_DRM_EXTENSION_NAME;
   }

   if (app_exts->EXT_tooling_info) {
      /* see vn_physical_device_get_native_extensions */
      block_exts[block_count++] = VK_EXT_TOOLING_INFO_EXTENSION_NAME;
   }

   if (app_exts->EXT_pci_bus_info) {
      /* always filter for simplicity */
      block_exts[block_count++] = VK_EXT_PCI_BUS_INFO_EXTENSION_NAME;
   }

   assert(extra_count <= ARRAY_SIZE(extra_exts));
   assert(block_count <= ARRAY_SIZE(block_exts));

   if (!extra_count && (!block_count || !dev_info->enabledExtensionCount))
      return dev_info;

   *local_info = *dev_info;
   if (!merge_extension_names(dev_info->ppEnabledExtensionNames,
                              dev_info->enabledExtensionCount, extra_exts,
                              extra_count, block_exts, block_count, alloc,
                              &local_info->ppEnabledExtensionNames,
                              &local_info->enabledExtensionCount))
      return NULL;

   return local_info;
}

static inline VkResult
vn_device_feedback_pool_init(struct vn_device *dev)
{
   /* The feedback pool defaults to suballocating slots of 8 bytes each. The
    * initial pool size of 4096 corresponds to a total of 512 fences,
    * semaphores and events, which covers the common scenarios well. The pool
    * can grow on demand anyway.
    */
   static const uint32_t pool_size = 4096;
   const VkAllocationCallbacks *alloc = &dev->base.base.alloc;

   if (VN_PERF(NO_EVENT_FEEDBACK) && VN_PERF(NO_FENCE_FEEDBACK) &&
       VN_PERF(NO_SEMAPHORE_FEEDBACK))
      return VK_SUCCESS;

   return vn_feedback_pool_init(dev, &dev->feedback_pool, pool_size, alloc);
}

static inline void
vn_device_feedback_pool_fini(struct vn_device *dev)
{
   if (VN_PERF(NO_EVENT_FEEDBACK) && VN_PERF(NO_FENCE_FEEDBACK) &&
       VN_PERF(NO_SEMAPHORE_FEEDBACK))
      return;

   vn_feedback_pool_fini(&dev->feedback_pool);
}

static void
vn_device_update_shader_cache_id(struct vn_device *dev)
{
   /* venus uses the host-side shader cache. This is a workaround to generate
    * shader cache files containing headers with a unique cache id that
    * changes based on host driver identifiers. This allows fossilize replay
    * to detect when the host-side shader cache is no longer up to date. The
    * shader cache created here is destroyed after writing the necessary
    * files and is not otherwise used by venus.
    */
#if !DETECT_OS_ANDROID && defined(ENABLE_SHADER_CACHE)
   const uint8_t *device_uuid =
      dev->physical_device->base.base.properties.pipelineCacheUUID;

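   /* Use the hex-encoded pipeline cache UUID as the cache id string so the
    * generated cache headers change with the host driver identifiers.
    */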
   char uuid[VK_UUID_SIZE * 2 + 1];
   mesa_bytes_to_hex(uuid, device_uuid, VK_UUID_SIZE);

   struct disk_cache *cache = disk_cache_create("venus", uuid, 0);
   if (!cache)
      return;

   /* The entry header is what contains the cache id / timestamp so we
    * need to create a fake entry.
    */
   uint8_t key[20];
   char data[] = "Fake Shader";

   disk_cache_compute_key(cache, data, sizeof(data), key);
   disk_cache_put(cache, key, data, sizeof(data), NULL);

   disk_cache_destroy(cache);
#endif
}

static VkResult
vn_device_init(struct vn_device *dev,
               struct vn_physical_device *physical_dev,
               const VkDeviceCreateInfo *create_info,
               const VkAllocationCallbacks *alloc)
{
   struct vn_instance *instance = physical_dev->instance;
   VkPhysicalDevice physical_dev_handle =
      vn_physical_device_to_handle(physical_dev);
   VkDevice dev_handle = vn_device_to_handle(dev);
   VkDeviceCreateInfo local_create_info;
   VkResult result;

   dev->instance = instance;
   dev->physical_device = physical_dev;
   dev->device_mask = 1;
   dev->renderer = instance->renderer;
   dev->primary_ring = instance->ring.ring;

   create_info =
      vn_device_fix_create_info(dev, create_info, alloc, &local_create_info);
   if (!create_info)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

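   /* When the app creates the device from a device group, widen the default
    * single-device mask to cover every physical device in the group.
    */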
   const VkDeviceGroupDeviceCreateInfo *group = vk_find_struct_const(
      create_info->pNext, DEVICE_GROUP_DEVICE_CREATE_INFO);
   if (group && group->physicalDeviceCount)
      dev->device_mask = (1 << group->physicalDeviceCount) - 1;

   result = vn_call_vkCreateDevice(dev->primary_ring, physical_dev_handle,
                                   create_info, NULL, &dev_handle);

   /* free the fixed extensions here since they are no longer needed below */
   if (create_info == &local_create_info)
      vk_free(alloc, (void *)create_info->ppEnabledExtensionNames);

   if (result != VK_SUCCESS)
      return result;

   result = vn_device_memory_report_init(dev, create_info);
   if (result != VK_SUCCESS)
      goto out_destroy_device;

   if (!vn_device_queue_family_init(dev, create_info)) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto out_memory_report_fini;
   }

   result = vn_device_feedback_pool_init(dev);
   if (result != VK_SUCCESS)
      goto out_queue_family_fini;

   result = vn_feedback_cmd_pools_init(dev);
   if (result != VK_SUCCESS)
      goto out_feedback_pool_fini;

   result = vn_device_init_queues(dev, create_info);
   if (result != VK_SUCCESS)
      goto out_feedback_cmd_pools_fini;

   vn_buffer_reqs_cache_init(dev);
   vn_image_reqs_cache_init(dev);

   /* This is a WA to allow fossilize replay to detect if the host side
    * shader cache is no longer up to date.
    */
   vn_device_update_shader_cache_id(dev);

   return VK_SUCCESS;

out_feedback_cmd_pools_fini:
   vn_feedback_cmd_pools_fini(dev);

out_feedback_pool_fini:
   vn_device_feedback_pool_fini(dev);

out_queue_family_fini:
   vn_device_queue_family_fini(dev);

out_memory_report_fini:
   vn_device_memory_report_fini(dev);

out_destroy_device:
   vn_call_vkDestroyDevice(dev->primary_ring, dev_handle, NULL);

   return result;
}

VkResult
vn_CreateDevice(VkPhysicalDevice physicalDevice,
                const VkDeviceCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkDevice *pDevice)
{
   VN_TRACE_FUNC();
   struct vn_physical_device *physical_dev =
      vn_physical_device_from_handle(physicalDevice);
   struct vn_instance *instance = physical_dev->instance;
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &instance->base.base.alloc;
   struct vn_device *dev;
   VkResult result;

   dev = vk_zalloc(alloc, sizeof(*dev), VN_DEFAULT_ALIGN,
                   VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!dev)
      return vn_error(instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   struct vk_device_dispatch_table dispatch_table;
   vk_device_dispatch_table_from_entrypoints(&dispatch_table,
                                             &vn_device_entrypoints, true);
   vk_device_dispatch_table_from_entrypoints(&dispatch_table,
                                             &wsi_device_entrypoints, false);
   result = vn_device_base_init(&dev->base, &physical_dev->base,
                                &dispatch_table, pCreateInfo, alloc);
   if (result != VK_SUCCESS) {
      vk_free(alloc, dev);
      return vn_error(instance, result);
   }

   result = vn_device_init(dev, physical_dev, pCreateInfo, alloc);
   if (result != VK_SUCCESS) {
      vn_device_base_fini(&dev->base);
      vk_free(alloc, dev);
      return vn_error(instance, result);
   }

   if (VN_DEBUG(LOG_CTX_INFO)) {
      vn_log(instance, "%s", physical_dev->base.base.properties.deviceName);
      vn_log(instance, "%s", physical_dev->base.base.properties.driverInfo);
   }

   vn_tls_set_async_pipeline_create();

   *pDevice = vn_device_to_handle(dev);

   return VK_SUCCESS;
}

void
vn_DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator)
{
   VN_TRACE_FUNC();
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   if (!dev)
      return;

   vn_image_reqs_cache_fini(dev);
   vn_buffer_reqs_cache_fini(dev);

   for (uint32_t i = 0; i < dev->queue_count; i++)
      vn_queue_fini(&dev->queues[i]);

   vn_feedback_cmd_pools_fini(dev);

   vn_device_feedback_pool_fini(dev);

   vn_device_queue_family_fini(dev);

   vn_device_memory_report_fini(dev);

   vn_async_vkDestroyDevice(dev->primary_ring, device, NULL);

   /* We must emit vn_async_vkDestroyDevice before releasing the bound
    * ring_idx. Otherwise, another thread might reuse a released ring_idx
    * while it is still bound to a queue in the renderer.
    */
   for (uint32_t i = 0; i < dev->queue_count; i++) {
      vn_instance_release_ring_idx(dev->instance, dev->queues[i].ring_idx);
   }

   vk_free(alloc, dev->queues);

   vn_device_base_fini(&dev->base);
   vk_free(alloc, dev);
}

PFN_vkVoidFunction
vn_GetDeviceProcAddr(VkDevice device, const char *pName)
{
   struct vn_device *dev = vn_device_from_handle(device);
   return vk_device_get_proc_addr(&dev->base.base, pName);
}

void
vn_GetDeviceGroupPeerMemoryFeatures(
   VkDevice device,
   uint32_t heapIndex,
   uint32_t localDeviceIndex,
   uint32_t remoteDeviceIndex,
   VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
{
   struct vn_device *dev = vn_device_from_handle(device);

   /* TODO get and cache the values in vkCreateDevice */
   vn_call_vkGetDeviceGroupPeerMemoryFeatures(
      dev->primary_ring, device, heapIndex, localDeviceIndex,
      remoteDeviceIndex, pPeerMemoryFeatures);
}

VkResult
vn_GetCalibratedTimestampsEXT(
   VkDevice device,
   uint32_t timestampCount,
   const VkCalibratedTimestampInfoEXT *pTimestampInfos,
   uint64_t *pTimestamps,
   uint64_t *pMaxDeviation)
{
   struct vn_device *dev = vn_device_from_handle(device);
   uint64_t begin, end, max_clock_period = 0;
   VkResult ret;
   int domain;

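   /* Sample the host clock before and after querying the time domains so
    * that the reported max deviation accounts for the elapsed time plus the
    * largest clock period involved.
    */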
#ifdef CLOCK_MONOTONIC_RAW
   begin = vk_clock_gettime(CLOCK_MONOTONIC_RAW);
#else
   begin = vk_clock_gettime(CLOCK_MONOTONIC);
#endif

   for (domain = 0; domain < timestampCount; domain++) {
      switch (pTimestampInfos[domain].timeDomain) {
      case VK_TIME_DOMAIN_DEVICE_EXT: {
         uint64_t device_max_deviation = 0;

         ret = vn_call_vkGetCalibratedTimestampsEXT(
            dev->primary_ring, device, 1, &pTimestampInfos[domain],
            &pTimestamps[domain], &device_max_deviation);

         if (ret != VK_SUCCESS)
            return vn_error(dev->instance, ret);

         max_clock_period = MAX2(max_clock_period, device_max_deviation);
         break;
      }
      case VK_TIME_DOMAIN_CLOCK_MONOTONIC_EXT:
         pTimestamps[domain] = vk_clock_gettime(CLOCK_MONOTONIC);
         max_clock_period = MAX2(max_clock_period, 1);
         break;
#ifdef CLOCK_MONOTONIC_RAW
      case VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_EXT:
         pTimestamps[domain] = begin;
         break;
#endif
      default:
         pTimestamps[domain] = 0;
         break;
      }
   }

#ifdef CLOCK_MONOTONIC_RAW
   end = vk_clock_gettime(CLOCK_MONOTONIC_RAW);
#else
   end = vk_clock_gettime(CLOCK_MONOTONIC);
#endif

   *pMaxDeviation = vk_time_max_deviation(begin, end, max_clock_period);

   return VK_SUCCESS;
}