/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "wsi_common_private.h"
#include "wsi_common_drm.h"
#include "util/macros.h"
#include "util/os_file.h"
#include "util/os_time.h" /* os_time_get_nano(), used for syncobj timeouts */
#include "util/log.h"
#include "util/xmlconfig.h"
#include "vk_device.h"
#include "vk_physical_device.h"
#include "vk_log.h"
#include "vk_util.h"
#include "drm-uapi/drm_fourcc.h"
#include "drm-uapi/dma-buf.h"
#include "util/libsync.h"

#include <errno.h>
#include <time.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <xf86drm.h>

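/* DRM-specific parts of the common Vulkan WSI code.
 *
 * Three synchronization strategies live here:
 *
 *  - Implicit sync bridged through the image's dma-buf with
 *    DMA_BUF_IOCTL_EXPORT_SYNC_FILE / DMA_BUF_IOCTL_IMPORT_SYNC_FILE.
 *  - Explicit sync via per-image acquire/release timeline semaphores that
 *    are mirrored as DRM syncobjs.
 *  - Native vs. prime (cross-GPU blit) image configuration and memory
 *    allocation for dma-buf export.
 */
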
static VkResult
wsi_dma_buf_export_sync_file(int dma_buf_fd, int *sync_file_fd)
{
   /* Don't keep trying an IOCTL that doesn't exist. */
   static bool no_dma_buf_sync_file = false;
   if (no_dma_buf_sync_file)
      return VK_ERROR_FEATURE_NOT_PRESENT;

   struct dma_buf_export_sync_file export = {
      .flags = DMA_BUF_SYNC_RW,
      .fd = -1,
   };
   int ret = drmIoctl(dma_buf_fd, DMA_BUF_IOCTL_EXPORT_SYNC_FILE, &export);
   if (ret) {
      if (errno == ENOTTY || errno == EBADF || errno == ENOSYS) {
         no_dma_buf_sync_file = true;
         return VK_ERROR_FEATURE_NOT_PRESENT;
      } else {
         mesa_loge("MESA: failed to export sync file '%s'", strerror(errno));
         return VK_ERROR_OUT_OF_HOST_MEMORY;
      }
   }

   *sync_file_fd = export.fd;

   return VK_SUCCESS;
}

static VkResult
wsi_dma_buf_import_sync_file(int dma_buf_fd, int sync_file_fd)
{
   /* Don't keep trying an IOCTL that doesn't exist. */
   static bool no_dma_buf_sync_file = false;
   if (no_dma_buf_sync_file)
      return VK_ERROR_FEATURE_NOT_PRESENT;

   struct dma_buf_import_sync_file import = {
      .flags = DMA_BUF_SYNC_RW,
      .fd = sync_file_fd,
   };
   int ret = drmIoctl(dma_buf_fd, DMA_BUF_IOCTL_IMPORT_SYNC_FILE, &import);
   if (ret) {
      if (errno == ENOTTY || errno == EBADF || errno == ENOSYS) {
         no_dma_buf_sync_file = true;
         return VK_ERROR_FEATURE_NOT_PRESENT;
      } else {
         mesa_loge("MESA: failed to import sync file '%s'", strerror(errno));
         return VK_ERROR_OUT_OF_HOST_MEMORY;
      }
   }

   return VK_SUCCESS;
}

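/* Probe whether the kernel lets us pull a fence out of the dma-buf and push
 * one back in. The export/import round-trip below is a dry run; if it
 * succeeds, create the cached sync-fd-exportable semaphore that
 * wsi_signal_dma_buf_from_semaphore() uses at present time.
 */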
static VkResult
prepare_signal_dma_buf_from_semaphore(struct wsi_swapchain *chain,
                                      const struct wsi_image *image)
{
   VkResult result;

   if (!(chain->wsi->semaphore_export_handle_types &
         VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT))
      return VK_ERROR_FEATURE_NOT_PRESENT;

   int sync_file_fd = -1;
   result = wsi_dma_buf_export_sync_file(image->dma_buf_fd, &sync_file_fd);
   if (result != VK_SUCCESS)
      return result;

   result = wsi_dma_buf_import_sync_file(image->dma_buf_fd, sync_file_fd);
   close(sync_file_fd);
   if (result != VK_SUCCESS)
      return result;

   /* If we got here, all our checks pass. Create the actual semaphore */
   const VkExportSemaphoreCreateInfo export_info = {
      .sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO,
      .handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
   };
   const VkSemaphoreCreateInfo semaphore_info = {
      .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
      .pNext = &export_info,
   };
   result = chain->wsi->CreateSemaphore(chain->device, &semaphore_info,
                                        &chain->alloc,
                                        &chain->dma_buf_semaphore);
   if (result != VK_SUCCESS)
      return result;

   return VK_SUCCESS;
}

137 VkResult
wsi_prepare_signal_dma_buf_from_semaphore(struct wsi_swapchain * chain,const struct wsi_image * image)138 wsi_prepare_signal_dma_buf_from_semaphore(struct wsi_swapchain *chain,
139 const struct wsi_image *image)
140 {
141 VkResult result;
142
143 /* We cache result - 1 in the swapchain */
144 if (unlikely(chain->signal_dma_buf_from_semaphore == 0)) {
145 result = prepare_signal_dma_buf_from_semaphore(chain, image);
146 assert(result <= 0);
147 chain->signal_dma_buf_from_semaphore = (int)result - 1;
148 } else {
149 result = (VkResult)(chain->signal_dma_buf_from_semaphore + 1);
150 }
151
152 return result;
153 }

VkResult
wsi_signal_dma_buf_from_semaphore(const struct wsi_swapchain *chain,
                                  const struct wsi_image *image)
{
   VkResult result;

   const VkSemaphoreGetFdInfoKHR get_fd_info = {
      .sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR,
      .semaphore = chain->dma_buf_semaphore,
      .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
   };
   int sync_file_fd = -1;
   result = chain->wsi->GetSemaphoreFdKHR(chain->device, &get_fd_info,
                                          &sync_file_fd);
   if (result != VK_SUCCESS)
      return result;

   result = wsi_dma_buf_import_sync_file(image->dma_buf_fd, sync_file_fd);
   close(sync_file_fd);
   return result;
}

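/* Find a driver vk_sync type that can import a sync file and supports all
 * of the requested features.
 */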
static const struct vk_sync_type *
get_sync_file_sync_type(struct vk_device *device,
                        enum vk_sync_features req_features)
{
   for (const struct vk_sync_type *const *t =
           device->physical->supported_sync_types; *t; t++) {
      if (req_features & ~(*t)->features)
         continue;

      if ((*t)->import_sync_file != NULL)
         return *t;
   }

   return NULL;
}

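/* Wrap the dma-buf's current fence state in a vk_sync the driver can wait
 * on, by exporting a sync file from the dma-buf and importing it. Used on
 * implicit-sync winsyses to back the semaphore/fence of an acquire.
 */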
VkResult
wsi_create_sync_for_dma_buf_wait(const struct wsi_swapchain *chain,
                                 const struct wsi_image *image,
                                 enum vk_sync_features req_features,
                                 struct vk_sync **sync_out)
{
   VK_FROM_HANDLE(vk_device, device, chain->device);
   VkResult result;

   const struct vk_sync_type *sync_type =
      get_sync_file_sync_type(device, req_features);
   if (sync_type == NULL)
      return VK_ERROR_FEATURE_NOT_PRESENT;

   int sync_file_fd = -1;
   result = wsi_dma_buf_export_sync_file(image->dma_buf_fd, &sync_file_fd);
   if (result != VK_SUCCESS)
      return result;

   struct vk_sync *sync = NULL;
   result = vk_sync_create(device, sync_type, VK_SYNC_IS_SHAREABLE, 0, &sync);
   if (result != VK_SUCCESS)
      goto fail_close_sync_file;

   result = vk_sync_import_sync_file(device, sync, sync_file_fd);
   if (result != VK_SUCCESS)
      goto fail_destroy_sync;

   close(sync_file_fd);
   *sync_out = sync;

   return VK_SUCCESS;

fail_destroy_sync:
   vk_sync_destroy(device, sync);
fail_close_sync_file:
   close(sync_file_fd);

   return result;
}

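/* Create the per-image acquire/release timeline semaphores for explicit
 * sync, export each as an opaque fd (a syncobj fd on syncobj-backed
 * drivers) and re-import that fd as a DRM syncobj handle so the helpers
 * below can query and wait on it directly.
 */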
VkResult
wsi_create_image_explicit_sync_drm(const struct wsi_swapchain *chain,
                                   struct wsi_image *image)
{
   /* Cleanup of any failures is handled by the caller in wsi_create_image
    * calling wsi_destroy_image -> wsi_destroy_image_explicit_sync_drm. */

   VK_FROM_HANDLE(vk_device, device, chain->device);
   const struct wsi_device *wsi = chain->wsi;
   VkResult result = VK_SUCCESS;
   int ret = 0;

   const VkExportSemaphoreCreateInfo semaphore_export_info = {
      .sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO,
      /* This is a syncobj fd for any drivers using syncobj. */
      .handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT,
   };

   const VkSemaphoreTypeCreateInfo semaphore_type_info = {
      .sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO,
      .pNext = &semaphore_export_info,
      .semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE,
   };

   const VkSemaphoreCreateInfo semaphore_info = {
      .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
      .pNext = &semaphore_type_info,
   };

   for (uint32_t i = 0; i < WSI_ES_COUNT; i++) {
      result = wsi->CreateSemaphore(chain->device,
                                    &semaphore_info,
                                    &chain->alloc,
                                    &image->explicit_sync[i].semaphore);
      if (result != VK_SUCCESS)
         return result;

      const VkSemaphoreGetFdInfoKHR semaphore_get_info = {
         .sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR,
         .semaphore = image->explicit_sync[i].semaphore,
         .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT,
      };

      result = wsi->GetSemaphoreFdKHR(chain->device, &semaphore_get_info,
                                      &image->explicit_sync[i].fd);
      if (result != VK_SUCCESS)
         return result;
   }

   for (uint32_t i = 0; i < WSI_ES_COUNT; i++) {
      ret = drmSyncobjFDToHandle(device->drm_fd, image->explicit_sync[i].fd,
                                 &image->explicit_sync[i].handle);
      if (ret != 0)
         return VK_ERROR_FEATURE_NOT_PRESENT;
   }

   return VK_SUCCESS;
}

void
wsi_destroy_image_explicit_sync_drm(const struct wsi_swapchain *chain,
                                    struct wsi_image *image)
{
   VK_FROM_HANDLE(vk_device, device, chain->device);
   const struct wsi_device *wsi = chain->wsi;

   for (uint32_t i = 0; i < WSI_ES_COUNT; i++) {
      if (image->explicit_sync[i].handle != 0) {
         drmSyncobjDestroy(device->drm_fd, image->explicit_sync[i].handle);
         image->explicit_sync[i].handle = 0;
      }

      if (image->explicit_sync[i].fd >= 0) {
         close(image->explicit_sync[i].fd);
         image->explicit_sync[i].fd = -1;
      }

      if (image->explicit_sync[i].semaphore != VK_NULL_HANDLE) {
         wsi->DestroySemaphore(chain->device, image->explicit_sync[i].semaphore,
                               &chain->alloc);
         image->explicit_sync[i].semaphore = VK_NULL_HANDLE;
      }
   }
}

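/* Create a vk_sync that is already signalled, for images that have no
 * outstanding release to wait on.
 */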
static VkResult
wsi_create_sync_imm(struct vk_device *device, struct vk_sync **sync_out)
{
   const struct vk_sync_type *sync_type =
      get_sync_file_sync_type(device, VK_SYNC_FEATURE_CPU_WAIT);
   struct vk_sync *sync = NULL;
   VkResult result;

   result = vk_sync_create(device, sync_type, VK_SYNC_IS_SHAREABLE, 0, &sync);
   if (result != VK_SUCCESS)
      goto error;

   result = vk_sync_signal(device, sync, 0);
   if (result != VK_SUCCESS)
      goto error;

   *sync_out = sync;
   goto done;

error:
   /* sync stays NULL when vk_sync_create() itself fails */
   if (sync)
      vk_sync_destroy(device, sync);
done:
   return result;
}

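/* Build a vk_sync the driver can wait on before reusing an image: the
 * image's acquire and release timeline points are copied onto temporary
 * binary syncobjs (sync files can only be exported from binary syncobjs),
 * exported as sync files and merged into a single fd.
 */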
VkResult
wsi_create_sync_for_image_syncobj(const struct wsi_swapchain *chain,
                                  const struct wsi_image *image,
                                  enum vk_sync_features req_features,
                                  struct vk_sync **sync_out)
{
   VK_FROM_HANDLE(vk_device, device, chain->device);
   const struct vk_sync_type *sync_type =
      get_sync_file_sync_type(device, VK_SYNC_FEATURE_CPU_WAIT);
   VkResult result = VK_SUCCESS;
   struct vk_sync *sync = NULL;
   int sync_file_fds[WSI_ES_COUNT] = { -1, -1 };
   uint32_t tmp_handles[WSI_ES_COUNT] = { 0, 0 };
   int merged_sync_fd = -1;
   if (sync_type == NULL)
      return VK_ERROR_FEATURE_NOT_PRESENT;

   if (image->explicit_sync[WSI_ES_RELEASE].timeline == 0) {
      /* Signal immediately, there is no release to forward. */
      return wsi_create_sync_imm(device, sync_out);
   }

   /* Transfer over to a new sync file with a surrogate handle. */
   for (uint32_t i = 0; i < WSI_ES_COUNT; i++) {
      if (drmSyncobjCreate(device->drm_fd, 0, &tmp_handles[i])) {
         result = vk_errorf(NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY,
                            "Failed to create temp syncobj. Errno: %d - %s",
                            errno, strerror(errno));
         goto fail;
      }

      if (drmSyncobjTransfer(device->drm_fd, tmp_handles[i], 0,
                             image->explicit_sync[i].handle,
                             image->explicit_sync[i].timeline, 0)) {
         result = vk_errorf(NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY,
                            "Failed to transfer syncobj. Was the timeline "
                            "point materialized? Errno: %d - %s",
                            errno, strerror(errno));
         goto fail;
      }
      if (drmSyncobjExportSyncFile(device->drm_fd, tmp_handles[i],
                                   &sync_file_fds[i])) {
         result = vk_errorf(NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY,
                            "Failed to export sync file. Errno: %d - %s",
                            errno, strerror(errno));
         goto fail;
      }
   }

   merged_sync_fd = sync_merge("acquire merged sync",
                               sync_file_fds[WSI_ES_ACQUIRE],
                               sync_file_fds[WSI_ES_RELEASE]);
   if (merged_sync_fd < 0) {
      result = vk_errorf(NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY,
                         "Failed to merge acquire + release sync timelines. "
                         "Errno: %d - %s", errno, strerror(errno));
      goto fail;
   }

   result = vk_sync_create(device, sync_type, VK_SYNC_IS_SHAREABLE, 0, &sync);
   if (result != VK_SUCCESS)
      goto fail;

   result = vk_sync_import_sync_file(device, sync, merged_sync_fd);
   if (result != VK_SUCCESS)
      goto fail;

   *sync_out = sync;
   goto done;

fail:
   if (sync)
      vk_sync_destroy(device, sync);
done:
   for (uint32_t i = 0; i < WSI_ES_COUNT; i++) {
      if (tmp_handles[i])
         drmSyncobjDestroy(device->drm_fd, tmp_handles[i]);
   }
   for (uint32_t i = 0; i < WSI_ES_COUNT; i++) {
      if (sync_file_fds[i] >= 0)
         close(sync_file_fds[i]);
   }
   if (merged_sync_fd >= 0)
      close(merged_sync_fd);
   return result;
}


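/* Check whether two DRM fds refer to the same underlying device. */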
bool
wsi_common_drm_devices_equal(int fd_a, int fd_b)
{
   drmDevicePtr device_a, device_b;
   int ret;

   ret = drmGetDevice2(fd_a, 0, &device_a);
   if (ret)
      return false;

   ret = drmGetDevice2(fd_b, 0, &device_b);
   if (ret) {
      drmFreeDevice(&device_a);
      return false;
   }

   bool result = drmDevicesEqual(device_a, device_b);

   drmFreeDevice(&device_a);
   drmFreeDevice(&device_b);

   return result;
}

bool
wsi_device_matches_drm_fd(VkPhysicalDevice physicalDevice, int drm_fd)
{
   VK_FROM_HANDLE(vk_physical_device, pdevice, physicalDevice);
   const struct wsi_device *wsi = pdevice->wsi_device;

   drmDevicePtr fd_device;
   int ret = drmGetDevice2(drm_fd, 0, &fd_device);
   if (ret)
      return false;

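   /* Match on bus information. Only PCI devices are matched today; any
    * other bus type fails the match.
    */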
   bool match = false;
   switch (fd_device->bustype) {
   case DRM_BUS_PCI:
      match = wsi->pci_bus_info.pciDomain == fd_device->businfo.pci->domain &&
              wsi->pci_bus_info.pciBus == fd_device->businfo.pci->bus &&
              wsi->pci_bus_info.pciDevice == fd_device->businfo.pci->dev &&
              wsi->pci_bus_info.pciFunction == fd_device->businfo.pci->func;
      break;

   default:
      break;
   }

   drmFreeDevice(&fd_device);

   return match;
}

static uint32_t
prime_select_buffer_memory_type(const struct wsi_device *wsi,
                                uint32_t type_bits)
{
   return wsi_select_memory_type(wsi, 0 /* req_props */,
                                 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
                                 type_bits);
}

static const struct VkDrmFormatModifierPropertiesEXT *
get_modifier_props(const struct wsi_image_info *info, uint64_t modifier)
{
   for (uint32_t i = 0; i < info->modifier_prop_count; i++) {
      if (info->modifier_props[i].drmFormatModifier == modifier)
         return &info->modifier_props[i];
   }
   return NULL;
}

static VkResult
wsi_create_native_image_mem(const struct wsi_swapchain *chain,
                            const struct wsi_image_info *info,
                            struct wsi_image *image);

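/* Configure an image that is shared with the window system as a dma-buf.
 * With modifiers, this negotiates the intersection of what the driver
 * advertises via VK_EXT_image_drm_format_modifier and what the winsys
 * requested; without modifiers, it falls back to the legacy "scanout" flag.
 */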
static VkResult
wsi_configure_native_image(const struct wsi_swapchain *chain,
                           const VkSwapchainCreateInfoKHR *pCreateInfo,
                           const struct wsi_drm_image_params *params,
                           struct wsi_image_info *info)
{
   const struct wsi_device *wsi = chain->wsi;

   VkExternalMemoryHandleTypeFlags handle_type =
      VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;

   VkResult result = wsi_configure_image(chain, pCreateInfo, handle_type, info);
   if (result != VK_SUCCESS)
      return result;

   info->explicit_sync = params->explicit_sync;

   if (params->num_modifier_lists == 0) {
      /* If we don't have modifiers, fall back to the legacy "scanout" flag */
      info->wsi.scanout = true;
   } else {
      /* The winsys can't request modifiers if we don't support them. */
      assert(wsi->supports_modifiers);
      struct VkDrmFormatModifierPropertiesListEXT modifier_props_list = {
         .sType = VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT,
      };
      VkFormatProperties2 format_props = {
         .sType = VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2,
         .pNext = &modifier_props_list,
      };
      wsi->GetPhysicalDeviceFormatProperties2(wsi->pdevice,
                                              pCreateInfo->imageFormat,
                                              &format_props);
      assert(modifier_props_list.drmFormatModifierCount > 0);
      info->modifier_props =
         vk_alloc(&chain->alloc,
                  sizeof(*info->modifier_props) *
                  modifier_props_list.drmFormatModifierCount,
                  8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (info->modifier_props == NULL)
         goto fail_oom;

      modifier_props_list.pDrmFormatModifierProperties = info->modifier_props;
      wsi->GetPhysicalDeviceFormatProperties2(wsi->pdevice,
                                              pCreateInfo->imageFormat,
                                              &format_props);

      /* Call GetImageFormatProperties with every modifier and filter the list
       * down to those that we know work.
       */
      info->modifier_prop_count = 0;
      for (uint32_t i = 0; i < modifier_props_list.drmFormatModifierCount; i++) {
         VkPhysicalDeviceImageDrmFormatModifierInfoEXT mod_info = {
            .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT,
            .drmFormatModifier = info->modifier_props[i].drmFormatModifier,
            .sharingMode = pCreateInfo->imageSharingMode,
            .queueFamilyIndexCount = pCreateInfo->queueFamilyIndexCount,
            .pQueueFamilyIndices = pCreateInfo->pQueueFamilyIndices,
         };
         VkPhysicalDeviceImageFormatInfo2 format_info = {
            .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2,
            .format = pCreateInfo->imageFormat,
            .type = VK_IMAGE_TYPE_2D,
            .tiling = VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT,
            .usage = pCreateInfo->imageUsage,
            .flags = info->create.flags,
         };

         VkImageFormatListCreateInfo format_list;
         if (info->create.flags & VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT) {
            format_list = info->format_list;
            format_list.pNext = NULL;
            __vk_append_struct(&format_info, &format_list);
         }

         struct wsi_image_create_info wsi_info = (struct wsi_image_create_info) {
            .sType = VK_STRUCTURE_TYPE_WSI_IMAGE_CREATE_INFO_MESA,
            .pNext = NULL,
         };
         __vk_append_struct(&format_info, &wsi_info);

         VkImageFormatProperties2 format_props = {
            .sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2,
            .pNext = NULL,
         };
         __vk_append_struct(&format_info, &mod_info);
         result = wsi->GetPhysicalDeviceImageFormatProperties2(wsi->pdevice,
                                                               &format_info,
                                                               &format_props);
         if (result == VK_SUCCESS &&
             pCreateInfo->imageExtent.width <= format_props.imageFormatProperties.maxExtent.width &&
             pCreateInfo->imageExtent.height <= format_props.imageFormatProperties.maxExtent.height)
            info->modifier_props[info->modifier_prop_count++] = info->modifier_props[i];
      }

      uint32_t max_modifier_count = 0;
      for (uint32_t l = 0; l < params->num_modifier_lists; l++)
         max_modifier_count = MAX2(max_modifier_count, params->num_modifiers[l]);

      uint64_t *image_modifiers =
         vk_alloc(&chain->alloc, sizeof(*image_modifiers) * max_modifier_count,
                  8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!image_modifiers)
         goto fail_oom;

      uint32_t image_modifier_count = 0;
      for (uint32_t l = 0; l < params->num_modifier_lists; l++) {
         /* Walk the modifier lists and construct a list of supported
          * modifiers.
          */
         for (uint32_t i = 0; i < params->num_modifiers[l]; i++) {
            if (get_modifier_props(info, params->modifiers[l][i]))
               image_modifiers[image_modifier_count++] = params->modifiers[l][i];
         }

         /* We only want to take the modifiers from the first list */
         if (image_modifier_count > 0)
            break;
      }

      if (image_modifier_count > 0) {
         info->create.tiling = VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT;
         info->drm_mod_list = (VkImageDrmFormatModifierListCreateInfoEXT) {
            .sType = VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT,
            .drmFormatModifierCount = image_modifier_count,
            .pDrmFormatModifiers = image_modifiers,
         };
         image_modifiers = NULL;
         __vk_append_struct(&info->create, &info->drm_mod_list);
      } else {
         vk_free(&chain->alloc, image_modifiers);
         /* TODO: Add a proper error here */
         assert(!"Failed to find a supported modifier! This should never "
                 "happen because LINEAR should always be available");
         goto fail_oom;
      }
   }

   info->create_mem = wsi_create_native_image_mem;

   return VK_SUCCESS;

fail_oom:
   wsi_destroy_image_info(chain, info);
   return VK_ERROR_OUT_OF_HOST_MEMORY;
}

static VkResult
wsi_init_image_dmabuf_fd(const struct wsi_swapchain *chain,
                         struct wsi_image *image,
                         bool linear)
{
   const struct wsi_device *wsi = chain->wsi;
   const VkMemoryGetFdInfoKHR memory_get_fd_info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR,
      .pNext = NULL,
      .memory = linear ? image->blit.memory : image->memory,
      .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
   };

   return wsi->GetMemoryFdKHR(chain->device, &memory_get_fd_info,
                              &image->dma_buf_fd);
}

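/* Allocate dedicated, dma-buf-exportable memory for a native image and
 * record the DRM format modifier and per-plane layout the winsys needs.
 */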
static VkResult
wsi_create_native_image_mem(const struct wsi_swapchain *chain,
                            const struct wsi_image_info *info,
                            struct wsi_image *image)
{
   const struct wsi_device *wsi = chain->wsi;
   VkResult result;

   VkMemoryRequirements reqs;
   wsi->GetImageMemoryRequirements(chain->device, image->image, &reqs);

   const struct wsi_memory_allocate_info memory_wsi_info = {
      .sType = VK_STRUCTURE_TYPE_WSI_MEMORY_ALLOCATE_INFO_MESA,
      .pNext = NULL,
      .implicit_sync = !info->explicit_sync,
   };
   const VkExportMemoryAllocateInfo memory_export_info = {
      .sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO,
      .pNext = &memory_wsi_info,
      .handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
   };
   const VkMemoryDedicatedAllocateInfo memory_dedicated_info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO,
      .pNext = &memory_export_info,
      .image = image->image,
      .buffer = VK_NULL_HANDLE,
   };
   const VkMemoryAllocateInfo memory_info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
      .pNext = &memory_dedicated_info,
      .allocationSize = reqs.size,
      .memoryTypeIndex =
         wsi_select_device_memory_type(wsi, reqs.memoryTypeBits),
   };
   result = wsi->AllocateMemory(chain->device, &memory_info,
                                &chain->alloc, &image->memory);
   if (result != VK_SUCCESS)
      return result;

   result = wsi_init_image_dmabuf_fd(chain, image, false);
   if (result != VK_SUCCESS)
      return result;

   if (info->drm_mod_list.drmFormatModifierCount > 0) {
      VkImageDrmFormatModifierPropertiesEXT image_mod_props = {
         .sType = VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT,
      };
      result = wsi->GetImageDrmFormatModifierPropertiesEXT(chain->device,
                                                           image->image,
                                                           &image_mod_props);
      if (result != VK_SUCCESS)
         return result;

      image->drm_modifier = image_mod_props.drmFormatModifier;
      assert(image->drm_modifier != DRM_FORMAT_MOD_INVALID);

      const struct VkDrmFormatModifierPropertiesEXT *mod_props =
         get_modifier_props(info, image->drm_modifier);
      image->num_planes = mod_props->drmFormatModifierPlaneCount;

      for (uint32_t p = 0; p < image->num_planes; p++) {
         const VkImageSubresource image_subresource = {
            .aspectMask = VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT << p,
            .mipLevel = 0,
            .arrayLayer = 0,
         };
         VkSubresourceLayout image_layout;
         wsi->GetImageSubresourceLayout(chain->device, image->image,
                                        &image_subresource, &image_layout);
         image->sizes[p] = image_layout.size;
         image->row_pitches[p] = image_layout.rowPitch;
         image->offsets[p] = image_layout.offset;
      }
   } else {
      const VkImageSubresource image_subresource = {
         .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
         .mipLevel = 0,
         .arrayLayer = 0,
      };
      VkSubresourceLayout image_layout;
      wsi->GetImageSubresourceLayout(chain->device, image->image,
                                     &image_subresource, &image_layout);

      image->drm_modifier = DRM_FORMAT_MOD_INVALID;
      image->num_planes = 1;
      image->sizes[0] = reqs.size;
      image->row_pitches[0] = image_layout.rowPitch;
      image->offsets[0] = 0;
   }

   return VK_SUCCESS;
}

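/* Prime (cross-GPU) path: the image is rendered on one device and blitted
 * into a linear buffer that the presenting device can import.
 */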
#define WSI_PRIME_LINEAR_STRIDE_ALIGN 256

static VkResult
wsi_create_prime_image_mem(const struct wsi_swapchain *chain,
                           const struct wsi_image_info *info,
                           struct wsi_image *image)
{
   VkResult result =
      wsi_create_buffer_blit_context(chain, info, image,
                                     VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
   if (result != VK_SUCCESS)
      return result;

   result = wsi_init_image_dmabuf_fd(chain, image, true);
   if (result != VK_SUCCESS)
      return result;

   image->drm_modifier = info->prime_use_linear_modifier ?
                         DRM_FORMAT_MOD_LINEAR : DRM_FORMAT_MOD_INVALID;

   return VK_SUCCESS;
}

static VkResult
wsi_configure_prime_image(UNUSED const struct wsi_swapchain *chain,
                          const VkSwapchainCreateInfoKHR *pCreateInfo,
                          const struct wsi_drm_image_params *params,
                          struct wsi_image_info *info)
{
   bool use_modifier = params->num_modifier_lists > 0;
   wsi_memory_type_select_cb select_buffer_memory_type =
      params->same_gpu ? wsi_select_device_memory_type :
      prime_select_buffer_memory_type;

   VkResult result = wsi_configure_image(chain, pCreateInfo,
                                         0 /* handle_types */, info);
   if (result != VK_SUCCESS)
      return result;

   info->explicit_sync = params->explicit_sync;

   wsi_configure_buffer_image(chain, pCreateInfo,
                              WSI_PRIME_LINEAR_STRIDE_ALIGN, 4096,
                              info);
   info->prime_use_linear_modifier = use_modifier;

   info->create_mem = wsi_create_prime_image_mem;
   info->select_blit_dst_memory_type = select_buffer_memory_type;
   info->select_image_memory_type = wsi_select_device_memory_type;

   return VK_SUCCESS;
}

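/* A buffer blit is needed when presenting across GPUs, or when the driver
 * supports neither modifiers nor legacy scanout images.
 */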
bool
wsi_drm_image_needs_buffer_blit(const struct wsi_device *wsi,
                                const struct wsi_drm_image_params *params)
{
   if (!params->same_gpu)
      return true;

   if (params->num_modifier_lists > 0 || wsi->supports_scanout)
      return false;

   return true;
}

VkResult
wsi_drm_configure_image(const struct wsi_swapchain *chain,
                        const VkSwapchainCreateInfoKHR *pCreateInfo,
                        const struct wsi_drm_image_params *params,
                        struct wsi_image_info *info)
{
   assert(params->base.image_type == WSI_IMAGE_TYPE_DRM);

   if (chain->blit.type == WSI_SWAPCHAIN_BUFFER_BLIT) {
      return wsi_configure_prime_image(chain, pCreateInfo,
                                       params,
                                       info);
   } else {
      return wsi_configure_native_image(chain, pCreateInfo,
                                        params,
                                        info);
   }
}

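/* CPU-visible state of an image's explicit-sync timeline points.
 * "Materialized" means a fence exists for the point (it may still be
 * pending on the GPU); "signalled" means the point has actually fired.
 */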
enum wsi_explicit_sync_state_flags
{
   WSI_ES_STATE_RELEASE_MATERIALIZED = (1u << 0),
   WSI_ES_STATE_RELEASE_SIGNALLED    = (1u << 1),
   WSI_ES_STATE_ACQUIRE_SIGNALLED    = (1u << 2),
};

/* Levels of "freeness":
 * 0 -> Acquire Signalled + Release Signalled
 * 1 -> Acquire Signalled + Release Materialized
 * 2 -> Release Signalled
 * 3 -> Release Materialized
 */
static const uint32_t wsi_explicit_sync_free_levels[] = {
   (WSI_ES_STATE_RELEASE_SIGNALLED | WSI_ES_STATE_RELEASE_MATERIALIZED | WSI_ES_STATE_ACQUIRE_SIGNALLED),
   (WSI_ES_STATE_RELEASE_MATERIALIZED | WSI_ES_STATE_ACQUIRE_SIGNALLED),
   (WSI_ES_STATE_RELEASE_MATERIALIZED | WSI_ES_STATE_RELEASE_SIGNALLED),
   (WSI_ES_STATE_RELEASE_MATERIALIZED),
};

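/* Query the kernel once for the current value of every un-acquired image's
 * acquire/release timelines and translate the results into WSI_ES_STATE_*
 * flags for the free-level selection in
 * wsi_drm_wait_for_explicit_sync_release().
 */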
static void
wsi_drm_images_explicit_sync_state(struct vk_device *device, int count,
                                   uint32_t *indices, struct wsi_image **images,
                                   uint32_t *flags)
{
   struct wsi_image *image;
   int i;

   memset(flags, 0, count * sizeof(flags[0]));

   for (i = 0; i < count; i++) {
      if (images[indices[i]]->explicit_sync[WSI_ES_RELEASE].timeline == 0) {
         /* This image has never been used in a timeline.
          * It must be free.
          */
         flags[i] = WSI_ES_STATE_RELEASE_SIGNALLED | WSI_ES_STATE_RELEASE_MATERIALIZED | WSI_ES_STATE_ACQUIRE_SIGNALLED;
         return;
      }
   }

   STACK_ARRAY(uint64_t, points, count * WSI_ES_COUNT);
   STACK_ARRAY(uint32_t, handles, count * WSI_ES_COUNT);

   for (i = 0; i < count; i++) {
      points[i * WSI_ES_COUNT + WSI_ES_ACQUIRE] = 0;
      points[i * WSI_ES_COUNT + WSI_ES_RELEASE] = 0;

      image = images[indices[i]];
      handles[i * WSI_ES_COUNT + WSI_ES_ACQUIRE] = image->explicit_sync[WSI_ES_ACQUIRE].handle;
      handles[i * WSI_ES_COUNT + WSI_ES_RELEASE] = image->explicit_sync[WSI_ES_RELEASE].handle;
   }

   int ret = drmSyncobjQuery(device->drm_fd, handles, points, count * WSI_ES_COUNT);
   if (ret)
      goto done;

   for (i = 0; i < count; i++) {
      image = images[indices[i]];

      if (points[i * WSI_ES_COUNT + WSI_ES_ACQUIRE] >= image->explicit_sync[WSI_ES_ACQUIRE].timeline)
         flags[i] |= WSI_ES_STATE_ACQUIRE_SIGNALLED;

      if (points[i * WSI_ES_COUNT + WSI_ES_RELEASE] >= image->explicit_sync[WSI_ES_RELEASE].timeline) {
         flags[i] |= WSI_ES_STATE_RELEASE_SIGNALLED | WSI_ES_STATE_RELEASE_MATERIALIZED;
      } else {
         uint32_t first_signalled;
         ret = drmSyncobjTimelineWait(device->drm_fd, &handles[i * WSI_ES_COUNT + WSI_ES_RELEASE],
                                      &image->explicit_sync[WSI_ES_RELEASE].timeline, 1, 0,
                                      DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE, &first_signalled);
         if (ret == 0)
            flags[i] |= WSI_ES_STATE_RELEASE_MATERIALIZED;
      }
   }

done:
   STACK_ARRAY_FINISH(handles);
   STACK_ARRAY_FINISH(points);
}

914
915 static uint64_t
wsi_drm_rel_timeout_to_abs(uint64_t rel_timeout_ns)916 wsi_drm_rel_timeout_to_abs(uint64_t rel_timeout_ns)
917 {
918 uint64_t cur_time_ns = os_time_get_nano();
919
920 /* Syncobj timeouts are signed */
921 return rel_timeout_ns > INT64_MAX - cur_time_ns
922 ? INT64_MAX
923 : cur_time_ns + rel_timeout_ns;
924 }
925
926 VkResult
wsi_drm_wait_for_explicit_sync_release(struct wsi_swapchain * chain,uint32_t image_count,struct wsi_image ** images,uint64_t rel_timeout_ns,uint32_t * image_index)927 wsi_drm_wait_for_explicit_sync_release(struct wsi_swapchain *chain,
928 uint32_t image_count,
929 struct wsi_image **images,
930 uint64_t rel_timeout_ns,
931 uint32_t *image_index)
932 {
933 STACK_ARRAY(uint32_t, handles, image_count);
934 STACK_ARRAY(uint64_t, points, image_count);
935 STACK_ARRAY(uint32_t, indices, image_count);
936 STACK_ARRAY(uint32_t, flags, image_count);
937 VK_FROM_HANDLE(vk_device, device, chain->device);
938 int ret = 0;
939
940 /* We don't need to wait for the merged timeline on the CPU,
941 * only on the GPU side of things.
942 *
943 * We already know that the CPU side for the acquire has materialized,
944 * for all images in this array.
945 * That's what "busy"/"free" essentially represents.
946 */
947 uint32_t unacquired_image_count = 0;
948 for (uint32_t i = 0; i < image_count; i++) {
949 if (images[i]->acquired)
950 continue;
951
952 handles[unacquired_image_count] = images[i]->explicit_sync[WSI_ES_RELEASE].handle;
953 points[unacquired_image_count] = images[i]->explicit_sync[WSI_ES_RELEASE].timeline;
954 indices[unacquired_image_count] = i;
955 unacquired_image_count++;
956 }
957
958 /* Handle the case where there are no images to possible acquire. */
959 if (!unacquired_image_count) {
960 ret = -ETIME;
961 goto done;
962 }
963
964 wsi_drm_images_explicit_sync_state(device, unacquired_image_count, indices, images, flags);
965
966 /* Find the most optimal image using the free levels above. */
967 for (uint32_t free_level_idx = 0; free_level_idx < ARRAY_SIZE(wsi_explicit_sync_free_levels); free_level_idx++) {
968 uint32_t free_level = wsi_explicit_sync_free_levels[free_level_idx];
969
970 uint64_t present_serial = UINT64_MAX;
971 for (uint32_t i = 0; i < unacquired_image_count; i++) {
972 /* Pick the image that was presented longest ago inside
973 * of this free level, so it has the highest chance of
974 * being totally free the soonest.
975 */
976 if ((flags[i] & free_level) == free_level &&
977 images[indices[i]]->present_serial < present_serial) {
978 *image_index = indices[i];
979 present_serial = images[indices[i]]->present_serial;
980 }
981 }
982 if (present_serial != UINT64_MAX)
983 goto done;
984 }
985
986 /* Use DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE so we do not need to wait for the
987 * compositor's GPU work to be finished to acquire on the CPU side.
988 *
989 * We will forward the GPU signal to the VkSemaphore/VkFence of the acquire.
990 */
991 uint32_t first_signalled;
992 ret = drmSyncobjTimelineWait(device->drm_fd, handles, points, unacquired_image_count,
993 wsi_drm_rel_timeout_to_abs(rel_timeout_ns),
994 DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE,
995 &first_signalled);
996
997 /* Return the first image that materialized. */
998 if (ret != 0)
999 goto done;
1000
1001 *image_index = indices[first_signalled];
1002 done:
1003 STACK_ARRAY_FINISH(flags);
1004 STACK_ARRAY_FINISH(indices);
1005 STACK_ARRAY_FINISH(points);
1006 STACK_ARRAY_FINISH(handles);
1007
1008 if (ret == 0)
1009 return VK_SUCCESS;
1010 else if (ret == -ETIME)
1011 return rel_timeout_ns ? VK_TIMEOUT : VK_NOT_READY;
1012 else
1013 return VK_ERROR_OUT_OF_DATE_KHR;
1014 }
1015