1 /*
2 * Copyright © 2017 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "wsi_common_private.h"
25 #include "wsi_common_entrypoints.h"
26 #include "util/u_debug.h"
27 #include "util/macros.h"
28 #include "util/os_file.h"
29 #include "util/os_time.h"
30 #include "util/xmlconfig.h"
31 #include "vk_device.h"
32 #include "vk_fence.h"
33 #include "vk_format.h"
34 #include "vk_instance.h"
35 #include "vk_physical_device.h"
36 #include "vk_queue.h"
37 #include "vk_semaphore.h"
38 #include "vk_sync.h"
39 #include "vk_sync_dummy.h"
40 #include "vk_util.h"
41
42 #include <time.h>
43 #include <stdlib.h>
44 #include <stdio.h>
45
46 #ifndef _WIN32
47 #include <unistd.h>
48 #endif
49
/* Bitmask of WSI_DEBUG_* flags, parsed once from the MESA_VK_WSI_DEBUG
 * environment variable in wsi_device_init().
 */
uint64_t WSI_DEBUG;

/* Maps MESA_VK_WSI_DEBUG option names to their corresponding debug flags.
 * The NULL entry terminates the table for parse_debug_string().
 */
static const struct debug_control debug_control[] = {
   { "buffer", WSI_DEBUG_BUFFER },
   { "sw", WSI_DEBUG_SW },
   { "noshm", WSI_DEBUG_NOSHM },
   { "linear", WSI_DEBUG_LINEAR },
   { "dxgi", WSI_DEBUG_DXGI },
   { NULL, },
};
60
present_false(VkPhysicalDevice pdevice,int fd)61 static bool present_false(VkPhysicalDevice pdevice, int fd) {
62 return false;
63 }
64
/* Initialize the common WSI layer for a physical device.
 *
 * Queries device properties/limits, caches the physical-device and
 * device-level entrypoints it needs via proc_addr, detects external
 * semaphore capabilities, then initializes every platform backend that
 * was compiled in.  On failure, already-initialized backends are torn
 * down via wsi_device_finish().
 *
 * \param wsi             Zero-initialized here; filled in on success.
 * \param proc_addr       Used to resolve all vk* entrypoints below.
 * \param display_fd      Passed through to the display (KMS) backend.
 * \param dri_options     Optional driconf cache; may be NULL.
 * \param device_options  Driver-supplied options (sw device, etc.).
 */
VkResult
wsi_device_init(struct wsi_device *wsi,
                VkPhysicalDevice pdevice,
                WSI_FN_GetPhysicalDeviceProcAddr proc_addr,
                const VkAllocationCallbacks *alloc,
                int display_fd,
                const struct driOptionCache *dri_options,
                const struct wsi_device_options *device_options)
{
   const char *present_mode;
   /* UNUSED: when no platform backend is compiled in, result is never read. */
   UNUSED VkResult result;

   WSI_DEBUG = parse_debug_string(getenv("MESA_VK_WSI_DEBUG"), debug_control);

   util_cpu_trace_init();

   memset(wsi, 0, sizeof(*wsi));

   wsi->instance_alloc = *alloc;
   wsi->pdevice = pdevice;
   wsi->supports_scanout = true;
   /* Software rendering can be forced either by the driver or the debug env. */
   wsi->sw = device_options->sw_device || (WSI_DEBUG & WSI_DEBUG_SW);
   wsi->wants_linear = (WSI_DEBUG & WSI_DEBUG_LINEAR) != 0;
   wsi->x11.extra_xwayland_image = device_options->extra_xwayland_image;
   /* Resolve the physical-device-level entrypoints needed below into locals. */
#define WSI_GET_CB(func) \
   PFN_vk##func func = (PFN_vk##func)proc_addr(pdevice, "vk" #func)
   WSI_GET_CB(GetPhysicalDeviceExternalSemaphoreProperties);
   WSI_GET_CB(GetPhysicalDeviceProperties2);
   WSI_GET_CB(GetPhysicalDeviceMemoryProperties);
   WSI_GET_CB(GetPhysicalDeviceQueueFamilyProperties);
#undef WSI_GET_CB

   /* Chain DRM and PCI bus info queries onto the properties2 call so we
    * learn the device's DRM node identity in one query.
    */
   wsi->drm_info.sType =
      VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRM_PROPERTIES_EXT;
   wsi->pci_bus_info.sType =
      VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT;
   wsi->pci_bus_info.pNext = &wsi->drm_info;
   VkPhysicalDeviceProperties2 pdp2 = {
      .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
      .pNext = &wsi->pci_bus_info,
   };
   GetPhysicalDeviceProperties2(pdevice, &pdp2);

   wsi->maxImageDimension2D = pdp2.properties.limits.maxImageDimension2D;
   assert(pdp2.properties.limits.optimalBufferCopyRowPitchAlignment <= UINT32_MAX);
   wsi->optimalBufferCopyRowPitchAlignment =
      pdp2.properties.limits.optimalBufferCopyRowPitchAlignment;
   /* MAX_ENUM means "no override"; may be replaced by the env var below. */
   wsi->override_present_mode = VK_PRESENT_MODE_MAX_ENUM_KHR;

   GetPhysicalDeviceMemoryProperties(pdevice, &wsi->memory_props);
   GetPhysicalDeviceQueueFamilyProperties(pdevice, &wsi->queue_family_count, NULL);

   /* queue_supports_blit is a 64-bit mask, so at most 64 families fit. */
   assert(wsi->queue_family_count <= 64);
   VkQueueFamilyProperties queue_properties[64];
   GetPhysicalDeviceQueueFamilyProperties(pdevice, &wsi->queue_family_count, queue_properties);

   /* Record which queue families can execute the copies WSI may need. */
   for (unsigned i = 0; i < wsi->queue_family_count; i++) {
      VkFlags req_flags = VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT;
      if (queue_properties[i].queueFlags & req_flags)
         wsi->queue_supports_blit |= BITFIELD64_BIT(i);
   }

   /* Probe every external-semaphore handle type (binary and timeline)
    * for exportability so the present code can pick a usable one later.
    */
   for (VkExternalSemaphoreHandleTypeFlags handle_type = 1;
        handle_type <= VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
        handle_type <<= 1) {
      VkPhysicalDeviceExternalSemaphoreInfo esi = {
         .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO,
         .handleType = handle_type,
      };
      VkExternalSemaphoreProperties esp = {
         .sType = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES,
      };
      GetPhysicalDeviceExternalSemaphoreProperties(pdevice, &esi, &esp);

      if (esp.externalSemaphoreFeatures &
          VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT)
         wsi->semaphore_export_handle_types |= handle_type;

      /* Re-query with a timeline semaphore type chained in. */
      VkSemaphoreTypeCreateInfo timeline_tci = {
         .sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO,
         .semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE_KHR,
      };
      esi.pNext = &timeline_tci;
      GetPhysicalDeviceExternalSemaphoreProperties(pdevice, &esi, &esp);

      if (esp.externalSemaphoreFeatures &
          VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT)
         wsi->timeline_semaphore_export_handle_types |= handle_type;
   }

   const struct vk_device_extension_table *supported_extensions =
      &vk_physical_device_from_handle(pdevice)->supported_extensions;
   wsi->has_import_memory_host =
      supported_extensions->EXT_external_memory_host;
   wsi->khr_present_wait =
      supported_extensions->KHR_present_id &&
      supported_extensions->KHR_present_wait;
   wsi->has_timeline_semaphore =
      supported_extensions->KHR_timeline_semaphore;

   /* We cannot expose KHR_present_wait without timeline semaphores. */
   assert(!wsi->khr_present_wait || supported_extensions->KHR_timeline_semaphore);

   list_inithead(&wsi->hotplug_fences);

   /* Cache the device-level entrypoints WSI will call at runtime. */
#define WSI_GET_CB(func) \
   wsi->func = (PFN_vk##func)proc_addr(pdevice, "vk" #func)
   WSI_GET_CB(AllocateMemory);
   WSI_GET_CB(AllocateCommandBuffers);
   WSI_GET_CB(BindBufferMemory);
   WSI_GET_CB(BindImageMemory);
   WSI_GET_CB(BeginCommandBuffer);
   WSI_GET_CB(CmdPipelineBarrier);
   WSI_GET_CB(CmdCopyImage);
   WSI_GET_CB(CmdCopyImageToBuffer);
   WSI_GET_CB(CreateBuffer);
   WSI_GET_CB(CreateCommandPool);
   WSI_GET_CB(CreateFence);
   WSI_GET_CB(CreateImage);
   WSI_GET_CB(CreateSemaphore);
   WSI_GET_CB(DestroyBuffer);
   WSI_GET_CB(DestroyCommandPool);
   WSI_GET_CB(DestroyFence);
   WSI_GET_CB(DestroyImage);
   WSI_GET_CB(DestroySemaphore);
   WSI_GET_CB(EndCommandBuffer);
   WSI_GET_CB(FreeMemory);
   WSI_GET_CB(FreeCommandBuffers);
   WSI_GET_CB(GetBufferMemoryRequirements);
   WSI_GET_CB(GetFenceStatus);
   WSI_GET_CB(GetImageDrmFormatModifierPropertiesEXT);
   WSI_GET_CB(GetImageMemoryRequirements);
   WSI_GET_CB(GetImageSubresourceLayout);
   /* Software paths never export dma-bufs, so skip the fd entrypoint. */
   if (!wsi->sw)
      WSI_GET_CB(GetMemoryFdKHR);
   WSI_GET_CB(GetPhysicalDeviceFormatProperties);
   WSI_GET_CB(GetPhysicalDeviceFormatProperties2);
   WSI_GET_CB(GetPhysicalDeviceImageFormatProperties2);
   WSI_GET_CB(GetSemaphoreFdKHR);
   WSI_GET_CB(ResetFences);
   WSI_GET_CB(QueueSubmit);
   WSI_GET_CB(WaitForFences);
   WSI_GET_CB(MapMemory);
   WSI_GET_CB(UnmapMemory);
   if (wsi->khr_present_wait)
      WSI_GET_CB(WaitSemaphores);
#undef WSI_GET_CB

   /* Initialize each compiled-in platform backend; on any failure, tear
    * down the ones that already succeeded via wsi_device_finish().
    */
#if defined(VK_USE_PLATFORM_XCB_KHR)
   result = wsi_x11_init_wsi(wsi, alloc, dri_options);
   if (result != VK_SUCCESS)
      goto fail;
#endif

#ifdef VK_USE_PLATFORM_WAYLAND_KHR
   result = wsi_wl_init_wsi(wsi, alloc, pdevice);
   if (result != VK_SUCCESS)
      goto fail;
#endif

#ifdef VK_USE_PLATFORM_WIN32_KHR
   result = wsi_win32_init_wsi(wsi, alloc, pdevice);
   if (result != VK_SUCCESS)
      goto fail;
#endif

#ifdef VK_USE_PLATFORM_DISPLAY_KHR
   result = wsi_display_init_wsi(wsi, alloc, display_fd);
   if (result != VK_SUCCESS)
      goto fail;
#endif

#ifdef VK_USE_PLATFORM_METAL_EXT
   result = wsi_metal_init_wsi(wsi, alloc, pdevice);
   if (result != VK_SUCCESS)
      goto fail;
#endif

#ifndef VK_USE_PLATFORM_WIN32_KHR
   result = wsi_headless_init_wsi(wsi, alloc, pdevice);
   if (result != VK_SUCCESS)
      goto fail;
#endif

   /* Optional developer override of the present mode for every swapchain. */
   present_mode = getenv("MESA_VK_WSI_PRESENT_MODE");
   if (present_mode) {
      if (!strcmp(present_mode, "fifo")) {
         wsi->override_present_mode = VK_PRESENT_MODE_FIFO_KHR;
      } else if (!strcmp(present_mode, "relaxed")) {
         wsi->override_present_mode = VK_PRESENT_MODE_FIFO_RELAXED_KHR;
      } else if (!strcmp(present_mode, "mailbox")) {
         wsi->override_present_mode = VK_PRESENT_MODE_MAILBOX_KHR;
      } else if (!strcmp(present_mode, "immediate")) {
         wsi->override_present_mode = VK_PRESENT_MODE_IMMEDIATE_KHR;
      } else {
         fprintf(stderr, "Invalid MESA_VK_WSI_PRESENT_MODE value!\n");
      }
   }

   wsi->force_headless_swapchain =
      debug_get_bool_option("MESA_VK_WSI_HEADLESS_SWAPCHAIN", false);

   if (dri_options) {
      if (driCheckOption(dri_options, "adaptive_sync", DRI_BOOL))
         wsi->enable_adaptive_sync = driQueryOptionb(dri_options,
                                                     "adaptive_sync");

      if (driCheckOption(dri_options, "vk_wsi_force_bgra8_unorm_first", DRI_BOOL)) {
         wsi->force_bgra8_unorm_first =
            driQueryOptionb(dri_options, "vk_wsi_force_bgra8_unorm_first");
      }

      if (driCheckOption(dri_options, "vk_wsi_force_swapchain_to_current_extent", DRI_BOOL)) {
         wsi->force_swapchain_to_currentExtent =
            driQueryOptionb(dri_options, "vk_wsi_force_swapchain_to_current_extent");
      }
   }

   /* can_present_on_device is a function pointer used to determine if images
    * can be presented directly on a given device file descriptor (fd).
    * If HAVE_LIBDRM is defined, it will be initialized to a platform-specific
    * function (wsi_device_matches_drm_fd). Otherwise, it is initialized to
    * present_false to ensure that it always returns false, preventing potential
    * segmentation faults from unchecked calls.
    * Drivers for non-PCI based GPUs are expected to override this after calling
    * wsi_device_init().
    */
#ifdef HAVE_LIBDRM
   wsi->can_present_on_device = wsi_device_matches_drm_fd;
#else
   wsi->can_present_on_device = present_false;
#endif

   return VK_SUCCESS;
fail:
   wsi_device_finish(wsi, alloc);
   return result;
}
303
/* Tear down all platform backends initialized by wsi_device_init().
 * Safe to call from wsi_device_init's failure path: each backend's
 * finish function tolerates a backend that was never initialized.
 */
void
wsi_device_finish(struct wsi_device *wsi,
                  const VkAllocationCallbacks *alloc)
{
#ifndef VK_USE_PLATFORM_WIN32_KHR
   wsi_headless_finish_wsi(wsi, alloc);
#endif
#ifdef VK_USE_PLATFORM_DISPLAY_KHR
   wsi_display_finish_wsi(wsi, alloc);
#endif
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
   wsi_wl_finish_wsi(wsi, alloc);
#endif
#ifdef VK_USE_PLATFORM_WIN32_KHR
   wsi_win32_finish_wsi(wsi, alloc);
#endif
#if defined(VK_USE_PLATFORM_XCB_KHR)
   wsi_x11_finish_wsi(wsi, alloc);
#endif
#if defined(VK_USE_PLATFORM_METAL_EXT)
   wsi_metal_finish_wsi(wsi, alloc);
#endif
}
327
/* vkDestroySurfaceKHR implementation.
 *
 * Wayland and Win32 surfaces own extra per-surface state and are torn
 * down by their platform-specific destroy functions; every other
 * platform's surface is a plain allocation freed directly here.
 */
VKAPI_ATTR void VKAPI_CALL
wsi_DestroySurfaceKHR(VkInstance _instance,
                      VkSurfaceKHR _surface,
                      const VkAllocationCallbacks *pAllocator)
{
   VK_FROM_HANDLE(vk_instance, instance, _instance);
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);

   /* Destroying VK_NULL_HANDLE is a valid no-op per the Vulkan spec. */
   if (!surface)
      return;

#ifdef VK_USE_PLATFORM_WAYLAND_KHR
   if (surface->platform == VK_ICD_WSI_PLATFORM_WAYLAND) {
      wsi_wl_surface_destroy(surface, _instance, pAllocator);
      return;
   }
#endif
#ifdef VK_USE_PLATFORM_WIN32_KHR
   if (surface->platform == VK_ICD_WSI_PLATFORM_WIN32) {
      wsi_win32_surface_destroy(surface, _instance, pAllocator);
      return;
   }
#endif

   vk_free2(&instance->alloc, pAllocator, surface);
}
354
/* Hand a DRM syncobj-capable fd to the display backend (the only
 * backend that uses one).  No-op when the display platform is not
 * compiled in.
 */
void
wsi_device_setup_syncobj_fd(struct wsi_device *wsi_device,
                            int fd)
{
#ifdef VK_USE_PLATFORM_DISPLAY_KHR
   wsi_display_setup_syncobj_fd(wsi_device, fd);
#endif
}
363
/* Decide which blit strategy (if any) a swapchain needs for the given
 * image backend, by delegating to the backend-specific "needs blit"
 * check.  DRM and DXGI cases exist only when their platform is built.
 */
static enum wsi_swapchain_blit_type
get_blit_type(const struct wsi_device *wsi,
              const struct wsi_base_image_params *params,
              VkDevice device)
{
   switch (params->image_type) {
   case WSI_IMAGE_TYPE_CPU: {
      const struct wsi_cpu_image_params *cpu_params =
         container_of(params, const struct wsi_cpu_image_params, base);
      return wsi_cpu_image_needs_buffer_blit(wsi, cpu_params) ?
         WSI_SWAPCHAIN_BUFFER_BLIT : WSI_SWAPCHAIN_NO_BLIT;
   }
#ifdef HAVE_LIBDRM
   case WSI_IMAGE_TYPE_DRM: {
      const struct wsi_drm_image_params *drm_params =
         container_of(params, const struct wsi_drm_image_params, base);
      return wsi_drm_image_needs_buffer_blit(wsi, drm_params) ?
         WSI_SWAPCHAIN_BUFFER_BLIT : WSI_SWAPCHAIN_NO_BLIT;
   }
#endif
#ifdef _WIN32
   case WSI_IMAGE_TYPE_DXGI: {
      const struct wsi_dxgi_image_params *dxgi_params =
         container_of(params, const struct wsi_dxgi_image_params, base);
      return wsi_dxgi_image_needs_blit(wsi, dxgi_params, device);
   }
#endif
   default:
      unreachable("Invalid image type");
   }
}
395
/* Fill in *info for the given image backend by dispatching to the
 * backend-specific configure function.  Mirrors the dispatch in
 * get_blit_type().
 */
static VkResult
configure_image(const struct wsi_swapchain *chain,
                const VkSwapchainCreateInfoKHR *pCreateInfo,
                const struct wsi_base_image_params *params,
                struct wsi_image_info *info)
{
   info->image_type = params->image_type;
   switch (params->image_type) {
   case WSI_IMAGE_TYPE_CPU: {
      const struct wsi_cpu_image_params *cpu_params =
         container_of(params, const struct wsi_cpu_image_params, base);
      return wsi_configure_cpu_image(chain, pCreateInfo, cpu_params, info);
   }
#ifdef HAVE_LIBDRM
   case WSI_IMAGE_TYPE_DRM: {
      const struct wsi_drm_image_params *drm_params =
         container_of(params, const struct wsi_drm_image_params, base);
      return wsi_drm_configure_image(chain, pCreateInfo, drm_params, info);
   }
#endif
#ifdef _WIN32
   case WSI_IMAGE_TYPE_DXGI: {
      const struct wsi_dxgi_image_params *dxgi_params =
         container_of(params, const struct wsi_dxgi_image_params, base);
      return wsi_dxgi_configure_image(chain, pCreateInfo, dxgi_params, info);
   }
#endif
   default:
      unreachable("Invalid image type");
   }
}
427
/* Initialize the driver-independent part of a swapchain.
 *
 * Chooses the blit strategy, creates one command pool per queue family
 * that can blit (or a single pool when a dedicated blit queue exists),
 * and configures the swapchain's image-creation template.  On failure,
 * everything is unwound through wsi_swapchain_finish().
 */
VkResult
wsi_swapchain_init(const struct wsi_device *wsi,
                   struct wsi_swapchain *chain,
                   VkDevice _device,
                   const VkSwapchainCreateInfoKHR *pCreateInfo,
                   const struct wsi_base_image_params *image_params,
                   const VkAllocationCallbacks *pAllocator)
{
   VK_FROM_HANDLE(vk_device, device, _device);
   VkResult result;

   memset(chain, 0, sizeof(*chain));

   vk_object_base_init(device, &chain->base, VK_OBJECT_TYPE_SWAPCHAIN_KHR);

   chain->wsi = wsi;
   chain->device = _device;
   chain->alloc = *pAllocator;
   chain->blit.type = get_blit_type(wsi, image_params, _device);

   chain->blit.queue = VK_NULL_HANDLE;
   if (chain->blit.type != WSI_SWAPCHAIN_NO_BLIT && wsi->get_blit_queue)
      chain->blit.queue = wsi->get_blit_queue(_device);

   /* A dedicated blit queue needs exactly one pool; otherwise allocate
    * one slot per queue family (unused families stay VK_NULL_HANDLE
    * thanks to vk_zalloc below).
    */
   int cmd_pools_count = chain->blit.queue != VK_NULL_HANDLE ? 1 : wsi->queue_family_count;

   chain->cmd_pools =
      vk_zalloc(pAllocator, sizeof(VkCommandPool) * cmd_pools_count, 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!chain->cmd_pools)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   for (uint32_t i = 0; i < cmd_pools_count; i++) {
      int queue_family_index = i;

      if (chain->blit.queue != VK_NULL_HANDLE) {
         VK_FROM_HANDLE(vk_queue, queue, chain->blit.queue);
         queue_family_index = queue->queue_family_index;
      } else {
         /* Queues returned by get_blit_queue() might not be listed in
          * GetPhysicalDeviceQueueFamilyProperties, so this check is skipped for those queues.
          */
         if (!(wsi->queue_supports_blit & BITFIELD64_BIT(queue_family_index)))
            continue;
      }

      const VkCommandPoolCreateInfo cmd_pool_info = {
         .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
         .pNext = NULL,
         .flags = 0,
         .queueFamilyIndex = queue_family_index,
      };
      result = wsi->CreateCommandPool(_device, &cmd_pool_info, &chain->alloc,
                                      &chain->cmd_pools[i]);
      if (result != VK_SUCCESS)
         goto fail;
   }

   result = configure_image(chain, pCreateInfo, image_params,
                            &chain->image_info);
   if (result != VK_SUCCESS)
      goto fail;

   return VK_SUCCESS;

fail:
   wsi_swapchain_finish(chain);
   return result;
}
497
498 static bool
wsi_swapchain_is_present_mode_supported(struct wsi_device * wsi,const VkSwapchainCreateInfoKHR * pCreateInfo,VkPresentModeKHR mode)499 wsi_swapchain_is_present_mode_supported(struct wsi_device *wsi,
500 const VkSwapchainCreateInfoKHR *pCreateInfo,
501 VkPresentModeKHR mode)
502 {
503 ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, pCreateInfo->surface);
504 struct wsi_interface *iface = wsi->wsi[surface->platform];
505 VkPresentModeKHR *present_modes;
506 uint32_t present_mode_count;
507 bool supported = false;
508 VkResult result;
509
510 result = iface->get_present_modes(surface, wsi, &present_mode_count, NULL);
511 if (result != VK_SUCCESS)
512 return supported;
513
514 present_modes = malloc(present_mode_count * sizeof(*present_modes));
515 if (!present_modes)
516 return supported;
517
518 result = iface->get_present_modes(surface, wsi, &present_mode_count,
519 present_modes);
520 if (result != VK_SUCCESS)
521 goto fail;
522
523 for (uint32_t i = 0; i < present_mode_count; i++) {
524 if (present_modes[i] == mode) {
525 supported = true;
526 break;
527 }
528 }
529
530 fail:
531 free(present_modes);
532 return supported;
533 }
534
535 enum VkPresentModeKHR
wsi_swapchain_get_present_mode(struct wsi_device * wsi,const VkSwapchainCreateInfoKHR * pCreateInfo)536 wsi_swapchain_get_present_mode(struct wsi_device *wsi,
537 const VkSwapchainCreateInfoKHR *pCreateInfo)
538 {
539 if (wsi->override_present_mode == VK_PRESENT_MODE_MAX_ENUM_KHR)
540 return pCreateInfo->presentMode;
541
542 if (!wsi_swapchain_is_present_mode_supported(wsi, pCreateInfo,
543 wsi->override_present_mode)) {
544 fprintf(stderr, "Unsupported MESA_VK_WSI_PRESENT_MODE value!\n");
545 return pCreateInfo->presentMode;
546 }
547
548 return wsi->override_present_mode;
549 }
550
/* Release everything wsi_swapchain_init() (and per-image setup) created.
 * Also used as the failure-path unwind, so every field must tolerate
 * being zero/NULL.
 */
void
wsi_swapchain_finish(struct wsi_swapchain *chain)
{
   wsi_destroy_image_info(chain, &chain->image_info);

   if (chain->fences) {
      for (unsigned i = 0; i < chain->image_count; i++)
         chain->wsi->DestroyFence(chain->device, chain->fences[i], &chain->alloc);

      vk_free(&chain->alloc, chain->fences);
   }
   if (chain->blit.semaphores) {
      for (unsigned i = 0; i < chain->image_count; i++)
         chain->wsi->DestroySemaphore(chain->device, chain->blit.semaphores[i], &chain->alloc);

      vk_free(&chain->alloc, chain->blit.semaphores);
   }
   /* DestroySemaphore on VK_NULL_HANDLE is a no-op per the Vulkan spec. */
   chain->wsi->DestroySemaphore(chain->device, chain->dma_buf_semaphore,
                                &chain->alloc);
   chain->wsi->DestroySemaphore(chain->device, chain->present_id_timeline,
                                &chain->alloc);

   /* Must mirror the pool-count logic in wsi_swapchain_init(). */
   int cmd_pools_count = chain->blit.queue != VK_NULL_HANDLE ?
      1 : chain->wsi->queue_family_count;
   for (uint32_t i = 0; i < cmd_pools_count; i++) {
      if (!chain->cmd_pools[i])
         continue;
      chain->wsi->DestroyCommandPool(chain->device, chain->cmd_pools[i],
                                     &chain->alloc);
   }
   vk_free(&chain->alloc, chain->cmd_pools);

   vk_object_base_finish(&chain->base);
}
585
586 VkResult
wsi_configure_image(const struct wsi_swapchain * chain,const VkSwapchainCreateInfoKHR * pCreateInfo,VkExternalMemoryHandleTypeFlags handle_types,struct wsi_image_info * info)587 wsi_configure_image(const struct wsi_swapchain *chain,
588 const VkSwapchainCreateInfoKHR *pCreateInfo,
589 VkExternalMemoryHandleTypeFlags handle_types,
590 struct wsi_image_info *info)
591 {
592 memset(info, 0, sizeof(*info));
593 uint32_t queue_family_count = 1;
594
595 if (pCreateInfo->imageSharingMode == VK_SHARING_MODE_CONCURRENT)
596 queue_family_count = pCreateInfo->queueFamilyIndexCount;
597
598 /*
599 * TODO: there should be no reason to allocate this, but
600 * 15331 shows that games crashed without doing this.
601 */
602 uint32_t *queue_family_indices =
603 vk_alloc(&chain->alloc,
604 sizeof(*queue_family_indices) *
605 queue_family_count,
606 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
607 if (!queue_family_indices)
608 goto err_oom;
609
610 if (pCreateInfo->imageSharingMode == VK_SHARING_MODE_CONCURRENT)
611 for (uint32_t i = 0; i < pCreateInfo->queueFamilyIndexCount; i++)
612 queue_family_indices[i] = pCreateInfo->pQueueFamilyIndices[i];
613
614 info->create = (VkImageCreateInfo) {
615 .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
616 .flags = VK_IMAGE_CREATE_ALIAS_BIT,
617 .imageType = VK_IMAGE_TYPE_2D,
618 .format = pCreateInfo->imageFormat,
619 .extent = {
620 .width = pCreateInfo->imageExtent.width,
621 .height = pCreateInfo->imageExtent.height,
622 .depth = 1,
623 },
624 .mipLevels = 1,
625 .arrayLayers = 1,
626 .samples = VK_SAMPLE_COUNT_1_BIT,
627 .tiling = VK_IMAGE_TILING_OPTIMAL,
628 .usage = pCreateInfo->imageUsage,
629 .sharingMode = pCreateInfo->imageSharingMode,
630 .queueFamilyIndexCount = queue_family_count,
631 .pQueueFamilyIndices = queue_family_indices,
632 .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
633 };
634
635 if (handle_types != 0) {
636 info->ext_mem = (VkExternalMemoryImageCreateInfo) {
637 .sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO,
638 .handleTypes = handle_types,
639 };
640 __vk_append_struct(&info->create, &info->ext_mem);
641 }
642
643 info->wsi = (struct wsi_image_create_info) {
644 .sType = VK_STRUCTURE_TYPE_WSI_IMAGE_CREATE_INFO_MESA,
645 };
646 __vk_append_struct(&info->create, &info->wsi);
647
648 if (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR) {
649 info->create.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT |
650 VK_IMAGE_CREATE_EXTENDED_USAGE_BIT;
651
652 const VkImageFormatListCreateInfo *format_list_in =
653 vk_find_struct_const(pCreateInfo->pNext,
654 IMAGE_FORMAT_LIST_CREATE_INFO);
655
656 assume(format_list_in && format_list_in->viewFormatCount > 0);
657
658 const uint32_t view_format_count = format_list_in->viewFormatCount;
659 VkFormat *view_formats =
660 vk_alloc(&chain->alloc, sizeof(VkFormat) * view_format_count,
661 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
662 if (!view_formats)
663 goto err_oom;
664
665 ASSERTED bool format_found = false;
666 for (uint32_t i = 0; i < format_list_in->viewFormatCount; i++) {
667 if (pCreateInfo->imageFormat == format_list_in->pViewFormats[i])
668 format_found = true;
669 view_formats[i] = format_list_in->pViewFormats[i];
670 }
671 assert(format_found);
672
673 info->format_list = (VkImageFormatListCreateInfo) {
674 .sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO,
675 .viewFormatCount = view_format_count,
676 .pViewFormats = view_formats,
677 };
678 __vk_append_struct(&info->create, &info->format_list);
679 }
680
681 return VK_SUCCESS;
682
683 err_oom:
684 wsi_destroy_image_info(chain, info);
685 return VK_ERROR_OUT_OF_HOST_MEMORY;
686 }
687
688 void
wsi_destroy_image_info(const struct wsi_swapchain * chain,struct wsi_image_info * info)689 wsi_destroy_image_info(const struct wsi_swapchain *chain,
690 struct wsi_image_info *info)
691 {
692 if (info->create.pQueueFamilyIndices != NULL) {
693 vk_free(&chain->alloc, (void *)info->create.pQueueFamilyIndices);
694 info->create.pQueueFamilyIndices = NULL;
695 }
696 if (info->format_list.pViewFormats != NULL) {
697 vk_free(&chain->alloc, (void *)info->format_list.pViewFormats);
698 info->format_list.pViewFormats = NULL;
699 }
700 if (info->drm_mod_list.pDrmFormatModifiers != NULL) {
701 vk_free(&chain->alloc, (void *)info->drm_mod_list.pDrmFormatModifiers);
702 info->drm_mod_list.pDrmFormatModifiers = NULL;
703 }
704 if (info->modifier_props != NULL) {
705 vk_free(&chain->alloc, info->modifier_props);
706 info->modifier_props = NULL;
707 }
708 }
709
/* Create one swapchain image from the template in *info: create the
 * VkImage, allocate/bind its memory via the backend's create_mem hook,
 * run any backend finish_create step, and optionally set up DRM
 * explicit sync.  On any failure the partially built image is destroyed.
 */
VkResult
wsi_create_image(const struct wsi_swapchain *chain,
                 const struct wsi_image_info *info,
                 struct wsi_image *image)
{
   const struct wsi_device *wsi = chain->wsi;
   VkResult result;

   memset(image, 0, sizeof(*image));

#ifndef _WIN32
   /* Mark all fds invalid so the error path can safely close them. */
   image->dma_buf_fd = -1;
   for (uint32_t i = 0; i < WSI_ES_COUNT; i++)
      image->explicit_sync[i].fd = -1;
#endif

   result = wsi->CreateImage(chain->device, &info->create,
                             &chain->alloc, &image->image);
   if (result != VK_SUCCESS)
      goto fail;

   /* Backend-specific memory allocation (CPU, DRM, or DXGI). */
   result = info->create_mem(chain, info, image);
   if (result != VK_SUCCESS)
      goto fail;

   result = wsi->BindImageMemory(chain->device, image->image,
                                 image->memory, 0);
   if (result != VK_SUCCESS)
      goto fail;

   if (info->finish_create) {
      result = info->finish_create(chain, info, image);
      if (result != VK_SUCCESS)
         goto fail;
   }

   if (info->explicit_sync) {
#if HAVE_LIBDRM
      result = wsi_create_image_explicit_sync_drm(chain, image);
      if (result != VK_SUCCESS)
         goto fail;
#else
      /* Explicit sync requires libdrm; reject the request otherwise. */
      result = VK_ERROR_FEATURE_NOT_PRESENT;
      goto fail;
#endif
   }

   return VK_SUCCESS;

fail:
   wsi_destroy_image(chain, image);
   return result;
}
763
/* Destroy a swapchain image and everything attached to it (dma-buf fd,
 * explicit-sync state, CPU mapping, blit command buffers, memory and
 * image/buffer handles).  Safe on a partially constructed image, since
 * wsi_create_image() zero-initializes and pre-marks fds as -1.
 */
void
wsi_destroy_image(const struct wsi_swapchain *chain,
                  struct wsi_image *image)
{
   const struct wsi_device *wsi = chain->wsi;

#ifndef _WIN32
   if (image->dma_buf_fd >= 0)
      close(image->dma_buf_fd);
#endif

   if (image->explicit_sync[WSI_ES_ACQUIRE].semaphore) {
#if HAVE_LIBDRM
      wsi_destroy_image_explicit_sync_drm(chain, image);
#endif
   }

   if (image->cpu_map != NULL) {
      /* The mapping lives on the blit buffer's memory when one exists. */
      wsi->UnmapMemory(chain->device, image->blit.buffer != VK_NULL_HANDLE ?
                       image->blit.memory : image->memory);
   }

   if (image->blit.cmd_buffers) {
      /* Must mirror the pool-count logic in wsi_swapchain_init(). */
      int cmd_buffer_count =
         chain->blit.queue != VK_NULL_HANDLE ? 1 : wsi->queue_family_count;

      for (uint32_t i = 0; i < cmd_buffer_count; i++) {
         if (!chain->cmd_pools[i])
            continue;
         wsi->FreeCommandBuffers(chain->device, chain->cmd_pools[i],
                                 1, &image->blit.cmd_buffers[i]);
      }
      vk_free(&chain->alloc, image->blit.cmd_buffers);
   }

   /* Destroying VK_NULL_HANDLE objects is a valid no-op per the spec. */
   wsi->FreeMemory(chain->device, image->memory, &chain->alloc);
   wsi->DestroyImage(chain->device, image->image, &chain->alloc);
   wsi->DestroyImage(chain->device, image->blit.image, &chain->alloc);
   wsi->FreeMemory(chain->device, image->blit.memory, &chain->alloc);
   wsi->DestroyBuffer(chain->device, image->blit.buffer, &chain->alloc);
}
805
806 VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice,uint32_t queueFamilyIndex,VkSurfaceKHR _surface,VkBool32 * pSupported)807 wsi_GetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice,
808 uint32_t queueFamilyIndex,
809 VkSurfaceKHR _surface,
810 VkBool32 *pSupported)
811 {
812 VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
813 ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
814 struct wsi_device *wsi_device = device->wsi_device;
815 struct wsi_interface *iface = wsi_device->wsi[surface->platform];
816
817 VkResult res = iface->get_support(surface, wsi_device,
818 queueFamilyIndex, pSupported);
819 if (res == VK_SUCCESS) {
820 bool blit = (wsi_device->queue_supports_blit & BITFIELD64_BIT(queueFamilyIndex)) != 0;
821 *pSupported = (bool)*pSupported && blit;
822 }
823
824 return res;
825 }
826
827 VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice physicalDevice,VkSurfaceKHR _surface,VkSurfaceCapabilitiesKHR * pSurfaceCapabilities)828 wsi_GetPhysicalDeviceSurfaceCapabilitiesKHR(
829 VkPhysicalDevice physicalDevice,
830 VkSurfaceKHR _surface,
831 VkSurfaceCapabilitiesKHR *pSurfaceCapabilities)
832 {
833 VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
834 ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
835 struct wsi_device *wsi_device = device->wsi_device;
836 struct wsi_interface *iface = wsi_device->wsi[surface->platform];
837
838 VkSurfaceCapabilities2KHR caps2 = {
839 .sType = VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR,
840 };
841
842 VkResult result = iface->get_capabilities2(surface, wsi_device, NULL, &caps2);
843
844 if (result == VK_SUCCESS)
845 *pSurfaceCapabilities = caps2.surfaceCapabilities;
846
847 return result;
848 }
849
850 VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetPhysicalDeviceSurfaceCapabilities2KHR(VkPhysicalDevice physicalDevice,const VkPhysicalDeviceSurfaceInfo2KHR * pSurfaceInfo,VkSurfaceCapabilities2KHR * pSurfaceCapabilities)851 wsi_GetPhysicalDeviceSurfaceCapabilities2KHR(
852 VkPhysicalDevice physicalDevice,
853 const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
854 VkSurfaceCapabilities2KHR *pSurfaceCapabilities)
855 {
856 VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
857 ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, pSurfaceInfo->surface);
858 struct wsi_device *wsi_device = device->wsi_device;
859 struct wsi_interface *iface = wsi_device->wsi[surface->platform];
860
861 return iface->get_capabilities2(surface, wsi_device, pSurfaceInfo->pNext,
862 pSurfaceCapabilities);
863 }
864
/* vkGetPhysicalDeviceSurfaceCapabilities2EXT implementation.
 *
 * Implemented on top of the capabilities2 backend path: a Mesa-private
 * wsi_surface_supported_counters struct is chained in to retrieve the
 * surface-counter bits, and the core capabilities are copied field by
 * field into the EXT output struct.
 */
VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetPhysicalDeviceSurfaceCapabilities2EXT(
   VkPhysicalDevice physicalDevice,
   VkSurfaceKHR _surface,
   VkSurfaceCapabilities2EXT *pSurfaceCapabilities)
{
   VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
   struct wsi_device *wsi_device = device->wsi_device;
   struct wsi_interface *iface = wsi_device->wsi[surface->platform];

   assert(pSurfaceCapabilities->sType ==
          VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_EXT);

   /* Preserve the caller's pNext chain behind our counters query. */
   struct wsi_surface_supported_counters counters = {
      .sType = VK_STRUCTURE_TYPE_WSI_SURFACE_SUPPORTED_COUNTERS_MESA,
      .pNext = pSurfaceCapabilities->pNext,
      .supported_surface_counters = 0,
   };

   VkSurfaceCapabilities2KHR caps2 = {
      .sType = VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR,
      .pNext = &counters,
   };

   VkResult result = iface->get_capabilities2(surface, wsi_device, NULL, &caps2);

   if (result == VK_SUCCESS) {
      VkSurfaceCapabilities2EXT *ext_caps = pSurfaceCapabilities;
      VkSurfaceCapabilitiesKHR khr_caps = caps2.surfaceCapabilities;

      ext_caps->minImageCount = khr_caps.minImageCount;
      ext_caps->maxImageCount = khr_caps.maxImageCount;
      ext_caps->currentExtent = khr_caps.currentExtent;
      ext_caps->minImageExtent = khr_caps.minImageExtent;
      ext_caps->maxImageExtent = khr_caps.maxImageExtent;
      ext_caps->maxImageArrayLayers = khr_caps.maxImageArrayLayers;
      ext_caps->supportedTransforms = khr_caps.supportedTransforms;
      ext_caps->currentTransform = khr_caps.currentTransform;
      ext_caps->supportedCompositeAlpha = khr_caps.supportedCompositeAlpha;
      ext_caps->supportedUsageFlags = khr_caps.supportedUsageFlags;
      ext_caps->supportedSurfaceCounters = counters.supported_surface_counters;
   }

   return result;
}
911
912 VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice,VkSurfaceKHR _surface,uint32_t * pSurfaceFormatCount,VkSurfaceFormatKHR * pSurfaceFormats)913 wsi_GetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice,
914 VkSurfaceKHR _surface,
915 uint32_t *pSurfaceFormatCount,
916 VkSurfaceFormatKHR *pSurfaceFormats)
917 {
918 VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
919 ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
920 struct wsi_device *wsi_device = device->wsi_device;
921 struct wsi_interface *iface = wsi_device->wsi[surface->platform];
922
923 return iface->get_formats(surface, wsi_device,
924 pSurfaceFormatCount, pSurfaceFormats);
925 }
926
927 VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetPhysicalDeviceSurfaceFormats2KHR(VkPhysicalDevice physicalDevice,const VkPhysicalDeviceSurfaceInfo2KHR * pSurfaceInfo,uint32_t * pSurfaceFormatCount,VkSurfaceFormat2KHR * pSurfaceFormats)928 wsi_GetPhysicalDeviceSurfaceFormats2KHR(VkPhysicalDevice physicalDevice,
929 const VkPhysicalDeviceSurfaceInfo2KHR * pSurfaceInfo,
930 uint32_t *pSurfaceFormatCount,
931 VkSurfaceFormat2KHR *pSurfaceFormats)
932 {
933 VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
934 ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, pSurfaceInfo->surface);
935 struct wsi_device *wsi_device = device->wsi_device;
936 struct wsi_interface *iface = wsi_device->wsi[surface->platform];
937
938 return iface->get_formats2(surface, wsi_device, pSurfaceInfo->pNext,
939 pSurfaceFormatCount, pSurfaceFormats);
940 }
941
942 VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice,VkSurfaceKHR _surface,uint32_t * pPresentModeCount,VkPresentModeKHR * pPresentModes)943 wsi_GetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice,
944 VkSurfaceKHR _surface,
945 uint32_t *pPresentModeCount,
946 VkPresentModeKHR *pPresentModes)
947 {
948 VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
949 ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
950 struct wsi_device *wsi_device = device->wsi_device;
951 struct wsi_interface *iface = wsi_device->wsi[surface->platform];
952
953 return iface->get_present_modes(surface, wsi_device, pPresentModeCount,
954 pPresentModes);
955 }
956
957 VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetPhysicalDevicePresentRectanglesKHR(VkPhysicalDevice physicalDevice,VkSurfaceKHR _surface,uint32_t * pRectCount,VkRect2D * pRects)958 wsi_GetPhysicalDevicePresentRectanglesKHR(VkPhysicalDevice physicalDevice,
959 VkSurfaceKHR _surface,
960 uint32_t *pRectCount,
961 VkRect2D *pRects)
962 {
963 VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
964 ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
965 struct wsi_device *wsi_device = device->wsi_device;
966 struct wsi_interface *iface = wsi_device->wsi[surface->platform];
967
968 return iface->get_present_rectangles(surface, wsi_device,
969 pRectCount, pRects);
970 }
971
972 VKAPI_ATTR VkResult VKAPI_CALL
wsi_CreateSwapchainKHR(VkDevice _device,const VkSwapchainCreateInfoKHR * pCreateInfo,const VkAllocationCallbacks * pAllocator,VkSwapchainKHR * pSwapchain)973 wsi_CreateSwapchainKHR(VkDevice _device,
974 const VkSwapchainCreateInfoKHR *pCreateInfo,
975 const VkAllocationCallbacks *pAllocator,
976 VkSwapchainKHR *pSwapchain)
977 {
978 MESA_TRACE_FUNC();
979 VK_FROM_HANDLE(vk_device, device, _device);
980 ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, pCreateInfo->surface);
981 struct wsi_device *wsi_device = device->physical->wsi_device;
982 struct wsi_interface *iface = wsi_device->force_headless_swapchain ?
983 wsi_device->wsi[VK_ICD_WSI_PLATFORM_HEADLESS] :
984 wsi_device->wsi[surface->platform];
985 const VkAllocationCallbacks *alloc;
986 struct wsi_swapchain *swapchain;
987
988 if (pAllocator)
989 alloc = pAllocator;
990 else
991 alloc = &device->alloc;
992
993 VkSwapchainCreateInfoKHR info = *pCreateInfo;
994
995 if (wsi_device->force_swapchain_to_currentExtent) {
996 VkSurfaceCapabilities2KHR caps2 = {
997 .sType = VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR,
998 };
999 iface->get_capabilities2(surface, wsi_device, NULL, &caps2);
1000 info.imageExtent = caps2.surfaceCapabilities.currentExtent;
1001 }
1002
1003 /* Ignore DEFERRED_MEMORY_ALLOCATION_BIT. Would require deep plumbing to be able to take advantage of it.
1004 * bool deferred_allocation = pCreateInfo->flags & VK_SWAPCHAIN_CREATE_DEFERRED_MEMORY_ALLOCATION_BIT_EXT;
1005 */
1006
1007 VkResult result = iface->create_swapchain(surface, _device, wsi_device,
1008 &info, alloc,
1009 &swapchain);
1010 if (result != VK_SUCCESS)
1011 return result;
1012
1013 swapchain->fences = vk_zalloc(alloc,
1014 sizeof (*swapchain->fences) * swapchain->image_count,
1015 sizeof (*swapchain->fences),
1016 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1017 if (!swapchain->fences) {
1018 swapchain->destroy(swapchain, alloc);
1019 return VK_ERROR_OUT_OF_HOST_MEMORY;
1020 }
1021
1022 if (wsi_device->khr_present_wait) {
1023 const VkSemaphoreTypeCreateInfo type_info = {
1024 .sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO,
1025 .semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE,
1026 };
1027
1028 const VkSemaphoreCreateInfo sem_info = {
1029 .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
1030 .pNext = &type_info,
1031 .flags = 0,
1032 };
1033
1034 /* We assume here that a driver exposing present_wait also exposes VK_KHR_timeline_semaphore. */
1035 result = wsi_device->CreateSemaphore(_device, &sem_info, alloc, &swapchain->present_id_timeline);
1036 if (result != VK_SUCCESS) {
1037 swapchain->destroy(swapchain, alloc);
1038 return VK_ERROR_OUT_OF_HOST_MEMORY;
1039 }
1040 }
1041
1042 if (swapchain->blit.queue != VK_NULL_HANDLE) {
1043 swapchain->blit.semaphores = vk_zalloc(alloc,
1044 sizeof (*swapchain->blit.semaphores) * swapchain->image_count,
1045 sizeof (*swapchain->blit.semaphores),
1046 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1047 if (!swapchain->blit.semaphores) {
1048 wsi_device->DestroySemaphore(_device, swapchain->present_id_timeline, alloc);
1049 swapchain->destroy(swapchain, alloc);
1050 return VK_ERROR_OUT_OF_HOST_MEMORY;
1051 }
1052 }
1053
1054 *pSwapchain = wsi_swapchain_to_handle(swapchain);
1055
1056 return VK_SUCCESS;
1057 }
1058
1059 VKAPI_ATTR void VKAPI_CALL
wsi_DestroySwapchainKHR(VkDevice _device,VkSwapchainKHR _swapchain,const VkAllocationCallbacks * pAllocator)1060 wsi_DestroySwapchainKHR(VkDevice _device,
1061 VkSwapchainKHR _swapchain,
1062 const VkAllocationCallbacks *pAllocator)
1063 {
1064 MESA_TRACE_FUNC();
1065 VK_FROM_HANDLE(vk_device, device, _device);
1066 VK_FROM_HANDLE(wsi_swapchain, swapchain, _swapchain);
1067 const VkAllocationCallbacks *alloc;
1068
1069 if (!swapchain)
1070 return;
1071
1072 if (pAllocator)
1073 alloc = pAllocator;
1074 else
1075 alloc = &device->alloc;
1076
1077 swapchain->destroy(swapchain, alloc);
1078 }
1079
1080 VKAPI_ATTR VkResult VKAPI_CALL
wsi_ReleaseSwapchainImagesEXT(VkDevice _device,const VkReleaseSwapchainImagesInfoEXT * pReleaseInfo)1081 wsi_ReleaseSwapchainImagesEXT(VkDevice _device,
1082 const VkReleaseSwapchainImagesInfoEXT *pReleaseInfo)
1083 {
1084 VK_FROM_HANDLE(wsi_swapchain, swapchain, pReleaseInfo->swapchain);
1085
1086 for (uint32_t i = 0; i < pReleaseInfo->imageIndexCount; i++) {
1087 uint32_t index = pReleaseInfo->pImageIndices[i];
1088 assert(index < swapchain->image_count);
1089 struct wsi_image *image = swapchain->get_wsi_image(swapchain, index);
1090 assert(image->acquired);
1091 image->acquired = false;
1092 }
1093
1094 VkResult result = swapchain->release_images(swapchain,
1095 pReleaseInfo->imageIndexCount,
1096 pReleaseInfo->pImageIndices);
1097
1098 if (result != VK_SUCCESS)
1099 return result;
1100
1101 if (swapchain->wsi->set_memory_ownership) {
1102 for (uint32_t i = 0; i < pReleaseInfo->imageIndexCount; i++) {
1103 uint32_t image_index = pReleaseInfo->pImageIndices[i];
1104 VkDeviceMemory mem = swapchain->get_wsi_image(swapchain, image_index)->memory;
1105 swapchain->wsi->set_memory_ownership(swapchain->device, mem, false);
1106 }
1107 }
1108
1109 return VK_SUCCESS;
1110 }
1111
1112 VkResult
wsi_common_get_images(VkSwapchainKHR _swapchain,uint32_t * pSwapchainImageCount,VkImage * pSwapchainImages)1113 wsi_common_get_images(VkSwapchainKHR _swapchain,
1114 uint32_t *pSwapchainImageCount,
1115 VkImage *pSwapchainImages)
1116 {
1117 VK_FROM_HANDLE(wsi_swapchain, swapchain, _swapchain);
1118 VK_OUTARRAY_MAKE_TYPED(VkImage, images, pSwapchainImages, pSwapchainImageCount);
1119
1120 for (uint32_t i = 0; i < swapchain->image_count; i++) {
1121 vk_outarray_append_typed(VkImage, &images, image) {
1122 *image = swapchain->get_wsi_image(swapchain, i)->image;
1123 }
1124 }
1125
1126 return vk_outarray_status(&images);
1127 }
1128
1129 VkImage
wsi_common_get_image(VkSwapchainKHR _swapchain,uint32_t index)1130 wsi_common_get_image(VkSwapchainKHR _swapchain, uint32_t index)
1131 {
1132 VK_FROM_HANDLE(wsi_swapchain, swapchain, _swapchain);
1133 assert(index < swapchain->image_count);
1134 return swapchain->get_wsi_image(swapchain, index)->image;
1135 }
1136
1137 VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetSwapchainImagesKHR(VkDevice device,VkSwapchainKHR swapchain,uint32_t * pSwapchainImageCount,VkImage * pSwapchainImages)1138 wsi_GetSwapchainImagesKHR(VkDevice device,
1139 VkSwapchainKHR swapchain,
1140 uint32_t *pSwapchainImageCount,
1141 VkImage *pSwapchainImages)
1142 {
1143 MESA_TRACE_FUNC();
1144 return wsi_common_get_images(swapchain,
1145 pSwapchainImageCount,
1146 pSwapchainImages);
1147 }
1148
1149 VKAPI_ATTR VkResult VKAPI_CALL
wsi_AcquireNextImageKHR(VkDevice _device,VkSwapchainKHR swapchain,uint64_t timeout,VkSemaphore semaphore,VkFence fence,uint32_t * pImageIndex)1150 wsi_AcquireNextImageKHR(VkDevice _device,
1151 VkSwapchainKHR swapchain,
1152 uint64_t timeout,
1153 VkSemaphore semaphore,
1154 VkFence fence,
1155 uint32_t *pImageIndex)
1156 {
1157 MESA_TRACE_FUNC();
1158 VK_FROM_HANDLE(vk_device, device, _device);
1159
1160 const VkAcquireNextImageInfoKHR acquire_info = {
1161 .sType = VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHR,
1162 .swapchain = swapchain,
1163 .timeout = timeout,
1164 .semaphore = semaphore,
1165 .fence = fence,
1166 .deviceMask = 0,
1167 };
1168
1169 return device->dispatch_table.AcquireNextImage2KHR(_device, &acquire_info,
1170 pImageIndex);
1171 }
1172
1173 static VkResult
wsi_signal_semaphore_for_image(struct vk_device * device,const struct wsi_swapchain * chain,const struct wsi_image * image,VkSemaphore _semaphore)1174 wsi_signal_semaphore_for_image(struct vk_device *device,
1175 const struct wsi_swapchain *chain,
1176 const struct wsi_image *image,
1177 VkSemaphore _semaphore)
1178 {
1179 if (device->physical->supported_sync_types == NULL)
1180 return VK_SUCCESS;
1181
1182 VK_FROM_HANDLE(vk_semaphore, semaphore, _semaphore);
1183
1184 vk_semaphore_reset_temporary(device, semaphore);
1185
1186 #ifdef HAVE_LIBDRM
1187 VkResult result = chain->image_info.explicit_sync ?
1188 wsi_create_sync_for_image_syncobj(chain, image,
1189 VK_SYNC_FEATURE_GPU_WAIT,
1190 &semaphore->temporary) :
1191 wsi_create_sync_for_dma_buf_wait(chain, image,
1192 VK_SYNC_FEATURE_GPU_WAIT,
1193 &semaphore->temporary);
1194 if (result != VK_ERROR_FEATURE_NOT_PRESENT)
1195 return result;
1196 #endif
1197
1198 if (chain->wsi->signal_semaphore_with_memory) {
1199 return device->create_sync_for_memory(device, image->memory,
1200 false /* signal_memory */,
1201 &semaphore->temporary);
1202 } else {
1203 return vk_sync_create(device, &vk_sync_dummy_type,
1204 0 /* flags */, 0 /* initial_value */,
1205 &semaphore->temporary);
1206 }
1207 }
1208
1209 static VkResult
wsi_signal_fence_for_image(struct vk_device * device,const struct wsi_swapchain * chain,const struct wsi_image * image,VkFence _fence)1210 wsi_signal_fence_for_image(struct vk_device *device,
1211 const struct wsi_swapchain *chain,
1212 const struct wsi_image *image,
1213 VkFence _fence)
1214 {
1215 if (device->physical->supported_sync_types == NULL)
1216 return VK_SUCCESS;
1217
1218 VK_FROM_HANDLE(vk_fence, fence, _fence);
1219
1220 vk_fence_reset_temporary(device, fence);
1221
1222 #ifdef HAVE_LIBDRM
1223 VkResult result = chain->image_info.explicit_sync ?
1224 wsi_create_sync_for_image_syncobj(chain, image,
1225 VK_SYNC_FEATURE_CPU_WAIT,
1226 &fence->temporary) :
1227 wsi_create_sync_for_dma_buf_wait(chain, image,
1228 VK_SYNC_FEATURE_CPU_WAIT,
1229 &fence->temporary);
1230 if (result != VK_ERROR_FEATURE_NOT_PRESENT)
1231 return result;
1232 #endif
1233
1234 if (chain->wsi->signal_fence_with_memory) {
1235 return device->create_sync_for_memory(device, image->memory,
1236 false /* signal_memory */,
1237 &fence->temporary);
1238 } else {
1239 return vk_sync_create(device, &vk_sync_dummy_type,
1240 0 /* flags */, 0 /* initial_value */,
1241 &fence->temporary);
1242 }
1243 }
1244
1245 VkResult
wsi_common_acquire_next_image2(const struct wsi_device * wsi,VkDevice _device,const VkAcquireNextImageInfoKHR * pAcquireInfo,uint32_t * pImageIndex)1246 wsi_common_acquire_next_image2(const struct wsi_device *wsi,
1247 VkDevice _device,
1248 const VkAcquireNextImageInfoKHR *pAcquireInfo,
1249 uint32_t *pImageIndex)
1250 {
1251 VK_FROM_HANDLE(wsi_swapchain, swapchain, pAcquireInfo->swapchain);
1252 VK_FROM_HANDLE(vk_device, device, _device);
1253
1254 VkResult result = swapchain->acquire_next_image(swapchain, pAcquireInfo,
1255 pImageIndex);
1256 if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR)
1257 return result;
1258 struct wsi_image *image =
1259 swapchain->get_wsi_image(swapchain, *pImageIndex);
1260
1261 image->acquired = true;
1262
1263 if (pAcquireInfo->semaphore != VK_NULL_HANDLE) {
1264 VkResult signal_result =
1265 wsi_signal_semaphore_for_image(device, swapchain, image,
1266 pAcquireInfo->semaphore);
1267 if (signal_result != VK_SUCCESS)
1268 return signal_result;
1269 }
1270
1271 if (pAcquireInfo->fence != VK_NULL_HANDLE) {
1272 VkResult signal_result =
1273 wsi_signal_fence_for_image(device, swapchain, image,
1274 pAcquireInfo->fence);
1275 if (signal_result != VK_SUCCESS)
1276 return signal_result;
1277 }
1278
1279 if (wsi->set_memory_ownership)
1280 wsi->set_memory_ownership(swapchain->device, image->memory, true);
1281
1282 return result;
1283 }
1284
1285 VKAPI_ATTR VkResult VKAPI_CALL
wsi_AcquireNextImage2KHR(VkDevice _device,const VkAcquireNextImageInfoKHR * pAcquireInfo,uint32_t * pImageIndex)1286 wsi_AcquireNextImage2KHR(VkDevice _device,
1287 const VkAcquireNextImageInfoKHR *pAcquireInfo,
1288 uint32_t *pImageIndex)
1289 {
1290 MESA_TRACE_FUNC();
1291 VK_FROM_HANDLE(vk_device, device, _device);
1292
1293 return wsi_common_acquire_next_image2(device->physical->wsi_device,
1294 _device, pAcquireInfo, pImageIndex);
1295 }
1296
wsi_signal_present_id_timeline(struct wsi_swapchain * swapchain,VkQueue queue,uint64_t present_id,VkFence present_fence)1297 static VkResult wsi_signal_present_id_timeline(struct wsi_swapchain *swapchain,
1298 VkQueue queue, uint64_t present_id,
1299 VkFence present_fence)
1300 {
1301 assert(swapchain->present_id_timeline || present_fence);
1302
1303 const VkTimelineSemaphoreSubmitInfo timeline_info = {
1304 .sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO,
1305 .pSignalSemaphoreValues = &present_id,
1306 .signalSemaphoreValueCount = 1,
1307 };
1308
1309 const VkSubmitInfo submit_info = {
1310 .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
1311 .pNext = &timeline_info,
1312 .signalSemaphoreCount = 1,
1313 .pSignalSemaphores = &swapchain->present_id_timeline,
1314 };
1315
1316 uint32_t submit_count = present_id ? 1 : 0;
1317 return swapchain->wsi->QueueSubmit(queue, submit_count, &submit_info, present_fence);
1318 }
1319
1320 static VkResult
handle_trace(VkQueue queue,struct vk_device * device,uint32_t current_frame)1321 handle_trace(VkQueue queue, struct vk_device *device, uint32_t current_frame)
1322 {
1323 struct vk_instance *instance = device->physical->instance;
1324 if (!instance->trace_mode)
1325 return VK_SUCCESS;
1326
1327 simple_mtx_lock(&device->trace_mtx);
1328
1329 bool frame_trigger = device->current_frame == instance->trace_frame;
1330
1331 bool file_trigger = false;
1332 #ifndef _WIN32
1333 if (instance->trace_trigger_file && access(instance->trace_trigger_file, W_OK) == 0) {
1334 if (unlink(instance->trace_trigger_file) == 0) {
1335 file_trigger = true;
1336 } else {
1337 /* Do not enable tracing if we cannot remove the file,
1338 * because by then we'll trace every frame ... */
1339 fprintf(stderr, "Could not remove trace trigger file, ignoring\n");
1340 }
1341 }
1342 #endif
1343
1344 VkResult result = VK_SUCCESS;
1345 if (frame_trigger || file_trigger || device->trace_hotkey_trigger)
1346 result = device->capture_trace(queue);
1347
1348 device->trace_hotkey_trigger = false;
1349
1350 simple_mtx_unlock(&device->trace_mtx);
1351
1352 return result;
1353 }
1354
1355 VkResult
wsi_common_queue_present(const struct wsi_device * wsi,VkDevice device,VkQueue queue,int queue_family_index,const VkPresentInfoKHR * pPresentInfo)1356 wsi_common_queue_present(const struct wsi_device *wsi,
1357 VkDevice device,
1358 VkQueue queue,
1359 int queue_family_index,
1360 const VkPresentInfoKHR *pPresentInfo)
1361 {
1362 struct vk_device *dev = vk_device_from_handle(device);
1363 uint32_t current_frame = p_atomic_fetch_add(&dev->current_frame, 1);
1364 VkResult final_result = handle_trace(queue, dev, current_frame);
1365
1366 STACK_ARRAY(VkPipelineStageFlags, stage_flags,
1367 MAX2(1, pPresentInfo->waitSemaphoreCount));
1368 for (uint32_t s = 0; s < MAX2(1, pPresentInfo->waitSemaphoreCount); s++)
1369 stage_flags[s] = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
1370
1371 const VkPresentRegionsKHR *regions =
1372 vk_find_struct_const(pPresentInfo->pNext, PRESENT_REGIONS_KHR);
1373 const VkPresentIdKHR *present_ids =
1374 vk_find_struct_const(pPresentInfo->pNext, PRESENT_ID_KHR);
1375 const VkSwapchainPresentFenceInfoEXT *present_fence_info =
1376 vk_find_struct_const(pPresentInfo->pNext, SWAPCHAIN_PRESENT_FENCE_INFO_EXT);
1377 const VkSwapchainPresentModeInfoEXT *present_mode_info =
1378 vk_find_struct_const(pPresentInfo->pNext, SWAPCHAIN_PRESENT_MODE_INFO_EXT);
1379
1380 for (uint32_t i = 0; i < pPresentInfo->swapchainCount; i++) {
1381 VK_FROM_HANDLE(wsi_swapchain, swapchain, pPresentInfo->pSwapchains[i]);
1382 uint32_t image_index = pPresentInfo->pImageIndices[i];
1383 VkResult result;
1384
1385 /* Update the present mode for this present and any subsequent present.
1386 * Only update the present mode when MESA_VK_WSI_PRESENT_MODE is not used.
1387 * We should also turn any VkSwapchainPresentModesCreateInfoEXT into a nop,
1388 * but none of the WSI backends use that currently. */
1389 if (present_mode_info && present_mode_info->pPresentModes &&
1390 swapchain->set_present_mode && wsi->override_present_mode == VK_PRESENT_MODE_MAX_ENUM_KHR) {
1391 swapchain->set_present_mode(swapchain, present_mode_info->pPresentModes[i]);
1392 }
1393
1394 if (swapchain->fences[image_index] == VK_NULL_HANDLE) {
1395 const VkFenceCreateInfo fence_info = {
1396 .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
1397 .pNext = NULL,
1398 .flags = VK_FENCE_CREATE_SIGNALED_BIT,
1399 };
1400 result = wsi->CreateFence(device, &fence_info,
1401 &swapchain->alloc,
1402 &swapchain->fences[image_index]);
1403 if (result != VK_SUCCESS)
1404 goto fail_present;
1405
1406 if (swapchain->blit.type != WSI_SWAPCHAIN_NO_BLIT &&
1407 swapchain->blit.queue != VK_NULL_HANDLE) {
1408 const VkSemaphoreCreateInfo sem_info = {
1409 .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
1410 .pNext = NULL,
1411 .flags = 0,
1412 };
1413 result = wsi->CreateSemaphore(device, &sem_info,
1414 &swapchain->alloc,
1415 &swapchain->blit.semaphores[image_index]);
1416 if (result != VK_SUCCESS)
1417 goto fail_present;
1418 }
1419 } else {
1420 MESA_TRACE_SCOPE("throttle");
1421 result =
1422 wsi->WaitForFences(device, 1, &swapchain->fences[image_index],
1423 true, ~0ull);
1424 if (result != VK_SUCCESS)
1425 goto fail_present;
1426 }
1427
1428 result = wsi->ResetFences(device, 1, &swapchain->fences[image_index]);
1429 if (result != VK_SUCCESS)
1430 goto fail_present;
1431
1432 VkTimelineSemaphoreSubmitInfo timeline_submit_info = {
1433 .sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO,
1434 };
1435
1436 VkSubmitInfo submit_info = {
1437 .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
1438 };
1439
1440 if (i == 0) {
1441 /* We only need/want to wait on semaphores once. After that, we're
1442 * guaranteed ordering since it all happens on the same queue.
1443 */
1444 submit_info.waitSemaphoreCount = pPresentInfo->waitSemaphoreCount;
1445 submit_info.pWaitSemaphores = pPresentInfo->pWaitSemaphores;
1446 submit_info.pWaitDstStageMask = stage_flags;
1447 }
1448
1449 struct wsi_image *image =
1450 swapchain->get_wsi_image(swapchain, image_index);
1451
1452 VkQueue submit_queue = queue;
1453 if (swapchain->blit.type != WSI_SWAPCHAIN_NO_BLIT) {
1454 if (swapchain->blit.queue == VK_NULL_HANDLE) {
1455 submit_info.commandBufferCount = 1;
1456 submit_info.pCommandBuffers =
1457 &image->blit.cmd_buffers[queue_family_index];
1458 } else {
1459 /* If we are using a blit using the driver's private queue, then
1460 * do an empty submit signalling a semaphore, and then submit the
1461 * blit waiting on that. This ensures proper queue ordering of
1462 * vkQueueSubmit() calls.
1463 */
1464 submit_info.signalSemaphoreCount = 1;
1465 submit_info.pSignalSemaphores =
1466 &swapchain->blit.semaphores[image_index];
1467
1468 result = wsi->QueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE);
1469 if (result != VK_SUCCESS)
1470 goto fail_present;
1471
1472 /* Now prepare the blit submit. It needs to then wait on the
1473 * semaphore we signaled above.
1474 */
1475 submit_queue = swapchain->blit.queue;
1476 submit_info.waitSemaphoreCount = 1;
1477 submit_info.pWaitSemaphores = submit_info.pSignalSemaphores;
1478 submit_info.signalSemaphoreCount = 0;
1479 submit_info.pSignalSemaphores = NULL;
1480 submit_info.commandBufferCount = 1;
1481 submit_info.pCommandBuffers = &image->blit.cmd_buffers[0];
1482 submit_info.pWaitDstStageMask = stage_flags;
1483 }
1484 }
1485
1486 VkFence fence = swapchain->fences[image_index];
1487
1488 struct wsi_memory_signal_submit_info mem_signal;
1489 bool has_signal_dma_buf = false;
1490 bool explicit_sync = swapchain->image_info.explicit_sync;
1491 if (explicit_sync) {
1492 /* We will signal this acquire value ourselves when GPU work is done. */
1493 image->explicit_sync[WSI_ES_ACQUIRE].timeline++;
1494 /* The compositor will signal this value when it is done with the image. */
1495 image->explicit_sync[WSI_ES_RELEASE].timeline++;
1496
1497 timeline_submit_info.signalSemaphoreValueCount = 1;
1498 timeline_submit_info.pSignalSemaphoreValues = &image->explicit_sync[WSI_ES_ACQUIRE].timeline;
1499
1500 assert(submit_info.signalSemaphoreCount == 0);
1501 submit_info.signalSemaphoreCount = 1;
1502 submit_info.pSignalSemaphores = &image->explicit_sync[WSI_ES_ACQUIRE].semaphore;
1503 __vk_append_struct(&submit_info, &timeline_submit_info);
1504 } else {
1505 #ifdef HAVE_LIBDRM
1506 result = wsi_prepare_signal_dma_buf_from_semaphore(swapchain, image);
1507 if (result == VK_SUCCESS) {
1508 assert(submit_info.signalSemaphoreCount == 0);
1509 submit_info.signalSemaphoreCount = 1;
1510 submit_info.pSignalSemaphores = &swapchain->dma_buf_semaphore;
1511 has_signal_dma_buf = true;
1512 } else if (result == VK_ERROR_FEATURE_NOT_PRESENT) {
1513 result = VK_SUCCESS;
1514 has_signal_dma_buf = false;
1515 } else {
1516 goto fail_present;
1517 }
1518 #endif
1519
1520 if (!has_signal_dma_buf) {
1521 /* If we don't have dma-buf signaling, signal the memory object by
1522 * chaining wsi_memory_signal_submit_info into VkSubmitInfo.
1523 */
1524 result = VK_SUCCESS;
1525 has_signal_dma_buf = false;
1526 mem_signal = (struct wsi_memory_signal_submit_info) {
1527 .sType = VK_STRUCTURE_TYPE_WSI_MEMORY_SIGNAL_SUBMIT_INFO_MESA,
1528 .memory = image->memory,
1529 };
1530 __vk_append_struct(&submit_info, &mem_signal);
1531 }
1532 }
1533
1534 result = wsi->QueueSubmit(submit_queue, 1, &submit_info, fence);
1535 if (result != VK_SUCCESS)
1536 goto fail_present;
1537
1538 /* The app can only submit images they have acquired. */
1539 assert(image->acquired);
1540 image->acquired = false;
1541 image->present_serial = ++swapchain->present_serial;
1542
1543 if (!explicit_sync) {
1544 #ifdef HAVE_LIBDRM
1545 if (has_signal_dma_buf) {
1546 result = wsi_signal_dma_buf_from_semaphore(swapchain, image);
1547 if (result != VK_SUCCESS)
1548 goto fail_present;
1549 }
1550 #else
1551 assert(!has_signal_dma_buf);
1552 #endif
1553 }
1554
1555 if (wsi->sw)
1556 wsi->WaitForFences(device, 1, &swapchain->fences[image_index],
1557 true, ~0ull);
1558
1559 const VkPresentRegionKHR *region = NULL;
1560 if (regions && regions->pRegions)
1561 region = ®ions->pRegions[i];
1562
1563 uint64_t present_id = 0;
1564 if (present_ids && present_ids->pPresentIds)
1565 present_id = present_ids->pPresentIds[i];
1566 VkFence present_fence = VK_NULL_HANDLE;
1567 if (present_fence_info && present_fence_info->pFences)
1568 present_fence = present_fence_info->pFences[i];
1569
1570 if (present_id || present_fence) {
1571 result = wsi_signal_present_id_timeline(swapchain, queue, present_id, present_fence);
1572 if (result != VK_SUCCESS)
1573 goto fail_present;
1574 }
1575
1576 result = swapchain->queue_present(swapchain, image_index, present_id, region);
1577 if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR)
1578 goto fail_present;
1579
1580 if (wsi->set_memory_ownership) {
1581 VkDeviceMemory mem = swapchain->get_wsi_image(swapchain, image_index)->memory;
1582 wsi->set_memory_ownership(swapchain->device, mem, false);
1583 }
1584
1585 fail_present:
1586 if (pPresentInfo->pResults != NULL)
1587 pPresentInfo->pResults[i] = result;
1588
1589 /* Let the final result be our first unsuccessful result */
1590 if (final_result == VK_SUCCESS)
1591 final_result = result;
1592 }
1593
1594 STACK_ARRAY_FINISH(stage_flags);
1595
1596 return final_result;
1597 }
1598
1599 VKAPI_ATTR VkResult VKAPI_CALL
wsi_QueuePresentKHR(VkQueue _queue,const VkPresentInfoKHR * pPresentInfo)1600 wsi_QueuePresentKHR(VkQueue _queue, const VkPresentInfoKHR *pPresentInfo)
1601 {
1602 MESA_TRACE_FUNC();
1603 VK_FROM_HANDLE(vk_queue, queue, _queue);
1604
1605 return wsi_common_queue_present(queue->base.device->physical->wsi_device,
1606 vk_device_to_handle(queue->base.device),
1607 _queue,
1608 queue->queue_family_index,
1609 pPresentInfo);
1610 }
1611
VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetDeviceGroupPresentCapabilitiesKHR(VkDevice device,
                                         VkDeviceGroupPresentCapabilitiesKHR *pCapabilities)
{
   /* WSI only supports single-device presentation: report physical device 0
    * as the sole presenter and only the LOCAL device-group present mode. */
   memset(pCapabilities->presentMask, 0,
          sizeof(pCapabilities->presentMask));
   pCapabilities->presentMask[0] = 0x1;
   pCapabilities->modes = VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_BIT_KHR;

   return VK_SUCCESS;
}
1623
VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetDeviceGroupSurfacePresentModesKHR(VkDevice device,
                                         VkSurfaceKHR surface,
                                         VkDeviceGroupPresentModeFlagsKHR *pModes)
{
   /* Matches wsi_GetDeviceGroupPresentCapabilitiesKHR: only LOCAL
    * presentation is supported, for any surface. */
   *pModes = VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_BIT_KHR;

   return VK_SUCCESS;
}
1633
1634 bool
wsi_common_vk_instance_supports_present_wait(const struct vk_instance * instance)1635 wsi_common_vk_instance_supports_present_wait(const struct vk_instance *instance)
1636 {
1637 /* We can only expose KHR_present_wait and KHR_present_id
1638 * if we are guaranteed support on all potential VkSurfaceKHR objects. */
1639 if (instance->enabled_extensions.KHR_win32_surface ||
1640 instance->enabled_extensions.KHR_android_surface) {
1641 return false;
1642 }
1643
1644 return true;
1645 }
1646
VkResult
wsi_common_create_swapchain_image(const struct wsi_device *wsi,
                                  const VkImageCreateInfo *pCreateInfo,
                                  VkSwapchainKHR _swapchain,
                                  VkImage *pImage)
{
   /* Create a VkImage for VK_KHR_swapchain's image-from-swapchain path.
    * The app-provided pCreateInfo is only validated (debug builds) against
    * the swapchain's canonical create info; the actual image is always
    * created from chain->image_info.create so it matches the swapchain's
    * images exactly.
    */
   VK_FROM_HANDLE(wsi_swapchain, chain, _swapchain);

#ifndef NDEBUG
   /* Debug-only validation: the app's create info must be compatible with
    * the swapchain's own image create info. */
   const VkImageCreateInfo *swcInfo = &chain->image_info.create;
   assert(pCreateInfo->flags == 0);
   assert(pCreateInfo->imageType == swcInfo->imageType);
   assert(pCreateInfo->format == swcInfo->format);
   assert(pCreateInfo->extent.width == swcInfo->extent.width);
   assert(pCreateInfo->extent.height == swcInfo->extent.height);
   assert(pCreateInfo->extent.depth == swcInfo->extent.depth);
   assert(pCreateInfo->mipLevels == swcInfo->mipLevels);
   assert(pCreateInfo->arrayLayers == swcInfo->arrayLayers);
   assert(pCreateInfo->samples == swcInfo->samples);
   assert(pCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL);
   /* The app may request a subset of the swapchain's usage, never more. */
   assert(!(pCreateInfo->usage & ~swcInfo->usage));

   vk_foreach_struct_const(ext, pCreateInfo->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO: {
         /* Every view format the app lists must also appear in the
          * swapchain's format list. */
         const VkImageFormatListCreateInfo *iflci =
            (const VkImageFormatListCreateInfo *)ext;
         const VkImageFormatListCreateInfo *swc_iflci =
            &chain->image_info.format_list;

         for (uint32_t i = 0; i < iflci->viewFormatCount; i++) {
            bool found = false;
            for (uint32_t j = 0; j < swc_iflci->viewFormatCount; j++) {
               if (iflci->pViewFormats[i] == swc_iflci->pViewFormats[j]) {
                  found = true;
                  break;
               }
            }
            assert(found);
         }
         break;
      }

      case VK_STRUCTURE_TYPE_IMAGE_SWAPCHAIN_CREATE_INFO_KHR:
         break;

      default:
         assert(!"Unsupported image create extension");
      }
   }
#endif

   /* Note: the image is created from the swapchain's canonical create
    * info, not the app's pCreateInfo. */
   return wsi->CreateImage(chain->device, &chain->image_info.create,
                           &chain->alloc, pImage);
}
1702
1703 VkResult
wsi_common_bind_swapchain_image(const struct wsi_device * wsi,VkImage vk_image,VkSwapchainKHR _swapchain,uint32_t image_idx)1704 wsi_common_bind_swapchain_image(const struct wsi_device *wsi,
1705 VkImage vk_image,
1706 VkSwapchainKHR _swapchain,
1707 uint32_t image_idx)
1708 {
1709 VK_FROM_HANDLE(wsi_swapchain, chain, _swapchain);
1710 struct wsi_image *image = chain->get_wsi_image(chain, image_idx);
1711
1712 return wsi->BindImageMemory(chain->device, vk_image, image->memory, 0);
1713 }
1714
1715 VkResult
wsi_swapchain_wait_for_present_semaphore(const struct wsi_swapchain * chain,uint64_t present_id,uint64_t timeout)1716 wsi_swapchain_wait_for_present_semaphore(const struct wsi_swapchain *chain,
1717 uint64_t present_id, uint64_t timeout)
1718 {
1719 assert(chain->present_id_timeline);
1720 const VkSemaphoreWaitInfo wait_info = {
1721 .sType = VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO,
1722 .semaphoreCount = 1,
1723 .pSemaphores = &chain->present_id_timeline,
1724 .pValues = &present_id,
1725 };
1726
1727 return chain->wsi->WaitSemaphores(chain->device, &wait_info, timeout);
1728 }
1729
1730 uint32_t
wsi_select_memory_type(const struct wsi_device * wsi,VkMemoryPropertyFlags req_props,VkMemoryPropertyFlags deny_props,uint32_t type_bits)1731 wsi_select_memory_type(const struct wsi_device *wsi,
1732 VkMemoryPropertyFlags req_props,
1733 VkMemoryPropertyFlags deny_props,
1734 uint32_t type_bits)
1735 {
1736 assert(type_bits != 0);
1737
1738 VkMemoryPropertyFlags common_props = ~0;
1739 u_foreach_bit(t, type_bits) {
1740 const VkMemoryType type = wsi->memory_props.memoryTypes[t];
1741
1742 common_props &= type.propertyFlags;
1743
1744 if (deny_props & type.propertyFlags)
1745 continue;
1746
1747 if (!(req_props & ~type.propertyFlags))
1748 return t;
1749 }
1750
1751 if ((deny_props & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) &&
1752 (common_props & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) {
1753 /* If they asked for non-device-local and all the types are device-local
1754 * (this is commonly true for UMA platforms), try again without denying
1755 * device-local types
1756 */
1757 deny_props &= ~VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
1758 return wsi_select_memory_type(wsi, req_props, deny_props, type_bits);
1759 }
1760
1761 unreachable("No memory type found");
1762 }
1763
1764 uint32_t
wsi_select_device_memory_type(const struct wsi_device * wsi,uint32_t type_bits)1765 wsi_select_device_memory_type(const struct wsi_device *wsi,
1766 uint32_t type_bits)
1767 {
1768 return wsi_select_memory_type(wsi, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
1769 0 /* deny_props */, type_bits);
1770 }
1771
1772 static uint32_t
wsi_select_host_memory_type(const struct wsi_device * wsi,uint32_t type_bits)1773 wsi_select_host_memory_type(const struct wsi_device *wsi,
1774 uint32_t type_bits)
1775 {
1776 return wsi_select_memory_type(wsi, VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
1777 0 /* deny_props */, type_bits);
1778 }
1779
VkResult
wsi_create_buffer_blit_context(const struct wsi_swapchain *chain,
                               const struct wsi_image_info *info,
                               struct wsi_image *image,
                               VkExternalMemoryHandleTypeFlags handle_types)
{
   /* Set up the buffer-blit presentation path for one swapchain image:
    * create a linear staging buffer of info->linear_size bytes, allocate
    * (or import) its memory and bind it, then allocate dedicated memory
    * for the presentable image itself.
    *
    * handle_types: external-memory handle types the buffer memory should
    * be exportable as (0 for none).
    *
    * NOTE(review): objects created before a failing call are not destroyed
    * here; presumably the swapchain's image teardown frees them — verify
    * against the destroy path.
    */
   assert(chain->blit.type == WSI_SWAPCHAIN_BUFFER_BLIT);

   const struct wsi_device *wsi = chain->wsi;
   VkResult result;

   /* The staging buffer is only ever a transfer destination: the recorded
    * blit copies the presentable image into it at present time.
    */
   const VkExternalMemoryBufferCreateInfo buffer_external_info = {
      .sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO,
      .pNext = NULL,
      .handleTypes = handle_types,
   };
   const VkBufferCreateInfo buffer_info = {
      .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
      .pNext = &buffer_external_info,
      .size = info->linear_size,
      .usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT,
      .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
   };
   result = wsi->CreateBuffer(chain->device, &buffer_info,
                              &chain->alloc, &image->blit.buffer);
   if (result != VK_SUCCESS)
      return result;

   VkMemoryRequirements reqs;
   wsi->GetBufferMemoryRequirements(chain->device, image->blit.buffer, &reqs);
   /* linear_size was computed/aligned by wsi_configure_buffer_image, so a
    * plain linear buffer must not need more than that.
    */
   assert(reqs.size <= info->linear_size);

   /* Request implicit (kernel-side) synchronization for DRM images that
    * don't use explicit sync.
    */
   struct wsi_memory_allocate_info memory_wsi_info = {
      .sType = VK_STRUCTURE_TYPE_WSI_MEMORY_ALLOCATE_INFO_MESA,
      .pNext = NULL,
      .implicit_sync = info->image_type == WSI_IMAGE_TYPE_DRM &&
                       !info->explicit_sync,
   };
   VkMemoryDedicatedAllocateInfo buf_mem_dedicated_info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO,
      .pNext = &memory_wsi_info,
      .image = VK_NULL_HANDLE,
      .buffer = image->blit.buffer,
   };
   VkMemoryAllocateInfo buf_mem_info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
      .pNext = &buf_mem_dedicated_info,
      .allocationSize = info->linear_size,
      .memoryTypeIndex =
         info->select_blit_dst_memory_type(wsi, reqs.memoryTypeBits),
   };

   /* The winsys may supply the buffer storage itself via alloc_shm
    * (software paths); if so, import that storage as a host pointer
    * instead of exporting fresh device memory.
    */
   void *sw_host_ptr = NULL;
   if (info->alloc_shm)
      sw_host_ptr = info->alloc_shm(image, info->linear_size);

   /* Both structs must stay in scope until AllocateMemory below consumes
    * the pNext chain they may be appended to.
    */
   VkExportMemoryAllocateInfo memory_export_info;
   VkImportMemoryHostPointerInfoEXT host_ptr_info;
   if (sw_host_ptr != NULL) {
      host_ptr_info = (VkImportMemoryHostPointerInfoEXT) {
         .sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT,
         .pHostPointer = sw_host_ptr,
         .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT,
      };
      __vk_append_struct(&buf_mem_info, &host_ptr_info);
   } else if (handle_types != 0) {
      memory_export_info = (VkExportMemoryAllocateInfo) {
         .sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO,
         .handleTypes = handle_types,
      };
      __vk_append_struct(&buf_mem_info, &memory_export_info);
   }

   result = wsi->AllocateMemory(chain->device, &buf_mem_info,
                                &chain->alloc, &image->blit.memory);
   if (result != VK_SUCCESS)
      return result;

   result = wsi->BindBufferMemory(chain->device, image->blit.buffer,
                                  image->blit.memory, 0);
   if (result != VK_SUCCESS)
      return result;

   /* Now allocate dedicated memory for the image the client renders to,
    * using the caller-selected (typically device-optimal) memory type.
    */
   wsi->GetImageMemoryRequirements(chain->device, image->image, &reqs);

   const VkMemoryDedicatedAllocateInfo memory_dedicated_info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO,
      .pNext = NULL,
      .image = image->image,
      .buffer = VK_NULL_HANDLE,
   };
   const VkMemoryAllocateInfo memory_info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
      .pNext = &memory_dedicated_info,
      .allocationSize = reqs.size,
      .memoryTypeIndex =
         info->select_image_memory_type(wsi, reqs.memoryTypeBits),
   };

   result = wsi->AllocateMemory(chain->device, &memory_info,
                                &chain->alloc, &image->memory);
   if (result != VK_SUCCESS)
      return result;

   /* The winsys consumes the staging buffer as one tightly-packed plane. */
   image->num_planes = 1;
   image->sizes[0] = info->linear_size;
   image->row_pitches[0] = info->linear_stride;
   image->offsets[0] = 0;

   return VK_SUCCESS;
}
1891
1892 VkResult
wsi_finish_create_blit_context(const struct wsi_swapchain * chain,const struct wsi_image_info * info,struct wsi_image * image)1893 wsi_finish_create_blit_context(const struct wsi_swapchain *chain,
1894 const struct wsi_image_info *info,
1895 struct wsi_image *image)
1896 {
1897 const struct wsi_device *wsi = chain->wsi;
1898 VkResult result;
1899
1900 int cmd_buffer_count =
1901 chain->blit.queue != VK_NULL_HANDLE ? 1 : wsi->queue_family_count;
1902 image->blit.cmd_buffers =
1903 vk_zalloc(&chain->alloc,
1904 sizeof(VkCommandBuffer) * cmd_buffer_count, 8,
1905 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1906 if (!image->blit.cmd_buffers)
1907 return VK_ERROR_OUT_OF_HOST_MEMORY;
1908
1909 for (uint32_t i = 0; i < cmd_buffer_count; i++) {
1910 if (!chain->cmd_pools[i])
1911 continue;
1912
1913 const VkCommandBufferAllocateInfo cmd_buffer_info = {
1914 .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
1915 .pNext = NULL,
1916 .commandPool = chain->cmd_pools[i],
1917 .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
1918 .commandBufferCount = 1,
1919 };
1920 result = wsi->AllocateCommandBuffers(chain->device, &cmd_buffer_info,
1921 &image->blit.cmd_buffers[i]);
1922 if (result != VK_SUCCESS)
1923 return result;
1924
1925 const VkCommandBufferBeginInfo begin_info = {
1926 .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
1927 };
1928 wsi->BeginCommandBuffer(image->blit.cmd_buffers[i], &begin_info);
1929
1930 VkImageMemoryBarrier img_mem_barriers[] = {
1931 {
1932 .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
1933 .pNext = NULL,
1934 .srcAccessMask = 0,
1935 .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT,
1936 .oldLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
1937 .newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
1938 .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
1939 .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
1940 .image = image->image,
1941 .subresourceRange = {
1942 .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
1943 .baseMipLevel = 0,
1944 .levelCount = 1,
1945 .baseArrayLayer = 0,
1946 .layerCount = 1,
1947 },
1948 },
1949 {
1950 .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
1951 .pNext = NULL,
1952 .srcAccessMask = 0,
1953 .dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
1954 .oldLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
1955 .newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1956 .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
1957 .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
1958 .image = image->blit.image,
1959 .subresourceRange = {
1960 .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
1961 .baseMipLevel = 0,
1962 .levelCount = 1,
1963 .baseArrayLayer = 0,
1964 .layerCount = 1,
1965 },
1966 },
1967 };
1968 uint32_t img_mem_barrier_count =
1969 chain->blit.type == WSI_SWAPCHAIN_BUFFER_BLIT ? 1 : 2;
1970 wsi->CmdPipelineBarrier(image->blit.cmd_buffers[i],
1971 VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
1972 VK_PIPELINE_STAGE_TRANSFER_BIT,
1973 0,
1974 0, NULL,
1975 0, NULL,
1976 1, img_mem_barriers);
1977
1978 if (chain->blit.type == WSI_SWAPCHAIN_BUFFER_BLIT) {
1979 struct VkBufferImageCopy buffer_image_copy = {
1980 .bufferOffset = 0,
1981 .bufferRowLength = info->linear_stride /
1982 vk_format_get_blocksize(info->create.format),
1983 .bufferImageHeight = 0,
1984 .imageSubresource = {
1985 .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
1986 .mipLevel = 0,
1987 .baseArrayLayer = 0,
1988 .layerCount = 1,
1989 },
1990 .imageOffset = { .x = 0, .y = 0, .z = 0 },
1991 .imageExtent = info->create.extent,
1992 };
1993 wsi->CmdCopyImageToBuffer(image->blit.cmd_buffers[i],
1994 image->image,
1995 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
1996 image->blit.buffer,
1997 1, &buffer_image_copy);
1998 } else {
1999 struct VkImageCopy image_copy = {
2000 .srcSubresource = {
2001 .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
2002 .mipLevel = 0,
2003 .baseArrayLayer = 0,
2004 .layerCount = 1,
2005 },
2006 .srcOffset = { .x = 0, .y = 0, .z = 0 },
2007 .dstSubresource = {
2008 .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
2009 .mipLevel = 0,
2010 .baseArrayLayer = 0,
2011 .layerCount = 1,
2012 },
2013 .dstOffset = { .x = 0, .y = 0, .z = 0 },
2014 .extent = info->create.extent,
2015 };
2016
2017 wsi->CmdCopyImage(image->blit.cmd_buffers[i],
2018 image->image,
2019 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2020 image->blit.image,
2021 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2022 1, &image_copy);
2023 }
2024
2025 img_mem_barriers[0].srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
2026 img_mem_barriers[0].dstAccessMask = 0;
2027 img_mem_barriers[0].oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
2028 img_mem_barriers[0].newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
2029 img_mem_barriers[1].srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
2030 img_mem_barriers[1].dstAccessMask = 0;
2031 img_mem_barriers[1].oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
2032 img_mem_barriers[1].newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
2033 wsi->CmdPipelineBarrier(image->blit.cmd_buffers[i],
2034 VK_PIPELINE_STAGE_TRANSFER_BIT,
2035 VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
2036 0,
2037 0, NULL,
2038 0, NULL,
2039 img_mem_barrier_count, img_mem_barriers);
2040
2041 result = wsi->EndCommandBuffer(image->blit.cmd_buffers[i]);
2042 if (result != VK_SUCCESS)
2043 return result;
2044 }
2045
2046 return VK_SUCCESS;
2047 }
2048
2049 void
wsi_configure_buffer_image(UNUSED const struct wsi_swapchain * chain,const VkSwapchainCreateInfoKHR * pCreateInfo,uint32_t stride_align,uint32_t size_align,struct wsi_image_info * info)2050 wsi_configure_buffer_image(UNUSED const struct wsi_swapchain *chain,
2051 const VkSwapchainCreateInfoKHR *pCreateInfo,
2052 uint32_t stride_align, uint32_t size_align,
2053 struct wsi_image_info *info)
2054 {
2055 const struct wsi_device *wsi = chain->wsi;
2056
2057 assert(util_is_power_of_two_nonzero(stride_align));
2058 assert(util_is_power_of_two_nonzero(size_align));
2059
2060 info->create.usage |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
2061 info->wsi.blit_src = true;
2062
2063 const uint32_t cpp = vk_format_get_blocksize(pCreateInfo->imageFormat);
2064 info->linear_stride = pCreateInfo->imageExtent.width * cpp;
2065 info->linear_stride = align(info->linear_stride, stride_align);
2066
2067 /* Since we can pick the stride to be whatever we want, also align to the
2068 * device's optimalBufferCopyRowPitchAlignment so we get efficient copies.
2069 */
2070 assert(wsi->optimalBufferCopyRowPitchAlignment > 0);
2071 info->linear_stride = align(info->linear_stride,
2072 wsi->optimalBufferCopyRowPitchAlignment);
2073
2074 info->linear_size = (uint64_t)info->linear_stride *
2075 pCreateInfo->imageExtent.height;
2076 info->linear_size = align64(info->linear_size, size_align);
2077
2078 info->finish_create = wsi_finish_create_blit_context;
2079 }
2080
2081 void
wsi_configure_image_blit_image(UNUSED const struct wsi_swapchain * chain,struct wsi_image_info * info)2082 wsi_configure_image_blit_image(UNUSED const struct wsi_swapchain *chain,
2083 struct wsi_image_info *info)
2084 {
2085 info->create.usage |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
2086 info->wsi.blit_src = true;
2087 info->finish_create = wsi_finish_create_blit_context;
2088 }
2089
static VkResult
wsi_create_cpu_linear_image_mem(const struct wsi_swapchain *chain,
                                const struct wsi_image_info *info,
                                struct wsi_image *image)
{
   /* Allocate host-coherent memory for a linear image and map it so the
    * CPU winsys can read the pixel data directly (no-blit path).
    */
   const struct wsi_device *wsi = chain->wsi;
   VkResult result;

   VkMemoryRequirements reqs;
   wsi->GetImageMemoryRequirements(chain->device, image->image, &reqs);

   /* The color plane must start at offset 0 so the mapping below points
    * straight at the pixels.
    */
   VkSubresourceLayout layout;
   wsi->GetImageSubresourceLayout(chain->device, image->image,
                                  &(VkImageSubresource) {
                                     .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
                                     .mipLevel = 0,
                                     .arrayLayer = 0,
                                  }, &layout);
   assert(layout.offset == 0);

   const VkMemoryDedicatedAllocateInfo memory_dedicated_info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO,
      .image = image->image,
      .buffer = VK_NULL_HANDLE,
   };
   VkMemoryAllocateInfo memory_info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
      .pNext = &memory_dedicated_info,
      .allocationSize = reqs.size,
      .memoryTypeIndex =
         wsi_select_host_memory_type(wsi, reqs.memoryTypeBits),
   };

   /* The winsys may supply the image storage itself via alloc_shm; if so,
    * import it as a host pointer instead of allocating fresh memory.
    */
   void *sw_host_ptr = NULL;
   if (info->alloc_shm)
      sw_host_ptr = info->alloc_shm(image, layout.size);

   /* Must stay in scope until AllocateMemory consumes the pNext chain. */
   VkImportMemoryHostPointerInfoEXT host_ptr_info;
   if (sw_host_ptr != NULL) {
      host_ptr_info = (VkImportMemoryHostPointerInfoEXT) {
         .sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT,
         .pHostPointer = sw_host_ptr,
         .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT,
      };
      __vk_append_struct(&memory_info, &host_ptr_info);
   }

   result = wsi->AllocateMemory(chain->device, &memory_info,
                                &chain->alloc, &image->memory);
   if (result != VK_SUCCESS)
      return result;

   result = wsi->MapMemory(chain->device, image->memory,
                           0, VK_WHOLE_SIZE, 0, &image->cpu_map);
   if (result != VK_SUCCESS)
      return result;

   /* Single linear plane; pitch comes from the queried subresource layout. */
   image->num_planes = 1;
   image->sizes[0] = reqs.size;
   image->row_pitches[0] = layout.rowPitch;
   image->offsets[0] = 0;

   return VK_SUCCESS;
}
2154
2155 static VkResult
wsi_create_cpu_buffer_image_mem(const struct wsi_swapchain * chain,const struct wsi_image_info * info,struct wsi_image * image)2156 wsi_create_cpu_buffer_image_mem(const struct wsi_swapchain *chain,
2157 const struct wsi_image_info *info,
2158 struct wsi_image *image)
2159 {
2160 VkResult result;
2161
2162 result = wsi_create_buffer_blit_context(chain, info, image, 0);
2163 if (result != VK_SUCCESS)
2164 return result;
2165
2166 result = chain->wsi->MapMemory(chain->device, image->blit.memory,
2167 0, VK_WHOLE_SIZE, 0, &image->cpu_map);
2168 if (result != VK_SUCCESS)
2169 return result;
2170
2171 return VK_SUCCESS;
2172 }
2173
2174 bool
wsi_cpu_image_needs_buffer_blit(const struct wsi_device * wsi,const struct wsi_cpu_image_params * params)2175 wsi_cpu_image_needs_buffer_blit(const struct wsi_device *wsi,
2176 const struct wsi_cpu_image_params *params)
2177 {
2178 if (WSI_DEBUG & WSI_DEBUG_BUFFER)
2179 return true;
2180
2181 if (wsi->wants_linear)
2182 return false;
2183
2184 return true;
2185 }
2186
2187 VkResult
wsi_configure_cpu_image(const struct wsi_swapchain * chain,const VkSwapchainCreateInfoKHR * pCreateInfo,const struct wsi_cpu_image_params * params,struct wsi_image_info * info)2188 wsi_configure_cpu_image(const struct wsi_swapchain *chain,
2189 const VkSwapchainCreateInfoKHR *pCreateInfo,
2190 const struct wsi_cpu_image_params *params,
2191 struct wsi_image_info *info)
2192 {
2193 assert(params->base.image_type == WSI_IMAGE_TYPE_CPU);
2194 assert(chain->blit.type == WSI_SWAPCHAIN_NO_BLIT ||
2195 chain->blit.type == WSI_SWAPCHAIN_BUFFER_BLIT);
2196
2197 VkExternalMemoryHandleTypeFlags handle_types = 0;
2198 if (params->alloc_shm && chain->blit.type != WSI_SWAPCHAIN_NO_BLIT)
2199 handle_types = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT;
2200
2201 VkResult result = wsi_configure_image(chain, pCreateInfo,
2202 handle_types, info);
2203 if (result != VK_SUCCESS)
2204 return result;
2205
2206 if (chain->blit.type != WSI_SWAPCHAIN_NO_BLIT) {
2207 wsi_configure_buffer_image(chain, pCreateInfo,
2208 1 /* stride_align */,
2209 1 /* size_align */,
2210 info);
2211
2212 info->select_blit_dst_memory_type = wsi_select_host_memory_type;
2213 info->select_image_memory_type = wsi_select_device_memory_type;
2214 info->create_mem = wsi_create_cpu_buffer_image_mem;
2215 } else {
2216 /* Force the image to be linear */
2217 info->create.tiling = VK_IMAGE_TILING_LINEAR;
2218
2219 info->create_mem = wsi_create_cpu_linear_image_mem;
2220 }
2221
2222 info->alloc_shm = params->alloc_shm;
2223
2224 return VK_SUCCESS;
2225 }
2226
2227 VKAPI_ATTR VkResult VKAPI_CALL
wsi_WaitForPresentKHR(VkDevice device,VkSwapchainKHR _swapchain,uint64_t presentId,uint64_t timeout)2228 wsi_WaitForPresentKHR(VkDevice device, VkSwapchainKHR _swapchain,
2229 uint64_t presentId, uint64_t timeout)
2230 {
2231 VK_FROM_HANDLE(wsi_swapchain, swapchain, _swapchain);
2232 assert(swapchain->wait_for_present);
2233 return swapchain->wait_for_present(swapchain, presentId, timeout);
2234 }
2235
2236 VkImageUsageFlags
wsi_caps_get_image_usage(void)2237 wsi_caps_get_image_usage(void)
2238 {
2239 return VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
2240 VK_IMAGE_USAGE_SAMPLED_BIT |
2241 VK_IMAGE_USAGE_TRANSFER_DST_BIT |
2242 VK_IMAGE_USAGE_STORAGE_BIT |
2243 VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
2244 VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
2245 }
2246
2247 bool
wsi_device_supports_explicit_sync(struct wsi_device * device)2248 wsi_device_supports_explicit_sync(struct wsi_device *device)
2249 {
2250 return !device->sw && device->has_timeline_semaphore &&
2251 (device->timeline_semaphore_export_handle_types &
2252 VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT);
2253 }
2254