/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <wayland-client.h>

#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <poll.h>
#include <sys/mman.h>
#include <sys/types.h>

#include "drm-uapi/drm_fourcc.h"

#include "vk_instance.h"
#include "vk_device.h"
#include "vk_physical_device.h"
#include "vk_util.h"
#include "wsi_common_entrypoints.h"
#include "wsi_common_private.h"
#include "linux-dmabuf-unstable-v1-client-protocol.h"
#include "presentation-time-client-protocol.h"
#include "linux-drm-syncobj-v1-client-protocol.h"
#include "tearing-control-v1-client-protocol.h"

#include <util/cnd_monotonic.h>
#include <util/compiler.h>
#include <util/hash_table.h>
#include <util/timespec.h>
#include <util/u_endian.h>
#include <util/u_vector.h>
#include <util/u_dynarray.h>
#include <util/anon_file.h>
#include <util/os_time.h>

#include <loader/loader_wayland_helper.h>

#ifdef MAJOR_IN_MKDEV
#include <sys/mkdev.h>
#endif
#ifdef MAJOR_IN_SYSMACROS
#include <sys/sysmacros.h>
#endif

struct wsi_wayland;

struct wsi_wl_format {
   VkFormat vk_format;
   uint32_t flags; /* bitmask of enum wsi_wl_fmt_flag */
   struct u_vector modifiers;
};

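/* Local copy of the format table advertised through the
 * zwp_linux_dmabuf_feedback_v1.format_table event: an mmap'ed array of
 * 16-byte (format, modifier) entries that tranche_formats events address
 * with uint16 indices. */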
struct dmabuf_feedback_format_table {
   unsigned int size;
   struct {
      uint32_t format;
      uint32_t padding; /* unused */
      uint64_t modifier;
   } *data;
};

struct dmabuf_feedback_tranche {
   dev_t target_device;
   uint32_t flags;
   struct u_vector formats;
};

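/* Feedback events accumulate into pending_tranche until a tranche_done
 * event moves it into the tranches array; the done event then publishes
 * the complete feedback object. */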
struct dmabuf_feedback {
   dev_t main_device;
   struct dmabuf_feedback_format_table format_table;
   struct util_dynarray tranches;
   struct dmabuf_feedback_tranche pending_tranche;
};

struct wsi_wl_display {
   /* The real wl_display */
   struct wl_display *wl_display;
   /* Actually a proxy wrapper around the event queue */
   struct wl_display *wl_display_wrapper;
   struct wl_event_queue *queue;

   struct wl_shm *wl_shm;
   struct zwp_linux_dmabuf_v1 *wl_dmabuf;
   struct zwp_linux_dmabuf_feedback_v1 *wl_dmabuf_feedback;
   struct wp_tearing_control_manager_v1 *tearing_control_manager;
   struct wp_linux_drm_syncobj_manager_v1 *wl_syncobj;

   struct dmabuf_feedback_format_table format_table;

   /* users want per-chain wsi_wl_swapchain->present_ids.wp_presentation */
   struct wp_presentation *wp_presentation_notwrapped;

   struct wsi_wayland *wsi_wl;

   /* Formats populated by zwp_linux_dmabuf_v1 or wl_shm interfaces */
   struct u_vector formats;

   bool sw;

   dev_t main_device;
   bool same_gpu;
};

struct wsi_wayland {
   struct wsi_interface base;

   struct wsi_device *wsi;

   const VkAllocationCallbacks *alloc;
   VkPhysicalDevice physical_device;
};

struct wsi_wl_image {
   struct wsi_image base;
   struct wl_buffer *buffer;
   bool busy;
   int shm_fd;
   void *shm_ptr;
   unsigned shm_size;
   uint64_t flow_id;

   struct wp_linux_drm_syncobj_timeline_v1 *wl_syncobj_timeline[WSI_ES_COUNT];
};

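/* How presentable images are turned into wl_buffers: NATIVE shares GPU
 * memory directly through zwp_linux_dmabuf_v1, while the SHM variants back
 * the wl_buffer with wl_shm memory (see shm_fd/shm_ptr above) that is
 * either written by the GPU or filled with a CPU memcpy. */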
enum wsi_wl_buffer_type {
   WSI_WL_BUFFER_NATIVE,
   WSI_WL_BUFFER_GPU_SHM,
   WSI_WL_BUFFER_SHM_MEMCPY,
};

struct wsi_wl_surface {
   VkIcdSurfaceWayland base;

   unsigned int chain_count;

   struct wsi_wl_swapchain *chain;
   struct wl_surface *surface;
   struct wsi_wl_display *display;

   /* This has no functional use, and is here only for perfetto */
   struct {
      char *latency_str;
      uint64_t presenting;
      uint64_t presentation_track_id;
   } analytics;

   struct zwp_linux_dmabuf_feedback_v1 *wl_dmabuf_feedback;
   struct dmabuf_feedback dmabuf_feedback, pending_dmabuf_feedback;

   struct wp_linux_drm_syncobj_surface_v1 *wl_syncobj_surface;
};

struct wsi_wl_swapchain {
   struct wsi_swapchain base;

   struct wsi_wl_surface *wsi_wl_surface;
   struct wp_tearing_control_v1 *tearing_control;

   struct wl_callback *frame;

   VkExtent2D extent;
   VkFormat vk_format;
   enum wsi_wl_buffer_type buffer_type;
   uint32_t drm_format;
   enum wl_shm_format shm_format;

   bool suboptimal;
   bool retired;

   uint32_t num_drm_modifiers;
   const uint64_t *drm_modifiers;

   VkPresentModeKHR present_mode;
   bool fifo_ready;

   struct {
      mtx_t lock; /* protects all members */
      uint64_t max_completed;
      struct wl_list outstanding_list;
      struct u_cnd_monotonic list_advanced;
      struct wl_event_queue *queue;
      struct wp_presentation *wp_presentation;
      /* Fallback when wp_presentation is not supported */
      struct wl_surface *surface;
      bool dispatch_in_progress;
   } present_ids;

   struct wsi_wl_image images[0];
};
VK_DEFINE_NONDISP_HANDLE_CASTS(wsi_wl_swapchain, base.base, VkSwapchainKHR,
                               VK_OBJECT_TYPE_SWAPCHAIN_KHR)

static bool
wsi_wl_use_explicit_sync(struct wsi_wl_display *display, struct wsi_device *device)
{
   return wsi_device_supports_explicit_sync(device) &&
          display->wl_syncobj != NULL;
}

enum wsi_wl_fmt_flag {
   WSI_WL_FMT_ALPHA = 1 << 0,
   WSI_WL_FMT_OPAQUE = 1 << 1,
};

static struct wsi_wl_format *
find_format(struct u_vector *formats, VkFormat format)
{
   struct wsi_wl_format *f;

   u_vector_foreach(f, formats)
      if (f->vk_format == format)
         return f;

   return NULL;
}

static char *
stringify_wayland_id(uint32_t id)
{
   char *out;

   if (asprintf(&out, "wl%u", id) < 0)
      return NULL;

   return out;
}

static struct wsi_wl_format *
wsi_wl_display_add_vk_format(struct wsi_wl_display *display,
                             struct u_vector *formats,
                             VkFormat format, uint32_t flags)
{
   assert(flags & (WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE));

   /* Don't add a format that's already in the list */
   struct wsi_wl_format *f = find_format(formats, format);
   if (f) {
      f->flags |= flags;
      return f;
   }

   /* Don't add formats that aren't renderable. */
   VkFormatProperties props;

   display->wsi_wl->wsi->GetPhysicalDeviceFormatProperties(display->wsi_wl->physical_device,
                                                           format, &props);
   if (!(props.optimalTilingFeatures & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT))
      return NULL;

   struct u_vector modifiers;
   if (!u_vector_init_pow2(&modifiers, 4, sizeof(uint64_t)))
      return NULL;

   f = u_vector_add(formats);
   if (!f) {
      u_vector_finish(&modifiers);
      return NULL;
   }

   f->vk_format = format;
   f->flags = flags;
   f->modifiers = modifiers;

   return f;
}

static void
wsi_wl_format_add_modifier(struct wsi_wl_format *format, uint64_t modifier)
{
   uint64_t *mod;

   if (modifier == DRM_FORMAT_MOD_INVALID)
      return;

   u_vector_foreach(mod, &format->modifiers)
      if (*mod == modifier)
         return;

   mod = u_vector_add(&format->modifiers);
   if (mod)
      *mod = modifier;
}

static void
wsi_wl_display_add_vk_format_modifier(struct wsi_wl_display *display,
                                      struct u_vector *formats,
                                      VkFormat vk_format, uint32_t flags,
                                      uint64_t modifier)
{
   struct wsi_wl_format *format;

   format = wsi_wl_display_add_vk_format(display, formats, vk_format, flags);
   if (format)
      wsi_wl_format_add_modifier(format, modifier);
}

static void
wsi_wl_display_add_drm_format_modifier(struct wsi_wl_display *display,
                                       struct u_vector *formats,
                                       uint32_t drm_format, uint64_t modifier)
{
   switch (drm_format) {
#if 0
   /* TODO: These are only available when VK_EXT_4444_formats is enabled, so
    * we probably need to make their use conditional on this extension. */
   case DRM_FORMAT_ARGB4444:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_A4R4G4B4_UNORM_PACK16,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_XRGB4444:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_A4R4G4B4_UNORM_PACK16,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
   case DRM_FORMAT_ABGR4444:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_A4B4G4R4_UNORM_PACK16,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_XBGR4444:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_A4B4G4R4_UNORM_PACK16,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
#endif

   /* Vulkan _PACKN formats have the same component order as DRM formats
    * on little-endian systems; on big-endian there is no analog. */
#if UTIL_ARCH_LITTLE_ENDIAN
   case DRM_FORMAT_RGBA4444:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R4G4B4A4_UNORM_PACK16,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_RGBX4444:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R4G4B4A4_UNORM_PACK16,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
   case DRM_FORMAT_BGRA4444:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_B4G4R4A4_UNORM_PACK16,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_BGRX4444:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_B4G4R4A4_UNORM_PACK16,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
   case DRM_FORMAT_RGB565:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R5G6B5_UNORM_PACK16,
                                            WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE,
                                            modifier);
      break;
   case DRM_FORMAT_BGR565:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_B5G6R5_UNORM_PACK16,
                                            WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE,
                                            modifier);
      break;
   case DRM_FORMAT_ARGB1555:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_A1R5G5B5_UNORM_PACK16,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_XRGB1555:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_A1R5G5B5_UNORM_PACK16,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
   case DRM_FORMAT_RGBA5551:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R5G5B5A1_UNORM_PACK16,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_RGBX5551:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R5G5B5A1_UNORM_PACK16,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
   case DRM_FORMAT_BGRA5551:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_B5G5R5A1_UNORM_PACK16,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_BGRX5551:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_B5G5R5A1_UNORM_PACK16,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
   case DRM_FORMAT_ARGB2101010:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_A2R10G10B10_UNORM_PACK32,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_XRGB2101010:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_A2R10G10B10_UNORM_PACK32,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
   case DRM_FORMAT_ABGR2101010:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_A2B10G10R10_UNORM_PACK32,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_XBGR2101010:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_A2B10G10R10_UNORM_PACK32,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;

   /* Vulkan 16-bits-per-channel formats have an inverted channel order
    * compared to DRM formats, just like the 8-bits-per-channel ones.
    * On little endian systems the memory representation of each channel
    * matches the DRM formats'. */
   case DRM_FORMAT_ABGR16161616:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R16G16B16A16_UNORM,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_XBGR16161616:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R16G16B16A16_UNORM,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
   case DRM_FORMAT_ABGR16161616F:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R16G16B16A16_SFLOAT,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_XBGR16161616F:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R16G16B16A16_SFLOAT,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
#endif

   /* Non-packed 8-bit formats have an inverted channel order compared to the
    * little endian DRM formats, because the DRM channel ordering is high->low
    * but the Vulkan channel ordering is in memory byte order.
    *
    * For all UNORM formats which have an SRGB variant, we must support both
    * if we can. SRGB in this context means that rendering to it will result
    * in a linear -> nonlinear SRGB colorspace conversion before the data is
    * stored. The inverse function is applied when sampling from SRGB images.
    * From Wayland's perspective nothing changes, the difference is just how
    * Vulkan interprets the pixel data. */
   case DRM_FORMAT_XBGR8888:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R8G8B8_SRGB,
                                            WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE,
                                            modifier);
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R8G8B8_UNORM,
                                            WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE,
                                            modifier);
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R8G8B8A8_SRGB,
                                            WSI_WL_FMT_OPAQUE, modifier);
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R8G8B8A8_UNORM,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
   case DRM_FORMAT_ABGR8888:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R8G8B8A8_SRGB,
                                            WSI_WL_FMT_ALPHA, modifier);
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R8G8B8A8_UNORM,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_XRGB8888:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_B8G8R8_SRGB,
                                            WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE,
                                            modifier);
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_B8G8R8_UNORM,
                                            WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE,
                                            modifier);
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_B8G8R8A8_SRGB,
                                            WSI_WL_FMT_OPAQUE, modifier);
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_B8G8R8A8_UNORM,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
   case DRM_FORMAT_ARGB8888:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_B8G8R8A8_SRGB,
                                            WSI_WL_FMT_ALPHA, modifier);
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_B8G8R8A8_UNORM,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   }
}

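/* wl_shm_format reserves the enum values 0 and 1 for ARGB8888 and
 * XRGB8888; every other wl_shm format code is numerically identical to the
 * corresponding DRM fourcc, so the conversions below only special-case
 * that pair. */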
static uint32_t
drm_format_for_wl_shm_format(enum wl_shm_format shm_format)
{
   /* wl_shm formats are identical to DRM, except ARGB8888 and XRGB8888 */
   switch (shm_format) {
   case WL_SHM_FORMAT_ARGB8888:
      return DRM_FORMAT_ARGB8888;
   case WL_SHM_FORMAT_XRGB8888:
      return DRM_FORMAT_XRGB8888;
   default:
      return shm_format;
   }
}

static void
wsi_wl_display_add_wl_shm_format(struct wsi_wl_display *display,
                                 struct u_vector *formats,
                                 enum wl_shm_format shm_format)
{
   uint32_t drm_format = drm_format_for_wl_shm_format(shm_format);

   wsi_wl_display_add_drm_format_modifier(display, formats, drm_format,
                                          DRM_FORMAT_MOD_INVALID);
}

static uint32_t
wl_drm_format_for_vk_format(VkFormat vk_format, bool alpha)
{
   switch (vk_format) {
#if 0
   case VK_FORMAT_A4R4G4B4_UNORM_PACK16:
      return alpha ? DRM_FORMAT_ARGB4444 : DRM_FORMAT_XRGB4444;
   case VK_FORMAT_A4B4G4R4_UNORM_PACK16:
      return alpha ? DRM_FORMAT_ABGR4444 : DRM_FORMAT_XBGR4444;
#endif
#if UTIL_ARCH_LITTLE_ENDIAN
   case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
      return alpha ? DRM_FORMAT_RGBA4444 : DRM_FORMAT_RGBX4444;
   case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
      return alpha ? DRM_FORMAT_BGRA4444 : DRM_FORMAT_BGRX4444;
   case VK_FORMAT_R5G6B5_UNORM_PACK16:
      return DRM_FORMAT_RGB565;
   case VK_FORMAT_B5G6R5_UNORM_PACK16:
      return DRM_FORMAT_BGR565;
   case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
      return alpha ? DRM_FORMAT_ARGB1555 : DRM_FORMAT_XRGB1555;
   case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
      return alpha ? DRM_FORMAT_RGBA5551 : DRM_FORMAT_RGBX5551;
   case VK_FORMAT_B5G5R5A1_UNORM_PACK16:
      return alpha ? DRM_FORMAT_BGRA5551 : DRM_FORMAT_BGRX5551;
   case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
      return alpha ? DRM_FORMAT_ARGB2101010 : DRM_FORMAT_XRGB2101010;
   case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
      return alpha ? DRM_FORMAT_ABGR2101010 : DRM_FORMAT_XBGR2101010;
   case VK_FORMAT_R16G16B16A16_UNORM:
      return alpha ? DRM_FORMAT_ABGR16161616 : DRM_FORMAT_XBGR16161616;
   case VK_FORMAT_R16G16B16A16_SFLOAT:
      return alpha ? DRM_FORMAT_ABGR16161616F : DRM_FORMAT_XBGR16161616F;
#endif
   case VK_FORMAT_R8G8B8_UNORM:
   case VK_FORMAT_R8G8B8_SRGB:
      return DRM_FORMAT_XBGR8888;
   case VK_FORMAT_R8G8B8A8_UNORM:
   case VK_FORMAT_R8G8B8A8_SRGB:
      return alpha ? DRM_FORMAT_ABGR8888 : DRM_FORMAT_XBGR8888;
   case VK_FORMAT_B8G8R8_UNORM:
   case VK_FORMAT_B8G8R8_SRGB:
      return DRM_FORMAT_BGRX8888;
   case VK_FORMAT_B8G8R8A8_UNORM:
   case VK_FORMAT_B8G8R8A8_SRGB:
      return alpha ? DRM_FORMAT_ARGB8888 : DRM_FORMAT_XRGB8888;

   default:
      assert(!"Unsupported Vulkan format");
      return DRM_FORMAT_INVALID;
   }
}

static enum wl_shm_format
wl_shm_format_for_vk_format(VkFormat vk_format, bool alpha)
{
   uint32_t drm_format = wl_drm_format_for_vk_format(vk_format, alpha);
   if (drm_format == DRM_FORMAT_INVALID) {
      return 0;
   }

   /* wl_shm formats are identical to DRM, except ARGB8888 and XRGB8888 */
   switch (drm_format) {
   case DRM_FORMAT_ARGB8888:
      return WL_SHM_FORMAT_ARGB8888;
   case DRM_FORMAT_XRGB8888:
      return WL_SHM_FORMAT_XRGB8888;
   default:
      return drm_format;
   }
}

static void
dmabuf_handle_format(void *data, struct zwp_linux_dmabuf_v1 *dmabuf,
                     uint32_t format)
{
   /* Formats are implicitly advertised by the modifier event, so we ignore
    * them here. */
}

static void
dmabuf_handle_modifier(void *data, struct zwp_linux_dmabuf_v1 *dmabuf,
                       uint32_t format, uint32_t modifier_hi,
                       uint32_t modifier_lo)
{
   struct wsi_wl_display *display = data;
   uint64_t modifier;

   /* Ignore this if the compositor advertised dma-buf feedback. From version 4
    * onwards (when dma-buf feedback was introduced), the compositor should not
    * advertise this event anymore, but let's keep this for safety. */
   if (display->wl_dmabuf_feedback)
      return;

   modifier = ((uint64_t) modifier_hi << 32) | modifier_lo;
   wsi_wl_display_add_drm_format_modifier(display, &display->formats,
                                          format, modifier);
}

static const struct zwp_linux_dmabuf_v1_listener dmabuf_listener = {
   dmabuf_handle_format,
   dmabuf_handle_modifier,
};

static void
dmabuf_feedback_format_table_fini(struct dmabuf_feedback_format_table *format_table)
{
   if (format_table->data && format_table->data != MAP_FAILED)
      munmap(format_table->data, format_table->size);
}

static void
dmabuf_feedback_format_table_init(struct dmabuf_feedback_format_table *format_table)
{
   memset(format_table, 0, sizeof(*format_table));
}

static void
dmabuf_feedback_tranche_fini(struct dmabuf_feedback_tranche *tranche)
{
   struct wsi_wl_format *format;

   u_vector_foreach(format, &tranche->formats)
      u_vector_finish(&format->modifiers);

   u_vector_finish(&tranche->formats);
}

static int
dmabuf_feedback_tranche_init(struct dmabuf_feedback_tranche *tranche)
{
   memset(tranche, 0, sizeof(*tranche));

   if (!u_vector_init(&tranche->formats, 8, sizeof(struct wsi_wl_format)))
      return -1;

   return 0;
}

static void
dmabuf_feedback_fini(struct dmabuf_feedback *dmabuf_feedback)
{
   dmabuf_feedback_tranche_fini(&dmabuf_feedback->pending_tranche);

   util_dynarray_foreach(&dmabuf_feedback->tranches,
                         struct dmabuf_feedback_tranche, tranche)
      dmabuf_feedback_tranche_fini(tranche);
   util_dynarray_fini(&dmabuf_feedback->tranches);

   dmabuf_feedback_format_table_fini(&dmabuf_feedback->format_table);
}

static int
dmabuf_feedback_init(struct dmabuf_feedback *dmabuf_feedback)
{
   memset(dmabuf_feedback, 0, sizeof(*dmabuf_feedback));

   if (dmabuf_feedback_tranche_init(&dmabuf_feedback->pending_tranche) < 0)
      return -1;

   util_dynarray_init(&dmabuf_feedback->tranches, NULL);

   dmabuf_feedback_format_table_init(&dmabuf_feedback->format_table);

   return 0;
}

static void
default_dmabuf_feedback_format_table(void *data,
                                     struct zwp_linux_dmabuf_feedback_v1 *zwp_linux_dmabuf_feedback_v1,
                                     int32_t fd, uint32_t size)
{
   struct wsi_wl_display *display = data;

   display->format_table.size = size;
   display->format_table.data = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);

   /* The mapping keeps its own reference to the file, so the fd can be
    * closed right away; a failed mmap() is caught later via the MAP_FAILED
    * checks before the table is dereferenced. */
   close(fd);
}

static void
default_dmabuf_feedback_main_device(void *data,
                                    struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
                                    struct wl_array *device)
{
   struct wsi_wl_display *display = data;

   assert(device->size == sizeof(dev_t));
   memcpy(&display->main_device, device->data, device->size);
}

static void
default_dmabuf_feedback_tranche_target_device(void *data,
                                              struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
                                              struct wl_array *device)
{
   /* ignore this event */
}

static void
default_dmabuf_feedback_tranche_flags(void *data,
                                      struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
                                      uint32_t flags)
{
   /* ignore this event */
}

static void
default_dmabuf_feedback_tranche_formats(void *data,
                                        struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
                                        struct wl_array *indices)
{
   struct wsi_wl_display *display = data;
   uint32_t format;
   uint64_t modifier;
   uint16_t *index;

   /* We couldn't map the format table or the compositor didn't advertise it,
    * so we have to ignore the feedback. */
   if (display->format_table.data == MAP_FAILED ||
       display->format_table.data == NULL)
      return;

   wl_array_for_each(index, indices) {
      format = display->format_table.data[*index].format;
      modifier = display->format_table.data[*index].modifier;
      wsi_wl_display_add_drm_format_modifier(display, &display->formats,
                                             format, modifier);
   }
}

static void
default_dmabuf_feedback_tranche_done(void *data,
                                     struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
{
   /* ignore this event */
}

static void
default_dmabuf_feedback_done(void *data,
                             struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
{
   /* ignore this event */
}

static const struct zwp_linux_dmabuf_feedback_v1_listener
dmabuf_feedback_listener = {
   .format_table = default_dmabuf_feedback_format_table,
   .main_device = default_dmabuf_feedback_main_device,
   .tranche_target_device = default_dmabuf_feedback_tranche_target_device,
   .tranche_flags = default_dmabuf_feedback_tranche_flags,
   .tranche_formats = default_dmabuf_feedback_tranche_formats,
   .tranche_done = default_dmabuf_feedback_tranche_done,
   .done = default_dmabuf_feedback_done,
};

static void
shm_handle_format(void *data, struct wl_shm *shm, uint32_t format)
{
   struct wsi_wl_display *display = data;

   wsi_wl_display_add_wl_shm_format(display, &display->formats, format);
}

static const struct wl_shm_listener shm_listener = {
   .format = shm_handle_format
};

static void
registry_handle_global(void *data, struct wl_registry *registry,
                       uint32_t name, const char *interface, uint32_t version)
{
   struct wsi_wl_display *display = data;

   if (display->sw) {
      if (strcmp(interface, wl_shm_interface.name) == 0) {
         display->wl_shm = wl_registry_bind(registry, name, &wl_shm_interface, 1);
         wl_shm_add_listener(display->wl_shm, &shm_listener, display);
      }
   } else {
      if (strcmp(interface, zwp_linux_dmabuf_v1_interface.name) == 0 && version >= 3) {
         display->wl_dmabuf =
            wl_registry_bind(registry, name, &zwp_linux_dmabuf_v1_interface,
                             MIN2(version, ZWP_LINUX_DMABUF_V1_GET_DEFAULT_FEEDBACK_SINCE_VERSION));
         zwp_linux_dmabuf_v1_add_listener(display->wl_dmabuf,
                                          &dmabuf_listener, display);
      } else if (strcmp(interface, wp_linux_drm_syncobj_manager_v1_interface.name) == 0) {
         display->wl_syncobj =
            wl_registry_bind(registry, name, &wp_linux_drm_syncobj_manager_v1_interface, 1);
      }
   }

   if (strcmp(interface, wp_presentation_interface.name) == 0) {
      display->wp_presentation_notwrapped =
         wl_registry_bind(registry, name, &wp_presentation_interface, 1);
   } else if (strcmp(interface, wp_tearing_control_manager_v1_interface.name) == 0) {
      display->tearing_control_manager =
         wl_registry_bind(registry, name, &wp_tearing_control_manager_v1_interface, 1);
   }
}

static void
registry_handle_global_remove(void *data, struct wl_registry *registry,
                              uint32_t name)
{ /* No-op */ }

static const struct wl_registry_listener registry_listener = {
   registry_handle_global,
   registry_handle_global_remove
};

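/* Tears down everything wsi_wl_display_init() set up. The wl_display
 * itself is owned by the application, so only our proxies, the proxy
 * wrapper and the event queue are destroyed here. */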
static void
wsi_wl_display_finish(struct wsi_wl_display *display)
{
   struct wsi_wl_format *f;
   u_vector_foreach(f, &display->formats)
      u_vector_finish(&f->modifiers);
   u_vector_finish(&display->formats);
   if (display->wl_shm)
      wl_shm_destroy(display->wl_shm);
   if (display->wl_syncobj)
      wp_linux_drm_syncobj_manager_v1_destroy(display->wl_syncobj);
   if (display->wl_dmabuf)
      zwp_linux_dmabuf_v1_destroy(display->wl_dmabuf);
   if (display->wp_presentation_notwrapped)
      wp_presentation_destroy(display->wp_presentation_notwrapped);
   if (display->tearing_control_manager)
      wp_tearing_control_manager_v1_destroy(display->tearing_control_manager);
   if (display->wl_display_wrapper)
      wl_proxy_wrapper_destroy(display->wl_display_wrapper);
   if (display->queue)
      wl_event_queue_destroy(display->queue);
}

static VkResult
wsi_wl_display_init(struct wsi_wayland *wsi_wl,
                    struct wsi_wl_display *display,
                    struct wl_display *wl_display,
                    bool get_format_list, bool sw,
                    const char *queue_name)
{
   VkResult result = VK_SUCCESS;
   memset(display, 0, sizeof(*display));

   if (!u_vector_init(&display->formats, 8, sizeof(struct wsi_wl_format)))
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   display->wsi_wl = wsi_wl;
   display->wl_display = wl_display;
   display->sw = sw;

   display->queue = wl_display_create_queue_with_name(wl_display, queue_name);
   if (!display->queue) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }

   display->wl_display_wrapper = wl_proxy_create_wrapper(wl_display);
   if (!display->wl_display_wrapper) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }

   wl_proxy_set_queue((struct wl_proxy *) display->wl_display_wrapper,
                      display->queue);

   struct wl_registry *registry =
      wl_display_get_registry(display->wl_display_wrapper);
   if (!registry) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }

   wl_registry_add_listener(registry, &registry_listener, display);

   /* Round-trip to get wl_shm and zwp_linux_dmabuf_v1 globals */
   wl_display_roundtrip_queue(display->wl_display, display->queue);
   if (!display->wl_dmabuf && !display->wl_shm) {
      result = VK_ERROR_SURFACE_LOST_KHR;
      goto fail_registry;
   }

   /* Caller doesn't expect us to query formats/modifiers, so return */
   if (!get_format_list)
      goto out;

   /* Default assumption */
   display->same_gpu = true;

   /* Get the default dma-buf feedback */
   if (display->wl_dmabuf && zwp_linux_dmabuf_v1_get_version(display->wl_dmabuf) >=
                             ZWP_LINUX_DMABUF_V1_GET_DEFAULT_FEEDBACK_SINCE_VERSION) {
      dmabuf_feedback_format_table_init(&display->format_table);
      display->wl_dmabuf_feedback =
         zwp_linux_dmabuf_v1_get_default_feedback(display->wl_dmabuf);
      zwp_linux_dmabuf_feedback_v1_add_listener(display->wl_dmabuf_feedback,
                                                &dmabuf_feedback_listener, display);

      /* Round-trip again to fetch dma-buf feedback */
      wl_display_roundtrip_queue(display->wl_display, display->queue);

      if (wsi_wl->wsi->drm_info.hasRender ||
          wsi_wl->wsi->drm_info.hasPrimary) {
         /* Apparently some Wayland compositors send the primary device node
          * rather than the render node, so test against both.
          */
         display->same_gpu =
            (wsi_wl->wsi->drm_info.hasRender &&
             major(display->main_device) == wsi_wl->wsi->drm_info.renderMajor &&
             minor(display->main_device) == wsi_wl->wsi->drm_info.renderMinor) ||
            (wsi_wl->wsi->drm_info.hasPrimary &&
             major(display->main_device) == wsi_wl->wsi->drm_info.primaryMajor &&
             minor(display->main_device) == wsi_wl->wsi->drm_info.primaryMinor);
      }
   }

   /* Round-trip again to get formats and modifiers */
   wl_display_roundtrip_queue(display->wl_display, display->queue);

   if (wsi_wl->wsi->force_bgra8_unorm_first) {
      /* If we can find BGRA8_UNORM in the list, swap it to the first
       * position. Some apps get confused if an SRGB format comes first.
       */
      struct wsi_wl_format *first_fmt = u_vector_tail(&display->formats);
      struct wsi_wl_format *f, tmp_fmt;
      f = find_format(&display->formats, VK_FORMAT_B8G8R8A8_UNORM);
      if (f) {
         tmp_fmt = *f;
         *f = *first_fmt;
         *first_fmt = tmp_fmt;
      }
   }

out:
   /* We don't need this anymore */
   wl_registry_destroy(registry);

   /* Destroy default dma-buf feedback object and format table */
   if (display->wl_dmabuf_feedback) {
      zwp_linux_dmabuf_feedback_v1_destroy(display->wl_dmabuf_feedback);
      display->wl_dmabuf_feedback = NULL;
      dmabuf_feedback_format_table_fini(&display->format_table);
   }

   return VK_SUCCESS;

fail_registry:
   if (registry)
      wl_registry_destroy(registry);

fail:
   wsi_wl_display_finish(display);
   return result;
}

static VkResult
wsi_wl_display_create(struct wsi_wayland *wsi, struct wl_display *wl_display,
                      bool sw,
                      struct wsi_wl_display **display_out)
{
   struct wsi_wl_display *display =
      vk_alloc(wsi->alloc, sizeof(*display), 8,
               VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!display)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   VkResult result = wsi_wl_display_init(wsi, display, wl_display, true,
                                         sw, "mesa vk display queue");
   if (result != VK_SUCCESS) {
      vk_free(wsi->alloc, display);
      return result;
   }

   *display_out = display;

   return result;
}

static void
wsi_wl_display_destroy(struct wsi_wl_display *display)
{
   struct wsi_wayland *wsi = display->wsi_wl;
   wsi_wl_display_finish(display);
   vk_free(wsi->alloc, display);
}

VKAPI_ATTR VkBool32 VKAPI_CALL
wsi_GetPhysicalDeviceWaylandPresentationSupportKHR(VkPhysicalDevice physicalDevice,
                                                   uint32_t queueFamilyIndex,
                                                   struct wl_display *wl_display)
{
   VK_FROM_HANDLE(vk_physical_device, pdevice, physicalDevice);
   struct wsi_device *wsi_device = pdevice->wsi_device;
   struct wsi_wayland *wsi =
      (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];

   if (!(wsi_device->queue_supports_blit & BITFIELD64_BIT(queueFamilyIndex)))
      return false;

   struct wsi_wl_display display;
   VkResult ret = wsi_wl_display_init(wsi, &display, wl_display, false,
                                      wsi_device->sw, "mesa presentation support query");
   if (ret == VK_SUCCESS)
      wsi_wl_display_finish(&display);

   return ret == VK_SUCCESS;
}

static VkResult
wsi_wl_surface_get_support(VkIcdSurfaceBase *surface,
                           struct wsi_device *wsi_device,
                           uint32_t queueFamilyIndex,
                           VkBool32* pSupported)
{
   *pSupported = true;

   return VK_SUCCESS;
}

static uint32_t
wsi_wl_surface_get_min_image_count(const VkSurfacePresentModeEXT *present_mode)
{
   if (present_mode && (present_mode->presentMode == VK_PRESENT_MODE_FIFO_KHR ||
                        present_mode->presentMode == VK_PRESENT_MODE_FIFO_RELAXED_KHR)) {
      /* If we receive a FIFO present mode, only 2 images are required for
       * forward progress. Performance with 2 images will be questionable,
       * but we only allow it for applications using the new API, so we
       * don't risk breaking any existing apps this way. Other ICDs expose
       * 2 images here already. */
      return 2;
   } else {
      /* For true mailbox mode, we need at least 4 images:
       * 1) One to scan out from
       * 2) One to have queued for scan-out
       * 3) One to be currently held by the Wayland compositor
       * 4) One to render to
       */
      return 4;
   }
}

static VkResult
wsi_wl_surface_get_capabilities(VkIcdSurfaceBase *surface,
                                struct wsi_device *wsi_device,
                                const VkSurfacePresentModeEXT *present_mode,
                                VkSurfaceCapabilitiesKHR* caps)
{
   caps->minImageCount = wsi_wl_surface_get_min_image_count(present_mode);
   /* There is no real maximum */
   caps->maxImageCount = 0;

   caps->currentExtent = (VkExtent2D) { UINT32_MAX, UINT32_MAX };
   caps->minImageExtent = (VkExtent2D) { 1, 1 };
   caps->maxImageExtent = (VkExtent2D) {
      wsi_device->maxImageDimension2D,
      wsi_device->maxImageDimension2D,
   };

   caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->maxImageArrayLayers = 1;

   caps->supportedCompositeAlpha =
      VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR |
      VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;

   caps->supportedUsageFlags = wsi_caps_get_image_usage();

   VK_FROM_HANDLE(vk_physical_device, pdevice, wsi_device->pdevice);
   if (pdevice->supported_extensions.EXT_attachment_feedback_loop_layout)
      caps->supportedUsageFlags |= VK_IMAGE_USAGE_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT;

   return VK_SUCCESS;
}

static VkResult
wsi_wl_surface_get_capabilities2(VkIcdSurfaceBase *surface,
                                 struct wsi_device *wsi_device,
                                 const void *info_next,
                                 VkSurfaceCapabilities2KHR* caps)
{
   assert(caps->sType == VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR);

   const VkSurfacePresentModeEXT *present_mode = vk_find_struct_const(info_next, SURFACE_PRESENT_MODE_EXT);

   VkResult result =
      wsi_wl_surface_get_capabilities(surface, wsi_device, present_mode,
                                      &caps->surfaceCapabilities);

   vk_foreach_struct(ext, caps->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR: {
         VkSurfaceProtectedCapabilitiesKHR *protected = (void *)ext;
         protected->supportsProtected = VK_FALSE;
         break;
      }

      case VK_STRUCTURE_TYPE_SURFACE_PRESENT_SCALING_CAPABILITIES_EXT: {
         /* Unsupported. */
         VkSurfacePresentScalingCapabilitiesEXT *scaling = (void *)ext;
         scaling->supportedPresentScaling = 0;
         scaling->supportedPresentGravityX = 0;
         scaling->supportedPresentGravityY = 0;
         scaling->minScaledImageExtent = caps->surfaceCapabilities.minImageExtent;
         scaling->maxScaledImageExtent = caps->surfaceCapabilities.maxImageExtent;
         break;
      }

      case VK_STRUCTURE_TYPE_SURFACE_PRESENT_MODE_COMPATIBILITY_EXT: {
         /* Can easily toggle between FIFO and MAILBOX on Wayland. */
         VkSurfacePresentModeCompatibilityEXT *compat = (void *)ext;
         if (compat->pPresentModes) {
            assert(present_mode);
            VK_OUTARRAY_MAKE_TYPED(VkPresentModeKHR, modes, compat->pPresentModes, &compat->presentModeCount);
            /* Must always return queried present mode even when truncating. */
            vk_outarray_append_typed(VkPresentModeKHR, &modes, mode) {
               *mode = present_mode->presentMode;
            }
            switch (present_mode->presentMode) {
            case VK_PRESENT_MODE_MAILBOX_KHR:
               vk_outarray_append_typed(VkPresentModeKHR, &modes, mode) {
                  *mode = VK_PRESENT_MODE_FIFO_KHR;
               }
               break;
            case VK_PRESENT_MODE_FIFO_KHR:
               vk_outarray_append_typed(VkPresentModeKHR, &modes, mode) {
                  *mode = VK_PRESENT_MODE_MAILBOX_KHR;
               }
               break;
            default:
               break;
            }
         } else {
            if (!present_mode) {
               wsi_common_vk_warn_once("Use of VkSurfacePresentModeCompatibilityEXT "
                                       "without a VkSurfacePresentModeEXT set. This is an "
                                       "application bug.\n");
               compat->presentModeCount = 1;
            } else {
               switch (present_mode->presentMode) {
               case VK_PRESENT_MODE_MAILBOX_KHR:
               case VK_PRESENT_MODE_FIFO_KHR:
                  compat->presentModeCount = 2;
                  break;
               default:
                  compat->presentModeCount = 1;
                  break;
               }
            }
         }
         break;
      }

      default:
         /* Ignored */
         break;
      }
   }

   return result;
}

static VkResult
wsi_wl_surface_get_formats(VkIcdSurfaceBase *icd_surface,
                           struct wsi_device *wsi_device,
                           uint32_t* pSurfaceFormatCount,
                           VkSurfaceFormatKHR* pSurfaceFormats)
{
   VkIcdSurfaceWayland *surface = (VkIcdSurfaceWayland *)icd_surface;
   struct wsi_wayland *wsi =
      (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];

   struct wsi_wl_display display;
   if (wsi_wl_display_init(wsi, &display, surface->display, true,
                           wsi_device->sw, "mesa formats query"))
      return VK_ERROR_SURFACE_LOST_KHR;

   VK_OUTARRAY_MAKE_TYPED(VkSurfaceFormatKHR, out,
                          pSurfaceFormats, pSurfaceFormatCount);

   struct wsi_wl_format *disp_fmt;
   u_vector_foreach(disp_fmt, &display.formats) {
      /* Skip formats for which we can't support both the alpha and the
       * opaque variant.
       */
      if (!(disp_fmt->flags & WSI_WL_FMT_ALPHA) ||
          !(disp_fmt->flags & WSI_WL_FMT_OPAQUE))
         continue;

      vk_outarray_append_typed(VkSurfaceFormatKHR, &out, out_fmt) {
         out_fmt->format = disp_fmt->vk_format;
         out_fmt->colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
      }
   }

   wsi_wl_display_finish(&display);

   return vk_outarray_status(&out);
}

static VkResult
wsi_wl_surface_get_formats2(VkIcdSurfaceBase *icd_surface,
                            struct wsi_device *wsi_device,
                            const void *info_next,
                            uint32_t* pSurfaceFormatCount,
                            VkSurfaceFormat2KHR* pSurfaceFormats)
{
   VkIcdSurfaceWayland *surface = (VkIcdSurfaceWayland *)icd_surface;
   struct wsi_wayland *wsi =
      (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];

   struct wsi_wl_display display;
   if (wsi_wl_display_init(wsi, &display, surface->display, true,
                           wsi_device->sw, "mesa formats2 query"))
      return VK_ERROR_SURFACE_LOST_KHR;

   VK_OUTARRAY_MAKE_TYPED(VkSurfaceFormat2KHR, out,
                          pSurfaceFormats, pSurfaceFormatCount);

   struct wsi_wl_format *disp_fmt;
   u_vector_foreach(disp_fmt, &display.formats) {
      /* Skip formats for which we can't support both the alpha and the
       * opaque variant.
       */
      if (!(disp_fmt->flags & WSI_WL_FMT_ALPHA) ||
          !(disp_fmt->flags & WSI_WL_FMT_OPAQUE))
         continue;

      vk_outarray_append_typed(VkSurfaceFormat2KHR, &out, out_fmt) {
         out_fmt->surfaceFormat.format = disp_fmt->vk_format;
         out_fmt->surfaceFormat.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
      }
   }

   wsi_wl_display_finish(&display);

   return vk_outarray_status(&out);
}

static VkResult
wsi_wl_surface_get_present_modes(VkIcdSurfaceBase *icd_surface,
                                 struct wsi_device *wsi_device,
                                 uint32_t* pPresentModeCount,
                                 VkPresentModeKHR* pPresentModes)
{
   VkIcdSurfaceWayland *surface = (VkIcdSurfaceWayland *)icd_surface;
   struct wsi_wayland *wsi =
      (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];

   struct wsi_wl_display display;
   if (wsi_wl_display_init(wsi, &display, surface->display, true,
                           wsi_device->sw, "mesa present modes query"))
      return VK_ERROR_SURFACE_LOST_KHR;

   VkPresentModeKHR present_modes[3];
   uint32_t present_modes_count = 0;

   /* The following two modes are always supported */
   present_modes[present_modes_count++] = VK_PRESENT_MODE_MAILBOX_KHR;
   present_modes[present_modes_count++] = VK_PRESENT_MODE_FIFO_KHR;

   if (display.tearing_control_manager)
      present_modes[present_modes_count++] = VK_PRESENT_MODE_IMMEDIATE_KHR;

   assert(present_modes_count <= ARRAY_SIZE(present_modes));
   wsi_wl_display_finish(&display);

   if (pPresentModes == NULL) {
      *pPresentModeCount = present_modes_count;
      return VK_SUCCESS;
   }

   *pPresentModeCount = MIN2(*pPresentModeCount, present_modes_count);
   typed_memcpy(pPresentModes, present_modes, *pPresentModeCount);

   if (*pPresentModeCount < present_modes_count)
      return VK_INCOMPLETE;
   else
      return VK_SUCCESS;
}

static VkResult
wsi_wl_surface_get_present_rectangles(VkIcdSurfaceBase *surface,
                                      struct wsi_device *wsi_device,
                                      uint32_t* pRectCount,
                                      VkRect2D* pRects)
{
   VK_OUTARRAY_MAKE_TYPED(VkRect2D, out, pRects, pRectCount);

   vk_outarray_append_typed(VkRect2D, &out, rect) {
      /* We don't know a size so just return the usual "I don't know." */
      *rect = (VkRect2D) {
         .offset = { 0, 0 },
         .extent = { UINT32_MAX, UINT32_MAX },
      };
   }

   return vk_outarray_status(&out);
}

static void
wsi_wl_surface_analytics_fini(struct wsi_wl_surface *wsi_wl_surface,
                              const VkAllocationCallbacks *parent_pAllocator,
                              const VkAllocationCallbacks *pAllocator)
{
   vk_free2(parent_pAllocator, pAllocator,
            wsi_wl_surface->analytics.latency_str);
}

void
wsi_wl_surface_destroy(VkIcdSurfaceBase *icd_surface, VkInstance _instance,
                       const VkAllocationCallbacks *pAllocator)
{
   VK_FROM_HANDLE(vk_instance, instance, _instance);
   struct wsi_wl_surface *wsi_wl_surface =
      wl_container_of((VkIcdSurfaceWayland *)icd_surface, wsi_wl_surface, base);

   if (wsi_wl_surface->wl_syncobj_surface)
      wp_linux_drm_syncobj_surface_v1_destroy(wsi_wl_surface->wl_syncobj_surface);

   if (wsi_wl_surface->wl_dmabuf_feedback) {
      zwp_linux_dmabuf_feedback_v1_destroy(wsi_wl_surface->wl_dmabuf_feedback);
      dmabuf_feedback_fini(&wsi_wl_surface->dmabuf_feedback);
      dmabuf_feedback_fini(&wsi_wl_surface->pending_dmabuf_feedback);
   }

   if (wsi_wl_surface->surface)
      wl_proxy_wrapper_destroy(wsi_wl_surface->surface);

   if (wsi_wl_surface->display)
      wsi_wl_display_destroy(wsi_wl_surface->display);

   wsi_wl_surface_analytics_fini(wsi_wl_surface, &instance->alloc, pAllocator);

   vk_free2(&instance->alloc, pAllocator, wsi_wl_surface);
}

static struct wsi_wl_format *
pick_format_from_surface_dmabuf_feedback(struct wsi_wl_surface *wsi_wl_surface,
                                         VkFormat vk_format)
{
   struct wsi_wl_format *f = NULL;

   /* If the main_device was not advertised, we don't have valid feedback */
   if (wsi_wl_surface->dmabuf_feedback.main_device == 0)
      return NULL;

   util_dynarray_foreach(&wsi_wl_surface->dmabuf_feedback.tranches,
                         struct dmabuf_feedback_tranche, tranche) {
      f = find_format(&tranche->formats, vk_format);
      if (f)
         break;
   }

   return f;
}

static void
surface_dmabuf_feedback_format_table(void *data,
                                     struct zwp_linux_dmabuf_feedback_v1 *zwp_linux_dmabuf_feedback_v1,
                                     int32_t fd, uint32_t size)
{
   struct wsi_wl_surface *wsi_wl_surface = data;
   struct dmabuf_feedback *feedback = &wsi_wl_surface->pending_dmabuf_feedback;

   feedback->format_table.size = size;
   feedback->format_table.data = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);

   close(fd);
}

static void
surface_dmabuf_feedback_main_device(void *data,
                                    struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
                                    struct wl_array *device)
{
   struct wsi_wl_surface *wsi_wl_surface = data;
   struct dmabuf_feedback *feedback = &wsi_wl_surface->pending_dmabuf_feedback;

   memcpy(&feedback->main_device, device->data, sizeof(feedback->main_device));
}

static void
surface_dmabuf_feedback_tranche_target_device(void *data,
                                              struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
                                              struct wl_array *device)
{
   struct wsi_wl_surface *wsi_wl_surface = data;
   struct dmabuf_feedback *feedback = &wsi_wl_surface->pending_dmabuf_feedback;

   memcpy(&feedback->pending_tranche.target_device, device->data,
          sizeof(feedback->pending_tranche.target_device));
}

static void
surface_dmabuf_feedback_tranche_flags(void *data,
                                      struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
                                      uint32_t flags)
{
   struct wsi_wl_surface *wsi_wl_surface = data;
   struct dmabuf_feedback *feedback = &wsi_wl_surface->pending_dmabuf_feedback;

   feedback->pending_tranche.flags = flags;
}

static void
surface_dmabuf_feedback_tranche_formats(void *data,
                                        struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
                                        struct wl_array *indices)
{
   struct wsi_wl_surface *wsi_wl_surface = data;
   struct dmabuf_feedback *feedback = &wsi_wl_surface->pending_dmabuf_feedback;
   uint32_t format;
   uint64_t modifier;
   uint16_t *index;

   /* The compositor may or may not advertise a format table with each
    * feedback batch. If it does, we use it; otherwise we steal the most
    * recently advertised table. If there is no table to steal either, the
    * compositor did something wrong. */
   if (feedback->format_table.data == NULL) {
      feedback->format_table = wsi_wl_surface->dmabuf_feedback.format_table;
      dmabuf_feedback_format_table_init(&wsi_wl_surface->dmabuf_feedback.format_table);
   }
   if (feedback->format_table.data == MAP_FAILED ||
       feedback->format_table.data == NULL)
      return;

   wl_array_for_each(index, indices) {
      format = feedback->format_table.data[*index].format;
      modifier = feedback->format_table.data[*index].modifier;

      wsi_wl_display_add_drm_format_modifier(wsi_wl_surface->display,
                                             &wsi_wl_surface->pending_dmabuf_feedback.pending_tranche.formats,
                                             format, modifier);
   }
}

static void
surface_dmabuf_feedback_tranche_done(void *data,
                                     struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
{
   struct wsi_wl_surface *wsi_wl_surface = data;
   struct dmabuf_feedback *feedback = &wsi_wl_surface->pending_dmabuf_feedback;

   /* Add tranche to array of tranches. */
   util_dynarray_append(&feedback->tranches, struct dmabuf_feedback_tranche,
                        feedback->pending_tranche);

   dmabuf_feedback_tranche_init(&feedback->pending_tranche);
}

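/* Order-independent set comparison. This assumes neither list contains
 * duplicates, which holds for modifier lists built through
 * wsi_wl_format_add_modifier(): equal cardinality plus every element of A
 * appearing in B then proves set equality. */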
static bool
sets_of_modifiers_are_the_same(uint32_t num_drm_modifiers_A, const uint64_t *modifiers_A,
                               uint32_t num_drm_modifiers_B, const uint64_t *modifiers_B)
{
   uint32_t i, j;
   bool mod_found;

   if (num_drm_modifiers_A != num_drm_modifiers_B)
      return false;

   for (i = 0; i < num_drm_modifiers_A; i++) {
      mod_found = false;
      for (j = 0; j < num_drm_modifiers_B; j++) {
         if (modifiers_A[i] == modifiers_B[j]) {
            mod_found = true;
            break;
         }
      }
      if (!mod_found)
         return false;
   }

   return true;
}

static void
surface_dmabuf_feedback_done(void *data,
                             struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
{
   struct wsi_wl_surface *wsi_wl_surface = data;
   struct wsi_wl_swapchain *chain = wsi_wl_surface->chain;
   struct wsi_wl_format *f;

   dmabuf_feedback_fini(&wsi_wl_surface->dmabuf_feedback);
   wsi_wl_surface->dmabuf_feedback = wsi_wl_surface->pending_dmabuf_feedback;
   dmabuf_feedback_init(&wsi_wl_surface->pending_dmabuf_feedback);

   /* Receiving dma-buf feedback does not by itself mean that re-allocation
    * is a good idea. To know whether we should re-allocate, we must compare
    * the most recent parameters we used to allocate with the ones in the
    * feedback we just received.
    *
    * The allocation parameters are: the format, its set of modifiers and the
    * tranche flags. WSI does not use the tranche flags for anything, so we
    * can disregard them. And as we can't switch to another format (it is
    * selected by the client), we just need to compare the sets of modifiers.
    *
    * So we look up the vk_format in the tranches (respecting their
    * preference order) and compare its set of modifiers with the set of
    * modifiers we used to allocate previously. If they differ, we are using
    * suboptimal parameters and should re-allocate.
    */
   f = pick_format_from_surface_dmabuf_feedback(wsi_wl_surface, chain->vk_format);
   if (f && !sets_of_modifiers_are_the_same(u_vector_length(&f->modifiers),
                                            u_vector_tail(&f->modifiers),
                                            chain->num_drm_modifiers,
                                            chain->drm_modifiers))
      wsi_wl_surface->chain->suboptimal = true;
}

static const struct zwp_linux_dmabuf_feedback_v1_listener
surface_dmabuf_feedback_listener = {
   .format_table = surface_dmabuf_feedback_format_table,
   .main_device = surface_dmabuf_feedback_main_device,
   .tranche_target_device = surface_dmabuf_feedback_tranche_target_device,
   .tranche_flags = surface_dmabuf_feedback_tranche_flags,
   .tranche_formats = surface_dmabuf_feedback_tranche_formats,
   .tranche_done = surface_dmabuf_feedback_tranche_done,
   .done = surface_dmabuf_feedback_done,
};

1570 static VkResult wsi_wl_surface_bind_to_dmabuf_feedback(struct wsi_wl_surface *wsi_wl_surface)
1571 {
1572 wsi_wl_surface->wl_dmabuf_feedback =
1573 zwp_linux_dmabuf_v1_get_surface_feedback(wsi_wl_surface->display->wl_dmabuf,
1574 wsi_wl_surface->surface);
1575
1576 zwp_linux_dmabuf_feedback_v1_add_listener(wsi_wl_surface->wl_dmabuf_feedback,
1577 &surface_dmabuf_feedback_listener,
1578 wsi_wl_surface);
1579
1580 if (dmabuf_feedback_init(&wsi_wl_surface->dmabuf_feedback) < 0)
1581 goto fail;
1582 if (dmabuf_feedback_init(&wsi_wl_surface->pending_dmabuf_feedback) < 0)
1583 goto fail_pending;
1584
1585 return VK_SUCCESS;
1586
1587 fail_pending:
1588 dmabuf_feedback_fini(&wsi_wl_surface->dmabuf_feedback);
1589 fail:
1590 zwp_linux_dmabuf_feedback_v1_destroy(wsi_wl_surface->wl_dmabuf_feedback);
1591 wsi_wl_surface->wl_dmabuf_feedback = NULL;
1592 return VK_ERROR_OUT_OF_HOST_MEMORY;
1593 }
1594
1595 static void
1596 wsi_wl_surface_analytics_init(struct wsi_wl_surface *wsi_wl_surface,
1597 const VkAllocationCallbacks *pAllocator)
1598 {
1599 uint64_t wl_id;
1600 char *track_name;
1601
1602 wl_id = wl_proxy_get_id((struct wl_proxy *) wsi_wl_surface->surface);
1603 track_name = vk_asprintf(pAllocator, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT,
1604 "wl%" PRIu64 " presentation", wl_id);
1605 wsi_wl_surface->analytics.presentation_track_id = util_perfetto_new_track(track_name);
1606 vk_free(pAllocator, track_name);
1607
1608 wsi_wl_surface->analytics.latency_str =
1609 vk_asprintf(pAllocator,
1610 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT,
1611 "wl%" PRIu64 " latency", wl_id);
1612 }
1613
1614 static VkResult wsi_wl_surface_init(struct wsi_wl_surface *wsi_wl_surface,
1615 struct wsi_device *wsi_device,
1616 const VkAllocationCallbacks *pAllocator)
1617 {
1618 struct wsi_wayland *wsi =
1619 (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];
1620 VkResult result;
1621
1622 /* wsi_wl_surface has already been initialized. */
1623 if (wsi_wl_surface->display)
1624 return VK_SUCCESS;
1625
1626 result = wsi_wl_display_create(wsi, wsi_wl_surface->base.display,
1627 wsi_device->sw, &wsi_wl_surface->display);
1628 if (result != VK_SUCCESS)
1629 goto fail;
1630
1631 wsi_wl_surface->surface = wl_proxy_create_wrapper(wsi_wl_surface->base.surface);
1632 if (!wsi_wl_surface->surface) {
1633 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1634 goto fail;
1635 }
1636 wl_proxy_set_queue((struct wl_proxy *) wsi_wl_surface->surface,
1637 wsi_wl_surface->display->queue);
1638
1639 /* Bind wsi_wl_surface to dma-buf feedback. */
1640 if (wsi_wl_surface->display->wl_dmabuf &&
1641 zwp_linux_dmabuf_v1_get_version(wsi_wl_surface->display->wl_dmabuf) >=
1642 ZWP_LINUX_DMABUF_V1_GET_SURFACE_FEEDBACK_SINCE_VERSION) {
1643 result = wsi_wl_surface_bind_to_dmabuf_feedback(wsi_wl_surface);
1644 if (result != VK_SUCCESS)
1645 goto fail;
1646
1647 wl_display_roundtrip_queue(wsi_wl_surface->display->wl_display,
1648 wsi_wl_surface->display->queue);
1649 }
1650
1651 if (wsi_wl_use_explicit_sync(wsi_wl_surface->display, wsi_device)) {
1652 wsi_wl_surface->wl_syncobj_surface =
1653 wp_linux_drm_syncobj_manager_v1_get_surface(wsi_wl_surface->display->wl_syncobj,
1654 wsi_wl_surface->surface);
1655
1656 if (!wsi_wl_surface->wl_syncobj_surface) {
1657 result = VK_ERROR_OUT_OF_HOST_MEMORY;
goto fail;
}
1658 }
1659
1660 wsi_wl_surface_analytics_init(wsi_wl_surface, pAllocator);
1661 return VK_SUCCESS;
1662
1663 fail:
1664 if (wsi_wl_surface->surface)
1665 wl_proxy_wrapper_destroy(wsi_wl_surface->surface);
1666
1667 if (wsi_wl_surface->display)
1668 wsi_wl_display_destroy(wsi_wl_surface->display);
1669 return result;
1670 }
1671
1672 VKAPI_ATTR VkResult VKAPI_CALL
1673 wsi_CreateWaylandSurfaceKHR(VkInstance _instance,
1674 const VkWaylandSurfaceCreateInfoKHR *pCreateInfo,
1675 const VkAllocationCallbacks *pAllocator,
1676 VkSurfaceKHR *pSurface)
1677 {
1678 VK_FROM_HANDLE(vk_instance, instance, _instance);
1679 struct wsi_wl_surface *wsi_wl_surface;
1680 VkIcdSurfaceWayland *surface;
1681
1682 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR);
1683
1684 wsi_wl_surface = vk_zalloc2(&instance->alloc, pAllocator, sizeof *wsi_wl_surface,
1685 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1686 if (wsi_wl_surface == NULL)
1687 return VK_ERROR_OUT_OF_HOST_MEMORY;
1688
1689 surface = &wsi_wl_surface->base;
1690
1691 surface->base.platform = VK_ICD_WSI_PLATFORM_WAYLAND;
1692 surface->display = pCreateInfo->display;
1693 surface->surface = pCreateInfo->surface;
1694
1695 *pSurface = VkIcdSurfaceBase_to_handle(&surface->base);
1696
1697 return VK_SUCCESS;
1698 }
1699
1700 struct wsi_wl_present_id {
1701 struct wp_presentation_feedback *feedback;
1702 /* Fallback when wp_presentation is not supported.
1703 * Using frame callback is not the intended way to achieve
1704 * this, but it is the best effort alternative when the proper interface is
1705 * not available. This approach also matches Xwayland,
1706 * which uses frame callback to signal DRI3 COMPLETE. */
1707 struct wl_callback *frame;
1708 uint64_t present_id;
1709 uint64_t flow_id;
1710 uint64_t submission_time;
1711 const VkAllocationCallbacks *alloc;
1712 struct wsi_wl_swapchain *chain;
1713 int buffer_id;
1714 struct wl_list link;
1715 };
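
/* Illustration (not part of the driver): a sketch of the lifecycle of a
 * wsi_wl_present_id as implemented below:
 *
 *    queue_present(present_id = N)
 *       -> allocate a wsi_wl_present_id, request wp_presentation feedback
 *          (or a frame callback as fallback), link it into outstanding_list
 *    presented/discarded event (or frame callback done)
 *       -> max_completed = MAX2(max_completed, N); unlink and free
 *    wait_for_present(N)
 *       -> dispatch present_ids.queue until max_completed >= N
 */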
1716
1717 static struct wsi_image *
1718 wsi_wl_swapchain_get_wsi_image(struct wsi_swapchain *wsi_chain,
1719 uint32_t image_index)
1720 {
1721 struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
1722 return &chain->images[image_index].base;
1723 }
1724
1725 static VkResult
1726 wsi_wl_swapchain_release_images(struct wsi_swapchain *wsi_chain,
1727 uint32_t count, const uint32_t *indices)
1728 {
1729 struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
1730 for (uint32_t i = 0; i < count; i++) {
1731 uint32_t index = indices[i];
1732 chain->images[index].busy = false;
1733 }
1734 return VK_SUCCESS;
1735 }
1736
1737 static void
1738 wsi_wl_swapchain_set_present_mode(struct wsi_swapchain *wsi_chain,
1739 VkPresentModeKHR mode)
1740 {
1741 struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
1742 chain->base.present_mode = mode;
1743 }
1744
1745 static VkResult
1746 dispatch_present_id_queue(struct wsi_swapchain *wsi_chain, struct timespec *end_time)
1747 {
1748 struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
1749
1750 /* We might not own this surface if we're retired, but it is only used here to
1751 * read events from the present ID queue. This queue is private to a given VkSwapchainKHR,
1752 * so calling present wait on a retired swapchain cannot interfere with a non-retired swapchain. */
1753 struct wl_display *wl_display = chain->wsi_wl_surface->display->wl_display;
1754
1755 int ret;
1756 int err;
1757
1758 /* PresentWait can be called concurrently.
1759 * If there is contention on this mutex, it means there is currently a dispatcher in flight holding the lock.
1760 * The lock is only held while there is forward progress processing events from Wayland,
1761 * so there should be no problem locking without timeout.
1762 * We would like to be able to support timeout = 0 to query the current max_completed count.
1763 * A timedlock with no timeout can be problematic in that scenario. */
1764 err = mtx_lock(&chain->present_ids.lock);
1765 if (err != thrd_success)
1766 return VK_ERROR_OUT_OF_DATE_KHR;
1767
1768 /* Someone else is dispatching events; wait for them to update the chain
1769 * status and wake us up. */
1770 if (chain->present_ids.dispatch_in_progress) {
1771 err = u_cnd_monotonic_timedwait(&chain->present_ids.list_advanced,
1772 &chain->present_ids.lock, end_time);
1773 mtx_unlock(&chain->present_ids.lock);
1774
1775 if (err == thrd_timedout)
1776 return VK_TIMEOUT;
1777 else if (err != thrd_success)
1778 return VK_ERROR_OUT_OF_DATE_KHR;
1779
1780 return VK_SUCCESS;
1781 }
1782
1783 /* Whether or not we were dispatching the events before, we are now. */
1784 assert(!chain->present_ids.dispatch_in_progress);
1785 chain->present_ids.dispatch_in_progress = true;
1786
1787 /* We drop the lock now - we're still protected by dispatch_in_progress,
1788 * and holding the lock while dispatch_queue_timeout waits in poll()
1789 * might delay other threads unnecessarily.
1790 *
1791 * We'll pick up the lock again in the dispatched functions.
1792 */
1793 mtx_unlock(&chain->present_ids.lock);
1794
1795 ret = loader_wayland_dispatch(wl_display,
1796 chain->present_ids.queue,
1797 end_time);
1798
1799 mtx_lock(&chain->present_ids.lock);
1800
1801 /* Wake up other waiters who may have been unblocked by the events
1802 * we just read. */
1803 u_cnd_monotonic_broadcast(&chain->present_ids.list_advanced);
1804
1805 assert(chain->present_ids.dispatch_in_progress);
1806 chain->present_ids.dispatch_in_progress = false;
1807
1808 u_cnd_monotonic_broadcast(&chain->present_ids.list_advanced);
1809 mtx_unlock(&chain->present_ids.lock);
1810
1811 if (ret == -1)
1812 return VK_ERROR_OUT_OF_DATE_KHR;
1813 if (ret == 0)
1814 return VK_TIMEOUT;
1815 return VK_SUCCESS;
1816 }
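
/* Illustration (not part of the driver): the single-dispatcher protocol
 * above keeps concurrent waiters safe. Two threads calling
 * vkWaitForPresentKHR on the same swapchain behave roughly like this:
 *
 *    thread A: locks, sets dispatch_in_progress, unlocks, then
 *              polls/reads Wayland events for the present ID queue
 *    thread B: locks, sees dispatch_in_progress, and sleeps on
 *              list_advanced with a deadline instead of double-dispatching
 *    thread A: re-locks, broadcasts list_advanced, clears
 *              dispatch_in_progress; B wakes and re-checks max_completed
 */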
1817
1818 static VkResult
1819 wsi_wl_swapchain_wait_for_present(struct wsi_swapchain *wsi_chain,
1820 uint64_t present_id,
1821 uint64_t timeout)
1822 {
1823 struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
1824 struct timespec end_time;
1825 VkResult ret;
1826 int err;
1827
1828 MESA_TRACE_FUNC();
1829
1830 uint64_t atimeout;
1831 if (timeout == 0 || timeout == UINT64_MAX)
1832 atimeout = timeout;
1833 else
1834 atimeout = os_time_get_absolute_timeout(timeout);
1835
1836 /* Need to observe that the swapchain semaphore has been unsignalled,
1837 * as this is guaranteed when a present is complete. */
1838 VkResult result = wsi_swapchain_wait_for_present_semaphore(
1839 &chain->base, present_id, timeout);
1840 if (result != VK_SUCCESS)
1841 return result;
1842
1843 /* If using frame callback, guard against lack of forward progress
1844 * of the frame callback in some situations,
1845 * e.g. the surface might not be visible.
1846 * If rendering has completed on GPU,
1847 * and we still haven't received a callback after 100ms, unblock the application.
1848 * 100ms is chosen arbitrarily.
1849 * The queue depth in WL WSI is just one frame due to frame callback in FIFO mode,
1850 * so the time from when a frame has completed rendering to when it should be
1851 * considered presented will not exceed 100ms, except in contrived edge cases. */
1852 uint64_t assumed_success_at = UINT64_MAX;
1853 if (!chain->present_ids.wp_presentation)
1854 assumed_success_at = os_time_get_absolute_timeout(100 * 1000 * 1000);
1855
1856 /* If app timeout is beyond the deadline we set for reply,
1857 * always treat the timeout as successful. */
1858 VkResult timeout_result = assumed_success_at < atimeout ? VK_SUCCESS : VK_TIMEOUT;
1859 timespec_from_nsec(&end_time, MIN2(atimeout, assumed_success_at));
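/* Worked example (hypothetical numbers): with a 16ms application timeout
 * and no wp_presentation, atimeout = now + 16ms while assumed_success_at =
 * now + 100ms, so end_time = now + 16ms and timing out returns VK_TIMEOUT.
 * With a 1s application timeout, end_time = now + 100ms and timing out is
 * reported as VK_SUCCESS, since the frame-callback fallback cannot be
 * expected to make further progress. */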
1860
1861 while (1) {
1862 err = mtx_lock(&chain->present_ids.lock);
1863 if (err != thrd_success)
1864 return VK_ERROR_OUT_OF_DATE_KHR;
1865
1866 bool completed = chain->present_ids.max_completed >= present_id;
1867 mtx_unlock(&chain->present_ids.lock);
1868
1869 if (completed)
1870 return VK_SUCCESS;
1871
1872 ret = dispatch_present_id_queue(wsi_chain, &end_time);
1873 if (ret == VK_TIMEOUT)
1874 return timeout_result;
1875 if (ret != VK_SUCCESS)
1876 return ret;
1877 }
1878 }
1879
1880 static VkResult
1881 wsi_wl_swapchain_acquire_next_image_explicit(struct wsi_swapchain *wsi_chain,
1882 const VkAcquireNextImageInfoKHR *info,
1883 uint32_t *image_index)
1884 {
1885 struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
1886 uint64_t id = 0;
1887
1888 MESA_TRACE_FUNC_FLOW(&id);
1889
1890 /* See comments in queue_present() */
1891 if (chain->retired)
1892 return VK_ERROR_OUT_OF_DATE_KHR;
1893
1894 STACK_ARRAY(struct wsi_image*, images, wsi_chain->image_count);
1895 for (uint32_t i = 0; i < chain->base.image_count; i++)
1896 images[i] = &chain->images[i].base;
1897
1898 VkResult result;
1899 #ifdef HAVE_LIBDRM
1900 result = wsi_drm_wait_for_explicit_sync_release(wsi_chain,
1901 wsi_chain->image_count,
1902 images,
1903 info->timeout,
1904 image_index);
1905 #else
1906 result = VK_ERROR_FEATURE_NOT_PRESENT;
1907 #endif
1908 STACK_ARRAY_FINISH(images);
1909
1910 if (result == VK_SUCCESS) {
1911 chain->images[*image_index].flow_id = id;
1912 if (chain->suboptimal)
1913 result = VK_SUBOPTIMAL_KHR;
1914 }
1915
1916 return result;
1917 }
1918
1919 static VkResult
1920 wsi_wl_swapchain_acquire_next_image_implicit(struct wsi_swapchain *wsi_chain,
1921 const VkAcquireNextImageInfoKHR *info,
1922 uint32_t *image_index)
1923 {
1924 struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
1925 struct timespec start_time, end_time;
1926 struct timespec rel_timeout;
1927 uint64_t id = 0;
1928
1929 MESA_TRACE_FUNC_FLOW(&id);
1930
1931 /* See comments in queue_present() */
1932 if (chain->retired)
1933 return VK_ERROR_OUT_OF_DATE_KHR;
1934
1935 struct wsi_wl_surface *wsi_wl_surface = chain->wsi_wl_surface;
1936 timespec_from_nsec(&rel_timeout, info->timeout);
1937
1938 clock_gettime(CLOCK_MONOTONIC, &start_time);
1939 timespec_add(&end_time, &rel_timeout, &start_time);
1940
1941 while (1) {
1942 /* Try to find a free image. */
1943 for (uint32_t i = 0; i < chain->base.image_count; i++) {
1944 if (!chain->images[i].busy) {
1945 /* We found a non-busy image */
1946 *image_index = i;
1947 chain->images[i].busy = true;
1948 chain->images[i].flow_id = id;
1949 return (chain->suboptimal ? VK_SUBOPTIMAL_KHR : VK_SUCCESS);
1950 }
1951 }
1952
1953 /* Try to dispatch potential events. */
1954 int ret = loader_wayland_dispatch(wsi_wl_surface->display->wl_display,
1955 wsi_wl_surface->display->queue,
1956 &end_time);
1957 if (ret == -1)
1958 return VK_ERROR_OUT_OF_DATE_KHR;
1959
1960 /* Check for timeout. */
1961 if (ret == 0)
1962 return (info->timeout ? VK_TIMEOUT : VK_NOT_READY);
1963 }
1964 }
1965
1966 static void
1967 presentation_handle_sync_output(void *data,
1968 struct wp_presentation_feedback *feedback,
1969 struct wl_output *output)
1970 {
1971 }
1972
1973 static void
1974 wsi_wl_presentation_update_present_id(struct wsi_wl_present_id *id)
1975 {
1976 mtx_lock(&id->chain->present_ids.lock);
1977 if (id->present_id > id->chain->present_ids.max_completed)
1978 id->chain->present_ids.max_completed = id->present_id;
1979
1980 wl_list_remove(&id->link);
1981 mtx_unlock(&id->chain->present_ids.lock);
1982 vk_free(id->alloc, id);
1983 }
1984
1985 static void
1986 trace_present(const struct wsi_wl_present_id *id,
1987 uint64_t presentation_time)
1988 {
1989 struct wsi_wl_swapchain *chain = id->chain;
1990 struct wsi_wl_surface *surface = chain->wsi_wl_surface;
1991 char *buffer_name;
1992
1993 MESA_TRACE_SET_COUNTER(surface->analytics.latency_str,
1994 (presentation_time - id->submission_time) / 1000000.0);
1995
1996 /* Close the previous image display interval first, if there is one. */
1997 if (surface->analytics.presenting && util_perfetto_is_tracing_enabled()) {
1998 buffer_name = stringify_wayland_id(surface->analytics.presenting);
1999 MESA_TRACE_TIMESTAMP_END(buffer_name ? buffer_name : "Wayland buffer",
2000 surface->analytics.presentation_track_id,
2001 presentation_time);
2002 free(buffer_name);
2003 }
2004
2005 surface->analytics.presenting = id->buffer_id;
2006
2007 if (util_perfetto_is_tracing_enabled()) {
2008 buffer_name = stringify_wayland_id(id->buffer_id);
2009 MESA_TRACE_TIMESTAMP_BEGIN(buffer_name ? buffer_name : "Wayland buffer",
2010 surface->analytics.presentation_track_id,
2011 id->flow_id,
2012 presentation_time);
2013 free(buffer_name);
2014 }
2015 }
2016
2017 static void
2018 presentation_handle_presented(void *data,
2019 struct wp_presentation_feedback *feedback,
2020 uint32_t tv_sec_hi, uint32_t tv_sec_lo,
2021 uint32_t tv_nsec, uint32_t refresh,
2022 uint32_t seq_hi, uint32_t seq_lo,
2023 uint32_t flags)
2024 {
2025 struct wsi_wl_present_id *id = data;
2026 struct timespec presentation_ts;
2027 uint64_t presentation_time;
2028
2029 MESA_TRACE_FUNC_FLOW(&id->flow_id);
2030
2031 presentation_ts.tv_sec = ((uint64_t)tv_sec_hi << 32) + tv_sec_lo;
2032 presentation_ts.tv_nsec = tv_nsec;
2033 presentation_time = timespec_to_nsec(&presentation_ts);
2034 trace_present(id, presentation_time);
2035
2036 wsi_wl_presentation_update_present_id(id);
2037 wp_presentation_feedback_destroy(feedback);
2038 }
2039
2040 static void
2041 presentation_handle_discarded(void *data,
2042 struct wp_presentation_feedback *feedback)
2043 {
2044 struct wsi_wl_present_id *id = data;
2045
2046 MESA_TRACE_FUNC_FLOW(&id->flow_id);
2047
2048 wsi_wl_presentation_update_present_id(id);
2049 wp_presentation_feedback_destroy(feedback);
2050 }
2051
2052 static const struct wp_presentation_feedback_listener
2053 pres_feedback_listener = {
2054 presentation_handle_sync_output,
2055 presentation_handle_presented,
2056 presentation_handle_discarded,
2057 };
2058
2059 static void
2060 presentation_frame_handle_done(void *data, struct wl_callback *callback, uint32_t serial)
2061 {
2062 struct wsi_wl_present_id *id = data;
2063 wsi_wl_presentation_update_present_id(id);
2064 wl_callback_destroy(callback);
2065 }
2066
2067 static const struct wl_callback_listener pres_frame_listener = {
2068 presentation_frame_handle_done,
2069 };
2070
2071 static void
2072 frame_handle_done(void *data, struct wl_callback *callback, uint32_t serial)
2073 {
2074 struct wsi_wl_swapchain *chain = data;
2075
2076 chain->frame = NULL;
2077 chain->fifo_ready = true;
2078
2079 wl_callback_destroy(callback);
2080 }
2081
2082 static const struct wl_callback_listener frame_listener = {
2083 frame_handle_done,
2084 };
2085
2086 static VkResult
2087 wsi_wl_swapchain_queue_present(struct wsi_swapchain *wsi_chain,
2088 uint32_t image_index,
2089 uint64_t present_id,
2090 const VkPresentRegionKHR *damage)
2091 {
2092 struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
2093 bool queue_dispatched = false;
2094 uint64_t flow_id = chain->images[image_index].flow_id;
2095
2096 MESA_TRACE_FUNC_FLOW(&flow_id);
2097
2098 /* In case we're sending presentation feedback requests, make sure the
2099 * queue their events arrive on is dispatched.
2100 */
2101 struct timespec instant = {0};
2102 if (dispatch_present_id_queue(wsi_chain, &instant) == VK_ERROR_OUT_OF_DATE_KHR)
2103 return VK_ERROR_OUT_OF_DATE_KHR;
2104
2105 /* While the specification suggests we can keep presenting already acquired
2106 * images on a retired swapchain, there is no requirement to support that.
2107 * From spec 1.3.278:
2108 *
2109 * After oldSwapchain is retired, the application can pass to vkQueuePresentKHR
2110 * any images it had already acquired from oldSwapchain.
2111 * E.g., an application may present an image from the old swapchain
2112 * before an image from the new swapchain is ready to be presented.
2113 * As usual, vkQueuePresentKHR may fail if oldSwapchain has entered a state
2114 * that causes VK_ERROR_OUT_OF_DATE_KHR to be returned. */
2115 if (chain->retired)
2116 return VK_ERROR_OUT_OF_DATE_KHR;
2117
2118 struct wsi_wl_surface *wsi_wl_surface = chain->wsi_wl_surface;
2119
2120 if (chain->buffer_type == WSI_WL_BUFFER_SHM_MEMCPY) {
2121 struct wsi_wl_image *image = &chain->images[image_index];
2122 memcpy(image->shm_ptr, image->base.cpu_map,
2123 image->base.row_pitches[0] * chain->extent.height);
2124 }
2125
2126 /* For EXT_swapchain_maintenance1: we might have transitioned from FIFO to MAILBOX.
2127 * In this case we need to let the pending FIFO request complete before presenting in MAILBOX mode. */
2128 while (!chain->fifo_ready) {
2129 int ret = wl_display_dispatch_queue(wsi_wl_surface->display->wl_display,
2130 wsi_wl_surface->display->queue);
2131 if (ret < 0)
2132 return VK_ERROR_OUT_OF_DATE_KHR;
2133
2134 queue_dispatched = true;
2135 }
2136
2137 if (chain->base.image_info.explicit_sync) {
2138 struct wsi_wl_image *image = &chain->images[image_index];
2139 /* Incremented by signal in base queue_present. */
2140 uint64_t acquire_point = image->base.explicit_sync[WSI_ES_ACQUIRE].timeline;
2141 uint64_t release_point = image->base.explicit_sync[WSI_ES_RELEASE].timeline;
2142 wp_linux_drm_syncobj_surface_v1_set_acquire_point(wsi_wl_surface->wl_syncobj_surface,
2143 image->wl_syncobj_timeline[WSI_ES_ACQUIRE],
2144 (uint32_t)(acquire_point >> 32),
2145 (uint32_t)(acquire_point & 0xffffffff));
2146 wp_linux_drm_syncobj_surface_v1_set_release_point(wsi_wl_surface->wl_syncobj_surface,
2147 image->wl_syncobj_timeline[WSI_ES_RELEASE],
2148 (uint32_t)(release_point >> 32),
2149 (uint32_t)(release_point & 0xffffffff));
2150 }
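
/* Illustration (not part of the driver): the syncobj protocol carries
 * timeline points as two 32-bit halves, hence the split above:
 *
 *    uint64_t point = 0x0000000100000002ull;       // hypothetical value
 *    uint32_t hi = (uint32_t)(point >> 32);        // 0x00000001
 *    uint32_t lo = (uint32_t)(point & 0xffffffff); // 0x00000002
 */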
2151
2152 assert(image_index < chain->base.image_count);
2153 wl_surface_attach(wsi_wl_surface->surface, chain->images[image_index].buffer, 0, 0);
2154
2155 if (wl_surface_get_version(wsi_wl_surface->surface) >= 4 && damage &&
2156 damage->pRectangles && damage->rectangleCount > 0) {
2157 for (unsigned i = 0; i < damage->rectangleCount; i++) {
2158 const VkRectLayerKHR *rect = &damage->pRectangles[i];
2159 assert(rect->layer == 0);
2160 wl_surface_damage_buffer(wsi_wl_surface->surface,
2161 rect->offset.x, rect->offset.y,
2162 rect->extent.width, rect->extent.height);
2163 }
2164 } else {
2165 wl_surface_damage(wsi_wl_surface->surface, 0, 0, INT32_MAX, INT32_MAX);
2166 }
2167
2168 if (chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR) {
2169 chain->frame = wl_surface_frame(wsi_wl_surface->surface);
2170 wl_callback_add_listener(chain->frame, &frame_listener, chain);
2171 chain->fifo_ready = false;
2172 } else {
2173 /* If we present MAILBOX, any subsequent presentation in FIFO can replace this image. */
2174 chain->fifo_ready = true;
2175 }
2176
2177 if (present_id > 0 || util_perfetto_is_tracing_enabled()) {
2178 struct wsi_wl_present_id *id =
2179 vk_zalloc(chain->wsi_wl_surface->display->wsi_wl->alloc, sizeof(*id), sizeof(uintptr_t),
2180 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2181 id->chain = chain;
2182 id->present_id = present_id;
2183 id->alloc = chain->wsi_wl_surface->display->wsi_wl->alloc;
2184 id->flow_id = flow_id;
2185 id->buffer_id =
2186 wl_proxy_get_id((struct wl_proxy *)chain->images[image_index].buffer);
2187
2188 id->submission_time = os_time_get_nano();
2189
2190 mtx_lock(&chain->present_ids.lock);
2191
2192 if (chain->present_ids.wp_presentation) {
2193 id->feedback = wp_presentation_feedback(chain->present_ids.wp_presentation,
2194 chain->wsi_wl_surface->surface);
2195 wp_presentation_feedback_add_listener(id->feedback,
2196 &pres_feedback_listener,
2197 id);
2198 } else {
2199 id->frame = wl_surface_frame(chain->present_ids.surface);
2200 wl_callback_add_listener(id->frame, &pres_frame_listener, id);
2201 }
2202
2203 wl_list_insert(&chain->present_ids.outstanding_list, &id->link);
2204 mtx_unlock(&chain->present_ids.lock);
2205 }
2206
2207 chain->images[image_index].busy = true;
2208 wl_surface_commit(wsi_wl_surface->surface);
2209 wl_display_flush(wsi_wl_surface->display->wl_display);
2210
2211 if (!queue_dispatched && wsi_chain->image_info.explicit_sync) {
2212 wl_display_dispatch_queue_pending(wsi_wl_surface->display->wl_display,
2213 wsi_wl_surface->display->queue);
2214 }
2215
2216 return VK_SUCCESS;
2217 }
2218
2219 static void
2220 buffer_handle_release(void *data, struct wl_buffer *buffer)
2221 {
2222 struct wsi_wl_image *image = data;
2223
2224 assert(image->buffer == buffer);
2225
2226 image->busy = false;
2227 }
2228
2229 static const struct wl_buffer_listener buffer_listener = {
2230 buffer_handle_release,
2231 };
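
/* Illustration (not part of the driver): with implicit sync, an image only
 * becomes re-acquirable once the compositor sends wl_buffer.release and the
 * handler above clears image->busy; the loop in
 * wsi_wl_swapchain_acquire_next_image_implicit() observes this by
 * dispatching the display queue until some image is free again. */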
2232
2233 static uint8_t *
2234 wsi_wl_alloc_image_shm(struct wsi_image *imagew, unsigned size)
2235 {
2236 struct wsi_wl_image *image = (struct wsi_wl_image *)imagew;
2237
2238 /* Create a shareable buffer */
2239 int fd = os_create_anonymous_file(size, NULL);
2240 if (fd < 0)
2241 return NULL;
2242
2243 void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
2244 if (ptr == MAP_FAILED) {
2245 close(fd);
2246 return NULL;
2247 }
2248
2249 image->shm_fd = fd;
2250 image->shm_ptr = ptr;
2251 image->shm_size = size;
2252
2253 return ptr;
2254 }
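
/* Illustration (not part of the driver): the fd and mapping stashed above
 * are used in two ways depending on the buffer type. For
 * WSI_WL_BUFFER_GPU_SHM the driver imports shm_ptr as host memory, so
 * rendering lands directly in the pages the compositor reads; for
 * WSI_WL_BUFFER_SHM_MEMCPY, queue_present() first memcpys the rendered
 * image into shm_ptr. Either way, wsi_wl_image_init() wraps shm_fd in a
 * wl_shm_pool to share the same pages with the compositor. */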
2255
2256 static VkResult
2257 wsi_wl_image_init(struct wsi_wl_swapchain *chain,
2258 struct wsi_wl_image *image,
2259 const VkSwapchainCreateInfoKHR *pCreateInfo,
2260 const VkAllocationCallbacks* pAllocator)
2261 {
2262 struct wsi_wl_display *display = chain->wsi_wl_surface->display;
2263 VkResult result;
2264
2265 result = wsi_create_image(&chain->base, &chain->base.image_info,
2266 &image->base);
2267 if (result != VK_SUCCESS)
2268 return result;
2269
2270 switch (chain->buffer_type) {
2271 case WSI_WL_BUFFER_GPU_SHM:
2272 case WSI_WL_BUFFER_SHM_MEMCPY: {
2273 if (chain->buffer_type == WSI_WL_BUFFER_SHM_MEMCPY) {
2274 wsi_wl_alloc_image_shm(&image->base, image->base.row_pitches[0] *
2275 chain->extent.height);
2276 }
2277 assert(image->shm_ptr != NULL);
2278
2279 /* Share it in a wl_buffer */
2280 struct wl_shm_pool *pool = wl_shm_create_pool(display->wl_shm,
2281 image->shm_fd,
2282 image->shm_size);
2283 wl_proxy_set_queue((struct wl_proxy *)pool, display->queue);
2284 image->buffer = wl_shm_pool_create_buffer(pool, 0, chain->extent.width,
2285 chain->extent.height,
2286 image->base.row_pitches[0],
2287 chain->shm_format);
2288 wl_shm_pool_destroy(pool);
2289 break;
2290 }
2291
2292 case WSI_WL_BUFFER_NATIVE: {
2293 assert(display->wl_dmabuf);
2294
2295 struct zwp_linux_buffer_params_v1 *params =
2296 zwp_linux_dmabuf_v1_create_params(display->wl_dmabuf);
2297 if (!params)
2298 goto fail_image;
2299
2300 for (int i = 0; i < image->base.num_planes; i++) {
2301 zwp_linux_buffer_params_v1_add(params,
2302 image->base.dma_buf_fd,
2303 i,
2304 image->base.offsets[i],
2305 image->base.row_pitches[i],
2306 image->base.drm_modifier >> 32,
2307 image->base.drm_modifier & 0xffffffff);
2308 }
2309
2310 image->buffer =
2311 zwp_linux_buffer_params_v1_create_immed(params,
2312 chain->extent.width,
2313 chain->extent.height,
2314 chain->drm_format,
2315 0);
2316 zwp_linux_buffer_params_v1_destroy(params);
2317
2318 if (chain->base.image_info.explicit_sync) {
2319 for (uint32_t i = 0; i < WSI_ES_COUNT; i++) {
2320 image->wl_syncobj_timeline[i] =
2321 wp_linux_drm_syncobj_manager_v1_import_timeline(display->wl_syncobj,
2322 image->base.explicit_sync[i].fd);
2323 if (!image->wl_syncobj_timeline[i])
2324 goto fail_image;
2325 }
2326 }
2327
2328 break;
2329 }
2330
2331 default:
2332 unreachable("Invalid buffer type");
2333 }
2334
2335 if (!image->buffer)
2336 goto fail_image;
2337
2338 /* No need to listen for release events if we are using explicit sync. */
2339 if (!chain->base.image_info.explicit_sync)
2340 wl_buffer_add_listener(image->buffer, &buffer_listener, image);
2341
2342 return VK_SUCCESS;
2343
2344 fail_image:
2345 for (uint32_t i = 0; i < WSI_ES_COUNT; i++) {
2346 if (image->wl_syncobj_timeline[i])
2347 wp_linux_drm_syncobj_timeline_v1_destroy(image->wl_syncobj_timeline[i]);
2348 }
2349 wsi_destroy_image(&chain->base, &image->base);
2350
2351 return VK_ERROR_OUT_OF_HOST_MEMORY;
2352 }
2353
2354 static void
2355 wsi_wl_swapchain_images_free(struct wsi_wl_swapchain *chain)
2356 {
2357 for (uint32_t i = 0; i < chain->base.image_count; i++) {
2358 for (uint32_t j = 0; j < WSI_ES_COUNT; j++) {
2359 if (chain->images[i].wl_syncobj_timeline[j])
2360 wp_linux_drm_syncobj_timeline_v1_destroy(chain->images[i].wl_syncobj_timeline[j]);
2361 }
2362 if (chain->images[i].buffer) {
2363 wl_buffer_destroy(chain->images[i].buffer);
2364 wsi_destroy_image(&chain->base, &chain->images[i].base);
2365 if (chain->images[i].shm_size) {
2366 close(chain->images[i].shm_fd);
2367 munmap(chain->images[i].shm_ptr, chain->images[i].shm_size);
2368 }
2369 }
2370 }
2371 }
2372
2373 static void
2374 wsi_wl_swapchain_chain_free(struct wsi_wl_swapchain *chain,
2375 const VkAllocationCallbacks *pAllocator)
2376 {
2377 /* Force wayland-client to release the fds sent during swapchain
2378 * creation (see MAX_FDS_OUT) to avoid filling up VRAM with
2379 * released buffers.
2380 */
2381 struct wsi_wl_surface *wsi_wl_surface = chain->wsi_wl_surface;
2382 if (!chain->retired)
2383 wl_display_flush(wsi_wl_surface->display->wl_display);
2384
2385 if (chain->frame)
2386 wl_callback_destroy(chain->frame);
2387 if (chain->tearing_control)
2388 wp_tearing_control_v1_destroy(chain->tearing_control);
2389
2390 /* Only unregister if we are the non-retired swapchain, or
2391 * we are a retired swapchain and memory allocation failed,
2392 * in which case there are only retired swapchains. */
2393 if (wsi_wl_surface->chain == chain)
2394 wsi_wl_surface->chain = NULL;
2395
2396 assert(!chain->present_ids.dispatch_in_progress);
2397
2398 /* In VK_EXT_swapchain_maintenance1 there is no requirement to wait for all present IDs to be complete.
2399 * Waiting for the swapchain fence is enough.
2400 * Just clean up anything user did not wait for. */
2401 struct wsi_wl_present_id *id, *tmp;
2402 wl_list_for_each_safe(id, tmp, &chain->present_ids.outstanding_list, link) {
2403 if (id->feedback)
2404 wp_presentation_feedback_destroy(id->feedback);
2405 if (id->frame)
2406 wl_callback_destroy(id->frame);
2407 wl_list_remove(&id->link);
2408 vk_free(id->alloc, id);
2409 }
2410
2411 if (chain->present_ids.wp_presentation)
2412 wl_proxy_wrapper_destroy(chain->present_ids.wp_presentation);
2413 if (chain->present_ids.surface)
2414 wl_proxy_wrapper_destroy(chain->present_ids.surface);
2415 u_cnd_monotonic_destroy(&chain->present_ids.list_advanced);
2416 mtx_destroy(&chain->present_ids.lock);
2417
2418 if (chain->present_ids.queue)
2419 wl_event_queue_destroy(chain->present_ids.queue);
2420
2421 vk_free(pAllocator, (void *)chain->drm_modifiers);
2422
2423 wsi_swapchain_finish(&chain->base);
2424 }
2425
2426 static VkResult
2427 wsi_wl_swapchain_destroy(struct wsi_swapchain *wsi_chain,
2428 const VkAllocationCallbacks *pAllocator)
2429 {
2430 struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
2431
2432 wsi_wl_swapchain_images_free(chain);
2433 wsi_wl_swapchain_chain_free(chain, pAllocator);
2434
2435 vk_free(pAllocator, chain);
2436
2437 return VK_SUCCESS;
2438 }
2439
2440 static VkResult
2441 wsi_wl_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
2442 VkDevice device,
2443 struct wsi_device *wsi_device,
2444 const VkSwapchainCreateInfoKHR* pCreateInfo,
2445 const VkAllocationCallbacks* pAllocator,
2446 struct wsi_swapchain **swapchain_out)
2447 {
2448 struct wsi_wl_surface *wsi_wl_surface =
2449 wl_container_of((VkIcdSurfaceWayland *)icd_surface, wsi_wl_surface, base);
2450 struct wsi_wl_swapchain *chain;
2451 VkResult result;
2452
2453 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);
2454
2455 /* From spec 1.3.278:
2456 * Upon calling vkCreateSwapchainKHR with an oldSwapchain that is not VK_NULL_HANDLE,
2457 * oldSwapchain is retired - even if creation of the new swapchain fails. */
2458 if (pCreateInfo->oldSwapchain) {
2459 VK_FROM_HANDLE(wsi_wl_swapchain, old_chain, pCreateInfo->oldSwapchain);
2460 /* oldSwapchain is extern-sync, so it is not possible to call AcquireNextImage or QueuePresent
2461 * concurrently with this function. Next call to acquire or present will immediately
2462 * return OUT_OF_DATE. */
2463 old_chain->retired = true;
2464 }
2465
2466 int num_images = pCreateInfo->minImageCount;
2467
2468 size_t size = sizeof(*chain) + num_images * sizeof(chain->images[0]);
2469 chain = vk_zalloc(pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2470 if (chain == NULL)
2471 return VK_ERROR_OUT_OF_HOST_MEMORY;
2472
2473 wl_list_init(&chain->present_ids.outstanding_list);
2474
2475 /* We are taking ownership of the wsi_wl_surface, so remove ownership from
2476 * oldSwapchain. If the surface is currently owned by a swapchain that is
2477 * not oldSwapchain we return an error.
2478 */
2479 if (wsi_wl_surface->chain &&
2480 wsi_swapchain_to_handle(&wsi_wl_surface->chain->base) != pCreateInfo->oldSwapchain) {
2481 result = VK_ERROR_NATIVE_WINDOW_IN_USE_KHR;
2482 goto fail;
2483 }
2484 if (pCreateInfo->oldSwapchain) {
2485 VK_FROM_HANDLE(wsi_wl_swapchain, old_chain, pCreateInfo->oldSwapchain);
2486 if (old_chain->tearing_control) {
2487 wp_tearing_control_v1_destroy(old_chain->tearing_control);
2488 old_chain->tearing_control = NULL;
2489 }
2490 }
2491
2492 /* Take ownership of the wsi_wl_surface */
2493 chain->wsi_wl_surface = wsi_wl_surface;
2494 wsi_wl_surface->chain = chain;
2495
2496 result = wsi_wl_surface_init(wsi_wl_surface, wsi_device, pAllocator);
2497 if (result != VK_SUCCESS)
2498 goto fail;
2499
2500 VkPresentModeKHR present_mode = wsi_swapchain_get_present_mode(wsi_device, pCreateInfo);
2501 if (present_mode == VK_PRESENT_MODE_IMMEDIATE_KHR) {
2502 chain->tearing_control =
2503 wp_tearing_control_manager_v1_get_tearing_control(wsi_wl_surface->display->tearing_control_manager,
2504 wsi_wl_surface->surface);
2505 if (!chain->tearing_control) {
2506 result = VK_ERROR_OUT_OF_HOST_MEMORY;
2507 goto fail;
2508 }
2509 wp_tearing_control_v1_set_presentation_hint(chain->tearing_control,
2510 WP_TEARING_CONTROL_V1_PRESENTATION_HINT_ASYNC);
2511 }
2512
2513 enum wsi_wl_buffer_type buffer_type;
2514 struct wsi_base_image_params *image_params = NULL;
2515 struct wsi_cpu_image_params cpu_image_params;
2516 struct wsi_drm_image_params drm_image_params;
2517 uint32_t num_drm_modifiers = 0;
2518 const uint64_t *drm_modifiers = NULL;
2519 if (wsi_device->sw) {
2520 cpu_image_params = (struct wsi_cpu_image_params) {
2521 .base.image_type = WSI_IMAGE_TYPE_CPU,
2522 };
2523 if (wsi_device->has_import_memory_host &&
2524 !(WSI_DEBUG & WSI_DEBUG_NOSHM)) {
2525 buffer_type = WSI_WL_BUFFER_GPU_SHM;
2526 cpu_image_params.alloc_shm = wsi_wl_alloc_image_shm;
2527 } else {
2528 buffer_type = WSI_WL_BUFFER_SHM_MEMCPY;
2529 }
2530 image_params = &cpu_image_params.base;
2531 } else {
2532 drm_image_params = (struct wsi_drm_image_params) {
2533 .base.image_type = WSI_IMAGE_TYPE_DRM,
2534 .same_gpu = wsi_wl_surface->display->same_gpu,
2535 .explicit_sync = wsi_wl_use_explicit_sync(wsi_wl_surface->display, wsi_device),
2536 };
2537 /* Use explicit DRM format modifiers when both the server and the driver
2538 * support them.
2539 */
2540 if (wsi_wl_surface->display->wl_dmabuf && wsi_device->supports_modifiers) {
2541 struct wsi_wl_format *f = NULL;
2542 /* Try to select modifiers for our vk_format from the surface dma-buf
2543 * feedback. If that doesn't work, fall back to the list of
2544 * formats/modifiers supported by the display. */
2545 if (wsi_wl_surface->wl_dmabuf_feedback)
2546 f = pick_format_from_surface_dmabuf_feedback(wsi_wl_surface,
2547 pCreateInfo->imageFormat);
2548 if (f == NULL)
2549 f = find_format(&chain->wsi_wl_surface->display->formats,
2550 pCreateInfo->imageFormat);
2551 if (f != NULL) {
2552 num_drm_modifiers = u_vector_length(&f->modifiers);
2553 drm_modifiers = u_vector_tail(&f->modifiers);
2554 if (num_drm_modifiers > 0)
2555 drm_image_params.num_modifier_lists = 1;
2556 else
2557 drm_image_params.num_modifier_lists = 0;
2558 drm_image_params.num_modifiers = &num_drm_modifiers;
2559 drm_image_params.modifiers = &drm_modifiers;
2560 }
2561 }
2562 buffer_type = WSI_WL_BUFFER_NATIVE;
2563 image_params = &drm_image_params.base;
2564 }
2565
2566 result = wsi_swapchain_init(wsi_device, &chain->base, device,
2567 pCreateInfo, image_params, pAllocator);
2568 if (result != VK_SUCCESS)
2569 goto fail;
2570
2571 bool alpha = pCreateInfo->compositeAlpha ==
2572 VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;
2573
2574 chain->base.destroy = wsi_wl_swapchain_destroy;
2575 chain->base.get_wsi_image = wsi_wl_swapchain_get_wsi_image;
2576 chain->base.acquire_next_image = chain->base.image_info.explicit_sync
2577 ? wsi_wl_swapchain_acquire_next_image_explicit
2578 : wsi_wl_swapchain_acquire_next_image_implicit;
2579 chain->base.queue_present = wsi_wl_swapchain_queue_present;
2580 chain->base.release_images = wsi_wl_swapchain_release_images;
2581 chain->base.set_present_mode = wsi_wl_swapchain_set_present_mode;
2582 chain->base.wait_for_present = wsi_wl_swapchain_wait_for_present;
2583 chain->base.present_mode = present_mode;
2584 chain->base.image_count = num_images;
2585 chain->extent = pCreateInfo->imageExtent;
2586 chain->vk_format = pCreateInfo->imageFormat;
2587 chain->buffer_type = buffer_type;
2588 if (buffer_type == WSI_WL_BUFFER_NATIVE) {
2589 chain->drm_format = wl_drm_format_for_vk_format(chain->vk_format, alpha);
2590 } else {
2591 chain->shm_format = wl_shm_format_for_vk_format(chain->vk_format, alpha);
2592 }
2593 chain->num_drm_modifiers = num_drm_modifiers;
2594 if (num_drm_modifiers) {
2595 uint64_t *drm_modifiers_copy =
2596 vk_alloc(pAllocator, sizeof(*drm_modifiers) * num_drm_modifiers, 8,
2597 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2598 if (!drm_modifiers_copy) {
2599 result = VK_ERROR_OUT_OF_HOST_MEMORY;
2600 goto fail_free_wl_chain;
2601 }
2602
2603 typed_memcpy(drm_modifiers_copy, drm_modifiers, num_drm_modifiers);
2604 chain->drm_modifiers = drm_modifiers_copy;
2605 }
2606
2607 if (u_cnd_monotonic_init(&chain->present_ids.list_advanced) != thrd_success) {
2608 result = VK_ERROR_OUT_OF_HOST_MEMORY;
2609 goto fail_free_wl_chain;
2610 }
2611 mtx_init(&chain->present_ids.lock, mtx_plain);
2612
2613 char *queue_name = vk_asprintf(pAllocator,
2614 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT,
2615 "mesa vk surface %d swapchain %d queue",
2616 wl_proxy_get_id((struct wl_proxy *) wsi_wl_surface->surface),
2617 wsi_wl_surface->chain_count++);
2618 chain->present_ids.queue =
2619 wl_display_create_queue_with_name(chain->wsi_wl_surface->display->wl_display,
2620 queue_name);
2621 vk_free(pAllocator, queue_name);
2622
2623 if (chain->wsi_wl_surface->display->wp_presentation_notwrapped) {
2624 chain->present_ids.wp_presentation =
2625 wl_proxy_create_wrapper(chain->wsi_wl_surface->display->wp_presentation_notwrapped);
2626 wl_proxy_set_queue((struct wl_proxy *) chain->present_ids.wp_presentation,
2627 chain->present_ids.queue);
2628 } else {
2629 /* Fall back to frame callbacks when the presentation protocol is not available.
2630 * We already have a proxy for the surface, but need another one since
2631 * presentID is pumped through a different queue so as not to disrupt
2632 * the queue used by QueuePresentKHR's frame callback. */
2633 chain->present_ids.surface = wl_proxy_create_wrapper(wsi_wl_surface->base.surface);
2634 wl_proxy_set_queue((struct wl_proxy *) chain->present_ids.surface,
2635 chain->present_ids.queue);
2636 }
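
/* Illustration (not part of the driver): proxy wrappers let the same
 * wl_surface be driven from two event queues without racing. A sketch of
 * the general libwayland pattern used above, with hypothetical names:
 *
 *    struct wl_surface *wrapped = wl_proxy_create_wrapper(surface);
 *    wl_proxy_set_queue((struct wl_proxy *)wrapped, private_queue);
 *    struct wl_callback *cb = wl_surface_frame(wrapped);
 *    // cb's events are now delivered only to private_queue.
 */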
2637
2638 chain->fifo_ready = true;
2639
2640 for (uint32_t i = 0; i < chain->base.image_count; i++) {
2641 result = wsi_wl_image_init(chain, &chain->images[i],
2642 pCreateInfo, pAllocator);
2643 if (result != VK_SUCCESS)
2644 goto fail_free_wl_images;
2645 chain->images[i].busy = false;
2646 }
2647
2648 *swapchain_out = &chain->base;
2649
2650 return VK_SUCCESS;
2651
2652 fail_free_wl_images:
2653 wsi_wl_swapchain_images_free(chain);
2654 fail_free_wl_chain:
2655 wsi_wl_swapchain_chain_free(chain, pAllocator);
2656 fail:
2657 vk_free(pAllocator, chain);
2658 wsi_wl_surface->chain = NULL;
2659
2660 assert(result != VK_SUCCESS);
2661 return result;
2662 }
2663
2664 VkResult
2665 wsi_wl_init_wsi(struct wsi_device *wsi_device,
2666 const VkAllocationCallbacks *alloc,
2667 VkPhysicalDevice physical_device)
2668 {
2669 struct wsi_wayland *wsi;
2670 VkResult result;
2671
2672 wsi = vk_alloc(alloc, sizeof(*wsi), 8,
2673 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
2674 if (!wsi) {
2675 result = VK_ERROR_OUT_OF_HOST_MEMORY;
2676 goto fail;
2677 }
2678
2679 wsi->physical_device = physical_device;
2680 wsi->alloc = alloc;
2681 wsi->wsi = wsi_device;
2682
2683 wsi->base.get_support = wsi_wl_surface_get_support;
2684 wsi->base.get_capabilities2 = wsi_wl_surface_get_capabilities2;
2685 wsi->base.get_formats = wsi_wl_surface_get_formats;
2686 wsi->base.get_formats2 = wsi_wl_surface_get_formats2;
2687 wsi->base.get_present_modes = wsi_wl_surface_get_present_modes;
2688 wsi->base.get_present_rectangles = wsi_wl_surface_get_present_rectangles;
2689 wsi->base.create_swapchain = wsi_wl_surface_create_swapchain;
2690
2691 wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND] = &wsi->base;
2692
2693 return VK_SUCCESS;
2694
2695 fail:
2696 wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND] = NULL;
2697
2698 return result;
2699 }
2700
2701 void
2702 wsi_wl_finish_wsi(struct wsi_device *wsi_device,
2703 const VkAllocationCallbacks *alloc)
2704 {
2705 struct wsi_wayland *wsi =
2706 (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];
2707 if (!wsi)
2708 return;
2709
2710 vk_free(alloc, wsi);
2711 }
2712