1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <X11/Xlib-xcb.h>
25 #include <X11/xshmfence.h>
26 #define XK_MISCELLANY
27 #define XK_LATIN1
28 #include <X11/keysymdef.h>
29 #include <xcb/xcb.h>
30 #ifdef XCB_KEYSYMS_AVAILABLE
31 #include <xcb/xcb_keysyms.h>
32 #endif
33 #include <xcb/dri3.h>
34 #include <xcb/present.h>
35 #include <xcb/shm.h>
36
37 #include "util/macros.h"
38 #include <stdatomic.h>
39 #include <stdlib.h>
40 #include <stdio.h>
41 #include <unistd.h>
42 #include <errno.h>
43 #include <string.h>
44 #include <fcntl.h>
45 #include "drm-uapi/drm_fourcc.h"
46 #include "util/libdrm.h"
47 #include "util/cnd_monotonic.h"
48 #include "util/hash_table.h"
49 #include "util/mesa-blake3.h"
50 #include "util/os_file.h"
51 #include "util/os_time.h"
52 #include "util/u_debug.h"
53 #include "util/u_thread.h"
54 #include "util/xmlconfig.h"
55 #include "util/timespec.h"
56
57 #include "vk_format.h"
58 #include "vk_instance.h"
59 #include "vk_physical_device.h"
60 #include "vk_device.h"
61 #include "vk_util.h"
62 #include "vk_enum_to_str.h"
63 #include "wsi_common_entrypoints.h"
64 #include "wsi_common_private.h"
65 #include "wsi_common_queue.h"
66
67 #ifdef HAVE_SYS_SHM_H
68 #include <sys/ipc.h>
69 #include <sys/shm.h>
70 #endif
71
72 #ifndef XCB_PRESENT_OPTION_ASYNC_MAY_TEAR
73 #define XCB_PRESENT_OPTION_ASYNC_MAY_TEAR 16
74 #endif
75 #ifndef XCB_PRESENT_CAPABILITY_ASYNC_MAY_TEAR
76 #define XCB_PRESENT_CAPABILITY_ASYNC_MAY_TEAR 8
77 #endif
78
79 #define MAX_DAMAGE_RECTS 64
80
81 struct wsi_x11_connection {
82 bool has_dri3;
83 bool has_dri3_modifiers;
84 bool has_dri3_explicit_sync;
85 bool has_present;
86 bool is_proprietary_x11;
87 bool is_xwayland;
88 bool has_mit_shm;
89 bool has_xfixes;
90 };
91
92 struct wsi_x11 {
93 struct wsi_interface base;
94
95 mtx_t mutex;
96 /* Hash table of xcb_connection -> wsi_x11_connection mappings */
97 struct hash_table *connections;
98 };
99
100 struct wsi_x11_vk_surface {
101 union {
102 VkIcdSurfaceXlib xlib;
103 VkIcdSurfaceXcb xcb;
104 };
105 bool has_alpha;
106 };
107 #ifdef HAVE_X11_DRM
108 /**
109 * Wrapper around xcb_dri3_open. Returns the opened fd or -1 on error.
110 */
111 static int
112 wsi_dri3_open(xcb_connection_t *conn,
113 xcb_window_t root,
114 uint32_t provider)
115 {
116 xcb_dri3_open_cookie_t cookie;
117 xcb_dri3_open_reply_t *reply;
118 int fd;
119
120 cookie = xcb_dri3_open(conn,
121 root,
122 provider);
123
124 reply = xcb_dri3_open_reply(conn, cookie, NULL);
125 if (!reply)
126 return -1;
127
128 /* According to the DRI3 extension, nfd must equal one. */
129 if (reply->nfd != 1) {
130 free(reply);
131 return -1;
132 }
133
134 fd = xcb_dri3_open_reply_fds(conn, reply)[0];
135 free(reply);
136 fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
137
138 return fd;
139 }
140 /**
141 * Checks compatibility of the device wsi_dev with the device the X server
142 * provides via DRI3.
143 *
144 * This returns true when no device could be retrieved from the X server or when
145 * the information for the X server device indicates that it is the same device.
146 */
147 static bool
148 wsi_x11_check_dri3_compatible(const struct wsi_device *wsi_dev,
149 xcb_connection_t *conn)
150 {
151 xcb_screen_iterator_t screen_iter =
152 xcb_setup_roots_iterator(xcb_get_setup(conn));
153 xcb_screen_t *screen = screen_iter.data;
154
155 /* Open the DRI3 device from the X server. If we do not retrieve one we
156 * assume our local device is compatible.
157 */
158 int dri3_fd = wsi_dri3_open(conn, screen->root, None);
159 if (dri3_fd == -1)
160 return true;
161
162 bool match = wsi_dev->can_present_on_device(wsi_dev->pdevice, dri3_fd);
163
164 close(dri3_fd);
165
166 return match;
167 }
168 #endif
169
170 static bool
171 wsi_x11_detect_xwayland(xcb_connection_t *conn,
172 xcb_query_extension_reply_t *randr_reply,
173 xcb_query_extension_reply_t *xwl_reply)
174 {
175 /* Newer Xwayland exposes an X11 extension we can check for */
176 if (xwl_reply && xwl_reply->present)
177 return true;
178
179 /* Older Xwayland uses the word "XWAYLAND" in the RandR output names */
180 if (!randr_reply || !randr_reply->present)
181 return false;
182
183 xcb_randr_query_version_cookie_t ver_cookie =
184 xcb_randr_query_version_unchecked(conn, 1, 3);
185 xcb_randr_query_version_reply_t *ver_reply =
186 xcb_randr_query_version_reply(conn, ver_cookie, NULL);
187 bool has_randr_v1_3 = ver_reply && (ver_reply->major_version > 1 ||
188 ver_reply->minor_version >= 3);
189 free(ver_reply);
190
191 if (!has_randr_v1_3)
192 return false;
193
194 const xcb_setup_t *setup = xcb_get_setup(conn);
195 xcb_screen_iterator_t iter = xcb_setup_roots_iterator(setup);
196
197 xcb_randr_get_screen_resources_current_cookie_t gsr_cookie =
198 xcb_randr_get_screen_resources_current_unchecked(conn, iter.data->root);
199 xcb_randr_get_screen_resources_current_reply_t *gsr_reply =
200 xcb_randr_get_screen_resources_current_reply(conn, gsr_cookie, NULL);
201
202 if (!gsr_reply || gsr_reply->num_outputs == 0) {
203 free(gsr_reply);
204 return false;
205 }
206
207 xcb_randr_output_t *randr_outputs =
208 xcb_randr_get_screen_resources_current_outputs(gsr_reply);
209 xcb_randr_get_output_info_cookie_t goi_cookie =
210 xcb_randr_get_output_info(conn, randr_outputs[0], gsr_reply->config_timestamp);
211 free(gsr_reply);
212
213 xcb_randr_get_output_info_reply_t *goi_reply =
214 xcb_randr_get_output_info_reply(conn, goi_cookie, NULL);
215 if (!goi_reply) {
216 return false;
217 }
218
219 char *output_name = (char*)xcb_randr_get_output_info_name(goi_reply);
220 bool is_xwayland = output_name && strncmp(output_name, "XWAYLAND", 8) == 0;
221 free(goi_reply);
222
223 return is_xwayland;
224 }
225
226 static struct wsi_x11_connection *
227 wsi_x11_connection_create(struct wsi_device *wsi_dev,
228 xcb_connection_t *conn)
229 {
230 xcb_query_extension_cookie_t dri3_cookie, pres_cookie, randr_cookie,
231 amd_cookie, nv_cookie, shm_cookie, sync_cookie,
232 xfixes_cookie, xwl_cookie;
233 xcb_query_extension_reply_t *dri3_reply, *pres_reply, *randr_reply,
234 *amd_reply, *nv_reply, *shm_reply = NULL,
235 *xfixes_reply, *xwl_reply;
236 bool wants_shm = wsi_dev->sw && !(WSI_DEBUG & WSI_DEBUG_NOSHM) &&
237 wsi_dev->has_import_memory_host;
238 bool has_dri3_v1_2 = false;
239 bool has_present_v1_2 = false;
240 bool has_dri3_v1_4 = false;
241 bool has_present_v1_4 = false;
242
243 struct wsi_x11_connection *wsi_conn =
244 vk_alloc(&wsi_dev->instance_alloc, sizeof(*wsi_conn), 8,
245 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
246 if (!wsi_conn)
247 return NULL;
248
249 sync_cookie = xcb_query_extension(conn, 4, "SYNC");
250 dri3_cookie = xcb_query_extension(conn, 4, "DRI3");
251 pres_cookie = xcb_query_extension(conn, 7, "Present");
252 randr_cookie = xcb_query_extension(conn, 5, "RANDR");
253 xfixes_cookie = xcb_query_extension(conn, 6, "XFIXES");
254 xwl_cookie = xcb_query_extension(conn, 8, "XWAYLAND");
255
256 if (wants_shm)
257 shm_cookie = xcb_query_extension(conn, 7, "MIT-SHM");
258
259 /* We try to be nice to users and emit a warning if they try to use a
260 * Vulkan application on a system without DRI3 enabled. However, this ends
261 * up spewing the warning when a user has, for example, both Intel
262 * integrated graphics and a discrete card with proprietary drivers and are
263 * running on the discrete card with the proprietary DDX. In this case, we
264 * really don't want to print the warning because it just confuses users.
265 * As a heuristic to detect this case, we check for a couple of proprietary
266 * X11 extensions.
267 */
268 amd_cookie = xcb_query_extension(conn, 11, "ATIFGLRXDRI");
269 nv_cookie = xcb_query_extension(conn, 10, "NV-CONTROL");
270
271 xcb_discard_reply(conn, sync_cookie.sequence);
272 dri3_reply = xcb_query_extension_reply(conn, dri3_cookie, NULL);
273 pres_reply = xcb_query_extension_reply(conn, pres_cookie, NULL);
274 randr_reply = xcb_query_extension_reply(conn, randr_cookie, NULL);
275 amd_reply = xcb_query_extension_reply(conn, amd_cookie, NULL);
276 nv_reply = xcb_query_extension_reply(conn, nv_cookie, NULL);
277 xfixes_reply = xcb_query_extension_reply(conn, xfixes_cookie, NULL);
278 xwl_reply = xcb_query_extension_reply(conn, xwl_cookie, NULL);
279 if (wants_shm)
280 shm_reply = xcb_query_extension_reply(conn, shm_cookie, NULL);
281 if (!dri3_reply || !pres_reply || !xfixes_reply) {
282 free(dri3_reply);
283 free(pres_reply);
284 free(xfixes_reply);
285 free(xwl_reply);
286 free(randr_reply);
287 free(amd_reply);
288 free(nv_reply);
289 if (wants_shm)
290 free(shm_reply);
291 vk_free(&wsi_dev->instance_alloc, wsi_conn);
292 return NULL;
293 }
294
295 wsi_conn->has_dri3 = dri3_reply->present != 0;
296 #ifdef HAVE_X11_DRM
297 if (wsi_conn->has_dri3) {
298 xcb_dri3_query_version_cookie_t ver_cookie;
299 xcb_dri3_query_version_reply_t *ver_reply;
300
301 ver_cookie = xcb_dri3_query_version(conn, 1, 4);
302 ver_reply = xcb_dri3_query_version_reply(conn, ver_cookie, NULL);
303 has_dri3_v1_2 = ver_reply != NULL &&
304 (ver_reply->major_version > 1 || ver_reply->minor_version >= 2);
305 has_dri3_v1_4 = ver_reply != NULL &&
306 (ver_reply->major_version > 1 || ver_reply->minor_version >= 4);
307 free(ver_reply);
308 }
309 #endif
310
311 wsi_conn->has_present = pres_reply->present != 0;
312 #ifdef HAVE_X11_DRM
313 if (wsi_conn->has_present) {
314 xcb_present_query_version_cookie_t ver_cookie;
315 xcb_present_query_version_reply_t *ver_reply;
316
317 ver_cookie = xcb_present_query_version(conn, 1, 4);
318 ver_reply = xcb_present_query_version_reply(conn, ver_cookie, NULL);
319 has_present_v1_2 = ver_reply != NULL &&
320 (ver_reply->major_version > 1 || ver_reply->minor_version >= 2);
321 has_present_v1_4 = ver_reply != NULL &&
322 (ver_reply->major_version > 1 || ver_reply->minor_version >= 4);
323 free(ver_reply);
324 }
325 #endif
326
327 wsi_conn->has_xfixes = xfixes_reply->present != 0;
328 if (wsi_conn->has_xfixes) {
329 xcb_xfixes_query_version_cookie_t ver_cookie;
330 xcb_xfixes_query_version_reply_t *ver_reply;
331
332 ver_cookie = xcb_xfixes_query_version(conn, 6, 0);
333 ver_reply = xcb_xfixes_query_version_reply(conn, ver_cookie, NULL);
334 wsi_conn->has_xfixes = (ver_reply->major_version >= 2);
335 free(ver_reply);
336 }
337
338 wsi_conn->is_xwayland = wsi_x11_detect_xwayland(conn, randr_reply,
339 xwl_reply);
340
341 wsi_conn->has_dri3_modifiers = has_dri3_v1_2 && has_present_v1_2;
342 wsi_conn->has_dri3_explicit_sync = has_dri3_v1_4 && has_present_v1_4;
343 wsi_conn->is_proprietary_x11 = false;
344 if (amd_reply && amd_reply->present)
345 wsi_conn->is_proprietary_x11 = true;
346 if (nv_reply && nv_reply->present)
347 wsi_conn->is_proprietary_x11 = true;
348
349 wsi_conn->has_mit_shm = false;
350 #ifdef HAVE_X11_DRM
351 if (wsi_conn->has_dri3 && wsi_conn->has_present && wants_shm) {
352 bool has_mit_shm = shm_reply->present != 0;
353
354 xcb_shm_query_version_cookie_t ver_cookie;
355 xcb_shm_query_version_reply_t *ver_reply;
356
357 ver_cookie = xcb_shm_query_version(conn);
358 ver_reply = xcb_shm_query_version_reply(conn, ver_cookie, NULL);
359
360 has_mit_shm = ver_reply->shared_pixmaps;
361 free(ver_reply);
362 xcb_void_cookie_t cookie;
363 xcb_generic_error_t *error;
364
365 if (has_mit_shm) {
366 cookie = xcb_shm_detach_checked(conn, 0);
367 if ((error = xcb_request_check(conn, cookie))) {
368 if (error->error_code != BadRequest)
369 wsi_conn->has_mit_shm = true;
370 free(error);
371 }
372 }
373 }
374 #endif
375
376 free(dri3_reply);
377 free(pres_reply);
378 free(randr_reply);
379 free(xwl_reply);
380 free(amd_reply);
381 free(nv_reply);
382 free(xfixes_reply);
383 if (wants_shm)
384 free(shm_reply);
385
386 return wsi_conn;
387 }
388
389 static void
390 wsi_x11_connection_destroy(struct wsi_device *wsi_dev,
391 struct wsi_x11_connection *conn)
392 {
393 vk_free(&wsi_dev->instance_alloc, conn);
394 }
395
396 static bool
397 wsi_x11_check_for_dri3(struct wsi_x11_connection *wsi_conn)
398 {
399 if (wsi_conn->has_dri3)
400 return true;
401 if (!wsi_conn->is_proprietary_x11) {
402 fprintf(stderr, "vulkan: No DRI3 support detected - required for presentation\n"
403 "Note: you can probably enable DRI3 in your Xorg config\n");
404 }
405 return false;
406 }
407
408 /**
409 * Get internal struct representing an xcb_connection_t.
410 *
411 * This can allocate the struct, but the caller does not own it. It is
412 * freed in wsi_x11_finish_wsi via the hash table it was inserted into.
413 *
414 * If the allocation fails NULL is returned.
415 */
416 static struct wsi_x11_connection *
417 wsi_x11_get_connection(struct wsi_device *wsi_dev,
418 xcb_connection_t *conn)
419 {
420 struct wsi_x11 *wsi =
421 (struct wsi_x11 *)wsi_dev->wsi[VK_ICD_WSI_PLATFORM_XCB];
422
423 mtx_lock(&wsi->mutex);
424
425 struct hash_entry *entry = _mesa_hash_table_search(wsi->connections, conn);
426 if (!entry) {
427 /* We're about to make a bunch of blocking calls. Let's drop the
428 * mutex for now so we don't block up too badly.
429 */
430 mtx_unlock(&wsi->mutex);
431
432 struct wsi_x11_connection *wsi_conn =
433 wsi_x11_connection_create(wsi_dev, conn);
434 if (!wsi_conn)
435 return NULL;
436
437 mtx_lock(&wsi->mutex);
438
439 entry = _mesa_hash_table_search(wsi->connections, conn);
440 if (entry) {
441 /* Oops, someone raced us to it */
442 wsi_x11_connection_destroy(wsi_dev, wsi_conn);
443 } else {
444 entry = _mesa_hash_table_insert(wsi->connections, conn, wsi_conn);
445 }
446 }
447
448 mtx_unlock(&wsi->mutex);
449
450 return entry->data;
451 }
452
453 static const VkFormat formats[] = {
454 VK_FORMAT_R5G6B5_UNORM_PACK16,
455 VK_FORMAT_B8G8R8A8_SRGB,
456 VK_FORMAT_B8G8R8A8_UNORM,
457 VK_FORMAT_A2R10G10B10_UNORM_PACK32,
458 };
459
460 static const VkPresentModeKHR present_modes[] = {
461 VK_PRESENT_MODE_IMMEDIATE_KHR,
462 VK_PRESENT_MODE_MAILBOX_KHR,
463 VK_PRESENT_MODE_FIFO_KHR,
464 VK_PRESENT_MODE_FIFO_RELAXED_KHR,
465 };
466
467 static xcb_screen_t *
468 get_screen_for_root(xcb_connection_t *conn, xcb_window_t root)
469 {
470 xcb_screen_iterator_t screen_iter =
471 xcb_setup_roots_iterator(xcb_get_setup(conn));
472
473 for (; screen_iter.rem; xcb_screen_next (&screen_iter)) {
474 if (screen_iter.data->root == root)
475 return screen_iter.data;
476 }
477
478 return NULL;
479 }
480
481 static xcb_visualtype_t *
482 screen_get_visualtype(xcb_screen_t *screen, xcb_visualid_t visual_id,
483 unsigned *depth)
484 {
485 xcb_depth_iterator_t depth_iter =
486 xcb_screen_allowed_depths_iterator(screen);
487
488 for (; depth_iter.rem; xcb_depth_next (&depth_iter)) {
489 xcb_visualtype_iterator_t visual_iter =
490 xcb_depth_visuals_iterator (depth_iter.data);
491
492 for (; visual_iter.rem; xcb_visualtype_next (&visual_iter)) {
493 if (visual_iter.data->visual_id == visual_id) {
494 if (depth)
495 *depth = depth_iter.data->depth;
496 return visual_iter.data;
497 }
498 }
499 }
500
501 return NULL;
502 }
503
504 static xcb_visualtype_t *
505 connection_get_visualtype(xcb_connection_t *conn, xcb_visualid_t visual_id)
506 {
507 xcb_screen_iterator_t screen_iter =
508 xcb_setup_roots_iterator(xcb_get_setup(conn));
509
510 /* For this we have to iterate over all of the screens which is rather
511 * annoying. Fortunately, there is probably only 1.
512 */
513 for (; screen_iter.rem; xcb_screen_next (&screen_iter)) {
514 xcb_visualtype_t *visual = screen_get_visualtype(screen_iter.data,
515 visual_id, NULL);
516 if (visual)
517 return visual;
518 }
519
520 return NULL;
521 }
522
523 static xcb_visualtype_t *
524 get_visualtype_for_window(xcb_connection_t *conn, xcb_window_t window,
525 unsigned *depth, xcb_visualtype_t **rootvis)
526 {
527 xcb_query_tree_cookie_t tree_cookie;
528 xcb_get_window_attributes_cookie_t attrib_cookie;
529 xcb_query_tree_reply_t *tree;
530 xcb_get_window_attributes_reply_t *attrib;
531
532 tree_cookie = xcb_query_tree(conn, window);
533 attrib_cookie = xcb_get_window_attributes(conn, window);
534
535 tree = xcb_query_tree_reply(conn, tree_cookie, NULL);
536 attrib = xcb_get_window_attributes_reply(conn, attrib_cookie, NULL);
537 if (attrib == NULL || tree == NULL) {
538 free(attrib);
539 free(tree);
540 return NULL;
541 }
542
543 xcb_window_t root = tree->root;
544 xcb_visualid_t visual_id = attrib->visual;
545 free(attrib);
546 free(tree);
547
548 xcb_screen_t *screen = get_screen_for_root(conn, root);
549 if (screen == NULL)
550 return NULL;
551
552 if (rootvis)
553 *rootvis = screen_get_visualtype(screen, screen->root_visual, depth);
554 return screen_get_visualtype(screen, visual_id, depth);
555 }
556
557 static bool
558 visual_has_alpha(xcb_visualtype_t *visual, unsigned depth)
559 {
560 uint32_t rgb_mask = visual->red_mask |
561 visual->green_mask |
562 visual->blue_mask;
563
564 uint32_t all_mask = 0xffffffff >> (32 - depth);
565
566 /* Do we have bits left over after RGB? */
567 return (all_mask & ~rgb_mask) != 0;
568 }
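/* Worked example (illustrative numbers, not taken from the code above): a
 * depth-32 TrueColor visual with red/green/blue masks 0x00ff0000 /
 * 0x0000ff00 / 0x000000ff leaves 0xff000000 uncovered, so it has alpha; a
 * depth-24 visual with the same masks has all_mask == 0x00ffffff and nothing
 * left over, so it does not. */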
569
570 static bool
571 visual_supported(xcb_visualtype_t *visual)
572 {
573 if (!visual)
574 return false;
575
576 return visual->_class == XCB_VISUAL_CLASS_TRUE_COLOR ||
577 visual->_class == XCB_VISUAL_CLASS_DIRECT_COLOR;
578 }
579
580 VKAPI_ATTR VkBool32 VKAPI_CALL
581 wsi_GetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice,
582 uint32_t queueFamilyIndex,
583 xcb_connection_t *connection,
584 xcb_visualid_t visual_id)
585 {
586 VK_FROM_HANDLE(vk_physical_device, pdevice, physicalDevice);
587 struct wsi_device *wsi_device = pdevice->wsi_device;
588 if (!(wsi_device->queue_supports_blit & BITFIELD64_BIT(queueFamilyIndex)))
589 return false;
590
591 struct wsi_x11_connection *wsi_conn =
592 wsi_x11_get_connection(wsi_device, connection);
593
594 if (!wsi_conn)
595 return false;
596
597 if (!wsi_device->sw) {
598 if (!wsi_x11_check_for_dri3(wsi_conn))
599 return false;
600 }
601
602 if (!visual_supported(connection_get_visualtype(connection, visual_id)))
603 return false;
604
605 return true;
606 }
607
608 VKAPI_ATTR VkBool32 VKAPI_CALL
609 wsi_GetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice,
610 uint32_t queueFamilyIndex,
611 Display *dpy,
612 VisualID visualID)
613 {
614 return wsi_GetPhysicalDeviceXcbPresentationSupportKHR(physicalDevice,
615 queueFamilyIndex,
616 XGetXCBConnection(dpy),
617 visualID);
618 }
619
620 static xcb_connection_t*
621 x11_surface_get_connection(VkIcdSurfaceBase *icd_surface)
622 {
623 if (icd_surface->platform == VK_ICD_WSI_PLATFORM_XLIB)
624 return XGetXCBConnection(((VkIcdSurfaceXlib *)icd_surface)->dpy);
625 else
626 return ((VkIcdSurfaceXcb *)icd_surface)->connection;
627 }
628
629 static xcb_window_t
630 x11_surface_get_window(VkIcdSurfaceBase *icd_surface)
631 {
632 if (icd_surface->platform == VK_ICD_WSI_PLATFORM_XLIB)
633 return ((VkIcdSurfaceXlib *)icd_surface)->window;
634 else
635 return ((VkIcdSurfaceXcb *)icd_surface)->window;
636 }
637
638 static VkResult
639 x11_surface_get_support(VkIcdSurfaceBase *icd_surface,
640 struct wsi_device *wsi_device,
641 uint32_t queueFamilyIndex,
642 VkBool32* pSupported)
643 {
644 xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
645 xcb_window_t window = x11_surface_get_window(icd_surface);
646
647 struct wsi_x11_connection *wsi_conn =
648 wsi_x11_get_connection(wsi_device, conn);
649 if (!wsi_conn)
650 return VK_ERROR_OUT_OF_HOST_MEMORY;
651
652 if (!wsi_device->sw) {
653 if (!wsi_x11_check_for_dri3(wsi_conn)) {
654 *pSupported = false;
655 return VK_SUCCESS;
656 }
657 }
658
659 if (!visual_supported(get_visualtype_for_window(conn, window, NULL, NULL))) {
660 *pSupported = false;
661 return VK_SUCCESS;
662 }
663
664 *pSupported = true;
665 return VK_SUCCESS;
666 }
667
668 static uint32_t
669 x11_get_min_image_count(const struct wsi_device *wsi_device, bool is_xwayland)
670 {
671 if (wsi_device->x11.override_minImageCount)
672 return wsi_device->x11.override_minImageCount;
673
674 /* For IMMEDIATE and FIFO, most games work in a pipelined manner where they
675 * can produce frames at a rate of 1/MAX(CPU duration, GPU duration), but
676 * the render latency is CPU duration + GPU duration.
677 *
678 * This means that with scanout from pageflipping we need 3 frames to run
679 * full speed:
680 * 1) CPU rendering work
681 * 2) GPU rendering work
682 * 3) scanout
683 *
684 * Once we have a nonblocking acquire that returns a semaphore we can merge
685 * 1 and 3. Hence the ideal implementation needs only 2 images, but games
686 * cannot tell we currently do not have an ideal implementation and that
687 * hence they need to allocate 3 images. So let us do it for them.
688 *
689 * This is a tradeoff as it uses more memory than needed for non-fullscreen
690 * and non-performance intensive applications.
691 *
692 * For Xwayland, Venus reports four images, as described in
693 * wsi_wl_surface_get_capabilities.
694 */
695 return is_xwayland && wsi_device->x11.extra_xwayland_image ? 4 : 3;
696 }
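/* Summary of the rules above: override_minImageCount, when set, always wins;
 * otherwise a plain X11 connection gets 3 images, and an Xwayland connection
 * gets 4 only when the driver opts in via x11.extra_xwayland_image. */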
697
698 static unsigned
699 x11_get_min_image_count_for_present_mode(struct wsi_device *wsi_device,
700 struct wsi_x11_connection *wsi_conn,
701 VkPresentModeKHR present_mode);
702
703 static VkResult
704 x11_surface_get_capabilities(VkIcdSurfaceBase *icd_surface,
705 struct wsi_device *wsi_device,
706 const VkSurfacePresentModeEXT *present_mode,
707 VkSurfaceCapabilitiesKHR *caps)
708 {
709 xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
710 xcb_window_t window = x11_surface_get_window(icd_surface);
711 struct wsi_x11_vk_surface *surface = (struct wsi_x11_vk_surface*)icd_surface;
712 struct wsi_x11_connection *wsi_conn =
713 wsi_x11_get_connection(wsi_device, conn);
714 xcb_get_geometry_cookie_t geom_cookie;
715 xcb_generic_error_t *err;
716 xcb_get_geometry_reply_t *geom;
717
718 geom_cookie = xcb_get_geometry(conn, window);
719
720 geom = xcb_get_geometry_reply(conn, geom_cookie, &err);
721 if (!geom)
722 return VK_ERROR_SURFACE_LOST_KHR;
723 {
724 VkExtent2D extent = { geom->width, geom->height };
725 caps->currentExtent = extent;
726 caps->minImageExtent = extent;
727 caps->maxImageExtent = extent;
728 }
729 free(err);
730 free(geom);
731
732 if (surface->has_alpha) {
733 caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
734 VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;
735 } else {
736 caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
737 VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
738 }
739
740 if (present_mode) {
741 caps->minImageCount = x11_get_min_image_count_for_present_mode(wsi_device, wsi_conn, present_mode->presentMode);
742 } else {
743 caps->minImageCount = x11_get_min_image_count(wsi_device, wsi_conn->is_xwayland);
744 }
745
746 /* There is no real maximum */
747 caps->maxImageCount = 0;
748
749 caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
750 caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
751 caps->maxImageArrayLayers = 1;
752 caps->supportedUsageFlags = wsi_caps_get_image_usage();
753
754 VK_FROM_HANDLE(vk_physical_device, pdevice, wsi_device->pdevice);
755 if (pdevice->supported_extensions.EXT_attachment_feedback_loop_layout)
756 caps->supportedUsageFlags |= VK_IMAGE_USAGE_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT;
757
758 return VK_SUCCESS;
759 }
760
761 static VkResult
762 x11_surface_get_capabilities2(VkIcdSurfaceBase *icd_surface,
763 struct wsi_device *wsi_device,
764 const void *info_next,
765 VkSurfaceCapabilities2KHR *caps)
766 {
767 assert(caps->sType == VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR);
768
769 const VkSurfacePresentModeEXT *present_mode = vk_find_struct_const(info_next, SURFACE_PRESENT_MODE_EXT);
770
771 VkResult result =
772 x11_surface_get_capabilities(icd_surface, wsi_device, present_mode,
773 &caps->surfaceCapabilities);
774
775 if (result != VK_SUCCESS)
776 return result;
777
778 vk_foreach_struct(ext, caps->pNext) {
779 switch (ext->sType) {
780 case VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR: {
781 VkSurfaceProtectedCapabilitiesKHR *protected = (void *)ext;
782 protected->supportsProtected = VK_FALSE;
783 break;
784 }
785
786 case VK_STRUCTURE_TYPE_SURFACE_PRESENT_SCALING_CAPABILITIES_EXT: {
787 /* Unsupported. */
788 VkSurfacePresentScalingCapabilitiesEXT *scaling = (void *)ext;
789 scaling->supportedPresentScaling = 0;
790 scaling->supportedPresentGravityX = 0;
791 scaling->supportedPresentGravityY = 0;
792 scaling->minScaledImageExtent = caps->surfaceCapabilities.minImageExtent;
793 scaling->maxScaledImageExtent = caps->surfaceCapabilities.maxImageExtent;
794 break;
795 }
796
797 case VK_STRUCTURE_TYPE_SURFACE_PRESENT_MODE_COMPATIBILITY_EXT: {
798 /* All present modes are compatible with each other. */
799 VkSurfacePresentModeCompatibilityEXT *compat = (void *)ext;
800 if (compat->pPresentModes) {
801 assert(present_mode);
802 VK_OUTARRAY_MAKE_TYPED(VkPresentModeKHR, modes, compat->pPresentModes, &compat->presentModeCount);
803 /* Must always return queried present mode even when truncating. */
804 vk_outarray_append_typed(VkPresentModeKHR, &modes, mode) {
805 *mode = present_mode->presentMode;
806 }
807
808 for (uint32_t i = 0; i < ARRAY_SIZE(present_modes); i++) {
809 if (present_modes[i] != present_mode->presentMode) {
810 vk_outarray_append_typed(VkPresentModeKHR, &modes, mode) {
811 *mode = present_modes[i];
812 }
813 }
814 }
815 } else {
816 if (!present_mode)
817 wsi_common_vk_warn_once("Use of VkSurfacePresentModeCompatibilityEXT "
818 "without a VkSurfacePresentModeEXT set. This is an "
819 "application bug.\n");
820
821 compat->presentModeCount = ARRAY_SIZE(present_modes);
822 }
823 break;
824 }
825
826 default:
827 /* Ignored */
828 break;
829 }
830 }
831
832 return result;
833 }
834
835 static int
836 format_get_component_bits(VkFormat format, int comp)
837 {
838 return vk_format_get_component_bits(format, UTIL_FORMAT_COLORSPACE_RGB, comp);
839 }
840
841 static bool
842 rgb_component_bits_are_equal(VkFormat format, const xcb_visualtype_t* type)
843 {
844 return format_get_component_bits(format, 0) == util_bitcount(type->red_mask) &&
845 format_get_component_bits(format, 1) == util_bitcount(type->green_mask) &&
846 format_get_component_bits(format, 2) == util_bitcount(type->blue_mask);
847 }
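/* Illustrative pairings: VK_FORMAT_B8G8R8A8_UNORM (8/8/8 RGB bits) matches
 * the usual depth-24/32 visuals whose masks each span 8 bits, while
 * VK_FORMAT_R5G6B5_UNORM_PACK16 matches a 16-bit visual with 5/6/5 masks.
 * Only the per-channel bit counts are compared, not the channel order. */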
848
849 static bool
850 get_sorted_vk_formats(VkIcdSurfaceBase *surface, struct wsi_device *wsi_device,
851 VkFormat *sorted_formats, unsigned *count)
852 {
853 xcb_connection_t *conn = x11_surface_get_connection(surface);
854 xcb_window_t window = x11_surface_get_window(surface);
855 xcb_visualtype_t *rootvis = NULL;
856 xcb_visualtype_t *visual = get_visualtype_for_window(conn, window, NULL, &rootvis);
857
858 if (!visual)
859 return false;
860
861 /* use the root window's visual to set the default */
862 *count = 0;
863 for (unsigned i = 0; i < ARRAY_SIZE(formats); i++) {
864 if (rgb_component_bits_are_equal(formats[i], rootvis))
865 sorted_formats[(*count)++] = formats[i];
866 }
867
868 for (unsigned i = 0; i < ARRAY_SIZE(formats); i++) {
869 for (unsigned j = 0; j < *count; j++)
870 if (formats[i] == sorted_formats[j])
871 goto next_format;
872 if (rgb_component_bits_are_equal(formats[i], visual))
873 sorted_formats[(*count)++] = formats[i];
874 next_format:;
875 }
876
877 if (wsi_device->force_bgra8_unorm_first) {
878 for (unsigned i = 0; i < *count; i++) {
879 if (sorted_formats[i] == VK_FORMAT_B8G8R8A8_UNORM) {
880 sorted_formats[i] = sorted_formats[0];
881 sorted_formats[0] = VK_FORMAT_B8G8R8A8_UNORM;
882 break;
883 }
884 }
885 }
886
887 return true;
888 }
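/* Resulting order (a sketch of the logic above): formats matching the root
 * window's visual come first, then formats that only match the window's own
 * visual, and force_bgra8_unorm_first can finally hoist
 * VK_FORMAT_B8G8R8A8_UNORM to the front as an opt-in workaround. */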
889
890 static VkResult
891 x11_surface_get_formats(VkIcdSurfaceBase *surface,
892 struct wsi_device *wsi_device,
893 uint32_t *pSurfaceFormatCount,
894 VkSurfaceFormatKHR *pSurfaceFormats)
895 {
896 VK_OUTARRAY_MAKE_TYPED(VkSurfaceFormatKHR, out,
897 pSurfaceFormats, pSurfaceFormatCount);
898
899 unsigned count;
900 VkFormat sorted_formats[ARRAY_SIZE(formats)];
901 if (!get_sorted_vk_formats(surface, wsi_device, sorted_formats, &count))
902 return VK_ERROR_SURFACE_LOST_KHR;
903
904 for (unsigned i = 0; i < count; i++) {
905 vk_outarray_append_typed(VkSurfaceFormatKHR, &out, f) {
906 f->format = sorted_formats[i];
907 f->colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
908 }
909 }
910
911 return vk_outarray_status(&out);
912 }
913
914 static VkResult
915 x11_surface_get_formats2(VkIcdSurfaceBase *surface,
916 struct wsi_device *wsi_device,
917 const void *info_next,
918 uint32_t *pSurfaceFormatCount,
919 VkSurfaceFormat2KHR *pSurfaceFormats)
920 {
921 VK_OUTARRAY_MAKE_TYPED(VkSurfaceFormat2KHR, out,
922 pSurfaceFormats, pSurfaceFormatCount);
923
924 unsigned count;
925 VkFormat sorted_formats[ARRAY_SIZE(formats)];
926 if (!get_sorted_vk_formats(surface, wsi_device, sorted_formats, &count))
927 return VK_ERROR_SURFACE_LOST_KHR;
928
929 for (unsigned i = 0; i < count; i++) {
930 vk_outarray_append_typed(VkSurfaceFormat2KHR, &out, f) {
931 assert(f->sType == VK_STRUCTURE_TYPE_SURFACE_FORMAT_2_KHR);
932 f->surfaceFormat.format = sorted_formats[i];
933 f->surfaceFormat.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
934 }
935 }
936
937 return vk_outarray_status(&out);
938 }
939
940 static VkResult
941 x11_surface_get_present_modes(VkIcdSurfaceBase *surface,
942 struct wsi_device *wsi_device,
943 uint32_t *pPresentModeCount,
944 VkPresentModeKHR *pPresentModes)
945 {
946 if (pPresentModes == NULL) {
947 *pPresentModeCount = ARRAY_SIZE(present_modes);
948 return VK_SUCCESS;
949 }
950
951 *pPresentModeCount = MIN2(*pPresentModeCount, ARRAY_SIZE(present_modes));
952 typed_memcpy(pPresentModes, present_modes, *pPresentModeCount);
953
954 return *pPresentModeCount < ARRAY_SIZE(present_modes) ?
955 VK_INCOMPLETE : VK_SUCCESS;
956 }
957
958 static VkResult
959 x11_surface_get_present_rectangles(VkIcdSurfaceBase *icd_surface,
960 struct wsi_device *wsi_device,
961 uint32_t* pRectCount,
962 VkRect2D* pRects)
963 {
964 xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
965 xcb_window_t window = x11_surface_get_window(icd_surface);
966 VK_OUTARRAY_MAKE_TYPED(VkRect2D, out, pRects, pRectCount);
967
968 vk_outarray_append_typed(VkRect2D, &out, rect) {
969 xcb_generic_error_t *err = NULL;
970 xcb_get_geometry_cookie_t geom_cookie = xcb_get_geometry(conn, window);
971 xcb_get_geometry_reply_t *geom =
972 xcb_get_geometry_reply(conn, geom_cookie, &err);
973 free(err);
974 if (geom) {
975 *rect = (VkRect2D) {
976 .offset = { 0, 0 },
977 .extent = { geom->width, geom->height },
978 };
979 }
980 free(geom);
981 if (!geom)
982 return VK_ERROR_SURFACE_LOST_KHR;
983 }
984
985 return vk_outarray_status(&out);
986 }
987
988 VKAPI_ATTR VkResult VKAPI_CALL
989 wsi_CreateXcbSurfaceKHR(VkInstance _instance,
990 const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
991 const VkAllocationCallbacks *pAllocator,
992 VkSurfaceKHR *pSurface)
993 {
994 VK_FROM_HANDLE(vk_instance, instance, _instance);
995 struct wsi_x11_vk_surface *surface;
996
997 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR);
998
999 unsigned visual_depth;
1000 xcb_visualtype_t *visual =
1001 get_visualtype_for_window(pCreateInfo->connection, pCreateInfo->window, &visual_depth, NULL);
1002 if (!visual)
1003 return VK_ERROR_OUT_OF_HOST_MEMORY;
1004
1005 surface = vk_alloc2(&instance->alloc, pAllocator, sizeof(struct wsi_x11_vk_surface), 8,
1006 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1007 if (surface == NULL)
1008 return VK_ERROR_OUT_OF_HOST_MEMORY;
1009
1010 surface->xcb.base.platform = VK_ICD_WSI_PLATFORM_XCB;
1011 surface->xcb.connection = pCreateInfo->connection;
1012 surface->xcb.window = pCreateInfo->window;
1013
1014 surface->has_alpha = visual_has_alpha(visual, visual_depth);
1015
1016 *pSurface = VkIcdSurfaceBase_to_handle(&surface->xcb.base);
1017 return VK_SUCCESS;
1018 }
1019
1020 VKAPI_ATTR VkResult VKAPI_CALL
1021 wsi_CreateXlibSurfaceKHR(VkInstance _instance,
1022 const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
1023 const VkAllocationCallbacks *pAllocator,
1024 VkSurfaceKHR *pSurface)
1025 {
1026 VK_FROM_HANDLE(vk_instance, instance, _instance);
1027 struct wsi_x11_vk_surface *surface;
1028
1029 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR);
1030
1031 unsigned visual_depth;
1032 xcb_visualtype_t *visual =
1033 get_visualtype_for_window(XGetXCBConnection(pCreateInfo->dpy), pCreateInfo->window, &visual_depth, NULL);
1034 if (!visual)
1035 return VK_ERROR_OUT_OF_HOST_MEMORY;
1036
1037 surface = vk_alloc2(&instance->alloc, pAllocator, sizeof(struct wsi_x11_vk_surface), 8,
1038 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1039 if (surface == NULL)
1040 return VK_ERROR_OUT_OF_HOST_MEMORY;
1041
1042 surface->xlib.base.platform = VK_ICD_WSI_PLATFORM_XLIB;
1043 surface->xlib.dpy = pCreateInfo->dpy;
1044 surface->xlib.window = pCreateInfo->window;
1045
1046 surface->has_alpha = visual_has_alpha(visual, visual_depth);
1047
1048 *pSurface = VkIcdSurfaceBase_to_handle(&surface->xlib.base);
1049 return VK_SUCCESS;
1050 }
1051
1052 struct x11_image_pending_completion {
1053 uint32_t serial;
1054 uint64_t signal_present_id;
1055 };
1056
1057 struct x11_image {
1058 struct wsi_image base;
1059 xcb_pixmap_t pixmap;
1060 xcb_xfixes_region_t update_region; /* long lived XID */
1061 xcb_xfixes_region_t update_area; /* the above or None */
1062 struct xshmfence * shm_fence;
1063 uint32_t sync_fence;
1064 xcb_shm_seg_t shmseg;
1065 int shmid;
1066 uint8_t * shmaddr;
1067 uint64_t present_id;
1068 VkPresentModeKHR present_mode;
1069 xcb_rectangle_t rects[MAX_DAMAGE_RECTS];
1070 int rectangle_count;
1071
1072 /* In IMMEDIATE and MAILBOX modes, we can have multiple pending presentations per image.
1073 * We need to keep track of them when considering present ID. */
1074
1075 /* This is arbitrarily chosen. With IMMEDIATE on a 3 deep swapchain,
1076 * we allow over 300 outstanding presentations per vblank, which is more than enough
1077 * for any reasonable application.
1078 * This used to be 16, but it regressed benchmarks that did 15k+ FPS.
1079 * This should allow over 25k FPS on a 60 Hz monitor. Any more than this is comical. */
1080 #define X11_SWAPCHAIN_MAX_PENDING_COMPLETIONS 128
1081 uint32_t present_queued_count;
1082 struct x11_image_pending_completion pending_completions[X11_SWAPCHAIN_MAX_PENDING_COMPLETIONS];
1083 #ifdef HAVE_DRI3_EXPLICIT_SYNC
1084 uint32_t dri3_syncobj[WSI_ES_COUNT];
1085 #endif
1086 };
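/* Sketch of the pending-completion lifecycle: each xcb_present_pixmap(_synced)
 * call records an entry with the low 32 bits of send_sbc as its serial plus
 * the present ID it should signal, and the matching
 * XCB_PRESENT_EVENT_COMPLETE_NOTIFY later retires that entry in
 * x11_present_complete(), waking any vkWaitForPresentKHR waiters. */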
1087
1088 struct x11_swapchain {
1089 struct wsi_swapchain base;
1090
1091 bool has_dri3_modifiers;
1092 bool has_mit_shm;
1093 bool has_async_may_tear;
1094
1095 xcb_connection_t * conn;
1096 xcb_window_t window;
1097 xcb_gc_t gc;
1098 uint32_t depth;
1099 VkExtent2D extent;
1100
1101 blake3_hash dri3_modifier_hash;
1102
1103 xcb_present_event_t event_id;
1104 xcb_special_event_t * special_event;
1105 uint64_t send_sbc;
1106 uint64_t last_present_msc;
1107 uint32_t stamp;
1108 uint32_t sent_image_count;
1109
1110 atomic_int status;
1111 bool copy_is_suboptimal;
1112 struct wsi_queue present_queue;
1113 struct wsi_queue acquire_queue;
1114 thrd_t queue_manager;
1115 thrd_t event_manager;
1116
1117 /* Used for communicating between event_manager and queue_manager.
1118 * Lock is also taken when reading and writing status.
1119 * When reading status in application threads,
1120 * x11_swapchain_read_status_atomic can be used as a wrapper function. */
1121 mtx_t thread_state_lock;
1122 struct u_cnd_monotonic thread_state_cond;
1123
1124 /* Lock and condition variable for present wait.
1125 * Signalled by event thread and waited on by callers to PresentWaitKHR. */
1126 mtx_t present_progress_mutex;
1127 struct u_cnd_monotonic present_progress_cond;
1128 uint64_t present_id;
1129 VkResult present_progress_error;
1130
1131 struct x11_image images[0];
1132 };
1133 VK_DEFINE_NONDISP_HANDLE_CASTS(x11_swapchain, base.base, VkSwapchainKHR,
1134 VK_OBJECT_TYPE_SWAPCHAIN_KHR)
1135
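/* Retire pending completion 'index' of 'image': advance the swapchain's
 * present_id (waking vkWaitForPresentKHR waiters) if this entry carries a
 * newer present ID, drop the entry from pending_completions, and signal the
 * swapchain threads that state has changed. */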
1136 static void x11_present_complete(struct x11_swapchain *swapchain,
1137 struct x11_image *image, uint32_t index)
1138 {
1139 uint64_t signal_present_id = image->pending_completions[index].signal_present_id;
1140 if (signal_present_id) {
1141 mtx_lock(&swapchain->present_progress_mutex);
1142 if (signal_present_id > swapchain->present_id) {
1143 swapchain->present_id = signal_present_id;
1144 u_cnd_monotonic_broadcast(&swapchain->present_progress_cond);
1145 }
1146 mtx_unlock(&swapchain->present_progress_mutex);
1147 }
1148
1149 image->present_queued_count--;
1150 if (image->present_queued_count) {
1151 memmove(image->pending_completions + index,
1152 image->pending_completions + index + 1,
1153 (image->present_queued_count - index) *
1154 sizeof(image->pending_completions[0]));
1155 }
1156
1157 u_cnd_monotonic_signal(&swapchain->thread_state_cond);
1158 }
1159
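/* Wake the swapchain threads so a newly queued present gets processed. */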
1160 static void x11_notify_pending_present(struct x11_swapchain *swapchain,
1161 struct x11_image *image)
1162 {
1163 u_cnd_monotonic_signal(&swapchain->thread_state_cond);
1164 }
1165
1166 /* It is assumed that thread_state_lock is taken when calling this function. */
1167 static void x11_swapchain_notify_error(struct x11_swapchain *swapchain, VkResult result)
1168 {
1169 mtx_lock(&swapchain->present_progress_mutex);
1170 swapchain->present_id = UINT64_MAX;
1171 swapchain->present_progress_error = result;
1172 u_cnd_monotonic_broadcast(&swapchain->present_progress_cond);
1173 mtx_unlock(&swapchain->present_progress_mutex);
1174 u_cnd_monotonic_broadcast(&swapchain->thread_state_cond);
1175 }
1176
1177 /**
1178 * Update the swapchain status with the result of an operation, and return
1179 * the combined status. The chain status will eventually be returned from
1180 * AcquireNextImage and QueuePresent.
1181 *
1182 * We make sure to 'stick' more pessimistic statuses: an out-of-date error
1183 * is permanent once seen, and every subsequent call will return this. If
1184 * this has not been seen, success will be returned.
1185 *
1186 * It is assumed that thread_state_lock is taken when calling this function.
1187 */
1188 static VkResult
1189 _x11_swapchain_result(struct x11_swapchain *chain, VkResult result,
1190 const char *file, int line)
1191 {
1192 if (result < 0)
1193 x11_swapchain_notify_error(chain, result);
1194
1195 /* Prioritise returning existing errors for consistency. */
1196 if (chain->status < 0)
1197 return chain->status;
1198
1199 /* If we have a new error, mark it as permanent on the chain and return. */
1200 if (result < 0) {
1201 #ifndef NDEBUG
1202 fprintf(stderr, "%s:%d: Swapchain status changed to %s\n",
1203 file, line, vk_Result_to_str(result));
1204 #endif
1205 chain->status = result;
1206 return result;
1207 }
1208
1209 /* Return temporary errors, but don't persist them. */
1210 if (result == VK_TIMEOUT || result == VK_NOT_READY)
1211 return result;
1212
1213 /* Suboptimal isn't an error, but is a status which sticks to the swapchain
1214 * and is always returned rather than success.
1215 */
1216 if (result == VK_SUBOPTIMAL_KHR) {
1217 #ifndef NDEBUG
1218 if (chain->status != VK_SUBOPTIMAL_KHR) {
1219 fprintf(stderr, "%s:%d: Swapchain status changed to %s\n",
1220 file, line, vk_Result_to_str(result));
1221 }
1222 #endif
1223 chain->status = result;
1224 return result;
1225 }
1226
1227 /* No changes, so return the last status. */
1228 return chain->status;
1229 }
1230 #define x11_swapchain_result(chain, result) \
1231 _x11_swapchain_result(chain, result, __FILE__, __LINE__)
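/* Illustrative behaviour of the helper above: once a real error such as
 * VK_ERROR_OUT_OF_DATE_KHR has been recorded, later calls return it even when
 * passed VK_SUCCESS; VK_SUBOPTIMAL_KHR also sticks but is superseded by
 * errors; VK_TIMEOUT and VK_NOT_READY are passed through without being
 * recorded. */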
1232
1233 static struct wsi_image *
1234 x11_get_wsi_image(struct wsi_swapchain *wsi_chain, uint32_t image_index)
1235 {
1236 struct x11_swapchain *chain = (struct x11_swapchain *)wsi_chain;
1237 return &chain->images[image_index].base;
1238 }
1239 #ifdef HAVE_X11_DRM
1240 static bool
1241 wsi_x11_swapchain_query_dri3_modifiers_changed(struct x11_swapchain *chain);
1242 #endif
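/* Wait (up to rel_timeout_ns) for the X server to release one of the
 * swapchain images via the DRI3 explicit-sync release timeline and return its
 * index; without libdrm support this reports VK_ERROR_FEATURE_NOT_PRESENT. */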
1243 static VkResult
1244 x11_wait_for_explicit_sync_release_submission(struct x11_swapchain *chain,
1245 uint64_t rel_timeout_ns,
1246 uint32_t *image_index)
1247 {
1248 STACK_ARRAY(struct wsi_image*, images, chain->base.image_count);
1249 for (uint32_t i = 0; i < chain->base.image_count; i++)
1250 images[i] = &chain->images[i].base;
1251
1252 VkResult result;
1253 #ifdef HAVE_LIBDRM
1254 result = wsi_drm_wait_for_explicit_sync_release(&chain->base,
1255 chain->base.image_count,
1256 images,
1257 rel_timeout_ns,
1258 image_index);
1259 #else
1260 result = VK_ERROR_FEATURE_NOT_PRESENT;
1261 #endif
1262 STACK_ARRAY_FINISH(images);
1263 return result;
1264 }
1265
1266 /* XXX this belongs in presentproto */
1267 #ifndef PresentWindowDestroyed
1268 #define PresentWindowDestroyed (1 << 0)
1269 #endif
1270 /**
1271 * Process an X11 Present event. Does not update chain->status.
1272 */
1273 static VkResult
1274 x11_handle_dri3_present_event(struct x11_swapchain *chain,
1275 xcb_present_generic_event_t *event)
1276 {
1277 switch (event->evtype) {
1278 case XCB_PRESENT_CONFIGURE_NOTIFY: {
1279 xcb_present_configure_notify_event_t *config = (void *) event;
1280 if (config->pixmap_flags & PresentWindowDestroyed)
1281 return VK_ERROR_SURFACE_LOST_KHR;
1282
1283 struct wsi_device *wsi_device = (struct wsi_device *)chain->base.wsi;
1284 if (!wsi_device->x11.ignore_suboptimal) {
1285 if (config->width != chain->extent.width ||
1286 config->height != chain->extent.height)
1287 return VK_SUBOPTIMAL_KHR;
1288 }
1289
1290 break;
1291 }
1292
1293 case XCB_PRESENT_EVENT_IDLE_NOTIFY: {
1294 xcb_present_idle_notify_event_t *idle = (void *) event;
1295
1296 assert(!chain->base.image_info.explicit_sync);
1297 for (unsigned i = 0; i < chain->base.image_count; i++) {
1298 if (chain->images[i].pixmap == idle->pixmap) {
1299 chain->sent_image_count--;
1300 assert(chain->sent_image_count >= 0);
1301 wsi_queue_push(&chain->acquire_queue, i);
1302 break;
1303 }
1304 }
1305
1306 break;
1307 }
1308
1309 case XCB_PRESENT_EVENT_COMPLETE_NOTIFY: {
1310 xcb_present_complete_notify_event_t *complete = (void *) event;
1311 if (complete->kind == XCB_PRESENT_COMPLETE_KIND_PIXMAP) {
1312 unsigned i, j;
1313 for (i = 0; i < chain->base.image_count; i++) {
1314 struct x11_image *image = &chain->images[i];
1315 for (j = 0; j < image->present_queued_count; j++) {
1316 if (image->pending_completions[j].serial == complete->serial) {
1317 x11_present_complete(chain, image, j);
1318 }
1319 }
1320 }
1321 chain->last_present_msc = complete->msc;
1322 }
1323
1324 VkResult result = VK_SUCCESS;
1325
1326 struct wsi_device *wsi_device = (struct wsi_device *)chain->base.wsi;
1327 if (wsi_device->x11.ignore_suboptimal)
1328 return result;
1329
1330 switch (complete->mode) {
1331 case XCB_PRESENT_COMPLETE_MODE_COPY:
1332 if (chain->copy_is_suboptimal)
1333 result = VK_SUBOPTIMAL_KHR;
1334 break;
1335 case XCB_PRESENT_COMPLETE_MODE_FLIP:
1336 /* If we ever go from flipping to copying, the odds are very likely
1337 * that we could reallocate in a more optimal way if we didn't have
1338 * to care about scanout, so we always do this.
1339 */
1340 chain->copy_is_suboptimal = true;
1341 break;
1342 #ifdef HAVE_X11_DRM
1343 case XCB_PRESENT_COMPLETE_MODE_SUBOPTIMAL_COPY:
1344 /* The winsys is now trying to flip directly and cannot due to our
1345 * configuration. Request the user reallocate.
1346 */
1347
1348 /* Sometimes this complete mode is spurious (a false positive):
1349 * Xwayland may report SUBOPTIMAL_COPY even if there are no changes in the modifiers.
1350 * See https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/26616 for more details. */
1351 if (chain->status == VK_SUCCESS &&
1352 wsi_x11_swapchain_query_dri3_modifiers_changed(chain)) {
1353 result = VK_SUBOPTIMAL_KHR;
1354 }
1355 break;
1356 #endif
1357 default:
1358 break;
1359 }
1360
1361 return result;
1362 }
1363
1364 default:
1365 break;
1366 }
1367
1368 return VK_SUCCESS;
1369 }
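/* In short, the handler above maps Present events onto swapchain state:
 * CONFIGURE_NOTIFY flags window destruction (SURFACE_LOST) or a size mismatch
 * (SUBOPTIMAL), IDLE_NOTIFY hands the pixmap's image back to the acquire
 * queue (implicit sync only), and COMPLETE_NOTIFY retires pending
 * completions, records the new MSC and reports flip/copy suboptimality. */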
1370 #ifdef HAVE_X11_DRM
1371 /**
1372 * Send image to X server via Present extension.
1373 */
1374 static VkResult
1375 x11_present_to_x11_dri3(struct x11_swapchain *chain, uint32_t image_index,
1376 uint64_t target_msc, VkPresentModeKHR present_mode)
1377 {
1378 struct x11_image *image = &chain->images[image_index];
1379
1380 assert(image_index < chain->base.image_count);
1381
1382 uint32_t options = XCB_PRESENT_OPTION_NONE;
1383
1384 int64_t divisor = 0;
1385 int64_t remainder = 0;
1386
1387 struct wsi_x11_connection *wsi_conn =
1388 wsi_x11_get_connection((struct wsi_device*)chain->base.wsi, chain->conn);
1389 if (!wsi_conn)
1390 return VK_ERROR_OUT_OF_HOST_MEMORY;
1391
1392 if (present_mode == VK_PRESENT_MODE_IMMEDIATE_KHR ||
1393 (present_mode == VK_PRESENT_MODE_MAILBOX_KHR &&
1394 wsi_conn->is_xwayland) ||
1395 present_mode == VK_PRESENT_MODE_FIFO_RELAXED_KHR)
1396 options |= XCB_PRESENT_OPTION_ASYNC;
1397
1398 if (present_mode == VK_PRESENT_MODE_IMMEDIATE_KHR
1399 && chain->has_async_may_tear)
1400 options |= XCB_PRESENT_OPTION_ASYNC_MAY_TEAR;
1401
1402 if (chain->has_dri3_modifiers)
1403 options |= XCB_PRESENT_OPTION_SUBOPTIMAL;
1404
1405 xshmfence_reset(image->shm_fence);
1406
1407 if (!chain->base.image_info.explicit_sync) {
1408 ++chain->sent_image_count;
1409 assert(chain->sent_image_count <= chain->base.image_count);
1410 }
1411
1412 ++chain->send_sbc;
1413 uint32_t serial = (uint32_t)chain->send_sbc;
1414
1415 assert(image->present_queued_count < ARRAY_SIZE(image->pending_completions));
1416 image->pending_completions[image->present_queued_count++] =
1417 (struct x11_image_pending_completion) {
1418 .signal_present_id = image->present_id,
1419 .serial = serial,
1420 };
1421
1422 xcb_void_cookie_t cookie;
1423 #ifdef HAVE_DRI3_EXPLICIT_SYNC
1424 if (chain->base.image_info.explicit_sync) {
1425 uint64_t acquire_point = image->base.explicit_sync[WSI_ES_ACQUIRE].timeline;
1426 uint64_t release_point = image->base.explicit_sync[WSI_ES_RELEASE].timeline;
1427 cookie = xcb_present_pixmap_synced(
1428 chain->conn,
1429 chain->window,
1430 image->pixmap,
1431 serial,
1432 0, /* valid */
1433 image->update_area, /* update */
1434 0, /* x_off */
1435 0, /* y_off */
1436 XCB_NONE, /* target_crtc */
1437 image->dri3_syncobj[WSI_ES_ACQUIRE], /* acquire_syncobj */
1438 image->dri3_syncobj[WSI_ES_RELEASE], /* release_syncobj */
1439 acquire_point,
1440 release_point,
1441 options,
1442 target_msc,
1443 divisor,
1444 remainder, 0, NULL);
1445 } else
1446 #endif
1447 {
1448 cookie = xcb_present_pixmap(chain->conn,
1449 chain->window,
1450 image->pixmap,
1451 serial,
1452 0, /* valid */
1453 image->update_area, /* update */
1454 0, /* x_off */
1455 0, /* y_off */
1456 XCB_NONE, /* target_crtc */
1457 XCB_NONE,
1458 image->sync_fence,
1459 options,
1460 target_msc,
1461 divisor,
1462 remainder, 0, NULL);
1463 }
1464 xcb_discard_reply(chain->conn, cookie.sequence);
1465 xcb_flush(chain->conn);
1466 return x11_swapchain_result(chain, VK_SUCCESS);
1467 }
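/* For reference, the Present options chosen above (a summary, not new
 * behaviour): ASYNC for IMMEDIATE, FIFO_RELAXED and MAILBOX-on-Xwayland,
 * ASYNC_MAY_TEAR additionally for IMMEDIATE when the server advertises it,
 * and SUBOPTIMAL whenever DRI3 modifiers are available so the server may
 * report SUBOPTIMAL_COPY completions. */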
1468 #endif
1469 /**
1470 * Send image to X server unaccelerated (software drivers).
1471 */
1472 static VkResult
1473 x11_present_to_x11_sw(struct x11_swapchain *chain, uint32_t image_index)
1474 {
1475 assert(!chain->base.image_info.explicit_sync);
1476 struct x11_image *image = &chain->images[image_index];
1477
1478 /* Begin querying this before submitting the frame for improved async performance.
1479 * In this _sw() mode we're expecting network round-trip delay, not just UNIX socket delay. */
1480 xcb_get_geometry_cookie_t geom_cookie = xcb_get_geometry(chain->conn, chain->window);
1481
1482 xcb_void_cookie_t cookie;
1483 void *myptr = image->base.cpu_map;
1484 size_t hdr_len = sizeof(xcb_put_image_request_t);
1485 int stride_b = image->base.row_pitches[0];
1486 size_t size = (hdr_len + stride_b * chain->extent.height) >> 2;
1487 uint64_t max_req_len = xcb_get_maximum_request_length(chain->conn);
1488
1489 if (image->rectangle_count > 0) {
1490 for (int i = 0; i < image->rectangle_count; i++) {
1491 xcb_rectangle_t rect = chain->images[image_index].rects[i];
1492 const uint8_t *data = (const uint8_t*)myptr + (rect.y * stride_b) + (rect.x * 4);
1493 for (int j = 0; j < rect.height; j++) {
1494 cookie = xcb_put_image(chain->conn, XCB_IMAGE_FORMAT_Z_PIXMAP,
1495 chain->window, chain->gc,
1496 rect.width,
1497 1,
1498 rect.x, rect.y + j,
1499 0, chain->depth,
1500 rect.width * 4,
1501 data);
1502 xcb_discard_reply(chain->conn, cookie.sequence);
1503 data += stride_b;
1504 }
1505 }
1506 } else if (size < max_req_len) {
1507 cookie = xcb_put_image(chain->conn, XCB_IMAGE_FORMAT_Z_PIXMAP,
1508 chain->window,
1509 chain->gc,
1510 image->base.row_pitches[0] / 4,
1511 chain->extent.height,
1512 0,0,0,chain->depth,
1513 image->base.row_pitches[0] * chain->extent.height,
1514 image->base.cpu_map);
1515 xcb_discard_reply(chain->conn, cookie.sequence);
1516 } else {
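/* Chunked upload: xcb_get_maximum_request_length() is in 4-byte units,
 * so (max_req_len << 2) is the budget in bytes. Illustrative numbers:
 * with a 16 MiB big-request limit and a 1920*4-byte stride this sends
 * roughly 2184 rows per xcb_put_image call. */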
1517 int num_lines = ((max_req_len << 2) - hdr_len) / stride_b;
1518 int y_start = 0;
1519 int y_todo = chain->extent.height;
1520 while (y_todo) {
1521 int this_lines = MIN2(num_lines, y_todo);
1522 cookie = xcb_put_image(chain->conn, XCB_IMAGE_FORMAT_Z_PIXMAP,
1523 chain->window,
1524 chain->gc,
1525 image->base.row_pitches[0] / 4,
1526 this_lines,
1527 0,y_start,0,chain->depth,
1528 this_lines * stride_b,
1529 (const uint8_t *)myptr + (y_start * stride_b));
1530 xcb_discard_reply(chain->conn, cookie.sequence);
1531 y_start += this_lines;
1532 y_todo -= this_lines;
1533 }
1534 }
1535
1536 xcb_flush(chain->conn);
1537
1538 /* We don't have a queued present here.
1539 * Immediately let the application acquire again, but check the geometry reply first so
1540 * we can report OUT_OF_DATE on resize. */
1541 xcb_generic_error_t *err;
1542
1543 xcb_get_geometry_reply_t *geom = xcb_get_geometry_reply(chain->conn, geom_cookie, &err);
1544 VkResult result = VK_SUCCESS;
1545 if (geom) {
1546 if (chain->extent.width != geom->width ||
1547 chain->extent.height != geom->height)
1548 result = VK_ERROR_OUT_OF_DATE_KHR;
1549 } else {
1550 result = VK_ERROR_SURFACE_LOST_KHR;
1551 }
1552 free(err);
1553 free(geom);
1554
1555 wsi_queue_push(&chain->acquire_queue, image_index);
1556 return result;
1557 }
1558
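/**
 * In trace mode, poll the keyboard state and trigger a frame capture when F1
 * is newly pressed (only when built with xcb-keysyms available).
 */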
1559 static void
1560 x11_capture_trace(struct x11_swapchain *chain)
1561 {
1562 #ifdef XCB_KEYSYMS_AVAILABLE
1563 VK_FROM_HANDLE(vk_device, device, chain->base.device);
1564 if (!device->physical->instance->trace_mode)
1565 return;
1566
1567 xcb_query_keymap_cookie_t keys_cookie = xcb_query_keymap(chain->conn);
1568
1569 xcb_generic_error_t *error = NULL;
1570 xcb_query_keymap_reply_t *keys = xcb_query_keymap_reply(chain->conn, keys_cookie, &error);
1571 if (error) {
1572 free(error);
1573 return;
1574 }
1575
1576 xcb_key_symbols_t *key_symbols = xcb_key_symbols_alloc(chain->conn);
1577 xcb_keycode_t *keycodes = xcb_key_symbols_get_keycode(key_symbols, XK_F1);
1578 if (keycodes) {
1579 xcb_keycode_t keycode = keycodes[0];
1580 free(keycodes);
1581
1582 simple_mtx_lock(&device->trace_mtx);
1583 bool capture_key_pressed = keys->keys[keycode / 8] & (1u << (keycode % 8));
1584 device->trace_hotkey_trigger = capture_key_pressed && (capture_key_pressed != chain->base.capture_key_pressed);
1585 chain->base.capture_key_pressed = capture_key_pressed;
1586 simple_mtx_unlock(&device->trace_mtx);
1587 }
1588
1589 xcb_key_symbols_free(key_symbols);
1590 free(keys);
1591 #endif
1592 }
1593
1594 /* Use a trivial helper here to make it easier to spot, in code, where
1595 * we intend to access chain->status outside the thread lock. */
1596 static VkResult x11_swapchain_read_status_atomic(struct x11_swapchain *chain)
1597 {
1598 return chain->status;
1599 }
1600
1601 /**
1602 * Decides whether an early wait on buffer fences is required before buffer submission.
1603 * That is the case for mailbox mode, as otherwise the latest image in the queue might not be fully rendered at
1604 * present time, which could lead to missing a frame. This is an Xorg issue.
1605 *
1606 * On Wayland compositors, this used to be a problem as well, but not anymore,
1607 * and this check assumes that Mesa is running on a reasonable compositor.
1608 * The wait behavior can be forced by setting the 'vk_xwayland_wait_ready' DRIConf option to true.
1609 * Some drivers, e.g. Venus, may still want to require wait_ready by default,
1610 * so the option is kept around for now.
1611 *
1612 * On Wayland, we don't know at this point whether the tearing protocol is/can be used by Xwl,
1613 * so we have to make the MAILBOX assumption.
1614 */
1615 static bool
1616 x11_needs_wait_for_fences(const struct wsi_device *wsi_device,
1617 struct wsi_x11_connection *wsi_conn,
1618 VkPresentModeKHR present_mode)
1619 {
1620 if (wsi_conn->is_xwayland && !wsi_device->x11.xwaylandWaitReady) {
1621 return false;
1622 }
1623
1624 switch (present_mode) {
1625 case VK_PRESENT_MODE_MAILBOX_KHR:
1626 return true;
1627 case VK_PRESENT_MODE_IMMEDIATE_KHR:
1628 return wsi_conn->is_xwayland;
1629 default:
1630 return false;
1631 }
1632 }
1633
1634 /* This matches Wayland. */
1635 #define X11_SWAPCHAIN_MAILBOX_IMAGES 4
1636
1637 static bool
1638 x11_requires_mailbox_image_count(const struct wsi_device *device,
1639 struct wsi_x11_connection *wsi_conn,
1640 VkPresentModeKHR present_mode)
1641 {
1642 /* If we're resorting to wait for fences, we're assuming a MAILBOX-like model,
1643 * and we should allocate accordingly.
1644 *
1645 * One potential concern here is IMMEDIATE mode on Wayland.
1646 * This situation could arise:
1647 * - Fullscreen FLIP mode
1648 * - Compositor does not support tearing protocol (we cannot know this here)
1649 *
1650  * With 3 images, during the window between latch and flip, there is only one image left to the app,
1651  * so peak FPS may not be reached if that window is large,
1652  * but tests on contemporary compositors suggest this effect is minor.
1653  * Frame rates in the thousands can easily be reached.
1654 *
1655 * There are pragmatic reasons to expose 3 images for IMMEDIATE on Xwl.
1656  * - minImageCount is not intended as a tool to tune performance; its intent is to signal forward progress.
1657  *   Our X11 and WL implementations bump it above 2 for pragmatic reasons, since sync acquire interacts poorly with 2 images.
1658 * A jump from 3 to 4 is at best a minor improvement which only affects applications
1659 * running at extremely high frame rates, way beyond the monitor refresh rate.
1660 * On the other hand, lowering minImageCount to 2 would break the fundamental idea of MAILBOX
1661 * (and IMMEDIATE without tear), since FPS > refresh rate would not be possible.
1662 *
1663 * - Several games developed for other platforms and other Linux WSI implementations
1664 * do not expect that image counts arbitrarily change when changing present mode,
1665 * and will crash when Mesa does so.
1666 * There are several games using the strict_image_count drirc to work around this,
1667 * and it would be good to be friendlier in the first place, so we don't have to work around more games.
1668 * IMMEDIATE is a common presentation mode on those platforms, but MAILBOX is more Wayland-centric in nature,
1669 * so increasing image count for that mode is more reasonable.
1670 *
1671 * - IMMEDIATE expects tearing, and when tearing, 3 images are more than enough.
1672 *
1673 * - With EXT_swapchain_maintenance1, toggling between FIFO / IMMEDIATE (used extensively by D3D layering)
1674  *   would require the application to allocate >3 images, which is unfortunate for memory usage,
1675 * and potentially disastrous for latency unless KHR_present_wait is used.
1676 */
1677 return x11_needs_wait_for_fences(device, wsi_conn, present_mode) ||
1678 present_mode == VK_PRESENT_MODE_MAILBOX_KHR;
1679 }
1680
1681 /**
1682 * Send image to the X server for presentation at target_msc.
1683 */
1684 static VkResult
1685 x11_present_to_x11(struct x11_swapchain *chain, uint32_t image_index,
1686 uint64_t target_msc, VkPresentModeKHR present_mode)
1687 {
1688 x11_capture_trace(chain);
1689
1690 VkResult result;
1691 if (chain->base.wsi->sw && !chain->has_mit_shm)
1692 result = x11_present_to_x11_sw(chain, image_index);
1693 else
1694 #ifdef HAVE_X11_DRM
1695 result = x11_present_to_x11_dri3(chain, image_index, target_msc, present_mode);
1696 #else
1697 unreachable("X11 missing DRI3 support!");
1698 #endif
1699
1700 if (result < 0)
1701 x11_swapchain_notify_error(chain, result);
1702 else
1703 x11_notify_pending_present(chain, &chain->images[image_index]);
1704
1705 return result;
1706 }
1707
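/**
 * Return acquired but unpresented images to the swapchain
 * (the release operation of VK_EXT_swapchain_maintenance1).
 *
 * With implicit sync the indices simply go back on the acquire queue;
 * with explicit sync there is no acquire queue, so nothing needs to be done here.
 */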
1708 static VkResult
1709 x11_release_images(struct wsi_swapchain *wsi_chain,
1710 uint32_t count, const uint32_t *indices)
1711 {
1712 struct x11_swapchain *chain = (struct x11_swapchain *)wsi_chain;
1713 if (chain->status == VK_ERROR_SURFACE_LOST_KHR)
1714 return chain->status;
1715
1716 /* If we're using implicit sync, push images to the acquire queue */
1717 if (!chain->base.image_info.explicit_sync) {
1718 for (uint32_t i = 0; i < count; i++) {
1719 uint32_t index = indices[i];
1720 assert(index < chain->base.image_count);
1721 wsi_queue_push(&chain->acquire_queue, index);
1722 }
1723 }
1724
1725 return VK_SUCCESS;
1726 }
1727
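/**
 * EXT_swapchain_maintenance1 callback: record the present mode to be used for
 * subsequent presents on this swapchain.
 */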
1728 static void
1729 x11_set_present_mode(struct wsi_swapchain *wsi_chain,
1730 VkPresentModeKHR mode)
1731 {
1732 struct x11_swapchain *chain = (struct x11_swapchain *)wsi_chain;
1733 chain->base.present_mode = mode;
1734 }
1735
1736 /**
1737 * Acquire a ready-to-use image from the swapchain.
1738 *
1739  * This usually means that the image is not waiting on presentation and that it
1740  * has been released by the X server to be used again by the consumer.
1741 */
1742 static VkResult
1743 x11_acquire_next_image(struct wsi_swapchain *anv_chain,
1744 const VkAcquireNextImageInfoKHR *info,
1745 uint32_t *image_index)
1746 {
1747 struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
1748 uint64_t timeout = info->timeout;
1749
1750 /* If the swapchain is in an error state, don't go any further. */
1751 VkResult result = x11_swapchain_read_status_atomic(chain);
1752 if (result < 0)
1753 return result;
1754
1755 if (chain->base.image_info.explicit_sync) {
1756 result = x11_wait_for_explicit_sync_release_submission(chain, timeout,
1757 image_index);
1758 } else {
1759 result = wsi_queue_pull(&chain->acquire_queue,
1760 image_index, timeout);
1761 }
1762
1763 if (result == VK_TIMEOUT)
1764 return info->timeout ? VK_TIMEOUT : VK_NOT_READY;
1765
1766 if (result < 0) {
1767 mtx_lock(&chain->thread_state_lock);
1768 result = x11_swapchain_result(chain, result);
1769 mtx_unlock(&chain->thread_state_lock);
1770 } else {
1771 result = x11_swapchain_read_status_atomic(chain);
1772 }
1773
1774 if (result < 0)
1775 return result;
1776
1777 assert(*image_index < chain->base.image_count);
1778 #ifdef HAVE_X11_DRM
1779 if (chain->images[*image_index].shm_fence &&
1780 !chain->base.image_info.explicit_sync)
1781 xshmfence_await(chain->images[*image_index].shm_fence);
1782 #endif
1783
1784 return result;
1785 }
1786
1787 /**
1788 * Queue a new presentation of an image that was previously acquired by the
1789 * consumer.
1790 *
1791 * Note that in immediate presentation mode this does not really queue the
1792 * presentation but directly asks the X server to show it.
1793 */
1794 static VkResult
1795 x11_queue_present(struct wsi_swapchain *anv_chain,
1796 uint32_t image_index,
1797 uint64_t present_id,
1798 const VkPresentRegionKHR *damage)
1799 {
1800 struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
1801 xcb_xfixes_region_t update_area = 0;
1802
1803 /* If the swapchain is in an error state, don't go any further. */
1804 VkResult status = x11_swapchain_read_status_atomic(chain);
1805 if (status < 0)
1806 return status;
1807
1808 if (damage && damage->pRectangles && damage->rectangleCount > 0 &&
1809 damage->rectangleCount <= MAX_DAMAGE_RECTS) {
1810 xcb_rectangle_t *rects = chain->images[image_index].rects;
1811
1812 update_area = chain->images[image_index].update_region;
1813 for (unsigned i = 0; i < damage->rectangleCount; i++) {
1814 const VkRectLayerKHR *rect = &damage->pRectangles[i];
1815 assert(rect->layer == 0);
1816 rects[i].x = rect->offset.x;
1817 rects[i].y = rect->offset.y;
1818 rects[i].width = rect->extent.width;
1819 rects[i].height = rect->extent.height;
1820 }
1821 xcb_xfixes_set_region(chain->conn, update_area, damage->rectangleCount, rects);
1822 chain->images[image_index].rectangle_count = damage->rectangleCount;
1823 } else {
1824 chain->images[image_index].rectangle_count = 0;
1825 }
1826 chain->images[image_index].update_area = update_area;
1827 chain->images[image_index].present_id = present_id;
1828 /* With EXT_swapchain_maintenance1, the present mode can change per present. */
1829 chain->images[image_index].present_mode = chain->base.present_mode;
1830
1831 wsi_queue_push(&chain->present_queue, image_index);
1832 return x11_swapchain_read_status_atomic(chain);
1833 }
1834
1835 /**
1836  * The number of images that are not owned by X11:
1837  * (1) owned by the app, or
1838  * (2) waiting for the app to take ownership through an acquire, or
1839  * (3) in the present queue waiting for the FIFO thread to present them to X11.
1840 */
1841 static unsigned x11_driver_owned_images(const struct x11_swapchain *chain)
1842 {
1843 return chain->base.image_count - chain->sent_image_count;
1844 }
1845
1846 /* This thread is responsible for pumping PRESENT replies.
1847 * This is done in a separate thread from the X11 presentation thread
1848 * to be able to support non-blocking modes like IMMEDIATE and MAILBOX.
1849 * Frame completion events can happen at any time, and we need to handle
1850 * the events as soon as they come in to have a quality implementation.
1851 * The presentation thread may go to sleep waiting for new presentation events to come in,
1852 * and it cannot wait for both X events and application events at the same time.
1853 * If we only cared about FIFO, this thread wouldn't be very useful.
1854  * An earlier implementation of the X11 WSI had a single FIFO thread that blocked on X events after presenting.
1855 * For IMMEDIATE and MAILBOX, the application thread pumped the event queue, which caused a lot of pain
1856 * when trying to deal with present wait.
1857 */
1858 static int
1859 x11_manage_event_queue(void *state)
1860 {
1861 struct x11_swapchain *chain = state;
1862 u_thread_setname("WSI swapchain event");
1863
1864 /* While there is an outstanding IDLE we should wait for it.
1865 * In FLIP modes at most one image will not be driver owned eventually.
1866 * In BLIT modes, we expect that all images will eventually be driver owned,
1867 * but we don't know which mode is being used. */
1868 unsigned forward_progress_guaranteed_acquired_images = chain->base.image_count - 1;
1869
1870 mtx_lock(&chain->thread_state_lock);
1871
1872 while (chain->status >= 0) {
1873 /* This thread should only go sleep waiting for X events when we know there are pending events.
1874 * We expect COMPLETION events when there is at least one image marked as present_queued.
1875 * We also expect IDLE events, but we only consider waiting for them when all images are busy,
1876 * and application has fewer than N images acquired. */
1877
1878 bool assume_forward_progress = false;
1879
1880 for (uint32_t i = 0; i < chain->base.image_count; i++) {
1881 if (chain->images[i].present_queued_count != 0) {
1882 /* We must pump through a present wait and unblock FIFO thread if using FIFO mode. */
1883 assume_forward_progress = true;
1884 break;
1885 }
1886 }
1887
1888 if (!assume_forward_progress && !chain->base.image_info.explicit_sync) {
1889 /* If true, application expects acquire (IDLE) to happen in finite time. */
1890 assume_forward_progress = x11_driver_owned_images(chain) <
1891 forward_progress_guaranteed_acquired_images;
1892 }
1893
1894 if (assume_forward_progress) {
1895 /* Only yield lock when blocking on X11 event. */
1896 mtx_unlock(&chain->thread_state_lock);
1897 xcb_generic_event_t *event =
1898 xcb_wait_for_special_event(chain->conn, chain->special_event);
1899 mtx_lock(&chain->thread_state_lock);
1900
1901 /* Re-check status since we dropped the lock while waiting for X. */
1902 VkResult result = chain->status;
1903
1904 if (result >= 0) {
1905 if (event) {
1906 /* Queue thread will be woken up if anything interesting happened in handler.
1907 * Queue thread blocks on:
1908 * - Presentation events completing
1909 * - Presentation requests from application
1910 * - WaitForFence workaround if applicable */
1911 result = x11_handle_dri3_present_event(chain, (void *) event);
1912 } else {
1913 result = VK_ERROR_SURFACE_LOST_KHR;
1914 }
1915 }
1916
1917 /* Updates chain->status and wakes up threads as necessary on error. */
1918 x11_swapchain_result(chain, result);
1919 free(event);
1920 } else {
1921 /* Nothing important to do, go to sleep until queue thread wakes us up. */
1922 u_cnd_monotonic_wait(&chain->thread_state_cond, &chain->thread_state_lock);
1923 }
1924 }
1925
1926 mtx_unlock(&chain->thread_state_lock);
1927 return 0;
1928 }
1929
1930 /**
1931 * Presentation thread.
1932 *
1933 * Runs in a separate thread, blocks and reacts to queued images on the
1934 * present-queue
1935 *
1936 * This must be a thread since we have to block in two cases:
1937 * - FIFO:
1938 * We must wait for previous presentation to complete
1939 * in some way so we can compute the target MSC.
1940 * - WaitForFence workaround:
1941 * In some cases, we need to wait for image to complete rendering before submitting it to X.
1942 */
1943 static int
1944 x11_manage_present_queue(void *state)
1945 {
1946 struct x11_swapchain *chain = state;
1947 struct wsi_x11_connection *wsi_conn =
1948 wsi_x11_get_connection((struct wsi_device*)chain->base.wsi, chain->conn);
1949 VkResult result = VK_SUCCESS;
1950
1951 u_thread_setname("WSI swapchain queue");
1952
1953 uint64_t target_msc = 0;
1954
1955 while (x11_swapchain_read_status_atomic(chain) >= 0) {
1956 uint32_t image_index = 0;
1957 {
1958 MESA_TRACE_SCOPE("pull present queue");
1959 result = wsi_queue_pull(&chain->present_queue, &image_index, INT64_MAX);
1960 assert(result != VK_TIMEOUT);
1961 }
1962
1963 /* The status can change underneath us if the swapchain is destroyed
1964 * from another thread. */
1965 if (result >= 0)
1966 result = x11_swapchain_read_status_atomic(chain);
1967 if (result < 0)
1968 break;
1969
1970 VkPresentModeKHR present_mode = chain->images[image_index].present_mode;
1971
1972 if (x11_needs_wait_for_fences(chain->base.wsi, wsi_conn,
1973 present_mode) &&
1974 /* not necessary with explicit sync */
1975 !chain->base.image_info.explicit_sync) {
1976 MESA_TRACE_SCOPE("wait fence");
1977 result = chain->base.wsi->WaitForFences(chain->base.device, 1,
1978 &chain->base.fences[image_index],
1979 true, UINT64_MAX);
1980 if (result != VK_SUCCESS) {
1981 result = VK_ERROR_OUT_OF_DATE_KHR;
1982 break;
1983 }
1984 }
1985
1986 mtx_lock(&chain->thread_state_lock);
1987
1988 /* In IMMEDIATE and MAILBOX modes, there is a risk that we have exhausted the presentation queue,
1989 * since IDLE could return multiple times before observing a COMPLETE. */
1990 while (chain->status >= 0 &&
1991 chain->images[image_index].present_queued_count ==
1992 ARRAY_SIZE(chain->images[image_index].pending_completions)) {
1993 u_cnd_monotonic_wait(&chain->thread_state_cond, &chain->thread_state_lock);
1994 }
1995
1996 if (chain->status < 0) {
1997 mtx_unlock(&chain->thread_state_lock);
1998 break;
1999 }
2000
2001 result = x11_present_to_x11(chain, image_index, target_msc, present_mode);
2002
2003 if (result < 0) {
2004 mtx_unlock(&chain->thread_state_lock);
2005 break;
2006 }
2007
2008 if (present_mode == VK_PRESENT_MODE_FIFO_KHR ||
2009 present_mode == VK_PRESENT_MODE_FIFO_RELAXED_KHR) {
2010 MESA_TRACE_SCOPE("wait present");
2011
2012 while (chain->status >= 0 && chain->images[image_index].present_queued_count != 0) {
2013 /* In FIFO mode, we need to make sure we observe a COMPLETE before queueing up
2014 * another present. */
2015 u_cnd_monotonic_wait(&chain->thread_state_cond, &chain->thread_state_lock);
2016 }
2017
2018 /* If next present is not FIFO, we still need to ensure we don't override that
2019 * present. If FIFO, we need to ensure MSC is larger than the COMPLETED frame. */
2020 target_msc = chain->last_present_msc + 1;
2021 }
2022
2023 mtx_unlock(&chain->thread_state_lock);
2024 }
2025
2026 mtx_lock(&chain->thread_state_lock);
2027 x11_swapchain_result(chain, result);
2028 if (!chain->base.image_info.explicit_sync)
2029 wsi_queue_push(&chain->acquire_queue, UINT32_MAX);
2030 mtx_unlock(&chain->thread_state_lock);
2031
2032 return 0;
2033 }
2034
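/**
 * CPU-image allocation callback for software swapchains: back the image with a
 * SysV shared-memory segment so the X server can attach it via MIT-SHM. The
 * segment is marked for deletion right away so it cannot leak.
 */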
2035 static uint8_t *
2036 alloc_shm(struct wsi_image *imagew, unsigned size)
2037 {
2038 #ifdef HAVE_SYS_SHM_H
2039 struct x11_image *image = (struct x11_image *)imagew;
2040 image->shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | 0600);
2041 if (image->shmid < 0)
2042 return NULL;
2043
2044 uint8_t *addr = (uint8_t *)shmat(image->shmid, 0, 0);
2045 /* mark the segment immediately for deletion to avoid leaks */
2046 shmctl(image->shmid, IPC_RMID, 0);
2047
2048 if (addr == (uint8_t *) -1)
2049 return NULL;
2050
2051 image->shmaddr = addr;
2052 return addr;
2053 #else
2054 return NULL;
2055 #endif
2056 }
2057
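/**
 * Set up the X11 side of a swapchain image.
 *
 * For software swapchains with MIT-SHM this attaches the shared-memory segment
 * and creates a SHM pixmap. For DRI3 swapchains it exports the image's
 * dma-buf(s) into a pixmap (with modifiers when available), imports the
 * explicit-sync syncobjs if supported, and creates the xshmfence used for
 * implicit-sync idle tracking.
 */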
2058 static VkResult
2059 x11_image_init(VkDevice device_h, struct x11_swapchain *chain,
2060 const VkSwapchainCreateInfoKHR *pCreateInfo,
2061 const VkAllocationCallbacks* pAllocator,
2062 struct x11_image *image)
2063 {
2064 VkResult result;
2065
2066 result = wsi_create_image(&chain->base, &chain->base.image_info,
2067 &image->base);
2068 if (result != VK_SUCCESS)
2069 return result;
2070
2071 if (chain->base.wsi->sw && !chain->has_mit_shm)
2072 return VK_SUCCESS;
2073
2074 #ifdef HAVE_X11_DRM
2075 xcb_void_cookie_t cookie;
2076 xcb_generic_error_t *error = NULL;
2077 uint32_t bpp = 32;
2078 int fence_fd;
2079 image->update_region = xcb_generate_id(chain->conn);
2080 xcb_xfixes_create_region(chain->conn, image->update_region, 0, NULL);
2081
2082 if (chain->base.wsi->sw) {
2083 image->shmseg = xcb_generate_id(chain->conn);
2084
2085 xcb_shm_attach(chain->conn,
2086 image->shmseg,
2087 image->shmid,
2088 0);
2089 image->pixmap = xcb_generate_id(chain->conn);
2090 cookie = xcb_shm_create_pixmap_checked(chain->conn,
2091 image->pixmap,
2092 chain->window,
2093 image->base.row_pitches[0] / 4,
2094 pCreateInfo->imageExtent.height,
2095 chain->depth,
2096 image->shmseg, 0);
2097 xcb_discard_reply(chain->conn, cookie.sequence);
2098 goto out_fence;
2099 }
2100 image->pixmap = xcb_generate_id(chain->conn);
2101
2102 if (image->base.drm_modifier != DRM_FORMAT_MOD_INVALID) {
2103 /* If the image has a modifier, we must have DRI3 v1.2. */
2104 assert(chain->has_dri3_modifiers);
2105
2106 /* XCB requires an array of file descriptors but we only have one */
2107 int fds[4] = { -1, -1, -1, -1 };
2108 for (int i = 0; i < image->base.num_planes; i++) {
2109 fds[i] = os_dupfd_cloexec(image->base.dma_buf_fd);
2110 if (fds[i] == -1) {
2111 for (int j = 0; j < i; j++)
2112 close(fds[j]);
2113
2114 return VK_ERROR_OUT_OF_HOST_MEMORY;
2115 }
2116 }
2117
2118 cookie =
2119 xcb_dri3_pixmap_from_buffers_checked(chain->conn,
2120 image->pixmap,
2121 chain->window,
2122 image->base.num_planes,
2123 pCreateInfo->imageExtent.width,
2124 pCreateInfo->imageExtent.height,
2125 image->base.row_pitches[0],
2126 image->base.offsets[0],
2127 image->base.row_pitches[1],
2128 image->base.offsets[1],
2129 image->base.row_pitches[2],
2130 image->base.offsets[2],
2131 image->base.row_pitches[3],
2132 image->base.offsets[3],
2133 chain->depth, bpp,
2134 image->base.drm_modifier,
2135 fds);
2136 } else {
2137 /* Without passing modifiers, we can't have multi-plane RGB images. */
2138 assert(image->base.num_planes == 1);
2139
2140 /* XCB will take ownership of the FD we pass it. */
2141 int fd = os_dupfd_cloexec(image->base.dma_buf_fd);
2142 if (fd == -1)
2143 return VK_ERROR_OUT_OF_HOST_MEMORY;
2144
2145 cookie =
2146 xcb_dri3_pixmap_from_buffer_checked(chain->conn,
2147 image->pixmap,
2148 chain->window,
2149 image->base.sizes[0],
2150 pCreateInfo->imageExtent.width,
2151 pCreateInfo->imageExtent.height,
2152 image->base.row_pitches[0],
2153 chain->depth, bpp, fd);
2154 }
2155
2156 error = xcb_request_check(chain->conn, cookie);
2157 if (error != NULL) {
2158 free(error);
2159 goto fail_image;
2160 }
2161
2162 #ifdef HAVE_DRI3_EXPLICIT_SYNC
2163 if (chain->base.image_info.explicit_sync) {
2164 for (uint32_t i = 0; i < WSI_ES_COUNT; i++) {
2165 image->dri3_syncobj[i] = xcb_generate_id(chain->conn);
2166 int fd = dup(image->base.explicit_sync[i].fd);
2167 if (fd < 0)
2168 goto fail_image;
2169
2170 cookie = xcb_dri3_import_syncobj_checked(chain->conn,
2171 image->dri3_syncobj[i],
2172 chain->window,
2173 fd /* libxcb closes the fd */);
2174 error = xcb_request_check(chain->conn, cookie);
2175 if (error != NULL) {
2176 free(error);
2177 goto fail_image;
2178 }
2179 }
2180 }
2181 #endif
2182
2183 out_fence:
2184 fence_fd = xshmfence_alloc_shm();
2185 if (fence_fd < 0)
2186 goto fail_pixmap;
2187
2188 image->shm_fence = xshmfence_map_shm(fence_fd);
2189 if (image->shm_fence == NULL)
2190 goto fail_shmfence_alloc;
2191
2192 image->sync_fence = xcb_generate_id(chain->conn);
2193 xcb_dri3_fence_from_fd(chain->conn,
2194 image->pixmap,
2195 image->sync_fence,
2196 false,
2197 fence_fd);
2198
2199 xshmfence_trigger(image->shm_fence);
2200 return VK_SUCCESS;
2201
2202 fail_shmfence_alloc:
2203 close(fence_fd);
2204
2205 fail_pixmap:
2206 cookie = xcb_free_pixmap(chain->conn, image->pixmap);
2207 xcb_discard_reply(chain->conn, cookie.sequence);
2208
2209 fail_image:
2210 wsi_destroy_image(&chain->base, &image->base);
2211
2212 #else
2213 unreachable("SHM support not compiled in");
2214 #endif
2215 return VK_ERROR_INITIALIZATION_FAILED;
2216 }
2217
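/**
 * Destroy the X11 objects created in x11_image_init (sync fence, pixmap,
 * update region, imported syncobjs) and free the underlying image and any
 * shared-memory mapping.
 */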
2218 static void
2219 x11_image_finish(struct x11_swapchain *chain,
2220 const VkAllocationCallbacks* pAllocator,
2221 struct x11_image *image)
2222 {
2223 xcb_void_cookie_t cookie;
2224 if (!chain->base.wsi->sw || chain->has_mit_shm) {
2225 #ifdef HAVE_X11_DRM
2226 cookie = xcb_sync_destroy_fence(chain->conn, image->sync_fence);
2227 xcb_discard_reply(chain->conn, cookie.sequence);
2228 xshmfence_unmap_shm(image->shm_fence);
2229 #endif
2230
2231 cookie = xcb_free_pixmap(chain->conn, image->pixmap);
2232 xcb_discard_reply(chain->conn, cookie.sequence);
2233 #ifdef HAVE_X11_DRM
2234 cookie = xcb_xfixes_destroy_region(chain->conn, image->update_region);
2235 xcb_discard_reply(chain->conn, cookie.sequence);
2236 #endif
2237 #ifdef HAVE_DRI3_EXPLICIT_SYNC
2238 if (chain->base.image_info.explicit_sync) {
2239 for (uint32_t i = 0; i < WSI_ES_COUNT; i++) {
2240 cookie = xcb_dri3_free_syncobj(chain->conn, image->dri3_syncobj[i]);
2241 xcb_discard_reply(chain->conn, cookie.sequence);
2242 }
2243 }
2244 #endif
2245 }
2246
2247 wsi_destroy_image(&chain->base, &image->base);
2248 #ifdef HAVE_SYS_SHM_H
2249 if (image->shmaddr)
2250 shmdt(image->shmaddr);
2251 #endif
2252 }
2253
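/**
 * Hash the modifier lists (and the same-gpu flag) used to create the swapchain
 * images, so a later re-query can detect that the server's supported modifiers
 * have changed.
 */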
2254 static void
2255 wsi_x11_recompute_dri3_modifier_hash(blake3_hash *hash, const struct wsi_drm_image_params *params)
2256 {
2257 mesa_blake3 ctx;
2258 _mesa_blake3_init(&ctx);
2259 _mesa_blake3_update(&ctx, ¶ms->num_modifier_lists, sizeof(params->num_modifier_lists));
2260 for (uint32_t i = 0; i < params->num_modifier_lists; i++) {
2261 _mesa_blake3_update(&ctx, &i, sizeof(i));
2262 _mesa_blake3_update(&ctx, params->modifiers[i],
2263 params->num_modifiers[i] * sizeof(*params->modifiers[i]));
2264 }
2265 _mesa_blake3_update(&ctx, ¶ms->same_gpu, sizeof(params->same_gpu));
2266 _mesa_blake3_final(&ctx, *hash);
2267 }
2268
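/**
 * Query the format modifiers the server supports for the given window.
 * Up to two tranches are returned (window modifiers and screen modifiers),
 * each allocated from pAllocator; *num_tranches_in is set to 0 on failure.
 */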
2269 static void
2270 wsi_x11_get_dri3_modifiers(struct wsi_x11_connection *wsi_conn,
2271 xcb_connection_t *conn, xcb_window_t window,
2272 uint8_t depth, uint8_t bpp,
2273 uint64_t **modifiers_in, uint32_t *num_modifiers_in,
2274 uint32_t *num_tranches_in,
2275 const VkAllocationCallbacks *pAllocator)
2276 {
2277 if (!wsi_conn->has_dri3_modifiers)
2278 goto out;
2279
2280 #ifdef HAVE_X11_DRM
2281 xcb_generic_error_t *error = NULL;
2282 xcb_dri3_get_supported_modifiers_cookie_t mod_cookie =
2283 xcb_dri3_get_supported_modifiers(conn, window, depth, bpp);
2284 xcb_dri3_get_supported_modifiers_reply_t *mod_reply =
2285 xcb_dri3_get_supported_modifiers_reply(conn, mod_cookie, &error);
2286 free(error);
2287
2288 if (!mod_reply || (mod_reply->num_window_modifiers == 0 &&
2289 mod_reply->num_screen_modifiers == 0)) {
2290 free(mod_reply);
2291 goto out;
2292 }
2293
2294 uint32_t n = 0;
2295 uint32_t counts[2];
2296 uint64_t *modifiers[2];
2297
2298 if (mod_reply->num_window_modifiers) {
2299 counts[n] = mod_reply->num_window_modifiers;
2300 modifiers[n] = vk_alloc(pAllocator,
2301 counts[n] * sizeof(uint64_t),
2302 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2303 if (!modifiers[n]) {
2304 free(mod_reply);
2305 goto out;
2306 }
2307
2308 memcpy(modifiers[n],
2309 xcb_dri3_get_supported_modifiers_window_modifiers(mod_reply),
2310 counts[n] * sizeof(uint64_t));
2311 n++;
2312 }
2313
2314 if (mod_reply->num_screen_modifiers) {
2315 counts[n] = mod_reply->num_screen_modifiers;
2316 modifiers[n] = vk_alloc(pAllocator,
2317 counts[n] * sizeof(uint64_t),
2318 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2319 if (!modifiers[n]) {
2320 if (n > 0)
2321 vk_free(pAllocator, modifiers[0]);
2322 free(mod_reply);
2323 goto out;
2324 }
2325
2326 memcpy(modifiers[n],
2327 xcb_dri3_get_supported_modifiers_screen_modifiers(mod_reply),
2328 counts[n] * sizeof(uint64_t));
2329 n++;
2330 }
2331
2332 for (int i = 0; i < n; i++) {
2333 modifiers_in[i] = modifiers[i];
2334 num_modifiers_in[i] = counts[i];
2335 }
2336 *num_tranches_in = n;
2337
2338 free(mod_reply);
2339 return;
2340 #endif
2341 out:
2342 *num_tranches_in = 0;
2343 }
2344 #ifdef HAVE_X11_DRM
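/**
 * Re-query the modifiers currently advertised for the window and compare their
 * hash against the one captured at swapchain creation; returns true if the set
 * of supported modifiers has changed.
 */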
2345 static bool
2346 wsi_x11_swapchain_query_dri3_modifiers_changed(struct x11_swapchain *chain)
2347 {
2348 const struct wsi_device *wsi_device = chain->base.wsi;
2349
2350 if (wsi_device->sw || !wsi_device->supports_modifiers)
2351 return false;
2352
2353 struct wsi_drm_image_params drm_image_params;
2354 uint64_t *modifiers[2] = {NULL, NULL};
2355 uint32_t num_modifiers[2] = {0, 0};
2356
2357 struct wsi_x11_connection *wsi_conn =
2358 wsi_x11_get_connection((struct wsi_device*)chain->base.wsi, chain->conn);
2359
2360 xcb_get_geometry_reply_t *geometry =
2361 xcb_get_geometry_reply(chain->conn, xcb_get_geometry(chain->conn, chain->window), NULL);
2362 if (geometry == NULL)
2363 return false;
2364 uint32_t bit_depth = geometry->depth;
2365 free(geometry);
2366
2367 drm_image_params = (struct wsi_drm_image_params){
2368 .base.image_type = WSI_IMAGE_TYPE_DRM,
2369 .same_gpu = wsi_x11_check_dri3_compatible(wsi_device, chain->conn),
2370 .explicit_sync = chain->base.image_info.explicit_sync,
2371 };
2372
2373 wsi_x11_get_dri3_modifiers(wsi_conn, chain->conn, chain->window, bit_depth, 32,
2374 modifiers, num_modifiers,
2375 &drm_image_params.num_modifier_lists,
2376 &wsi_device->instance_alloc);
2377
2378 drm_image_params.num_modifiers = num_modifiers;
2379 drm_image_params.modifiers = (const uint64_t **)modifiers;
2380
2381 blake3_hash hash;
2382 wsi_x11_recompute_dri3_modifier_hash(&hash, &drm_image_params);
2383
2384 for (int i = 0; i < ARRAY_SIZE(modifiers); i++)
2385 vk_free(&wsi_device->instance_alloc, modifiers[i]);
2386
2387 return memcmp(hash, chain->dri3_modifier_hash, sizeof(hash)) != 0;
2388 }
2389 #endif
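/**
 * Destroy the swapchain: mark it out of date, wake up and join both worker
 * threads, tear down the queues and per-image state, and stop listening for
 * Present events on the window.
 */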
2390 static VkResult
2391 x11_swapchain_destroy(struct wsi_swapchain *anv_chain,
2392 const VkAllocationCallbacks *pAllocator)
2393 {
2394 struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
2395
2396 mtx_lock(&chain->thread_state_lock);
2397 chain->status = VK_ERROR_OUT_OF_DATE_KHR;
2398 u_cnd_monotonic_broadcast(&chain->thread_state_cond);
2399 mtx_unlock(&chain->thread_state_lock);
2400
2401 /* Push a UINT32_MAX to wake up the manager */
2402 wsi_queue_push(&chain->present_queue, UINT32_MAX);
2403 thrd_join(chain->queue_manager, NULL);
2404 thrd_join(chain->event_manager, NULL);
2405
2406 if (!chain->base.image_info.explicit_sync)
2407 wsi_queue_destroy(&chain->acquire_queue);
2408 wsi_queue_destroy(&chain->present_queue);
2409
2410 for (uint32_t i = 0; i < chain->base.image_count; i++)
2411 x11_image_finish(chain, pAllocator, &chain->images[i]);
2412 #ifdef HAVE_X11_DRM
2413 xcb_void_cookie_t cookie;
2414 xcb_unregister_for_special_event(chain->conn, chain->special_event);
2415 cookie = xcb_present_select_input_checked(chain->conn, chain->event_id,
2416 chain->window,
2417 XCB_PRESENT_EVENT_MASK_NO_EVENT);
2418 xcb_discard_reply(chain->conn, cookie.sequence);
2419 #endif
2420 mtx_destroy(&chain->present_progress_mutex);
2421 u_cnd_monotonic_destroy(&chain->present_progress_cond);
2422 mtx_destroy(&chain->thread_state_lock);
2423 u_cnd_monotonic_destroy(&chain->thread_state_cond);
2424
2425 wsi_swapchain_finish(&chain->base);
2426
2427 vk_free(pAllocator, chain);
2428
2429 return VK_SUCCESS;
2430 }
2431
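/**
 * Set or delete the _VARIABLE_REFRESH property on the drawable, which hints to
 * the X driver whether adaptive sync should be enabled for this window.
 */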
2432 static void
2433 wsi_x11_set_adaptive_sync_property(xcb_connection_t *conn,
2434 xcb_drawable_t drawable,
2435 uint32_t state)
2436 {
2437 static char const name[] = "_VARIABLE_REFRESH";
2438 xcb_intern_atom_cookie_t cookie;
2439 xcb_intern_atom_reply_t* reply;
2440 xcb_void_cookie_t check;
2441
2442 cookie = xcb_intern_atom(conn, 0, strlen(name), name);
2443 reply = xcb_intern_atom_reply(conn, cookie, NULL);
2444 if (reply == NULL)
2445 return;
2446
2447 if (state)
2448 check = xcb_change_property_checked(conn, XCB_PROP_MODE_REPLACE,
2449 drawable, reply->atom,
2450 XCB_ATOM_CARDINAL, 32, 1, &state);
2451 else
2452 check = xcb_delete_property_checked(conn, drawable, reply->atom);
2453
2454 xcb_discard_reply(conn, check.sequence);
2455 free(reply);
2456 }
2457
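/**
 * VK_KHR_present_wait implementation: wait on the swapchain's present
 * semaphore for waitValue, then wait for the present progress counter to reach
 * waitValue or for the timeout to expire.
 */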
2458 static VkResult x11_wait_for_present(struct wsi_swapchain *wsi_chain,
2459 uint64_t waitValue,
2460 uint64_t timeout)
2461 {
2462 struct x11_swapchain *chain = (struct x11_swapchain *)wsi_chain;
2463 struct timespec abs_timespec;
2464 uint64_t abs_timeout = 0;
2465 if (timeout != 0)
2466 abs_timeout = os_time_get_absolute_timeout(timeout);
2467
2468 /* Need to observe that the swapchain semaphore has been unsignalled,
2469 * as this is guaranteed when a present is complete. */
2470 VkResult result = wsi_swapchain_wait_for_present_semaphore(
2471 &chain->base, waitValue, timeout);
2472 if (result != VK_SUCCESS)
2473 return result;
2474
2475 timespec_from_nsec(&abs_timespec, abs_timeout);
2476
2477 mtx_lock(&chain->present_progress_mutex);
2478 while (chain->present_id < waitValue) {
2479 int ret = u_cnd_monotonic_timedwait(&chain->present_progress_cond,
2480 &chain->present_progress_mutex,
2481 &abs_timespec);
2482 if (ret == ETIMEDOUT) {
2483 result = VK_TIMEOUT;
2484 break;
2485 }
2486 if (ret) {
2487 result = VK_ERROR_DEVICE_LOST;
2488 break;
2489 }
2490 }
2491 if (result == VK_SUCCESS && chain->present_progress_error)
2492 result = chain->present_progress_error;
2493 mtx_unlock(&chain->present_progress_mutex);
2494 return result;
2495 }
2496
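/**
 * Minimum image count to advertise for the given present mode: the platform
 * default, raised to X11_SWAPCHAIN_MAILBOX_IMAGES whenever a MAILBOX-like
 * model is in effect.
 */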
2497 static unsigned
2498 x11_get_min_image_count_for_present_mode(struct wsi_device *wsi_device,
2499 struct wsi_x11_connection *wsi_conn,
2500 VkPresentModeKHR present_mode)
2501 {
2502 uint32_t min_image_count = x11_get_min_image_count(wsi_device, wsi_conn->is_xwayland);
2503 if (x11_requires_mailbox_image_count(wsi_device, wsi_conn, present_mode))
2504 return MAX2(min_image_count, X11_SWAPCHAIN_MAILBOX_IMAGES);
2505 else
2506 return min_image_count;
2507 }
2508
2509 /**
2510 * Create the swapchain.
2511 *
2512  * Supports the immediate, fifo and mailbox presentation modes.
2513 *
2514 */
2515 static VkResult
2516 x11_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
2517 VkDevice device,
2518 struct wsi_device *wsi_device,
2519 const VkSwapchainCreateInfoKHR *pCreateInfo,
2520 const VkAllocationCallbacks* pAllocator,
2521 struct wsi_swapchain **swapchain_out)
2522 {
2523 struct x11_swapchain *chain;
2524 xcb_void_cookie_t cookie;
2525 VkResult result;
2526 VkPresentModeKHR present_mode = wsi_swapchain_get_present_mode(wsi_device, pCreateInfo);
2527
2528 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);
2529
2530 /* Get xcb connection from the icd_surface and from that our internal struct
2531 * representing it.
2532 */
2533 xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
2534 struct wsi_x11_connection *wsi_conn =
2535 wsi_x11_get_connection(wsi_device, conn);
2536 if (!wsi_conn)
2537 return VK_ERROR_OUT_OF_HOST_MEMORY;
2538
2539 /* Get number of images in our swapchain. This count depends on:
2540 * - requested minimal image count
2541 * - device characteristics
2542 * - presentation mode.
2543 */
2544 unsigned num_images = pCreateInfo->minImageCount;
2545 if (!wsi_device->x11.strict_imageCount) {
2546 if (x11_requires_mailbox_image_count(wsi_device, wsi_conn, present_mode) ||
2547 wsi_device->x11.ensure_minImageCount) {
2548 unsigned present_mode_images = x11_get_min_image_count_for_present_mode(
2549 wsi_device, wsi_conn, pCreateInfo->presentMode);
2550 num_images = MAX2(num_images, present_mode_images);
2551 }
2552 }
2553
2554 /* Check that we have a window up-front. It is an error to not have one. */
2555 xcb_window_t window = x11_surface_get_window(icd_surface);
2556
2557    /* Get the geometry of that window. The swapchain's bit depth will match it, and the
2558     * chain's image extents should fit it so presents can use performance-optimizing flips.
2559 */
2560 xcb_get_geometry_reply_t *geometry =
2561 xcb_get_geometry_reply(conn, xcb_get_geometry(conn, window), NULL);
2562 if (geometry == NULL)
2563 return VK_ERROR_SURFACE_LOST_KHR;
2564 const uint32_t bit_depth = geometry->depth;
2565 const uint16_t cur_width = geometry->width;
2566 const uint16_t cur_height = geometry->height;
2567 free(geometry);
2568
2569 /* Allocate the actual swapchain. The size depends on image count. */
2570 size_t size = sizeof(*chain) + num_images * sizeof(chain->images[0]);
2571 chain = vk_zalloc(pAllocator, size, 8,
2572 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2573 if (chain == NULL)
2574 return VK_ERROR_OUT_OF_HOST_MEMORY;
2575
2576 int ret = mtx_init(&chain->present_progress_mutex, mtx_plain);
2577 if (ret != thrd_success) {
2578 vk_free(pAllocator, chain);
2579 return VK_ERROR_OUT_OF_HOST_MEMORY;
2580 }
2581
2582 ret = mtx_init(&chain->thread_state_lock, mtx_plain);
2583 if (ret != thrd_success) {
2584 mtx_destroy(&chain->present_progress_mutex);
2585 vk_free(pAllocator, chain);
2586 return VK_ERROR_OUT_OF_HOST_MEMORY;
2587 }
2588
2589 ret = u_cnd_monotonic_init(&chain->thread_state_cond);
2590 if (ret != thrd_success) {
2591 mtx_destroy(&chain->present_progress_mutex);
2592 mtx_destroy(&chain->thread_state_lock);
2593 vk_free(pAllocator, chain);
2594 return VK_ERROR_OUT_OF_HOST_MEMORY;
2595 }
2596
2597 ret = u_cnd_monotonic_init(&chain->present_progress_cond);
2598 if (ret != thrd_success) {
2599 mtx_destroy(&chain->present_progress_mutex);
2600 mtx_destroy(&chain->thread_state_lock);
2601 u_cnd_monotonic_destroy(&chain->thread_state_cond);
2602 vk_free(pAllocator, chain);
2603 return VK_ERROR_OUT_OF_HOST_MEMORY;
2604 }
2605
2606 uint32_t present_caps = 0;
2607 #ifdef HAVE_X11_DRM
2608 xcb_present_query_capabilities_cookie_t present_query_cookie;
2609 xcb_present_query_capabilities_reply_t *present_query_reply;
2610 present_query_cookie = xcb_present_query_capabilities(conn, window);
2611 present_query_reply = xcb_present_query_capabilities_reply(conn, present_query_cookie, NULL);
2612 if (present_query_reply) {
2613 present_caps = present_query_reply->capabilities;
2614 free(present_query_reply);
2615 }
2616 #endif
2617
2618 #ifdef HAVE_X11_DRM
2619 struct wsi_drm_image_params drm_image_params;
2620 uint32_t num_modifiers[2] = {0, 0};
2621 #endif
2622 struct wsi_base_image_params *image_params = NULL;
2623 struct wsi_cpu_image_params cpu_image_params;
2624 uint64_t *modifiers[2] = {NULL, NULL};
2625 if (wsi_device->sw) {
2626 cpu_image_params = (struct wsi_cpu_image_params) {
2627 .base.image_type = WSI_IMAGE_TYPE_CPU,
2628 .alloc_shm = wsi_conn->has_mit_shm ? &alloc_shm : NULL,
2629 };
2630 image_params = &cpu_image_params.base;
2631 } else {
2632 #ifdef HAVE_X11_DRM
2633 drm_image_params = (struct wsi_drm_image_params) {
2634 .base.image_type = WSI_IMAGE_TYPE_DRM,
2635 .same_gpu = wsi_x11_check_dri3_compatible(wsi_device, conn),
2636 .explicit_sync =
2637 #ifdef HAVE_DRI3_EXPLICIT_SYNC
2638 wsi_conn->has_dri3_explicit_sync &&
2639 (present_caps & XCB_PRESENT_CAPABILITY_SYNCOBJ) &&
2640 wsi_device_supports_explicit_sync(wsi_device),
2641 #else
2642 false,
2643 #endif
2644 };
2645 if (wsi_device->supports_modifiers) {
2646 wsi_x11_get_dri3_modifiers(wsi_conn, conn, window, bit_depth, 32,
2647 modifiers, num_modifiers,
2648 &drm_image_params.num_modifier_lists,
2649 pAllocator);
2650 drm_image_params.num_modifiers = num_modifiers;
2651 drm_image_params.modifiers = (const uint64_t **)modifiers;
2652
2653 wsi_x11_recompute_dri3_modifier_hash(&chain->dri3_modifier_hash, &drm_image_params);
2654 }
2655 image_params = &drm_image_params.base;
2656 #else
2657 unreachable("X11 DRM support missing!");
2658 #endif
2659 }
2660
2661 result = wsi_swapchain_init(wsi_device, &chain->base, device, pCreateInfo,
2662 image_params, pAllocator);
2663
2664 for (int i = 0; i < ARRAY_SIZE(modifiers); i++)
2665 vk_free(pAllocator, modifiers[i]);
2666
2667 if (result != VK_SUCCESS)
2668 goto fail_alloc;
2669
2670 chain->base.destroy = x11_swapchain_destroy;
2671 chain->base.get_wsi_image = x11_get_wsi_image;
2672 chain->base.acquire_next_image = x11_acquire_next_image;
2673 chain->base.queue_present = x11_queue_present;
2674 chain->base.wait_for_present = x11_wait_for_present;
2675 chain->base.release_images = x11_release_images;
2676 chain->base.set_present_mode = x11_set_present_mode;
2677 chain->base.present_mode = present_mode;
2678 chain->base.image_count = num_images;
2679 chain->conn = conn;
2680 chain->window = window;
2681 chain->depth = bit_depth;
2682 chain->extent = pCreateInfo->imageExtent;
2683 chain->send_sbc = 0;
2684 chain->sent_image_count = 0;
2685 chain->last_present_msc = 0;
2686 chain->status = VK_SUCCESS;
2687 chain->has_dri3_modifiers = wsi_conn->has_dri3_modifiers;
2688 chain->has_mit_shm = wsi_conn->has_mit_shm;
2689 chain->has_async_may_tear = present_caps & XCB_PRESENT_CAPABILITY_ASYNC_MAY_TEAR;
2690
2691 /* When images in the swapchain don't fit the window, X can still present them, but it won't
2692     * happen by flip, only by copy. So this is a suboptimal copy, because if the client changed
2693     * the chain extents, X might be able to flip again.
2694 */
2695 if (!wsi_device->x11.ignore_suboptimal) {
2696 if (chain->extent.width != cur_width || chain->extent.height != cur_height)
2697 chain->status = VK_SUBOPTIMAL_KHR;
2698 }
2699
2700    /* On a new swapchain this helper variable is set to false. It only has an effect once we have
2701     * done at least one flip and then gone back to copying afterwards. The presumption is that in
2702     * that case there is a high likelihood X could do flips again if the client reallocates a
2703     * new swapchain.
2704 *
2705     * Note that we used to inherit this property from 'pCreateInfo->oldSwapchain'. But when it
2706 * was true, and when the next present was completed with copying, we would return
2707 * VK_SUBOPTIMAL_KHR and hint the app to reallocate again for no good reason. If all following
2708 * presents on the surface were completed with copying because of some surface state change, we
2709 * would always return VK_SUBOPTIMAL_KHR no matter how many times the app had reallocated.
2710 *
2711     * Note also that it is questionable in general whether that mechanism is really useful. It is not
2712     * clear why, on a change from flipping to copying, we can assume a reallocation has a high chance
2713     * of making flips work again per se. In other words, it is not clear why there is a need for
2714 * another way to inform clients about suboptimal copies besides forwarding the
2715 * 'PresentOptionSuboptimal' complete mode.
2716 */
2717 chain->copy_is_suboptimal = false;
2718 #ifdef HAVE_X11_DRM
2719 /* For our swapchain we need to listen to following Present extension events:
2720 * - Configure: Window dimensions changed. Images in the swapchain might need
2721 * to be reallocated.
2722 * - Complete: An image from our swapchain was presented on the output.
2723 * - Idle: An image from our swapchain is not anymore accessed by the X
2724 * server and can be reused.
2725 */
2726 chain->event_id = xcb_generate_id(chain->conn);
2727 uint32_t event_mask = XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
2728 XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY;
2729 if (!chain->base.image_info.explicit_sync)
2730 event_mask |= XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY;
2731 xcb_present_select_input(chain->conn, chain->event_id, chain->window, event_mask);
2732
2733 /* Create an XCB event queue to hold present events outside of the usual
2734 * application event queue
2735 */
2736 chain->special_event =
2737 xcb_register_for_special_xge(chain->conn, &xcb_present_id,
2738 chain->event_id, NULL);
2739 #endif
2740 /* Create the graphics context. */
2741 chain->gc = xcb_generate_id(chain->conn);
2742 if (!chain->gc) {
2743 /* FINISHME: Choose a better error. */
2744 result = VK_ERROR_OUT_OF_HOST_MEMORY;
2745 goto fail_register;
2746 }
2747
2748 cookie = xcb_create_gc(chain->conn,
2749 chain->gc,
2750 chain->window,
2751 XCB_GC_GRAPHICS_EXPOSURES,
2752 (uint32_t []) { 0 });
2753 xcb_discard_reply(chain->conn, cookie.sequence);
2754
2755 uint32_t image = 0;
2756 for (; image < chain->base.image_count; image++) {
2757 result = x11_image_init(device, chain, pCreateInfo, pAllocator,
2758 &chain->images[image]);
2759 if (result != VK_SUCCESS)
2760 goto fail_init_images;
2761 }
2762
2763 /* The queues have a length of base.image_count + 1 because we will
2764 * occasionally use UINT32_MAX to signal the other thread that an error
2765 * has occurred and we don't want an overflow.
2766 */
2767 ret = wsi_queue_init(&chain->present_queue, chain->base.image_count + 1);
2768 if (ret) {
2769 goto fail_init_images;
2770 }
2771
2772 /* Acquire queue is only needed when using implicit sync */
2773 if (!chain->base.image_info.explicit_sync) {
2774 ret = wsi_queue_init(&chain->acquire_queue, chain->base.image_count + 1);
2775 if (ret) {
2776 wsi_queue_destroy(&chain->present_queue);
2777 goto fail_init_images;
2778 }
2779
2780 for (unsigned i = 0; i < chain->base.image_count; i++)
2781 wsi_queue_push(&chain->acquire_queue, i);
2782 }
2783
2784 ret = thrd_create(&chain->queue_manager,
2785 x11_manage_present_queue, chain);
2786 if (ret != thrd_success)
2787 goto fail_init_fifo_queue;
2788
2789 ret = thrd_create(&chain->event_manager,
2790 x11_manage_event_queue, chain);
2791 if (ret != thrd_success)
2792 goto fail_init_event_queue;
2793
2794 /* It is safe to set it here as only one swapchain can be associated with
2795 * the window, and swapchain creation does the association. At this point
2796 * we know the creation is going to succeed. */
2797 wsi_x11_set_adaptive_sync_property(conn, window,
2798 wsi_device->enable_adaptive_sync);
2799
2800 *swapchain_out = &chain->base;
2801
2802 return VK_SUCCESS;
2803
2804 fail_init_event_queue:
2805 /* Push a UINT32_MAX to wake up the manager */
2806 wsi_queue_push(&chain->present_queue, UINT32_MAX);
2807 thrd_join(chain->queue_manager, NULL);
2808
2809 fail_init_fifo_queue:
2810 wsi_queue_destroy(&chain->present_queue);
2811 if (!chain->base.image_info.explicit_sync)
2812 wsi_queue_destroy(&chain->acquire_queue);
2813
2814 fail_init_images:
2815 for (uint32_t j = 0; j < image; j++)
2816 x11_image_finish(chain, pAllocator, &chain->images[j]);
2817
2818 fail_register:
2819 #ifdef HAVE_X11_DRM
2820 xcb_unregister_for_special_event(chain->conn, chain->special_event);
2821 #endif
2822 wsi_swapchain_finish(&chain->base);
2823
2824 fail_alloc:
2825 vk_free(pAllocator, chain);
2826
2827 return result;
2828 }
2829
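/**
 * Instance-level initialization: allocate the X11 WSI interface, create the
 * per-connection hash table, read the X11-related driconf options and plug the
 * XCB/Xlib platform entrypoints into the wsi_device.
 */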
2830 VkResult
2831 wsi_x11_init_wsi(struct wsi_device *wsi_device,
2832 const VkAllocationCallbacks *alloc,
2833 const struct driOptionCache *dri_options)
2834 {
2835 struct wsi_x11 *wsi;
2836 VkResult result;
2837
2838 wsi = vk_alloc(alloc, sizeof(*wsi), 8,
2839 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
2840 if (!wsi) {
2841 result = VK_ERROR_OUT_OF_HOST_MEMORY;
2842 goto fail;
2843 }
2844
2845 int ret = mtx_init(&wsi->mutex, mtx_plain);
2846 if (ret != thrd_success) {
2847 if (ret == ENOMEM) {
2848 result = VK_ERROR_OUT_OF_HOST_MEMORY;
2849 } else {
2850 /* FINISHME: Choose a better error. */
2851 result = VK_ERROR_OUT_OF_HOST_MEMORY;
2852 }
2853
2854 goto fail_alloc;
2855 }
2856
2857 wsi->connections = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
2858 _mesa_key_pointer_equal);
2859 if (!wsi->connections) {
2860 result = VK_ERROR_OUT_OF_HOST_MEMORY;
2861 goto fail_mutex;
2862 }
2863
2864 if (dri_options) {
2865 if (driCheckOption(dri_options, "vk_x11_override_min_image_count", DRI_INT)) {
2866 wsi_device->x11.override_minImageCount =
2867 driQueryOptioni(dri_options, "vk_x11_override_min_image_count");
2868 }
2869 if (driCheckOption(dri_options, "vk_x11_strict_image_count", DRI_BOOL)) {
2870 wsi_device->x11.strict_imageCount =
2871 driQueryOptionb(dri_options, "vk_x11_strict_image_count");
2872 }
2873 if (driCheckOption(dri_options, "vk_x11_ensure_min_image_count", DRI_BOOL)) {
2874 wsi_device->x11.ensure_minImageCount =
2875 driQueryOptionb(dri_options, "vk_x11_ensure_min_image_count");
2876 }
2877 wsi_device->x11.xwaylandWaitReady = true;
2878 if (driCheckOption(dri_options, "vk_xwayland_wait_ready", DRI_BOOL)) {
2879 wsi_device->x11.xwaylandWaitReady =
2880 driQueryOptionb(dri_options, "vk_xwayland_wait_ready");
2881 }
2882
2883 if (driCheckOption(dri_options, "vk_x11_ignore_suboptimal", DRI_BOOL)) {
2884 wsi_device->x11.ignore_suboptimal =
2885 driQueryOptionb(dri_options, "vk_x11_ignore_suboptimal");
2886 }
2887 }
2888
2889 wsi->base.get_support = x11_surface_get_support;
2890 wsi->base.get_capabilities2 = x11_surface_get_capabilities2;
2891 wsi->base.get_formats = x11_surface_get_formats;
2892 wsi->base.get_formats2 = x11_surface_get_formats2;
2893 wsi->base.get_present_modes = x11_surface_get_present_modes;
2894 wsi->base.get_present_rectangles = x11_surface_get_present_rectangles;
2895 wsi->base.create_swapchain = x11_surface_create_swapchain;
2896
2897 wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB] = &wsi->base;
2898 wsi_device->wsi[VK_ICD_WSI_PLATFORM_XLIB] = &wsi->base;
2899
2900 return VK_SUCCESS;
2901
2902 fail_mutex:
2903 mtx_destroy(&wsi->mutex);
2904 fail_alloc:
2905 vk_free(alloc, wsi);
2906 fail:
2907 wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB] = NULL;
2908 wsi_device->wsi[VK_ICD_WSI_PLATFORM_XLIB] = NULL;
2909
2910 return result;
2911 }
2912
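/**
 * Instance-level teardown: destroy all cached per-connection state and free
 * the X11 WSI interface.
 */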
2913 void
2914 wsi_x11_finish_wsi(struct wsi_device *wsi_device,
2915 const VkAllocationCallbacks *alloc)
2916 {
2917 struct wsi_x11 *wsi =
2918 (struct wsi_x11 *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB];
2919
2920 if (wsi) {
2921 hash_table_foreach(wsi->connections, entry)
2922 wsi_x11_connection_destroy(wsi_device, entry->data);
2923
2924 _mesa_hash_table_destroy(wsi->connections, NULL);
2925
2926 mtx_destroy(&wsi->mutex);
2927
2928 vk_free(alloc, wsi);
2929 }
2930 }
2931