xref: /aosp_15_r20/external/mesa3d/src/egl/drivers/dri2/platform_wayland.c (revision 6104692788411f58d303aa86923a9ff6ecaded22)
1 /*
2  * Copyright © 2011-2012 Intel Corporation
3  * Copyright © 2012 Collabora, Ltd.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the next
13  * paragraph) shall be included in all copies or substantial portions of the
14  * Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
19  * NONINFRINGEMENT.  IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
20  * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
21  * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23  * DEALINGS IN THE SOFTWARE.
24  *
25  * Authors:
26  *    Kristian Høgsberg <[email protected]>
27  *    Benjamin Franzke <[email protected]>
28  */
29 
30 #include <dlfcn.h>
31 #include <errno.h>
32 #include <fcntl.h>
33 #include <limits.h>
34 #include <stdint.h>
35 #include <stdlib.h>
36 #include <string.h>
37 #include <unistd.h>
38 #include <xf86drm.h>
39 #include "drm-uapi/drm_fourcc.h"
40 #include <sys/mman.h>
41 #include <vulkan/vulkan_core.h>
42 #include <vulkan/vulkan_wayland.h>
43 
44 #include "util/anon_file.h"
45 #include "util/u_vector.h"
46 #include "util/format/u_formats.h"
47 #include "main/glconfig.h"
48 #include "egl_dri2.h"
49 #include "eglglobals.h"
50 #include "kopper_interface.h"
51 #include "loader.h"
52 #include "loader_dri_helper.h"
53 #include "dri_util.h"
54 #include <loader_wayland_helper.h>
55 
56 #include "linux-dmabuf-unstable-v1-client-protocol.h"
57 #include "wayland-drm-client-protocol.h"
58 #include <wayland-client.h>
59 #include <wayland-egl-backend.h>
60 
61 /*
62  * The index of entries in this table is used as a bitmask in
63  * dri2_dpy->formats.formats_bitmap, which tracks the formats supported
64  * by our server.
65  */
66 static const struct dri2_wl_visual {
67    uint32_t wl_drm_format;
68    int pipe_format;
69    /* alt_pipe_format is a substitute wl_buffer format to use for a
70     * wl-server unsupported pipe_format, ie. some other pipe_format in
71     * the table, of the same precision but with different channel ordering, or
72     * PIPE_FORMAT_NONE if an alternate format is not needed or supported.
73     * The code checks if alt_pipe_format can be used as a fallback for a
74     * pipe_format for a given wl-server implementation.
75     */
76    int alt_pipe_format;
77    int opaque_wl_drm_format;
78 } dri2_wl_visuals[] = {
79    {
80       WL_DRM_FORMAT_ABGR16F,
81       PIPE_FORMAT_R16G16B16A16_FLOAT,
82       PIPE_FORMAT_NONE,
83       WL_DRM_FORMAT_XBGR16F,
84    },
85    {
86       WL_DRM_FORMAT_XBGR16F,
87       PIPE_FORMAT_R16G16B16X16_FLOAT,
88       PIPE_FORMAT_NONE,
89       WL_DRM_FORMAT_XBGR16F,
90    },
91    {
92       WL_DRM_FORMAT_XRGB2101010,
93       PIPE_FORMAT_B10G10R10X2_UNORM,
94       PIPE_FORMAT_R10G10B10X2_UNORM,
95       WL_DRM_FORMAT_XRGB2101010,
96    },
97    {
98       WL_DRM_FORMAT_ARGB2101010,
99       PIPE_FORMAT_B10G10R10A2_UNORM,
100       PIPE_FORMAT_R10G10B10A2_UNORM,
101       WL_DRM_FORMAT_XRGB2101010,
102    },
103    {
104       WL_DRM_FORMAT_XBGR2101010,
105       PIPE_FORMAT_R10G10B10X2_UNORM,
106       PIPE_FORMAT_B10G10R10X2_UNORM,
107       WL_DRM_FORMAT_XBGR2101010,
108    },
109    {
110       WL_DRM_FORMAT_ABGR2101010,
111       PIPE_FORMAT_R10G10B10A2_UNORM,
112       PIPE_FORMAT_B10G10R10A2_UNORM,
113       WL_DRM_FORMAT_XBGR2101010,
114    },
115    {
116       WL_DRM_FORMAT_XRGB8888,
117       PIPE_FORMAT_BGRX8888_UNORM,
118       PIPE_FORMAT_NONE,
119       WL_DRM_FORMAT_XRGB8888,
120    },
121    {
122       WL_DRM_FORMAT_ARGB8888,
123       PIPE_FORMAT_BGRA8888_UNORM,
124       PIPE_FORMAT_NONE,
125       WL_DRM_FORMAT_XRGB8888,
126    },
127    {
128       WL_DRM_FORMAT_ABGR8888,
129       PIPE_FORMAT_RGBA8888_UNORM,
130       PIPE_FORMAT_NONE,
131       WL_DRM_FORMAT_XBGR8888,
132    },
133    {
134       WL_DRM_FORMAT_XBGR8888,
135       PIPE_FORMAT_RGBX8888_UNORM,
136       PIPE_FORMAT_NONE,
137       WL_DRM_FORMAT_XBGR8888,
138    },
139    {
140       WL_DRM_FORMAT_RGB565,
141       PIPE_FORMAT_B5G6R5_UNORM,
142       PIPE_FORMAT_NONE,
143       WL_DRM_FORMAT_RGB565,
144    },
145    {
146       WL_DRM_FORMAT_ARGB1555,
147       PIPE_FORMAT_B5G5R5A1_UNORM,
148       PIPE_FORMAT_R5G5B5A1_UNORM,
149       WL_DRM_FORMAT_XRGB1555,
150    },
151    {
152       WL_DRM_FORMAT_XRGB1555,
153       PIPE_FORMAT_B5G5R5X1_UNORM,
154       PIPE_FORMAT_R5G5B5X1_UNORM,
155       WL_DRM_FORMAT_XRGB1555,
156    },
157    {
158       WL_DRM_FORMAT_ARGB4444,
159       PIPE_FORMAT_B4G4R4A4_UNORM,
160       PIPE_FORMAT_R4G4B4A4_UNORM,
161       WL_DRM_FORMAT_XRGB4444,
162    },
163    {
164       WL_DRM_FORMAT_XRGB4444,
165       PIPE_FORMAT_B4G4R4X4_UNORM,
166       PIPE_FORMAT_R4G4B4X4_UNORM,
167       WL_DRM_FORMAT_XRGB4444,
168    },
169 };
170 
171 static int
dri2_wl_visual_idx_from_pipe_format(enum pipe_format pipe_format)172 dri2_wl_visual_idx_from_pipe_format(enum pipe_format pipe_format)
173 {
174    if (util_format_is_srgb(pipe_format))
175       pipe_format = util_format_linear(pipe_format);
176 
177    for (int i = 0; i < ARRAY_SIZE(dri2_wl_visuals); i++) {
178       if (dri2_wl_visuals[i].pipe_format == pipe_format)
179          return i;
180    }
181 
182    return -1;
183 }
184 
185 static int
dri2_wl_visual_idx_from_config(const __DRIconfig * config)186 dri2_wl_visual_idx_from_config(const __DRIconfig *config)
187 {
188    struct gl_config *gl_config = (struct gl_config *) config;
189 
190    return dri2_wl_visual_idx_from_pipe_format(gl_config->color_format);
191 }
192 
193 static int
dri2_wl_visual_idx_from_fourcc(uint32_t fourcc)194 dri2_wl_visual_idx_from_fourcc(uint32_t fourcc)
195 {
196    for (int i = 0; i < ARRAY_SIZE(dri2_wl_visuals); i++) {
197       /* wl_drm format codes overlap with DRIImage FourCC codes for all formats
198        * we support. */
199       if (dri2_wl_visuals[i].wl_drm_format == fourcc)
200          return i;
201    }
202 
203    return -1;
204 }
205 
206 static int
dri2_wl_shm_format_from_visual_idx(int idx)207 dri2_wl_shm_format_from_visual_idx(int idx)
208 {
209    uint32_t fourcc = dri2_wl_visuals[idx].wl_drm_format;
210 
211    if (fourcc == WL_DRM_FORMAT_ARGB8888)
212       return WL_SHM_FORMAT_ARGB8888;
213    else if (fourcc == WL_DRM_FORMAT_XRGB8888)
214       return WL_SHM_FORMAT_XRGB8888;
215    else
216       return fourcc;
217 }
218 
219 static int
dri2_wl_visual_idx_from_shm_format(uint32_t shm_format)220 dri2_wl_visual_idx_from_shm_format(uint32_t shm_format)
221 {
222    uint32_t fourcc;
223 
224    if (shm_format == WL_SHM_FORMAT_ARGB8888)
225       fourcc = WL_DRM_FORMAT_ARGB8888;
226    else if (shm_format == WL_SHM_FORMAT_XRGB8888)
227       fourcc = WL_DRM_FORMAT_XRGB8888;
228    else
229       fourcc = shm_format;
230 
231    return dri2_wl_visual_idx_from_fourcc(fourcc);
232 }
233 
234 bool
dri2_wl_is_format_supported(void * user_data,uint32_t format)235 dri2_wl_is_format_supported(void *user_data, uint32_t format)
236 {
237    _EGLDisplay *disp = (_EGLDisplay *)user_data;
238    struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
239    int j = dri2_wl_visual_idx_from_fourcc(format);
240 
241    if (j == -1)
242       return false;
243 
244    for (int i = 0; dri2_dpy->driver_configs[i]; i++)
245       if (j == dri2_wl_visual_idx_from_config(dri2_dpy->driver_configs[i]))
246          return true;
247 
248    return false;
249 }
250 
251 static bool
server_supports_format(struct dri2_wl_formats * formats,int idx)252 server_supports_format(struct dri2_wl_formats *formats, int idx)
253 {
254    return idx >= 0 && BITSET_TEST(formats->formats_bitmap, idx);
255 }
256 
257 static bool
server_supports_pipe_format(struct dri2_wl_formats * formats,enum pipe_format format)258 server_supports_pipe_format(struct dri2_wl_formats *formats,
259                             enum pipe_format format)
260 {
261    return server_supports_format(formats,
262                                  dri2_wl_visual_idx_from_pipe_format(format));
263 }
264 
265 static bool
server_supports_fourcc(struct dri2_wl_formats * formats,uint32_t fourcc)266 server_supports_fourcc(struct dri2_wl_formats *formats, uint32_t fourcc)
267 {
268    return server_supports_format(formats, dri2_wl_visual_idx_from_fourcc(fourcc));
269 }
270 
271 static int
roundtrip(struct dri2_egl_display * dri2_dpy)272 roundtrip(struct dri2_egl_display *dri2_dpy)
273 {
274    return wl_display_roundtrip_queue(dri2_dpy->wl_dpy, dri2_dpy->wl_queue);
275 }
276 
277 static void
wl_buffer_release(void * data,struct wl_buffer * buffer)278 wl_buffer_release(void *data, struct wl_buffer *buffer)
279 {
280    struct dri2_egl_surface *dri2_surf = data;
281    int i;
282 
283    for (i = 0; i < ARRAY_SIZE(dri2_surf->color_buffers); ++i)
284       if (dri2_surf->color_buffers[i].wl_buffer == buffer)
285          break;
286 
287    assert(i < ARRAY_SIZE(dri2_surf->color_buffers));
288 
289    if (dri2_surf->color_buffers[i].wl_release) {
290       wl_buffer_destroy(buffer);
291       dri2_surf->color_buffers[i].wl_release = false;
292       dri2_surf->color_buffers[i].wl_buffer = NULL;
293       dri2_surf->color_buffers[i].age = 0;
294    }
295 
296    dri2_surf->color_buffers[i].locked = false;
297 }
298 
299 static const struct wl_buffer_listener wl_buffer_listener = {
300    .release = wl_buffer_release,
301 };
302 
303 static void
dri2_wl_formats_fini(struct dri2_wl_formats * formats)304 dri2_wl_formats_fini(struct dri2_wl_formats *formats)
305 {
306    unsigned int i;
307 
308    for (i = 0; i < formats->num_formats; i++)
309       u_vector_finish(&formats->modifiers[i]);
310 
311    free(formats->modifiers);
312    free(formats->formats_bitmap);
313 }
314 
315 static int
dri2_wl_formats_init(struct dri2_wl_formats * formats)316 dri2_wl_formats_init(struct dri2_wl_formats *formats)
317 {
318    unsigned int i, j;
319 
320    /* formats->formats_bitmap tells us if a format in dri2_wl_visuals is present
321     * or not. So we must compute the amount of unsigned int's needed to
322     * represent all the formats of dri2_wl_visuals. We use BITSET_WORDS for
323     * this task. */
324    formats->num_formats = ARRAY_SIZE(dri2_wl_visuals);
325    formats->formats_bitmap = calloc(BITSET_WORDS(formats->num_formats),
326                                     sizeof(*formats->formats_bitmap));
327    if (!formats->formats_bitmap)
328       goto err;
329 
330    /* Here we have an array of u_vector's to store the modifiers supported by
331     * each format in the bitmask. */
332    formats->modifiers =
333       calloc(formats->num_formats, sizeof(*formats->modifiers));
334    if (!formats->modifiers)
335       goto err_modifier;
336 
337    for (i = 0; i < formats->num_formats; i++)
338       if (!u_vector_init_pow2(&formats->modifiers[i], 4, sizeof(uint64_t))) {
339          j = i;
340          goto err_vector_init;
341       }
342 
343    return 0;
344 
345 err_vector_init:
346    for (i = 0; i < j; i++)
347       u_vector_finish(&formats->modifiers[i]);
348    free(formats->modifiers);
349 err_modifier:
350    free(formats->formats_bitmap);
351 err:
352    _eglError(EGL_BAD_ALLOC, "dri2_wl_formats_init");
353    return -1;
354 }
355 
356 static void
dmabuf_feedback_format_table_fini(struct dmabuf_feedback_format_table * format_table)357 dmabuf_feedback_format_table_fini(
358    struct dmabuf_feedback_format_table *format_table)
359 {
360    if (format_table->data && format_table->data != MAP_FAILED)
361       munmap(format_table->data, format_table->size);
362 }
363 
364 static void
dmabuf_feedback_format_table_init(struct dmabuf_feedback_format_table * format_table)365 dmabuf_feedback_format_table_init(
366    struct dmabuf_feedback_format_table *format_table)
367 {
368    memset(format_table, 0, sizeof(*format_table));
369 }
370 
371 static void
dmabuf_feedback_tranche_fini(struct dmabuf_feedback_tranche * tranche)372 dmabuf_feedback_tranche_fini(struct dmabuf_feedback_tranche *tranche)
373 {
374    dri2_wl_formats_fini(&tranche->formats);
375 }
376 
377 static int
dmabuf_feedback_tranche_init(struct dmabuf_feedback_tranche * tranche)378 dmabuf_feedback_tranche_init(struct dmabuf_feedback_tranche *tranche)
379 {
380    memset(tranche, 0, sizeof(*tranche));
381 
382    if (dri2_wl_formats_init(&tranche->formats) < 0)
383       return -1;
384 
385    return 0;
386 }
387 
388 static void
dmabuf_feedback_fini(struct dmabuf_feedback * dmabuf_feedback)389 dmabuf_feedback_fini(struct dmabuf_feedback *dmabuf_feedback)
390 {
391    dmabuf_feedback_tranche_fini(&dmabuf_feedback->pending_tranche);
392 
393    util_dynarray_foreach (&dmabuf_feedback->tranches,
394                           struct dmabuf_feedback_tranche, tranche)
395       dmabuf_feedback_tranche_fini(tranche);
396    util_dynarray_fini(&dmabuf_feedback->tranches);
397 
398    dmabuf_feedback_format_table_fini(&dmabuf_feedback->format_table);
399 }
400 
401 static int
dmabuf_feedback_init(struct dmabuf_feedback * dmabuf_feedback)402 dmabuf_feedback_init(struct dmabuf_feedback *dmabuf_feedback)
403 {
404    memset(dmabuf_feedback, 0, sizeof(*dmabuf_feedback));
405 
406    if (dmabuf_feedback_tranche_init(&dmabuf_feedback->pending_tranche) < 0)
407       return -1;
408 
409    util_dynarray_init(&dmabuf_feedback->tranches, NULL);
410 
411    dmabuf_feedback_format_table_init(&dmabuf_feedback->format_table);
412 
413    return 0;
414 }
415 
416 static void
resize_callback(struct wl_egl_window * wl_win,void * data)417 resize_callback(struct wl_egl_window *wl_win, void *data)
418 {
419    struct dri2_egl_surface *dri2_surf = data;
420 
421    if (dri2_surf->base.Width == wl_win->width &&
422        dri2_surf->base.Height == wl_win->height)
423       return;
424 
425    dri2_surf->resized = true;
426 
427    /* Update the surface size as soon as native window is resized; from user
428     * pov, this makes the effect that resize is done immediately after native
429     * window resize, without requiring to wait until the first draw.
430     *
431     * A more detailed and lengthy explanation can be found at
432     * https://lists.freedesktop.org/archives/mesa-dev/2018-June/196474.html
433     */
434    if (!dri2_surf->back) {
435       dri2_surf->base.Width = wl_win->width;
436       dri2_surf->base.Height = wl_win->height;
437    }
438    dri_invalidate_drawable(dri2_surf->dri_drawable);
439 }
440 
441 static void
destroy_window_callback(void * data)442 destroy_window_callback(void *data)
443 {
444    struct dri2_egl_surface *dri2_surf = data;
445    dri2_surf->wl_win = NULL;
446 }
447 
448 static struct wl_surface *
get_wl_surface_proxy(struct wl_egl_window * window)449 get_wl_surface_proxy(struct wl_egl_window *window)
450 {
451    /* Version 3 of wl_egl_window introduced a version field at the same
452     * location where a pointer to wl_surface was stored. Thus, if
453     * window->version is dereferenceable, we've been given an older version of
454     * wl_egl_window, and window->version points to wl_surface */
455    if (_eglPointerIsDereferenceable((void *)(window->version))) {
456       return wl_proxy_create_wrapper((void *)(window->version));
457    }
458    return wl_proxy_create_wrapper(window->surface);
459 }
460 
461 static void
surface_dmabuf_feedback_format_table(void * data,struct zwp_linux_dmabuf_feedback_v1 * zwp_linux_dmabuf_feedback_v1,int32_t fd,uint32_t size)462 surface_dmabuf_feedback_format_table(
463    void *data,
464    struct zwp_linux_dmabuf_feedback_v1 *zwp_linux_dmabuf_feedback_v1,
465    int32_t fd, uint32_t size)
466 {
467    struct dri2_egl_surface *dri2_surf = data;
468    struct dmabuf_feedback *feedback = &dri2_surf->pending_dmabuf_feedback;
469 
470    feedback->format_table.size = size;
471    feedback->format_table.data =
472       mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
473 
474    close(fd);
475 }
476 
477 static void
surface_dmabuf_feedback_main_device(void * data,struct zwp_linux_dmabuf_feedback_v1 * dmabuf_feedback,struct wl_array * device)478 surface_dmabuf_feedback_main_device(
479    void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
480    struct wl_array *device)
481 {
482    struct dri2_egl_surface *dri2_surf = data;
483    struct dmabuf_feedback *feedback = &dri2_surf->pending_dmabuf_feedback;
484 
485    memcpy(&feedback->main_device, device->data, sizeof(feedback->main_device));
486 
487    /* Compositors may support switching render devices and change the main
488     * device of the dma-buf feedback. In this case, when we reallocate the
489     * buffers of the surface we must ensure that it is not allocated in memory
490     * that is only visible to the GPU that EGL is using, as the compositor will
491     * have to import them to the render device it is using.
492     *
493     * TODO: we still don't know how to allocate such buffers.
494     */
495    if (dri2_surf->dmabuf_feedback.main_device != 0 &&
496        (feedback->main_device != dri2_surf->dmabuf_feedback.main_device))
497       dri2_surf->compositor_using_another_device = true;
498    else
499       dri2_surf->compositor_using_another_device = false;
500 }
501 
502 static void
surface_dmabuf_feedback_tranche_target_device(void * data,struct zwp_linux_dmabuf_feedback_v1 * dmabuf_feedback,struct wl_array * device)503 surface_dmabuf_feedback_tranche_target_device(
504    void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
505    struct wl_array *device)
506 {
507    struct dri2_egl_surface *dri2_surf = data;
508    struct dmabuf_feedback *feedback = &dri2_surf->pending_dmabuf_feedback;
509 
510    memcpy(&feedback->pending_tranche.target_device, device->data,
511           sizeof(feedback->pending_tranche.target_device));
512 }
513 
514 static void
surface_dmabuf_feedback_tranche_flags(void * data,struct zwp_linux_dmabuf_feedback_v1 * dmabuf_feedback,uint32_t flags)515 surface_dmabuf_feedback_tranche_flags(
516    void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
517    uint32_t flags)
518 {
519    struct dri2_egl_surface *dri2_surf = data;
520    struct dmabuf_feedback *feedback = &dri2_surf->pending_dmabuf_feedback;
521 
522    feedback->pending_tranche.flags = flags;
523 }
524 
525 static void
surface_dmabuf_feedback_tranche_formats(void * data,struct zwp_linux_dmabuf_feedback_v1 * dmabuf_feedback,struct wl_array * indices)526 surface_dmabuf_feedback_tranche_formats(
527    void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
528    struct wl_array *indices)
529 {
530    struct dri2_egl_surface *dri2_surf = data;
531    struct dmabuf_feedback *feedback = &dri2_surf->pending_dmabuf_feedback;
532    uint32_t present_format = dri2_surf->format;
533    uint64_t *modifier_ptr, modifier;
534    uint32_t format;
535    uint16_t *index;
536    int visual_idx;
537 
538    if (dri2_surf->base.PresentOpaque) {
539       visual_idx = dri2_wl_visual_idx_from_fourcc(present_format);
540       if (visual_idx != -1)
541          present_format = dri2_wl_visuals[visual_idx].opaque_wl_drm_format;
542    }
543 
544    /* Compositor may advertise or not a format table. If it does, we use it.
545     * Otherwise, we steal the most recent advertised format table. If we don't
546     * have a most recent advertised format table, compositor did something
547     * wrong. */
548    if (feedback->format_table.data == NULL) {
549       feedback->format_table = dri2_surf->dmabuf_feedback.format_table;
550       dmabuf_feedback_format_table_init(
551          &dri2_surf->dmabuf_feedback.format_table);
552    }
553    if (feedback->format_table.data == MAP_FAILED) {
554       _eglLog(_EGL_WARNING, "wayland-egl: we could not map the format table "
555                             "so we won't be able to use this batch of dma-buf "
556                             "feedback events.");
557       return;
558    }
559    if (feedback->format_table.data == NULL) {
560       _eglLog(_EGL_WARNING,
561               "wayland-egl: compositor didn't advertise a format "
562               "table, so we won't be able to use this batch of dma-buf "
563               "feedback events.");
564       return;
565    }
566 
567    wl_array_for_each (index, indices) {
568       format = feedback->format_table.data[*index].format;
569       modifier = feedback->format_table.data[*index].modifier;
570 
571       /* Skip formats that are not the one the surface is already using. We
572        * can't switch to another format. */
573       if (format != present_format)
574          continue;
575 
576       /* We are sure that the format is supported because of the check above. */
577       visual_idx = dri2_wl_visual_idx_from_fourcc(format);
578       assert(visual_idx != -1);
579 
580       BITSET_SET(feedback->pending_tranche.formats.formats_bitmap, visual_idx);
581       modifier_ptr =
582          u_vector_add(&feedback->pending_tranche.formats.modifiers[visual_idx]);
583       if (modifier_ptr)
584          *modifier_ptr = modifier;
585    }
586 }
587 
588 static void
surface_dmabuf_feedback_tranche_done(void * data,struct zwp_linux_dmabuf_feedback_v1 * dmabuf_feedback)589 surface_dmabuf_feedback_tranche_done(
590    void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
591 {
592    struct dri2_egl_surface *dri2_surf = data;
593    struct dmabuf_feedback *feedback = &dri2_surf->pending_dmabuf_feedback;
594 
595    /* Add tranche to array of tranches. */
596    util_dynarray_append(&feedback->tranches, struct dmabuf_feedback_tranche,
597                         feedback->pending_tranche);
598 
599    dmabuf_feedback_tranche_init(&feedback->pending_tranche);
600 }
601 
602 static void
surface_dmabuf_feedback_done(void * data,struct zwp_linux_dmabuf_feedback_v1 * dmabuf_feedback)603 surface_dmabuf_feedback_done(
604    void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
605 {
606    struct dri2_egl_surface *dri2_surf = data;
607 
608    /* The dma-buf feedback protocol states that surface dma-buf feedback should
609     * be sent by the compositor only if its buffers are using a suboptimal pair
610     * of format and modifier. We can't change the buffer format, but we can
611     * reallocate with another modifier. So we raise this flag in order to force
612     * buffer reallocation based on the dma-buf feedback sent. */
613    dri2_surf->received_dmabuf_feedback = true;
614 
615    dmabuf_feedback_fini(&dri2_surf->dmabuf_feedback);
616    dri2_surf->dmabuf_feedback = dri2_surf->pending_dmabuf_feedback;
617    dmabuf_feedback_init(&dri2_surf->pending_dmabuf_feedback);
618 }
619 
620 static const struct zwp_linux_dmabuf_feedback_v1_listener
621    surface_dmabuf_feedback_listener = {
622       .format_table = surface_dmabuf_feedback_format_table,
623       .main_device = surface_dmabuf_feedback_main_device,
624       .tranche_target_device = surface_dmabuf_feedback_tranche_target_device,
625       .tranche_flags = surface_dmabuf_feedback_tranche_flags,
626       .tranche_formats = surface_dmabuf_feedback_tranche_formats,
627       .tranche_done = surface_dmabuf_feedback_tranche_done,
628       .done = surface_dmabuf_feedback_done,
629 };
630 
631 static bool
dri2_wl_modifiers_have_common(struct u_vector * modifiers1,struct u_vector * modifiers2)632 dri2_wl_modifiers_have_common(struct u_vector *modifiers1,
633                               struct u_vector *modifiers2)
634 {
635    uint64_t *mod1, *mod2;
636 
637    /* If both modifier vectors are empty, assume there is a compatible
638     * implicit modifier. */
639    if (u_vector_length(modifiers1) == 0 && u_vector_length(modifiers2) == 0)
640        return true;
641 
642    u_vector_foreach(mod1, modifiers1)
643    {
644       u_vector_foreach(mod2, modifiers2)
645       {
646          if (*mod1 == *mod2)
647             return true;
648       }
649    }
650 
651    return false;
652 }
653 
654 /**
655  * Called via eglCreateWindowSurface(), drv->CreateWindowSurface().
656  */
657 static _EGLSurface *
dri2_wl_create_window_surface(_EGLDisplay * disp,_EGLConfig * conf,void * native_window,const EGLint * attrib_list)658 dri2_wl_create_window_surface(_EGLDisplay *disp, _EGLConfig *conf,
659                               void *native_window, const EGLint *attrib_list)
660 {
661    struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
662    struct dri2_egl_config *dri2_conf = dri2_egl_config(conf);
663    struct wl_egl_window *window = native_window;
664    struct dri2_egl_surface *dri2_surf;
665    struct zwp_linux_dmabuf_v1 *dmabuf_wrapper;
666    int visual_idx;
667    const __DRIconfig *config;
668 
669    if (!window) {
670       _eglError(EGL_BAD_NATIVE_WINDOW, "dri2_create_surface");
671       return NULL;
672    }
673 
674    if (window->driver_private) {
675       _eglError(EGL_BAD_ALLOC, "dri2_create_surface");
676       return NULL;
677    }
678 
679    dri2_surf = calloc(1, sizeof *dri2_surf);
680    if (!dri2_surf) {
681       _eglError(EGL_BAD_ALLOC, "dri2_create_surface");
682       return NULL;
683    }
684 
685    if (!dri2_init_surface(&dri2_surf->base, disp, EGL_WINDOW_BIT, conf,
686                           attrib_list, false, native_window))
687       goto cleanup_surf;
688 
689    config = dri2_get_dri_config(dri2_conf, EGL_WINDOW_BIT,
690                                 dri2_surf->base.GLColorspace);
691 
692    if (!config) {
693       _eglError(EGL_BAD_MATCH,
694                 "Unsupported surfacetype/colorspace configuration");
695       goto cleanup_surf;
696    }
697 
698    dri2_surf->base.Width = window->width;
699    dri2_surf->base.Height = window->height;
700 
701    visual_idx = dri2_wl_visual_idx_from_config(config);
702    assert(visual_idx != -1);
703    assert(dri2_wl_visuals[visual_idx].pipe_format != PIPE_FORMAT_NONE);
704 
705    if (dri2_dpy->wl_dmabuf || dri2_dpy->wl_drm) {
706       dri2_surf->format = dri2_wl_visuals[visual_idx].wl_drm_format;
707    } else {
708       assert(dri2_dpy->wl_shm);
709       dri2_surf->format = dri2_wl_shm_format_from_visual_idx(visual_idx);
710    }
711 
712    if (dri2_surf->base.PresentOpaque) {
713       uint32_t opaque_fourcc =
714          dri2_wl_visuals[visual_idx].opaque_wl_drm_format;
715       int opaque_visual_idx = dri2_wl_visual_idx_from_fourcc(opaque_fourcc);
716 
717       if (!server_supports_format(&dri2_dpy->formats, opaque_visual_idx) ||
718           !dri2_wl_modifiers_have_common(
719                &dri2_dpy->formats.modifiers[visual_idx],
720                &dri2_dpy->formats.modifiers[opaque_visual_idx])) {
721          _eglError(EGL_BAD_MATCH, "Unsupported opaque format");
722          goto cleanup_surf;
723       }
724    }
725 
726    dri2_surf->wl_queue = wl_display_create_queue_with_name(dri2_dpy->wl_dpy,
727                                                            "mesa egl surface queue");
728    if (!dri2_surf->wl_queue) {
729       _eglError(EGL_BAD_ALLOC, "dri2_create_surface");
730       goto cleanup_surf;
731    }
732 
733    if (dri2_dpy->wl_drm) {
734       dri2_surf->wl_drm_wrapper = wl_proxy_create_wrapper(dri2_dpy->wl_drm);
735       if (!dri2_surf->wl_drm_wrapper) {
736          _eglError(EGL_BAD_ALLOC, "dri2_create_surface");
737          goto cleanup_queue;
738       }
739       wl_proxy_set_queue((struct wl_proxy *)dri2_surf->wl_drm_wrapper,
740                          dri2_surf->wl_queue);
741    }
742 
743    dri2_surf->wl_dpy_wrapper = wl_proxy_create_wrapper(dri2_dpy->wl_dpy);
744    if (!dri2_surf->wl_dpy_wrapper) {
745       _eglError(EGL_BAD_ALLOC, "dri2_create_surface");
746       goto cleanup_drm;
747    }
748    wl_proxy_set_queue((struct wl_proxy *)dri2_surf->wl_dpy_wrapper,
749                       dri2_surf->wl_queue);
750 
751    dri2_surf->wl_surface_wrapper = get_wl_surface_proxy(window);
752    if (!dri2_surf->wl_surface_wrapper) {
753       _eglError(EGL_BAD_ALLOC, "dri2_create_surface");
754       goto cleanup_dpy_wrapper;
755    }
756    wl_proxy_set_queue((struct wl_proxy *)dri2_surf->wl_surface_wrapper,
757                       dri2_surf->wl_queue);
758 
759    if (dri2_dpy->wl_dmabuf &&
760        zwp_linux_dmabuf_v1_get_version(dri2_dpy->wl_dmabuf) >=
761           ZWP_LINUX_DMABUF_V1_GET_SURFACE_FEEDBACK_SINCE_VERSION) {
762       dmabuf_wrapper = wl_proxy_create_wrapper(dri2_dpy->wl_dmabuf);
763       if (!dmabuf_wrapper) {
764          _eglError(EGL_BAD_ALLOC, "dri2_create_surface");
765          goto cleanup_surf_wrapper;
766       }
767       wl_proxy_set_queue((struct wl_proxy *)dmabuf_wrapper,
768                          dri2_surf->wl_queue);
769       dri2_surf->wl_dmabuf_feedback = zwp_linux_dmabuf_v1_get_surface_feedback(
770          dmabuf_wrapper, dri2_surf->wl_surface_wrapper);
771       wl_proxy_wrapper_destroy(dmabuf_wrapper);
772 
773       zwp_linux_dmabuf_feedback_v1_add_listener(
774          dri2_surf->wl_dmabuf_feedback, &surface_dmabuf_feedback_listener,
775          dri2_surf);
776 
777       if (dmabuf_feedback_init(&dri2_surf->pending_dmabuf_feedback) < 0) {
778          zwp_linux_dmabuf_feedback_v1_destroy(dri2_surf->wl_dmabuf_feedback);
779          goto cleanup_surf_wrapper;
780       }
781       if (dmabuf_feedback_init(&dri2_surf->dmabuf_feedback) < 0) {
782          dmabuf_feedback_fini(&dri2_surf->pending_dmabuf_feedback);
783          zwp_linux_dmabuf_feedback_v1_destroy(dri2_surf->wl_dmabuf_feedback);
784          goto cleanup_surf_wrapper;
785       }
786 
787       if (roundtrip(dri2_dpy) < 0)
788          goto cleanup_dmabuf_feedback;
789    }
790 
791    dri2_surf->wl_win = window;
792    dri2_surf->wl_win->driver_private = dri2_surf;
793    dri2_surf->wl_win->destroy_window_callback = destroy_window_callback;
794    if (!dri2_dpy->swrast_not_kms)
795       dri2_surf->wl_win->resize_callback = resize_callback;
796 
797    if (!dri2_create_drawable(dri2_dpy, config, dri2_surf, dri2_surf))
798       goto cleanup_dmabuf_feedback;
799 
800    dri2_surf->base.SwapInterval = dri2_dpy->default_swap_interval;
801 
802    return &dri2_surf->base;
803 
804 cleanup_dmabuf_feedback:
805    if (dri2_surf->wl_dmabuf_feedback) {
806       zwp_linux_dmabuf_feedback_v1_destroy(dri2_surf->wl_dmabuf_feedback);
807       dmabuf_feedback_fini(&dri2_surf->dmabuf_feedback);
808       dmabuf_feedback_fini(&dri2_surf->pending_dmabuf_feedback);
809    }
810 cleanup_surf_wrapper:
811    wl_proxy_wrapper_destroy(dri2_surf->wl_surface_wrapper);
812 cleanup_dpy_wrapper:
813    wl_proxy_wrapper_destroy(dri2_surf->wl_dpy_wrapper);
814 cleanup_drm:
815    if (dri2_surf->wl_drm_wrapper)
816       wl_proxy_wrapper_destroy(dri2_surf->wl_drm_wrapper);
817 cleanup_queue:
818    wl_event_queue_destroy(dri2_surf->wl_queue);
819 cleanup_surf:
820    free(dri2_surf);
821 
822    return NULL;
823 }
824 
825 static _EGLSurface *
dri2_wl_create_pixmap_surface(_EGLDisplay * disp,_EGLConfig * conf,void * native_window,const EGLint * attrib_list)826 dri2_wl_create_pixmap_surface(_EGLDisplay *disp, _EGLConfig *conf,
827                               void *native_window, const EGLint *attrib_list)
828 {
829    /* From the EGL_EXT_platform_wayland spec, version 3:
830     *
831     *   It is not valid to call eglCreatePlatformPixmapSurfaceEXT with a <dpy>
832     *   that belongs to Wayland. Any such call fails and generates
833     *   EGL_BAD_PARAMETER.
834     */
835    _eglError(EGL_BAD_PARAMETER, "cannot create EGL pixmap surfaces on "
836                                 "Wayland");
837    return NULL;
838 }
839 
840 /**
841  * Called via eglDestroySurface(), drv->DestroySurface().
842  */
843 static EGLBoolean
dri2_wl_destroy_surface(_EGLDisplay * disp,_EGLSurface * surf)844 dri2_wl_destroy_surface(_EGLDisplay *disp, _EGLSurface *surf)
845 {
846    struct dri2_egl_surface *dri2_surf = dri2_egl_surface(surf);
847 
848    driDestroyDrawable(dri2_surf->dri_drawable);
849 
850    for (int i = 0; i < ARRAY_SIZE(dri2_surf->color_buffers); i++) {
851       if (dri2_surf->color_buffers[i].wl_buffer)
852          wl_buffer_destroy(dri2_surf->color_buffers[i].wl_buffer);
853       if (dri2_surf->color_buffers[i].dri_image)
854          dri2_destroy_image(dri2_surf->color_buffers[i].dri_image);
855       if (dri2_surf->color_buffers[i].linear_copy)
856          dri2_destroy_image(dri2_surf->color_buffers[i].linear_copy);
857       if (dri2_surf->color_buffers[i].data)
858          munmap(dri2_surf->color_buffers[i].data,
859                 dri2_surf->color_buffers[i].data_size);
860    }
861 
862    if (dri2_surf->throttle_callback)
863       wl_callback_destroy(dri2_surf->throttle_callback);
864 
865    if (dri2_surf->wl_win) {
866       dri2_surf->wl_win->driver_private = NULL;
867       dri2_surf->wl_win->resize_callback = NULL;
868       dri2_surf->wl_win->destroy_window_callback = NULL;
869    }
870 
871    wl_proxy_wrapper_destroy(dri2_surf->wl_surface_wrapper);
872    wl_proxy_wrapper_destroy(dri2_surf->wl_dpy_wrapper);
873    if (dri2_surf->wl_drm_wrapper)
874       wl_proxy_wrapper_destroy(dri2_surf->wl_drm_wrapper);
875    if (dri2_surf->wl_dmabuf_feedback) {
876       zwp_linux_dmabuf_feedback_v1_destroy(dri2_surf->wl_dmabuf_feedback);
877       dmabuf_feedback_fini(&dri2_surf->dmabuf_feedback);
878       dmabuf_feedback_fini(&dri2_surf->pending_dmabuf_feedback);
879    }
880    wl_event_queue_destroy(dri2_surf->wl_queue);
881 
882    dri2_fini_surface(surf);
883    free(surf);
884 
885    return EGL_TRUE;
886 }
887 
888 static EGLBoolean
dri2_wl_swap_interval(_EGLDisplay * disp,_EGLSurface * surf,EGLint interval)889 dri2_wl_swap_interval(_EGLDisplay *disp, _EGLSurface *surf, EGLint interval)
890 {
891    struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
892    struct dri2_egl_surface *dri2_surf = dri2_egl_surface(surf);
893 
894    if (dri2_dpy->kopper)
895       kopperSetSwapInterval(dri2_surf->dri_drawable, interval);
896 
897    return EGL_TRUE;
898 }
899 
900 static void
dri2_wl_release_buffers(struct dri2_egl_surface * dri2_surf)901 dri2_wl_release_buffers(struct dri2_egl_surface *dri2_surf)
902 {
903    for (int i = 0; i < ARRAY_SIZE(dri2_surf->color_buffers); i++) {
904       if (dri2_surf->color_buffers[i].wl_buffer) {
905          if (dri2_surf->color_buffers[i].locked) {
906             dri2_surf->color_buffers[i].wl_release = true;
907          } else {
908             wl_buffer_destroy(dri2_surf->color_buffers[i].wl_buffer);
909             dri2_surf->color_buffers[i].wl_buffer = NULL;
910          }
911       }
912       if (dri2_surf->color_buffers[i].dri_image)
913          dri2_destroy_image(dri2_surf->color_buffers[i].dri_image);
914       if (dri2_surf->color_buffers[i].linear_copy)
915          dri2_destroy_image(dri2_surf->color_buffers[i].linear_copy);
916       if (dri2_surf->color_buffers[i].data)
917          munmap(dri2_surf->color_buffers[i].data,
918                 dri2_surf->color_buffers[i].data_size);
919 
920       dri2_surf->color_buffers[i].dri_image = NULL;
921       dri2_surf->color_buffers[i].linear_copy = NULL;
922       dri2_surf->color_buffers[i].data = NULL;
923       dri2_surf->color_buffers[i].age = 0;
924    }
925 }
926 
927 /* Return list of modifiers that should be used to restrict the list of
928  * modifiers actually supported by the surface. As of now, it is only used
929  * to get the set of modifiers used for fixed-rate compression. */
930 static uint64_t *
get_surface_specific_modifiers(struct dri2_egl_surface * dri2_surf,int * modifiers_count)931 get_surface_specific_modifiers(struct dri2_egl_surface *dri2_surf,
932                                int *modifiers_count)
933 {
934    struct dri2_egl_display *dri2_dpy =
935       dri2_egl_display(dri2_surf->base.Resource.Display);
936    int rate = dri2_surf->base.CompressionRate;
937    uint64_t *modifiers;
938 
939    if (rate == EGL_SURFACE_COMPRESSION_FIXED_RATE_NONE_EXT ||
940        !dri2_surf->wl_win)
941       return NULL;
942 
943    if (!dri2_query_compression_modifiers(
944           dri2_dpy->dri_screen_render_gpu, dri2_surf->format, rate,
945           0, NULL, modifiers_count))
946       return NULL;
947 
948    modifiers = malloc(*modifiers_count * sizeof(uint64_t));
949    if (!modifiers)
950       return NULL;
951 
952    if (!dri2_query_compression_modifiers(
953           dri2_dpy->dri_screen_render_gpu, dri2_surf->format, rate,
954           *modifiers_count, modifiers, modifiers_count)) {
955       free(modifiers);
956       return NULL;
957    }
958 
959    return modifiers;
960 }
961 
962 static void
update_surface(struct dri2_egl_surface * dri2_surf,__DRIimage * dri_img)963 update_surface(struct dri2_egl_surface *dri2_surf, __DRIimage *dri_img)
964 {
965    int compression_rate;
966 
967    if (!dri_img)
968       return;
969 
970    /* Update the surface with the actual compression rate */
971    dri2_query_image(dri_img, __DRI_IMAGE_ATTRIB_COMPRESSION_RATE,
972                                &compression_rate);
973    dri2_surf->base.CompressionRate = compression_rate;
974 }
975 
976 static bool
intersect_modifiers(struct u_vector * subset,struct u_vector * set,uint64_t * other_modifiers,int other_modifiers_count)977 intersect_modifiers(struct u_vector *subset, struct u_vector *set,
978                     uint64_t *other_modifiers, int other_modifiers_count)
979 {
980    if (!u_vector_init_pow2(subset, 4, sizeof(uint64_t)))
981       return false;
982 
983    uint64_t *modifier_ptr, *mod;
984    u_vector_foreach(mod, set) {
985       for (int i = 0; i < other_modifiers_count; ++i) {
986          if (other_modifiers[i] != *mod)
987             continue;
988          modifier_ptr = u_vector_add(subset);
989          if (modifier_ptr)
990             *modifier_ptr = *mod;
991       }
992    }
993 
994    return true;
995 }
996 
997 static void
create_dri_image(struct dri2_egl_surface * dri2_surf,enum pipe_format pipe_format,uint32_t use_flags,uint64_t * surf_modifiers,int surf_modifiers_count,struct dri2_wl_formats * formats)998 create_dri_image(struct dri2_egl_surface *dri2_surf,
999                  enum pipe_format pipe_format, uint32_t use_flags,
1000                  uint64_t *surf_modifiers, int surf_modifiers_count,
1001                  struct dri2_wl_formats *formats)
1002 {
1003    struct dri2_egl_display *dri2_dpy =
1004       dri2_egl_display(dri2_surf->base.Resource.Display);
1005    int visual_idx = dri2_wl_visual_idx_from_fourcc(dri2_surf->format);
1006    struct u_vector modifiers_subset;
1007    struct u_vector modifiers_subset_opaque;
1008    uint64_t *modifiers;
1009    unsigned int num_modifiers;
1010    struct u_vector *modifiers_present;
1011 
1012    assert(visual_idx != -1);
1013 
1014    if (dri2_surf->base.PresentOpaque) {
1015       uint32_t opaque_fourcc =
1016             dri2_wl_visuals[visual_idx].opaque_wl_drm_format;
1017       int opaque_visual_idx = dri2_wl_visual_idx_from_fourcc(opaque_fourcc);
1018       struct u_vector *modifiers_dpy = &dri2_dpy->formats.modifiers[visual_idx];
1019       /* Surface creation would have failed if we didn't support the matching
1020        * opaque format. */
1021       assert(opaque_visual_idx != -1);
1022 
1023       if (!BITSET_TEST(formats->formats_bitmap, opaque_visual_idx))
1024          return;
1025 
1026       if (!intersect_modifiers(&modifiers_subset_opaque,
1027                                &formats->modifiers[opaque_visual_idx],
1028                                u_vector_tail(modifiers_dpy),
1029                                u_vector_length(modifiers_dpy)))
1030          return;
1031 
1032       modifiers_present = &modifiers_subset_opaque;
1033    } else {
1034       if (!BITSET_TEST(formats->formats_bitmap, visual_idx))
1035          return;
1036       modifiers_present = &formats->modifiers[visual_idx];
1037    }
1038 
1039    if (surf_modifiers_count > 0) {
1040       if (!intersect_modifiers(&modifiers_subset, modifiers_present,
1041                                surf_modifiers, surf_modifiers_count))
1042          goto cleanup_present;
1043       modifiers = u_vector_tail(&modifiers_subset);
1044       num_modifiers = u_vector_length(&modifiers_subset);
1045    } else {
1046       modifiers = u_vector_tail(modifiers_present);
1047       num_modifiers = u_vector_length(modifiers_present);
1048    }
1049 
1050    /* For the purposes of this function, an INVALID modifier on
1051     * its own means the modifiers aren't supported. */
1052    if (num_modifiers == 0 ||
1053        (num_modifiers == 1 && modifiers[0] == DRM_FORMAT_MOD_INVALID)) {
1054       num_modifiers = 0;
1055       modifiers = NULL;
1056    }
1057 
1058    dri2_surf->back->dri_image = dri_create_image_with_modifiers(
1059       dri2_dpy->dri_screen_render_gpu, dri2_surf->base.Width,
1060       dri2_surf->base.Height, pipe_format,
1061       (dri2_dpy->fd_render_gpu != dri2_dpy->fd_display_gpu) ? 0 : use_flags,
1062       modifiers, num_modifiers, NULL);
1063 
1064    if (surf_modifiers_count > 0) {
1065       u_vector_finish(&modifiers_subset);
1066       update_surface(dri2_surf, dri2_surf->back->dri_image);
1067    }
1068 
1069 cleanup_present:
1070    if (modifiers_present == &modifiers_subset_opaque)
1071       u_vector_finish(&modifiers_subset_opaque);
1072 }
1073 
1074 static void
create_dri_image_from_dmabuf_feedback(struct dri2_egl_surface * dri2_surf,enum pipe_format pipe_format,uint32_t use_flags,uint64_t * surf_modifiers,int surf_modifiers_count)1075 create_dri_image_from_dmabuf_feedback(struct dri2_egl_surface *dri2_surf,
1076                                       enum pipe_format pipe_format,
1077                                       uint32_t use_flags,
1078                                       uint64_t *surf_modifiers,
1079                                       int surf_modifiers_count)
1080 {
1081    uint32_t flags;
1082 
1083    /* We don't have valid dma-buf feedback, so return */
1084    if (dri2_surf->dmabuf_feedback.main_device == 0)
1085       return;
1086 
1087    /* Iterates through the dma-buf feedback to pick a new set of modifiers. The
1088     * tranches are sent in descending order of preference by the compositor, so
1089     * the first set that we can pick is the best one. For now we still can't
1090     * specify the target device in order to make the render device try its best
1091     * to allocate memory that can be directly scanned out by the KMS device. But
1092     * in the future this may change (newer versions of
1093     * createImageWithModifiers). Also, we are safe to pick modifiers from
1094     * tranches whose target device differs from the main device, as compositors
1095     * do not expose (in dma-buf feedback tranches) formats/modifiers that are
1096     * incompatible with the main device. */
1097    util_dynarray_foreach (&dri2_surf->dmabuf_feedback.tranches,
1098                           struct dmabuf_feedback_tranche, tranche) {
1099       flags = use_flags;
1100       if (tranche->flags & ZWP_LINUX_DMABUF_FEEDBACK_V1_TRANCHE_FLAGS_SCANOUT)
1101          flags |= __DRI_IMAGE_USE_SCANOUT;
1102 
1103       create_dri_image(dri2_surf, pipe_format, flags, surf_modifiers,
1104                        surf_modifiers_count, &tranche->formats);
1105 
1106       if (dri2_surf->back->dri_image)
1107          return;
1108    }
1109 }
1110 
1111 static void
create_dri_image_from_formats(struct dri2_egl_surface * dri2_surf,enum pipe_format pipe_format,uint32_t use_flags,uint64_t * surf_modifiers,int surf_modifiers_count)1112 create_dri_image_from_formats(struct dri2_egl_surface *dri2_surf,
1113                               enum pipe_format pipe_format, uint32_t use_flags,
1114                               uint64_t *surf_modifiers,
1115                               int surf_modifiers_count)
1116 {
1117    struct dri2_egl_display *dri2_dpy =
1118       dri2_egl_display(dri2_surf->base.Resource.Display);
1119    create_dri_image(dri2_surf, pipe_format, use_flags, surf_modifiers,
1120                     surf_modifiers_count, &dri2_dpy->formats);
1121 }
1122 
1123 static int
get_back_bo(struct dri2_egl_surface * dri2_surf)1124 get_back_bo(struct dri2_egl_surface *dri2_surf)
1125 {
1126    struct dri2_egl_display *dri2_dpy =
1127       dri2_egl_display(dri2_surf->base.Resource.Display);
1128    int use_flags;
1129    int visual_idx;
1130    unsigned int pipe_format;
1131    unsigned int linear_pipe_format;
1132 
1133    visual_idx = dri2_wl_visual_idx_from_fourcc(dri2_surf->format);
1134    assert(visual_idx != -1);
1135    pipe_format = dri2_wl_visuals[visual_idx].pipe_format;
1136    linear_pipe_format = pipe_format;
1137 
1138    /* Substitute dri image format if server does not support original format */
1139    if (!BITSET_TEST(dri2_dpy->formats.formats_bitmap, visual_idx))
1140       linear_pipe_format = dri2_wl_visuals[visual_idx].alt_pipe_format;
1141 
1142    /* These asserts hold, as long as dri2_wl_visuals[] is self-consistent and
1143     * the PRIME substitution logic in dri2_wl_add_configs_for_visuals() is free
1144     * of bugs.
1145     */
1146    assert(linear_pipe_format != PIPE_FORMAT_NONE);
1147    assert(BITSET_TEST(
1148       dri2_dpy->formats.formats_bitmap,
1149       dri2_wl_visual_idx_from_pipe_format(linear_pipe_format)));
1150 
1151    /* There might be a buffer release already queued that wasn't processed */
1152    wl_display_dispatch_queue_pending(dri2_dpy->wl_dpy, dri2_surf->wl_queue);
1153 
1154    while (dri2_surf->back == NULL) {
1155       for (int i = 0; i < ARRAY_SIZE(dri2_surf->color_buffers); i++) {
1156          /* Get an unlocked buffer, preferably one with a dri_buffer
1157           * already allocated and with minimum age.
1158           */
1159          if (dri2_surf->color_buffers[i].locked)
1160             continue;
1161 
1162          if (!dri2_surf->back || !dri2_surf->back->dri_image ||
1163              (dri2_surf->color_buffers[i].age > 0 &&
1164               dri2_surf->color_buffers[i].age < dri2_surf->back->age))
1165             dri2_surf->back = &dri2_surf->color_buffers[i];
1166       }
1167 
1168       if (dri2_surf->back)
1169          break;
1170 
1171       /* If we don't have a buffer, then block on the server to release one for
1172        * us, and try again. wl_display_dispatch_queue will process any pending
1173        * events, however not all servers flush on issuing a buffer release
1174        * event. So, we spam the server with roundtrips as they always cause a
1175        * client flush.
1176        */
1177       if (wl_display_roundtrip_queue(dri2_dpy->wl_dpy, dri2_surf->wl_queue) < 0)
1178          return -1;
1179    }
1180 
1181    if (dri2_surf->back == NULL)
1182       return -1;
1183 
1184    use_flags = __DRI_IMAGE_USE_SHARE | __DRI_IMAGE_USE_BACKBUFFER;
1185 
1186    if (dri2_surf->base.ProtectedContent) {
1187       /* Protected buffers can't be read from another GPU */
1188       if (dri2_dpy->fd_render_gpu != dri2_dpy->fd_display_gpu)
1189          return -1;
1190       use_flags |= __DRI_IMAGE_USE_PROTECTED;
1191    }
1192 
1193    if (dri2_dpy->fd_render_gpu != dri2_dpy->fd_display_gpu &&
1194        dri2_surf->back->linear_copy == NULL) {
1195       uint64_t linear_mod = DRM_FORMAT_MOD_LINEAR;
1196       __DRIimage *linear_copy_display_gpu_image = NULL;
1197 
1198       if (dri2_dpy->dri_screen_display_gpu) {
1199          linear_copy_display_gpu_image = dri_create_image_with_modifiers(
1200             dri2_dpy->dri_screen_display_gpu,
1201             dri2_surf->base.Width, dri2_surf->base.Height,
1202             linear_pipe_format, use_flags | __DRI_IMAGE_USE_LINEAR,
1203             &linear_mod, 1, NULL);
1204 
1205          if (linear_copy_display_gpu_image) {
1206             int i, ret = 1;
1207             int fourcc;
1208             int num_planes = 0;
1209             int buffer_fds[4];
1210             int strides[4];
1211             int offsets[4];
1212             unsigned error;
1213 
1214             if (!dri2_query_image(linear_copy_display_gpu_image,
1215                                              __DRI_IMAGE_ATTRIB_NUM_PLANES,
1216                                              &num_planes))
1217                num_planes = 1;
1218 
1219             for (i = 0; i < num_planes; i++) {
1220                __DRIimage *image = dri2_from_planar(
1221                   linear_copy_display_gpu_image, i, NULL);
1222 
1223                if (!image) {
1224                   assert(i == 0);
1225                   image = linear_copy_display_gpu_image;
1226                }
1227 
1228                buffer_fds[i] = -1;
1229                ret &= dri2_query_image(image, __DRI_IMAGE_ATTRIB_FD,
1230                                                   &buffer_fds[i]);
1231                ret &= dri2_query_image(
1232                   image, __DRI_IMAGE_ATTRIB_STRIDE, &strides[i]);
1233                ret &= dri2_query_image(
1234                   image, __DRI_IMAGE_ATTRIB_OFFSET, &offsets[i]);
1235 
1236                if (image != linear_copy_display_gpu_image)
1237                   dri2_destroy_image(image);
1238 
1239                if (!ret) {
1240                   do {
1241                      if (buffer_fds[i] != -1)
1242                         close(buffer_fds[i]);
1243                   } while (--i >= 0);
1244                   dri2_destroy_image(linear_copy_display_gpu_image);
1245                   return -1;
1246                }
1247             }
1248 
1249             ret &= dri2_query_image(linear_copy_display_gpu_image,
1250                                                __DRI_IMAGE_ATTRIB_FOURCC,
1251                                                &fourcc);
1252             if (!ret) {
1253                do {
1254                   if (buffer_fds[i] != -1)
1255                      close(buffer_fds[i]);
1256                } while (--i >= 0);
1257                dri2_destroy_image(linear_copy_display_gpu_image);
1258                return -1;
1259             }
1260 
1261             /* The linear buffer was created in the display GPU's vram, so we
1262              * need to make it visible to render GPU
1263              */
1264             dri2_surf->back->linear_copy =
1265                dri2_from_dma_bufs(
1266                   dri2_dpy->dri_screen_render_gpu,
1267                   dri2_surf->base.Width, dri2_surf->base.Height,
1268                   fourcc, linear_mod,
1269                   &buffer_fds[0], num_planes, &strides[0], &offsets[0],
1270                   __DRI_YUV_COLOR_SPACE_UNDEFINED,
1271                   __DRI_YUV_RANGE_UNDEFINED, __DRI_YUV_CHROMA_SITING_UNDEFINED,
1272                   __DRI_YUV_CHROMA_SITING_UNDEFINED, __DRI_IMAGE_PRIME_LINEAR_BUFFER,
1273                   &error, dri2_surf->back);
1274 
1275             for (i = 0; i < num_planes; ++i) {
1276                if (buffer_fds[i] != -1)
1277                   close(buffer_fds[i]);
1278             }
1279             dri2_destroy_image(linear_copy_display_gpu_image);
1280          }
1281       }
1282 
1283       if (!dri2_surf->back->linear_copy) {
1284          dri2_surf->back->linear_copy = dri_create_image_with_modifiers(
1285             dri2_dpy->dri_screen_render_gpu,
1286             dri2_surf->base.Width, dri2_surf->base.Height,
1287             linear_pipe_format, use_flags | __DRI_IMAGE_USE_LINEAR,
1288             &linear_mod, 1, NULL);
1289       }
1290 
1291       if (dri2_surf->back->linear_copy == NULL)
1292          return -1;
1293    }
1294 
1295    if (dri2_surf->back->dri_image == NULL) {
1296       int modifiers_count = 0;
1297       uint64_t *modifiers =
1298          get_surface_specific_modifiers(dri2_surf, &modifiers_count);
1299 
1300       if (dri2_surf->wl_dmabuf_feedback)
1301          create_dri_image_from_dmabuf_feedback(
1302             dri2_surf, pipe_format, use_flags, modifiers, modifiers_count);
1303       if (dri2_surf->back->dri_image == NULL)
1304          create_dri_image_from_formats(dri2_surf, pipe_format, use_flags,
1305                                        modifiers, modifiers_count);
1306 
1307       free(modifiers);
1308       dri2_surf->back->age = 0;
1309    }
1310 
1311    if (dri2_surf->back->dri_image == NULL)
1312       return -1;
1313 
1314    dri2_surf->back->locked = true;
1315 
1316    return 0;
1317 }
1318 
1319 static void
back_bo_to_dri_buffer(struct dri2_egl_surface * dri2_surf,__DRIbuffer * buffer)1320 back_bo_to_dri_buffer(struct dri2_egl_surface *dri2_surf, __DRIbuffer *buffer)
1321 {
1322    __DRIimage *image;
1323    int name, pitch;
1324 
1325    image = dri2_surf->back->dri_image;
1326 
1327    dri2_query_image(image, __DRI_IMAGE_ATTRIB_NAME, &name);
1328    dri2_query_image(image, __DRI_IMAGE_ATTRIB_STRIDE, &pitch);
1329 
1330    buffer->attachment = __DRI_BUFFER_BACK_LEFT;
1331    buffer->name = name;
1332    buffer->pitch = pitch;
1333    buffer->cpp = 4;
1334    buffer->flags = 0;
1335 }
1336 
1337 /* Value chosen empirically as a compromise between avoiding frequent
1338  * reallocations and extended time of increased memory consumption due to
1339  * unused buffers being kept.
1340  */
1341 #define BUFFER_TRIM_AGE_HYSTERESIS 20
1342 
1343 static int
update_buffers(struct dri2_egl_surface * dri2_surf)1344 update_buffers(struct dri2_egl_surface *dri2_surf)
1345 {
1346    struct dri2_egl_display *dri2_dpy =
1347       dri2_egl_display(dri2_surf->base.Resource.Display);
1348 
1349    if (dri2_surf->wl_win &&
1350        (dri2_surf->base.Width != dri2_surf->wl_win->width ||
1351         dri2_surf->base.Height != dri2_surf->wl_win->height)) {
1352 
1353       dri2_surf->base.Width = dri2_surf->wl_win->width;
1354       dri2_surf->base.Height = dri2_surf->wl_win->height;
1355       dri2_surf->dx = dri2_surf->wl_win->dx;
1356       dri2_surf->dy = dri2_surf->wl_win->dy;
1357    }
1358 
1359    if (dri2_surf->resized || dri2_surf->received_dmabuf_feedback) {
1360       dri2_wl_release_buffers(dri2_surf);
1361       dri2_surf->resized = false;
1362       dri2_surf->received_dmabuf_feedback = false;
1363    }
1364 
1365    if (get_back_bo(dri2_surf) < 0) {
1366       _eglError(EGL_BAD_ALLOC, "failed to allocate color buffer");
1367       return -1;
1368    }
1369 
1370    /* If we have an extra unlocked buffer at this point, we had to do triple
1371     * buffering for a while, but now can go back to just double buffering.
1372     * That means we can free any unlocked buffer now. To avoid toggling between
1373     * going back to double buffering and needing to allocate another buffer too
1374     * fast we let the unneeded buffer sit around for a short while. */
1375    for (int i = 0; i < ARRAY_SIZE(dri2_surf->color_buffers); i++) {
1376       if (!dri2_surf->color_buffers[i].locked &&
1377           dri2_surf->color_buffers[i].wl_buffer &&
1378           dri2_surf->color_buffers[i].age > BUFFER_TRIM_AGE_HYSTERESIS) {
1379          wl_buffer_destroy(dri2_surf->color_buffers[i].wl_buffer);
1380          dri2_destroy_image(dri2_surf->color_buffers[i].dri_image);
1381          if (dri2_dpy->fd_render_gpu != dri2_dpy->fd_display_gpu)
1382             dri2_destroy_image(
1383                dri2_surf->color_buffers[i].linear_copy);
1384          dri2_surf->color_buffers[i].wl_buffer = NULL;
1385          dri2_surf->color_buffers[i].dri_image = NULL;
1386          dri2_surf->color_buffers[i].linear_copy = NULL;
1387          dri2_surf->color_buffers[i].age = 0;
1388       }
1389    }
1390 
1391    return 0;
1392 }
1393 
1394 static int
update_buffers_if_needed(struct dri2_egl_surface * dri2_surf)1395 update_buffers_if_needed(struct dri2_egl_surface *dri2_surf)
1396 {
1397    if (dri2_surf->back != NULL)
1398       return 0;
1399 
1400    return update_buffers(dri2_surf);
1401 }
1402 
1403 static int
image_get_buffers(__DRIdrawable * driDrawable,unsigned int format,uint32_t * stamp,void * loaderPrivate,uint32_t buffer_mask,struct __DRIimageList * buffers)1404 image_get_buffers(__DRIdrawable *driDrawable, unsigned int format,
1405                   uint32_t *stamp, void *loaderPrivate, uint32_t buffer_mask,
1406                   struct __DRIimageList *buffers)
1407 {
1408    struct dri2_egl_surface *dri2_surf = loaderPrivate;
1409 
1410    if (update_buffers_if_needed(dri2_surf) < 0)
1411       return 0;
1412 
1413    buffers->image_mask = __DRI_IMAGE_BUFFER_BACK;
1414    buffers->back = dri2_surf->back->dri_image;
1415 
1416    return 1;
1417 }
1418 
1419 static void
1420 dri2_wl_flush_front_buffer(__DRIdrawable *driDrawable, void *loaderPrivate)
1421 {
1422    (void)driDrawable;
1423    (void)loaderPrivate;
1424 }
1425 
1426 static unsigned
1427 dri2_wl_get_capability(void *loaderPrivate, enum dri_loader_cap cap)
1428 {
1429    switch (cap) {
1430    case DRI_LOADER_CAP_FP16:
1431       return 1;
1432    case DRI_LOADER_CAP_RGBA_ORDERING:
1433       return 1;
1434    default:
1435       return 0;
1436    }
1437 }
1438 
1439 static const __DRIimageLoaderExtension image_loader_extension = {
1440    .base = {__DRI_IMAGE_LOADER, 2},
1441 
1442    .getBuffers = image_get_buffers,
1443    .flushFrontBuffer = dri2_wl_flush_front_buffer,
1444    .getCapability = dri2_wl_get_capability,
1445 };
1446 
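/* Frame/sync callback handler used for throttling; clearing
 * throttle_callback unblocks the swap path. */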
1447 static void
1448 wayland_throttle_callback(void *data, struct wl_callback *callback,
1449                           uint32_t time)
1450 {
1451    struct dri2_egl_surface *dri2_surf = data;
1452 
1453    dri2_surf->throttle_callback = NULL;
1454    wl_callback_destroy(callback);
1455 }
1456 
1457 static const struct wl_callback_listener throttle_listener = {
1458    .done = wayland_throttle_callback,
1459 };
1460 
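/* Wrap a __DRIimage in a wl_buffer, preferring zwp_linux_dmabuf_v1 (with
 * modifiers) and falling back to wl_drm PRIME buffers when dma-buf is not
 * available or the modifier is not advertised by the compositor. */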
1461 static struct wl_buffer *
1462 create_wl_buffer(struct dri2_egl_display *dri2_dpy,
1463                  struct dri2_egl_surface *dri2_surf, __DRIimage *image)
1464 {
1465    struct wl_buffer *ret = NULL;
1466    EGLBoolean query;
1467    int width, height, fourcc, num_planes;
1468    uint64_t modifier = DRM_FORMAT_MOD_INVALID;
1469    int mod_hi, mod_lo;
1470 
1471    query = dri2_query_image(image, __DRI_IMAGE_ATTRIB_WIDTH, &width);
1472    query &=
1473       dri2_query_image(image, __DRI_IMAGE_ATTRIB_HEIGHT, &height);
1474    query &=
1475       dri2_query_image(image, __DRI_IMAGE_ATTRIB_FOURCC, &fourcc);
1476    if (!query)
1477       return NULL;
1478 
1479    query = dri2_query_image(image, __DRI_IMAGE_ATTRIB_NUM_PLANES,
1480                                        &num_planes);
1481    if (!query)
1482       num_planes = 1;
1483 
1484    query = dri2_query_image(image, __DRI_IMAGE_ATTRIB_MODIFIER_UPPER,
1485                                        &mod_hi);
1486    query &= dri2_query_image(
1487       image, __DRI_IMAGE_ATTRIB_MODIFIER_LOWER, &mod_lo);
1488    if (query) {
1489       modifier = combine_u32_into_u64(mod_hi, mod_lo);
1490    }
1491 
1492    bool supported_modifier = false;
1493    bool mod_invalid_supported = false;
1494    int visual_idx = dri2_wl_visual_idx_from_fourcc(fourcc);
1495    assert(visual_idx != -1);
1496 
1497    uint64_t *mod;
1498    u_vector_foreach(mod, &dri2_dpy->formats.modifiers[visual_idx])
1499    {
1500       if (*mod == DRM_FORMAT_MOD_INVALID) {
1501          mod_invalid_supported = true;
1502       }
1503       if (*mod == modifier) {
1504          supported_modifier = true;
1505          break;
1506       }
1507    }
1508    if (!supported_modifier && mod_invalid_supported) {
1509       /* If the server has advertised DRM_FORMAT_MOD_INVALID then we trust
1510        * that the client has allocated the buffer with the right implicit
1511        * modifier for the format, even though it's allocated a buffer the
1512        * server hasn't explicitly claimed to support. */
1513       modifier = DRM_FORMAT_MOD_INVALID;
1514       supported_modifier = true;
1515    }
1516 
1517    if (dri2_dpy->wl_dmabuf && supported_modifier) {
1518       struct zwp_linux_buffer_params_v1 *params;
1519       int i;
1520 
1521       /* We don't need a wrapper for wl_dmabuf objects, because we have to
1522        * create the intermediate params object; we can set the queue on this,
1523        * and the wl_buffer inherits it race-free. */
1524       params = zwp_linux_dmabuf_v1_create_params(dri2_dpy->wl_dmabuf);
1525       if (dri2_surf)
1526          wl_proxy_set_queue((struct wl_proxy *)params, dri2_surf->wl_queue);
1527 
1528       for (i = 0; i < num_planes; i++) {
1529          __DRIimage *p_image;
1530          int stride, offset;
1531          int fd = -1;
1532 
1533          p_image = dri2_from_planar(image, i, NULL);
1534          if (!p_image) {
1535             assert(i == 0);
1536             p_image = image;
1537          }
1538 
1539          query =
1540             dri2_query_image(p_image, __DRI_IMAGE_ATTRIB_FD, &fd);
1541          query &= dri2_query_image(
1542             p_image, __DRI_IMAGE_ATTRIB_STRIDE, &stride);
1543          query &= dri2_query_image(
1544             p_image, __DRI_IMAGE_ATTRIB_OFFSET, &offset);
1545          if (image != p_image)
1546             dri2_destroy_image(p_image);
1547 
1548          if (!query) {
1549             if (fd >= 0)
1550                close(fd);
1551             zwp_linux_buffer_params_v1_destroy(params);
1552             return NULL;
1553          }
1554 
1555          zwp_linux_buffer_params_v1_add(params, fd, i, offset, stride,
1556                                         modifier >> 32, modifier & 0xffffffff);
1557          close(fd);
1558       }
1559 
1560       if (dri2_surf && dri2_surf->base.PresentOpaque)
1561          fourcc = dri2_wl_visuals[visual_idx].opaque_wl_drm_format;
1562 
1563       ret = zwp_linux_buffer_params_v1_create_immed(params, width, height,
1564                                                     fourcc, 0);
1565       zwp_linux_buffer_params_v1_destroy(params);
1566    } else if (dri2_dpy->wl_drm) {
1567       struct wl_drm *wl_drm =
1568          dri2_surf ? dri2_surf->wl_drm_wrapper : dri2_dpy->wl_drm;
1569       int fd = -1, stride;
1570 
1571       /* wl_drm doesn't support explicit modifiers, so ideally we should bail
1572        * out if modifier != DRM_FORMAT_MOD_INVALID. However many drivers will
1573        * return a valid modifier when querying the DRIImage even if a buffer
1574        * was allocated without explicit modifiers.
1575        * XXX: bail out if the buffer was allocated without explicit modifiers
1576        */
1577       if (num_planes > 1)
1578          return NULL;
1579 
1580       query = dri2_query_image(image, __DRI_IMAGE_ATTRIB_FD, &fd);
1581       query &=
1582          dri2_query_image(image, __DRI_IMAGE_ATTRIB_STRIDE, &stride);
1583       if (!query) {
1584          if (fd >= 0)
1585             close(fd);
1586          return NULL;
1587       }
1588 
1589       ret = wl_drm_create_prime_buffer(wl_drm, fd, width, height, fourcc, 0,
1590                                        stride, 0, 0, 0, 0);
1591       close(fd);
1592    }
1593 
1594    return ret;
1595 }
1596 
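/* Forward the client's damage rectangles via wl_surface.damage_buffer,
 * flipping from EGL's bottom-left origin to buffer coordinates. Returns
 * EGL_FALSE when the bound wl_surface is too old for damage_buffer. */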
1597 static EGLBoolean
1598 try_damage_buffer(struct dri2_egl_surface *dri2_surf, const EGLint *rects,
1599                   EGLint n_rects)
1600 {
1601    if (wl_proxy_get_version((struct wl_proxy *)dri2_surf->wl_surface_wrapper) <
1602        WL_SURFACE_DAMAGE_BUFFER_SINCE_VERSION)
1603       return EGL_FALSE;
1604 
1605    for (int i = 0; i < n_rects; i++) {
1606       const int *rect = &rects[i * 4];
1607 
1608       wl_surface_damage_buffer(dri2_surf->wl_surface_wrapper, rect[0],
1609                                dri2_surf->base.Height - rect[1] - rect[3],
1610                                rect[2], rect[3]);
1611    }
1612    return EGL_TRUE;
1613 }
1614 
1615 /**
1616  * Called via eglSwapBuffers(), drv->SwapBuffers().
1617  */
1618 static EGLBoolean
1619 dri2_wl_swap_buffers_with_damage(_EGLDisplay *disp, _EGLSurface *draw,
1620                                  const EGLint *rects, EGLint n_rects)
1621 {
1622    struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
1623    struct dri2_egl_surface *dri2_surf = dri2_egl_surface(draw);
1624 
1625    if (!dri2_surf->wl_win)
1626       return _eglError(EGL_BAD_NATIVE_WINDOW, "dri2_swap_buffers");
1627 
1628    /* Flush (and finish glthread) before:
1629     *   - update_buffers_if_needed because the unmarshalling thread
1630     *     may be running currently, and we would concurrently alloc/free
1631     *     the back bo.
1632     *   - swapping current/back because flushing may free the buffer and
1633     *     dri_image and reallocate them using get_back_bo (which causes a
1634     *     crash because 'current' becomes NULL).
1635     *   - using any wl_* function because accessing them from this thread
1636     *     and glthread causes trouble (see #7624 and #8136)
1637     */
1638    dri2_flush_drawable_for_swapbuffers(disp, draw);
1639    dri_invalidate_drawable(dri2_surf->dri_drawable);
1640 
1641    while (dri2_surf->throttle_callback != NULL)
1642       if (loader_wayland_dispatch(dri2_dpy->wl_dpy, dri2_surf->wl_queue, NULL) ==
1643           -1)
1644          return EGL_FALSE;
1645 
1646    for (int i = 0; i < ARRAY_SIZE(dri2_surf->color_buffers); i++)
1647       if (dri2_surf->color_buffers[i].age > 0)
1648          dri2_surf->color_buffers[i].age++;
1649 
1650    /* Make sure we have a back buffer in case we're swapping without ever
1651     * rendering. */
1652    if (update_buffers_if_needed(dri2_surf) < 0)
1653       return _eglError(EGL_BAD_ALLOC, "dri2_swap_buffers");
1654 
1655    if (draw->SwapInterval > 0) {
1656       dri2_surf->throttle_callback =
1657          wl_surface_frame(dri2_surf->wl_surface_wrapper);
1658       wl_callback_add_listener(dri2_surf->throttle_callback, &throttle_listener,
1659                                dri2_surf);
1660    }
1661 
1662    dri2_surf->back->age = 1;
1663    dri2_surf->current = dri2_surf->back;
1664    dri2_surf->back = NULL;
1665 
1666    if (!dri2_surf->current->wl_buffer) {
1667       __DRIimage *image;
1668 
1669       if (dri2_dpy->fd_render_gpu != dri2_dpy->fd_display_gpu)
1670          image = dri2_surf->current->linear_copy;
1671       else
1672          image = dri2_surf->current->dri_image;
1673 
1674       dri2_surf->current->wl_buffer =
1675          create_wl_buffer(dri2_dpy, dri2_surf, image);
1676 
1677       if (dri2_surf->current->wl_buffer == NULL)
1678          return _eglError(EGL_BAD_ALLOC, "dri2_swap_buffers");
1679 
1680       dri2_surf->current->wl_release = false;
1681 
1682       wl_buffer_add_listener(dri2_surf->current->wl_buffer, &wl_buffer_listener,
1683                              dri2_surf);
1684    }
1685 
1686    wl_surface_attach(dri2_surf->wl_surface_wrapper,
1687                      dri2_surf->current->wl_buffer, dri2_surf->dx,
1688                      dri2_surf->dy);
1689 
1690    dri2_surf->wl_win->attached_width = dri2_surf->base.Width;
1691    dri2_surf->wl_win->attached_height = dri2_surf->base.Height;
1692    /* reset resize growing parameters */
1693    dri2_surf->dx = 0;
1694    dri2_surf->dy = 0;
1695 
1696    /* If the compositor doesn't support damage_buffer, we deliberately
1697     * ignore the damage region and post maximum damage, due to
1698     * https://bugs.freedesktop.org/78190 */
1699    if (!n_rects || !try_damage_buffer(dri2_surf, rects, n_rects))
1700       wl_surface_damage(dri2_surf->wl_surface_wrapper, 0, 0, INT32_MAX,
1701                         INT32_MAX);
1702 
1703    if (dri2_dpy->fd_render_gpu != dri2_dpy->fd_display_gpu) {
1704       _EGLContext *ctx = _eglGetCurrentContext();
1705       struct dri2_egl_context *dri2_ctx = dri2_egl_context(ctx);
1706       __DRIdrawable *dri_drawable = dri2_dpy->vtbl->get_dri_drawable(draw);
1707       dri2_blit_image(
1708          dri2_ctx->dri_context, dri2_surf->current->linear_copy,
1709          dri2_surf->current->dri_image, 0, 0, dri2_surf->base.Width,
1710          dri2_surf->base.Height, 0, 0, dri2_surf->base.Width,
1711          dri2_surf->base.Height, 0);
1712       dri_flush_drawable(dri_drawable);
1713    }
1714 
1715    wl_surface_commit(dri2_surf->wl_surface_wrapper);
1716 
1717    /* If we're not waiting for a frame callback then we'll at least throttle
1718     * to a sync callback so that we always give a chance for the compositor to
1719     * handle the commit and send a release event before checking for a free
1720     * buffer */
1721    if (dri2_surf->throttle_callback == NULL) {
1722       dri2_surf->throttle_callback = wl_display_sync(dri2_surf->wl_dpy_wrapper);
1723       wl_callback_add_listener(dri2_surf->throttle_callback, &throttle_listener,
1724                                dri2_surf);
1725    }
1726 
1727    wl_display_flush(dri2_dpy->wl_dpy);
1728 
1729    return EGL_TRUE;
1730 }
1731 
1732 static EGLint
1733 dri2_wl_query_buffer_age(_EGLDisplay *disp, _EGLSurface *surface)
1734 {
1735    struct dri2_egl_surface *dri2_surf = dri2_egl_surface(surface);
1736 
1737    if (update_buffers_if_needed(dri2_surf) < 0) {
1738       _eglError(EGL_BAD_ALLOC, "dri2_query_buffer_age");
1739       return -1;
1740    }
1741 
1742    return dri2_surf->back->age;
1743 }
1744 
1745 static EGLBoolean
1746 dri2_wl_swap_buffers(_EGLDisplay *disp, _EGLSurface *draw)
1747 {
1748    return dri2_wl_swap_buffers_with_damage(disp, draw, NULL, 0);
1749 }
1750 
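/* EGL_WL_create_wayland_buffer_from_image: export an EGLImage as a wl_buffer
 * and move it onto the display's default event queue for the application. */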
1751 static struct wl_buffer *
1752 dri2_wl_create_wayland_buffer_from_image(_EGLDisplay *disp, _EGLImage *img)
1753 {
1754    struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
1755    struct dri2_egl_image *dri2_img = dri2_egl_image(img);
1756    __DRIimage *image = dri2_img->dri_image;
1757    struct wl_buffer *buffer;
1758    int fourcc;
1759 
1760    /* Check the upstream display supports this buffer's format. */
1761    dri2_query_image(image, __DRI_IMAGE_ATTRIB_FOURCC, &fourcc);
1762    if (!server_supports_fourcc(&dri2_dpy->formats, fourcc))
1763       goto bad_format;
1764 
1765    buffer = create_wl_buffer(dri2_dpy, NULL, image);
1766 
1767    /* The buffer object will have been created with our internal event queue
1768     * because it is using wl_dmabuf/wl_drm as a proxy factory. We want the
1769     * buffer to be used by the application so we'll reset it to the display's
1770     * default event queue. This isn't actually racy, as the only event the
1771     * buffer can get is a buffer release, which doesn't happen with an explicit
1772     * attach. */
1773    if (buffer)
1774       wl_proxy_set_queue((struct wl_proxy *)buffer, NULL);
1775 
1776    return buffer;
1777 
1778 bad_format:
1779    _eglError(EGL_BAD_MATCH, "unsupported image format");
1780    return NULL;
1781 }
1782 
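/* Authenticate a client-provided DRM magic with the compositor through
 * wl_drm; render nodes need no authentication, so only a warning is logged
 * in that case. */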
1783 static int
1784 dri2_wl_authenticate(_EGLDisplay *disp, uint32_t id)
1785 {
1786    struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
1787    int ret = 0;
1788 
1789    if (dri2_dpy->is_render_node) {
1790       _eglLog(_EGL_WARNING, "wayland-egl: client asks server to "
1791                             "authenticate for render-nodes");
1792       return 0;
1793    }
1794    dri2_dpy->authenticated = false;
1795 
1796    wl_drm_authenticate(dri2_dpy->wl_drm, id);
1797    if (roundtrip(dri2_dpy) < 0)
1798       ret = -1;
1799 
1800    if (!dri2_dpy->authenticated)
1801       ret = -1;
1802 
1803    /* reset authenticated */
1804    dri2_dpy->authenticated = true;
1805 
1806    return ret;
1807 }
1808 
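/* wl_drm "device" event: open the advertised DRM node; render nodes need no
 * authentication, otherwise kick off the drm magic handshake. */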
1809 static void
1810 drm_handle_device(void *data, struct wl_drm *drm, const char *device)
1811 {
1812    struct dri2_egl_display *dri2_dpy = data;
1813    drm_magic_t magic;
1814 
1815    dri2_dpy->device_name = strdup(device);
1816    if (!dri2_dpy->device_name)
1817       return;
1818 
1819    dri2_dpy->fd_render_gpu = loader_open_device(dri2_dpy->device_name);
1820    if (dri2_dpy->fd_render_gpu == -1) {
1821       _eglLog(_EGL_WARNING, "wayland-egl: could not open %s (%s)",
1822               dri2_dpy->device_name, strerror(errno));
1823       free(dri2_dpy->device_name);
1824       dri2_dpy->device_name = NULL;
1825       return;
1826    }
1827 
1828    if (drmGetNodeTypeFromFd(dri2_dpy->fd_render_gpu) == DRM_NODE_RENDER) {
1829       dri2_dpy->authenticated = true;
1830    } else {
1831       if (drmGetMagic(dri2_dpy->fd_render_gpu, &magic)) {
1832          close(dri2_dpy->fd_render_gpu);
1833          dri2_dpy->fd_render_gpu = -1;
1834          free(dri2_dpy->device_name);
1835          dri2_dpy->device_name = NULL;
1836          _eglLog(_EGL_WARNING, "wayland-egl: drmGetMagic failed");
1837          return;
1838       }
1839       wl_drm_authenticate(dri2_dpy->wl_drm, magic);
1840    }
1841 }
1842 
1843 static void
1844 drm_handle_format(void *data, struct wl_drm *drm, uint32_t format)
1845 {
1846    struct dri2_egl_display *dri2_dpy = data;
1847    int visual_idx = dri2_wl_visual_idx_from_fourcc(format);
1848 
1849    if (visual_idx == -1)
1850       return;
1851 
1852    BITSET_SET(dri2_dpy->formats.formats_bitmap, visual_idx);
1853 }
1854 
1855 static void
1856 drm_handle_capabilities(void *data, struct wl_drm *drm, uint32_t value)
1857 {
1858    struct dri2_egl_display *dri2_dpy = data;
1859 
1860    dri2_dpy->capabilities = value;
1861 }
1862 
1863 static void
1864 drm_handle_authenticated(void *data, struct wl_drm *drm)
1865 {
1866    struct dri2_egl_display *dri2_dpy = data;
1867 
1868    dri2_dpy->authenticated = true;
1869 }
1870 
1871 static const struct wl_drm_listener drm_listener = {
1872    .device = drm_handle_device,
1873    .format = drm_handle_format,
1874    .authenticated = drm_handle_authenticated,
1875    .capabilities = drm_handle_capabilities,
1876 };
1877 
1878 static void
1879 dmabuf_ignore_format(void *data, struct zwp_linux_dmabuf_v1 *dmabuf,
1880                      uint32_t format)
1881 {
1882    /* formats are implicitly advertised by the 'modifier' event, so ignore */
1883 }
1884 
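/* zwp_linux_dmabuf_v1 "modifier" event (pre-feedback compositors): record the
 * advertised format and modifier unless dma-buf feedback is in use. */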
1885 static void
1886 dmabuf_handle_modifier(void *data, struct zwp_linux_dmabuf_v1 *dmabuf,
1887                        uint32_t format, uint32_t modifier_hi,
1888                        uint32_t modifier_lo)
1889 {
1890    struct dri2_egl_display *dri2_dpy = data;
1891    int visual_idx = dri2_wl_visual_idx_from_fourcc(format);
1892    uint64_t *mod;
1893 
1894    /* Ignore this if the compositor advertised dma-buf feedback. From version 4
1895     * onwards (when dma-buf feedback was introduced), the compositor should not
1896     * advertise this event anymore, but let's keep this for safety. */
1897    if (dri2_dpy->wl_dmabuf_feedback)
1898       return;
1899 
1900    if (visual_idx == -1)
1901       return;
1902 
1903    BITSET_SET(dri2_dpy->formats.formats_bitmap, visual_idx);
1904 
1905    mod = u_vector_add(&dri2_dpy->formats.modifiers[visual_idx]);
1906    if (mod)
1907       *mod = combine_u32_into_u64(modifier_hi, modifier_lo);
1908 }
1909 
1910 static const struct zwp_linux_dmabuf_v1_listener dmabuf_listener = {
1911    .format = dmabuf_ignore_format,
1912    .modifier = dmabuf_handle_modifier,
1913 };
1914 
1915 static void
1916 wl_drm_bind(struct dri2_egl_display *dri2_dpy)
1917 {
1918    dri2_dpy->wl_drm =
1919       wl_registry_bind(dri2_dpy->wl_registry, dri2_dpy->wl_drm_name,
1920                        &wl_drm_interface, dri2_dpy->wl_drm_version);
1921    wl_drm_add_listener(dri2_dpy->wl_drm, &drm_listener, dri2_dpy);
1922 }
1923 
1924 static void
1925 default_dmabuf_feedback_format_table(
1926    void *data,
1927    struct zwp_linux_dmabuf_feedback_v1 *zwp_linux_dmabuf_feedback_v1,
1928    int32_t fd, uint32_t size)
1929 {
1930    struct dri2_egl_display *dri2_dpy = data;
1931 
1932    dri2_dpy->format_table.size = size;
1933    dri2_dpy->format_table.data =
1934       mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
1935 
1936    close(fd);
1937 }
1938 
1939 static void
1940 default_dmabuf_feedback_main_device(
1941    void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
1942    struct wl_array *device)
1943 {
1944    struct dri2_egl_display *dri2_dpy = data;
1945    char *node;
1946    int fd;
1947    dev_t dev;
1948 
1949    /* Given the device, look for a render node and try to open it. */
1950    memcpy(&dev, device->data, sizeof(dev));
1951    node = loader_get_render_node(dev);
1952    if (!node)
1953       return;
1954    fd = loader_open_device(node);
1955    if (fd == -1) {
1956       free(node);
1957       return;
1958    }
1959 
1960    dri2_dpy->device_name = node;
1961    dri2_dpy->fd_render_gpu = fd;
1962    dri2_dpy->authenticated = true;
1963 }
1964 
1965 static void
1966 default_dmabuf_feedback_tranche_target_device(
1967    void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
1968    struct wl_array *device)
1969 {
1970    /* ignore this event */
1971 }
1972 
1973 static void
1974 default_dmabuf_feedback_tranche_flags(
1975    void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
1976    uint32_t flags)
1977 {
1978    /* ignore this event */
1979 }
1980 
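/* dma-buf feedback "tranche_formats" event: resolve each index through the
 * mmap'ed format table and record the supported format/modifier pairs. */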
1981 static void
1982 default_dmabuf_feedback_tranche_formats(
1983    void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
1984    struct wl_array *indices)
1985 {
1986    struct dri2_egl_display *dri2_dpy = data;
1987    uint64_t *modifier_ptr, modifier;
1988    uint32_t format;
1989    uint16_t *index;
1990    int visual_idx;
1991 
1992    if (dri2_dpy->format_table.data == MAP_FAILED) {
1993       _eglLog(_EGL_WARNING, "wayland-egl: we could not map the format table "
1994                             "so we won't be able to use this batch of dma-buf "
1995                             "feedback events.");
1996       return;
1997    }
1998    if (dri2_dpy->format_table.data == NULL) {
1999       _eglLog(_EGL_WARNING,
2000               "wayland-egl: compositor didn't advertise a format "
2001               "table, so we won't be able to use this batch of dma-buf "
2002               "feedback events.");
2003       return;
2004    }
2005 
2006    wl_array_for_each (index, indices) {
2007       format = dri2_dpy->format_table.data[*index].format;
2008       modifier = dri2_dpy->format_table.data[*index].modifier;
2009 
2010       /* skip formats that we don't support */
2011       visual_idx = dri2_wl_visual_idx_from_fourcc(format);
2012       if (visual_idx == -1)
2013          continue;
2014 
2015       BITSET_SET(dri2_dpy->formats.formats_bitmap, visual_idx);
2016       modifier_ptr = u_vector_add(&dri2_dpy->formats.modifiers[visual_idx]);
2017       if (modifier_ptr)
2018          *modifier_ptr = modifier;
2019    }
2020 }
2021 
2022 static void
2023 default_dmabuf_feedback_tranche_done(
2024    void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
2025 {
2026    /* ignore this event */
2027 }
2028 
2029 static void
2030 default_dmabuf_feedback_done(
2031    void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
2032 {
2033    /* ignore this event */
2034 }
2035 
2036 static const struct zwp_linux_dmabuf_feedback_v1_listener
2037    dmabuf_feedback_listener = {
2038       .format_table = default_dmabuf_feedback_format_table,
2039       .main_device = default_dmabuf_feedback_main_device,
2040       .tranche_target_device = default_dmabuf_feedback_tranche_target_device,
2041       .tranche_flags = default_dmabuf_feedback_tranche_flags,
2042       .tranche_formats = default_dmabuf_feedback_tranche_formats,
2043       .tranche_done = default_dmabuf_feedback_tranche_done,
2044       .done = default_dmabuf_feedback_done,
2045 };
2046 
2047 static void
2048 registry_handle_global_drm(void *data, struct wl_registry *registry,
2049                            uint32_t name, const char *interface,
2050                            uint32_t version)
2051 {
2052    struct dri2_egl_display *dri2_dpy = data;
2053 
2054    if (strcmp(interface, wl_drm_interface.name) == 0) {
2055       dri2_dpy->wl_drm_version = MIN2(version, 2);
2056       dri2_dpy->wl_drm_name = name;
2057    } else if (strcmp(interface, zwp_linux_dmabuf_v1_interface.name) == 0 &&
2058               version >= 3) {
2059       dri2_dpy->wl_dmabuf = wl_registry_bind(
2060          registry, name, &zwp_linux_dmabuf_v1_interface,
2061          MIN2(version, ZWP_LINUX_DMABUF_V1_GET_DEFAULT_FEEDBACK_SINCE_VERSION));
2062       zwp_linux_dmabuf_v1_add_listener(dri2_dpy->wl_dmabuf, &dmabuf_listener,
2063                                        dri2_dpy);
2064    }
2065 }
2066 
2067 static void
2068 registry_handle_global_remove(void *data, struct wl_registry *registry,
2069                               uint32_t name)
2070 {
2071 }
2072 
2073 static const struct wl_registry_listener registry_listener_drm = {
2074    .global = registry_handle_global_drm,
2075    .global_remove = registry_handle_global_remove,
2076 };
2077 
2078 static void
2079 dri2_wl_setup_swap_interval(_EGLDisplay *disp)
2080 {
2081    /* We can't use values greater than 1 on Wayland because we are using the
2082     * frame callback to synchronise the frame and the only way we can be sure
2083     * to get a frame callback is to attach a new buffer. Therefore we can't
2084     * just sit drawing nothing to wait until the next ‘n’ frame callbacks */
2085 
2086    dri2_setup_swap_interval(disp, 1);
2087 }
2088 
2089 static const struct dri2_egl_display_vtbl dri2_wl_display_vtbl = {
2090    .authenticate = dri2_wl_authenticate,
2091    .create_window_surface = dri2_wl_create_window_surface,
2092    .create_pixmap_surface = dri2_wl_create_pixmap_surface,
2093    .destroy_surface = dri2_wl_destroy_surface,
2094    .swap_interval = dri2_wl_swap_interval,
2095    .create_image = dri2_create_image_khr,
2096    .swap_buffers = dri2_wl_swap_buffers,
2097    .swap_buffers_with_damage = dri2_wl_swap_buffers_with_damage,
2098    .query_buffer_age = dri2_wl_query_buffer_age,
2099    .create_wayland_buffer_from_image = dri2_wl_create_wayland_buffer_from_image,
2100    .get_dri_drawable = dri2_surface_get_dri_drawable,
2101 };
2102 
2103 static const __DRIextension *dri2_loader_extensions[] = {
2104    &image_loader_extension.base,
2105    &image_lookup_extension.base,
2106    &use_invalidate.base,
2107    NULL,
2108 };
2109 
2110 static void
2111 dri2_wl_add_configs_for_visuals(_EGLDisplay *disp)
2112 {
2113    struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
2114    unsigned int format_count[ARRAY_SIZE(dri2_wl_visuals)] = {0};
2115 
2116    /* Try to create an EGLConfig for every config the driver declares */
2117    for (unsigned i = 0; dri2_dpy->driver_configs[i]; i++) {
2118       struct dri2_egl_config *dri2_conf;
2119       bool conversion = false;
2120       int idx = dri2_wl_visual_idx_from_config(dri2_dpy->driver_configs[i]);
2121 
2122       if (idx < 0)
2123          continue;
2124 
2125       /* Check if the server natively supports the colour buffer format */
2126       if (!server_supports_format(&dri2_dpy->formats, idx)) {
2127          /* In multi-GPU scenarios, we usually have a different buffer, so a
2128           * format conversion is easy compared to the overhead of the copy */
2129          if (dri2_dpy->fd_render_gpu == dri2_dpy->fd_display_gpu)
2130             continue;
2131 
2132          /* Check if the server supports the alternate format */
2133          if (!server_supports_pipe_format(&dri2_dpy->formats,
2134                                           dri2_wl_visuals[idx].alt_pipe_format)) {
2135             continue;
2136          }
2137 
2138          conversion = true;
2139       }
2140 
2141       /* The format is supported one way or another; add the EGLConfig */
2142       dri2_conf = dri2_add_config(disp, dri2_dpy->driver_configs[i],
2143                                   EGL_WINDOW_BIT, NULL);
2144       if (!dri2_conf)
2145          continue;
2146 
2147       format_count[idx]++;
2148 
2149       if (conversion && format_count[idx] == 1) {
2150          _eglLog(_EGL_DEBUG, "Client format %s converted via PRIME blitImage.",
2151                  util_format_name(dri2_wl_visuals[idx].pipe_format));
2152       }
2153    }
2154 
2155    for (unsigned i = 0; i < ARRAY_SIZE(format_count); i++) {
2156       if (!format_count[i]) {
2157          _eglLog(_EGL_DEBUG, "No DRI config supports native format %s",
2158                  util_format_name(dri2_wl_visuals[i].pipe_format));
2159       }
2160    }
2161 }
2162 
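/* Find the device to render on: prefer the main device reported by default
 * dma-buf feedback and fall back to the legacy wl_drm device/authentication
 * path if no render node was obtained. */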
2163 static bool
2164 dri2_initialize_wayland_drm_extensions(struct dri2_egl_display *dri2_dpy)
2165 {
2166    /* Get default dma-buf feedback */
2167    if (dri2_dpy->wl_dmabuf &&
2168        zwp_linux_dmabuf_v1_get_version(dri2_dpy->wl_dmabuf) >=
2169           ZWP_LINUX_DMABUF_V1_GET_DEFAULT_FEEDBACK_SINCE_VERSION) {
2170       dmabuf_feedback_format_table_init(&dri2_dpy->format_table);
2171       dri2_dpy->wl_dmabuf_feedback =
2172          zwp_linux_dmabuf_v1_get_default_feedback(dri2_dpy->wl_dmabuf);
2173       zwp_linux_dmabuf_feedback_v1_add_listener(
2174          dri2_dpy->wl_dmabuf_feedback, &dmabuf_feedback_listener, dri2_dpy);
2175    }
2176 
2177    if (roundtrip(dri2_dpy) < 0)
2178       return false;
2179 
2180    /* Destroy the default dma-buf feedback and the format table. */
2181    if (dri2_dpy->wl_dmabuf_feedback) {
2182       zwp_linux_dmabuf_feedback_v1_destroy(dri2_dpy->wl_dmabuf_feedback);
2183       dri2_dpy->wl_dmabuf_feedback = NULL;
2184       dmabuf_feedback_format_table_fini(&dri2_dpy->format_table);
2185    }
2186 
2187    /* We couldn't retrieve a render node from the dma-buf feedback (or the
2188     * feedback was not advertised at all), so we must fall back to wl_drm. */
2189    if (dri2_dpy->fd_render_gpu == -1) {
2190       /* wl_drm not advertised by compositor, so can't continue */
2191       if (dri2_dpy->wl_drm_name == 0)
2192          return false;
2193       wl_drm_bind(dri2_dpy);
2194 
2195       if (dri2_dpy->wl_drm == NULL)
2196          return false;
2197       if (roundtrip(dri2_dpy) < 0 || dri2_dpy->fd_render_gpu == -1)
2198          return false;
2199 
2200       if (!dri2_dpy->authenticated &&
2201           (roundtrip(dri2_dpy) < 0 || !dri2_dpy->authenticated))
2202          return false;
2203    }
2204    return true;
2205 }
2206 
2207 static EGLBoolean
2208 dri2_initialize_wayland_drm(_EGLDisplay *disp)
2209 {
2210    struct dri2_egl_display *dri2_dpy = dri2_display_create();
2211    if (!dri2_dpy)
2212       return EGL_FALSE;
2213 
2214    disp->DriverData = (void *)dri2_dpy;
2215 
2216    if (dri2_wl_formats_init(&dri2_dpy->formats) < 0)
2217       goto cleanup;
2218 
2219    if (disp->PlatformDisplay == NULL) {
2220       dri2_dpy->wl_dpy = wl_display_connect(NULL);
2221       if (dri2_dpy->wl_dpy == NULL)
2222          goto cleanup;
2223       dri2_dpy->own_device = true;
2224    } else {
2225       dri2_dpy->wl_dpy = disp->PlatformDisplay;
2226    }
2227 
2228    dri2_dpy->wl_queue = wl_display_create_queue_with_name(dri2_dpy->wl_dpy,
2229                                                           "mesa egl display queue");
2230 
2231    dri2_dpy->wl_dpy_wrapper = wl_proxy_create_wrapper(dri2_dpy->wl_dpy);
2232    if (dri2_dpy->wl_dpy_wrapper == NULL)
2233       goto cleanup;
2234 
2235    wl_proxy_set_queue((struct wl_proxy *)dri2_dpy->wl_dpy_wrapper,
2236                       dri2_dpy->wl_queue);
2237 
2238    if (dri2_dpy->own_device)
2239       wl_display_dispatch_pending(dri2_dpy->wl_dpy);
2240 
2241    dri2_dpy->wl_registry = wl_display_get_registry(dri2_dpy->wl_dpy_wrapper);
2242    wl_registry_add_listener(dri2_dpy->wl_registry, &registry_listener_drm,
2243                             dri2_dpy);
2244 
2245    if (roundtrip(dri2_dpy) < 0)
2246       goto cleanup;
2247 
2248    if (!dri2_initialize_wayland_drm_extensions(dri2_dpy))
2249       goto cleanup;
2250 
2251    loader_get_user_preferred_fd(&dri2_dpy->fd_render_gpu,
2252                                 &dri2_dpy->fd_display_gpu);
2253 
2254    if (dri2_dpy->fd_render_gpu != dri2_dpy->fd_display_gpu) {
2255       free(dri2_dpy->device_name);
2256       dri2_dpy->device_name =
2257          loader_get_device_name_for_fd(dri2_dpy->fd_render_gpu);
2258       if (!dri2_dpy->device_name) {
2259          _eglError(EGL_BAD_ALLOC, "wayland-egl: failed to get device name "
2260                                   "for requested GPU");
2261          goto cleanup;
2262       }
2263    }
2264 
2265    /* we have to do the check now, because loader_get_user_preferred_fd
2266     * will return a render-node when the requested gpu is different
2267     * from the server's, but also if the client asks for the same gpu as
2268     * the server by requesting its pci-id */
2269    dri2_dpy->is_render_node =
2270       drmGetNodeTypeFromFd(dri2_dpy->fd_render_gpu) == DRM_NODE_RENDER;
2271 
2272    dri2_dpy->driver_name = loader_get_driver_for_fd(dri2_dpy->fd_render_gpu);
2273    if (dri2_dpy->driver_name == NULL) {
2274       _eglError(EGL_BAD_ALLOC, "DRI2: failed to get driver name");
2275       goto cleanup;
2276    }
2277 
2278    dri2_dpy->loader_extensions = dri2_loader_extensions;
2279    if (!dri2_load_driver(disp)) {
2280       _eglError(EGL_BAD_ALLOC, "DRI2: failed to load driver");
2281       goto cleanup;
2282    }
2283 
2284    if (!dri2_create_screen(disp))
2285       goto cleanup;
2286 
2287    if (!dri2_setup_device(disp, false)) {
2288       _eglError(EGL_NOT_INITIALIZED, "DRI2: failed to setup EGLDevice");
2289       goto cleanup;
2290    }
2291 
2292    dri2_setup_screen(disp);
2293 
2294    dri2_wl_setup_swap_interval(disp);
2295 
2296    if (dri2_dpy->wl_drm) {
2297       /* To use Prime, we must have _DRI_IMAGE v7 at least.
2298        * createImageFromDmaBufs support indicates that Prime export/import is
2299        * supported by the driver. We have deprecated support for the GEM names
2300        * API, so we bail out if the driver does not support Prime. */
2301       if (!(dri2_dpy->capabilities & WL_DRM_CAPABILITY_PRIME) ||
2302           !dri2_dpy->has_dmabuf_import) {
2303          _eglLog(_EGL_WARNING, "wayland-egl: display does not support prime");
2304          goto cleanup;
2305       }
2306    }
2307 
2308    dri2_wl_add_configs_for_visuals(disp);
2309 
2310    dri2_set_WL_bind_wayland_display(disp);
2311    /* We cannot convert an EGLImage into a wl_buffer when on a different gpu,
2312     * because the buffer of the EGLImage likely has a tiling mode the server
2313     * gpu won't support. There is currently no way to check for this, so do
2314     * not advertise the extension. */
2315    if (dri2_dpy->fd_render_gpu == dri2_dpy->fd_display_gpu)
2316       disp->Extensions.WL_create_wayland_buffer_from_image = EGL_TRUE;
2317 
2318    disp->Extensions.EXT_buffer_age = EGL_TRUE;
2319 
2320    disp->Extensions.EXT_swap_buffers_with_damage = EGL_TRUE;
2321 
2322    disp->Extensions.EXT_present_opaque = EGL_TRUE;
2323 
2324    /* Fill vtbl last to prevent accidentally calling virtual function during
2325     * initialization.
2326     */
2327    dri2_dpy->vtbl = &dri2_wl_display_vtbl;
2328 
2329    return EGL_TRUE;
2330 
2331 cleanup:
2332    dri2_display_destroy(disp);
2333    return EGL_FALSE;
2334 }
2335 
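/* Row stride in bytes for a wl_shm format at the given width, based on the
 * corresponding pipe format's block size. */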
2336 static int
2337 dri2_wl_swrast_get_stride_for_format(int format, int w)
2338 {
2339    int visual_idx = dri2_wl_visual_idx_from_shm_format(format);
2340 
2341    assume(visual_idx != -1);
2342 
2343    return w * util_format_get_blocksize(dri2_wl_visuals[visual_idx].pipe_format);
2344 }
2345 
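/* Back a CPU-rendered color buffer with an anonymous shared-memory file:
 * mmap it for the client and wrap it in a wl_shm_pool-based wl_buffer bound
 * to the surface's event queue. */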
2346 static EGLBoolean
2347 dri2_wl_swrast_allocate_buffer(struct dri2_egl_surface *dri2_surf, int format,
2348                                int w, int h, void **data, int *size,
2349                                struct wl_buffer **buffer)
2350 {
2351    struct dri2_egl_display *dri2_dpy =
2352       dri2_egl_display(dri2_surf->base.Resource.Display);
2353    struct wl_shm_pool *pool;
2354    int fd, stride, size_map;
2355    void *data_map;
2356 
2357    assert(!*buffer);
2358 
2359    stride = dri2_wl_swrast_get_stride_for_format(format, w);
2360    size_map = h * stride;
2361 
2362    /* Create a shareable buffer */
2363    fd = os_create_anonymous_file(size_map, NULL);
2364    if (fd < 0)
2365       return EGL_FALSE;
2366 
2367    data_map = mmap(NULL, size_map, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
2368    if (data_map == MAP_FAILED) {
2369       close(fd);
2370       return EGL_FALSE;
2371    }
2372 
2373    /* Share it in a wl_buffer */
2374    pool = wl_shm_create_pool(dri2_dpy->wl_shm, fd, size_map);
2375    wl_proxy_set_queue((struct wl_proxy *)pool, dri2_surf->wl_queue);
2376    *buffer = wl_shm_pool_create_buffer(pool, 0, w, h, stride, format);
2377    wl_shm_pool_destroy(pool);
2378    close(fd);
2379 
2380    *data = data_map;
2381    *size = size_map;
2382    return EGL_TRUE;
2383 }
2384 
2385 static void
2386 kopper_update_buffers(struct dri2_egl_surface *dri2_surf)
2387 {
2388    /* we need to do the following operations only once per frame */
2389    if (dri2_surf->back)
2390       return;
2391 
2392    if (dri2_surf->wl_win &&
2393        (dri2_surf->base.Width != dri2_surf->wl_win->width ||
2394         dri2_surf->base.Height != dri2_surf->wl_win->height)) {
2395 
2396       dri2_surf->base.Width = dri2_surf->wl_win->width;
2397       dri2_surf->base.Height = dri2_surf->wl_win->height;
2398       dri2_surf->dx = dri2_surf->wl_win->dx;
2399       dri2_surf->dy = dri2_surf->wl_win->dy;
2400       dri2_surf->current = NULL;
2401    }
2402 }
2403 
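/* Pick a free shm color buffer as the new back buffer, allocating one on
 * demand and waiting for a compositor release if all buffers are locked,
 * then free any unlocked spare buffer older than BUFFER_TRIM_AGE_HYSTERESIS
 * swaps. */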
2404 static int
2405 swrast_update_buffers(struct dri2_egl_surface *dri2_surf)
2406 {
2407    struct dri2_egl_display *dri2_dpy =
2408       dri2_egl_display(dri2_surf->base.Resource.Display);
2409 
2410    /* we need to do the following operations only once per frame */
2411    if (dri2_surf->back)
2412       return 0;
2413 
2414    if (dri2_surf->wl_win &&
2415        (dri2_surf->base.Width != dri2_surf->wl_win->width ||
2416         dri2_surf->base.Height != dri2_surf->wl_win->height)) {
2417 
2418       dri2_wl_release_buffers(dri2_surf);
2419 
2420       dri2_surf->base.Width = dri2_surf->wl_win->width;
2421       dri2_surf->base.Height = dri2_surf->wl_win->height;
2422       dri2_surf->dx = dri2_surf->wl_win->dx;
2423       dri2_surf->dy = dri2_surf->wl_win->dy;
2424       dri2_surf->current = NULL;
2425    }
2426 
2427    /* find back buffer */
2428    /* There might be a buffer release already queued that wasn't processed */
2429    wl_display_dispatch_queue_pending(dri2_dpy->wl_dpy, dri2_surf->wl_queue);
2430 
2431    /* else choose any other free location */
2432    while (!dri2_surf->back) {
2433       for (int i = 0; i < ARRAY_SIZE(dri2_surf->color_buffers); i++) {
2434          if (!dri2_surf->color_buffers[i].locked) {
2435             dri2_surf->back = &dri2_surf->color_buffers[i];
2436             if (dri2_surf->back->wl_buffer)
2437                break;
2438 
2439             if (!dri2_wl_swrast_allocate_buffer(
2440                    dri2_surf, dri2_surf->format, dri2_surf->base.Width,
2441                    dri2_surf->base.Height, &dri2_surf->back->data,
2442                    &dri2_surf->back->data_size, &dri2_surf->back->wl_buffer)) {
2443                _eglError(EGL_BAD_ALLOC, "failed to allocate color buffer");
2444                return -1;
2445             }
2446             wl_buffer_add_listener(dri2_surf->back->wl_buffer,
2447                                    &wl_buffer_listener, dri2_surf);
2448             break;
2449          }
2450       }
2451 
2452       /* wait for the compositor to release a buffer */
2453       if (!dri2_surf->back) {
2454          if (loader_wayland_dispatch(dri2_dpy->wl_dpy, dri2_surf->wl_queue, NULL) ==
2455              -1) {
2456             _eglError(EGL_BAD_ALLOC, "waiting for a free buffer failed");
2457             return -1;
2458          }
2459       }
2460    }
2461 
2462    dri2_surf->back->locked = true;
2463 
2464    /* If we have an extra unlocked buffer at this point, we had to do triple
2465     * buffering for a while, but now can go back to just double buffering.
2466     * That means we can free any unlocked buffer now. To avoid toggling between
2467     * going back to double buffering and needing to allocate another buffer too
2468     * fast we let the unneeded buffer sit around for a short while. */
2469    for (int i = 0; i < ARRAY_SIZE(dri2_surf->color_buffers); i++) {
2470       if (!dri2_surf->color_buffers[i].locked &&
2471           dri2_surf->color_buffers[i].wl_buffer &&
2472           dri2_surf->color_buffers[i].age > BUFFER_TRIM_AGE_HYSTERESIS) {
2473          wl_buffer_destroy(dri2_surf->color_buffers[i].wl_buffer);
2474          munmap(dri2_surf->color_buffers[i].data,
2475                 dri2_surf->color_buffers[i].data_size);
2476          dri2_surf->color_buffers[i].wl_buffer = NULL;
2477          dri2_surf->color_buffers[i].data = NULL;
2478          dri2_surf->color_buffers[i].age = 0;
2479       }
2480    }
2481 
2482    return 0;
2483 }
2484 
2485 static void *
2486 dri2_wl_swrast_get_frontbuffer_data(struct dri2_egl_surface *dri2_surf)
2487 {
2488    /* if there has been a resize: */
2489    if (!dri2_surf->current)
2490       return NULL;
2491 
2492    return dri2_surf->current->data;
2493 }
2494 
2495 static void *
2496 dri2_wl_swrast_get_backbuffer_data(struct dri2_egl_surface *dri2_surf)
2497 {
2498    assert(dri2_surf->back);
2499    return dri2_surf->back->data;
2500 }
2501 
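/* Wait for any pending frame/sync callback to fire, then request a new frame
 * callback if the swap interval requires throttling. */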
2502 static EGLBoolean
2503 dri2_wl_surface_throttle(struct dri2_egl_surface *dri2_surf)
2504 {
2505    struct dri2_egl_display *dri2_dpy =
2506       dri2_egl_display(dri2_surf->base.Resource.Display);
2507 
2508    while (dri2_surf->throttle_callback != NULL)
2509       if (loader_wayland_dispatch(dri2_dpy->wl_dpy, dri2_surf->wl_queue, NULL) ==
2510           -1)
2511          return EGL_FALSE;
2512 
2513    if (dri2_surf->base.SwapInterval > 0) {
2514       dri2_surf->throttle_callback =
2515          wl_surface_frame(dri2_surf->wl_surface_wrapper);
2516       wl_callback_add_listener(dri2_surf->throttle_callback, &throttle_listener,
2517                                dri2_surf);
2518    }
2519 
2520    return EGL_TRUE;
2521 }
2522 
2523 static void
2524 dri2_wl_swrast_commit_backbuffer(struct dri2_egl_surface *dri2_surf)
2525 {
2526    struct dri2_egl_display *dri2_dpy =
2527       dri2_egl_display(dri2_surf->base.Resource.Display);
2528 
2529    dri2_surf->wl_win->attached_width = dri2_surf->base.Width;
2530    dri2_surf->wl_win->attached_height = dri2_surf->base.Height;
2531    /* reset resize growing parameters */
2532    dri2_surf->dx = 0;
2533    dri2_surf->dy = 0;
2534 
2535    wl_surface_commit(dri2_surf->wl_surface_wrapper);
2536 
2537    /* If we're not waiting for a frame callback then we'll at least throttle
2538     * to a sync callback so that we always give a chance for the compositor to
2539     * handle the commit and send a release event before checking for a free
2540     * buffer */
2541    if (dri2_surf->throttle_callback == NULL) {
2542       dri2_surf->throttle_callback = wl_display_sync(dri2_surf->wl_dpy_wrapper);
2543       wl_callback_add_listener(dri2_surf->throttle_callback, &throttle_listener,
2544                                dri2_surf);
2545    }
2546 
2547    wl_display_flush(dri2_dpy->wl_dpy);
2548 }
2549 
2550 static void
2551 dri2_wl_kopper_get_drawable_info(__DRIdrawable *draw, int *x, int *y, int *w,
2552                                  int *h, void *loaderPrivate)
2553 {
2554    struct dri2_egl_surface *dri2_surf = loaderPrivate;
2555 
2556    kopper_update_buffers(dri2_surf);
2557    *x = 0;
2558    *y = 0;
2559    *w = dri2_surf->base.Width;
2560    *h = dri2_surf->base.Height;
2561 }
2562 
2563 static void
2564 dri2_wl_swrast_get_drawable_info(__DRIdrawable *draw, int *x, int *y, int *w,
2565                                  int *h, void *loaderPrivate)
2566 {
2567    struct dri2_egl_surface *dri2_surf = loaderPrivate;
2568 
2569    (void)swrast_update_buffers(dri2_surf);
2570    *x = 0;
2571    *y = 0;
2572    *w = dri2_surf->base.Width;
2573    *h = dri2_surf->base.Height;
2574 }
2575 
2576 static void
2577 dri2_wl_swrast_get_image(__DRIdrawable *read, int x, int y, int w, int h,
2578                          char *data, void *loaderPrivate)
2579 {
2580    struct dri2_egl_surface *dri2_surf = loaderPrivate;
2581    int copy_width = dri2_wl_swrast_get_stride_for_format(dri2_surf->format, w);
2582    int x_offset = dri2_wl_swrast_get_stride_for_format(dri2_surf->format, x);
2583    int src_stride = dri2_wl_swrast_get_stride_for_format(dri2_surf->format,
2584                                                          dri2_surf->base.Width);
2585    int dst_stride = copy_width;
2586    char *src, *dst;
2587 
2588    src = dri2_wl_swrast_get_frontbuffer_data(dri2_surf);
2589    /* this is already the most up-to-date buffer */
2590    if (src == data)
2591       return;
2592    if (!src) {
2593       memset(data, 0, copy_width * h);
2594       return;
2595    }
2596 
2597    assert(copy_width <= src_stride);
2598 
2599    src += x_offset;
2600    src += y * src_stride;
2601    dst = data;
2602 
2603    if (copy_width > src_stride - x_offset)
2604       copy_width = src_stride - x_offset;
2605    if (h > dri2_surf->base.Height - y)
2606       h = dri2_surf->base.Height - y;
2607 
2608    for (; h > 0; h--) {
2609       memcpy(dst, src, copy_width);
2610       src += src_stride;
2611       dst += dst_stride;
2612    }
2613 }
2614 
2615 static void
2616 dri2_wl_swrast_put_image2(__DRIdrawable *draw, int op, int x, int y, int w,
2617                           int h, int stride, char *data, void *loaderPrivate)
2618 {
2619    struct dri2_egl_surface *dri2_surf = loaderPrivate;
2620    /* clamp to surface size */
2621    w = MIN2(w, dri2_surf->base.Width);
2622    h = MIN2(h, dri2_surf->base.Height);
2623    int copy_width = dri2_wl_swrast_get_stride_for_format(dri2_surf->format, w);
2624    int dst_stride = dri2_wl_swrast_get_stride_for_format(dri2_surf->format,
2625                                                          dri2_surf->base.Width);
2626    int x_offset = dri2_wl_swrast_get_stride_for_format(dri2_surf->format, x);
2627    char *src, *dst;
2628 
2629    assert(copy_width <= stride);
2630 
2631    dst = dri2_wl_swrast_get_backbuffer_data(dri2_surf);
2632 
2633    dst += x_offset;
2634    dst += y * dst_stride;
2635 
2636    src = data;
2637 
2638    /* drivers expect us to do these checks (and some rely on it) */
2639    if (copy_width > dst_stride - x_offset)
2640       copy_width = dst_stride - x_offset;
2641    if (h > dri2_surf->base.Height - y)
2642       h = dri2_surf->base.Height - y;
2643 
2644    for (; h > 0; h--) {
2645       memcpy(dst, src, copy_width);
2646       src += stride;
2647       dst += dst_stride;
2648    }
2649 }
2650 
2651 static void
2652 dri2_wl_swrast_put_image(__DRIdrawable *draw, int op, int x, int y, int w,
2653                          int h, char *data, void *loaderPrivate)
2654 {
2655    struct dri2_egl_surface *dri2_surf = loaderPrivate;
2656    int stride;
2657 
2658    stride = dri2_wl_swrast_get_stride_for_format(dri2_surf->format, w);
2659    dri2_wl_swrast_put_image2(draw, op, x, y, w, h, stride, data, loaderPrivate);
2660 }
2661 
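/* Swap path for the kopper backend: throttle on the frame or sync callback,
 * then let the DRI driver present the frame itself (kopperSwapBuffers* when a
 * kopper screen exists, driSwapBuffers* otherwise), with the damage
 * rectangles when provided. */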
2662 static EGLBoolean
2663 dri2_wl_kopper_swap_buffers_with_damage(_EGLDisplay *disp, _EGLSurface *draw,
2664                                         const EGLint *rects, EGLint n_rects)
2665 {
2666    struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
2667    struct dri2_egl_surface *dri2_surf = dri2_egl_surface(draw);
2668 
2669    if (!dri2_surf->wl_win)
2670       return _eglError(EGL_BAD_NATIVE_WINDOW, "dri2_swap_buffers");
2671 
2672    if (!dri2_wl_surface_throttle(dri2_surf))
2673       return EGL_FALSE;
2674 
2675    if (n_rects) {
2676       if (dri2_dpy->kopper)
2677          kopperSwapBuffersWithDamage(dri2_surf->dri_drawable, __DRI2_FLUSH_INVALIDATE_ANCILLARY, n_rects, rects);
2678       else
2679          driSwapBuffersWithDamage(dri2_surf->dri_drawable, n_rects, rects);
2680    } else {
2681       if (dri2_dpy->kopper)
2682          kopperSwapBuffers(dri2_surf->dri_drawable, __DRI2_FLUSH_INVALIDATE_ANCILLARY);
2683       else
2684          driSwapBuffers(dri2_surf->dri_drawable);
2685    }
2686 
2687    dri2_surf->current = dri2_surf->back;
2688    dri2_surf->back = NULL;
2689 
2690    return EGL_TRUE;
2691 }
2692 
2693 static EGLBoolean
2694 dri2_wl_kopper_swap_buffers(_EGLDisplay *disp, _EGLSurface *draw)
2695 {
2696    dri2_wl_kopper_swap_buffers_with_damage(disp, draw, NULL, 0);
2697    return EGL_TRUE;
2698 }
2699 
2700 static EGLBoolean
2701 dri2_wl_swrast_swap_buffers_with_damage(_EGLDisplay *disp, _EGLSurface *draw,
2702                                         const EGLint *rects, EGLint n_rects)
2703 {
2704    struct dri2_egl_surface *dri2_surf = dri2_egl_surface(draw);
2705 
2706    if (!dri2_surf->wl_win)
2707       return _eglError(EGL_BAD_NATIVE_WINDOW, "dri2_swap_buffers");
2708 
2709    (void)swrast_update_buffers(dri2_surf);
2710 
2711    if (dri2_wl_surface_throttle(dri2_surf))
2712       wl_surface_attach(dri2_surf->wl_surface_wrapper,
2713          /* 'back' here will be promoted to 'current' */
2714          dri2_surf->back->wl_buffer, dri2_surf->dx,
2715          dri2_surf->dy);
2716 
2717    /* If the compositor doesn't support damage_buffer, we deliberately
2718     * ignore the damage region and post maximum damage, due to
2719     * https://bugs.freedesktop.org/78190 */
2720    if (!n_rects || !try_damage_buffer(dri2_surf, rects, n_rects))
2721       wl_surface_damage(dri2_surf->wl_surface_wrapper, 0, 0, INT32_MAX,
2722                         INT32_MAX);
2723 
2724    /* guarantee full copy for partial update */
2725    int w = n_rects == 1 ? (rects[2] - rects[0]) : 0;
2726    int copy_width = dri2_wl_swrast_get_stride_for_format(dri2_surf->format, w);
2727    int dst_stride = dri2_wl_swrast_get_stride_for_format(dri2_surf->format,
2728                                                          dri2_surf->base.Width);
2729    char *dst = dri2_wl_swrast_get_backbuffer_data(dri2_surf);
2730 
2731    /* partial copy, copy old content */
2732    if (copy_width < dst_stride)
2733       dri2_wl_swrast_get_image(NULL, 0, 0, dri2_surf->base.Width,
2734                                  dri2_surf->base.Height, dst, dri2_surf);
2735 
2736    if (n_rects)
2737       driSwapBuffersWithDamage(dri2_surf->dri_drawable, n_rects, rects);
2738    else
2739       driSwapBuffers(dri2_surf->dri_drawable);
2740 
2741    dri2_surf->current = dri2_surf->back;
2742    dri2_surf->back = NULL;
2743 
2744    dri2_wl_swrast_commit_backbuffer(dri2_surf);
2745    return EGL_TRUE;
2746 }
2747 
2748 static EGLBoolean
2749 dri2_wl_swrast_swap_buffers(_EGLDisplay *disp, _EGLSurface *draw)
2750 {
2751    dri2_wl_swrast_swap_buffers_with_damage(disp, draw, NULL, 0);
2752    return EGL_TRUE;
2753 }
2754 
2755 static EGLint
2756 dri2_wl_kopper_query_buffer_age(_EGLDisplay *disp, _EGLSurface *surface)
2757 {
2758    struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
2759    struct dri2_egl_surface *dri2_surf = dri2_egl_surface(surface);
2760 
2761    /* This can legitimately be null for lavapipe */
2762    if (dri2_dpy->kopper)
2763       return kopperQueryBufferAge(dri2_surf->dri_drawable);
2764    else
2765       return driSWRastQueryBufferAge(dri2_surf->dri_drawable);
2766    return 0;
2767 }
2768 
2769 static EGLint
2770 dri2_wl_swrast_query_buffer_age(_EGLDisplay *disp, _EGLSurface *surface)
2771 {
2772    struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
2773    struct dri2_egl_surface *dri2_surf = dri2_egl_surface(surface);
2774 
2775    assert(dri2_dpy->swrast);
2776    return driSWRastQueryBufferAge(dri2_surf->dri_drawable);
2777 }
2778 
2779 static void
2780 shm_handle_format(void *data, struct wl_shm *shm, uint32_t format)
2781 {
2782    struct dri2_egl_display *dri2_dpy = data;
2783    int visual_idx = dri2_wl_visual_idx_from_shm_format(format);
2784 
2785    if (visual_idx == -1)
2786       return;
2787 
2788    BITSET_SET(dri2_dpy->formats.formats_bitmap, visual_idx);
2789 }
2790 
2791 static const struct wl_shm_listener shm_listener = {
2792    .format = shm_handle_format,
2793 };
2794 
2795 static void
2796 registry_handle_global_kopper(void *data, struct wl_registry *registry,
2797                               uint32_t name, const char *interface,
2798                               uint32_t version)
2799 {
2800    struct dri2_egl_display *dri2_dpy = data;
2801 
2802    if (strcmp(interface, wl_shm_interface.name) == 0) {
2803       dri2_dpy->wl_shm = wl_registry_bind(registry, name, &wl_shm_interface, 1);
2804       wl_shm_add_listener(dri2_dpy->wl_shm, &shm_listener, dri2_dpy);
2805    }
2806    if (strcmp(interface, wl_drm_interface.name) == 0) {
2807       dri2_dpy->wl_drm_version = MIN2(version, 2);
2808       dri2_dpy->wl_drm_name = name;
2809    } else if (strcmp(interface, zwp_linux_dmabuf_v1_interface.name) == 0 &&
2810                version >= 3) {
2811       dri2_dpy->wl_dmabuf = wl_registry_bind(
2812          registry, name, &zwp_linux_dmabuf_v1_interface,
2813          MIN2(version,
2814                ZWP_LINUX_DMABUF_V1_GET_DEFAULT_FEEDBACK_SINCE_VERSION));
2815       zwp_linux_dmabuf_v1_add_listener(dri2_dpy->wl_dmabuf, &dmabuf_listener,
2816                                        dri2_dpy);
2817    }
2818 }
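
/* Note: wl_shm and zwp_linux_dmabuf_v1 are bound immediately (the dmabuf
 * version clamped to the default-feedback revision), while wl_drm is only
 * recorded by name/version here; binding it is left to
 * dri2_initialize_wayland_drm_extensions(), which the Zink path of the init
 * function below calls when probing for a hardware device.
 */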
2819 
2820 static const struct wl_registry_listener registry_listener_kopper = {
2821    .global = registry_handle_global_kopper,
2822    .global_remove = registry_handle_global_remove,
2823 };
2824 
2825 static void
2826 registry_handle_global_swrast(void *data, struct wl_registry *registry,
2827                               uint32_t name, const char *interface,
2828                               uint32_t version)
2829 {
2830    struct dri2_egl_display *dri2_dpy = data;
2831 
2832    if (strcmp(interface, wl_shm_interface.name) == 0) {
2833       dri2_dpy->wl_shm = wl_registry_bind(registry, name, &wl_shm_interface, 1);
2834       wl_shm_add_listener(dri2_dpy->wl_shm, &shm_listener, dri2_dpy);
2835    }
2836 }
2837 
2838 static const struct wl_registry_listener registry_listener_swrast = {
2839    .global = registry_handle_global_swrast,
2840    .global_remove = registry_handle_global_remove,
2841 };
2842 
2843 static const struct dri2_egl_display_vtbl dri2_wl_swrast_display_vtbl = {
2844    .authenticate = NULL,
2845    .create_window_surface = dri2_wl_create_window_surface,
2846    .create_pixmap_surface = dri2_wl_create_pixmap_surface,
2847    .destroy_surface = dri2_wl_destroy_surface,
2848    .swap_interval = dri2_wl_swap_interval,
2849    .create_image = dri2_create_image_khr,
2850    .swap_buffers = dri2_wl_swrast_swap_buffers,
2851    .swap_buffers_with_damage = dri2_wl_swrast_swap_buffers_with_damage,
2852    .get_dri_drawable = dri2_surface_get_dri_drawable,
2853    .query_buffer_age = dri2_wl_swrast_query_buffer_age,
2854 };
2855 
2856 static const struct dri2_egl_display_vtbl dri2_wl_kopper_display_vtbl = {
2857    .authenticate = NULL,
2858    .create_window_surface = dri2_wl_create_window_surface,
2859    .create_pixmap_surface = dri2_wl_create_pixmap_surface,
2860    .destroy_surface = dri2_wl_destroy_surface,
2861    .create_image = dri2_create_image_khr,
2862    .swap_buffers = dri2_wl_kopper_swap_buffers,
2863    .swap_buffers_with_damage = dri2_wl_kopper_swap_buffers_with_damage,
2864    .get_dri_drawable = dri2_surface_get_dri_drawable,
2865    .query_buffer_age = dri2_wl_kopper_query_buffer_age,
2866 };
2867 
2868 static const __DRIswrastLoaderExtension swrast_loader_extension = {
2869    .base = {__DRI_SWRAST_LOADER, 2},
2870 
2871    .getDrawableInfo = dri2_wl_swrast_get_drawable_info,
2872    .putImage = dri2_wl_swrast_put_image,
2873    .getImage = dri2_wl_swrast_get_image,
2874    .putImage2 = dri2_wl_swrast_put_image2,
2875 };
2876 
2877 static const __DRIswrastLoaderExtension kopper_swrast_loader_extension = {
2878    .base = {__DRI_SWRAST_LOADER, 2},
2879 
2880    .getDrawableInfo = dri2_wl_kopper_get_drawable_info,
2881    .putImage = dri2_wl_swrast_put_image,
2882    .getImage = dri2_wl_swrast_get_image,
2883    .putImage2 = dri2_wl_swrast_put_image2,
2884 };
2885 
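/* kopper_vk_surface_create_storage is (presumably) opaque, platform-agnostic
 * storage; this assert guarantees it is large enough to hold the Wayland
 * surface create info that kopperSetSurfaceCreateInfo() writes into it below.
 */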
2886 static_assert(sizeof(struct kopper_vk_surface_create_storage) >=
2887                  sizeof(VkWaylandSurfaceCreateInfoKHR),
2888               "");
2889 
2890 static void
2891 kopperSetSurfaceCreateInfo(void *_draw, struct kopper_loader_info *out)
2892 {
2893    struct dri2_egl_surface *dri2_surf = _draw;
2894    struct dri2_egl_display *dri2_dpy =
2895       dri2_egl_display(dri2_surf->base.Resource.Display);
2896    VkWaylandSurfaceCreateInfoKHR *wlsci =
2897       (VkWaylandSurfaceCreateInfoKHR *)&out->bos;
2898 
2899    wlsci->sType = VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR;
2900    wlsci->pNext = NULL;
2901    wlsci->flags = 0;
2902    wlsci->display = dri2_dpy->wl_dpy;
2903    wlsci->surface = dri2_surf->wl_surface_wrapper;
2904    out->present_opaque = dri2_surf->base.PresentOpaque;
2905 }
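
/* Illustrative only (not part of the driver): the kopper loader is expected
 * to hand the filled-in create info to the Vulkan WSI, roughly equivalent to
 * the following, where "instance" and "vk_surf" are hypothetical:
 *
 *    VkSurfaceKHR vk_surf;
 *    vkCreateWaylandSurfaceKHR(instance, wlsci, NULL, &vk_surf);
 *
 * which is why the wrapped wl_surface must stay valid for the drawable's
 * lifetime.
 */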
2906 
2907 static const __DRIkopperLoaderExtension kopper_loader_extension = {
2908    .base = {__DRI_KOPPER_LOADER, 1},
2909 
2910    .SetSurfaceCreateInfo = kopperSetSurfaceCreateInfo,
2911 };
2912 static const __DRIextension *swrast_loader_extensions[] = {
2913    &swrast_loader_extension.base,
2914    &image_lookup_extension.base,
2915    NULL,
2916 };
2917 static const __DRIextension *kopper_swrast_loader_extensions[] = {
2918    &kopper_swrast_loader_extension.base,
2919    &image_lookup_extension.base,
2920    &kopper_loader_extension.base,
2921    &use_invalidate.base,
2922    NULL,
2923 };
2924 
2925 static EGLBoolean
2926 dri2_initialize_wayland_swrast(_EGLDisplay *disp)
2927 {
2928    struct dri2_egl_display *dri2_dpy = dri2_display_create();
2929    if (!dri2_dpy)
2930       return EGL_FALSE;
2931 
2932    disp->DriverData = (void *)dri2_dpy;
2933 
2934    if (dri2_wl_formats_init(&dri2_dpy->formats) < 0)
2935       goto cleanup;
2936 
2937    if (disp->PlatformDisplay == NULL) {
2938       dri2_dpy->wl_dpy = wl_display_connect(NULL);
2939       if (dri2_dpy->wl_dpy == NULL)
2940          goto cleanup;
2941       dri2_dpy->own_device = true;
2942    } else {
2943       dri2_dpy->wl_dpy = disp->PlatformDisplay;
2944    }
2945 
2946    dri2_dpy->wl_queue = wl_display_create_queue_with_name(dri2_dpy->wl_dpy,
2947                                                           "mesa egl swrast display queue");
2948 
2949    dri2_dpy->wl_dpy_wrapper = wl_proxy_create_wrapper(dri2_dpy->wl_dpy);
2950    if (dri2_dpy->wl_dpy_wrapper == NULL)
2951       goto cleanup;
2952 
2953    wl_proxy_set_queue((struct wl_proxy *)dri2_dpy->wl_dpy_wrapper,
2954                       dri2_dpy->wl_queue);
2955 
2956    if (dri2_dpy->own_device)
2957       wl_display_dispatch_pending(dri2_dpy->wl_dpy);
2958 
2959    dri2_dpy->wl_registry = wl_display_get_registry(dri2_dpy->wl_dpy_wrapper);
2960    if (disp->Options.Zink)
2961       wl_registry_add_listener(dri2_dpy->wl_registry, &registry_listener_kopper,
2962                               dri2_dpy);
2963    else
2964       wl_registry_add_listener(dri2_dpy->wl_registry, &registry_listener_swrast,
2965                               dri2_dpy);
2966 
2967    if (roundtrip(dri2_dpy) < 0 || dri2_dpy->wl_shm == NULL)
2968       goto cleanup;
2969 
2970    if (roundtrip(dri2_dpy) < 0 ||
2971        !BITSET_TEST_RANGE(dri2_dpy->formats.formats_bitmap, 0,
2972                           dri2_dpy->formats.num_formats))
2973       goto cleanup;
2974 
2975    if (disp->Options.Zink) {
2976       if (!dri2_initialize_wayland_drm_extensions(dri2_dpy) && !disp->Options.ForceSoftware)
2977          goto cleanup;
2978 
2979       if (!disp->Options.ForceSoftware) {
2980          loader_get_user_preferred_fd(&dri2_dpy->fd_render_gpu,
2981                                        &dri2_dpy->fd_display_gpu);
2982 
2983          if (dri2_dpy->fd_render_gpu != dri2_dpy->fd_display_gpu) {
2984             free(dri2_dpy->device_name);
2985             dri2_dpy->device_name =
2986                loader_get_device_name_for_fd(dri2_dpy->fd_render_gpu);
2987             if (!dri2_dpy->device_name) {
2988                _eglError(EGL_BAD_ALLOC, "wayland-egl: failed to get device name "
2989                                           "for requested GPU");
2990                goto cleanup;
2991             }
2992          }
2993 
2994          /* We have to do the check now, because loader_get_user_preferred_fd
2995           * will return a render-node when the requested gpu is different
2996           * from the server's, but also if the client asks for the same gpu as
2997           * the server by requesting its pci-id. */
2998          dri2_dpy->is_render_node =
2999             drmGetNodeTypeFromFd(dri2_dpy->fd_render_gpu) == DRM_NODE_RENDER;
3000       }
3001    }
3002 
3003    dri2_dpy->driver_name = strdup(disp->Options.Zink ? "zink" : "swrast");
3004    if (!dri2_load_driver(disp))
3005       goto cleanup;
3006 
3007    dri2_dpy->loader_extensions = disp->Options.Zink ? kopper_swrast_loader_extensions : swrast_loader_extensions;
3008 
3009    if (!dri2_create_screen(disp))
3010       goto cleanup;
3011 
3012    if (!dri2_setup_device(disp, disp->Options.ForceSoftware)) {
3013       _eglError(EGL_NOT_INITIALIZED, "DRI2: failed to setup EGLDevice");
3014       goto cleanup;
3015    }
3016 
3017    dri2_setup_screen(disp);
3018 
3019    dri2_wl_setup_swap_interval(disp);
3020 
3021    dri2_wl_add_configs_for_visuals(disp);
3022 
3023    if (disp->Options.Zink && dri2_dpy->fd_render_gpu >= 0 &&
3024        (dri2_dpy->wl_dmabuf || dri2_dpy->wl_drm))
3025       dri2_set_WL_bind_wayland_display(disp);
3026    disp->Extensions.EXT_buffer_age = EGL_TRUE;
3027    disp->Extensions.EXT_swap_buffers_with_damage = EGL_TRUE;
3028    disp->Extensions.EXT_present_opaque = EGL_TRUE;
3029 
3030    /* Fill vtbl last to prevent accidentally calling a virtual function during
3031     * initialization.
3032     */
3033    dri2_dpy->vtbl = disp->Options.Zink ? &dri2_wl_kopper_display_vtbl : &dri2_wl_swrast_display_vtbl;
3034 
3035    return EGL_TRUE;
3036 
3037 cleanup:
3038    dri2_display_destroy(disp);
3039    return EGL_FALSE;
3040 }
3041 
3042 EGLBoolean
3043 dri2_initialize_wayland(_EGLDisplay *disp)
3044 {
3045    if (disp->Options.ForceSoftware || disp->Options.Zink)
3046       return dri2_initialize_wayland_swrast(disp);
3047    else
3048       return dri2_initialize_wayland_drm(disp);
3049 }
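
/* Illustrative only (not part of the driver): applications reach this entry
 * point through the usual EGL bootstrap; wl_dpy/dpy/major/minor are
 * hypothetical names:
 *
 *    struct wl_display *wl_dpy = wl_display_connect(NULL);
 *    EGLDisplay dpy = eglGetPlatformDisplay(EGL_PLATFORM_WAYLAND_KHR,
 *                                           wl_dpy, NULL);
 *    EGLint major, minor;
 *    eglInitialize(dpy, &major, &minor);
 *
 * Passing EGL_DEFAULT_DISPLAY (NULL) as the native display instead makes the
 * init paths above open their own connection (own_device).
 */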
3050 
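/* Teardown counterpart of the init paths above: proxies and the event queue
 * are destroyed before the display, and the wl_display itself is only
 * disconnected when this code opened the connection (own_device).
 */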
3051 void
3052 dri2_teardown_wayland(struct dri2_egl_display *dri2_dpy)
3053 {
3054    dri2_wl_formats_fini(&dri2_dpy->formats);
3055    if (dri2_dpy->wl_drm)
3056       wl_drm_destroy(dri2_dpy->wl_drm);
3057    if (dri2_dpy->wl_dmabuf)
3058       zwp_linux_dmabuf_v1_destroy(dri2_dpy->wl_dmabuf);
3059    if (dri2_dpy->wl_shm)
3060       wl_shm_destroy(dri2_dpy->wl_shm);
3061    if (dri2_dpy->wl_registry)
3062       wl_registry_destroy(dri2_dpy->wl_registry);
3063    if (dri2_dpy->wl_dpy_wrapper)
3064       wl_proxy_wrapper_destroy(dri2_dpy->wl_dpy_wrapper);
3065    if (dri2_dpy->wl_queue)
3066       wl_event_queue_destroy(dri2_dpy->wl_queue);
3067 
3068    if (dri2_dpy->own_device)
3069       wl_display_disconnect(dri2_dpy->wl_dpy);
3070 }
3071