xref: /aosp_15_r20/external/mesa3d/src/gallium/frontends/dri/loader_dri3_helper.c (revision 6104692788411f58d303aa86923a9ff6ecaded22)
1 /*
2  * Copyright © 2013 Keith Packard
3  * Copyright © 2015 Boyan Ding
4  *
5  * Permission to use, copy, modify, distribute, and sell this software and its
6  * documentation for any purpose is hereby granted without fee, provided that
7  * the above copyright notice appear in all copies and that both that copyright
8  * notice and this permission notice appear in supporting documentation, and
9  * that the name of the copyright holders not be used in advertising or
10  * publicity pertaining to distribution of the software without specific,
11  * written prior permission.  The copyright holders make no representations
12  * about the suitability of this software for any purpose.  It is provided "as
13  * is" without express or implied warranty.
14  *
15  * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
16  * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
17  * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
18  * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
19  * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
20  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
21  * OF THIS SOFTWARE.
22  */
23 
24 #include <fcntl.h>
25 #include <stdlib.h>
26 #include <unistd.h>
27 #include <string.h>
28 
29 #include <X11/xshmfence.h>
30 #include <xcb/xcb.h>
31 #include <xcb/dri3.h>
32 #include <xcb/present.h>
33 #include <xcb/xfixes.h>
34 
35 #include <X11/Xlib-xcb.h>
36 
37 #include "loader_dri_helper.h"
38 #include "loader_dri3_helper.h"
39 #include "util/macros.h"
40 #include "util/simple_mtx.h"
41 #include "drm-uapi/drm_fourcc.h"
42 #include "dri_util.h"
43 
44 /**
45  * A cached blit context.
46  */
47 struct loader_dri3_blit_context {
48    simple_mtx_t mtx;
49    __DRIcontext *ctx;
50    __DRIscreen *cur_screen;
51    const __DRIcoreExtension *core;
52 };
53 
/* For simplicity we maintain the cache only for a single screen at a time */
static struct loader_dri3_blit_context blit_context = {
   SIMPLE_MTX_INITIALIZER, NULL
};
58 
/* Forward declarations for helpers defined later in this file. */
static void
dri3_flush_present_events(struct loader_dri3_drawable *draw);

static struct loader_dri3_buffer *
dri3_find_back_alloc(struct loader_dri3_drawable *draw);
64 
65 static xcb_screen_t *
get_screen_for_root(xcb_connection_t * conn,xcb_window_t root)66 get_screen_for_root(xcb_connection_t *conn, xcb_window_t root)
67 {
68    xcb_screen_iterator_t screen_iter =
69    xcb_setup_roots_iterator(xcb_get_setup(conn));
70 
71    for (; screen_iter.rem; xcb_screen_next (&screen_iter)) {
72       if (screen_iter.data->root == root)
73          return screen_iter.data;
74    }
75 
76    return NULL;
77 }
78 
79 static xcb_visualtype_t *
get_xcb_visualtype_for_depth(struct loader_dri3_drawable * draw,int depth)80 get_xcb_visualtype_for_depth(struct loader_dri3_drawable *draw, int depth)
81 {
82    xcb_visualtype_iterator_t visual_iter;
83    xcb_screen_t *screen = draw->screen;
84    xcb_depth_iterator_t depth_iter;
85 
86    if (!screen)
87       return NULL;
88 
89    depth_iter = xcb_screen_allowed_depths_iterator(screen);
90    for (; depth_iter.rem; xcb_depth_next(&depth_iter)) {
91       if (depth_iter.data->depth != depth)
92          continue;
93 
94       visual_iter = xcb_depth_visuals_iterator(depth_iter.data);
95       if (visual_iter.rem)
96          return visual_iter.data;
97    }
98 
99    return NULL;
100 }
101 
102 /* Sets the adaptive sync window property state. */
103 static void
set_adaptive_sync_property(xcb_connection_t * conn,xcb_drawable_t drawable,uint32_t state)104 set_adaptive_sync_property(xcb_connection_t *conn, xcb_drawable_t drawable,
105                            uint32_t state)
106 {
107    static char const name[] = "_VARIABLE_REFRESH";
108    xcb_intern_atom_cookie_t cookie;
109    xcb_intern_atom_reply_t* reply;
110    xcb_void_cookie_t check;
111 
112    cookie = xcb_intern_atom(conn, 0, strlen(name), name);
113    reply = xcb_intern_atom_reply(conn, cookie, NULL);
114    if (reply == NULL)
115       return;
116 
117    if (state)
118       check = xcb_change_property_checked(conn, XCB_PROP_MODE_REPLACE,
119                                           drawable, reply->atom,
120                                           XCB_ATOM_CARDINAL, 32, 1, &state);
121    else
122       check = xcb_delete_property_checked(conn, drawable, reply->atom);
123 
124    xcb_discard_reply(conn, check.sequence);
125    free(reply);
126 }
127 
128 /* Get red channel mask for given drawable at given depth. */
129 static unsigned int
dri3_get_red_mask_for_depth(struct loader_dri3_drawable * draw,int depth)130 dri3_get_red_mask_for_depth(struct loader_dri3_drawable *draw, int depth)
131 {
132    xcb_visualtype_t *visual = get_xcb_visualtype_for_depth(draw, depth);
133 
134    if (visual)
135       return visual->red_mask;
136 
137    return 0;
138 }
139 
140 /**
141  * Get and lock (for use with the current thread) a dri context associated
142  * with the drawable's dri screen. The context is intended to be used with
143  * the dri image extension's blitImage method.
144  *
145  * \param draw[in]  Pointer to the drawable whose dri screen we want a
146  * dri context for.
147  * \return A dri context or NULL if context creation failed.
148  *
149  * When the caller is done with the context (even if the context returned was
150  * NULL), the caller must call loader_dri3_blit_context_put.
151  */
152 static __DRIcontext *
loader_dri3_blit_context_get(struct loader_dri3_drawable * draw)153 loader_dri3_blit_context_get(struct loader_dri3_drawable *draw)
154 {
155    simple_mtx_lock(&blit_context.mtx);
156 
157    if (blit_context.ctx && blit_context.cur_screen != draw->dri_screen_render_gpu) {
158       driDestroyContext(blit_context.ctx);
159       blit_context.ctx = NULL;
160    }
161 
162    if (!blit_context.ctx) {
163       blit_context.ctx = driCreateNewContext(draw->dri_screen_render_gpu,
164                                                            NULL, NULL, NULL);
165       blit_context.cur_screen = draw->dri_screen_render_gpu;
166    }
167 
168    return blit_context.ctx;
169 }
170 
171 /**
172  * Release (for use with other threads) a dri context previously obtained using
173  * loader_dri3_blit_context_get.
174  */
175 static void
loader_dri3_blit_context_put(void)176 loader_dri3_blit_context_put(void)
177 {
178    simple_mtx_unlock(&blit_context.mtx);
179 }
180 
181 /**
182  * Blit (parts of) the contents of a DRI image to another dri image
183  *
184  * \param draw[in]  The drawable which owns the images.
185  * \param dst[in]  The destination image.
186  * \param src[in]  The source image.
187  * \param dstx0[in]  Start destination coordinate.
188  * \param dsty0[in]  Start destination coordinate.
189  * \param width[in]  Blit width.
190  * \param height[in] Blit height.
191  * \param srcx0[in]  Start source coordinate.
192  * \param srcy0[in]  Start source coordinate.
193  * \param flush_flag[in]  Image blit flush flag.
194  * \return true iff successful.
195  */
196 static bool
loader_dri3_blit_image(struct loader_dri3_drawable * draw,__DRIimage * dst,__DRIimage * src,int dstx0,int dsty0,int width,int height,int srcx0,int srcy0,int flush_flag)197 loader_dri3_blit_image(struct loader_dri3_drawable *draw,
198                        __DRIimage *dst, __DRIimage *src,
199                        int dstx0, int dsty0, int width, int height,
200                        int srcx0, int srcy0, int flush_flag)
201 {
202    __DRIcontext *dri_context;
203    bool use_blit_context = false;
204 
205    dri_context = draw->vtable->get_dri_context(draw);
206 
207    if (!dri_context || !draw->vtable->in_current_context(draw)) {
208       dri_context = loader_dri3_blit_context_get(draw);
209       use_blit_context = true;
210       flush_flag |= __BLIT_FLAG_FLUSH;
211    }
212 
213    if (dri_context)
214       dri2_blit_image(dri_context, dst, src, dstx0, dsty0,
215                      width, height, srcx0, srcy0,
216                      width, height, flush_flag);
217 
218    if (use_blit_context)
219       loader_dri3_blit_context_put();
220 
221    return dri_context != NULL;
222 }
223 
/* Reset the buffer's shared-memory fence to the untriggered state.  The
 * connection parameter is unused; kept for symmetry with the other fence
 * helpers.
 */
static inline void
dri3_fence_reset(xcb_connection_t *c, struct loader_dri3_buffer *buffer)
{
   xshmfence_reset(buffer->shm_fence);
}
229 
/* Trigger the buffer's shared-memory fence directly from the client side. */
static inline void
dri3_fence_set(struct loader_dri3_buffer *buffer)
{
   xshmfence_trigger(buffer->shm_fence);
}
235 
/* Ask the X server to trigger the buffer's sync fence. */
static inline void
dri3_fence_trigger(xcb_connection_t *c, struct loader_dri3_buffer *buffer)
{
   xcb_sync_trigger_fence(c, buffer->sync_fence);
}
241 
/* Block until the buffer's fence has been triggered.  The connection is
 * flushed first so a pending server-side trigger request can be processed;
 * if a drawable is supplied, its queued Present events are drained after
 * the wait (under the drawable mutex).
 */
static inline void
dri3_fence_await(xcb_connection_t *c, struct loader_dri3_drawable *draw,
                 struct loader_dri3_buffer *buffer)
{
   xcb_flush(c);
   xshmfence_await(buffer->shm_fence);
   if (draw) {
      mtx_lock(&draw->mtx);
      dri3_flush_present_events(draw);
      mtx_unlock(&draw->mtx);
   }
}
254 
255 static void
dri3_update_max_num_back(struct loader_dri3_drawable * draw)256 dri3_update_max_num_back(struct loader_dri3_drawable *draw)
257 {
258    switch (draw->last_present_mode) {
259    case XCB_PRESENT_COMPLETE_MODE_FLIP: {
260       if (draw->swap_interval == 0)
261          draw->max_num_back = 4;
262       else
263          draw->max_num_back = 3;
264 
265       assert(draw->max_num_back <= LOADER_DRI3_MAX_BACK);
266       break;
267    }
268 
269    case XCB_PRESENT_COMPLETE_MODE_SKIP:
270       break;
271 
272    default:
273       draw->max_num_back = 2;
274    }
275 }
276 
/** loader_dri3_set_swap_interval
 *
 * Update the drawable's swap interval, serializing against all in-flight
 * swaps when the value actually changes.
 */
void
loader_dri3_set_swap_interval(struct loader_dri3_drawable *draw, int interval)
{
   /* Wait all previous swap done before changing swap interval.
    *
    * This is for preventing swap out of order in the following cases:
    *   1. Change from sync swap mode (>0) to async mode (=0), so async swap occurs
    *      before previous pending sync swap.
    *   2. Change from value A to B and A > B, so the target_msc for the previous
    *      pending swap may be bigger than newer swap.
    *
    * PS. changing from value A to B and A < B won't cause swap out of order but
    * may still gets wrong target_msc value at the beginning.
    */
   if (draw->swap_interval != interval)
      loader_dri3_swapbuffer_barrier(draw);

   draw->swap_interval = interval;
}
296 
297 static void
dri3_set_render_buffer(struct loader_dri3_drawable * draw,int buf_id,struct loader_dri3_buffer * buffer)298 dri3_set_render_buffer(struct loader_dri3_drawable *draw, int buf_id,
299                        struct loader_dri3_buffer *buffer)
300 {
301    if (buf_id != LOADER_DRI3_FRONT_ID && !draw->buffers[buf_id])
302       draw->cur_num_back++;
303 
304    draw->buffers[buf_id] = buffer;
305 }
306 
/** dri3_free_render_buffer
 *
 * Free everything associated with one render buffer including pixmap, fence
 * stuff and the driver image
 */
static void
dri3_free_render_buffer(struct loader_dri3_drawable *draw,
                        int buf_id)
{
   struct loader_dri3_buffer *buffer = draw->buffers[buf_id];

   /* Slot may already be empty. */
   if (!buffer)
      return;

   /* Only destroy the pixmap if this buffer created it. */
   if (buffer->own_pixmap)
      xcb_free_pixmap(draw->conn, buffer->pixmap);
   xcb_sync_destroy_fence(draw->conn, buffer->sync_fence);
   xshmfence_unmap_shm(buffer->shm_fence);
   dri2_destroy_image(buffer->image);
   if (buffer->linear_buffer)
      dri2_destroy_image(buffer->linear_buffer);
   free(buffer);

   draw->buffers[buf_id] = NULL;

   /* Keep the populated-back-buffer count in sync with the slots. */
   if (buf_id != LOADER_DRI3_FRONT_ID)
      draw->cur_num_back--;
}
335 
/** loader_dri3_drawable_fini
 *
 * Tear down a loader_dri3_drawable: the DRI drawable, all render buffers,
 * the Present special-event registration, the damage region, and the
 * synchronization primitives created by loader_dri3_drawable_init.
 */
void
loader_dri3_drawable_fini(struct loader_dri3_drawable *draw)
{
   int i;

   driDestroyDrawable(draw->dri_drawable);

   for (i = 0; i < ARRAY_SIZE(draw->buffers); i++)
      dri3_free_render_buffer(draw, i);

   if (draw->special_event) {
      /* Ask the server to stop sending Present events before dropping the
       * special-event queue; any error is deliberately discarded.
       */
      xcb_void_cookie_t cookie =
         xcb_present_select_input_checked(draw->conn, draw->eid, draw->drawable,
                                          XCB_PRESENT_EVENT_MASK_NO_EVENT);

      xcb_discard_reply(draw->conn, cookie.sequence);
      xcb_unregister_for_special_event(draw->conn, draw->special_event);
   }

   if (draw->region)
      xcb_xfixes_destroy_region(draw->conn, draw->region);

   cnd_destroy(&draw->event_cnd);
   mtx_destroy(&draw->mtx);
}
361 
/** loader_dri3_drawable_init
 *
 * Initialize a loader_dri3_drawable for the given X drawable: record the
 * configuration, create the synchronization primitives and the DRI
 * drawable, and query the drawable's initial geometry from the server.
 *
 * \return 0 on success, 1 on failure (DRI drawable creation or geometry
 * query failed).
 */
int
loader_dri3_drawable_init(xcb_connection_t *conn,
                          xcb_drawable_t drawable,
                          enum loader_dri3_drawable_type type,
                          __DRIscreen *dri_screen_render_gpu,
                          __DRIscreen *dri_screen_display_gpu,
                          bool multiplanes_available,
                          bool prefer_back_buffer_reuse,
                          const __DRIconfig *dri_config,
                          const struct loader_dri3_vtable *vtable,
                          struct loader_dri3_drawable *draw)
{
   xcb_get_geometry_cookie_t cookie;
   xcb_get_geometry_reply_t *reply;
   xcb_generic_error_t *error;

   draw->conn = conn;
   draw->vtable = vtable;
   draw->drawable = drawable;
   draw->type = type;
   draw->region = 0;
   draw->dri_screen_render_gpu = dri_screen_render_gpu;
   draw->dri_screen_display_gpu = dri_screen_display_gpu;
   draw->multiplanes_available = multiplanes_available;
   draw->prefer_back_buffer_reuse = prefer_back_buffer_reuse;
   draw->queries_buffer_age = false;

   draw->have_back = 0;
   draw->have_fake_front = 0;
   draw->first_init = true;
   draw->adaptive_sync = false;
   draw->adaptive_sync_active = false;
   draw->block_on_depleted_buffers = false;

   /* -1 means "no pending blit source". */
   draw->cur_blit_source = -1;
   draw->back_format = DRM_FORMAT_INVALID;
   mtx_init(&draw->mtx, mtx_plain);
   cnd_init(&draw->event_cnd);

   /* Read driconf options that affect this drawable's behavior. */
   {
      unsigned char adaptive_sync = 0;
      unsigned char block_on_depleted_buffers = 0;

      dri2GalliumConfigQueryb(draw->dri_screen_render_gpu,
                                      "adaptive_sync",
                                      &adaptive_sync);

      draw->adaptive_sync = adaptive_sync;

      dri2GalliumConfigQueryb(draw->dri_screen_render_gpu,
                                      "block_on_depleted_buffers",
                                      &block_on_depleted_buffers);

      draw->block_on_depleted_buffers = block_on_depleted_buffers;
   }

   /* With adaptive sync disabled, make sure a stale _VARIABLE_REFRESH
    * property is removed from the window.
    */
   if (!draw->adaptive_sync)
      set_adaptive_sync_property(conn, draw->drawable, false);

   draw->swap_interval = dri_get_initial_swap_interval(draw->dri_screen_render_gpu);

   dri3_update_max_num_back(draw);

   /* Create a new drawable */
   draw->dri_drawable = dri_create_drawable(dri_screen_render_gpu, dri_config,
                                            type == LOADER_DRI3_DRAWABLE_PIXMAP, draw);

   if (!draw->dri_drawable)
      return 1;

   cookie = xcb_get_geometry(draw->conn, draw->drawable);
   reply = xcb_get_geometry_reply(draw->conn, cookie, &error);
   if (reply == NULL || error != NULL) {
      driDestroyDrawable(draw->dri_drawable);
      return 1;
   }

   draw->screen = get_screen_for_root(draw->conn, reply->root);
   draw->width = reply->width;
   draw->height = reply->height;
   draw->depth = reply->depth;
   draw->vtable->set_drawable_size(draw, draw->width, draw->height);
   free(reply);

   /*
    * Make sure server has the same swap interval we do for the new
    * drawable.
    */
   loader_dri3_set_swap_interval(draw, draw->swap_interval);

   return 0;
}
454 
/* XXX this belongs in presentproto */
#ifndef PresentWindowDestroyed
#define PresentWindowDestroyed (1 << 0)
#endif
/*
 * Process one Present event
 *
 * Consumes (frees) the event in all cases.  Returns false only when the
 * window was destroyed; true otherwise.
 */
static bool
dri3_handle_present_event(struct loader_dri3_drawable *draw,
                          xcb_present_generic_event_t *ge)
{
   switch (ge->evtype) {
   case XCB_PRESENT_CONFIGURE_NOTIFY: {
      xcb_present_configure_notify_event_t *ce = (void *) ge;
      if (ce->pixmap_flags & PresentWindowDestroyed) {
         free(ge);
         return false;
      }

      /* Propagate the new size to the DRI drawable. */
      draw->width = ce->width;
      draw->height = ce->height;
      draw->vtable->set_drawable_size(draw, draw->width, draw->height);
      dri_invalidate_drawable(draw->dri_drawable);
      break;
   }
   case XCB_PRESENT_COMPLETE_NOTIFY: {
      xcb_present_complete_notify_event_t *ce = (void *) ge;

      /* Compute the processed SBC number from the received 32-bit serial number
       * merged with the upper 32-bits of the sent 64-bit serial number while
       * checking for wrap.
       */
      if (ce->kind == XCB_PRESENT_COMPLETE_KIND_PIXMAP) {
         uint64_t recv_sbc = (draw->send_sbc & 0xffffffff00000000LL) | ce->serial;

         /* Only assume wraparound if that results in exactly the previous
          * SBC + 1, otherwise ignore received SBC > sent SBC (those are
          * probably from a previous loader_dri3_drawable instance) to avoid
          * calculating bogus target MSC values in loader_dri3_swap_buffers_msc
          */
         if (recv_sbc <= draw->send_sbc)
            draw->recv_sbc = recv_sbc;
         else if (recv_sbc == (draw->recv_sbc + 0x100000001ULL))
            draw->recv_sbc = recv_sbc - 0x100000000ULL;

         /* When moving from flip to copy, we assume that we can allocate in
          * a more optimal way if we don't need to cater for the display
          * controller.
          */
         if (ce->mode == XCB_PRESENT_COMPLETE_MODE_COPY &&
             draw->last_present_mode == XCB_PRESENT_COMPLETE_MODE_FLIP) {
            for (int b = 0; b < ARRAY_SIZE(draw->buffers); b++) {
               if (draw->buffers[b])
                  draw->buffers[b]->reallocate = true;
            }
         }

         /* If the server tells us that our allocation is suboptimal, we
          * reallocate once.
          */
#ifdef HAVE_X11_DRM
         if (ce->mode == XCB_PRESENT_COMPLETE_MODE_SUBOPTIMAL_COPY &&
             draw->last_present_mode != ce->mode) {
            for (int b = 0; b < ARRAY_SIZE(draw->buffers); b++) {
               if (draw->buffers[b])
                  draw->buffers[b]->reallocate = true;
            }
         }
#endif
         draw->last_present_mode = ce->mode;

         draw->ust = ce->ust;
         draw->msc = ce->msc;
      } else if (ce->serial == draw->eid) {
         /* NotifyMSC completion for this drawable's event id. */
         draw->notify_ust = ce->ust;
         draw->notify_msc = ce->msc;
      }
      break;
   }
   case XCB_PRESENT_EVENT_IDLE_NOTIFY: {
      xcb_present_idle_notify_event_t *ie = (void *) ge;
      int b;

      /* The server is done with this pixmap; mark the matching buffer idle. */
      for (b = 0; b < ARRAY_SIZE(draw->buffers); b++) {
         struct loader_dri3_buffer *buf = draw->buffers[b];

         if (buf && buf->pixmap == ie->pixmap)
            buf->busy = 0;
      }
      break;
   }
   }
   free(ge);
   return true;
}
550 
/* Wait for (and process) one Present special event.  Must be called with
 * draw->mtx held; the mutex is temporarily dropped while blocking in
 * xcb_wait_for_special_event so other threads can use the drawable.
 *
 * Returns false on connection error or window destruction; true otherwise
 * (including the case where another thread consumed the event for us, in
 * which case the caller must re-test its condition).
 */
static bool
dri3_wait_for_event_locked(struct loader_dri3_drawable *draw,
                           unsigned *full_sequence)
{
   xcb_generic_event_t *ev;
   xcb_present_generic_event_t *ge;

   xcb_flush(draw->conn);

   /* Only have one thread waiting for events at a time */
   if (draw->has_event_waiter) {
      cnd_wait(&draw->event_cnd, &draw->mtx);
      if (full_sequence)
         *full_sequence = draw->last_special_event_sequence;
      /* Another thread has updated the protected info, so retest. */
      return true;
   } else {
      draw->has_event_waiter = true;
      /* Allow other threads access to the drawable while we're waiting. */
      mtx_unlock(&draw->mtx);
      ev = xcb_wait_for_special_event(draw->conn, draw->special_event);
      mtx_lock(&draw->mtx);
      draw->has_event_waiter = false;
      cnd_broadcast(&draw->event_cnd);
   }
   if (!ev)
      return false;
   draw->last_special_event_sequence = ev->full_sequence;
   if (full_sequence)
      *full_sequence = ev->full_sequence;
   ge = (void *) ev;
   return dri3_handle_present_event(draw, ge);
}
584 
/** loader_dri3_wait_for_msc
 *
 * Get the X server to send an event when the target msc/divisor/remainder is
 * reached.
 *
 * \return true on success; false if waiting for the event failed (e.g. the
 * connection broke or the window was destroyed).
 */
bool
loader_dri3_wait_for_msc(struct loader_dri3_drawable *draw,
                         int64_t target_msc,
                         int64_t divisor, int64_t remainder,
                         int64_t *ust, int64_t *msc, int64_t *sbc)
{
   xcb_void_cookie_t cookie = xcb_present_notify_msc(draw->conn,
                                                     draw->drawable,
                                                     draw->eid,
                                                     target_msc,
                                                     divisor,
                                                     remainder);
   unsigned full_sequence;

   mtx_lock(&draw->mtx);

   /* Wait for the event */
   do {
      if (!dri3_wait_for_event_locked(draw, &full_sequence)) {
         mtx_unlock(&draw->mtx);
         return false;
      }
      /* Keep waiting until the completion for our own NotifyMSC request
       * arrives and the notification MSC has reached the target.
       */
   } while (full_sequence != cookie.sequence || draw->notify_msc < target_msc);

   *ust = draw->notify_ust;
   *msc = draw->notify_msc;
   *sbc = draw->recv_sbc;
   mtx_unlock(&draw->mtx);

   return true;
}
621 
/** loader_dri3_wait_for_sbc
 *
 * Wait for the completed swap buffer count to reach the specified
 * target. Presumably the application knows that this will be reached with
 * outstanding complete events, or we're going to be here awhile.
 *
 * \return 1 on success, 0 if waiting for events failed.
 */
int
loader_dri3_wait_for_sbc(struct loader_dri3_drawable *draw,
                         int64_t target_sbc, int64_t *ust,
                         int64_t *msc, int64_t *sbc)
{
   /* From the GLX_OML_sync_control spec:
    *
    *     "If <target_sbc> = 0, the function will block until all previous
    *      swaps requested with glXSwapBuffersMscOML for that window have
    *      completed."
    */
   mtx_lock(&draw->mtx);
   if (!target_sbc)
      target_sbc = draw->send_sbc;

   while (draw->recv_sbc < target_sbc) {
      if (!dri3_wait_for_event_locked(draw, NULL)) {
         mtx_unlock(&draw->mtx);
         return 0;
      }
   }

   *ust = draw->ust;
   *msc = draw->msc;
   *sbc = draw->recv_sbc;
   mtx_unlock(&draw->mtx);
   return 1;
}
656 
657 /** loader_dri3_find_back
658  *
659  * Find an idle back buffer. If there isn't one, then
660  * wait for a present idle notify event from the X server
661  */
662 static int
dri3_find_back(struct loader_dri3_drawable * draw,bool prefer_a_different)663 dri3_find_back(struct loader_dri3_drawable *draw, bool prefer_a_different)
664 {
665    struct loader_dri3_buffer *buffer;
666    int b;
667    int max_num;
668    int best_id = -1;
669    uint64_t best_swap = 0;
670 
671    mtx_lock(&draw->mtx);
672 
673    if (!prefer_a_different) {
674       /* Increase the likelyhood of reusing current buffer */
675       dri3_flush_present_events(draw);
676 
677       /* Reuse current back buffer if it's idle */
678       buffer = draw->buffers[draw->cur_back];
679       if (buffer && !buffer->busy) {
680          best_id = draw->cur_back;
681          goto unlock;
682       }
683    }
684 
685    /* Check whether we need to reuse the current back buffer as new back.
686     * In that case, wait until it's not busy anymore.
687     */
688    if (draw->cur_blit_source != -1) {
689       max_num = 1;
690       draw->cur_blit_source = -1;
691    } else {
692       max_num = LOADER_DRI3_MAX_BACK;
693    }
694 
695    /* In a DRI_PRIME situation, if prefer_a_different is true, we first try
696     * to find an idle buffer that is not the last used one.
697     * This is useful if we receive a XCB_PRESENT_EVENT_IDLE_NOTIFY event
698     * for a pixmap but it's not actually idle (eg: the DRI_PRIME blit is
699     * still in progress).
700     * Unigine Superposition hits this and this allows to use 2 back buffers
701     * instead of reusing the same one all the time, causing the next frame
702     * to wait for the copy to finish.
703     */
704    int current_back_id = draw->cur_back;
705    do {
706       /* Find idle buffer with lowest buffer age, or an unallocated slot */
707       for (b = 0; b < max_num; b++) {
708          int id = LOADER_DRI3_BACK_ID((b + current_back_id) % LOADER_DRI3_MAX_BACK);
709 
710          buffer = draw->buffers[id];
711          if (buffer) {
712             if (!buffer->busy &&
713                 (!prefer_a_different || id != current_back_id) &&
714                 (best_id == -1 || buffer->last_swap > best_swap)) {
715                best_id = id;
716                best_swap = buffer->last_swap;
717             }
718          } else if (best_id == -1 &&
719                     draw->cur_num_back < draw->max_num_back) {
720             best_id = id;
721          }
722       }
723 
724       /* Prefer re-using the same buffer over blocking */
725       if (prefer_a_different && best_id == -1 &&
726           !draw->buffers[LOADER_DRI3_BACK_ID(current_back_id)]->busy)
727          best_id = current_back_id;
728    } while (best_id == -1 && dri3_wait_for_event_locked(draw, NULL));
729 
730    if (best_id != -1)
731       draw->cur_back = best_id;
732 
733 unlock:
734    mtx_unlock(&draw->mtx);
735    return best_id;
736 }
737 
738 static xcb_gcontext_t
dri3_drawable_gc(struct loader_dri3_drawable * draw)739 dri3_drawable_gc(struct loader_dri3_drawable *draw)
740 {
741    if (!draw->gc) {
742       uint32_t v = 0;
743       xcb_create_gc(draw->conn,
744                     (draw->gc = xcb_generate_id(draw->conn)),
745                     draw->drawable,
746                     XCB_GC_GRAPHICS_EXPOSURES,
747                     &v);
748    }
749    return draw->gc;
750 }
751 
752 
753 static struct loader_dri3_buffer *
dri3_back_buffer(struct loader_dri3_drawable * draw)754 dri3_back_buffer(struct loader_dri3_drawable *draw)
755 {
756    return draw->buffers[LOADER_DRI3_BACK_ID(draw->cur_back)];
757 }
758 
/* Return the (fake) front buffer (may be NULL). */
static struct loader_dri3_buffer *
dri3_front_buffer(struct loader_dri3_drawable *draw)
{
   return draw->buffers[LOADER_DRI3_FRONT_ID];
}
764 
765 static void
dri3_copy_area(xcb_connection_t * c,xcb_drawable_t src_drawable,xcb_drawable_t dst_drawable,xcb_gcontext_t gc,int16_t src_x,int16_t src_y,int16_t dst_x,int16_t dst_y,uint16_t width,uint16_t height)766 dri3_copy_area(xcb_connection_t *c,
767                xcb_drawable_t    src_drawable,
768                xcb_drawable_t    dst_drawable,
769                xcb_gcontext_t    gc,
770                int16_t           src_x,
771                int16_t           src_y,
772                int16_t           dst_x,
773                int16_t           dst_y,
774                uint16_t          width,
775                uint16_t          height)
776 {
777    xcb_void_cookie_t cookie;
778 
779    cookie = xcb_copy_area_checked(c,
780                                   src_drawable,
781                                   dst_drawable,
782                                   gc,
783                                   src_x,
784                                   src_y,
785                                   dst_x,
786                                   dst_y,
787                                   width,
788                                   height);
789    xcb_discard_reply(c, cookie.sequence);
790 }
791 
792 /**
793  * Asks the driver to flush any queued work necessary for serializing with the
794  * X command stream, and optionally the slightly more strict requirement of
795  * glFlush() equivalence (which would require flushing even if nothing had
796  * been drawn to a window system framebuffer, for example).
797  */
798 void
loader_dri3_flush(struct loader_dri3_drawable * draw,unsigned flags,enum __DRI2throttleReason throttle_reason)799 loader_dri3_flush(struct loader_dri3_drawable *draw,
800                   unsigned flags,
801                   enum __DRI2throttleReason throttle_reason)
802 {
803    /* NEED TO CHECK WHETHER CONTEXT IS NULL */
804    __DRIcontext *dri_context = draw->vtable->get_dri_context(draw);
805 
806    if (dri_context) {
807       dri_flush(dri_context, draw->dri_drawable, flags, throttle_reason);
808    }
809 }
810 
/** loader_dri3_copy_sub_buffer
 *
 * Copy a sub-rectangle of the back buffer to the window (GLX_MESA_copy_sub_buffer
 * semantics).  The y coordinate is given in GL convention (origin at the
 * bottom) and converted to X convention here.  Also refreshes the fake
 * front buffer, if one exists.
 */
void
loader_dri3_copy_sub_buffer(struct loader_dri3_drawable *draw,
                            int x, int y,
                            int width, int height,
                            bool flush)
{
   struct loader_dri3_buffer *back;
   unsigned flags = __DRI2_FLUSH_DRAWABLE;

   /* Check we have the right attachments */
   if (!draw->have_back || draw->type != LOADER_DRI3_DRAWABLE_WINDOW)
      return;

   if (flush)
      flags |= __DRI2_FLUSH_CONTEXT;
   loader_dri3_flush(draw, flags, __DRI2_THROTTLE_COPYSUBBUFFER);

   back = dri3_find_back_alloc(draw);
   if (!back)
      return;

   /* Convert from GL (bottom-left origin) to X (top-left origin). */
   y = draw->height - y - height;

   if (draw->dri_screen_render_gpu != draw->dri_screen_display_gpu) {
      /* Update the linear buffer part of the back buffer
       * for the dri3_copy_area operation
       */
      (void) loader_dri3_blit_image(draw,
                                    back->linear_buffer,
                                    back->image,
                                    0, 0, back->width, back->height,
                                    0, 0, __BLIT_FLAG_FLUSH);
   }

   loader_dri3_swapbuffer_barrier(draw);
   dri3_fence_reset(draw->conn, back);
   dri3_copy_area(draw->conn,
                  back->pixmap,
                  draw->drawable,
                  dri3_drawable_gc(draw),
                  x, y, x, y, width, height);
   dri3_fence_trigger(draw->conn, back);
   /* Refresh the fake front (if present) after we just damaged the real
    * front.
    */
   if (draw->have_fake_front &&
       !loader_dri3_blit_image(draw,
                               dri3_front_buffer(draw)->image,
                               back->image,
                               x, y, width, height,
                               x, y, __BLIT_FLAG_FLUSH) &&
       draw->dri_screen_render_gpu == draw->dri_screen_display_gpu) {
      /* Blit failed on a single-GPU setup: fall back to a server-side copy
       * into the fake front's pixmap, fenced so we can wait for it below.
       */
      dri3_fence_reset(draw->conn, dri3_front_buffer(draw));
      dri3_copy_area(draw->conn,
                     back->pixmap,
                     dri3_front_buffer(draw)->pixmap,
                     dri3_drawable_gc(draw),
                     x, y, x, y, width, height);
      dri3_fence_trigger(draw->conn, dri3_front_buffer(draw));
      dri3_fence_await(draw->conn, NULL, dri3_front_buffer(draw));
   }
   dri3_fence_await(draw->conn, draw, back);
}
874 
875 void
loader_dri3_copy_drawable(struct loader_dri3_drawable * draw,xcb_drawable_t dest,xcb_drawable_t src)876 loader_dri3_copy_drawable(struct loader_dri3_drawable *draw,
877                           xcb_drawable_t dest,
878                           xcb_drawable_t src)
879 {
880    loader_dri3_flush(draw, __DRI2_FLUSH_DRAWABLE, __DRI2_THROTTLE_COPYSUBBUFFER);
881 
882    struct loader_dri3_buffer *front = dri3_front_buffer(draw);
883    if (front)
884       dri3_fence_reset(draw->conn, front);
885 
886    dri3_copy_area(draw->conn,
887                   src, dest,
888                   dri3_drawable_gc(draw),
889                   0, 0, 0, 0, draw->width, draw->height);
890 
891    if (front) {
892       dri3_fence_trigger(draw->conn, front);
893       dri3_fence_await(draw->conn, draw, front);
894    }
895 }
896 
897 void
loader_dri3_wait_x(struct loader_dri3_drawable * draw)898 loader_dri3_wait_x(struct loader_dri3_drawable *draw)
899 {
900    struct loader_dri3_buffer *front;
901 
902    if (draw == NULL || !draw->have_fake_front)
903       return;
904 
905    front = dri3_front_buffer(draw);
906 
907    loader_dri3_copy_drawable(draw, front->pixmap, draw->drawable);
908 
909    /* In the psc->is_different_gpu case, the linear buffer has been updated,
910     * but not yet the tiled buffer.
911     * Copy back to the tiled buffer we use for rendering.
912     * Note that we don't need flushing.
913     */
914    if (draw->dri_screen_render_gpu != draw->dri_screen_display_gpu)
915       (void) loader_dri3_blit_image(draw,
916                                     front->image,
917                                     front->linear_buffer,
918                                     0, 0, front->width, front->height,
919                                     0, 0, 0);
920 }
921 
922 void
loader_dri3_wait_gl(struct loader_dri3_drawable * draw)923 loader_dri3_wait_gl(struct loader_dri3_drawable *draw)
924 {
925    struct loader_dri3_buffer *front;
926 
927    if (draw == NULL || !draw->have_fake_front)
928       return;
929 
930    front = dri3_front_buffer(draw);
931    /* TODO: `front` is not supposed to be NULL here, fix the actual bug
932     * https://gitlab.freedesktop.org/mesa/mesa/-/issues/8982
933     */
934    if (!front)
935       return;
936 
937    /* In the psc->is_different_gpu case, we update the linear_buffer
938     * before updating the real front.
939     */
940    if (draw->dri_screen_render_gpu != draw->dri_screen_display_gpu)
941       (void) loader_dri3_blit_image(draw,
942                                     front->linear_buffer,
943                                     front->image,
944                                     0, 0, front->width, front->height,
945                                     0, 0, __BLIT_FLAG_FLUSH);
946    loader_dri3_swapbuffer_barrier(draw);
947    loader_dri3_copy_drawable(draw, draw->drawable, front->pixmap);
948 }
949 
950 /** dri3_flush_present_events
951  *
952  * Process any present events that have been received from the X server
953  */
954 static void
dri3_flush_present_events(struct loader_dri3_drawable * draw)955 dri3_flush_present_events(struct loader_dri3_drawable *draw)
956 {
957    /* Check to see if any configuration changes have occurred
958     * since we were last invoked
959     */
960    if (draw->has_event_waiter)
961       return;
962 
963    if (draw->special_event) {
964       xcb_generic_event_t    *ev;
965 
966       while ((ev = xcb_poll_for_special_event(draw->conn,
967                                               draw->special_event)) != NULL) {
968          xcb_present_generic_event_t *ge = (void *) ev;
969          if (!dri3_handle_present_event(draw, ge))
970             break;
971       }
972    }
973 }
974 
/** loader_dri3_swap_buffers_msc
 *
 * Make the current back buffer visible using the present extension.
 *
 * \param draw         the drawable to present
 * \param target_msc   earliest MSC at which to present; 0 together with
 *                     divisor == remainder == 0 means glXSwapBuffers()
 *                     semantics
 * \param divisor      MSC divisor (GLX_OML_sync_control semantics)
 * \param remainder    MSC remainder (GLX_OML_sync_control semantics)
 * \param flush_flags  __DRI2_FLUSH_* flags forwarded to the driver flush
 * \param rects        damage rectangles as (x, y, w, h) quadruples in
 *                     GL-style bottom-left coordinates, may be NULL
 * \param n_rects      number of damage rectangles
 * \param force_copy   preserve the back buffer contents by forcing a copy
 *                     instead of a flip (used by EGL)
 * \return the swap buffer count (sbc) of this swap, or 0 on no-op/error
 */
int64_t
loader_dri3_swap_buffers_msc(struct loader_dri3_drawable *draw,
                             int64_t target_msc, int64_t divisor,
                             int64_t remainder, unsigned flush_flags,
                             const int *rects, int n_rects,
                             bool force_copy)
{
   struct loader_dri3_buffer *back;
   int64_t ret = 0;
   bool wait_for_next_buffer = false;

   /* GLX spec:
    *   void glXSwapBuffers(Display *dpy, GLXDrawable draw);
    *   This operation is a no-op if draw was created with a non-double-buffered
    *   GLXFBConfig, or if draw is a GLXPixmap.
    *   ...
    *   GLX pixmaps may be created with a config that includes back buffers and
    *   stereoscopic buffers. However, glXSwapBuffers is ignored for these pixmaps.
    *   ...
    *   It is possible to create a pbuffer with back buffers and to swap the
    *   front and back buffers by calling glXSwapBuffers.
    *
    * EGL spec:
    *   EGLBoolean eglSwapBuffers(EGLDisplay dpy, EGLSurface surface);
    *   If surface is a back-buffered window surface, then the color buffer is
    *   copied to the native window associated with that surface. If surface is
    *   a single-buffered window, pixmap, or pbuffer surface, eglSwapBuffers has
    *   no effect.
    *
    * SwapBuffer effect:
    *       |           GLX             |           EGL            |
    *       | window | pixmap | pbuffer | window | pixmap | pbuffer|
    *-------+--------+--------+---------+--------+--------+--------+
    * single|  nop   |  nop   |   nop   |  nop   |  nop   |   nop  |
    * double|  swap  |  nop   |   swap  |  swap  |  NA    |   NA   |
    */
   if (!draw->have_back || draw->type == LOADER_DRI3_DRAWABLE_PIXMAP)
      return ret;

   draw->vtable->flush_drawable(draw, flush_flags);

   back = dri3_find_back_alloc(draw);
   /* Could only happen when error case, like display is already closed. */
   if (!back)
      return ret;

   mtx_lock(&draw->mtx);

   /* Ask the server to enable variable refresh on this window the first
    * time we present with adaptive sync requested.
    */
   if (draw->adaptive_sync && !draw->adaptive_sync_active) {
      set_adaptive_sync_property(draw->conn, draw->drawable, true);
      draw->adaptive_sync_active = true;
   }

   if (draw->dri_screen_render_gpu != draw->dri_screen_display_gpu) {
      /* Update the linear buffer before presenting the pixmap */
      (void) loader_dri3_blit_image(draw,
                                    back->linear_buffer,
                                    back->image,
                                    0, 0, back->width, back->height,
                                    0, 0, __BLIT_FLAG_FLUSH);
   }

   /* If we need to preload the new back buffer, remember the source.
    * The force_copy parameter is used by EGL to attempt to preserve
    * the back buffer across a call to this function.
    */
   if (force_copy)
      draw->cur_blit_source = LOADER_DRI3_BACK_ID(draw->cur_back);

   /* Exchange the back and fake front. Even though the server knows about these
    * buffers, it has no notion of back and fake front.
    */
   if (draw->have_fake_front) {
      struct loader_dri3_buffer *tmp;

      tmp = dri3_front_buffer(draw);
      draw->buffers[LOADER_DRI3_FRONT_ID] = back;
      draw->buffers[LOADER_DRI3_BACK_ID(draw->cur_back)] = tmp;

      if (force_copy)
         draw->cur_blit_source = LOADER_DRI3_FRONT_ID;
   }

   dri3_flush_present_events(draw);

   if (draw->type == LOADER_DRI3_DRAWABLE_WINDOW) {
      dri3_fence_reset(draw->conn, back);

      /* Compute when we want the frame shown by taking the last known
       * successful MSC and adding in a swap interval for each outstanding swap
       * request. target_msc=divisor=remainder=0 means "Use glXSwapBuffers()
       * semantic"
       */
      ++draw->send_sbc;
      if (target_msc == 0 && divisor == 0 && remainder == 0)
         target_msc = draw->msc + abs(draw->swap_interval) *
                      (draw->send_sbc - draw->recv_sbc);
      else if (divisor == 0 && remainder > 0) {
         /* From the GLX_OML_sync_control spec:
          *     "If <divisor> = 0, the swap will occur when MSC becomes
          *      greater than or equal to <target_msc>."
          *
          * Note that there's no mention of the remainder.  The Present
          * extension throws BadValue for remainder != 0 with divisor == 0, so
          * just drop the passed in value.
          */
         remainder = 0;
      }

      /* From the GLX_EXT_swap_control spec
       * and the EGL 1.4 spec (page 53):
       *
       *     "If <interval> is set to a value of 0, buffer swaps are not
       *      synchronized to a video frame."
       *
       * From GLX_EXT_swap_control_tear:
       *
       *     "If <interval> is negative, the minimum number of video frames
       *      between buffer swaps is the absolute value of <interval>. In this
       *      case, if abs(<interval>) video frames have already passed from
       *      the previous swap when the swap is ready to be performed, the
       *      swap will occur without synchronization to a video frame."
       *
       * Implementation note: It is possible to enable triple buffering
       * behaviour by not using XCB_PRESENT_OPTION_ASYNC, but this should not be
       * the default.
       */
      uint32_t options = XCB_PRESENT_OPTION_NONE;
      if (draw->swap_interval <= 0)
         options |= XCB_PRESENT_OPTION_ASYNC;

      /* If we need to populate the new back, but need to reuse the back
       * buffer slot due to lack of local blit capabilities, make sure
       * the server doesn't flip and we deadlock.
       */
      if (draw->cur_blit_source != -1)
         options |= XCB_PRESENT_OPTION_COPY;
#ifdef HAVE_X11_DRM
      if (draw->multiplanes_available)
         options |= XCB_PRESENT_OPTION_SUBOPTIMAL;
#endif
      back->busy = 1;
      back->last_swap = draw->send_sbc;

      /* Lazily create the XFixes region object; it is reused across swaps
       * and only refilled when damage rectangles are supplied.
       */
      if (!draw->region) {
         draw->region = xcb_generate_id(draw->conn);
         xcb_xfixes_create_region(draw->conn, draw->region, 0, NULL);
      }

      xcb_xfixes_region_t region = 0;
      xcb_rectangle_t xcb_rects[64];

      /* Convert damage rects from GL bottom-left to X top-left origin.
       * More than 64 rects (or none) presents the whole drawable: region
       * stays 0.
       */
      if (n_rects > 0 && n_rects <= ARRAY_SIZE(xcb_rects)) {
         for (int i = 0; i < n_rects; i++) {
            const int *rect = &rects[i * 4];
            xcb_rects[i].x = rect[0];
            xcb_rects[i].y = draw->height - rect[1] - rect[3];
            xcb_rects[i].width = rect[2];
            xcb_rects[i].height = rect[3];
         }

         region = draw->region;
         xcb_xfixes_set_region(draw->conn, region, n_rects, xcb_rects);
      }

      /* The Present serial is 32 bits wide, hence the truncating cast of
       * send_sbc.
       */
      xcb_present_pixmap(draw->conn,
                         draw->drawable,
                         back->pixmap,
                         (uint32_t) draw->send_sbc,
                         0,                                    /* valid */
                         region,                               /* update */
                         0,                                    /* x_off */
                         0,                                    /* y_off */
                         None,                                 /* target_crtc */
                         None,
                         back->sync_fence,
                         options,
                         target_msc,
                         divisor,
                         remainder, 0, NULL);
   } else {
      /* This can only be reached by double buffered GLXPbuffer. */
      assert(draw->type == LOADER_DRI3_DRAWABLE_PBUFFER);
      /* GLX does not have damage regions. */
      assert(n_rects == 0);

      /* For wait and buffer age usage. */
      draw->send_sbc++;
      draw->recv_sbc = back->last_swap = draw->send_sbc;

      /* Pixmap is imported as front buffer image when same GPU case, so just
       * locally blit back buffer image to it is enough. Otherwise front buffer
       * is a fake one which needs to be synced with pixmap by xserver remotely.
       */
      if (draw->dri_screen_render_gpu != draw->dri_screen_display_gpu ||
          !loader_dri3_blit_image(draw,
                                  dri3_front_buffer(draw)->image,
                                  back->image,
                                  0, 0, draw->width, draw->height,
                                  0, 0, __BLIT_FLAG_FLUSH)) {
         dri3_copy_area(draw->conn, back->pixmap,
                        draw->drawable,
                        dri3_drawable_gc(draw),
                        0, 0, 0, 0, draw->width, draw->height);
      }
   }

   ret = (int64_t) draw->send_sbc;

   /* Schedule a server-side back-preserving blit if necessary.
    * This happens iff all conditions below are satisfied:
    * a) We have a fake front,
    * b) We need to preserve the back buffer,
    * c) We don't have local blit capabilities.
    */
   if (draw->cur_blit_source != -1 &&
       draw->cur_blit_source != LOADER_DRI3_BACK_ID(draw->cur_back)) {
      struct loader_dri3_buffer *new_back = dri3_back_buffer(draw);
      struct loader_dri3_buffer *src = draw->buffers[draw->cur_blit_source];

      dri3_fence_reset(draw->conn, new_back);
      dri3_copy_area(draw->conn, src->pixmap,
                     new_back->pixmap,
                     dri3_drawable_gc(draw),
                     0, 0, 0, 0, draw->width, draw->height);
      dri3_fence_trigger(draw->conn, new_back);
      new_back->last_swap = src->last_swap;
   }

   xcb_flush(draw->conn);
   if (draw->stamp)
      ++(*draw->stamp);

   /* Waiting on a buffer is only sensible if all buffers are in use and the
    * client doesn't use the buffer age extension. In this case a client is
    * relying on it receiving back control immediately.
    *
    * As waiting on a buffer can at worst make us miss a frame the option has
    * to be enabled explicitly with the block_on_depleted_buffers DRI option.
    */
   wait_for_next_buffer = draw->cur_num_back == draw->max_num_back &&
      !draw->queries_buffer_age && draw->block_on_depleted_buffers;

   mtx_unlock(&draw->mtx);

   dri_invalidate_drawable(draw->dri_drawable);

   /* Clients that use up all available buffers usually regulate their drawing
    * through swapchain contention backpressure. In such a scenario the client
    * draws whenever control returns to it. Its event loop is slowed down only
    * by us waiting on buffers becoming available again.
    *
    * By waiting here on a new buffer and only then returning back to the client
    * we ensure the client begins drawing only when the next buffer is available
    * and not draw first and then wait a refresh cycle on the next available
    * buffer to show it. This way we can reduce the latency between what is
    * being drawn by the client and what is shown on the screen by one frame.
    */
   if (wait_for_next_buffer)
      dri3_find_back(draw, draw->prefer_back_buffer_reuse);

   return ret;
}
1242 
1243 int
loader_dri3_query_buffer_age(struct loader_dri3_drawable * draw)1244 loader_dri3_query_buffer_age(struct loader_dri3_drawable *draw)
1245 {
1246    struct loader_dri3_buffer *back = dri3_find_back_alloc(draw);
1247    int ret = 0;
1248 
1249    mtx_lock(&draw->mtx);
1250    draw->queries_buffer_age = true;
1251    if (back && back->last_swap != 0)
1252       ret = draw->send_sbc - back->last_swap + 1;
1253    mtx_unlock(&draw->mtx);
1254 
1255    return ret;
1256 }
1257 
1258 static uint32_t
dri3_cpp_for_fourcc(uint32_t format)1259 dri3_cpp_for_fourcc(uint32_t format) {
1260    switch (format) {
1261    case DRM_FORMAT_R8:
1262       return 1;
1263    case DRM_FORMAT_RGB565:
1264    case DRM_FORMAT_GR88:
1265       return 2;
1266    case DRM_FORMAT_XRGB8888:
1267    case DRM_FORMAT_ARGB8888:
1268    case DRM_FORMAT_ABGR8888:
1269    case DRM_FORMAT_XBGR8888:
1270    case DRM_FORMAT_XRGB2101010:
1271    case DRM_FORMAT_ARGB2101010:
1272    case DRM_FORMAT_XBGR2101010:
1273    case DRM_FORMAT_ABGR2101010:
1274    case __DRI_IMAGE_FORMAT_SARGB8:
1275    case __DRI_IMAGE_FORMAT_SABGR8:
1276    case __DRI_IMAGE_FORMAT_SXRGB8:
1277       return 4;
1278    case DRM_FORMAT_ABGR16161616:
1279    case DRM_FORMAT_XBGR16161616:
1280    case DRM_FORMAT_XBGR16161616F:
1281    case DRM_FORMAT_ABGR16161616F:
1282       return 8;
1283    case DRM_FORMAT_INVALID:
1284    default:
1285       return 0;
1286    }
1287 }
1288 
1289 /* Map format of render buffer to corresponding format for the linear_buffer
1290  * used for sharing with the display gpu of a Prime setup (== is_different_gpu).
1291  * Usually linear_format == format, except for depth >= 30 formats, where
1292  * different gpu vendors have different preferences wrt. color channel ordering.
1293  */
1294 static uint32_t
dri3_linear_format_for_format(struct loader_dri3_drawable * draw,uint32_t format)1295 dri3_linear_format_for_format(struct loader_dri3_drawable *draw, uint32_t format)
1296 {
1297    switch (format) {
1298       case  __DRI_IMAGE_FORMAT_XRGB2101010:
1299       case  __DRI_IMAGE_FORMAT_XBGR2101010:
1300          /* Different preferred formats for different hw */
1301          if (dri3_get_red_mask_for_depth(draw, 30) == 0x3ff)
1302             return __DRI_IMAGE_FORMAT_XBGR2101010;
1303          else
1304             return __DRI_IMAGE_FORMAT_XRGB2101010;
1305 
1306       case  __DRI_IMAGE_FORMAT_ARGB2101010:
1307       case  __DRI_IMAGE_FORMAT_ABGR2101010:
1308          /* Different preferred formats for different hw */
1309          if (dri3_get_red_mask_for_depth(draw, 30) == 0x3ff)
1310             return __DRI_IMAGE_FORMAT_ABGR2101010;
1311          else
1312             return __DRI_IMAGE_FORMAT_ARGB2101010;
1313 
1314       default:
1315          return format;
1316    }
1317 }
1318 
1319 #ifdef HAVE_X11_DRM
1320 static bool
has_supported_modifier(struct loader_dri3_drawable * draw,unsigned int format,uint64_t * modifiers,uint32_t count)1321 has_supported_modifier(struct loader_dri3_drawable *draw, unsigned int format,
1322                        uint64_t *modifiers, uint32_t count)
1323 {
1324    uint64_t *supported_modifiers;
1325    int32_t supported_modifiers_count;
1326    bool found = false;
1327    int i, j;
1328 
1329    if (!dri_query_dma_buf_modifiers(draw->dri_screen_render_gpu,
1330                                                format, 0, NULL, NULL,
1331                                                &supported_modifiers_count) ||
1332        supported_modifiers_count == 0)
1333       return false;
1334 
1335    supported_modifiers = malloc(supported_modifiers_count * sizeof(uint64_t));
1336    if (!supported_modifiers)
1337       return false;
1338 
1339    dri_query_dma_buf_modifiers(draw->dri_screen_render_gpu, format,
1340                                           supported_modifiers_count,
1341                                           supported_modifiers, NULL,
1342                                           &supported_modifiers_count);
1343 
1344    for (i = 0; !found && i < supported_modifiers_count; i++) {
1345       for (j = 0; !found && j < count; j++) {
1346          if (supported_modifiers[i] == modifiers[j])
1347             found = true;
1348       }
1349    }
1350 
1351    free(supported_modifiers);
1352    return found;
1353 }
1354 #endif
1355 
1356 /** loader_dri3_alloc_render_buffer
1357  *
1358  * Use the driver createImage function to construct a __DRIimage, then
1359  * get a file descriptor for that and create an X pixmap from that
1360  *
1361  * Allocate an xshmfence for synchronization
1362  */
1363 static struct loader_dri3_buffer *
dri3_alloc_render_buffer(struct loader_dri3_drawable * draw,unsigned int fourcc,int width,int height,int depth)1364 dri3_alloc_render_buffer(struct loader_dri3_drawable *draw, unsigned int fourcc,
1365                          int width, int height, int depth)
1366 {
1367    struct loader_dri3_buffer *buffer;
1368    __DRIimage *pixmap_buffer = NULL, *linear_buffer_display_gpu = NULL;
1369    int format = loader_fourcc_to_image_format(fourcc);
1370    xcb_pixmap_t pixmap;
1371    xcb_sync_fence_t sync_fence;
1372    struct xshmfence *shm_fence;
1373    int buffer_fds[4], fence_fd;
1374    int num_planes = 0;
1375    uint64_t *modifiers = NULL;
1376    uint32_t count = 0;
1377    int i, mod;
1378    int ret;
1379 
1380    /* Create an xshmfence object and
1381     * prepare to send that to the X server
1382     */
1383 
1384    fence_fd = xshmfence_alloc_shm();
1385    if (fence_fd < 0)
1386       return NULL;
1387 
1388    shm_fence = xshmfence_map_shm(fence_fd);
1389    if (shm_fence == NULL)
1390       goto no_shm_fence;
1391 
1392    /* Allocate the image from the driver
1393     */
1394    buffer = calloc(1, sizeof *buffer);
1395    if (!buffer)
1396       goto no_buffer;
1397 
1398    buffer->cpp = dri3_cpp_for_fourcc(fourcc);
1399    if (!buffer->cpp)
1400       goto no_image;
1401 
1402    if (draw->dri_screen_render_gpu == draw->dri_screen_display_gpu) {
1403 #ifdef HAVE_X11_DRM
1404       if (draw->multiplanes_available) {
1405          xcb_dri3_get_supported_modifiers_cookie_t mod_cookie;
1406          xcb_dri3_get_supported_modifiers_reply_t *mod_reply;
1407          xcb_generic_error_t *error = NULL;
1408 
1409          mod_cookie = xcb_dri3_get_supported_modifiers(draw->conn,
1410                                                        draw->window,
1411                                                        depth, buffer->cpp * 8);
1412          mod_reply = xcb_dri3_get_supported_modifiers_reply(draw->conn,
1413                                                             mod_cookie,
1414                                                             &error);
1415          if (!mod_reply)
1416             goto no_image;
1417 
1418          if (mod_reply->num_window_modifiers) {
1419             count = mod_reply->num_window_modifiers;
1420             modifiers = malloc(count * sizeof(uint64_t));
1421             if (!modifiers) {
1422                free(mod_reply);
1423                goto no_image;
1424             }
1425 
1426             memcpy(modifiers,
1427                    xcb_dri3_get_supported_modifiers_window_modifiers(mod_reply),
1428                    count * sizeof(uint64_t));
1429 
1430             if (!has_supported_modifier(draw, fourcc, modifiers, count)) {
1431                free(modifiers);
1432                count = 0;
1433                modifiers = NULL;
1434             }
1435          }
1436 
1437          if (mod_reply->num_screen_modifiers && modifiers == NULL) {
1438             count = mod_reply->num_screen_modifiers;
1439             modifiers = malloc(count * sizeof(uint64_t));
1440             if (!modifiers) {
1441                free(mod_reply);
1442                goto no_image;
1443             }
1444 
1445             memcpy(modifiers,
1446                    xcb_dri3_get_supported_modifiers_screen_modifiers(mod_reply),
1447                    count * sizeof(uint64_t));
1448          }
1449 
1450          free(mod_reply);
1451       }
1452 #endif
1453       buffer->image = dri_create_image_with_modifiers(draw->dri_screen_render_gpu,
1454                                               width, height, format,
1455                                               __DRI_IMAGE_USE_SHARE |
1456                                               __DRI_IMAGE_USE_SCANOUT |
1457                                               __DRI_IMAGE_USE_BACKBUFFER |
1458                                               (draw->is_protected_content ?
1459                                                __DRI_IMAGE_USE_PROTECTED : 0),
1460                                               modifiers, count, buffer);
1461       free(modifiers);
1462 
1463       pixmap_buffer = buffer->image;
1464 
1465       if (!buffer->image)
1466          goto no_image;
1467    } else {
1468       buffer->image =
1469          dri_create_image(draw->dri_screen_render_gpu,
1470                                        width, height, format,
1471                                        NULL, 0, 0, buffer);
1472 
1473       if (!buffer->image)
1474          goto no_image;
1475 
1476       /* if driver name is same only then dri_screen_display_gpu is set.
1477        * This check is needed because for simplicity render gpu image extension
1478        * is also used for display gpu.
1479        */
1480       if (draw->dri_screen_display_gpu) {
1481          linear_buffer_display_gpu =
1482            dri_create_image(draw->dri_screen_display_gpu,
1483                                          width, height,
1484                                          dri3_linear_format_for_format(draw, format),
1485                                          NULL, 0,
1486                                          __DRI_IMAGE_USE_SHARE |
1487                                          __DRI_IMAGE_USE_LINEAR |
1488                                          __DRI_IMAGE_USE_BACKBUFFER |
1489                                          __DRI_IMAGE_USE_SCANOUT,
1490                                          buffer);
1491          pixmap_buffer = linear_buffer_display_gpu;
1492       }
1493 
1494       if (!pixmap_buffer) {
1495          buffer->linear_buffer =
1496            dri_create_image(draw->dri_screen_render_gpu,
1497                                         width, height,
1498                                         dri3_linear_format_for_format(draw, format),
1499                                         NULL, 0,
1500                                         __DRI_IMAGE_USE_SHARE |
1501                                         __DRI_IMAGE_USE_LINEAR |
1502                                         __DRI_IMAGE_USE_BACKBUFFER |
1503                                         __DRI_IMAGE_USE_SCANOUT |
1504                                         __DRI_IMAGE_USE_PRIME_BUFFER,
1505                                         buffer);
1506 
1507          pixmap_buffer = buffer->linear_buffer;
1508          if (!buffer->linear_buffer) {
1509             goto no_linear_buffer;
1510          }
1511       }
1512    }
1513 
1514    /* X want some information about the planes, so ask the image for it
1515     */
1516    if (!dri2_query_image(pixmap_buffer, __DRI_IMAGE_ATTRIB_NUM_PLANES,
1517                                      &num_planes))
1518       num_planes = 1;
1519 
1520    for (i = 0; i < num_planes; i++) {
1521       __DRIimage *image = dri2_from_planar(pixmap_buffer, i, NULL);
1522 
1523       if (!image) {
1524          assert(i == 0);
1525          image = pixmap_buffer;
1526       }
1527 
1528       buffer_fds[i] = -1;
1529 
1530       ret = dri2_query_image(image, __DRI_IMAGE_ATTRIB_FD,
1531                                          &buffer_fds[i]);
1532       ret &= dri2_query_image(image, __DRI_IMAGE_ATTRIB_STRIDE,
1533                                           &buffer->strides[i]);
1534       ret &= dri2_query_image(image, __DRI_IMAGE_ATTRIB_OFFSET,
1535                                           &buffer->offsets[i]);
1536       if (image != pixmap_buffer)
1537          dri2_destroy_image(image);
1538 
1539       if (!ret)
1540          goto no_buffer_attrib;
1541    }
1542 
1543    ret = dri2_query_image(pixmap_buffer,
1544                                      __DRI_IMAGE_ATTRIB_MODIFIER_UPPER, &mod);
1545    buffer->modifier = (uint64_t) mod << 32;
1546    ret &= dri2_query_image(pixmap_buffer,
1547                                        __DRI_IMAGE_ATTRIB_MODIFIER_LOWER, &mod);
1548    buffer->modifier |= (uint64_t)(mod & 0xffffffff);
1549 
1550    if (!ret)
1551       buffer->modifier = DRM_FORMAT_MOD_INVALID;
1552 
1553    if (draw->dri_screen_render_gpu != draw->dri_screen_display_gpu &&
1554        draw->dri_screen_display_gpu && linear_buffer_display_gpu) {
1555       /* The linear buffer was created in the display GPU's vram, so we
1556        * need to make it visible to render GPU
1557        */
1558       buffer->linear_buffer =
1559          dri2_from_dma_bufs(draw->dri_screen_render_gpu,
1560                                                   width,
1561                                                   height,
1562                                                   fourcc,
1563                                                   DRM_FORMAT_MOD_INVALID,
1564                                                   &buffer_fds[0], num_planes,
1565                                                   &buffer->strides[0],
1566                                                   &buffer->offsets[0],
1567                                                   0, 0, 0, 0, __DRI_IMAGE_PRIME_LINEAR_BUFFER,
1568                                                   NULL, buffer);
1569       if (!buffer->linear_buffer)
1570          goto no_buffer_attrib;
1571 
1572       dri2_destroy_image(linear_buffer_display_gpu);
1573    }
1574 
1575    pixmap = xcb_generate_id(draw->conn);
1576 #ifdef HAVE_X11_DRM
1577    if (draw->multiplanes_available &&
1578        buffer->modifier != DRM_FORMAT_MOD_INVALID) {
1579       xcb_dri3_pixmap_from_buffers(draw->conn,
1580                                    pixmap,
1581                                    draw->window,
1582                                    num_planes,
1583                                    width, height,
1584                                    buffer->strides[0], buffer->offsets[0],
1585                                    buffer->strides[1], buffer->offsets[1],
1586                                    buffer->strides[2], buffer->offsets[2],
1587                                    buffer->strides[3], buffer->offsets[3],
1588                                    depth, buffer->cpp * 8,
1589                                    buffer->modifier,
1590                                    buffer_fds);
1591    } else
1592 #endif
1593    {
1594       xcb_dri3_pixmap_from_buffer(draw->conn,
1595                                   pixmap,
1596                                   draw->drawable,
1597                                   buffer->size,
1598                                   width, height, buffer->strides[0],
1599                                   depth, buffer->cpp * 8,
1600                                   buffer_fds[0]);
1601    }
1602 
1603    xcb_dri3_fence_from_fd(draw->conn,
1604                           pixmap,
1605                           (sync_fence = xcb_generate_id(draw->conn)),
1606                           false,
1607                           fence_fd);
1608 
1609    buffer->pixmap = pixmap;
1610    buffer->own_pixmap = true;
1611    buffer->sync_fence = sync_fence;
1612    buffer->shm_fence = shm_fence;
1613    buffer->width = width;
1614    buffer->height = height;
1615 
1616    /* Mark the buffer as idle
1617     */
1618    dri3_fence_set(buffer);
1619 
1620    return buffer;
1621 
1622 no_buffer_attrib:
1623    do {
1624       if (buffer_fds[i] != -1)
1625          close(buffer_fds[i]);
1626    } while (--i >= 0);
1627    dri2_destroy_image(pixmap_buffer);
1628 no_linear_buffer:
1629    if (draw->dri_screen_render_gpu != draw->dri_screen_display_gpu)
1630       dri2_destroy_image(buffer->image);
1631 no_image:
1632    free(buffer);
1633 no_buffer:
1634    xshmfence_unmap_shm(shm_fence);
1635 no_shm_fence:
1636    close(fence_fd);
1637    return NULL;
1638 }
1639 
1640 static bool
dri3_detect_drawable_is_window(struct loader_dri3_drawable * draw)1641 dri3_detect_drawable_is_window(struct loader_dri3_drawable *draw)
1642 {
1643    /* Try to select for input on the window.
1644     *
1645     * If the drawable is a window, this will get our events
1646     * delivered.
1647     *
1648     * Otherwise, we'll get a BadWindow error back from this request which
1649     * will let us know that the drawable is a pixmap instead.
1650     */
1651 
1652    xcb_void_cookie_t cookie =
1653       xcb_present_select_input_checked(draw->conn, draw->eid, draw->drawable,
1654                                        XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
1655                                        XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY |
1656                                        XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY);
1657 
1658    /* Check to see if our select input call failed. If it failed with a
1659     * BadWindow error, then assume the drawable is a pixmap.
1660     */
1661    xcb_generic_error_t *error = xcb_request_check(draw->conn, cookie);
1662 
1663    if (error) {
1664       if (error->error_code != BadWindow) {
1665          free(error);
1666          return false;
1667       }
1668       free(error);
1669 
1670       /* pixmap can't get here, see driFetchDrawable(). */
1671       draw->type = LOADER_DRI3_DRAWABLE_PBUFFER;
1672       return true;
1673    }
1674 
1675    draw->type = LOADER_DRI3_DRAWABLE_WINDOW;
1676    return true;
1677 }
1678 
1679 static bool
dri3_setup_present_event(struct loader_dri3_drawable * draw)1680 dri3_setup_present_event(struct loader_dri3_drawable *draw)
1681 {
1682    /* No need to setup for pixmap drawable. */
1683    if (draw->type == LOADER_DRI3_DRAWABLE_PIXMAP ||
1684        draw->type == LOADER_DRI3_DRAWABLE_PBUFFER)
1685       return true;
1686 
1687    draw->eid = xcb_generate_id(draw->conn);
1688 
1689    if (draw->type == LOADER_DRI3_DRAWABLE_WINDOW) {
1690       xcb_present_select_input(draw->conn, draw->eid, draw->drawable,
1691                                XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
1692                                XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY |
1693                                XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY);
1694    } else {
1695       assert(draw->type == LOADER_DRI3_DRAWABLE_UNKNOWN);
1696 
1697       if (!dri3_detect_drawable_is_window(draw))
1698          return false;
1699 
1700       if (draw->type != LOADER_DRI3_DRAWABLE_WINDOW)
1701          return true;
1702    }
1703 
1704    /* Create an XCB event queue to hold present events outside of the usual
1705     * application event queue
1706     */
1707    draw->special_event = xcb_register_for_special_xge(draw->conn,
1708                                                       &xcb_present_id,
1709                                                       draw->eid,
1710                                                       draw->stamp);
1711    return true;
1712 }
1713 
1714 /** loader_dri3_update_drawable
1715  *
1716  * Called the first time we use the drawable and then
1717  * after we receive present configure notify events to
1718  * track the geometry of the drawable
1719  */
1720 static int
dri3_update_drawable(struct loader_dri3_drawable * draw)1721 dri3_update_drawable(struct loader_dri3_drawable *draw)
1722 {
1723    mtx_lock(&draw->mtx);
1724    if (draw->first_init) {
1725       xcb_get_geometry_cookie_t                 geom_cookie;
1726       xcb_get_geometry_reply_t                  *geom_reply;
1727       xcb_window_t                               root_win;
1728 
1729       draw->first_init = false;
1730 
1731       if (!dri3_setup_present_event(draw)) {
1732          mtx_unlock(&draw->mtx);
1733          return false;
1734       }
1735 
1736       geom_cookie = xcb_get_geometry(draw->conn, draw->drawable);
1737 
1738       geom_reply = xcb_get_geometry_reply(draw->conn, geom_cookie, NULL);
1739 
1740       if (!geom_reply) {
1741          mtx_unlock(&draw->mtx);
1742          return false;
1743       }
1744       draw->width = geom_reply->width;
1745       draw->height = geom_reply->height;
1746       draw->depth = geom_reply->depth;
1747       draw->vtable->set_drawable_size(draw, draw->width, draw->height);
1748       root_win = geom_reply->root;
1749 
1750       free(geom_reply);
1751 
1752       if (draw->type != LOADER_DRI3_DRAWABLE_WINDOW)
1753          draw->window = root_win;
1754       else
1755          draw->window = draw->drawable;
1756    }
1757    dri3_flush_present_events(draw);
1758    mtx_unlock(&draw->mtx);
1759    return true;
1760 }
1761 
1762 __DRIimage *
loader_dri3_create_image(xcb_connection_t * c,xcb_dri3_buffer_from_pixmap_reply_t * bp_reply,unsigned int fourcc,__DRIscreen * dri_screen,void * loaderPrivate)1763 loader_dri3_create_image(xcb_connection_t *c,
1764                          xcb_dri3_buffer_from_pixmap_reply_t *bp_reply,
1765                          unsigned int fourcc,
1766                          __DRIscreen *dri_screen,
1767                          void *loaderPrivate)
1768 {
1769    int                                  *fds;
1770    __DRIimage                           *image_planar, *ret;
1771    int                                  stride, offset;
1772 
1773    /* Get an FD for the pixmap object
1774     */
1775    fds = xcb_dri3_buffer_from_pixmap_reply_fds(c, bp_reply);
1776 
1777    stride = bp_reply->stride;
1778    offset = 0;
1779 
1780    /* createImageFromDmaBufs creates a wrapper __DRIimage structure which
1781     * can deal with multiple planes for things like Yuv images. So, once
1782     * we've gotten the planar wrapper, pull the single plane out of it and
1783     * discard the wrapper.
1784     */
1785    image_planar = dri2_from_dma_bufs(dri_screen,
1786                                        bp_reply->width,
1787                                        bp_reply->height,
1788                                        fourcc,
1789                                        DRM_FORMAT_MOD_INVALID,
1790                                        fds, 1,
1791                                        &stride, &offset,
1792                                        0, 0, 0, 0, 0,
1793                                        NULL, loaderPrivate);
1794    close(fds[0]);
1795    if (!image_planar)
1796       return NULL;
1797 
1798    ret = dri2_from_planar(image_planar, 0, loaderPrivate);
1799 
1800    if (!ret)
1801       ret = image_planar;
1802    else
1803       dri2_destroy_image(image_planar);
1804 
1805    return ret;
1806 }
1807 
#ifdef HAVE_X11_DRM
__DRIimage *
loader_dri3_create_image_from_buffers(xcb_connection_t *c,
                                      xcb_dri3_buffers_from_pixmap_reply_t *bp_reply,
                                      unsigned int fourcc,
                                      __DRIscreen *dri_screen,
                                      void *loaderPrivate)
{
   int strides[4], offsets[4];
   unsigned error;

   /* The DRI3 v1.2 protocol carries at most four planes. */
   if (bp_reply->nfd > 4)
      return NULL;

   int *fds = xcb_dri3_buffers_from_pixmap_reply_fds(c, bp_reply);
   uint32_t *reply_strides = xcb_dri3_buffers_from_pixmap_strides(bp_reply);
   uint32_t *reply_offsets = xcb_dri3_buffers_from_pixmap_offsets(bp_reply);

   /* Copy per-plane layout into the int arrays the DRI interface wants. */
   for (int i = 0; i < bp_reply->nfd; i++) {
      strides[i] = reply_strides[i];
      offsets[i] = reply_offsets[i];
   }

   __DRIimage *image = dri2_from_dma_bufs(dri_screen,
                                          bp_reply->width,
                                          bp_reply->height,
                                          fourcc,
                                          bp_reply->modifier,
                                          fds, bp_reply->nfd,
                                          strides, offsets,
                                          0, 0, 0, 0, /* UNDEFINED */
                                          0, &error, loaderPrivate);

   /* The image (if any) duplicated what it needs; drop our FDs. */
   for (int i = 0; i < bp_reply->nfd; i++)
      close(fds[i]);

   return image;
}
#endif
1850 
1851 __DRIimage *
loader_dri3_get_pixmap_buffer(xcb_connection_t * conn,xcb_drawable_t pixmap,__DRIscreen * screen,unsigned fourcc,bool multiplanes_available,int * width,int * height,void * loader_data)1852 loader_dri3_get_pixmap_buffer(xcb_connection_t *conn, xcb_drawable_t pixmap, __DRIscreen *screen,
1853                               unsigned fourcc, bool multiplanes_available,
1854                               int *width, int *height, void *loader_data)
1855 {
1856    __DRIimage *image;
1857 #ifdef HAVE_X11_DRM
1858    if (multiplanes_available) {
1859       xcb_dri3_buffers_from_pixmap_cookie_t bps_cookie;
1860       xcb_dri3_buffers_from_pixmap_reply_t *bps_reply;
1861 
1862       bps_cookie = xcb_dri3_buffers_from_pixmap(conn, pixmap);
1863       bps_reply = xcb_dri3_buffers_from_pixmap_reply(conn, bps_cookie,
1864                                                      NULL);
1865       if (!bps_reply)
1866          return NULL;
1867       image = loader_dri3_create_image_from_buffers(conn, bps_reply, fourcc,
1868                                                     screen, loader_data);
1869       *width = bps_reply->width;
1870       *height = bps_reply->height;
1871       free(bps_reply);
1872    } else
1873 #endif
1874    {
1875       xcb_dri3_buffer_from_pixmap_cookie_t bp_cookie;
1876       xcb_dri3_buffer_from_pixmap_reply_t *bp_reply;
1877 
1878       bp_cookie = xcb_dri3_buffer_from_pixmap(conn, pixmap);
1879       bp_reply = xcb_dri3_buffer_from_pixmap_reply(conn, bp_cookie, NULL);
1880       if (!bp_reply)
1881          return NULL;
1882 
1883       image = loader_dri3_create_image(conn, bp_reply, fourcc, screen,
1884                                                loader_data);
1885       *width = bp_reply->width;
1886       *height = bp_reply->height;
1887       free(bp_reply);
1888    }
1889    return image;
1890 }
1891 
/** dri3_get_pixmap_buffer
 *
 * Get the DRM object for a pixmap from the X server and
 * wrap that with a __DRIimage structure using createImageFromDmaBufs.
 *
 * The resulting loader_dri3_buffer is cached in draw->buffers[], so
 * repeated calls for the same buffer_type return the cached entry.
 * Returns NULL on allocation or protocol failure.
 */
static struct loader_dri3_buffer *
dri3_get_pixmap_buffer(__DRIdrawable *driDrawable, unsigned int fourcc,
                       enum loader_dri3_buffer_type buffer_type,
                       struct loader_dri3_drawable *draw)
{
   int                                  buf_id = loader_dri3_pixmap_buf_id(buffer_type);
   struct loader_dri3_buffer            *buffer = draw->buffers[buf_id];
   xcb_drawable_t                       pixmap;
   xcb_sync_fence_t                     sync_fence;
   struct xshmfence                     *shm_fence;
   int                                  width;
   int                                  height;
   int                                  fence_fd;
   __DRIscreen                          *cur_screen;

   /* Reuse the cached buffer if we already built one for this slot. */
   if (buffer)
      return buffer;

   pixmap = draw->drawable;

   buffer = calloc(1, sizeof *buffer);
   if (!buffer)
      goto no_buffer;

   /* Allocate and map a shared-memory fence for synchronizing with the
    * server's use of the pixmap.
    */
   fence_fd = xshmfence_alloc_shm();
   if (fence_fd < 0)
      goto no_fence;
   shm_fence = xshmfence_map_shm(fence_fd);
   if (shm_fence == NULL) {
      close (fence_fd);
      goto no_fence;
   }

   /* Get the currently-bound screen or revert to using the drawable's screen if
    * no contexts are currently bound. The latter case is at least necessary for
    * obs-studio, when using Window Capture (Xcomposite) as a Source.
    */
   cur_screen = draw->vtable->get_dri_screen();
   if (!cur_screen) {
       cur_screen = draw->dri_screen_render_gpu;
   }

   /* Hand the fence FD to the server; NOTE(review): fence_fd appears to be
    * owned by this request afterwards, which is why the error paths below
    * do not close it — confirm against xcb FD-passing semantics.
    */
   xcb_dri3_fence_from_fd(draw->conn,
                          pixmap,
                          (sync_fence = xcb_generate_id(draw->conn)),
                          false,
                          fence_fd);
   buffer->image = loader_dri3_get_pixmap_buffer(draw->conn, pixmap, cur_screen, fourcc,
                                                 draw->multiplanes_available, &width, &height, buffer);

   if (!buffer->image)
      goto no_image;

   buffer->pixmap = pixmap;
   /* The pixmap belongs to the caller/server, not to this buffer. */
   buffer->own_pixmap = false;
   buffer->width = width;
   buffer->height = height;
   buffer->shm_fence = shm_fence;
   buffer->sync_fence = sync_fence;

   dri3_set_render_buffer(draw, buf_id, buffer);

   return buffer;

no_image:
   /* Unwind in reverse order of acquisition. */
   xcb_sync_destroy_fence(draw->conn, sync_fence);
   xshmfence_unmap_shm(shm_fence);
no_fence:
   free(buffer);
no_buffer:
   return NULL;
}
1969 
/** dri3_get_buffer
 *
 * Find a front or back buffer, allocating new ones as necessary.
 *
 * Reuses the cached buffer for the computed slot when it matches the
 * drawable's current size and is not flagged for reallocation; otherwise
 * allocates a replacement and migrates the old contents into it.
 * Returns NULL if no back slot is available or allocation fails.
 */
static struct loader_dri3_buffer *
dri3_get_buffer(__DRIdrawable *driDrawable,
                unsigned int fourcc,
                enum loader_dri3_buffer_type buffer_type,
                struct loader_dri3_drawable *draw)
{
   struct loader_dri3_buffer *buffer;
   /* Back buffers must always be idle before we hand them out. */
   bool fence_await = buffer_type == loader_dri3_buffer_back;
   int buf_id;

   if (buffer_type == loader_dri3_buffer_back) {
      draw->back_format = fourcc;

      buf_id = dri3_find_back(draw, !draw->prefer_back_buffer_reuse);

      if (buf_id < 0)
         return NULL;
   } else {
      buf_id = LOADER_DRI3_FRONT_ID;
   }

   buffer = draw->buffers[buf_id];

   /* Allocate a new buffer if there isn't an old one, if that
    * old one is the wrong size, or if it's suboptimal
    */
   if (!buffer || buffer->width != draw->width ||
       buffer->height != draw->height ||
       buffer->reallocate) {
      struct loader_dri3_buffer *new_buffer;

      /* Allocate the new buffers
       */
      new_buffer = dri3_alloc_render_buffer(draw,
                                            fourcc,
                                            draw->width,
                                            draw->height,
                                            draw->depth);
      if (!new_buffer)
         return NULL;

      /* When resizing, copy the contents of the old buffer, waiting for that
       * copy to complete using our fences before proceeding
       */
      if ((buffer_type == loader_dri3_buffer_back ||
           (buffer_type == loader_dri3_buffer_front && draw->have_fake_front))
          && buffer) {

         /* Fill the new buffer with data from an old buffer */
         if (!loader_dri3_blit_image(draw,
                                     new_buffer->image,
                                     buffer->image,
                                     0, 0,
                                     MIN2(buffer->width, new_buffer->width),
                                     MIN2(buffer->height, new_buffer->height),
                                     0, 0, 0) &&
             !buffer->linear_buffer) {
            /* GPU blit failed: fall back to a server-side CopyArea,
             * fenced so we can wait for it below.
             */
            dri3_fence_reset(draw->conn, new_buffer);
            dri3_copy_area(draw->conn,
                           buffer->pixmap,
                           new_buffer->pixmap,
                           dri3_drawable_gc(draw),
                           0, 0, 0, 0,
                           draw->width, draw->height);
            dri3_fence_trigger(draw->conn, new_buffer);
            fence_await = true;
         }
         dri3_free_render_buffer(draw, buf_id);
      } else if (buffer_type == loader_dri3_buffer_front) {
         /* Fill the new fake front with data from a real front */
         loader_dri3_swapbuffer_barrier(draw);
         dri3_fence_reset(draw->conn, new_buffer);
         dri3_copy_area(draw->conn,
                        draw->drawable,
                        new_buffer->pixmap,
                        dri3_drawable_gc(draw),
                        0, 0, 0, 0,
                        draw->width, draw->height);
         dri3_fence_trigger(draw->conn, new_buffer);

         if (new_buffer->linear_buffer) {
            /* Prime setup: sync the linear (display GPU) copy into the
             * render GPU's image once the server copy finished.
             */
            dri3_fence_await(draw->conn, draw, new_buffer);
            (void) loader_dri3_blit_image(draw,
                                          new_buffer->image,
                                          new_buffer->linear_buffer,
                                          0, 0, draw->width, draw->height,
                                          0, 0, 0);
         } else
            fence_await = true;
      }
      buffer = new_buffer;
      dri3_set_render_buffer(draw, buf_id, buffer);
   }

   if (fence_await)
      dri3_fence_await(draw->conn, draw, buffer);

   /*
    * Do we need to preserve the content of a previous buffer?
    *
    * Note that this blit is needed only to avoid a wait for a buffer that
    * is currently in the flip chain or being scanned out from. That's really
    * a tradeoff. If we're ok with the wait we can reduce the number of back
    * buffers to 1 for SWAP_EXCHANGE, and 1 for SWAP_COPY,
    * but in the latter case we must disallow page-flipping.
    */
   if (buffer_type == loader_dri3_buffer_back &&
       draw->cur_blit_source != -1 &&
       draw->buffers[draw->cur_blit_source] &&
       buffer != draw->buffers[draw->cur_blit_source]) {

      struct loader_dri3_buffer *source = draw->buffers[draw->cur_blit_source];

      /* Avoid flushing here. Will probably do good for tiling hardware. */
      (void) loader_dri3_blit_image(draw,
                                    buffer->image,
                                    source->image,
                                    0, 0, draw->width, draw->height,
                                    0, 0, 0);
      buffer->last_swap = source->last_swap;
      draw->cur_blit_source = -1;
   }
   /* Return the requested buffer */
   return buffer;
}
2099 
2100 /** dri3_free_buffers
2101  *
2102  * Free the front bufffer or all of the back buffers. Used
2103  * when the application changes which buffers it needs
2104  */
2105 static void
dri3_free_buffers(__DRIdrawable * driDrawable,enum loader_dri3_buffer_type buffer_type,struct loader_dri3_drawable * draw)2106 dri3_free_buffers(__DRIdrawable *driDrawable,
2107                   enum loader_dri3_buffer_type buffer_type,
2108                   struct loader_dri3_drawable *draw)
2109 {
2110    int first_id;
2111    int n_id;
2112    int buf_id;
2113 
2114    switch (buffer_type) {
2115    case loader_dri3_buffer_back:
2116       first_id = LOADER_DRI3_BACK_ID(0);
2117       n_id = LOADER_DRI3_MAX_BACK;
2118       draw->cur_blit_source = -1;
2119       break;
2120    case loader_dri3_buffer_front:
2121       first_id = LOADER_DRI3_FRONT_ID;
2122       /* Don't free a fake front holding new backbuffer content. */
2123       n_id = (draw->cur_blit_source == LOADER_DRI3_FRONT_ID) ? 0 : 1;
2124       break;
2125    default:
2126       unreachable("unhandled buffer_type");
2127    }
2128 
2129    for (buf_id = first_id; buf_id < first_id + n_id; buf_id++)
2130       dri3_free_render_buffer(draw, buf_id);
2131 }
2132 
/** loader_dri3_get_buffers
 *
 * The published buffer allocation API.
 * Returns all of the necessary buffers, allocating
 * as needed.
 *
 * Fills in buffers->image_mask / front / back and returns true on
 * success, false if the drawable update or any allocation fails.
 */
int
loader_dri3_get_buffers(__DRIdrawable *driDrawable,
                        unsigned int format,
                        uint32_t *stamp,
                        void *loaderPrivate,
                        uint32_t buffer_mask,
                        struct __DRIimageList *buffers)
{
   struct loader_dri3_drawable *draw = loaderPrivate;
   struct loader_dri3_buffer   *front, *back;
   int fourcc = loader_image_format_to_fourcc(format);
   int buf_id;

   buffers->image_mask = 0;
   buffers->front = NULL;
   buffers->back = NULL;

   if (!dri3_update_drawable(draw))
      return false;

   dri3_update_max_num_back(draw);

   /* Free no longer needed back buffers */
   for (buf_id = 0; buf_id < LOADER_DRI3_MAX_BACK; buf_id++) {
      int buffer_age;

      /* Keep unswapped buffers and the pending blit source. */
      back = draw->buffers[buf_id];
      if (!back || !back->last_swap || draw->cur_blit_source == buf_id)
         continue;

      /* A buffer this stale is unlikely to be reused; reclaim it. */
      buffer_age = draw->send_sbc - back->last_swap + 1;
      if (buffer_age > 200)
         dri3_free_render_buffer(draw, buf_id);
   }

   /* pixmaps always have front buffers.
    */
   if (draw->type != LOADER_DRI3_DRAWABLE_WINDOW)
      buffer_mask |= __DRI_IMAGE_BUFFER_FRONT;

   if (buffer_mask & __DRI_IMAGE_BUFFER_FRONT) {
      /* All pixmaps are owned by the server gpu.
       * When we use a different gpu, we can't use the pixmap
       * as buffer since it is potentially tiled a way
       * our device can't understand. In this case, use
       * a fake front buffer. Hopefully the pixmap
       * content will get synced with the fake front
       * buffer.
       */
      if (draw->type != LOADER_DRI3_DRAWABLE_WINDOW &&
          draw->dri_screen_render_gpu == draw->dri_screen_display_gpu)
         front = dri3_get_pixmap_buffer(driDrawable,
                                        fourcc,
                                        loader_dri3_buffer_front,
                                        draw);
      else
         front = dri3_get_buffer(driDrawable,
                                 fourcc,
                                 loader_dri3_buffer_front,
                                 draw);

      if (!front)
         return false;
   } else {
      /* Front no longer wanted: drop it and forget the fake front. */
      dri3_free_buffers(driDrawable, loader_dri3_buffer_front, draw);
      draw->have_fake_front = 0;
      front = NULL;
   }

   if (buffer_mask & __DRI_IMAGE_BUFFER_BACK) {
      back = dri3_get_buffer(driDrawable,
                             fourcc,
                             loader_dri3_buffer_back,
                             draw);
      if (!back)
         return false;
      draw->have_back = 1;
   } else {
      dri3_free_buffers(driDrawable, loader_dri3_buffer_back, draw);
      draw->have_back = 0;
      back = NULL;
   }

   if (front) {
      buffers->image_mask |= __DRI_IMAGE_BUFFER_FRONT;
      buffers->front = front->image;
      /* A window front, or any cross-GPU front, is a fake front that
       * must be kept in sync with the real one.
       */
      draw->have_fake_front =
         draw->dri_screen_render_gpu != draw->dri_screen_display_gpu ||
         draw->type == LOADER_DRI3_DRAWABLE_WINDOW;
   }

   if (back) {
      buffers->image_mask |= __DRI_IMAGE_BUFFER_BACK;
      buffers->back = back->image;
   }

   draw->stamp = stamp;

   return true;
}
2239 
2240 /** loader_dri3_update_drawable_geometry
2241  *
2242  * Get the current drawable geometry.
2243  */
2244 void
loader_dri3_update_drawable_geometry(struct loader_dri3_drawable * draw)2245 loader_dri3_update_drawable_geometry(struct loader_dri3_drawable *draw)
2246 {
2247    xcb_get_geometry_cookie_t geom_cookie;
2248    xcb_get_geometry_reply_t *geom_reply;
2249 
2250    geom_cookie = xcb_get_geometry(draw->conn, draw->drawable);
2251 
2252    geom_reply = xcb_get_geometry_reply(draw->conn, geom_cookie, NULL);
2253 
2254    if (geom_reply) {
2255       bool changed = draw->width != geom_reply->width || draw->height != geom_reply->height;
2256       draw->width = geom_reply->width;
2257       draw->height = geom_reply->height;
2258       if (changed) {
2259          draw->vtable->set_drawable_size(draw, draw->width, draw->height);
2260          dri_invalidate_drawable(draw->dri_drawable);
2261       }
2262 
2263       free(geom_reply);
2264    }
2265 }
2266 
2267 /**
2268  * Make sure the server has flushed all pending swap buffers to hardware
2269  * for this drawable. Ideally we'd want to send an X protocol request to
2270  * have the server block our connection until the swaps are complete. That
2271  * would avoid the potential round-trip here.
2272  */
2273 void
loader_dri3_swapbuffer_barrier(struct loader_dri3_drawable * draw)2274 loader_dri3_swapbuffer_barrier(struct loader_dri3_drawable *draw)
2275 {
2276    int64_t ust, msc, sbc;
2277 
2278    (void) loader_dri3_wait_for_sbc(draw, 0, &ust, &msc, &sbc);
2279 }
2280 
2281 /**
2282  * Perform any cleanup associated with a close screen operation.
2283  * \param dri_screen[in,out] Pointer to __DRIscreen about to be closed.
2284  *
2285  * This function destroys the screen's cached swap context if any.
2286  */
2287 void
loader_dri3_close_screen(__DRIscreen * dri_screen)2288 loader_dri3_close_screen(__DRIscreen *dri_screen)
2289 {
2290    simple_mtx_lock(&blit_context.mtx);
2291    if (blit_context.ctx && blit_context.cur_screen == dri_screen) {
2292       driDestroyContext(blit_context.ctx);
2293       blit_context.ctx = NULL;
2294    }
2295    simple_mtx_unlock(&blit_context.mtx);
2296 }
2297 
2298 /**
2299  * Find a backbuffer slot - potentially allocating a back buffer
2300  *
2301  * \param draw[in,out]  Pointer to the drawable for which to find back.
2302  * \return Pointer to a new back buffer or NULL if allocation failed or was
2303  * not mandated.
2304  *
2305  * Find a potentially new back buffer, and if it's not been allocated yet and
2306  * in addition needs initializing, then try to allocate and initialize it.
2307  */
2308 static struct loader_dri3_buffer *
dri3_find_back_alloc(struct loader_dri3_drawable * draw)2309 dri3_find_back_alloc(struct loader_dri3_drawable *draw)
2310 {
2311    struct loader_dri3_buffer *back;
2312    int id;
2313 
2314    id = dri3_find_back(draw, false);
2315    if (id < 0)
2316       return NULL;
2317 
2318    back = draw->buffers[id];
2319    /* Allocate a new back if we haven't got one */
2320    if (!back && draw->back_format != DRM_FORMAT_INVALID &&
2321        dri3_update_drawable(draw))
2322       back = dri3_alloc_render_buffer(draw, draw->back_format,
2323                                       draw->width, draw->height, draw->depth);
2324 
2325    if (!back)
2326       return NULL;
2327 
2328    dri3_set_render_buffer(draw, id, back);
2329 
2330    /* If necessary, prefill the back with data. */
2331    if (draw->cur_blit_source != -1 &&
2332        draw->buffers[draw->cur_blit_source] &&
2333        back != draw->buffers[draw->cur_blit_source]) {
2334       struct loader_dri3_buffer *source = draw->buffers[draw->cur_blit_source];
2335 
2336       dri3_fence_await(draw->conn, draw, source);
2337       dri3_fence_await(draw->conn, draw, back);
2338       (void) loader_dri3_blit_image(draw,
2339                                     back->image,
2340                                     source->image,
2341                                     0, 0, draw->width, draw->height,
2342                                     0, 0, 0);
2343       back->last_swap = source->last_swap;
2344       draw->cur_blit_source = -1;
2345    }
2346 
2347    return back;
2348 }
2349