/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** @file glthread.c
 *
 * Support functions for the glthread feature of Mesa.
 *
 * In multicore systems, many applications end up CPU-bound with about half
 * their time spent inside their rendering thread and half inside Mesa.  To
 * alleviate this, we put a shim layer in Mesa at the GL dispatch level that
 * quickly logs the GL commands to a buffer to be processed by a worker
 * thread.
 */

#include "main/mtypes.h"
#include "main/glthread.h"
#include "main/glthread_marshal.h"
#include "main/hash.h"
#include "main/pixelstore.h"
#include "util/u_atomic.h"
#include "util/u_thread.h"
#include "util/u_cpu_detect.h"
#include "util/thread_sched.h"

#include "state_tracker/st_context.h"

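/* Decide whether this context may take the shared-state mutexes once per
 * batch (fast, but it serializes contexts) instead of once per API call.
 * Batch-level locking is only used after NoLockDuration nanoseconds have
 * passed without a context switch; the duration itself is adapted
 * heuristically below.
 */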
static void
glthread_update_global_locking(struct gl_context *ctx)
{
   struct gl_shared_state *shared = ctx->Shared;

   /* Determine if we should lock the global mutexes. */
   simple_mtx_lock(&shared->Mutex);
   int64_t current_time = os_time_get_nano();

   /* We can only lock the mutexes after NoLockDuration nanoseconds have
    * passed since multiple contexts were active.
    */
   bool lock_mutexes = shared->GLThread.LastContextSwitchTime +
                       shared->GLThread.NoLockDuration < current_time;

   /* Check if multiple contexts are active (the last executing context is
    * different).
    */
   if (ctx != shared->GLThread.LastExecutingCtx) {
      if (lock_mutexes) {
         /* If we get here, we've been locking the global mutexes for a while
          * and now we are switching contexts. */
         if (shared->GLThread.LastContextSwitchTime +
             120 * ONE_SECOND_IN_NS < current_time) {
            /* If it's been more than 2 minutes of only one active context,
             * indicating that there was no other active context for a long
             * time, reset the no-lock time to its initial state of only 1
             * second. This is most likely an infrequent situation of
             * multi-context loading of game content and shaders.
             * (this is a heuristic)
             */
            shared->GLThread.NoLockDuration = ONE_SECOND_IN_NS;
         } else if (shared->GLThread.NoLockDuration < 32 * ONE_SECOND_IN_NS) {
            /* Double the no-lock duration if we are transitioning from only
             * one active context to multiple active contexts after a short
             * time, up to a maximum of 32 seconds, indicating that multiple
             * contexts are frequently executing. (this is a heuristic)
             */
            shared->GLThread.NoLockDuration *= 2;
         }

         lock_mutexes = false;
      }

      /* There are multiple active contexts. Update the last executing context
       * and the last context switch time. We only start locking global mutexes
       * after LastContextSwitchTime + NoLockDuration passes, so this
       * effectively resets the non-locking stopwatch to 0, so that multiple
       * contexts can execute simultaneously as long as they are not idle.
       */
      shared->GLThread.LastExecutingCtx = ctx;
      shared->GLThread.LastContextSwitchTime = current_time;
   }
   simple_mtx_unlock(&shared->Mutex);

   ctx->GLThread.LockGlobalMutexes = lock_mutexes;
}

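/* Worker-thread job that executes all GL calls recorded in one batch.
 * Also called directly on the app thread by _mesa_glthread_finish when a
 * partial batch must be executed synchronously.
 */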
static void
glthread_unmarshal_batch(void *job, void *gdata, int thread_index)
{
   struct glthread_batch *batch = (struct glthread_batch*)job;
   struct gl_context *ctx = batch->ctx;
   unsigned pos = 0;
   unsigned used = batch->used;
   uint64_t *buffer = batch->buffer;
   struct gl_shared_state *shared = ctx->Shared;

   /* Determine once every 64 batches whether shared mutexes should be locked.
    * We have to do this less frequently because os_time_get_nano() is very
    * expensive if the clock source is not TSC. See:
    *    https://gitlab.freedesktop.org/mesa/mesa/-/issues/8910
    */
   if (ctx->GLThread.GlobalLockUpdateBatchCounter++ % 64 == 0)
      glthread_update_global_locking(ctx);

   /* Execute the GL calls. */
   _glapi_set_dispatch(ctx->Dispatch.Current);

   /* Here we lock the mutexes once globally if possible. If not, we just
    * fall back to the individual API calls doing it.
    */
   bool lock_mutexes = ctx->GLThread.LockGlobalMutexes;
   if (lock_mutexes) {
      _mesa_HashLockMutex(&shared->BufferObjects);
      ctx->BufferObjectsLocked = true;
      simple_mtx_lock(&shared->TexMutex);
      ctx->TexturesLocked = true;
   }

   while (pos < used) {
      const struct marshal_cmd_base *cmd =
         (const struct marshal_cmd_base *)&buffer[pos];

      pos += _mesa_unmarshal_dispatch[cmd->cmd_id](ctx, cmd);
   }

   if (lock_mutexes) {
      ctx->TexturesLocked = false;
      simple_mtx_unlock(&shared->TexMutex);
      ctx->BufferObjectsLocked = false;
      _mesa_HashUnlockMutex(&shared->BufferObjects);
   }

   assert(pos == used);
   batch->used = 0;

   unsigned batch_index = batch - ctx->GLThread.batches;
   _mesa_glthread_signal_call(&ctx->GLThread.LastProgramChangeBatch, batch_index);
   _mesa_glthread_signal_call(&ctx->GLThread.LastDListChangeBatchIndex, batch_index);

   p_atomic_inc(&ctx->GLThread.stats.num_batches);
}

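/* Apply the thread scheduling policy to the glthread worker and, if that
 * succeeds, to the driver threads. Re-evaluated on initialization and then
 * once every 128 invocations, since the app thread may migrate between
 * CPU cores.
 */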
static void
glthread_apply_thread_sched_policy(struct gl_context *ctx, bool initialization)
{
   struct glthread_state *glthread = &ctx->GLThread;

   if (!glthread->thread_sched_enabled)
      return;

   /* Apply our thread scheduling policy for better multithreading
    * performance.
    */
   if (initialization || ++glthread->pin_thread_counter % 128 == 0) {
      int cpu = util_get_current_cpu();

      if (cpu >= 0 &&
          util_thread_sched_apply_policy(glthread->queue.threads[0],
                                         UTIL_THREAD_GLTHREAD, cpu,
                                         &glthread->thread_sched_state)) {
         /* If it's successful, apply the policy to the driver threads too. */
         ctx->pipe->set_context_param(ctx->pipe,
                                      PIPE_CONTEXT_PARAM_UPDATE_THREAD_SCHEDULING,
                                      cpu);
      }
   }
}

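/* One-time job run on the worker thread: bind the context there as a
 * background context so that the worker can execute GL calls.
 */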
static void
glthread_thread_initialization(void *job, void *gdata, int thread_index)
{
   struct gl_context *ctx = (struct gl_context*)job;

   st_set_background_context(ctx, &ctx->GLThread.stats);
   _glapi_set_context(ctx);
}

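/* Initialize the marshalling dispatch table. The generated initialization
 * is split into several parts.
 */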
static void
_mesa_glthread_init_dispatch(struct gl_context *ctx,
                             struct _glapi_table *table)
{
   _mesa_glthread_init_dispatch0(ctx, table);
   _mesa_glthread_init_dispatch1(ctx, table);
   _mesa_glthread_init_dispatch2(ctx, table);
   _mesa_glthread_init_dispatch3(ctx, table);
   _mesa_glthread_init_dispatch4(ctx, table);
   _mesa_glthread_init_dispatch5(ctx, table);
   _mesa_glthread_init_dispatch6(ctx, table);
   _mesa_glthread_init_dispatch7(ctx, table);
}

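/* Create the glthread worker queue and all per-context glthread state,
 * then enable glthread. Does nothing if the driver lacks the required
 * buffer-mapping capabilities.
 */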
void
_mesa_glthread_init(struct gl_context *ctx)
{
   struct pipe_screen *screen = ctx->screen;
   struct glthread_state *glthread = &ctx->GLThread;
   assert(!glthread->enabled);

   if (!screen->get_param(screen, PIPE_CAP_MAP_UNSYNCHRONIZED_THREAD_SAFE) ||
       !screen->get_param(screen, PIPE_CAP_ALLOW_MAPPED_BUFFERS_DURING_EXECUTION))
      return;

   if (!util_queue_init(&glthread->queue, "gl", MARSHAL_MAX_BATCHES - 2,
                        1, 0, NULL)) {
      return;
   }

   _mesa_InitHashTable(&glthread->VAOs);
   _mesa_glthread_reset_vao(&glthread->DefaultVAO);
   glthread->CurrentVAO = &glthread->DefaultVAO;

   ctx->MarshalExec = _mesa_alloc_dispatch_table(true);
   if (!ctx->MarshalExec) {
      _mesa_DeinitHashTable(&glthread->VAOs, NULL, NULL);
      util_queue_destroy(&glthread->queue);
      return;
   }

   _mesa_glthread_init_dispatch(ctx, ctx->MarshalExec);
   _mesa_init_pixelstore_attrib(ctx, &glthread->Unpack);

   for (unsigned i = 0; i < MARSHAL_MAX_BATCHES; i++) {
      glthread->batches[i].ctx = ctx;
      util_queue_fence_init(&glthread->batches[i].fence);
   }
   glthread->next_batch = &glthread->batches[glthread->next];
   glthread->used = 0;
   glthread->stats.queue = &glthread->queue;

   _mesa_glthread_init_call_fence(&glthread->LastProgramChangeBatch);
   _mesa_glthread_init_call_fence(&glthread->LastDListChangeBatchIndex);

   _mesa_glthread_enable(ctx);

   /* Execute the thread initialization function in the thread. */
   struct util_queue_fence fence;
   util_queue_fence_init(&fence);
   util_queue_add_job(&glthread->queue, ctx, &fence,
                      glthread_thread_initialization, NULL, 0);
   util_queue_fence_wait(&fence);
   util_queue_fence_destroy(&fence);

   glthread->thread_sched_enabled = ctx->pipe->set_context_param &&
                                    util_thread_scheduler_enabled();
   util_thread_scheduler_init_state(&glthread->thread_sched_state);
   glthread_apply_thread_sched_policy(ctx, true);
}

static void
free_vao(void *data, UNUSED void *userData)
{
   free(data);
}

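/* Tear down glthread: disable it (which synchronizes), then destroy the
 * worker queue, batch fences, VAO tracking, and the vertex upload buffer.
 */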
void
_mesa_glthread_destroy(struct gl_context *ctx)
{
   struct glthread_state *glthread = &ctx->GLThread;

   _mesa_glthread_disable(ctx);

   if (util_queue_is_initialized(&glthread->queue)) {
      util_queue_destroy(&glthread->queue);

      for (unsigned i = 0; i < MARSHAL_MAX_BATCHES; i++)
         util_queue_fence_destroy(&glthread->batches[i].fence);

      _mesa_DeinitHashTable(&glthread->VAOs, free_vao, NULL);
      _mesa_glthread_release_upload_buffer(ctx);
   }
}

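/* Switch the context to the marshalling dispatch table so that GL calls
 * get queued to the worker thread. No-op if glthread is already enabled,
 * the context is lost, or synchronous debug output requires direct calls.
 */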
void _mesa_glthread_enable(struct gl_context *ctx)
{
   if (ctx->GLThread.enabled ||
       ctx->Dispatch.Current == ctx->Dispatch.ContextLost ||
       ctx->GLThread.DebugOutputSynchronous)
      return;

   ctx->GLThread.enabled = true;
   ctx->GLApi = ctx->MarshalExec;

   /* glthread takes over all thread scheduling. */
   ctx->st->pin_thread_counter = ST_THREAD_SCHEDULER_DISABLED;

   /* Update the dispatch only if the dispatch is current. */
   if (_glapi_get_dispatch() == ctx->Dispatch.Current) {
       _glapi_set_dispatch(ctx->GLApi);
   }
}

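/* Synchronize with the worker and switch the context back to the direct
 * dispatch table, restoring state that glthread changed behind the
 * application's back.
 */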
void _mesa_glthread_disable(struct gl_context *ctx)
{
   if (!ctx->GLThread.enabled)
      return;

   _mesa_glthread_finish(ctx);

   ctx->GLThread.enabled = false;
   ctx->GLApi = ctx->Dispatch.Current;

   /* Re-enable thread scheduling in st/mesa when glthread is disabled. */
   if (ctx->pipe->set_context_param && util_thread_scheduler_enabled())
      ctx->st->pin_thread_counter = 0;

   /* Update the dispatch only if the dispatch is current. */
   if (_glapi_get_dispatch() == ctx->MarshalExec) {
       _glapi_set_dispatch(ctx->GLApi);
   }

   /* Unbind VBOs in all VAOs that glthread bound for non-VBO vertex uploads
    * to restore the original state.
    */
   if (ctx->API != API_OPENGL_CORE)
      _mesa_glthread_unbind_uploaded_vbos(ctx);
}

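/* Terminate the current batch: write the end-of-batch marker (without
 * counting it in "used"), publish the item count, and reset per-batch
 * tracking state.
 */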
static void
glthread_finalize_batch(struct glthread_state *glthread,
                        unsigned *num_items_counter)
{
   struct glthread_batch *next = glthread->next_batch;

   /* Mark the end of the batch, but don't increment "used". */
   struct marshal_cmd_base *last =
      (struct marshal_cmd_base *)&next->buffer[glthread->used];
   last->cmd_id = NUM_DISPATCH_CMD;

   p_atomic_add(num_items_counter, glthread->used);
   next->used = glthread->used;
   glthread->used = 0;

   glthread->LastCallList = NULL;
   glthread->LastBindBuffer1 = NULL;
   glthread->LastBindBuffer2 = NULL;
}

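/* Asynchronously submit the current batch to the worker thread and advance
 * to the next batch buffer. Disables glthread if the context was lost.
 */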
void
_mesa_glthread_flush_batch(struct gl_context *ctx)
{
   struct glthread_state *glthread = &ctx->GLThread;
   if (!glthread->enabled)
      return;

   if (ctx->Dispatch.Current == ctx->Dispatch.ContextLost) {
      _mesa_glthread_disable(ctx);
      return;
   }

   if (!glthread->used)
      return; /* the batch is empty */

   glthread_apply_thread_sched_policy(ctx, false);
   glthread_finalize_batch(glthread, &glthread->stats.num_offloaded_items);

   struct glthread_batch *next = glthread->next_batch;

   util_queue_add_job(&glthread->queue, next, &next->fence,
                      glthread_unmarshal_batch, NULL, 0);
   glthread->last = glthread->next;
   glthread->next = (glthread->next + 1) % MARSHAL_MAX_BATCHES;
   glthread->next_batch = &glthread->batches[glthread->next];
}

/**
 * Waits until all pending batches have been unmarshaled.
 *
 * This can be used by the main thread to synchronize access to the context,
 * since the worker thread will be idle after this.
 */
void
_mesa_glthread_finish(struct gl_context *ctx)
{
   struct glthread_state *glthread = &ctx->GLThread;
   if (!glthread->enabled)
      return;

   /* If this is called from the worker thread, then we've hit a path that
    * might be called from either the main thread or the worker (such as some
    * dri interface entrypoints), in which case we don't need to actually
    * synchronize against ourselves.
    */
   if (u_thread_is_self(glthread->queue.threads[0]))
      return;

   struct glthread_batch *last = &glthread->batches[glthread->last];
   struct glthread_batch *next = glthread->next_batch;
   bool synced = false;

   if (!util_queue_fence_is_signalled(&last->fence)) {
      util_queue_fence_wait(&last->fence);
      synced = true;
   }

   glthread_apply_thread_sched_policy(ctx, false);

   if (glthread->used) {
      glthread_finalize_batch(glthread, &glthread->stats.num_direct_items);

      /* Since glthread_unmarshal_batch changes the dispatch to direct,
       * restore it after it's done.
       */
      struct _glapi_table *dispatch = _glapi_get_dispatch();
      glthread_unmarshal_batch(next, NULL, 0);
      _glapi_set_dispatch(dispatch);

      /* It's not a sync because we don't enqueue partial batches, but
       * it would be a sync if we did. So count it anyway.
       */
      synced = true;
   }

   if (synced)
      p_atomic_inc(&glthread->stats.num_syncs);
}

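/* Synchronization point used by API calls that fall back to synchronous
 * execution.
 */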
void
_mesa_glthread_finish_before(struct gl_context *ctx, const char *func)
{
   _mesa_glthread_finish(ctx);

   /* Uncomment this if you want to know where glthread syncs. */
   /*printf("fallback to sync: %s\n", func);*/
}

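/* Report a GL error from either dispatch mode: queue it asynchronously
 * when called from the glthread marshalling path, otherwise format the
 * message and report it directly. Note that the formatted message is only
 * used in the synchronous path.
 */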
void
_mesa_error_glthread_safe(struct gl_context *ctx, GLenum error, bool glthread,
                          const char *format, ...)
{
   if (glthread) {
      _mesa_marshal_InternalSetError(error);
   } else {
      char s[MAX_DEBUG_MESSAGE_LENGTH];
      va_list args;

      va_start(args, format);
      ASSERTED size_t len = vsnprintf(s, MAX_DEBUG_MESSAGE_LENGTH, format, args);
      va_end(args);

      /* Whoever calls _mesa_error should use shorter strings. */
      assert(len < MAX_DEBUG_MESSAGE_LENGTH);

      _mesa_error(ctx, error, "%s", s);
   }
}

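/* Queue invalidation of the ancillary (depth/stencil) buffers if glthread
 * is enabled. Returns false if the caller must do the invalidation itself.
 */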
bool
_mesa_glthread_invalidate_zsbuf(struct gl_context *ctx)
{
   struct glthread_state *glthread = &ctx->GLThread;
   if (!glthread->enabled)
      return false;
   _mesa_marshal_InternalInvalidateFramebufferAncillaryMESA();
   return true;
}

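/* Track client pixel-unpack state on the application thread so that
 * glthread can compute the size of client image data without syncing.
 * Out-of-range values are simply not recorded here; full validation and
 * error reporting still happen later when the call is unmarshaled.
 */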
void
_mesa_glthread_PixelStorei(struct gl_context *ctx, GLenum pname, GLint param)
{
   switch (pname) {
   case GL_UNPACK_SWAP_BYTES:
      ctx->GLThread.Unpack.SwapBytes = !!param;
      break;
   case GL_UNPACK_LSB_FIRST:
      ctx->GLThread.Unpack.LsbFirst = !!param;
      break;
   case GL_UNPACK_ROW_LENGTH:
      if (param >= 0)
         ctx->GLThread.Unpack.RowLength = param;
      break;
   case GL_UNPACK_IMAGE_HEIGHT:
      if (param >= 0)
         ctx->GLThread.Unpack.ImageHeight = param;
      break;
   case GL_UNPACK_SKIP_PIXELS:
      if (param >= 0)
         ctx->GLThread.Unpack.SkipPixels = param;
      break;
   case GL_UNPACK_SKIP_ROWS:
      if (param >= 0)
         ctx->GLThread.Unpack.SkipRows = param;
      break;
   case GL_UNPACK_SKIP_IMAGES:
      if (param >= 0)
         ctx->GLThread.Unpack.SkipImages = param;
      break;
   case GL_UNPACK_ALIGNMENT:
      if (param >= 1 && param <= 8 && util_is_power_of_two_nonzero(param))
         ctx->GLThread.Unpack.Alignment = param;
      break;
   case GL_UNPACK_COMPRESSED_BLOCK_WIDTH:
      if (param >= 0)
         ctx->GLThread.Unpack.CompressedBlockWidth = param;
      break;
   case GL_UNPACK_COMPRESSED_BLOCK_HEIGHT:
      if (param >= 0)
         ctx->GLThread.Unpack.CompressedBlockHeight = param;
      break;
   case GL_UNPACK_COMPRESSED_BLOCK_DEPTH:
      if (param >= 0)
         ctx->GLThread.Unpack.CompressedBlockDepth = param;
      break;
   case GL_UNPACK_COMPRESSED_BLOCK_SIZE:
      if (param >= 0)
         ctx->GLThread.Unpack.CompressedBlockSize = param;
      break;
   }
}
527