/*
 * Copyright © 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/* This implements vertex array state tracking for glthread. It's separate
 * from the rest of Mesa. Only the minimum functionality needed by glthread
 * is implemented here.
 */

#include "main/glthread.h"
#include "main/glformats.h"
#include "main/mtypes.h"
#include "main/hash.h"
#include "main/dispatch.h"
#include "main/varray.h"

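/* Size in bytes of one vertex element with the given user format. */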
static unsigned
element_size(union gl_vertex_format_user format)
{
   return _mesa_bytes_per_vertex_attrib(format.Size, format.Type);
}

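/* Initialize one attrib to a tightly packed default: the attrib index is used
 * as its own buffer binding, the stride equals the element size, and there is
 * no divisor, relative offset, or user pointer.
 */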
static void
init_attrib(struct glthread_attrib *attrib, int index, int size, GLenum type)
{
   attrib->Format = MESA_PACK_VFORMAT(type, size, 0, 0, 0);
   attrib->ElementSize = element_size(attrib->Format);
   attrib->RelativeOffset = 0;
   attrib->BufferIndex = index;
   attrib->Stride = attrib->ElementSize;
   attrib->Divisor = 0;
   attrib->EnabledAttribCount = 0;
   attrib->Pointer = NULL;
}

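/* Reset a VAO to the default GL state. Most attribs default to 4 x GL_FLOAT;
 * the fixed-function attribs handled in the switch below have their own
 * default sizes and types.
 */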
void
_mesa_glthread_reset_vao(struct glthread_vao *vao)
{
   vao->CurrentElementBufferName = 0;
   vao->UserEnabled = 0;
   vao->Enabled = 0;
   vao->BufferEnabled = 0;
   vao->UserPointerMask = 0;
   vao->NonNullPointerMask = 0;
   vao->NonZeroDivisorMask = 0;

   for (unsigned i = 0; i < ARRAY_SIZE(vao->Attrib); i++) {
      switch (i) {
      case VERT_ATTRIB_NORMAL:
         init_attrib(&vao->Attrib[i], i, 3, GL_FLOAT);
         break;
      case VERT_ATTRIB_COLOR1:
         init_attrib(&vao->Attrib[i], i, 3, GL_FLOAT);
         break;
      case VERT_ATTRIB_FOG:
         init_attrib(&vao->Attrib[i], i, 1, GL_FLOAT);
         break;
      case VERT_ATTRIB_COLOR_INDEX:
         init_attrib(&vao->Attrib[i], i, 1, GL_FLOAT);
         break;
      case VERT_ATTRIB_EDGEFLAG:
         init_attrib(&vao->Attrib[i], i, 1, GL_UNSIGNED_BYTE);
         break;
      case VERT_ATTRIB_POINT_SIZE:
         init_attrib(&vao->Attrib[i], i, 1, GL_FLOAT);
         break;
      default:
         init_attrib(&vao->Attrib[i], i, 4, GL_FLOAT);
         break;
      }
   }
}

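/* Look up a VAO by name. The most recently looked-up VAO is cached to avoid
 * taking the hash table path on every call.
 */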
static struct glthread_vao *
lookup_vao(struct gl_context *ctx, GLuint id)
{
   struct glthread_state *glthread = &ctx->GLThread;
   struct glthread_vao *vao;

   assert(id != 0);

   if (glthread->LastLookedUpVAO &&
       glthread->LastLookedUpVAO->Name == id) {
      vao = glthread->LastLookedUpVAO;
   } else {
      vao = _mesa_HashLookupLocked(&glthread->VAOs, id);
      if (!vao)
         return NULL;

      glthread->LastLookedUpVAO = vao;
   }

   return vao;
}

void
_mesa_glthread_BindVertexArray(struct gl_context *ctx, GLuint id)
{
   struct glthread_state *glthread = &ctx->GLThread;

   if (id == 0) {
      glthread->CurrentVAO = &glthread->DefaultVAO;
   } else {
      struct glthread_vao *vao = lookup_vao(ctx, id);

      if (vao)
         glthread->CurrentVAO = vao;
   }
}

void
_mesa_glthread_DeleteVertexArrays(struct gl_context *ctx,
                                  GLsizei n, const GLuint *ids)
{
   struct glthread_state *glthread = &ctx->GLThread;

   if (!ids)
      return;

   for (int i = 0; i < n; i++) {
      /* IDs equal to 0 should be silently ignored. */
      if (!ids[i])
         continue;

      struct glthread_vao *vao = lookup_vao(ctx, ids[i]);
      if (!vao)
         continue;

      /* If the array object is currently bound, the spec says "the binding
       * for that object reverts to zero and the default vertex array
       * becomes current."
       */
      if (glthread->CurrentVAO == vao)
         glthread->CurrentVAO = &glthread->DefaultVAO;

      if (glthread->LastLookedUpVAO == vao)
         glthread->LastLookedUpVAO = NULL;

      /* The ID is immediately freed for re-use */
      _mesa_HashRemoveLocked(&glthread->VAOs, vao->Name);
      free(vao);
   }
}

void
_mesa_glthread_GenVertexArrays(struct gl_context *ctx,
                               GLsizei n, GLuint *arrays)
{
   struct glthread_state *glthread = &ctx->GLThread;

   if (!arrays)
      return;

   /* The IDs have been generated at this point. Create VAOs for glthread. */
   for (int i = 0; i < n; i++) {
      GLuint id = arrays[i];
      struct glthread_vao *vao;

      vao = calloc(1, sizeof(*vao));
      if (!vao)
         continue; /* Is that all we can do? */

      vao->Name = id;
      _mesa_glthread_reset_vao(vao);
      _mesa_HashInsertLocked(&glthread->VAOs, id, vao);
   }
}

/* If vaobj is NULL, use the currently-bound VAO. */
static inline struct glthread_vao *
get_vao(struct gl_context *ctx, const GLuint *vaobj)
{
   if (vaobj)
      return lookup_vao(ctx, *vaobj);

   return ctx->GLThread.CurrentVAO;
}

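/* Recompute the derived primitive restart state. _RestartIndex is indexed by
 * index_size - 1, hence entries 0, 1, and 3 for 1-, 2-, and 4-byte indices.
 */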
static void
update_primitive_restart(struct gl_context *ctx)
{
   struct glthread_state *glthread = &ctx->GLThread;

   glthread->_PrimitiveRestart = glthread->PrimitiveRestart ||
                                 glthread->PrimitiveRestartFixedIndex;
   glthread->_RestartIndex[0] =
      _mesa_get_prim_restart_index(glthread->PrimitiveRestartFixedIndex,
                                   glthread->RestartIndex, 1);
   glthread->_RestartIndex[1] =
      _mesa_get_prim_restart_index(glthread->PrimitiveRestartFixedIndex,
                                   glthread->RestartIndex, 2);
   glthread->_RestartIndex[3] =
      _mesa_get_prim_restart_index(glthread->PrimitiveRestartFixedIndex,
                                   glthread->RestartIndex, 4);
}

void
_mesa_glthread_set_prim_restart(struct gl_context *ctx, GLenum cap, bool value)
{
   switch (cap) {
   case GL_PRIMITIVE_RESTART:
      ctx->GLThread.PrimitiveRestart = value;
      break;
   case GL_PRIMITIVE_RESTART_FIXED_INDEX:
      ctx->GLThread.PrimitiveRestartFixedIndex = value;
      break;
   }

   update_primitive_restart(ctx);
}

void
_mesa_glthread_PrimitiveRestartIndex(struct gl_context *ctx, GLuint index)
{
   ctx->GLThread.RestartIndex = index;
   update_primitive_restart(ctx);
}

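/* Track how many enabled attribs use each buffer binding. The first enabled
 * attrib sets the binding's bit in BufferEnabled; a second one additionally
 * marks the binding as interleaved in BufferInterleaved.
 */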
static inline void
enable_buffer(struct glthread_vao *vao, unsigned binding_index)
{
   int attrib_count = ++vao->Attrib[binding_index].EnabledAttribCount;

   if (attrib_count == 1)
      vao->BufferEnabled |= 1 << binding_index;
   else if (attrib_count == 2)
      vao->BufferInterleaved |= 1 << binding_index;
}

static inline void
disable_buffer(struct glthread_vao *vao, unsigned binding_index)
{
   int attrib_count = --vao->Attrib[binding_index].EnabledAttribCount;

   if (attrib_count == 0)
      vao->BufferEnabled &= ~(1 << binding_index);
   else if (attrib_count == 1)
      vao->BufferInterleaved &= ~(1 << binding_index);
   else
      assert(attrib_count >= 0);
}

void
_mesa_glthread_ClientState(struct gl_context *ctx, GLuint *vaobj,
                           gl_vert_attrib attrib, bool enable)
{
   /* The primitive restart client state uses a special value. */
   if (attrib == VERT_ATTRIB_PRIMITIVE_RESTART_NV) {
      ctx->GLThread.PrimitiveRestart = enable;
      update_primitive_restart(ctx);
      return;
   }

   if (attrib >= VERT_ATTRIB_MAX)
      return;

   struct glthread_vao *vao = get_vao(ctx, vaobj);
   if (!vao)
      return;

   const unsigned attrib_bit = 1u << attrib;

   if (enable && !(vao->UserEnabled & attrib_bit)) {
      vao->UserEnabled |= attrib_bit;

      /* The generic0 attribute supersedes the position attribute. We need to
       * update BufferEnabled accordingly.
       */
      if (attrib == VERT_ATTRIB_POS) {
         if (!(vao->UserEnabled & VERT_BIT_GENERIC0))
            enable_buffer(vao, vao->Attrib[VERT_ATTRIB_POS].BufferIndex);
      } else {
         enable_buffer(vao, vao->Attrib[attrib].BufferIndex);

         if (attrib == VERT_ATTRIB_GENERIC0 && vao->UserEnabled & VERT_BIT_POS)
            disable_buffer(vao, vao->Attrib[VERT_ATTRIB_POS].BufferIndex);
      }
   } else if (!enable && (vao->UserEnabled & attrib_bit)) {
      vao->UserEnabled &= ~attrib_bit;

      /* The generic0 attribute supersedes the position attribute. We need to
       * update BufferEnabled accordingly.
       */
      if (attrib == VERT_ATTRIB_POS) {
         if (!(vao->UserEnabled & VERT_BIT_GENERIC0))
            disable_buffer(vao, vao->Attrib[VERT_ATTRIB_POS].BufferIndex);
      } else {
         disable_buffer(vao, vao->Attrib[attrib].BufferIndex);

         if (attrib == VERT_ATTRIB_GENERIC0 && vao->UserEnabled & VERT_BIT_POS)
            enable_buffer(vao, vao->Attrib[VERT_ATTRIB_POS].BufferIndex);
      }
   }

   /* The generic0 attribute supersedes the position attribute. */
   vao->Enabled = vao->UserEnabled;
   if (vao->Enabled & VERT_BIT_GENERIC0)
      vao->Enabled &= ~VERT_BIT_POS;
}

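/* Point an attrib at a new buffer binding and, if the attrib is currently
 * enabled, keep the per-binding enabled-attrib counts in sync.
 */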
static void
set_attrib_binding(struct glthread_state *glthread, struct glthread_vao *vao,
                   gl_vert_attrib attrib, unsigned new_binding_index)
{
   unsigned old_binding_index = vao->Attrib[attrib].BufferIndex;

   if (old_binding_index != new_binding_index) {
      vao->Attrib[attrib].BufferIndex = new_binding_index;

      if (vao->Enabled & (1u << attrib)) {
         /* Update BufferEnabled. */
         enable_buffer(vao, new_binding_index);
         disable_buffer(vao, old_binding_index);
      }
   }
}

void _mesa_glthread_AttribDivisor(struct gl_context *ctx, const GLuint *vaobj,
                                  gl_vert_attrib attrib, GLuint divisor)
{
   if (attrib >= VERT_ATTRIB_MAX)
      return;

   struct glthread_vao *vao = get_vao(ctx, vaobj);
   if (!vao)
      return;

   vao->Attrib[attrib].Divisor = divisor;

   set_attrib_binding(&ctx->GLThread, vao, attrib, attrib);

   if (divisor)
      vao->NonZeroDivisorMask |= 1u << attrib;
   else
      vao->NonZeroDivisorMask &= ~(1u << attrib);
}

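/* Common implementation of the gl*Pointer entry points. Classic attrib arrays
 * always use the attrib index as their buffer binding. buffer == 0 means the
 * pointer is a user pointer rather than an offset into a VBO.
 */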
static void
attrib_pointer(struct glthread_state *glthread, struct glthread_vao *vao,
               GLuint buffer, gl_vert_attrib attrib,
               union gl_vertex_format_user format, GLsizei stride,
               const void *pointer)
{
   if (attrib >= VERT_ATTRIB_MAX)
      return;

   unsigned elem_size = element_size(format);

   vao->Attrib[attrib].Format = format;
   vao->Attrib[attrib].ElementSize = elem_size;
   vao->Attrib[attrib].Stride = stride ? stride : elem_size;
   vao->Attrib[attrib].Pointer = pointer;
   vao->Attrib[attrib].RelativeOffset = 0;

   set_attrib_binding(glthread, vao, attrib, attrib);

   if (buffer != 0)
      vao->UserPointerMask &= ~(1u << attrib);
   else
      vao->UserPointerMask |= 1u << attrib;

   if (pointer)
      vao->NonNullPointerMask |= 1u << attrib;
   else
      vao->NonNullPointerMask &= ~(1u << attrib);
}

void
_mesa_glthread_AttribPointer(struct gl_context *ctx, gl_vert_attrib attrib,
                             union gl_vertex_format_user format,
                             GLsizei stride, const void *pointer)
{
   struct glthread_state *glthread = &ctx->GLThread;

   attrib_pointer(glthread, glthread->CurrentVAO,
                  glthread->CurrentArrayBufferName,
                  attrib, format, stride, pointer);
}

void
_mesa_glthread_DSAAttribPointer(struct gl_context *ctx, GLuint vaobj,
                                GLuint buffer, gl_vert_attrib attrib,
                                union gl_vertex_format_user format,
                                GLsizei stride, GLintptr offset)
{
   struct glthread_state *glthread = &ctx->GLThread;
   struct glthread_vao *vao;

   vao = lookup_vao(ctx, vaobj);
   if (!vao)
      return;

   attrib_pointer(glthread, vao, buffer, attrib, format, stride,
                  (const void*)offset);
}

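/* glVertexAttribFormat state. It only applies to generic attribs, so
 * attribindex is remapped with VERT_ATTRIB_GENERIC().
 */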
static void
attrib_format(struct glthread_state *glthread, struct glthread_vao *vao,
              GLuint attribindex, union gl_vertex_format_user format,
              GLuint relativeoffset)
{
   if (attribindex >= VERT_ATTRIB_GENERIC_MAX)
      return;

   unsigned elem_size = element_size(format);

   unsigned i = VERT_ATTRIB_GENERIC(attribindex);
   vao->Attrib[i].Format = format;
   vao->Attrib[i].ElementSize = elem_size;
   vao->Attrib[i].RelativeOffset = relativeoffset;
}

void
_mesa_glthread_AttribFormat(struct gl_context *ctx, GLuint attribindex,
                            union gl_vertex_format_user format,
                            GLuint relativeoffset)
{
   struct glthread_state *glthread = &ctx->GLThread;

   attrib_format(glthread, glthread->CurrentVAO, attribindex, format,
                 relativeoffset);
}

void
_mesa_glthread_DSAAttribFormat(struct gl_context *ctx, GLuint vaobj,
                               GLuint attribindex,
                               union gl_vertex_format_user format,
                               GLuint relativeoffset)
{
   struct glthread_state *glthread = &ctx->GLThread;
   struct glthread_vao *vao = lookup_vao(ctx, vaobj);

   if (vao)
      attrib_format(glthread, vao, attribindex, format, relativeoffset);
}

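/* glBindVertexBuffer state. The VBO offset is stored in Pointer; buffer == 0
 * means the "offset" is really a user pointer.
 */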
static void
bind_vertex_buffer(struct glthread_state *glthread, struct glthread_vao *vao,
                   GLuint bindingindex, GLuint buffer, GLintptr offset,
                   GLsizei stride)
{
   if (bindingindex >= VERT_ATTRIB_GENERIC_MAX)
      return;

   unsigned i = VERT_ATTRIB_GENERIC(bindingindex);
   vao->Attrib[i].Pointer = (const void*)offset;
   vao->Attrib[i].Stride = stride;

   if (buffer != 0)
      vao->UserPointerMask &= ~(1u << i);
   else
      vao->UserPointerMask |= 1u << i;

   if (offset)
      vao->NonNullPointerMask |= 1u << i;
   else
      vao->NonNullPointerMask &= ~(1u << i);
}

void
_mesa_glthread_VertexBuffer(struct gl_context *ctx, GLuint bindingindex,
                            GLuint buffer, GLintptr offset, GLsizei stride)
{
   struct glthread_state *glthread = &ctx->GLThread;

   bind_vertex_buffer(glthread, glthread->CurrentVAO, bindingindex, buffer,
                      offset, stride);
}

void
_mesa_glthread_DSAVertexBuffer(struct gl_context *ctx, GLuint vaobj,
                               GLuint bindingindex, GLuint buffer,
                               GLintptr offset, GLsizei stride)
{
   struct glthread_state *glthread = &ctx->GLThread;
   struct glthread_vao *vao = lookup_vao(ctx, vaobj);

   if (vao)
      bind_vertex_buffer(glthread, vao, bindingindex, buffer, offset, stride);
}

void
_mesa_glthread_DSAVertexBuffers(struct gl_context *ctx, GLuint vaobj,
                                GLuint first, GLsizei count,
                                const GLuint *buffers,
                                const GLintptr *offsets,
                                const GLsizei *strides)
{
   struct glthread_state *glthread = &ctx->GLThread;
   struct glthread_vao *vao;

   vao = lookup_vao(ctx, vaobj);
   if (!vao)
      return;

   for (unsigned i = 0; i < count; i++) {
      bind_vertex_buffer(glthread, vao, first + i, buffers[i], offsets[i],
                         strides[i]);
   }
}

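/* glVertexBindingDivisor state for a generic binding point. */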
static void
binding_divisor(struct glthread_state *glthread, struct glthread_vao *vao,
                GLuint bindingindex, GLuint divisor)
{
   if (bindingindex >= VERT_ATTRIB_GENERIC_MAX)
      return;

   unsigned i = VERT_ATTRIB_GENERIC(bindingindex);
   vao->Attrib[i].Divisor = divisor;

   if (divisor)
      vao->NonZeroDivisorMask |= 1u << i;
   else
      vao->NonZeroDivisorMask &= ~(1u << i);
}

void
_mesa_glthread_BindingDivisor(struct gl_context *ctx, GLuint bindingindex,
                              GLuint divisor)
{
   struct glthread_state *glthread = &ctx->GLThread;

   binding_divisor(glthread, glthread->CurrentVAO, bindingindex, divisor);
}

void
_mesa_glthread_DSABindingDivisor(struct gl_context *ctx, GLuint vaobj,
                                 GLuint bindingindex, GLuint divisor)
{
   struct glthread_state *glthread = &ctx->GLThread;
   struct glthread_vao *vao = lookup_vao(ctx, vaobj);

   if (vao)
      binding_divisor(glthread, vao, bindingindex, divisor);
}

void
_mesa_glthread_AttribBinding(struct gl_context *ctx, GLuint attribindex,
                             GLuint bindingindex)
{
   struct glthread_state *glthread = &ctx->GLThread;

   if (attribindex >= VERT_ATTRIB_GENERIC_MAX ||
       bindingindex >= VERT_ATTRIB_GENERIC_MAX)
      return;

   set_attrib_binding(glthread, glthread->CurrentVAO,
                      VERT_ATTRIB_GENERIC(attribindex),
                      VERT_ATTRIB_GENERIC(bindingindex));
}

void
_mesa_glthread_DSAAttribBinding(struct gl_context *ctx, GLuint vaobj,
                                GLuint attribindex, GLuint bindingindex)
{
   struct glthread_state *glthread = &ctx->GLThread;

   if (attribindex >= VERT_ATTRIB_GENERIC_MAX ||
       bindingindex >= VERT_ATTRIB_GENERIC_MAX)
      return;

   struct glthread_vao *vao = lookup_vao(ctx, vaobj);
   if (vao) {
      set_attrib_binding(glthread, vao,
                         VERT_ATTRIB_GENERIC(attribindex),
                         VERT_ATTRIB_GENERIC(bindingindex));
   }
}

void
_mesa_glthread_DSAElementBuffer(struct gl_context *ctx, GLuint vaobj,
                                GLuint buffer)
{
   struct glthread_vao *vao = lookup_vao(ctx, vaobj);

   if (vao)
      vao->CurrentElementBufferName = buffer;
}

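/* glPushClientAttrib: save the vertex-array related glthread state so it can
 * be restored by glPopClientAttrib. Only GL_CLIENT_VERTEX_ARRAY_BIT is
 * tracked here.
 */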
void
_mesa_glthread_PushClientAttrib(struct gl_context *ctx, GLbitfield mask,
                                bool set_default)
{
   struct glthread_state *glthread = &ctx->GLThread;

   if (glthread->ClientAttribStackTop >= MAX_CLIENT_ATTRIB_STACK_DEPTH)
      return;

   struct glthread_client_attrib *top =
      &glthread->ClientAttribStack[glthread->ClientAttribStackTop];

   if (mask & GL_CLIENT_VERTEX_ARRAY_BIT) {
      top->VAO = *glthread->CurrentVAO;
      top->CurrentArrayBufferName = glthread->CurrentArrayBufferName;
      top->ClientActiveTexture = glthread->ClientActiveTexture;
      top->RestartIndex = glthread->RestartIndex;
      top->PrimitiveRestart = glthread->PrimitiveRestart;
      top->PrimitiveRestartFixedIndex = glthread->PrimitiveRestartFixedIndex;
      top->Valid = true;
   } else {
      top->Valid = false;
   }

   glthread->ClientAttribStackTop++;

   if (set_default)
      _mesa_glthread_ClientAttribDefault(ctx, mask);
}

void
_mesa_glthread_PopClientAttrib(struct gl_context *ctx)
{
   struct glthread_state *glthread = &ctx->GLThread;

   if (glthread->ClientAttribStackTop == 0)
      return;

   glthread->ClientAttribStackTop--;

   struct glthread_client_attrib *top =
      &glthread->ClientAttribStack[glthread->ClientAttribStackTop];

   if (!top->Valid)
      return;

   /* Popping a deleted VAO is an error. */
   struct glthread_vao *vao = NULL;
   if (top->VAO.Name) {
      vao = lookup_vao(ctx, top->VAO.Name);
      if (!vao)
         return;
   }

   /* Restore states. */
   glthread->CurrentArrayBufferName = top->CurrentArrayBufferName;
   glthread->ClientActiveTexture = top->ClientActiveTexture;
   glthread->RestartIndex = top->RestartIndex;
   glthread->PrimitiveRestart = top->PrimitiveRestart;
   glthread->PrimitiveRestartFixedIndex = top->PrimitiveRestartFixedIndex;

   if (!vao)
      vao = &glthread->DefaultVAO;

   assert(top->VAO.Name == vao->Name);
   *vao = top->VAO; /* Copy all fields. */
   glthread->CurrentVAO = vao;
}

void
_mesa_glthread_ClientAttribDefault(struct gl_context *ctx, GLbitfield mask)
{
   struct glthread_state *glthread = &ctx->GLThread;

   if (!(mask & GL_CLIENT_VERTEX_ARRAY_BIT))
      return;

   glthread->CurrentArrayBufferName = 0;
   glthread->ClientActiveTexture = 0;
   glthread->RestartIndex = 0;
   glthread->PrimitiveRestart = false;
   glthread->PrimitiveRestartFixedIndex = false;
   glthread->CurrentVAO = &glthread->DefaultVAO;
   _mesa_glthread_reset_vao(glthread->CurrentVAO);
}

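/* glInterleavedArrays: translate the interleaved layout into the equivalent
 * ClientState and AttribPointer calls.
 */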
void
_mesa_glthread_InterleavedArrays(struct gl_context *ctx, GLenum format,
                                 GLsizei stride, const GLvoid *pointer)
{
   struct gl_interleaved_layout layout;
   unsigned tex = VERT_ATTRIB_TEX(ctx->GLThread.ClientActiveTexture);

   if (stride < 0 || !_mesa_get_interleaved_layout(format, &layout))
      return;

   if (!stride)
      stride = layout.defstride;

   _mesa_glthread_ClientState(ctx, NULL, VERT_ATTRIB_EDGEFLAG, false);
   _mesa_glthread_ClientState(ctx, NULL, VERT_ATTRIB_COLOR_INDEX, false);
   /* XXX also disable secondary color and generic arrays? */

   /* Texcoords */
   if (layout.tflag) {
      _mesa_glthread_ClientState(ctx, NULL, tex, true);
      _mesa_glthread_AttribPointer(ctx, tex,
                                   MESA_PACK_VFORMAT(GL_FLOAT, layout.tcomps,
                                                     0, 0, 0),
                                   stride, (GLubyte *)pointer + layout.toffset);
   } else {
      _mesa_glthread_ClientState(ctx, NULL, tex, false);
   }

   /* Color */
   if (layout.cflag) {
      _mesa_glthread_ClientState(ctx, NULL, VERT_ATTRIB_COLOR0, true);
      _mesa_glthread_AttribPointer(ctx, VERT_ATTRIB_COLOR0,
                                   MESA_PACK_VFORMAT(layout.ctype, layout.ccomps,
                                                     1, 0, 0),
                                   stride, (GLubyte *)pointer + layout.coffset);
   } else {
      _mesa_glthread_ClientState(ctx, NULL, VERT_ATTRIB_COLOR0, false);
   }

   /* Normals */
   if (layout.nflag) {
      _mesa_glthread_ClientState(ctx, NULL, VERT_ATTRIB_NORMAL, true);
      _mesa_glthread_AttribPointer(ctx, VERT_ATTRIB_NORMAL,
                                   MESA_PACK_VFORMAT(GL_FLOAT, 3, 1, 0, 0),
                                   stride, (GLubyte *) pointer + layout.noffset);
   } else {
      _mesa_glthread_ClientState(ctx, NULL, VERT_ATTRIB_NORMAL, false);
   }

   /* Vertices */
   _mesa_glthread_ClientState(ctx, NULL, VERT_ATTRIB_POS, true);
   _mesa_glthread_AttribPointer(ctx, VERT_ATTRIB_POS,
                                MESA_PACK_VFORMAT(GL_FLOAT, layout.vcomps,
                                                  0, 0, 0),
                                stride, (GLubyte *) pointer + layout.voffset);
}

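/* Per-VAO callback for _mesa_HashWalk: unbind any buffer that glthread
 * created internally for user-pointer vertex uploads.
 */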
static void
unbind_uploaded_vbos(void *_vao, void *_ctx)
{
   struct gl_context *ctx = _ctx;
   struct gl_vertex_array_object *vao = _vao;

   assert(ctx->API != API_OPENGL_CORE);

   for (unsigned i = 0; i < ARRAY_SIZE(vao->BufferBinding); i++) {
      if (vao->BufferBinding[i].BufferObj &&
          vao->BufferBinding[i].BufferObj->GLThreadInternal) {
         /* We don't need to restore the user pointer because it's never
          * overwritten. When we bind a VBO internally, the user pointer
          * in gl_array_attribute::Ptr is ignored but left unchanged.
          */
         _mesa_bind_vertex_buffer(ctx, vao, i, NULL, 0,
                                  vao->BufferBinding[i].Stride, false, false);
      }
   }
}

/* Unbind the VBOs in all VAOs that glthread bound internally for non-VBO
 * vertex uploads, restoring the original state.
 */
void
_mesa_glthread_unbind_uploaded_vbos(struct gl_context *ctx)
{
   assert(ctx->API != API_OPENGL_CORE);

   /* Iterate over all VAOs. */
   _mesa_HashWalk(&ctx->Array.Objects, unbind_uploaded_vbos, ctx);
   unbind_uploaded_vbos(ctx->Array.DefaultVAO, ctx);
}