xref: /aosp_15_r20/external/mesa3d/src/mesa/main/glthread_bufferobj.c (revision 6104692788411f58d303aa86923a9ff6ecaded22)
1 /*
2  * Copyright © 2012 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include "main/glthread_marshal.h"
25 #include "main/dispatch.h"
26 #include "main/bufferobj.h"
27 
28 /**
29  * Create an upload buffer. This is called from the app thread, so everything
30  * has to be thread-safe in the driver.
31  */
32 static struct gl_buffer_object *
new_upload_buffer(struct gl_context * ctx,GLsizeiptr size,uint8_t ** ptr)33 new_upload_buffer(struct gl_context *ctx, GLsizeiptr size, uint8_t **ptr)
34 {
35    /* id 0 is used to avoid returning invalid binding values to apps */
36    struct gl_buffer_object *obj =
37       _mesa_bufferobj_alloc(ctx, 0);
38    if (!obj)
39       return NULL;
40 
41    obj->Immutable = true;
42    obj->GLThreadInternal = true;
43 
44    if (!_mesa_bufferobj_data(ctx, GL_ARRAY_BUFFER, size, NULL,
45                           GL_WRITE_ONLY,
46                           GL_CLIENT_STORAGE_BIT | GL_MAP_WRITE_BIT,
47                           obj)) {
48       _mesa_delete_buffer_object(ctx, obj);
49       return NULL;
50    }
51 
52    *ptr = _mesa_bufferobj_map_range(ctx, 0, size,
53                                  GL_MAP_WRITE_BIT |
54                                  GL_MAP_UNSYNCHRONIZED_BIT |
55                                  MESA_MAP_THREAD_SAFE_BIT,
56                                  obj, MAP_GLTHREAD);
57    if (!*ptr) {
58       _mesa_delete_buffer_object(ctx, obj);
59       return NULL;
60    }
61 
62    return obj;
63 }
64 
65 void
_mesa_glthread_release_upload_buffer(struct gl_context * ctx)66 _mesa_glthread_release_upload_buffer(struct gl_context *ctx)
67 {
68    struct glthread_state *glthread = &ctx->GLThread;
69 
70    if (glthread->upload_buffer_private_refcount > 0) {
71       p_atomic_add(&glthread->upload_buffer->RefCount,
72                    -glthread->upload_buffer_private_refcount);
73       glthread->upload_buffer_private_refcount = 0;
74    }
75    _mesa_reference_buffer_object(ctx, &glthread->upload_buffer, NULL);
76 }
77 
78 void
_mesa_glthread_upload(struct gl_context * ctx,const void * data,GLsizeiptr size,unsigned * out_offset,struct gl_buffer_object ** out_buffer,uint8_t ** out_ptr,unsigned start_offset)79 _mesa_glthread_upload(struct gl_context *ctx, const void *data,
80                       GLsizeiptr size, unsigned *out_offset,
81                       struct gl_buffer_object **out_buffer,
82                       uint8_t **out_ptr,
83                       unsigned start_offset)
84 {
85    struct glthread_state *glthread = &ctx->GLThread;
86    const unsigned default_size = 1024 * 1024;
87 
88    if (unlikely(size > INT_MAX))
89       return;
90 
91    /* The alignment was chosen arbitrarily. */
92    unsigned offset = align(glthread->upload_offset, size <= 4 ? 4 : 8) + start_offset;
93 
94    /* Allocate a new buffer if needed. */
95    if (unlikely(!glthread->upload_buffer || offset + size > default_size)) {
96       /* If the size is greater than the buffer size, allocate a separate buffer
97        * just for this upload.
98        */
99       if (unlikely(start_offset + size > default_size)) {
100          uint8_t *ptr;
101 
102          assert(*out_buffer == NULL);
103          *out_buffer = new_upload_buffer(ctx, size + start_offset, &ptr);
104          if (!*out_buffer)
105             return;
106 
107          ptr += start_offset;
108          *out_offset = start_offset;
109          if (data)
110             memcpy(ptr, data, size);
111          else
112             *out_ptr = ptr;
113          return;
114       }
115 
116       _mesa_glthread_release_upload_buffer(ctx);
117 
118       glthread->upload_buffer =
119          new_upload_buffer(ctx, default_size, &glthread->upload_ptr);
120       glthread->upload_offset = 0;
121       offset = start_offset;
122 
123       /* Since atomic operations are very very slow when 2 threads are not
124        * sharing one L3 cache (which can happen on AMD Zen), prevent using
125        * atomics as follows:
126        *
127        * This function has to return a buffer reference to the caller.
128        * Instead of atomic_inc for every call, it does all possible future
129        * increments in advance when the upload buffer is allocated.
130        * The maximum number of times the function can be called per upload
131        * buffer is default_size, because the minimum allocation size is 1.
132        * Therefore the function can only return default_size number of
133        * references at most, so we will never need more. This is the number
134        * that is added to RefCount at allocation.
135        *
136        * upload_buffer_private_refcount tracks how many buffer references
137        * are left to return to callers. If the buffer is full and there are
138        * still references left, they are atomically subtracted from RefCount
139        * before the buffer is unreferenced.
140        *
141        * This can increase performance by 20%.
142        */
143       glthread->upload_buffer->RefCount += default_size;
144       glthread->upload_buffer_private_refcount = default_size;
145    }
146 
147    /* Upload data. */
148    if (data)
149       memcpy(glthread->upload_ptr + offset, data, size);
150    else
151       *out_ptr = glthread->upload_ptr + offset;
152 
153    glthread->upload_offset = offset + size;
154    *out_offset = offset;
155 
156    assert(*out_buffer == NULL);
157    assert(glthread->upload_buffer_private_refcount > 0);
158    *out_buffer = glthread->upload_buffer;
159    glthread->upload_buffer_private_refcount--;
160 }
161 
162 /** Tracks the current bindings for the vertex array and index array buffers.
163  *
164  * This is part of what we need to enable glthread on compat-GL contexts that
165  * happen to use VBOs, without also supporting the full tracking of VBO vs
166  * user vertex array bindings per attribute on each vertex array for
167  * determining what to upload at draw call time.
168  *
169  * Note that GL core makes it so that a buffer binding with an invalid handle
170  * in the "buffer" parameter will throw an error, and then a
 * glVertexAttribPointer() that follows might not end up pointing at a VBO.
172  * However, in GL core the draw call would throw an error as well, so we don't
173  * really care if our tracking is wrong for this case -- we never need to
174  * marshal user data for draw calls, and the unmarshal will just generate an
175  * error or not as appropriate.
176  *
177  * For compatibility GL, we do need to accurately know whether the draw call
178  * on the unmarshal side will dereference a user pointer or load data from a
179  * VBO per vertex.  That would make it seem like we need to track whether a
180  * "buffer" is valid, so that we can know when an error will be generated
181  * instead of updating the binding.  However, compat GL has the ridiculous
182  * feature that if you pass a bad name, it just gens a buffer object for you,
183  * so we escape without having to know if things are valid or not.
184  */
185 static void
_mesa_glthread_BindBuffer(struct gl_context * ctx,GLenum target,GLuint buffer)186 _mesa_glthread_BindBuffer(struct gl_context *ctx, GLenum target, GLuint buffer)
187 {
188    struct glthread_state *glthread = &ctx->GLThread;
189 
190    switch (target) {
191    case GL_ARRAY_BUFFER:
192       glthread->CurrentArrayBufferName = buffer;
193       break;
194    case GL_ELEMENT_ARRAY_BUFFER:
195       /* The current element array buffer binding is actually tracked in the
196        * vertex array object instead of the context, so this would need to
197        * change on vertex array object updates.
198        */
199       glthread->CurrentVAO->CurrentElementBufferName = buffer;
200       break;
201    case GL_DRAW_INDIRECT_BUFFER:
202       glthread->CurrentDrawIndirectBufferName = buffer;
203       break;
204    case GL_PIXEL_PACK_BUFFER:
205       glthread->CurrentPixelPackBufferName = buffer;
206       break;
207    case GL_PIXEL_UNPACK_BUFFER:
208       glthread->CurrentPixelUnpackBufferName = buffer;
209       break;
210    case GL_QUERY_BUFFER:
211       glthread->CurrentQueryBufferName = buffer;
212       break;
213    }
214 }
215 
/* Queued glBindBuffer call; decoded by _mesa_unmarshal_BindBuffer. */
struct marshal_cmd_BindBuffer
{
   struct marshal_cmd_base cmd_base;
   GLenum16 target; /* bind target, clamped to 16 bits at marshal time */
   GLuint buffer;   /* buffer name to bind (0 = unbind) */
};
222 
/* Execute a queued glBindBuffer on the driver thread. Returns the command
 * size in 8-byte slots so the dispatcher can advance to the next command.
 */
uint32_t
_mesa_unmarshal_BindBuffer(struct gl_context *ctx,
                           const struct marshal_cmd_BindBuffer *restrict cmd)
{
   CALL_BindBuffer(ctx->Dispatch.Current, (cmd->target, cmd->buffer));
   return align(sizeof(struct marshal_cmd_BindBuffer), 8) / 8;
}
230 
/* Marshal glBindBuffer, merging redundant rebinds of the same target when
 * the previous BindBuffer commands are still the newest ones in the batch.
 */
void GLAPIENTRY
_mesa_marshal_BindBuffer(GLenum target, GLuint buffer)
{
   GET_CURRENT_CONTEXT(ctx);
   /* Update glthread's own binding tracking before queueing the call. */
   _mesa_glthread_BindBuffer(ctx, target, buffer);

   struct glthread_state *glthread = &ctx->GLThread;
   struct marshal_cmd_BindBuffer *last1 = glthread->LastBindBuffer1;
   struct marshal_cmd_BindBuffer *last2 = glthread->LastBindBuffer2;
   int cmd_size = sizeof(struct marshal_cmd_BindBuffer);

   /* Eliminate duplicated BindBuffer calls, which are plentiful
    * in viewperf2020/catia. In this example, the first 2 calls are eliminated
    * by glthread by keeping track of the last 2 BindBuffer calls and
    * overwriting them if the target matches.
    *
    *   glBindBuffer(GL_ARRAY_BUFFER, 0);
    *   glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
    *   glBindBuffer(GL_ARRAY_BUFFER, 6);
    *   glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 7);
    *
    * If the last call is BindBuffer...
    * last2 is more recent. last1 is before last2.
    */
   if (_mesa_glthread_call_is_last(glthread, &last2->cmd_base,
                                   align(cmd_size, 8) / 8)) {
      /* If the target is in the last call and unbinding the buffer, overwrite
       * the buffer ID there.
       */
      if (target == last2->target) {
         /* We can't overwrite binding non-zero buffers because binding also
          * creates the GL objects (like glCreateBuffers), which can't be skipped.
          */
         if (!last2->buffer) {
            last2->buffer = buffer;
            return;
         }
      } else if (last1 + 1 == last2 && target == last1->target &&
                 !last1->buffer) {
         /* last1 is directly adjacent to last2, so it is also still live. */
         last1->buffer = buffer;
         return;
      }
   }

   struct marshal_cmd_BindBuffer *cmd =
      _mesa_glthread_allocate_command(ctx, DISPATCH_CMD_BindBuffer, cmd_size);
   cmd->target = MIN2(target, 0xffff); /* clamped to 0xffff (invalid enum) */
   cmd->buffer = buffer;

   /* Shift the history: the new command becomes the most recent one. */
   glthread->LastBindBuffer1 = last2;
   glthread->LastBindBuffer2 = cmd;
}
283 
284 void
_mesa_glthread_DeleteBuffers(struct gl_context * ctx,GLsizei n,const GLuint * buffers)285 _mesa_glthread_DeleteBuffers(struct gl_context *ctx, GLsizei n,
286                              const GLuint *buffers)
287 {
288    struct glthread_state *glthread = &ctx->GLThread;
289 
290    if (!buffers || n < 0)
291       return;
292 
293    for (unsigned i = 0; i < n; i++) {
294       GLuint id = buffers[i];
295 
296       if (id == glthread->CurrentArrayBufferName)
297          _mesa_glthread_BindBuffer(ctx, GL_ARRAY_BUFFER, 0);
298       if (id == glthread->CurrentVAO->CurrentElementBufferName)
299          _mesa_glthread_BindBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, 0);
300       if (id == glthread->CurrentDrawIndirectBufferName)
301          _mesa_glthread_BindBuffer(ctx, GL_DRAW_INDIRECT_BUFFER, 0);
302       if (id == glthread->CurrentPixelPackBufferName)
303          _mesa_glthread_BindBuffer(ctx, GL_PIXEL_PACK_BUFFER, 0);
304       if (id == glthread->CurrentPixelUnpackBufferName)
305          _mesa_glthread_BindBuffer(ctx, GL_PIXEL_UNPACK_BUFFER, 0);
306    }
307 }
308 
/* BufferData: marshalled asynchronously */
struct marshal_cmd_BufferData
{
   struct marshal_cmd_base cmd_base;
   uint16_t num_slots;    /* total command size in 8-byte slots, incl. inline data */
   GLuint target_or_name; /* bind target, or buffer name for the named variants */
   GLsizeiptr size;
   GLenum usage;
   const GLvoid *data_external_mem; /* kept (not copied) for GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD */
   bool data_null; /* If set, no data follows for "data" */
   bool named;     /* glNamedBufferData */
   bool ext_dsa;   /* glNamedBufferDataEXT */
   /* Next size bytes are GLubyte data[size] */
};
323 
324 uint32_t
_mesa_unmarshal_BufferData(struct gl_context * ctx,const struct marshal_cmd_BufferData * restrict cmd)325 _mesa_unmarshal_BufferData(struct gl_context *ctx,
326                            const struct marshal_cmd_BufferData *restrict cmd)
327 {
328    const GLuint target_or_name = cmd->target_or_name;
329    const GLsizei size = cmd->size;
330    const GLenum usage = cmd->usage;
331    const void *data;
332 
333    if (cmd->data_null)
334       data = NULL;
335    else if (!cmd->named && target_or_name == GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD)
336       data = cmd->data_external_mem;
337    else
338       data = (const void *) (cmd + 1);
339 
340    if (cmd->ext_dsa) {
341       CALL_NamedBufferDataEXT(ctx->Dispatch.Current,
342                               (target_or_name, size, data, usage));
343    } else if (cmd->named) {
344       CALL_NamedBufferData(ctx->Dispatch.Current,
345                            (target_or_name, size, data, usage));
346    } else {
347       CALL_BufferData(ctx->Dispatch.Current,
348                       (target_or_name, size, data, usage));
349    }
350    return cmd->num_slots;
351 }
352 
/* Never executed: the marshal generator emits this entry point, but all
 * BufferData variants are queued as DISPATCH_CMD_BufferData and decoded
 * by _mesa_unmarshal_BufferData above.
 */
uint32_t
_mesa_unmarshal_NamedBufferData(struct gl_context *ctx,
                                const struct marshal_cmd_NamedBufferData *restrict cmd)
{
   unreachable("never used - all BufferData variants use DISPATCH_CMD_BufferData");
   return 0;
}
360 
/* Never executed: see _mesa_unmarshal_NamedBufferData. */
uint32_t
_mesa_unmarshal_NamedBufferDataEXT(struct gl_context *ctx,
                                   const struct marshal_cmd_NamedBufferDataEXT *restrict cmd)
{
   unreachable("never used - all BufferData variants use DISPATCH_CMD_BufferData");
   return 0;
}
368 
/**
 * Shared marshalling for glBufferData / glNamedBufferData /
 * glNamedBufferDataEXT.
 *
 * \param target_or_name  bind target, or buffer name when "named" is set
 * \param named           true for the direct-state-access variants
 * \param ext_dsa         true for the EXT_direct_state_access variant
 * \param func            API name used when synchronizing on the slow path
 */
static void
_mesa_marshal_BufferData_merged(GLuint target_or_name, GLsizeiptr size,
                                const GLvoid *data, GLenum usage, bool named,
                                bool ext_dsa, const char *func)
{
   GET_CURRENT_CONTEXT(ctx);
   /* For AMD external virtual memory, the pointer is stored, not copied. */
   bool external_mem = !named &&
                       target_or_name == GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD;
   bool copy_data = data && !external_mem;
   size_t cmd_size = sizeof(struct marshal_cmd_BufferData) + (copy_data ? size : 0);

   /* Slow path: sync with the driver thread and call directly when the
    * size is invalid, the command (with inline data) doesn't fit in the
    * marshal buffer, or a named variant got buffer 0 (error case handled
    * synchronously by the driver).
    */
   if (unlikely(size < 0 || size > INT_MAX || cmd_size > MARSHAL_MAX_CMD_SIZE ||
                (named && target_or_name == 0))) {
      _mesa_glthread_finish_before(ctx, func);
      if (named) {
         CALL_NamedBufferData(ctx->Dispatch.Current,
                              (target_or_name, size, data, usage));
      } else {
         CALL_BufferData(ctx->Dispatch.Current,
                         (target_or_name, size, data, usage));
      }
      return;
   }

   struct marshal_cmd_BufferData *cmd =
      _mesa_glthread_allocate_command(ctx, DISPATCH_CMD_BufferData,
                                      cmd_size);
   cmd->num_slots = align(cmd_size, 8) / 8;
   cmd->target_or_name = target_or_name;
   cmd->size = size;
   cmd->usage = usage;
   cmd->data_null = !data;
   cmd->named = named;
   cmd->ext_dsa = ext_dsa;
   cmd->data_external_mem = data;

   if (copy_data) {
      /* Copy the user data inline, right after the command structure. */
      char *variable_data = (char *) (cmd + 1);
      memcpy(variable_data, data, size);
   }
}
410 
/* glBufferData: thin wrapper over the shared BufferData marshalling. */
void GLAPIENTRY
_mesa_marshal_BufferData(GLenum target, GLsizeiptr size, const GLvoid * data,
                         GLenum usage)
{
   _mesa_marshal_BufferData_merged(target, size, data, usage, false, false,
                                   "BufferData");
}
418 
/* glNamedBufferData (ARB DSA): thin wrapper over the shared marshalling. */
void GLAPIENTRY
_mesa_marshal_NamedBufferData(GLuint buffer, GLsizeiptr size,
                              const GLvoid * data, GLenum usage)
{
   _mesa_marshal_BufferData_merged(buffer, size, data, usage, true, false,
                                   "NamedBufferData");
}
426 
/* glNamedBufferDataEXT (EXT DSA): thin wrapper over the shared marshalling. */
void GLAPIENTRY
_mesa_marshal_NamedBufferDataEXT(GLuint buffer, GLsizeiptr size,
                                 const GLvoid *data, GLenum usage)
{
   _mesa_marshal_BufferData_merged(buffer, size, data, usage, true, true,
                                   "NamedBufferDataEXT");
}
434 
435 
/* BufferSubData: marshalled asynchronously */
struct marshal_cmd_BufferSubData
{
   struct marshal_cmd_base cmd_base;
   uint16_t num_slots; /* total command size in 8-byte slots, incl. inline data */
   /* NOTE(review): declared GLenum here but GLuint in marshal_cmd_BufferData;
    * both are unsigned int in GL, so this is a cosmetic inconsistency only.
    */
   GLenum target_or_name; /* bind target, or buffer name for the named variants */
   GLintptr offset;
   GLsizeiptr size;
   bool named;   /* glNamedBufferSubData */
   bool ext_dsa; /* glNamedBufferSubDataEXT */
   /* Next size bytes are GLubyte data[size] */
};
448 
449 uint32_t
_mesa_unmarshal_BufferSubData(struct gl_context * ctx,const struct marshal_cmd_BufferSubData * restrict cmd)450 _mesa_unmarshal_BufferSubData(struct gl_context *ctx,
451                               const struct marshal_cmd_BufferSubData *restrict cmd)
452 {
453    const GLenum target_or_name = cmd->target_or_name;
454    const GLintptr offset = cmd->offset;
455    const GLsizeiptr size = cmd->size;
456    const void *data = (const void *) (cmd + 1);
457 
458    if (cmd->ext_dsa) {
459       CALL_NamedBufferSubDataEXT(ctx->Dispatch.Current,
460                                  (target_or_name, offset, size, data));
461    } else if (cmd->named) {
462       CALL_NamedBufferSubData(ctx->Dispatch.Current,
463                               (target_or_name, offset, size, data));
464    } else {
465       CALL_BufferSubData(ctx->Dispatch.Current,
466                          (target_or_name, offset, size, data));
467    }
468    return cmd->num_slots;
469 }
470 
/* Never executed: the marshal generator emits this entry point, but all
 * BufferSubData variants are queued as DISPATCH_CMD_BufferSubData and
 * decoded by _mesa_unmarshal_BufferSubData above.
 */
uint32_t
_mesa_unmarshal_NamedBufferSubData(struct gl_context *ctx,
                                   const struct marshal_cmd_NamedBufferSubData *restrict cmd)
{
   unreachable("never used - all BufferSubData variants use DISPATCH_CMD_BufferSubData");
   return 0;
}
478 
/* Never executed: see _mesa_unmarshal_NamedBufferSubData. */
uint32_t
_mesa_unmarshal_NamedBufferSubDataEXT(struct gl_context *ctx,
                                      const struct marshal_cmd_NamedBufferSubDataEXT *restrict cmd)
{
   unreachable("never used - all BufferSubData variants use DISPATCH_CMD_BufferSubData");
   return 0;
}
486 
487 static void
_mesa_marshal_BufferSubData_merged(GLuint target_or_name,GLintptr offset,GLsizeiptr size,const GLvoid * data,bool named,bool ext_dsa,const char * func)488 _mesa_marshal_BufferSubData_merged(GLuint target_or_name, GLintptr offset,
489                                    GLsizeiptr size, const GLvoid *data,
490                                    bool named, bool ext_dsa, const char *func)
491 {
492    GET_CURRENT_CONTEXT(ctx);
493    size_t cmd_size = sizeof(struct marshal_cmd_BufferSubData) + size;
494 
495    /* Fast path: Copy the data to an upload buffer, and use the GPU
496     * to copy the uploaded data to the destination buffer.
497     */
498    /* TODO: Handle offset == 0 && size < buffer_size.
499     *       If offset == 0 and size == buffer_size, it's better to discard
500     *       the buffer storage, but we don't know the buffer size in glthread.
501     */
502    if (ctx->Const.AllowGLThreadBufferSubDataOpt &&
503        ctx->Dispatch.Current != ctx->Dispatch.ContextLost &&
504        data && offset > 0 && size > 0) {
505       struct gl_buffer_object *upload_buffer = NULL;
506       unsigned upload_offset = 0;
507 
508       _mesa_glthread_upload(ctx, data, size, &upload_offset, &upload_buffer,
509                             NULL, 0);
510 
511       if (upload_buffer) {
512          _mesa_marshal_InternalBufferSubDataCopyMESA((GLintptr)upload_buffer,
513                                                      upload_offset,
514                                                      target_or_name,
515                                                      offset, size, named,
516                                                      ext_dsa);
517          return;
518       }
519    }
520 
521    if (unlikely(size < 0 || size > INT_MAX || cmd_size < 0 ||
522                 cmd_size > MARSHAL_MAX_CMD_SIZE || !data ||
523                 (named && target_or_name == 0))) {
524       _mesa_glthread_finish_before(ctx, func);
525       if (named) {
526          CALL_NamedBufferSubData(ctx->Dispatch.Current,
527                                  (target_or_name, offset, size, data));
528       } else {
529          CALL_BufferSubData(ctx->Dispatch.Current,
530                             (target_or_name, offset, size, data));
531       }
532       return;
533    }
534 
535    struct marshal_cmd_BufferSubData *cmd =
536       _mesa_glthread_allocate_command(ctx, DISPATCH_CMD_BufferSubData,
537                                       cmd_size);
538    cmd->num_slots = align(cmd_size, 8) / 8;
539    cmd->target_or_name = target_or_name;
540    cmd->offset = offset;
541    cmd->size = size;
542    cmd->named = named;
543    cmd->ext_dsa = ext_dsa;
544 
545    char *variable_data = (char *) (cmd + 1);
546    memcpy(variable_data, data, size);
547 }
548 
/* glBufferSubData: thin wrapper over the shared BufferSubData marshalling. */
void GLAPIENTRY
_mesa_marshal_BufferSubData(GLenum target, GLintptr offset, GLsizeiptr size,
                            const GLvoid * data)
{
   _mesa_marshal_BufferSubData_merged(target, offset, size, data, false,
                                      false, "BufferSubData");
}
556 
/* glNamedBufferSubData (ARB DSA): thin wrapper over the shared marshalling. */
void GLAPIENTRY
_mesa_marshal_NamedBufferSubData(GLuint buffer, GLintptr offset,
                                 GLsizeiptr size, const GLvoid * data)
{
   _mesa_marshal_BufferSubData_merged(buffer, offset, size, data, true,
                                      false, "NamedBufferSubData");
}
564 
/* glNamedBufferSubDataEXT (EXT DSA): thin wrapper over the shared marshalling. */
void GLAPIENTRY
_mesa_marshal_NamedBufferSubDataEXT(GLuint buffer, GLintptr offset,
                                    GLsizeiptr size, const GLvoid * data)
{
   _mesa_marshal_BufferSubData_merged(buffer, offset, size, data, true,
                                      true, "NamedBufferSubDataEXT");
}
572