1 /*
2 * Mesa 3-D graphics library
3 *
4 * Copyright (C) 1999-2008 Brian Paul All Rights Reserved.
5 * Copyright (C) 2009 VMware, Inc. All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice shall be included
15 * in all copies or substantial portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * OTHER DEALINGS IN THE SOFTWARE.
24 */
25
26
27 /**
28 * \file bufferobj.c
29 * \brief Functions for the GL_ARB_vertex/pixel_buffer_object extensions.
30 * \author Brian Paul, Ian Romanick
31 */
32
33 #include <stdbool.h>
34 #include <inttypes.h> /* for PRId64 macro */
35 #include "util/u_debug.h"
36 #include "util/glheader.h"
37 #include "enums.h"
38 #include "hash.h"
39 #include "context.h"
40 #include "bufferobj.h"
41 #include "externalobjects.h"
42 #include "mtypes.h"
43 #include "teximage.h"
44 #include "glformats.h"
45 #include "texstore.h"
46 #include "transformfeedback.h"
47 #include "varray.h"
48 #include "util/u_atomic.h"
49 #include "util/u_memory.h"
50 #include "api_exec_decl.h"
51 #include "util/set.h"
52
53 #include "state_tracker/st_debug.h"
54 #include "state_tracker/st_atom.h"
55 #include "frontend/api.h"
56
57 #include "util/u_inlines.h"
58 /* Debug flags */
59 /*#define VBO_DEBUG*/
60 /*#define BOUNDS_CHECK*/
61
62
63 /**
64 * We count the number of buffer modification calls to check for
65 * inefficient buffer use. This is the number of such calls before we
66 * issue a warning.
67 */
68 #define BUFFER_WARNING_CALL_COUNT 4
69
70
71 /**
72 * Replace data in a subrange of buffer object. If the data range
73 * specified by size + offset extends beyond the end of the buffer or
74 * if data is NULL, no copy is performed.
75 * Called via glBufferSubDataARB().
76 */
77 void
_mesa_bufferobj_subdata(struct gl_context * ctx,GLintptrARB offset,GLsizeiptrARB size,const void * data,struct gl_buffer_object * obj)78 _mesa_bufferobj_subdata(struct gl_context *ctx,
79 GLintptrARB offset,
80 GLsizeiptrARB size,
81 const void *data, struct gl_buffer_object *obj)
82 {
83 /* we may be called from VBO code, so double-check params here */
84 assert(offset >= 0);
85 assert(size >= 0);
86 assert(offset + size <= obj->Size);
87
88 if (!size)
89 return;
90
91 /*
92 * According to ARB_vertex_buffer_object specification, if data is null,
93 * then the contents of the buffer object's data store is undefined. We just
94 * ignore, and leave it unchanged.
95 */
96 if (!data)
97 return;
98
99 if (!obj->buffer) {
100 /* we probably ran out of memory during buffer allocation */
101 return;
102 }
103
104 /* Now that transfers are per-context, we don't have to figure out
105 * flushing here. Usually drivers won't need to flush in this case
106 * even if the buffer is currently referenced by hardware - they
107 * just queue the upload as dma rather than mapping the underlying
108 * buffer directly.
109 *
110 * If the buffer is mapped, suppress implicit buffer range invalidation
111 * by using PIPE_MAP_DIRECTLY.
112 */
113 struct pipe_context *pipe = ctx->pipe;
114
115 pipe->buffer_subdata(pipe, obj->buffer,
116 _mesa_bufferobj_mapped(obj, MAP_USER) ?
117 PIPE_MAP_DIRECTLY : 0,
118 offset, size, data);
119 }
120
121
122 /**
123 * Called via glGetBufferSubDataARB().
124 */
125 static void
bufferobj_get_subdata(struct gl_context * ctx,GLintptrARB offset,GLsizeiptrARB size,void * data,struct gl_buffer_object * obj)126 bufferobj_get_subdata(struct gl_context *ctx,
127 GLintptrARB offset,
128 GLsizeiptrARB size,
129 void *data, struct gl_buffer_object *obj)
130 {
131 /* we may be called from VBO code, so double-check params here */
132 assert(offset >= 0);
133 assert(size >= 0);
134 assert(offset + size <= obj->Size);
135
136 if (!size)
137 return;
138
139 if (!obj->buffer) {
140 /* we probably ran out of memory during buffer allocation */
141 return;
142 }
143
144 pipe_buffer_read(ctx->pipe, obj->buffer,
145 offset, size, data);
146 }
147
148 void
_mesa_bufferobj_get_subdata(struct gl_context * ctx,GLintptrARB offset,GLsizeiptrARB size,void * data,struct gl_buffer_object * obj)149 _mesa_bufferobj_get_subdata(struct gl_context *ctx,
150 GLintptrARB offset,
151 GLsizeiptrARB size,
152 void *data, struct gl_buffer_object *obj)
153 {
154 bufferobj_get_subdata(ctx, offset, size, data, obj);
155 }
156
157 /**
158 * Return bitmask of PIPE_BIND_x flags corresponding a GL buffer target.
159 */
160 static unsigned
buffer_target_to_bind_flags(GLenum target)161 buffer_target_to_bind_flags(GLenum target)
162 {
163 switch (target) {
164 case GL_PIXEL_PACK_BUFFER_ARB:
165 case GL_PIXEL_UNPACK_BUFFER_ARB:
166 return PIPE_BIND_RENDER_TARGET | PIPE_BIND_SAMPLER_VIEW;
167 case GL_ARRAY_BUFFER_ARB:
168 return PIPE_BIND_VERTEX_BUFFER;
169 case GL_ELEMENT_ARRAY_BUFFER_ARB:
170 return PIPE_BIND_INDEX_BUFFER;
171 case GL_TEXTURE_BUFFER:
172 return PIPE_BIND_SAMPLER_VIEW;
173 case GL_TRANSFORM_FEEDBACK_BUFFER:
174 return PIPE_BIND_STREAM_OUTPUT;
175 case GL_UNIFORM_BUFFER:
176 return PIPE_BIND_CONSTANT_BUFFER;
177 case GL_DRAW_INDIRECT_BUFFER:
178 case GL_PARAMETER_BUFFER_ARB:
179 return PIPE_BIND_COMMAND_ARGS_BUFFER;
180 case GL_ATOMIC_COUNTER_BUFFER:
181 case GL_SHADER_STORAGE_BUFFER:
182 return PIPE_BIND_SHADER_BUFFER;
183 case GL_QUERY_BUFFER:
184 return PIPE_BIND_QUERY_BUFFER;
185 default:
186 return 0;
187 }
188 }
189
190
191 /**
192 * Return bitmask of PIPE_RESOURCE_x flags corresponding to GL_MAP_x flags.
193 */
194 static unsigned
storage_flags_to_buffer_flags(GLbitfield storageFlags)195 storage_flags_to_buffer_flags(GLbitfield storageFlags)
196 {
197 unsigned flags = 0;
198 if (storageFlags & GL_MAP_PERSISTENT_BIT)
199 flags |= PIPE_RESOURCE_FLAG_MAP_PERSISTENT;
200 if (storageFlags & GL_MAP_COHERENT_BIT)
201 flags |= PIPE_RESOURCE_FLAG_MAP_COHERENT;
202 if (storageFlags & GL_SPARSE_STORAGE_BIT_ARB)
203 flags |= PIPE_RESOURCE_FLAG_SPARSE;
204 return flags;
205 }
206
207
208 /**
209 * From a buffer object's target, immutability flag, storage flags and
210 * usage hint, return a pipe_resource_usage value (PIPE_USAGE_DYNAMIC,
211 * STREAM, etc).
212 */
213 static enum pipe_resource_usage
buffer_usage(GLenum target,GLboolean immutable,GLbitfield storageFlags,GLenum usage)214 buffer_usage(GLenum target, GLboolean immutable,
215 GLbitfield storageFlags, GLenum usage)
216 {
217 /* "immutable" means that "storageFlags" was set by the user and "usage"
218 * was guessed by Mesa. Otherwise, "usage" was set by the user and
219 * storageFlags was guessed by Mesa.
220 *
221 * Therefore, use storageFlags with immutable, else use "usage".
222 */
223 if (immutable) {
224 /* BufferStorage */
225 if (storageFlags & GL_MAP_READ_BIT)
226 return PIPE_USAGE_STAGING;
227 else if (storageFlags & GL_CLIENT_STORAGE_BIT)
228 return PIPE_USAGE_STREAM;
229 else
230 return PIPE_USAGE_DEFAULT;
231 }
232 else {
233 /* These are often read by the CPU, so enable CPU caches. */
234 if (target == GL_PIXEL_PACK_BUFFER ||
235 target == GL_PIXEL_UNPACK_BUFFER)
236 return PIPE_USAGE_STAGING;
237
238 /* BufferData */
239 switch (usage) {
240 case GL_DYNAMIC_DRAW:
241 case GL_DYNAMIC_COPY:
242 return PIPE_USAGE_DYNAMIC;
243 case GL_STREAM_DRAW:
244 case GL_STREAM_COPY:
245 return PIPE_USAGE_STREAM;
246 case GL_STATIC_READ:
247 case GL_DYNAMIC_READ:
248 case GL_STREAM_READ:
249 return PIPE_USAGE_STAGING;
250 case GL_STATIC_DRAW:
251 case GL_STATIC_COPY:
252 default:
253 return PIPE_USAGE_DEFAULT;
254 }
255 }
256 }
257
258
/**
 * (Re)allocate the pipe_resource backing \p obj and optionally fill it
 * with data.  This is the common worker for glBufferData,
 * glBufferStorage, memory-object imports (\p memObj != NULL) and
 * AMD_pinned_memory (target == GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD).
 *
 * \param memObj  if non-NULL, import storage from this memory object at
 *                byte \p offset instead of allocating a new resource.
 * \param offset  byte offset into \p memObj (ignored otherwise).
 * \return GL_TRUE on success, GL_FALSE on failure (size too large or
 *         out of memory); on failure obj->Size is reset to 0.
 */
static ALWAYS_INLINE GLboolean
bufferobj_data(struct gl_context *ctx,
               GLenum target,
               GLsizeiptrARB size,
               const void *data,
               struct gl_memory_object *memObj,
               GLuint64 offset,
               GLenum usage,
               GLbitfield storageFlags,
               struct gl_buffer_object *obj)
{
   struct pipe_context *pipe = ctx->pipe;
   struct pipe_screen *screen = pipe->screen;
   bool is_mapped = _mesa_bufferobj_mapped(obj, MAP_USER);

   if (size > UINT32_MAX || offset > UINT32_MAX) {
      /* pipe_resource.width0 is 32 bits only and increasing it
       * to 64 bits doesn't make much sense since hw support
       * for > 4GB resources is limited.
       */
      obj->Size = 0;
      return GL_FALSE;
   }

   /* Fast path: if size/usage/flags match the existing buffer, try to
    * reuse the current resource instead of reallocating it.
    */
   if (target != GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD &&
       size && obj->buffer &&
       obj->Size == size &&
       obj->Usage == usage &&
       obj->StorageFlags == storageFlags) {
      if (data) {
         /* Just discard the old contents and write new data.
          * This should be the same as creating a new buffer, but we avoid
          * a lot of validation in Mesa.
          *
          * If the buffer is mapped, we can't discard it.
          *
          * PIPE_MAP_DIRECTLY suppresses implicit buffer range
          * invalidation.
          */
         pipe->buffer_subdata(pipe, obj->buffer,
                              is_mapped ? PIPE_MAP_DIRECTLY :
                                          PIPE_MAP_DISCARD_WHOLE_RESOURCE,
                              0, size, data);
         return GL_TRUE;
      } else if (is_mapped) {
         return GL_TRUE; /* can't reallocate, nothing to do */
      } else if (screen->get_param(screen, PIPE_CAP_INVALIDATE_BUFFER)) {
         pipe->invalidate_resource(pipe, obj->buffer);
         return GL_TRUE;
      }
   }

   obj->Size = size;
   obj->Usage = usage;
   obj->StorageFlags = storageFlags;

   /* Drop the old resource, returning any private references first. */
   _mesa_bufferobj_release_buffer(obj);

   unsigned bindings = buffer_target_to_bind_flags(target);

   if (storageFlags & MESA_GALLIUM_VERTEX_STATE_STORAGE)
      bindings |= PIPE_BIND_VERTEX_STATE;

   if (ST_DEBUG & DEBUG_BUFFER) {
      debug_printf("Create buffer size %" PRId64 " bind 0x%x\n",
                   (int64_t) size, bindings);
   }

   if (size != 0) {
      struct pipe_resource buffer;

      memset(&buffer, 0, sizeof buffer);
      buffer.target = PIPE_BUFFER;
      buffer.format = PIPE_FORMAT_R8_UNORM; /* want TYPELESS or similar */
      buffer.bind = bindings;
      buffer.usage =
         buffer_usage(target, obj->Immutable, storageFlags, usage);
      buffer.flags = storage_flags_to_buffer_flags(storageFlags);
      buffer.width0 = size;
      buffer.height0 = 1;
      buffer.depth0 = 1;
      buffer.array_size = 1;

      if (memObj) {
         /* Import storage from an external memory object. */
         obj->buffer = screen->resource_from_memobj(screen, &buffer,
                                                    memObj->memory,
                                                    offset);
      }
      else if (target == GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD) {
         /* AMD_pinned_memory: wrap the user's own allocation. */
         obj->buffer =
            screen->resource_from_user_memory(screen, &buffer, (void*)data);
      }
      else {
         obj->buffer = screen->resource_create(screen, &buffer);

         if (obj->buffer && data)
            pipe_buffer_write(pipe, obj->buffer, 0, size, data);
      }

      if (!obj->buffer) {
         /* out of memory */
         obj->Size = 0;
         return GL_FALSE;
      }

      obj->private_refcount_ctx = ctx;
   }

   /* The current buffer may be bound, so we have to revalidate all atoms that
    * might be using it.
    */
   if (obj->UsageHistory & USAGE_ARRAY_BUFFER)
      ctx->NewDriverState |= ST_NEW_VERTEX_ARRAYS;
   if (obj->UsageHistory & USAGE_UNIFORM_BUFFER)
      ctx->NewDriverState |= ST_NEW_UNIFORM_BUFFER;
   if (obj->UsageHistory & USAGE_SHADER_STORAGE_BUFFER)
      ctx->NewDriverState |= ST_NEW_STORAGE_BUFFER;
   if (obj->UsageHistory & USAGE_TEXTURE_BUFFER)
      ctx->NewDriverState |= ST_NEW_SAMPLER_VIEWS | ST_NEW_IMAGE_UNITS;
   if (obj->UsageHistory & USAGE_ATOMIC_COUNTER_BUFFER)
      ctx->NewDriverState |= ctx->DriverFlags.NewAtomicBuffer;

   return GL_TRUE;
}
383
384 /**
385 * Allocate space for and store data in a buffer object. Any data that was
386 * previously stored in the buffer object is lost. If data is NULL,
387 * memory will be allocated, but no copy will occur.
388 * Called via ctx->Driver.BufferData().
389 * \return GL_TRUE for success, GL_FALSE if out of memory
390 */
391 GLboolean
_mesa_bufferobj_data(struct gl_context * ctx,GLenum target,GLsizeiptrARB size,const void * data,GLenum usage,GLbitfield storageFlags,struct gl_buffer_object * obj)392 _mesa_bufferobj_data(struct gl_context *ctx,
393 GLenum target,
394 GLsizeiptrARB size,
395 const void *data,
396 GLenum usage,
397 GLbitfield storageFlags,
398 struct gl_buffer_object *obj)
399 {
400 return bufferobj_data(ctx, target, size, data, NULL, 0, usage, storageFlags, obj);
401 }
402
403 static GLboolean
bufferobj_data_mem(struct gl_context * ctx,GLenum target,GLsizeiptrARB size,struct gl_memory_object * memObj,GLuint64 offset,GLenum usage,struct gl_buffer_object * bufObj)404 bufferobj_data_mem(struct gl_context *ctx,
405 GLenum target,
406 GLsizeiptrARB size,
407 struct gl_memory_object *memObj,
408 GLuint64 offset,
409 GLenum usage,
410 struct gl_buffer_object *bufObj)
411 {
412 return bufferobj_data(ctx, target, size, NULL, memObj, offset, usage, GL_DYNAMIC_STORAGE_BIT, bufObj);
413 }
414
415 /**
416 * Convert GLbitfield of GL_MAP_x flags to gallium pipe_map_flags flags.
417 * \param wholeBuffer is the whole buffer being mapped?
418 */
419 enum pipe_map_flags
_mesa_access_flags_to_transfer_flags(GLbitfield access,bool wholeBuffer)420 _mesa_access_flags_to_transfer_flags(GLbitfield access, bool wholeBuffer)
421 {
422 enum pipe_map_flags flags = 0;
423
424 if (access & GL_MAP_WRITE_BIT)
425 flags |= PIPE_MAP_WRITE;
426
427 if (access & GL_MAP_READ_BIT)
428 flags |= PIPE_MAP_READ;
429
430 if (access & GL_MAP_FLUSH_EXPLICIT_BIT)
431 flags |= PIPE_MAP_FLUSH_EXPLICIT;
432
433 if (access & GL_MAP_INVALIDATE_BUFFER_BIT) {
434 flags |= PIPE_MAP_DISCARD_WHOLE_RESOURCE;
435 }
436 else if (access & GL_MAP_INVALIDATE_RANGE_BIT) {
437 if (wholeBuffer)
438 flags |= PIPE_MAP_DISCARD_WHOLE_RESOURCE;
439 else
440 flags |= PIPE_MAP_DISCARD_RANGE;
441 }
442
443 if (access & GL_MAP_UNSYNCHRONIZED_BIT)
444 flags |= PIPE_MAP_UNSYNCHRONIZED;
445
446 if (access & GL_MAP_PERSISTENT_BIT)
447 flags |= PIPE_MAP_PERSISTENT;
448
449 if (access & GL_MAP_COHERENT_BIT)
450 flags |= PIPE_MAP_COHERENT;
451
452 /* ... other flags ...
453 */
454
455 if (access & MESA_MAP_NOWAIT_BIT)
456 flags |= PIPE_MAP_DONTBLOCK;
457 if (access & MESA_MAP_THREAD_SAFE_BIT)
458 flags |= PIPE_MAP_THREAD_SAFE;
459 if (access & MESA_MAP_ONCE)
460 flags |= PIPE_MAP_ONCE;
461
462 return flags;
463 }
464
465
466 /**
467 * Called via glMapBufferRange().
468 */
469 void *
_mesa_bufferobj_map_range(struct gl_context * ctx,GLintptr offset,GLsizeiptr length,GLbitfield access,struct gl_buffer_object * obj,gl_map_buffer_index index)470 _mesa_bufferobj_map_range(struct gl_context *ctx,
471 GLintptr offset, GLsizeiptr length, GLbitfield access,
472 struct gl_buffer_object *obj,
473 gl_map_buffer_index index)
474 {
475 struct pipe_context *pipe = ctx->pipe;
476
477 assert(offset >= 0);
478 assert(length >= 0);
479 assert(offset < obj->Size);
480 assert(offset + length <= obj->Size);
481
482 enum pipe_map_flags transfer_flags =
483 _mesa_access_flags_to_transfer_flags(access,
484 offset == 0 && length == obj->Size);
485
486 /* Sometimes games do silly things like MapBufferRange(UNSYNC|DISCARD_RANGE)
487 * In this case, the the UNSYNC is a bit redundant, but the games rely
488 * on the driver rebinding/replacing the backing storage rather than
489 * going down the UNSYNC path (ie. honoring DISCARD_x first before UNSYNC).
490 */
491 if (unlikely(ctx->st_opts->ignore_map_unsynchronized)) {
492 if (transfer_flags & (PIPE_MAP_DISCARD_RANGE | PIPE_MAP_DISCARD_WHOLE_RESOURCE))
493 transfer_flags &= ~PIPE_MAP_UNSYNCHRONIZED;
494 }
495
496 if (ctx->Const.ForceMapBufferSynchronized)
497 transfer_flags &= ~PIPE_MAP_UNSYNCHRONIZED;
498
499 obj->Mappings[index].Pointer = pipe_buffer_map_range(pipe,
500 obj->buffer,
501 offset, length,
502 transfer_flags,
503 &obj->transfer[index]);
504 if (obj->Mappings[index].Pointer) {
505 obj->Mappings[index].Offset = offset;
506 obj->Mappings[index].Length = length;
507 obj->Mappings[index].AccessFlags = access;
508 }
509 else {
510 obj->transfer[index] = NULL;
511 }
512
513 return obj->Mappings[index].Pointer;
514 }
515
516
517 void
_mesa_bufferobj_flush_mapped_range(struct gl_context * ctx,GLintptr offset,GLsizeiptr length,struct gl_buffer_object * obj,gl_map_buffer_index index)518 _mesa_bufferobj_flush_mapped_range(struct gl_context *ctx,
519 GLintptr offset, GLsizeiptr length,
520 struct gl_buffer_object *obj,
521 gl_map_buffer_index index)
522 {
523 struct pipe_context *pipe = ctx->pipe;
524
525 /* Subrange is relative to mapped range */
526 assert(offset >= 0);
527 assert(length >= 0);
528 assert(offset + length <= obj->Mappings[index].Length);
529 assert(obj->Mappings[index].Pointer);
530
531 if (!length)
532 return;
533
534 pipe_buffer_flush_mapped_range(pipe, obj->transfer[index],
535 obj->Mappings[index].Offset + offset,
536 length);
537 }
538
539
540 /**
541 * Called via glUnmapBufferARB().
542 */
543 GLboolean
_mesa_bufferobj_unmap(struct gl_context * ctx,struct gl_buffer_object * obj,gl_map_buffer_index index)544 _mesa_bufferobj_unmap(struct gl_context *ctx, struct gl_buffer_object *obj,
545 gl_map_buffer_index index)
546 {
547 struct pipe_context *pipe = ctx->pipe;
548
549 if (obj->Mappings[index].Length)
550 pipe_buffer_unmap(pipe, obj->transfer[index]);
551
552 obj->transfer[index] = NULL;
553 obj->Mappings[index].Pointer = NULL;
554 obj->Mappings[index].Offset = 0;
555 obj->Mappings[index].Length = 0;
556 return GL_TRUE;
557 }
558
559
560 /**
561 * Called via glCopyBufferSubData().
562 */
563 static void
bufferobj_copy_subdata(struct gl_context * ctx,struct gl_buffer_object * src,struct gl_buffer_object * dst,GLintptr readOffset,GLintptr writeOffset,GLsizeiptr size)564 bufferobj_copy_subdata(struct gl_context *ctx,
565 struct gl_buffer_object *src,
566 struct gl_buffer_object *dst,
567 GLintptr readOffset, GLintptr writeOffset,
568 GLsizeiptr size)
569 {
570 struct pipe_context *pipe = ctx->pipe;
571 struct pipe_box box;
572
573 dst->MinMaxCacheDirty = true;
574 if (!size)
575 return;
576
577 /* buffer should not already be mapped */
578 assert(!_mesa_check_disallowed_mapping(src));
579 /* dst can be mapped, just not the same range as the target range */
580
581 u_box_1d(readOffset, size, &box);
582
583 pipe->resource_copy_region(pipe, dst->buffer, 0, writeOffset, 0, 0,
584 src->buffer, 0, &box);
585 }
586
587 static void
clear_buffer_subdata_sw(struct gl_context * ctx,GLintptr offset,GLsizeiptr size,const GLvoid * clearValue,GLsizeiptr clearValueSize,struct gl_buffer_object * bufObj)588 clear_buffer_subdata_sw(struct gl_context *ctx,
589 GLintptr offset, GLsizeiptr size,
590 const GLvoid *clearValue,
591 GLsizeiptr clearValueSize,
592 struct gl_buffer_object *bufObj)
593 {
594 GLsizeiptr i;
595 GLubyte *dest;
596
597 dest = _mesa_bufferobj_map_range(ctx, offset, size,
598 GL_MAP_WRITE_BIT |
599 GL_MAP_INVALIDATE_RANGE_BIT,
600 bufObj, MAP_INTERNAL);
601
602 if (!dest) {
603 _mesa_error(ctx, GL_OUT_OF_MEMORY, "glClearBuffer[Sub]Data");
604 return;
605 }
606
607 if (clearValue == NULL) {
608 /* Clear with zeros, per the spec */
609 memset(dest, 0, size);
610 _mesa_bufferobj_unmap(ctx, bufObj, MAP_INTERNAL);
611 return;
612 }
613
614 for (i = 0; i < size/clearValueSize; ++i) {
615 memcpy(dest, clearValue, clearValueSize);
616 dest += clearValueSize;
617 }
618
619 _mesa_bufferobj_unmap(ctx, bufObj, MAP_INTERNAL);
620 }
621
622 /**
623 * Helper to warn of possible performance issues, such as frequently
624 * updating a buffer created with GL_STATIC_DRAW. Called via the macro
625 * below.
626 */
627 static void
buffer_usage_warning(struct gl_context * ctx,GLuint * id,const char * fmt,...)628 buffer_usage_warning(struct gl_context *ctx, GLuint *id, const char *fmt, ...)
629 {
630 va_list args;
631
632 va_start(args, fmt);
633 _mesa_gl_vdebugf(ctx, id,
634 MESA_DEBUG_SOURCE_API,
635 MESA_DEBUG_TYPE_PERFORMANCE,
636 MESA_DEBUG_SEVERITY_MEDIUM,
637 fmt, args);
638 va_end(args);
639 }
640
/* Convenience wrapper around buffer_usage_warning(): the static id gives
 * each call site its own message-id slot for _mesa_gl_vdebugf.
 */
#define BUFFER_USAGE_WARNING(CTX, FMT, ...) \
   do { \
      static GLuint id = 0; \
      buffer_usage_warning(CTX, &id, FMT, ##__VA_ARGS__); \
   } while (0)
646
647
/**
 * Used as a placeholder for buffer objects between glGenBuffers() and
 * glBindBuffer() so that glIsBuffer() can work correctly.
 */
static struct gl_buffer_object DummyBufferObject = {
   /* NOTE(review): statically initialized since this object is shared by
    * all names that have been generated but never bound — confirm.
    */
   .MinMaxCacheMutex = SIMPLE_MTX_INITIALIZER,
   .RefCount = 1000*1000*1000, /* never delete */
};
656
657
658 /**
659 * Return pointer to address of a buffer object target.
660 * \param ctx the GL context
661 * \param target the buffer object target to be retrieved.
662 * \return pointer to pointer to the buffer object bound to \c target in the
663 * specified context or \c NULL if \c target is invalid.
664 */
665 static ALWAYS_INLINE struct gl_buffer_object **
get_buffer_target(struct gl_context * ctx,GLenum target,bool no_error)666 get_buffer_target(struct gl_context *ctx, GLenum target, bool no_error)
667 {
668 /* Other targets are only supported in desktop OpenGL and OpenGL ES 3.0. */
669 if (!no_error && !_mesa_is_desktop_gl(ctx) && !_mesa_is_gles3(ctx)) {
670 switch (target) {
671 case GL_ARRAY_BUFFER:
672 case GL_ELEMENT_ARRAY_BUFFER:
673 case GL_PIXEL_PACK_BUFFER:
674 case GL_PIXEL_UNPACK_BUFFER:
675 break;
676 default:
677 return NULL;
678 }
679 }
680
681 switch (target) {
682 case GL_ARRAY_BUFFER_ARB:
683 return &ctx->Array.ArrayBufferObj;
684 case GL_ELEMENT_ARRAY_BUFFER_ARB:
685 return &ctx->Array.VAO->IndexBufferObj;
686 case GL_PIXEL_PACK_BUFFER_EXT:
687 return &ctx->Pack.BufferObj;
688 case GL_PIXEL_UNPACK_BUFFER_EXT:
689 return &ctx->Unpack.BufferObj;
690 case GL_COPY_READ_BUFFER:
691 return &ctx->CopyReadBuffer;
692 case GL_COPY_WRITE_BUFFER:
693 return &ctx->CopyWriteBuffer;
694 case GL_QUERY_BUFFER:
695 if (no_error || _mesa_has_ARB_query_buffer_object(ctx))
696 return &ctx->QueryBuffer;
697 break;
698 case GL_DRAW_INDIRECT_BUFFER:
699 if (no_error ||
700 (_mesa_is_desktop_gl(ctx) && ctx->Extensions.ARB_draw_indirect) ||
701 _mesa_is_gles31(ctx)) {
702 return &ctx->DrawIndirectBuffer;
703 }
704 break;
705 case GL_PARAMETER_BUFFER_ARB:
706 if (no_error || _mesa_has_ARB_indirect_parameters(ctx)) {
707 return &ctx->ParameterBuffer;
708 }
709 break;
710 case GL_DISPATCH_INDIRECT_BUFFER:
711 if (no_error || _mesa_has_compute_shaders(ctx)) {
712 return &ctx->DispatchIndirectBuffer;
713 }
714 break;
715 case GL_TRANSFORM_FEEDBACK_BUFFER:
716 if (no_error || ctx->Extensions.EXT_transform_feedback) {
717 return &ctx->TransformFeedback.CurrentBuffer;
718 }
719 break;
720 case GL_TEXTURE_BUFFER:
721 if (no_error ||
722 _mesa_has_ARB_texture_buffer_object(ctx) ||
723 _mesa_has_OES_texture_buffer(ctx)) {
724 return &ctx->Texture.BufferObject;
725 }
726 break;
727 case GL_UNIFORM_BUFFER:
728 if (no_error || ctx->Extensions.ARB_uniform_buffer_object) {
729 return &ctx->UniformBuffer;
730 }
731 break;
732 case GL_SHADER_STORAGE_BUFFER:
733 if (no_error ||
734 ctx->Extensions.ARB_shader_storage_buffer_object ||
735 _mesa_is_gles31(ctx)) {
736 return &ctx->ShaderStorageBuffer;
737 }
738 break;
739 case GL_ATOMIC_COUNTER_BUFFER:
740 if (no_error ||
741 ctx->Extensions.ARB_shader_atomic_counters || _mesa_is_gles31(ctx)) {
742 return &ctx->AtomicBuffer;
743 }
744 break;
745 case GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD:
746 if (no_error || ctx->Extensions.AMD_pinned_memory) {
747 return &ctx->ExternalVirtualMemoryBuffer;
748 }
749 break;
750 }
751 return NULL;
752 }
753
754
755 /**
756 * Get the buffer object bound to the specified target in a GL context.
757 * \param ctx the GL context
758 * \param target the buffer object target to be retrieved.
759 * \param error the GL error to record if target is illegal.
760 * \return pointer to the buffer object bound to \c target in the
761 * specified context or \c NULL if \c target is invalid.
762 */
763 static inline struct gl_buffer_object *
get_buffer(struct gl_context * ctx,const char * func,GLenum target,GLenum error)764 get_buffer(struct gl_context *ctx, const char *func, GLenum target,
765 GLenum error)
766 {
767 struct gl_buffer_object **bufObj = get_buffer_target(ctx, target, false);
768
769 if (!bufObj) {
770 _mesa_error(ctx, GL_INVALID_ENUM, "%s(target)", func);
771 return NULL;
772 }
773
774 if (!*bufObj) {
775 _mesa_error(ctx, error, "%s(no buffer bound)", func);
776 return NULL;
777 }
778
779 return *bufObj;
780 }
781
782
783 /**
784 * Convert a GLbitfield describing the mapped buffer access flags
785 * into one of GL_READ_WRITE, GL_READ_ONLY, or GL_WRITE_ONLY.
786 */
787 static GLenum
simplified_access_mode(struct gl_context * ctx,GLbitfield access)788 simplified_access_mode(struct gl_context *ctx, GLbitfield access)
789 {
790 const GLbitfield rwFlags = GL_MAP_READ_BIT | GL_MAP_WRITE_BIT;
791 if ((access & rwFlags) == rwFlags)
792 return GL_READ_WRITE;
793 if ((access & GL_MAP_READ_BIT) == GL_MAP_READ_BIT)
794 return GL_READ_ONLY;
795 if ((access & GL_MAP_WRITE_BIT) == GL_MAP_WRITE_BIT)
796 return GL_WRITE_ONLY;
797
798 /* Otherwise, AccessFlags is zero (the default state).
799 *
800 * Table 2.6 on page 31 (page 44 of the PDF) of the OpenGL 1.5 spec says:
801 *
802 * Name Type Initial Value Legal Values
803 * ... ... ... ...
804 * BUFFER_ACCESS enum READ_WRITE READ_ONLY, WRITE_ONLY
805 * READ_WRITE
806 *
807 * However, table 6.8 in the GL_OES_mapbuffer extension says:
808 *
809 * Get Value Type Get Command Value Description
810 * --------- ---- ----------- ----- -----------
811 * BUFFER_ACCESS_OES Z1 GetBufferParameteriv WRITE_ONLY_OES buffer map flag
812 *
813 * The difference is because GL_OES_mapbuffer only supports mapping buffers
814 * write-only.
815 */
816 assert(access == 0);
817
818 return _mesa_is_gles(ctx) ? GL_WRITE_ONLY : GL_READ_WRITE;
819 }
820
821
822 /**
823 * Test if the buffer is mapped, and if so, if the mapped range overlaps the
824 * given range.
825 * The regions do not overlap if and only if the end of the given
826 * region is before the mapped region or the start of the given region
827 * is after the mapped region.
828 *
829 * \param obj Buffer object target on which to operate.
830 * \param offset Offset of the first byte of the subdata range.
831 * \param size Size, in bytes, of the subdata range.
832 * \return true if ranges overlap, false otherwise
833 *
834 */
835 static bool
bufferobj_range_mapped(const struct gl_buffer_object * obj,GLintptr offset,GLsizeiptr size)836 bufferobj_range_mapped(const struct gl_buffer_object *obj,
837 GLintptr offset, GLsizeiptr size)
838 {
839 if (_mesa_bufferobj_mapped(obj, MAP_USER)) {
840 const GLintptr end = offset + size;
841 const GLintptr mapEnd = obj->Mappings[MAP_USER].Offset +
842 obj->Mappings[MAP_USER].Length;
843
844 if (!(end <= obj->Mappings[MAP_USER].Offset || offset >= mapEnd)) {
845 return true;
846 }
847 }
848 return false;
849 }
850
851
852 /**
853 * Tests the subdata range parameters and sets the GL error code for
854 * \c glBufferSubDataARB, \c glGetBufferSubDataARB and
855 * \c glClearBufferSubData.
856 *
857 * \param ctx GL context.
858 * \param bufObj The buffer object.
859 * \param offset Offset of the first byte of the subdata range.
860 * \param size Size, in bytes, of the subdata range.
861 * \param mappedRange If true, checks if an overlapping range is mapped.
862 * If false, checks if buffer is mapped.
863 * \param caller Name of calling function for recording errors.
864 * \return false if error, true otherwise
865 *
866 * \sa glBufferSubDataARB, glGetBufferSubDataARB, glClearBufferSubData
867 */
868 static bool
buffer_object_subdata_range_good(struct gl_context * ctx,const struct gl_buffer_object * bufObj,GLintptr offset,GLsizeiptr size,bool mappedRange,const char * caller)869 buffer_object_subdata_range_good(struct gl_context *ctx,
870 const struct gl_buffer_object *bufObj,
871 GLintptr offset, GLsizeiptr size,
872 bool mappedRange, const char *caller)
873 {
874 if (size < 0) {
875 _mesa_error(ctx, GL_INVALID_VALUE, "%s(size < 0)", caller);
876 return false;
877 }
878
879 if (offset < 0) {
880 _mesa_error(ctx, GL_INVALID_VALUE, "%s(offset < 0)", caller);
881 return false;
882 }
883
884 if (offset + size > bufObj->Size) {
885 _mesa_error(ctx, GL_INVALID_VALUE,
886 "%s(offset %lu + size %lu > buffer size %lu)", caller,
887 (unsigned long) offset,
888 (unsigned long) size,
889 (unsigned long) bufObj->Size);
890 return false;
891 }
892
893 if (bufObj->Mappings[MAP_USER].AccessFlags & GL_MAP_PERSISTENT_BIT)
894 return true;
895
896 if (mappedRange) {
897 if (bufferobj_range_mapped(bufObj, offset, size)) {
898 _mesa_error(ctx, GL_INVALID_OPERATION,
899 "%s(range is mapped without persistent bit)",
900 caller);
901 return false;
902 }
903 }
904 else {
905 if (_mesa_bufferobj_mapped(bufObj, MAP_USER)) {
906 _mesa_error(ctx, GL_INVALID_OPERATION,
907 "%s(buffer is mapped without persistent bit)",
908 caller);
909 return false;
910 }
911 }
912
913 return true;
914 }
915
916
917 /**
918 * Test the format and type parameters and set the GL error code for
919 * \c glClearBufferData, \c glClearNamedBufferData, \c glClearBufferSubData
920 * and \c glClearNamedBufferSubData.
921 *
922 * \param ctx GL context.
923 * \param internalformat Format to which the data is to be converted.
924 * \param format Format of the supplied data.
925 * \param type Type of the supplied data.
926 * \param caller Name of calling function for recording errors.
927 * \return If internalformat, format and type are legal the mesa_format
928 * corresponding to internalformat, otherwise MESA_FORMAT_NONE.
929 *
930 * \sa glClearBufferData, glClearNamedBufferData, glClearBufferSubData and
931 * glClearNamedBufferSubData.
932 */
933 static mesa_format
validate_clear_buffer_format(struct gl_context * ctx,GLenum internalformat,GLenum format,GLenum type,const char * caller)934 validate_clear_buffer_format(struct gl_context *ctx,
935 GLenum internalformat,
936 GLenum format, GLenum type,
937 const char *caller)
938 {
939 mesa_format mesaFormat;
940 GLenum errorFormatType;
941
942 mesaFormat = _mesa_validate_texbuffer_format(ctx, internalformat);
943 if (mesaFormat == MESA_FORMAT_NONE) {
944 _mesa_error(ctx, GL_INVALID_ENUM,
945 "%s(invalid internalformat)", caller);
946 return MESA_FORMAT_NONE;
947 }
948
949 /* NOTE: not mentioned in ARB_clear_buffer_object but according to
950 * EXT_texture_integer there is no conversion between integer and
951 * non-integer formats
952 */
953 if (_mesa_is_enum_format_signed_int(format) !=
954 _mesa_is_format_integer_color(mesaFormat)) {
955 _mesa_error(ctx, GL_INVALID_OPERATION,
956 "%s(integer vs non-integer)", caller);
957 return MESA_FORMAT_NONE;
958 }
959
960 if (!_mesa_is_color_format(format)) {
961 _mesa_error(ctx, GL_INVALID_VALUE,
962 "%s(format is not a color format)", caller);
963 return MESA_FORMAT_NONE;
964 }
965
966 errorFormatType = _mesa_error_check_format_and_type(ctx, format, type);
967 if (errorFormatType != GL_NO_ERROR) {
968 _mesa_error(ctx, GL_INVALID_VALUE,
969 "%s(invalid format or type)", caller);
970 return MESA_FORMAT_NONE;
971 }
972
973 return mesaFormat;
974 }
975
976
977 /**
978 * Convert user-specified clear value to the specified internal format.
979 *
980 * \param ctx GL context.
981 * \param internalformat Format to which the data is converted.
982 * \param clearValue Points to the converted clear value.
983 * \param format Format of the supplied data.
984 * \param type Type of the supplied data.
985 * \param data Data which is to be converted to internalformat.
986 * \param caller Name of calling function for recording errors.
987 * \return true if data could be converted, false otherwise.
988 *
989 * \sa glClearBufferData, glClearBufferSubData
990 */
991 static bool
convert_clear_buffer_data(struct gl_context * ctx,mesa_format internalformat,GLubyte * clearValue,GLenum format,GLenum type,const GLvoid * data,const char * caller)992 convert_clear_buffer_data(struct gl_context *ctx,
993 mesa_format internalformat,
994 GLubyte *clearValue, GLenum format, GLenum type,
995 const GLvoid *data, const char *caller)
996 {
997 GLenum internalformatBase = _mesa_get_format_base_format(internalformat);
998
999 if (_mesa_texstore(ctx, 1, internalformatBase, internalformat,
1000 0, &clearValue, 1, 1, 1,
1001 format, type, data, &ctx->Unpack)) {
1002 return true;
1003 }
1004 else {
1005 _mesa_error(ctx, GL_OUT_OF_MEMORY, "%s", caller);
1006 return false;
1007 }
1008 }
1009
1010 void
_mesa_bufferobj_release_buffer(struct gl_buffer_object * obj)1011 _mesa_bufferobj_release_buffer(struct gl_buffer_object *obj)
1012 {
1013 if (!obj->buffer)
1014 return;
1015
1016 /* Subtract the remaining private references before unreferencing
1017 * the buffer. See the header file for explanation.
1018 */
1019 if (obj->private_refcount) {
1020 assert(obj->private_refcount > 0);
1021 p_atomic_add(&obj->buffer->reference.count,
1022 -obj->private_refcount);
1023 obj->private_refcount = 0;
1024 }
1025 obj->private_refcount_ctx = NULL;
1026
1027 pipe_resource_reference(&obj->buffer, NULL);
1028 }
1029
1030 /**
1031 * Delete a buffer object.
1032 *
1033 * Default callback for the \c dd_function_table::DeleteBuffer() hook.
1034 */
1035 void
_mesa_delete_buffer_object(struct gl_context * ctx,struct gl_buffer_object * bufObj)1036 _mesa_delete_buffer_object(struct gl_context *ctx,
1037 struct gl_buffer_object *bufObj)
1038 {
1039 assert(bufObj->RefCount == 0);
1040 _mesa_buffer_unmap_all_mappings(ctx, bufObj);
1041 _mesa_bufferobj_release_buffer(bufObj);
1042
1043 vbo_delete_minmax_cache(bufObj);
1044
1045 /* assign strange values here to help w/ debugging */
1046 bufObj->RefCount = -1000;
1047 bufObj->Name = ~0;
1048
1049 simple_mtx_destroy(&bufObj->MinMaxCacheMutex);
1050 free(bufObj->Label);
1051 free(bufObj);
1052 }
1053
1054
/**
 * Get the value of the MESA_NO_MINMAX_CACHE debug option (cached after the
 * first call).
 *
 * NOTE(review): the lazy initialization of the static locals below is not
 * synchronized; presumed benign because every racing thread computes the
 * same value — confirm if this can be reached from multiple threads before
 * first use.
 */
static bool
get_no_minmax_cache(void)
{
   static bool read = false;
   static bool disable = false;

   if (!read) {
      disable = debug_get_bool_option("MESA_NO_MINMAX_CACHE", false);
      read = true;
   }

   return disable;
}
1071
1072 /**
1073 * Callback called from _mesa_HashWalk()
1074 */
1075 static void
count_buffer_size(void * data,void * userData)1076 count_buffer_size(void *data, void *userData)
1077 {
1078 const struct gl_buffer_object *bufObj =
1079 (const struct gl_buffer_object *) data;
1080 GLuint *total = (GLuint *) userData;
1081
1082 *total = *total + bufObj->Size;
1083 }
1084
1085
1086 /**
1087 * Initialize the state associated with buffer objects
1088 */
1089 void
_mesa_init_buffer_objects(struct gl_context * ctx)1090 _mesa_init_buffer_objects( struct gl_context *ctx )
1091 {
1092 GLuint i;
1093
1094 for (i = 0; i < MAX_COMBINED_UNIFORM_BUFFERS; i++) {
1095 _mesa_reference_buffer_object(ctx,
1096 &ctx->UniformBufferBindings[i].BufferObject,
1097 NULL);
1098 ctx->UniformBufferBindings[i].Offset = -1;
1099 ctx->UniformBufferBindings[i].Size = -1;
1100 }
1101
1102 for (i = 0; i < MAX_COMBINED_SHADER_STORAGE_BUFFERS; i++) {
1103 _mesa_reference_buffer_object(ctx,
1104 &ctx->ShaderStorageBufferBindings[i].BufferObject,
1105 NULL);
1106 ctx->ShaderStorageBufferBindings[i].Offset = -1;
1107 ctx->ShaderStorageBufferBindings[i].Size = -1;
1108 }
1109
1110 for (i = 0; i < MAX_COMBINED_ATOMIC_BUFFERS; i++) {
1111 _mesa_reference_buffer_object(ctx,
1112 &ctx->AtomicBufferBindings[i].BufferObject,
1113 NULL);
1114 ctx->AtomicBufferBindings[i].Offset = 0;
1115 ctx->AtomicBufferBindings[i].Size = 0;
1116 }
1117 }
1118
/**
 * Detach the context from the buffer to re-enable buffer reference counting
 * for this context.
 *
 * Called when the creating context stops owning a buffer (on deletion or
 * context teardown). The context's private, non-atomic reference count is
 * folded into the buffer's global RefCount first, then the context's own
 * lifetime reference is dropped.
 */
static void
detach_ctx_from_buffer(struct gl_context *ctx, struct gl_buffer_object *buf)
{
   assert(buf->Ctx == ctx);

   /* Move private non-atomic context references to the global ref count. */
   p_atomic_add(&buf->RefCount, buf->CtxRefCount);
   buf->CtxRefCount = 0;
   buf->Ctx = NULL;

   /* Remove the context reference where the context holds one
    * reference for the lifetime of the buffer ID to skip refcount
    * atomics instead of each binding point holding the reference.
    * This may free the buffer if it was the last reference.
    */
   _mesa_reference_buffer_object(ctx, &buf, NULL);
}
1139
/**
 * Zombie buffers are buffers that were created by one context and deleted
 * by another context. The creating context holds a global reference for each
 * buffer it created that can't be unreferenced when another context deletes
 * it. Such a buffer becomes a zombie, which means that it's no longer usable
 * by OpenGL, but the creating context still holds its global reference of
 * the buffer. Only the creating context can remove the reference, which is
 * what this function does.
 *
 * For all zombie buffers, decrement the reference count if the current
 * context owns the buffer.
 */
static void
unreference_zombie_buffers_for_ctx(struct gl_context *ctx)
{
   /* It's assumed that the mutex of Shared->BufferObjects is locked. */
   set_foreach(ctx->Shared->ZombieBufferObjects, entry) {
      struct gl_buffer_object *buf = (struct gl_buffer_object *)entry->key;

      if (buf->Ctx == ctx) {
         /* NOTE(review): removing the entry while iterating relies on
          * util/set marking entries deleted rather than rehashing mid-walk
          * — confirm against util/set semantics if this is ever changed.
          */
         _mesa_set_remove(ctx->Shared->ZombieBufferObjects, entry);
         detach_ctx_from_buffer(ctx, buf);
      }
   }
}
1165
1166 /**
1167 * When a context creates buffers, it holds a global buffer reference count
1168 * for each buffer and doesn't update their RefCount. When the context is
1169 * destroyed before the buffers are destroyed, the context must remove
1170 * its global reference from the buffers, so that the buffers can live
1171 * on their own.
1172 *
1173 * At this point, the buffers shouldn't be bound in any bounding point owned
1174 * by the context. (it would crash if they did)
1175 */
1176 static void
detach_unrefcounted_buffer_from_ctx(void * data,void * userData)1177 detach_unrefcounted_buffer_from_ctx(void *data, void *userData)
1178 {
1179 struct gl_context *ctx = (struct gl_context *)userData;
1180 struct gl_buffer_object *buf = (struct gl_buffer_object *)data;
1181
1182 if (buf->Ctx == ctx) {
1183 /* Detach the current context from live objects. There should be no
1184 * bound buffer in the context at this point, therefore we can just
1185 * unreference the global reference. Other contexts and texture objects
1186 * might still be using the buffer.
1187 */
1188 assert(buf->CtxRefCount == 0);
1189 buf->Ctx = NULL;
1190 _mesa_reference_buffer_object(ctx, &buf, NULL);
1191 }
1192 }
1193
1194 void
_mesa_free_buffer_objects(struct gl_context * ctx)1195 _mesa_free_buffer_objects( struct gl_context *ctx )
1196 {
1197 GLuint i;
1198
1199 _mesa_reference_buffer_object(ctx, &ctx->Array.ArrayBufferObj, NULL);
1200
1201 _mesa_reference_buffer_object(ctx, &ctx->CopyReadBuffer, NULL);
1202 _mesa_reference_buffer_object(ctx, &ctx->CopyWriteBuffer, NULL);
1203
1204 _mesa_reference_buffer_object(ctx, &ctx->UniformBuffer, NULL);
1205
1206 _mesa_reference_buffer_object(ctx, &ctx->ShaderStorageBuffer, NULL);
1207
1208 _mesa_reference_buffer_object(ctx, &ctx->AtomicBuffer, NULL);
1209
1210 _mesa_reference_buffer_object(ctx, &ctx->DrawIndirectBuffer, NULL);
1211
1212 _mesa_reference_buffer_object(ctx, &ctx->ParameterBuffer, NULL);
1213
1214 _mesa_reference_buffer_object(ctx, &ctx->DispatchIndirectBuffer, NULL);
1215
1216 _mesa_reference_buffer_object(ctx, &ctx->QueryBuffer, NULL);
1217
1218 for (i = 0; i < MAX_COMBINED_UNIFORM_BUFFERS; i++) {
1219 _mesa_reference_buffer_object(ctx,
1220 &ctx->UniformBufferBindings[i].BufferObject,
1221 NULL);
1222 }
1223
1224 for (i = 0; i < MAX_COMBINED_SHADER_STORAGE_BUFFERS; i++) {
1225 _mesa_reference_buffer_object(ctx,
1226 &ctx->ShaderStorageBufferBindings[i].BufferObject,
1227 NULL);
1228 }
1229
1230 for (i = 0; i < MAX_COMBINED_ATOMIC_BUFFERS; i++) {
1231 _mesa_reference_buffer_object(ctx,
1232 &ctx->AtomicBufferBindings[i].BufferObject,
1233 NULL);
1234 }
1235
1236 _mesa_HashLockMutex(&ctx->Shared->BufferObjects);
1237 unreference_zombie_buffers_for_ctx(ctx);
1238 _mesa_HashWalkLocked(&ctx->Shared->BufferObjects,
1239 detach_unrefcounted_buffer_from_ctx, ctx);
1240 _mesa_HashUnlockMutex(&ctx->Shared->BufferObjects);
1241 }
1242
1243 struct gl_buffer_object *
_mesa_bufferobj_alloc(struct gl_context * ctx,GLuint id)1244 _mesa_bufferobj_alloc(struct gl_context *ctx, GLuint id)
1245 {
1246 struct gl_buffer_object *buf = CALLOC_STRUCT(gl_buffer_object);
1247 if (!buf)
1248 return NULL;
1249
1250 buf->RefCount = 1;
1251 buf->Name = id;
1252 buf->Usage = GL_STATIC_DRAW_ARB;
1253
1254 simple_mtx_init(&buf->MinMaxCacheMutex, mtx_plain);
1255 if (get_no_minmax_cache())
1256 buf->UsageHistory |= USAGE_DISABLE_MINMAX_CACHE;
1257 return buf;
1258 }
1259 /**
1260 * Create a buffer object that will be backed by an OpenGL buffer ID
1261 * where the creating context will hold one global buffer reference instead
1262 * of updating buffer RefCount for every binding point.
1263 *
1264 * This shouldn't be used for internal buffers.
1265 */
1266 static struct gl_buffer_object *
new_gl_buffer_object(struct gl_context * ctx,GLuint id)1267 new_gl_buffer_object(struct gl_context *ctx, GLuint id)
1268 {
1269 struct gl_buffer_object *buf = _mesa_bufferobj_alloc(ctx, id);
1270
1271 buf->Ctx = ctx;
1272 buf->RefCount++; /* global buffer reference held by the context */
1273 return buf;
1274 }
1275
/**
 * Resolve a buffer name to a live buffer object at bind time, creating the
 * object lazily on first use.
 *
 * \param buffer      GL name being bound.
 * \param buf_handle  In: the lookup result for \c buffer (NULL or the dummy
 *                    object when no real object exists yet). Out: the live
 *                    buffer object.
 * \param caller      API entry point name, used in error messages.
 * \param no_error    Skip client-error generation (KHR_no_error path).
 * \return false if a GL error was recorded, true otherwise.
 */
static ALWAYS_INLINE bool
handle_bind_buffer_gen(struct gl_context *ctx,
                       GLuint buffer,
                       struct gl_buffer_object **buf_handle,
                       const char *caller, bool no_error)
{
   struct gl_buffer_object *buf = *buf_handle;

   /* Core profile forbids binding names not returned by glGenBuffers /
    * glCreateBuffers.
    */
   if (unlikely(!no_error && !buf && _mesa_is_desktop_gl_core(ctx))) {
      _mesa_error(ctx, GL_INVALID_OPERATION, "%s(non-gen name)", caller);
      return false;
   }

   if (unlikely(!buf || buf == &DummyBufferObject)) {
      /* If this is a new buffer object id, or one which was generated but
       * never used before, allocate a buffer object now.
       */
      *buf_handle = new_gl_buffer_object(ctx, buffer);
      if (!no_error && !*buf_handle) {
         _mesa_error(ctx, GL_OUT_OF_MEMORY, "%s", caller);
         return false;
      }
      _mesa_HashLockMaybeLocked(&ctx->Shared->BufferObjects,
                                ctx->BufferObjectsLocked);
      _mesa_HashInsertLocked(&ctx->Shared->BufferObjects, buffer,
                             *buf_handle);
      /* If one context only creates buffers and another context only deletes
       * buffers, buffers don't get released because it only produces zombie
       * buffers. Only the context that has created the buffers can release
       * them. Thus, when we create buffers, we prune the list of zombie
       * buffers.
       */
      unreference_zombie_buffers_for_ctx(ctx);
      _mesa_HashUnlockMaybeLocked(&ctx->Shared->BufferObjects,
                                  ctx->BufferObjectsLocked);
   }

   return true;
}
1315
1316 bool
_mesa_handle_bind_buffer_gen(struct gl_context * ctx,GLuint buffer,struct gl_buffer_object ** buf_handle,const char * caller,bool no_error)1317 _mesa_handle_bind_buffer_gen(struct gl_context *ctx,
1318 GLuint buffer,
1319 struct gl_buffer_object **buf_handle,
1320 const char *caller, bool no_error)
1321 {
1322 return handle_bind_buffer_gen(ctx, buffer, buf_handle, caller, no_error);
1323 }
1324
/**
 * Bind the specified target to buffer for the specified context.
 * Called by glBindBuffer() and other functions.
 *
 * \param bindTarget  address of the context's binding-point pointer to update.
 * \param buffer      GL buffer name; 0 unbinds the target.
 * \param no_error    skip client-error validation (KHR_no_error path).
 */
static void
bind_buffer_object(struct gl_context *ctx,
                   struct gl_buffer_object **bindTarget, GLuint buffer,
                   bool no_error)
{
   struct gl_buffer_object *oldBufObj;
   struct gl_buffer_object *newBufObj;

   assert(bindTarget);

   /* Fast path that unbinds. It's better when NULL is a literal, so that
    * the compiler can simplify this code after inlining.
    */
   if (buffer == 0) {
      _mesa_reference_buffer_object(ctx, bindTarget, NULL);
      return;
   }

   /* Get pointer to old buffer object (to be unbound) */
   oldBufObj = *bindTarget;
   /* A delete-pending buffer must not short-circuit here: rebinding its
    * name has to resolve to a fresh object (ABA problem — see the comment
    * in delete_buffers()), so treat it as "nothing bound".
    */
   GLuint old_name = oldBufObj && !oldBufObj->DeletePending ? oldBufObj->Name : 0;
   if (unlikely(old_name == buffer))
      return; /* rebinding the same buffer object- no change */

   newBufObj = _mesa_lookup_bufferobj(ctx, buffer);
   /* Get a new buffer object if it hasn't been created. */
   if (unlikely(!handle_bind_buffer_gen(ctx, buffer, &newBufObj, "glBindBuffer",
                                        no_error)))
      return;

   /* At this point, the compiler should deduce that newBufObj is non-NULL if
    * everything has been inlined, so the compiler should simplify this.
    */
   _mesa_reference_buffer_object(ctx, bindTarget, newBufObj);
}
1364
1365
1366 /**
1367 * Update the default buffer objects in the given context to reference those
1368 * specified in the shared state and release those referencing the old
1369 * shared state.
1370 */
1371 void
_mesa_update_default_objects_buffer_objects(struct gl_context * ctx)1372 _mesa_update_default_objects_buffer_objects(struct gl_context *ctx)
1373 {
1374 /* Bind 0 to remove references to those in the shared context hash table. */
1375 bind_buffer_object(ctx, &ctx->Array.ArrayBufferObj, 0, false);
1376 bind_buffer_object(ctx, &ctx->Array.VAO->IndexBufferObj, 0, false);
1377 bind_buffer_object(ctx, &ctx->Pack.BufferObj, 0, false);
1378 bind_buffer_object(ctx, &ctx->Unpack.BufferObj, 0, false);
1379 }
1380
1381
1382
1383 /**
1384 * Return the gl_buffer_object for the given ID.
1385 * Always return NULL for ID 0.
1386 */
1387 struct gl_buffer_object *
_mesa_lookup_bufferobj(struct gl_context * ctx,GLuint buffer)1388 _mesa_lookup_bufferobj(struct gl_context *ctx, GLuint buffer)
1389 {
1390 if (buffer == 0)
1391 return NULL;
1392 else
1393 return (struct gl_buffer_object *)
1394 _mesa_HashLookupMaybeLocked(&ctx->Shared->BufferObjects, buffer,
1395 ctx->BufferObjectsLocked);
1396 }
1397
1398
1399 struct gl_buffer_object *
_mesa_lookup_bufferobj_locked(struct gl_context * ctx,GLuint buffer)1400 _mesa_lookup_bufferobj_locked(struct gl_context *ctx, GLuint buffer)
1401 {
1402 if (buffer == 0)
1403 return NULL;
1404 else
1405 return (struct gl_buffer_object *)
1406 _mesa_HashLookupLocked(&ctx->Shared->BufferObjects, buffer);
1407 }
1408
1409 /**
1410 * A convenience function for direct state access functions that throws
1411 * GL_INVALID_OPERATION if buffer is not the name of an existing
1412 * buffer object.
1413 */
1414 struct gl_buffer_object *
_mesa_lookup_bufferobj_err(struct gl_context * ctx,GLuint buffer,const char * caller)1415 _mesa_lookup_bufferobj_err(struct gl_context *ctx, GLuint buffer,
1416 const char *caller)
1417 {
1418 struct gl_buffer_object *bufObj;
1419
1420 bufObj = _mesa_lookup_bufferobj(ctx, buffer);
1421 if (!bufObj || bufObj == &DummyBufferObject) {
1422 _mesa_error(ctx, GL_INVALID_OPERATION,
1423 "%s(non-existent buffer object %u)", caller, buffer);
1424 return NULL;
1425 }
1426
1427 return bufObj;
1428 }
1429
1430
1431 /**
1432 * Look up a buffer object for a multi-bind function.
1433 *
1434 * Unlike _mesa_lookup_bufferobj(), this function also takes care
1435 * of generating an error if the buffer ID is not zero or the name
1436 * of an existing buffer object.
1437 *
1438 * If the buffer ID refers to an existing buffer object, a pointer
1439 * to the buffer object is returned. If the ID is zero, NULL is returned.
1440 * If the ID is not zero and does not refer to a valid buffer object, this
1441 * function returns NULL.
1442 *
1443 * This function assumes that the caller has already locked the
1444 * hash table mutex by calling
1445 * _mesa_HashLockMutex(&ctx->Shared->BufferObjects).
1446 */
1447 struct gl_buffer_object *
_mesa_multi_bind_lookup_bufferobj(struct gl_context * ctx,const GLuint * buffers,GLuint index,const char * caller,bool * error)1448 _mesa_multi_bind_lookup_bufferobj(struct gl_context *ctx,
1449 const GLuint *buffers,
1450 GLuint index, const char *caller,
1451 bool *error)
1452 {
1453 struct gl_buffer_object *bufObj = NULL;
1454
1455 *error = false;
1456
1457 if (buffers[index] != 0) {
1458 bufObj = _mesa_lookup_bufferobj_locked(ctx, buffers[index]);
1459
1460 /* The multi-bind functions don't create the buffer objects
1461 when they don't exist. */
1462 if (bufObj == &DummyBufferObject)
1463 bufObj = NULL;
1464
1465 if (!bufObj) {
1466 /* The ARB_multi_bind spec says:
1467 *
1468 * "An INVALID_OPERATION error is generated if any value
1469 * in <buffers> is not zero or the name of an existing
1470 * buffer object (per binding)."
1471 */
1472 _mesa_error(ctx, GL_INVALID_OPERATION,
1473 "%s(buffers[%u]=%u is not zero or the name "
1474 "of an existing buffer object)",
1475 caller, index, buffers[index]);
1476 *error = true;
1477 }
1478 }
1479
1480 return bufObj;
1481 }
1482
1483
1484 /**
1485 * If *ptr points to obj, set ptr = the Null/default buffer object.
1486 * This is a helper for buffer object deletion.
1487 * The GL spec says that deleting a buffer object causes it to get
1488 * unbound from all arrays in the current context.
1489 */
1490 static void
unbind(struct gl_context * ctx,struct gl_vertex_array_object * vao,unsigned index,struct gl_buffer_object * obj)1491 unbind(struct gl_context *ctx,
1492 struct gl_vertex_array_object *vao, unsigned index,
1493 struct gl_buffer_object *obj)
1494 {
1495 if (vao->BufferBinding[index].BufferObj == obj) {
1496 _mesa_bind_vertex_buffer(ctx, vao, index, NULL,
1497 vao->BufferBinding[index].Offset,
1498 vao->BufferBinding[index].Stride, true, false);
1499 }
1500 }
1501
1502 void
_mesa_buffer_unmap_all_mappings(struct gl_context * ctx,struct gl_buffer_object * bufObj)1503 _mesa_buffer_unmap_all_mappings(struct gl_context *ctx,
1504 struct gl_buffer_object *bufObj)
1505 {
1506 for (int i = 0; i < MAP_COUNT; i++) {
1507 if (_mesa_bufferobj_mapped(bufObj, i)) {
1508 _mesa_bufferobj_unmap(ctx, bufObj, i);
1509 assert(bufObj->Mappings[i].Pointer == NULL);
1510 bufObj->Mappings[i].AccessFlags = 0;
1511 }
1512 }
1513 }
1514
1515
1516 /**********************************************************************/
1517 /* API Functions */
1518 /**********************************************************************/
1519
1520 void GLAPIENTRY
_mesa_BindBuffer_no_error(GLenum target,GLuint buffer)1521 _mesa_BindBuffer_no_error(GLenum target, GLuint buffer)
1522 {
1523 GET_CURRENT_CONTEXT(ctx);
1524
1525 struct gl_buffer_object **bindTarget = get_buffer_target(ctx, target, true);
1526 bind_buffer_object(ctx, bindTarget, buffer, true);
1527 }
1528
1529
1530 void GLAPIENTRY
_mesa_BindBuffer(GLenum target,GLuint buffer)1531 _mesa_BindBuffer(GLenum target, GLuint buffer)
1532 {
1533 GET_CURRENT_CONTEXT(ctx);
1534
1535 if (MESA_VERBOSE & VERBOSE_API) {
1536 _mesa_debug(ctx, "glBindBuffer(%s, %u)\n",
1537 _mesa_enum_to_string(target), buffer);
1538 }
1539
1540 struct gl_buffer_object **bindTarget = get_buffer_target(ctx, target, false);
1541 if (!bindTarget) {
1542 _mesa_error(ctx, GL_INVALID_ENUM, "glBindBufferARB(target %s)",
1543 _mesa_enum_to_string(target));
1544 return;
1545 }
1546
1547 bind_buffer_object(ctx, bindTarget, buffer, false);
1548 }
1549
1550 /**
1551 * Binds a buffer object to a binding point.
1552 *
1553 * The caller is responsible for validating the offset,
1554 * flushing the vertices and updating NewDriverState.
1555 */
1556 static void
set_buffer_binding(struct gl_context * ctx,struct gl_buffer_binding * binding,struct gl_buffer_object * bufObj,GLintptr offset,GLsizeiptr size,bool autoSize,gl_buffer_usage usage)1557 set_buffer_binding(struct gl_context *ctx,
1558 struct gl_buffer_binding *binding,
1559 struct gl_buffer_object *bufObj,
1560 GLintptr offset,
1561 GLsizeiptr size,
1562 bool autoSize, gl_buffer_usage usage)
1563 {
1564 _mesa_reference_buffer_object(ctx, &binding->BufferObject, bufObj);
1565
1566 binding->Offset = offset;
1567 binding->Size = size;
1568 binding->AutomaticSize = autoSize;
1569
1570 /* If this is a real buffer object, mark it has having been used
1571 * at some point as an atomic counter buffer.
1572 */
1573 if (size >= 0)
1574 bufObj->UsageHistory |= usage;
1575 }
1576
1577 static void
set_buffer_multi_binding(struct gl_context * ctx,const GLuint * buffers,int idx,const char * caller,struct gl_buffer_binding * binding,GLintptr offset,GLsizeiptr size,bool range,gl_buffer_usage usage)1578 set_buffer_multi_binding(struct gl_context *ctx,
1579 const GLuint *buffers,
1580 int idx,
1581 const char *caller,
1582 struct gl_buffer_binding *binding,
1583 GLintptr offset,
1584 GLsizeiptr size,
1585 bool range,
1586 gl_buffer_usage usage)
1587 {
1588 struct gl_buffer_object *bufObj;
1589
1590 if (binding->BufferObject && binding->BufferObject->Name == buffers[idx])
1591 bufObj = binding->BufferObject;
1592 else {
1593 bool error;
1594 bufObj = _mesa_multi_bind_lookup_bufferobj(ctx, buffers, idx, caller,
1595 &error);
1596 if (error)
1597 return;
1598 }
1599
1600 if (!bufObj)
1601 set_buffer_binding(ctx, binding, bufObj, -1, -1, !range, usage);
1602 else
1603 set_buffer_binding(ctx, binding, bufObj, offset, size, !range, usage);
1604 }
1605
1606 static void
bind_buffer(struct gl_context * ctx,struct gl_buffer_binding * binding,struct gl_buffer_object * bufObj,GLintptr offset,GLsizeiptr size,GLboolean autoSize,uint64_t driver_state,gl_buffer_usage usage)1607 bind_buffer(struct gl_context *ctx,
1608 struct gl_buffer_binding *binding,
1609 struct gl_buffer_object *bufObj,
1610 GLintptr offset,
1611 GLsizeiptr size,
1612 GLboolean autoSize,
1613 uint64_t driver_state,
1614 gl_buffer_usage usage)
1615 {
1616 if (binding->BufferObject == bufObj &&
1617 binding->Offset == offset &&
1618 binding->Size == size &&
1619 binding->AutomaticSize == autoSize) {
1620 return;
1621 }
1622
1623 FLUSH_VERTICES(ctx, 0, 0);
1624 ctx->NewDriverState |= driver_state;
1625
1626 set_buffer_binding(ctx, binding, bufObj, offset, size, autoSize, usage);
1627 }
1628
1629 /**
1630 * Binds a buffer object to a uniform buffer binding point.
1631 *
1632 * Unlike set_buffer_binding(), this function also flushes vertices
1633 * and updates NewDriverState. It also checks if the binding
1634 * has actually changed before updating it.
1635 */
1636 static void
bind_uniform_buffer(struct gl_context * ctx,GLuint index,struct gl_buffer_object * bufObj,GLintptr offset,GLsizeiptr size,GLboolean autoSize)1637 bind_uniform_buffer(struct gl_context *ctx,
1638 GLuint index,
1639 struct gl_buffer_object *bufObj,
1640 GLintptr offset,
1641 GLsizeiptr size,
1642 GLboolean autoSize)
1643 {
1644 bind_buffer(ctx, &ctx->UniformBufferBindings[index],
1645 bufObj, offset, size, autoSize,
1646 ST_NEW_UNIFORM_BUFFER,
1647 USAGE_UNIFORM_BUFFER);
1648 }
1649
1650 /**
1651 * Binds a buffer object to a shader storage buffer binding point.
1652 *
1653 * Unlike set_ssbo_binding(), this function also flushes vertices
1654 * and updates NewDriverState. It also checks if the binding
1655 * has actually changed before updating it.
1656 */
1657 static void
bind_shader_storage_buffer(struct gl_context * ctx,GLuint index,struct gl_buffer_object * bufObj,GLintptr offset,GLsizeiptr size,GLboolean autoSize)1658 bind_shader_storage_buffer(struct gl_context *ctx,
1659 GLuint index,
1660 struct gl_buffer_object *bufObj,
1661 GLintptr offset,
1662 GLsizeiptr size,
1663 GLboolean autoSize)
1664 {
1665 bind_buffer(ctx, &ctx->ShaderStorageBufferBindings[index],
1666 bufObj, offset, size, autoSize,
1667 ST_NEW_STORAGE_BUFFER,
1668 USAGE_SHADER_STORAGE_BUFFER);
1669 }
1670
1671 /**
1672 * Binds a buffer object to an atomic buffer binding point.
1673 *
1674 * Unlike set_atomic_binding(), this function also flushes vertices
1675 * and updates NewDriverState. It also checks if the binding
1676 * has actually changed before updating it.
1677 */
1678 static void
bind_atomic_buffer(struct gl_context * ctx,unsigned index,struct gl_buffer_object * bufObj,GLintptr offset,GLsizeiptr size,GLboolean autoSize)1679 bind_atomic_buffer(struct gl_context *ctx, unsigned index,
1680 struct gl_buffer_object *bufObj, GLintptr offset,
1681 GLsizeiptr size, GLboolean autoSize)
1682 {
1683 bind_buffer(ctx, &ctx->AtomicBufferBindings[index],
1684 bufObj, offset, size, autoSize,
1685 ctx->DriverFlags.NewAtomicBuffer,
1686 USAGE_ATOMIC_COUNTER_BUFFER);
1687 }
1688
1689 /**
1690 * Bind a buffer object to a uniform block binding point.
1691 * As above, but offset = 0.
1692 */
1693 static void
bind_buffer_base_uniform_buffer(struct gl_context * ctx,GLuint index,struct gl_buffer_object * bufObj)1694 bind_buffer_base_uniform_buffer(struct gl_context *ctx,
1695 GLuint index,
1696 struct gl_buffer_object *bufObj)
1697 {
1698 if (index >= ctx->Const.MaxUniformBufferBindings) {
1699 _mesa_error(ctx, GL_INVALID_VALUE, "glBindBufferBase(index=%d)", index);
1700 return;
1701 }
1702
1703 _mesa_reference_buffer_object(ctx, &ctx->UniformBuffer, bufObj);
1704
1705 if (!bufObj)
1706 bind_uniform_buffer(ctx, index, bufObj, -1, -1, GL_TRUE);
1707 else
1708 bind_uniform_buffer(ctx, index, bufObj, 0, 0, GL_TRUE);
1709 }
1710
1711 /**
1712 * Bind a buffer object to a shader storage block binding point.
1713 * As above, but offset = 0.
1714 */
1715 static void
bind_buffer_base_shader_storage_buffer(struct gl_context * ctx,GLuint index,struct gl_buffer_object * bufObj)1716 bind_buffer_base_shader_storage_buffer(struct gl_context *ctx,
1717 GLuint index,
1718 struct gl_buffer_object *bufObj)
1719 {
1720 if (index >= ctx->Const.MaxShaderStorageBufferBindings) {
1721 _mesa_error(ctx, GL_INVALID_VALUE, "glBindBufferBase(index=%d)", index);
1722 return;
1723 }
1724
1725 _mesa_reference_buffer_object(ctx, &ctx->ShaderStorageBuffer, bufObj);
1726
1727 if (!bufObj)
1728 bind_shader_storage_buffer(ctx, index, bufObj, -1, -1, GL_TRUE);
1729 else
1730 bind_shader_storage_buffer(ctx, index, bufObj, 0, 0, GL_TRUE);
1731 }
1732
1733 /**
1734 * Bind a buffer object to a shader storage block binding point.
1735 * As above, but offset = 0.
1736 */
1737 static void
bind_buffer_base_atomic_buffer(struct gl_context * ctx,GLuint index,struct gl_buffer_object * bufObj)1738 bind_buffer_base_atomic_buffer(struct gl_context *ctx,
1739 GLuint index,
1740 struct gl_buffer_object *bufObj)
1741 {
1742 if (index >= ctx->Const.MaxAtomicBufferBindings) {
1743 _mesa_error(ctx, GL_INVALID_VALUE, "glBindBufferBase(index=%d)", index);
1744 return;
1745 }
1746
1747 _mesa_reference_buffer_object(ctx, &ctx->AtomicBuffer, bufObj);
1748
1749 if (!bufObj)
1750 bind_atomic_buffer(ctx, index, bufObj, -1, -1, GL_TRUE);
1751 else
1752 bind_atomic_buffer(ctx, index, bufObj, 0, 0, GL_TRUE);
1753 }
1754
/**
 * Delete a set of buffer objects.
 *
 * Backs glDeleteBuffers/glDeleteBuffersARB: each existing buffer named in
 * \c ids is unmapped, unbound from every binding point in the current
 * context, removed from the shared name table, marked delete-pending and
 * finally unreferenced.
 *
 * \param n Number of buffer objects to delete.
 * \param ids Array of \c n buffer object IDs.
 */
static void
delete_buffers(struct gl_context *ctx, GLsizei n, const GLuint *ids)
{
   FLUSH_VERTICES(ctx, 0, 0);

   _mesa_HashLockMaybeLocked(&ctx->Shared->BufferObjects,
                             ctx->BufferObjectsLocked);
   /* Deleting is a convenient moment for the creating context to release
    * zombie buffers it still references; see unreference_zombie_buffers_for_ctx().
    */
   unreference_zombie_buffers_for_ctx(ctx);

   for (GLsizei i = 0; i < n; i++) {
      struct gl_buffer_object *bufObj =
         _mesa_lookup_bufferobj_locked(ctx, ids[i]);
      if (bufObj) {
         struct gl_vertex_array_object *vao = ctx->Array.VAO;
         GLuint j;

         assert(bufObj->Name == ids[i] || bufObj == &DummyBufferObject);

         _mesa_buffer_unmap_all_mappings(ctx, bufObj);

         /* unbind any vertex pointers bound to this buffer */
         for (j = 0; j < ARRAY_SIZE(vao->BufferBinding); j++) {
            unbind(ctx, vao, j, bufObj);
         }

         if (ctx->Array.ArrayBufferObj == bufObj) {
            bind_buffer_object(ctx, &ctx->Array.ArrayBufferObj, 0, false);
         }
         if (vao->IndexBufferObj == bufObj) {
            bind_buffer_object(ctx, &vao->IndexBufferObj, 0, false);
         }

         /* unbind ARB_draw_indirect binding point */
         if (ctx->DrawIndirectBuffer == bufObj) {
            bind_buffer_object(ctx, &ctx->DrawIndirectBuffer, 0, false);
         }

         /* unbind ARB_indirect_parameters binding point */
         if (ctx->ParameterBuffer == bufObj) {
            bind_buffer_object(ctx, &ctx->ParameterBuffer, 0, false);
         }

         /* unbind ARB_compute_shader binding point */
         if (ctx->DispatchIndirectBuffer == bufObj) {
            bind_buffer_object(ctx, &ctx->DispatchIndirectBuffer, 0, false);
         }

         /* unbind ARB_copy_buffer binding points */
         if (ctx->CopyReadBuffer == bufObj) {
            bind_buffer_object(ctx, &ctx->CopyReadBuffer, 0, false);
         }
         if (ctx->CopyWriteBuffer == bufObj) {
            bind_buffer_object(ctx, &ctx->CopyWriteBuffer, 0, false);
         }

         /* unbind transform feedback binding points */
         if (ctx->TransformFeedback.CurrentBuffer == bufObj) {
            bind_buffer_object(ctx, &ctx->TransformFeedback.CurrentBuffer, 0, false);
         }
         for (j = 0; j < MAX_FEEDBACK_BUFFERS; j++) {
            if (ctx->TransformFeedback.CurrentObject->Buffers[j] == bufObj) {
               _mesa_bind_buffer_base_transform_feedback(ctx,
                                           ctx->TransformFeedback.CurrentObject,
                                           j, NULL, false);
            }
         }

         /* unbind UBO binding points */
         for (j = 0; j < ctx->Const.MaxUniformBufferBindings; j++) {
            if (ctx->UniformBufferBindings[j].BufferObject == bufObj) {
               bind_buffer_base_uniform_buffer(ctx, j, NULL);
            }
         }

         if (ctx->UniformBuffer == bufObj) {
            bind_buffer_object(ctx, &ctx->UniformBuffer, 0, false);
         }

         /* unbind SSBO binding points */
         for (j = 0; j < ctx->Const.MaxShaderStorageBufferBindings; j++) {
            if (ctx->ShaderStorageBufferBindings[j].BufferObject == bufObj) {
               bind_buffer_base_shader_storage_buffer(ctx, j, NULL);
            }
         }

         if (ctx->ShaderStorageBuffer == bufObj) {
            bind_buffer_object(ctx, &ctx->ShaderStorageBuffer, 0, false);
         }

         /* unbind Atomic Buffer binding points */
         for (j = 0; j < ctx->Const.MaxAtomicBufferBindings; j++) {
            if (ctx->AtomicBufferBindings[j].BufferObject == bufObj) {
               bind_buffer_base_atomic_buffer(ctx, j, NULL);
            }
         }

         if (ctx->AtomicBuffer == bufObj) {
            bind_buffer_object(ctx, &ctx->AtomicBuffer, 0, false);
         }

         /* unbind any pixel pack/unpack pointers bound to this buffer */
         if (ctx->Pack.BufferObj == bufObj) {
            bind_buffer_object(ctx, &ctx->Pack.BufferObj, 0, false);
         }
         if (ctx->Unpack.BufferObj == bufObj) {
            bind_buffer_object(ctx, &ctx->Unpack.BufferObj, 0, false);
         }

         if (ctx->Texture.BufferObject == bufObj) {
            bind_buffer_object(ctx, &ctx->Texture.BufferObject, 0, false);
         }

         if (ctx->ExternalVirtualMemoryBuffer == bufObj) {
            bind_buffer_object(ctx, &ctx->ExternalVirtualMemoryBuffer, 0, false);
         }

         /* unbind query buffer binding point */
         if (ctx->QueryBuffer == bufObj) {
            bind_buffer_object(ctx, &ctx->QueryBuffer, 0, false);
         }

         /* The ID is immediately freed for re-use */
         _mesa_HashRemoveLocked(&ctx->Shared->BufferObjects, ids[i]);
         /* Make sure we do not run into the classic ABA problem on bind.
          * We don't want to allow re-binding a buffer object that's been
          * "deleted" by glDeleteBuffers().
          *
          * The explicit rebinding to the default object in the current context
          * prevents the above in the current context, but another context
          * sharing the same objects might suffer from this problem.
          * The alternative would be to do the hash lookup in any case on bind
          * which would introduce more runtime overhead than this.
          */
         bufObj->DeletePending = GL_TRUE;

         /* The GLuint ID holds one reference and the context that created
          * the buffer holds the other one.
          */
         assert(p_atomic_read(&bufObj->RefCount) >= (bufObj->Ctx ? 2 : 1));

         if (bufObj->Ctx == ctx) {
            detach_ctx_from_buffer(ctx, bufObj);
         } else if (bufObj->Ctx) {
            /* Only the context holding it can release it. */
            _mesa_set_add(ctx->Shared->ZombieBufferObjects, bufObj);
         }

         _mesa_reference_buffer_object(ctx, &bufObj, NULL);
      }
   }

   _mesa_HashUnlockMaybeLocked(&ctx->Shared->BufferObjects,
                               ctx->BufferObjectsLocked);
}
1915
1916
1917 void GLAPIENTRY
_mesa_DeleteBuffers_no_error(GLsizei n,const GLuint * ids)1918 _mesa_DeleteBuffers_no_error(GLsizei n, const GLuint *ids)
1919 {
1920 GET_CURRENT_CONTEXT(ctx);
1921 delete_buffers(ctx, n, ids);
1922 }
1923
1924
1925 void GLAPIENTRY
_mesa_DeleteBuffers(GLsizei n,const GLuint * ids)1926 _mesa_DeleteBuffers(GLsizei n, const GLuint *ids)
1927 {
1928 GET_CURRENT_CONTEXT(ctx);
1929
1930 if (n < 0) {
1931 _mesa_error(ctx, GL_INVALID_VALUE, "glDeleteBuffersARB(n)");
1932 return;
1933 }
1934
1935 delete_buffers(ctx, n, ids);
1936 }
1937
1938
1939 /**
1940 * This is the implementation for glGenBuffers and glCreateBuffers. It is not
1941 * exposed to the rest of Mesa to encourage the use of nameless buffers in
1942 * driver internals.
1943 */
1944 static void
create_buffers(struct gl_context * ctx,GLsizei n,GLuint * buffers,bool dsa)1945 create_buffers(struct gl_context *ctx, GLsizei n, GLuint *buffers, bool dsa)
1946 {
1947 struct gl_buffer_object *buf;
1948
1949 if (!buffers)
1950 return;
1951
1952 /*
1953 * This must be atomic (generation and allocation of buffer object IDs)
1954 */
1955 _mesa_HashLockMaybeLocked(&ctx->Shared->BufferObjects,
1956 ctx->BufferObjectsLocked);
1957 /* If one context only creates buffers and another context only deletes
1958 * buffers, buffers don't get released because it only produces zombie
1959 * buffers. Only the context that has created the buffers can release
1960 * them. Thus, when we create buffers, we prune the list of zombie
1961 * buffers.
1962 */
1963 unreference_zombie_buffers_for_ctx(ctx);
1964
1965 _mesa_HashFindFreeKeys(&ctx->Shared->BufferObjects, buffers, n);
1966
1967 /* Insert the ID and pointer into the hash table. If non-DSA, insert a
1968 * DummyBufferObject. Otherwise, create a new buffer object and insert
1969 * it.
1970 */
1971 for (int i = 0; i < n; i++) {
1972 if (dsa) {
1973 buf = new_gl_buffer_object(ctx, buffers[i]);
1974 if (!buf) {
1975 _mesa_error(ctx, GL_OUT_OF_MEMORY, "glCreateBuffers");
1976 _mesa_HashUnlockMaybeLocked(&ctx->Shared->BufferObjects,
1977 ctx->BufferObjectsLocked);
1978 return;
1979 }
1980 }
1981 else
1982 buf = &DummyBufferObject;
1983
1984 _mesa_HashInsertLocked(&ctx->Shared->BufferObjects, buffers[i], buf);
1985 }
1986
1987 _mesa_HashUnlockMaybeLocked(&ctx->Shared->BufferObjects,
1988 ctx->BufferObjectsLocked);
1989 }
1990
1991
1992 static void
create_buffers_err(struct gl_context * ctx,GLsizei n,GLuint * buffers,bool dsa)1993 create_buffers_err(struct gl_context *ctx, GLsizei n, GLuint *buffers, bool dsa)
1994 {
1995 const char *func = dsa ? "glCreateBuffers" : "glGenBuffers";
1996
1997 if (MESA_VERBOSE & VERBOSE_API)
1998 _mesa_debug(ctx, "%s(%d)\n", func, n);
1999
2000 if (n < 0) {
2001 _mesa_error(ctx, GL_INVALID_VALUE, "%s(n %d < 0)", func, n);
2002 return;
2003 }
2004
2005 create_buffers(ctx, n, buffers, dsa);
2006 }
2007
2008 /**
2009 * Generate a set of unique buffer object IDs and store them in \c buffers.
2010 *
2011 * \param n Number of IDs to generate.
2012 * \param buffers Array of \c n locations to store the IDs.
2013 */
2014 void GLAPIENTRY
_mesa_GenBuffers_no_error(GLsizei n,GLuint * buffers)2015 _mesa_GenBuffers_no_error(GLsizei n, GLuint *buffers)
2016 {
2017 GET_CURRENT_CONTEXT(ctx);
2018 create_buffers(ctx, n, buffers, false);
2019 }
2020
2021
2022 void GLAPIENTRY
_mesa_GenBuffers(GLsizei n,GLuint * buffers)2023 _mesa_GenBuffers(GLsizei n, GLuint *buffers)
2024 {
2025 GET_CURRENT_CONTEXT(ctx);
2026 create_buffers_err(ctx, n, buffers, false);
2027 }
2028
2029 /**
2030 * Create a set of buffer objects and store their unique IDs in \c buffers.
2031 *
2032 * \param n Number of IDs to generate.
2033 * \param buffers Array of \c n locations to store the IDs.
2034 */
2035 void GLAPIENTRY
_mesa_CreateBuffers_no_error(GLsizei n,GLuint * buffers)2036 _mesa_CreateBuffers_no_error(GLsizei n, GLuint *buffers)
2037 {
2038 GET_CURRENT_CONTEXT(ctx);
2039 create_buffers(ctx, n, buffers, true);
2040 }
2041
2042
2043 void GLAPIENTRY
_mesa_CreateBuffers(GLsizei n,GLuint * buffers)2044 _mesa_CreateBuffers(GLsizei n, GLuint *buffers)
2045 {
2046 GET_CURRENT_CONTEXT(ctx);
2047 create_buffers_err(ctx, n, buffers, true);
2048 }
2049
2050
2051 /**
2052 * Determine if ID is the name of a buffer object.
2053 *
2054 * \param id ID of the potential buffer object.
2055 * \return \c GL_TRUE if \c id is the name of a buffer object,
2056 * \c GL_FALSE otherwise.
2057 */
2058 GLboolean GLAPIENTRY
_mesa_IsBuffer(GLuint id)2059 _mesa_IsBuffer(GLuint id)
2060 {
2061 struct gl_buffer_object *bufObj;
2062 GET_CURRENT_CONTEXT(ctx);
2063 ASSERT_OUTSIDE_BEGIN_END_WITH_RETVAL(ctx, GL_FALSE);
2064
2065 bufObj = _mesa_lookup_bufferobj(ctx, id);
2066
2067 return bufObj && bufObj != &DummyBufferObject;
2068 }
2069
2070
2071 static bool
validate_buffer_storage(struct gl_context * ctx,struct gl_buffer_object * bufObj,GLsizeiptr size,GLbitfield flags,const char * func)2072 validate_buffer_storage(struct gl_context *ctx,
2073 struct gl_buffer_object *bufObj, GLsizeiptr size,
2074 GLbitfield flags, const char *func)
2075 {
2076 if (size <= 0) {
2077 _mesa_error(ctx, GL_INVALID_VALUE, "%s(size <= 0)", func);
2078 return false;
2079 }
2080
2081 GLbitfield valid_flags = GL_MAP_READ_BIT |
2082 GL_MAP_WRITE_BIT |
2083 GL_MAP_PERSISTENT_BIT |
2084 GL_MAP_COHERENT_BIT |
2085 GL_DYNAMIC_STORAGE_BIT |
2086 GL_CLIENT_STORAGE_BIT;
2087
2088 if (ctx->Extensions.ARB_sparse_buffer)
2089 valid_flags |= GL_SPARSE_STORAGE_BIT_ARB;
2090
2091 if (flags & ~valid_flags) {
2092 _mesa_error(ctx, GL_INVALID_VALUE, "%s(invalid flag bits set)", func);
2093 return false;
2094 }
2095
2096 /* The Errors section of the GL_ARB_sparse_buffer spec says:
2097 *
2098 * "INVALID_VALUE is generated by BufferStorage if <flags> contains
2099 * SPARSE_STORAGE_BIT_ARB and <flags> also contains any combination of
2100 * MAP_READ_BIT or MAP_WRITE_BIT."
2101 */
2102 if (flags & GL_SPARSE_STORAGE_BIT_ARB &&
2103 flags & (GL_MAP_READ_BIT | GL_MAP_WRITE_BIT)) {
2104 _mesa_error(ctx, GL_INVALID_VALUE, "%s(SPARSE_STORAGE and READ/WRITE)", func);
2105 return false;
2106 }
2107
2108 if (flags & GL_MAP_PERSISTENT_BIT &&
2109 !(flags & (GL_MAP_READ_BIT | GL_MAP_WRITE_BIT))) {
2110 _mesa_error(ctx, GL_INVALID_VALUE,
2111 "%s(PERSISTENT and flags!=READ/WRITE)", func);
2112 return false;
2113 }
2114
2115 if (flags & GL_MAP_COHERENT_BIT && !(flags & GL_MAP_PERSISTENT_BIT)) {
2116 _mesa_error(ctx, GL_INVALID_VALUE,
2117 "%s(COHERENT and flags!=PERSISTENT)", func);
2118 return false;
2119 }
2120
2121 if (bufObj->Immutable || bufObj->HandleAllocated) {
2122 _mesa_error(ctx, GL_INVALID_OPERATION, "%s(immutable)", func);
2123 return false;
2124 }
2125
2126 return true;
2127 }
2128
2129
2130 static void
buffer_storage(struct gl_context * ctx,struct gl_buffer_object * bufObj,struct gl_memory_object * memObj,GLenum target,GLsizeiptr size,const GLvoid * data,GLbitfield flags,GLuint64 offset,const char * func)2131 buffer_storage(struct gl_context *ctx, struct gl_buffer_object *bufObj,
2132 struct gl_memory_object *memObj, GLenum target,
2133 GLsizeiptr size, const GLvoid *data, GLbitfield flags,
2134 GLuint64 offset, const char *func)
2135 {
2136 GLboolean res;
2137
2138 /* Unmap the existing buffer. We'll replace it now. Not an error. */
2139 _mesa_buffer_unmap_all_mappings(ctx, bufObj);
2140
2141 FLUSH_VERTICES(ctx, 0, 0);
2142
2143 bufObj->Immutable = GL_TRUE;
2144 bufObj->MinMaxCacheDirty = true;
2145
2146 if (memObj) {
2147 res = bufferobj_data_mem(ctx, target, size, memObj, offset,
2148 GL_DYNAMIC_DRAW, bufObj);
2149 }
2150 else {
2151 res = _mesa_bufferobj_data(ctx, target, size, data, GL_DYNAMIC_DRAW,
2152 flags, bufObj);
2153 }
2154
2155 if (!res) {
2156 if (target == GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD) {
2157 /* Even though the interaction between AMD_pinned_memory and
2158 * glBufferStorage is not described in the spec, Graham Sellers
2159 * said that it should behave the same as glBufferData.
2160 */
2161 _mesa_error(ctx, GL_INVALID_OPERATION, "%s", func);
2162 }
2163 else {
2164 _mesa_error(ctx, GL_OUT_OF_MEMORY, "%s", func);
2165 }
2166 }
2167 }
2168
2169
2170 static ALWAYS_INLINE void
inlined_buffer_storage(GLenum target,GLuint buffer,GLsizeiptr size,const GLvoid * data,GLbitfield flags,GLuint memory,GLuint64 offset,bool dsa,bool mem,bool no_error,const char * func)2171 inlined_buffer_storage(GLenum target, GLuint buffer, GLsizeiptr size,
2172 const GLvoid *data, GLbitfield flags,
2173 GLuint memory, GLuint64 offset,
2174 bool dsa, bool mem, bool no_error, const char *func)
2175 {
2176 GET_CURRENT_CONTEXT(ctx);
2177 struct gl_buffer_object *bufObj;
2178 struct gl_memory_object *memObj = NULL;
2179
2180 if (mem) {
2181 if (!no_error) {
2182 if (!ctx->Extensions.EXT_memory_object) {
2183 _mesa_error(ctx, GL_INVALID_OPERATION, "%s(unsupported)", func);
2184 return;
2185 }
2186
2187 /* From the EXT_external_objects spec:
2188 *
2189 * "An INVALID_VALUE error is generated by BufferStorageMemEXT and
2190 * NamedBufferStorageMemEXT if <memory> is 0, or ..."
2191 */
2192 if (memory == 0) {
2193 _mesa_error(ctx, GL_INVALID_VALUE, "%s(memory == 0)", func);
2194 }
2195 }
2196
2197 memObj = _mesa_lookup_memory_object(ctx, memory);
2198 if (!memObj)
2199 return;
2200
2201 /* From the EXT_external_objects spec:
2202 *
2203 * "An INVALID_OPERATION error is generated if <memory> names a
2204 * valid memory object which has no associated memory."
2205 */
2206 if (!no_error && !memObj->Immutable) {
2207 _mesa_error(ctx, GL_INVALID_OPERATION, "%s(no associated memory)",
2208 func);
2209 return;
2210 }
2211 }
2212
2213 if (dsa) {
2214 if (no_error) {
2215 bufObj = _mesa_lookup_bufferobj(ctx, buffer);
2216 } else {
2217 bufObj = _mesa_lookup_bufferobj_err(ctx, buffer, func);
2218 if (!bufObj)
2219 return;
2220 }
2221 } else {
2222 if (no_error) {
2223 struct gl_buffer_object **bufObjPtr =
2224 get_buffer_target(ctx, target, true);
2225 bufObj = *bufObjPtr;
2226 } else {
2227 bufObj = get_buffer(ctx, func, target, GL_INVALID_OPERATION);
2228 if (!bufObj)
2229 return;
2230 }
2231 }
2232
2233 if (no_error || validate_buffer_storage(ctx, bufObj, size, flags, func))
2234 buffer_storage(ctx, bufObj, memObj, target, size, data, flags, offset, func);
2235 }
2236
2237
2238 void GLAPIENTRY
_mesa_BufferStorage_no_error(GLenum target,GLsizeiptr size,const GLvoid * data,GLbitfield flags)2239 _mesa_BufferStorage_no_error(GLenum target, GLsizeiptr size,
2240 const GLvoid *data, GLbitfield flags)
2241 {
2242 inlined_buffer_storage(target, 0, size, data, flags, GL_NONE, 0,
2243 false, false, true, "glBufferStorage");
2244 }
2245
2246
2247 void GLAPIENTRY
_mesa_BufferStorage(GLenum target,GLsizeiptr size,const GLvoid * data,GLbitfield flags)2248 _mesa_BufferStorage(GLenum target, GLsizeiptr size, const GLvoid *data,
2249 GLbitfield flags)
2250 {
2251 inlined_buffer_storage(target, 0, size, data, flags, GL_NONE, 0,
2252 false, false, false, "glBufferStorage");
2253 }
2254
2255 void GLAPIENTRY
_mesa_NamedBufferStorageEXT(GLuint buffer,GLsizeiptr size,const GLvoid * data,GLbitfield flags)2256 _mesa_NamedBufferStorageEXT(GLuint buffer, GLsizeiptr size,
2257 const GLvoid *data, GLbitfield flags)
2258 {
2259 GET_CURRENT_CONTEXT(ctx);
2260
2261 struct gl_buffer_object *bufObj = _mesa_lookup_bufferobj(ctx, buffer);
2262 if (!handle_bind_buffer_gen(ctx, buffer,
2263 &bufObj, "glNamedBufferStorageEXT", false))
2264 return;
2265
2266 inlined_buffer_storage(GL_NONE, buffer, size, data, flags, GL_NONE, 0,
2267 true, false, false, "glNamedBufferStorageEXT");
2268 }
2269
2270
2271 void GLAPIENTRY
_mesa_BufferStorageMemEXT(GLenum target,GLsizeiptr size,GLuint memory,GLuint64 offset)2272 _mesa_BufferStorageMemEXT(GLenum target, GLsizeiptr size,
2273 GLuint memory, GLuint64 offset)
2274 {
2275 inlined_buffer_storage(target, 0, size, NULL, 0, memory, offset,
2276 false, true, false, "glBufferStorageMemEXT");
2277 }
2278
2279
2280 void GLAPIENTRY
_mesa_BufferStorageMemEXT_no_error(GLenum target,GLsizeiptr size,GLuint memory,GLuint64 offset)2281 _mesa_BufferStorageMemEXT_no_error(GLenum target, GLsizeiptr size,
2282 GLuint memory, GLuint64 offset)
2283 {
2284 inlined_buffer_storage(target, 0, size, NULL, 0, memory, offset,
2285 false, true, true, "glBufferStorageMemEXT");
2286 }
2287
2288
2289 void GLAPIENTRY
_mesa_NamedBufferStorage_no_error(GLuint buffer,GLsizeiptr size,const GLvoid * data,GLbitfield flags)2290 _mesa_NamedBufferStorage_no_error(GLuint buffer, GLsizeiptr size,
2291 const GLvoid *data, GLbitfield flags)
2292 {
2293 /* In direct state access, buffer objects have an unspecified target
2294 * since they are not required to be bound.
2295 */
2296 inlined_buffer_storage(GL_NONE, buffer, size, data, flags, GL_NONE, 0,
2297 true, false, true, "glNamedBufferStorage");
2298 }
2299
2300
2301 void GLAPIENTRY
_mesa_NamedBufferStorage(GLuint buffer,GLsizeiptr size,const GLvoid * data,GLbitfield flags)2302 _mesa_NamedBufferStorage(GLuint buffer, GLsizeiptr size, const GLvoid *data,
2303 GLbitfield flags)
2304 {
2305 /* In direct state access, buffer objects have an unspecified target
2306 * since they are not required to be bound.
2307 */
2308 inlined_buffer_storage(GL_NONE, buffer, size, data, flags, GL_NONE, 0,
2309 true, false, false, "glNamedBufferStorage");
2310 }
2311
2312 void GLAPIENTRY
_mesa_NamedBufferStorageMemEXT(GLuint buffer,GLsizeiptr size,GLuint memory,GLuint64 offset)2313 _mesa_NamedBufferStorageMemEXT(GLuint buffer, GLsizeiptr size,
2314 GLuint memory, GLuint64 offset)
2315 {
2316 inlined_buffer_storage(GL_NONE, buffer, size, NULL, 0, memory, offset,
2317 true, true, false, "glNamedBufferStorageMemEXT");
2318 }
2319
2320
2321 void GLAPIENTRY
_mesa_NamedBufferStorageMemEXT_no_error(GLuint buffer,GLsizeiptr size,GLuint memory,GLuint64 offset)2322 _mesa_NamedBufferStorageMemEXT_no_error(GLuint buffer, GLsizeiptr size,
2323 GLuint memory, GLuint64 offset)
2324 {
2325 inlined_buffer_storage(GL_NONE, buffer, size, NULL, 0, memory, offset,
2326 true, true, true, "glNamedBufferStorageMemEXT");
2327 }
2328
2329
/**
 * Shared implementation of glBufferData and glNamedBufferData[EXT]:
 * (re)allocate the buffer's data store with mutable-storage semantics.
 *
 * \param bufObj    the buffer object to (re)allocate
 * \param target    GL target for driver hints, or GL_NONE for DSA callers
 * \param no_error  true for KHR_no_error dispatch: skip all validation
 */
static ALWAYS_INLINE void
buffer_data(struct gl_context *ctx, struct gl_buffer_object *bufObj,
            GLenum target, GLsizeiptr size, const GLvoid *data, GLenum usage,
            const char *func, bool no_error)
{
   bool valid_usage;

   if (MESA_VERBOSE & VERBOSE_API) {
      _mesa_debug(ctx, "%s(%s, %ld, %p, %s)\n",
                  func,
                  _mesa_enum_to_string(target),
                  (long int) size, data,
                  _mesa_enum_to_string(usage));
   }

   if (!no_error) {
      if (size < 0) {
         _mesa_error(ctx, GL_INVALID_VALUE, "%s(size < 0)", func);
         return;
      }

      /* Which usage enums are legal depends on the API: GLES1 only has
       * STATIC/DYNAMIC_DRAW; the READ/COPY variants need desktop GL or
       * GLES3.
       */
      switch (usage) {
      case GL_STREAM_DRAW_ARB:
         valid_usage = (ctx->API != API_OPENGLES);
         break;
      case GL_STATIC_DRAW_ARB:
      case GL_DYNAMIC_DRAW_ARB:
         valid_usage = true;
         break;
      case GL_STREAM_READ_ARB:
      case GL_STREAM_COPY_ARB:
      case GL_STATIC_READ_ARB:
      case GL_STATIC_COPY_ARB:
      case GL_DYNAMIC_READ_ARB:
      case GL_DYNAMIC_COPY_ARB:
         valid_usage = _mesa_is_desktop_gl(ctx) || _mesa_is_gles3(ctx);
         break;
      default:
         valid_usage = false;
         break;
      }

      if (!valid_usage) {
         _mesa_error(ctx, GL_INVALID_ENUM, "%s(invalid usage: %s)", func,
                     _mesa_enum_to_string(usage));
         return;
      }

      /* glBufferData may not respecify an immutable store (or one that has
       * a bindless handle allocated).
       */
      if (bufObj->Immutable || bufObj->HandleAllocated) {
         _mesa_error(ctx, GL_INVALID_OPERATION, "%s(immutable)", func);
         return;
      }
   }

   /* Unmap the existing buffer. We'll replace it now. Not an error. */
   _mesa_buffer_unmap_all_mappings(ctx, bufObj);

   FLUSH_VERTICES(ctx, 0, 0);

   /* New contents invalidate any cached min/max index results. */
   bufObj->MinMaxCacheDirty = true;

#ifdef VBO_DEBUG
   printf("glBufferDataARB(%u, sz %ld, from %p, usage 0x%x)\n",
          bufObj->Name, size, data, usage);
#endif

#ifdef BOUNDS_CHECK
   /* Debug builds over-allocate to catch out-of-bounds writes. */
   size += 100;
#endif

   if (!_mesa_bufferobj_data(ctx, target, size, data, usage,
                             GL_MAP_READ_BIT |
                             GL_MAP_WRITE_BIT |
                             GL_DYNAMIC_STORAGE_BIT,
                             bufObj)) {
      if (target == GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD) {
         if (!no_error) {
            /* From GL_AMD_pinned_memory:
             *
             *   INVALID_OPERATION is generated by BufferData if <target> is
             *   EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD, and the store cannot be
             *   mapped to the GPU address space.
             */
            _mesa_error(ctx, GL_INVALID_OPERATION, "%s", func);
         }
      } else {
         _mesa_error(ctx, GL_OUT_OF_MEMORY, "%s", func);
      }
   }
}
2420
2421 static void
buffer_data_error(struct gl_context * ctx,struct gl_buffer_object * bufObj,GLenum target,GLsizeiptr size,const GLvoid * data,GLenum usage,const char * func)2422 buffer_data_error(struct gl_context *ctx, struct gl_buffer_object *bufObj,
2423 GLenum target, GLsizeiptr size, const GLvoid *data,
2424 GLenum usage, const char *func)
2425 {
2426 buffer_data(ctx, bufObj, target, size, data, usage, func, false);
2427 }
2428
2429 static void
buffer_data_no_error(struct gl_context * ctx,struct gl_buffer_object * bufObj,GLenum target,GLsizeiptr size,const GLvoid * data,GLenum usage,const char * func)2430 buffer_data_no_error(struct gl_context *ctx, struct gl_buffer_object *bufObj,
2431 GLenum target, GLsizeiptr size, const GLvoid *data,
2432 GLenum usage, const char *func)
2433 {
2434 buffer_data(ctx, bufObj, target, size, data, usage, func, true);
2435 }
2436
2437 void
_mesa_buffer_data(struct gl_context * ctx,struct gl_buffer_object * bufObj,GLenum target,GLsizeiptr size,const GLvoid * data,GLenum usage,const char * func)2438 _mesa_buffer_data(struct gl_context *ctx, struct gl_buffer_object *bufObj,
2439 GLenum target, GLsizeiptr size, const GLvoid *data,
2440 GLenum usage, const char *func)
2441 {
2442 buffer_data_error(ctx, bufObj, target, size, data, usage, func);
2443 }
2444
2445 void GLAPIENTRY
_mesa_BufferData_no_error(GLenum target,GLsizeiptr size,const GLvoid * data,GLenum usage)2446 _mesa_BufferData_no_error(GLenum target, GLsizeiptr size, const GLvoid *data,
2447 GLenum usage)
2448 {
2449 GET_CURRENT_CONTEXT(ctx);
2450
2451 struct gl_buffer_object **bufObj = get_buffer_target(ctx, target, true);
2452 buffer_data_no_error(ctx, *bufObj, target, size, data, usage,
2453 "glBufferData");
2454 }
2455
2456 void GLAPIENTRY
_mesa_BufferData(GLenum target,GLsizeiptr size,const GLvoid * data,GLenum usage)2457 _mesa_BufferData(GLenum target, GLsizeiptr size,
2458 const GLvoid *data, GLenum usage)
2459 {
2460 GET_CURRENT_CONTEXT(ctx);
2461 struct gl_buffer_object *bufObj;
2462
2463 bufObj = get_buffer(ctx, "glBufferData", target, GL_INVALID_OPERATION);
2464 if (!bufObj)
2465 return;
2466
2467 _mesa_buffer_data(ctx, bufObj, target, size, data, usage,
2468 "glBufferData");
2469 }
2470
2471 void GLAPIENTRY
_mesa_NamedBufferData_no_error(GLuint buffer,GLsizeiptr size,const GLvoid * data,GLenum usage)2472 _mesa_NamedBufferData_no_error(GLuint buffer, GLsizeiptr size,
2473 const GLvoid *data, GLenum usage)
2474 {
2475 GET_CURRENT_CONTEXT(ctx);
2476
2477 struct gl_buffer_object *bufObj = _mesa_lookup_bufferobj(ctx, buffer);
2478 buffer_data_no_error(ctx, bufObj, GL_NONE, size, data, usage,
2479 "glNamedBufferData");
2480 }
2481
2482 void GLAPIENTRY
_mesa_NamedBufferData(GLuint buffer,GLsizeiptr size,const GLvoid * data,GLenum usage)2483 _mesa_NamedBufferData(GLuint buffer, GLsizeiptr size, const GLvoid *data,
2484 GLenum usage)
2485 {
2486 GET_CURRENT_CONTEXT(ctx);
2487 struct gl_buffer_object *bufObj;
2488
2489 bufObj = _mesa_lookup_bufferobj_err(ctx, buffer, "glNamedBufferData");
2490 if (!bufObj)
2491 return;
2492
2493 /* In direct state access, buffer objects have an unspecified target since
2494 * they are not required to be bound.
2495 */
2496 _mesa_buffer_data(ctx, bufObj, GL_NONE, size, data, usage,
2497 "glNamedBufferData");
2498 }
2499
2500 void GLAPIENTRY
_mesa_NamedBufferDataEXT(GLuint buffer,GLsizeiptr size,const GLvoid * data,GLenum usage)2501 _mesa_NamedBufferDataEXT(GLuint buffer, GLsizeiptr size, const GLvoid *data,
2502 GLenum usage)
2503 {
2504 GET_CURRENT_CONTEXT(ctx);
2505 struct gl_buffer_object *bufObj;
2506
2507 if (!buffer) {
2508 _mesa_error(ctx, GL_INVALID_OPERATION,
2509 "glNamedBufferDataEXT(buffer=0)");
2510 return;
2511 }
2512
2513 bufObj = _mesa_lookup_bufferobj(ctx, buffer);
2514 if (!handle_bind_buffer_gen(ctx, buffer,
2515 &bufObj, "glNamedBufferDataEXT", false))
2516 return;
2517
2518 _mesa_buffer_data(ctx, bufObj, GL_NONE, size, data, usage,
2519 "glNamedBufferDataEXT");
2520 }
2521
2522 static bool
validate_buffer_sub_data(struct gl_context * ctx,struct gl_buffer_object * bufObj,GLintptr offset,GLsizeiptr size,const char * func)2523 validate_buffer_sub_data(struct gl_context *ctx,
2524 struct gl_buffer_object *bufObj,
2525 GLintptr offset, GLsizeiptr size,
2526 const char *func)
2527 {
2528 if (!buffer_object_subdata_range_good(ctx, bufObj, offset, size,
2529 true, func)) {
2530 /* error already recorded */
2531 return false;
2532 }
2533
2534 if (bufObj->Immutable &&
2535 !(bufObj->StorageFlags & GL_DYNAMIC_STORAGE_BIT)) {
2536 _mesa_error(ctx, GL_INVALID_OPERATION, "%s", func);
2537 return false;
2538 }
2539
2540 if ((bufObj->Usage == GL_STATIC_DRAW ||
2541 bufObj->Usage == GL_STATIC_COPY) &&
2542 bufObj->NumSubDataCalls >= BUFFER_WARNING_CALL_COUNT - 1) {
2543 /* If the application declared the buffer as static draw/copy or stream
2544 * draw, it should not be frequently modified with glBufferSubData.
2545 */
2546 BUFFER_USAGE_WARNING(ctx,
2547 "using %s(buffer %u, offset %u, size %u) to "
2548 "update a %s buffer",
2549 func, bufObj->Name, offset, size,
2550 _mesa_enum_to_string(bufObj->Usage));
2551 }
2552
2553 return true;
2554 }
2555
2556
2557 /**
2558 * Implementation for glBufferSubData and glNamedBufferSubData.
2559 *
2560 * \param ctx GL context.
2561 * \param bufObj The buffer object.
2562 * \param offset Offset of the first byte of the subdata range.
2563 * \param size Size, in bytes, of the subdata range.
2564 * \param data The data store.
2565 * \param func Name of calling function for recording errors.
2566 *
2567 */
2568 void
_mesa_buffer_sub_data(struct gl_context * ctx,struct gl_buffer_object * bufObj,GLintptr offset,GLsizeiptr size,const GLvoid * data)2569 _mesa_buffer_sub_data(struct gl_context *ctx, struct gl_buffer_object *bufObj,
2570 GLintptr offset, GLsizeiptr size, const GLvoid *data)
2571 {
2572 if (size == 0)
2573 return;
2574
2575 bufObj->NumSubDataCalls++;
2576 bufObj->MinMaxCacheDirty = true;
2577
2578 _mesa_bufferobj_subdata(ctx, offset, size, data, bufObj);
2579 }
2580
2581
2582 static ALWAYS_INLINE void
buffer_sub_data(GLenum target,GLuint buffer,GLintptr offset,GLsizeiptr size,const GLvoid * data,bool dsa,bool no_error,const char * func)2583 buffer_sub_data(GLenum target, GLuint buffer, GLintptr offset,
2584 GLsizeiptr size, const GLvoid *data,
2585 bool dsa, bool no_error, const char *func)
2586 {
2587 GET_CURRENT_CONTEXT(ctx);
2588 struct gl_buffer_object *bufObj;
2589
2590 if (dsa) {
2591 if (no_error) {
2592 bufObj = _mesa_lookup_bufferobj(ctx, buffer);
2593 } else {
2594 bufObj = _mesa_lookup_bufferobj_err(ctx, buffer, func);
2595 if (!bufObj)
2596 return;
2597 }
2598 } else {
2599 if (no_error) {
2600 struct gl_buffer_object **bufObjPtr = get_buffer_target(ctx, target, true);
2601 bufObj = *bufObjPtr;
2602 } else {
2603 bufObj = get_buffer(ctx, func, target, GL_INVALID_OPERATION);
2604 if (!bufObj)
2605 return;
2606 }
2607 }
2608
2609 if (no_error || validate_buffer_sub_data(ctx, bufObj, offset, size, func))
2610 _mesa_buffer_sub_data(ctx, bufObj, offset, size, data);
2611 }
2612
2613
2614 void GLAPIENTRY
_mesa_BufferSubData_no_error(GLenum target,GLintptr offset,GLsizeiptr size,const GLvoid * data)2615 _mesa_BufferSubData_no_error(GLenum target, GLintptr offset,
2616 GLsizeiptr size, const GLvoid *data)
2617 {
2618 buffer_sub_data(target, 0, offset, size, data, false, true,
2619 "glBufferSubData");
2620 }
2621
2622
2623 void GLAPIENTRY
_mesa_BufferSubData(GLenum target,GLintptr offset,GLsizeiptr size,const GLvoid * data)2624 _mesa_BufferSubData(GLenum target, GLintptr offset,
2625 GLsizeiptr size, const GLvoid *data)
2626 {
2627 buffer_sub_data(target, 0, offset, size, data, false, false,
2628 "glBufferSubData");
2629 }
2630
2631 void GLAPIENTRY
_mesa_NamedBufferSubData_no_error(GLuint buffer,GLintptr offset,GLsizeiptr size,const GLvoid * data)2632 _mesa_NamedBufferSubData_no_error(GLuint buffer, GLintptr offset,
2633 GLsizeiptr size, const GLvoid *data)
2634 {
2635 buffer_sub_data(0, buffer, offset, size, data, true, true,
2636 "glNamedBufferSubData");
2637 }
2638
2639 void GLAPIENTRY
_mesa_NamedBufferSubData(GLuint buffer,GLintptr offset,GLsizeiptr size,const GLvoid * data)2640 _mesa_NamedBufferSubData(GLuint buffer, GLintptr offset,
2641 GLsizeiptr size, const GLvoid *data)
2642 {
2643 buffer_sub_data(0, buffer, offset, size, data, true, false,
2644 "glNamedBufferSubData");
2645 }
2646
2647 void GLAPIENTRY
_mesa_NamedBufferSubDataEXT(GLuint buffer,GLintptr offset,GLsizeiptr size,const GLvoid * data)2648 _mesa_NamedBufferSubDataEXT(GLuint buffer, GLintptr offset,
2649 GLsizeiptr size, const GLvoid *data)
2650 {
2651 GET_CURRENT_CONTEXT(ctx);
2652 struct gl_buffer_object *bufObj;
2653
2654 if (!buffer) {
2655 _mesa_error(ctx, GL_INVALID_OPERATION,
2656 "glNamedBufferSubDataEXT(buffer=0)");
2657 return;
2658 }
2659
2660 bufObj = _mesa_lookup_bufferobj(ctx, buffer);
2661 if (!handle_bind_buffer_gen(ctx, buffer,
2662 &bufObj, "glNamedBufferSubDataEXT", false))
2663 return;
2664
2665 if (validate_buffer_sub_data(ctx, bufObj, offset, size,
2666 "glNamedBufferSubDataEXT")) {
2667 _mesa_buffer_sub_data(ctx, bufObj, offset, size, data);
2668 }
2669 }
2670
2671
2672 void GLAPIENTRY
_mesa_GetBufferSubData(GLenum target,GLintptr offset,GLsizeiptr size,GLvoid * data)2673 _mesa_GetBufferSubData(GLenum target, GLintptr offset,
2674 GLsizeiptr size, GLvoid *data)
2675 {
2676 GET_CURRENT_CONTEXT(ctx);
2677 struct gl_buffer_object *bufObj;
2678
2679 bufObj = get_buffer(ctx, "glGetBufferSubData", target,
2680 GL_INVALID_OPERATION);
2681 if (!bufObj)
2682 return;
2683
2684 if (!buffer_object_subdata_range_good(ctx, bufObj, offset, size, false,
2685 "glGetBufferSubData")) {
2686 return;
2687 }
2688
2689 bufferobj_get_subdata(ctx, offset, size, data, bufObj);
2690 }
2691
2692 void GLAPIENTRY
_mesa_GetNamedBufferSubData(GLuint buffer,GLintptr offset,GLsizeiptr size,GLvoid * data)2693 _mesa_GetNamedBufferSubData(GLuint buffer, GLintptr offset,
2694 GLsizeiptr size, GLvoid *data)
2695 {
2696 GET_CURRENT_CONTEXT(ctx);
2697 struct gl_buffer_object *bufObj;
2698
2699 bufObj = _mesa_lookup_bufferobj_err(ctx, buffer,
2700 "glGetNamedBufferSubData");
2701 if (!bufObj)
2702 return;
2703
2704 if (!buffer_object_subdata_range_good(ctx, bufObj, offset, size, false,
2705 "glGetNamedBufferSubData")) {
2706 return;
2707 }
2708
2709 bufferobj_get_subdata(ctx, offset, size, data, bufObj);
2710 }
2711
2712
2713 void GLAPIENTRY
_mesa_GetNamedBufferSubDataEXT(GLuint buffer,GLintptr offset,GLsizeiptr size,GLvoid * data)2714 _mesa_GetNamedBufferSubDataEXT(GLuint buffer, GLintptr offset,
2715 GLsizeiptr size, GLvoid *data)
2716 {
2717 GET_CURRENT_CONTEXT(ctx);
2718 struct gl_buffer_object *bufObj;
2719
2720 if (!buffer) {
2721 _mesa_error(ctx, GL_INVALID_OPERATION,
2722 "glGetNamedBufferSubDataEXT(buffer=0)");
2723 return;
2724 }
2725
2726 bufObj = _mesa_lookup_bufferobj(ctx, buffer);
2727 if (!handle_bind_buffer_gen(ctx, buffer,
2728 &bufObj, "glGetNamedBufferSubDataEXT", false))
2729 return;
2730
2731 if (!buffer_object_subdata_range_good(ctx, bufObj, offset, size, false,
2732 "glGetNamedBufferSubDataEXT")) {
2733 return;
2734 }
2735
2736 bufferobj_get_subdata(ctx, offset, size, data, bufObj);
2737 }
2738
2739 /**
2740 * \param subdata true if caller is *SubData, false if *Data
2741 */
/* Shared implementation of glClearBufferData / glClearBufferSubData and
 * the DSA/no_error variants: fill (offset, size) of bufObj with a constant
 * value derived from (internalformat, format, type, data).
 */
static ALWAYS_INLINE void
clear_buffer_sub_data(struct gl_context *ctx, struct gl_buffer_object *bufObj,
                      GLenum internalformat, GLintptr offset, GLsizeiptr size,
                      GLenum format, GLenum type, const GLvoid *data,
                      const char *func, bool subdata, bool no_error)
{
   mesa_format mesaFormat;
   GLubyte clearValue[MAX_PIXEL_BYTES];
   GLsizeiptr clearValueSize;

   /* This checks for disallowed mappings. */
   if (!no_error && !buffer_object_subdata_range_good(ctx, bufObj, offset, size,
                                                      subdata, func)) {
      return;
   }

   /* Resolve internalformat to a mesa_format; the no_error path skips the
    * format/type cross-validation.
    */
   if (no_error) {
      mesaFormat = _mesa_get_texbuffer_format(ctx, internalformat);
   } else {
      mesaFormat = validate_clear_buffer_format(ctx, internalformat,
                                                format, type, func);
   }

   if (mesaFormat == MESA_FORMAT_NONE)
      return;

   /* offset and size must be multiples of the clear value's byte size. */
   clearValueSize = _mesa_get_format_bytes(mesaFormat);
   if (!no_error &&
       (offset % clearValueSize != 0 || size % clearValueSize != 0)) {
      _mesa_error(ctx, GL_INVALID_VALUE,
                  "%s(offset or size is not a multiple of "
                  "internalformat size)", func);
      return;
   }

   /* Bail early. Negative size has already been checked. */
   if (size == 0)
      return;

   bufObj->MinMaxCacheDirty = true;

   /* Without driver clear support, fall back to a software fill (which
    * takes the user data unconverted).
    */
   if (!ctx->pipe->clear_buffer) {
      clear_buffer_subdata_sw(ctx, offset, size,
                              data, clearValueSize, bufObj);
      return;
   }

   /* NULL data means clear to zero; otherwise convert the user value into
    * the destination format.
    */
   if (!data)
      memset(clearValue, 0, MAX_PIXEL_BYTES);
   else if (!convert_clear_buffer_data(ctx, mesaFormat, clearValue,
                                       format, type, data, func)) {
      return;
   }

   ctx->pipe->clear_buffer(ctx->pipe, bufObj->buffer, offset, size,
                           clearValue, clearValueSize);
}
2799
2800 static void
clear_buffer_sub_data_error(struct gl_context * ctx,struct gl_buffer_object * bufObj,GLenum internalformat,GLintptr offset,GLsizeiptr size,GLenum format,GLenum type,const GLvoid * data,const char * func,bool subdata)2801 clear_buffer_sub_data_error(struct gl_context *ctx,
2802 struct gl_buffer_object *bufObj,
2803 GLenum internalformat, GLintptr offset,
2804 GLsizeiptr size, GLenum format, GLenum type,
2805 const GLvoid *data, const char *func, bool subdata)
2806 {
2807 clear_buffer_sub_data(ctx, bufObj, internalformat, offset, size, format,
2808 type, data, func, subdata, false);
2809 }
2810
2811
/**
 * No-error wrapper around clear_buffer_sub_data(): forwards all
 * arguments with no_error = true, skipping the GL error checks in
 * the worker (KHR_no_error fast path).
 */
static void
clear_buffer_sub_data_no_error(struct gl_context *ctx,
                               struct gl_buffer_object *bufObj,
                               GLenum internalformat, GLintptr offset,
                               GLsizeiptr size, GLenum format, GLenum type,
                               const GLvoid *data, const char *func,
                               bool subdata)
{
   clear_buffer_sub_data(ctx, bufObj, internalformat, offset, size, format,
                         type, data, func, subdata, true);
}
2823
2824
2825 void GLAPIENTRY
_mesa_ClearBufferData_no_error(GLenum target,GLenum internalformat,GLenum format,GLenum type,const GLvoid * data)2826 _mesa_ClearBufferData_no_error(GLenum target, GLenum internalformat,
2827 GLenum format, GLenum type, const GLvoid *data)
2828 {
2829 GET_CURRENT_CONTEXT(ctx);
2830
2831 struct gl_buffer_object **bufObj = get_buffer_target(ctx, target, true);
2832 clear_buffer_sub_data_no_error(ctx, *bufObj, internalformat, 0,
2833 (*bufObj)->Size, format, type, data,
2834 "glClearBufferData", false);
2835 }
2836
2837
2838 void GLAPIENTRY
_mesa_ClearBufferData(GLenum target,GLenum internalformat,GLenum format,GLenum type,const GLvoid * data)2839 _mesa_ClearBufferData(GLenum target, GLenum internalformat, GLenum format,
2840 GLenum type, const GLvoid *data)
2841 {
2842 GET_CURRENT_CONTEXT(ctx);
2843 struct gl_buffer_object *bufObj;
2844
2845 bufObj = get_buffer(ctx, "glClearBufferData", target, GL_INVALID_VALUE);
2846 if (!bufObj)
2847 return;
2848
2849 clear_buffer_sub_data_error(ctx, bufObj, internalformat, 0, bufObj->Size,
2850 format, type, data, "glClearBufferData", false);
2851 }
2852
2853
2854 void GLAPIENTRY
_mesa_ClearNamedBufferData_no_error(GLuint buffer,GLenum internalformat,GLenum format,GLenum type,const GLvoid * data)2855 _mesa_ClearNamedBufferData_no_error(GLuint buffer, GLenum internalformat,
2856 GLenum format, GLenum type,
2857 const GLvoid *data)
2858 {
2859 GET_CURRENT_CONTEXT(ctx);
2860
2861 struct gl_buffer_object *bufObj = _mesa_lookup_bufferobj(ctx, buffer);
2862 clear_buffer_sub_data_no_error(ctx, bufObj, internalformat, 0, bufObj->Size,
2863 format, type, data, "glClearNamedBufferData",
2864 false);
2865 }
2866
2867
2868 void GLAPIENTRY
_mesa_ClearNamedBufferData(GLuint buffer,GLenum internalformat,GLenum format,GLenum type,const GLvoid * data)2869 _mesa_ClearNamedBufferData(GLuint buffer, GLenum internalformat,
2870 GLenum format, GLenum type, const GLvoid *data)
2871 {
2872 GET_CURRENT_CONTEXT(ctx);
2873 struct gl_buffer_object *bufObj;
2874
2875 bufObj = _mesa_lookup_bufferobj_err(ctx, buffer, "glClearNamedBufferData");
2876 if (!bufObj)
2877 return;
2878
2879 clear_buffer_sub_data_error(ctx, bufObj, internalformat, 0, bufObj->Size,
2880 format, type, data, "glClearNamedBufferData",
2881 false);
2882 }
2883
2884
2885 void GLAPIENTRY
_mesa_ClearNamedBufferDataEXT(GLuint buffer,GLenum internalformat,GLenum format,GLenum type,const GLvoid * data)2886 _mesa_ClearNamedBufferDataEXT(GLuint buffer, GLenum internalformat,
2887 GLenum format, GLenum type, const GLvoid *data)
2888 {
2889 GET_CURRENT_CONTEXT(ctx);
2890 struct gl_buffer_object *bufObj = _mesa_lookup_bufferobj(ctx, buffer);
2891 if (!handle_bind_buffer_gen(ctx, buffer,
2892 &bufObj, "glClearNamedBufferDataEXT", false))
2893 return;
2894
2895 clear_buffer_sub_data_error(ctx, bufObj, internalformat, 0, bufObj->Size,
2896 format, type, data, "glClearNamedBufferDataEXT",
2897 false);
2898 }
2899
2900
2901 void GLAPIENTRY
_mesa_ClearBufferSubData_no_error(GLenum target,GLenum internalformat,GLintptr offset,GLsizeiptr size,GLenum format,GLenum type,const GLvoid * data)2902 _mesa_ClearBufferSubData_no_error(GLenum target, GLenum internalformat,
2903 GLintptr offset, GLsizeiptr size,
2904 GLenum format, GLenum type,
2905 const GLvoid *data)
2906 {
2907 GET_CURRENT_CONTEXT(ctx);
2908
2909 struct gl_buffer_object **bufObj = get_buffer_target(ctx, target, true);
2910 clear_buffer_sub_data_no_error(ctx, *bufObj, internalformat, offset, size,
2911 format, type, data, "glClearBufferSubData",
2912 true);
2913 }
2914
2915
2916 void GLAPIENTRY
_mesa_ClearBufferSubData(GLenum target,GLenum internalformat,GLintptr offset,GLsizeiptr size,GLenum format,GLenum type,const GLvoid * data)2917 _mesa_ClearBufferSubData(GLenum target, GLenum internalformat,
2918 GLintptr offset, GLsizeiptr size,
2919 GLenum format, GLenum type,
2920 const GLvoid *data)
2921 {
2922 GET_CURRENT_CONTEXT(ctx);
2923 struct gl_buffer_object *bufObj;
2924
2925 bufObj = get_buffer(ctx, "glClearBufferSubData", target, GL_INVALID_VALUE);
2926 if (!bufObj)
2927 return;
2928
2929 clear_buffer_sub_data_error(ctx, bufObj, internalformat, offset, size,
2930 format, type, data, "glClearBufferSubData",
2931 true);
2932 }
2933
2934
2935 void GLAPIENTRY
_mesa_ClearNamedBufferSubData_no_error(GLuint buffer,GLenum internalformat,GLintptr offset,GLsizeiptr size,GLenum format,GLenum type,const GLvoid * data)2936 _mesa_ClearNamedBufferSubData_no_error(GLuint buffer, GLenum internalformat,
2937 GLintptr offset, GLsizeiptr size,
2938 GLenum format, GLenum type,
2939 const GLvoid *data)
2940 {
2941 GET_CURRENT_CONTEXT(ctx);
2942
2943 struct gl_buffer_object *bufObj = _mesa_lookup_bufferobj(ctx, buffer);
2944 clear_buffer_sub_data_no_error(ctx, bufObj, internalformat, offset, size,
2945 format, type, data,
2946 "glClearNamedBufferSubData", true);
2947 }
2948
2949
2950 void GLAPIENTRY
_mesa_ClearNamedBufferSubData(GLuint buffer,GLenum internalformat,GLintptr offset,GLsizeiptr size,GLenum format,GLenum type,const GLvoid * data)2951 _mesa_ClearNamedBufferSubData(GLuint buffer, GLenum internalformat,
2952 GLintptr offset, GLsizeiptr size,
2953 GLenum format, GLenum type,
2954 const GLvoid *data)
2955 {
2956 GET_CURRENT_CONTEXT(ctx);
2957 struct gl_buffer_object *bufObj;
2958
2959 bufObj = _mesa_lookup_bufferobj_err(ctx, buffer,
2960 "glClearNamedBufferSubData");
2961 if (!bufObj)
2962 return;
2963
2964 clear_buffer_sub_data_error(ctx, bufObj, internalformat, offset, size,
2965 format, type, data, "glClearNamedBufferSubData",
2966 true);
2967 }
2968
2969 void GLAPIENTRY
_mesa_ClearNamedBufferSubDataEXT(GLuint buffer,GLenum internalformat,GLintptr offset,GLsizeiptr size,GLenum format,GLenum type,const GLvoid * data)2970 _mesa_ClearNamedBufferSubDataEXT(GLuint buffer, GLenum internalformat,
2971 GLintptr offset, GLsizeiptr size,
2972 GLenum format, GLenum type,
2973 const GLvoid *data)
2974 {
2975 GET_CURRENT_CONTEXT(ctx);
2976 struct gl_buffer_object *bufObj = _mesa_lookup_bufferobj(ctx, buffer);
2977 if (!handle_bind_buffer_gen(ctx, buffer,
2978 &bufObj, "glClearNamedBufferSubDataEXT", false))
2979 return;
2980
2981 clear_buffer_sub_data_error(ctx, bufObj, internalformat, offset, size,
2982 format, type, data, "glClearNamedBufferSubDataEXT",
2983 true);
2984 }
2985
2986 static GLboolean
unmap_buffer(struct gl_context * ctx,struct gl_buffer_object * bufObj)2987 unmap_buffer(struct gl_context *ctx, struct gl_buffer_object *bufObj)
2988 {
2989 GLboolean status = _mesa_bufferobj_unmap(ctx, bufObj, MAP_USER);
2990 bufObj->Mappings[MAP_USER].AccessFlags = 0;
2991 assert(bufObj->Mappings[MAP_USER].Pointer == NULL);
2992 assert(bufObj->Mappings[MAP_USER].Offset == 0);
2993 assert(bufObj->Mappings[MAP_USER].Length == 0);
2994
2995 return status;
2996 }
2997
/**
 * Error-checked unmap: verify the buffer currently has a user mapping
 * (GL_INVALID_OPERATION otherwise), run optional debug checks, then
 * unmap via unmap_buffer().
 *
 * \return the unmap status, or GL_FALSE if the buffer was not mapped.
 */
static GLboolean
validate_and_unmap_buffer(struct gl_context *ctx,
                          struct gl_buffer_object *bufObj,
                          const char *func)
{
   ASSERT_OUTSIDE_BEGIN_END_WITH_RETVAL(ctx, GL_FALSE);

   if (!_mesa_bufferobj_mapped(bufObj, MAP_USER)) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "%s(buffer is not mapped)", func);
      return GL_FALSE;
   }

#ifdef BOUNDS_CHECK
   /* Debug build only: the map path over-allocated 100 guard bytes and
    * filled them with the magic value 123; warn if any were clobbered.
    */
   if (bufObj->Mappings[MAP_USER].AccessFlags != GL_READ_ONLY_ARB) {
      GLubyte *buf = (GLubyte *) bufObj->Mappings[MAP_USER].Pointer;
      GLuint i;
      /* check that last 100 bytes are still = magic value */
      for (i = 0; i < 100; i++) {
         GLuint pos = bufObj->Size - i - 1;
         if (buf[pos] != 123) {
            _mesa_warning(ctx, "Out of bounds buffer object write detected"
                          " at position %d (value = %u)\n",
                          pos, buf[pos]);
         }
      }
   }
#endif

#ifdef VBO_DEBUG
   /* Debug build only: the map path pre-filled the buffer with i & 0xff;
    * report how much of a write mapping was left untouched.
    */
   if (bufObj->Mappings[MAP_USER].AccessFlags & GL_MAP_WRITE_BIT) {
      GLuint i, unchanged = 0;
      GLubyte *b = (GLubyte *) bufObj->Mappings[MAP_USER].Pointer;
      GLint pos = -1;
      /* check which bytes changed */
      for (i = 0; i < bufObj->Size - 1; i++) {
         if (b[i] == (i & 0xff) && b[i+1] == ((i+1) & 0xff)) {
            unchanged++;
            if (pos == -1)
               pos = i;
         }
      }
      if (unchanged) {
         printf("glUnmapBufferARB(%u): %u of %ld unchanged, starting at %d\n",
                bufObj->Name, unchanged, bufObj->Size, pos);
      }
   }
#endif

   return unmap_buffer(ctx, bufObj);
}
3049
3050 GLboolean GLAPIENTRY
_mesa_UnmapBuffer_no_error(GLenum target)3051 _mesa_UnmapBuffer_no_error(GLenum target)
3052 {
3053 GET_CURRENT_CONTEXT(ctx);
3054 struct gl_buffer_object **bufObjPtr = get_buffer_target(ctx, target, true);
3055 struct gl_buffer_object *bufObj = *bufObjPtr;
3056
3057 return unmap_buffer(ctx, bufObj);
3058 }
3059
3060 GLboolean GLAPIENTRY
_mesa_UnmapBuffer(GLenum target)3061 _mesa_UnmapBuffer(GLenum target)
3062 {
3063 GET_CURRENT_CONTEXT(ctx);
3064 struct gl_buffer_object *bufObj;
3065
3066 bufObj = get_buffer(ctx, "glUnmapBuffer", target, GL_INVALID_OPERATION);
3067 if (!bufObj)
3068 return GL_FALSE;
3069
3070 return validate_and_unmap_buffer(ctx, bufObj, "glUnmapBuffer");
3071 }
3072
3073 GLboolean GLAPIENTRY
_mesa_UnmapNamedBufferEXT_no_error(GLuint buffer)3074 _mesa_UnmapNamedBufferEXT_no_error(GLuint buffer)
3075 {
3076 GET_CURRENT_CONTEXT(ctx);
3077 struct gl_buffer_object *bufObj = _mesa_lookup_bufferobj(ctx, buffer);
3078
3079 return unmap_buffer(ctx, bufObj);
3080 }
3081
3082 GLboolean GLAPIENTRY
_mesa_UnmapNamedBufferEXT(GLuint buffer)3083 _mesa_UnmapNamedBufferEXT(GLuint buffer)
3084 {
3085 GET_CURRENT_CONTEXT(ctx);
3086 struct gl_buffer_object *bufObj;
3087
3088 if (!buffer) {
3089 _mesa_error(ctx, GL_INVALID_OPERATION,
3090 "glUnmapNamedBufferEXT(buffer=0)");
3091 return GL_FALSE;
3092 }
3093
3094 bufObj = _mesa_lookup_bufferobj_err(ctx, buffer, "glUnmapNamedBuffer");
3095 if (!bufObj)
3096 return GL_FALSE;
3097
3098 return validate_and_unmap_buffer(ctx, bufObj, "glUnmapNamedBuffer");
3099 }
3100
3101
/**
 * Common worker for the glGetBufferParameteriv/i64v families.
 *
 * Writes the queried value (widened to GLint64) into *params.
 * Queries gated on an extension fall through to the invalid-pname
 * error when the extension is not present.
 *
 * \return true on success; false (with GL_INVALID_ENUM recorded) for an
 *         invalid or unsupported pname.
 */
static bool
get_buffer_parameter(struct gl_context *ctx,
                     struct gl_buffer_object *bufObj, GLenum pname,
                     GLint64 *params, const char *func)
{
   switch (pname) {
   case GL_BUFFER_SIZE_ARB:
      *params = bufObj->Size;
      break;
   case GL_BUFFER_USAGE_ARB:
      *params = bufObj->Usage;
      break;
   case GL_BUFFER_ACCESS_ARB:
      /* Legacy query: collapse the range-map flags to READ/WRITE/READ_WRITE. */
      *params = simplified_access_mode(ctx,
                            bufObj->Mappings[MAP_USER].AccessFlags);
      break;
   case GL_BUFFER_MAPPED_ARB:
      *params = _mesa_bufferobj_mapped(bufObj, MAP_USER);
      break;
   case GL_BUFFER_ACCESS_FLAGS:
      if (!ctx->Extensions.ARB_map_buffer_range)
         goto invalid_pname;
      *params = bufObj->Mappings[MAP_USER].AccessFlags;
      break;
   case GL_BUFFER_MAP_OFFSET:
      if (!ctx->Extensions.ARB_map_buffer_range)
         goto invalid_pname;
      *params = bufObj->Mappings[MAP_USER].Offset;
      break;
   case GL_BUFFER_MAP_LENGTH:
      if (!ctx->Extensions.ARB_map_buffer_range)
         goto invalid_pname;
      *params = bufObj->Mappings[MAP_USER].Length;
      break;
   case GL_BUFFER_IMMUTABLE_STORAGE:
      if (!ctx->Extensions.ARB_buffer_storage)
         goto invalid_pname;
      *params = bufObj->Immutable;
      break;
   case GL_BUFFER_STORAGE_FLAGS:
      if (!ctx->Extensions.ARB_buffer_storage)
         goto invalid_pname;
      *params = bufObj->StorageFlags;
      break;
   default:
      goto invalid_pname;
   }

   return true;

invalid_pname:
   _mesa_error(ctx, GL_INVALID_ENUM, "%s(invalid pname: %s)", func,
               _mesa_enum_to_string(pname));
   return false;
}
3157
3158 void GLAPIENTRY
_mesa_GetBufferParameteriv(GLenum target,GLenum pname,GLint * params)3159 _mesa_GetBufferParameteriv(GLenum target, GLenum pname, GLint *params)
3160 {
3161 GET_CURRENT_CONTEXT(ctx);
3162 struct gl_buffer_object *bufObj;
3163 GLint64 parameter;
3164
3165 bufObj = get_buffer(ctx, "glGetBufferParameteriv", target,
3166 GL_INVALID_OPERATION);
3167 if (!bufObj)
3168 return;
3169
3170 if (!get_buffer_parameter(ctx, bufObj, pname, ¶meter,
3171 "glGetBufferParameteriv"))
3172 return; /* Error already recorded. */
3173
3174 *params = (GLint) parameter;
3175 }
3176
3177 void GLAPIENTRY
_mesa_GetBufferParameteri64v(GLenum target,GLenum pname,GLint64 * params)3178 _mesa_GetBufferParameteri64v(GLenum target, GLenum pname, GLint64 *params)
3179 {
3180 GET_CURRENT_CONTEXT(ctx);
3181 struct gl_buffer_object *bufObj;
3182 GLint64 parameter;
3183
3184 bufObj = get_buffer(ctx, "glGetBufferParameteri64v", target,
3185 GL_INVALID_OPERATION);
3186 if (!bufObj)
3187 return;
3188
3189 if (!get_buffer_parameter(ctx, bufObj, pname, ¶meter,
3190 "glGetBufferParameteri64v"))
3191 return; /* Error already recorded. */
3192
3193 *params = parameter;
3194 }
3195
3196 void GLAPIENTRY
_mesa_GetNamedBufferParameteriv(GLuint buffer,GLenum pname,GLint * params)3197 _mesa_GetNamedBufferParameteriv(GLuint buffer, GLenum pname, GLint *params)
3198 {
3199 GET_CURRENT_CONTEXT(ctx);
3200 struct gl_buffer_object *bufObj;
3201 GLint64 parameter;
3202
3203 bufObj = _mesa_lookup_bufferobj_err(ctx, buffer,
3204 "glGetNamedBufferParameteriv");
3205 if (!bufObj)
3206 return;
3207
3208 if (!get_buffer_parameter(ctx, bufObj, pname, ¶meter,
3209 "glGetNamedBufferParameteriv"))
3210 return; /* Error already recorded. */
3211
3212 *params = (GLint) parameter;
3213 }
3214
3215 void GLAPIENTRY
_mesa_GetNamedBufferParameterivEXT(GLuint buffer,GLenum pname,GLint * params)3216 _mesa_GetNamedBufferParameterivEXT(GLuint buffer, GLenum pname, GLint *params)
3217 {
3218 GET_CURRENT_CONTEXT(ctx);
3219 struct gl_buffer_object *bufObj;
3220 GLint64 parameter;
3221
3222 if (!buffer) {
3223 _mesa_error(ctx, GL_INVALID_OPERATION,
3224 "glGetNamedBufferParameterivEXT: buffer=0");
3225 return;
3226 }
3227
3228 bufObj = _mesa_lookup_bufferobj(ctx, buffer);
3229 if (!handle_bind_buffer_gen(ctx, buffer,
3230 &bufObj, "glGetNamedBufferParameterivEXT", false))
3231 return;
3232
3233 if (!get_buffer_parameter(ctx, bufObj, pname, ¶meter,
3234 "glGetNamedBufferParameterivEXT"))
3235 return; /* Error already recorded. */
3236
3237 *params = (GLint) parameter;
3238 }
3239
3240 void GLAPIENTRY
_mesa_GetNamedBufferParameteri64v(GLuint buffer,GLenum pname,GLint64 * params)3241 _mesa_GetNamedBufferParameteri64v(GLuint buffer, GLenum pname,
3242 GLint64 *params)
3243 {
3244 GET_CURRENT_CONTEXT(ctx);
3245 struct gl_buffer_object *bufObj;
3246 GLint64 parameter;
3247
3248 bufObj = _mesa_lookup_bufferobj_err(ctx, buffer,
3249 "glGetNamedBufferParameteri64v");
3250 if (!bufObj)
3251 return;
3252
3253 if (!get_buffer_parameter(ctx, bufObj, pname, ¶meter,
3254 "glGetNamedBufferParameteri64v"))
3255 return; /* Error already recorded. */
3256
3257 *params = parameter;
3258 }
3259
3260
3261 void GLAPIENTRY
_mesa_GetBufferPointerv(GLenum target,GLenum pname,GLvoid ** params)3262 _mesa_GetBufferPointerv(GLenum target, GLenum pname, GLvoid **params)
3263 {
3264 GET_CURRENT_CONTEXT(ctx);
3265 struct gl_buffer_object *bufObj;
3266
3267 if (pname != GL_BUFFER_MAP_POINTER) {
3268 _mesa_error(ctx, GL_INVALID_ENUM, "glGetBufferPointerv(pname != "
3269 "GL_BUFFER_MAP_POINTER)");
3270 return;
3271 }
3272
3273 bufObj = get_buffer(ctx, "glGetBufferPointerv", target,
3274 GL_INVALID_OPERATION);
3275 if (!bufObj)
3276 return;
3277
3278 *params = bufObj->Mappings[MAP_USER].Pointer;
3279 }
3280
3281 void GLAPIENTRY
_mesa_GetNamedBufferPointerv(GLuint buffer,GLenum pname,GLvoid ** params)3282 _mesa_GetNamedBufferPointerv(GLuint buffer, GLenum pname, GLvoid **params)
3283 {
3284 GET_CURRENT_CONTEXT(ctx);
3285 struct gl_buffer_object *bufObj;
3286
3287 if (pname != GL_BUFFER_MAP_POINTER) {
3288 _mesa_error(ctx, GL_INVALID_ENUM, "glGetNamedBufferPointerv(pname != "
3289 "GL_BUFFER_MAP_POINTER)");
3290 return;
3291 }
3292
3293 bufObj = _mesa_lookup_bufferobj_err(ctx, buffer,
3294 "glGetNamedBufferPointerv");
3295 if (!bufObj)
3296 return;
3297
3298 *params = bufObj->Mappings[MAP_USER].Pointer;
3299 }
3300
3301 void GLAPIENTRY
_mesa_GetNamedBufferPointervEXT(GLuint buffer,GLenum pname,GLvoid ** params)3302 _mesa_GetNamedBufferPointervEXT(GLuint buffer, GLenum pname, GLvoid **params)
3303 {
3304 GET_CURRENT_CONTEXT(ctx);
3305 struct gl_buffer_object *bufObj;
3306
3307 if (!buffer) {
3308 _mesa_error(ctx, GL_INVALID_OPERATION,
3309 "glGetNamedBufferPointervEXT(buffer=0)");
3310 return;
3311 }
3312 if (pname != GL_BUFFER_MAP_POINTER) {
3313 _mesa_error(ctx, GL_INVALID_ENUM, "glGetNamedBufferPointervEXT(pname != "
3314 "GL_BUFFER_MAP_POINTER)");
3315 return;
3316 }
3317
3318 bufObj = _mesa_lookup_bufferobj(ctx, buffer);
3319 if (!handle_bind_buffer_gen(ctx, buffer,
3320 &bufObj, "glGetNamedBufferPointervEXT", false))
3321 return;
3322
3323 *params = bufObj->Mappings[MAP_USER].Pointer;
3324 }
3325
/**
 * Shared validation + copy worker for glCopyBufferSubData() and the
 * named variants.
 *
 * Validation order (each failure records an error and returns):
 * mapped src/dst (GL_INVALID_OPERATION), negative offsets/size
 * (GL_INVALID_VALUE), ranges exceeding either buffer's size, and
 * overlapping ranges when src == dst.  On success the copy is done
 * via bufferobj_copy_subdata().
 */
static void
copy_buffer_sub_data(struct gl_context *ctx, struct gl_buffer_object *src,
                     struct gl_buffer_object *dst, GLintptr readOffset,
                     GLintptr writeOffset, GLsizeiptr size, const char *func)
{
   if (_mesa_check_disallowed_mapping(src)) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "%s(readBuffer is mapped)", func);
      return;
   }

   if (_mesa_check_disallowed_mapping(dst)) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "%s(writeBuffer is mapped)", func);
      return;
   }

   if (readOffset < 0) {
      _mesa_error(ctx, GL_INVALID_VALUE,
                  "%s(readOffset %d < 0)", func, (int) readOffset);
      return;
   }

   if (writeOffset < 0) {
      _mesa_error(ctx, GL_INVALID_VALUE,
                  "%s(writeOffset %d < 0)", func, (int) writeOffset);
      return;
   }

   if (size < 0) {
      _mesa_error(ctx, GL_INVALID_VALUE,
                  "%s(size %d < 0)", func, (int) size);
      return;
   }

   /* Written as "offset > Size - size" (after checking size <= Size) to
    * avoid overflowing offset + size.
    */
   if (size > src->Size || readOffset > src->Size - size) {
      _mesa_error(ctx, GL_INVALID_VALUE,
                  "%s(readOffset %d + size %d > src_buffer_size %d)", func,
                  (int) readOffset, (int) size, (int) src->Size);
      return;
   }

   if (size > dst->Size || writeOffset > dst->Size - size) {
      _mesa_error(ctx, GL_INVALID_VALUE,
                  "%s(writeOffset %d + size %d > dst_buffer_size %d)", func,
                  (int) writeOffset, (int) size, (int) dst->Size);
      return;
   }

   if (src == dst) {
      if (readOffset + size <= writeOffset) {
         /* OK */
      }
      else if (writeOffset + size <= readOffset) {
         /* OK */
      }
      else {
         /* overlapping src/dst is illegal */
         _mesa_error(ctx, GL_INVALID_VALUE,
                     "%s(overlapping src/dst)", func);
         return;
      }
   }

   bufferobj_copy_subdata(ctx, src, dst, readOffset, writeOffset, size);
}
3392
3393 void GLAPIENTRY
_mesa_CopyBufferSubData_no_error(GLenum readTarget,GLenum writeTarget,GLintptr readOffset,GLintptr writeOffset,GLsizeiptr size)3394 _mesa_CopyBufferSubData_no_error(GLenum readTarget, GLenum writeTarget,
3395 GLintptr readOffset, GLintptr writeOffset,
3396 GLsizeiptr size)
3397 {
3398 GET_CURRENT_CONTEXT(ctx);
3399
3400 struct gl_buffer_object **src_ptr = get_buffer_target(ctx, readTarget, true);
3401 struct gl_buffer_object *src = *src_ptr;
3402
3403 struct gl_buffer_object **dst_ptr = get_buffer_target(ctx, writeTarget, true);
3404 struct gl_buffer_object *dst = *dst_ptr;
3405
3406 bufferobj_copy_subdata(ctx, src, dst, readOffset, writeOffset,
3407 size);
3408 }
3409
3410 void GLAPIENTRY
_mesa_CopyBufferSubData(GLenum readTarget,GLenum writeTarget,GLintptr readOffset,GLintptr writeOffset,GLsizeiptr size)3411 _mesa_CopyBufferSubData(GLenum readTarget, GLenum writeTarget,
3412 GLintptr readOffset, GLintptr writeOffset,
3413 GLsizeiptr size)
3414 {
3415 GET_CURRENT_CONTEXT(ctx);
3416 struct gl_buffer_object *src, *dst;
3417
3418 src = get_buffer(ctx, "glCopyBufferSubData", readTarget,
3419 GL_INVALID_OPERATION);
3420 if (!src)
3421 return;
3422
3423 dst = get_buffer(ctx, "glCopyBufferSubData", writeTarget,
3424 GL_INVALID_OPERATION);
3425 if (!dst)
3426 return;
3427
3428 copy_buffer_sub_data(ctx, src, dst, readOffset, writeOffset, size,
3429 "glCopyBufferSubData");
3430 }
3431
3432 void GLAPIENTRY
_mesa_NamedCopyBufferSubDataEXT(GLuint readBuffer,GLuint writeBuffer,GLintptr readOffset,GLintptr writeOffset,GLsizeiptr size)3433 _mesa_NamedCopyBufferSubDataEXT(GLuint readBuffer, GLuint writeBuffer,
3434 GLintptr readOffset, GLintptr writeOffset,
3435 GLsizeiptr size)
3436 {
3437 GET_CURRENT_CONTEXT(ctx);
3438 struct gl_buffer_object *src, *dst;
3439
3440 src = _mesa_lookup_bufferobj(ctx, readBuffer);
3441 if (!handle_bind_buffer_gen(ctx, readBuffer,
3442 &src,
3443 "glNamedCopyBufferSubDataEXT", false))
3444 return;
3445
3446 dst = _mesa_lookup_bufferobj(ctx, writeBuffer);
3447 if (!handle_bind_buffer_gen(ctx, writeBuffer,
3448 &dst, "glNamedCopyBufferSubDataEXT", false))
3449 return;
3450
3451 copy_buffer_sub_data(ctx, src, dst, readOffset, writeOffset, size,
3452 "glNamedCopyBufferSubDataEXT");
3453 }
3454
3455 void GLAPIENTRY
_mesa_CopyNamedBufferSubData_no_error(GLuint readBuffer,GLuint writeBuffer,GLintptr readOffset,GLintptr writeOffset,GLsizeiptr size)3456 _mesa_CopyNamedBufferSubData_no_error(GLuint readBuffer, GLuint writeBuffer,
3457 GLintptr readOffset,
3458 GLintptr writeOffset, GLsizeiptr size)
3459 {
3460 GET_CURRENT_CONTEXT(ctx);
3461
3462 struct gl_buffer_object *src = _mesa_lookup_bufferobj(ctx, readBuffer);
3463 struct gl_buffer_object *dst = _mesa_lookup_bufferobj(ctx, writeBuffer);
3464
3465 bufferobj_copy_subdata(ctx, src, dst, readOffset, writeOffset,
3466 size);
3467 }
3468
3469 void GLAPIENTRY
_mesa_CopyNamedBufferSubData(GLuint readBuffer,GLuint writeBuffer,GLintptr readOffset,GLintptr writeOffset,GLsizeiptr size)3470 _mesa_CopyNamedBufferSubData(GLuint readBuffer, GLuint writeBuffer,
3471 GLintptr readOffset, GLintptr writeOffset,
3472 GLsizeiptr size)
3473 {
3474 GET_CURRENT_CONTEXT(ctx);
3475 struct gl_buffer_object *src, *dst;
3476
3477 src = _mesa_lookup_bufferobj_err(ctx, readBuffer,
3478 "glCopyNamedBufferSubData");
3479 if (!src)
3480 return;
3481
3482 dst = _mesa_lookup_bufferobj_err(ctx, writeBuffer,
3483 "glCopyNamedBufferSubData");
3484 if (!dst)
3485 return;
3486
3487 copy_buffer_sub_data(ctx, src, dst, readOffset, writeOffset, size,
3488 "glCopyNamedBufferSubData");
3489 }
3490
/**
 * Internal MESA entry point implementing the three BufferSubData
 * variants as a GPU copy from a temporary source buffer object.
 *
 * \param srcBuffer       pointer to the source gl_buffer_object, smuggled
 *                        through a GLintptr; this function consumes the
 *                        caller's reference (unreferenced on all paths)
 * \param dstTargetOrName buffer target enum (\p named == false) or buffer
 *                        name (\p named == true)
 * \param named           true for the Named* variants
 * \param ext_dsa         true for glNamedBufferSubDataEXT semantics
 *                        (never-bound names may be created on the fly)
 *
 * NOTE(review): presumably invoked by the glthread marshalling code,
 * given the raw-pointer argument — confirm against the callers.
 */
void GLAPIENTRY
_mesa_InternalBufferSubDataCopyMESA(GLintptr srcBuffer, GLuint srcOffset,
                                    GLuint dstTargetOrName, GLintptr dstOffset,
                                    GLsizeiptr size, GLboolean named,
                                    GLboolean ext_dsa)
{
   GET_CURRENT_CONTEXT(ctx);
   struct gl_buffer_object *src = (struct gl_buffer_object *)srcBuffer;
   struct gl_buffer_object *dst;
   const char *func;

   /* Handle behavior for all 3 variants. */
   if (named && ext_dsa) {
      func = "glNamedBufferSubDataEXT";
      dst = _mesa_lookup_bufferobj(ctx, dstTargetOrName);
      if (!handle_bind_buffer_gen(ctx, dstTargetOrName, &dst, func, false))
         goto done;
   } else if (named) {
      func = "glNamedBufferSubData";
      dst = _mesa_lookup_bufferobj_err(ctx, dstTargetOrName, func);
      if (!dst)
         goto done;
   } else {
      assert(!ext_dsa);
      func = "glBufferSubData";
      dst = get_buffer(ctx, func, dstTargetOrName, GL_INVALID_OPERATION);
      if (!dst)
         goto done;
   }

   if (!validate_buffer_sub_data(ctx, dst, dstOffset, size, func))
      goto done; /* the error is already set */

   bufferobj_copy_subdata(ctx, src, dst, srcOffset, dstOffset, size);

done:
   /* The caller passes the reference to this function, so unreference it. */
   _mesa_reference_buffer_object(ctx, &src, NULL);
}
3530
/**
 * Validate the parameters of glMapBufferRange() and the named variants.
 *
 * Checks offset/length sign and range, the access bitfield against the
 * bits allowed by ARB_map_buffer_range (plus persistent/coherent when
 * ARB_buffer_storage is present), access-vs-storage-flags compatibility,
 * and that the buffer is not already mapped.  Records the matching GL
 * error on failure.
 *
 * Side effect: for write maps, bumps NumMapBufferWriteCalls and may emit
 * a buffer-usage warning for frequently written GL_STATIC_* buffers.
 *
 * \return true if mapping may proceed, false otherwise.
 */
static bool
validate_map_buffer_range(struct gl_context *ctx,
                          struct gl_buffer_object *bufObj, GLintptr offset,
                          GLsizeiptr length, GLbitfield access,
                          const char *func)
{
   GLbitfield allowed_access;

   ASSERT_OUTSIDE_BEGIN_END_WITH_RETVAL(ctx, false);

   if (offset < 0) {
      _mesa_error(ctx, GL_INVALID_VALUE,
                  "%s(offset %ld < 0)", func, (long) offset);
      return false;
   }

   if (length < 0) {
      _mesa_error(ctx, GL_INVALID_VALUE,
                  "%s(length %ld < 0)", func, (long) length);
      return false;
   }

   /* Page 38 of the PDF of the OpenGL ES 3.0 spec says:
    *
    *     "An INVALID_OPERATION error is generated for any of the following
    *     conditions:
    *
    *     * <length> is zero."
    *
    * Additionally, page 94 of the PDF of the OpenGL 4.5 core spec
    * (30.10.2014) also says this, so it's no longer allowed for desktop GL,
    * either.
    */
   if (length == 0) {
      _mesa_error(ctx, GL_INVALID_OPERATION, "%s(length = 0)", func);
      return false;
   }

   allowed_access = GL_MAP_READ_BIT |
                    GL_MAP_WRITE_BIT |
                    GL_MAP_INVALIDATE_RANGE_BIT |
                    GL_MAP_INVALIDATE_BUFFER_BIT |
                    GL_MAP_FLUSH_EXPLICIT_BIT |
                    GL_MAP_UNSYNCHRONIZED_BIT;

   if (ctx->Extensions.ARB_buffer_storage) {
         /* GL_ARB_buffer_storage extends the set of valid flags */
         allowed_access |= GL_MAP_PERSISTENT_BIT |
                           GL_MAP_COHERENT_BIT;
   }

   if (access & ~allowed_access) {
      /* generate an error if any bits other than those allowed are set */
      _mesa_error(ctx, GL_INVALID_VALUE,
                  "%s(access has undefined bits set)", func);
      return false;
   }

   if ((access & (GL_MAP_READ_BIT | GL_MAP_WRITE_BIT)) == 0) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "%s(access indicates neither read or write)", func);
      return false;
   }

   if ((access & GL_MAP_READ_BIT) &&
       (access & (GL_MAP_INVALIDATE_RANGE_BIT |
                  GL_MAP_INVALIDATE_BUFFER_BIT |
                  GL_MAP_UNSYNCHRONIZED_BIT))) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "%s(read access with disallowed bits)", func);
      return false;
   }

   if ((access & GL_MAP_FLUSH_EXPLICIT_BIT) &&
       ((access & GL_MAP_WRITE_BIT) == 0)) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "%s(access has flush explicit without write)", func);
      return false;
   }

   /* The requested access bits must be a subset of the buffer's
    * immutable-storage flags.
    */
   if (access & GL_MAP_READ_BIT &&
       !(bufObj->StorageFlags & GL_MAP_READ_BIT)) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "%s(buffer does not allow read access)", func);
      return false;
   }

   if (access & GL_MAP_WRITE_BIT &&
       !(bufObj->StorageFlags & GL_MAP_WRITE_BIT)) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "%s(buffer does not allow write access)", func);
      return false;
   }

   if (access & GL_MAP_COHERENT_BIT &&
       !(bufObj->StorageFlags & GL_MAP_COHERENT_BIT)) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "%s(buffer does not allow coherent access)", func);
      return false;
   }

   if (access & GL_MAP_PERSISTENT_BIT &&
       !(bufObj->StorageFlags & GL_MAP_PERSISTENT_BIT)) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "%s(buffer does not allow persistent access)", func);
      return false;
   }

   if (offset + length > bufObj->Size) {
      _mesa_error(ctx, GL_INVALID_VALUE,
                  "%s(offset %lu + length %lu > buffer_size %lu)", func,
                  (unsigned long) offset, (unsigned long) length,
                  (unsigned long) bufObj->Size);
      return false;
   }

   if (_mesa_bufferobj_mapped(bufObj, MAP_USER)) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "%s(buffer already mapped)", func);
      return false;
   }

   if (access & GL_MAP_WRITE_BIT) {
      bufObj->NumMapBufferWriteCalls++;
      if ((bufObj->Usage == GL_STATIC_DRAW ||
           bufObj->Usage == GL_STATIC_COPY) &&
          bufObj->NumMapBufferWriteCalls >= BUFFER_WARNING_CALL_COUNT) {
         BUFFER_USAGE_WARNING(ctx,
                              "using %s(buffer %u, offset %u, length %u) to "
                              "update a %s buffer",
                              func, bufObj->Name, offset, length,
                              _mesa_enum_to_string(bufObj->Usage));
      }
   }

   return true;
}
3667
/**
 * Mapping worker for glMapBuffer*/
/* glMapBufferRange* — validation is assumed
 * to have been done by the caller.
 *
 * Generates GL_OUT_OF_MEMORY if the buffer has no storage or the driver
 * map fails.  Marks the min/max cache dirty for write maps.
 *
 * \return the mapped pointer, or NULL on failure.
 */
static void *
map_buffer_range(struct gl_context *ctx, struct gl_buffer_object *bufObj,
                 GLintptr offset, GLsizeiptr length, GLbitfield access,
                 const char *func)
{
   if (!bufObj->Size) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "%s(buffer size = 0)", func);
      return NULL;
   }

   void *map = _mesa_bufferobj_map_range(ctx, offset, length, access, bufObj,
                                         MAP_USER);
   if (!map) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "%s(map failed)", func);
   }
   else {
      /* The driver callback should have set all these fields.
       * This is important because other modules (like VBO) might call
       * the driver function directly.
       */
      assert(bufObj->Mappings[MAP_USER].Pointer == map);
      assert(bufObj->Mappings[MAP_USER].Length == length);
      assert(bufObj->Mappings[MAP_USER].Offset == offset);
      assert(bufObj->Mappings[MAP_USER].AccessFlags == access);
   }

   if (access & GL_MAP_WRITE_BIT) {
      bufObj->MinMaxCacheDirty = true;
   }

#ifdef VBO_DEBUG
   /* Debug build only: pre-fill whole-buffer write maps with i & 0xff so
    * the unmap path can report untouched bytes.
    */
   if (strstr(func, "Range") == NULL) { /* If not MapRange */
      printf("glMapBuffer(%u, sz %ld, access 0x%x)\n",
             bufObj->Name, bufObj->Size, access);
      /* Access must be write only */
      if ((access & GL_MAP_WRITE_BIT) && (!(access & ~GL_MAP_WRITE_BIT))) {
         GLuint i;
         GLubyte *b = (GLubyte *) bufObj->Mappings[MAP_USER].Pointer;
         for (i = 0; i < bufObj->Size; i++)
            b[i] = i & 0xff;
      }
   }
#endif

#ifdef BOUNDS_CHECK
   /* Debug build only: write the magic value 123 into the 100 guard bytes
    * so the unmap path can detect out-of-bounds writes.
    */
   if (strstr(func, "Range") == NULL) { /* If not MapRange */
      GLubyte *buf = (GLubyte *) bufObj->Mappings[MAP_USER].Pointer;
      GLuint i;
      /* buffer is 100 bytes larger than requested, fill with magic value */
      for (i = 0; i < 100; i++) {
         buf[bufObj->Size - i - 1] = 123;
      }
   }
#endif

   return map;
}
3725
3726 void * GLAPIENTRY
_mesa_MapBufferRange_no_error(GLenum target,GLintptr offset,GLsizeiptr length,GLbitfield access)3727 _mesa_MapBufferRange_no_error(GLenum target, GLintptr offset,
3728 GLsizeiptr length, GLbitfield access)
3729 {
3730 GET_CURRENT_CONTEXT(ctx);
3731
3732 struct gl_buffer_object **bufObjPtr = get_buffer_target(ctx, target, true);
3733 struct gl_buffer_object *bufObj = *bufObjPtr;
3734
3735 return map_buffer_range(ctx, bufObj, offset, length, access,
3736 "glMapBufferRange");
3737 }
3738
3739 void * GLAPIENTRY
_mesa_MapBufferRange(GLenum target,GLintptr offset,GLsizeiptr length,GLbitfield access)3740 _mesa_MapBufferRange(GLenum target, GLintptr offset, GLsizeiptr length,
3741 GLbitfield access)
3742 {
3743 GET_CURRENT_CONTEXT(ctx);
3744 struct gl_buffer_object *bufObj;
3745
3746 if (!ctx->Extensions.ARB_map_buffer_range) {
3747 _mesa_error(ctx, GL_INVALID_OPERATION,
3748 "glMapBufferRange(ARB_map_buffer_range not supported)");
3749 return NULL;
3750 }
3751
3752 bufObj = get_buffer(ctx, "glMapBufferRange", target, GL_INVALID_OPERATION);
3753 if (!bufObj)
3754 return NULL;
3755
3756 if (!validate_map_buffer_range(ctx, bufObj, offset, length, access,
3757 "glMapBufferRange"))
3758 return NULL;
3759
3760 return map_buffer_range(ctx, bufObj, offset, length, access,
3761 "glMapBufferRange");
3762 }
3763
3764 void * GLAPIENTRY
_mesa_MapNamedBufferRange_no_error(GLuint buffer,GLintptr offset,GLsizeiptr length,GLbitfield access)3765 _mesa_MapNamedBufferRange_no_error(GLuint buffer, GLintptr offset,
3766 GLsizeiptr length, GLbitfield access)
3767 {
3768 GET_CURRENT_CONTEXT(ctx);
3769 struct gl_buffer_object *bufObj = _mesa_lookup_bufferobj(ctx, buffer);
3770
3771 return map_buffer_range(ctx, bufObj, offset, length, access,
3772 "glMapNamedBufferRange");
3773 }
3774
3775 static void *
map_named_buffer_range(GLuint buffer,GLintptr offset,GLsizeiptr length,GLbitfield access,bool dsa_ext,const char * func)3776 map_named_buffer_range(GLuint buffer, GLintptr offset, GLsizeiptr length,
3777 GLbitfield access, bool dsa_ext, const char *func)
3778 {
3779 GET_CURRENT_CONTEXT(ctx);
3780 struct gl_buffer_object *bufObj = NULL;
3781
3782 if (!ctx->Extensions.ARB_map_buffer_range) {
3783 _mesa_error(ctx, GL_INVALID_OPERATION,
3784 "%s(ARB_map_buffer_range not supported)", func);
3785 return NULL;
3786 }
3787
3788 if (dsa_ext) {
3789 bufObj = _mesa_lookup_bufferobj(ctx, buffer);
3790 if (!handle_bind_buffer_gen(ctx, buffer, &bufObj, func, false))
3791 return NULL;
3792 } else {
3793 bufObj = _mesa_lookup_bufferobj_err(ctx, buffer, func);
3794 if (!bufObj)
3795 return NULL;
3796 }
3797
3798 if (!validate_map_buffer_range(ctx, bufObj, offset, length, access, func))
3799 return NULL;
3800
3801 return map_buffer_range(ctx, bufObj, offset, length, access, func);
3802 }
3803
3804 void * GLAPIENTRY
_mesa_MapNamedBufferRangeEXT(GLuint buffer,GLintptr offset,GLsizeiptr length,GLbitfield access)3805 _mesa_MapNamedBufferRangeEXT(GLuint buffer, GLintptr offset, GLsizeiptr length,
3806 GLbitfield access)
3807 {
3808 GET_CURRENT_CONTEXT(ctx);
3809 if (!buffer) {
3810 _mesa_error(ctx, GL_INVALID_OPERATION,
3811 "glMapNamedBufferRangeEXT(buffer=0)");
3812 return NULL;
3813 }
3814 return map_named_buffer_range(buffer, offset, length, access, true,
3815 "glMapNamedBufferRangeEXT");
3816 }
3817
3818 void * GLAPIENTRY
_mesa_MapNamedBufferRange(GLuint buffer,GLintptr offset,GLsizeiptr length,GLbitfield access)3819 _mesa_MapNamedBufferRange(GLuint buffer, GLintptr offset, GLsizeiptr length,
3820 GLbitfield access)
3821 {
3822 return map_named_buffer_range(buffer, offset, length, access, false,
3823 "glMapNamedBufferRange");
3824 }
3825
3826 /**
3827 * Converts GLenum access from MapBuffer and MapNamedBuffer into
3828 * flags for input to map_buffer_range.
3829 *
3830 * \return true if the type of requested access is permissible.
3831 */
3832 static bool
get_map_buffer_access_flags(struct gl_context * ctx,GLenum access,GLbitfield * flags)3833 get_map_buffer_access_flags(struct gl_context *ctx, GLenum access,
3834 GLbitfield *flags)
3835 {
3836 switch (access) {
3837 case GL_READ_ONLY_ARB:
3838 *flags = GL_MAP_READ_BIT;
3839 return _mesa_is_desktop_gl(ctx);
3840 case GL_WRITE_ONLY_ARB:
3841 *flags = GL_MAP_WRITE_BIT;
3842 return true;
3843 case GL_READ_WRITE_ARB:
3844 *flags = GL_MAP_READ_BIT | GL_MAP_WRITE_BIT;
3845 return _mesa_is_desktop_gl(ctx);
3846 default:
3847 *flags = 0;
3848 return false;
3849 }
3850 }
3851
3852 void * GLAPIENTRY
_mesa_MapBuffer_no_error(GLenum target,GLenum access)3853 _mesa_MapBuffer_no_error(GLenum target, GLenum access)
3854 {
3855 GET_CURRENT_CONTEXT(ctx);
3856
3857 GLbitfield accessFlags;
3858 get_map_buffer_access_flags(ctx, access, &accessFlags);
3859
3860 struct gl_buffer_object **bufObjPtr = get_buffer_target(ctx, target, true);
3861 struct gl_buffer_object *bufObj = *bufObjPtr;
3862
3863 return map_buffer_range(ctx, bufObj, 0, bufObj->Size, accessFlags,
3864 "glMapBuffer");
3865 }
3866
3867 void * GLAPIENTRY
_mesa_MapBuffer(GLenum target,GLenum access)3868 _mesa_MapBuffer(GLenum target, GLenum access)
3869 {
3870 GET_CURRENT_CONTEXT(ctx);
3871 struct gl_buffer_object *bufObj;
3872 GLbitfield accessFlags;
3873
3874 if (!get_map_buffer_access_flags(ctx, access, &accessFlags)) {
3875 _mesa_error(ctx, GL_INVALID_ENUM, "glMapBuffer(invalid access)");
3876 return NULL;
3877 }
3878
3879 bufObj = get_buffer(ctx, "glMapBuffer", target, GL_INVALID_OPERATION);
3880 if (!bufObj)
3881 return NULL;
3882
3883 if (!validate_map_buffer_range(ctx, bufObj, 0, bufObj->Size, accessFlags,
3884 "glMapBuffer"))
3885 return NULL;
3886
3887 return map_buffer_range(ctx, bufObj, 0, bufObj->Size, accessFlags,
3888 "glMapBuffer");
3889 }
3890
3891 void * GLAPIENTRY
_mesa_MapNamedBuffer_no_error(GLuint buffer,GLenum access)3892 _mesa_MapNamedBuffer_no_error(GLuint buffer, GLenum access)
3893 {
3894 GET_CURRENT_CONTEXT(ctx);
3895
3896 GLbitfield accessFlags;
3897 get_map_buffer_access_flags(ctx, access, &accessFlags);
3898
3899 struct gl_buffer_object *bufObj = _mesa_lookup_bufferobj(ctx, buffer);
3900
3901 return map_buffer_range(ctx, bufObj, 0, bufObj->Size, accessFlags,
3902 "glMapNamedBuffer");
3903 }
3904
3905 void * GLAPIENTRY
_mesa_MapNamedBuffer(GLuint buffer,GLenum access)3906 _mesa_MapNamedBuffer(GLuint buffer, GLenum access)
3907 {
3908 GET_CURRENT_CONTEXT(ctx);
3909 struct gl_buffer_object *bufObj;
3910 GLbitfield accessFlags;
3911
3912 if (!get_map_buffer_access_flags(ctx, access, &accessFlags)) {
3913 _mesa_error(ctx, GL_INVALID_ENUM, "glMapNamedBuffer(invalid access)");
3914 return NULL;
3915 }
3916
3917 bufObj = _mesa_lookup_bufferobj_err(ctx, buffer, "glMapNamedBuffer");
3918 if (!bufObj)
3919 return NULL;
3920
3921 if (!validate_map_buffer_range(ctx, bufObj, 0, bufObj->Size, accessFlags,
3922 "glMapNamedBuffer"))
3923 return NULL;
3924
3925 return map_buffer_range(ctx, bufObj, 0, bufObj->Size, accessFlags,
3926 "glMapNamedBuffer");
3927 }
3928
3929 void * GLAPIENTRY
_mesa_MapNamedBufferEXT(GLuint buffer,GLenum access)3930 _mesa_MapNamedBufferEXT(GLuint buffer, GLenum access)
3931 {
3932 GET_CURRENT_CONTEXT(ctx);
3933
3934 GLbitfield accessFlags;
3935 if (!buffer) {
3936 _mesa_error(ctx, GL_INVALID_OPERATION,
3937 "glMapNamedBufferEXT(buffer=0)");
3938 return NULL;
3939 }
3940 if (!get_map_buffer_access_flags(ctx, access, &accessFlags)) {
3941 _mesa_error(ctx, GL_INVALID_ENUM, "glMapNamedBufferEXT(invalid access)");
3942 return NULL;
3943 }
3944
3945 struct gl_buffer_object *bufObj = _mesa_lookup_bufferobj(ctx, buffer);
3946 if (!handle_bind_buffer_gen(ctx, buffer,
3947 &bufObj, "glMapNamedBufferEXT", false))
3948 return NULL;
3949
3950 if (!validate_map_buffer_range(ctx, bufObj, 0, bufObj->Size, accessFlags,
3951 "glMapNamedBufferEXT"))
3952 return NULL;
3953
3954 return map_buffer_range(ctx, bufObj, 0, bufObj->Size, accessFlags,
3955 "glMapNamedBufferEXT");
3956 }
3957
3958 static void
flush_mapped_buffer_range(struct gl_context * ctx,struct gl_buffer_object * bufObj,GLintptr offset,GLsizeiptr length,const char * func)3959 flush_mapped_buffer_range(struct gl_context *ctx,
3960 struct gl_buffer_object *bufObj,
3961 GLintptr offset, GLsizeiptr length,
3962 const char *func)
3963 {
3964 if (!ctx->Extensions.ARB_map_buffer_range) {
3965 _mesa_error(ctx, GL_INVALID_OPERATION,
3966 "%s(ARB_map_buffer_range not supported)", func);
3967 return;
3968 }
3969
3970 if (offset < 0) {
3971 _mesa_error(ctx, GL_INVALID_VALUE,
3972 "%s(offset %ld < 0)", func, (long) offset);
3973 return;
3974 }
3975
3976 if (length < 0) {
3977 _mesa_error(ctx, GL_INVALID_VALUE,
3978 "%s(length %ld < 0)", func, (long) length);
3979 return;
3980 }
3981
3982 if (!_mesa_bufferobj_mapped(bufObj, MAP_USER)) {
3983 /* buffer is not mapped */
3984 _mesa_error(ctx, GL_INVALID_OPERATION,
3985 "%s(buffer is not mapped)", func);
3986 return;
3987 }
3988
3989 if ((bufObj->Mappings[MAP_USER].AccessFlags &
3990 GL_MAP_FLUSH_EXPLICIT_BIT) == 0) {
3991 _mesa_error(ctx, GL_INVALID_OPERATION,
3992 "%s(GL_MAP_FLUSH_EXPLICIT_BIT not set)", func);
3993 return;
3994 }
3995
3996 if (offset + length > bufObj->Mappings[MAP_USER].Length) {
3997 _mesa_error(ctx, GL_INVALID_VALUE,
3998 "%s(offset %ld + length %ld > mapped length %ld)", func,
3999 (long) offset, (long) length,
4000 (long) bufObj->Mappings[MAP_USER].Length);
4001 return;
4002 }
4003
4004 assert(bufObj->Mappings[MAP_USER].AccessFlags & GL_MAP_WRITE_BIT);
4005
4006 _mesa_bufferobj_flush_mapped_range(ctx, offset, length, bufObj,
4007 MAP_USER);
4008 }
4009
4010 void GLAPIENTRY
_mesa_FlushMappedBufferRange_no_error(GLenum target,GLintptr offset,GLsizeiptr length)4011 _mesa_FlushMappedBufferRange_no_error(GLenum target, GLintptr offset,
4012 GLsizeiptr length)
4013 {
4014 GET_CURRENT_CONTEXT(ctx);
4015 struct gl_buffer_object **bufObjPtr = get_buffer_target(ctx, target, true);
4016 struct gl_buffer_object *bufObj = *bufObjPtr;
4017
4018 _mesa_bufferobj_flush_mapped_range(ctx, offset, length, bufObj,
4019 MAP_USER);
4020 }
4021
4022 void GLAPIENTRY
_mesa_FlushMappedBufferRange(GLenum target,GLintptr offset,GLsizeiptr length)4023 _mesa_FlushMappedBufferRange(GLenum target, GLintptr offset,
4024 GLsizeiptr length)
4025 {
4026 GET_CURRENT_CONTEXT(ctx);
4027 struct gl_buffer_object *bufObj;
4028
4029 bufObj = get_buffer(ctx, "glFlushMappedBufferRange", target,
4030 GL_INVALID_OPERATION);
4031 if (!bufObj)
4032 return;
4033
4034 flush_mapped_buffer_range(ctx, bufObj, offset, length,
4035 "glFlushMappedBufferRange");
4036 }
4037
4038 void GLAPIENTRY
_mesa_FlushMappedNamedBufferRange_no_error(GLuint buffer,GLintptr offset,GLsizeiptr length)4039 _mesa_FlushMappedNamedBufferRange_no_error(GLuint buffer, GLintptr offset,
4040 GLsizeiptr length)
4041 {
4042 GET_CURRENT_CONTEXT(ctx);
4043 struct gl_buffer_object *bufObj = _mesa_lookup_bufferobj(ctx, buffer);
4044
4045 _mesa_bufferobj_flush_mapped_range(ctx, offset, length, bufObj,
4046 MAP_USER);
4047 }
4048
4049 void GLAPIENTRY
_mesa_FlushMappedNamedBufferRange(GLuint buffer,GLintptr offset,GLsizeiptr length)4050 _mesa_FlushMappedNamedBufferRange(GLuint buffer, GLintptr offset,
4051 GLsizeiptr length)
4052 {
4053 GET_CURRENT_CONTEXT(ctx);
4054 struct gl_buffer_object *bufObj;
4055
4056 bufObj = _mesa_lookup_bufferobj_err(ctx, buffer,
4057 "glFlushMappedNamedBufferRange");
4058 if (!bufObj)
4059 return;
4060
4061 flush_mapped_buffer_range(ctx, bufObj, offset, length,
4062 "glFlushMappedNamedBufferRange");
4063 }
4064
4065 void GLAPIENTRY
_mesa_FlushMappedNamedBufferRangeEXT(GLuint buffer,GLintptr offset,GLsizeiptr length)4066 _mesa_FlushMappedNamedBufferRangeEXT(GLuint buffer, GLintptr offset,
4067 GLsizeiptr length)
4068 {
4069 GET_CURRENT_CONTEXT(ctx);
4070 struct gl_buffer_object *bufObj;
4071
4072 if (!buffer) {
4073 _mesa_error(ctx, GL_INVALID_OPERATION,
4074 "glFlushMappedNamedBufferRangeEXT(buffer=0)");
4075 return;
4076 }
4077
4078 bufObj = _mesa_lookup_bufferobj(ctx, buffer);
4079 if (!handle_bind_buffer_gen(ctx, buffer,
4080 &bufObj, "glFlushMappedNamedBufferRangeEXT", false))
4081 return;
4082
4083 flush_mapped_buffer_range(ctx, bufObj, offset, length,
4084 "glFlushMappedNamedBufferRangeEXT");
4085 }
4086
4087 static void
bind_buffer_range_uniform_buffer(struct gl_context * ctx,GLuint index,struct gl_buffer_object * bufObj,GLintptr offset,GLsizeiptr size)4088 bind_buffer_range_uniform_buffer(struct gl_context *ctx, GLuint index,
4089 struct gl_buffer_object *bufObj,
4090 GLintptr offset, GLsizeiptr size)
4091 {
4092 if (!bufObj) {
4093 offset = -1;
4094 size = -1;
4095 }
4096
4097 _mesa_reference_buffer_object(ctx, &ctx->UniformBuffer, bufObj);
4098 bind_uniform_buffer(ctx, index, bufObj, offset, size, GL_FALSE);
4099 }
4100
4101 /**
4102 * Bind a region of a buffer object to a uniform block binding point.
4103 * \param index the uniform buffer binding point index
4104 * \param bufObj the buffer object
4105 * \param offset offset to the start of buffer object region
4106 * \param size size of the buffer object region
4107 */
4108 static void
bind_buffer_range_uniform_buffer_err(struct gl_context * ctx,GLuint index,struct gl_buffer_object * bufObj,GLintptr offset,GLsizeiptr size)4109 bind_buffer_range_uniform_buffer_err(struct gl_context *ctx, GLuint index,
4110 struct gl_buffer_object *bufObj,
4111 GLintptr offset, GLsizeiptr size)
4112 {
4113 if (index >= ctx->Const.MaxUniformBufferBindings) {
4114 _mesa_error(ctx, GL_INVALID_VALUE, "glBindBufferRange(index=%d)", index);
4115 return;
4116 }
4117
4118 if (offset & (ctx->Const.UniformBufferOffsetAlignment - 1)) {
4119 _mesa_error(ctx, GL_INVALID_VALUE,
4120 "glBindBufferRange(offset misaligned %d/%d)", (int) offset,
4121 ctx->Const.UniformBufferOffsetAlignment);
4122 return;
4123 }
4124
4125 bind_buffer_range_uniform_buffer(ctx, index, bufObj, offset, size);
4126 }
4127
4128 static void
bind_buffer_range_shader_storage_buffer(struct gl_context * ctx,GLuint index,struct gl_buffer_object * bufObj,GLintptr offset,GLsizeiptr size)4129 bind_buffer_range_shader_storage_buffer(struct gl_context *ctx,
4130 GLuint index,
4131 struct gl_buffer_object *bufObj,
4132 GLintptr offset,
4133 GLsizeiptr size)
4134 {
4135 if (!bufObj) {
4136 offset = -1;
4137 size = -1;
4138 }
4139
4140 _mesa_reference_buffer_object(ctx, &ctx->ShaderStorageBuffer, bufObj);
4141 bind_shader_storage_buffer(ctx, index, bufObj, offset, size, GL_FALSE);
4142 }
4143
4144 /**
4145 * Bind a region of a buffer object to a shader storage block binding point.
4146 * \param index the shader storage buffer binding point index
4147 * \param bufObj the buffer object
4148 * \param offset offset to the start of buffer object region
4149 * \param size size of the buffer object region
4150 */
4151 static void
bind_buffer_range_shader_storage_buffer_err(struct gl_context * ctx,GLuint index,struct gl_buffer_object * bufObj,GLintptr offset,GLsizeiptr size)4152 bind_buffer_range_shader_storage_buffer_err(struct gl_context *ctx,
4153 GLuint index,
4154 struct gl_buffer_object *bufObj,
4155 GLintptr offset, GLsizeiptr size)
4156 {
4157 if (index >= ctx->Const.MaxShaderStorageBufferBindings) {
4158 _mesa_error(ctx, GL_INVALID_VALUE, "glBindBufferRange(index=%d)", index);
4159 return;
4160 }
4161
4162 if (offset & (ctx->Const.ShaderStorageBufferOffsetAlignment - 1)) {
4163 _mesa_error(ctx, GL_INVALID_VALUE,
4164 "glBindBufferRange(offset misaligned %d/%d)", (int) offset,
4165 ctx->Const.ShaderStorageBufferOffsetAlignment);
4166 return;
4167 }
4168
4169 bind_buffer_range_shader_storage_buffer(ctx, index, bufObj, offset, size);
4170 }
4171
4172 static void
bind_buffer_range_atomic_buffer(struct gl_context * ctx,GLuint index,struct gl_buffer_object * bufObj,GLintptr offset,GLsizeiptr size)4173 bind_buffer_range_atomic_buffer(struct gl_context *ctx, GLuint index,
4174 struct gl_buffer_object *bufObj,
4175 GLintptr offset, GLsizeiptr size)
4176 {
4177 if (!bufObj) {
4178 offset = -1;
4179 size = -1;
4180 }
4181
4182 _mesa_reference_buffer_object(ctx, &ctx->AtomicBuffer, bufObj);
4183 bind_atomic_buffer(ctx, index, bufObj, offset, size, GL_FALSE);
4184 }
4185
4186 /**
4187 * Bind a region of a buffer object to an atomic storage block binding point.
4188 * \param index the shader storage buffer binding point index
4189 * \param bufObj the buffer object
4190 * \param offset offset to the start of buffer object region
4191 * \param size size of the buffer object region
4192 */
4193 static void
bind_buffer_range_atomic_buffer_err(struct gl_context * ctx,GLuint index,struct gl_buffer_object * bufObj,GLintptr offset,GLsizeiptr size)4194 bind_buffer_range_atomic_buffer_err(struct gl_context *ctx,
4195 GLuint index,
4196 struct gl_buffer_object *bufObj,
4197 GLintptr offset, GLsizeiptr size)
4198 {
4199 if (index >= ctx->Const.MaxAtomicBufferBindings) {
4200 _mesa_error(ctx, GL_INVALID_VALUE, "glBindBufferRange(index=%d)", index);
4201 return;
4202 }
4203
4204 if (offset & (ATOMIC_COUNTER_SIZE - 1)) {
4205 _mesa_error(ctx, GL_INVALID_VALUE,
4206 "glBindBufferRange(offset misaligned %d/%d)", (int) offset,
4207 ATOMIC_COUNTER_SIZE);
4208 return;
4209 }
4210
4211 bind_buffer_range_atomic_buffer(ctx, index, bufObj, offset, size);
4212 }
4213
4214 static inline bool
bind_buffers_check_offset_and_size(struct gl_context * ctx,GLuint index,const GLintptr * offsets,const GLsizeiptr * sizes)4215 bind_buffers_check_offset_and_size(struct gl_context *ctx,
4216 GLuint index,
4217 const GLintptr *offsets,
4218 const GLsizeiptr *sizes)
4219 {
4220 if (offsets[index] < 0) {
4221 /* The ARB_multi_bind spec says:
4222 *
4223 * "An INVALID_VALUE error is generated by BindBuffersRange if any
4224 * value in <offsets> is less than zero (per binding)."
4225 */
4226 _mesa_error(ctx, GL_INVALID_VALUE,
4227 "glBindBuffersRange(offsets[%u]=%" PRId64 " < 0)",
4228 index, (int64_t) offsets[index]);
4229 return false;
4230 }
4231
4232 if (sizes[index] <= 0) {
4233 /* The ARB_multi_bind spec says:
4234 *
4235 * "An INVALID_VALUE error is generated by BindBuffersRange if any
4236 * value in <sizes> is less than or equal to zero (per binding)."
4237 */
4238 _mesa_error(ctx, GL_INVALID_VALUE,
4239 "glBindBuffersRange(sizes[%u]=%" PRId64 " <= 0)",
4240 index, (int64_t) sizes[index]);
4241 return false;
4242 }
4243
4244 return true;
4245 }
4246
4247 static bool
error_check_bind_uniform_buffers(struct gl_context * ctx,GLuint first,GLsizei count,const char * caller)4248 error_check_bind_uniform_buffers(struct gl_context *ctx,
4249 GLuint first, GLsizei count,
4250 const char *caller)
4251 {
4252 if (!ctx->Extensions.ARB_uniform_buffer_object) {
4253 _mesa_error(ctx, GL_INVALID_ENUM,
4254 "%s(target=GL_UNIFORM_BUFFER)", caller);
4255 return false;
4256 }
4257
4258 /* The ARB_multi_bind_spec says:
4259 *
4260 * "An INVALID_OPERATION error is generated if <first> + <count> is
4261 * greater than the number of target-specific indexed binding points,
4262 * as described in section 6.7.1."
4263 */
4264 if (first + count > ctx->Const.MaxUniformBufferBindings) {
4265 _mesa_error(ctx, GL_INVALID_OPERATION,
4266 "%s(first=%u + count=%d > the value of "
4267 "GL_MAX_UNIFORM_BUFFER_BINDINGS=%u)",
4268 caller, first, count,
4269 ctx->Const.MaxUniformBufferBindings);
4270 return false;
4271 }
4272
4273 return true;
4274 }
4275
4276 static bool
error_check_bind_shader_storage_buffers(struct gl_context * ctx,GLuint first,GLsizei count,const char * caller)4277 error_check_bind_shader_storage_buffers(struct gl_context *ctx,
4278 GLuint first, GLsizei count,
4279 const char *caller)
4280 {
4281 if (!ctx->Extensions.ARB_shader_storage_buffer_object) {
4282 _mesa_error(ctx, GL_INVALID_ENUM,
4283 "%s(target=GL_SHADER_STORAGE_BUFFER)", caller);
4284 return false;
4285 }
4286
4287 /* The ARB_multi_bind_spec says:
4288 *
4289 * "An INVALID_OPERATION error is generated if <first> + <count> is
4290 * greater than the number of target-specific indexed binding points,
4291 * as described in section 6.7.1."
4292 */
4293 if (first + count > ctx->Const.MaxShaderStorageBufferBindings) {
4294 _mesa_error(ctx, GL_INVALID_OPERATION,
4295 "%s(first=%u + count=%d > the value of "
4296 "GL_MAX_SHADER_STORAGE_BUFFER_BINDINGS=%u)",
4297 caller, first, count,
4298 ctx->Const.MaxShaderStorageBufferBindings);
4299 return false;
4300 }
4301
4302 return true;
4303 }
4304
4305 /**
4306 * Unbind all uniform buffers in the range
4307 * <first> through <first>+<count>-1
4308 */
4309 static void
unbind_uniform_buffers(struct gl_context * ctx,GLuint first,GLsizei count)4310 unbind_uniform_buffers(struct gl_context *ctx, GLuint first, GLsizei count)
4311 {
4312 for (int i = 0; i < count; i++)
4313 set_buffer_binding(ctx, &ctx->UniformBufferBindings[first + i],
4314 NULL, -1, -1, GL_TRUE, 0);
4315 }
4316
4317 /**
4318 * Unbind all shader storage buffers in the range
4319 * <first> through <first>+<count>-1
4320 */
4321 static void
unbind_shader_storage_buffers(struct gl_context * ctx,GLuint first,GLsizei count)4322 unbind_shader_storage_buffers(struct gl_context *ctx, GLuint first,
4323 GLsizei count)
4324 {
4325 for (int i = 0; i < count; i++)
4326 set_buffer_binding(ctx, &ctx->ShaderStorageBufferBindings[first + i],
4327 NULL, -1, -1, GL_TRUE, 0);
4328 }
4329
/**
 * Worker shared by glBindBuffersBase and glBindBuffersRange for target
 * GL_UNIFORM_BUFFER: updates binding points <first>..<first>+<count>-1.
 *
 * \param first    first uniform buffer binding point to update
 * \param count    number of binding points to update
 * \param buffers  array of <count> buffer names, or NULL to unbind them all
 * \param range    true for BindBuffersRange (offsets/sizes are used),
 *                 false for BindBuffersBase (whole buffers are bound)
 * \param offsets  per-binding start offsets (ignored unless range is true)
 * \param sizes    per-binding sizes (ignored unless range is true)
 * \param caller   API entry point name for error messages
 */
static void
bind_uniform_buffers(struct gl_context *ctx, GLuint first, GLsizei count,
                     const GLuint *buffers,
                     bool range,
                     const GLintptr *offsets, const GLsizeiptr *sizes,
                     const char *caller)
{
   if (!error_check_bind_uniform_buffers(ctx, first, count, caller))
      return;

   /* Assume that at least one binding will be changed */
   FLUSH_VERTICES(ctx, 0, 0);
   ctx->NewDriverState |= ST_NEW_UNIFORM_BUFFER;

   if (!buffers) {
      /* The ARB_multi_bind spec says:
       *
       *   "If <buffers> is NULL, all bindings from <first> through
       *    <first>+<count>-1 are reset to their unbound (zero) state.
       *    In this case, the offsets and sizes associated with the
       *    binding points are set to default values, ignoring
       *    <offsets> and <sizes>."
       */
      unbind_uniform_buffers(ctx, first, count);
      return;
   }

   /* Note that the error semantics for multi-bind commands differ from
    * those of other GL commands.
    *
    * The Issues section in the ARB_multi_bind spec says:
    *
    *    "(11) Typically, OpenGL specifies that if an error is generated by a
    *          command, that command has no effect.  This is somewhat
    *          unfortunate for multi-bind commands, because it would require a
    *          first pass to scan the entire list of bound objects for errors
    *          and then a second pass to actually perform the bindings.
    *          Should we have different error semantics?
    *
    *       RESOLVED:  Yes.  In this specification, when the parameters for
    *       one of the <count> binding points are invalid, that binding point
    *       is not updated and an error will be generated.  However, other
    *       binding points in the same command will be updated if their
    *       parameters are valid and no other error occurs."
    */

   _mesa_HashLockMaybeLocked(&ctx->Shared->BufferObjects,
                             ctx->BufferObjectsLocked);

   /* Per the resolution above: an invalid entry skips only that binding
    * point (continue), never the whole command.
    */
   for (int i = 0; i < count; i++) {
      struct gl_buffer_binding *binding =
         &ctx->UniformBufferBindings[first + i];
      GLintptr offset = 0;
      GLsizeiptr size = 0;

      if (range) {
         if (!bind_buffers_check_offset_and_size(ctx, i, offsets, sizes))
            continue;

         /* The ARB_multi_bind spec says:
          *
          *   "An INVALID_VALUE error is generated by BindBuffersRange if any
          *    pair of values in <offsets> and <sizes> does not respectively
          *    satisfy the constraints described for those parameters for the
          *    specified target, as described in section 6.7.1 (per binding)."
          *
          * Section 6.7.1 refers to table 6.5, which says:
          *
          *   "┌───────────────────────────────────────────────────────────────┐
          *    │ Uniform buffer array bindings (see sec. 7.6)                  │
          *    ├─────────────────────┬─────────────────────────────────────────┤
          *    │  ...                │  ...                                    │
          *    │  offset restriction │  multiple of value of UNIFORM_BUFFER_-  │
          *    │                     │  OFFSET_ALIGNMENT                       │
          *    │  ...                │  ...                                    │
          *    │  size restriction   │  none                                   │
          *    └─────────────────────┴─────────────────────────────────────────┘"
          */
         if (offsets[i] & (ctx->Const.UniformBufferOffsetAlignment - 1)) {
            _mesa_error(ctx, GL_INVALID_VALUE,
                        "glBindBuffersRange(offsets[%u]=%" PRId64
                        " is misaligned; it must be a multiple of the value of "
                        "GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT=%u when "
                        "target=GL_UNIFORM_BUFFER)",
                        i, (int64_t) offsets[i],
                        ctx->Const.UniformBufferOffsetAlignment);
            continue;
         }

         offset = offsets[i];
         size = sizes[i];
      }

      set_buffer_multi_binding(ctx, buffers, i, caller,
                               binding, offset, size, range,
                               USAGE_UNIFORM_BUFFER);
   }

   _mesa_HashUnlockMaybeLocked(&ctx->Shared->BufferObjects,
                               ctx->BufferObjectsLocked);
}
4431
/**
 * Worker shared by glBindBuffersBase and glBindBuffersRange for target
 * GL_SHADER_STORAGE_BUFFER: updates binding points
 * <first>..<first>+<count>-1.
 *
 * \param first    first shader storage buffer binding point to update
 * \param count    number of binding points to update
 * \param buffers  array of <count> buffer names, or NULL to unbind them all
 * \param range    true for BindBuffersRange (offsets/sizes are used),
 *                 false for BindBuffersBase (whole buffers are bound)
 * \param offsets  per-binding start offsets (ignored unless range is true)
 * \param sizes    per-binding sizes (ignored unless range is true)
 * \param caller   API entry point name for error messages
 */
static void
bind_shader_storage_buffers(struct gl_context *ctx, GLuint first,
                            GLsizei count, const GLuint *buffers,
                            bool range,
                            const GLintptr *offsets,
                            const GLsizeiptr *sizes,
                            const char *caller)
{
   if (!error_check_bind_shader_storage_buffers(ctx, first, count, caller))
      return;

   /* Assume that at least one binding will be changed */
   FLUSH_VERTICES(ctx, 0, 0);
   ctx->NewDriverState |= ST_NEW_STORAGE_BUFFER;

   if (!buffers) {
      /* The ARB_multi_bind spec says:
       *
       *   "If <buffers> is NULL, all bindings from <first> through
       *    <first>+<count>-1 are reset to their unbound (zero) state.
       *    In this case, the offsets and sizes associated with the
       *    binding points are set to default values, ignoring
       *    <offsets> and <sizes>."
       */
      unbind_shader_storage_buffers(ctx, first, count);
      return;
   }

   /* Note that the error semantics for multi-bind commands differ from
    * those of other GL commands.
    *
    * The Issues section in the ARB_multi_bind spec says:
    *
    *    "(11) Typically, OpenGL specifies that if an error is generated by a
    *          command, that command has no effect.  This is somewhat
    *          unfortunate for multi-bind commands, because it would require a
    *          first pass to scan the entire list of bound objects for errors
    *          and then a second pass to actually perform the bindings.
    *          Should we have different error semantics?
    *
    *       RESOLVED:  Yes.  In this specification, when the parameters for
    *       one of the <count> binding points are invalid, that binding point
    *       is not updated and an error will be generated.  However, other
    *       binding points in the same command will be updated if their
    *       parameters are valid and no other error occurs."
    */

   _mesa_HashLockMaybeLocked(&ctx->Shared->BufferObjects,
                             ctx->BufferObjectsLocked);

   /* Per the resolution above: an invalid entry skips only that binding
    * point (continue), never the whole command.
    */
   for (int i = 0; i < count; i++) {
      struct gl_buffer_binding *binding =
         &ctx->ShaderStorageBufferBindings[first + i];
      GLintptr offset = 0;
      GLsizeiptr size = 0;

      if (range) {
         if (!bind_buffers_check_offset_and_size(ctx, i, offsets, sizes))
            continue;

         /* The ARB_multi_bind spec says:
          *
          *   "An INVALID_VALUE error is generated by BindBuffersRange if any
          *    pair of values in <offsets> and <sizes> does not respectively
          *    satisfy the constraints described for those parameters for the
          *    specified target, as described in section 6.7.1 (per binding)."
          *
          * Section 6.7.1 refers to table 6.5, which says:
          *
          *   "┌───────────────────────────────────────────────────────────────┐
          *    │ Shader storage buffer array bindings (see sec. 7.8)           │
          *    ├─────────────────────┬─────────────────────────────────────────┤
          *    │  ...                │  ...                                    │
          *    │  offset restriction │  multiple of value of SHADER_STORAGE_-  │
          *    │                     │  BUFFER_OFFSET_ALIGNMENT                │
          *    │  ...                │  ...                                    │
          *    │  size restriction   │  none                                   │
          *    └─────────────────────┴─────────────────────────────────────────┘"
          */
         if (offsets[i] & (ctx->Const.ShaderStorageBufferOffsetAlignment - 1)) {
            _mesa_error(ctx, GL_INVALID_VALUE,
                        "glBindBuffersRange(offsets[%u]=%" PRId64
                        " is misaligned; it must be a multiple of the value of "
                        "GL_SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT=%u when "
                        "target=GL_SHADER_STORAGE_BUFFER)",
                        i, (int64_t) offsets[i],
                        ctx->Const.ShaderStorageBufferOffsetAlignment);
            continue;
         }

         offset = offsets[i];
         size = sizes[i];
      }

      set_buffer_multi_binding(ctx, buffers, i, caller,
                               binding, offset, size, range,
                               USAGE_SHADER_STORAGE_BUFFER);
   }

   _mesa_HashUnlockMaybeLocked(&ctx->Shared->BufferObjects,
                               ctx->BufferObjectsLocked);
}
4534
4535 static bool
error_check_bind_xfb_buffers(struct gl_context * ctx,struct gl_transform_feedback_object * tfObj,GLuint first,GLsizei count,const char * caller)4536 error_check_bind_xfb_buffers(struct gl_context *ctx,
4537 struct gl_transform_feedback_object *tfObj,
4538 GLuint first, GLsizei count, const char *caller)
4539 {
4540 if (!ctx->Extensions.EXT_transform_feedback) {
4541 _mesa_error(ctx, GL_INVALID_ENUM,
4542 "%s(target=GL_TRANSFORM_FEEDBACK_BUFFER)", caller);
4543 return false;
4544 }
4545
4546 /* Page 398 of the PDF of the OpenGL 4.4 (Core Profile) spec says:
4547 *
4548 * "An INVALID_OPERATION error is generated :
4549 *
4550 * ...
4551 * • by BindBufferRange or BindBufferBase if target is TRANSFORM_-
4552 * FEEDBACK_BUFFER and transform feedback is currently active."
4553 *
4554 * We assume that this is also meant to apply to BindBuffersRange
4555 * and BindBuffersBase.
4556 */
4557 if (tfObj->Active) {
4558 _mesa_error(ctx, GL_INVALID_OPERATION,
4559 "%s(Changing transform feedback buffers while "
4560 "transform feedback is active)", caller);
4561 return false;
4562 }
4563
4564 /* The ARB_multi_bind_spec says:
4565 *
4566 * "An INVALID_OPERATION error is generated if <first> + <count> is
4567 * greater than the number of target-specific indexed binding points,
4568 * as described in section 6.7.1."
4569 */
4570 if (first + count > ctx->Const.MaxTransformFeedbackBuffers) {
4571 _mesa_error(ctx, GL_INVALID_OPERATION,
4572 "%s(first=%u + count=%d > the value of "
4573 "GL_MAX_TRANSFORM_FEEDBACK_BUFFERS=%u)",
4574 caller, first, count,
4575 ctx->Const.MaxTransformFeedbackBuffers);
4576 return false;
4577 }
4578
4579 return true;
4580 }
4581
4582 /**
4583 * Unbind all transform feedback buffers in the range
4584 * <first> through <first>+<count>-1
4585 */
4586 static void
unbind_xfb_buffers(struct gl_context * ctx,struct gl_transform_feedback_object * tfObj,GLuint first,GLsizei count)4587 unbind_xfb_buffers(struct gl_context *ctx,
4588 struct gl_transform_feedback_object *tfObj,
4589 GLuint first, GLsizei count)
4590 {
4591 for (int i = 0; i < count; i++)
4592 _mesa_set_transform_feedback_binding(ctx, tfObj, first + i,
4593 NULL, 0, 0);
4594 }
4595
/**
 * Implementation of glBindBuffersBase/glBindBuffersRange for
 * target GL_TRANSFORM_FEEDBACK_BUFFER: (re)bind <count> buffers to the
 * indexed binding points [<first>, <first>+<count>-1] of the currently
 * bound transform feedback object.
 *
 * \param buffers  array of <count> buffer names, or NULL to unbind the range
 * \param range    true for BindBuffersRange semantics (offsets/sizes apply)
 * \param offsets  per-binding start offsets, only read when \p range is true
 * \param sizes    per-binding sizes, only read when \p range is true
 * \param caller   API entry point name, used in error messages
 */
static void
bind_xfb_buffers(struct gl_context *ctx,
                 GLuint first, GLsizei count,
                 const GLuint *buffers,
                 bool range,
                 const GLintptr *offsets,
                 const GLsizeiptr *sizes,
                 const char *caller)
{
   struct gl_transform_feedback_object *tfObj =
      ctx->TransformFeedback.CurrentObject;

   if (!error_check_bind_xfb_buffers(ctx, tfObj, first, count, caller))
      return;

   /* Assume that at least one binding will be changed */
   FLUSH_VERTICES(ctx, 0, 0);

   if (!buffers) {
      /* The ARB_multi_bind spec says:
       *
       *   "If <buffers> is NULL, all bindings from <first> through
       *    <first>+<count>-1 are reset to their unbound (zero) state.
       *    In this case, the offsets and sizes associated with the
       *    binding points are set to default values, ignoring
       *    <offsets> and <sizes>."
       */
      unbind_xfb_buffers(ctx, tfObj, first, count);
      return;
   }

   /* Note that the error semantics for multi-bind commands differ from
    * those of other GL commands.
    *
    * The Issues section in the ARB_multi_bind spec says:
    *
    *    "(11) Typically, OpenGL specifies that if an error is generated by a
    *          command, that command has no effect.  This is somewhat
    *          unfortunate for multi-bind commands, because it would require a
    *          first pass to scan the entire list of bound objects for errors
    *          and then a second pass to actually perform the bindings.
    *          Should we have different error semantics?
    *
    *       RESOLVED:  Yes.  In this specification, when the parameters for
    *       one of the <count> binding points are invalid, that binding point
    *       is not updated and an error will be generated.  However, other
    *       binding points in the same command will be updated if their
    *       parameters are valid and no other error occurs."
    */

   _mesa_HashLockMaybeLocked(&ctx->Shared->BufferObjects,
                             ctx->BufferObjectsLocked);

   for (int i = 0; i < count; i++) {
      const GLuint index = first + i;
      struct gl_buffer_object * const boundBufObj = tfObj->Buffers[index];
      struct gl_buffer_object *bufObj;
      GLintptr offset = 0;
      GLsizeiptr size = 0;

      if (range) {
         /* An invalid offset/size pair only skips this binding point
          * (per the issue (11) resolution above). */
         if (!bind_buffers_check_offset_and_size(ctx, i, offsets, sizes))
            continue;

         /* The ARB_multi_bind spec says:
          *
          *   "An INVALID_VALUE error is generated by BindBuffersRange if any
          *    pair of values in <offsets> and <sizes> does not respectively
          *    satisfy the constraints described for those parameters for the
          *    specified target, as described in section 6.7.1 (per binding)."
          *
          * Section 6.7.1 refers to table 6.5, which says:
          *
          *   "┌───────────────────────────────────────────────────────────────┐
          *    │ Transform feedback array bindings (see sec. 13.2.2)           │
          *    ├───────────────────────┬───────────────────────────────────────┤
          *    │    ...                │    ...                                │
          *    │    offset restriction │    multiple of 4                      │
          *    │    ...                │    ...                                │
          *    │    size restriction   │    multiple of 4                      │
          *    └───────────────────────┴───────────────────────────────────────┘"
          */
         if (offsets[i] & 0x3) {
            _mesa_error(ctx, GL_INVALID_VALUE,
                        "glBindBuffersRange(offsets[%u]=%" PRId64
                        " is misaligned; it must be a multiple of 4 when "
                        "target=GL_TRANSFORM_FEEDBACK_BUFFER)",
                        i, (int64_t) offsets[i]);
            continue;
         }

         if (sizes[i] & 0x3) {
            _mesa_error(ctx, GL_INVALID_VALUE,
                        "glBindBuffersRange(sizes[%u]=%" PRId64
                        " is misaligned; it must be a multiple of 4 when "
                        "target=GL_TRANSFORM_FEEDBACK_BUFFER)",
                        i, (int64_t) sizes[i]);
            continue;
         }

         offset = offsets[i];
         size = sizes[i];
      }

      /* Fast path: the requested buffer is already bound at this index, so
       * skip the name-to-object hash lookup. */
      if (boundBufObj && boundBufObj->Name == buffers[i])
         bufObj = boundBufObj;
      else {
         bool error;
         bufObj = _mesa_multi_bind_lookup_bufferobj(ctx, buffers, i, caller,
                                                    &error);
         if (error)
            continue;
      }

      _mesa_set_transform_feedback_binding(ctx, tfObj, index, bufObj,
                                           offset, size);
   }

   _mesa_HashUnlockMaybeLocked(&ctx->Shared->BufferObjects,
                               ctx->BufferObjectsLocked);
}
4717
4718 static bool
error_check_bind_atomic_buffers(struct gl_context * ctx,GLuint first,GLsizei count,const char * caller)4719 error_check_bind_atomic_buffers(struct gl_context *ctx,
4720 GLuint first, GLsizei count,
4721 const char *caller)
4722 {
4723 if (!ctx->Extensions.ARB_shader_atomic_counters) {
4724 _mesa_error(ctx, GL_INVALID_ENUM,
4725 "%s(target=GL_ATOMIC_COUNTER_BUFFER)", caller);
4726 return false;
4727 }
4728
4729 /* The ARB_multi_bind_spec says:
4730 *
4731 * "An INVALID_OPERATION error is generated if <first> + <count> is
4732 * greater than the number of target-specific indexed binding points,
4733 * as described in section 6.7.1."
4734 */
4735 if (first + count > ctx->Const.MaxAtomicBufferBindings) {
4736 _mesa_error(ctx, GL_INVALID_OPERATION,
4737 "%s(first=%u + count=%d > the value of "
4738 "GL_MAX_ATOMIC_BUFFER_BINDINGS=%u)",
4739 caller, first, count, ctx->Const.MaxAtomicBufferBindings);
4740 return false;
4741 }
4742
4743 return true;
4744 }
4745
4746 /**
4747 * Unbind all atomic counter buffers in the range
4748 * <first> through <first>+<count>-1
4749 */
4750 static void
unbind_atomic_buffers(struct gl_context * ctx,GLuint first,GLsizei count)4751 unbind_atomic_buffers(struct gl_context *ctx, GLuint first, GLsizei count)
4752 {
4753 for (int i = 0; i < count; i++)
4754 set_buffer_binding(ctx, &ctx->AtomicBufferBindings[first + i],
4755 NULL, -1, -1, GL_TRUE, 0);
4756 }
4757
/**
 * Implementation of glBindBuffersBase/glBindBuffersRange for
 * target GL_ATOMIC_COUNTER_BUFFER: (re)bind <count> buffers to the
 * indexed atomic counter binding points starting at <first>.
 *
 * \param buffers  array of <count> buffer names, or NULL to unbind the range
 * \param range    true for BindBuffersRange semantics (offsets/sizes apply)
 * \param offsets  per-binding start offsets, only read when \p range is true
 * \param sizes    per-binding sizes, only read when \p range is true
 * \param caller   API entry point name, used in error messages
 */
static void
bind_atomic_buffers(struct gl_context *ctx,
                    GLuint first,
                    GLsizei count,
                    const GLuint *buffers,
                    bool range,
                    const GLintptr *offsets,
                    const GLsizeiptr *sizes,
                    const char *caller)
{
   if (!error_check_bind_atomic_buffers(ctx, first, count, caller))
      return;

   /* Assume that at least one binding will be changed */
   FLUSH_VERTICES(ctx, 0, 0);
   ctx->NewDriverState |= ctx->DriverFlags.NewAtomicBuffer;

   if (!buffers) {
      /* The ARB_multi_bind spec says:
       *
       *   "If <buffers> is NULL, all bindings from <first> through
       *    <first>+<count>-1 are reset to their unbound (zero) state.
       *    In this case, the offsets and sizes associated with the
       *    binding points are set to default values, ignoring
       *    <offsets> and <sizes>."
       */
      unbind_atomic_buffers(ctx, first, count);
      return;
   }

   /* Note that the error semantics for multi-bind commands differ from
    * those of other GL commands.
    *
    * The Issues section in the ARB_multi_bind spec says:
    *
    *    "(11) Typically, OpenGL specifies that if an error is generated by a
    *          command, that command has no effect.  This is somewhat
    *          unfortunate for multi-bind commands, because it would require a
    *          first pass to scan the entire list of bound objects for errors
    *          and then a second pass to actually perform the bindings.
    *          Should we have different error semantics?
    *
    *       RESOLVED:  Yes.  In this specification, when the parameters for
    *       one of the <count> binding points are invalid, that binding point
    *       is not updated and an error will be generated.  However, other
    *       binding points in the same command will be updated if their
    *       parameters are valid and no other error occurs."
    */

   _mesa_HashLockMaybeLocked(&ctx->Shared->BufferObjects,
                             ctx->BufferObjectsLocked);

   for (int i = 0; i < count; i++) {
      struct gl_buffer_binding *binding =
         &ctx->AtomicBufferBindings[first + i];
      GLintptr offset = 0;
      GLsizeiptr size = 0;

      if (range) {
         /* An invalid offset/size pair only skips this binding point
          * (per the issue (11) resolution above). */
         if (!bind_buffers_check_offset_and_size(ctx, i, offsets, sizes))
            continue;

         /* The ARB_multi_bind spec says:
          *
          *   "An INVALID_VALUE error is generated by BindBuffersRange if any
          *    pair of values in <offsets> and <sizes> does not respectively
          *    satisfy the constraints described for those parameters for the
          *    specified target, as described in section 6.7.1 (per binding)."
          *
          * Section 6.7.1 refers to table 6.5, which says:
          *
          *   "┌───────────────────────────────────────────────────────────────┐
          *    │ Atomic counter array bindings (see sec. 7.7.2)                │
          *    ├───────────────────────┬───────────────────────────────────────┤
          *    │    ...                │    ...                                │
          *    │    offset restriction │    multiple of 4                      │
          *    │    ...                │    ...                                │
          *    │    size restriction   │    none                               │
          *    └───────────────────────┴───────────────────────────────────────┘"
          */
         if (offsets[i] & (ATOMIC_COUNTER_SIZE - 1)) {
            _mesa_error(ctx, GL_INVALID_VALUE,
                        "glBindBuffersRange(offsets[%u]=%" PRId64
                        " is misaligned; it must be a multiple of %d when "
                        "target=GL_ATOMIC_COUNTER_BUFFER)",
                        i, (int64_t) offsets[i], ATOMIC_COUNTER_SIZE);
            continue;
         }

         offset = offsets[i];
         size = sizes[i];
      }

      set_buffer_multi_binding(ctx, buffers, i, caller,
                               binding, offset, size, range,
                               USAGE_ATOMIC_COUNTER_BUFFER);
   }

   _mesa_HashUnlockMaybeLocked(&ctx->Shared->BufferObjects,
                               ctx->BufferObjectsLocked);
}
4859
/**
 * Shared implementation of glBindBufferRange and its KHR_no_error variant:
 * bind <size> bytes of <buffer> starting at <offset> to indexed binding
 * point <index> of <target>.
 *
 * \param no_error  when true, skip all error checking (KHR_no_error
 *                  contract: the caller guarantees valid parameters)
 */
static ALWAYS_INLINE void
bind_buffer_range(GLenum target, GLuint index, GLuint buffer, GLintptr offset,
                  GLsizeiptr size, bool no_error)
{
   GET_CURRENT_CONTEXT(ctx);
   struct gl_buffer_object *bufObj;

   if (MESA_VERBOSE & VERBOSE_API) {
      _mesa_debug(ctx, "glBindBufferRange(%s, %u, %u, %lu, %lu)\n",
                  _mesa_enum_to_string(target), index, buffer,
                  (unsigned long) offset, (unsigned long) size);
   }

   if (buffer == 0) {
      /* Name zero unbinds: a NULL object is passed to the per-target
       * bind helpers below. */
      bufObj = NULL;
   } else {
      bufObj = _mesa_lookup_bufferobj(ctx, buffer);
      if (!handle_bind_buffer_gen(ctx, buffer,
                                  &bufObj, "glBindBufferRange", no_error))
         return;
   }

   if (no_error) {
      switch (target) {
      case GL_TRANSFORM_FEEDBACK_BUFFER:
         _mesa_bind_buffer_range_xfb(ctx, ctx->TransformFeedback.CurrentObject,
                                     index, bufObj, offset, size);
         return;
      case GL_UNIFORM_BUFFER:
         bind_buffer_range_uniform_buffer(ctx, index, bufObj, offset, size);
         return;
      case GL_SHADER_STORAGE_BUFFER:
         bind_buffer_range_shader_storage_buffer(ctx, index, bufObj, offset,
                                                 size);
         return;
      case GL_ATOMIC_COUNTER_BUFFER:
         bind_buffer_range_atomic_buffer(ctx, index, bufObj, offset, size);
         return;
      default:
         unreachable("invalid BindBufferRange target with KHR_no_error");
      }
   } else {
      /* The size check only applies when an actual buffer is bound;
       * size is ignored when unbinding with buffer 0. */
      if (buffer != 0) {
         if (size <= 0) {
            _mesa_error(ctx, GL_INVALID_VALUE, "glBindBufferRange(size=%d)",
                        (int) size);
            return;
         }
      }

      switch (target) {
      case GL_TRANSFORM_FEEDBACK_BUFFER:
         if (!_mesa_validate_buffer_range_xfb(ctx,
                                              ctx->TransformFeedback.CurrentObject,
                                              index, bufObj, offset, size,
                                              false))
            return;

         _mesa_bind_buffer_range_xfb(ctx, ctx->TransformFeedback.CurrentObject,
                                     index, bufObj, offset, size);
         return;
      case GL_UNIFORM_BUFFER:
         bind_buffer_range_uniform_buffer_err(ctx, index, bufObj, offset,
                                              size);
         return;
      case GL_SHADER_STORAGE_BUFFER:
         bind_buffer_range_shader_storage_buffer_err(ctx, index, bufObj,
                                                     offset, size);
         return;
      case GL_ATOMIC_COUNTER_BUFFER:
         bind_buffer_range_atomic_buffer_err(ctx, index, bufObj,
                                             offset, size);
         return;
      default:
         _mesa_error(ctx, GL_INVALID_ENUM, "glBindBufferRange(target)");
         return;
      }
   }
}
4939
/** KHR_no_error entry point for glBindBufferRange: no validation performed. */
void GLAPIENTRY
_mesa_BindBufferRange_no_error(GLenum target, GLuint index, GLuint buffer,
                               GLintptr offset, GLsizeiptr size)
{
   bind_buffer_range(target, index, buffer, offset, size, true);
}
4946
/** Standard (error-checked) entry point for glBindBufferRange. */
void GLAPIENTRY
_mesa_BindBufferRange(GLenum target, GLuint index,
                      GLuint buffer, GLintptr offset, GLsizeiptr size)
{
   bind_buffer_range(target, index, buffer, offset, size, false);
}
4953
/**
 * glBindBufferBase: bind the whole of <buffer> to indexed binding point
 * <index> of <target>.  Unlike glBindBufferRange, no offset/size is stored;
 * the effective size tracks the buffer's size at use time (see the long
 * spec discussion below).
 */
void GLAPIENTRY
_mesa_BindBufferBase(GLenum target, GLuint index, GLuint buffer)
{
   GET_CURRENT_CONTEXT(ctx);
   struct gl_buffer_object *bufObj;

   if (MESA_VERBOSE & VERBOSE_API) {
      _mesa_debug(ctx, "glBindBufferBase(%s, %u, %u)\n",
                  _mesa_enum_to_string(target), index, buffer);
   }

   if (buffer == 0) {
      /* Name zero unbinds: a NULL object is passed to the per-target
       * bind helpers below. */
      bufObj = NULL;
   } else {
      bufObj = _mesa_lookup_bufferobj(ctx, buffer);
      if (!handle_bind_buffer_gen(ctx, buffer,
                                  &bufObj, "glBindBufferBase", false))
         return;
   }

   /* Note that there's some oddness in the GL 3.1-GL 3.3 specifications with
    * regards to BindBufferBase.  It says (GL 3.1 core spec, page 63):
    *
    *     "BindBufferBase is equivalent to calling BindBufferRange with offset
    *      zero and size equal to the size of buffer."
    *
    * but it says for glGetIntegeri_v (GL 3.1 core spec, page 230):
    *
    *     "If the parameter (starting offset or size) was not specified when the
    *      buffer object was bound, zero is returned."
    *
    * What happens if the size of the buffer changes?  Does the size of the
    * buffer at the moment glBindBufferBase was called still play a role, like
    * the first quote would imply, or is the size meaningless in the
    * glBindBufferBase case like the second quote would suggest?  The GL 4.1
    * core spec page 45 says:
    *
    *     "It is equivalent to calling BindBufferRange with offset zero, while
    *      size is determined by the size of the bound buffer at the time the
    *      binding is used."
    *
    * My interpretation is that the GL 4.1 spec was a clarification of the
    * behavior, not a change.  In particular, this choice will only make
    * rendering work in cases where it would have had undefined results.
    */

   switch (target) {
   case GL_TRANSFORM_FEEDBACK_BUFFER:
      _mesa_bind_buffer_base_transform_feedback(ctx,
                                                ctx->TransformFeedback.CurrentObject,
                                                index, bufObj, false);
      return;
   case GL_UNIFORM_BUFFER:
      bind_buffer_base_uniform_buffer(ctx, index, bufObj);
      return;
   case GL_SHADER_STORAGE_BUFFER:
      bind_buffer_base_shader_storage_buffer(ctx, index, bufObj);
      return;
   case GL_ATOMIC_COUNTER_BUFFER:
      bind_buffer_base_atomic_buffer(ctx, index, bufObj);
      return;
   default:
      _mesa_error(ctx, GL_INVALID_ENUM, "glBindBufferBase(target)");
      return;
   }
}
5020
/**
 * glBindBuffersRange (ARB_multi_bind): dispatch to the per-target
 * multi-bind helper with range semantics (offsets and sizes honored).
 */
void GLAPIENTRY
_mesa_BindBuffersRange(GLenum target, GLuint first, GLsizei count,
                       const GLuint *buffers,
                       const GLintptr *offsets, const GLsizeiptr *sizes)
{
   GET_CURRENT_CONTEXT(ctx);

   if (MESA_VERBOSE & VERBOSE_API) {
      _mesa_debug(ctx, "glBindBuffersRange(%s, %u, %d, %p, %p, %p)\n",
                  _mesa_enum_to_string(target), first, count,
                  buffers, offsets, sizes);
   }

   switch (target) {
   case GL_TRANSFORM_FEEDBACK_BUFFER:
      bind_xfb_buffers(ctx, first, count, buffers, true, offsets, sizes,
                       "glBindBuffersRange");
      return;
   case GL_UNIFORM_BUFFER:
      bind_uniform_buffers(ctx, first, count, buffers, true, offsets, sizes,
                           "glBindBuffersRange");
      return;
   case GL_SHADER_STORAGE_BUFFER:
      bind_shader_storage_buffers(ctx, first, count, buffers, true, offsets, sizes,
                                  "glBindBuffersRange");
      return;
   case GL_ATOMIC_COUNTER_BUFFER:
      bind_atomic_buffers(ctx, first, count, buffers, true, offsets, sizes,
                          "glBindBuffersRange");
      return;
   default:
      _mesa_error(ctx, GL_INVALID_ENUM, "glBindBuffersRange(target=%s)",
                  _mesa_enum_to_string(target));
      break;
   }
}
5057
/**
 * glBindBuffersBase (ARB_multi_bind): dispatch to the per-target
 * multi-bind helper with whole-buffer semantics (no offsets/sizes).
 */
void GLAPIENTRY
_mesa_BindBuffersBase(GLenum target, GLuint first, GLsizei count,
                      const GLuint *buffers)
{
   GET_CURRENT_CONTEXT(ctx);

   if (MESA_VERBOSE & VERBOSE_API) {
      _mesa_debug(ctx, "glBindBuffersBase(%s, %u, %d, %p)\n",
                  _mesa_enum_to_string(target), first, count, buffers);
   }

   switch (target) {
   case GL_TRANSFORM_FEEDBACK_BUFFER:
      bind_xfb_buffers(ctx, first, count, buffers, false, NULL, NULL,
                       "glBindBuffersBase");
      return;
   case GL_UNIFORM_BUFFER:
      bind_uniform_buffers(ctx, first, count, buffers, false, NULL, NULL,
                           "glBindBuffersBase");
      return;
   case GL_SHADER_STORAGE_BUFFER:
      bind_shader_storage_buffers(ctx, first, count, buffers, false, NULL, NULL,
                                  "glBindBuffersBase");
      return;
   case GL_ATOMIC_COUNTER_BUFFER:
      bind_atomic_buffers(ctx, first, count, buffers, false, NULL, NULL,
                          "glBindBuffersBase");
      return;
   default:
      _mesa_error(ctx, GL_INVALID_ENUM, "glBindBuffersBase(target=%s)",
                  _mesa_enum_to_string(target));
      break;
   }
}
5092
5093 /**
5094 * Called via glInvalidateBuffer(Sub)Data.
5095 */
5096 static void
bufferobj_invalidate(struct gl_context * ctx,struct gl_buffer_object * obj,GLintptr offset,GLsizeiptr size)5097 bufferobj_invalidate(struct gl_context *ctx,
5098 struct gl_buffer_object *obj,
5099 GLintptr offset,
5100 GLsizeiptr size)
5101 {
5102 struct pipe_context *pipe = ctx->pipe;
5103
5104 /* We ignore partial invalidates. */
5105 if (offset != 0 || size != obj->Size)
5106 return;
5107
5108 /* If the buffer is mapped, we can't invalidate it. */
5109 if (!obj->buffer || _mesa_bufferobj_mapped(obj, MAP_USER))
5110 return;
5111
5112 pipe->invalidate_resource(pipe, obj->buffer);
5113 }
5114
/**
 * Common tail of the glInvalidateBuffer(Sub)Data entry points: forward the
 * invalidation to the driver only when it supports resource invalidation.
 */
static ALWAYS_INLINE void
invalidate_buffer_subdata(struct gl_context *ctx,
                          struct gl_buffer_object *bufObj, GLintptr offset,
                          GLsizeiptr length)
{
   if (ctx->has_invalidate_buffer)
      bufferobj_invalidate(ctx, bufObj, offset, length);
}
5123
/**
 * KHR_no_error entry point for glInvalidateBufferSubData: no validation;
 * the caller guarantees <buffer> names an existing buffer object and the
 * range is valid and not user-mapped.
 */
void GLAPIENTRY
_mesa_InvalidateBufferSubData_no_error(GLuint buffer, GLintptr offset,
                                       GLsizeiptr length)
{
   GET_CURRENT_CONTEXT(ctx);

   struct gl_buffer_object *bufObj = _mesa_lookup_bufferobj(ctx, buffer);
   invalidate_buffer_subdata(ctx, bufObj, offset, length);
}
5133
5134 void GLAPIENTRY
_mesa_InvalidateBufferSubData(GLuint buffer,GLintptr offset,GLsizeiptr length)5135 _mesa_InvalidateBufferSubData(GLuint buffer, GLintptr offset,
5136 GLsizeiptr length)
5137 {
5138 GET_CURRENT_CONTEXT(ctx);
5139 struct gl_buffer_object *bufObj;
5140 const GLintptr end = offset + length;
5141
5142 /* Section 6.5 (Invalidating Buffer Data) of the OpenGL 4.5 (Compatibility
5143 * Profile) spec says:
5144 *
5145 * "An INVALID_VALUE error is generated if buffer is zero or is not the
5146 * name of an existing buffer object."
5147 */
5148 bufObj = _mesa_lookup_bufferobj(ctx, buffer);
5149 if (!bufObj || bufObj == &DummyBufferObject) {
5150 _mesa_error(ctx, GL_INVALID_VALUE,
5151 "glInvalidateBufferSubData(name = %u) invalid object",
5152 buffer);
5153 return;
5154 }
5155
5156 /* The GL_ARB_invalidate_subdata spec says:
5157 *
5158 * "An INVALID_VALUE error is generated if <offset> or <length> is
5159 * negative, or if <offset> + <length> is greater than the value of
5160 * BUFFER_SIZE."
5161 */
5162 if (offset < 0 || length < 0 || end > bufObj->Size) {
5163 _mesa_error(ctx, GL_INVALID_VALUE,
5164 "glInvalidateBufferSubData(invalid offset or length)");
5165 return;
5166 }
5167
5168 /* The OpenGL 4.4 (Core Profile) spec says:
5169 *
5170 * "An INVALID_OPERATION error is generated if buffer is currently
5171 * mapped by MapBuffer or if the invalidate range intersects the range
5172 * currently mapped by MapBufferRange, unless it was mapped
5173 * with MAP_PERSISTENT_BIT set in the MapBufferRange access flags."
5174 */
5175 if (!(bufObj->Mappings[MAP_USER].AccessFlags & GL_MAP_PERSISTENT_BIT) &&
5176 bufferobj_range_mapped(bufObj, offset, length)) {
5177 _mesa_error(ctx, GL_INVALID_OPERATION,
5178 "glInvalidateBufferSubData(intersection with mapped "
5179 "range)");
5180 return;
5181 }
5182
5183 invalidate_buffer_subdata(ctx, bufObj, offset, length);
5184 }
5185
/**
 * KHR_no_error entry point for glInvalidateBufferData: no validation; the
 * caller guarantees <buffer> names an existing buffer object (bufObj is
 * dereferenced without a NULL check, per the no_error contract).
 */
void GLAPIENTRY
_mesa_InvalidateBufferData_no_error(GLuint buffer)
{
   GET_CURRENT_CONTEXT(ctx);

   struct gl_buffer_object *bufObj =_mesa_lookup_bufferobj(ctx, buffer);
   invalidate_buffer_subdata(ctx, bufObj, 0, bufObj->Size);
}
5194
/**
 * glInvalidateBufferData (ARB_invalidate_subdata): tell the implementation
 * it may discard the entire contents of a buffer object's data store.
 */
void GLAPIENTRY
_mesa_InvalidateBufferData(GLuint buffer)
{
   GET_CURRENT_CONTEXT(ctx);
   struct gl_buffer_object *bufObj;

   /* Section 6.5 (Invalidating Buffer Data) of the OpenGL 4.5 (Compatibility
    * Profile) spec says:
    *
    *     "An INVALID_VALUE error is generated if buffer is zero or is not the
    *     name of an existing buffer object."
    */
   bufObj = _mesa_lookup_bufferobj(ctx, buffer);
   if (!bufObj || bufObj == &DummyBufferObject) {
      _mesa_error(ctx, GL_INVALID_VALUE,
                  "glInvalidateBufferData(name = %u) invalid object",
                  buffer);
      return;
   }

   /* The OpenGL 4.4 (Core Profile) spec says:
    *
    *     "An INVALID_OPERATION error is generated if buffer is currently
    *     mapped by MapBuffer or if the invalidate range intersects the range
    *     currently mapped by MapBufferRange, unless it was mapped
    *     with MAP_PERSISTENT_BIT set in the MapBufferRange access flags."
    */
   if (_mesa_check_disallowed_mapping(bufObj)) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "glInvalidateBufferData(intersection with mapped "
                  "range)");
      return;
   }

   invalidate_buffer_subdata(ctx, bufObj, 0, bufObj->Size);
}
5231
5232 static void
buffer_page_commitment(struct gl_context * ctx,struct gl_buffer_object * bufferObj,GLintptr offset,GLsizeiptr size,GLboolean commit,const char * func)5233 buffer_page_commitment(struct gl_context *ctx,
5234 struct gl_buffer_object *bufferObj,
5235 GLintptr offset, GLsizeiptr size,
5236 GLboolean commit, const char *func)
5237 {
5238 if (!(bufferObj->StorageFlags & GL_SPARSE_STORAGE_BIT_ARB)) {
5239 _mesa_error(ctx, GL_INVALID_OPERATION, "%s(not a sparse buffer object)",
5240 func);
5241 return;
5242 }
5243
5244 if (size < 0 || size > bufferObj->Size ||
5245 offset < 0 || offset > bufferObj->Size - size) {
5246 _mesa_error(ctx, GL_INVALID_VALUE, "%s(out of bounds)",
5247 func);
5248 return;
5249 }
5250
5251 /* The GL_ARB_sparse_buffer extension specification says:
5252 *
5253 * "INVALID_VALUE is generated by BufferPageCommitmentARB if <offset> is
5254 * not an integer multiple of SPARSE_BUFFER_PAGE_SIZE_ARB, or if <size>
5255 * is not an integer multiple of SPARSE_BUFFER_PAGE_SIZE_ARB and does
5256 * not extend to the end of the buffer's data store."
5257 */
5258 if (offset % ctx->Const.SparseBufferPageSize != 0) {
5259 _mesa_error(ctx, GL_INVALID_VALUE, "%s(offset not aligned to page size)",
5260 func);
5261 return;
5262 }
5263
5264 if (size % ctx->Const.SparseBufferPageSize != 0 &&
5265 offset + size != bufferObj->Size) {
5266 _mesa_error(ctx, GL_INVALID_VALUE, "%s(size not aligned to page size)",
5267 func);
5268 return;
5269 }
5270
5271 struct pipe_context *pipe = ctx->pipe;
5272 struct pipe_box box;
5273
5274 u_box_1d(offset, size, &box);
5275
5276 if (!pipe->resource_commit(pipe, bufferObj->buffer, 0, &box, commit)) {
5277 _mesa_error(ctx, GL_OUT_OF_MEMORY, "glBufferPageCommitmentARB(out of memory)");
5278 }
5279 }
5280
/**
 * glBufferPageCommitmentARB: page commitment on the buffer currently bound
 * to <target>.
 */
void GLAPIENTRY
_mesa_BufferPageCommitmentARB(GLenum target, GLintptr offset, GLsizeiptr size,
                              GLboolean commit)
{
   GET_CURRENT_CONTEXT(ctx);
   struct gl_buffer_object *bufferObj;

   bufferObj = get_buffer(ctx, "glBufferPageCommitmentARB", target,
                          GL_INVALID_ENUM);
   if (!bufferObj)
      return;

   buffer_page_commitment(ctx, bufferObj, offset, size, commit,
                          "glBufferPageCommitmentARB");
}
5296
/**
 * glNamedBufferPageCommitmentARB (direct state access): page commitment on
 * the buffer identified by name, bypassing the binding points.
 */
void GLAPIENTRY
_mesa_NamedBufferPageCommitmentARB(GLuint buffer, GLintptr offset,
                                   GLsizeiptr size, GLboolean commit)
{
   GET_CURRENT_CONTEXT(ctx);
   struct gl_buffer_object *bufferObj;

   bufferObj = _mesa_lookup_bufferobj(ctx, buffer);
   if (!bufferObj || bufferObj == &DummyBufferObject) {
      /* Note: the extension spec is not clear about the expected error value. */
      _mesa_error(ctx, GL_INVALID_VALUE,
                  "glNamedBufferPageCommitmentARB(name = %u) invalid object",
                  buffer);
      return;
   }

   buffer_page_commitment(ctx, bufferObj, offset, size, commit,
                          "glNamedBufferPageCommitmentARB");
}
5316
/**
 * glNamedBufferPageCommitmentEXT: like the ARB variant, but with
 * EXT_direct_state_access semantics — an unknown name is auto-generated
 * (handle_bind_buffer_gen) and name zero is INVALID_OPERATION rather than
 * INVALID_VALUE.
 */
void GLAPIENTRY
_mesa_NamedBufferPageCommitmentEXT(GLuint buffer, GLintptr offset,
                                   GLsizeiptr size, GLboolean commit)
{
   GET_CURRENT_CONTEXT(ctx);
   struct gl_buffer_object *bufferObj;

   /* Use NamedBuffer* functions logic from EXT_direct_state_access */
   if (buffer != 0) {
      bufferObj = _mesa_lookup_bufferobj(ctx, buffer);
      if (!handle_bind_buffer_gen(ctx, buffer, &bufferObj,
                                  "glNamedBufferPageCommitmentEXT", false))
         return;
   } else {
      /* GL_EXT_direct_state_access says about NamedBuffer* functions:
       *
       *  There is no buffer corresponding to the name zero, these commands
       *  generate the INVALID_OPERATION error if the buffer parameter is
       *  zero.
       */
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "glNamedBufferPageCommitmentEXT(buffer = 0)");
      return;
   }
   buffer_page_commitment(ctx, bufferObj, offset, size, commit,
                          "glNamedBufferPageCommitmentEXT");
}
5344