/**************************************************************************
 *
 * Copyright 2007 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef U_INLINES_H
#define U_INLINES_H

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_shader_tokens.h"
#include "pipe/p_state.h"
#include "pipe/p_screen.h"
#include "util/compiler.h"
#include "util/format/u_format.h"
#include "util/u_debug.h"
#include "util/u_debug_describe.h"
#include "util/u_debug_refcnt.h"
#include "util/u_atomic.h"
#include "util/box.h"
#include "util/u_math.h"


#ifdef __cplusplus
extern "C" {
#endif


/*
 * Reference counting helper functions.
 */


static inline void
pipe_reference_init(struct pipe_reference *dst, unsigned count)
{
   dst->count = count;
}

static inline bool
pipe_is_referenced(struct pipe_reference *src)
{
   return p_atomic_read(&src->count) != 0;
}

/**
 * Update reference counting.
 * The old thing pointed to, if any, will be unreferenced.
 * Both 'dst' and 'src' may be NULL.
 * \return true if the object's refcount hits zero and should be destroyed.
 */
static inline bool
pipe_reference_described(struct pipe_reference *dst,
                         struct pipe_reference *src,
                         debug_reference_descriptor get_desc)
{
   if (dst != src) {
      /* bump the src.count first */
      if (src) {
         ASSERTED int count = p_atomic_inc_return(&src->count);
         assert(count != 1); /* src had to be referenced */
         debug_reference(src, get_desc, 1);
      }

      if (dst) {
         int count = p_atomic_dec_return(&dst->count);
         assert(count != -1); /* dst had to be referenced */
         debug_reference(dst, get_desc, -1);
         if (!count)
            return true;
      }
   }

   return false;
}

static inline bool
pipe_reference(struct pipe_reference *dst, struct pipe_reference *src)
{
   return pipe_reference_described(dst, src,
                                   (debug_reference_descriptor)
                                   debug_describe_reference);
}
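
/* A minimal usage sketch of the helpers above (illustrative only; the
 * struct and function names are hypothetical). An object embeds a
 * pipe_reference and is destroyed when pipe_reference() returns true:
 *
 *    struct my_object {
 *       struct pipe_reference reference;
 *       ...
 *    };
 *
 *    void my_object_reference(struct my_object **dst, struct my_object *src)
 *    {
 *       struct my_object *old = *dst;
 *       if (pipe_reference(old ? &old->reference : NULL,
 *                          src ? &src->reference : NULL))
 *          my_object_destroy(old);  // hypothetical destructor
 *       *dst = src;
 *    }
 */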

static inline void
pipe_surface_reference(struct pipe_surface **dst, struct pipe_surface *src)
{
   struct pipe_surface *old_dst = *dst;

   if (pipe_reference_described(old_dst ? &old_dst->reference : NULL,
                                src ? &src->reference : NULL,
                                (debug_reference_descriptor)
                                debug_describe_surface))
      old_dst->context->surface_destroy(old_dst->context, old_dst);
   *dst = src;
}

/**
 * Similar to pipe_surface_reference(), but always sets the pointer to NULL
 * and takes an explicit context.  The explicit context avoids the problem
 * of using a deleted context's surface_destroy() method when freeing a
 * surface that's shared by multiple contexts.
 */
static inline void
pipe_surface_release(struct pipe_context *pipe, struct pipe_surface **ptr)
{
   struct pipe_surface *old = *ptr;

   if (pipe_reference_described(old ? &old->reference : NULL,
                                NULL,
                                (debug_reference_descriptor)
                                debug_describe_surface))
      pipe->surface_destroy(pipe, old);
   *ptr = NULL;
}

static inline void
pipe_resource_destroy(struct pipe_resource *res)
{
   /* Avoid recursion, which would prevent inlining this function */
   do {
      struct pipe_resource *next = res->next;

      res->screen->resource_destroy(res->screen, res);
      res = next;
   } while (pipe_reference_described(res ? &res->reference : NULL,
                                     NULL,
                                     (debug_reference_descriptor)
                                     debug_describe_resource));
}

static inline void
pipe_resource_reference(struct pipe_resource **dst, struct pipe_resource *src)
{
   struct pipe_resource *old_dst = *dst;

   if (pipe_reference_described(old_dst ? &old_dst->reference : NULL,
                                src ? &src->reference : NULL,
                                (debug_reference_descriptor)
                                debug_describe_resource)) {
      pipe_resource_destroy(old_dst);
   }
   *dst = src;
}
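
/* Typical ownership pattern for pipe_resource_reference() (illustrative
 * sketch; `tex` is a hypothetical resource pointer owned by the caller):
 *
 *    struct pipe_resource *held = NULL;
 *    pipe_resource_reference(&held, tex);   // take a reference
 *    ...
 *    pipe_resource_reference(&held, NULL);  // drop it; may destroy tex
 */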

/**
 * Subtract the given number of references.
 */
static inline void
pipe_drop_resource_references(struct pipe_resource *dst, int num_refs)
{
   int count = p_atomic_add_return(&dst->reference.count, -num_refs);

   assert(count >= 0);
   /* Underflows shouldn't happen, but let's be safe. */
   if (count <= 0)
      pipe_resource_destroy(dst);
}

/**
 * Same as pipe_surface_release(), but used when the pipe_context doesn't
 * exist anymore.
 */
static inline void
pipe_surface_release_no_context(struct pipe_surface **ptr)
{
   struct pipe_surface *surf = *ptr;

   if (pipe_reference_described(&surf->reference, NULL,
                                (debug_reference_descriptor)
                                debug_describe_surface)) {
      /* trivially destroy pipe_surface */
      pipe_resource_reference(&surf->texture, NULL);
      free(surf);
   }
   *ptr = NULL;
}

/**
 * Set *dst to \p src with proper reference counting.
 *
 * The caller must guarantee that \p src and *dst were created in
 * the same context (if they exist), and that this context is current.
 */
static inline void
pipe_sampler_view_reference(struct pipe_sampler_view **dst,
                            struct pipe_sampler_view *src)
{
   struct pipe_sampler_view *old_dst = *dst;

   if (pipe_reference_described(old_dst ? &old_dst->reference : NULL,
                                src ? &src->reference : NULL,
                                (debug_reference_descriptor)
                                debug_describe_sampler_view))
      old_dst->context->sampler_view_destroy(old_dst->context, old_dst);
   *dst = src;
}

static inline void
pipe_so_target_reference(struct pipe_stream_output_target **dst,
                         struct pipe_stream_output_target *src)
{
   struct pipe_stream_output_target *old_dst = *dst;

   if (pipe_reference_described(old_dst ? &old_dst->reference : NULL,
                                src ? &src->reference : NULL,
                                (debug_reference_descriptor)debug_describe_so_target))
      old_dst->context->stream_output_target_destroy(old_dst->context, old_dst);
   *dst = src;
}

static inline void
pipe_vertex_state_reference(struct pipe_vertex_state **dst,
                            struct pipe_vertex_state *src)
{
   struct pipe_vertex_state *old_dst = *dst;

   if (pipe_reference(old_dst ? &old_dst->reference : NULL,
                      src ? &src->reference : NULL))
      old_dst->screen->vertex_state_destroy(old_dst->screen, old_dst);
   *dst = src;
}

static inline void
pipe_vertex_buffer_unreference(struct pipe_vertex_buffer *dst)
{
   if (dst->is_user_buffer)
      dst->buffer.user = NULL;
   else
      pipe_resource_reference(&dst->buffer.resource, NULL);
}

static inline void
pipe_vertex_buffer_reference(struct pipe_vertex_buffer *dst,
                             const struct pipe_vertex_buffer *src)
{
   if (dst->buffer.resource == src->buffer.resource) {
      /* Just copy the fields, don't touch reference counts. */
      dst->is_user_buffer = src->is_user_buffer;
      dst->buffer_offset = src->buffer_offset;
      return;
   }

   pipe_vertex_buffer_unreference(dst);
   /* Don't use memcpy because there is padding between the fields
    * and dst may be used as a hash key.
    */
   dst->is_user_buffer = src->is_user_buffer;
   dst->buffer_offset = src->buffer_offset;

   if (src->is_user_buffer)
      dst->buffer.user = src->buffer.user;
   else
      pipe_resource_reference(&dst->buffer.resource, src->buffer.resource);
}

static inline void
pipe_surface_reset(struct pipe_context *ctx, struct pipe_surface *ps,
                   struct pipe_resource *pt, unsigned level, unsigned layer)
{
   pipe_resource_reference(&ps->texture, pt);
   ps->format = pt->format;
   ps->width = u_minify(pt->width0, level);
   ps->height = u_minify(pt->height0, level);
   ps->u.tex.level = level;
   ps->u.tex.first_layer = ps->u.tex.last_layer = layer;
   ps->context = ctx;
}

static inline void
pipe_surface_init(struct pipe_context *ctx, struct pipe_surface *ps,
                  struct pipe_resource *pt, unsigned level, unsigned layer)
{
   ps->texture = NULL;
   pipe_reference_init(&ps->reference, 1);
   pipe_surface_reset(ctx, ps, pt, level, layer);
}

/* Return true if the surfaces are equal. */
static inline bool
pipe_surface_equal(struct pipe_surface *s1, struct pipe_surface *s2)
{
   return s1->texture == s2->texture &&
          s1->format == s2->format &&
          (s1->texture->target != PIPE_BUFFER ||
           (s1->u.buf.first_element == s2->u.buf.first_element &&
            s1->u.buf.last_element == s2->u.buf.last_element)) &&
          (s1->texture->target == PIPE_BUFFER ||
           (s1->u.tex.level == s2->u.tex.level &&
            s1->u.tex.first_layer == s2->u.tex.first_layer &&
            s1->u.tex.last_layer == s2->u.tex.last_layer));
}

/*
 * Convenience wrappers for screen buffer functions.
 */


static inline unsigned
pipe_buffer_size(const struct pipe_resource *buffer)
{
   return buffer->width0;
}


/**
 * Create a new buffer resource.
 * \param bind bitmask of PIPE_BIND_x flags
 * \param usage a PIPE_USAGE_x value
 */
static inline struct pipe_resource *
pipe_buffer_create(struct pipe_screen *screen,
                   unsigned bind,
                   enum pipe_resource_usage usage,
                   unsigned size)
{
   struct pipe_resource buffer;
   memset(&buffer, 0, sizeof buffer);
   buffer.target = PIPE_BUFFER;
   buffer.format = PIPE_FORMAT_R8_UNORM; /* want TYPELESS or similar */
   buffer.bind = bind;
   buffer.usage = usage;
   buffer.flags = 0;
   buffer.width0 = size;
   buffer.height0 = 1;
   buffer.depth0 = 1;
   buffer.array_size = 1;
   return screen->resource_create(screen, &buffer);
}
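
/* Usage sketch for pipe_buffer_create() (illustrative; `screen` is assumed
 * to be a valid pipe_screen):
 *
 *    struct pipe_resource *vbo =
 *       pipe_buffer_create(screen, PIPE_BIND_VERTEX_BUFFER,
 *                          PIPE_USAGE_DEFAULT, 64 * 1024);
 *    ...
 *    pipe_resource_reference(&vbo, NULL);  // release when done
 */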


static inline struct pipe_resource *
pipe_buffer_create_const0(struct pipe_screen *screen,
                          unsigned bind,
                          enum pipe_resource_usage usage,
                          unsigned size)
{
   struct pipe_resource buffer;
   memset(&buffer, 0, sizeof buffer);
   buffer.target = PIPE_BUFFER;
   buffer.format = PIPE_FORMAT_R8_UNORM;
   buffer.bind = bind;
   buffer.usage = usage;
   buffer.flags = screen->get_param(screen, PIPE_CAP_CONSTBUF0_FLAGS);
   buffer.width0 = size;
   buffer.height0 = 1;
   buffer.depth0 = 1;
   buffer.array_size = 1;
   return screen->resource_create(screen, &buffer);
}


/**
 * Map a range of a resource.
 * \param offset start of region, in bytes
 * \param length size of region, in bytes
 * \param access bitmask of PIPE_MAP_x flags
 * \param transfer returns a transfer object
 */
static inline void *
pipe_buffer_map_range(struct pipe_context *pipe,
                      struct pipe_resource *buffer,
                      unsigned offset,
                      unsigned length,
                      unsigned access,
                      struct pipe_transfer **transfer)
{
   struct pipe_box box;

   assert(offset < buffer->width0);
   assert(offset + length <= buffer->width0);
   assert(length);

   u_box_1d(offset, length, &box);

   return pipe->buffer_map(pipe, buffer, 0, access, &box, transfer);
}
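
/* Map/write/unmap sketch using the wrappers above (illustrative; `pipe` and
 * `buf` are assumed to exist and `buf` to be at least 256 bytes):
 *
 *    struct pipe_transfer *xfer;
 *    uint8_t *ptr = pipe_buffer_map_range(pipe, buf, 0, 256,
 *                                         PIPE_MAP_WRITE, &xfer);
 *    if (ptr) {
 *       memset(ptr, 0, 256);
 *       pipe_buffer_unmap(pipe, xfer);
 *    }
 */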


/**
 * Map a whole resource.
 * \param access bitmask of PIPE_MAP_x flags
 * \param transfer returns a transfer object
 */
static inline void *
pipe_buffer_map(struct pipe_context *pipe,
                struct pipe_resource *buffer,
                unsigned access,
                struct pipe_transfer **transfer)
{
   return pipe_buffer_map_range(pipe, buffer, 0, buffer->width0,
                                access, transfer);
}


static inline void
pipe_buffer_unmap(struct pipe_context *pipe,
                  struct pipe_transfer *transfer)
{
   pipe->buffer_unmap(pipe, transfer);
}

static inline void
pipe_buffer_flush_mapped_range(struct pipe_context *pipe,
                               struct pipe_transfer *transfer,
                               unsigned offset,
                               unsigned length)
{
   struct pipe_box box;
   int transfer_offset;

   assert(length);
   assert(transfer->box.x <= (int) offset);
   assert((int) (offset + length) <= transfer->box.x + transfer->box.width);

   /* Match the old screen->buffer_flush_mapped_range() behaviour, where
    * the offset parameter was relative to the start of the buffer, not
    * the mapped range.
    */
   transfer_offset = offset - transfer->box.x;

   u_box_1d(transfer_offset, length, &box);

   pipe->transfer_flush_region(pipe, transfer, &box);
}
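
/* Flush-explicit sketch (illustrative): when a buffer is mapped with
 * PIPE_MAP_WRITE | PIPE_MAP_FLUSH_EXPLICIT, only the ranges flushed below
 * are guaranteed to reach the device. `pipe` and `buf` are assumed to exist.
 *
 *    struct pipe_transfer *xfer;
 *    uint8_t *ptr = pipe_buffer_map(pipe, buf,
 *                                   PIPE_MAP_WRITE |
 *                                   PIPE_MAP_FLUSH_EXPLICIT, &xfer);
 *    if (ptr) {
 *       memset(ptr + 64, 0xff, 32);
 *       pipe_buffer_flush_mapped_range(pipe, xfer, 64, 32);  // buffer-relative
 *       pipe_buffer_unmap(pipe, xfer);
 *    }
 */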

static inline void
pipe_buffer_write(struct pipe_context *pipe,
                  struct pipe_resource *buf,
                  unsigned offset,
                  unsigned size,
                  const void *data)
{
   /* Don't set any other usage bits. Drivers should derive them. */
   pipe->buffer_subdata(pipe, buf, PIPE_MAP_WRITE, offset, size, data);
}

/**
 * Special case for writing non-overlapping ranges.
 *
 * We can avoid GPU/CPU synchronization when writing a range that has never
 * been written before.
 */
static inline void
pipe_buffer_write_nooverlap(struct pipe_context *pipe,
                            struct pipe_resource *buf,
                            unsigned offset, unsigned size,
                            const void *data)
{
   pipe->buffer_subdata(pipe, buf,
                        (PIPE_MAP_WRITE |
                         PIPE_MAP_UNSYNCHRONIZED),
                        offset, size, data);
}

/**
 * Utility for simplifying pipe_context::resource_copy_region() calls.
 */
static inline void
pipe_buffer_copy(struct pipe_context *pipe,
                 struct pipe_resource *dst,
                 struct pipe_resource *src,
                 unsigned dst_offset,
                 unsigned src_offset,
                 unsigned size)
{
   struct pipe_box box;
   u_box_1d(src_offset, size, &box);
   pipe->resource_copy_region(pipe, dst, 0, dst_offset, 0, 0, src, 0, &box);
}

/**
 * Create a new buffer resource and immediately put data into it.
 * \param bind bitmask of PIPE_BIND_x flags
 * \param usage a PIPE_USAGE_x value
 */
static inline struct pipe_resource *
pipe_buffer_create_with_data(struct pipe_context *pipe,
                             unsigned bind,
                             enum pipe_resource_usage usage,
                             unsigned size,
                             const void *ptr)
{
   struct pipe_resource *res = pipe_buffer_create(pipe->screen,
                                                  bind, usage, size);
   pipe_buffer_write_nooverlap(pipe, res, 0, size, ptr);
   return res;
}

static inline void
pipe_buffer_read(struct pipe_context *pipe,
                 struct pipe_resource *buf,
                 unsigned offset,
                 unsigned size,
                 void *data)
{
   struct pipe_transfer *src_transfer;
   uint8_t *map;

   map = (uint8_t *) pipe_buffer_map_range(pipe,
                                           buf,
                                           offset, size,
                                           PIPE_MAP_READ,
                                           &src_transfer);
   if (!map)
      return;

   memcpy(data, map, size);
   pipe_buffer_unmap(pipe, src_transfer);
}
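
/* Round-trip sketch with the write/read helpers (illustrative; `pipe` is an
 * assumed context and the bind/usage choices are arbitrary):
 *
 *    uint32_t in = 0xdeadbeef, out = 0;
 *    struct pipe_resource *buf =
 *       pipe_buffer_create_with_data(pipe, PIPE_BIND_CONSTANT_BUFFER,
 *                                    PIPE_USAGE_STAGING, sizeof(in), &in);
 *    pipe_buffer_read(pipe, buf, 0, sizeof(out), &out);  // out == 0xdeadbeef
 *    pipe_resource_reference(&buf, NULL);
 */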


/**
 * Map a resource for reading/writing.
 * \param access bitmask of PIPE_MAP_x flags
 */
static inline void *
pipe_texture_map(struct pipe_context *context,
                 struct pipe_resource *resource,
                 unsigned level, unsigned layer,
                 unsigned access,
                 unsigned x, unsigned y,
                 unsigned w, unsigned h,
                 struct pipe_transfer **transfer)
{
   struct pipe_box box;
   u_box_2d_zslice(x, y, layer, w, h, &box);
   return context->texture_map(context, resource, level, access,
                               &box, transfer);
}


/**
 * Map a 3D (texture) resource for reading/writing.
 * \param access bitmask of PIPE_MAP_x flags
 */
static inline void *
pipe_texture_map_3d(struct pipe_context *context,
                    struct pipe_resource *resource,
                    unsigned level,
                    unsigned access,
                    unsigned x, unsigned y, unsigned z,
                    unsigned w, unsigned h, unsigned d,
                    struct pipe_transfer **transfer)
{
   struct pipe_box box;
   u_box_3d(x, y, z, w, h, d, &box);
   return context->texture_map(context, resource, level, access,
                               &box, transfer);
}

static inline void
pipe_texture_unmap(struct pipe_context *context,
                   struct pipe_transfer *transfer)
{
   context->texture_unmap(context, transfer);
}
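
/* Texture readback sketch (illustrative; `pipe`, `tex`, `pixels` and
 * `row_size` are assumed names, and `pixels` must hold 16 rows of
 * `row_size` bytes):
 *
 *    struct pipe_transfer *xfer;
 *    uint8_t *map = pipe_texture_map(pipe, tex, 0, 0, PIPE_MAP_READ,
 *                                    0, 0, 16, 16, &xfer);
 *    if (map) {
 *       for (unsigned row = 0; row < 16; row++)
 *          memcpy(pixels + row * row_size,
 *                 map + row * xfer->stride, row_size);
 *       pipe_texture_unmap(pipe, xfer);
 *    }
 */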

static inline void
pipe_set_constant_buffer(struct pipe_context *pipe,
                         enum pipe_shader_type shader, uint index,
                         struct pipe_resource *buf)
{
   if (buf) {
      struct pipe_constant_buffer cb;
      cb.buffer = buf;
      cb.buffer_offset = 0;
      cb.buffer_size = buf->width0;
      cb.user_buffer = NULL;
      pipe->set_constant_buffer(pipe, shader, index, false, &cb);
   } else {
      pipe->set_constant_buffer(pipe, shader, index, false, NULL);
   }
}
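
/* Binding sketch (illustrative): bind `buf` as constant buffer 0 for the
 * fragment stage, then unbind it. `pipe` and `buf` are assumed to exist.
 *
 *    pipe_set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, 0, buf);
 *    ... draw ...
 *    pipe_set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, 0, NULL);
 */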


/**
 * Get the polygon offset enable/disable flag for the given polygon fill mode.
 * \param fill_mode one of PIPE_POLYGON_MODE_POINT/LINE/FILL
 */
static inline bool
util_get_offset(const struct pipe_rasterizer_state *templ,
                unsigned fill_mode)
{
   switch (fill_mode) {
   case PIPE_POLYGON_MODE_POINT:
      return templ->offset_point;
   case PIPE_POLYGON_MODE_LINE:
      return templ->offset_line;
   case PIPE_POLYGON_MODE_FILL:
      return templ->offset_tri;
   default:
      assert(0);
      return false;
   }
}

static inline float
util_get_min_point_size(const struct pipe_rasterizer_state *state)
{
   /* The point size should be clamped to this value at the rasterizer stage.
    */
   return !state->point_quad_rasterization &&
          !state->point_smooth &&
          !state->multisample ? 1.0f : 0.0f;
}

static inline void
util_query_clear_result(union pipe_query_result *result, unsigned type)
{
   switch (type) {
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
   case PIPE_QUERY_GPU_FINISHED:
      result->b = false;
      break;
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_TIMESTAMP:
   case PIPE_QUERY_TIME_ELAPSED:
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      result->u64 = 0;
      break;
   case PIPE_QUERY_SO_STATISTICS:
      memset(&result->so_statistics, 0, sizeof(result->so_statistics));
      break;
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
      memset(&result->timestamp_disjoint, 0, sizeof(result->timestamp_disjoint));
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      memset(&result->pipeline_statistics, 0, sizeof(result->pipeline_statistics));
      break;
   default:
      memset(result, 0, sizeof(*result));
   }
}

/** Convert PIPE_TEXTURE_x to TGSI_TEXTURE_x */
static inline enum tgsi_texture_type
util_pipe_tex_to_tgsi_tex(enum pipe_texture_target pipe_tex_target,
                          unsigned nr_samples)
{
   switch (pipe_tex_target) {
   case PIPE_BUFFER:
      return TGSI_TEXTURE_BUFFER;

   case PIPE_TEXTURE_1D:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_1D;

   case PIPE_TEXTURE_2D:
      return nr_samples > 1 ? TGSI_TEXTURE_2D_MSAA : TGSI_TEXTURE_2D;

   case PIPE_TEXTURE_RECT:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_RECT;

   case PIPE_TEXTURE_3D:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_3D;

   case PIPE_TEXTURE_CUBE:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_CUBE;

   case PIPE_TEXTURE_1D_ARRAY:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_1D_ARRAY;

   case PIPE_TEXTURE_2D_ARRAY:
      return nr_samples > 1 ? TGSI_TEXTURE_2D_ARRAY_MSAA :
                              TGSI_TEXTURE_2D_ARRAY;

   case PIPE_TEXTURE_CUBE_ARRAY:
      return TGSI_TEXTURE_CUBE_ARRAY;

   default:
      assert(0 && "unexpected texture target");
      return TGSI_TEXTURE_UNKNOWN;
   }
}


static inline void
util_copy_constant_buffer(struct pipe_constant_buffer *dst,
                          const struct pipe_constant_buffer *src,
                          bool take_ownership)
{
   if (src) {
      if (take_ownership) {
         pipe_resource_reference(&dst->buffer, NULL);
         dst->buffer = src->buffer;
      } else {
         pipe_resource_reference(&dst->buffer, src->buffer);
      }
      dst->buffer_offset = src->buffer_offset;
      dst->buffer_size = src->buffer_size;
      dst->user_buffer = src->user_buffer;
   } else {
      pipe_resource_reference(&dst->buffer, NULL);
      dst->buffer_offset = 0;
      dst->buffer_size = 0;
      dst->user_buffer = NULL;
   }
}
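
/* Ownership semantics sketch for util_copy_constant_buffer() (illustrative;
 * `src` is an assumed caller-owned pipe_constant_buffer): with
 * take_ownership = true the caller's reference moves into `dst`, so the
 * caller must not unreference src->buffer afterwards.
 *
 *    struct pipe_constant_buffer dst = {0};
 *    util_copy_constant_buffer(&dst, &src, false);  // dst holds a new ref
 *    util_copy_constant_buffer(&dst, NULL, false);  // releases it
 */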

static inline void
util_copy_shader_buffer(struct pipe_shader_buffer *dst,
                        const struct pipe_shader_buffer *src)
{
   if (src) {
      pipe_resource_reference(&dst->buffer, src->buffer);
      dst->buffer_offset = src->buffer_offset;
      dst->buffer_size = src->buffer_size;
   } else {
      pipe_resource_reference(&dst->buffer, NULL);
      dst->buffer_offset = 0;
      dst->buffer_size = 0;
   }
}

static inline void
util_copy_image_view(struct pipe_image_view *dst,
                     const struct pipe_image_view *src)
{
   if (src) {
      pipe_resource_reference(&dst->resource, src->resource);
      dst->format = src->format;
      dst->access = src->access;
      dst->shader_access = src->shader_access;
      dst->u = src->u;
   } else {
      pipe_resource_reference(&dst->resource, NULL);
      dst->format = PIPE_FORMAT_NONE;
      dst->access = 0;
      dst->shader_access = 0;
      memset(&dst->u, 0, sizeof(dst->u));
   }
}

static inline unsigned
util_max_layer(const struct pipe_resource *r, unsigned level)
{
   switch (r->target) {
   case PIPE_TEXTURE_3D:
      return u_minify(r->depth0, level) - 1;
   case PIPE_TEXTURE_CUBE:
      assert(r->array_size == 6);
      FALLTHROUGH;
   case PIPE_TEXTURE_1D_ARRAY:
   case PIPE_TEXTURE_2D_ARRAY:
   case PIPE_TEXTURE_CUBE_ARRAY:
      return r->array_size - 1;
   default:
      return 0;
   }
}

static inline unsigned
util_num_layers(const struct pipe_resource *r, unsigned level)
{
   return util_max_layer(r, level) + 1;
}
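
/* Worked example (illustrative): for a PIPE_TEXTURE_3D resource with
 * depth0 = 32, util_max_layer(r, 2) is u_minify(32, 2) - 1 = 8 - 1 = 7,
 * so util_num_layers(r, 2) is 8. For a PIPE_TEXTURE_CUBE resource both
 * functions ignore `level` and report the six faces (max layer 5).
 */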

static inline bool
util_texrange_covers_whole_level(const struct pipe_resource *tex,
                                 unsigned level, unsigned x, unsigned y,
                                 unsigned z, unsigned width,
                                 unsigned height, unsigned depth)
{
   return x == 0 && y == 0 && z == 0 &&
          width == u_minify(tex->width0, level) &&
          height == u_minify(tex->height0, level) &&
          depth == util_num_layers(tex, level);
}

/**
 * Returns true if the blit will fully initialize all pixels in the resource.
 */
static inline bool
util_blit_covers_whole_resource(const struct pipe_blit_info *info)
{
   /* No conditional rendering, scissoring or blending. (We assume that the
    * caller would have dropped any redundant scissoring.)
    */
   if (info->scissor_enable || info->window_rectangle_include ||
       info->render_condition_enable || info->alpha_blend)
      return false;

   const struct pipe_resource *dst = info->dst.resource;
   /* A single blit can't initialize a miptree. */
   if (dst->last_level != 0)
      return false;

   assert(info->dst.level == 0);

   /* Make sure the dst box covers the whole resource. */
   if (!(util_texrange_covers_whole_level(dst, 0,
                                          0, 0, 0,
                                          info->dst.box.width,
                                          info->dst.box.height,
                                          info->dst.box.depth))) {
      return false;
   }

   /* Make sure the mask actually updates all the channels present in the
    * dst format.
    */
   if (info->mask & PIPE_MASK_RGBA) {
      if ((info->mask & PIPE_MASK_RGBA) != PIPE_MASK_RGBA)
         return false;
   }

   if (info->mask & PIPE_MASK_ZS) {
      const struct util_format_description *format_desc =
         util_format_description(info->dst.format);
      uint32_t dst_has = 0;
      if (util_format_has_depth(format_desc))
         dst_has |= PIPE_MASK_Z;
      if (util_format_has_stencil(format_desc))
         dst_has |= PIPE_MASK_S;
      if (dst_has & ~(info->mask & PIPE_MASK_ZS))
         return false;
   }

   return true;
}

static inline bool
util_logicop_reads_dest(enum pipe_logicop op)
{
   switch (op) {
   case PIPE_LOGICOP_NOR:
   case PIPE_LOGICOP_AND_INVERTED:
   case PIPE_LOGICOP_AND_REVERSE:
   case PIPE_LOGICOP_INVERT:
   case PIPE_LOGICOP_XOR:
   case PIPE_LOGICOP_NAND:
   case PIPE_LOGICOP_AND:
   case PIPE_LOGICOP_EQUIV:
   case PIPE_LOGICOP_NOOP:
   case PIPE_LOGICOP_OR_INVERTED:
   case PIPE_LOGICOP_OR_REVERSE:
   case PIPE_LOGICOP_OR:
      return true;
   case PIPE_LOGICOP_CLEAR:
   case PIPE_LOGICOP_COPY_INVERTED:
   case PIPE_LOGICOP_COPY:
   case PIPE_LOGICOP_SET:
      return false;
   }
   unreachable("bad logicop");
}

static inline bool
util_writes_stencil(const struct pipe_stencil_state *s)
{
   return s->enabled && s->writemask &&
          ((s->fail_op != PIPE_STENCIL_OP_KEEP) ||
           (s->zpass_op != PIPE_STENCIL_OP_KEEP) ||
           (s->zfail_op != PIPE_STENCIL_OP_KEEP));
}

static inline bool
util_writes_depth(const struct pipe_depth_stencil_alpha_state *zsa)
{
   return zsa->depth_enabled && zsa->depth_writemask &&
          (zsa->depth_func != PIPE_FUNC_NEVER);
}

static inline bool
util_writes_depth_stencil(const struct pipe_depth_stencil_alpha_state *zsa)
{
   return util_writes_depth(zsa) ||
          util_writes_stencil(&zsa->stencil[0]) ||
          util_writes_stencil(&zsa->stencil[1]);
}

static inline struct pipe_context *
pipe_create_multimedia_context(struct pipe_screen *screen)
{
   unsigned flags = 0;

   if (!screen->get_param(screen, PIPE_CAP_GRAPHICS) &&
       !screen->get_param(screen, PIPE_CAP_COMPUTE))
      flags |= PIPE_CONTEXT_MEDIA_ONLY;
   else if (!screen->get_param(screen, PIPE_CAP_GRAPHICS))
      flags |= PIPE_CONTEXT_COMPUTE_ONLY;

   return screen->context_create(screen, NULL, flags);
}

static inline unsigned
util_res_sample_count(const struct pipe_resource *res)
{
   return res->nr_samples > 0 ? res->nr_samples : 1;
}

static inline void
util_set_vertex_buffers(struct pipe_context *pipe,
                        unsigned num_buffers, bool take_ownership,
                        const struct pipe_vertex_buffer *buffers)
{
   /* set_vertex_buffers requires that reference counts are incremented
    * by the caller.
    */
   if (!take_ownership) {
      for (unsigned i = 0; i < num_buffers; i++) {
         if (!buffers[i].is_user_buffer && buffers[i].buffer.resource)
            p_atomic_inc(&buffers[i].buffer.resource->reference.count);
      }
   }

   pipe->set_vertex_buffers(pipe, num_buffers, buffers);
}
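
/* Usage sketch (illustrative; `pipe` and `vbo` are assumed names): bind one
 * vertex buffer without giving up the caller's reference.
 *
 *    struct pipe_vertex_buffer vb = {0};
 *    vb.buffer.resource = vbo;
 *    vb.buffer_offset = 0;
 *    util_set_vertex_buffers(pipe, 1, false, &vb);  // helper adds a ref
 */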

#ifdef __cplusplus
}
#endif

#endif /* U_INLINES_H */