xref: /aosp_15_r20/external/mesa3d/src/gallium/include/pipe/p_context.h (revision 6104692788411f58d303aa86923a9ff6ecaded22)
1 /**************************************************************************
2  *
3  * Copyright 2007 VMware, Inc.
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21  * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 
28 #ifndef PIPE_CONTEXT_H
29 #define PIPE_CONTEXT_H
30 
31 #include "util/compiler.h"
32 #include "util/format/u_formats.h"
33 #include "p_video_enums.h"
34 #include "p_defines.h"
35 #include "util/u_debug.h"
36 #include <stdio.h>
37 #include "frontend/winsys_handle.h"
38 
39 #ifdef __cplusplus
40 extern "C" {
41 #endif
42 
43 
44 struct pipe_blend_color;
45 struct pipe_blend_state;
46 struct pipe_blit_info;
47 struct pipe_box;
48 struct pipe_clip_state;
49 struct pipe_compute_state_object_info;
50 struct pipe_constant_buffer;
51 struct pipe_depth_stencil_alpha_state;
52 struct pipe_device_reset_callback;
53 struct pipe_draw_info;
54 struct pipe_draw_indirect_info;
55 struct pipe_draw_start_count_bias;
56 struct pipe_draw_vertex_state_info;
57 struct pipe_grid_info;
58 struct pipe_fence_handle;
59 struct pipe_framebuffer_state;
60 struct pipe_image_view;
61 struct pipe_query;
62 struct pipe_poly_stipple;
63 struct pipe_rasterizer_state;
64 struct pipe_resolve_info;
65 struct pipe_resource;
66 struct pipe_sampler_state;
67 struct pipe_sampler_view;
68 struct pipe_scissor_state;
69 struct pipe_shader_buffer;
70 struct pipe_shader_state;
71 struct pipe_stencil_ref;
72 struct pipe_stream_output_target;
73 struct pipe_surface;
74 struct pipe_transfer;
75 struct pipe_vertex_buffer;
76 struct pipe_vertex_element;
77 struct pipe_vertex_state;
78 struct pipe_video_buffer;
79 struct pipe_video_codec;
80 struct pipe_viewport_state;
81 struct pipe_compute_state;
82 struct pipe_ml_operation;
83 struct pipe_tensor;
84 union pipe_color_union;
85 union pipe_query_result;
86 struct u_log_context;
87 struct u_upload_mgr;
88 struct util_debug_callback;
89 struct u_vbuf;
90 struct pipe_context;
91 
/**
 * Function-pointer type of pipe_context::draw_vbo (multi draw).
 *
 * See the draw_vbo member documentation in struct pipe_context below for
 * the full contract. Parameters mirror that description:
 * \param pipe          context
 * \param info          draw info
 * \param drawid_offset offset to add for the drawid param of each draw
 * \param indirect      indirect multi draws
 * \param draws         array of (start, count) pairs for direct draws
 * \param num_draws     number of direct draws; 1 for indirect multi draws
 */
92 typedef void (*pipe_draw_func)(struct pipe_context *pipe,
93                                const struct pipe_draw_info *info,
94                                unsigned drawid_offset,
95                                const struct pipe_draw_indirect_info *indirect,
96                                const struct pipe_draw_start_count_bias *draws,
97                                unsigned num_draws);
98 
99 /**
100  * Gallium rendering context.  Basically:
101  *  - state setting functions
102  *  - VBO drawing functions
103  *  - surface functions
104  */
105 struct pipe_context {
106    struct pipe_screen *screen;
107 
108    void *priv;  /**< context private data (for DRI for example) */
109    void *draw;  /**< private, for draw module (temporary?) */
110    struct u_vbuf *vbuf; /**< for cso_context, don't use in drivers */
111 
112    /**
113     * Stream uploaders created by the driver. All drivers, gallium frontends, and
114     * modules should use them.
115     *
116     * Use u_upload_alloc or u_upload_data as many times as you want.
117     * Once you are done, use u_upload_unmap.
118     */
119    struct u_upload_mgr *stream_uploader; /* everything but shader constants */
120    struct u_upload_mgr *const_uploader;  /* shader constants only */
121 
122    /**
123     * Debug callback set by u_default_set_debug_callback. Frontends should use
124     * set_debug_callback in case drivers need to flush compiler queues.
125     */
126    struct util_debug_callback debug;
127 
128    void (*destroy)(struct pipe_context *);
129 
130    /**
131     * VBO drawing
132     */
133    /*@{*/
134    /**
135     * Multi draw.
136     *
137     * For indirect multi draws, num_draws is 1 and indirect->draw_count
138     * is used instead.
139     *
140     * Caps:
141     * - Always supported: Direct multi draws
142     * - PIPE_CAP_MULTI_DRAW_INDIRECT: Indirect multi draws
143     * - PIPE_CAP_MULTI_DRAW_INDIRECT_PARAMS: Indirect draw count
144     *
145     * Differences against glMultiDraw and glMultiMode:
146     * - "info->mode" and "draws->index_bias" are always constant due to the lack
147     *   of hardware support and CPU performance concerns. Only start and count
148     *   vary.
149     * - if "info->increment_draw_id" is false, draw_id doesn't change between
150     *   draws
151     *
152     * Direct multi draws are also generated by u_threaded_context, which looks
153     * ahead in gallium command buffers and merges single draws.
154     *
155     * \param pipe          context
156     * \param info          draw info
157     * \param drawid_offset offset to add for drawid param of each draw
158     * \param indirect      indirect multi draws
159     * \param draws         array of (start, count) pairs for direct draws
160     * \param num_draws     number of direct draws; 1 for indirect multi draws
161     */
162    pipe_draw_func draw_vbo;
163 
164    /**
165     * Multi draw for display lists.
166     *
167     * For more information, see pipe_vertex_state and
168     * pipe_draw_vertex_state_info.
169     *
170     * Explanation of partial_vertex_mask:
171     *
172     * 1. pipe_vertex_state::input::elements have a monotonic logical index
173     *    determined by pipe_vertex_state::input::full_velem_mask, specifically,
174     *    the position of the i-th bit set is the logical index of the i-th
175     *    vertex element, up to 31.
176     *
177     * 2. pipe_vertex_state::input::partial_velem_mask is a subset of
178     *    full_velem_mask where the bits set determine which vertex elements
179     *    should be bound contiguously. The vertex elements corresponding to
180     *    the bits not set in partial_velem_mask should be ignored.
181     *
182     * Those two allow creating pipe_vertex_state that has more vertex
183     * attributes than the vertex shader has inputs. The idea is that
184     * pipe_vertex_state can be used with any vertex shader that has the same
185     * number of inputs and same logical indices or less. This may sound like
186     * an overly complicated way to bind a subset of vertex elements, but it
187     * actually simplifies everything else:
188     *
189     * - In st/mesa, full_velem_mask is exactly the mask of enabled vertex
190     *   attributes (VERT_ATTRIB_x) in the display list VAO, while
191     *   partial_velem_mask is exactly the inputs_read mask of the vertex
192     *   shader (also VERT_ATTRIB_x).
193     *
194     * - In the driver, some bit ops and popcnt is needed to assemble vertex
195     *   elements very quickly.
196     */
197    void (*draw_vertex_state)(struct pipe_context *ctx,
198                              struct pipe_vertex_state *state,
199                              uint32_t partial_velem_mask,
200                              struct pipe_draw_vertex_state_info info,
201                              const struct pipe_draw_start_count_bias *draws,
202                              unsigned num_draws);
203    /*@}*/
204 
205    /**
206     * Predicate subsequent rendering on occlusion query result
207     * \param query  the query predicate, or NULL if no predicate
208     * \param condition whether to skip on FALSE or TRUE query results
209     * \param mode  one of PIPE_RENDER_COND_x
210     */
211    void (*render_condition)(struct pipe_context *pipe,
212                             struct pipe_query *query,
213                             bool condition,
214                             enum pipe_render_cond_flag mode);
215 
216    /**
217     * Predicate subsequent rendering on a value in a buffer
218     * \param buffer The buffer to query for the value
219     * \param offset Offset in the buffer to query 32-bit
220     * \param condition whether to skip on FALSE or TRUE query results
221     */
222    void (*render_condition_mem)(struct pipe_context *pipe,
223                                 struct pipe_resource *buffer,
224                                 uint32_t offset,
225                                 bool condition);
226    /**
227     * Query objects
228     */
229    /*@{*/
230    struct pipe_query *(*create_query)(struct pipe_context *pipe,
231                                       unsigned query_type,
232                                       unsigned index);
233 
234    /**
235     * Create a query object that queries all given query types simultaneously.
236     *
237     * This can only be used for those query types for which
238     * get_driver_query_info indicates that it must be used. Only one batch
239     * query object may be active at a time.
240     *
241     * There may be additional constraints on which query types can be used
242     * together, in particular those that are implied by
243     * get_driver_query_group_info.
244     *
245     * \param num_queries the number of query types
246     * \param query_types array of \p num_queries query types
247     * \return a query object, or NULL on error.
248     */
249    struct pipe_query *(*create_batch_query)(struct pipe_context *pipe,
250                                             unsigned num_queries,
251                                             unsigned *query_types);
252 
253    void (*destroy_query)(struct pipe_context *pipe,
254                          struct pipe_query *q);
255 
256    bool (*begin_query)(struct pipe_context *pipe, struct pipe_query *q);
257    bool (*end_query)(struct pipe_context *pipe, struct pipe_query *q);
258 
259    /**
260     * Get results of a query.
261     * \param wait  if true, this query will block until the result is ready
262     * \return TRUE if results are ready, FALSE otherwise
263     */
264    bool (*get_query_result)(struct pipe_context *pipe,
265                             struct pipe_query *q,
266                             bool wait,
267                             union pipe_query_result *result);
268 
269    /**
270     * Get results of a query, storing into resource. Note that this may not
271     * be used with batch queries.
272     *
273     * \param wait  if true, this query will block until the result is ready
274     * \param result_type  the type of the value being stored:
275     * \param index  for queries that return multiple pieces of data, which
276     *               item of that data to store (e.g. for
277     *               PIPE_QUERY_PIPELINE_STATISTICS).
278     *               When the index is -1, instead of the value of the query
279     *               the driver should instead write a 1 or 0 to the appropriate
280     *               location with 1 meaning that the query result is available.
281     */
282    void (*get_query_result_resource)(struct pipe_context *pipe,
283                                      struct pipe_query *q,
284                                      enum pipe_query_flags flags,
285                                      enum pipe_query_value_type result_type,
286                                      int index,
287                                      struct pipe_resource *resource,
288                                      unsigned offset);
289 
290    /**
291     * Set whether all current non-driver queries except TIME_ELAPSED are
292     * active or paused.
293     */
294    void (*set_active_query_state)(struct pipe_context *pipe, bool enable);
295 
296    /**
297     * INTEL Performance Query
298     */
299    /*@{*/
300 
301    unsigned (*init_intel_perf_query_info)(struct pipe_context *pipe);
302 
303    void (*get_intel_perf_query_info)(struct pipe_context *pipe,
304                                      unsigned query_index,
305                                      const char **name,
306                                      uint32_t *data_size,
307                                      uint32_t *n_counters,
308                                      uint32_t *n_active);
309 
310    void (*get_intel_perf_query_counter_info)(struct pipe_context *pipe,
311                                              unsigned query_index,
312                                              unsigned counter_index,
313                                              const char **name,
314                                              const char **desc,
315                                              uint32_t *offset,
316                                              uint32_t *data_size,
317                                              uint32_t *type_enum,
318                                              uint32_t *data_type_enum,
319                                              uint64_t *raw_max);
320 
321    struct pipe_query *(*new_intel_perf_query_obj)(struct pipe_context *pipe,
322                                                  unsigned query_index);
323 
324    bool (*begin_intel_perf_query)(struct pipe_context *pipe, struct pipe_query *q);
325 
326    void (*end_intel_perf_query)(struct pipe_context *pipe, struct pipe_query *q);
327 
328    void (*delete_intel_perf_query)(struct pipe_context *pipe, struct pipe_query *q);
329 
330    void (*wait_intel_perf_query)(struct pipe_context *pipe, struct pipe_query *q);
331 
332    bool (*is_intel_perf_query_ready)(struct pipe_context *pipe, struct pipe_query *q);
333 
334    bool (*get_intel_perf_query_data)(struct pipe_context *pipe,
335                                      struct pipe_query *q,
336                                      size_t data_size,
337                                      uint32_t *data,
338                                      uint32_t *bytes_written);
339 
340    /*@}*/
341 
342    /**
343     * \name GLSL shader/program functions.
344     */
345    /*@{*/
346    /**
347     * Called when a shader program is linked.
348     * \param handles  Array of shader handles attached to this program.
349     *                 The size of the array is \c PIPE_SHADER_TYPES, and each
350     *                 position contains the corresponding \c pipe_shader_state*
351     *                 or \c pipe_compute_state*, or \c NULL.
352     *                 E.g. You can retrieve the fragment shader handle with
353     *                      \c handles[PIPE_SHADER_FRAGMENT]
354     */
355    void (*link_shader)(struct pipe_context *, void** handles);
356    /*@}*/
357 
358    /**
359     * State functions (create/bind/destroy state objects)
360     */
361    /*@{*/
362    void * (*create_blend_state)(struct pipe_context *,
363                                 const struct pipe_blend_state *);
364    void   (*bind_blend_state)(struct pipe_context *, void *);
365    void   (*delete_blend_state)(struct pipe_context *, void  *);
366 
367    void * (*create_sampler_state)(struct pipe_context *,
368                                   const struct pipe_sampler_state *);
369    void   (*bind_sampler_states)(struct pipe_context *,
370                                  enum pipe_shader_type shader,
371                                  unsigned start_slot, unsigned num_samplers,
372                                  void **samplers);
373    void   (*delete_sampler_state)(struct pipe_context *, void *);
374 
375    void * (*create_rasterizer_state)(struct pipe_context *,
376                                      const struct pipe_rasterizer_state *);
377    void   (*bind_rasterizer_state)(struct pipe_context *, void *);
378    void   (*delete_rasterizer_state)(struct pipe_context *, void *);
379 
380    void * (*create_depth_stencil_alpha_state)(struct pipe_context *,
381                                         const struct pipe_depth_stencil_alpha_state *);
382    void   (*bind_depth_stencil_alpha_state)(struct pipe_context *, void *);
383    void   (*delete_depth_stencil_alpha_state)(struct pipe_context *, void *);
384 
385    void * (*create_fs_state)(struct pipe_context *,
386                              const struct pipe_shader_state *);
387    void   (*bind_fs_state)(struct pipe_context *, void *);
388    void   (*delete_fs_state)(struct pipe_context *, void *);
389 
390    void * (*create_vs_state)(struct pipe_context *,
391                              const struct pipe_shader_state *);
392    void   (*bind_vs_state)(struct pipe_context *, void *);
393    void   (*delete_vs_state)(struct pipe_context *, void *);
394 
395    void * (*create_gs_state)(struct pipe_context *,
396                              const struct pipe_shader_state *);
397    void   (*bind_gs_state)(struct pipe_context *, void *);
398    void   (*delete_gs_state)(struct pipe_context *, void *);
399 
400    void * (*create_tcs_state)(struct pipe_context *,
401                               const struct pipe_shader_state *);
402    void   (*bind_tcs_state)(struct pipe_context *, void *);
403    void   (*delete_tcs_state)(struct pipe_context *, void *);
404 
405    void * (*create_tes_state)(struct pipe_context *,
406                               const struct pipe_shader_state *);
407    void   (*bind_tes_state)(struct pipe_context *, void *);
408    void   (*delete_tes_state)(struct pipe_context *, void *);
409 
410    void * (*create_vertex_elements_state)(struct pipe_context *,
411                                           unsigned num_elements,
412                                           const struct pipe_vertex_element *);
413    /**
414     * Bind vertex elements state.
415     *
416     * Frontends MUST call set_vertex_buffers after bind_vertex_elements_state
417     * and before the next draw. This ensures the driver can apply the state
418     * change before the next draw. Drivers MAY use this constraint to merge
419     * vertex elements and vertex buffers in set_vertex_buffers instead of
420     * in draw_vbo.
421     */
422    void   (*bind_vertex_elements_state)(struct pipe_context *, void *);
423    void   (*delete_vertex_elements_state)(struct pipe_context *, void *);
424 
425    void * (*create_ts_state)(struct pipe_context *,
426                              const struct pipe_shader_state *);
427    void   (*bind_ts_state)(struct pipe_context *, void *);
428    void   (*delete_ts_state)(struct pipe_context *, void *);
429 
430    void * (*create_ms_state)(struct pipe_context *,
431                              const struct pipe_shader_state *);
432    void   (*bind_ms_state)(struct pipe_context *, void *);
433    void   (*delete_ms_state)(struct pipe_context *, void *);
434    /*@}*/
435 
436    /**
437     * Parameter-like state (or properties)
438     */
439    /*@{*/
440    void (*set_blend_color)(struct pipe_context *,
441                            const struct pipe_blend_color *);
442 
443    void (*set_stencil_ref)(struct pipe_context *,
444                            const struct pipe_stencil_ref ref);
445 
446    void (*set_sample_mask)(struct pipe_context *,
447                            unsigned sample_mask);
448 
449    void (*set_min_samples)(struct pipe_context *,
450                            unsigned min_samples);
451 
452    void (*set_clip_state)(struct pipe_context *,
453                           const struct pipe_clip_state *);
454 
455    /**
456     * Set constant buffer
457     *
458     * \param shader           Shader stage
459     * \param index            Buffer binding slot index within a shader stage
460     * \param take_ownership   The callee takes ownership of the buffer reference.
461     *                         (the callee shouldn't increment the ref count)
462     * \param buf              Constant buffer parameters
463     */
464    void (*set_constant_buffer)(struct pipe_context *,
465                                enum pipe_shader_type shader, uint index,
466                                bool take_ownership,
467                                const struct pipe_constant_buffer *buf);
468 
469    /**
470     * Set inlinable constants for constant buffer 0.
471     *
472     * These are constants that the driver would like to inline in the IR
473     * of the current shader and recompile it. Drivers can determine which
474     * constants they prefer to inline in finalize_nir and store that
475     * information in shader_info::*inlinable_uniform*. When the state tracker
476     * or frontend uploads constants to a constant buffer, it can pass
477     * inlinable constants separately via this call.
478     *
479     * Any set_constant_buffer call invalidates this state, so this function
480     * must be called after it. Binding a shader also invalidates this state.
481     *
482     * There is no PIPE_CAP for this. Drivers shouldn't set the shader_info
483     * fields if they don't want this or if they don't implement this.
484     */
485    void (*set_inlinable_constants)(struct pipe_context *,
486                                    enum pipe_shader_type shader,
487                                    uint num_values, uint32_t *values);
488 
489    void (*set_framebuffer_state)(struct pipe_context *,
490                                  const struct pipe_framebuffer_state *);
491 
492    /**
493     * Set the sample locations used during rasterization. When NULL or sized
494     * zero, the default locations are used.
495     *
496     * Note that get_sample_position() still returns the default locations.
497     *
498     * The samples are accessed with
499     * locations[(pixel_y*grid_w+pixel_x)*ms+i],
500     * where:
501     * ms      = the sample count
502     * grid_w  = the pixel grid width for the sample count
503     * grid_h  = the pixel grid height for the sample count
504     * pixel_x = the window x coordinate modulo grid_w
505     * pixel_y = the window y coordinate modulo grid_h
506     * i       = the sample index
507     * This gives a result with the x coordinate as the low 4 bits and the y
508     * coordinate as the high 4 bits. For each coordinate 0 is the left or top
509     * edge of the pixel's rectangle and 16 (not 15) is the right or bottom edge.
510     *
511     * Out of bounds accesses return undefined values.
512     *
513     * The pixel grid is used to vary sample locations across pixels and its
514     * size can be queried with get_sample_pixel_grid().
515     */
516    void (*set_sample_locations)(struct pipe_context *,
517                                 size_t size, const uint8_t *locations);
518 
519    void (*set_polygon_stipple)(struct pipe_context *,
520                                const struct pipe_poly_stipple *);
521 
522    void (*set_scissor_states)(struct pipe_context *,
523                               unsigned start_slot,
524                               unsigned num_scissors,
525                               const struct pipe_scissor_state *);
526 
527    void (*set_window_rectangles)(struct pipe_context *,
528                                  bool include,
529                                  unsigned num_rectangles,
530                                  const struct pipe_scissor_state *);
531 
532    void (*set_viewport_states)(struct pipe_context *,
533                                unsigned start_slot,
534                                unsigned num_viewports,
535                                const struct pipe_viewport_state *);
536 
537    void (*set_sampler_views)(struct pipe_context *,
538                              enum pipe_shader_type shader,
539                              unsigned start_slot, unsigned num_views,
540                              unsigned unbind_num_trailing_slots,
541                              bool take_ownership,
542                              struct pipe_sampler_view **views);
543 
544    void (*set_tess_state)(struct pipe_context *,
545                           const float default_outer_level[4],
546                           const float default_inner_level[2]);
547 
548    /**
549     * Set the number of vertices per input patch for tessellation.
550     */
551    void (*set_patch_vertices)(struct pipe_context *ctx, uint8_t patch_vertices);
552 
553    /**
554     * Sets the debug callback. If the pointer is null, then no callback is
555     * set, otherwise a copy of the data should be made.
556     */
557    void (*set_debug_callback)(struct pipe_context *,
558                               const struct util_debug_callback *);
559 
560    /**
561     * Bind an array of shader buffers that will be used by a shader.
562     * Any buffers that were previously bound to the specified range
563     * will be unbound.
564     *
565     * \param shader     selects shader stage
566     * \param start_slot first buffer slot to bind.
567     * \param count      number of consecutive buffers to bind.
568     * \param buffers    array of pointers to the buffers to bind, it
569     *                   should contain at least \a count elements
570     *                   unless it's NULL, in which case no buffers will
571     *                   be bound.
572     * \param writable_bitmask  If bit i is not set, buffers[i] will only be
573     *                          used with loads. If unsure, set to ~0.
574     */
575    void (*set_shader_buffers)(struct pipe_context *,
576                               enum pipe_shader_type shader,
577                               unsigned start_slot, unsigned count,
578                               const struct pipe_shader_buffer *buffers,
579                               unsigned writable_bitmask);
580 
581    /**
582     * Bind an array of hw atomic buffers for use by all shaders.
583     * Any buffers that were previously bound to the specified range
584     * will be unbound.
585     *
586     * \param start_slot first buffer slot to bind.
587     * \param count      number of consecutive buffers to bind.
588     * \param buffers    array of pointers to the buffers to bind, it
589     *                   should contain at least \a count elements
590     *                   unless it's NULL, in which case no buffers will
591     *                   be bound.
592     */
593    void (*set_hw_atomic_buffers)(struct pipe_context *,
594                                  unsigned start_slot, unsigned count,
595                                  const struct pipe_shader_buffer *buffers);
596 
597    /**
598     * Bind an array of images that will be used by a shader.
599     * Any images that were previously bound to the specified range
600     * will be unbound.
601     *
602     * \param shader     selects shader stage
603     * \param start_slot first image slot to bind.
604     * \param count      number of consecutive images to bind.
605     * \param unbind_num_trailing_slots  number of images to unbind after
606     *                                   the bound slot
607     * \param buffers    array of the images to bind, it
608     *                   should contain at least \a count elements
609     *                   unless it's NULL, in which case no images will
610     *                   be bound.
611     */
612    void (*set_shader_images)(struct pipe_context *,
613                              enum pipe_shader_type shader,
614                              unsigned start_slot, unsigned count,
615                              unsigned unbind_num_trailing_slots,
616                              const struct pipe_image_view *images);
617 
618    /**
619     * Bind an array of vertex buffers to the specified slots.
620     *
621     * Unlike other set functions, the caller should always increment
622     * the buffer reference counts because the driver should only copy
623     * the pipe_resource pointers. This is the same behavior as setting
624     * take_ownership = true in other functions.
625     *
626     * count must be equal to the maximum used vertex buffer index + 1
627     * in vertex elements or 0.
628     *
629     * \param count           number of consecutive vertex buffers to bind.
630     * \param buffers         array of the buffers to bind
631     */
632    void (*set_vertex_buffers)(struct pipe_context *,
633                               unsigned count,
634                               const struct pipe_vertex_buffer *);
635 
636    /*@}*/
637 
638    /**
639     * Stream output functions.
640     */
641    /*@{*/
642 
643    struct pipe_stream_output_target *(*create_stream_output_target)(
644                         struct pipe_context *,
645                         struct pipe_resource *,
646                         unsigned buffer_offset,
647                         unsigned buffer_size);
648 
649    void (*stream_output_target_destroy)(struct pipe_context *,
650                                         struct pipe_stream_output_target *);
651 
652    void (*set_stream_output_targets)(struct pipe_context *,
653                               unsigned num_targets,
654                               struct pipe_stream_output_target **targets,
655                               const unsigned *offsets);
656 
657    uint32_t (*stream_output_target_offset)(struct pipe_stream_output_target *target);
658 
659    /*@}*/
660 
661 
662    /**
663     * INTEL_blackhole_render
664     */
665    /*@{*/
666 
667    void (*set_frontend_noop)(struct pipe_context *,
668                              bool enable);
669 
670    /*@}*/
671 
672 
673    /**
674     * Resource functions for blit-like functionality
675     *
676     * If a driver supports multisampling, blit must implement color resolve.
677     */
678    /*@{*/
679 
680    /**
681     * Copy a block of pixels from one resource to another.
682     * The resource must be of the same format.
683     * Resources with nr_samples > 1 are not allowed.
684     */
685    void (*resource_copy_region)(struct pipe_context *pipe,
686                                 struct pipe_resource *dst,
687                                 unsigned dst_level,
688                                 unsigned dstx, unsigned dsty, unsigned dstz,
689                                 struct pipe_resource *src,
690                                 unsigned src_level,
691                                 const struct pipe_box *src_box);
692 
693    /* Optimal hardware path for blitting pixels.
694     * Scaling, format conversion, up- and downsampling (resolve) are allowed.
695     */
696    void (*blit)(struct pipe_context *pipe,
697                 const struct pipe_blit_info *info);
698 
699    /*@}*/
700 
701    /**
702     * Clear the specified set of currently bound buffers to specified values.
703     * The entire buffers are cleared (no scissor, no colormask, etc).
704     *
705     * \param buffers  bitfield of PIPE_CLEAR_* values.
706     * \param scissor_state  the scissored region to clear
707     * \param color  pointer to a union of fiu array for each of r, g, b, a.
708     * \param depth  depth clear value in [0,1].
709     * \param stencil  stencil clear value
710     */
711    void (*clear)(struct pipe_context *pipe,
712                  unsigned buffers,
713                  const struct pipe_scissor_state *scissor_state,
714                  const union pipe_color_union *color,
715                  double depth,
716                  unsigned stencil);
717 
718    /**
719     * Clear a color rendertarget surface.
720     * \param color  pointer to a union of fiu array for each of r, g, b, a.
721     */
722    void (*clear_render_target)(struct pipe_context *pipe,
723                                struct pipe_surface *dst,
724                                const union pipe_color_union *color,
725                                unsigned dstx, unsigned dsty,
726                                unsigned width, unsigned height,
727                                bool render_condition_enabled);
728 
729    /**
730     * Clear a depth-stencil surface.
731     * \param clear_flags  bitfield of PIPE_CLEAR_DEPTH/STENCIL values.
732     * \param depth  depth clear value in [0,1].
733     * \param stencil  stencil clear value
734     */
735    void (*clear_depth_stencil)(struct pipe_context *pipe,
736                                struct pipe_surface *dst,
737                                unsigned clear_flags,
738                                double depth,
739                                unsigned stencil,
740                                unsigned dstx, unsigned dsty,
741                                unsigned width, unsigned height,
742                                bool render_condition_enabled);
743 
744    /**
745     * Clear the texture with the specified texel. Not guaranteed to be a
746     * renderable format. Data provided in the resource's format.
747     */
748    void (*clear_texture)(struct pipe_context *pipe,
749                          struct pipe_resource *res,
750                          unsigned level,
751                          const struct pipe_box *box,
752                          const void *data);
753 
754    /**
755     * Clear a buffer. Runs a memset over the specified region with the element
756     * value passed in through clear_value of size clear_value_size.
757     */
758    void (*clear_buffer)(struct pipe_context *pipe,
759                         struct pipe_resource *res,
760                         unsigned offset,
761                         unsigned size,
762                         const void *clear_value,
763                         int clear_value_size);
764 
765    /**
766     * If a depth buffer is rendered with different sample location state than
767     * what is current at the time of reading, the values may differ because
768     * depth buffer compression can depend on the sample locations.
769     *
770     * This function is a hint to decompress the current depth buffer to avoid
771     * such problems.
772     */
773    void (*evaluate_depth_buffer)(struct pipe_context *pipe);
774 
775    /**
776     * Flush draw commands.
777     *
778     * This guarantees that the new fence (if any) will finish in finite time,
779     * unless PIPE_FLUSH_DEFERRED is used.
780     *
781     * Subsequent operations on other contexts of the same screen are guaranteed
782     * to execute after the flushed commands, unless PIPE_FLUSH_ASYNC is used.
783     *
784     * NOTE: use screen->fence_reference() (or equivalent) to transfer
785     * new fence ref to **fence, to ensure that previous fence is unref'd
786     *
787     * \param fence  if not NULL, an old fence to unref and transfer a
788     *    new fence reference to
789     * \param flags  bitfield of enum pipe_flush_flags values.
790     */
791    void (*flush)(struct pipe_context *pipe,
792                  struct pipe_fence_handle **fence,
793                  unsigned flags);
794 
795    /**
796     * Create a fence from a fd.
797     *
798     * This is used for importing a foreign/external fence fd.
799     *
800     * \param fence  if not NULL, an old fence to unref and transfer a
801     *    new fence reference to
802     * \param fd     fd representing the fence object
803     * \param type   indicates which fence type backs fd
804     */
805    void (*create_fence_fd)(struct pipe_context *pipe,
806                            struct pipe_fence_handle **fence,
807                            int fd,
808                            enum pipe_fd_type type);
809 
810    /**
811     * Insert commands to have GPU wait for fence to be signaled.
812     */
813    void (*fence_server_sync)(struct pipe_context *pipe,
814                              struct pipe_fence_handle *fence);
815 
816    /**
817     * Insert commands to have the GPU signal a fence.
818     */
819    void (*fence_server_signal)(struct pipe_context *pipe,
820                                struct pipe_fence_handle *fence);
821 
822    /**
823     * Create a view on a texture to be used by a shader stage.
824     */
825    struct pipe_sampler_view * (*create_sampler_view)(struct pipe_context *ctx,
826                                                      struct pipe_resource *texture,
827                                                      const struct pipe_sampler_view *templat);
828 
829    /**
830     * Destroy a view on a texture.
831     *
832     * \param ctx the current context
833     * \param view the view to be destroyed
834     *
835     * \note The current context may not be the context in which the view was
836     *       created (view->context). However, the caller must guarantee that
837     *       the context which created the view is still alive.
838     */
839    void (*sampler_view_destroy)(struct pipe_context *ctx,
840                                 struct pipe_sampler_view *view);
841 
842 
843    /**
844     * Get a surface which is a "view" into a resource, used by
845     * render target / depth stencil stages.
846     */
847    struct pipe_surface *(*create_surface)(struct pipe_context *ctx,
848                                           struct pipe_resource *resource,
849                                           const struct pipe_surface *templat);
850 
851    void (*surface_destroy)(struct pipe_context *ctx,
852                            struct pipe_surface *);
853 
854 
855    /**
856     * Map a resource.
857     *
858     * Transfers are (by default) context-private and allow uploads to be
859     * interleaved with rendering.
860     *
861     * out_transfer will contain the transfer object that must be passed
862     * to all the other transfer functions. It also contains useful
863     * information (like texture strides for texture_map).
864     */
865    void *(*buffer_map)(struct pipe_context *,
866                        struct pipe_resource *resource,
867                        unsigned level,
868                        unsigned usage,  /* a combination of PIPE_MAP_x */
869                        const struct pipe_box *,
870                        struct pipe_transfer **out_transfer);
871 
872    /* If transfer was created with WRITE|FLUSH_EXPLICIT, only the
873     * regions specified with this call are guaranteed to be written to
874     * the resource.
875     */
876    void (*transfer_flush_region)(struct pipe_context *,
877                                  struct pipe_transfer *transfer,
878                                  const struct pipe_box *);
879 
880    void (*buffer_unmap)(struct pipe_context *,
881                         struct pipe_transfer *transfer);
882 
883    void *(*texture_map)(struct pipe_context *,
884                         struct pipe_resource *resource,
885                         unsigned level,
886                         unsigned usage,  /* a combination of PIPE_MAP_x */
887                         const struct pipe_box *,
888                         struct pipe_transfer **out_transfer);
889 
890    void (*texture_unmap)(struct pipe_context *,
891                          struct pipe_transfer *transfer);
892 
893    /* One-shot transfer operation with data supplied in a user
894     * pointer.
895     */
896    void (*buffer_subdata)(struct pipe_context *,
897                           struct pipe_resource *,
898                           unsigned usage, /* a combination of PIPE_MAP_x */
899                           unsigned offset,
900                           unsigned size,
901                           const void *data);
902 
903    void (*texture_subdata)(struct pipe_context *,
904                            struct pipe_resource *,
905                            unsigned level,
906                            unsigned usage, /* a combination of PIPE_MAP_x */
907                            const struct pipe_box *,
908                            const void *data,
909                            unsigned stride,
910                            uintptr_t layer_stride);
911 
912    /**
913     * Flush any pending framebuffer writes and invalidate texture caches.
914     */
915    void (*texture_barrier)(struct pipe_context *, unsigned flags);
916 
917    /**
918     * Flush caches according to flags.
919     */
920    void (*memory_barrier)(struct pipe_context *, unsigned flags);
921 
922    /**
923     * Change the commitment status of a part of the given resource, which must
924     * have been created with the PIPE_RESOURCE_FLAG_SPARSE bit.
925     *
926     * \param level The texture level whose commitment should be changed.
927     * \param box The region of the resource whose commitment should be changed.
928     * \param commit Whether memory should be committed or un-committed.
929     *
930     * \return false if out of memory, true on success.
931     */
932    bool (*resource_commit)(struct pipe_context *, struct pipe_resource *,
933                            unsigned level, struct pipe_box *box, bool commit);
934 
935    /**
936     * Creates a video codec for a specific video format/profile
937     */
938    struct pipe_video_codec *(*create_video_codec)(struct pipe_context *context,
939                                                   const struct pipe_video_codec *templat);
940 
941    /**
942     * Creates a video buffer as decoding target
943     */
944    struct pipe_video_buffer *(*create_video_buffer)(struct pipe_context *context,
945                                                     const struct pipe_video_buffer *templat);
946 
947    /**
948     * Compute kernel execution
949     */
950    /*@{*/
951    /**
952     * Define the compute program and parameters to be used by
953     * pipe_context::launch_grid.
954     */
955    void *(*create_compute_state)(struct pipe_context *context,
956                                  const struct pipe_compute_state *);
957    void (*bind_compute_state)(struct pipe_context *, void *);
958    void (*delete_compute_state)(struct pipe_context *, void *);
959 
960    void (*get_compute_state_info)(struct pipe_context *, void *,
961                                   struct pipe_compute_state_object_info *);
962 
963    uint32_t (*get_compute_state_subgroup_size)(struct pipe_context *, void *,
964                                                const uint32_t block[3]);
965 
966    /**
967     * Bind an array of shader resources that will be used by the
968     * compute program.  Any resources that were previously bound to
969     * the specified range will be unbound after this call.
970     *
971     * \param start      first resource to bind.
972     * \param count      number of consecutive resources to bind.
973     * \param resources  array of pointers to the resources to bind, it
974     *                   should contain at least \a count elements
975     *                   unless it's NULL, in which case no new
976     *                   resources will be bound.
977     */
978    void (*set_compute_resources)(struct pipe_context *,
979                                  unsigned start, unsigned count,
980                                  struct pipe_surface **resources);
981 
982    /**
983     * Bind an array of buffers to be mapped into the address space of
984     * the GLOBAL resource.  Any buffers that were previously bound
985     * between [first, first + count - 1] are unbound after this call.
986     *
987     * \param first      first buffer to map.
988     * \param count      number of consecutive buffers to map.
989     * \param resources  array of pointers to the buffers to map, it
990     *                   should contain at least \a count elements
991     *                   unless it's NULL, in which case no new
992     *                   resources will be bound.
993     * \param handles    array of pointers to the memory locations that
994     *                   will be updated with the address each buffer
995     *                   will be mapped to.  The base memory address of
996     *                   each of the buffers will be added to the value
997     *                   pointed to by its corresponding handle to form
998     *                   the final address argument.  It should contain
999     *                   at least \a count elements, unless \a
1000     *                   resources is NULL in which case \a handles
1001     *                   should be NULL as well.
1002     *
1003     * Note that the driver isn't required to make any guarantees about
1004     * the contents of the \a handles array being valid anytime except
1005     * during the subsequent calls to pipe_context::launch_grid.  This
1006     * means that the only sensible location handles[i] may point to is
1007     * somewhere within the INPUT buffer itself.  This is so to
1008     * accommodate implementations that lack virtual memory but
1009     * nevertheless migrate buffers on the fly, leading to resource
1010     * base addresses that change on each kernel invocation or are
1011     * unknown to the pipe driver.
1012     */
1013    void (*set_global_binding)(struct pipe_context *context,
1014                               unsigned first, unsigned count,
1015                               struct pipe_resource **resources,
1016                               uint32_t **handles);
1017 
1018    /**
1019     * Launch the compute kernel starting from instruction \a pc of the
1020     * currently bound compute program.
1021     */
1022    void (*launch_grid)(struct pipe_context *context,
1023                        const struct pipe_grid_info *info);
1024 
1025    void (*draw_mesh_tasks)(struct pipe_context *context,
1026                            unsigned drawid_offset,
1027                            const struct pipe_grid_info *info);
1028    /*@}*/
1029 
1030    /**
1031     * SVM (Share Virtual Memory) helpers
1032     */
1033    /*@{*/
1034    /**
1035     * Migrate range of virtual address to device or host memory.
1036     *
1037     * \param to_device - true if the virtual memory is migrated to the device
1038     *                    false if the virtual memory is migrated to the host
1039     * \param content_undefined - whether the content of the migrated memory
1040     *                            is undefined after migration
1041     */
1042    void (*svm_migrate)(struct pipe_context *context, unsigned num_ptrs,
1043                        const void* const* ptrs, const size_t *sizes,
1044                        bool to_device, bool content_undefined);
1045    /*@}*/
1046 
1047    /**
1048     * Get the default sample position for an individual sample point.
1049     *
1050     * \param sample_count - total number of samples
1051     * \param sample_index - sample to get the position values for
1052     * \param out_value - return value of 2 floats for x and y position for
1053     *                    requested sample.
1054     */
1055    void (*get_sample_position)(struct pipe_context *context,
1056                                unsigned sample_count,
1057                                unsigned sample_index,
1058                                float *out_value);
1059 
1060    /**
1061     * Query a timestamp in nanoseconds.  This is completely equivalent to
1062     * pipe_screen::get_timestamp() but takes a context handle for drivers
1063     * that require a context.
1064     */
1065    uint64_t (*get_timestamp)(struct pipe_context *);
1066 
1067    /**
1068     * Flush the resource cache, so that the resource can be used
1069     * by an external client. Possible usage:
1070     * - flushing a resource before presenting it on the screen
1071     * - flushing a resource if some other process or device wants to use it
1072     * This shouldn't be used to flush caches if the resource is only managed
1073     * by a single pipe_screen and is not shared with another process.
1074     * (i.e. you shouldn't use it to flush caches explicitly if you want to e.g.
1075     * use the resource for texturing)
1076     */
1077    void (*flush_resource)(struct pipe_context *ctx,
1078                           struct pipe_resource *resource);
1079 
1080    /**
1081     * Invalidate the contents of the resource. This is used to
1082     *
1083     * (1) implement EGL's semantic of undefined depth/stencil
1084     * contents after a swapbuffers.  This allows a tiled renderer (for
1085     * example) to not store the depth buffer.
1086     *
1087     * (2) implement GL's InvalidateBufferData. For backwards compatibility,
1088     * you must only rely on the usability for this purpose when
1089     * PIPE_CAP_INVALIDATE_BUFFER is enabled.
1090     */
1091    void (*invalidate_resource)(struct pipe_context *ctx,
1092                                struct pipe_resource *resource);
1093 
1094    /**
1095     * Return information about unexpected device resets.
1096     */
1097    enum pipe_reset_status (*get_device_reset_status)(struct pipe_context *ctx);
1098 
1099    /**
1100     * Sets the reset status callback. If the pointer is null, then no callback
1101     * is set, otherwise a copy of the data should be made.
1102     */
1103    void (*set_device_reset_callback)(struct pipe_context *ctx,
1104                                      const struct pipe_device_reset_callback *cb);
1105 
1106    /**
1107     * Dump driver-specific debug information into a stream. This is
1108     * used by debugging tools.
1109     *
1110     * \param ctx        pipe context
1111     * \param stream     where the output should be written to
1112     * \param flags      a mask of PIPE_DUMP_* flags
1113     */
1114    void (*dump_debug_state)(struct pipe_context *ctx, FILE *stream,
1115                             unsigned flags);
1116 
1117    /**
1118     * Set the log context to which the driver should write internal debug logs
1119     * (internal states, command streams).
1120     *
1121     * The caller must ensure that the log context is destroyed and reset to
1122     * NULL before the pipe context is destroyed, and that log context functions
1123     * are only called from the driver thread.
1124     *
1125     * \param ctx pipe context
1126     * \param log logging context
1127     */
1128    void (*set_log_context)(struct pipe_context *ctx, struct u_log_context *log);
1129 
1130    /**
1131     * Emit string marker in cmdstream
1132     */
1133    void (*emit_string_marker)(struct pipe_context *ctx,
1134                               const char *string,
1135                               int len);
1136 
1137    /**
1138     * Generate mipmap.
1139     * \return TRUE if mipmap generation succeeds, FALSE otherwise
1140     */
1141    bool (*generate_mipmap)(struct pipe_context *ctx,
1142                            struct pipe_resource *resource,
1143                            enum pipe_format format,
1144                            unsigned base_level,
1145                            unsigned last_level,
1146                            unsigned first_layer,
1147                            unsigned last_layer);
1148 
1149    /**
1150     * Create a 64-bit texture handle.
1151     *
1152     * \param ctx        pipe context
1153     * \param view       pipe sampler view object
1154     * \param state      pipe sampler state template
1155     * \return           a 64-bit texture handle if success, 0 otherwise
1156     */
1157    uint64_t (*create_texture_handle)(struct pipe_context *ctx,
1158                                      struct pipe_sampler_view *view,
1159                                      const struct pipe_sampler_state *state);
1160 
1161    /**
1162     * Delete a texture handle.
1163     *
1164     * \param ctx        pipe context
1165     * \param handle     64-bit texture handle
1166     */
1167    void (*delete_texture_handle)(struct pipe_context *ctx, uint64_t handle);
1168 
1169    /**
1170     * Make a texture handle resident.
1171     *
1172     * \param ctx        pipe context
1173     * \param handle     64-bit texture handle
1174     * \param resident   TRUE for resident, FALSE otherwise
1175     */
1176    void (*make_texture_handle_resident)(struct pipe_context *ctx,
1177                                         uint64_t handle, bool resident);
1178 
1179    /**
1180     * Create a 64-bit image handle.
1181     *
1182     * \param ctx        pipe context
1183     * \param image      pipe image view template
1184     * \return           a 64-bit image handle if success, 0 otherwise
1185     */
1186    uint64_t (*create_image_handle)(struct pipe_context *ctx,
1187                                    const struct pipe_image_view *image);
1188 
1189    /**
1190     * Delete an image handle.
1191     *
1192     * \param ctx        pipe context
1193     * \param handle     64-bit image handle
1194     */
1195    void (*delete_image_handle)(struct pipe_context *ctx, uint64_t handle);
1196 
1197    /**
1198     * Make an image handle resident.
1199     *
1200     * \param ctx        pipe context
1201     * \param handle     64-bit image handle
1202     * \param access     GL_READ_ONLY, GL_WRITE_ONLY or GL_READ_WRITE
1203     * \param resident   TRUE for resident, FALSE otherwise
1204     */
1205    void (*make_image_handle_resident)(struct pipe_context *ctx, uint64_t handle,
1206                                       unsigned access, bool resident);
1207 
1208    /**
1209     * Call the given function from the driver thread.
1210     *
1211     * This is set by threaded contexts for use by debugging wrappers.
1212     *
1213     * \param asap if true, run the callback immediately if there are no pending
1214     *             commands to be processed by the driver thread
1215     */
1216    void (*callback)(struct pipe_context *ctx, void (*fn)(void *), void *data,
1217                     bool asap);
1218 
1219    /**
1220     * Set a context parameter. See enum pipe_context_param for more details.
1221     */
1222    void (*set_context_param)(struct pipe_context *ctx,
1223                              enum pipe_context_param param,
1224                              unsigned value);
1225 
1226    /**
1227     * Creates a video buffer as decoding target, with modifiers.
1228     */
1229    struct pipe_video_buffer *(*create_video_buffer_with_modifiers)(struct pipe_context *context,
1230                                                                    const struct pipe_video_buffer *templat,
1231                                                                    const uint64_t *modifiers,
1232                                                                    unsigned int modifiers_count);
1233 
1234    /**
1235     * Creates a video buffer as decoding target, from external memory
1236     */
1237    struct pipe_video_buffer *(*video_buffer_from_handle)( struct pipe_context *context,
1238                                                      const struct pipe_video_buffer *templat,
1239                                                      struct winsys_handle *handle,
1240                                                      unsigned usage );
1241 
1242    /**
1243     * Compiles a ML subgraph, to be executed later. The returned pipe_ml_subgraph
1244     * should contain all information needed to execute the subgraph with as
1245     * little effort as strictly needed.
1246     *
1247     * \param ctx         pipe context
1248     * \param operations  array containing the definitions of the operations in the graph
1249     * \param count       number of operations
1250     * \return            a newly allocated pipe_ml_subgraph
1251     */
1252    struct pipe_ml_subgraph *(*ml_subgraph_create)(struct pipe_context *context,
1253                                                   const struct pipe_ml_operation *operations,
1254                                                   unsigned count);
1255 
1256    /**
1257     * Invokes a ML subgraph for a given input tensor.
1258     *
1259     * \param ctx         pipe context
1260     * \param subgraph    previously-compiled subgraph
1261     * \param input       tensor to use as the input
1262     */
1263    void (*ml_subgraph_invoke)(struct pipe_context *context,
1264                               struct pipe_ml_subgraph *subgraph,
1265                               struct pipe_tensor *input);
1266 
1267    /**
1268     * After a ML subgraph has been invoked, copy the contents of the output
1269     * tensors to the provided buffers.
1270     *
1271     * \param ctx           pipe context
1272     * \param subgraph      previously-executed subgraph
1273     * \param outputs_count number of output tensors to copy out
1274     * \param output_idxs   array with the indices of output tensors
1275     * \param outputs       array of buffers to copy the tensor data to
1276     */
1277    void (*ml_subgraph_read_output)(struct pipe_context *context,
1278                                    struct pipe_ml_subgraph *subgraph,
1279                                    unsigned outputs_count, unsigned output_idxs[], void *outputs[]);
1280 
1281    /**
1282     * Release all resources allocated by the implementation of ml_subgraph_create
1283     *
1284     * \param ctx           pipe context
1285     * \param subgraph      subgraph to release
1286     */
1287    void (*ml_subgraph_destroy)(struct pipe_context *context,
1288                                struct pipe_ml_subgraph *subgraph);
1289 };
1290 
1291 
1292 #ifdef __cplusplus
1293 }
1294 #endif
1295 
1296 #endif /* PIPE_CONTEXT_H */
1297