xref: /aosp_15_r20/external/mesa3d/src/gallium/auxiliary/cso_cache/cso_context.c (revision 6104692788411f58d303aa86923a9ff6ecaded22)
1 /**************************************************************************
2  *
3  * Copyright 2007 VMware, Inc.
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21  * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 
28  /**
29   * @file
30   *
31   * Wrap the cso cache & hash mechanisms in a simplified
32   * pipe-driver-specific interface.
33   *
34   * @author Zack Rusin <[email protected]>
35   * @author Keith Whitwell <[email protected]>
36   */
37 
38 #include "pipe/p_state.h"
39 #include "util/u_draw.h"
40 #include "util/u_framebuffer.h"
41 #include "util/u_helpers.h"
42 #include "util/u_inlines.h"
43 #include "util/u_math.h"
44 #include "util/u_memory.h"
45 #include "util/u_vbuf.h"
46 
47 #include "cso_cache/cso_context.h"
48 #include "cso_cache/cso_cache.h"
49 #include "cso_cache/cso_hash.h"
50 #include "cso_context.h"
51 #include "driver_trace/tr_dump.h"
52 #include "util/u_threaded_context.h"
53 
/**
 * Per-shader sampler information.
 *
 * Two parallel arrays indexed by sampler slot.
 */
struct sampler_info
{
   /* Cached CSO wrappers for the bound sampler states (NULL = unbound).
    * sanitize_hash uses these to protect bound samplers from eviction.
    */
   struct cso_sampler *cso_samplers[PIPE_MAX_SAMPLERS];
   /* Driver sampler-state handles passed to bind_sampler_states;
    * presumably each is the ->data of the matching cso_samplers entry —
    * verify against cso_single_sampler (not visible in this chunk).
    */
   void *samplers[PIPE_MAX_SAMPLERS];
};
62 
63 
64 
/**
 * Private state behind the public struct cso_context.
 *
 * NOTE: field order matters.  cso_unbind_context() clears everything from
 * nr_so_targets up to (but excluding) the trailing cache member with one
 * memset based on offsetof() — do not reorder fields casually.
 */
struct cso_context_priv {
   struct cso_context base;

   /* Optional vertex-buffer translation layer (see cso_init_vbuf). */
   struct u_vbuf *vbuf;
   struct u_vbuf *vbuf_current;
   bool always_use_vbuf;          /**< u_vbuf fallback is unconditional */
   bool sampler_format;           /**< set for the freedreno border-color quirk */

   /* Capabilities probed from the screen at context creation. */
   bool has_geometry_shader;
   bool has_tessellation;
   bool has_compute_shader;
   bool has_task_mesh_shader;
   bool has_streamout;

   uint32_t max_fs_samplerviews : 16;

   unsigned saved_state;  /**< bitmask of CSO_BIT_x flags */
   unsigned saved_compute_state;  /**< bitmask of CSO_BIT_COMPUTE_x flags */

   /* Saved sampler slots (1-deep stacks) and the currently bound ones. */
   struct sampler_info fragment_samplers_saved;
   struct sampler_info compute_samplers_saved;
   struct sampler_info samplers[PIPE_SHADER_MESH_TYPES];

   /* Temporary number until cso_single_sampler_done is called.
    * It tracks the highest sampler seen in cso_single_sampler.
    */
   int max_sampler_seen;

   /* Start of the region cleared wholesale by cso_unbind_context(). */
   unsigned nr_so_targets;
   struct pipe_stream_output_target *so_targets[PIPE_MAX_SO_BUFFERS];

   unsigned nr_so_targets_saved;
   struct pipe_stream_output_target *so_targets_saved[PIPE_MAX_SO_BUFFERS];

   /** Current and saved state.
    * The saved state is used as a 1-deep stack.
    */
   void *blend, *blend_saved;
   void *depth_stencil, *depth_stencil_saved;
   void *rasterizer, *rasterizer_saved;
   void *fragment_shader, *fragment_shader_saved;
   void *vertex_shader, *vertex_shader_saved;
   void *geometry_shader, *geometry_shader_saved;
   void *tessctrl_shader, *tessctrl_shader_saved;
   void *tesseval_shader, *tesseval_shader_saved;
   void *compute_shader, *compute_shader_saved;
   void *velements, *velements_saved;
   struct pipe_query *render_condition, *render_condition_saved;
   enum pipe_render_cond_flag render_condition_mode, render_condition_mode_saved;
   bool render_condition_cond, render_condition_cond_saved;
   bool flatshade_first, flatshade_first_saved;

   struct pipe_framebuffer_state fb, fb_saved;
   struct pipe_viewport_state vp, vp_saved;
   unsigned sample_mask, sample_mask_saved;
   unsigned min_samples, min_samples_saved;
   struct pipe_stencil_ref stencil_ref, stencil_ref_saved;

   /* This should be last to keep all of the above together in memory. */
   struct cso_cache cache;
};
126 
127 
128 static inline bool
delete_cso(struct cso_context_priv * ctx,void * state,enum cso_cache_type type)129 delete_cso(struct cso_context_priv *ctx,
130            void *state, enum cso_cache_type type)
131 {
132    switch (type) {
133    case CSO_BLEND:
134       if (ctx->blend == ((struct cso_blend*)state)->data ||
135           ctx->blend_saved == ((struct cso_blend*)state)->data)
136          return false;
137       break;
138    case CSO_DEPTH_STENCIL_ALPHA:
139       if (ctx->depth_stencil == ((struct cso_depth_stencil_alpha*)state)->data ||
140           ctx->depth_stencil_saved == ((struct cso_depth_stencil_alpha*)state)->data)
141          return false;
142       break;
143    case CSO_RASTERIZER:
144       if (ctx->rasterizer == ((struct cso_rasterizer*)state)->data ||
145           ctx->rasterizer_saved == ((struct cso_rasterizer*)state)->data)
146          return false;
147       break;
148    case CSO_VELEMENTS:
149       if (ctx->velements == ((struct cso_velements*)state)->data ||
150           ctx->velements_saved == ((struct cso_velements*)state)->data)
151          return false;
152       break;
153    case CSO_SAMPLER:
154       /* nothing to do for samplers */
155       break;
156    default:
157       assert(0);
158    }
159 
160    cso_delete_state(ctx->base.pipe, state, type);
161    return true;
162 }
163 
164 
165 static inline void
sanitize_hash(struct cso_hash * hash,enum cso_cache_type type,int max_size,void * user_data)166 sanitize_hash(struct cso_hash *hash, enum cso_cache_type type,
167               int max_size, void *user_data)
168 {
169    struct cso_context_priv *ctx = (struct cso_context_priv *)user_data;
170    /* if we're approach the maximum size, remove fourth of the entries
171     * otherwise every subsequent call will go through the same */
172    const int hash_size = cso_hash_size(hash);
173    const int max_entries = (max_size > hash_size) ? max_size : hash_size;
174    int to_remove =  (max_size < max_entries) * max_entries/4;
175    struct cso_sampler **samplers_to_restore = NULL;
176    unsigned to_restore = 0;
177 
178    if (hash_size > max_size)
179       to_remove += hash_size - max_size;
180 
181    if (to_remove == 0)
182       return;
183 
184    if (type == CSO_SAMPLER) {
185       samplers_to_restore = MALLOC((PIPE_SHADER_MESH_TYPES + 2) * PIPE_MAX_SAMPLERS *
186                                    sizeof(*samplers_to_restore));
187 
188       /* Temporarily remove currently bound sampler states from the hash
189        * table, to prevent them from being deleted
190        */
191       for (int i = 0; i < PIPE_SHADER_MESH_TYPES; i++) {
192          for (int j = 0; j < PIPE_MAX_SAMPLERS; j++) {
193             struct cso_sampler *sampler = ctx->samplers[i].cso_samplers[j];
194 
195             if (sampler && cso_hash_take(hash, sampler->hash_key))
196                samplers_to_restore[to_restore++] = sampler;
197          }
198       }
199       for (int j = 0; j < PIPE_MAX_SAMPLERS; j++) {
200          struct cso_sampler *sampler = ctx->fragment_samplers_saved.cso_samplers[j];
201 
202          if (sampler && cso_hash_take(hash, sampler->hash_key))
203             samplers_to_restore[to_restore++] = sampler;
204       }
205       for (int j = 0; j < PIPE_MAX_SAMPLERS; j++) {
206          struct cso_sampler *sampler = ctx->compute_samplers_saved.cso_samplers[j];
207 
208          if (sampler && cso_hash_take(hash, sampler->hash_key))
209             samplers_to_restore[to_restore++] = sampler;
210       }
211    }
212 
213    struct cso_hash_iter iter = cso_hash_first_node(hash);
214    while (to_remove) {
215       /*remove elements until we're good */
216       /*fixme: currently we pick the nodes to remove at random*/
217       void *cso = cso_hash_iter_data(iter);
218 
219       if (!cso)
220          break;
221 
222       if (delete_cso(ctx, cso, type)) {
223          iter = cso_hash_erase(hash, iter);
224          --to_remove;
225       } else {
226          iter = cso_hash_iter_next(iter);
227       }
228    }
229 
230    if (type == CSO_SAMPLER) {
231       /* Put currently bound sampler states back into the hash table */
232       while (to_restore--) {
233          struct cso_sampler *sampler = samplers_to_restore[to_restore];
234 
235          cso_hash_insert(hash, sampler->hash_key, sampler);
236       }
237 
238       FREE(samplers_to_restore);
239    }
240 }
241 
242 
243 static void
cso_init_vbuf(struct cso_context_priv * cso,unsigned flags)244 cso_init_vbuf(struct cso_context_priv *cso, unsigned flags)
245 {
246    struct u_vbuf_caps caps;
247    bool uses_user_vertex_buffers = !(flags & CSO_NO_USER_VERTEX_BUFFERS);
248    bool needs64b = !(flags & CSO_NO_64B_VERTEX_BUFFERS);
249 
250    u_vbuf_get_caps(cso->base.pipe->screen, &caps, needs64b);
251 
252    /* Enable u_vbuf if needed. */
253    if (caps.fallback_always ||
254        (uses_user_vertex_buffers &&
255         caps.fallback_only_for_user_vbuffers)) {
256       assert(!cso->base.pipe->vbuf);
257       cso->vbuf = u_vbuf_create(cso->base.pipe, &caps);
258       cso->base.pipe->vbuf = cso->vbuf;
259       cso->always_use_vbuf = caps.fallback_always;
260       cso->vbuf_current = cso->base.pipe->vbuf =
261          caps.fallback_always ? cso->vbuf : NULL;
262    }
263 }
264 
265 static void
cso_draw_vbo_default(struct pipe_context * pipe,const struct pipe_draw_info * info,unsigned drawid_offset,const struct pipe_draw_indirect_info * indirect,const struct pipe_draw_start_count_bias * draws,unsigned num_draws)266 cso_draw_vbo_default(struct pipe_context *pipe,
267                      const struct pipe_draw_info *info,
268                      unsigned drawid_offset,
269                      const struct pipe_draw_indirect_info *indirect,
270                      const struct pipe_draw_start_count_bias *draws,
271                      unsigned num_draws)
272 {
273    if (pipe->vbuf)
274       u_vbuf_draw_vbo(pipe, info, drawid_offset, indirect, draws, num_draws);
275    else
276       pipe->draw_vbo(pipe, info, drawid_offset, indirect, draws, num_draws);
277 }
278 
/**
 * Allocate and initialize a cso_context for the given pipe context.
 *
 * \param pipe   driver pipe context; the cso context keeps a pointer to it
 * \param flags  CSO_NO_VBUF / CSO_NO_USER_VERTEX_BUFFERS /
 *               CSO_NO_64B_VERTEX_BUFFERS behavior flags
 * \return the new context, or NULL on allocation failure
 */
struct cso_context *
cso_create_context(struct pipe_context *pipe, unsigned flags)
{
   struct cso_context_priv *ctx = CALLOC_STRUCT(cso_context_priv);
   if (!ctx)
      return NULL;

   cso_cache_init(&ctx->cache, pipe);
   cso_cache_set_sanitize_callback(&ctx->cache, sanitize_hash, ctx);

   ctx->base.pipe = pipe;
   ctx->sample_mask = ~0;

   /* Optionally set up the u_vbuf vertex-buffer translation fallback. */
   if (!(flags & CSO_NO_VBUF))
      cso_init_vbuf(ctx, flags);

   /* Only drivers using u_threaded_context benefit from the direct call.
    * This is because drivers can change draw_vbo, but u_threaded_context
    * never changes it.
    */
   if (pipe->draw_vbo == tc_draw_vbo) {
      if (ctx->vbuf_current)
         ctx->base.draw_vbo = u_vbuf_draw_vbo;
      else
         ctx->base.draw_vbo = pipe->draw_vbo;
   } else if (ctx->always_use_vbuf) {
      ctx->base.draw_vbo = u_vbuf_draw_vbo;
   } else {
      /* Safe default: re-checks pipe->vbuf on every draw. */
      ctx->base.draw_vbo = cso_draw_vbo_default;
   }

   /* Enable for testing: */
   if (0) cso_set_maximum_cache_size(&ctx->cache, 4);

   /* Probe which optional shader stages and features the driver supports. */
   if (pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_GEOMETRY,
                                PIPE_SHADER_CAP_MAX_INSTRUCTIONS) > 0) {
      ctx->has_geometry_shader = true;
   }
   if (pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_TESS_CTRL,
                                PIPE_SHADER_CAP_MAX_INSTRUCTIONS) > 0) {
      ctx->has_tessellation = true;
   }
   if (pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_COMPUTE,
                                      PIPE_SHADER_CAP_MAX_INSTRUCTIONS) > 0) {
      /* Compute also requires a usable IR (TGSI or NIR). */
      int supported_irs =
         pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_COMPUTE,
                                        PIPE_SHADER_CAP_SUPPORTED_IRS);
      if (supported_irs & ((1 << PIPE_SHADER_IR_TGSI) |
                           (1 << PIPE_SHADER_IR_NIR))) {
         ctx->has_compute_shader = true;
      }
   }
   if (pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_MESH,
                                PIPE_SHADER_CAP_MAX_INSTRUCTIONS) > 0) {
      ctx->has_task_mesh_shader = true;
   }
   if (pipe->screen->get_param(pipe->screen,
                               PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS) != 0) {
      ctx->has_streamout = true;
   }

   /* Freedreno border-color quirk: sampler state depends on view format. */
   if (pipe->screen->get_param(pipe->screen,
                               PIPE_CAP_TEXTURE_BORDER_COLOR_QUIRK) &
       PIPE_QUIRK_TEXTURE_BORDER_COLOR_SWIZZLE_FREEDRENO)
      ctx->sampler_format = true;

   ctx->max_fs_samplerviews =
      pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_FRAGMENT,
                                     PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS);

   /* -1 = no single-sampler updates pending (see max_sampler_seen docs). */
   ctx->max_sampler_seen = -1;
   return &ctx->base;
}
352 
353 
/**
 * Unbind all state from the pipe context and clear the cso context's
 * shadow copies, so the context can be safely destroyed or reused.
 *
 * Releases references held on framebuffer surfaces and stream-output
 * targets.  The CSO cache itself is left intact.
 */
void
cso_unbind_context(struct cso_context *cso)
{
   struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
   unsigned i;

   /* Pause trace dumping: the flood of unbind calls below is noise. */
   bool dumping = trace_dumping_enabled_locked();
   if (dumping)
      trace_dumping_stop_locked();
   if (ctx->base.pipe) {
      ctx->base.pipe->bind_blend_state(ctx->base.pipe, NULL);
      ctx->base.pipe->bind_rasterizer_state(ctx->base.pipe, NULL);

      {
         /* Static zero-filled tables used to unbind per-stage resources. */
         static struct pipe_sampler_view *views[PIPE_MAX_SHADER_SAMPLER_VIEWS] = { NULL };
         static struct pipe_shader_buffer ssbos[PIPE_MAX_SHADER_BUFFERS] = { 0 };
         static void *zeros[PIPE_MAX_SAMPLERS] = { NULL };
         struct pipe_screen *scr = ctx->base.pipe->screen;
         enum pipe_shader_type sh;
         for (sh = 0; sh < PIPE_SHADER_MESH_TYPES; sh++) {
            /* Skip stages the driver does not support. */
            switch (sh) {
            case PIPE_SHADER_GEOMETRY:
               if (!ctx->has_geometry_shader)
                  continue;
               break;
            case PIPE_SHADER_TESS_CTRL:
            case PIPE_SHADER_TESS_EVAL:
               if (!ctx->has_tessellation)
                  continue;
               break;
            case PIPE_SHADER_COMPUTE:
               if (!ctx->has_compute_shader)
                  continue;
               break;
            case PIPE_SHADER_MESH:
            case PIPE_SHADER_TASK:
               if (!ctx->has_task_mesh_shader)
                  continue;
               break;
            default:
               break;
            }

            /* Query per-stage limits so we unbind exactly what can be bound. */
            int maxsam = scr->get_shader_param(scr, sh,
                                               PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS);
            int maxview = scr->get_shader_param(scr, sh,
                                                PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS);
            int maxssbo = scr->get_shader_param(scr, sh,
                                                PIPE_SHADER_CAP_MAX_SHADER_BUFFERS);
            int maxcb = scr->get_shader_param(scr, sh,
                                              PIPE_SHADER_CAP_MAX_CONST_BUFFERS);
            int maximg = scr->get_shader_param(scr, sh,
                                              PIPE_SHADER_CAP_MAX_SHADER_IMAGES);
            assert(maxsam <= PIPE_MAX_SAMPLERS);
            assert(maxview <= PIPE_MAX_SHADER_SAMPLER_VIEWS);
            assert(maxssbo <= PIPE_MAX_SHADER_BUFFERS);
            assert(maxcb <= PIPE_MAX_CONSTANT_BUFFERS);
            assert(maximg <= PIPE_MAX_SHADER_IMAGES);
            if (maxsam > 0) {
               ctx->base.pipe->bind_sampler_states(ctx->base.pipe, sh, 0, maxsam, zeros);
            }
            if (maxview > 0) {
               ctx->base.pipe->set_sampler_views(ctx->base.pipe, sh, 0, maxview, 0, false, views);
            }
            if (maxssbo > 0) {
               ctx->base.pipe->set_shader_buffers(ctx->base.pipe, sh, 0, maxssbo, ssbos, 0);
            }
            if (maximg > 0) {
               ctx->base.pipe->set_shader_images(ctx->base.pipe, sh, 0, 0, maximg, NULL);
            }
            for (int i = 0; i < maxcb; i++) {
               ctx->base.pipe->set_constant_buffer(ctx->base.pipe, sh, i, false, NULL);
            }
         }
      }

      ctx->base.pipe->bind_depth_stencil_alpha_state(ctx->base.pipe, NULL);
      struct pipe_stencil_ref sr = {0};
      ctx->base.pipe->set_stencil_ref(ctx->base.pipe, sr);
      ctx->base.pipe->bind_fs_state(ctx->base.pipe, NULL);
      ctx->base.pipe->set_constant_buffer(ctx->base.pipe, PIPE_SHADER_FRAGMENT, 0, false, NULL);
      ctx->base.pipe->bind_vs_state(ctx->base.pipe, NULL);
      ctx->base.pipe->set_constant_buffer(ctx->base.pipe, PIPE_SHADER_VERTEX, 0, false, NULL);
      if (ctx->has_geometry_shader) {
         ctx->base.pipe->bind_gs_state(ctx->base.pipe, NULL);
      }
      if (ctx->has_tessellation) {
         ctx->base.pipe->bind_tcs_state(ctx->base.pipe, NULL);
         ctx->base.pipe->bind_tes_state(ctx->base.pipe, NULL);
      }
      if (ctx->has_compute_shader) {
         ctx->base.pipe->bind_compute_state(ctx->base.pipe, NULL);
      }
      if (ctx->has_task_mesh_shader) {
         ctx->base.pipe->bind_ts_state(ctx->base.pipe, NULL);
         ctx->base.pipe->bind_ms_state(ctx->base.pipe, NULL);
      }
      ctx->base.pipe->bind_vertex_elements_state(ctx->base.pipe, NULL);

      if (ctx->has_streamout)
         ctx->base.pipe->set_stream_output_targets(ctx->base.pipe, 0, NULL, NULL);

      struct pipe_framebuffer_state fb = {0};
      ctx->base.pipe->set_framebuffer_state(ctx->base.pipe, &fb);
   }

   /* Drop the surface/target references held by the shadow copies. */
   util_unreference_framebuffer_state(&ctx->fb);
   util_unreference_framebuffer_state(&ctx->fb_saved);

   for (i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
      pipe_so_target_reference(&ctx->so_targets[i], NULL);
      pipe_so_target_reference(&ctx->so_targets_saved[i], NULL);
   }

   /* Wholesale-clear the shadow state; relies on cso_context_priv field
    * layout (everything from nr_so_targets up to, but excluding, cache).
    */
   memset(&ctx->samplers, 0, sizeof(ctx->samplers));
   memset(&ctx->nr_so_targets, 0,
          offsetof(struct cso_context_priv, cache)
          - offsetof(struct cso_context_priv, nr_so_targets));
   ctx->sample_mask = ~0;
   /*
    * If the cso context is reused (with the same pipe context),
    * need to really make sure the context state doesn't get out of sync.
    */
   ctx->base.pipe->set_sample_mask(ctx->base.pipe, ctx->sample_mask);
   if (ctx->base.pipe->set_min_samples)
      ctx->base.pipe->set_min_samples(ctx->base.pipe, ctx->min_samples);
   if (dumping)
      trace_dumping_start_locked();
}
483 
484 
485 /**
486  * Free the CSO context.
487  */
488 void
cso_destroy_context(struct cso_context * cso)489 cso_destroy_context(struct cso_context *cso)
490 {
491    struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
492 
493    cso_unbind_context(cso);
494    cso_cache_delete(&ctx->cache);
495 
496    if (ctx->vbuf)
497       u_vbuf_destroy(ctx->vbuf);
498 
499    ctx->base.pipe->vbuf = NULL;
500    FREE(ctx);
501 }
502 
503 
504 /* Those function will either find the state of the given template
505  * in the cache or they will create a new state from the given
506  * template, insert it in the cache and return it.
507  */
508 
509 #define CSO_BLEND_KEY_SIZE_RT0      offsetof(struct pipe_blend_state, rt[1])
510 #define CSO_BLEND_KEY_SIZE_ALL_RT   sizeof(struct pipe_blend_state)
511 
512 /*
513  * If the driver returns 0 from the create method then they will assign
514  * the data member of the cso to be the template itself.
515  */
516 
517 enum pipe_error
cso_set_blend(struct cso_context * cso,const struct pipe_blend_state * templ)518 cso_set_blend(struct cso_context *cso,
519               const struct pipe_blend_state *templ)
520 {
521    struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
522    unsigned key_size, hash_key;
523    struct cso_hash_iter iter;
524    void *handle;
525 
526    if (templ->independent_blend_enable) {
527       /* This is duplicated with the else block below because we want key_size
528        * to be a literal constant, so that memcpy and the hash computation can
529        * be inlined and unrolled.
530        */
531       hash_key = cso_construct_key(templ, CSO_BLEND_KEY_SIZE_ALL_RT);
532       iter = cso_find_state_template(&ctx->cache, hash_key, CSO_BLEND,
533                                      templ, CSO_BLEND_KEY_SIZE_ALL_RT);
534       key_size = CSO_BLEND_KEY_SIZE_ALL_RT;
535    } else {
536       hash_key = cso_construct_key(templ, CSO_BLEND_KEY_SIZE_RT0);
537       iter = cso_find_state_template(&ctx->cache, hash_key, CSO_BLEND,
538                                      templ, CSO_BLEND_KEY_SIZE_RT0);
539       key_size = CSO_BLEND_KEY_SIZE_RT0;
540    }
541 
542    if (cso_hash_iter_is_null(iter)) {
543       struct cso_blend *cso = MALLOC(sizeof(struct cso_blend));
544       if (!cso)
545          return PIPE_ERROR_OUT_OF_MEMORY;
546 
547       memset(&cso->state, 0, sizeof cso->state);
548       memcpy(&cso->state, templ, key_size);
549       cso->data = ctx->base.pipe->create_blend_state(ctx->base.pipe, &cso->state);
550 
551       iter = cso_insert_state(&ctx->cache, hash_key, CSO_BLEND, cso);
552       if (cso_hash_iter_is_null(iter)) {
553          FREE(cso);
554          return PIPE_ERROR_OUT_OF_MEMORY;
555       }
556 
557       handle = cso->data;
558    } else {
559       handle = ((struct cso_blend *)cso_hash_iter_data(iter))->data;
560    }
561 
562    if (ctx->blend != handle) {
563       ctx->blend = handle;
564       ctx->base.pipe->bind_blend_state(ctx->base.pipe, handle);
565    }
566    return PIPE_OK;
567 }
568 
569 
570 static void
cso_save_blend(struct cso_context_priv * ctx)571 cso_save_blend(struct cso_context_priv *ctx)
572 {
573    assert(!ctx->blend_saved);
574    ctx->blend_saved = ctx->blend;
575 }
576 
577 
578 static void
cso_restore_blend(struct cso_context_priv * ctx)579 cso_restore_blend(struct cso_context_priv *ctx)
580 {
581    if (ctx->blend != ctx->blend_saved) {
582       ctx->blend = ctx->blend_saved;
583       ctx->base.pipe->bind_blend_state(ctx->base.pipe, ctx->blend_saved);
584    }
585    ctx->blend_saved = NULL;
586 }
587 
588 
589 enum pipe_error
cso_set_depth_stencil_alpha(struct cso_context * cso,const struct pipe_depth_stencil_alpha_state * templ)590 cso_set_depth_stencil_alpha(struct cso_context *cso,
591                             const struct pipe_depth_stencil_alpha_state *templ)
592 {
593    struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
594    const unsigned key_size = sizeof(struct pipe_depth_stencil_alpha_state);
595    const unsigned hash_key = cso_construct_key(templ, key_size);
596    struct cso_hash_iter iter = cso_find_state_template(&ctx->cache,
597                                                        hash_key,
598                                                        CSO_DEPTH_STENCIL_ALPHA,
599                                                        templ, key_size);
600    void *handle;
601 
602    if (cso_hash_iter_is_null(iter)) {
603       struct cso_depth_stencil_alpha *cso =
604          MALLOC(sizeof(struct cso_depth_stencil_alpha));
605       if (!cso)
606          return PIPE_ERROR_OUT_OF_MEMORY;
607 
608       memcpy(&cso->state, templ, sizeof(*templ));
609       cso->data = ctx->base.pipe->create_depth_stencil_alpha_state(ctx->base.pipe,
610                                                               &cso->state);
611 
612       iter = cso_insert_state(&ctx->cache, hash_key,
613                               CSO_DEPTH_STENCIL_ALPHA, cso);
614       if (cso_hash_iter_is_null(iter)) {
615          FREE(cso);
616          return PIPE_ERROR_OUT_OF_MEMORY;
617       }
618 
619       handle = cso->data;
620    } else {
621       handle = ((struct cso_depth_stencil_alpha *)
622                 cso_hash_iter_data(iter))->data;
623    }
624 
625    if (ctx->depth_stencil != handle) {
626       ctx->depth_stencil = handle;
627       ctx->base.pipe->bind_depth_stencil_alpha_state(ctx->base.pipe, handle);
628    }
629    return PIPE_OK;
630 }
631 
632 
633 static void
cso_save_depth_stencil_alpha(struct cso_context_priv * ctx)634 cso_save_depth_stencil_alpha(struct cso_context_priv *ctx)
635 {
636    assert(!ctx->depth_stencil_saved);
637    ctx->depth_stencil_saved = ctx->depth_stencil;
638 }
639 
640 
641 static void
cso_restore_depth_stencil_alpha(struct cso_context_priv * ctx)642 cso_restore_depth_stencil_alpha(struct cso_context_priv *ctx)
643 {
644    if (ctx->depth_stencil != ctx->depth_stencil_saved) {
645       ctx->depth_stencil = ctx->depth_stencil_saved;
646       ctx->base.pipe->bind_depth_stencil_alpha_state(ctx->base.pipe,
647                                                 ctx->depth_stencil_saved);
648    }
649    ctx->depth_stencil_saved = NULL;
650 }
651 
652 
653 enum pipe_error
cso_set_rasterizer(struct cso_context * cso,const struct pipe_rasterizer_state * templ)654 cso_set_rasterizer(struct cso_context *cso,
655                    const struct pipe_rasterizer_state *templ)
656 {
657    struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
658    const unsigned key_size = sizeof(struct pipe_rasterizer_state);
659    const unsigned hash_key = cso_construct_key(templ, key_size);
660    struct cso_hash_iter iter = cso_find_state_template(&ctx->cache,
661                                                        hash_key,
662                                                        CSO_RASTERIZER,
663                                                        templ, key_size);
664    void *handle = NULL;
665 
666    /* We can't have both point_quad_rasterization (sprites) and point_smooth
667     * (round AA points) enabled at the same time.
668     */
669    assert(!(templ->point_quad_rasterization && templ->point_smooth));
670 
671    if (cso_hash_iter_is_null(iter)) {
672       struct cso_rasterizer *cso = MALLOC(sizeof(struct cso_rasterizer));
673       if (!cso)
674          return PIPE_ERROR_OUT_OF_MEMORY;
675 
676       memcpy(&cso->state, templ, sizeof(*templ));
677       cso->data = ctx->base.pipe->create_rasterizer_state(ctx->base.pipe, &cso->state);
678 
679       iter = cso_insert_state(&ctx->cache, hash_key, CSO_RASTERIZER, cso);
680       if (cso_hash_iter_is_null(iter)) {
681          FREE(cso);
682          return PIPE_ERROR_OUT_OF_MEMORY;
683       }
684 
685       handle = cso->data;
686    } else {
687       handle = ((struct cso_rasterizer *)cso_hash_iter_data(iter))->data;
688    }
689 
690    if (ctx->rasterizer != handle) {
691       ctx->rasterizer = handle;
692       ctx->flatshade_first = templ->flatshade_first;
693       if (ctx->vbuf)
694          u_vbuf_set_flatshade_first(ctx->vbuf, ctx->flatshade_first);
695       ctx->base.pipe->bind_rasterizer_state(ctx->base.pipe, handle);
696    }
697    return PIPE_OK;
698 }
699 
700 
701 static void
cso_save_rasterizer(struct cso_context_priv * ctx)702 cso_save_rasterizer(struct cso_context_priv *ctx)
703 {
704    assert(!ctx->rasterizer_saved);
705    ctx->rasterizer_saved = ctx->rasterizer;
706    ctx->flatshade_first_saved = ctx->flatshade_first;
707 }
708 
709 
710 static void
cso_restore_rasterizer(struct cso_context_priv * ctx)711 cso_restore_rasterizer(struct cso_context_priv *ctx)
712 {
713    if (ctx->rasterizer != ctx->rasterizer_saved) {
714       ctx->rasterizer = ctx->rasterizer_saved;
715       ctx->flatshade_first = ctx->flatshade_first_saved;
716       if (ctx->vbuf)
717          u_vbuf_set_flatshade_first(ctx->vbuf, ctx->flatshade_first);
718       ctx->base.pipe->bind_rasterizer_state(ctx->base.pipe, ctx->rasterizer_saved);
719    }
720    ctx->rasterizer_saved = NULL;
721 }
722 
723 
724 void
cso_set_fragment_shader_handle(struct cso_context * cso,void * handle)725 cso_set_fragment_shader_handle(struct cso_context *cso, void *handle)
726 {
727    struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
728 
729    if (ctx->fragment_shader != handle) {
730       ctx->fragment_shader = handle;
731       ctx->base.pipe->bind_fs_state(ctx->base.pipe, handle);
732    }
733 }
734 
735 
736 static void
cso_save_fragment_shader(struct cso_context_priv * ctx)737 cso_save_fragment_shader(struct cso_context_priv *ctx)
738 {
739    assert(!ctx->fragment_shader_saved);
740    ctx->fragment_shader_saved = ctx->fragment_shader;
741 }
742 
743 
744 static void
cso_restore_fragment_shader(struct cso_context_priv * ctx)745 cso_restore_fragment_shader(struct cso_context_priv *ctx)
746 {
747    if (ctx->fragment_shader_saved != ctx->fragment_shader) {
748       ctx->base.pipe->bind_fs_state(ctx->base.pipe, ctx->fragment_shader_saved);
749       ctx->fragment_shader = ctx->fragment_shader_saved;
750    }
751    ctx->fragment_shader_saved = NULL;
752 }
753 
754 
755 void
cso_set_vertex_shader_handle(struct cso_context * cso,void * handle)756 cso_set_vertex_shader_handle(struct cso_context *cso, void *handle)
757 {
758    struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
759 
760    if (ctx->vertex_shader != handle) {
761       ctx->vertex_shader = handle;
762       ctx->base.pipe->bind_vs_state(ctx->base.pipe, handle);
763    }
764 }
765 
766 
767 static void
cso_save_vertex_shader(struct cso_context_priv * ctx)768 cso_save_vertex_shader(struct cso_context_priv *ctx)
769 {
770    assert(!ctx->vertex_shader_saved);
771    ctx->vertex_shader_saved = ctx->vertex_shader;
772 }
773 
774 
775 static void
cso_restore_vertex_shader(struct cso_context_priv * ctx)776 cso_restore_vertex_shader(struct cso_context_priv *ctx)
777 {
778    if (ctx->vertex_shader_saved != ctx->vertex_shader) {
779       ctx->base.pipe->bind_vs_state(ctx->base.pipe, ctx->vertex_shader_saved);
780       ctx->vertex_shader = ctx->vertex_shader_saved;
781    }
782    ctx->vertex_shader_saved = NULL;
783 }
784 
785 
786 void
cso_set_framebuffer(struct cso_context * cso,const struct pipe_framebuffer_state * fb)787 cso_set_framebuffer(struct cso_context *cso,
788                     const struct pipe_framebuffer_state *fb)
789 {
790    struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
791 
792    if (memcmp(&ctx->fb, fb, sizeof(*fb)) != 0) {
793       util_copy_framebuffer_state(&ctx->fb, fb);
794       ctx->base.pipe->set_framebuffer_state(ctx->base.pipe, fb);
795    }
796 }
797 
798 
799 static void
cso_save_framebuffer(struct cso_context_priv * ctx)800 cso_save_framebuffer(struct cso_context_priv *ctx)
801 {
802    util_copy_framebuffer_state(&ctx->fb_saved, &ctx->fb);
803 }
804 
805 
806 static void
cso_restore_framebuffer(struct cso_context_priv * ctx)807 cso_restore_framebuffer(struct cso_context_priv *ctx)
808 {
809    if (memcmp(&ctx->fb, &ctx->fb_saved, sizeof(ctx->fb))) {
810       util_copy_framebuffer_state(&ctx->fb, &ctx->fb_saved);
811       ctx->base.pipe->set_framebuffer_state(ctx->base.pipe, &ctx->fb);
812       util_unreference_framebuffer_state(&ctx->fb_saved);
813    }
814 }
815 
816 
817 void
cso_set_viewport(struct cso_context * cso,const struct pipe_viewport_state * vp)818 cso_set_viewport(struct cso_context *cso,
819                  const struct pipe_viewport_state *vp)
820 {
821    struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
822 
823    if (memcmp(&ctx->vp, vp, sizeof(*vp))) {
824       ctx->vp = *vp;
825       ctx->base.pipe->set_viewport_states(ctx->base.pipe, 0, 1, vp);
826    }
827 }
828 
829 
830 /**
831  * Setup viewport state for given width and height (position is always (0,0)).
832  * Invert the Y axis if 'invert' is true.
833  */
834 void
cso_set_viewport_dims(struct cso_context * ctx,float width,float height,bool invert)835 cso_set_viewport_dims(struct cso_context *ctx,
836                       float width, float height, bool invert)
837 {
838    struct pipe_viewport_state vp;
839    vp.scale[0] = width * 0.5f;
840    vp.scale[1] = height * (invert ? -0.5f : 0.5f);
841    vp.scale[2] = 0.5f;
842    vp.translate[0] = 0.5f * width;
843    vp.translate[1] = 0.5f * height;
844    vp.translate[2] = 0.5f;
845    vp.swizzle_x = PIPE_VIEWPORT_SWIZZLE_POSITIVE_X;
846    vp.swizzle_y = PIPE_VIEWPORT_SWIZZLE_POSITIVE_Y;
847    vp.swizzle_z = PIPE_VIEWPORT_SWIZZLE_POSITIVE_Z;
848    vp.swizzle_w = PIPE_VIEWPORT_SWIZZLE_POSITIVE_W;
849    cso_set_viewport(ctx, &vp);
850 }
851 
852 
/* Save viewport 0 for a later cso_restore_viewport(). */
static void
cso_save_viewport(struct cso_context_priv *ctx)
{
   ctx->vp_saved = ctx->vp;
}
858 
859 
860 static void
cso_restore_viewport(struct cso_context_priv * ctx)861 cso_restore_viewport(struct cso_context_priv *ctx)
862 {
863    if (memcmp(&ctx->vp, &ctx->vp_saved, sizeof(ctx->vp))) {
864       ctx->vp = ctx->vp_saved;
865       ctx->base.pipe->set_viewport_states(ctx->base.pipe, 0, 1, &ctx->vp);
866    }
867 }
868 
869 
870 void
cso_set_sample_mask(struct cso_context * cso,unsigned sample_mask)871 cso_set_sample_mask(struct cso_context *cso, unsigned sample_mask)
872 {
873    struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
874 
875    if (ctx->sample_mask != sample_mask) {
876       ctx->sample_mask = sample_mask;
877       ctx->base.pipe->set_sample_mask(ctx->base.pipe, sample_mask);
878    }
879 }
880 
881 
/* Save the sample mask for a later cso_restore_sample_mask(). */
static void
cso_save_sample_mask(struct cso_context_priv *ctx)
{
   ctx->sample_mask_saved = ctx->sample_mask;
}
887 
888 
/* Restore the saved sample mask; the setter already filters no-op changes. */
static void
cso_restore_sample_mask(struct cso_context_priv *ctx)
{
   cso_set_sample_mask(&ctx->base, ctx->sample_mask_saved);
}
894 
895 
896 void
cso_set_min_samples(struct cso_context * cso,unsigned min_samples)897 cso_set_min_samples(struct cso_context *cso, unsigned min_samples)
898 {
899    struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
900 
901    if (ctx->min_samples != min_samples && ctx->base.pipe->set_min_samples) {
902       ctx->min_samples = min_samples;
903       ctx->base.pipe->set_min_samples(ctx->base.pipe, min_samples);
904    }
905 }
906 
907 
/* Save min_samples for a later cso_restore_min_samples(). */
static void
cso_save_min_samples(struct cso_context_priv *ctx)
{
   ctx->min_samples_saved = ctx->min_samples;
}
913 
914 
/* Restore the saved min_samples; the setter filters no-op changes. */
static void
cso_restore_min_samples(struct cso_context_priv *ctx)
{
   cso_set_min_samples(&ctx->base, ctx->min_samples_saved);
}
920 
921 
922 void
cso_set_stencil_ref(struct cso_context * cso,const struct pipe_stencil_ref sr)923 cso_set_stencil_ref(struct cso_context *cso,
924                     const struct pipe_stencil_ref sr)
925 {
926    struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
927 
928    if (memcmp(&ctx->stencil_ref, &sr, sizeof(ctx->stencil_ref))) {
929       ctx->stencil_ref = sr;
930       ctx->base.pipe->set_stencil_ref(ctx->base.pipe, sr);
931    }
932 }
933 
934 
/* Save the stencil reference values for a later cso_restore_stencil_ref(). */
static void
cso_save_stencil_ref(struct cso_context_priv *ctx)
{
   ctx->stencil_ref_saved = ctx->stencil_ref;
}
940 
941 
942 static void
cso_restore_stencil_ref(struct cso_context_priv * ctx)943 cso_restore_stencil_ref(struct cso_context_priv *ctx)
944 {
945    if (memcmp(&ctx->stencil_ref, &ctx->stencil_ref_saved,
946               sizeof(ctx->stencil_ref))) {
947       ctx->stencil_ref = ctx->stencil_ref_saved;
948       ctx->base.pipe->set_stencil_ref(ctx->base.pipe, ctx->stencil_ref);
949    }
950 }
951 
952 
953 void
cso_set_render_condition(struct cso_context * cso,struct pipe_query * query,bool condition,enum pipe_render_cond_flag mode)954 cso_set_render_condition(struct cso_context *cso,
955                          struct pipe_query *query,
956                          bool condition,
957                          enum pipe_render_cond_flag mode)
958 {
959    struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
960    struct pipe_context *pipe = ctx->base.pipe;
961 
962    if (ctx->render_condition != query ||
963        ctx->render_condition_mode != mode ||
964        ctx->render_condition_cond != condition) {
965       pipe->render_condition(pipe, query, condition, mode);
966       ctx->render_condition = query;
967       ctx->render_condition_cond = condition;
968       ctx->render_condition_mode = mode;
969    }
970 }
971 
972 
/* Save the render-condition triple for cso_restore_render_condition(). */
static void
cso_save_render_condition(struct cso_context_priv *ctx)
{
   ctx->render_condition_saved = ctx->render_condition;
   ctx->render_condition_cond_saved = ctx->render_condition_cond;
   ctx->render_condition_mode_saved = ctx->render_condition_mode;
}
980 
981 
/* Restore the saved render condition; the setter filters no-op changes. */
static void
cso_restore_render_condition(struct cso_context_priv *ctx)
{
   cso_set_render_condition(&ctx->base, ctx->render_condition_saved,
                            ctx->render_condition_cond_saved,
                            ctx->render_condition_mode_saved);
}
989 
990 
991 void
cso_set_geometry_shader_handle(struct cso_context * cso,void * handle)992 cso_set_geometry_shader_handle(struct cso_context *cso, void *handle)
993 {
994    struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
995    assert(ctx->has_geometry_shader || !handle);
996 
997    if (ctx->has_geometry_shader && ctx->geometry_shader != handle) {
998       ctx->geometry_shader = handle;
999       ctx->base.pipe->bind_gs_state(ctx->base.pipe, handle);
1000    }
1001 }
1002 
1003 
/* Stash the bound GS handle for cso_restore_geometry_shader(). No-op when
 * the driver lacks geometry shaders; nested saves are not supported. */
static void
cso_save_geometry_shader(struct cso_context_priv *ctx)
{
   if (!ctx->has_geometry_shader) {
      return;
   }

   assert(!ctx->geometry_shader_saved);
   ctx->geometry_shader_saved = ctx->geometry_shader;
}
1014 
1015 
1016 static void
cso_restore_geometry_shader(struct cso_context_priv * ctx)1017 cso_restore_geometry_shader(struct cso_context_priv *ctx)
1018 {
1019    if (!ctx->has_geometry_shader) {
1020       return;
1021    }
1022 
1023    if (ctx->geometry_shader_saved != ctx->geometry_shader) {
1024       ctx->base.pipe->bind_gs_state(ctx->base.pipe, ctx->geometry_shader_saved);
1025       ctx->geometry_shader = ctx->geometry_shader_saved;
1026    }
1027    ctx->geometry_shader_saved = NULL;
1028 }
1029 
1030 
1031 void
cso_set_tessctrl_shader_handle(struct cso_context * cso,void * handle)1032 cso_set_tessctrl_shader_handle(struct cso_context *cso, void *handle)
1033 {
1034    struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
1035    assert(ctx->has_tessellation || !handle);
1036 
1037    if (ctx->has_tessellation && ctx->tessctrl_shader != handle) {
1038       ctx->tessctrl_shader = handle;
1039       ctx->base.pipe->bind_tcs_state(ctx->base.pipe, handle);
1040    }
1041 }
1042 
1043 
/* Stash the bound TCS handle for cso_restore_tessctrl_shader(). No-op
 * without tessellation support; nested saves are not supported. */
static void
cso_save_tessctrl_shader(struct cso_context_priv *ctx)
{
   if (!ctx->has_tessellation) {
      return;
   }

   assert(!ctx->tessctrl_shader_saved);
   ctx->tessctrl_shader_saved = ctx->tessctrl_shader;
}
1054 
1055 
1056 static void
cso_restore_tessctrl_shader(struct cso_context_priv * ctx)1057 cso_restore_tessctrl_shader(struct cso_context_priv *ctx)
1058 {
1059    if (!ctx->has_tessellation) {
1060       return;
1061    }
1062 
1063    if (ctx->tessctrl_shader_saved != ctx->tessctrl_shader) {
1064       ctx->base.pipe->bind_tcs_state(ctx->base.pipe, ctx->tessctrl_shader_saved);
1065       ctx->tessctrl_shader = ctx->tessctrl_shader_saved;
1066    }
1067    ctx->tessctrl_shader_saved = NULL;
1068 }
1069 
1070 
1071 void
cso_set_tesseval_shader_handle(struct cso_context * cso,void * handle)1072 cso_set_tesseval_shader_handle(struct cso_context *cso, void *handle)
1073 {
1074    struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
1075 
1076    assert(ctx->has_tessellation || !handle);
1077 
1078    if (ctx->has_tessellation && ctx->tesseval_shader != handle) {
1079       ctx->tesseval_shader = handle;
1080       ctx->base.pipe->bind_tes_state(ctx->base.pipe, handle);
1081    }
1082 }
1083 
1084 
/* Stash the bound TES handle for cso_restore_tesseval_shader(). No-op
 * without tessellation support; nested saves are not supported. */
static void
cso_save_tesseval_shader(struct cso_context_priv *ctx)
{
   if (!ctx->has_tessellation) {
      return;
   }

   assert(!ctx->tesseval_shader_saved);
   ctx->tesseval_shader_saved = ctx->tesseval_shader;
}
1095 
1096 
1097 static void
cso_restore_tesseval_shader(struct cso_context_priv * ctx)1098 cso_restore_tesseval_shader(struct cso_context_priv *ctx)
1099 {
1100    if (!ctx->has_tessellation) {
1101       return;
1102    }
1103 
1104    if (ctx->tesseval_shader_saved != ctx->tesseval_shader) {
1105       ctx->base.pipe->bind_tes_state(ctx->base.pipe, ctx->tesseval_shader_saved);
1106       ctx->tesseval_shader = ctx->tesseval_shader_saved;
1107    }
1108    ctx->tesseval_shader_saved = NULL;
1109 }
1110 
1111 
1112 void
cso_set_compute_shader_handle(struct cso_context * cso,void * handle)1113 cso_set_compute_shader_handle(struct cso_context *cso, void *handle)
1114 {
1115    struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
1116    assert(ctx->has_compute_shader || !handle);
1117 
1118    if (ctx->has_compute_shader && ctx->compute_shader != handle) {
1119       ctx->compute_shader = handle;
1120       ctx->base.pipe->bind_compute_state(ctx->base.pipe, handle);
1121    }
1122 }
1123 
1124 
/* Stash the bound CS handle for cso_restore_compute_shader(). No-op
 * without compute support; nested saves are not supported. */
static void
cso_save_compute_shader(struct cso_context_priv *ctx)
{
   if (!ctx->has_compute_shader) {
      return;
   }

   assert(!ctx->compute_shader_saved);
   ctx->compute_shader_saved = ctx->compute_shader;
}
1135 
1136 
1137 static void
cso_restore_compute_shader(struct cso_context_priv * ctx)1138 cso_restore_compute_shader(struct cso_context_priv *ctx)
1139 {
1140    if (!ctx->has_compute_shader) {
1141       return;
1142    }
1143 
1144    if (ctx->compute_shader_saved != ctx->compute_shader) {
1145       ctx->base.pipe->bind_compute_state(ctx->base.pipe, ctx->compute_shader_saved);
1146       ctx->compute_shader = ctx->compute_shader_saved;
1147    }
1148    ctx->compute_shader_saved = NULL;
1149 }
1150 
1151 
/* Copy the compute-stage sampler CSOs and driver handles into the saved
 * slot for a later cso_restore_compute_samplers(). */
static void
cso_save_compute_samplers(struct cso_context_priv *ctx)
{
   struct sampler_info *info = &ctx->samplers[PIPE_SHADER_COMPUTE];
   struct sampler_info *saved = &ctx->compute_samplers_saved;

   memcpy(saved->cso_samplers, info->cso_samplers,
          sizeof(info->cso_samplers));
   memcpy(saved->samplers, info->samplers, sizeof(info->samplers));
}
1162 
1163 
1164 static void
cso_restore_compute_samplers(struct cso_context_priv * ctx)1165 cso_restore_compute_samplers(struct cso_context_priv *ctx)
1166 {
1167    struct sampler_info *info = &ctx->samplers[PIPE_SHADER_COMPUTE];
1168    struct sampler_info *saved = &ctx->compute_samplers_saved;
1169 
1170    memcpy(info->cso_samplers, saved->cso_samplers,
1171           sizeof(info->cso_samplers));
1172    memcpy(info->samplers, saved->samplers, sizeof(info->samplers));
1173 
1174    for (int i = PIPE_MAX_SAMPLERS - 1; i >= 0; i--) {
1175       if (info->samplers[i]) {
1176          ctx->max_sampler_seen = i;
1177          break;
1178       }
1179    }
1180 
1181    cso_single_sampler_done(&ctx->base, PIPE_SHADER_COMPUTE);
1182 }
1183 
1184 
/* Look up (or create and cache) a driver vertex-elements CSO for 'velems'
 * and bind it, bypassing u_vbuf. On allocation or cache-insert failure the
 * function silently returns, leaving the previous binding in place. */
static void
cso_set_vertex_elements_direct(struct cso_context_priv *ctx,
                               const struct cso_velems_state *velems)
{
   /* Need to include the count into the stored state data too.
    * Otherwise first few count pipe_vertex_elements could be identical
    * even if count is different, and there's no guarantee the hash would
    * be different in that case neither.
    */
   const unsigned key_size =
      sizeof(struct pipe_vertex_element) * velems->count + sizeof(unsigned);
   const unsigned hash_key = cso_construct_key((void*)velems, key_size);
   struct cso_hash_iter iter =
      cso_find_state_template(&ctx->cache, hash_key, CSO_VELEMENTS,
                              velems, key_size);
   void *handle;

   if (cso_hash_iter_is_null(iter)) {
      /* Cache miss: create the driver object and insert it. */
      struct cso_velements *cso = MALLOC(sizeof(struct cso_velements));
      if (!cso)
         return;

      /* Only the first key_size bytes (count + used elements) are kept. */
      memcpy(&cso->state, velems, key_size);

      /* Lower 64-bit vertex attributes. */
      unsigned new_count = velems->count;
      const struct pipe_vertex_element *new_elems = velems->velems;
      struct pipe_vertex_element tmp[PIPE_MAX_ATTRIBS];
      util_lower_uint64_vertex_elements(&new_elems, &new_count, tmp);

      cso->data = ctx->base.pipe->create_vertex_elements_state(ctx->base.pipe, new_count,
                                                          new_elems);

      iter = cso_insert_state(&ctx->cache, hash_key, CSO_VELEMENTS, cso);
      if (cso_hash_iter_is_null(iter)) {
         FREE(cso);
         return;
      }

      handle = cso->data;
   } else {
      /* Cache hit: reuse the previously created driver object. */
      handle = ((struct cso_velements *)cso_hash_iter_data(iter))->data;
   }

   /* Bind only when the handle actually changes. */
   if (ctx->velements != handle) {
      ctx->velements = handle;
      ctx->base.pipe->bind_vertex_elements_state(ctx->base.pipe, handle);
   }
}
1234 
1235 
1236 enum pipe_error
cso_set_vertex_elements(struct cso_context * cso,const struct cso_velems_state * velems)1237 cso_set_vertex_elements(struct cso_context *cso,
1238                         const struct cso_velems_state *velems)
1239 {
1240    struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
1241    struct u_vbuf *vbuf = ctx->vbuf_current;
1242 
1243    if (vbuf) {
1244       u_vbuf_set_vertex_elements(vbuf, velems);
1245       return PIPE_OK;
1246    }
1247 
1248    cso_set_vertex_elements_direct(ctx, velems);
1249    return PIPE_OK;
1250 }
1251 
1252 
1253 static void
cso_save_vertex_elements(struct cso_context_priv * ctx)1254 cso_save_vertex_elements(struct cso_context_priv *ctx)
1255 {
1256    struct u_vbuf *vbuf = ctx->vbuf_current;
1257 
1258    if (vbuf) {
1259       u_vbuf_save_vertex_elements(vbuf);
1260       return;
1261    }
1262 
1263    assert(!ctx->velements_saved);
1264    ctx->velements_saved = ctx->velements;
1265 }
1266 
1267 
1268 static void
cso_restore_vertex_elements(struct cso_context_priv * ctx)1269 cso_restore_vertex_elements(struct cso_context_priv *ctx)
1270 {
1271    struct u_vbuf *vbuf = ctx->vbuf_current;
1272 
1273    if (vbuf) {
1274       u_vbuf_restore_vertex_elements(vbuf);
1275       return;
1276    }
1277 
1278    if (ctx->velements != ctx->velements_saved) {
1279       ctx->velements = ctx->velements_saved;
1280       ctx->base.pipe->bind_vertex_elements_state(ctx->base.pipe, ctx->velements_saved);
1281    }
1282    ctx->velements_saved = NULL;
1283 }
1284 
1285 /* vertex buffers */
1286 
1287 void
cso_set_vertex_buffers(struct cso_context * cso,unsigned count,bool take_ownership,const struct pipe_vertex_buffer * buffers)1288 cso_set_vertex_buffers(struct cso_context *cso,
1289                        unsigned count,
1290                        bool take_ownership,
1291                        const struct pipe_vertex_buffer *buffers)
1292 {
1293    struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
1294    struct u_vbuf *vbuf = ctx->vbuf_current;
1295 
1296    if (vbuf) {
1297       u_vbuf_set_vertex_buffers(vbuf, count, take_ownership, buffers);
1298       return;
1299    }
1300 
1301    util_set_vertex_buffers(ctx->base.pipe, count, take_ownership, buffers);
1302 }
1303 
1304 
/**
 * Set vertex buffers and vertex elements. Skip u_vbuf if it's only needed
 * for user vertex buffers and user vertex buffers are not set by this call.
 * u_vbuf will be disabled. To re-enable u_vbuf, call this function again.
 *
 * Skipping u_vbuf decreases CPU overhead for draw calls that don't need it,
 * such as VBOs, glBegin/End, and display lists.
 *
 * Internal operations that do "save states, draw, restore states" shouldn't
 * use this, because the states are only saved in either cso_context or
 * u_vbuf, not both.
 */
void
cso_set_vertex_buffers_and_elements(struct cso_context *cso,
                                    const struct cso_velems_state *velems,
                                    unsigned vb_count,
                                    bool uses_user_vertex_buffers,
                                    const struct pipe_vertex_buffer *vbuffers)
{
   struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
   struct u_vbuf *vbuf = ctx->vbuf;
   struct pipe_context *pipe = ctx->base.pipe;

   /* Path 1: route everything through u_vbuf, enabling it if needed. */
   if (vbuf && (ctx->always_use_vbuf || uses_user_vertex_buffers)) {
      if (!ctx->vbuf_current) {
         /* Unset this to make sure the CSO is re-bound on the next use. */
         ctx->velements = NULL;
         ctx->vbuf_current = pipe->vbuf = vbuf;
         /* Redirect draws through u_vbuf when threaded context is in use. */
         if (pipe->draw_vbo == tc_draw_vbo)
            ctx->base.draw_vbo = u_vbuf_draw_vbo;
      }

      /* Note: buffer references are always taken (take_ownership = true). */
      u_vbuf_set_vertex_elements(vbuf, velems);
      u_vbuf_set_vertex_buffers(vbuf, vb_count, true, vbuffers);
      return;
   }

   /* Path 2: talk to the driver directly, disabling u_vbuf if it was on. */
   if (ctx->vbuf_current) {
      /* Unset this to make sure the CSO is re-bound on the next use. */
      u_vbuf_unset_vertex_elements(vbuf);
      ctx->vbuf_current = pipe->vbuf = NULL;
      if (pipe->draw_vbo == tc_draw_vbo)
         ctx->base.draw_vbo = pipe->draw_vbo;
   }

   cso_set_vertex_elements_direct(ctx, velems);
   pipe->set_vertex_buffers(pipe, vb_count, vbuffers);
}
1353 
1354 
1355 ALWAYS_INLINE static struct cso_sampler *
set_sampler(struct cso_context_priv * ctx,enum pipe_shader_type shader_stage,unsigned idx,const struct pipe_sampler_state * templ,size_t key_size)1356 set_sampler(struct cso_context_priv *ctx, enum pipe_shader_type shader_stage,
1357             unsigned idx, const struct pipe_sampler_state *templ,
1358             size_t key_size)
1359 {
1360    unsigned hash_key = cso_construct_key(templ, key_size);
1361    struct cso_sampler *cso;
1362    struct cso_hash_iter iter =
1363       cso_find_state_template(&ctx->cache,
1364                               hash_key, CSO_SAMPLER,
1365                               templ, key_size);
1366 
1367    if (cso_hash_iter_is_null(iter)) {
1368       cso = MALLOC(sizeof(struct cso_sampler));
1369       if (!cso)
1370          return false;
1371 
1372       memcpy(&cso->state, templ, sizeof(*templ));
1373       cso->data = ctx->base.pipe->create_sampler_state(ctx->base.pipe, &cso->state);
1374       cso->hash_key = hash_key;
1375 
1376       iter = cso_insert_state(&ctx->cache, hash_key, CSO_SAMPLER, cso);
1377       if (cso_hash_iter_is_null(iter)) {
1378          FREE(cso);
1379          return false;
1380       }
1381    } else {
1382       cso = cso_hash_iter_data(iter);
1383    }
1384    return cso;
1385 }
1386 
1387 
1388 ALWAYS_INLINE static bool
cso_set_sampler(struct cso_context_priv * ctx,enum pipe_shader_type shader_stage,unsigned idx,const struct pipe_sampler_state * templ,size_t size)1389 cso_set_sampler(struct cso_context_priv *ctx, enum pipe_shader_type shader_stage,
1390                 unsigned idx, const struct pipe_sampler_state *templ,
1391                 size_t size)
1392 {
1393    struct cso_sampler *cso = set_sampler(ctx, shader_stage, idx, templ, size);
1394    ctx->samplers[shader_stage].cso_samplers[idx] = cso;
1395    ctx->samplers[shader_stage].samplers[idx] = cso->data;
1396    return true;
1397 }
1398 
1399 
1400 void
cso_single_sampler(struct cso_context * cso,enum pipe_shader_type shader_stage,unsigned idx,const struct pipe_sampler_state * templ)1401 cso_single_sampler(struct cso_context *cso, enum pipe_shader_type shader_stage,
1402                    unsigned idx, const struct pipe_sampler_state *templ)
1403 {
1404    struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
1405 
1406    /* The reasons both blocks are duplicated is that we want the size parameter
1407     * to be a constant expression to inline and unroll memcmp and hash key
1408     * computations.
1409     */
1410    if (ctx->sampler_format) {
1411       if (cso_set_sampler(ctx, shader_stage, idx, templ,
1412                           sizeof(struct pipe_sampler_state)))
1413          ctx->max_sampler_seen = MAX2(ctx->max_sampler_seen, (int)idx);
1414    } else {
1415       if (cso_set_sampler(ctx, shader_stage, idx, templ,
1416                           offsetof(struct pipe_sampler_state, border_color_format)))
1417          ctx->max_sampler_seen = MAX2(ctx->max_sampler_seen, (int)idx);
1418    }
1419 }
1420 
1421 
1422 /**
1423  * Send staged sampler state to the driver.
1424  */
1425 void
cso_single_sampler_done(struct cso_context * cso,enum pipe_shader_type shader_stage)1426 cso_single_sampler_done(struct cso_context *cso,
1427                         enum pipe_shader_type shader_stage)
1428 {
1429    struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
1430    struct sampler_info *info = &ctx->samplers[shader_stage];
1431 
1432    if (ctx->max_sampler_seen == -1)
1433       return;
1434 
1435    ctx->base.pipe->bind_sampler_states(ctx->base.pipe, shader_stage, 0,
1436                                   ctx->max_sampler_seen + 1,
1437                                   info->samplers);
1438    ctx->max_sampler_seen = -1;
1439 }
1440 
1441 
/* Stage up to 'nr' sampler states, skipping NULL templates. Returns the
 * index of the last non-NULL template staged, or -1 if none. 'key_size'
 * must be a compile-time constant at each call site (see cso_set_samplers)
 * so memcmp and hashing can be inlined. */
ALWAYS_INLINE static int
set_samplers(struct cso_context_priv *ctx,
             enum pipe_shader_type shader_stage,
             unsigned nr,
             const struct pipe_sampler_state **templates,
             size_t key_size)
{
   int last = -1;
   for (unsigned i = 0; i < nr; i++) {
      if (!templates[i])
         continue;

      /* Reuse the same sampler state CSO if 2 consecutive sampler states
       * are identical.
       *
       * The trivial case where both pointers are equal doesn't occur in
       * frequented codepaths.
       *
       * Reuse rate:
       * - Borderlands 2: 55%
       * - Hitman: 65%
       * - Rocket League: 75%
       * - Tomb Raider: 50-65%
       * - XCOM 2: 55%
       */
      if (last >= 0 &&
          !memcmp(templates[i], templates[last],
                  key_size)) {
         ctx->samplers[shader_stage].cso_samplers[i] =
            ctx->samplers[shader_stage].cso_samplers[last];
         ctx->samplers[shader_stage].samplers[i] =
            ctx->samplers[shader_stage].samplers[last];
      } else {
         /* Look up the sampler state CSO. */
         cso_set_sampler(ctx, shader_stage, i, templates[i], key_size);
      }

      last = i;
   }
   return last;
}
1483 
1484 
1485 /*
1486  * If the function encouters any errors it will return the
1487  * last one. Done to always try to set as many samplers
1488  * as possible.
1489  */
1490 void
cso_set_samplers(struct cso_context * cso,enum pipe_shader_type shader_stage,unsigned nr,const struct pipe_sampler_state ** templates)1491 cso_set_samplers(struct cso_context *cso,
1492                  enum pipe_shader_type shader_stage,
1493                  unsigned nr,
1494                  const struct pipe_sampler_state **templates)
1495 {
1496    struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
1497    int last;
1498 
1499    /* ensure sampler size is a constant for memcmp */
1500    if (ctx->sampler_format) {
1501       last = set_samplers(ctx, shader_stage, nr, templates,
1502                           sizeof(struct pipe_sampler_state));
1503    } else {
1504       last = set_samplers(ctx, shader_stage, nr, templates,
1505                           offsetof(struct pipe_sampler_state, border_color_format));
1506    }
1507 
1508    ctx->max_sampler_seen = MAX2(ctx->max_sampler_seen, last);
1509    cso_single_sampler_done(&ctx->base, shader_stage);
1510 }
1511 
1512 
/* Copy the fragment-stage sampler CSOs and driver handles into the saved
 * slot for a later cso_restore_fragment_samplers(). */
static void
cso_save_fragment_samplers(struct cso_context_priv *ctx)
{
   struct sampler_info *info = &ctx->samplers[PIPE_SHADER_FRAGMENT];
   struct sampler_info *saved = &ctx->fragment_samplers_saved;

   memcpy(saved->cso_samplers, info->cso_samplers,
          sizeof(info->cso_samplers));
   memcpy(saved->samplers, info->samplers, sizeof(info->samplers));
}
1523 
1524 
1525 static void
cso_restore_fragment_samplers(struct cso_context_priv * ctx)1526 cso_restore_fragment_samplers(struct cso_context_priv *ctx)
1527 {
1528    struct sampler_info *info = &ctx->samplers[PIPE_SHADER_FRAGMENT];
1529    struct sampler_info *saved = &ctx->fragment_samplers_saved;
1530 
1531    memcpy(info->cso_samplers, saved->cso_samplers,
1532           sizeof(info->cso_samplers));
1533    memcpy(info->samplers, saved->samplers, sizeof(info->samplers));
1534 
1535    for (int i = PIPE_MAX_SAMPLERS - 1; i >= 0; i--) {
1536       if (info->samplers[i]) {
1537          ctx->max_sampler_seen = i;
1538          break;
1539       }
1540    }
1541 
1542    cso_single_sampler_done(&ctx->base, PIPE_SHADER_FRAGMENT);
1543 }
1544 
1545 
1546 void
cso_set_stream_outputs(struct cso_context * cso,unsigned num_targets,struct pipe_stream_output_target ** targets,const unsigned * offsets)1547 cso_set_stream_outputs(struct cso_context *cso,
1548                        unsigned num_targets,
1549                        struct pipe_stream_output_target **targets,
1550                        const unsigned *offsets)
1551 {
1552    struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
1553    struct pipe_context *pipe = ctx->base.pipe;
1554    unsigned i;
1555 
1556    if (!ctx->has_streamout) {
1557       assert(num_targets == 0);
1558       return;
1559    }
1560 
1561    if (ctx->nr_so_targets == 0 && num_targets == 0) {
1562       /* Nothing to do. */
1563       return;
1564    }
1565 
1566    /* reference new targets */
1567    for (i = 0; i < num_targets; i++) {
1568       pipe_so_target_reference(&ctx->so_targets[i], targets[i]);
1569    }
1570    /* unref extra old targets, if any */
1571    for (; i < ctx->nr_so_targets; i++) {
1572       pipe_so_target_reference(&ctx->so_targets[i], NULL);
1573    }
1574 
1575    pipe->set_stream_output_targets(pipe, num_targets, targets,
1576                                    offsets);
1577    ctx->nr_so_targets = num_targets;
1578 }
1579 
1580 
/* Take references on the current stream-output targets for a later
 * cso_restore_stream_outputs(). Nested saves are not supported. */
static void
cso_save_stream_outputs(struct cso_context_priv *ctx)
{
   if (!ctx->has_streamout) {
      return;
   }

   ctx->nr_so_targets_saved = ctx->nr_so_targets;

   for (unsigned i = 0; i < ctx->nr_so_targets; i++) {
      assert(!ctx->so_targets_saved[i]);
      pipe_so_target_reference(&ctx->so_targets_saved[i], ctx->so_targets[i]);
   }
}
1595 
1596 
/* Restore the stream-output targets saved by cso_save_stream_outputs().
 * References held by the saved slots are moved (not re-referenced) into
 * the active slots; restored targets resume appending at their previous
 * position (offset -1). */
static void
cso_restore_stream_outputs(struct cso_context_priv *ctx)
{
   struct pipe_context *pipe = ctx->base.pipe;
   unsigned i;
   unsigned offset[PIPE_MAX_SO_BUFFERS];

   if (!ctx->has_streamout) {
      return;
   }

   if (ctx->nr_so_targets == 0 && ctx->nr_so_targets_saved == 0) {
      /* Nothing to do. */
      return;
   }

   assert(ctx->nr_so_targets_saved <= PIPE_MAX_SO_BUFFERS);
   for (i = 0; i < ctx->nr_so_targets_saved; i++) {
      /* Drop the current target's reference first... */
      pipe_so_target_reference(&ctx->so_targets[i], NULL);
      /* move the reference from one pointer to another */
      ctx->so_targets[i] = ctx->so_targets_saved[i];
      ctx->so_targets_saved[i] = NULL;
      /* -1 means append */
      offset[i] = (unsigned)-1;
   }
   /* Unreference any current targets beyond the saved count. */
   for (; i < ctx->nr_so_targets; i++) {
      pipe_so_target_reference(&ctx->so_targets[i], NULL);
   }

   pipe->set_stream_output_targets(pipe, ctx->nr_so_targets_saved,
                                   ctx->so_targets, offset);

   ctx->nr_so_targets = ctx->nr_so_targets_saved;
   ctx->nr_so_targets_saved = 0;
}
1632 
1633 
/**
 * Save all the CSO state items specified by the state_mask bitmask
 * of CSO_BIT_x flags.
 *
 * Saves are not nestable: the previous save must have been restored
 * (asserted below). The mask is remembered so cso_restore_state() knows
 * which items to put back.
 */
void
cso_save_state(struct cso_context *ctx, unsigned state_mask)
{
   struct cso_context_priv *cso = (struct cso_context_priv *)ctx;
   assert(cso->saved_state == 0);

   cso->saved_state = state_mask;

   if (state_mask & CSO_BIT_BLEND)
      cso_save_blend(cso);
   if (state_mask & CSO_BIT_DEPTH_STENCIL_ALPHA)
      cso_save_depth_stencil_alpha(cso);
   if (state_mask & CSO_BIT_FRAGMENT_SAMPLERS)
      cso_save_fragment_samplers(cso);
   if (state_mask & CSO_BIT_FRAGMENT_SHADER)
      cso_save_fragment_shader(cso);
   if (state_mask & CSO_BIT_FRAMEBUFFER)
      cso_save_framebuffer(cso);
   if (state_mask & CSO_BIT_GEOMETRY_SHADER)
      cso_save_geometry_shader(cso);
   if (state_mask & CSO_BIT_MIN_SAMPLES)
      cso_save_min_samples(cso);
   if (state_mask & CSO_BIT_RASTERIZER)
      cso_save_rasterizer(cso);
   if (state_mask & CSO_BIT_RENDER_CONDITION)
      cso_save_render_condition(cso);
   if (state_mask & CSO_BIT_SAMPLE_MASK)
      cso_save_sample_mask(cso);
   if (state_mask & CSO_BIT_STENCIL_REF)
      cso_save_stencil_ref(cso);
   if (state_mask & CSO_BIT_STREAM_OUTPUTS)
      cso_save_stream_outputs(cso);
   if (state_mask & CSO_BIT_TESSCTRL_SHADER)
      cso_save_tessctrl_shader(cso);
   if (state_mask & CSO_BIT_TESSEVAL_SHADER)
      cso_save_tesseval_shader(cso);
   if (state_mask & CSO_BIT_VERTEX_ELEMENTS)
      cso_save_vertex_elements(cso);
   if (state_mask & CSO_BIT_VERTEX_SHADER)
      cso_save_vertex_shader(cso);
   if (state_mask & CSO_BIT_VIEWPORT)
      cso_save_viewport(cso);
   /* Pausing queries is done directly on the pipe, not via a saved item. */
   if (state_mask & CSO_BIT_PAUSE_QUERIES)
      cso->base.pipe->set_active_query_state(cso->base.pipe, false);
}
1683 
1684 
/**
 * Restore the state which was saved by cso_save_state().
 *
 * \param ctx     the CSO context holding the state saved by cso_save_state()
 * \param unbind  bitmask of CSO_UNBIND_x flags requesting that certain
 *                bindings (fragment sampler views/images, VS/FS constant
 *                buffer slot 0) additionally be set to NULL
 */
void
cso_restore_state(struct cso_context *ctx, unsigned unbind)
{
   struct cso_context_priv *cso = (struct cso_context_priv *)ctx;
   /* The bitmask recorded by cso_save_state() selects which items are
    * restored below.
    */
   unsigned state_mask = cso->saved_state;

   /* cso_save_state() must have been called first. */
   assert(state_mask);

   /* NOTE(review): the ordering below (shaders first, unbinds interleaved
    * with restores, stream outputs near the end) appears deliberate —
    * preserve it when modifying this function.
    */
   if (state_mask & CSO_BIT_DEPTH_STENCIL_ALPHA)
      cso_restore_depth_stencil_alpha(cso);
   if (state_mask & CSO_BIT_STENCIL_REF)
      cso_restore_stencil_ref(cso);
   if (state_mask & CSO_BIT_FRAGMENT_SHADER)
      cso_restore_fragment_shader(cso);
   if (state_mask & CSO_BIT_GEOMETRY_SHADER)
      cso_restore_geometry_shader(cso);
   if (state_mask & CSO_BIT_TESSEVAL_SHADER)
      cso_restore_tesseval_shader(cso);
   if (state_mask & CSO_BIT_TESSCTRL_SHADER)
      cso_restore_tessctrl_shader(cso);
   if (state_mask & CSO_BIT_VERTEX_SHADER)
      cso_restore_vertex_shader(cso);
   /* Unbind fragment sampler views on request: either all slots up to
    * max_fs_samplerviews, or just slot 0.
    */
   if (unbind & CSO_UNBIND_FS_SAMPLERVIEWS)
      cso->base.pipe->set_sampler_views(cso->base.pipe, PIPE_SHADER_FRAGMENT, 0, 0,
                                   cso->max_fs_samplerviews, false, NULL);
   if (unbind & CSO_UNBIND_FS_SAMPLERVIEW0)
      cso->base.pipe->set_sampler_views(cso->base.pipe, PIPE_SHADER_FRAGMENT, 0, 0,
                                   1, false, NULL);
   if (state_mask & CSO_BIT_FRAGMENT_SAMPLERS)
      cso_restore_fragment_samplers(cso);
   /* Unbind fragment shader image slot 0 on request. */
   if (unbind & CSO_UNBIND_FS_IMAGE0)
      cso->base.pipe->set_shader_images(cso->base.pipe, PIPE_SHADER_FRAGMENT, 0, 0, 1, NULL);
   if (state_mask & CSO_BIT_FRAMEBUFFER)
      cso_restore_framebuffer(cso);
   if (state_mask & CSO_BIT_BLEND)
      cso_restore_blend(cso);
   if (state_mask & CSO_BIT_RASTERIZER)
      cso_restore_rasterizer(cso);
   if (state_mask & CSO_BIT_MIN_SAMPLES)
      cso_restore_min_samples(cso);
   if (state_mask & CSO_BIT_RENDER_CONDITION)
      cso_restore_render_condition(cso);
   if (state_mask & CSO_BIT_SAMPLE_MASK)
      cso_restore_sample_mask(cso);
   if (state_mask & CSO_BIT_VIEWPORT)
      cso_restore_viewport(cso);
   /* Unbind constant buffer slot 0 for the vertex/fragment stages on
    * request.
    */
   if (unbind & CSO_UNBIND_VS_CONSTANTS)
      cso->base.pipe->set_constant_buffer(cso->base.pipe, PIPE_SHADER_VERTEX, 0, false, NULL);
   if (unbind & CSO_UNBIND_FS_CONSTANTS)
      cso->base.pipe->set_constant_buffer(cso->base.pipe, PIPE_SHADER_FRAGMENT, 0, false, NULL);
   if (state_mask & CSO_BIT_VERTEX_ELEMENTS)
      cso_restore_vertex_elements(cso);
   if (state_mask & CSO_BIT_STREAM_OUTPUTS)
      cso_restore_stream_outputs(cso);
   /* Re-enable queries if they were paused by cso_save_state(). */
   if (state_mask & CSO_BIT_PAUSE_QUERIES)
      cso->base.pipe->set_active_query_state(cso->base.pipe, true);

   /* Mark the saved state as consumed so a new save may begin. */
   cso->saved_state = 0;
}
1747 
1748 
1749 /**
1750  * Save all the CSO state items specified by the state_mask bitmask
1751  * of CSO_BIT_COMPUTE_x flags.
1752  */
1753 void
cso_save_compute_state(struct cso_context * ctx,unsigned state_mask)1754 cso_save_compute_state(struct cso_context *ctx, unsigned state_mask)
1755 {
1756    struct cso_context_priv *cso = (struct cso_context_priv *)ctx;
1757    assert(cso->saved_compute_state == 0);
1758 
1759    cso->saved_compute_state = state_mask;
1760 
1761    if (state_mask & CSO_BIT_COMPUTE_SHADER)
1762       cso_save_compute_shader(cso);
1763 
1764    if (state_mask & CSO_BIT_COMPUTE_SAMPLERS)
1765       cso_save_compute_samplers(cso);
1766 }
1767 
1768 
1769 /**
1770  * Restore the state which was saved by cso_save_compute_state().
1771  */
1772 void
cso_restore_compute_state(struct cso_context * ctx)1773 cso_restore_compute_state(struct cso_context *ctx)
1774 {
1775    struct cso_context_priv *cso = (struct cso_context_priv *)ctx;
1776    unsigned state_mask = cso->saved_compute_state;
1777 
1778    assert(state_mask);
1779 
1780    if (state_mask & CSO_BIT_COMPUTE_SHADER)
1781       cso_restore_compute_shader(cso);
1782 
1783    if (state_mask & CSO_BIT_COMPUTE_SAMPLERS)
1784       cso_restore_compute_samplers(cso);
1785 
1786    cso->saved_compute_state = 0;
1787 }
1788 
1789 
1790 
1791 /* drawing */
1792 
1793 void
cso_draw_arrays(struct cso_context * ctx,unsigned mode,unsigned start,unsigned count)1794 cso_draw_arrays(struct cso_context *ctx, unsigned mode, unsigned start, unsigned count)
1795 {
1796    struct pipe_draw_info info;
1797    struct pipe_draw_start_count_bias draw;
1798 
1799    util_draw_init_info(&info);
1800 
1801    info.mode = mode;
1802    info.index_bounds_valid = true;
1803    info.min_index = start;
1804    info.max_index = start + count - 1;
1805 
1806    draw.start = start;
1807    draw.count = count;
1808    draw.index_bias = 0;
1809 
1810    cso_draw_vbo(ctx, &info, 0, NULL, &draw, 1);
1811 }
1812 
1813 
1814 void
cso_draw_arrays_instanced(struct cso_context * ctx,unsigned mode,unsigned start,unsigned count,unsigned start_instance,unsigned instance_count)1815 cso_draw_arrays_instanced(struct cso_context *ctx, unsigned mode,
1816                           unsigned start, unsigned count,
1817                           unsigned start_instance, unsigned instance_count)
1818 {
1819    struct pipe_draw_info info;
1820    struct pipe_draw_start_count_bias draw;
1821 
1822    util_draw_init_info(&info);
1823 
1824    info.mode = mode;
1825    info.index_bounds_valid = true;
1826    info.min_index = start;
1827    info.max_index = start + count - 1;
1828    info.start_instance = start_instance;
1829    info.instance_count = instance_count;
1830 
1831    draw.start = start;
1832    draw.count = count;
1833    draw.index_bias = 0;
1834 
1835    cso_draw_vbo(ctx, &info, 0, NULL, &draw, 1);
1836 }
1837