/*
 * Copyright (c) 2022 Amazon.com, Inc. or its affiliates.
 * Copyright (C) 2019-2022 Collabora, Ltd.
 * Copyright (C) 2019 Red Hat Inc.
 * Copyright (C) 2018 Alyssa Rosenzweig
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors (Collabora):
 *   Alyssa Rosenzweig <[email protected]>
 *
 */

#include "pan_shader.h"
#include "nir/tgsi_to_nir.h"
#include "util/u_memory.h"
#include "util/u_prim.h"
#include "nir_builder.h"
#include "nir_serialize.h"
#include "pan_bo.h"
#include "pan_context.h"
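/* Allocate an uncompiled shader CSO wrapping the given NIR, and hash the
 * serialized NIR so variants can later be looked up in the disk cache.
 */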
static struct panfrost_uncompiled_shader *
panfrost_alloc_shader(const nir_shader *nir)
{
   struct panfrost_uncompiled_shader *so =
      rzalloc(NULL, struct panfrost_uncompiled_shader);

   simple_mtx_init(&so->lock, mtx_plain);
   util_dynarray_init(&so->variants, so);

   so->nir = nir;

   /* Serialize the NIR to a binary blob that we can hash for the disk
    * cache. Drop unnecessary information (like variable names) so the
    * serialized NIR is smaller, and also to let us detect more isomorphic
    * shaders when hashing, increasing cache hits.
    */
   struct blob blob;
   blob_init(&blob);
   nir_serialize(&blob, nir, true);
   _mesa_sha1_compute(blob.data, blob.size, so->nir_sha1);
   blob_finish(&blob);

   return so;
}
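/* Reserve space for a new variant in the CSO's variant list. The caller is
 * responsible for initializing the returned entry.
 */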
static struct panfrost_compiled_shader *
panfrost_alloc_variant(struct panfrost_uncompiled_shader *so)
{
   return util_dynarray_grow(&so->variants, struct panfrost_compiled_shader, 1);
}
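/* Fold loads of the poly-line-smooth-enabled state to a constant true. This
 * pass only runs on variants whose key enables line smoothing, so the value
 * is known at compile time.
 */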
static void
lower_load_poly_line_smooth_enabled(nir_shader *nir,
                                    const struct panfrost_shader_key *key)
{
   nir_function_impl *impl = nir_shader_get_entrypoint(nir);
   nir_builder b = nir_builder_create(impl);

   nir_foreach_block_safe(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         if (intrin->intrinsic != nir_intrinsic_load_poly_line_smooth_enabled)
            continue;

         b.cursor = nir_before_instr(instr);
         nir_def_replace(&intrin->def, nir_imm_true(&b));
         nir_instr_free(instr);
      }
   }
}
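/* Compile a NIR shader into a binary for the target GPU, applying the
 * key-dependent lowerings (fragment output formats, point sprites, user clip
 * planes, line smoothing, XFB) before invoking the per-architecture backend
 * compiler.
 */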
static void
panfrost_shader_compile(struct panfrost_screen *screen, const nir_shader *ir,
                        struct util_debug_callback *dbg,
                        struct panfrost_shader_key *key, unsigned req_local_mem,
                        unsigned fixed_varying_mask,
                        struct panfrost_shader_binary *out)
{
   struct panfrost_device *dev = pan_device(&screen->base);

   nir_shader *s = nir_shader_clone(NULL, ir);

   /* While graphics shaders are preprocessed at CSO create time, compute
    * kernels are not preprocessed until they're cloned since the driver does
    * not get ownership of the NIR from compute CSOs. Do this preprocessing now.
    * Compute CSOs call this function during create time, so preprocessing
    * happens at CSO create time regardless.
    */
   if (gl_shader_stage_is_compute(s->info.stage))
      pan_shader_preprocess(s, panfrost_device_gpu_id(dev));

   struct panfrost_compile_inputs inputs = {
      .debug = dbg,
      .gpu_id = panfrost_device_gpu_id(dev),
   };

   /* Lower this early so the backends don't have to worry about it */
   if (s->info.stage == MESA_SHADER_FRAGMENT) {
      inputs.fixed_varying_mask = key->fs.fixed_varying_mask;
   } else if (s->info.stage == MESA_SHADER_VERTEX) {
      inputs.fixed_varying_mask = fixed_varying_mask;

      /* No IDVS for internal XFB shaders */
      inputs.no_idvs = s->info.has_transform_feedback_varyings;

      if (s->info.has_transform_feedback_varyings) {
         NIR_PASS_V(s, nir_io_add_const_offset_to_base,
                    nir_var_shader_in | nir_var_shader_out);
         NIR_PASS_V(s, nir_io_add_intrinsic_xfb_info);
         NIR_PASS_V(s, pan_lower_xfb);
      }
   }

   util_dynarray_init(&out->binary, NULL);

   if (s->info.stage == MESA_SHADER_FRAGMENT) {
      if (key->fs.nr_cbufs_for_fragcolor) {
         NIR_PASS_V(s, panfrost_nir_remove_fragcolor_stores,
                    key->fs.nr_cbufs_for_fragcolor);
      }

      if (key->fs.sprite_coord_enable) {
         NIR_PASS_V(s, nir_lower_texcoord_replace_late,
                    key->fs.sprite_coord_enable,
                    true /* point coord is sysval */);
      }

      if (key->fs.clip_plane_enable) {
         NIR_PASS_V(s, nir_lower_clip_fs, key->fs.clip_plane_enable, false);
      }

      if (key->fs.line_smooth) {
         NIR_PASS_V(s, nir_lower_poly_line_smooth, 16);
         NIR_PASS_V(s, lower_load_poly_line_smooth_enabled, key);
         NIR_PASS_V(s, nir_lower_alu);
      }
   }

   if (dev->arch <= 5 && s->info.stage == MESA_SHADER_FRAGMENT) {
      NIR_PASS_V(s, pan_lower_framebuffer, key->fs.rt_formats,
                 pan_raw_format_mask_midgard(key->fs.rt_formats), 0,
                 panfrost_device_gpu_id(dev) < 0x700);
   }

   NIR_PASS_V(s, panfrost_nir_lower_sysvals, dev->arch, &out->sysvals);

   /* Lower resource indices */
   NIR_PASS_V(s, panfrost_nir_lower_res_indices, &inputs);

   screen->vtbl.compile_shader(s, &inputs, &out->binary, &out->info);

   assert(req_local_mem >= out->info.wls_size);
   out->info.wls_size = req_local_mem;

   /* In both clone and tgsi_to_nir paths, the shader is ralloc'd against
    * a NULL context
    */
   ralloc_free(s);
}
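/* Produce the compiled form of a shader variant, preferring the disk cache
 * and compiling from NIR on a miss, then upload the binary and prepare the
 * packed descriptors for the hardware.
 */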
static void
panfrost_shader_get(struct pipe_screen *pscreen,
                    struct panfrost_pool *shader_pool,
                    struct panfrost_pool *desc_pool,
                    struct panfrost_uncompiled_shader *uncompiled,
                    struct util_debug_callback *dbg,
                    struct panfrost_compiled_shader *state,
                    unsigned req_local_mem)
{
   struct panfrost_screen *screen = pan_screen(pscreen);
   struct panfrost_device *dev = pan_device(pscreen);

   struct panfrost_shader_binary res = {0};

   /* Try to retrieve the variant from the disk cache. If that fails,
    * compile a new variant and store in the disk cache for later reuse.
    */
   if (!panfrost_disk_cache_retrieve(screen->disk_cache, uncompiled,
                                     &state->key, &res)) {
      panfrost_shader_compile(screen, uncompiled->nir, dbg, &state->key,
                              req_local_mem, uncompiled->fixed_varying_mask,
                              &res);

      panfrost_disk_cache_store(screen->disk_cache, uncompiled, &state->key,
                                &res);
   }

   state->info = res.info;
   state->sysvals = res.sysvals;

   if (res.binary.size) {
      state->bin = panfrost_pool_take_ref(
         shader_pool,
         pan_pool_upload_aligned(&shader_pool->base, res.binary.data,
                                 res.binary.size, 128));
   }

   util_dynarray_fini(&res.binary);

   /* Don't upload RSD for fragment shaders since they need draw-time
    * merging for e.g. depth/stencil/alpha. RSDs are replaced by simpler
    * shader program descriptors on Valhall, which can be preuploaded even
    * for fragment shaders. */
   bool upload =
      !(uncompiled->nir->info.stage == MESA_SHADER_FRAGMENT && dev->arch <= 7);
   screen->vtbl.prepare_shader(state, desc_pool, upload);

   panfrost_analyze_sysvals(state);
}
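/* Construct the variant key from current CSO state. Only fragment shaders
 * have variants; the key captures render target formats, point sprite and
 * clip plane state, line smoothing, and the linked vertex shader's fixed
 * varyings where the architecture needs them.
 */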
static void
panfrost_build_key(struct panfrost_context *ctx,
                   struct panfrost_shader_key *key,
                   struct panfrost_uncompiled_shader *uncompiled)
{
   const nir_shader *nir = uncompiled->nir;

   /* We don't currently have vertex shader variants */
   if (nir->info.stage != MESA_SHADER_FRAGMENT)
      return;

   struct panfrost_device *dev = pan_device(ctx->base.screen);
   struct pipe_framebuffer_state *fb = &ctx->pipe_framebuffer;
   struct pipe_rasterizer_state *rast = (void *)ctx->rasterizer;
   struct panfrost_uncompiled_shader *vs = ctx->uncompiled[MESA_SHADER_VERTEX];

   /* gl_FragColor lowering needs the number of colour buffers */
   if (uncompiled->fragcolor_lowered) {
      key->fs.nr_cbufs_for_fragcolor = fb->nr_cbufs;
   }

   /* Point sprite lowering needed on Bifrost and newer */
   if (dev->arch >= 6 && rast && ctx->active_prim == MESA_PRIM_POINTS) {
      key->fs.sprite_coord_enable = rast->sprite_coord_enable;
   }

   /* User clip plane lowering needed everywhere */
   if (rast) {
      key->fs.clip_plane_enable = rast->clip_plane_enable;

      if (u_reduced_prim(ctx->active_prim) == MESA_PRIM_LINES)
         key->fs.line_smooth = rast->line_smooth;
   }

   if (dev->arch <= 5) {
      u_foreach_bit(i, (nir->info.outputs_read >> FRAG_RESULT_DATA0)) {
         enum pipe_format fmt = PIPE_FORMAT_R8G8B8A8_UNORM;

         if ((fb->nr_cbufs > i) && fb->cbufs[i])
            fmt = fb->cbufs[i]->format;

         if (panfrost_blendable_formats_v6[fmt].internal)
            fmt = PIPE_FORMAT_NONE;

         key->fs.rt_formats[i] = fmt;
      }
   }

   /* Funny desktop GL varying lowering on Valhall */
   if (dev->arch >= 9) {
      assert(vs != NULL && "too early");
      key->fs.fixed_varying_mask = vs->fixed_varying_mask;
   }
}
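/* Compile and register a new variant for the given key. The caller must hold
 * the uncompiled shader's lock, or otherwise guarantee exclusive access as at
 * CSO create time.
 */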
static struct panfrost_compiled_shader *
panfrost_new_variant_locked(struct panfrost_context *ctx,
                            struct panfrost_uncompiled_shader *uncompiled,
                            struct panfrost_shader_key *key)
{
   struct panfrost_compiled_shader *prog = panfrost_alloc_variant(uncompiled);

   *prog = (struct panfrost_compiled_shader){
      .key = *key,
      .stream_output = uncompiled->stream_output,
   };

   panfrost_shader_get(ctx->base.screen, &ctx->shaders, &ctx->descs, uncompiled,
                       &ctx->base.debug, prog, 0);

   prog->earlyzs = pan_earlyzs_analyze(&prog->info);

   return prog;
}
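/* Common CSO bind path for vertex and fragment shaders: record the bound
 * uncompiled shader, invalidate the current variant, and pick (or build) a
 * matching variant immediately.
 */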
static void
panfrost_bind_shader_state(struct pipe_context *pctx, void *hwcso,
                           enum pipe_shader_type type)
{
   struct panfrost_context *ctx = pan_context(pctx);
   ctx->uncompiled[type] = hwcso;
   ctx->prog[type] = NULL;

   ctx->dirty |= PAN_DIRTY_TLS_SIZE;
   ctx->dirty_shader[type] |= PAN_DIRTY_STAGE_SHADER;

   if (hwcso)
      panfrost_update_shader_variant(ctx, type);
}
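/* Select the shader variant matching current state, compiling a new variant
 * if no existing one matches the key.
 */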
void
panfrost_update_shader_variant(struct panfrost_context *ctx,
                               enum pipe_shader_type type)
{
   /* No shader variants for compute */
   if (type == PIPE_SHADER_COMPUTE)
      return;

   /* We need linking information, defer this */
   if (type == PIPE_SHADER_FRAGMENT && !ctx->uncompiled[PIPE_SHADER_VERTEX])
      return;

   /* Also defer, happens with GALLIUM_HUD */
   if (!ctx->uncompiled[type])
      return;

   /* Match the appropriate variant */
   struct panfrost_uncompiled_shader *uncompiled = ctx->uncompiled[type];
   struct panfrost_compiled_shader *compiled = NULL;

   simple_mtx_lock(&uncompiled->lock);

   struct panfrost_shader_key key = {0};
   panfrost_build_key(ctx, &key, uncompiled);

   util_dynarray_foreach(&uncompiled->variants, struct panfrost_compiled_shader,
                         so) {
      if (memcmp(&key, &so->key, sizeof(key)) == 0) {
         compiled = so;
         break;
      }
   }

   if (compiled == NULL)
      compiled = panfrost_new_variant_locked(ctx, uncompiled, &key);

   ctx->prog[type] = compiled;

   simple_mtx_unlock(&uncompiled->lock);
}
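/* Binding a vertex shader also reselects the fragment variant, since
 * fragment keys can depend on vertex shader linkage.
 */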
static void
panfrost_bind_vs_state(struct pipe_context *pctx, void *hwcso)
{
   panfrost_bind_shader_state(pctx, hwcso, PIPE_SHADER_VERTEX);

   /* Fragment shaders are linked with vertex shaders */
   struct panfrost_context *ctx = pan_context(pctx);
   panfrost_update_shader_variant(ctx, PIPE_SHADER_FRAGMENT);
}

static void
panfrost_bind_fs_state(struct pipe_context *pctx, void *hwcso)
{
   panfrost_bind_shader_state(pctx, hwcso, PIPE_SHADER_FRAGMENT);
}
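/* Create a graphics shader CSO: take ownership of the NIR (translating from
 * TGSI if needed), run common preprocessing and lowering, compile the XFB
 * program if transform feedback is used, and precompile a default variant.
 */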
static void *
panfrost_create_shader_state(struct pipe_context *pctx,
                             const struct pipe_shader_state *cso)
{
   nir_shader *nir = (cso->type == PIPE_SHADER_IR_TGSI)
                        ? tgsi_to_nir(cso->tokens, pctx->screen, false)
                        : cso->ir.nir;

   struct panfrost_uncompiled_shader *so = panfrost_alloc_shader(nir);

   /* The driver gets ownership of the nir_shader for graphics. The NIR is
    * ralloc'd. Free the NIR when we free the uncompiled shader.
    */
   ralloc_steal(so, nir);

   so->stream_output = cso->stream_output;
   so->nir = nir;

   /* Fix linkage early */
   if (so->nir->info.stage == MESA_SHADER_VERTEX) {
      so->fixed_varying_mask =
         (so->nir->info.outputs_written & BITFIELD_MASK(VARYING_SLOT_VAR0)) &
         ~VARYING_BIT_POS & ~VARYING_BIT_PSIZ;
   }

   /* gl_FragColor needs to be lowered before lowering I/O, do that now */
   if (nir->info.stage == MESA_SHADER_FRAGMENT &&
       nir->info.outputs_written & BITFIELD_BIT(FRAG_RESULT_COLOR)) {

      NIR_PASS_V(nir, nir_lower_fragcolor,
                 nir->info.fs.color_is_dual_source ? 1 : 8);
      so->fragcolor_lowered = true;
   }

   /* Then run the suite of lowering and optimization, including I/O lowering */
   struct panfrost_device *dev = pan_device(pctx->screen);
   pan_shader_preprocess(nir, panfrost_device_gpu_id(dev));

   /* Vertex shaders get passed images through the vertex attribute descriptor
    * array. We need to add an offset to all image intrinsics so they point
    * to the right attribute.
    */
   if (nir->info.stage == MESA_SHADER_VERTEX && dev->arch <= 7) {
      NIR_PASS_V(nir, pan_lower_image_index,
                 util_bitcount64(nir->info.inputs_read));
   }

   /* If this shader uses transform feedback, compile the transform
    * feedback program. This is a special shader variant.
    */
   struct panfrost_context *ctx = pan_context(pctx);

   if (so->nir->xfb_info) {
      so->xfb = calloc(1, sizeof(struct panfrost_compiled_shader));
      so->xfb->key.vs_is_xfb = true;

      panfrost_shader_get(ctx->base.screen, &ctx->shaders, &ctx->descs, so,
                          &ctx->base.debug, so->xfb, 0);

      /* Since transform feedback is handled via the transform
       * feedback program, the original program no longer uses XFB
       */
      nir->info.has_transform_feedback_varyings = false;
   }

   /* Compile the program. We don't use vertex shader keys, so there will
    * be no further vertex shader variants. We do have fragment shader
    * keys, but we can still compile with a default key that will work most
    * of the time.
    */
   struct panfrost_shader_key key = {0};

   /* gl_FragColor lowering needs the number of colour buffers on desktop
    * GL, where it acts as an implicit broadcast to all colour buffers.
    *
    * However, gl_FragColor is a legacy feature, so assume that if
    * gl_FragColor is used, there is only a single render target. The
    * implicit broadcast is neither especially useful nor required by GLES.
    */
   if (so->fragcolor_lowered)
      key.fs.nr_cbufs_for_fragcolor = 1;

   /* Creating a CSO is single-threaded, so it's ok to use the
    * locked function without explicitly taking the lock. Creating a
    * default variant acts as a precompile.
    */
   panfrost_new_variant_locked(ctx, so, &key);

   return so;
}
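/* Destroy a shader CSO, releasing the GPU memory referenced by every compiled
 * variant (and the XFB program, if any) before freeing the CSO itself.
 */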
static void
panfrost_delete_shader_state(struct pipe_context *pctx, void *so)
{
   struct panfrost_uncompiled_shader *cso =
      (struct panfrost_uncompiled_shader *)so;

   util_dynarray_foreach(&cso->variants, struct panfrost_compiled_shader, so) {
      panfrost_bo_unreference(so->bin.bo);
      panfrost_bo_unreference(so->state.bo);
      panfrost_bo_unreference(so->linkage.bo);
   }

   if (cso->xfb) {
      panfrost_bo_unreference(cso->xfb->bin.bo);
      panfrost_bo_unreference(cso->xfb->state.bo);
      panfrost_bo_unreference(cso->xfb->linkage.bo);
      free(cso->xfb);
   }

   simple_mtx_destroy(&cso->lock);

   ralloc_free(so);
}

/*
 * Create a compute CSO. As compute kernels do not require variants, they are
 * precompiled, creating both the uncompiled and compiled shaders now.
 */
static void *
panfrost_create_compute_state(struct pipe_context *pctx,
                              const struct pipe_compute_state *cso)
{
   struct panfrost_context *ctx = pan_context(pctx);
   struct panfrost_uncompiled_shader *so = panfrost_alloc_shader(cso->prog);
   struct panfrost_compiled_shader *v = panfrost_alloc_variant(so);
   memset(v, 0, sizeof *v);

   assert(cso->ir_type == PIPE_SHADER_IR_NIR && "TGSI kernels unsupported");

   panfrost_shader_get(pctx->screen, &ctx->shaders, &ctx->descs, so,
                       &ctx->base.debug, v, cso->static_shared_mem);

   /* The NIR becomes invalid after this. For compute kernels, we never
    * need to access it again. Don't keep a dangling pointer around.
    */
   ralloc_free((void *)so->nir);
   so->nir = NULL;

   return so;
}
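/* Bind a compute CSO. Compute shaders have exactly one precompiled variant,
 * so it is selected directly without key matching.
 */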
static void
panfrost_bind_compute_state(struct pipe_context *pipe, void *cso)
{
   struct panfrost_context *ctx = pan_context(pipe);
   struct panfrost_uncompiled_shader *uncompiled = cso;

   ctx->uncompiled[PIPE_SHADER_COMPUTE] = uncompiled;

   ctx->prog[PIPE_SHADER_COMPUTE] =
      uncompiled ? util_dynarray_begin(&uncompiled->variants) : NULL;
}
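/* Report per-kernel limits derived from the compiled variant: the maximum
 * thread count given register usage, private (TLS) memory, and the supported
 * subgroup size.
 */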
static void
panfrost_get_compute_state_info(struct pipe_context *pipe, void *cso,
                                struct pipe_compute_state_object_info *info)
{
   struct panfrost_device *dev = pan_device(pipe->screen);
   struct panfrost_uncompiled_shader *uncompiled = cso;
   struct panfrost_compiled_shader *cs =
      util_dynarray_begin(&uncompiled->variants);

   info->max_threads = panfrost_compute_max_thread_count(
      &dev->kmod.props, cs->info.work_reg_count);
   info->private_memory = cs->info.tls_size;
   info->simd_sizes = pan_subgroup_size(dev->arch);
   info->preferred_simd_size = info->simd_sizes;
}
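/* Wire up the shader-related entry points on context creation. */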
void
panfrost_shader_context_init(struct pipe_context *pctx)
{
   pctx->create_vs_state = panfrost_create_shader_state;
   pctx->delete_vs_state = panfrost_delete_shader_state;
   pctx->bind_vs_state = panfrost_bind_vs_state;

   pctx->create_fs_state = panfrost_create_shader_state;
   pctx->delete_fs_state = panfrost_delete_shader_state;
   pctx->bind_fs_state = panfrost_bind_fs_state;

   pctx->create_compute_state = panfrost_create_compute_state;
   pctx->bind_compute_state = panfrost_bind_compute_state;
   pctx->get_compute_state_info = panfrost_get_compute_state_info;
   pctx->delete_compute_state = panfrost_delete_shader_state;
}
565