1 /*
2 * Copyright © 2014-2017 Broadcom
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <inttypes.h>
25 #include "util/format/u_format.h"
26 #include "util/u_math.h"
27 #include "util/u_memory.h"
28 #include "util/ralloc.h"
29 #include "util/hash_table.h"
30 #include "util/u_upload_mgr.h"
31 #include "tgsi/tgsi_dump.h"
32 #include "compiler/nir/nir.h"
33 #include "compiler/nir/nir_builder.h"
34 #include "compiler/nir/nir_serialize.h"
35 #include "nir/tgsi_to_nir.h"
36 #include "compiler/v3d_compiler.h"
37 #include "v3d_context.h"
38 /* packets here are the same across V3D versions. */
39 #include "broadcom/cle/v3d_packet_v42_pack.h"
40
41 static struct v3d_compiled_shader *
42 v3d_get_compiled_shader(struct v3d_context *v3d,
43 struct v3d_key *key, size_t key_size,
44 struct v3d_uncompiled_shader *uncompiled);
45
46 static void
47 v3d_setup_shared_precompile_key(struct v3d_uncompiled_shader *uncompiled,
48 struct v3d_key *key);
49
50 static gl_varying_slot
v3d_get_slot_for_driver_location(nir_shader * s,uint32_t driver_location)51 v3d_get_slot_for_driver_location(nir_shader *s, uint32_t driver_location)
52 {
53 nir_foreach_shader_out_variable(var, s) {
54 if (var->data.driver_location == driver_location) {
55 return var->data.location;
56 }
57
58 /* For compact arrays, we have more than one location to
59 * check.
60 */
61 if (var->data.compact) {
62 assert(glsl_type_is_array(var->type));
63 for (int i = 0; i < DIV_ROUND_UP(glsl_array_size(var->type), 4); i++) {
64 if ((var->data.driver_location + i) == driver_location) {
65 return var->data.location;
66 }
67 }
68 }
69 }
70
71 return -1;
72 }
73
74 /**
75 * Precomputes the TRANSFORM_FEEDBACK_OUTPUT_DATA_SPEC array for the shader.
76 *
77 * A shader can have 16 of these specs, and each one of them can write up to
78 * 16 dwords. Since we allow a total of 64 transform feedback output
79 * components (not 16 vectors), we have to group the writes of multiple
80 * varyings together in a single data spec.
81 */
static void
v3d_set_transform_feedback_outputs(struct v3d_uncompiled_shader *so,
                                   const struct pipe_stream_output_info *stream_output)
{
        if (!stream_output->num_outputs)
                return;

        /* Flattened list of (slot, component) pairs the coordinate shader
         * must emit for TF, in buffer order.
         */
        struct v3d_varying_slot slots[PIPE_MAX_SO_OUTPUTS * 4];
        int slot_count = 0;

        for (int buffer = 0; buffer < PIPE_MAX_SO_BUFFERS; buffer++) {
                uint32_t buffer_offset = 0;
                uint32_t vpm_start = slot_count;

                for (int i = 0; i < stream_output->num_outputs; i++) {
                        const struct pipe_stream_output *output =
                                &stream_output->output[i];

                        if (output->output_buffer != buffer)
                                continue;

                        /* We assume that the SO outputs appear in increasing
                         * order in the buffer.
                         */
                        assert(output->dst_offset >= buffer_offset);

                        /* Pad any undefined slots in the output (position.x
                         * serves as a harmless dummy value).
                         */
                        for (int j = buffer_offset; j < output->dst_offset; j++) {
                                slots[slot_count] =
                                        v3d_slot_from_slot_and_component(VARYING_SLOT_POS, 0);
                                slot_count++;
                                buffer_offset++;
                        }

                        /* Set the coordinate shader up to output the
                         * components of this varying.
                         */
                        for (int j = 0; j < output->num_components; j++) {
                                gl_varying_slot slot =
                                        v3d_get_slot_for_driver_location(so->base.ir.nir, output->register_index);

                                slots[slot_count] =
                                        v3d_slot_from_slot_and_component(slot,
                                                                         output->start_component + j);
                                slot_count++;
                                buffer_offset++;
                        }
                }

                uint32_t vpm_size = slot_count - vpm_start;
                if (!vpm_size)
                        continue;

                /* +6 skips the fixed [X, Y, Z, W, Xs, Ys] block at the start
                 * of the coordinate shader's VPM output (see comment in the
                 * initializer below).
                 */
                uint32_t vpm_start_offset = vpm_start + 6;

                /* Each spec writes at most 16 consecutive dwords, so larger
                 * runs are split across multiple specs.
                 */
                while (vpm_size) {
                        uint32_t write_size = MIN2(vpm_size, 1 << 4);

                        struct V3D42_TRANSFORM_FEEDBACK_OUTPUT_DATA_SPEC unpacked = {
                                /* We need the offset from the coordinate shader's VPM
                                 * output block, which has the [X, Y, Z, W, Xs, Ys]
                                 * values at the start.
                                 */
                                .first_shaded_vertex_value_to_output = vpm_start_offset,
                                .number_of_consecutive_vertex_values_to_output_as_32_bit_values = write_size,
                                .output_buffer_to_write_to = buffer,
                        };

                        /* GFXH-1559 */
                        assert(unpacked.first_shaded_vertex_value_to_output != 8 ||
                               so->num_tf_specs != 0);

                        assert(so->num_tf_specs != ARRAY_SIZE(so->tf_specs));
                        V3D42_TRANSFORM_FEEDBACK_OUTPUT_DATA_SPEC_pack(NULL,
                                                                       (void *)&so->tf_specs[so->num_tf_specs],
                                                                       &unpacked);

                        /* If point size is being written by the shader, then
                         * all the VPM start offsets are shifted up by one.
                         * We won't know that until the variant is compiled,
                         * though.
                         */
                        unpacked.first_shaded_vertex_value_to_output++;

                        /* GFXH-1559 */
                        assert(unpacked.first_shaded_vertex_value_to_output != 8 ||
                               so->num_tf_specs != 0);

                        /* Second copy of each spec, used by variants that do
                         * write point size.
                         */
                        V3D42_TRANSFORM_FEEDBACK_OUTPUT_DATA_SPEC_pack(NULL,
                                                                       (void *)&so->tf_specs_psiz[so->num_tf_specs],
                                                                       &unpacked);
                        so->num_tf_specs++;
                        vpm_start_offset += write_size;
                        vpm_size -= write_size;
                }
                so->base.stream_output.stride[buffer] =
                        stream_output->stride[buffer];
        }

        /* Keep the flattened slot list around; the coord-shader variant keys
         * consume it (see v3d_update_compiled_vs/gs). Owned by the NIR
         * shader's ralloc context.
         */
        so->num_tf_outputs = slot_count;
        so->tf_outputs = ralloc_array(so->base.ir.nir, struct v3d_varying_slot,
                                      slot_count);
        memcpy(so->tf_outputs, slots, sizeof(*slots) * slot_count);
}
186
187 static int
type_size(const struct glsl_type * type,bool bindless)188 type_size(const struct glsl_type *type, bool bindless)
189 {
190 return glsl_count_attribute_slots(type, false);
191 }
192
193 static void
precompile_all_outputs(nir_shader * s,struct v3d_varying_slot * outputs,uint8_t * num_outputs)194 precompile_all_outputs(nir_shader *s,
195 struct v3d_varying_slot *outputs,
196 uint8_t *num_outputs)
197 {
198 nir_foreach_shader_out_variable(var, s) {
199 const int array_len = glsl_type_is_vector_or_scalar(var->type) ?
200 1 : MAX2(glsl_get_length(var->type), 1);
201 for (int j = 0; j < array_len; j++) {
202 const int slot = var->data.location + j;
203 const int num_components =
204 glsl_get_components(var->type);
205 for (int i = 0; i < num_components; i++) {
206 const int swiz = var->data.location_frac + i;
207 outputs[(*num_outputs)++] =
208 v3d_slot_from_slot_and_component(slot,
209 swiz);
210 }
211 }
212 }
213 }
214
215 /**
216 * Precompiles a shader variant at shader state creation time if
217 * V3D_DEBUG=precompile is set. Used for shader-db
218 * (https://gitlab.freedesktop.org/mesa/shader-db)
219 */
static void
v3d_shader_precompile(struct v3d_context *v3d,
                      struct v3d_uncompiled_shader *so)
{
        nir_shader *s = so->base.ir.nir;

        if (s->info.stage == MESA_SHADER_FRAGMENT) {
                struct v3d_fs_key key = {
                };

                /* Derive the bound-color-buffer mask from which FRAG_RESULT
                 * outputs the shader writes.
                 */
                nir_foreach_shader_out_variable(var, s) {
                        if (var->data.location == FRAG_RESULT_COLOR) {
                                key.cbufs |= 1 << 0;
                        } else if (var->data.location >= FRAG_RESULT_DATA0) {
                                key.cbufs |= 1 << (var->data.location -
                                                   FRAG_RESULT_DATA0);
                        }
                }

                key.logicop_func = PIPE_LOGICOP_COPY;

                v3d_setup_shared_precompile_key(so, &key.base);
                v3d_get_compiled_shader(v3d, &key.base, sizeof(key), so);
        } else if (s->info.stage == MESA_SHADER_GEOMETRY) {
                struct v3d_gs_key key = {
                        .base.is_last_geometry_stage = true,
                };

                v3d_setup_shared_precompile_key(so, &key.base);

                /* Render-mode variant: consume every output the GS writes. */
                precompile_all_outputs(s,
                                       key.used_outputs,
                                       &key.num_used_outputs);

                v3d_get_compiled_shader(v3d, &key.base, sizeof(key), so);

                /* Compile GS bin shader: only position (XXX: include TF) */
                key.is_coord = true;
                key.num_used_outputs = 0;
                for (int i = 0; i < 4; i++) {
                        key.used_outputs[key.num_used_outputs++] =
                                v3d_slot_from_slot_and_component(VARYING_SLOT_POS,
                                                                 i);
                }
                v3d_get_compiled_shader(v3d, &key.base, sizeof(key), so);
        } else {
                assert(s->info.stage == MESA_SHADER_VERTEX);
                struct v3d_vs_key key = {
                        /* Emit fixed function outputs */
                        .base.is_last_geometry_stage = true,
                };

                v3d_setup_shared_precompile_key(so, &key.base);

                /* Render-mode variant: consume every output the VS writes. */
                precompile_all_outputs(s,
                                       key.used_outputs,
                                       &key.num_used_outputs);

                v3d_get_compiled_shader(v3d, &key.base, sizeof(key), so);

                /* Compile VS bin shader: only position (XXX: include TF) */
                key.is_coord = true;
                key.num_used_outputs = 0;
                for (int i = 0; i < 4; i++) {
                        key.used_outputs[key.num_used_outputs++] =
                                v3d_slot_from_slot_and_component(VARYING_SLOT_POS,
                                                                 i);
                }
                v3d_get_compiled_shader(v3d, &key.base, sizeof(key), so);
        }
}
291
292 static bool
lower_uniform_offset_to_bytes_cb(nir_builder * b,nir_intrinsic_instr * intr,void * _state)293 lower_uniform_offset_to_bytes_cb(nir_builder *b, nir_intrinsic_instr *intr,
294 void *_state)
295 {
296 if (intr->intrinsic != nir_intrinsic_load_uniform)
297 return false;
298
299 b->cursor = nir_before_instr(&intr->instr);
300 nir_intrinsic_set_base(intr, nir_intrinsic_base(intr) * 16);
301 nir_src_rewrite(&intr->src[0], nir_ishl_imm(b, intr->src[0].ssa, 4));
302 return true;
303 }
304
305 static bool
lower_textures_cb(nir_builder * b,nir_instr * instr,void * _state)306 lower_textures_cb(nir_builder *b, nir_instr *instr, void *_state)
307 {
308 if (instr->type != nir_instr_type_tex)
309 return false;
310
311 nir_tex_instr *tex = nir_instr_as_tex(instr);
312 if (nir_tex_instr_need_sampler(tex))
313 return false;
314
315 /* Use the texture index as sampler index for the purposes of
316 * lower_tex_packing, since in GL we currently make packing
317 * decisions based on texture format.
318 */
319 tex->backend_flags = tex->texture_index;
320 return true;
321 }
322
323 static bool
v3d_nir_lower_uniform_offset_to_bytes(nir_shader * s)324 v3d_nir_lower_uniform_offset_to_bytes(nir_shader *s)
325 {
326 return nir_shader_intrinsics_pass(s, lower_uniform_offset_to_bytes_cb,
327 nir_metadata_control_flow, NULL);
328 }
329
330 static bool
v3d_nir_lower_textures(nir_shader * s)331 v3d_nir_lower_textures(nir_shader *s)
332 {
333 return nir_shader_instructions_pass(s, lower_textures_cb,
334 nir_metadata_control_flow, NULL);
335 }
336
/* Shared create-shader backend for all stages: takes ownership of the NIR
 * (or translates TGSI to NIR), runs the variant-independent lowering and
 * optimization passes, and computes the SHA-1 used as part of the variant
 * cache keys. Returns NULL if the state-object allocation fails.
 */
static void *
v3d_uncompiled_shader_create(struct pipe_context *pctx,
                             enum pipe_shader_ir type, void *ir)
{
        struct v3d_context *v3d = v3d_context(pctx);
        struct v3d_uncompiled_shader *so = CALLOC_STRUCT(v3d_uncompiled_shader);
        if (!so)
                return NULL;

        so->program_id = v3d->next_uncompiled_program_id++;

        nir_shader *s;

        if (type == PIPE_SHADER_IR_NIR) {
                /* The backend takes ownership of the NIR shader on state
                 * creation.
                 */
                s = ir;
        } else {
                assert(type == PIPE_SHADER_IR_TGSI);

                if (V3D_DBG(TGSI)) {
                        fprintf(stderr, "prog %d TGSI:\n",
                                so->program_id);
                        tgsi_dump(ir, 0);
                        fprintf(stderr, "\n");
                }
                s = tgsi_to_nir(ir, pctx->screen, false);
        }

        /* Kernels are treated as regular compute shaders from here on. */
        if (s->info.stage == MESA_SHADER_KERNEL)
                s->info.stage = MESA_SHADER_COMPUTE;

        /* VS/GS in/out is deliberately not lowered here — presumably it
         * happens later once the variant key is known (TODO confirm in the
         * compiler).
         */
        if (s->info.stage != MESA_SHADER_VERTEX &&
            s->info.stage != MESA_SHADER_GEOMETRY) {
                NIR_PASS(_, s, nir_lower_io,
                         nir_var_shader_in | nir_var_shader_out,
                         type_size, (nir_lower_io_options)0);
        }

        NIR_PASS(_, s, nir_normalize_cubemap_coords);

        NIR_PASS(_, s, nir_lower_load_const_to_scalar);

        v3d_optimize_nir(NULL, s);

        NIR_PASS(_, s, nir_lower_var_copies);

        /* Get rid of base CS sys vals */
        if (s->info.stage == MESA_SHADER_COMPUTE) {
                struct nir_lower_compute_system_values_options cs_options = {
                        .has_base_global_invocation_id = false,
                        .has_base_workgroup_id = false,
                };
                NIR_PASS(_, s, nir_lower_compute_system_values, &cs_options);
        }

        /* Get rid of split copies */
        v3d_optimize_nir(NULL, s);

        NIR_PASS(_, s, nir_remove_dead_variables, nir_var_function_temp, NULL);

        NIR_PASS(_, s, nir_lower_frexp);

        /* Since we can't expose PIPE_CAP_PACKED_UNIFORMS the state tracker
         * will produce uniform intrinsics with offsets in vec4 units but
         * our compiler expects to work in units of bytes.
         */
        NIR_PASS(_, s, v3d_nir_lower_uniform_offset_to_bytes);

        NIR_PASS(_, s, v3d_nir_lower_textures);

        /* Garbage collect dead instructions */
        nir_sweep(s);

        so->base.type = PIPE_SHADER_IR_NIR;
        so->base.ir.nir = s;

        /* Generate sha1 from NIR for caching */
        struct blob blob;
        blob_init(&blob);
        nir_serialize(&blob, s, true);
        assert(!blob.out_of_memory);
        _mesa_sha1_compute(blob.data, blob.size, so->sha1);
        blob_finish(&blob);

        if (V3D_DBG(NIR) || v3d_debug_flag_for_shader_stage(s->info.stage)) {
                fprintf(stderr, "%s prog %d NIR:\n",
                        gl_shader_stage_name(s->info.stage),
                        so->program_id);
                nir_print_shader(s, stderr);
                fprintf(stderr, "\n");
        }

        if (V3D_DBG(PRECOMPILE))
                v3d_shader_precompile(v3d, so);

        return so;
}
436
437 static void
v3d_shader_debug_output(const char * message,void * data)438 v3d_shader_debug_output(const char *message, void *data)
439 {
440 struct pipe_context *ctx = data;
441
442 util_debug_message(&ctx->debug, SHADER_INFO, "%s", message);
443 }
444
445 static void *
v3d_shader_state_create(struct pipe_context * pctx,const struct pipe_shader_state * cso)446 v3d_shader_state_create(struct pipe_context *pctx,
447 const struct pipe_shader_state *cso)
448 {
449 struct v3d_uncompiled_shader *so =
450 v3d_uncompiled_shader_create(pctx,
451 cso->type,
452 (cso->type == PIPE_SHADER_IR_TGSI ?
453 (void *)cso->tokens :
454 cso->ir.nir));
455
456 v3d_set_transform_feedback_outputs(so, &cso->stream_output);
457
458 return so;
459 }
460
/* Key used with the in-RAM variant cache (v3d->prog.cache[]): pairs the
 * stage-specific compile key with the SHA-1 of the uncompiled shader's
 * serialized NIR.
 */
struct v3d_cache_key {
        struct v3d_key *key;      /* stage-specific v3d_*_key; size known per table */
        unsigned char sha1[20];   /* copy of v3d_uncompiled_shader::sha1 */
};
466
/**
 * Returns the compiled variant of @uncompiled matching @key, checking the
 * per-stage RAM cache first, then the disk cache, and finally invoking the
 * compiler. Also grows the shared scratch (spill) BO when the variant
 * needs more spill space than currently allocated.
 */
struct v3d_compiled_shader *
v3d_get_compiled_shader(struct v3d_context *v3d,
                        struct v3d_key *key,
                        size_t key_size,
                        struct v3d_uncompiled_shader *uncompiled)
{
        nir_shader *s = uncompiled->base.ir.nir;
        struct hash_table *ht = v3d->prog.cache[s->info.stage];
        struct v3d_cache_key cache_key;
        cache_key.key = key;
        memcpy(cache_key.sha1, uncompiled->sha1, sizeof(cache_key.sha1));
        struct hash_entry *entry = _mesa_hash_table_search(ht, &cache_key);
        if (entry)
                return entry->data;

        int variant_id =
                p_atomic_inc_return(&uncompiled->compiled_variant_count);

        struct v3d_compiled_shader *shader = NULL;

#ifdef ENABLE_SHADER_CACHE
        shader = v3d_disk_cache_retrieve(v3d, key, uncompiled);
#endif
        if (!shader) {
                shader = rzalloc(NULL, struct v3d_compiled_shader);

                int program_id = uncompiled->program_id;
                uint64_t *qpu_insts;
                uint32_t shader_size;

                qpu_insts = v3d_compile(v3d->screen->compiler, key,
                                        &shader->prog_data.base, s,
                                        v3d_shader_debug_output,
                                        v3d,
                                        program_id, variant_id, &shader_size);

                /* qpu_insts being NULL can happen if the register allocation
                 * failed. At this point we can't really trigger an OpenGL API
                 * error, as the final compilation could happen on the draw
                 * call. So let's at least assert, so debug builds finish at
                 * this point.
                 */
                assert(qpu_insts);
                ralloc_steal(shader, shader->prog_data.base);

                /* Upload the QPU code to GPU-accessible memory; the
                 * resulting resource/offset are what draw emission uses.
                 */
                if (shader_size) {
                        u_upload_data(v3d->state_uploader, 0, shader_size, 8,
                                      qpu_insts, &shader->offset, &shader->resource);
                }

#ifdef ENABLE_SHADER_CACHE
                v3d_disk_cache_store(v3d, key, uncompiled,
                                     shader, qpu_insts, shader_size);
#endif

                free(qpu_insts);
        }

        v3d_set_shader_uniform_dirty_flags(shader);

        if (ht) {
                /* The table takes a duplicated key; both the wrapper and the
                 * duplicated v3d_key are ralloc'ed off the shader so they
                 * are freed with the variant.
                 */
                struct v3d_cache_key *dup_cache_key =
                        ralloc_size(shader, sizeof(struct v3d_cache_key));
                dup_cache_key->key = ralloc_memdup(shader, cache_key.key,
                                                   key_size);
                memcpy(dup_cache_key->sha1, cache_key.sha1 ,sizeof(dup_cache_key->sha1));
                _mesa_hash_table_insert(ht, dup_cache_key, shader);
        }

        if (shader->prog_data.base->spill_size >
            v3d->prog.spill_size_per_thread) {
                /* The TIDX register we use for choosing the area to access
                 * for scratch space is: (core << 6) | (qpu << 2) | thread.
                 * Even at minimum threadcount in a particular shader, that
                 * means we still multiply by qpus by 4.
                 */
                int total_spill_size = (v3d->screen->devinfo.qpu_count * 4 *
                                        shader->prog_data.base->spill_size);

                v3d_bo_unreference(&v3d->prog.spill_bo);
                v3d->prog.spill_bo = v3d_bo_alloc(v3d->screen,
                                                  total_spill_size, "spill");
                v3d->prog.spill_size_per_thread =
                        shader->prog_data.base->spill_size;
        }

        return shader;
}
555
/* Frees a compiled shader variant. The QPU-code resource reference must be
 * dropped before ralloc_free() tears down the shader (which owns prog_data
 * and any cache-key copies ralloc'ed off it).
 */
static void
v3d_free_compiled_shader(struct v3d_compiled_shader *shader)
{
        pipe_resource_reference(&shader->resource, NULL);
        ralloc_free(shader);
}
562
563 static void
v3d_setup_shared_key(struct v3d_context * v3d,struct v3d_key * key,struct v3d_texture_stateobj * texstate)564 v3d_setup_shared_key(struct v3d_context *v3d, struct v3d_key *key,
565 struct v3d_texture_stateobj *texstate)
566 {
567 const struct v3d_device_info *devinfo = &v3d->screen->devinfo;
568
569 key->num_tex_used = texstate->num_textures;
570 key->num_samplers_used = texstate->num_textures;
571 assert(key->num_tex_used == key->num_samplers_used);
572 for (int i = 0; i < texstate->num_textures; i++) {
573 struct pipe_sampler_view *sampler = texstate->textures[i];
574
575 if (!sampler)
576 continue;
577
578 key->sampler[i].return_size =
579 v3d_get_tex_return_size(devinfo, sampler->format);
580
581 /* For 16-bit, we set up the sampler to always return 2
582 * channels (meaning no recompiles for most statechanges),
583 * while for 32 we actually scale the returns with channels.
584 */
585 if (key->sampler[i].return_size == 16) {
586 key->sampler[i].return_channels = 2;
587 } else {
588 key->sampler[i].return_channels = 4;
589 }
590
591 /* We let the sampler state handle the swizzle.
592 */
593 key->tex[i].swizzle[0] = PIPE_SWIZZLE_X;
594 key->tex[i].swizzle[1] = PIPE_SWIZZLE_Y;
595 key->tex[i].swizzle[2] = PIPE_SWIZZLE_Z;
596 key->tex[i].swizzle[3] = PIPE_SWIZZLE_W;
597 }
598 }
599
600 static void
v3d_setup_shared_precompile_key(struct v3d_uncompiled_shader * uncompiled,struct v3d_key * key)601 v3d_setup_shared_precompile_key(struct v3d_uncompiled_shader *uncompiled,
602 struct v3d_key *key)
603 {
604 nir_shader *s = uncompiled->base.ir.nir;
605
606 /* The shader may have gaps in the texture bindings, so figure out
607 * the largest binding in use and setup the number of textures and
608 * samplers from there instead of just the texture count from shader
609 * info.
610 */
611 key->num_tex_used = 0;
612 key->num_samplers_used = 0;
613 for (int i = V3D_MAX_TEXTURE_SAMPLERS - 1; i >= 0; i--) {
614 if (s->info.textures_used[0] & (1 << i)) {
615 key->num_tex_used = i + 1;
616 key->num_samplers_used = i + 1;
617 break;
618 }
619 }
620
621 /* Note that below we access they key's texture and sampler fields
622 * using the same index. On OpenGL they are the same (they are
623 * combined)
624 */
625 for (int i = 0; i < s->info.num_textures; i++) {
626 key->sampler[i].return_size = 16;
627 key->sampler[i].return_channels = 2;
628
629 key->tex[i].swizzle[0] = PIPE_SWIZZLE_X;
630 key->tex[i].swizzle[1] = PIPE_SWIZZLE_Y;
631 key->tex[i].swizzle[2] = PIPE_SWIZZLE_Z;
632 key->tex[i].swizzle[3] = PIPE_SWIZZLE_W;
633 }
634 }
635
/* Builds the FS variant key from current state, fetches/compiles the
 * matching fragment shader variant, and raises the dirty flags for any
 * FS-derived state (flat shade, noperspective, centroid, FS inputs) that
 * changed relative to the previous variant.
 */
static void
v3d_update_compiled_fs(struct v3d_context *v3d, uint8_t prim_mode)
{
        struct v3d_job *job = v3d->job;
        struct v3d_fs_key local_key;
        struct v3d_fs_key *key = &local_key;
        nir_shader *s = v3d->prog.bind_fs->base.ir.nir;

        /* Skip unless some state that feeds the FS key is dirty. */
        if (!(v3d->dirty & (V3D_DIRTY_PRIM_MODE |
                            V3D_DIRTY_BLEND |
                            V3D_DIRTY_FRAMEBUFFER |
                            V3D_DIRTY_ZSA |
                            V3D_DIRTY_RASTERIZER |
                            V3D_DIRTY_SAMPLE_STATE |
                            V3D_DIRTY_FRAGTEX |
                            V3D_DIRTY_UNCOMPILED_FS))) {
                return;
        }

        memset(key, 0, sizeof(*key));
        v3d_setup_shared_key(v3d, &key->base, &v3d->tex[PIPE_SHADER_FRAGMENT]);
        key->base.ucp_enables = v3d->rasterizer->base.clip_plane_enable;
        key->is_points = (prim_mode == MESA_PRIM_POINTS);
        key->is_lines = (prim_mode >= MESA_PRIM_LINES &&
                         prim_mode <= MESA_PRIM_LINE_STRIP);
        key->line_smoothing = (key->is_lines &&
                               v3d_line_smoothing_enabled(v3d));
        key->has_gs = v3d->prog.bind_gs != NULL;
        if (v3d->blend->base.logicop_enable) {
                key->logicop_func = v3d->blend->base.logicop_func;
        } else {
                key->logicop_func = PIPE_LOGICOP_COPY;
        }
        /* MSAA-dependent key bits only matter when the job is multisampled. */
        if (job->msaa) {
                key->msaa = v3d->rasterizer->base.multisample;
                key->sample_alpha_to_coverage = v3d->blend->base.alpha_to_coverage;
                key->sample_alpha_to_one = v3d->blend->base.alpha_to_one;
        }

        key->swap_color_rb = v3d->swap_color_rb;

        for (int i = 0; i < v3d->framebuffer.nr_cbufs; i++) {
                struct pipe_surface *cbuf = v3d->framebuffer.cbufs[i];
                if (!cbuf)
                        continue;

                /* gl_FragColor's propagation to however many bound color
                 * buffers there are means that the shader compile needs to
                 * know what buffers are present.
                 */
                key->cbufs |= 1 << i;

                /* If logic operations are enabled then we might emit color
                 * reads and we need to know the color buffer format and
                 * swizzle for that.
                 */
                if (key->logicop_func != PIPE_LOGICOP_COPY) {
                        key->color_fmt[i].format = cbuf->format;
                        memcpy(key->color_fmt[i].swizzle,
                               v3d_get_format_swizzle(&v3d->screen->devinfo,
                                                      cbuf->format),
                               sizeof(key->color_fmt[i].swizzle));
                }

                const struct util_format_description *desc =
                        util_format_description(cbuf->format);

                /* Track which RBs are full 32-bit float. */
                if (desc->channel[0].type == UTIL_FORMAT_TYPE_FLOAT &&
                    desc->channel[0].size == 32) {
                        key->f32_color_rb |= 1 << i;
                }

                /* When outputs are untyped, the variant must know which RBs
                 * take pure-integer data.
                 */
                if (s->info.fs.untyped_color_outputs) {
                        if (util_format_is_pure_uint(cbuf->format))
                                key->uint_color_rb |= 1 << i;
                        else if (util_format_is_pure_sint(cbuf->format))
                                key->int_color_rb |= 1 << i;
                }
        }

        if (key->is_points) {
                key->point_sprite_mask =
                        v3d->rasterizer->base.sprite_coord_enable;
                /* this is handled by lower_wpos_pntc */
                key->point_coord_upper_left = false;
        }

        struct v3d_compiled_shader *old_fs = v3d->prog.fs;
        v3d->prog.fs = v3d_get_compiled_shader(v3d, &key->base, sizeof(*key),
                                               v3d->prog.bind_fs);
        if (v3d->prog.fs == old_fs)
                return;

        v3d->dirty |= V3D_DIRTY_COMPILED_FS;

        /* Compare FS-derived state against the previous variant and flag
         * only what actually changed.
         */
        if (old_fs) {
                if (v3d->prog.fs->prog_data.fs->flat_shade_flags !=
                    old_fs->prog_data.fs->flat_shade_flags) {
                        v3d->dirty |= V3D_DIRTY_FLAT_SHADE_FLAGS;
                }

                if (v3d->prog.fs->prog_data.fs->noperspective_flags !=
                    old_fs->prog_data.fs->noperspective_flags) {
                        v3d->dirty |= V3D_DIRTY_NOPERSPECTIVE_FLAGS;
                }

                if (v3d->prog.fs->prog_data.fs->centroid_flags !=
                    old_fs->prog_data.fs->centroid_flags) {
                        v3d->dirty |= V3D_DIRTY_CENTROID_FLAGS;
                }
        }

        /* FS input slots feed the GS/VS keys, so a change cascades into
         * their updates (see v3d_update_compiled_shaders ordering).
         */
        if (old_fs && memcmp(v3d->prog.fs->prog_data.fs->input_slots,
                             old_fs->prog_data.fs->input_slots,
                             sizeof(v3d->prog.fs->prog_data.fs->input_slots))) {
                v3d->dirty |= V3D_DIRTY_FS_INPUTS;
        }
}
754
/* Builds the GS variant keys and fetches/compiles both the render-mode
 * geometry shader (prog.gs) and the bin-mode variant (prog.gs_bin).
 */
static void
v3d_update_compiled_gs(struct v3d_context *v3d, uint8_t prim_mode)
{
        struct v3d_gs_key local_key;
        struct v3d_gs_key *key = &local_key;

        /* Skip unless some state that feeds the GS key is dirty. */
        if (!(v3d->dirty & (V3D_DIRTY_GEOMTEX |
                            V3D_DIRTY_RASTERIZER |
                            V3D_DIRTY_UNCOMPILED_GS |
                            V3D_DIRTY_PRIM_MODE |
                            V3D_DIRTY_FS_INPUTS))) {
                return;
        }

        if (!v3d->prog.bind_gs) {
                v3d->prog.gs = NULL;
                v3d->prog.gs_bin = NULL;
                return;
        }

        memset(key, 0, sizeof(*key));
        v3d_setup_shared_key(v3d, &key->base, &v3d->tex[PIPE_SHADER_GEOMETRY]);
        key->base.ucp_enables = v3d->rasterizer->base.clip_plane_enable;
        key->base.is_last_geometry_stage = true;
        /* The render-mode GS outputs exactly what the FS consumes. */
        key->num_used_outputs = v3d->prog.fs->prog_data.fs->num_inputs;
        STATIC_ASSERT(sizeof(key->used_outputs) ==
                      sizeof(v3d->prog.fs->prog_data.fs->input_slots));
        memcpy(key->used_outputs, v3d->prog.fs->prog_data.fs->input_slots,
               sizeof(key->used_outputs));

        key->per_vertex_point_size =
                (prim_mode == MESA_PRIM_POINTS &&
                 v3d->rasterizer->base.point_size_per_vertex);

        struct v3d_uncompiled_shader *uncompiled = v3d->prog.bind_gs;
        struct v3d_compiled_shader *gs =
                v3d_get_compiled_shader(v3d, &key->base, sizeof(*key),
                                        uncompiled);
        if (gs != v3d->prog.gs) {
                v3d->prog.gs = gs;
                v3d->dirty |= V3D_DIRTY_COMPILED_GS;
        }

        /* Now build the bin-mode (coordinate) variant of the same GS. */
        key->is_coord = true;

        /* The last bin-mode shader in the geometry pipeline only outputs
         * varyings used by transform feedback.
         */
        if (uncompiled->num_tf_outputs > 0) {
                memcpy(key->used_outputs, uncompiled->tf_outputs,
                       sizeof(*key->used_outputs) * uncompiled->num_tf_outputs);
        }
        if (uncompiled->num_tf_outputs < key->num_used_outputs) {
                /* Zero the unused tail so equivalent keys hash identically. */
                uint32_t size = sizeof(*key->used_outputs) *
                                (key->num_used_outputs -
                                 uncompiled->num_tf_outputs);
                memset(&key->used_outputs[uncompiled->num_tf_outputs],
                       0, size);
        }
        key->num_used_outputs = uncompiled->num_tf_outputs;

        /* NOTE(review): prog.gs was already replaced above, so old_gs here
         * is the NEW render-mode GS, not the previous one. That makes the
         * gs_bin != old_gs comparison below almost always true, and makes
         * the final input_slots memcmp compare prog.gs against itself (so
         * V3D_DIRTY_GS_INPUTS can seemingly never be raised here) — confirm
         * intent against upstream before changing.
         */
        struct v3d_compiled_shader *old_gs = v3d->prog.gs;
        struct v3d_compiled_shader *gs_bin =
                v3d_get_compiled_shader(v3d, &key->base, sizeof(*key),
                                        uncompiled);
        if (gs_bin != old_gs) {
                v3d->prog.gs_bin = gs_bin;
                v3d->dirty |= V3D_DIRTY_COMPILED_GS_BIN;
        }

        if (old_gs && memcmp(v3d->prog.gs->prog_data.gs->input_slots,
                             old_gs->prog_data.gs->input_slots,
                             sizeof(v3d->prog.gs->prog_data.gs->input_slots))) {
                v3d->dirty |= V3D_DIRTY_GS_INPUTS;
        }
}
831
/* Builds the VS variant keys and fetches/compiles both the render-mode
 * vertex shader (prog.vs) and the bin-mode coordinate shader (prog.cs).
 */
static void
v3d_update_compiled_vs(struct v3d_context *v3d, uint8_t prim_mode)
{
        struct v3d_vs_key local_key;
        struct v3d_vs_key *key = &local_key;

        /* Which dirty bits matter depends on whether a GS is bound: without
         * one, the VS feeds the FS directly and rasterizer/prim-mode state
         * affects its key.
         */
        if (!(v3d->dirty & (V3D_DIRTY_VERTTEX |
                            V3D_DIRTY_VTXSTATE |
                            V3D_DIRTY_UNCOMPILED_VS |
                            (v3d->prog.bind_gs ? 0 : V3D_DIRTY_RASTERIZER) |
                            (v3d->prog.bind_gs ? 0 : V3D_DIRTY_PRIM_MODE) |
                            (v3d->prog.bind_gs ? V3D_DIRTY_GS_INPUTS :
                                                 V3D_DIRTY_FS_INPUTS)))) {
                return;
        }

        memset(key, 0, sizeof(*key));
        v3d_setup_shared_key(v3d, &key->base, &v3d->tex[PIPE_SHADER_VERTEX]);
        key->base.ucp_enables = v3d->rasterizer->base.clip_plane_enable;
        key->base.is_last_geometry_stage = !v3d->prog.bind_gs;

        /* The VS must output whatever the next stage (FS or GS) consumes. */
        if (!v3d->prog.bind_gs) {
                key->num_used_outputs = v3d->prog.fs->prog_data.fs->num_inputs;
                STATIC_ASSERT(sizeof(key->used_outputs) ==
                              sizeof(v3d->prog.fs->prog_data.fs->input_slots));
                memcpy(key->used_outputs, v3d->prog.fs->prog_data.fs->input_slots,
                       sizeof(key->used_outputs));
        } else {
                key->num_used_outputs = v3d->prog.gs->prog_data.gs->num_inputs;
                STATIC_ASSERT(sizeof(key->used_outputs) ==
                              sizeof(v3d->prog.gs->prog_data.gs->input_slots));
                memcpy(key->used_outputs, v3d->prog.gs->prog_data.gs->input_slots,
                       sizeof(key->used_outputs));
        }

        key->per_vertex_point_size =
                (prim_mode == MESA_PRIM_POINTS &&
                 v3d->rasterizer->base.point_size_per_vertex);

        /* Record which vertex attributes need a red/blue channel swap,
         * based on the bound vertex element formats.
         */
        nir_shader *s = v3d->prog.bind_vs->base.ir.nir;
        uint64_t inputs_read = s->info.inputs_read;
        assert(util_bitcount(inputs_read) <= v3d->vtx->num_elements);

        while (inputs_read) {
                int location = u_bit_scan64(&inputs_read);
                nir_variable *var =
                        nir_find_variable_with_location(s, nir_var_shader_in, location);
                assert (var != NULL);
                int driver_location = var->data.driver_location;
                switch (v3d->vtx->pipe[driver_location].src_format) {
                case PIPE_FORMAT_B8G8R8A8_UNORM:
                case PIPE_FORMAT_B10G10R10A2_UNORM:
                case PIPE_FORMAT_B10G10R10A2_SNORM:
                case PIPE_FORMAT_B10G10R10A2_USCALED:
                case PIPE_FORMAT_B10G10R10A2_SSCALED:
                        key->va_swap_rb_mask |= 1 << location;
                        break;
                default:
                        break;
                }
        }

        struct v3d_uncompiled_shader *shader_state = v3d->prog.bind_vs;
        struct v3d_compiled_shader *vs =
                v3d_get_compiled_shader(v3d, &key->base, sizeof(*key),
                                        shader_state);
        if (vs != v3d->prog.vs) {
                v3d->prog.vs = vs;
                v3d->dirty |= V3D_DIRTY_COMPILED_VS;
        }

        /* Now build the bin-mode (coordinate) variant. */
        key->is_coord = true;

        /* Coord shaders only output varyings used by transform feedback,
         * unless they are linked to other shaders in the geometry side
         * of the pipeline, since in that case any of the output varyings
         * could be required in later geometry stages to compute
         * gl_Position or TF outputs.
         */
        if (!v3d->prog.bind_gs) {
                if (shader_state->num_tf_outputs > 0) {
                        memcpy(key->used_outputs, shader_state->tf_outputs,
                               sizeof(*key->used_outputs) *
                               shader_state->num_tf_outputs);
                }
                if (shader_state->num_tf_outputs < key->num_used_outputs) {
                        /* Zero the unused tail so equivalent keys hash
                         * identically.
                         */
                        uint32_t tail_bytes =
                                sizeof(*key->used_outputs) *
                                (key->num_used_outputs -
                                 shader_state->num_tf_outputs);
                        memset(&key->used_outputs[shader_state->num_tf_outputs],
                               0, tail_bytes);
                }
                key->num_used_outputs = shader_state->num_tf_outputs;
        } else {
                key->num_used_outputs = v3d->prog.gs_bin->prog_data.gs->num_inputs;
                STATIC_ASSERT(sizeof(key->used_outputs) ==
                              sizeof(v3d->prog.gs_bin->prog_data.gs->input_slots));
                memcpy(key->used_outputs, v3d->prog.gs_bin->prog_data.gs->input_slots,
                       sizeof(key->used_outputs));
        }

        struct v3d_compiled_shader *cs =
                v3d_get_compiled_shader(v3d, &key->base, sizeof(*key),
                                        shader_state);
        if (cs != v3d->prog.cs) {
                v3d->prog.cs = cs;
                v3d->dirty |= V3D_DIRTY_COMPILED_CS;
        }
}
942
/* Updates all graphics shader variants for the current state. Call order
 * is significant: the FS goes first because the GS and VS keys consume the
 * FS's input slots, and the GS goes before the VS because the VS bin key
 * consumes the GS bin's input slots.
 */
void
v3d_update_compiled_shaders(struct v3d_context *v3d, uint8_t prim_mode)
{
        v3d_update_compiled_fs(v3d, prim_mode);
        v3d_update_compiled_gs(v3d, prim_mode);
        v3d_update_compiled_vs(v3d, prim_mode);
}
950
951 void
v3d_update_compiled_cs(struct v3d_context * v3d)952 v3d_update_compiled_cs(struct v3d_context *v3d)
953 {
954 struct v3d_key local_key;
955 struct v3d_key *key = &local_key;
956
957 if (!(v3d->dirty & (V3D_DIRTY_UNCOMPILED_CS |
958 V3D_DIRTY_COMPTEX))) {
959 return;
960 }
961
962 memset(key, 0, sizeof(*key));
963 v3d_setup_shared_key(v3d, key, &v3d->tex[PIPE_SHADER_COMPUTE]);
964
965 struct v3d_compiled_shader *cs =
966 v3d_get_compiled_shader(v3d, key, sizeof(*key),
967 v3d->prog.bind_compute);
968 if (cs != v3d->prog.compute) {
969 v3d->prog.compute = cs;
970 v3d->dirty |= V3D_DIRTY_COMPILED_CS; /* XXX */
971 }
972 }
973
974 static inline uint32_t
cache_hash(const void * _key,uint32_t key_size)975 cache_hash(const void *_key, uint32_t key_size)
976 {
977 const struct v3d_cache_key *key = (struct v3d_cache_key *) _key;
978
979 struct mesa_sha1 ctx;
980 unsigned char sha1[20];
981 _mesa_sha1_init(&ctx);
982 _mesa_sha1_update(&ctx, key->key, key_size);
983 _mesa_sha1_update(&ctx, key->sha1, 20);
984 _mesa_sha1_final(&ctx, sha1);
985 return _mesa_hash_data(sha1, 20);
986 }
987
988 static inline bool
cache_compare(const void * _key1,const void * _key2,uint32_t key_size)989 cache_compare(const void *_key1, const void *_key2, uint32_t key_size)
990 {
991 const struct v3d_cache_key *key1 = (struct v3d_cache_key *) _key1;
992 const struct v3d_cache_key *key2 = (struct v3d_cache_key *) _key2;
993
994 if (memcmp(key1->key, key2->key, key_size) != 0)
995 return false;
996
997 return memcmp(key1->sha1, key2->sha1, 20) == 0;
998 }
999
/* Hash callback for the fragment-shader variant cache. */
static uint32_t
fs_cache_hash(const void *key)
{
        return cache_hash(key, sizeof(struct v3d_fs_key));
}
1005
/* Hash callback for the geometry-shader variant cache. */
static uint32_t
gs_cache_hash(const void *key)
{
        return cache_hash(key, sizeof(struct v3d_gs_key));
}
1011
/* Hash callback for the vertex-shader variant cache. */
static uint32_t
vs_cache_hash(const void *key)
{
        return cache_hash(key, sizeof(struct v3d_vs_key));
}
1017
/* Hash callback for the compute-shader variant cache (plain v3d_key). */
static uint32_t
cs_cache_hash(const void *key)
{
        return cache_hash(key, sizeof(struct v3d_key));
}
1023
/* Equality callback for the fragment-shader variant cache. */
static bool
fs_cache_compare(const void *key1, const void *key2)
{
        return cache_compare(key1, key2, sizeof(struct v3d_fs_key));
}
1029
/* Equality callback for the geometry-shader variant cache. */
static bool
gs_cache_compare(const void *key1, const void *key2)
{
        return cache_compare(key1, key2, sizeof(struct v3d_gs_key));
}
1035
/* Equality callback for the vertex-shader variant cache. */
static bool
vs_cache_compare(const void *key1, const void *key2)
{
        return cache_compare(key1, key2, sizeof(struct v3d_vs_key));
}
1041
/* Equality callback for the compute-shader variant cache (plain v3d_key). */
static bool
cs_cache_compare(const void *key1, const void *key2)
{
        return cache_compare(key1, key2, sizeof(struct v3d_key));
}
1047
1048 static void
v3d_shader_state_delete(struct pipe_context * pctx,void * hwcso)1049 v3d_shader_state_delete(struct pipe_context *pctx, void *hwcso)
1050 {
1051 struct v3d_context *v3d = v3d_context(pctx);
1052 struct v3d_uncompiled_shader *so = hwcso;
1053 nir_shader *s = so->base.ir.nir;
1054
1055 hash_table_foreach(v3d->prog.cache[s->info.stage], entry) {
1056 const struct v3d_cache_key *cache_key = entry->key;
1057 struct v3d_compiled_shader *shader = entry->data;
1058
1059 if (memcmp(cache_key->sha1, so->sha1, 20) != 0)
1060 continue;
1061
1062 if (v3d->prog.fs == shader)
1063 v3d->prog.fs = NULL;
1064 if (v3d->prog.vs == shader)
1065 v3d->prog.vs = NULL;
1066 if (v3d->prog.cs == shader)
1067 v3d->prog.cs = NULL;
1068 if (v3d->prog.compute == shader)
1069 v3d->prog.compute = NULL;
1070
1071 _mesa_hash_table_remove(v3d->prog.cache[s->info.stage], entry);
1072 v3d_free_compiled_shader(shader);
1073 }
1074
1075 ralloc_free(so->base.ir.nir);
1076 free(so);
1077 }
1078
/* pipe_context::bind_fs_state: records the CSO and marks the stage dirty;
 * the actual compile is deferred to v3d_update_compiled_fs() at draw time.
 */
static void
v3d_fp_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct v3d_context *v3d = v3d_context(pctx);
        v3d->prog.bind_fs = hwcso;
        v3d->dirty |= V3D_DIRTY_UNCOMPILED_FS;
}
1086
/* pipe_context::bind_gs_state: records the CSO and marks the stage dirty;
 * the actual compile is deferred to v3d_update_compiled_gs() at draw time.
 */
static void
v3d_gp_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct v3d_context *v3d = v3d_context(pctx);
        v3d->prog.bind_gs = hwcso;
        v3d->dirty |= V3D_DIRTY_UNCOMPILED_GS;
}
1094
/* pipe_context::bind_vs_state: records the CSO and marks the stage dirty;
 * the actual compile is deferred to v3d_update_compiled_vs() at draw time.
 */
static void
v3d_vp_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct v3d_context *v3d = v3d_context(pctx);
        v3d->prog.bind_vs = hwcso;
        v3d->dirty |= V3D_DIRTY_UNCOMPILED_VS;
}
1102
/* pipe_context::bind_compute_state: records the CSO and marks the stage
 * dirty; the compile is deferred to v3d_update_compiled_cs().
 */
static void
v3d_compute_state_bind(struct pipe_context *pctx, void *state)
{
        struct v3d_context *v3d = v3d_context(pctx);

        v3d->prog.bind_compute = state;
        v3d->dirty |= V3D_DIRTY_UNCOMPILED_CS;
}
1111
/* pipe_context::create_compute_state: compute CSOs share the generic
 * uncompiled-shader creation path used by the graphics stages.
 */
static void *
v3d_create_compute_state(struct pipe_context *pctx,
                         const struct pipe_compute_state *cso)
{
        return v3d_uncompiled_shader_create(pctx, cso->ir_type,
                                            (void *)cso->prog);
}
1119
/**
 * pipe_context::get_compute_state_info: reports thread/SIMD limits for a
 * compute CSO, which requires the CSO to have been compiled first.
 *
 * NOTE(review): this leaves `cso` bound as the current compute state
 * instead of restoring the previous binding -- confirm callers rebind
 * (or don't care) afterwards.
 */
static void
v3d_get_compute_state_info(struct pipe_context *pctx,
                           void *cso,
                           struct pipe_compute_state_object_info *info)
{
        struct v3d_context *v3d = v3d_context(pctx);

        /* this API requires compiled shaders */
        v3d_compute_state_bind(pctx, cso);
        v3d_update_compiled_cs(v3d);

        info->max_threads = V3D_CHANNELS * v3d->prog.compute->prog_data.base->threads;
        info->preferred_simd_size = V3D_CHANNELS;
        info->private_memory = 0;
}
1135
/**
 * Wires up the shader-related pipe_context entry points and allocates the
 * per-stage shader variant caches.
 */
void
v3d_program_init(struct pipe_context *pctx)
{
        struct v3d_context *v3d = v3d_context(pctx);

        pctx->create_vs_state = v3d_shader_state_create;
        pctx->delete_vs_state = v3d_shader_state_delete;

        pctx->create_gs_state = v3d_shader_state_create;
        pctx->delete_gs_state = v3d_shader_state_delete;

        pctx->create_fs_state = v3d_shader_state_create;
        pctx->delete_fs_state = v3d_shader_state_delete;

        pctx->bind_fs_state = v3d_fp_state_bind;
        pctx->bind_gs_state = v3d_gp_state_bind;
        pctx->bind_vs_state = v3d_vp_state_bind;

        /* Compute entry points are only exposed when the screen reports
         * CSD support.
         */
        if (v3d->screen->has_csd) {
                pctx->create_compute_state = v3d_create_compute_state;
                pctx->delete_compute_state = v3d_shader_state_delete;
                pctx->bind_compute_state = v3d_compute_state_bind;
                pctx->get_compute_state_info = v3d_get_compute_state_info;
        }

        /* One variant cache per stage, each keyed by that stage's key
         * struct (see the *_cache_hash/_compare callbacks).  The tables
         * are allocated with pctx as the ralloc parent, so their memory
         * is reclaimed when the context is destroyed.
         */
        v3d->prog.cache[MESA_SHADER_VERTEX] =
                _mesa_hash_table_create(pctx, vs_cache_hash, vs_cache_compare);
        v3d->prog.cache[MESA_SHADER_GEOMETRY] =
                _mesa_hash_table_create(pctx, gs_cache_hash, gs_cache_compare);
        v3d->prog.cache[MESA_SHADER_FRAGMENT] =
                _mesa_hash_table_create(pctx, fs_cache_hash, fs_cache_compare);
        v3d->prog.cache[MESA_SHADER_COMPUTE] =
                _mesa_hash_table_create(pctx, cs_cache_hash, cs_cache_compare);
}
1170
1171 void
v3d_program_fini(struct pipe_context * pctx)1172 v3d_program_fini(struct pipe_context *pctx)
1173 {
1174 struct v3d_context *v3d = v3d_context(pctx);
1175
1176 for (int i = 0; i < MESA_SHADER_STAGES; i++) {
1177 struct hash_table *cache = v3d->prog.cache[i];
1178 if (!cache)
1179 continue;
1180
1181 hash_table_foreach(cache, entry) {
1182 struct v3d_compiled_shader *shader = entry->data;
1183 v3d_free_compiled_shader(shader);
1184 _mesa_hash_table_remove(cache, entry);
1185 }
1186 }
1187
1188 v3d_bo_unreference(&v3d->prog.spill_bo);
1189 }
1190