1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * SPDX-License-Identifier: MIT
9 */
10
11 #ifndef RADV_SHADER_H
12 #define RADV_SHADER_H
13
14 #include "util/mesa-blake3.h"
15 #include "util/u_math.h"
16 #include "vulkan/vulkan.h"
17 #include "ac_binary.h"
18 #include "ac_shader_util.h"
19 #include "amd_family.h"
20 #include "radv_constants.h"
21 #include "radv_shader_args.h"
22 #include "radv_shader_info.h"
23 #include "vk_pipeline_cache.h"
24
25 #include "aco_shader_info.h"
26
27 struct radv_physical_device;
28 struct radv_device;
29 struct radv_pipeline;
30 struct radv_ray_tracing_pipeline;
31 struct radv_shader_args;
32 struct radv_vertex_input_state;
33 struct radv_shader_args;
34 struct radv_serialized_shader_arena_block;
35 struct vk_pipeline_robustness_state;
36
/* Convenience groupings of Vulkan shader-stage bits: all graphics stages
 * (including mesh/task) and all ray-tracing stages.
 */
enum {
   RADV_GRAPHICS_STAGE_BITS =
      (VK_SHADER_STAGE_ALL_GRAPHICS | VK_SHADER_STAGE_MESH_BIT_EXT | VK_SHADER_STAGE_TASK_BIT_EXT),
   RADV_RT_STAGE_BITS =
      (VK_SHADER_STAGE_RAYGEN_BIT_KHR | VK_SHADER_STAGE_ANY_HIT_BIT_KHR | VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR |
       VK_SHADER_STAGE_MISS_BIT_KHR | VK_SHADER_STAGE_INTERSECTION_BIT_KHR | VK_SHADER_STAGE_CALLABLE_BIT_KHR)
};
44
/* Bitmask with one bit per Mesa Vulkan shader stage. */
#define RADV_STAGE_MASK ((1 << MESA_VULKAN_SHADER_STAGES) - 1)

/* Iterate 'stage' over every gl_shader_stage whose bit is set in 'stage_bits'
 * (masked to valid stages); each iteration clears the bit just visited.
 */
#define radv_foreach_stage(stage, stage_bits)                                                                          \
   for (gl_shader_stage stage, __tmp = (gl_shader_stage)((stage_bits)&RADV_STAGE_MASK); stage = ffs(__tmp) - 1, __tmp; \
        __tmp &= ~(1 << (stage)))
50
/* Flags selecting which NGG culling tests are enabled. */
enum radv_nggc_settings {
   radv_nggc_none = 0,
   radv_nggc_front_face = 1 << 0,
   radv_nggc_back_face = 1 << 1,
   radv_nggc_face_is_ccw = 1 << 2,
   radv_nggc_small_primitives = 1 << 3,
};
58
/* Flags describing which query types are active for a shader
 * (pipeline statistics, primitives generated, transform-feedback primitives).
 */
enum radv_shader_query_state {
   radv_shader_query_none = 0,
   radv_shader_query_pipeline_stat = 1 << 0,
   radv_shader_query_prim_gen = 1 << 1,
   radv_shader_query_prim_xfb = 1 << 2,
};
65
/* Required subgroup (wave) size for a stage, if any. */
enum radv_required_subgroup_size {
   RADV_REQUIRED_NONE = 0,
   RADV_REQUIRED_WAVE32 = 1,
   RADV_REQUIRED_WAVE64 = 2,
};
71
/* Per-stage compilation options that affect the generated code
 * (and therefore participate in shader cache keys).
 */
struct radv_shader_stage_key {
   uint8_t subgroup_required_size : 2; /* radv_required_subgroup_size */
   uint8_t subgroup_require_full : 1;  /* whether full subgroups are required */

   /* Robustness behavior applied to this stage. */
   uint8_t storage_robustness2 : 1;
   uint8_t uniform_robustness2 : 1;
   uint8_t vertex_robustness1 : 1;

   uint8_t optimisations_disabled : 1;
   uint8_t keep_statistic_info : 1;
   uint8_t view_index_from_device_index : 1;

   /* Shader version (up to 8) to force re-compilation when RADV_BUILD_ID_OVERRIDE is enabled. */
   uint8_t version : 3;

   /* Whether the mesh shader is used with a task shader. */
   uint8_t has_task_shader : 1;

   /* Whether the shader is used with indirect pipeline binds. */
   uint8_t indirect_bindable : 1;
};
93
/* Key identifying a fragment-shader epilog; mostly MRT/color-export state. */
struct radv_ps_epilog_key {
   uint32_t spi_shader_col_format;
   uint32_t spi_shader_z_format;

   /* Bitmasks, each bit represents one of the 8 MRTs. */
   uint8_t color_is_int8;
   uint8_t color_is_int10;
   uint8_t enable_mrt_output_nan_fixup;

   uint32_t colors_written;
   uint8_t color_map[MAX_RTS]; /* Remapping of shader color outputs to MRTs. */
   bool mrt0_is_dual_src;
   bool export_depth;
   bool export_stencil;
   bool export_sample_mask;
   bool alpha_to_coverage_via_mrtz;
   bool alpha_to_one;
};
112
/* Options applied while translating SPIR-V to NIR. */
struct radv_spirv_to_nir_options {
   uint32_t lower_view_index_to_zero : 1;
   uint32_t fix_dual_src_mrt1_export : 1;
   uint32_t lower_view_index_to_device_index : 1;
};
118
/* Graphics pipeline state that influences shader compilation. */
struct radv_graphics_state_key {
   uint32_t lib_flags : 4; /* VkGraphicsPipelineLibraryFlagBitsEXT */

   uint32_t has_multiview_view_index : 1;
   uint32_t adjust_frag_coord_z : 1;
   uint32_t dynamic_rasterization_samples : 1;
   uint32_t dynamic_provoking_vtx_mode : 1;
   uint32_t dynamic_line_rast_mode : 1;
   uint32_t enable_remove_point_size : 1;
   uint32_t unknown_rast_prim : 1;

   /* Input assembly. */
   struct {
      uint8_t topology;
   } ia;

   /* Vertex input (per-attribute/per-binding arrays). */
   struct {
      uint32_t instance_rate_inputs;
      uint32_t instance_rate_divisors[MAX_VERTEX_ATTRIBS];
      uint8_t vertex_attribute_formats[MAX_VERTEX_ATTRIBS];
      uint32_t vertex_attribute_bindings[MAX_VERTEX_ATTRIBS];
      uint32_t vertex_attribute_offsets[MAX_VERTEX_ATTRIBS];
      uint32_t vertex_attribute_strides[MAX_VERTEX_ATTRIBS];
      uint8_t vertex_binding_align[MAX_VBS];
   } vi;

   /* Tessellation. */
   struct {
      unsigned patch_control_points;
   } ts;

   /* Rasterization. */
   struct {
      uint32_t provoking_vtx_last : 1;
   } rs;

   /* Multisample state. */
   struct {
      bool sample_shading_enable;
      bool alpha_to_coverage_via_mrtz; /* GFX11+ */
      uint8_t rasterization_samples;
   } ms;

   /* Vertex shader. */
   struct vs {
      bool has_prolog;
   } vs;

   /* Fragment shader. */
   struct {
      struct radv_ps_epilog_key epilog;
      bool force_vrs_enabled;
      bool exports_mrtz_via_epilog;
      bool has_epilog;
   } ps;
};
169
/* Full compile key for a graphics pipeline: shared state key plus one
 * stage key per possible shader stage.
 */
struct radv_graphics_pipeline_key {
   struct radv_graphics_state_key gfx_state;

   struct radv_shader_stage_key stage_info[MESA_VULKAN_SHADER_STAGES];
};
175
/* Backend compiler options: debug/dump switches and target information. */
struct radv_nir_compiler_options {
   bool robust_buffer_access_llvm;
   bool dump_shader;
   bool dump_preoptir;
   bool record_ir;
   bool record_stats;
   bool check_ir;
   uint8_t enable_mrt_output_nan_fixup;
   bool wgp_mode;
   const struct radeon_info *info; /* Target GPU description. */

   /* Optional callback to receive compiler debug messages. */
   struct {
      void (*func)(void *private_data, enum aco_compiler_debug_level level, const char *message);
      void *private_data;
   } debug;
};
192
/* Pack 'value' into the bitfield 'field' of a user SGPR. Each FIELD below
 * defines a FIELD__SHIFT / FIELD__MASK pair consumed by SET_SGPR_FIELD().
 */
#define SET_SGPR_FIELD(field, value) (((unsigned)(value)&field##__MASK) << field##__SHIFT)

/* Layout of the TCS offchip-layout SGPR. */
#define TCS_OFFCHIP_LAYOUT_NUM_PATCHES__SHIFT 0
#define TCS_OFFCHIP_LAYOUT_NUM_PATCHES__MASK 0x7f
#define TCS_OFFCHIP_LAYOUT_PATCH_CONTROL_POINTS__SHIFT 12
#define TCS_OFFCHIP_LAYOUT_PATCH_CONTROL_POINTS__MASK 0x1f
#define TCS_OFFCHIP_LAYOUT_OUT_PATCH_CP__SHIFT 7
#define TCS_OFFCHIP_LAYOUT_OUT_PATCH_CP__MASK 0x1f
#define TCS_OFFCHIP_LAYOUT_NUM_LS_OUTPUTS__SHIFT 17
#define TCS_OFFCHIP_LAYOUT_NUM_LS_OUTPUTS__MASK 0x3f
#define TCS_OFFCHIP_LAYOUT_NUM_HS_OUTPUTS__SHIFT 23
#define TCS_OFFCHIP_LAYOUT_NUM_HS_OUTPUTS__MASK 0x3f
#define TCS_OFFCHIP_LAYOUT_PRIMITIVE_MODE__SHIFT 29
#define TCS_OFFCHIP_LAYOUT_PRIMITIVE_MODE__MASK 0x03
#define TCS_OFFCHIP_LAYOUT_TES_READS_TF__SHIFT 31
#define TCS_OFFCHIP_LAYOUT_TES_READS_TF__MASK 0x01

/* Layout of the TES state SGPR. */
#define TES_STATE_NUM_PATCHES__SHIFT 0
#define TES_STATE_NUM_PATCHES__MASK 0xff
#define TES_STATE_TCS_VERTICES_OUT__SHIFT 8
#define TES_STATE_TCS_VERTICES_OUT__MASK 0xff
#define TES_STATE_NUM_TCS_OUTPUTS__SHIFT 16
#define TES_STATE_NUM_TCS_OUTPUTS__MASK 0xff

/* Layout of the NGG LDS-layout SGPR. */
#define NGG_LDS_LAYOUT_GS_OUT_VERTEX_BASE__SHIFT 0
#define NGG_LDS_LAYOUT_GS_OUT_VERTEX_BASE__MASK 0xffff
#define NGG_LDS_LAYOUT_SCRATCH_BASE__SHIFT 16
#define NGG_LDS_LAYOUT_SCRATCH_BASE__MASK 0xffff

/* Layout of the PS state SGPR. */
#define PS_STATE_NUM_SAMPLES__SHIFT 0
#define PS_STATE_NUM_SAMPLES__MASK 0xf
#define PS_STATE_LINE_RAST_MODE__SHIFT 4
#define PS_STATE_LINE_RAST_MODE__MASK 0x3
#define PS_STATE_PS_ITER_MASK__SHIFT 6
#define PS_STATE_PS_ITER_MASK__MASK 0xffff
#define PS_STATE_RAST_PRIM__SHIFT 22
#define PS_STATE_RAST_PRIM__MASK 0x3
230
/* Flattened descriptor-set/push-constant layout a shader is compiled against. */
struct radv_shader_layout {
   uint32_t num_sets;

   struct {
      struct radv_descriptor_set_layout *layout;
      uint32_t dynamic_offset_start; /* First dynamic offset used by this set. */
   } set[MAX_SETS];

   uint32_t push_constant_size;
   uint32_t dynamic_offset_count;
   bool use_dynamic_descriptors;
};
243
/* All per-stage inputs and intermediate state for compiling one shader stage. */
struct radv_shader_stage {
   gl_shader_stage stage;
   gl_shader_stage next_stage;

   /* Source SPIR-V module. */
   struct {
      const struct vk_object_base *object;
      const char *data;
      uint32_t size;
   } spirv;

   const char *entrypoint;
   const VkSpecializationInfo *spec_info;

   unsigned char shader_sha1[20]; /* Hash of the stage's source/keys. */

   nir_shader *nir;
   nir_shader *internal_nir; /* meta shaders */

   struct radv_shader_info info;
   struct radv_shader_args args;
   struct radv_shader_stage_key key;

   VkPipelineCreationFeedback feedback;

   struct radv_shader_layout layout;
};
270
271 static inline bool
radv_is_last_vgt_stage(const struct radv_shader_stage * stage)272 radv_is_last_vgt_stage(const struct radv_shader_stage *stage)
273 {
274 return (stage->info.stage == MESA_SHADER_VERTEX || stage->info.stage == MESA_SHADER_TESS_EVAL ||
275 stage->info.stage == MESA_SHADER_GEOMETRY || stage->info.stage == MESA_SHADER_MESH) &&
276 (stage->info.next_stage == MESA_SHADER_FRAGMENT || stage->info.next_stage == MESA_SHADER_NONE);
277 }
278
/* Per-attribute vertex input description (bitmasks indexed by attribute). */
struct radv_vertex_input_state {
   uint32_t attribute_mask; /* Which attributes are present. */

   uint32_t instance_rate_inputs;
   uint32_t nontrivial_divisors;
   uint32_t zero_divisors;
   uint32_t post_shuffle;
   /* Having two separate fields instead of a single uint64_t makes it easier to remove attributes
    * using bitwise arithmetic.
    */
   uint32_t alpha_adjust_lo;
   uint32_t alpha_adjust_hi;
   uint32_t nontrivial_formats;

   uint8_t bindings[MAX_VERTEX_ATTRIBS];
   uint32_t divisors[MAX_VERTEX_ATTRIBS];
   uint32_t offsets[MAX_VERTEX_ATTRIBS];
   uint8_t formats[MAX_VERTEX_ATTRIBS];
   uint8_t format_align_req_minus_1[MAX_VERTEX_ATTRIBS];
   uint8_t component_align_req_minus_1[MAX_VERTEX_ATTRIBS];
   uint8_t format_sizes[MAX_VERTEX_ATTRIBS];
   uint32_t attrib_index_offset[MAX_VERTEX_ATTRIBS]; /* Only used with static strides. */

   bool bindings_match_attrib;
};
304
/* Key identifying a vertex-shader prolog. */
struct radv_vs_prolog_key {
   /* All the fields are pre-masked with BITFIELD_MASK(num_attributes).
    * Some of the fields are pre-masked by other conditions. See lookup_vs_prolog.
    */
   uint32_t instance_rate_inputs;
   uint32_t nontrivial_divisors;
   uint32_t zero_divisors;
   uint32_t post_shuffle;
   /* Having two separate fields instead of a single uint64_t makes it easier to remove attributes
    * using bitwise arithmetic.
    */
   uint32_t alpha_adjust_lo;
   uint32_t alpha_adjust_hi;
   uint8_t formats[MAX_VERTEX_ATTRIBS];
   unsigned num_attributes;
   uint32_t misaligned_mask;
   uint32_t unaligned_mask;
   bool as_ls;   /* Prolog feeds a VS running as LS (merged with TCS). */
   bool is_ngg;
   bool wave32;
   gl_shader_stage next_stage;
};
327
/* Storage format of a compiled shader binary: raw code (legacy) or ELF for the runtime linker. */
enum radv_shader_binary_type { RADV_BINARY_TYPE_LEGACY, RADV_BINARY_TYPE_RTLD };
329
/* Common header for serialized shader binaries (see the legacy/rtld variants). */
struct radv_shader_binary {
   uint32_t type; /* enum radv_shader_binary_type */

   struct ac_shader_config config;
   struct radv_shader_info info;

   /* Self-referential size so we avoid consistency issues. */
   uint32_t total_size;
};
339
/* Legacy binary: machine code plus optional stats/IR/disassembly blobs,
 * stored back-to-back in 'data'.
 */
struct radv_shader_binary_legacy {
   struct radv_shader_binary base;
   uint32_t code_size;
   uint32_t exec_size;
   uint32_t ir_size;
   uint32_t disasm_size;
   uint32_t stats_size;
   uint32_t padding;

   /* data has size of stats_size + code_size + ir_size + disasm_size + 2,
    * where the +2 is for 0 of the ir strings. */
   uint8_t data[0];
};
/* 'data' must start exactly at the end of the struct for the size math above. */
static_assert(sizeof(struct radv_shader_binary_legacy) == offsetof(struct radv_shader_binary_legacy, data),
              "Unexpected padding");
355
/* RTLD binary: an ELF image (elf_size bytes) followed by LLVM IR
 * (llvm_ir_size bytes) in 'data'.
 */
struct radv_shader_binary_rtld {
   struct radv_shader_binary base;
   unsigned elf_size;
   unsigned llvm_ir_size;
   uint8_t data[0];
};
362
/* Serialized binary of a shader part (prolog/epilog): code then disassembly in 'data'. */
struct radv_shader_part_binary {
   struct {
      uint32_t spi_shader_col_format;
      uint32_t cb_shader_mask;
      uint32_t spi_shader_z_format;
   } info;

   uint8_t num_sgprs;
   uint8_t num_vgprs;
   unsigned code_size;
   unsigned disasm_size;

   /* Self-referential size so we avoid consistency issues. */
   uint32_t total_size;

   uint8_t data[0];
};
380
/* Arena allocation class; REPLAYABLE/REPLAYED relate to arena-block replay (see radv_replay_shader_arena_block). */
enum radv_shader_arena_type { RADV_SHADER_ARENA_DEFAULT, RADV_SHADER_ARENA_REPLAYABLE, RADV_SHADER_ARENA_REPLAYED };
382
/* A GPU buffer subdivided into blocks that hold uploaded shader code. */
struct radv_shader_arena {
   struct list_head list;    /* Link in the device's arena list. */
   struct list_head entries; /* Blocks in this arena. */
   uint32_t size;
   struct radeon_winsys_bo *bo;
   char *ptr; /* CPU mapping, if any. */
   enum radv_shader_arena_type type;
};
391
/* One allocation (or free hole) inside a shader arena. The 'pool' head is
 * used while the block struct itself sits on a free pool.
 */
union radv_shader_arena_block {
   struct list_head pool;
   struct {
      /* List of blocks in the arena, sorted by address. */
      struct list_head list;
      /* For holes, a list_head for the free-list. For allocations, freelist.prev=NULL and
       * freelist.next is a pointer associated with the allocation.
       */
      struct list_head freelist;
      struct radv_shader_arena *arena;
      uint32_t offset;
      uint32_t size;
   };
};
406
/* Segregated free lists of arena holes; size_mask tracks which lists are non-empty. */
struct radv_shader_free_list {
   uint8_t size_mask;
   struct list_head free_lists[RADV_SHADER_ALLOC_NUM_FREE_LISTS];
};
411
/* Serialized identity of an arena block, sufficient to replay the allocation
 * at the same GPU address (see radv_replay_shader_arena_block).
 */
struct radv_serialized_shader_arena_block {
   uint32_t offset;
   uint32_t size;
   uint64_t arena_va;
   uint32_t arena_size;
};
418
/* A compiled, uploaded shader; lifetime managed through vk_pipeline_cache. */
struct radv_shader {
   struct vk_pipeline_cache_object base;

   /* Guards capture/replay allocation state. */
   simple_mtx_t replay_mtx;
   bool has_replay_alloc;

   struct radeon_winsys_bo *bo;
   union radv_shader_arena_block *alloc; /* Arena block holding the code. */
   uint64_t va;                          /* GPU virtual address of the code. */

   /* Upload-queue sequence number; wait on it before executing this shader
    * (see radv_shader_wait_for_upload).
    */
   uint64_t upload_seq;

   struct ac_shader_config config;
   uint32_t code_size;
   uint32_t exec_size;
   struct radv_shader_info info;
   uint32_t max_waves;

   blake3_hash hash;
   void *code; /* CPU copy of the machine code. */

   /* debug only */
   char *spirv;
   uint32_t spirv_size;
   char *nir_string;
   char *disasm_string;
   char *ir_string;
   uint32_t *statistics;
};
448
/* A compiled shader part (VS prolog or PS epilog), reference counted. */
struct radv_shader_part {
   uint32_t ref_count;

   /* The cache key this part was built from; also used to find the part
    * again via radv_shader_part_from_cache_entry().
    */
   union {
      struct radv_vs_prolog_key vs;
      struct radv_ps_epilog_key ps;
   } key;

   uint64_t va; /* GPU virtual address of the code. */

   struct radeon_winsys_bo *bo;
   union radv_shader_arena_block *alloc;
   uint32_t code_size;
   uint32_t rsrc1;
   bool nontrivial_divisors;
   uint32_t spi_shader_col_format;
   uint32_t cb_shader_mask;
   uint32_t spi_shader_z_format;
   uint64_t upload_seq;

   /* debug only */
   char *disasm_string;
};
472
/* Vtable for a shader-part cache: key hashing/equality and part creation on miss. */
struct radv_shader_part_cache_ops {
   uint32_t (*hash)(const void *key);
   bool (*equals)(const void *a, const void *b);
   struct radv_shader_part *(*create)(struct radv_device *device, const void *key);
};
478
/* A lock-protected set of shader parts, keyed via 'ops'. */
struct radv_shader_part_cache {
   simple_mtx_t lock;
   struct radv_shader_part_cache_ops *ops;
   struct set entries;
};
484
/* One in-flight DMA upload of shader code: a command buffer plus a staging BO. */
struct radv_shader_dma_submission {
   struct list_head list;

   struct radeon_cmdbuf *cs;
   struct radeon_winsys_bo *bo; /* Staging buffer. */
   uint64_t bo_size;
   char *ptr; /* CPU mapping of the staging buffer. */

   /* The semaphore value to wait for before reusing this submission. */
   uint64_t seq;
};
496
497 struct radv_pipeline_layout;
498 struct radv_shader_stage;
499
500 void radv_optimize_nir(struct nir_shader *shader, bool optimize_conservatively);
501 void radv_optimize_nir_algebraic(nir_shader *shader, bool opt_offsets, bool opt_mqsad);
502
503 void radv_nir_lower_rt_io(nir_shader *shader, bool monolithic, uint32_t payload_offset);
504
505 struct radv_ray_tracing_stage_info;
506
507 void radv_nir_lower_rt_abi(nir_shader *shader, const VkRayTracingPipelineCreateInfoKHR *pCreateInfo,
508 const struct radv_shader_args *args, const struct radv_shader_info *info,
509 uint32_t *stack_size, bool resume_shader, struct radv_device *device,
510 struct radv_ray_tracing_pipeline *pipeline, bool monolithic,
511 const struct radv_ray_tracing_stage_info *traversal_info);
512
513 void radv_gather_unused_args(struct radv_ray_tracing_stage_info *info, nir_shader *nir);
514
515 struct radv_shader_stage;
516
517 nir_shader *radv_shader_spirv_to_nir(struct radv_device *device, const struct radv_shader_stage *stage,
518 const struct radv_spirv_to_nir_options *options, bool is_internal);
519
520 void radv_init_shader_arenas(struct radv_device *device);
521 void radv_destroy_shader_arenas(struct radv_device *device);
522 VkResult radv_init_shader_upload_queue(struct radv_device *device);
523 void radv_destroy_shader_upload_queue(struct radv_device *device);
524
525 struct radv_shader_args;
526
527 VkResult radv_shader_create_uncached(struct radv_device *device, const struct radv_shader_binary *binary,
528 bool replayable, struct radv_serialized_shader_arena_block *replay_block,
529 struct radv_shader **out_shader);
530
531 struct radv_shader_binary *radv_shader_nir_to_asm(struct radv_device *device, struct radv_shader_stage *pl_stage,
532 struct nir_shader *const *shaders, int shader_count,
533 const struct radv_graphics_state_key *gfx_state,
534 bool keep_shader_info, bool keep_statistic_info);
535
536 void radv_shader_generate_debug_info(struct radv_device *device, bool dump_shader, bool keep_shader_info,
537 struct radv_shader_binary *binary, struct radv_shader *shader,
538 struct nir_shader *const *shaders, int shader_count,
539 struct radv_shader_info *info);
540
541 VkResult radv_shader_wait_for_upload(struct radv_device *device, uint64_t seq);
542
543 struct radv_shader_dma_submission *radv_shader_dma_pop_submission(struct radv_device *device);
544
545 void radv_shader_dma_push_submission(struct radv_device *device, struct radv_shader_dma_submission *submission,
546 uint64_t seq);
547
548 struct radv_shader_dma_submission *
549 radv_shader_dma_get_submission(struct radv_device *device, struct radeon_winsys_bo *bo, uint64_t va, uint64_t size);
550
551 bool radv_shader_dma_submit(struct radv_device *device, struct radv_shader_dma_submission *submission,
552 uint64_t *upload_seq_out);
553
554 union radv_shader_arena_block *radv_alloc_shader_memory(struct radv_device *device, uint32_t size, bool replayable,
555 void *ptr);
556
557 union radv_shader_arena_block *radv_replay_shader_arena_block(struct radv_device *device,
558 const struct radv_serialized_shader_arena_block *src,
559 void *ptr);
560
561 void radv_free_shader_memory(struct radv_device *device, union radv_shader_arena_block *alloc);
562
563 struct radv_shader *radv_create_trap_handler_shader(struct radv_device *device);
564
565 struct radv_shader *radv_create_rt_prolog(struct radv_device *device);
566
567 struct radv_shader_part *radv_shader_part_create(struct radv_device *device, struct radv_shader_part_binary *binary,
568 unsigned wave_size);
569
570 struct radv_shader_part *radv_create_vs_prolog(struct radv_device *device, const struct radv_vs_prolog_key *key);
571
572 struct radv_shader_part *radv_create_ps_epilog(struct radv_device *device, const struct radv_ps_epilog_key *key,
573 struct radv_shader_part_binary **binary_out);
574
575 void radv_shader_part_destroy(struct radv_device *device, struct radv_shader_part *shader_part);
576
577 bool radv_shader_part_cache_init(struct radv_shader_part_cache *cache, struct radv_shader_part_cache_ops *ops);
578 void radv_shader_part_cache_finish(struct radv_device *device, struct radv_shader_part_cache *cache);
579 struct radv_shader_part *radv_shader_part_cache_get(struct radv_device *device, struct radv_shader_part_cache *cache,
580 struct set *local_entries, const void *key);
581
582 uint64_t radv_shader_get_va(const struct radv_shader *shader);
583 struct radv_shader *radv_find_shader(struct radv_device *device, uint64_t pc);
584
585 unsigned radv_get_max_waves(const struct radv_device *device, const struct ac_shader_config *conf,
586 const struct radv_shader_info *info);
587
588 unsigned radv_get_max_scratch_waves(const struct radv_device *device, struct radv_shader *shader);
589
590 const char *radv_get_shader_name(const struct radv_shader_info *info, gl_shader_stage stage);
591
592 unsigned radv_compute_spi_ps_input(const struct radv_physical_device *pdev,
593 const struct radv_graphics_state_key *gfx_state,
594 const struct radv_shader_info *info);
595
596 bool radv_can_dump_shader(struct radv_device *device, nir_shader *nir, bool meta_shader);
597
598 bool radv_can_dump_shader_stats(struct radv_device *device, nir_shader *nir);
599
600 VkResult radv_dump_shader_stats(struct radv_device *device, struct radv_pipeline *pipeline, struct radv_shader *shader,
601 gl_shader_stage stage, FILE *output);
602
603 /* Returns true on success and false on failure */
604 bool radv_shader_reupload(struct radv_device *device, struct radv_shader *shader);
605
606 extern const struct vk_pipeline_cache_object_ops radv_shader_ops;
607
/* Take a reference on a shader through its pipeline-cache object; returns the shader. */
static inline struct radv_shader *
radv_shader_ref(struct radv_shader *shader)
{
   vk_pipeline_cache_object_ref(&shader->base);
   return shader;
}
614
/* Drop a reference on a shader through its pipeline-cache object. */
static inline void
radv_shader_unref(struct radv_device *device, struct radv_shader *shader)
{
   vk_pipeline_cache_object_unref((struct vk_device *)device, &shader->base);
}
620
/* Take a reference on a shader part; the caller must already hold one. */
static inline struct radv_shader_part *
radv_shader_part_ref(struct radv_shader_part *shader_part)
{
   assert(shader_part && shader_part->ref_count >= 1);
   p_atomic_inc(&shader_part->ref_count);
   return shader_part;
}
628
/* Drop a reference on a shader part, destroying it when the count reaches zero. */
static inline void
radv_shader_part_unref(struct radv_device *device, struct radv_shader_part *shader_part)
{
   assert(shader_part && shader_part->ref_count >= 1);
   if (p_atomic_dec_zero(&shader_part->ref_count))
      radv_shader_part_destroy(device, shader_part);
}
636
/* Recover the radv_shader_part that embeds the given cache key ('key' member). */
static inline struct radv_shader_part *
radv_shader_part_from_cache_entry(const void *key)
{
   return container_of(key, struct radv_shader_part, key);
}
642
/* Return the LDS stride in bytes of one TCS input vertex:
 * 16 bytes (one vec4) per input, plus one extra dword when nonzero
 * to reduce LDS bank conflicts.
 */
static inline unsigned
get_tcs_input_vertex_stride(unsigned tcs_num_inputs)
{
   return tcs_num_inputs ? tcs_num_inputs * 16u + 4u : 0u;
}
654
655 uint32_t radv_get_tcs_num_patches(const struct radv_physical_device *pdev, unsigned tcs_num_input_vertices,
656 unsigned tcs_num_output_vertices, unsigned tcs_num_inputs,
657 unsigned tcs_num_lds_outputs, unsigned tcs_num_lds_patch_outputs,
658 unsigned tcs_num_vram_outputs, unsigned tcs_num_vram_patch_outputs);
659
660 uint32_t radv_get_tess_lds_size(const struct radv_physical_device *pdev, uint32_t tcs_num_input_vertices,
661 uint32_t tcs_num_output_vertices, uint32_t tcs_num_inputs, uint32_t tcs_num_patches,
662 uint32_t tcs_num_lds_outputs, uint32_t tcs_num_lds_patch_outputs);
663
664 void radv_lower_ngg(struct radv_device *device, struct radv_shader_stage *ngg_stage,
665 const struct radv_graphics_state_key *gfx_state);
666
667 bool radv_consider_culling(const struct radv_physical_device *pdev, struct nir_shader *nir, uint64_t ps_inputs_read,
668 unsigned num_vertices_per_primitive, const struct radv_shader_info *info);
669
670 void radv_get_nir_options(struct radv_physical_device *pdev);
671
672 struct radv_ray_tracing_stage_info;
673
674 nir_shader *radv_build_traversal_shader(struct radv_device *device, struct radv_ray_tracing_pipeline *pipeline,
675 const VkRayTracingPipelineCreateInfoKHR *pCreateInfo,
676 struct radv_ray_tracing_stage_info *info);
677
/* Priority classes for ray-tracing shaders; values fit in 2 bits
 * (see radv_rt_priority_mask).
 */
enum radv_rt_priority {
   radv_rt_priority_raygen = 0,
   radv_rt_priority_traversal = 1,
   radv_rt_priority_hit_miss = 2,
   radv_rt_priority_callable = 3,
   radv_rt_priority_mask = 0x3,
};
685
686 static inline enum radv_rt_priority
radv_get_rt_priority(gl_shader_stage stage)687 radv_get_rt_priority(gl_shader_stage stage)
688 {
689 switch (stage) {
690 case MESA_SHADER_RAYGEN:
691 return radv_rt_priority_raygen;
692 case MESA_SHADER_INTERSECTION:
693 case MESA_SHADER_ANY_HIT:
694 return radv_rt_priority_traversal;
695 case MESA_SHADER_CLOSEST_HIT:
696 case MESA_SHADER_MISS:
697 return radv_rt_priority_hit_miss;
698 case MESA_SHADER_CALLABLE:
699 return radv_rt_priority_callable;
700 default:
701 unreachable("Unimplemented RT shader stage.");
702 }
703 }
704
705 struct radv_shader_layout;
706 enum radv_pipeline_type;
707
708 void radv_shader_combine_cfg_vs_tcs(const struct radv_shader *vs, const struct radv_shader *tcs, uint32_t *rsrc1_out,
709 uint32_t *rsrc2_out);
710
711 void radv_shader_combine_cfg_vs_gs(const struct radv_shader *vs, const struct radv_shader *gs, uint32_t *rsrc1_out,
712 uint32_t *rsrc2_out);
713
714 void radv_shader_combine_cfg_tes_gs(const struct radv_shader *tes, const struct radv_shader *gs, uint32_t *rsrc1_out,
715 uint32_t *rsrc2_out);
716
717 const struct radv_userdata_info *radv_get_user_sgpr_info(const struct radv_shader *shader, int idx);
718
719 uint32_t radv_get_user_sgpr_loc(const struct radv_shader *shader, int idx);
720
721 uint32_t radv_get_user_sgpr(const struct radv_shader *shader, int idx);
722
723 void radv_precompute_registers_hw_ngg(struct radv_device *device, const struct ac_shader_config *config,
724 struct radv_shader_info *info);
725
726 void radv_set_stage_key_robustness(const struct vk_pipeline_robustness_state *rs, gl_shader_stage stage,
727 struct radv_shader_stage_key *key);
728
729 #endif /* RADV_SHADER_H */
730