/*
 * Copyright © 2017 Red Hat
 *
 * SPDX-License-Identifier: MIT
 */
#include "radv_shader_info.h"
#include "nir/nir.h"
#include "nir/nir_xfb_info.h"
#include "nir/radv_nir.h"
#include "radv_device.h"
#include "radv_physical_device.h"
#include "radv_pipeline_graphics.h"
#include "radv_shader.h"

#include "ac_nir.h"

static void
mark_sampler_desc(const nir_variable *var, struct radv_shader_info *info)
{
   info->desc_set_used_mask |= (1u << var->data.descriptor_set);
}

static bool
radv_use_vs_prolog(const nir_shader *nir,
                   const struct radv_graphics_state_key *gfx_state)
{
   return gfx_state->vs.has_prolog && nir->info.inputs_read;
}

static bool
radv_use_per_attribute_vb_descs(const nir_shader *nir,
                                const struct radv_graphics_state_key *gfx_state,
                                const struct radv_shader_stage_key *stage_key)
{
   return stage_key->vertex_robustness1 || radv_use_vs_prolog(nir, gfx_state);
}

static void
gather_load_vs_input_info(const nir_shader *nir, const nir_intrinsic_instr *intrin, struct radv_shader_info *info,
                          const struct radv_graphics_state_key *gfx_state,
                          const struct radv_shader_stage_key *stage_key)
{
   const nir_io_semantics io_sem = nir_intrinsic_io_semantics(intrin);
   const unsigned location = io_sem.location;
   const unsigned component = nir_intrinsic_component(intrin);
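   /* A 64-bit component occupies two 32-bit slots, so widen each read bit to
    * two before shifting by the base component.
    */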
   unsigned mask = nir_def_components_read(&intrin->def);
   mask = (intrin->def.bit_size == 64 ? util_widen_mask(mask, 2) : mask) << component;

   if (location >= VERT_ATTRIB_GENERIC0) {
      const unsigned generic_loc = location - VERT_ATTRIB_GENERIC0;

      if (gfx_state->vi.instance_rate_inputs & BITFIELD_BIT(generic_loc)) {
         info->vs.needs_instance_id = true;
         info->vs.needs_base_instance = true;
      }

      if (radv_use_per_attribute_vb_descs(nir, gfx_state, stage_key))
         info->vs.vb_desc_usage_mask |= BITFIELD_BIT(generic_loc);
      else
         info->vs.vb_desc_usage_mask |= BITFIELD_BIT(gfx_state->vi.vertex_attribute_bindings[generic_loc]);

      info->vs.input_slot_usage_mask |= BITFIELD_RANGE(generic_loc, io_sem.num_slots);
   }
}

static void
gather_load_fs_input_info(const nir_shader *nir, const nir_intrinsic_instr *intrin, struct radv_shader_info *info,
                          const struct radv_graphics_state_key *gfx_state)
{
   const nir_io_semantics io_sem = nir_intrinsic_io_semantics(intrin);
   const unsigned location = io_sem.location;
   const unsigned mapped_location = nir_intrinsic_base(intrin);
   const unsigned attrib_count = io_sem.num_slots;
   const unsigned component = nir_intrinsic_component(intrin);

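   /* Clip/cull distances are tracked in a single 8-bit mask: CLIP_DIST0
    * components occupy bits 0-3 and CLIP_DIST1 components bits 4-7.
    */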
   switch (location) {
   case VARYING_SLOT_CLIP_DIST0:
      info->ps.input_clips_culls_mask |= BITFIELD_RANGE(component, intrin->num_components);
      break;
   case VARYING_SLOT_CLIP_DIST1:
      info->ps.input_clips_culls_mask |= BITFIELD_RANGE(component, intrin->num_components) << 4;
      break;
   default:
      break;
   }

   const uint32_t mapped_mask = BITFIELD_RANGE(mapped_location, attrib_count);
   const bool per_primitive = nir->info.per_primitive_inputs & BITFIELD64_BIT(location);

   if (!per_primitive) {
      if (intrin->intrinsic == nir_intrinsic_load_input_vertex) {
         if (io_sem.interp_explicit_strict)
            info->ps.explicit_strict_shaded_mask |= mapped_mask;
         else
            info->ps.explicit_shaded_mask |= mapped_mask;
      } else if (intrin->intrinsic == nir_intrinsic_load_interpolated_input && intrin->def.bit_size == 16) {
         if (io_sem.high_16bits)
            info->ps.float16_hi_shaded_mask |= mapped_mask;
         else
            info->ps.float16_shaded_mask |= mapped_mask;
      } else if (intrin->intrinsic == nir_intrinsic_load_interpolated_input) {
         info->ps.float32_shaded_mask |= mapped_mask;
      }
   }

   if (location >= VARYING_SLOT_VAR0) {
      const uint32_t var_mask = BITFIELD_RANGE(location - VARYING_SLOT_VAR0, attrib_count);

      if (per_primitive)
         info->ps.input_per_primitive_mask |= var_mask;
      else
         info->ps.input_mask |= var_mask;
   }
}

static void
gather_intrinsic_load_input_info(const nir_shader *nir, const nir_intrinsic_instr *instr, struct radv_shader_info *info,
                                 const struct radv_graphics_state_key *gfx_state,
                                 const struct radv_shader_stage_key *stage_key)
{
   switch (nir->info.stage) {
   case MESA_SHADER_VERTEX:
      gather_load_vs_input_info(nir, instr, info, gfx_state, stage_key);
      break;
   case MESA_SHADER_FRAGMENT:
      gather_load_fs_input_info(nir, instr, info, gfx_state);
      break;
   default:
      break;
   }
}

static void
gather_intrinsic_store_output_info(const nir_shader *nir, const nir_intrinsic_instr *instr,
                                   struct radv_shader_info *info, bool consider_force_vrs)
{
   const nir_io_semantics io_sem = nir_intrinsic_io_semantics(instr);
   const unsigned location = io_sem.location;
   const unsigned num_slots = io_sem.num_slots;
   const unsigned component = nir_intrinsic_component(instr);
   const unsigned write_mask = nir_intrinsic_write_mask(instr);
   uint8_t *output_usage_mask = NULL;

   switch (nir->info.stage) {
   case MESA_SHADER_VERTEX:
      output_usage_mask = info->vs.output_usage_mask;
      break;
   case MESA_SHADER_TESS_EVAL:
      output_usage_mask = info->tes.output_usage_mask;
      break;
   case MESA_SHADER_GEOMETRY:
      output_usage_mask = info->gs.output_usage_mask;
      break;
   case MESA_SHADER_FRAGMENT:
      if (location >= FRAG_RESULT_DATA0) {
         const unsigned fs_semantic = location + io_sem.dual_source_blend_index;
         info->ps.colors_written |= 0xfu << (4 * (fs_semantic - FRAG_RESULT_DATA0));

         if (fs_semantic == FRAG_RESULT_DATA0)
            info->ps.color0_written = write_mask;
      }
      break;
   default:
      break;
   }

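   /* For multi-slot stores, the write mask packs 4 bits per slot; accumulate
    * the per-component usage of each slot starting at the base component.
    */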
   if (output_usage_mask) {
      for (unsigned i = 0; i < num_slots; i++) {
         output_usage_mask[location + i] |= ((write_mask >> (i * 4)) & 0xf) << component;
      }
   }

   if (consider_force_vrs && location == VARYING_SLOT_POS) {
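      /* Pos.W lives in channel 3; adjust for the base component of this store. */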
      unsigned pos_w_chan = 3 - component;

      if (write_mask & BITFIELD_BIT(pos_w_chan)) {
         nir_scalar pos_w = nir_scalar_resolved(instr->src[0].ssa, pos_w_chan);
         /* Use coarse shading if the value of Pos.W can't be determined or if its value is != 1
          * (typical for non-GUI elements).
          */
         if (!nir_scalar_is_const(pos_w) || nir_scalar_as_uint(pos_w) != 0x3f800000u)
            info->force_vrs_per_vertex = true;
      }
   }

   if (nir->info.stage == MESA_SHADER_GEOMETRY) {
      const uint8_t gs_streams = nir_intrinsic_io_semantics(instr).gs_streams;
      info->gs.output_streams[location] |= gs_streams << (component * 2);
   }
}

static void
gather_push_constant_info(const nir_shader *nir, const nir_intrinsic_instr *instr, struct radv_shader_info *info)
{
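   /* Inline push constant usage is tracked at dword granularity: each bit of
    * inline_push_constant_mask covers one 32-bit dword of the push constant
    * range.
    */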
   info->loads_push_constants = true;

   if (nir_src_is_const(instr->src[0]) && instr->def.bit_size >= 32) {
      uint32_t start = (nir_intrinsic_base(instr) + nir_src_as_uint(instr->src[0])) / 4u;
      uint32_t size = instr->num_components * (instr->def.bit_size / 32u);

      if (start + size <= (MAX_PUSH_CONSTANTS_SIZE / 4u)) {
         info->inline_push_constant_mask |= u_bit_consecutive64(start, size);
         return;
      }
   }

   info->can_inline_all_push_constants = false;
}

static void
gather_intrinsic_info(const nir_shader *nir, const nir_intrinsic_instr *instr, struct radv_shader_info *info,
                      const struct radv_graphics_state_key *gfx_state, const struct radv_shader_stage_key *stage_key,
                      bool consider_force_vrs)
{
   switch (instr->intrinsic) {
   case nir_intrinsic_load_barycentric_sample:
   case nir_intrinsic_load_barycentric_pixel:
   case nir_intrinsic_load_barycentric_centroid:
   case nir_intrinsic_load_barycentric_at_sample:
   case nir_intrinsic_load_barycentric_at_offset: {
      enum glsl_interp_mode mode = nir_intrinsic_interp_mode(instr);
      switch (mode) {
      case INTERP_MODE_SMOOTH:
      case INTERP_MODE_NONE:
         if (instr->intrinsic == nir_intrinsic_load_barycentric_pixel ||
             instr->intrinsic == nir_intrinsic_load_barycentric_at_sample ||
             instr->intrinsic == nir_intrinsic_load_barycentric_at_offset)
            info->ps.reads_persp_center = true;
         else if (instr->intrinsic == nir_intrinsic_load_barycentric_centroid)
            info->ps.reads_persp_centroid = true;
         else if (instr->intrinsic == nir_intrinsic_load_barycentric_sample)
            info->ps.reads_persp_sample = true;
         break;
      case INTERP_MODE_NOPERSPECTIVE:
         if (instr->intrinsic == nir_intrinsic_load_barycentric_pixel ||
             instr->intrinsic == nir_intrinsic_load_barycentric_at_sample ||
             instr->intrinsic == nir_intrinsic_load_barycentric_at_offset)
            info->ps.reads_linear_center = true;
         else if (instr->intrinsic == nir_intrinsic_load_barycentric_centroid)
            info->ps.reads_linear_centroid = true;
         else if (instr->intrinsic == nir_intrinsic_load_barycentric_sample)
            info->ps.reads_linear_sample = true;
         break;
      default:
         break;
      }
      if (instr->intrinsic == nir_intrinsic_load_barycentric_at_sample)
         info->ps.needs_sample_positions = true;
      break;
   }
   case nir_intrinsic_load_provoking_vtx_amd:
      info->ps.load_provoking_vtx = true;
      break;
   case nir_intrinsic_load_sample_positions_amd:
      info->ps.needs_sample_positions = true;
      break;
   case nir_intrinsic_load_rasterization_primitive_amd:
      info->ps.load_rasterization_prim = true;
      break;
   case nir_intrinsic_load_local_invocation_id:
   case nir_intrinsic_load_workgroup_id: {
      unsigned mask = nir_def_components_read(&instr->def);
      while (mask) {
         unsigned i = u_bit_scan(&mask);

         if (instr->intrinsic == nir_intrinsic_load_workgroup_id)
            info->cs.uses_block_id[i] = true;
         else
            info->cs.uses_thread_id[i] = true;
      }
      break;
   }
   case nir_intrinsic_load_frag_coord:
      info->ps.reads_frag_coord_mask |= nir_def_components_read(&instr->def);
      break;
   case nir_intrinsic_load_sample_pos:
      info->ps.reads_sample_pos_mask |= nir_def_components_read(&instr->def);
      break;
   case nir_intrinsic_load_push_constant:
      gather_push_constant_info(nir, instr, info);
      break;
   case nir_intrinsic_vulkan_resource_index:
      info->desc_set_used_mask |= (1u << nir_intrinsic_desc_set(instr));
      break;
   case nir_intrinsic_image_deref_load:
   case nir_intrinsic_image_deref_sparse_load:
   case nir_intrinsic_image_deref_store:
   case nir_intrinsic_image_deref_atomic:
   case nir_intrinsic_image_deref_atomic_swap:
   case nir_intrinsic_image_deref_size:
   case nir_intrinsic_image_deref_samples: {
      nir_variable *var = nir_deref_instr_get_variable(nir_instr_as_deref(instr->src[0].ssa->parent_instr));
      mark_sampler_desc(var, info);
      break;
   }
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_per_primitive_input:
   case nir_intrinsic_load_interpolated_input:
   case nir_intrinsic_load_input_vertex:
      gather_intrinsic_load_input_info(nir, instr, info, gfx_state, stage_key);
      break;
   case nir_intrinsic_store_output:
      gather_intrinsic_store_output_info(nir, instr, info, consider_force_vrs);
      break;
   case nir_intrinsic_bvh64_intersect_ray_amd:
      info->cs.uses_rt = true;
      break;
   case nir_intrinsic_load_poly_line_smooth_enabled:
      info->ps.needs_poly_line_smooth = true;
      break;
   case nir_intrinsic_begin_invocation_interlock:
      info->ps.pops = true;
      break;
   default:
      break;
   }
}

static void
gather_tex_info(const nir_shader *nir, const nir_tex_instr *instr, struct radv_shader_info *info)
{
   for (unsigned i = 0; i < instr->num_srcs; i++) {
      switch (instr->src[i].src_type) {
      case nir_tex_src_texture_deref:
         mark_sampler_desc(nir_deref_instr_get_variable(nir_src_as_deref(instr->src[i].src)), info);
         break;
      case nir_tex_src_sampler_deref:
         mark_sampler_desc(nir_deref_instr_get_variable(nir_src_as_deref(instr->src[i].src)), info);
         break;
      default:
         break;
      }
   }
}

static void
gather_info_block(const nir_shader *nir, const nir_block *block, struct radv_shader_info *info,
                  const struct radv_graphics_state_key *gfx_state, const struct radv_shader_stage_key *stage_key,
                  bool consider_force_vrs)
{
   nir_foreach_instr (instr, block) {
      switch (instr->type) {
      case nir_instr_type_intrinsic:
         gather_intrinsic_info(nir, nir_instr_as_intrinsic(instr), info, gfx_state, stage_key, consider_force_vrs);
         break;
      case nir_instr_type_tex:
         gather_tex_info(nir, nir_instr_as_tex(instr), info);
         break;
      default:
         break;
      }
   }
}

static void
gather_xfb_info(const nir_shader *nir, struct radv_shader_info *info)
{
   struct radv_streamout_info *so = &info->so;

   if (!nir->xfb_info)
      return;

   const nir_xfb_info *xfb = nir->xfb_info;
   assert(xfb->output_count <= MAX_SO_OUTPUTS);
   so->num_outputs = xfb->output_count;

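   /* enabled_stream_buffers_mask packs 4 buffer-enable bits per stream, so
    * each output sets the bit for its buffer within its stream's nibble.
    */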
   for (unsigned i = 0; i < xfb->output_count; i++) {
      unsigned output_buffer = xfb->outputs[i].buffer;
      unsigned stream = xfb->buffer_to_stream[xfb->outputs[i].buffer];
      so->enabled_stream_buffers_mask |= (1 << output_buffer) << (stream * 4);
   }

   for (unsigned i = 0; i < NIR_MAX_XFB_BUFFERS; i++) {
      so->strides[i] = xfb->buffers[i].stride / 4;
   }
}

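/* Assign the next free param export slot to the given varying, unless it
 * already has one. Slots are handed out sequentially, in mask order.
 */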
static void
assign_outinfo_param(struct radv_vs_output_info *outinfo, gl_varying_slot idx, unsigned *total_param_exports,
                     unsigned extra_offset)
{
   if (outinfo->vs_output_param_offset[idx] == AC_EXP_PARAM_UNDEFINED)
      outinfo->vs_output_param_offset[idx] = extra_offset + (*total_param_exports)++;
}

static void
assign_outinfo_params(struct radv_vs_output_info *outinfo, uint64_t mask, unsigned *total_param_exports,
                      unsigned extra_offset)
{
   u_foreach_bit64 (idx, mask) {
      if (idx >= VARYING_SLOT_VAR0 || idx == VARYING_SLOT_LAYER || idx == VARYING_SLOT_PRIMITIVE_ID ||
          idx == VARYING_SLOT_VIEWPORT)
         assign_outinfo_param(outinfo, idx, total_param_exports, extra_offset);
   }
}

static uint8_t
radv_get_wave_size(struct radv_device *device, gl_shader_stage stage, const struct radv_shader_info *info,
                   const struct radv_shader_stage_key *stage_key)
{
   const struct radv_physical_device *pdev = radv_device_physical(device);

   if (stage_key->subgroup_required_size)
      return stage_key->subgroup_required_size * 32;

   if (stage == MESA_SHADER_GEOMETRY && !info->is_ngg)
      return 64;
   else if (stage == MESA_SHADER_COMPUTE || stage == MESA_SHADER_TASK)
      return info->wave_size;
   else if (stage == MESA_SHADER_FRAGMENT)
      return pdev->ps_wave_size;
   else if (gl_shader_stage_is_rt(stage))
      return pdev->rt_wave_size;
   else
      return pdev->ge_wave_size;
}

static uint8_t
radv_get_ballot_bit_size(struct radv_device *device, gl_shader_stage stage, const struct radv_shader_info *info,
                         const struct radv_shader_stage_key *stage_key)
{
   if (stage_key->subgroup_required_size)
      return stage_key->subgroup_required_size * 32;

   return 64;
}

static uint32_t
radv_compute_esgs_itemsize(const struct radv_device *device, uint32_t num_varyings)
{
   const struct radv_physical_device *pdev = radv_device_physical(device);
   uint32_t esgs_itemsize;

   esgs_itemsize = num_varyings * 16;

   /* For the ESGS ring in LDS, add 1 dword to reduce LDS bank
    * conflicts, i.e. each vertex will start on a different bank.
    */
   if (pdev->info.gfx_level >= GFX9 && esgs_itemsize)
      esgs_itemsize += 4;

   return esgs_itemsize;
}

static void
gather_shader_info_ngg_query(struct radv_device *device, struct radv_shader_info *info)
{
   info->has_xfb_query = info->so.num_outputs > 0;
   info->has_prim_query = device->cache_key.primitives_generated_query || info->has_xfb_query;
}

uint64_t
radv_gather_unlinked_io_mask(const uint64_t nir_io_mask)
{
   /* Create a mask of driver locations mapped from NIR semantics. */
   uint64_t radv_io_mask = 0;
   u_foreach_bit64 (semantic, nir_io_mask) {
      /* These outputs are not used when fixed output slots are needed. */
      if (semantic == VARYING_SLOT_LAYER || semantic == VARYING_SLOT_VIEWPORT ||
          semantic == VARYING_SLOT_PRIMITIVE_ID || semantic == VARYING_SLOT_PRIMITIVE_SHADING_RATE)
         continue;

      radv_io_mask |= BITFIELD64_BIT(radv_map_io_driver_location(semantic));
   }

   return radv_io_mask;
}

uint64_t
radv_gather_unlinked_patch_io_mask(const uint64_t nir_io_mask, const uint32_t nir_patch_io_mask)
{
   uint64_t radv_io_mask = 0;
   u_foreach_bit64 (semantic, nir_patch_io_mask) {
      radv_io_mask |= BITFIELD64_BIT(radv_map_io_driver_location(semantic + VARYING_SLOT_PATCH0));
   }

   /* Tess levels need to be handled separately because they are not part of patch_outputs_written. */
   if (nir_io_mask & VARYING_BIT_TESS_LEVEL_OUTER)
      radv_io_mask |= BITFIELD64_BIT(radv_map_io_driver_location(VARYING_SLOT_TESS_LEVEL_OUTER));
   if (nir_io_mask & VARYING_BIT_TESS_LEVEL_INNER)
      radv_io_mask |= BITFIELD64_BIT(radv_map_io_driver_location(VARYING_SLOT_TESS_LEVEL_INNER));

   return radv_io_mask;
}

static void
gather_shader_info_vs(struct radv_device *device, const nir_shader *nir,
                      const struct radv_graphics_state_key *gfx_state, const struct radv_shader_stage_key *stage_key,
                      struct radv_shader_info *info)
{
   if (radv_use_vs_prolog(nir, gfx_state)) {
      info->vs.has_prolog = true;
      info->vs.dynamic_inputs = true;
   }

   info->gs_inputs_read = ~0ULL;
   info->vs.hs_inputs_read = ~0ULL;

   /* Use per-attribute vertex descriptors to prevent faults and for correct bounds checking. */
   info->vs.use_per_attribute_vb_descs = radv_use_per_attribute_vb_descs(nir, gfx_state, stage_key);

   /* We have to ensure consistent input register assignments between the main shader and the
    * prolog.
    */
   info->vs.needs_instance_id |= info->vs.has_prolog;
   info->vs.needs_base_instance |= info->vs.has_prolog;
   info->vs.needs_draw_id |= info->vs.has_prolog;

   if (info->vs.dynamic_inputs)
      info->vs.vb_desc_usage_mask = BITFIELD_MASK(util_last_bit(info->vs.vb_desc_usage_mask));

   /* When the topology is unknown (with GPL), the number of vertices per primitive needs to be
    * passed through a user SGPR for NGG streamout with VS. Otherwise, the XFB offset is
    * incorrectly computed because using the maximum number of vertices can't work.
    */
   info->vs.dynamic_num_verts_per_prim = gfx_state->ia.topology == V_008958_DI_PT_NONE && info->is_ngg && nir->xfb_info;

   if (!info->outputs_linked)
      info->vs.num_linked_outputs = util_last_bit64(radv_gather_unlinked_io_mask(nir->info.outputs_written));

   if (info->next_stage == MESA_SHADER_TESS_CTRL) {
      info->vs.as_ls = true;
   } else if (info->next_stage == MESA_SHADER_GEOMETRY) {
      info->vs.as_es = true;
      info->esgs_itemsize = radv_compute_esgs_itemsize(device, info->vs.num_linked_outputs);
   }

   if (info->is_ngg) {
      info->vs.num_outputs = nir->num_outputs;

      if (info->next_stage == MESA_SHADER_FRAGMENT || info->next_stage == MESA_SHADER_NONE) {
         gather_shader_info_ngg_query(device, info);
      }
   }
}

static void
gather_shader_info_tcs(struct radv_device *device, const nir_shader *nir,
                       const struct radv_graphics_state_key *gfx_state, struct radv_shader_info *info)
{
   const struct radv_physical_device *pdev = radv_device_physical(device);

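   /* Only TCS outputs that the TCS itself reads back have to live in LDS;
    * count per-vertex and per-patch (including tess level) slots separately.
    */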
   const uint64_t tess_lvl_mask = VARYING_BIT_TESS_LEVEL_OUTER | VARYING_BIT_TESS_LEVEL_INNER;
   const uint64_t per_vtx_out_mask = nir->info.outputs_read & nir->info.outputs_written & ~tess_lvl_mask;
   const uint64_t tess_lvl_out_mask = nir->info.outputs_written & tess_lvl_mask;
   const uint32_t per_patch_out_mask = nir->info.patch_outputs_read & nir->info.patch_outputs_written;

   info->tcs.num_lds_per_vertex_outputs = util_bitcount64(per_vtx_out_mask);
   info->tcs.num_lds_per_patch_outputs = util_bitcount64(tess_lvl_out_mask) + util_bitcount(per_patch_out_mask);
   info->tcs.tcs_vertices_out = nir->info.tess.tcs_vertices_out;
   info->tcs.tes_inputs_read = ~0ULL;
   info->tcs.tes_patch_inputs_read = ~0ULL;

   if (!info->inputs_linked)
      info->tcs.num_linked_inputs = util_last_bit64(radv_gather_unlinked_io_mask(nir->info.inputs_read));
   if (!info->outputs_linked) {
      info->tcs.num_linked_outputs = util_last_bit64(radv_gather_unlinked_io_mask(
         nir->info.outputs_written & ~(VARYING_BIT_TESS_LEVEL_OUTER | VARYING_BIT_TESS_LEVEL_INNER)));
      info->tcs.num_linked_patch_outputs = util_last_bit64(
         radv_gather_unlinked_patch_io_mask(nir->info.outputs_written, nir->info.patch_outputs_written));
   }

   if (gfx_state->ts.patch_control_points) {
      /* Number of tessellation patches per workgroup processed by the current pipeline. */
      info->num_tess_patches = radv_get_tcs_num_patches(
         pdev, gfx_state->ts.patch_control_points, nir->info.tess.tcs_vertices_out, info->tcs.num_linked_inputs,
         info->tcs.num_lds_per_vertex_outputs, info->tcs.num_lds_per_patch_outputs, info->tcs.num_linked_outputs,
         info->tcs.num_linked_patch_outputs);

      /* LDS size used by VS+TCS for storing TCS inputs and outputs. */
      info->tcs.num_lds_blocks = radv_get_tess_lds_size(
         pdev, gfx_state->ts.patch_control_points, nir->info.tess.tcs_vertices_out, info->tcs.num_linked_inputs,
         info->num_tess_patches, info->tcs.num_lds_per_vertex_outputs, info->tcs.num_lds_per_patch_outputs);
   }
}

static void
gather_shader_info_tes(struct radv_device *device, const nir_shader *nir, struct radv_shader_info *info)
{
   info->gs_inputs_read = ~0ULL;
   info->tes._primitive_mode = nir->info.tess._primitive_mode;
   info->tes.spacing = nir->info.tess.spacing;
   info->tes.ccw = nir->info.tess.ccw;
   info->tes.point_mode = nir->info.tess.point_mode;
   info->tes.tcs_vertices_out = nir->info.tess.tcs_vertices_out;
   info->tes.reads_tess_factors =
      !!(nir->info.inputs_read & (VARYING_BIT_TESS_LEVEL_INNER | VARYING_BIT_TESS_LEVEL_OUTER));

   if (!info->inputs_linked) {
      info->tes.num_linked_inputs = util_last_bit64(radv_gather_unlinked_io_mask(
         nir->info.inputs_read & ~(VARYING_BIT_TESS_LEVEL_OUTER | VARYING_BIT_TESS_LEVEL_INNER)));
      info->tes.num_linked_patch_inputs = util_last_bit64(
         radv_gather_unlinked_patch_io_mask(nir->info.inputs_read, nir->info.patch_inputs_read));
   }
   if (!info->outputs_linked)
      info->tes.num_linked_outputs = util_last_bit64(radv_gather_unlinked_io_mask(nir->info.outputs_written));

   if (info->next_stage == MESA_SHADER_GEOMETRY) {
      info->tes.as_es = true;
      info->esgs_itemsize = radv_compute_esgs_itemsize(device, info->tes.num_linked_outputs);
   }

   if (info->is_ngg) {
      info->tes.num_outputs = nir->num_outputs;

      if (info->next_stage == MESA_SHADER_FRAGMENT || info->next_stage == MESA_SHADER_NONE) {
         gather_shader_info_ngg_query(device, info);
      }
   }
}

static void
radv_init_legacy_gs_ring_info(const struct radv_device *device, struct radv_shader_info *gs_info)
{
   const struct radv_physical_device *pdev = radv_device_physical(device);
   struct radv_legacy_gs_info *gs_ring_info = &gs_info->gs_ring_info;
   unsigned num_se = pdev->info.max_se;
   unsigned wave_size = 64;
   unsigned max_gs_waves = 32 * num_se; /* max 32 per SE on GCN */
   /* On GFX6-GFX7, the value comes from VGT_GS_VERTEX_REUSE = 16.
    * On GFX8+, the value comes from VGT_VERTEX_REUSE_BLOCK_CNTL = 30 (+2).
    */
   unsigned gs_vertex_reuse = (pdev->info.gfx_level >= GFX8 ? 32 : 16) * num_se;
   unsigned alignment = 256 * num_se;
   /* The maximum size is 63.999 MB per SE. */
   unsigned max_size = ((unsigned)(63.999 * 1024 * 1024) & ~255) * num_se;

   /* Calculate the minimum size. */
   unsigned min_esgs_ring_size = align(gs_ring_info->esgs_itemsize * 4 * gs_vertex_reuse * wave_size, alignment);
   /* These are recommended sizes, not minimum sizes. */
   unsigned esgs_ring_size = max_gs_waves * 2 * wave_size * gs_ring_info->esgs_itemsize * 4 * gs_info->gs.vertices_in;
   unsigned gsvs_ring_size = max_gs_waves * 2 * wave_size * gs_info->gs.max_gsvs_emit_size;

   min_esgs_ring_size = align(min_esgs_ring_size, alignment);
   esgs_ring_size = align(esgs_ring_size, alignment);
   gsvs_ring_size = align(gsvs_ring_size, alignment);

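   /* GFX9+ passes ES outputs to the GS through LDS, so a separate ESGS ring
    * buffer in memory is only needed on GFX8 and older.
    */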
   if (pdev->info.gfx_level <= GFX8)
      gs_ring_info->esgs_ring_size = CLAMP(esgs_ring_size, min_esgs_ring_size, max_size);

   gs_ring_info->gsvs_ring_size = MIN2(gsvs_ring_size, max_size);
}

static void
radv_get_legacy_gs_info(const struct radv_device *device, struct radv_shader_info *gs_info)
{
   const struct radv_physical_device *pdev = radv_device_physical(device);
   struct radv_legacy_gs_info *out = &gs_info->gs_ring_info;
   const unsigned gs_num_invocations = MAX2(gs_info->gs.invocations, 1);
   const bool uses_adjacency =
      gs_info->gs.input_prim == MESA_PRIM_LINES_ADJACENCY || gs_info->gs.input_prim == MESA_PRIM_TRIANGLES_ADJACENCY;

   /* All these are in dwords: */
   /* We can't allow using the whole LDS, because GS waves compete with
    * other shader stages for LDS space. */
   const unsigned max_lds_size = 8 * 1024;
   const unsigned esgs_itemsize = radv_compute_esgs_itemsize(device, gs_info->gs.num_linked_inputs) / 4;
   unsigned esgs_lds_size;

   /* All these are per subgroup: */
   const unsigned max_out_prims = 32 * 1024;
   const unsigned max_es_verts = 255;
   const unsigned ideal_gs_prims = 64;
   unsigned max_gs_prims, gs_prims;
   unsigned min_es_verts, es_verts, worst_case_es_verts;

   if (uses_adjacency || gs_num_invocations > 1)
      max_gs_prims = 127 / gs_num_invocations;
   else
      max_gs_prims = 255;

   /* MAX_PRIMS_PER_SUBGROUP = gs_prims * max_vert_out * gs_invocations.
    * Make sure we don't go over the maximum value.
    */
   if (gs_info->gs.vertices_out > 0) {
      max_gs_prims = MIN2(max_gs_prims, max_out_prims / (gs_info->gs.vertices_out * gs_num_invocations));
   }
   assert(max_gs_prims > 0);

   /* If the primitive has adjacency, halve the number of vertices
    * that will be reused in multiple primitives.
    */
   min_es_verts = gs_info->gs.vertices_in / (uses_adjacency ? 2 : 1);

   gs_prims = MIN2(ideal_gs_prims, max_gs_prims);
   worst_case_es_verts = MIN2(min_es_verts * gs_prims, max_es_verts);

   /* Compute ESGS LDS size based on the worst case number of ES vertices
    * needed to create the target number of GS prims per subgroup.
    */
   esgs_lds_size = esgs_itemsize * worst_case_es_verts;

   /* If total LDS usage is too big, refactor partitions based on ratio
    * of ESGS item sizes.
    */
   if (esgs_lds_size > max_lds_size) {
      /* Our target GS Prims Per Subgroup was too large. Calculate
       * the maximum number of GS Prims Per Subgroup that will fit
       * into LDS, capped by the maximum that the hardware can support.
       */
      gs_prims = MIN2((max_lds_size / (esgs_itemsize * min_es_verts)), max_gs_prims);
      assert(gs_prims > 0);
      worst_case_es_verts = MIN2(min_es_verts * gs_prims, max_es_verts);

      esgs_lds_size = esgs_itemsize * worst_case_es_verts;
      assert(esgs_lds_size <= max_lds_size);
   }

   /* Now calculate remaining ESGS information. */
   if (esgs_lds_size)
      es_verts = MIN2(esgs_lds_size / esgs_itemsize, max_es_verts);
   else
      es_verts = max_es_verts;

   /* Vertices for adjacency primitives are not always reused, so restore
    * it for ES_VERTS_PER_SUBGRP.
    */
   min_es_verts = gs_info->gs.vertices_in;

   /* For normal primitives, the VGT only checks if they are past the ES
    * verts per subgroup after allocating a full GS primitive and if they
    * are, kick off a new subgroup.  But if those additional ES verts are
    * unique (e.g. not reused) we need to make sure there is enough LDS
    * space to account for those ES verts beyond ES_VERTS_PER_SUBGRP.
    */
   es_verts -= min_es_verts - 1;

   const uint32_t es_verts_per_subgroup = es_verts;
   const uint32_t gs_prims_per_subgroup = gs_prims;
   const uint32_t gs_inst_prims_in_subgroup = gs_prims * gs_num_invocations;
   const uint32_t max_prims_per_subgroup = gs_inst_prims_in_subgroup * gs_info->gs.vertices_out;
   const uint32_t lds_granularity = pdev->info.lds_encode_granularity;
   const uint32_t total_lds_bytes = align(esgs_lds_size * 4, lds_granularity);

   out->gs_inst_prims_in_subgroup = gs_inst_prims_in_subgroup;
   out->es_verts_per_subgroup = es_verts_per_subgroup;
   out->gs_prims_per_subgroup = gs_prims_per_subgroup;
   out->esgs_itemsize = esgs_itemsize;
   out->lds_size = total_lds_bytes / lds_granularity;
   assert(max_prims_per_subgroup <= max_out_prims);

   radv_init_legacy_gs_ring_info(device, gs_info);
}

static void
gather_shader_info_gs(struct radv_device *device, const nir_shader *nir, struct radv_shader_info *info)
{
   const struct radv_physical_device *pdev = radv_device_physical(device);
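   /* Each output slot takes 16 bytes (one vec4) per GSVS vertex; when more
    * than 4 clip+cull distances are written, they need a second slot.
    */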
   unsigned add_clip = nir->info.clip_distance_array_size + nir->info.cull_distance_array_size > 4;
   info->gs.gsvs_vertex_size = (util_bitcount64(nir->info.outputs_written) + add_clip) * 16;
   info->gs.max_gsvs_emit_size = info->gs.gsvs_vertex_size * nir->info.gs.vertices_out;

   info->gs.vertices_in = nir->info.gs.vertices_in;
   info->gs.vertices_out = nir->info.gs.vertices_out;
   info->gs.input_prim = nir->info.gs.input_primitive;
   info->gs.output_prim = nir->info.gs.output_primitive;
   info->gs.invocations = nir->info.gs.invocations;
   info->gs.max_stream = nir->info.gs.active_stream_mask ? util_last_bit(nir->info.gs.active_stream_mask) - 1 : 0;
   info->gs.has_pipeline_stat_query = pdev->emulate_ngg_gs_query_pipeline_stat;

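   /* output_streams stores 2 bits per component selecting the vertex stream
    * (0-3); count how many enabled components go to each stream.
    */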
   for (unsigned slot = 0; slot < VARYING_SLOT_MAX; ++slot) {
      const uint8_t usage_mask = info->gs.output_usage_mask[slot];
      const uint8_t gs_streams = info->gs.output_streams[slot];

      for (unsigned component = 0; component < 4; ++component) {
         if (!(usage_mask & BITFIELD_BIT(component)))
            continue;

         const uint8_t stream = (gs_streams >> (component * 2)) & 0x3;
         info->gs.num_stream_output_components[stream]++;
      }
   }

   if (!info->inputs_linked)
      info->gs.num_linked_inputs = util_last_bit64(radv_gather_unlinked_io_mask(nir->info.inputs_read));

   if (info->is_ngg) {
      gather_shader_info_ngg_query(device, info);
   } else {
      radv_get_legacy_gs_info(device, info);
   }
}

static void
gather_shader_info_mesh(struct radv_device *device, const nir_shader *nir,
                        const struct radv_shader_stage_key *stage_key, struct radv_shader_info *info)
{
   struct gfx10_ngg_info *ngg_info = &info->ngg_info;

   info->ms.output_prim = nir->info.mesh.primitive_type;

   /* Special case for mesh shader workgroups.
    *
    * Mesh shaders don't have any real vertex input, but they can produce
    * an arbitrary number of vertices and primitives (up to 256).
    * We need to precisely control the number of mesh shader workgroups
    * that are launched from draw calls.
    *
    * To achieve that, we set:
    * - input primitive topology to point list
    * - input vertex and primitive count to 1
    * - max output vertex count and primitive amplification factor
    *   to the boundaries of the shader
    *
    * With that, in the draw call:
    * - drawing 1 input vertex ~ launching 1 mesh shader workgroup
    *
    * In the shader:
    * - input vertex id ~ workgroup id (in 1D - shader needs to calculate in 3D)
    *
    * Notes:
    * - without GS_EN=1 PRIM_AMP_FACTOR and MAX_VERTS_PER_SUBGROUP don't seem to work
    * - with GS_EN=1 we must also set VGT_GS_MAX_VERT_OUT (otherwise the GPU hangs)
    * - with GS_FAST_LAUNCH=1 every lane's VGPRs are initialized to the same input vertex index
    */
   ngg_info->esgs_ring_size = 1;
   ngg_info->hw_max_esverts = 1;
   ngg_info->max_gsprims = 1;
   ngg_info->max_out_verts = nir->info.mesh.max_vertices_out;
   ngg_info->max_vert_out_per_gs_instance = false;
   ngg_info->ngg_emit_size = 0;
   ngg_info->prim_amp_factor = nir->info.mesh.max_primitives_out;
   ngg_info->vgt_esgs_ring_itemsize = 1;

   info->ms.has_query = device->cache_key.mesh_shader_queries;
   info->ms.has_task = stage_key->has_task_shader;
}

static void
calc_mesh_workgroup_size(const struct radv_device *device, const nir_shader *nir, struct radv_shader_info *info)
{
   const struct radv_physical_device *pdev = radv_device_physical(device);
   unsigned api_workgroup_size = ac_compute_cs_workgroup_size(nir->info.workgroup_size, false, UINT32_MAX);

   if (pdev->mesh_fast_launch_2) {
      /* Use multi-row export. It is also necessary to use the API workgroup size for non-emulated queries. */
      info->workgroup_size = api_workgroup_size;
   } else {
      struct gfx10_ngg_info *ngg_info = &info->ngg_info;
      unsigned min_ngg_workgroup_size = ac_compute_ngg_workgroup_size(
         ngg_info->hw_max_esverts, ngg_info->max_gsprims, ngg_info->max_out_verts, ngg_info->prim_amp_factor);

      info->workgroup_size = MAX2(min_ngg_workgroup_size, api_workgroup_size);
   }
}

static void
gather_shader_info_fs(const struct radv_device *device, const nir_shader *nir,
                      const struct radv_graphics_state_key *gfx_state, struct radv_shader_info *info)
{
   const struct radv_physical_device *pdev = radv_device_physical(device);
   const uint64_t per_primitive_input_mask = nir->info.inputs_read & nir->info.per_primitive_inputs;
   const unsigned num_per_primitive_inputs = util_bitcount64(per_primitive_input_mask);
   const unsigned num_inputs = util_bitcount64(nir->info.inputs_read);
   assert(num_per_primitive_inputs <= num_inputs);

   info->ps.num_interp = num_inputs;
   info->ps.num_prim_interp = 0;

   if (pdev->info.gfx_level == GFX10_3) {
      /* GFX10.3 distinguishes NUM_INTERP and NUM_PRIM_INTERP, but
       * these are counted together in NUM_INTERP on GFX11.
       */
      info->ps.num_interp = num_inputs - num_per_primitive_inputs;
      info->ps.num_prim_interp = num_per_primitive_inputs;
   }

   info->ps.can_discard = nir->info.fs.uses_discard;
   info->ps.early_fragment_test =
      nir->info.fs.early_fragment_tests ||
      (nir->info.fs.early_and_late_fragment_tests && nir->info.fs.depth_layout == FRAG_DEPTH_LAYOUT_NONE &&
       nir->info.fs.stencil_front_layout == FRAG_STENCIL_LAYOUT_NONE &&
       nir->info.fs.stencil_back_layout == FRAG_STENCIL_LAYOUT_NONE);
   info->ps.post_depth_coverage = nir->info.fs.post_depth_coverage;
   info->ps.depth_layout = nir->info.fs.depth_layout;
   info->ps.uses_sample_shading = nir->info.fs.uses_sample_shading;
   info->ps.uses_fbfetch_output = nir->info.fs.uses_fbfetch_output;
   info->ps.writes_memory = nir->info.writes_memory;
   info->ps.has_pcoord = nir->info.inputs_read & VARYING_BIT_PNTC;
   info->ps.prim_id_input = nir->info.inputs_read & VARYING_BIT_PRIMITIVE_ID;
   info->ps.layer_input = nir->info.inputs_read & VARYING_BIT_LAYER;
   info->ps.viewport_index_input = nir->info.inputs_read & VARYING_BIT_VIEWPORT;
   info->ps.writes_z = nir->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH);
   info->ps.writes_stencil = nir->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_STENCIL);
   info->ps.writes_sample_mask = nir->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK);
   info->ps.reads_sample_mask_in = BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_SAMPLE_MASK_IN);
   info->ps.reads_sample_id = BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_SAMPLE_ID);
   info->ps.reads_frag_shading_rate = BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_FRAG_SHADING_RATE);
   info->ps.reads_front_face = BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_FRONT_FACE);
   info->ps.reads_barycentric_model = BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_BARYCENTRIC_PULL_MODEL);
   info->ps.reads_fully_covered = BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_FULLY_COVERED);

   bool uses_persp_or_linear_interp = info->ps.reads_persp_center || info->ps.reads_persp_centroid ||
                                      info->ps.reads_persp_sample || info->ps.reads_linear_center ||
                                      info->ps.reads_linear_centroid || info->ps.reads_linear_sample;

   info->ps.allow_flat_shading =
      !(uses_persp_or_linear_interp || info->ps.needs_sample_positions || info->ps.reads_frag_shading_rate ||
        info->ps.writes_memory || nir->info.fs.needs_quad_helper_invocations ||
        BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_FRAG_COORD) ||
        BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_POINT_COORD) ||
        BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_SAMPLE_ID) ||
        BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_SAMPLE_POS) ||
        BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_SAMPLE_MASK_IN) ||
        BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_HELPER_INVOCATION));

   info->ps.pops_is_per_sample =
      info->ps.pops && (nir->info.fs.sample_interlock_ordered || nir->info.fs.sample_interlock_unordered);

   info->ps.spi_ps_input_ena = radv_compute_spi_ps_input(pdev, gfx_state, info);
   info->ps.spi_ps_input_addr = info->ps.spi_ps_input_ena;
   if (pdev->info.gfx_level >= GFX12) {
      /* Only SPI_PS_INPUT_ENA has this bit on GFX12. */
      info->ps.spi_ps_input_addr &= C_02865C_COVERAGE_TO_SHADER_SELECT;
   }

   info->ps.has_epilog = gfx_state->ps.has_epilog && info->ps.colors_written;

   if (!info->ps.has_epilog) {
      info->ps.mrt0_is_dual_src = gfx_state->ps.epilog.mrt0_is_dual_src;
      info->ps.spi_shader_col_format = gfx_state->ps.epilog.spi_shader_col_format;

      /* Clear color attachments that aren't exported by the FS to match IO shader arguments. */
      info->ps.spi_shader_col_format &= info->ps.colors_written;

      info->ps.cb_shader_mask = ac_get_cb_shader_mask(info->ps.spi_shader_col_format);
   }

   const bool export_alpha_and_mrtz =
      (info->ps.color0_written & 0x8) && (info->ps.writes_z || info->ps.writes_stencil || info->ps.writes_sample_mask);

   info->ps.exports_mrtz_via_epilog =
      info->ps.has_epilog && gfx_state->ps.exports_mrtz_via_epilog && export_alpha_and_mrtz;

   if (!info->ps.exports_mrtz_via_epilog) {
      info->ps.writes_mrt0_alpha = gfx_state->ms.alpha_to_coverage_via_mrtz && export_alpha_and_mrtz;
   }

   /* Disable VRS and use the rates from PS_ITER_SAMPLES if:
    *
    * - The fragment shader reads gl_SampleMaskIn because the 16-bit sample coverage mask isn't enough for MSAA8x and
    *   2x2 coarse shading.
    * - On GFX10.3, if the fragment shader requests a fragment interlock execution mode even if the ordered section was
    *   optimized out, to consistently implement fragmentShadingRateWithFragmentShaderInterlock = VK_FALSE.
    */
   info->ps.force_sample_iter_shading_rate =
      (info->ps.reads_sample_mask_in && !info->ps.needs_poly_line_smooth) ||
      (pdev->info.gfx_level == GFX10_3 &&
       (nir->info.fs.sample_interlock_ordered || nir->info.fs.sample_interlock_unordered ||
        nir->info.fs.pixel_interlock_ordered || nir->info.fs.pixel_interlock_unordered));
}

static void
gather_shader_info_rt(const nir_shader *nir, struct radv_shader_info *info)
{
   // TODO: inline push_constants again
   info->loads_dynamic_offsets = true;
   info->loads_push_constants = true;
   info->can_inline_all_push_constants = false;
   info->inline_push_constant_mask = 0;
   info->desc_set_used_mask = -1u;
}

static void
gather_shader_info_cs(struct radv_device *device, const nir_shader *nir, const struct radv_shader_stage_key *stage_key,
                      struct radv_shader_info *info)
{
   const struct radv_physical_device *pdev = radv_device_physical(device);
   unsigned default_wave_size = pdev->cs_wave_size;
   if (info->cs.uses_rt)
      default_wave_size = pdev->rt_wave_size;

   unsigned local_size = nir->info.workgroup_size[0] * nir->info.workgroup_size[1] * nir->info.workgroup_size[2];

   /* Games don't always request full subgroups when they should, which can cause bugs if cswave32
    * is enabled. Furthermore, if cooperative matrices or subgroup info are used, we can't transparently change
    * the subgroup size.
    */
   const bool require_full_subgroups =
      stage_key->subgroup_require_full || nir->info.cs.has_cooperative_matrix ||
      (default_wave_size == 32 && nir->info.uses_wide_subgroup_intrinsics && local_size % RADV_SUBGROUP_SIZE == 0);

   const unsigned required_subgroup_size = stage_key->subgroup_required_size * 32;

   if (required_subgroup_size) {
      info->wave_size = required_subgroup_size;
   } else if (require_full_subgroups) {
      info->wave_size = RADV_SUBGROUP_SIZE;
   } else if (pdev->info.gfx_level >= GFX10 && local_size <= 32) {
      /* Use wave32 for small workgroups. */
      info->wave_size = 32;
   } else {
      info->wave_size = default_wave_size;
   }

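   /* The register allocator hang workaround only matters for workgroups with
    * more than 256 threads.
    */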
   if (pdev->info.has_cs_regalloc_hang_bug) {
      info->cs.regalloc_hang_bug = info->cs.block_size[0] * info->cs.block_size[1] * info->cs.block_size[2] > 256;
   }
}

static void
gather_shader_info_task(struct radv_device *device, const nir_shader *nir,
                        const struct radv_shader_stage_key *stage_key, struct radv_shader_info *info)
{
   gather_shader_info_cs(device, nir, stage_key, info);

   /* Task shaders always need these for the I/O lowering even if the API shader doesn't actually
    * use them.
    */

   /* Needed to address the task draw/payload rings. */
   info->cs.uses_block_id[0] = true;
   info->cs.uses_block_id[1] = true;
   info->cs.uses_block_id[2] = true;
   info->cs.uses_grid_size = true;

   /* Needed for storing draw ready only on the 1st thread. */
   info->cs.uses_local_invocation_idx = true;

   /* Task->Mesh dispatch is linear when Y = Z = 1.
    * GFX11 CP can optimize this case with a field in its draw packets.
    */
   info->cs.linear_taskmesh_dispatch =
      nir->info.mesh.ts_mesh_dispatch_dimensions[1] == 1 && nir->info.mesh.ts_mesh_dispatch_dimensions[2] == 1;

   info->cs.has_query = device->cache_key.mesh_shader_queries;
}

static uint32_t
radv_get_user_data_0(const struct radv_device *device, struct radv_shader_info *info)
{
   const struct radv_physical_device *pdev = radv_device_physical(device);
   const enum amd_gfx_level gfx_level = pdev->info.gfx_level;

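   /* On GFX9+ the geometry stages are merged (VS runs as part of HS or GS),
    * so user SGPRs live at the registers of the merged hardware stage.
    */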
   switch (info->stage) {
   case MESA_SHADER_VERTEX:
   case MESA_SHADER_TESS_EVAL:
   case MESA_SHADER_MESH:
      if (info->next_stage == MESA_SHADER_TESS_CTRL) {
         assert(info->stage == MESA_SHADER_VERTEX);

         if (gfx_level >= GFX10) {
            return R_00B430_SPI_SHADER_USER_DATA_HS_0;
         } else if (gfx_level == GFX9) {
            return R_00B430_SPI_SHADER_USER_DATA_LS_0;
         } else {
            return R_00B530_SPI_SHADER_USER_DATA_LS_0;
         }
      }

      if (info->next_stage == MESA_SHADER_GEOMETRY) {
         assert(info->stage == MESA_SHADER_VERTEX || info->stage == MESA_SHADER_TESS_EVAL);

         if (gfx_level >= GFX10) {
            return R_00B230_SPI_SHADER_USER_DATA_GS_0;
         } else {
            return R_00B330_SPI_SHADER_USER_DATA_ES_0;
         }
      }

      if (info->is_ngg)
         return R_00B230_SPI_SHADER_USER_DATA_GS_0;

      assert(info->stage != MESA_SHADER_MESH);
      return R_00B130_SPI_SHADER_USER_DATA_VS_0;
   case MESA_SHADER_TESS_CTRL:
      return gfx_level == GFX9 ? R_00B430_SPI_SHADER_USER_DATA_LS_0 : R_00B430_SPI_SHADER_USER_DATA_HS_0;
   case MESA_SHADER_GEOMETRY:
      return gfx_level == GFX9 ? R_00B330_SPI_SHADER_USER_DATA_ES_0 : R_00B230_SPI_SHADER_USER_DATA_GS_0;
   case MESA_SHADER_FRAGMENT:
      return R_00B030_SPI_SHADER_USER_DATA_PS_0;
   case MESA_SHADER_COMPUTE:
   case MESA_SHADER_TASK:
   case MESA_SHADER_RAYGEN:
   case MESA_SHADER_CALLABLE:
   case MESA_SHADER_CLOSEST_HIT:
   case MESA_SHADER_MISS:
   case MESA_SHADER_INTERSECTION:
   case MESA_SHADER_ANY_HIT:
      return R_00B900_COMPUTE_USER_DATA_0;
   default:
      unreachable("invalid shader stage");
   }
}

static bool
radv_is_merged_shader_compiled_separately(const struct radv_device *device, const struct radv_shader_info *info)
{
   const struct radv_physical_device *pdev = radv_device_physical(device);
   const enum amd_gfx_level gfx_level = pdev->info.gfx_level;

   if (gfx_level >= GFX9) {
      switch (info->stage) {
      case MESA_SHADER_VERTEX:
         if (info->next_stage == MESA_SHADER_TESS_CTRL || info->next_stage == MESA_SHADER_GEOMETRY)
            return !info->outputs_linked;
         break;
      case MESA_SHADER_TESS_EVAL:
         if (info->next_stage == MESA_SHADER_GEOMETRY)
            return !info->outputs_linked;
         break;
      case MESA_SHADER_TESS_CTRL:
      case MESA_SHADER_GEOMETRY:
         return !info->inputs_linked;
      default:
         break;
      }
   }

   return false;
}

void
radv_nir_shader_info_init(gl_shader_stage stage, gl_shader_stage next_stage, struct radv_shader_info *info)
{
   memset(info, 0, sizeof(*info));

   /* Assume that shaders can inline all push constants by default. */
   info->can_inline_all_push_constants = true;

   info->stage = stage;
   info->next_stage = next_stage;
}

void
radv_nir_shader_info_pass(struct radv_device *device, const struct nir_shader *nir,
                          const struct radv_shader_layout *layout, const struct radv_shader_stage_key *stage_key,
                          const struct radv_graphics_state_key *gfx_state, const enum radv_pipeline_type pipeline_type,
                          bool consider_force_vrs, struct radv_shader_info *info)
{
   const struct radv_physical_device *pdev = radv_device_physical(device);
   struct nir_function *func = (struct nir_function *)exec_list_get_head_const(&nir->functions);

   if (layout->use_dynamic_descriptors) {
      info->loads_push_constants = true;
      info->loads_dynamic_offsets = true;
   }

   nir_foreach_block (block, func->impl) {
      gather_info_block(nir, block, info, gfx_state, stage_key, consider_force_vrs);
   }

   if (nir->info.stage == MESA_SHADER_VERTEX || nir->info.stage == MESA_SHADER_TESS_EVAL ||
       nir->info.stage == MESA_SHADER_GEOMETRY)
      gather_xfb_info(nir, info);

   if (nir->info.stage == MESA_SHADER_VERTEX || nir->info.stage == MESA_SHADER_TESS_EVAL ||
       nir->info.stage == MESA_SHADER_GEOMETRY || nir->info.stage == MESA_SHADER_MESH) {
      struct radv_vs_output_info *outinfo = &info->outinfo;

1152       /* These are not compiled into neither output param nor position exports. */
1153       uint64_t special_mask = BITFIELD64_BIT(VARYING_SLOT_PRIMITIVE_COUNT) |
1154                               BITFIELD64_BIT(VARYING_SLOT_PRIMITIVE_INDICES) |
1155                               BITFIELD64_BIT(VARYING_SLOT_CULL_PRIMITIVE);
1156       uint64_t per_prim_mask = nir->info.outputs_written & nir->info.per_primitive_outputs & ~special_mask;
1157       uint64_t per_vtx_mask = nir->info.outputs_written & ~nir->info.per_primitive_outputs & ~special_mask;
1158 
1159       /* Mesh multiview is only lowered in ac_nir_lower_ngg, so we have to fake it here. */
1160       if (nir->info.stage == MESA_SHADER_MESH && gfx_state->has_multiview_view_index) {
1161          per_prim_mask |= VARYING_BIT_LAYER;
1162          info->uses_view_index = true;
1163       }
1164 
1165       /* Per vertex outputs. */
1166       outinfo->writes_pointsize = per_vtx_mask & VARYING_BIT_PSIZ;
1167       outinfo->writes_viewport_index = per_vtx_mask & VARYING_BIT_VIEWPORT;
1168       outinfo->writes_layer = per_vtx_mask & VARYING_BIT_LAYER;
1169       outinfo->writes_primitive_shading_rate =
1170          (per_vtx_mask & VARYING_BIT_PRIMITIVE_SHADING_RATE) || info->force_vrs_per_vertex;
1171 
1172       /* Per primitive outputs. */
1173       outinfo->writes_viewport_index_per_primitive = per_prim_mask & VARYING_BIT_VIEWPORT;
1174       outinfo->writes_layer_per_primitive = per_prim_mask & VARYING_BIT_LAYER;
1175       outinfo->writes_primitive_shading_rate_per_primitive = per_prim_mask & VARYING_BIT_PRIMITIVE_SHADING_RATE;
1176 
1177       /* Clip/cull distances. */
1178       outinfo->clip_dist_mask = (1 << nir->info.clip_distance_array_size) - 1;
1179       outinfo->cull_dist_mask = (1 << nir->info.cull_distance_array_size) - 1;
1180       outinfo->cull_dist_mask <<= nir->info.clip_distance_array_size;
1181 
1182       int pos_written = 0x1;
1183 
1184       if (outinfo->writes_pointsize || outinfo->writes_viewport_index || outinfo->writes_layer ||
1185           outinfo->writes_primitive_shading_rate)
1186          pos_written |= 1 << 1;
1187 
1188       unsigned num_clip_distances = util_bitcount(outinfo->clip_dist_mask);
1189       unsigned num_cull_distances = util_bitcount(outinfo->cull_dist_mask);
1190 
1191       if (num_clip_distances + num_cull_distances > 0)
1192          pos_written |= 1 << 2;
1193       if (num_clip_distances + num_cull_distances > 4)
1194          pos_written |= 1 << 3;
1195 
1196       outinfo->pos_exports = util_bitcount(pos_written);
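      /* Illustrative example (values assumed, not from the source): with
       * clip_distance_array_size = 4 and cull_distance_array_size = 2,
       * clip_dist_mask = 0x0f and cull_dist_mask = 0x30; the 6 combined
       * distances set bits 2 and 3 of pos_written, so together with the
       * mandatory position export this gives pos_exports = 3.
       */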
1197 
1198       memset(outinfo->vs_output_param_offset, AC_EXP_PARAM_UNDEFINED, sizeof(outinfo->vs_output_param_offset));
1199 
1200       unsigned total_param_exports = 0;
1201 
1202       /* Per-vertex outputs */
1203       assign_outinfo_params(outinfo, per_vtx_mask, &total_param_exports, 0);
1204 
1205       outinfo->param_exports = total_param_exports;
1206 
1207       /* The HW always assumes that there is at least 1 per-vertex param,
1208        * so if there aren't any, we have to offset per-primitive params by 1.
1209        */
1210       const unsigned extra_offset = !!(total_param_exports == 0 && pdev->info.gfx_level >= GFX11);
1211 
1212       /* Per-primitive outputs: the HW needs these to be last. */
1213       assign_outinfo_params(outinfo, per_prim_mask, &total_param_exports, extra_offset);
1214 
1215       outinfo->prim_param_exports = total_param_exports - outinfo->param_exports;
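      /* Illustrative example (values assumed): 3 per-vertex params followed by
       * 2 per-primitive params yield param_exports = 3 and
       * prim_param_exports = 2. On GFX11 with 0 per-vertex params, extra_offset
       * shifts the per-primitive params to start at offset 1.
       */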
1216    }
1217 
1218    info->vs.needs_draw_id |= BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_DRAW_ID);
1219    info->vs.needs_base_instance |= BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_BASE_INSTANCE);
1220    info->vs.needs_instance_id |= BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_INSTANCE_ID);
1221    info->uses_view_index |= BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_VIEW_INDEX);
1222    info->uses_invocation_id |= BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_INVOCATION_ID);
1223    info->uses_prim_id |= BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_PRIMITIVE_ID);
1224 
1225    /* Used by compute and mesh shaders. Mesh shaders must always declare this before GFX11. */
1226    info->cs.uses_grid_size = BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_NUM_WORKGROUPS) ||
1227                              (nir->info.stage == MESA_SHADER_MESH && pdev->info.gfx_level < GFX11);
1228    info->cs.uses_local_invocation_idx = BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_LOCAL_INVOCATION_INDEX) |
1229                                         BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_SUBGROUP_ID) |
1230                                         BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_NUM_SUBGROUPS) |
1231                                         radv_shader_should_clear_lds(device, nir);
1232 
1233    if (nir->info.stage == MESA_SHADER_COMPUTE || nir->info.stage == MESA_SHADER_TASK ||
1234        nir->info.stage == MESA_SHADER_MESH) {
1235       for (int i = 0; i < 3; ++i)
1236          info->cs.block_size[i] = nir->info.workgroup_size[i];
1237    }
1238 
1239    info->user_data_0 = radv_get_user_data_0(device, info);
1240    info->merged_shader_compiled_separately = radv_is_merged_shader_compiled_separately(device, info);
1241    info->force_indirect_desc_sets = info->merged_shader_compiled_separately || stage_key->indirect_bindable;
1242 
1243    switch (nir->info.stage) {
1244    case MESA_SHADER_COMPUTE:
1245       gather_shader_info_cs(device, nir, stage_key, info);
1246       break;
1247    case MESA_SHADER_TASK:
1248       gather_shader_info_task(device, nir, stage_key, info);
1249       break;
1250    case MESA_SHADER_FRAGMENT:
1251       gather_shader_info_fs(device, nir, gfx_state, info);
1252       break;
1253    case MESA_SHADER_GEOMETRY:
1254       gather_shader_info_gs(device, nir, info);
1255       break;
1256    case MESA_SHADER_TESS_EVAL:
1257       gather_shader_info_tes(device, nir, info);
1258       break;
1259    case MESA_SHADER_TESS_CTRL:
1260       gather_shader_info_tcs(device, nir, gfx_state, info);
1261       break;
1262    case MESA_SHADER_VERTEX:
1263       gather_shader_info_vs(device, nir, gfx_state, stage_key, info);
1264       break;
1265    case MESA_SHADER_MESH:
1266       gather_shader_info_mesh(device, nir, stage_key, info);
1267       break;
1268    default:
1269       if (gl_shader_stage_is_rt(nir->info.stage))
1270          gather_shader_info_rt(nir, info);
1271       break;
1272    }
1273 
1274    info->wave_size = radv_get_wave_size(device, nir->info.stage, info, stage_key);
1275    info->ballot_bit_size = radv_get_ballot_bit_size(device, nir->info.stage, info, stage_key);
1276 
1277    switch (nir->info.stage) {
1278    case MESA_SHADER_COMPUTE:
1279    case MESA_SHADER_TASK:
1280       info->workgroup_size = ac_compute_cs_workgroup_size(nir->info.workgroup_size, false, UINT32_MAX);
1281 
1282       /* Allow the compiler to assume that the shader always has full subgroups,
1283        * meaning that the initial EXEC mask is -1 in all waves (all lanes enabled).
1284        * This assumption is incorrect for ray tracing and internal (meta) shaders
1285        * because they can use unaligned dispatch.
1286        */
1287       info->cs.uses_full_subgroups = pipeline_type != RADV_PIPELINE_RAY_TRACING && !nir->info.internal &&
1288                                      (info->workgroup_size % info->wave_size) == 0;
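      /* Illustrative example (values assumed): an 8x8x1 workgroup (64
       * invocations) with wave64 gives 64 % 64 == 0, so full subgroups can be
       * assumed; a 48-invocation workgroup with wave32 cannot (48 % 32 != 0).
       */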
1289       break;
1290    case MESA_SHADER_VERTEX:
1291       if (info->vs.as_ls || info->vs.as_es) {
1292          /* Set the maximum possible value by default; this will be optimized during linking if
1293           * possible.
1294           */
1295          info->workgroup_size = 256;
1296       } else {
1297          info->workgroup_size = info->wave_size;
1298       }
1299       break;
1300    case MESA_SHADER_TESS_CTRL:
1301       if (gfx_state->ts.patch_control_points) {
1302          info->workgroup_size =
1303             ac_compute_lshs_workgroup_size(pdev->info.gfx_level, MESA_SHADER_TESS_CTRL, info->num_tess_patches,
1304                                            gfx_state->ts.patch_control_points, info->tcs.tcs_vertices_out);
1305       } else {
1306          /* Set the maximum possible value when the workgroup size can't be determined. */
1307          info->workgroup_size = 256;
1308       }
1309       break;
1310    case MESA_SHADER_TESS_EVAL:
1311       if (info->tes.as_es) {
1312          /* Set the maximum possible value by default; this will be optimized during linking if
1313           * possible.
1314           */
1315          info->workgroup_size = 256;
1316       } else {
1317          info->workgroup_size = info->wave_size;
1318       }
1319       break;
1320    case MESA_SHADER_GEOMETRY:
1321       if (!info->is_ngg) {
1322          unsigned es_verts_per_subgroup = info->gs_ring_info.es_verts_per_subgroup;
1323          unsigned gs_inst_prims_in_subgroup = info->gs_ring_info.gs_inst_prims_in_subgroup;
1324 
1325          info->workgroup_size = ac_compute_esgs_workgroup_size(pdev->info.gfx_level, info->wave_size,
1326                                                                es_verts_per_subgroup, gs_inst_prims_in_subgroup);
1327       } else {
1328          /* Set the maximum possible value by default; this will be optimized during linking if
1329           * possible.
1330           */
1331          info->workgroup_size = 256;
1332       }
1333       break;
1334    case MESA_SHADER_MESH:
1335       calc_mesh_workgroup_size(device, nir, info);
1336       break;
1337    default:
1338       /* FS always operates without workgroups. Other stages are computed during linking but assume
1339        * no workgroups by default.
1340        */
1341       info->workgroup_size = info->wave_size;
1342       break;
1343    }
1344 }
1345 
1346 static void
1347 clamp_gsprims_to_esverts(unsigned *max_gsprims, unsigned max_esverts, unsigned min_verts_per_prim, bool use_adjacency)
1348 {
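   /* With maximum vertex reuse, the first primitive consumes
    * min_verts_per_prim vertices and each additional one at least one more
    * (two with adjacency), so at most 1 + max_reuse primitives fit.
    * Illustrative example (values assumed): max_esverts = 96 with triangles
    * (min_verts_per_prim = 3) gives max_reuse = 93 and clamps *max_gsprims
    * to 94.
    */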
1349    unsigned max_reuse = max_esverts - min_verts_per_prim;
1350    if (use_adjacency)
1351       max_reuse /= 2;
1352    *max_gsprims = MIN2(*max_gsprims, 1 + max_reuse);
1353 }
1354 
1355 static unsigned
1356 radv_get_num_input_vertices(const struct radv_shader_info *es_info, const struct radv_shader_info *gs_info)
1357 {
1358    if (gs_info) {
1359       return gs_info->gs.vertices_in;
1360    }
1361 
1362    if (es_info->stage == MESA_SHADER_TESS_EVAL) {
1363       if (es_info->tes.point_mode)
1364          return 1;
1365       if (es_info->tes._primitive_mode == TESS_PRIMITIVE_ISOLINES)
1366          return 2;
1367       return 3;
1368    }
1369 
1370    return 3;
1371 }
1372 
1373 static unsigned
1374 radv_get_pre_rast_input_topology(const struct radv_shader_info *es_info, const struct radv_shader_info *gs_info)
1375 {
1376    if (gs_info) {
1377       return gs_info->gs.input_prim;
1378    }
1379 
1380    if (es_info->stage == MESA_SHADER_TESS_EVAL) {
1381       if (es_info->tes.point_mode)
1382          return MESA_PRIM_POINTS;
1383       if (es_info->tes._primitive_mode == TESS_PRIMITIVE_ISOLINES)
1384          return MESA_PRIM_LINES;
1385       return MESA_PRIM_TRIANGLES;
1386    }
1387 
1388    return MESA_PRIM_TRIANGLES;
1389 }
1390 
1391 static unsigned
1392 gfx10_get_ngg_scratch_lds_base(const struct radv_device *device, const struct radv_shader_info *es_info,
1393                                const struct radv_shader_info *gs_info, const struct gfx10_ngg_info *ngg_info)
1394 {
1395    const struct radv_physical_device *pdev = radv_device_physical(device);
1396    uint32_t scratch_lds_base;
1397 
1398    if (gs_info) {
1399       const unsigned esgs_ring_lds_bytes = ngg_info->esgs_ring_size;
1400       const unsigned gs_total_out_vtx_bytes = ngg_info->ngg_emit_size * 4u;
1401 
1402       scratch_lds_base = ALIGN(esgs_ring_lds_bytes + gs_total_out_vtx_bytes, 8u /* for the repacking code */);
1403    } else {
1404       const bool uses_instanceid = es_info->vs.needs_instance_id;
1405       const bool uses_primitive_id = es_info->uses_prim_id;
1406       const bool streamout_enabled = es_info->so.num_outputs && pdev->use_ngg_streamout;
1407       const uint32_t num_outputs =
1408          es_info->stage == MESA_SHADER_VERTEX ? es_info->vs.num_outputs : es_info->tes.num_outputs;
1409       unsigned pervertex_lds_bytes = ac_ngg_nogs_get_pervertex_lds_size(
1410          es_info->stage, num_outputs, streamout_enabled, es_info->outinfo.export_prim_id, false, /* user edge flag */
1411          es_info->has_ngg_culling, uses_instanceid, uses_primitive_id);
1412 
1413       assert(ngg_info->hw_max_esverts <= 256);
1414       unsigned total_es_lds_bytes = pervertex_lds_bytes * ngg_info->hw_max_esverts;
1415 
1416       scratch_lds_base = ALIGN(total_es_lds_bytes, 8u);
1417    }
1418 
1419    return scratch_lds_base;
1420 }
1421 
1422 void
1423 gfx10_get_ngg_info(const struct radv_device *device, struct radv_shader_info *es_info, struct radv_shader_info *gs_info,
1424                    struct gfx10_ngg_info *out)
1425 {
1426    const struct radv_physical_device *pdev = radv_device_physical(device);
1427    const enum amd_gfx_level gfx_level = pdev->info.gfx_level;
1428    const unsigned max_verts_per_prim = radv_get_num_input_vertices(es_info, gs_info);
1429    const unsigned min_verts_per_prim = gs_info ? max_verts_per_prim : 1;
1430 
1431    const unsigned gs_num_invocations = gs_info ? MAX2(gs_info->gs.invocations, 1) : 1;
1432 
1433    const unsigned input_prim = radv_get_pre_rast_input_topology(es_info, gs_info);
1434    const bool uses_adjacency = input_prim == MESA_PRIM_LINES_ADJACENCY || input_prim == MESA_PRIM_TRIANGLES_ADJACENCY;
1435 
1436    /* All these are in dwords: */
1437    /* We can't allow using the whole LDS, because GS waves compete with
1438     * other shader stages for LDS space.
1439     *
1440     * TODO: We should really take the shader's internal LDS use into
1441     *       account. The linker will fail if the size is greater than
1442     *       8K dwords.
1443     */
1444    const unsigned max_lds_size = 8 * 1024 - 768;
1445    const unsigned target_lds_size = max_lds_size;
1446    unsigned esvert_lds_size = 0;
1447    unsigned gsprim_lds_size = 0;
1448 
1449    /* All these are per subgroup: */
1450    const unsigned min_esverts = gfx_level >= GFX11    ? 3 /* GFX11 requires at least 1 primitive per TG */
1451                                 : gfx_level >= GFX10_3 ? 29
1452                                                        : 24;
1453    bool max_vert_out_per_gs_instance = false;
1454    unsigned max_esverts_base = 128;
1455    unsigned max_gsprims_base = 128; /* default prim group size clamp */
1456 
1457    /* Hardware has the following non-natural restrictions on the value
1458     * of GE_CNTL.VERT_GRP_SIZE based on the primitive type of
1459     * the draw:
1460     *  - at most 252 for any line input primitive type
1461     *  - at most 251 for any quad input primitive type
1462     *  - at most 251 for triangle strips with adjacency (this happens to
1463     *    be the natural limit for triangle *lists* with adjacency)
1464     */
1465    max_esverts_base = MIN2(max_esverts_base, 251 + max_verts_per_prim - 1);
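   /* Illustrative example: for triangle lists (max_verts_per_prim = 3) this is
    * MIN2(128, 253), so the 128 default is unaffected; the restriction only
    * matters for larger base values.
    */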
1466 
1467    if (gs_info) {
1468       unsigned max_out_verts_per_gsprim = gs_info->gs.vertices_out * gs_num_invocations;
1469 
1470       if (max_out_verts_per_gsprim <= 256) {
1471          if (max_out_verts_per_gsprim) {
1472             max_gsprims_base = MIN2(max_gsprims_base, 256 / max_out_verts_per_gsprim);
1473          }
1474       } else {
1475          /* Use special multi-cycling mode in which each GS
1476           * instance gets its own subgroup. Does not work with
1477           * tessellation. */
1478          max_vert_out_per_gs_instance = true;
1479          max_gsprims_base = 1;
1480          max_out_verts_per_gsprim = gs_info->gs.vertices_out;
1481       }
1482 
1483       esvert_lds_size = es_info->esgs_itemsize / 4;
1484       gsprim_lds_size = (gs_info->gs.gsvs_vertex_size / 4 + 1) * max_out_verts_per_gsprim;
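      /* Illustrative example (values assumed): gsvs_vertex_size = 16 bytes and
       * 4 output vertices per input primitive give
       * gsprim_lds_size = (16 / 4 + 1) * 4 = 20 dwords, with
       * max_gsprims_base = MIN2(128, 256 / 4) = 64 from the clamp above.
       */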
1485    } else {
1486       /* VS and TES. */
1487       /* LDS size for passing data from GS to ES. */
1488       struct radv_streamout_info *so_info = &es_info->so;
1489 
1490       if (so_info->num_outputs) {
1491          /* Compute the same pervertex LDS size as the NGG streamout lowering pass which allocates
1492           * space for all outputs.
1493           * TODO: only alloc space for outputs that really need streamout.
1494           */
1495          const uint32_t num_outputs =
1496             es_info->stage == MESA_SHADER_VERTEX ? es_info->vs.num_outputs : es_info->tes.num_outputs;
1497          esvert_lds_size = 4 * num_outputs + 1;
1498       }
1499 
1500       /* GS stores Primitive IDs (one DWORD) into LDS at the address
1501        * corresponding to the ES thread of the provoking vertex. All
1502        * ES threads load and export PrimitiveID for their thread.
1503        */
1504       if (es_info->stage == MESA_SHADER_VERTEX && es_info->outinfo.export_prim_id)
1505          esvert_lds_size = MAX2(esvert_lds_size, 1);
1506    }
1507 
1508    unsigned max_gsprims = max_gsprims_base;
1509    unsigned max_esverts = max_esverts_base;
1510 
1511    if (esvert_lds_size)
1512       max_esverts = MIN2(max_esverts, target_lds_size / esvert_lds_size);
1513    if (gsprim_lds_size)
1514       max_gsprims = MIN2(max_gsprims, target_lds_size / gsprim_lds_size);
1515 
1516    max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
1517    clamp_gsprims_to_esverts(&max_gsprims, max_esverts, min_verts_per_prim, uses_adjacency);
1518    assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
1519 
1520    if (esvert_lds_size || gsprim_lds_size) {
1521       /* Now that we have a rough proportionality between esverts
1522        * and gsprims based on the primitive type, scale both of them
1523        * down simultaneously based on required LDS space.
1524        *
1525        * We could be smarter about this if we knew how much vertex
1526        * reuse to expect.
1527        */
1528       unsigned lds_total = max_esverts * esvert_lds_size + max_gsprims * gsprim_lds_size;
1529       if (lds_total > target_lds_size) {
1530          max_esverts = max_esverts * target_lds_size / lds_total;
1531          max_gsprims = max_gsprims * target_lds_size / lds_total;
1532 
1533          max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
1534          clamp_gsprims_to_esverts(&max_gsprims, max_esverts, min_verts_per_prim, uses_adjacency);
1535          assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
1536       }
1537    }
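   /* Illustrative example (values assumed): if lds_total were twice
    * target_lds_size, both max_esverts and max_gsprims would be roughly halved
    * and then re-clamped against each other as above.
    */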
1538 
1539    /* Round up towards full wave sizes for better ALU utilization. */
1540    if (!max_vert_out_per_gs_instance) {
1541       unsigned orig_max_esverts;
1542       unsigned orig_max_gsprims;
1543       unsigned wavesize;
1544 
1545       if (gs_info) {
1546          wavesize = gs_info->wave_size;
1547       } else {
1548          wavesize = es_info->wave_size;
1549       }
1550 
1551       do {
1552          orig_max_esverts = max_esverts;
1553          orig_max_gsprims = max_gsprims;
1554 
1555          max_esverts = align(max_esverts, wavesize);
1556          max_esverts = MIN2(max_esverts, max_esverts_base);
1557          if (esvert_lds_size)
1558             max_esverts = MIN2(max_esverts, (max_lds_size - max_gsprims * gsprim_lds_size) / esvert_lds_size);
1559          max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
1560 
1561          /* Hardware restriction: minimum value of max_esverts */
1562          if (gfx_level == GFX10)
1563             max_esverts = MAX2(max_esverts, min_esverts - 1 + max_verts_per_prim);
1564          else
1565             max_esverts = MAX2(max_esverts, min_esverts);
1566 
1567          max_gsprims = align(max_gsprims, wavesize);
1568          max_gsprims = MIN2(max_gsprims, max_gsprims_base);
1569          if (gsprim_lds_size) {
1570             /* Don't count unusable vertices toward the LDS
1571              * size. Those are vertices above the maximum
1572              * number of vertices that can occur in the
1573              * workgroup, which is e.g. max_gsprims * 3
1574              * for triangles.
1575              */
1576             unsigned usable_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
1577             max_gsprims = MIN2(max_gsprims, (max_lds_size - usable_esverts * esvert_lds_size) / gsprim_lds_size);
1578          }
1579          clamp_gsprims_to_esverts(&max_gsprims, max_esverts, min_verts_per_prim, uses_adjacency);
1580          assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
1581       } while (orig_max_esverts != max_esverts || orig_max_gsprims != max_gsprims);
1582 
1583       /* Verify the restriction. */
1584       if (gfx_level == GFX10)
1585          assert(max_esverts >= min_esverts - 1 + max_verts_per_prim);
1586       else
1587          assert(max_esverts >= min_esverts);
1588    } else {
1589       /* Hardware restriction: minimum value of max_esverts */
1590       if (gfx_level == GFX10)
1591          max_esverts = MAX2(max_esverts, min_esverts - 1 + max_verts_per_prim);
1592       else
1593          max_esverts = MAX2(max_esverts, min_esverts);
1594    }
1595 
1596    unsigned max_out_vertices = max_vert_out_per_gs_instance ? gs_info->gs.vertices_out
1597                                : gs_info ? max_gsprims * gs_num_invocations * gs_info->gs.vertices_out
1598                                          : max_esverts;
1599    assert(max_out_vertices <= 256);
1600 
1601    unsigned prim_amp_factor = 1;
1602    if (gs_info) {
1603       /* Number of output primitives per GS input primitive after
1604        * GS instancing. */
1605       prim_amp_factor = gs_info->gs.vertices_out;
1606    }
1607 
1608    /* On Gfx10, the GE only checks against the maximum number of ES verts
1609     * after allocating a full GS primitive. So we need to ensure that
1610     * whenever this check passes, there is enough space for a full
1611     * primitive without vertex reuse.
1612     */
1613    if (gfx_level == GFX10)
1614       out->hw_max_esverts = max_esverts - max_verts_per_prim + 1;
1615    else
1616       out->hw_max_esverts = max_esverts;
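   /* Illustrative example (values assumed): with max_esverts = 96 and
    * triangles, GFX10 reports hw_max_esverts = 96 - 3 + 1 = 94 so that a full
    * primitive still fits whenever the check passes.
    */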
1617 
1618    out->max_gsprims = max_gsprims;
1619    out->max_out_verts = max_out_vertices;
1620    out->prim_amp_factor = prim_amp_factor;
1621    out->max_vert_out_per_gs_instance = max_vert_out_per_gs_instance;
1622    out->ngg_emit_size = max_gsprims * gsprim_lds_size;
1623 
1624    /* Don't count unusable vertices. */
1625    out->esgs_ring_size = MIN2(max_esverts, max_gsprims * max_verts_per_prim) * esvert_lds_size * 4;
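   /* Illustrative example (values assumed): 128 usable ES vertices at an
    * esvert_lds_size of 4 dwords give esgs_ring_size = 128 * 4 * 4 = 2048
    * bytes.
    */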
1626 
1627    if (gs_info) {
1628       out->vgt_esgs_ring_itemsize = es_info->esgs_itemsize / 4;
1629    } else {
1630       out->vgt_esgs_ring_itemsize = 1;
1631    }
1632 
1633    assert(out->hw_max_esverts >= min_esverts); /* HW limitation */
1634 
1635    out->scratch_lds_base = gfx10_get_ngg_scratch_lds_base(device, es_info, gs_info, out);
1636 
1637    /* Get scratch LDS usage. */
1638    const struct radv_shader_info *info = gs_info ? gs_info : es_info;
1639    const unsigned scratch_lds_size = ac_ngg_get_scratch_lds_size(info->stage, info->workgroup_size, info->wave_size,
1640                                                                  pdev->use_ngg_streamout, info->has_ngg_culling);
1641    out->lds_size = out->scratch_lds_base + scratch_lds_size;
1642 
1643    unsigned workgroup_size =
1644       ac_compute_ngg_workgroup_size(max_esverts, max_gsprims * gs_num_invocations, max_out_vertices, prim_amp_factor);
1645    if (gs_info) {
1646       gs_info->workgroup_size = workgroup_size;
1647    }
1648    es_info->workgroup_size = workgroup_size;
1649 }
1650 
1651 static void
1652 radv_determine_ngg_settings(struct radv_device *device, struct radv_shader_stage *es_stage,
1653                             struct radv_shader_stage *fs_stage, const struct radv_graphics_state_key *gfx_state)
1654 {
1655    const struct radv_physical_device *pdev = radv_device_physical(device);
1656 
1657    assert(es_stage->stage == MESA_SHADER_VERTEX || es_stage->stage == MESA_SHADER_TESS_EVAL);
1658    assert(!fs_stage || fs_stage->stage == MESA_SHADER_FRAGMENT);
1659 
1660    /* NGG culling is implicitly disabled when the FS stage is unknown. */
1661    uint64_t ps_inputs_read = fs_stage ? fs_stage->nir->info.inputs_read : ~0;
1662 
1663    unsigned num_vertices_per_prim = 0;
1664    if (es_stage->stage == MESA_SHADER_VERTEX) {
1665       num_vertices_per_prim = radv_get_num_vertices_per_prim(gfx_state);
1666    } else if (es_stage->stage == MESA_SHADER_TESS_EVAL) {
1667       num_vertices_per_prim = es_stage->nir->info.tess.point_mode                                   ? 1
1668                               : es_stage->nir->info.tess._primitive_mode == TESS_PRIMITIVE_ISOLINES ? 2
1669                                                                                                     : 3;
1670    }
1671 
1672    es_stage->info.has_ngg_culling =
1673       radv_consider_culling(pdev, es_stage->nir, ps_inputs_read, num_vertices_per_prim, &es_stage->info);
1674 
1675    nir_function_impl *impl = nir_shader_get_entrypoint(es_stage->nir);
1676    es_stage->info.has_ngg_early_prim_export = exec_list_is_singular(&impl->body);
1677 
1678    /* NGG passthrough mode must be disabled when culling is used, and also when the
1679     * vertex shader exports the primitive ID.
1680     */
1681    es_stage->info.is_ngg_passthrough = !es_stage->info.has_ngg_culling && !(es_stage->stage == MESA_SHADER_VERTEX &&
1682                                                                             es_stage->info.outinfo.export_prim_id);
1683 }
1684 
1685 static void
1686 radv_link_shaders_info(struct radv_device *device, struct radv_shader_stage *producer,
1687                        struct radv_shader_stage *consumer, const struct radv_graphics_state_key *gfx_state)
1688 {
1689    const struct radv_physical_device *pdev = radv_device_physical(device);
1690 
1691    /* Export primitive ID and clip/cull distances if read by the FS, or export unconditionally when
1692     * the next stage is unknown (with graphics pipeline library).
1693     */
1694    if (producer->info.next_stage == MESA_SHADER_FRAGMENT ||
1695        !(gfx_state->lib_flags & VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_SHADER_BIT_EXT)) {
1696       struct radv_vs_output_info *outinfo = &producer->info.outinfo;
1697       const bool ps_prim_id_in = !consumer || consumer->info.ps.prim_id_input;
1698       const bool ps_clip_dists_in = !consumer || !!consumer->info.ps.input_clips_culls_mask;
1699 
1700       if (ps_prim_id_in && (producer->stage == MESA_SHADER_VERTEX || producer->stage == MESA_SHADER_TESS_EVAL)) {
1701          /* Mark the primitive ID as output when it's implicitly exported by VS or TES. */
1702          if (outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID] == AC_EXP_PARAM_UNDEFINED)
1703             outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID] = outinfo->param_exports++;
1704 
1705          outinfo->export_prim_id = true;
1706       }
1707 
1708       if (ps_clip_dists_in) {
1709          if (producer->nir->info.outputs_written & VARYING_BIT_CLIP_DIST0)
1710             outinfo->vs_output_param_offset[VARYING_SLOT_CLIP_DIST0] = outinfo->param_exports++;
1711          if (producer->nir->info.outputs_written & VARYING_BIT_CLIP_DIST1)
1712             outinfo->vs_output_param_offset[VARYING_SLOT_CLIP_DIST1] = outinfo->param_exports++;
1713       }
1714    }
1715 
1716    if (producer->stage == MESA_SHADER_VERTEX || producer->stage == MESA_SHADER_TESS_EVAL) {
1717       /* Compute NGG info (GFX10+) or GS info. */
1718       if (producer->info.is_ngg) {
1719          struct radv_shader_stage *gs_stage = consumer && consumer->stage == MESA_SHADER_GEOMETRY ? consumer : NULL;
1720          struct gfx10_ngg_info *out = gs_stage ? &gs_stage->info.ngg_info : &producer->info.ngg_info;
1721 
1722          /* Determine other NGG settings like culling for VS or TES without GS. */
1723          if (!gs_stage) {
1724             radv_determine_ngg_settings(device, producer, consumer, gfx_state);
1725          }
1726 
1727          gfx10_get_ngg_info(device, &producer->info, gs_stage ? &gs_stage->info : NULL, out);
1728       } else if (consumer && consumer->stage == MESA_SHADER_GEOMETRY) {
1729          struct radv_shader_info *gs_info = &consumer->info;
1730          struct radv_shader_info *es_info = &producer->info;
1731 
1732          es_info->workgroup_size = gs_info->workgroup_size;
1733       }
1734 
1735       if (consumer && consumer->stage == MESA_SHADER_GEOMETRY) {
1736          producer->info.gs_inputs_read = consumer->nir->info.inputs_read;
1737       }
1738    }
1739 
1740    if (producer->stage == MESA_SHADER_VERTEX && consumer && consumer->stage == MESA_SHADER_TESS_CTRL) {
1741       struct radv_shader_stage *vs_stage = producer;
1742       struct radv_shader_stage *tcs_stage = consumer;
1743 
1744       vs_stage->info.vs.hs_inputs_read = tcs_stage->nir->info.inputs_read;
1745 
1746       if (gfx_state->ts.patch_control_points) {
1747          vs_stage->info.workgroup_size =
1748             ac_compute_lshs_workgroup_size(pdev->info.gfx_level, MESA_SHADER_VERTEX, tcs_stage->info.num_tess_patches,
1749                                            gfx_state->ts.patch_control_points, tcs_stage->info.tcs.tcs_vertices_out);
1750 
1751          if (!radv_use_llvm_for_stage(pdev, MESA_SHADER_VERTEX)) {
1752             /* When the number of TCS input and output vertices are the same (typically 3):
1753              * - There is an equal amount of LS and HS invocations
1754              * - In case of merged LSHS shaders, the LS and HS halves of the shader always process
1755              *   the exact same vertex. We can use this knowledge to optimize them.
1756              *
1757              * We don't set tcs_in_out_eq if the float controls differ because that might involve
1758              * different float modes for the same block and our optimizer doesn't handle an
1759              * instruction dominating another with a different mode.
1760              */
1761             vs_stage->info.vs.tcs_in_out_eq =
1762                pdev->info.gfx_level >= GFX9 &&
1763                gfx_state->ts.patch_control_points == tcs_stage->info.tcs.tcs_vertices_out &&
1764                vs_stage->nir->info.float_controls_execution_mode == tcs_stage->nir->info.float_controls_execution_mode;
1765 
1766             if (vs_stage->info.vs.tcs_in_out_eq)
1767                vs_stage->info.vs.tcs_temp_only_input_mask =
1768                   tcs_stage->nir->info.inputs_read & vs_stage->nir->info.outputs_written &
1769                   ~tcs_stage->nir->info.tess.tcs_cross_invocation_inputs_read &
1770                   ~tcs_stage->nir->info.inputs_read_indirectly & ~vs_stage->nir->info.outputs_accessed_indirectly;
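            /* Inputs in this mask can stay in temporaries across the merged
             * LS/HS halves instead of round-tripping through LDS, because both
             * halves process the same vertex (a sketch of the intent; the mask
             * is consumed by the backend IO lowering).
             */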
1771          }
1772       }
1773    }
1774 
1775    /* Copy shader info between TCS<->TES. */
1776    if (producer->stage == MESA_SHADER_TESS_CTRL && consumer && consumer->stage == MESA_SHADER_TESS_EVAL) {
1777       struct radv_shader_stage *tcs_stage = producer;
1778       struct radv_shader_stage *tes_stage = consumer;
1779 
1780       tcs_stage->info.tcs.tes_reads_tess_factors = tes_stage->info.tes.reads_tess_factors;
1781       tcs_stage->info.tcs.tes_inputs_read = tes_stage->nir->info.inputs_read;
1782       tcs_stage->info.tcs.tes_patch_inputs_read = tes_stage->nir->info.patch_inputs_read;
1783       tcs_stage->info.tes._primitive_mode = tes_stage->nir->info.tess._primitive_mode;
1784 
1785       if (gfx_state->ts.patch_control_points)
1786          tes_stage->info.num_tess_patches = tcs_stage->info.num_tess_patches;
1787    }
1788 }
1789 
1790 static void
1791 radv_nir_shader_info_merge(const struct radv_shader_stage *src, struct radv_shader_stage *dst)
1792 {
1793    const struct radv_shader_info *src_info = &src->info;
1794    struct radv_shader_info *dst_info = &dst->info;
1795 
1796    assert((src->stage == MESA_SHADER_VERTEX && dst->stage == MESA_SHADER_TESS_CTRL) ||
1797           (src->stage == MESA_SHADER_VERTEX && dst->stage == MESA_SHADER_GEOMETRY) ||
1798           (src->stage == MESA_SHADER_TESS_EVAL && dst->stage == MESA_SHADER_GEOMETRY));
1799 
1800    dst_info->loads_push_constants |= src_info->loads_push_constants;
1801    dst_info->loads_dynamic_offsets |= src_info->loads_dynamic_offsets;
1802    dst_info->desc_set_used_mask |= src_info->desc_set_used_mask;
1803    dst_info->uses_view_index |= src_info->uses_view_index;
1804    dst_info->uses_prim_id |= src_info->uses_prim_id;
1805    dst_info->inline_push_constant_mask |= src_info->inline_push_constant_mask;
1806 
1807    /* Only inline all push constants if both stages allow it. */
1808    dst_info->can_inline_all_push_constants &= src_info->can_inline_all_push_constants;
1809 
1810    if (src->stage == MESA_SHADER_VERTEX) {
1811       dst_info->vs = src_info->vs;
1812    } else {
1813       dst_info->tes = src_info->tes;
1814    }
1815 
1816    if (dst->stage == MESA_SHADER_GEOMETRY)
1817       dst_info->gs.es_type = src->stage;
1818 }
1819 
1820 static const gl_shader_stage graphics_shader_order[] = {
1821    MESA_SHADER_VERTEX, MESA_SHADER_TESS_CTRL, MESA_SHADER_TESS_EVAL, MESA_SHADER_GEOMETRY,
1822 
1823    MESA_SHADER_TASK,   MESA_SHADER_MESH,
1824 };
1825 
1826 void
1827 radv_nir_shader_info_link(struct radv_device *device, const struct radv_graphics_state_key *gfx_state,
1828                           struct radv_shader_stage *stages)
1829 {
1830    const struct radv_physical_device *pdev = radv_device_physical(device);
1831 
1832    /* Walk backwards to link */
1833    struct radv_shader_stage *next_stage = stages[MESA_SHADER_FRAGMENT].nir ? &stages[MESA_SHADER_FRAGMENT] : NULL;
1834 
1835    for (int i = ARRAY_SIZE(graphics_shader_order) - 1; i >= 0; i--) {
1836       gl_shader_stage s = graphics_shader_order[i];
1837       if (!stages[s].nir)
1838          continue;
1839 
1840       radv_link_shaders_info(device, &stages[s], next_stage, gfx_state);
1841       next_stage = &stages[s];
1842    }
1843 
1844    if (pdev->info.gfx_level >= GFX9) {
1845       /* Merge shader info for VS+TCS. */
1846       if (stages[MESA_SHADER_VERTEX].nir && stages[MESA_SHADER_TESS_CTRL].nir) {
1847          radv_nir_shader_info_merge(&stages[MESA_SHADER_VERTEX], &stages[MESA_SHADER_TESS_CTRL]);
1848       }
1849 
1850       /* Merge shader info for VS+GS or TES+GS. */
1851       if ((stages[MESA_SHADER_VERTEX].nir || stages[MESA_SHADER_TESS_EVAL].nir) && stages[MESA_SHADER_GEOMETRY].nir) {
1852          gl_shader_stage pre_stage = stages[MESA_SHADER_TESS_EVAL].nir ? MESA_SHADER_TESS_EVAL : MESA_SHADER_VERTEX;
1853 
1854          radv_nir_shader_info_merge(&stages[pre_stage], &stages[MESA_SHADER_GEOMETRY]);
1855       }
1856    }
1857 }
1858 
1859 enum ac_hw_stage
1860 radv_select_hw_stage(const struct radv_shader_info *const info, const enum amd_gfx_level gfx_level)
1861 {
1862    switch (info->stage) {
1863    case MESA_SHADER_VERTEX:
1864       if (info->is_ngg)
1865          return AC_HW_NEXT_GEN_GEOMETRY_SHADER;
1866       else if (info->vs.as_es)
1867          return gfx_level >= GFX9 ? AC_HW_LEGACY_GEOMETRY_SHADER : AC_HW_EXPORT_SHADER;
1868       else if (info->vs.as_ls)
1869          return gfx_level >= GFX9 ? AC_HW_HULL_SHADER : AC_HW_LOCAL_SHADER;
1870       else
1871          return AC_HW_VERTEX_SHADER;
1872    case MESA_SHADER_TESS_EVAL:
1873       if (info->is_ngg)
1874          return AC_HW_NEXT_GEN_GEOMETRY_SHADER;
1875       else if (info->tes.as_es)
1876          return gfx_level >= GFX9 ? AC_HW_LEGACY_GEOMETRY_SHADER : AC_HW_EXPORT_SHADER;
1877       else
1878          return AC_HW_VERTEX_SHADER;
1879    case MESA_SHADER_TESS_CTRL:
1880       return AC_HW_HULL_SHADER;
1881    case MESA_SHADER_GEOMETRY:
1882       if (info->is_ngg)
1883          return AC_HW_NEXT_GEN_GEOMETRY_SHADER;
1884       else
1885          return AC_HW_LEGACY_GEOMETRY_SHADER;
1886    case MESA_SHADER_MESH:
1887       return AC_HW_NEXT_GEN_GEOMETRY_SHADER;
1888    case MESA_SHADER_FRAGMENT:
1889       return AC_HW_PIXEL_SHADER;
1890    case MESA_SHADER_COMPUTE:
1891    case MESA_SHADER_KERNEL:
1892    case MESA_SHADER_TASK:
1893    case MESA_SHADER_RAYGEN:
1894    case MESA_SHADER_ANY_HIT:
1895    case MESA_SHADER_CLOSEST_HIT:
1896    case MESA_SHADER_MISS:
1897    case MESA_SHADER_INTERSECTION:
1898    case MESA_SHADER_CALLABLE:
1899       return AC_HW_COMPUTE_SHADER;
1900    default:
1901       unreachable("Unsupported HW stage");
1902    }
1903 }
1904