1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "anv_private.h"
25
26 #include "genxml/gen_macros.h"
27 #include "genxml/genX_pack.h"
28 #include "genxml/genX_rt_pack.h"
29
30 #include "common/intel_compute_slm.h"
31 #include "common/intel_genX_state_brw.h"
32 #include "common/intel_l3_config.h"
33 #include "common/intel_sample_positions.h"
34 #include "nir/nir_xfb_info.h"
35 #include "vk_util.h"
36 #include "vk_format.h"
37 #include "vk_log.h"
38 #include "vk_render_pass.h"
39
40 static inline struct anv_batch *
41 anv_gfx_pipeline_add(struct anv_graphics_pipeline *pipeline,
42 struct anv_gfx_state_ptr *ptr,
43 uint32_t n_dwords)
44 {
45 struct anv_batch *batch = &pipeline->base.base.batch;
46
47 assert(ptr->len == 0 ||
48 (batch->next - batch->start) / 4 == (ptr->offset + ptr->len));
49 if (ptr->len == 0)
50 ptr->offset = (batch->next - batch->start) / 4;
51 ptr->len += n_dwords;
52
53 return batch;
54 }
55
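/* Editorial overview of the emit helpers below (derived from their
 * definitions, not authoritative documentation):
 *
 *   anv_pipeline_emit_tmp()   packs an instruction into a caller-provided
 *                             dword array rather than into the batch.
 *   anv_pipeline_emit()       reserves dwords in the pipeline batch through
 *                             anv_gfx_pipeline_add() and packs into them.
 *   anv_pipeline_emit_merge() packs into a stack temporary and ORs the result
 *                             with previously packed dwords (e.g. ones built
 *                             with anv_pipeline_emit_tmp()).
 *   anv_pipeline_emitn()      emits a variable-length instruction of n dwords
 *                             and returns a pointer to them.
 */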
56 #define anv_pipeline_emit_tmp(pipeline, field, cmd, name) \
57 for (struct cmd name = { __anv_cmd_header(cmd) }, \
58 *_dst = (void *) field; \
59 __builtin_expect(_dst != NULL, 1); \
60 ({ __anv_cmd_pack(cmd)(&(pipeline)->base.base.batch, \
61 _dst, &name); \
62 VG(VALGRIND_CHECK_MEM_IS_DEFINED(_dst, __anv_cmd_length(cmd) * 4)); \
63 _dst = NULL; \
64 }))
65
66 #define anv_pipeline_emit(pipeline, state, cmd, name) \
67 for (struct cmd name = { __anv_cmd_header(cmd) }, \
68 *_dst = anv_batch_emit_dwords( \
69 anv_gfx_pipeline_add(pipeline, \
70 &(pipeline)->state, \
71 __anv_cmd_length(cmd)), \
72 __anv_cmd_length(cmd)); \
73 __builtin_expect(_dst != NULL, 1); \
74 ({ __anv_cmd_pack(cmd)(&(pipeline)->base.base.batch, \
75 _dst, &name); \
76 VG(VALGRIND_CHECK_MEM_IS_DEFINED(_dst, __anv_cmd_length(cmd) * 4)); \
77 _dst = NULL; \
78 }))
79
80 #define anv_pipeline_emit_merge(pipeline, state, dwords, cmd, name) \
81 for (struct cmd name = { 0 }, \
82 *_dst = anv_batch_emit_dwords( \
83 anv_gfx_pipeline_add(pipeline, \
84 &(pipeline)->state, \
85 __anv_cmd_length(cmd)), \
86 __anv_cmd_length(cmd)); \
87 __builtin_expect(_dst != NULL, 1); \
88 ({ uint32_t _partial[__anv_cmd_length(cmd)]; \
89 assert((pipeline)->state.len == __anv_cmd_length(cmd)); \
90 __anv_cmd_pack(cmd)(&(pipeline)->base.base.batch, \
91 _partial, &name); \
92 for (uint32_t i = 0; i < __anv_cmd_length(cmd); i++) { \
93 ((uint32_t *)_dst)[i] = _partial[i] | dwords[i]; \
94 } \
95 VG(VALGRIND_CHECK_MEM_IS_DEFINED(_dst, __anv_cmd_length(cmd) * 4)); \
96 _dst = NULL; \
97 }))
98
99 #define anv_pipeline_emitn(pipeline, state, n, cmd, ...) ({ \
100 void *__dst = anv_batch_emit_dwords( \
101 anv_gfx_pipeline_add(pipeline, &(pipeline)->state, n), n); \
102 if (__dst) { \
103 struct cmd __template = { \
104 __anv_cmd_header(cmd), \
105 .DWordLength = n - __anv_cmd_length_bias(cmd), \
106 __VA_ARGS__ \
107 }; \
108 __anv_cmd_pack(cmd)(&pipeline->base.base.batch, \
109 __dst, &__template); \
110 } \
111 __dst; \
112 })
113
114 #define pipeline_needs_protected(pipeline) \
115 ((pipeline)->device->vk.enabled_features.protectedMemory)
116
117 static uint32_t
118 vertex_element_comp_control(enum isl_format format, unsigned comp)
119 {
120 uint8_t bits;
121 switch (comp) {
122 case 0: bits = isl_format_layouts[format].channels.r.bits; break;
123 case 1: bits = isl_format_layouts[format].channels.g.bits; break;
124 case 2: bits = isl_format_layouts[format].channels.b.bits; break;
125 case 3: bits = isl_format_layouts[format].channels.a.bits; break;
126 default: unreachable("Invalid component");
127 }
128
129 /*
130 * Take into account hardware restrictions when dealing with 64-bit floats.
131 *
132 * From Broadwell spec, command reference structures, page 586:
133 * "When SourceElementFormat is set to one of the *64*_PASSTHRU formats,
134 * 64-bit components are stored in the URB without any conversion. In
135 * this case, vertex elements must be written as 128 or 256 bits, with
136 * VFCOMP_STORE_0 being used to pad the output as required. E.g., if
137 * R64_PASSTHRU is used to copy a 64-bit Red component into the URB,
138 * Component 1 must be specified as VFCOMP_STORE_0 (with Components 2,3
139 * set to VFCOMP_NOSTORE) in order to output a 128-bit vertex element, or
140 * Components 1-3 must be specified as VFCOMP_STORE_0 in order to output
141 * a 256-bit vertex element. Likewise, use of R64G64B64_PASSTHRU requires
142 * Component 3 to be specified as VFCOMP_STORE_0 in order to output a
143 * 256-bit vertex element."
144 */
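/* Illustrative summary (not from the original source), matching the spec
 * example above: for components 0..3 this function returns
 *   R64_PASSTHRU       -> STORE_SRC, STORE_0,   NOSTORE,   NOSTORE  (128-bit)
 *   R64G64B64_PASSTHRU -> STORE_SRC, STORE_SRC, STORE_SRC, STORE_0  (256-bit)
 */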
145 if (bits) {
146 return VFCOMP_STORE_SRC;
147 } else if (comp >= 2 &&
148 !isl_format_layouts[format].channels.b.bits &&
149 isl_format_layouts[format].channels.r.type == ISL_RAW) {
150 /* When emitting 64-bit attributes, we need to write either 128 or 256
151 * bit chunks, using VFCOMP_NOSTORE when not writing the chunk, and
152 * VFCOMP_STORE_0 to pad the written chunk */
153 return VFCOMP_NOSTORE;
154 } else if (comp < 3 ||
155 isl_format_layouts[format].channels.r.type == ISL_RAW) {
156 /* Note we need to pad with value 0, not 1, due to hardware restrictions
157 * (see comment above) */
158 return VFCOMP_STORE_0;
159 } else if (isl_format_layouts[format].channels.r.type == ISL_UINT ||
160 isl_format_layouts[format].channels.r.type == ISL_SINT) {
161 assert(comp == 3);
162 return VFCOMP_STORE_1_INT;
163 } else {
164 assert(comp == 3);
165 return VFCOMP_STORE_1_FP;
166 }
167 }
168
169 void
170 genX(emit_vertex_input)(struct anv_batch *batch,
171 uint32_t *vertex_element_dws,
172 struct anv_graphics_pipeline *pipeline,
173 const struct vk_vertex_input_state *vi,
174 bool emit_in_pipeline)
175 {
176 const struct anv_device *device = pipeline->base.base.device;
177 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
178 const uint64_t inputs_read = vs_prog_data->inputs_read;
179 const uint64_t double_inputs_read =
180 vs_prog_data->double_inputs_read & inputs_read;
181 assert((inputs_read & ((1 << VERT_ATTRIB_GENERIC0) - 1)) == 0);
182 const uint32_t elements = inputs_read >> VERT_ATTRIB_GENERIC0;
183 const uint32_t elements_double = double_inputs_read >> VERT_ATTRIB_GENERIC0;
184
185 for (uint32_t i = 0; i < pipeline->vs_input_elements; i++) {
186 /* The SKL docs for VERTEX_ELEMENT_STATE say:
187 *
188 * "All elements must be valid from Element[0] to the last valid
189 * element. (I.e. if Element[2] is valid then Element[1] and
190 * Element[0] must also be valid)."
191 *
192 * The SKL docs for 3D_Vertex_Component_Control say:
193 *
194 * "Don't store this component. (Not valid for Component 0, but can
195 * be used for Component 1-3)."
196 *
197 * So we can't just leave a vertex element blank and hope for the best.
198 * We have to tell the VF hardware to put something in it; so we just
199 * store a bunch of zeros.
200 *
201 * TODO: Compact vertex elements so we never end up with holes.
202 */
203 struct GENX(VERTEX_ELEMENT_STATE) element = {
204 .Valid = true,
205 .Component0Control = VFCOMP_STORE_0,
206 .Component1Control = VFCOMP_STORE_0,
207 .Component2Control = VFCOMP_STORE_0,
208 .Component3Control = VFCOMP_STORE_0,
209 };
210 GENX(VERTEX_ELEMENT_STATE_pack)(NULL,
211 &vertex_element_dws[i * 2],
212 &element);
213 }
214
215 u_foreach_bit(a, vi->attributes_valid) {
216 enum isl_format format = anv_get_isl_format(device->info,
217 vi->attributes[a].format,
218 VK_IMAGE_ASPECT_COLOR_BIT,
219 VK_IMAGE_TILING_LINEAR);
220 assume(format < ISL_NUM_FORMATS);
221
222 uint32_t binding = vi->attributes[a].binding;
223 assert(binding < MAX_VBS);
224
225 if ((elements & (1 << a)) == 0)
226 continue; /* Binding unused */
227
228 uint32_t slot =
229 __builtin_popcount(elements & ((1 << a) - 1)) -
230 DIV_ROUND_UP(__builtin_popcount(elements_double &
231 ((1 << a) -1)), 2);
232
233 struct GENX(VERTEX_ELEMENT_STATE) element = {
234 .VertexBufferIndex = vi->attributes[a].binding,
235 .Valid = true,
236 .SourceElementFormat = format,
237 .EdgeFlagEnable = false,
238 .SourceElementOffset = vi->attributes[a].offset,
239 .Component0Control = vertex_element_comp_control(format, 0),
240 .Component1Control = vertex_element_comp_control(format, 1),
241 .Component2Control = vertex_element_comp_control(format, 2),
242 .Component3Control = vertex_element_comp_control(format, 3),
243 };
244 GENX(VERTEX_ELEMENT_STATE_pack)(NULL,
245 &vertex_element_dws[slot * 2],
246 &element);
247
248 /* On Broadwell and later, we have a separate VF_INSTANCING packet
249 * that controls instancing. On Haswell and prior, that's part of
250 * VERTEX_BUFFER_STATE which we emit later.
251 */
252 if (emit_in_pipeline) {
253 anv_pipeline_emit(pipeline, final.vf_instancing, GENX(3DSTATE_VF_INSTANCING), vfi) {
254 bool per_instance = vi->bindings[binding].input_rate ==
255 VK_VERTEX_INPUT_RATE_INSTANCE;
256 uint32_t divisor = vi->bindings[binding].divisor *
257 pipeline->instance_multiplier;
258
259 vfi.InstancingEnable = per_instance;
260 vfi.VertexElementIndex = slot;
261 vfi.InstanceDataStepRate = per_instance ? divisor : 1;
262 }
263 } else {
264 anv_batch_emit(batch, GENX(3DSTATE_VF_INSTANCING), vfi) {
265 bool per_instance = vi->bindings[binding].input_rate ==
266 VK_VERTEX_INPUT_RATE_INSTANCE;
267 uint32_t divisor = vi->bindings[binding].divisor *
268 pipeline->instance_multiplier;
269
270 vfi.InstancingEnable = per_instance;
271 vfi.VertexElementIndex = slot;
272 vfi.InstanceDataStepRate = per_instance ? divisor : 1;
273 }
274 }
275 }
276 }
277
278 static void
279 emit_vertex_input(struct anv_graphics_pipeline *pipeline,
280 const struct vk_graphics_pipeline_state *state,
281 const struct vk_vertex_input_state *vi)
282 {
283 /* Only pack the VERTEX_ELEMENT_STATE if not dynamic so we can just memcpy
284 * everything in gfx8_cmd_buffer.c
285 */
286 if (!BITSET_TEST(state->dynamic, MESA_VK_DYNAMIC_VI)) {
287 genX(emit_vertex_input)(NULL,
288 pipeline->vertex_input_data,
289 pipeline, vi, true /* emit_in_pipeline */);
290 }
291
292 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
293 const bool needs_svgs_elem = pipeline->svgs_count > 1 ||
294 !vs_prog_data->uses_drawid;
295 const uint32_t id_slot = pipeline->vs_input_elements;
296 const uint32_t drawid_slot = id_slot + needs_svgs_elem;
297 if (pipeline->svgs_count > 0) {
298 assert(pipeline->vertex_input_elems >= pipeline->svgs_count);
299 uint32_t slot_offset =
300 pipeline->vertex_input_elems - pipeline->svgs_count;
301
302 if (needs_svgs_elem) {
303 #if GFX_VER < 11
304 /* From the Broadwell PRM for the 3D_Vertex_Component_Control enum:
305 * "Within a VERTEX_ELEMENT_STATE structure, if a Component
306 * Control field is set to something other than VFCOMP_STORE_SRC,
307 * no higher-numbered Component Control fields may be set to
308 * VFCOMP_STORE_SRC"
309 *
310 * This means that if we have BaseInstance, we need BaseVertex as
311 * well. Just do all or nothing.
312 */
313 uint32_t base_ctrl = (vs_prog_data->uses_firstvertex ||
314 vs_prog_data->uses_baseinstance) ?
315 VFCOMP_STORE_SRC : VFCOMP_STORE_0;
316 #endif
317
318 struct GENX(VERTEX_ELEMENT_STATE) element = {
319 .VertexBufferIndex = ANV_SVGS_VB_INDEX,
320 .Valid = true,
321 .SourceElementFormat = ISL_FORMAT_R32G32_UINT,
322 #if GFX_VER >= 11
323 /* On gen11, these are taken care of by extra parameter slots */
324 .Component0Control = VFCOMP_STORE_0,
325 .Component1Control = VFCOMP_STORE_0,
326 #else
327 .Component0Control = base_ctrl,
328 .Component1Control = base_ctrl,
329 #endif
330 .Component2Control = VFCOMP_STORE_0,
331 .Component3Control = VFCOMP_STORE_0,
332 };
333 GENX(VERTEX_ELEMENT_STATE_pack)(NULL,
334 &pipeline->vertex_input_data[slot_offset * 2],
335 &element);
336 slot_offset++;
337
338 anv_pipeline_emit(pipeline, final.vf_sgvs_instancing,
339 GENX(3DSTATE_VF_INSTANCING), vfi) {
340 vfi.VertexElementIndex = id_slot;
341 }
342 }
343
344 if (vs_prog_data->uses_drawid) {
345 struct GENX(VERTEX_ELEMENT_STATE) element = {
346 .VertexBufferIndex = ANV_DRAWID_VB_INDEX,
347 .Valid = true,
348 .SourceElementFormat = ISL_FORMAT_R32_UINT,
349 #if GFX_VER >= 11
350 /* On gen11, this is taken care of by extra parameter slots */
351 .Component0Control = VFCOMP_STORE_0,
352 #else
353 .Component0Control = VFCOMP_STORE_SRC,
354 #endif
355 .Component1Control = VFCOMP_STORE_0,
356 .Component2Control = VFCOMP_STORE_0,
357 .Component3Control = VFCOMP_STORE_0,
358 };
359 GENX(VERTEX_ELEMENT_STATE_pack)(NULL,
360 &pipeline->vertex_input_data[slot_offset * 2],
361 &element);
362 slot_offset++;
363
364 anv_pipeline_emit(pipeline, final.vf_sgvs_instancing,
365 GENX(3DSTATE_VF_INSTANCING), vfi) {
366 vfi.VertexElementIndex = drawid_slot;
367 }
368 }
369 }
370
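/* Editorial note (a summary of the surrounding code, not authoritative):
 * 3DSTATE_VF_SGVS makes the VF unit write gl_VertexIndex / gl_InstanceIndex
 * into components .z / .w of the element at id_slot. On GFX_VER < 11 the
 * .x / .y components of that element carry base vertex / base instance read
 * from ANV_SVGS_VB_INDEX above; on GFX_VER >= 11 those come from the extra
 * parameter slots programmed in 3DSTATE_VF_SGVS_2 below.
 */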
371 anv_pipeline_emit(pipeline, final.vf_sgvs, GENX(3DSTATE_VF_SGVS), sgvs) {
372 sgvs.VertexIDEnable = vs_prog_data->uses_vertexid;
373 sgvs.VertexIDComponentNumber = 2;
374 sgvs.VertexIDElementOffset = id_slot;
375 sgvs.InstanceIDEnable = vs_prog_data->uses_instanceid;
376 sgvs.InstanceIDComponentNumber = 3;
377 sgvs.InstanceIDElementOffset = id_slot;
378 }
379
380 #if GFX_VER >= 11
381 anv_pipeline_emit(pipeline, final.vf_sgvs_2, GENX(3DSTATE_VF_SGVS_2), sgvs) {
382 /* gl_BaseVertex */
383 sgvs.XP0Enable = vs_prog_data->uses_firstvertex;
384 sgvs.XP0SourceSelect = XP0_PARAMETER;
385 sgvs.XP0ComponentNumber = 0;
386 sgvs.XP0ElementOffset = id_slot;
387
388 /* gl_BaseInstance */
389 sgvs.XP1Enable = vs_prog_data->uses_baseinstance;
390 sgvs.XP1SourceSelect = StartingInstanceLocation;
391 sgvs.XP1ComponentNumber = 1;
392 sgvs.XP1ElementOffset = id_slot;
393
394 /* gl_DrawID */
395 sgvs.XP2Enable = vs_prog_data->uses_drawid;
396 sgvs.XP2ComponentNumber = 0;
397 sgvs.XP2ElementOffset = drawid_slot;
398 }
399 #endif
400 }
401
402 void
403 genX(emit_urb_setup)(struct anv_device *device, struct anv_batch *batch,
404 const struct intel_l3_config *l3_config,
405 VkShaderStageFlags active_stages,
406 const struct intel_urb_config *urb_cfg_in,
407 struct intel_urb_config *urb_cfg_out,
408 enum intel_urb_deref_block_size *deref_block_size)
409 {
410 const struct intel_device_info *devinfo = device->info;
411
412 bool constrained;
413 intel_get_urb_config(devinfo, l3_config,
414 active_stages &
415 VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
416 active_stages & VK_SHADER_STAGE_GEOMETRY_BIT,
417 urb_cfg_out, deref_block_size,
418 &constrained);
419
420 #if INTEL_NEEDS_WA_16014912113
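/* Editorial note (a reading of the workaround code below, not the official
 * workaround text): when the tessellation-eval URB allocation changes, the
 * previous configuration is first re-emitted with 256 VS entries and zero
 * entries for the other stages, followed by an HDC pipeline flush, before
 * the new URB configuration is programmed further down.
 */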
421 if (intel_urb_setup_changed(urb_cfg_in, urb_cfg_out,
422 MESA_SHADER_TESS_EVAL) && urb_cfg_in->size[0] != 0) {
423 for (int i = 0; i <= MESA_SHADER_GEOMETRY; i++) {
424 anv_batch_emit(batch, GENX(3DSTATE_URB_VS), urb) {
425 urb._3DCommandSubOpcode += i;
426 urb.VSURBStartingAddress = urb_cfg_in->start[i];
427 urb.VSURBEntryAllocationSize = urb_cfg_in->size[i] - 1;
428 urb.VSNumberofURBEntries = i == 0 ? 256 : 0;
429 }
430 }
431 genx_batch_emit_pipe_control(batch, device->info, _3D,
432 ANV_PIPE_HDC_PIPELINE_FLUSH_BIT);
433 }
434 #endif
435
436 for (int i = 0; i <= MESA_SHADER_GEOMETRY; i++) {
437 anv_batch_emit(batch, GENX(3DSTATE_URB_VS), urb) {
438 urb._3DCommandSubOpcode += i;
439 urb.VSURBStartingAddress = urb_cfg_out->start[i];
440 urb.VSURBEntryAllocationSize = urb_cfg_out->size[i] - 1;
441 urb.VSNumberofURBEntries = urb_cfg_out->entries[i];
442 }
443 }
444 #if GFX_VERx10 >= 125
445 if (device->vk.enabled_extensions.EXT_mesh_shader) {
446 anv_batch_emit(batch, GENX(3DSTATE_URB_ALLOC_MESH), zero);
447 anv_batch_emit(batch, GENX(3DSTATE_URB_ALLOC_TASK), zero);
448 }
449 #endif
450 }
451
452 #if GFX_VERx10 >= 125
453 static void
454 emit_urb_setup_mesh(struct anv_graphics_pipeline *pipeline,
455 enum intel_urb_deref_block_size *deref_block_size)
456 {
457 const struct intel_device_info *devinfo = pipeline->base.base.device->info;
458
459 const struct brw_task_prog_data *task_prog_data =
460 anv_pipeline_has_stage(pipeline, MESA_SHADER_TASK) ?
461 get_task_prog_data(pipeline) : NULL;
462 const struct brw_mesh_prog_data *mesh_prog_data = get_mesh_prog_data(pipeline);
463
464 const struct intel_mesh_urb_allocation alloc =
465 intel_get_mesh_urb_config(devinfo, pipeline->base.base.l3_config,
466 task_prog_data ? task_prog_data->map.size_dw : 0,
467 mesh_prog_data->map.size_dw);
468
469 /* Zero out the primitive pipeline URB allocations. */
470 for (int i = 0; i <= MESA_SHADER_GEOMETRY; i++) {
471 anv_pipeline_emit(pipeline, final.urb, GENX(3DSTATE_URB_VS), urb) {
472 urb._3DCommandSubOpcode += i;
473 }
474 }
475
476 anv_pipeline_emit(pipeline, final.urb, GENX(3DSTATE_URB_ALLOC_TASK), urb) {
477 if (task_prog_data) {
478 urb.TASKURBEntryAllocationSize = alloc.task_entry_size_64b - 1;
479 urb.TASKNumberofURBEntriesSlice0 = alloc.task_entries;
480 urb.TASKNumberofURBEntriesSliceN = alloc.task_entries;
481 urb.TASKURBStartingAddressSlice0 = alloc.task_starting_address_8kb;
482 urb.TASKURBStartingAddressSliceN = alloc.task_starting_address_8kb;
483 }
484 }
485
486 anv_pipeline_emit(pipeline, final.urb, GENX(3DSTATE_URB_ALLOC_MESH), urb) {
487 urb.MESHURBEntryAllocationSize = alloc.mesh_entry_size_64b - 1;
488 urb.MESHNumberofURBEntriesSlice0 = alloc.mesh_entries;
489 urb.MESHNumberofURBEntriesSliceN = alloc.mesh_entries;
490 urb.MESHURBStartingAddressSlice0 = alloc.mesh_starting_address_8kb;
491 urb.MESHURBStartingAddressSliceN = alloc.mesh_starting_address_8kb;
492 }
493
494 *deref_block_size = alloc.deref_block_size;
495 }
496 #endif
497
498 static void
499 emit_urb_setup(struct anv_graphics_pipeline *pipeline,
500 enum intel_urb_deref_block_size *deref_block_size)
501 {
502 #if GFX_VERx10 >= 125
503 if (anv_pipeline_is_mesh(pipeline)) {
504 emit_urb_setup_mesh(pipeline, deref_block_size);
505 return;
506 }
507 #endif
508 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
509 const struct brw_vue_prog_data *prog_data =
510 !anv_pipeline_has_stage(pipeline, i) ? NULL :
511 (const struct brw_vue_prog_data *) pipeline->base.shaders[i]->prog_data;
512
513 pipeline->urb_cfg.size[i] = prog_data ? prog_data->urb_entry_size : 1;
514 }
515
516 struct anv_device *device = pipeline->base.base.device;
517 const struct intel_device_info *devinfo = device->info;
518
519
520 bool constrained;
521 intel_get_urb_config(devinfo,
522 pipeline->base.base.l3_config,
523 pipeline->base.base.active_stages &
524 VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
525 pipeline->base.base.active_stages &
526 VK_SHADER_STAGE_GEOMETRY_BIT,
527 &pipeline->urb_cfg, deref_block_size,
528 &constrained);
529
530 for (int i = 0; i <= MESA_SHADER_GEOMETRY; i++) {
531 anv_pipeline_emit(pipeline, final.urb, GENX(3DSTATE_URB_VS), urb) {
532 urb._3DCommandSubOpcode += i;
533 urb.VSURBStartingAddress = pipeline->urb_cfg.start[i];
534 urb.VSURBEntryAllocationSize = pipeline->urb_cfg.size[i] - 1;
535 urb.VSNumberofURBEntries = pipeline->urb_cfg.entries[i];
536 }
537 }
538
539 #if GFX_VERx10 >= 125
540 if (device->vk.enabled_extensions.EXT_mesh_shader) {
541 anv_pipeline_emit(pipeline, final.urb, GENX(3DSTATE_URB_ALLOC_TASK), zero);
542 anv_pipeline_emit(pipeline, final.urb, GENX(3DSTATE_URB_ALLOC_MESH), zero);
543 }
544 #endif
545
546 }
547
548 static bool
549 sbe_primitive_id_override(struct anv_graphics_pipeline *pipeline)
550 {
551 const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
552 if (!wm_prog_data)
553 return false;
554
555 const struct intel_vue_map *fs_input_map =
556 &anv_pipeline_get_last_vue_prog_data(pipeline)->vue_map;
557
558 return (wm_prog_data->inputs & VARYING_BIT_PRIMITIVE_ID) &&
559 fs_input_map->varying_to_slot[VARYING_SLOT_PRIMITIVE_ID] == -1;
560 }
561
562 static void
563 emit_3dstate_sbe(struct anv_graphics_pipeline *pipeline)
564 {
565 const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
566
567 if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT)) {
568 anv_pipeline_emit(pipeline, final.sbe, GENX(3DSTATE_SBE), sbe);
569 anv_pipeline_emit(pipeline, final.sbe_swiz, GENX(3DSTATE_SBE_SWIZ), sbe);
570 #if GFX_VERx10 >= 125
571 if (anv_pipeline_is_mesh(pipeline))
572 anv_pipeline_emit(pipeline, final.sbe_mesh, GENX(3DSTATE_SBE_MESH), sbe);
573 #endif
574 return;
575 }
576
577 anv_pipeline_emit(pipeline, final.sbe, GENX(3DSTATE_SBE), sbe) {
578 anv_pipeline_emit(pipeline, final.sbe_swiz, GENX(3DSTATE_SBE_SWIZ), swiz) {
579
580 /* TODO(mesh): Figure out cases where we need attribute swizzling. See also
581 * calculate_urb_setup() and related functions.
582 */
583 sbe.AttributeSwizzleEnable = anv_pipeline_is_primitive(pipeline);
584 sbe.PointSpriteTextureCoordinateOrigin = UPPERLEFT;
585 sbe.NumberofSFOutputAttributes = wm_prog_data->num_varying_inputs;
586 sbe.ConstantInterpolationEnable = wm_prog_data->flat_inputs;
587
588 for (unsigned i = 0; i < 32; i++)
589 sbe.AttributeActiveComponentFormat[i] = ACF_XYZW;
590
591 if (anv_pipeline_is_primitive(pipeline)) {
592 const struct intel_vue_map *fs_input_map =
593 &anv_pipeline_get_last_vue_prog_data(pipeline)->vue_map;
594
595 int first_slot =
596 brw_compute_first_urb_slot_required(wm_prog_data->inputs,
597 fs_input_map);
598 assert(first_slot % 2 == 0);
599 unsigned urb_entry_read_offset = first_slot / 2;
600 int max_source_attr = 0;
601 for (uint8_t idx = 0; idx < wm_prog_data->urb_setup_attribs_count; idx++) {
602 uint8_t attr = wm_prog_data->urb_setup_attribs[idx];
603 int input_index = wm_prog_data->urb_setup[attr];
604
605 assert(0 <= input_index);
606
607 /* gl_Viewport, gl_Layer and FragmentShadingRateKHR are stored in the
608 * VUE header
609 */
610 if (attr == VARYING_SLOT_VIEWPORT ||
611 attr == VARYING_SLOT_LAYER ||
612 attr == VARYING_SLOT_PRIMITIVE_SHADING_RATE) {
613 continue;
614 }
615
616 if (attr == VARYING_SLOT_PNTC) {
617 sbe.PointSpriteTextureCoordinateEnable = 1 << input_index;
618 continue;
619 }
620
621 const int slot = fs_input_map->varying_to_slot[attr];
622
623 if (slot == -1) {
624 /* This attribute does not exist in the VUE--that means that
625 * the vertex shader did not write to it. It could be that it's
626 * a regular varying read by the fragment shader but not
627 * written by the vertex shader or it's gl_PrimitiveID. In the
628 * first case the value is undefined, in the second it needs to
629 * be gl_PrimitiveID.
630 */
631 swiz.Attribute[input_index].ConstantSource = PRIM_ID;
632 swiz.Attribute[input_index].ComponentOverrideX = true;
633 swiz.Attribute[input_index].ComponentOverrideY = true;
634 swiz.Attribute[input_index].ComponentOverrideZ = true;
635 swiz.Attribute[input_index].ComponentOverrideW = true;
636 continue;
637 }
638
639 /* We have to subtract two slots to account for the URB entry
640 * output read offset in the VS and GS stages.
641 */
642 const int source_attr = slot - 2 * urb_entry_read_offset;
643 assert(source_attr >= 0 && source_attr < 32);
644 max_source_attr = MAX2(max_source_attr, source_attr);
645 /* The hardware can only apply overrides to the first 16 attributes;
646 * the remaining (up to 16) have to be lined up so that the input
647 * index equals the output index. We'll need to do some tweaking to
648 * make sure that's the case.
649 */
650 if (input_index < 16)
651 swiz.Attribute[input_index].SourceAttribute = source_attr;
652 else
653 assert(source_attr == input_index);
654 }
655
656 sbe.VertexURBEntryReadOffset = urb_entry_read_offset;
657 sbe.VertexURBEntryReadLength = DIV_ROUND_UP(max_source_attr + 1, 2);
658 sbe.ForceVertexURBEntryReadOffset = true;
659 sbe.ForceVertexURBEntryReadLength = true;
660
661 /* Ask the hardware to supply PrimitiveID if the fragment shader
662 * reads it but a previous stage didn't write one.
663 */
664 if (sbe_primitive_id_override(pipeline)) {
665 sbe.PrimitiveIDOverrideAttributeSelect =
666 wm_prog_data->urb_setup[VARYING_SLOT_PRIMITIVE_ID];
667 sbe.PrimitiveIDOverrideComponentX = true;
668 sbe.PrimitiveIDOverrideComponentY = true;
669 sbe.PrimitiveIDOverrideComponentZ = true;
670 sbe.PrimitiveIDOverrideComponentW = true;
671 }
672 } else {
673 assert(anv_pipeline_is_mesh(pipeline));
674 #if GFX_VERx10 >= 125
675 const struct brw_mesh_prog_data *mesh_prog_data = get_mesh_prog_data(pipeline);
676 anv_pipeline_emit(pipeline, final.sbe_mesh,
677 GENX(3DSTATE_SBE_MESH), sbe_mesh) {
678 const struct brw_mue_map *mue = &mesh_prog_data->map;
679
680 assert(mue->per_vertex_header_size_dw % 8 == 0);
681 sbe_mesh.PerVertexURBEntryOutputReadOffset = mue->per_vertex_header_size_dw / 8;
682 sbe_mesh.PerVertexURBEntryOutputReadLength = DIV_ROUND_UP(mue->per_vertex_data_size_dw, 8);
683
684 /* Clip distance array is passed in the per-vertex header so that
685 * it can be consumed by the HW. If user wants to read it in the
686 * FS, adjust the offset and length to cover it. Conveniently it
687 * is at the end of the per-vertex header, right before per-vertex
688 * attributes.
689 *
690 * Note that FS attribute reading must be aware that the clip
691 * distances have fixed position.
692 */
693 if (mue->per_vertex_header_size_dw > 8 &&
694 (wm_prog_data->urb_setup[VARYING_SLOT_CLIP_DIST0] >= 0 ||
695 wm_prog_data->urb_setup[VARYING_SLOT_CLIP_DIST1] >= 0)) {
696 sbe_mesh.PerVertexURBEntryOutputReadOffset -= 1;
697 sbe_mesh.PerVertexURBEntryOutputReadLength += 1;
698 }
699
700 if (mue->user_data_in_vertex_header) {
701 sbe_mesh.PerVertexURBEntryOutputReadOffset -= 1;
702 sbe_mesh.PerVertexURBEntryOutputReadLength += 1;
703 }
704
705 assert(mue->per_primitive_header_size_dw % 8 == 0);
706 sbe_mesh.PerPrimitiveURBEntryOutputReadOffset =
707 mue->per_primitive_header_size_dw / 8;
708 sbe_mesh.PerPrimitiveURBEntryOutputReadLength =
709 DIV_ROUND_UP(mue->per_primitive_data_size_dw, 8);
710
711 /* Just like with clip distances, if Primitive Shading Rate,
712 * Viewport Index or Layer is read back in the FS, adjust the
713 * offset and length to cover the Primitive Header, where PSR,
714 * Viewport Index & Layer are stored.
715 */
716 if (wm_prog_data->urb_setup[VARYING_SLOT_VIEWPORT] >= 0 ||
717 wm_prog_data->urb_setup[VARYING_SLOT_PRIMITIVE_SHADING_RATE] >= 0 ||
718 wm_prog_data->urb_setup[VARYING_SLOT_LAYER] >= 0 ||
719 mue->user_data_in_primitive_header) {
720 assert(sbe_mesh.PerPrimitiveURBEntryOutputReadOffset > 0);
721 sbe_mesh.PerPrimitiveURBEntryOutputReadOffset -= 1;
722 sbe_mesh.PerPrimitiveURBEntryOutputReadLength += 1;
723 }
724 }
725 #endif
726 }
727 }
728 }
729 }
730
731 /** Returns the final polygon mode for rasterization
732 *
733 * This function takes into account polygon mode, primitive topology and the
734 * different shader stages which might generate their own types of primitives.
735 */
736 VkPolygonMode
737 genX(raster_polygon_mode)(const struct anv_graphics_pipeline *pipeline,
738 VkPolygonMode polygon_mode,
739 VkPrimitiveTopology primitive_topology)
740 {
741 if (anv_pipeline_is_mesh(pipeline)) {
742 switch (get_mesh_prog_data(pipeline)->primitive_type) {
743 case MESA_PRIM_POINTS:
744 return VK_POLYGON_MODE_POINT;
745 case MESA_PRIM_LINES:
746 return VK_POLYGON_MODE_LINE;
747 case MESA_PRIM_TRIANGLES:
748 return polygon_mode;
749 default:
750 unreachable("invalid primitive type for mesh");
751 }
752 } else if (anv_pipeline_has_stage(pipeline, MESA_SHADER_GEOMETRY)) {
753 switch (get_gs_prog_data(pipeline)->output_topology) {
754 case _3DPRIM_POINTLIST:
755 return VK_POLYGON_MODE_POINT;
756
757 case _3DPRIM_LINELIST:
758 case _3DPRIM_LINESTRIP:
759 case _3DPRIM_LINELOOP:
760 return VK_POLYGON_MODE_LINE;
761
762 case _3DPRIM_TRILIST:
763 case _3DPRIM_TRIFAN:
764 case _3DPRIM_TRISTRIP:
765 case _3DPRIM_RECTLIST:
766 case _3DPRIM_QUADLIST:
767 case _3DPRIM_QUADSTRIP:
768 case _3DPRIM_POLYGON:
769 return polygon_mode;
770 }
771 unreachable("Unsupported GS output topology");
772 } else if (anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL)) {
773 switch (get_tes_prog_data(pipeline)->output_topology) {
774 case INTEL_TESS_OUTPUT_TOPOLOGY_POINT:
775 return VK_POLYGON_MODE_POINT;
776
777 case INTEL_TESS_OUTPUT_TOPOLOGY_LINE:
778 return VK_POLYGON_MODE_LINE;
779
780 case INTEL_TESS_OUTPUT_TOPOLOGY_TRI_CW:
781 case INTEL_TESS_OUTPUT_TOPOLOGY_TRI_CCW:
782 return polygon_mode;
783 }
784 unreachable("Unsupported TCS output topology");
785 } else {
786 switch (primitive_topology) {
787 case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
788 return VK_POLYGON_MODE_POINT;
789
790 case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
791 case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
792 case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
793 case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
794 return VK_POLYGON_MODE_LINE;
795
796 case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
797 case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
798 case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
799 case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
800 case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
801 return polygon_mode;
802
803 default:
804 unreachable("Unsupported primitive topology");
805 }
806 }
807 }
808
809 const uint32_t genX(vk_to_intel_cullmode)[] = {
810 [VK_CULL_MODE_NONE] = CULLMODE_NONE,
811 [VK_CULL_MODE_FRONT_BIT] = CULLMODE_FRONT,
812 [VK_CULL_MODE_BACK_BIT] = CULLMODE_BACK,
813 [VK_CULL_MODE_FRONT_AND_BACK] = CULLMODE_BOTH
814 };
815
816 const uint32_t genX(vk_to_intel_fillmode)[] = {
817 [VK_POLYGON_MODE_FILL] = FILL_MODE_SOLID,
818 [VK_POLYGON_MODE_LINE] = FILL_MODE_WIREFRAME,
819 [VK_POLYGON_MODE_POINT] = FILL_MODE_POINT,
820 };
821
822 const uint32_t genX(vk_to_intel_front_face)[] = {
823 [VK_FRONT_FACE_COUNTER_CLOCKWISE] = 1,
824 [VK_FRONT_FACE_CLOCKWISE] = 0
825 };
826
827 static void
828 emit_rs_state(struct anv_graphics_pipeline *pipeline,
829 const struct vk_input_assembly_state *ia,
830 const struct vk_rasterization_state *rs,
831 const struct vk_multisample_state *ms,
832 const struct vk_render_pass_state *rp,
833 enum intel_urb_deref_block_size urb_deref_block_size)
834 {
835 anv_pipeline_emit(pipeline, partial.sf, GENX(3DSTATE_SF), sf) {
836 sf.ViewportTransformEnable = true;
837 sf.StatisticsEnable = true;
838 sf.VertexSubPixelPrecisionSelect = _8Bit;
839 sf.AALineDistanceMode = true;
840
841 #if GFX_VER >= 12
842 sf.DerefBlockSize = urb_deref_block_size;
843 #endif
844
845 bool point_from_shader;
846 if (anv_pipeline_is_primitive(pipeline)) {
847 const struct brw_vue_prog_data *last_vue_prog_data =
848 anv_pipeline_get_last_vue_prog_data(pipeline);
849 point_from_shader = last_vue_prog_data->vue_map.slots_valid & VARYING_BIT_PSIZ;
850 } else {
851 assert(anv_pipeline_is_mesh(pipeline));
852 const struct brw_mesh_prog_data *mesh_prog_data = get_mesh_prog_data(pipeline);
853 point_from_shader = mesh_prog_data->map.start_dw[VARYING_SLOT_PSIZ] >= 0;
854 }
855
856 if (point_from_shader) {
857 sf.PointWidthSource = Vertex;
858 } else {
859 sf.PointWidthSource = State;
860 sf.PointWidth = 1.0;
861 }
862 }
863
864 anv_pipeline_emit(pipeline, partial.raster, GENX(3DSTATE_RASTER), raster) {
865 /* For details on 3DSTATE_RASTER multisample state, see the BSpec table
866 * "Multisample Modes State".
867 */
868 /* NOTE: 3DSTATE_RASTER::ForcedSampleCount affects the BDW and SKL PMA fix
869 * computations. If we ever set this bit to a different value, they will
870 * need to be updated accordingly.
871 */
872 raster.ForcedSampleCount = FSC_NUMRASTSAMPLES_0;
873 raster.ForceMultisampling = false;
874
875 raster.ScissorRectangleEnable = true;
876 }
877 }
878
879 static void
880 emit_ms_state(struct anv_graphics_pipeline *pipeline,
881 const struct vk_multisample_state *ms)
882 {
883 anv_pipeline_emit(pipeline, partial.ms, GENX(3DSTATE_MULTISAMPLE), ms) {
884 ms.PixelLocation = CENTER;
885
886 /* The PRM says that this bit is valid only for DX9:
887 *
888 * SW can choose to set this bit only for DX9 API. DX10/OGL API's
889 * should not have any effect by setting or not setting this bit.
890 */
891 ms.PixelPositionOffsetEnable = false;
892 }
893 }
894
895 const uint32_t genX(vk_to_intel_logic_op)[] = {
896 [VK_LOGIC_OP_COPY] = LOGICOP_COPY,
897 [VK_LOGIC_OP_CLEAR] = LOGICOP_CLEAR,
898 [VK_LOGIC_OP_AND] = LOGICOP_AND,
899 [VK_LOGIC_OP_AND_REVERSE] = LOGICOP_AND_REVERSE,
900 [VK_LOGIC_OP_AND_INVERTED] = LOGICOP_AND_INVERTED,
901 [VK_LOGIC_OP_NO_OP] = LOGICOP_NOOP,
902 [VK_LOGIC_OP_XOR] = LOGICOP_XOR,
903 [VK_LOGIC_OP_OR] = LOGICOP_OR,
904 [VK_LOGIC_OP_NOR] = LOGICOP_NOR,
905 [VK_LOGIC_OP_EQUIVALENT] = LOGICOP_EQUIV,
906 [VK_LOGIC_OP_INVERT] = LOGICOP_INVERT,
907 [VK_LOGIC_OP_OR_REVERSE] = LOGICOP_OR_REVERSE,
908 [VK_LOGIC_OP_COPY_INVERTED] = LOGICOP_COPY_INVERTED,
909 [VK_LOGIC_OP_OR_INVERTED] = LOGICOP_OR_INVERTED,
910 [VK_LOGIC_OP_NAND] = LOGICOP_NAND,
911 [VK_LOGIC_OP_SET] = LOGICOP_SET,
912 };
913
914 const uint32_t genX(vk_to_intel_compare_op)[] = {
915 [VK_COMPARE_OP_NEVER] = PREFILTEROP_NEVER,
916 [VK_COMPARE_OP_LESS] = PREFILTEROP_LESS,
917 [VK_COMPARE_OP_EQUAL] = PREFILTEROP_EQUAL,
918 [VK_COMPARE_OP_LESS_OR_EQUAL] = PREFILTEROP_LEQUAL,
919 [VK_COMPARE_OP_GREATER] = PREFILTEROP_GREATER,
920 [VK_COMPARE_OP_NOT_EQUAL] = PREFILTEROP_NOTEQUAL,
921 [VK_COMPARE_OP_GREATER_OR_EQUAL] = PREFILTEROP_GEQUAL,
922 [VK_COMPARE_OP_ALWAYS] = PREFILTEROP_ALWAYS,
923 };
924
925 const uint32_t genX(vk_to_intel_stencil_op)[] = {
926 [VK_STENCIL_OP_KEEP] = STENCILOP_KEEP,
927 [VK_STENCIL_OP_ZERO] = STENCILOP_ZERO,
928 [VK_STENCIL_OP_REPLACE] = STENCILOP_REPLACE,
929 [VK_STENCIL_OP_INCREMENT_AND_CLAMP] = STENCILOP_INCRSAT,
930 [VK_STENCIL_OP_DECREMENT_AND_CLAMP] = STENCILOP_DECRSAT,
931 [VK_STENCIL_OP_INVERT] = STENCILOP_INVERT,
932 [VK_STENCIL_OP_INCREMENT_AND_WRAP] = STENCILOP_INCR,
933 [VK_STENCIL_OP_DECREMENT_AND_WRAP] = STENCILOP_DECR,
934 };
935
936 const uint32_t genX(vk_to_intel_primitive_type)[] = {
937 [VK_PRIMITIVE_TOPOLOGY_POINT_LIST] = _3DPRIM_POINTLIST,
938 [VK_PRIMITIVE_TOPOLOGY_LINE_LIST] = _3DPRIM_LINELIST,
939 [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP] = _3DPRIM_LINESTRIP,
940 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST] = _3DPRIM_TRILIST,
941 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP] = _3DPRIM_TRISTRIP,
942 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN] = _3DPRIM_TRIFAN,
943 [VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY] = _3DPRIM_LINELIST_ADJ,
944 [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY] = _3DPRIM_LINESTRIP_ADJ,
945 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY] = _3DPRIM_TRILIST_ADJ,
946 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
947 };
948
949 static void
950 emit_3dstate_clip(struct anv_graphics_pipeline *pipeline,
951 const struct vk_input_assembly_state *ia,
952 const struct vk_viewport_state *vp,
953 const struct vk_rasterization_state *rs)
954 {
955 const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
956 (void) wm_prog_data;
957
958 anv_pipeline_emit(pipeline, partial.clip, GENX(3DSTATE_CLIP), clip) {
959 clip.ClipEnable = true;
960 clip.StatisticsEnable = true;
961 clip.EarlyCullEnable = true;
962 clip.GuardbandClipTestEnable = true;
963
964 clip.VertexSubPixelPrecisionSelect = _8Bit;
965 clip.ClipMode = CLIPMODE_NORMAL;
966
967 clip.MinimumPointWidth = 0.125;
968 clip.MaximumPointWidth = 255.875;
969
970 /* TODO(mesh): Multiview. */
971 if (anv_pipeline_is_primitive(pipeline)) {
972 const struct brw_vue_prog_data *last =
973 anv_pipeline_get_last_vue_prog_data(pipeline);
974
975 /* From the Vulkan 1.0.45 spec:
976 *
977 * "If the last active vertex processing stage shader entry point's
978 * interface does not include a variable decorated with Layer, then
979 * the first layer is used."
980 */
981 clip.ForceZeroRTAIndexEnable =
982 !(last->vue_map.slots_valid & VARYING_BIT_LAYER);
983
984 } else if (anv_pipeline_is_mesh(pipeline)) {
985 const struct brw_mesh_prog_data *mesh_prog_data = get_mesh_prog_data(pipeline);
986
987 clip.ForceZeroRTAIndexEnable =
988 mesh_prog_data->map.start_dw[VARYING_SLOT_LAYER] < 0;
989 }
990
991 clip.NonPerspectiveBarycentricEnable = wm_prog_data ?
992 wm_prog_data->uses_nonperspective_interp_modes : 0;
993 }
994
995 #if GFX_VERx10 >= 125
996 if (anv_pipeline_is_mesh(pipeline)) {
997 const struct brw_mesh_prog_data *mesh_prog_data = get_mesh_prog_data(pipeline);
998 anv_pipeline_emit(pipeline, final.clip_mesh,
999 GENX(3DSTATE_CLIP_MESH), clip_mesh) {
1000 clip_mesh.PrimitiveHeaderEnable = mesh_prog_data->map.per_primitive_header_size_dw > 0;
1001 clip_mesh.UserClipDistanceClipTestEnableBitmask = mesh_prog_data->clip_distance_mask;
1002 clip_mesh.UserClipDistanceCullTestEnableBitmask = mesh_prog_data->cull_distance_mask;
1003 }
1004 }
1005 #endif
1006 }
1007
1008 static void
1009 emit_3dstate_streamout(struct anv_graphics_pipeline *pipeline,
1010 const struct vk_rasterization_state *rs)
1011 {
1012 const struct brw_vue_prog_data *prog_data =
1013 anv_pipeline_get_last_vue_prog_data(pipeline);
1014 const struct intel_vue_map *vue_map = &prog_data->vue_map;
1015
1016 nir_xfb_info *xfb_info;
1017 if (anv_pipeline_has_stage(pipeline, MESA_SHADER_GEOMETRY))
1018 xfb_info = pipeline->base.shaders[MESA_SHADER_GEOMETRY]->xfb_info;
1019 else if (anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL))
1020 xfb_info = pipeline->base.shaders[MESA_SHADER_TESS_EVAL]->xfb_info;
1021 else
1022 xfb_info = pipeline->base.shaders[MESA_SHADER_VERTEX]->xfb_info;
1023
1024 if (xfb_info) {
1025 struct GENX(SO_DECL) so_decl[MAX_XFB_STREAMS][128];
1026 int next_offset[MAX_XFB_BUFFERS] = {0, 0, 0, 0};
1027 int decls[MAX_XFB_STREAMS] = {0, 0, 0, 0};
1028
1029 memset(so_decl, 0, sizeof(so_decl));
1030
1031 for (unsigned i = 0; i < xfb_info->output_count; i++) {
1032 const nir_xfb_output_info *output = &xfb_info->outputs[i];
1033 unsigned buffer = output->buffer;
1034 unsigned stream = xfb_info->buffer_to_stream[buffer];
1035
1036 /* Our hardware is unusual in that it requires us to program SO_DECLs
1037 * for fake "hole" components, rather than simply taking the offset
1038 * for each real varying. Each hole can have size 1, 2, 3, or 4; we
1039 * program as many size = 4 holes as we can, then a final hole to
1040 * accommodate the final 1, 2, or 3 remaining.
1041 */
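/* Illustrative example (not from the original source): a 20-byte gap
 * (5 dwords) between the previous output and this one becomes two holes,
 * the first with ComponentMask 0xf (4 dwords) and the second with
 * ComponentMask 0x1 (1 dword), per the loop below.
 */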
1042 int hole_dwords = (output->offset - next_offset[buffer]) / 4;
1043 while (hole_dwords > 0) {
1044 so_decl[stream][decls[stream]++] = (struct GENX(SO_DECL)) {
1045 .HoleFlag = 1,
1046 .OutputBufferSlot = buffer,
1047 .ComponentMask = (1 << MIN2(hole_dwords, 4)) - 1,
1048 };
1049 hole_dwords -= 4;
1050 }
1051
1052 int varying = output->location;
1053 uint8_t component_mask = output->component_mask;
1054 /* VARYING_SLOT_PSIZ contains four scalar fields packed together:
1055 * - VARYING_SLOT_PRIMITIVE_SHADING_RATE in VARYING_SLOT_PSIZ.x
1056 * - VARYING_SLOT_LAYER in VARYING_SLOT_PSIZ.y
1057 * - VARYING_SLOT_VIEWPORT in VARYING_SLOT_PSIZ.z
1058 * - VARYING_SLOT_PSIZ in VARYING_SLOT_PSIZ.w
1059 */
1060 if (varying == VARYING_SLOT_PRIMITIVE_SHADING_RATE) {
1061 varying = VARYING_SLOT_PSIZ;
1062 component_mask = 1 << 0; // SO_DECL_COMPMASK_X
1063 } else if (varying == VARYING_SLOT_LAYER) {
1064 varying = VARYING_SLOT_PSIZ;
1065 component_mask = 1 << 1; // SO_DECL_COMPMASK_Y
1066 } else if (varying == VARYING_SLOT_VIEWPORT) {
1067 varying = VARYING_SLOT_PSIZ;
1068 component_mask = 1 << 2; // SO_DECL_COMPMASK_Z
1069 } else if (varying == VARYING_SLOT_PSIZ) {
1070 component_mask = 1 << 3; // SO_DECL_COMPMASK_W
1071 }
1072
1073 next_offset[buffer] = output->offset +
1074 __builtin_popcount(component_mask) * 4;
1075
1076 const int slot = vue_map->varying_to_slot[varying];
1077 if (slot < 0) {
1078 /* This can happen if the shader never writes to the varying.
1079 * Insert a hole instead of actual varying data.
1080 */
1081 so_decl[stream][decls[stream]++] = (struct GENX(SO_DECL)) {
1082 .HoleFlag = true,
1083 .OutputBufferSlot = buffer,
1084 .ComponentMask = component_mask,
1085 };
1086 } else {
1087 so_decl[stream][decls[stream]++] = (struct GENX(SO_DECL)) {
1088 .OutputBufferSlot = buffer,
1089 .RegisterIndex = slot,
1090 .ComponentMask = component_mask,
1091 };
1092 }
1093 }
1094
1095 int max_decls = 0;
1096 for (unsigned s = 0; s < MAX_XFB_STREAMS; s++)
1097 max_decls = MAX2(max_decls, decls[s]);
1098
1099 uint8_t sbs[MAX_XFB_STREAMS] = { };
1100 for (unsigned b = 0; b < MAX_XFB_BUFFERS; b++) {
1101 if (xfb_info->buffers_written & (1 << b))
1102 sbs[xfb_info->buffer_to_stream[b]] |= 1 << b;
1103 }
1104
1105 uint32_t *dw = anv_pipeline_emitn(pipeline, final.so_decl_list,
1106 3 + 2 * max_decls,
1107 GENX(3DSTATE_SO_DECL_LIST),
1108 .StreamtoBufferSelects0 = sbs[0],
1109 .StreamtoBufferSelects1 = sbs[1],
1110 .StreamtoBufferSelects2 = sbs[2],
1111 .StreamtoBufferSelects3 = sbs[3],
1112 .NumEntries0 = decls[0],
1113 .NumEntries1 = decls[1],
1114 .NumEntries2 = decls[2],
1115 .NumEntries3 = decls[3]);
1116
1117 for (int i = 0; i < max_decls; i++) {
1118 GENX(SO_DECL_ENTRY_pack)(NULL, dw + 3 + i * 2,
1119 &(struct GENX(SO_DECL_ENTRY)) {
1120 .Stream0Decl = so_decl[0][i],
1121 .Stream1Decl = so_decl[1][i],
1122 .Stream2Decl = so_decl[2][i],
1123 .Stream3Decl = so_decl[3][i],
1124 });
1125 }
1126 }
1127
1128 anv_pipeline_emit(pipeline, partial.so, GENX(3DSTATE_STREAMOUT), so) {
1129 if (xfb_info) {
1130 pipeline->uses_xfb = true;
1131
1132 so.SOFunctionEnable = true;
1133 so.SOStatisticsEnable = true;
1134
1135 so.Buffer0SurfacePitch = xfb_info->buffers[0].stride;
1136 so.Buffer1SurfacePitch = xfb_info->buffers[1].stride;
1137 so.Buffer2SurfacePitch = xfb_info->buffers[2].stride;
1138 so.Buffer3SurfacePitch = xfb_info->buffers[3].stride;
1139
1140 int urb_entry_read_offset = 0;
1141 int urb_entry_read_length =
1142 (prog_data->vue_map.num_slots + 1) / 2 - urb_entry_read_offset;
1143
1144 /* We always read the whole vertex. This could be reduced at some
1145 * point by reading less and offsetting the register index in the
1146 * SO_DECLs.
1147 */
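/* Editorial note: the read length computed above is in 256-bit units
 * (pairs of 128-bit VUE slots), hence the divide-by-two of num_slots; this
 * is an interpretation of the formula, not text from the original source.
 */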
1148 so.Stream0VertexReadOffset = urb_entry_read_offset;
1149 so.Stream0VertexReadLength = urb_entry_read_length - 1;
1150 so.Stream1VertexReadOffset = urb_entry_read_offset;
1151 so.Stream1VertexReadLength = urb_entry_read_length - 1;
1152 so.Stream2VertexReadOffset = urb_entry_read_offset;
1153 so.Stream2VertexReadLength = urb_entry_read_length - 1;
1154 so.Stream3VertexReadOffset = urb_entry_read_offset;
1155 so.Stream3VertexReadLength = urb_entry_read_length - 1;
1156 }
1157 }
1158 }
1159
1160 static inline uint32_t
1161 get_sampler_count(const struct anv_shader_bin *bin)
1162 {
1163 /* We can potentially have way more than 32 samplers and that's ok.
1164 * However, the 3DSTATE_XS packets only have 3 bits to specify how
1165 * many to pre-fetch and all values above 4 are marked reserved.
1166 */
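/* E.g. (illustrative): 1-4 samplers -> 1, 13 samplers -> 4; larger counts
 * are clamped so the prefetch value never exceeds 4.
 */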
1167 return DIV_ROUND_UP(CLAMP(bin->bind_map.sampler_count, 0, 16), 4);
1168 }
1169
1170 static UNUSED struct anv_address
1171 get_scratch_address(struct anv_pipeline *pipeline,
1172 gl_shader_stage stage,
1173 const struct anv_shader_bin *bin)
1174 {
1175 return (struct anv_address) {
1176 .bo = anv_scratch_pool_alloc(pipeline->device,
1177 &pipeline->device->scratch_pool,
1178 stage, bin->prog_data->total_scratch),
1179 .offset = 0,
1180 };
1181 }
1182
1183 static UNUSED uint32_t
1184 get_scratch_space(const struct anv_shader_bin *bin)
1185 {
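/* Editorial note: assuming total_scratch is a power of two (or zero), this
 * evaluates to log2(total_scratch in KB): 1KB -> 0, 2KB -> 1, 4KB -> 2, ...
 * which is how the PerThreadScratchSpace fields below consume it.
 */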
1186 return ffs(bin->prog_data->total_scratch / 2048);
1187 }
1188
1189 static UNUSED uint32_t
1190 get_scratch_surf(struct anv_pipeline *pipeline,
1191 gl_shader_stage stage,
1192 const struct anv_shader_bin *bin,
1193 bool protected)
1194 {
1195 if (bin->prog_data->total_scratch == 0)
1196 return 0;
1197
1198 struct anv_scratch_pool *pool = protected ?
1199 &pipeline->device->protected_scratch_pool :
1200 &pipeline->device->scratch_pool;
1201 struct anv_bo *bo =
1202 anv_scratch_pool_alloc(pipeline->device, pool,
1203 stage, bin->prog_data->total_scratch);
1204 anv_reloc_list_add_bo(pipeline->batch.relocs, bo);
1205 return anv_scratch_pool_get_surf(pipeline->device, pool,
1206 bin->prog_data->total_scratch) >> ANV_SCRATCH_SPACE_SHIFT(GFX_VER);
1207 }
1208
1209 static void
1210 emit_3dstate_vs(struct anv_graphics_pipeline *pipeline)
1211 {
1212 const struct intel_device_info *devinfo = pipeline->base.base.device->info;
1213 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
1214 const struct anv_shader_bin *vs_bin =
1215 pipeline->base.shaders[MESA_SHADER_VERTEX];
1216
1217 assert(anv_pipeline_has_stage(pipeline, MESA_SHADER_VERTEX));
1218
1219 uint32_t vs_dwords[GENX(3DSTATE_VS_length)];
1220 anv_pipeline_emit_tmp(pipeline, vs_dwords, GENX(3DSTATE_VS), vs) {
1221 vs.Enable = true;
1222 vs.StatisticsEnable = true;
1223 vs.KernelStartPointer = vs_bin->kernel.offset;
1224 #if GFX_VER < 20
1225 vs.SIMD8DispatchEnable =
1226 vs_prog_data->base.dispatch_mode == DISPATCH_MODE_SIMD8;
1227 #endif
1228
1229 assert(!vs_prog_data->base.base.use_alt_mode);
1230 #if GFX_VER < 11
1231 vs.SingleVertexDispatch = false;
1232 #endif
1233 vs.VectorMaskEnable = false;
1234 /* Wa_1606682166:
1235 * Incorrect TDL's SSP address shift in SARB for 16:6 & 18:8 modes.
1236 * Disable the Sampler state prefetch functionality in the SARB by
1237 * programming 0xB000[30] to '1'.
1238 */
1239 vs.SamplerCount = GFX_VER == 11 ? 0 : get_sampler_count(vs_bin);
1240 vs.BindingTableEntryCount = vs_bin->bind_map.surface_count;
1241 vs.FloatingPointMode = IEEE754;
1242 vs.IllegalOpcodeExceptionEnable = false;
1243 vs.SoftwareExceptionEnable = false;
1244 vs.MaximumNumberofThreads = devinfo->max_vs_threads - 1;
1245
1246 if (GFX_VER == 9 && devinfo->gt == 4 &&
1247 anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL)) {
1248 /* On Sky Lake GT4, we have experienced some hangs related to the VS
1249 * cache and tessellation. It is unknown exactly what is happening
1250 * but the Haswell docs for the "VS Reference Count Full Force Miss
1251 * Enable" field of the "Thread Mode" register refer to a HSW bug in
1252 * which the VUE handle reference count would overflow resulting in
1253 * internal reference counting bugs. My (Faith's) best guess is that
1254 * this bug cropped back up on SKL GT4 when we suddenly had more
1255 * threads in play than any previous gfx9 hardware.
1256 *
1257 * What we do know for sure is that setting this bit when
1258 * tessellation shaders are in use fixes a GPU hang in Batman: Arkham
1259 * City when playing with DXVK (https://bugs.freedesktop.org/107280).
1260 * Disabling the vertex cache with tessellation shaders should only
1261 * have a minor performance impact as the tessellation shaders are
1262 * likely generating and processing far more geometry than the vertex
1263 * stage.
1264 */
1265 vs.VertexCacheDisable = true;
1266 }
1267
1268 vs.VertexURBEntryReadLength = vs_prog_data->base.urb_read_length;
1269 vs.VertexURBEntryReadOffset = 0;
1270 vs.DispatchGRFStartRegisterForURBData =
1271 vs_prog_data->base.base.dispatch_grf_start_reg;
1272
1273 vs.UserClipDistanceClipTestEnableBitmask =
1274 vs_prog_data->base.clip_distance_mask;
1275 vs.UserClipDistanceCullTestEnableBitmask =
1276 vs_prog_data->base.cull_distance_mask;
1277
1278 #if GFX_VERx10 < 125
1279 vs.PerThreadScratchSpace = get_scratch_space(vs_bin);
1280 vs.ScratchSpaceBasePointer =
1281 get_scratch_address(&pipeline->base.base, MESA_SHADER_VERTEX, vs_bin);
1282 #endif
1283 }
1284
1285 anv_pipeline_emit_merge(pipeline, final.vs, vs_dwords, GENX(3DSTATE_VS), vs) {
1286 #if GFX_VERx10 >= 125
1287 vs.ScratchSpaceBuffer = get_scratch_surf(&pipeline->base.base,
1288 MESA_SHADER_VERTEX,
1289 vs_bin, false);
1290 #endif
1291 }
1292 if (pipeline_needs_protected(&pipeline->base.base)) {
1293 anv_pipeline_emit_merge(pipeline, final.vs_protected,
1294 vs_dwords, GENX(3DSTATE_VS), vs) {
1295 #if GFX_VERx10 >= 125
1296 vs.ScratchSpaceBuffer = get_scratch_surf(&pipeline->base.base,
1297 MESA_SHADER_VERTEX,
1298 vs_bin, true);
1299 #endif
1300 }
1301 }
1302 }
1303
1304 static void
1305 emit_3dstate_hs_ds(struct anv_graphics_pipeline *pipeline,
1306 const struct vk_tessellation_state *ts)
1307 {
1308 if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL)) {
1309 anv_pipeline_emit(pipeline, final.hs, GENX(3DSTATE_HS), hs);
1310 anv_pipeline_emit(pipeline, final.hs_protected, GENX(3DSTATE_HS), hs);
1311 anv_pipeline_emit(pipeline, final.ds, GENX(3DSTATE_DS), ds);
1312 anv_pipeline_emit(pipeline, final.ds_protected, GENX(3DSTATE_DS), ds);
1313 return;
1314 }
1315
1316 const struct intel_device_info *devinfo = pipeline->base.base.device->info;
1317 const struct anv_shader_bin *tcs_bin =
1318 pipeline->base.shaders[MESA_SHADER_TESS_CTRL];
1319 const struct anv_shader_bin *tes_bin =
1320 pipeline->base.shaders[MESA_SHADER_TESS_EVAL];
1321
1322 const struct brw_tcs_prog_data *tcs_prog_data = get_tcs_prog_data(pipeline);
1323 const struct brw_tes_prog_data *tes_prog_data = get_tes_prog_data(pipeline);
1324
1325 uint32_t hs_dwords[GENX(3DSTATE_HS_length)];
1326 anv_pipeline_emit_tmp(pipeline, hs_dwords, GENX(3DSTATE_HS), hs) {
1327 hs.Enable = true;
1328 hs.StatisticsEnable = true;
1329 hs.KernelStartPointer = tcs_bin->kernel.offset;
1330 /* Wa_1606682166 */
1331 hs.SamplerCount = GFX_VER == 11 ? 0 : get_sampler_count(tcs_bin);
1332 hs.BindingTableEntryCount = tcs_bin->bind_map.surface_count;
1333
1334 #if GFX_VER >= 12
1335 /* Wa_1604578095:
1336 *
1337 * A hang occurs when the maximum number of threads is less than 2
1338 * times the instance count. The maximum number of threads must be
1339 * more than 2 times the instance count.
1340 */
1341 assert((devinfo->max_tcs_threads / 2) > tcs_prog_data->instances);
1342 #endif
1343
1344 hs.MaximumNumberofThreads = devinfo->max_tcs_threads - 1;
1345 hs.IncludeVertexHandles = true;
1346 hs.InstanceCount = tcs_prog_data->instances - 1;
1347
1348 hs.VertexURBEntryReadLength = 0;
1349 hs.VertexURBEntryReadOffset = 0;
1350 hs.DispatchGRFStartRegisterForURBData =
1351 tcs_prog_data->base.base.dispatch_grf_start_reg & 0x1f;
1352 #if GFX_VER >= 12
1353 hs.DispatchGRFStartRegisterForURBData5 =
1354 tcs_prog_data->base.base.dispatch_grf_start_reg >> 5;
1355 #endif
1356
1357 #if GFX_VERx10 < 125
1358 hs.PerThreadScratchSpace = get_scratch_space(tcs_bin);
1359 hs.ScratchSpaceBasePointer =
1360 get_scratch_address(&pipeline->base.base, MESA_SHADER_TESS_CTRL, tcs_bin);
1361 #endif
1362
1363 #if GFX_VER == 12
1364 /* Patch Count threshold specifies the maximum number of patches that
1365 * will be accumulated before a thread dispatch is forced.
1366 */
1367 hs.PatchCountThreshold = tcs_prog_data->patch_count_threshold;
1368 #endif
1369
1370 #if GFX_VER < 20
1371 hs.DispatchMode = tcs_prog_data->base.dispatch_mode;
1372 #endif
1373 hs.IncludePrimitiveID = tcs_prog_data->include_primitive_id;
1374 };
1375
1376 uint32_t ds_dwords[GENX(3DSTATE_DS_length)];
1377 anv_pipeline_emit_tmp(pipeline, ds_dwords, GENX(3DSTATE_DS), ds) {
1378 ds.Enable = true;
1379 ds.StatisticsEnable = true;
1380 ds.KernelStartPointer = tes_bin->kernel.offset;
1381 /* Wa_1606682166 */
1382 ds.SamplerCount = GFX_VER == 11 ? 0 : get_sampler_count(tes_bin);
1383 ds.BindingTableEntryCount = tes_bin->bind_map.surface_count;
1384 ds.MaximumNumberofThreads = devinfo->max_tes_threads - 1;
1385
1386 ds.ComputeWCoordinateEnable =
1387 tes_prog_data->domain == INTEL_TESS_DOMAIN_TRI;
1388
1389 ds.PatchURBEntryReadLength = tes_prog_data->base.urb_read_length;
1390 ds.PatchURBEntryReadOffset = 0;
1391 ds.DispatchGRFStartRegisterForURBData =
1392 tes_prog_data->base.base.dispatch_grf_start_reg;
1393
1394 #if GFX_VER < 11
1395 ds.DispatchMode =
1396 tes_prog_data->base.dispatch_mode == DISPATCH_MODE_SIMD8 ?
1397 DISPATCH_MODE_SIMD8_SINGLE_PATCH :
1398 DISPATCH_MODE_SIMD4X2;
1399 #else
1400 assert(tes_prog_data->base.dispatch_mode == INTEL_DISPATCH_MODE_SIMD8);
1401 ds.DispatchMode = DISPATCH_MODE_SIMD8_SINGLE_PATCH;
1402 #endif
1403
1404 ds.UserClipDistanceClipTestEnableBitmask =
1405 tes_prog_data->base.clip_distance_mask;
1406 ds.UserClipDistanceCullTestEnableBitmask =
1407 tes_prog_data->base.cull_distance_mask;
1408
1409 #if GFX_VER >= 12
1410 ds.PrimitiveIDNotRequired = !tes_prog_data->include_primitive_id;
1411 #endif
1412 #if GFX_VERx10 < 125
1413 ds.PerThreadScratchSpace = get_scratch_space(tes_bin);
1414 ds.ScratchSpaceBasePointer =
1415 get_scratch_address(&pipeline->base.base, MESA_SHADER_TESS_EVAL, tes_bin);
1416 #endif
1417 }
1418
1419 anv_pipeline_emit_merge(pipeline, final.hs, hs_dwords, GENX(3DSTATE_HS), hs) {
1420 #if GFX_VERx10 >= 125
1421 hs.ScratchSpaceBuffer = get_scratch_surf(&pipeline->base.base,
1422 MESA_SHADER_TESS_CTRL,
1423 tcs_bin, false);
1424 #endif
1425 }
1426 anv_pipeline_emit_merge(pipeline, final.ds, ds_dwords, GENX(3DSTATE_DS), ds) {
1427 #if GFX_VERx10 >= 125
1428 ds.ScratchSpaceBuffer = get_scratch_surf(&pipeline->base.base,
1429 MESA_SHADER_TESS_EVAL,
1430 tes_bin, false);
1431 #endif
1432 }
1433 if (pipeline_needs_protected(&pipeline->base.base)) {
1434 anv_pipeline_emit_merge(pipeline, final.hs_protected,
1435 hs_dwords, GENX(3DSTATE_HS), hs) {
1436 #if GFX_VERx10 >= 125
1437 hs.ScratchSpaceBuffer = get_scratch_surf(&pipeline->base.base,
1438 MESA_SHADER_TESS_CTRL,
1439 tcs_bin, true);
1440 #endif
1441 }
1442 anv_pipeline_emit_merge(pipeline, final.ds_protected,
1443 ds_dwords, GENX(3DSTATE_DS), ds) {
1444 #if GFX_VERx10 >= 125
1445 ds.ScratchSpaceBuffer = get_scratch_surf(&pipeline->base.base,
1446 MESA_SHADER_TESS_EVAL,
1447 tes_bin, true);
1448 #endif
1449 }
1450 }
1451 }
1452
1453 static UNUSED bool
1454 geom_or_tess_prim_id_used(struct anv_graphics_pipeline *pipeline)
1455 {
1456 const struct brw_tcs_prog_data *tcs_prog_data =
1457 anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_CTRL) ?
1458 get_tcs_prog_data(pipeline) : NULL;
1459 const struct brw_tes_prog_data *tes_prog_data =
1460 anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL) ?
1461 get_tes_prog_data(pipeline) : NULL;
1462 const struct brw_gs_prog_data *gs_prog_data =
1463 anv_pipeline_has_stage(pipeline, MESA_SHADER_GEOMETRY) ?
1464 get_gs_prog_data(pipeline) : NULL;
1465
1466 return (tcs_prog_data && tcs_prog_data->include_primitive_id) ||
1467 (tes_prog_data && tes_prog_data->include_primitive_id) ||
1468 (gs_prog_data && gs_prog_data->include_primitive_id);
1469 }
1470
1471 static void
1472 emit_3dstate_te(struct anv_graphics_pipeline *pipeline)
1473 {
1474 anv_pipeline_emit(pipeline, partial.te, GENX(3DSTATE_TE), te) {
1475 if (anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL)) {
1476 const struct brw_tes_prog_data *tes_prog_data =
1477 get_tes_prog_data(pipeline);
1478
1479 te.Partitioning = tes_prog_data->partitioning;
1480 te.TEDomain = tes_prog_data->domain;
1481 te.TEEnable = true;
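         /* Hardware limits: a maximum tessellation factor of 63 for odd
          * partitioning and 64 otherwise, which lines up with Vulkan's
          * required maxTessellationGenerationLevel of 64.
          */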
1482 te.MaximumTessellationFactorOdd = 63.0;
1483 te.MaximumTessellationFactorNotOdd = 64.0;
1484 #if GFX_VERx10 >= 125
1485 const struct anv_device *device = pipeline->base.base.device;
1486 if (intel_needs_workaround(device->info, 22012699309))
1487 te.TessellationDistributionMode = TEDMODE_RR_STRICT;
1488 else
1489 te.TessellationDistributionMode = TEDMODE_RR_FREE;
1490
1491 if (intel_needs_workaround(device->info, 14015055625)) {
1492 /* Wa_14015055625:
1493 *
1494 * Disable Tessellation Distribution when primitive Id is enabled.
1495 */
1496 if (sbe_primitive_id_override(pipeline) ||
1497 geom_or_tess_prim_id_used(pipeline))
1498 te.TessellationDistributionMode = TEDMODE_OFF;
1499 }
1500
1501 #if GFX_VER >= 20
1502 te.TessellationDistributionLevel = TEDLEVEL_REGION;
1503 #else
1504 te.TessellationDistributionLevel = TEDLEVEL_PATCH;
1505 #endif
1506 /* 64_TRIANGLES */
1507 te.SmallPatchThreshold = 3;
1508 /* 1K_TRIANGLES */
1509 te.TargetBlockSize = 8;
1510 /* 1K_TRIANGLES */
1511 te.LocalBOPAccumulatorThreshold = 1;
1512 #endif
1513
1514 #if GFX_VER >= 20
1515 te.NumberOfRegionsPerPatch = 2;
1516 #endif
1517 }
1518 }
1519 }
1520
1521 static void
1522 emit_3dstate_gs(struct anv_graphics_pipeline *pipeline)
1523 {
1524 if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_GEOMETRY)) {
1525 anv_pipeline_emit(pipeline, partial.gs, GENX(3DSTATE_GS), gs);
1526 anv_pipeline_emit(pipeline, partial.gs_protected, GENX(3DSTATE_GS), gs);
1527 return;
1528 }
1529
1530 const struct intel_device_info *devinfo = pipeline->base.base.device->info;
1531 const struct anv_shader_bin *gs_bin =
1532 pipeline->base.shaders[MESA_SHADER_GEOMETRY];
1533 const struct brw_gs_prog_data *gs_prog_data = get_gs_prog_data(pipeline);
1534
1535 uint32_t gs_dwords[GENX(3DSTATE_GS_length)];
1536 anv_pipeline_emit_tmp(pipeline, gs_dwords, GENX(3DSTATE_GS), gs) {
1537 gs.Enable = true;
1538 gs.StatisticsEnable = true;
1539 gs.KernelStartPointer = gs_bin->kernel.offset;
1540 #if GFX_VER < 20
1541 gs.DispatchMode = gs_prog_data->base.dispatch_mode;
1542 #endif
1543
1544 gs.SingleProgramFlow = false;
1545 gs.VectorMaskEnable = false;
1546 /* Wa_1606682166 */
1547 gs.SamplerCount = GFX_VER == 11 ? 0 : get_sampler_count(gs_bin);
1548 gs.BindingTableEntryCount = gs_bin->bind_map.surface_count;
1549 gs.IncludeVertexHandles = gs_prog_data->base.include_vue_handles;
1550 gs.IncludePrimitiveID = gs_prog_data->include_primitive_id;
1551
1552 gs.MaximumNumberofThreads = devinfo->max_gs_threads - 1;
1553
1554 gs.OutputVertexSize = gs_prog_data->output_vertex_size_hwords * 2 - 1;
1555 gs.OutputTopology = gs_prog_data->output_topology;
1556 gs.ControlDataFormat = gs_prog_data->control_data_format;
1557 gs.ControlDataHeaderSize = gs_prog_data->control_data_header_size_hwords;
1558 gs.InstanceControl = MAX2(gs_prog_data->invocations, 1) - 1;
1559
1560 gs.ExpectedVertexCount = gs_prog_data->vertices_in;
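      /* static_vertex_count is negative when the number of emitted vertices
       * isn't known at compile time; only a known count enables the static
       * output path.
       */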
1561 gs.StaticOutput = gs_prog_data->static_vertex_count >= 0;
1562 gs.StaticOutputVertexCount = gs_prog_data->static_vertex_count >= 0 ?
1563 gs_prog_data->static_vertex_count : 0;
1564
1565 gs.VertexURBEntryReadOffset = 0;
1566 gs.VertexURBEntryReadLength = gs_prog_data->base.urb_read_length;
1567 gs.DispatchGRFStartRegisterForURBData =
1568 gs_prog_data->base.base.dispatch_grf_start_reg;
1569
1570 gs.UserClipDistanceClipTestEnableBitmask =
1571 gs_prog_data->base.clip_distance_mask;
1572 gs.UserClipDistanceCullTestEnableBitmask =
1573 gs_prog_data->base.cull_distance_mask;
1574
1575 #if GFX_VERx10 < 125
1576 gs.PerThreadScratchSpace = get_scratch_space(gs_bin);
1577 gs.ScratchSpaceBasePointer =
1578 get_scratch_address(&pipeline->base.base, MESA_SHADER_GEOMETRY, gs_bin);
1579 #endif
1580 }
1581
1582 anv_pipeline_emit_merge(pipeline, partial.gs, gs_dwords, GENX(3DSTATE_GS), gs) {
1583 #if GFX_VERx10 >= 125
1584 gs.ScratchSpaceBuffer =
1585 get_scratch_surf(&pipeline->base.base, MESA_SHADER_GEOMETRY, gs_bin, false);
1586 #endif
1587 }
1588 if (pipeline_needs_protected(&pipeline->base.base)) {
1589 anv_pipeline_emit_merge(pipeline, partial.gs_protected,
1590 gs_dwords, GENX(3DSTATE_GS), gs) {
1591 #if GFX_VERx10 >= 125
1592 gs.ScratchSpaceBuffer = get_scratch_surf(&pipeline->base.base,
1593 MESA_SHADER_GEOMETRY,
1594 gs_bin, true);
1595 #endif
1596 }
1597 }
1598 }
1599
1600 static void
1601 emit_3dstate_wm(struct anv_graphics_pipeline *pipeline,
1602 const struct vk_input_assembly_state *ia,
1603 const struct vk_rasterization_state *rs,
1604 const struct vk_multisample_state *ms,
1605 const struct vk_color_blend_state *cb,
1606 const struct vk_render_pass_state *rp)
1607 {
1608 const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
1609
1610 anv_pipeline_emit(pipeline, partial.wm, GENX(3DSTATE_WM), wm) {
1611 wm.StatisticsEnable = true;
1612 wm.LineEndCapAntialiasingRegionWidth = _05pixels;
1613 wm.LineAntialiasingRegionWidth = _10pixels;
1614 wm.PointRasterizationRule = RASTRULE_UPPER_LEFT;
1615
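      /* Pick early depth/stencil behavior from the fragment shader: force
       * early tests when early_fragment_tests is requested, and avoid the
       * hardware-chosen NORMAL mode for shaders with side effects.
       */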
1616 if (anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT)) {
1617 if (wm_prog_data->early_fragment_tests) {
1618 wm.EarlyDepthStencilControl = EDSC_PREPS;
1619 } else if (wm_prog_data->has_side_effects) {
1620 wm.EarlyDepthStencilControl = EDSC_PSEXEC;
1621 } else {
1622 wm.EarlyDepthStencilControl = EDSC_NORMAL;
1623 }
1624 }
1625 }
1626 }
1627
1628 static void
1629 emit_3dstate_ps(struct anv_graphics_pipeline *pipeline,
1630 const struct vk_multisample_state *ms,
1631 const struct vk_color_blend_state *cb)
1632 {
1633 UNUSED const struct intel_device_info *devinfo =
1634 pipeline->base.base.device->info;
1635 const struct anv_shader_bin *fs_bin =
1636 pipeline->base.shaders[MESA_SHADER_FRAGMENT];
1637
1638 if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT)) {
1639 anv_pipeline_emit(pipeline, partial.ps, GENX(3DSTATE_PS), ps);
1640 anv_pipeline_emit(pipeline, partial.ps_protected, GENX(3DSTATE_PS), ps);
1641 return;
1642 }
1643
1644 const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
1645
1646 uint32_t ps_dwords[GENX(3DSTATE_PS_length)];
1647 anv_pipeline_emit_tmp(pipeline, ps_dwords, GENX(3DSTATE_PS), ps) {
1648 #if GFX_VER == 12
1649 assert(wm_prog_data->dispatch_multi == 0 ||
1650 (wm_prog_data->dispatch_multi == 16 && wm_prog_data->max_polygons == 2));
1651 ps.DualSIMD8DispatchEnable = wm_prog_data->dispatch_multi;
1652 /* XXX - No major improvement observed from enabling
1653 * overlapping subspans, but it could be helpful
1654 * in theory when the requirements listed on the
1655 * BSpec page for 3DSTATE_PS_BODY are met.
1656 */
1657 ps.OverlappingSubspansEnable = false;
1658 #endif
1659
1660 ps.SingleProgramFlow = false;
1661 ps.VectorMaskEnable = wm_prog_data->uses_vmask;
1662 /* Wa_1606682166 */
1663 ps.SamplerCount = GFX_VER == 11 ? 0 : get_sampler_count(fs_bin);
1664 ps.BindingTableEntryCount = fs_bin->bind_map.surface_count;
1665 #if GFX_VER < 20
1666 ps.PushConstantEnable =
1667 devinfo->needs_null_push_constant_tbimr_workaround ||
1668 wm_prog_data->base.nr_params > 0 ||
1669 wm_prog_data->base.ubo_ranges[0].length;
1670 #endif
1671
1672 ps.MaximumNumberofThreadsPerPSD = devinfo->max_threads_per_psd - 1;
1673
1674 #if GFX_VERx10 < 125
1675 ps.PerThreadScratchSpace = get_scratch_space(fs_bin);
1676 ps.ScratchSpaceBasePointer =
1677 get_scratch_address(&pipeline->base.base, MESA_SHADER_FRAGMENT, fs_bin);
1678 #endif
1679 }
1680 anv_pipeline_emit_merge(pipeline, partial.ps, ps_dwords, GENX(3DSTATE_PS), ps) {
1681 #if GFX_VERx10 >= 125
1682 ps.ScratchSpaceBuffer =
1683 get_scratch_surf(&pipeline->base.base, MESA_SHADER_FRAGMENT, fs_bin, false);
1684 #endif
1685 }
1686 if (pipeline_needs_protected(&pipeline->base.base)) {
1687 anv_pipeline_emit_merge(pipeline, partial.ps_protected,
1688 ps_dwords, GENX(3DSTATE_PS), ps) {
1689 #if GFX_VERx10 >= 125
1690 ps.ScratchSpaceBuffer =
1691 get_scratch_surf(&pipeline->base.base, MESA_SHADER_FRAGMENT, fs_bin, true);
1692 #endif
1693 }
1694 }
1695 }
1696
1697 static void
1698 emit_3dstate_ps_extra(struct anv_graphics_pipeline *pipeline,
1699 const struct vk_rasterization_state *rs,
1700 const struct vk_graphics_pipeline_state *state)
1701 {
1702 const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
1703
1704 if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT)) {
1705 anv_pipeline_emit(pipeline, partial.ps_extra, GENX(3DSTATE_PS_EXTRA), ps);
1706 return;
1707 }
1708
1709 anv_pipeline_emit(pipeline, partial.ps_extra, GENX(3DSTATE_PS_EXTRA), ps) {
1710 ps.PixelShaderValid = true;
1711 #if GFX_VER < 20
1712 ps.AttributeEnable = wm_prog_data->num_varying_inputs > 0;
1713 #endif
1714 ps.oMaskPresenttoRenderTarget = wm_prog_data->uses_omask;
1715 ps.PixelShaderComputedDepthMode = wm_prog_data->computed_depth_mode;
1716 ps.PixelShaderUsesSourceDepth = wm_prog_data->uses_src_depth;
1717 ps.PixelShaderUsesSourceW = wm_prog_data->uses_src_w;
1718
1719 ps.PixelShaderComputesStencil = wm_prog_data->computed_stencil;
1720 #if GFX_VER >= 20
1721 assert(!wm_prog_data->pulls_bary);
1722 #else
1723 ps.PixelShaderPullsBary = wm_prog_data->pulls_bary;
1724 #endif
1725
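      /* Select how the input coverage mask reaches the shader: none when
       * gl_SampleMaskIn is unused; coarse-pixel shaders always use the
       * normal mode; otherwise post-depth-coverage shaders use depth
       * coverage and everything else uses normal coverage.
       */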
1726 ps.InputCoverageMaskState = ICMS_NONE;
1727 assert(!wm_prog_data->inner_coverage); /* Not available in SPIR-V */
1728 if (!wm_prog_data->uses_sample_mask)
1729 ps.InputCoverageMaskState = ICMS_NONE;
1730 else if (brw_wm_prog_data_is_coarse(wm_prog_data, 0))
1731 ps.InputCoverageMaskState = ICMS_NORMAL;
1732 else if (wm_prog_data->post_depth_coverage)
1733 ps.InputCoverageMaskState = ICMS_DEPTH_COVERAGE;
1734 else
1735 ps.InputCoverageMaskState = ICMS_NORMAL;
1736
1737 #if GFX_VER >= 11
1738 ps.PixelShaderRequiresSubpixelSampleOffsets =
1739 wm_prog_data->uses_sample_offsets;
1740 ps.PixelShaderRequiresNonPerspectiveBaryPlaneCoefficients =
1741 wm_prog_data->uses_npc_bary_coefficients;
1742 ps.PixelShaderRequiresPerspectiveBaryPlaneCoefficients =
1743 wm_prog_data->uses_pc_bary_coefficients;
1744 ps.PixelShaderRequiresSourceDepthandorWPlaneCoefficients =
1745 wm_prog_data->uses_depth_w_coefficients;
1746 #endif
1747 }
1748 }
1749
1750 static void
1751 emit_3dstate_vf_statistics(struct anv_graphics_pipeline *pipeline)
1752 {
1753 anv_pipeline_emit(pipeline, final.vf_statistics,
1754 GENX(3DSTATE_VF_STATISTICS), vfs) {
1755 vfs.StatisticsEnable = true;
1756 }
1757 }
1758
1759 static void
1760 compute_kill_pixel(struct anv_graphics_pipeline *pipeline,
1761 const struct vk_multisample_state *ms,
1762 const struct vk_graphics_pipeline_state *state)
1763 {
1764 if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT)) {
1765 pipeline->kill_pixel = false;
1766 return;
1767 }
1768
1769 const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
1770
1771 /* This computes the KillPixel portion of the computation for whether or
1772 * not we want to enable the PMA fix on gfx8 or gfx9. It's given by this
1773 * chunk of the giant formula:
1774 *
1775 * (3DSTATE_PS_EXTRA::PixelShaderKillsPixels ||
1776 * 3DSTATE_PS_EXTRA::oMask Present to RenderTarget ||
1777 * 3DSTATE_PS_BLEND::AlphaToCoverageEnable ||
1778 * 3DSTATE_PS_BLEND::AlphaTestEnable ||
1779 * 3DSTATE_WM_CHROMAKEY::ChromaKeyKillEnable)
1780 *
1781 * 3DSTATE_WM_CHROMAKEY::ChromaKeyKillEnable is always false and so is
1782 * 3DSTATE_PS_BLEND::AlphaTestEnable since Vulkan doesn't have a concept
1783 * of an alpha test.
1784 */
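   /* A depth/stencil attachment feedback loop is treated like a
    * pixel-killing shader here, so it feeds into kill_pixel below.
    */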
1785 pipeline->rp_has_ds_self_dep =
1786 (state->pipeline_flags &
1787 VK_PIPELINE_CREATE_2_DEPTH_STENCIL_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT) != 0;
1788 pipeline->kill_pixel =
1789 pipeline->rp_has_ds_self_dep ||
1790 wm_prog_data->uses_kill ||
1791 wm_prog_data->uses_omask ||
1792 (ms && ms->alpha_to_coverage_enable);
1793 }
1794
1795 #if GFX_VER >= 12
1796 static void
1797 emit_3dstate_primitive_replication(struct anv_graphics_pipeline *pipeline,
1798 const struct vk_render_pass_state *rp)
1799 {
1800 if (anv_pipeline_is_mesh(pipeline)) {
1801 anv_pipeline_emit(pipeline, final.primitive_replication,
1802 GENX(3DSTATE_PRIMITIVE_REPLICATION), pr);
1803 return;
1804 }
1805
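   /* Primitive replication implements multiview without a geometry shader
    * by having the last pre-rasterization stage write one position slot per
    * enabled view.
    */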
1806 const int replication_count =
1807 anv_pipeline_get_last_vue_prog_data(pipeline)->vue_map.num_pos_slots;
1808
1809 assert(replication_count >= 1);
1810 if (replication_count == 1) {
1811 anv_pipeline_emit(pipeline, final.primitive_replication,
1812 GENX(3DSTATE_PRIMITIVE_REPLICATION), pr);
1813 return;
1814 }
1815
1816 assert(replication_count == util_bitcount(rp->view_mask));
1817 assert(replication_count <= MAX_VIEWS_FOR_PRIMITIVE_REPLICATION);
1818
1819 anv_pipeline_emit(pipeline, final.primitive_replication,
1820 GENX(3DSTATE_PRIMITIVE_REPLICATION), pr) {
1821 pr.ReplicaMask = (1 << replication_count) - 1;
1822 pr.ReplicationCount = replication_count - 1;
1823
1824 int i = 0;
1825 u_foreach_bit(view_index, rp->view_mask) {
1826 pr.RTAIOffset[i] = view_index;
1827 i++;
1828 }
1829 }
1830 }
1831 #endif
1832
1833 #if GFX_VERx10 >= 125
1834 static void
1835 emit_task_state(struct anv_graphics_pipeline *pipeline)
1836 {
1837 assert(anv_pipeline_is_mesh(pipeline));
1838
1839 if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_TASK)) {
1840 anv_pipeline_emit(pipeline, final.task_control,
1841 GENX(3DSTATE_TASK_CONTROL), zero);
1842 anv_pipeline_emit(pipeline, final.task_control_protected,
1843 GENX(3DSTATE_TASK_CONTROL), zero);
1844 anv_pipeline_emit(pipeline, final.task_shader,
1845 GENX(3DSTATE_TASK_SHADER), zero);
1846 anv_pipeline_emit(pipeline, final.task_redistrib,
1847 GENX(3DSTATE_TASK_REDISTRIB), zero);
1848 return;
1849 }
1850
1851 const struct anv_shader_bin *task_bin =
1852 pipeline->base.shaders[MESA_SHADER_TASK];
1853
1854 uint32_t task_control_dwords[GENX(3DSTATE_TASK_CONTROL_length)];
1855 anv_pipeline_emit_tmp(pipeline, task_control_dwords, GENX(3DSTATE_TASK_CONTROL), tc) {
1856 tc.TaskShaderEnable = true;
1857 tc.StatisticsEnable = true;
1858 tc.MaximumNumberofThreadGroups = 511;
1859 }
1860
1861 anv_pipeline_emit_merge(pipeline, final.task_control,
1862 task_control_dwords, GENX(3DSTATE_TASK_CONTROL), tc) {
1863 tc.ScratchSpaceBuffer =
1864 get_scratch_surf(&pipeline->base.base, MESA_SHADER_TASK, task_bin, false);
1865 }
1866 if (pipeline_needs_protected(&pipeline->base.base)) {
1867 anv_pipeline_emit_merge(pipeline, final.task_control_protected,
1868 task_control_dwords, GENX(3DSTATE_TASK_CONTROL), tc) {
1869 tc.ScratchSpaceBuffer =
1870 get_scratch_surf(&pipeline->base.base, MESA_SHADER_TASK, task_bin, true);
1871 }
1872 }
1873
1874 const struct intel_device_info *devinfo = pipeline->base.base.device->info;
1875 const struct brw_task_prog_data *task_prog_data = get_task_prog_data(pipeline);
1876 const struct intel_cs_dispatch_info task_dispatch =
1877 brw_cs_get_dispatch_info(devinfo, &task_prog_data->base, NULL);
1878
1879 anv_pipeline_emit(pipeline, final.task_shader,
1880 GENX(3DSTATE_TASK_SHADER), task) {
1881 task.KernelStartPointer = task_bin->kernel.offset;
1882 task.SIMDSize = task_dispatch.simd_size / 16;
1883 task.MessageSIMD = task.SIMDSize;
1884 task.NumberofThreadsinGPGPUThreadGroup = task_dispatch.threads;
1885 task.ExecutionMask = task_dispatch.right_mask;
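      /* The task workgroup is flattened to a single dimension, so only the
       * local X size and X IDs are programmed.
       */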
1886 task.LocalXMaximum = task_dispatch.group_size - 1;
1887 task.EmitLocalIDX = true;
1888
1889 task.NumberofBarriers = task_prog_data->base.uses_barrier;
1890 task.SharedLocalMemorySize =
1891 intel_compute_slm_encode_size(GFX_VER, task_prog_data->base.base.total_shared);
1892 task.PreferredSLMAllocationSize =
1893 intel_compute_preferred_slm_calc_encode_size(devinfo,
1894 task_prog_data->base.base.total_shared,
1895 task_dispatch.group_size,
1896 task_dispatch.simd_size);
1897
1898 /*
1899 * 3DSTATE_TASK_SHADER_DATA.InlineData[0:1] will be used for the address
1900 * of a buffer holding the push constants and the descriptor set table,
1901 * and InlineData[2:7] will be used for the first few push constants.
1902 */
1903 task.EmitInlineParameter = true;
1904
1905 task.XP0Required = task_prog_data->uses_drawid;
1906 }
1907
1908 /* Recommended values from "Task and Mesh Distribution Programming". */
1909 anv_pipeline_emit(pipeline, final.task_redistrib,
1910 GENX(3DSTATE_TASK_REDISTRIB), redistrib) {
1911 redistrib.LocalBOTAccumulatorThreshold = MULTIPLIER_1;
1912 redistrib.SmallTaskThreshold = 1; /* 2^N */
1913 redistrib.TargetMeshBatchSize = devinfo->num_slices > 2 ? 3 : 5; /* 2^N */
1914 redistrib.TaskRedistributionLevel = TASKREDISTRIB_BOM;
1915 redistrib.TaskRedistributionMode = TASKREDISTRIB_RR_STRICT;
1916 }
1917 }
1918
1919 static void
1920 emit_mesh_state(struct anv_graphics_pipeline *pipeline)
1921 {
1922 assert(anv_pipeline_is_mesh(pipeline));
1923
1924 const struct anv_shader_bin *mesh_bin = pipeline->base.shaders[MESA_SHADER_MESH];
1925 const struct brw_mesh_prog_data *mesh_prog_data = get_mesh_prog_data(pipeline);
1926
1927 uint32_t mesh_control_dwords[GENX(3DSTATE_MESH_CONTROL_length)];
1928 anv_pipeline_emit_tmp(pipeline, mesh_control_dwords, GENX(3DSTATE_MESH_CONTROL), mc) {
1929 mc.MeshShaderEnable = true;
1930 mc.StatisticsEnable = true;
1931 mc.MaximumNumberofThreadGroups = 511;
1932 #if GFX_VER >= 20
1933 mc.VPandRTAIndexAutostripEnable = mesh_prog_data->autostrip_enable;
1934 #endif
1935 }
1936
1937 anv_pipeline_emit_merge(pipeline, final.mesh_control,
1938 mesh_control_dwords, GENX(3DSTATE_MESH_CONTROL), mc) {
1939 mc.ScratchSpaceBuffer =
1940 get_scratch_surf(&pipeline->base.base, MESA_SHADER_MESH, mesh_bin, false);
1941 }
1942 if (pipeline_needs_protected(&pipeline->base.base)) {
1943 anv_pipeline_emit_merge(pipeline, final.mesh_control_protected,
1944 mesh_control_dwords, GENX(3DSTATE_MESH_CONTROL), mc) {
1945 mc.ScratchSpaceBuffer =
1946 get_scratch_surf(&pipeline->base.base, MESA_SHADER_MESH, mesh_bin, true);
1947 }
1948 }
1949
1950 const struct intel_device_info *devinfo = pipeline->base.base.device->info;
1951 const struct intel_cs_dispatch_info mesh_dispatch =
1952 brw_cs_get_dispatch_info(devinfo, &mesh_prog_data->base, NULL);
1953
1954 const unsigned output_topology =
1955 mesh_prog_data->primitive_type == MESA_PRIM_POINTS ? OUTPUT_POINT :
1956 mesh_prog_data->primitive_type == MESA_PRIM_LINES ? OUTPUT_LINE :
1957 OUTPUT_TRI;
1958
1959 uint32_t index_format;
1960 switch (mesh_prog_data->index_format) {
1961 case BRW_INDEX_FORMAT_U32:
1962 index_format = INDEX_U32;
1963 break;
1964 case BRW_INDEX_FORMAT_U888X:
1965 index_format = INDEX_U888X;
1966 break;
1967 default:
1968 unreachable("invalid index format");
1969 }
1970
1971 anv_pipeline_emit(pipeline, final.mesh_shader,
1972 GENX(3DSTATE_MESH_SHADER), mesh) {
1973 mesh.KernelStartPointer = mesh_bin->kernel.offset;
1974 mesh.SIMDSize = mesh_dispatch.simd_size / 16;
1975 mesh.MessageSIMD = mesh.SIMDSize;
1976 mesh.NumberofThreadsinGPGPUThreadGroup = mesh_dispatch.threads;
1977 mesh.ExecutionMask = mesh_dispatch.right_mask;
1978 mesh.LocalXMaximum = mesh_dispatch.group_size - 1;
1979 mesh.EmitLocalIDX = true;
1980
1981 mesh.MaximumPrimitiveCount = MAX2(mesh_prog_data->map.max_primitives, 1) - 1;
1982 mesh.OutputTopology = output_topology;
1983 mesh.PerVertexDataPitch = mesh_prog_data->map.per_vertex_pitch_dw / 8;
1984 mesh.PerPrimitiveDataPresent = mesh_prog_data->map.per_primitive_pitch_dw > 0;
1985 mesh.PerPrimitiveDataPitch = mesh_prog_data->map.per_primitive_pitch_dw / 8;
1986 mesh.IndexFormat = index_format;
1987
1988 mesh.NumberofBarriers = mesh_prog_data->base.uses_barrier;
1989 mesh.SharedLocalMemorySize =
1990 intel_compute_slm_encode_size(GFX_VER, mesh_prog_data->base.base.total_shared);
1991 mesh.PreferredSLMAllocationSize =
1992 intel_compute_preferred_slm_calc_encode_size(devinfo,
1993 mesh_prog_data->base.base.total_shared,
1994 mesh_dispatch.group_size,
1995 mesh_dispatch.simd_size);
1996
1997 /*
1998 * 3DSTATE_MESH_SHADER_DATA.InlineData[0:1] will be used for the address
1999 * of a buffer holding the push constants and the descriptor set table,
2000 * and InlineData[2:7] will be used for the first few push constants.
2001 */
2002 mesh.EmitInlineParameter = true;
2003
2004 mesh.XP0Required = mesh_prog_data->uses_drawid;
2005 }
2006
2007 /* Recommended values from "Task and Mesh Distribution Programming". */
2008 anv_pipeline_emit(pipeline, final.mesh_distrib,
2009 GENX(3DSTATE_MESH_DISTRIB), distrib) {
2010 distrib.DistributionMode = MESH_RR_FREE;
2011 distrib.TaskDistributionBatchSize = devinfo->num_slices > 2 ? 4 : 9; /* 2^N thread groups */
2012 distrib.MeshDistributionBatchSize = devinfo->num_slices > 2 ? 3 : 3; /* 2^N thread groups */
2013 }
2014 }
2015 #endif
2016
2017 void
2018 genX(graphics_pipeline_emit)(struct anv_graphics_pipeline *pipeline,
2019 const struct vk_graphics_pipeline_state *state)
2020 {
2021 enum intel_urb_deref_block_size urb_deref_block_size;
2022 emit_urb_setup(pipeline, &urb_deref_block_size);
2023
2024 emit_rs_state(pipeline, state->ia, state->rs, state->ms, state->rp,
2025 urb_deref_block_size);
2026 emit_ms_state(pipeline, state->ms);
2027 compute_kill_pixel(pipeline, state->ms, state);
2028
2029 emit_3dstate_clip(pipeline, state->ia, state->vp, state->rs);
2030
2031 #if GFX_VER >= 12
2032 emit_3dstate_primitive_replication(pipeline, state->rp);
2033 #endif
2034
2035 #if GFX_VERx10 >= 125
2036 bool needs_instance_granularity =
2037 intel_needs_workaround(pipeline->base.base.device->info, 14019166699) &&
2038 (sbe_primitive_id_override(pipeline) ||
2039 geom_or_tess_prim_id_used(pipeline));
2040
2041 anv_pipeline_emit(pipeline, partial.vfg, GENX(3DSTATE_VFG), vfg) {
2042 /* If 3DSTATE_TE: TE Enable == 1 then RR_STRICT else RR_FREE */
2043 vfg.DistributionMode =
2044 anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL) ? RR_STRICT :
2045 RR_FREE;
2046 vfg.DistributionGranularity = needs_instance_granularity ?
2047 InstanceLevelGranularity : BatchLevelGranularity;
2048 #if INTEL_WA_14014851047_GFX_VER
2049 vfg.GranularityThresholdDisable =
2050 intel_needs_workaround(pipeline->base.base.device->info, 14014851047);
2051 #endif
2052 /* 192 vertices for TRILIST_ADJ */
2053 vfg.ListNBatchSizeScale = 0;
2054 /* Batch size of 384 vertices */
2055 vfg.List3BatchSizeScale = 2;
2056 /* Batch size of 128 vertices */
2057 vfg.List2BatchSizeScale = 1;
2058 /* Batch size of 128 vertices */
2059 vfg.List1BatchSizeScale = 2;
2060 /* Batch size of 256 vertices for STRIP topologies */
2061 vfg.StripBatchSizeScale = 3;
2062 /* 192 control points for PATCHLIST_3 */
2063 vfg.PatchBatchSizeScale = 1;
2064 /* 192 control points for PATCHLIST_3 */
2065 vfg.PatchBatchSizeMultiplier = 31;
2066 }
2067 #endif
2068
2069 emit_3dstate_vf_statistics(pipeline);
2070
2071 if (anv_pipeline_is_primitive(pipeline)) {
2072 emit_vertex_input(pipeline, state, state->vi);
2073
2074 emit_3dstate_vs(pipeline);
2075 emit_3dstate_hs_ds(pipeline, state->ts);
2076 emit_3dstate_te(pipeline);
2077 emit_3dstate_gs(pipeline);
2078
2079 emit_3dstate_streamout(pipeline, state->rs);
2080
2081 #if GFX_VERx10 >= 125
2082 const struct anv_device *device = pipeline->base.base.device;
2083 /* Disable Mesh. */
2084 if (device->vk.enabled_extensions.EXT_mesh_shader) {
2085 anv_pipeline_emit(pipeline, final.mesh_control,
2086 GENX(3DSTATE_MESH_CONTROL), zero);
2087 anv_pipeline_emit(pipeline, final.mesh_control_protected,
2088 GENX(3DSTATE_MESH_CONTROL), zero);
2089 anv_pipeline_emit(pipeline, final.mesh_shader,
2090 GENX(3DSTATE_MESH_SHADER), zero);
2091 anv_pipeline_emit(pipeline, final.mesh_distrib,
2092 GENX(3DSTATE_MESH_DISTRIB), zero);
2093 anv_pipeline_emit(pipeline, final.clip_mesh,
2094 GENX(3DSTATE_CLIP_MESH), zero);
2095 anv_pipeline_emit(pipeline, final.sbe_mesh,
2096 GENX(3DSTATE_SBE_MESH), zero);
2097 anv_pipeline_emit(pipeline, final.task_control,
2098 GENX(3DSTATE_TASK_CONTROL), zero);
2099 anv_pipeline_emit(pipeline, final.task_control_protected,
2100 GENX(3DSTATE_TASK_CONTROL), zero);
2101 anv_pipeline_emit(pipeline, final.task_shader,
2102 GENX(3DSTATE_TASK_SHADER), zero);
2103 anv_pipeline_emit(pipeline, final.task_redistrib,
2104 GENX(3DSTATE_TASK_REDISTRIB), zero);
2105 }
2106 #endif
2107 } else {
2108 assert(anv_pipeline_is_mesh(pipeline));
2109
2110 anv_pipeline_emit(pipeline, final.vf_sgvs, GENX(3DSTATE_VF_SGVS), sgvs);
2111 #if GFX_VER >= 11
2112 anv_pipeline_emit(pipeline, final.vf_sgvs_2, GENX(3DSTATE_VF_SGVS_2), sgvs);
2113 #endif
2114 anv_pipeline_emit(pipeline, final.vs, GENX(3DSTATE_VS), vs);
2115 anv_pipeline_emit(pipeline, final.hs, GENX(3DSTATE_HS), hs);
2116 anv_pipeline_emit(pipeline, final.ds, GENX(3DSTATE_DS), ds);
2117 anv_pipeline_emit(pipeline, partial.te, GENX(3DSTATE_TE), te);
2118 anv_pipeline_emit(pipeline, partial.gs, GENX(3DSTATE_GS), gs);
2119
2120 anv_pipeline_emit(pipeline, final.vs_protected, GENX(3DSTATE_VS), vs);
2121 anv_pipeline_emit(pipeline, final.hs_protected, GENX(3DSTATE_HS), hs);
2122 anv_pipeline_emit(pipeline, final.ds_protected, GENX(3DSTATE_DS), ds);
2123 anv_pipeline_emit(pipeline, partial.gs_protected, GENX(3DSTATE_GS), gs);
2124
2125 /* BSpec 46303 forbids both 3DSTATE_MESH_CONTROL.MeshShaderEnable
2126 * and 3DSTATE_STREAMOUT.SOFunctionEnable to be 1.
2127 */
2128 anv_pipeline_emit(pipeline, partial.so, GENX(3DSTATE_STREAMOUT), so);
2129
2130 #if GFX_VERx10 >= 125
2131 emit_task_state(pipeline);
2132 emit_mesh_state(pipeline);
2133 #endif
2134 }
2135
2136 emit_3dstate_sbe(pipeline);
2137 emit_3dstate_wm(pipeline, state->ia, state->rs,
2138 state->ms, state->cb, state->rp);
2139 emit_3dstate_ps(pipeline, state->ms, state->cb);
2140 emit_3dstate_ps_extra(pipeline, state->rs, state);
2141 }
2142
2143 #if GFX_VERx10 >= 125
2144
2145 void
2146 genX(compute_pipeline_emit)(struct anv_compute_pipeline *pipeline)
2147 {
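   /* On GFX_VERx10 >= 125 the interface descriptor is supplied with
    * COMPUTE_WALKER at dispatch time, so the only pipeline-level state to
    * bake here is the L3 configuration (whether SLM is needed).
    */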
2148 const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
2149 anv_pipeline_setup_l3_config(&pipeline->base, cs_prog_data->base.total_shared > 0);
2150 }
2151
2152 #else /* #if GFX_VERx10 >= 125 */
2153
2154 void
2155 genX(compute_pipeline_emit)(struct anv_compute_pipeline *pipeline)
2156 {
2157 struct anv_device *device = pipeline->base.device;
2158 const struct intel_device_info *devinfo = device->info;
2159 const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
2160
2161 anv_pipeline_setup_l3_config(&pipeline->base, cs_prog_data->base.total_shared > 0);
2162
2163 const struct intel_cs_dispatch_info dispatch =
2164 brw_cs_get_dispatch_info(devinfo, cs_prog_data, NULL);
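   /* CURBE space holds the per-thread push constants for every thread in
    * the group plus the cross-thread constants, rounded up to an even
    * number of registers.
    */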
2165 const uint32_t vfe_curbe_allocation =
2166 ALIGN(cs_prog_data->push.per_thread.regs * dispatch.threads +
2167 cs_prog_data->push.cross_thread.regs, 2);
2168
2169 const struct anv_shader_bin *cs_bin = pipeline->cs;
2170
2171 anv_batch_emit(&pipeline->base.batch, GENX(MEDIA_VFE_STATE), vfe) {
2172 vfe.StackSize = 0;
2173 vfe.MaximumNumberofThreads =
2174 devinfo->max_cs_threads * devinfo->subslice_total - 1;
2175 vfe.NumberofURBEntries = 2;
2176 #if GFX_VER < 11
2177 vfe.ResetGatewayTimer = true;
2178 #endif
2179 vfe.URBEntryAllocationSize = 2;
2180 vfe.CURBEAllocationSize = vfe_curbe_allocation;
2181
2182 if (cs_prog_data->base.total_scratch) {
2183 /* Broadwell's Per Thread Scratch Space is in the range [0, 11]
2184 * where 0 = 1k, 1 = 2k, 2 = 4k, ..., 11 = 2M.
2185 */
2186 vfe.PerThreadScratchSpace = ffs(cs_prog_data->base.total_scratch) - 11;
2187 vfe.ScratchSpaceBasePointer =
2188 get_scratch_address(&pipeline->base, MESA_SHADER_COMPUTE, cs_bin);
2189 }
2190 }
2191
2192 struct GENX(INTERFACE_DESCRIPTOR_DATA) desc = {
2193 .KernelStartPointer =
2194 cs_bin->kernel.offset +
2195 brw_cs_prog_data_prog_offset(cs_prog_data, dispatch.simd_size),
2196
2197 /* Wa_1606682166 */
2198 .SamplerCount = GFX_VER == 11 ? 0 : get_sampler_count(cs_bin),
2199 /* We add 1 because the CS indirect parameters buffer isn't accounted
2200 * for in bind_map.surface_count.
2201 *
2202 * Typically set to 0 to avoid prefetching on every thread dispatch.
2203 */
2204 .BindingTableEntryCount = devinfo->verx10 == 125 ?
2205 0 : 1 + MIN2(pipeline->cs->bind_map.surface_count, 30),
2206 .BarrierEnable = cs_prog_data->uses_barrier,
2207 .SharedLocalMemorySize =
2208 intel_compute_slm_encode_size(GFX_VER, cs_prog_data->base.total_shared),
2209
2210 .ConstantURBEntryReadOffset = 0,
2211 .ConstantURBEntryReadLength = cs_prog_data->push.per_thread.regs,
2212 .CrossThreadConstantDataReadLength =
2213 cs_prog_data->push.cross_thread.regs,
2214 #if GFX_VER >= 12
2215 /* TODO: Check if we are missing workarounds and enable mid-thread
2216 * preemption.
2217 *
2218 * We still have issues with mid-thread preemption (it was already
2219 * disabled by the kernel on gfx11, due to missing workarounds). It's
2220 * possible that we are just missing some workarounds, and could enable
2221 * it later, but for now let's disable it to fix a GPU hang in compute in
2222 * Car Chase (and possibly more).
2223 */
2224 .ThreadPreemptionDisable = true,
2225 #endif
2226
2227 .NumberofThreadsinGPGPUThreadGroup = dispatch.threads,
2228 };
2229 GENX(INTERFACE_DESCRIPTOR_DATA_pack)(NULL,
2230 pipeline->interface_descriptor_data,
2231 &desc);
2232 }
2233
2234 #endif /* #if GFX_VERx10 >= 125 */
2235
2236 #if GFX_VERx10 >= 125
2237
2238 void
2239 genX(ray_tracing_pipeline_emit)(struct anv_ray_tracing_pipeline *pipeline)
2240 {
2241 for (uint32_t i = 0; i < pipeline->group_count; i++) {
2242 struct anv_rt_shader_group *group = &pipeline->groups[i];
2243
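      /* Pack a hardware SBT handle for this group; the handle carries
       * references to the group's compiled shaders used at ray-tracing
       * dispatch time.
       */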
2244 switch (group->type) {
2245 case VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR: {
2246 struct GENX(RT_GENERAL_SBT_HANDLE) sh = {};
2247 sh.General = anv_shader_bin_get_bsr(group->general, 32);
2248 GENX(RT_GENERAL_SBT_HANDLE_pack)(NULL, group->handle, &sh);
2249 break;
2250 }
2251
2252 case VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_KHR: {
2253 struct GENX(RT_TRIANGLES_SBT_HANDLE) sh = {};
2254 if (group->closest_hit)
2255 sh.ClosestHit = anv_shader_bin_get_bsr(group->closest_hit, 32);
2256 if (group->any_hit)
2257 sh.AnyHit = anv_shader_bin_get_bsr(group->any_hit, 24);
2258 GENX(RT_TRIANGLES_SBT_HANDLE_pack)(NULL, group->handle, &sh);
2259 break;
2260 }
2261
2262 case VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_KHR: {
2263 struct GENX(RT_PROCEDURAL_SBT_HANDLE) sh = {};
2264 if (group->closest_hit)
2265 sh.ClosestHit = anv_shader_bin_get_bsr(group->closest_hit, 32);
2266 sh.Intersection = anv_shader_bin_get_bsr(group->intersection, 24);
2267 GENX(RT_PROCEDURAL_SBT_HANDLE_pack)(NULL, group->handle, &sh);
2268 break;
2269 }
2270
2271 default:
2272 unreachable("Invalid shader group type");
2273 }
2274 }
2275 }
2276
2277 #else
2278
2279 void
2280 genX(ray_tracing_pipeline_emit)(struct anv_ray_tracing_pipeline *pipeline)
2281 {
2282 unreachable("Ray tracing not supported");
2283 }
2284
2285 #endif /* GFX_VERx10 >= 125 */
2286