xref: /aosp_15_r20/external/mesa3d/src/intel/compiler/brw_fs_nir.cpp (revision 6104692788411f58d303aa86923a9ff6ecaded22)
1 /*
2  * Copyright © 2010 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include "brw_fs.h"
25 #include "brw_fs_builder.h"
26 #include "brw_nir.h"
27 #include "brw_eu.h"
28 #include "nir.h"
29 #include "nir_intrinsics.h"
30 #include "nir_search_helpers.h"
31 #include "dev/intel_debug.h"
32 #include "util/u_math.h"
33 #include "util/bitscan.h"
34 
35 #include <vector>
36 
37 using namespace brw;
38 
39 struct brw_fs_bind_info {
40    bool valid;
41    bool bindless;
42    unsigned block;
43    unsigned set;
44    unsigned binding;
45 };
46 
47 struct nir_to_brw_state {
48    fs_visitor &s;
49    const nir_shader *nir;
50    const intel_device_info *devinfo;
51    void *mem_ctx;
52 
53    /* Points to the end of the program.  Annotated with the current NIR
54     * instruction when applicable.
55     */
56    fs_builder bld;
57 
58    brw_reg *ssa_values;
59    fs_inst **resource_insts;
60    struct brw_fs_bind_info *ssa_bind_infos;
61    brw_reg *uniform_values;
62    brw_reg *system_values;
63 
64    bool annotate;
65 };
66 
67 static brw_reg get_nir_src(nir_to_brw_state &ntb, const nir_src &src);
68 static brw_reg get_nir_def(nir_to_brw_state &ntb, const nir_def &def);
69 static nir_component_mask_t get_nir_write_mask(const nir_def &def);
70 
71 static void fs_nir_emit_intrinsic(nir_to_brw_state &ntb, const fs_builder &bld, nir_intrinsic_instr *instr);
72 static brw_reg emit_samplepos_setup(nir_to_brw_state &ntb);
73 static brw_reg emit_sampleid_setup(nir_to_brw_state &ntb);
74 static brw_reg emit_samplemaskin_setup(nir_to_brw_state &ntb);
75 static brw_reg emit_shading_rate_setup(nir_to_brw_state &ntb);
76 
77 static void fs_nir_emit_impl(nir_to_brw_state &ntb, nir_function_impl *impl);
78 static void fs_nir_emit_cf_list(nir_to_brw_state &ntb, exec_list *list);
79 static void fs_nir_emit_if(nir_to_brw_state &ntb, nir_if *if_stmt);
80 static void fs_nir_emit_loop(nir_to_brw_state &ntb, nir_loop *loop);
81 static void fs_nir_emit_block(nir_to_brw_state &ntb, nir_block *block);
82 static void fs_nir_emit_instr(nir_to_brw_state &ntb, nir_instr *instr);
83 
84 static void fs_nir_emit_memory_access(nir_to_brw_state &ntb,
85                                       const fs_builder &bld,
86                                       nir_intrinsic_instr *instr);
87 
88 static bool
brw_texture_offset(const nir_tex_instr * tex,unsigned src,uint32_t * offset_bits_out)89 brw_texture_offset(const nir_tex_instr *tex, unsigned src,
90                    uint32_t *offset_bits_out)
91 {
92    if (!nir_src_is_const(tex->src[src].src))
93       return false;
94 
95    const unsigned num_components = nir_tex_instr_src_size(tex, src);
96 
97    /* Combine all three offsets into a single unsigned dword:
98     *
99     *    bits 11:8 - U Offset (X component)
100     *    bits  7:4 - V Offset (Y component)
101     *    bits  3:0 - R Offset (Z component)
102     */
103    uint32_t offset_bits = 0;
104    for (unsigned i = 0; i < num_components; i++) {
105       int offset = nir_src_comp_as_int(tex->src[src].src, i);
106 
107       /* offset out of bounds; caller will handle it. */
108       if (offset > 7 || offset < -8)
109          return false;
110 
111       const unsigned shift = 4 * (2 - i);
112       offset_bits |= (offset & 0xF) << shift;
113    }
114 
115    *offset_bits_out = offset_bits;
116 
117    return true;
118 }
119 
120 static brw_reg
setup_imm_b(const fs_builder & bld,int8_t v)121 setup_imm_b(const fs_builder &bld, int8_t v)
122 {
123    const brw_reg tmp = bld.vgrf(BRW_TYPE_B);
124    bld.MOV(tmp, brw_imm_w(v));
125    return tmp;
126 }
127 
128 static void
fs_nir_setup_outputs(nir_to_brw_state & ntb)129 fs_nir_setup_outputs(nir_to_brw_state &ntb)
130 {
131    fs_visitor &s = ntb.s;
132 
133    if (s.stage == MESA_SHADER_TESS_CTRL ||
134        s.stage == MESA_SHADER_TASK ||
135        s.stage == MESA_SHADER_MESH ||
136        s.stage == MESA_SHADER_FRAGMENT ||
137        s.stage == MESA_SHADER_COMPUTE)
138       return;
139 
140    unsigned vec4s[VARYING_SLOT_TESS_MAX] = { 0, };
141 
142    /* Calculate the size of output registers in a separate pass, before
143     * allocating them.  With ARB_enhanced_layouts, multiple output variables
144     * may occupy the same slot, but have different type sizes.
145     */
146    nir_foreach_shader_out_variable(var, s.nir) {
147       const int loc = var->data.driver_location;
148       const unsigned var_vec4s = nir_variable_count_slots(var, var->type);
149       vec4s[loc] = MAX2(vec4s[loc], var_vec4s);
150    }
151 
152    for (unsigned loc = 0; loc < ARRAY_SIZE(vec4s);) {
153       if (vec4s[loc] == 0) {
154          loc++;
155          continue;
156       }
157 
158       unsigned reg_size = vec4s[loc];
159 
160       /* Check if there are any ranges that start within this range and extend
161        * past it. If so, include them in this allocation.
162        */
163       for (unsigned i = 1; i < reg_size; i++) {
164          assert(i + loc < ARRAY_SIZE(vec4s));
165          reg_size = MAX2(vec4s[i + loc] + i, reg_size);
166       }
167 
168       brw_reg reg = ntb.bld.vgrf(BRW_TYPE_F, 4 * reg_size);
169       for (unsigned i = 0; i < reg_size; i++) {
170          assert(loc + i < ARRAY_SIZE(s.outputs));
171          s.outputs[loc + i] = offset(reg, ntb.bld, 4 * i);
172       }
173 
174       loc += reg_size;
175    }
176 }
177 
178 static void
fs_nir_setup_uniforms(fs_visitor & s)179 fs_nir_setup_uniforms(fs_visitor &s)
180 {
181    const intel_device_info *devinfo = s.devinfo;
182 
183    /* Only the first compile gets to set up uniforms. */
184    if (s.push_constant_loc)
185       return;
186 
187    s.uniforms = s.nir->num_uniforms / 4;
188 
189    if (gl_shader_stage_is_compute(s.stage) && devinfo->verx10 < 125) {
190       /* Add uniforms for builtins after regular NIR uniforms. */
191       assert(s.uniforms == s.prog_data->nr_params);
192 
193       /* Subgroup ID must be the last uniform on the list.  This will make
194        * easier later to split between cross thread and per thread
195        * uniforms.
196        */
197       uint32_t *param = brw_stage_prog_data_add_params(s.prog_data, 1);
198       *param = BRW_PARAM_BUILTIN_SUBGROUP_ID;
199       s.uniforms++;
200    }
201 }
202 
203 static brw_reg
emit_work_group_id_setup(nir_to_brw_state & ntb)204 emit_work_group_id_setup(nir_to_brw_state &ntb)
205 {
206    fs_visitor &s = ntb.s;
207    const fs_builder &bld = ntb.bld;
208 
209    assert(gl_shader_stage_is_compute(s.stage));
210 
211    brw_reg id = bld.vgrf(BRW_TYPE_UD, 3);
212 
213    struct brw_reg r0_1(retype(brw_vec1_grf(0, 1), BRW_TYPE_UD));
214    bld.MOV(id, r0_1);
215 
216    struct brw_reg r0_6(retype(brw_vec1_grf(0, 6), BRW_TYPE_UD));
217    struct brw_reg r0_7(retype(brw_vec1_grf(0, 7), BRW_TYPE_UD));
218    bld.MOV(offset(id, bld, 1), r0_6);
219    bld.MOV(offset(id, bld, 2), r0_7);
220 
221    return id;
222 }
223 
224 static bool
emit_system_values_block(nir_to_brw_state & ntb,nir_block * block)225 emit_system_values_block(nir_to_brw_state &ntb, nir_block *block)
226 {
227    fs_visitor &s = ntb.s;
228    brw_reg *reg;
229 
230    nir_foreach_instr(instr, block) {
231       if (instr->type != nir_instr_type_intrinsic)
232          continue;
233 
234       nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
235       switch (intrin->intrinsic) {
236       case nir_intrinsic_load_vertex_id:
237       case nir_intrinsic_load_base_vertex:
238          unreachable("should be lowered by nir_lower_system_values().");
239 
240       case nir_intrinsic_load_vertex_id_zero_base:
241       case nir_intrinsic_load_is_indexed_draw:
242       case nir_intrinsic_load_first_vertex:
243       case nir_intrinsic_load_instance_id:
244       case nir_intrinsic_load_base_instance:
245          unreachable("should be lowered by brw_nir_lower_vs_inputs().");
246          break;
247 
248       case nir_intrinsic_load_draw_id:
249          /* For Task/Mesh, draw_id will be handled later in
250           * nir_emit_mesh_task_intrinsic().
251           */
252          if (!gl_shader_stage_is_mesh(s.stage))
253             unreachable("should be lowered by brw_nir_lower_vs_inputs().");
254          break;
255 
256       case nir_intrinsic_load_invocation_id:
257          if (s.stage == MESA_SHADER_TESS_CTRL)
258             break;
259          assert(s.stage == MESA_SHADER_GEOMETRY);
260          reg = &ntb.system_values[SYSTEM_VALUE_INVOCATION_ID];
261          if (reg->file == BAD_FILE) {
262             *reg = s.gs_payload().instance_id;
263          }
264          break;
265 
266       case nir_intrinsic_load_sample_pos:
267       case nir_intrinsic_load_sample_pos_or_center:
268          assert(s.stage == MESA_SHADER_FRAGMENT);
269          reg = &ntb.system_values[SYSTEM_VALUE_SAMPLE_POS];
270          if (reg->file == BAD_FILE)
271             *reg = emit_samplepos_setup(ntb);
272          break;
273 
274       case nir_intrinsic_load_sample_id:
275          assert(s.stage == MESA_SHADER_FRAGMENT);
276          reg = &ntb.system_values[SYSTEM_VALUE_SAMPLE_ID];
277          if (reg->file == BAD_FILE)
278             *reg = emit_sampleid_setup(ntb);
279          break;
280 
281       case nir_intrinsic_load_sample_mask_in:
282          assert(s.stage == MESA_SHADER_FRAGMENT);
283          reg = &ntb.system_values[SYSTEM_VALUE_SAMPLE_MASK_IN];
284          if (reg->file == BAD_FILE)
285             *reg = emit_samplemaskin_setup(ntb);
286          break;
287 
288       case nir_intrinsic_load_workgroup_id:
289          if (gl_shader_stage_is_mesh(s.stage))
290             unreachable("should be lowered by nir_lower_compute_system_values().");
291          assert(gl_shader_stage_is_compute(s.stage));
292          reg = &ntb.system_values[SYSTEM_VALUE_WORKGROUP_ID];
293          if (reg->file == BAD_FILE)
294             *reg = emit_work_group_id_setup(ntb);
295          break;
296 
297       case nir_intrinsic_load_helper_invocation:
298          assert(s.stage == MESA_SHADER_FRAGMENT);
299          reg = &ntb.system_values[SYSTEM_VALUE_HELPER_INVOCATION];
300          if (reg->file == BAD_FILE) {
301             const fs_builder abld =
302                ntb.bld.annotate("gl_HelperInvocation");
303 
304             /* On Gfx6+ (gl_HelperInvocation is only exposed on Gfx7+) the
305              * pixel mask is in g1.7 of the thread payload.
306              *
307              * We move the per-channel pixel enable bit to the low bit of each
308              * channel by shifting the byte containing the pixel mask by the
309              * vector immediate 0x76543210UV.
310              *
311              * The region of <1,8,0> reads only 1 byte (the pixel masks for
312              * subspans 0 and 1) in SIMD8 and an additional byte (the pixel
313              * masks for 2 and 3) in SIMD16.
314              */
315             brw_reg shifted = abld.vgrf(BRW_TYPE_UW);
316 
317             for (unsigned i = 0; i < DIV_ROUND_UP(s.dispatch_width, 16); i++) {
318                const fs_builder hbld = abld.group(MIN2(16, s.dispatch_width), i);
319                /* According to the "PS Thread Payload for Normal
320                 * Dispatch" pages on the BSpec, the dispatch mask is
321                 * stored in R0.15/R1.15 on gfx20+ and in R1.7/R2.7 on
322                 * gfx6+.
323                 */
324                const struct brw_reg reg = s.devinfo->ver >= 20 ?
325                   xe2_vec1_grf(i, 15) : brw_vec1_grf(i + 1, 7);
326                hbld.SHR(offset(shifted, hbld, i),
327                         stride(retype(reg, BRW_TYPE_UB), 1, 8, 0),
328                         brw_imm_v(0x76543210));
329             }
330 
331             /* A set bit in the pixel mask means the channel is enabled, but
332              * that is the opposite of gl_HelperInvocation so we need to invert
333              * the mask.
334              *
335              * The negate source-modifier bit of logical instructions on Gfx8+
336              * performs 1's complement negation, so we can use that instead of
337              * a NOT instruction.
338              */
339             brw_reg inverted = negate(shifted);
340 
341             /* We then resolve the 0/1 result to 0/~0 boolean values by ANDing
342              * with 1 and negating.
343              */
344             brw_reg anded = abld.vgrf(BRW_TYPE_UD);
345             abld.AND(anded, inverted, brw_imm_uw(1));
346 
347             *reg = abld.MOV(negate(retype(anded, BRW_TYPE_D)));
348          }
349          break;
350 
351       case nir_intrinsic_load_frag_shading_rate:
352          reg = &ntb.system_values[SYSTEM_VALUE_FRAG_SHADING_RATE];
353          if (reg->file == BAD_FILE)
354             *reg = emit_shading_rate_setup(ntb);
355          break;
356 
357       default:
358          break;
359       }
360    }
361 
362    return true;
363 }
364 
365 static void
fs_nir_emit_system_values(nir_to_brw_state & ntb)366 fs_nir_emit_system_values(nir_to_brw_state &ntb)
367 {
368    fs_visitor &s = ntb.s;
369 
370    ntb.system_values = ralloc_array(ntb.mem_ctx, brw_reg, SYSTEM_VALUE_MAX);
371    for (unsigned i = 0; i < SYSTEM_VALUE_MAX; i++) {
372       ntb.system_values[i] = brw_reg();
373    }
374 
375    nir_function_impl *impl = nir_shader_get_entrypoint((nir_shader *)s.nir);
376    nir_foreach_block(block, impl)
377       emit_system_values_block(ntb, block);
378 }
379 
380 static void
fs_nir_emit_impl(nir_to_brw_state & ntb,nir_function_impl * impl)381 fs_nir_emit_impl(nir_to_brw_state &ntb, nir_function_impl *impl)
382 {
383    ntb.ssa_values = rzalloc_array(ntb.mem_ctx, brw_reg, impl->ssa_alloc);
384    ntb.resource_insts = rzalloc_array(ntb.mem_ctx, fs_inst *, impl->ssa_alloc);
385    ntb.ssa_bind_infos = rzalloc_array(ntb.mem_ctx, struct brw_fs_bind_info, impl->ssa_alloc);
386    ntb.uniform_values = rzalloc_array(ntb.mem_ctx, brw_reg, impl->ssa_alloc);
387 
388    fs_nir_emit_cf_list(ntb, &impl->body);
389 }
390 
391 static void
fs_nir_emit_cf_list(nir_to_brw_state & ntb,exec_list * list)392 fs_nir_emit_cf_list(nir_to_brw_state &ntb, exec_list *list)
393 {
394    exec_list_validate(list);
395    foreach_list_typed(nir_cf_node, node, node, list) {
396       switch (node->type) {
397       case nir_cf_node_if:
398          fs_nir_emit_if(ntb, nir_cf_node_as_if(node));
399          break;
400 
401       case nir_cf_node_loop:
402          fs_nir_emit_loop(ntb, nir_cf_node_as_loop(node));
403          break;
404 
405       case nir_cf_node_block:
406          fs_nir_emit_block(ntb, nir_cf_node_as_block(node));
407          break;
408 
409       default:
410          unreachable("Invalid CFG node block");
411       }
412    }
413 }
414 
415 static void
fs_nir_emit_if(nir_to_brw_state & ntb,nir_if * if_stmt)416 fs_nir_emit_if(nir_to_brw_state &ntb, nir_if *if_stmt)
417 {
418    const fs_builder &bld = ntb.bld;
419 
420    bool invert;
421    brw_reg cond_reg;
422 
423    /* If the condition has the form !other_condition, use other_condition as
424     * the source, but invert the predicate on the if instruction.
425     */
426    nir_alu_instr *cond = nir_src_as_alu_instr(if_stmt->condition);
427    if (cond != NULL && cond->op == nir_op_inot) {
428       invert = true;
429       cond_reg = get_nir_src(ntb, cond->src[0].src);
430       cond_reg = offset(cond_reg, bld, cond->src[0].swizzle[0]);
431    } else {
432       invert = false;
433       cond_reg = get_nir_src(ntb, if_stmt->condition);
434    }
435 
436    /* first, put the condition into f0 */
437    fs_inst *inst = bld.MOV(bld.null_reg_d(),
438                            retype(cond_reg, BRW_TYPE_D));
439    inst->conditional_mod = BRW_CONDITIONAL_NZ;
440 
441    fs_inst *iff = bld.IF(BRW_PREDICATE_NORMAL);
442    iff->predicate_inverse = invert;
443 
444    fs_nir_emit_cf_list(ntb, &if_stmt->then_list);
445 
446    if (!nir_cf_list_is_empty_block(&if_stmt->else_list)) {
447       bld.emit(BRW_OPCODE_ELSE);
448       fs_nir_emit_cf_list(ntb, &if_stmt->else_list);
449    }
450 
451    fs_inst *endif = bld.emit(BRW_OPCODE_ENDIF);
452 
453    /* Peephole: replace IF-JUMP-ENDIF with predicated jump */
454    if (endif->prev->prev == iff) {
455       fs_inst *jump = (fs_inst *) endif->prev;
456       if (jump->predicate == BRW_PREDICATE_NONE &&
457           (jump->opcode == BRW_OPCODE_BREAK ||
458            jump->opcode == BRW_OPCODE_CONTINUE)) {
459          jump->predicate = iff->predicate;
460          jump->predicate_inverse = iff->predicate_inverse;
461          iff->exec_node::remove();
462          endif->exec_node::remove();
463       }
464    }
465 }
466 
467 static void
fs_nir_emit_loop(nir_to_brw_state & ntb,nir_loop * loop)468 fs_nir_emit_loop(nir_to_brw_state &ntb, nir_loop *loop)
469 {
470    const fs_builder &bld = ntb.bld;
471 
472    assert(!nir_loop_has_continue_construct(loop));
473    bld.emit(BRW_OPCODE_DO);
474 
475    fs_nir_emit_cf_list(ntb, &loop->body);
476 
477    fs_inst *peep_while = bld.emit(BRW_OPCODE_WHILE);
478 
479    /* Peephole: replace (+f0) break; while with (-f0) while */
480    fs_inst *peep_break = (fs_inst *) peep_while->prev;
481 
482    if (peep_break->opcode == BRW_OPCODE_BREAK &&
483        peep_break->predicate != BRW_PREDICATE_NONE) {
484       peep_while->predicate = peep_break->predicate;
485       peep_while->predicate_inverse = !peep_break->predicate_inverse;
486       peep_break->exec_node::remove();
487    }
488 }
489 
490 static void
fs_nir_emit_block(nir_to_brw_state & ntb,nir_block * block)491 fs_nir_emit_block(nir_to_brw_state &ntb, nir_block *block)
492 {
493    fs_builder bld = ntb.bld;
494 
495    nir_foreach_instr(instr, block) {
496       fs_nir_emit_instr(ntb, instr);
497    }
498 
499    ntb.bld = bld;
500 }
501 
502 /**
503  * Recognizes a parent instruction of nir_op_extract_* and changes the type to
504  * match instr.
505  */
506 static bool
optimize_extract_to_float(nir_to_brw_state & ntb,nir_alu_instr * instr,const brw_reg & result)507 optimize_extract_to_float(nir_to_brw_state &ntb, nir_alu_instr *instr,
508                           const brw_reg &result)
509 {
510    const intel_device_info *devinfo = ntb.devinfo;
511    const fs_builder &bld = ntb.bld;
512 
513    /* No fast path for f16 (yet) or f64. */
514    assert(instr->op == nir_op_i2f32 || instr->op == nir_op_u2f32);
515 
516    if (!instr->src[0].src.ssa->parent_instr)
517       return false;
518 
519    if (instr->src[0].src.ssa->parent_instr->type != nir_instr_type_alu)
520       return false;
521 
522    nir_alu_instr *src0 =
523       nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);
524 
525    unsigned bytes;
526    bool is_signed;
527 
528    switch (src0->op) {
529    case nir_op_extract_u8:
530    case nir_op_extract_u16:
531       bytes = src0->op == nir_op_extract_u8 ? 1 : 2;
532 
533       /* i2f(extract_u8(a, b)) and u2f(extract_u8(a, b)) produce the same
534        * result. Ditto for extract_u16.
535        */
536       is_signed = false;
537       break;
538 
539    case nir_op_extract_i8:
540    case nir_op_extract_i16:
541       bytes = src0->op == nir_op_extract_i8 ? 1 : 2;
542 
543       /* The fast path can't handle u2f(extract_i8(a, b)) because the implicit
544        * sign extension of the extract_i8 is lost. For example,
545        * u2f(extract_i8(0x0000ff00, 1)) should produce 4294967295.0, but a
546        * fast path could either give 255.0 (by implementing the fast path as
547        * u2f(extract_u8(x))) or -1.0 (by implementing the fast path as
548        * i2f(extract_i8(x))). At one point in time, we incorrectly implemented
549        * the former.
550        */
551       if (instr->op != nir_op_i2f32)
552          return false;
553 
554       is_signed = true;
555       break;
556 
557    default:
558       return false;
559    }
560 
561    unsigned element = nir_src_as_uint(src0->src[1].src);
562 
563    /* Element type to extract.*/
564    const brw_reg_type type = brw_int_type(bytes, is_signed);
565 
566    brw_reg op0 = get_nir_src(ntb, src0->src[0].src);
567    op0.type = brw_type_for_nir_type(devinfo,
568       (nir_alu_type)(nir_op_infos[src0->op].input_types[0] |
569                      nir_src_bit_size(src0->src[0].src)));
570    op0 = offset(op0, bld, src0->src[0].swizzle[0]);
571 
572    /* Bspec "Register Region Restrictions" for Xe says:
573     *
574     *    "In case of all float point data types used in destination
575     *
576     *    1. Register Regioning patterns where register data bit location of
577     *       the LSB of the channels are changed between source and destination
578     *       are not supported on Src0 and Src1 except for broadcast of a
579     *       scalar."
580     *
581     * This restriction is enfored in brw_fs_lower_regioning.  There is no
582     * reason to generate an optimized instruction that brw_fs_lower_regioning
583     * will have to break up later.
584     */
585    if (devinfo->verx10 >= 125 && element != 0 && !is_uniform(op0))
586       return false;
587 
588    bld.MOV(result, subscript(op0, type, element));
589    return true;
590 }
591 
592 static bool
optimize_frontfacing_ternary(nir_to_brw_state & ntb,nir_alu_instr * instr,const brw_reg & result)593 optimize_frontfacing_ternary(nir_to_brw_state &ntb,
594                              nir_alu_instr *instr,
595                              const brw_reg &result)
596 {
597    const intel_device_info *devinfo = ntb.devinfo;
598    fs_visitor &s = ntb.s;
599 
600    nir_intrinsic_instr *src0 = nir_src_as_intrinsic(instr->src[0].src);
601    if (src0 == NULL || src0->intrinsic != nir_intrinsic_load_front_face)
602       return false;
603 
604    if (!nir_src_is_const(instr->src[1].src) ||
605        !nir_src_is_const(instr->src[2].src))
606       return false;
607 
608    const float value1 = nir_src_as_float(instr->src[1].src);
609    const float value2 = nir_src_as_float(instr->src[2].src);
610    if (fabsf(value1) != 1.0f || fabsf(value2) != 1.0f)
611       return false;
612 
613    /* nir_opt_algebraic should have gotten rid of bcsel(b, a, a) */
614    assert(value1 == -value2);
615 
616    brw_reg tmp = ntb.bld.vgrf(BRW_TYPE_D);
617 
618    if (devinfo->ver >= 20) {
619       /* Gfx20+ has separate back-facing bits for each pair of
620        * subspans in order to support multiple polygons, so we need to
621        * use a <1;8,0> region in order to select the correct word for
622        * each channel.  Unfortunately they're no longer aligned to the
623        * sign bit of a 16-bit word, so a left shift is necessary.
624        */
625       brw_reg ff = ntb.bld.vgrf(BRW_TYPE_UW);
626 
627       for (unsigned i = 0; i < DIV_ROUND_UP(s.dispatch_width, 16); i++) {
628          const fs_builder hbld = ntb.bld.group(16, i);
629          const struct brw_reg gi_uw = retype(xe2_vec1_grf(i, 9),
630                                              BRW_TYPE_UW);
631          hbld.SHL(offset(ff, hbld, i), stride(gi_uw, 1, 8, 0), brw_imm_ud(4));
632       }
633 
634       if (value1 == -1.0f)
635          ff.negate = true;
636 
637       ntb.bld.OR(subscript(tmp, BRW_TYPE_UW, 1), ff,
638                   brw_imm_uw(0x3f80));
639 
640    } else if (devinfo->ver >= 12 && s.max_polygons == 2) {
641       /* According to the BSpec "PS Thread Payload for Normal
642        * Dispatch", the front/back facing interpolation bit is stored
643        * as bit 15 of either the R1.1 or R1.6 poly info field, for the
644        * first and second polygons respectively in multipolygon PS
645        * dispatch mode.
646        */
647       assert(s.dispatch_width == 16);
648 
649       for (unsigned i = 0; i < s.max_polygons; i++) {
650          const fs_builder hbld = ntb.bld.group(8, i);
651          struct brw_reg g1 = retype(brw_vec1_grf(1, 1 + 5 * i),
652                                     BRW_TYPE_UW);
653 
654          if (value1 == -1.0f)
655             g1.negate = true;
656 
657          hbld.OR(subscript(offset(tmp, hbld, i), BRW_TYPE_UW, 1),
658                  g1, brw_imm_uw(0x3f80));
659       }
660 
661    } else if (devinfo->ver >= 12) {
662       /* Bit 15 of g1.1 is 0 if the polygon is front facing. */
663       brw_reg g1 = brw_reg(retype(brw_vec1_grf(1, 1), BRW_TYPE_W));
664 
665       /* For (gl_FrontFacing ? 1.0 : -1.0), emit:
666        *
667        *    or(8)  tmp.1<2>W  g1.1<0,1,0>W  0x00003f80W
668        *    and(8) dst<1>D    tmp<8,8,1>D   0xbf800000D
669        *
670        * and negate g1.1<0,1,0>W for (gl_FrontFacing ? -1.0 : 1.0).
671        */
672       if (value1 == -1.0f)
673          g1.negate = true;
674 
675       ntb.bld.OR(subscript(tmp, BRW_TYPE_W, 1),
676                   g1, brw_imm_uw(0x3f80));
677    } else {
678       /* Bit 15 of g0.0 is 0 if the polygon is front facing. */
679       brw_reg g0 = brw_reg(retype(brw_vec1_grf(0, 0), BRW_TYPE_W));
680 
681       /* For (gl_FrontFacing ? 1.0 : -1.0), emit:
682        *
683        *    or(8)  tmp.1<2>W  g0.0<0,1,0>W  0x00003f80W
684        *    and(8) dst<1>D    tmp<8,8,1>D   0xbf800000D
685        *
686        * and negate g0.0<0,1,0>W for (gl_FrontFacing ? -1.0 : 1.0).
687        *
688        * This negation looks like it's safe in practice, because bits 0:4 will
689        * surely be TRIANGLES
690        */
691 
692       if (value1 == -1.0f) {
693          g0.negate = true;
694       }
695 
696       ntb.bld.OR(subscript(tmp, BRW_TYPE_W, 1),
697                   g0, brw_imm_uw(0x3f80));
698    }
699    ntb.bld.AND(retype(result, BRW_TYPE_D), tmp, brw_imm_d(0xbf800000));
700 
701    return true;
702 }
703 
704 static brw_rnd_mode
brw_rnd_mode_from_nir_op(const nir_op op)705 brw_rnd_mode_from_nir_op (const nir_op op) {
706    switch (op) {
707    case nir_op_f2f16_rtz:
708       return BRW_RND_MODE_RTZ;
709    case nir_op_f2f16_rtne:
710       return BRW_RND_MODE_RTNE;
711    default:
712       unreachable("Operation doesn't support rounding mode");
713    }
714 }
715 
716 static brw_rnd_mode
brw_rnd_mode_from_execution_mode(unsigned execution_mode)717 brw_rnd_mode_from_execution_mode(unsigned execution_mode)
718 {
719    if (nir_has_any_rounding_mode_rtne(execution_mode))
720       return BRW_RND_MODE_RTNE;
721    if (nir_has_any_rounding_mode_rtz(execution_mode))
722       return BRW_RND_MODE_RTZ;
723    return BRW_RND_MODE_UNSPECIFIED;
724 }
725 
726 static brw_reg
prepare_alu_destination_and_sources(nir_to_brw_state & ntb,const fs_builder & bld,nir_alu_instr * instr,brw_reg * op,bool need_dest)727 prepare_alu_destination_and_sources(nir_to_brw_state &ntb,
728                                     const fs_builder &bld,
729                                     nir_alu_instr *instr,
730                                     brw_reg *op,
731                                     bool need_dest)
732 {
733    const intel_device_info *devinfo = ntb.devinfo;
734 
735    brw_reg result =
736       need_dest ? get_nir_def(ntb, instr->def) : bld.null_reg_ud();
737 
738    result.type = brw_type_for_nir_type(devinfo,
739       (nir_alu_type)(nir_op_infos[instr->op].output_type |
740                      instr->def.bit_size));
741 
742    for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
743       op[i] = get_nir_src(ntb, instr->src[i].src);
744       op[i].type = brw_type_for_nir_type(devinfo,
745          (nir_alu_type)(nir_op_infos[instr->op].input_types[i] |
746                         nir_src_bit_size(instr->src[i].src)));
747    }
748 
749    /* Move and vecN instrutions may still be vectored.  Return the raw,
750     * vectored source and destination so that fs_visitor::nir_emit_alu can
751     * handle it.  Other callers should not have to handle these kinds of
752     * instructions.
753     */
754    switch (instr->op) {
755    case nir_op_mov:
756    case nir_op_vec2:
757    case nir_op_vec3:
758    case nir_op_vec4:
759    case nir_op_vec8:
760    case nir_op_vec16:
761       return result;
762    default:
763       break;
764    }
765 
766    /* At this point, we have dealt with any instruction that operates on
767     * more than a single channel.  Therefore, we can just adjust the source
768     * and destination registers for that channel and emit the instruction.
769     */
770    unsigned channel = 0;
771    if (nir_op_infos[instr->op].output_size == 0) {
772       /* Since NIR is doing the scalarizing for us, we should only ever see
773        * vectorized operations with a single channel.
774        */
775       nir_component_mask_t write_mask = get_nir_write_mask(instr->def);
776       assert(util_bitcount(write_mask) == 1);
777       channel = ffs(write_mask) - 1;
778 
779       result = offset(result, bld, channel);
780    }
781 
782    for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
783       assert(nir_op_infos[instr->op].input_sizes[i] < 2);
784       op[i] = offset(op[i], bld, instr->src[i].swizzle[channel]);
785    }
786 
787    return result;
788 }
789 
790 static brw_reg
resolve_source_modifiers(const fs_builder & bld,const brw_reg & src)791 resolve_source_modifiers(const fs_builder &bld, const brw_reg &src)
792 {
793    return (src.abs || src.negate) ? bld.MOV(src) : src;
794 }
795 
796 static void
resolve_inot_sources(nir_to_brw_state & ntb,const fs_builder & bld,nir_alu_instr * instr,brw_reg * op)797 resolve_inot_sources(nir_to_brw_state &ntb, const fs_builder &bld, nir_alu_instr *instr,
798                      brw_reg *op)
799 {
800    for (unsigned i = 0; i < 2; i++) {
801       nir_alu_instr *inot_instr = nir_src_as_alu_instr(instr->src[i].src);
802 
803       if (inot_instr != NULL && inot_instr->op == nir_op_inot) {
804          /* The source of the inot is now the source of instr. */
805          prepare_alu_destination_and_sources(ntb, bld, inot_instr, &op[i], false);
806 
807          assert(!op[i].negate);
808          op[i].negate = true;
809       } else {
810          op[i] = resolve_source_modifiers(bld, op[i]);
811       }
812    }
813 }
814 
815 static bool
try_emit_b2fi_of_inot(nir_to_brw_state & ntb,const fs_builder & bld,brw_reg result,nir_alu_instr * instr)816 try_emit_b2fi_of_inot(nir_to_brw_state &ntb, const fs_builder &bld,
817                       brw_reg result,
818                       nir_alu_instr *instr)
819 {
820    const intel_device_info *devinfo = bld.shader->devinfo;
821 
822    if (devinfo->verx10 >= 125)
823       return false;
824 
825    nir_alu_instr *inot_instr = nir_src_as_alu_instr(instr->src[0].src);
826 
827    if (inot_instr == NULL || inot_instr->op != nir_op_inot)
828       return false;
829 
830    /* HF is also possible as a destination on BDW+.  For nir_op_b2i, the set
831     * of valid size-changing combinations is a bit more complex.
832     *
833     * The source restriction is just because I was lazy about generating the
834     * constant below.
835     */
836    if (instr->def.bit_size != 32 ||
837        nir_src_bit_size(inot_instr->src[0].src) != 32)
838       return false;
839 
840    /* b2[fi](inot(a)) maps a=0 => 1, a=-1 => 0.  Since a can only be 0 or -1,
841     * this is float(1 + a).
842     */
843    brw_reg op;
844 
845    prepare_alu_destination_and_sources(ntb, bld, inot_instr, &op, false);
846 
847    /* Ignore the saturate modifier, if there is one.  The result of the
848     * arithmetic can only be 0 or 1, so the clamping will do nothing anyway.
849     */
850    bld.ADD(result, op, brw_imm_d(1));
851 
852    return true;
853 }
854 
855 static bool
is_const_zero(const nir_src & src)856 is_const_zero(const nir_src &src)
857 {
858    return nir_src_is_const(src) && nir_src_as_int(src) == 0;
859 }
860 
861 static void
fs_nir_emit_alu(nir_to_brw_state & ntb,nir_alu_instr * instr,bool need_dest)862 fs_nir_emit_alu(nir_to_brw_state &ntb, nir_alu_instr *instr,
863                 bool need_dest)
864 {
865    const intel_device_info *devinfo = ntb.devinfo;
866    const fs_builder &bld = ntb.bld;
867 
868    fs_inst *inst;
869    unsigned execution_mode =
870       bld.shader->nir->info.float_controls_execution_mode;
871 
872    brw_reg op[NIR_MAX_VEC_COMPONENTS];
873    brw_reg result = prepare_alu_destination_and_sources(ntb, bld, instr, op, need_dest);
874 
875 #ifndef NDEBUG
876    /* Everything except raw moves, some type conversions, iabs, and ineg
877     * should have 8-bit sources lowered by nir_lower_bit_size in
878     * brw_preprocess_nir or by brw_nir_lower_conversions in
879     * brw_postprocess_nir.
880     */
881    switch (instr->op) {
882    case nir_op_mov:
883    case nir_op_vec2:
884    case nir_op_vec3:
885    case nir_op_vec4:
886    case nir_op_vec8:
887    case nir_op_vec16:
888    case nir_op_i2f16:
889    case nir_op_i2f32:
890    case nir_op_i2i16:
891    case nir_op_i2i32:
892    case nir_op_u2f16:
893    case nir_op_u2f32:
894    case nir_op_u2u16:
895    case nir_op_u2u32:
896    case nir_op_iabs:
897    case nir_op_ineg:
898    case nir_op_pack_32_4x8_split:
899       break;
900 
901    default:
902       for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
903          assert(brw_type_size_bytes(op[i].type) > 1);
904       }
905    }
906 #endif
907 
908    switch (instr->op) {
909    case nir_op_mov:
910    case nir_op_vec2:
911    case nir_op_vec3:
912    case nir_op_vec4:
913    case nir_op_vec8:
914    case nir_op_vec16: {
915       brw_reg temp = result;
916       bool need_extra_copy = false;
917 
918       nir_intrinsic_instr *store_reg =
919          nir_store_reg_for_def(&instr->def);
920       if (store_reg != NULL) {
921          nir_def *dest_reg = store_reg->src[1].ssa;
922          for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
923             nir_intrinsic_instr *load_reg =
924                nir_load_reg_for_def(instr->src[i].src.ssa);
925             if (load_reg == NULL)
926                continue;
927 
928             if (load_reg->src[0].ssa == dest_reg) {
929                need_extra_copy = true;
930                temp = bld.vgrf(result.type, 4);
931                break;
932             }
933          }
934       }
935 
936       nir_component_mask_t write_mask = get_nir_write_mask(instr->def);
937       unsigned last_bit = util_last_bit(write_mask);
938 
939       assert(last_bit <= NIR_MAX_VEC_COMPONENTS);
940       brw_reg comps[NIR_MAX_VEC_COMPONENTS];
941 
942       for (unsigned i = 0; i < last_bit; i++) {
943          if (instr->op == nir_op_mov)
944             comps[i] = offset(op[0], bld, instr->src[0].swizzle[i]);
945          else
946             comps[i] = offset(op[i], bld, instr->src[i].swizzle[0]);
947       }
948 
949       if (write_mask == (1u << last_bit) - 1) {
950          bld.VEC(temp, comps, last_bit);
951       } else {
952          for (unsigned i = 0; i < last_bit; i++) {
953             if (write_mask & (1 << i))
954                bld.MOV(offset(temp, bld, i), comps[i]);
955          }
956       }
957 
958       /* In this case the source and destination registers were the same,
959        * so we need to insert an extra set of moves in order to deal with
960        * any swizzling.
961        */
962       if (need_extra_copy) {
963          for (unsigned i = 0; i < last_bit; i++) {
964             if (!(write_mask & (1 << i)))
965                continue;
966 
967             bld.MOV(offset(result, bld, i), offset(temp, bld, i));
968          }
969       }
970       return;
971    }
972 
973    case nir_op_i2f32:
974    case nir_op_u2f32:
975       if (optimize_extract_to_float(ntb, instr, result))
976          return;
977       bld.MOV(result, op[0]);
978       break;
979 
980    case nir_op_f2f16_rtne:
981    case nir_op_f2f16_rtz:
982    case nir_op_f2f16: {
983       brw_rnd_mode rnd = BRW_RND_MODE_UNSPECIFIED;
984 
985       if (nir_op_f2f16 == instr->op)
986          rnd = brw_rnd_mode_from_execution_mode(execution_mode);
987       else
988          rnd = brw_rnd_mode_from_nir_op(instr->op);
989 
990       if (BRW_RND_MODE_UNSPECIFIED != rnd)
991          bld.exec_all().emit(SHADER_OPCODE_RND_MODE, bld.null_reg_ud(), brw_imm_d(rnd));
992 
993       assert(brw_type_size_bytes(op[0].type) < 8); /* brw_nir_lower_conversions */
994       bld.MOV(result, op[0]);
995       break;
996    }
997 
998    case nir_op_b2i8:
999    case nir_op_b2i16:
1000    case nir_op_b2i32:
1001    case nir_op_b2i64:
1002    case nir_op_b2f16:
1003    case nir_op_b2f32:
1004    case nir_op_b2f64:
1005       if (try_emit_b2fi_of_inot(ntb, bld, result, instr))
1006          break;
1007       op[0].type = BRW_TYPE_D;
1008       op[0].negate = !op[0].negate;
1009       FALLTHROUGH;
1010    case nir_op_i2f64:
1011    case nir_op_i2i64:
1012    case nir_op_u2f64:
1013    case nir_op_u2u64:
1014    case nir_op_f2f64:
1015    case nir_op_f2i64:
1016    case nir_op_f2u64:
1017    case nir_op_i2i32:
1018    case nir_op_u2u32:
1019    case nir_op_f2i32:
1020    case nir_op_f2u32:
1021    case nir_op_i2f16:
1022    case nir_op_u2f16:
1023    case nir_op_f2i16:
1024    case nir_op_f2u16:
1025    case nir_op_f2i8:
1026    case nir_op_f2u8:
1027       if (result.type == BRW_TYPE_B ||
1028           result.type == BRW_TYPE_UB ||
1029           result.type == BRW_TYPE_HF)
1030          assert(brw_type_size_bytes(op[0].type) < 8); /* brw_nir_lower_conversions */
1031 
1032       if (op[0].type == BRW_TYPE_B ||
1033           op[0].type == BRW_TYPE_UB ||
1034           op[0].type == BRW_TYPE_HF)
1035          assert(brw_type_size_bytes(result.type) < 8); /* brw_nir_lower_conversions */
1036 
1037       bld.MOV(result, op[0]);
1038       break;
1039 
1040    case nir_op_i2i8:
1041    case nir_op_u2u8:
1042       assert(brw_type_size_bytes(op[0].type) < 8); /* brw_nir_lower_conversions */
1043       FALLTHROUGH;
1044    case nir_op_i2i16:
1045    case nir_op_u2u16: {
1046       /* Emit better code for u2u8(extract_u8(a, b)) and similar patterns.
1047        * Emitting the instructions one by one results in two MOV instructions
1048        * that won't be propagated.  By handling both instructions here, a
1049        * single MOV is emitted.
1050        */
1051       nir_alu_instr *extract_instr = nir_src_as_alu_instr(instr->src[0].src);
1052       if (extract_instr != NULL) {
1053          if (extract_instr->op == nir_op_extract_u8 ||
1054              extract_instr->op == nir_op_extract_i8) {
1055             prepare_alu_destination_and_sources(ntb, bld, extract_instr, op, false);
1056 
1057             const unsigned byte = nir_src_as_uint(extract_instr->src[1].src);
1058             const brw_reg_type type =
1059                brw_int_type(1, extract_instr->op == nir_op_extract_i8);
1060 
1061             op[0] = subscript(op[0], type, byte);
1062          } else if (extract_instr->op == nir_op_extract_u16 ||
1063                     extract_instr->op == nir_op_extract_i16) {
1064             prepare_alu_destination_and_sources(ntb, bld, extract_instr, op, false);
1065 
1066             const unsigned word = nir_src_as_uint(extract_instr->src[1].src);
1067             const brw_reg_type type =
1068                brw_int_type(2, extract_instr->op == nir_op_extract_i16);
1069 
1070             op[0] = subscript(op[0], type, word);
1071          }
1072       }
1073 
1074       bld.MOV(result, op[0]);
1075       break;
1076    }
1077 
1078    case nir_op_fsat:
1079       inst = bld.MOV(result, op[0]);
1080       inst->saturate = true;
1081       break;
1082 
1083    case nir_op_fneg:
1084    case nir_op_ineg:
1085       op[0].negate = true;
1086       bld.MOV(result, op[0]);
1087       break;
1088 
1089    case nir_op_fabs:
1090    case nir_op_iabs:
1091       op[0].negate = false;
1092       op[0].abs = true;
1093       bld.MOV(result, op[0]);
1094       break;
1095 
1096    case nir_op_f2f32:
1097       if (nir_has_any_rounding_mode_enabled(execution_mode)) {
1098          brw_rnd_mode rnd =
1099             brw_rnd_mode_from_execution_mode(execution_mode);
1100          bld.exec_all().emit(SHADER_OPCODE_RND_MODE, bld.null_reg_ud(),
1101                              brw_imm_d(rnd));
1102       }
1103 
1104       if (op[0].type == BRW_TYPE_HF)
1105          assert(brw_type_size_bytes(result.type) < 8); /* brw_nir_lower_conversions */
1106 
1107       bld.MOV(result, op[0]);
1108       break;
1109 
1110    case nir_op_fsign:
1111       unreachable("Should have been lowered by brw_nir_lower_fsign.");
1112 
1113    case nir_op_frcp:
1114       bld.RCP(result, op[0]);
1115       break;
1116 
1117    case nir_op_fexp2:
1118       bld.EXP2(result, op[0]);
1119       break;
1120 
1121    case nir_op_flog2:
1122       bld.LOG2(result, op[0]);
1123       break;
1124 
1125    case nir_op_fsin:
1126       bld.SIN(result, op[0]);
1127       break;
1128 
1129    case nir_op_fcos:
1130       bld.COS(result, op[0]);
1131       break;
1132 
1133    case nir_op_fadd:
1134       if (nir_has_any_rounding_mode_enabled(execution_mode)) {
1135          brw_rnd_mode rnd =
1136             brw_rnd_mode_from_execution_mode(execution_mode);
1137          bld.exec_all().emit(SHADER_OPCODE_RND_MODE, bld.null_reg_ud(),
1138                              brw_imm_d(rnd));
1139       }
1140       FALLTHROUGH;
1141    case nir_op_iadd:
1142       bld.ADD(result, op[0], op[1]);
1143       break;
1144 
1145    case nir_op_iadd3:
1146       assert(instr->def.bit_size < 64);
1147       bld.ADD3(result, op[0], op[1], op[2]);
1148       break;
1149 
1150    case nir_op_iadd_sat:
1151    case nir_op_uadd_sat:
1152       inst = bld.ADD(result, op[0], op[1]);
1153       inst->saturate = true;
1154       break;
1155 
1156    case nir_op_isub_sat:
1157       bld.emit(SHADER_OPCODE_ISUB_SAT, result, op[0], op[1]);
1158       break;
1159 
1160    case nir_op_usub_sat:
1161       bld.emit(SHADER_OPCODE_USUB_SAT, result, op[0], op[1]);
1162       break;
1163 
1164    case nir_op_irhadd:
1165    case nir_op_urhadd:
1166       assert(instr->def.bit_size < 64);
1167       bld.AVG(result, op[0], op[1]);
1168       break;
1169 
1170    case nir_op_ihadd:
1171    case nir_op_uhadd: {
1172       assert(instr->def.bit_size < 64);
1173 
1174       op[0] = resolve_source_modifiers(bld, op[0]);
1175       op[1] = resolve_source_modifiers(bld, op[1]);
1176 
1177       /* AVG(x, y) - ((x ^ y) & 1) */
1178       brw_reg one = retype(brw_imm_ud(1), result.type);
1179       bld.ADD(result, bld.AVG(op[0], op[1]),
1180               negate(bld.AND(bld.XOR(op[0], op[1]), one)));
1181       break;
1182    }
1183 
1184    case nir_op_fmul:
1185       if (nir_has_any_rounding_mode_enabled(execution_mode)) {
1186          brw_rnd_mode rnd =
1187             brw_rnd_mode_from_execution_mode(execution_mode);
1188          bld.exec_all().emit(SHADER_OPCODE_RND_MODE, bld.null_reg_ud(),
1189                              brw_imm_d(rnd));
1190       }
1191 
1192       bld.MUL(result, op[0], op[1]);
1193       break;
1194 
1195    case nir_op_imul_2x32_64:
1196    case nir_op_umul_2x32_64:
1197       bld.MUL(result, op[0], op[1]);
1198       break;
1199 
1200    case nir_op_imul_32x16:
1201    case nir_op_umul_32x16: {
1202       const bool ud = instr->op == nir_op_umul_32x16;
1203       const enum brw_reg_type word_type = ud ? BRW_TYPE_UW : BRW_TYPE_W;
1204       const enum brw_reg_type dword_type = ud ? BRW_TYPE_UD : BRW_TYPE_D;
1205 
1206       assert(instr->def.bit_size == 32);
1207 
1208       /* Before copy propagation there are no immediate values. */
1209       assert(op[0].file != IMM && op[1].file != IMM);
1210 
1211       op[1] = subscript(op[1], word_type, 0);
1212 
1213       bld.MUL(result, retype(op[0], dword_type), op[1]);
1214 
1215       break;
1216    }
1217 
1218    case nir_op_imul:
1219       assert(instr->def.bit_size < 64);
1220       bld.MUL(result, op[0], op[1]);
1221       break;
1222 
1223    case nir_op_imul_high:
1224    case nir_op_umul_high:
1225       assert(instr->def.bit_size < 64);
1226       if (instr->def.bit_size == 32) {
1227          bld.emit(SHADER_OPCODE_MULH, result, op[0], op[1]);
1228       } else {
1229          brw_reg tmp = bld.vgrf(brw_type_with_size(op[0].type, 32));
1230          bld.MUL(tmp, op[0], op[1]);
1231          bld.MOV(result, subscript(tmp, result.type, 1));
1232       }
1233       break;
1234 
1235    case nir_op_idiv:
1236    case nir_op_udiv:
1237       assert(instr->def.bit_size < 64);
1238       bld.INT_QUOTIENT(result, op[0], op[1]);
1239       break;
1240 
1241    case nir_op_uadd_carry:
1242       unreachable("Should have been lowered by carry_to_arith().");
1243 
1244    case nir_op_usub_borrow:
1245       unreachable("Should have been lowered by borrow_to_arith().");
1246 
1247    case nir_op_umod:
1248    case nir_op_irem:
1249       /* According to the sign table for INT DIV in the Ivy Bridge PRM, it
1250        * appears that our hardware just does the right thing for signed
1251        * remainder.
1252        */
1253       assert(instr->def.bit_size < 64);
1254       bld.INT_REMAINDER(result, op[0], op[1]);
1255       break;
1256 
1257    case nir_op_imod: {
1258       /* Get a regular C-style remainder.  If a % b == 0, set the predicate. */
1259       bld.INT_REMAINDER(result, op[0], op[1]);
1260 
1261       /* Math instructions don't support conditional mod */
1262       inst = bld.MOV(bld.null_reg_d(), result);
1263       inst->conditional_mod = BRW_CONDITIONAL_NZ;
1264 
1265       /* Now, we need to determine if signs of the sources are different.
1266        * When we XOR the sources, the top bit is 0 if they are the same and 1
1267        * if they are different.  We can then use a conditional modifier to
1268        * turn that into a predicate.  This leads us to an XOR.l instruction.
1269        *
1270        * Technically, according to the PRM, you're not allowed to use .l on a
1271        * XOR instruction.  However, empirical experiments and Curro's reading
1272        * of the simulator source both indicate that it's safe.
1273        */
1274       bld.XOR(op[0], op[1], &inst);
1275       inst->predicate = BRW_PREDICATE_NORMAL;
1276       inst->conditional_mod = BRW_CONDITIONAL_L;
1277 
1278       /* If the result of the initial remainder operation is non-zero and the
1279        * two sources have different signs, add in a copy of op[1] to get the
1280        * final integer modulus value.
1281        */
1282       inst = bld.ADD(result, result, op[1]);
1283       inst->predicate = BRW_PREDICATE_NORMAL;
1284       break;
1285    }
1286 
1287    case nir_op_flt32:
1288    case nir_op_fge32:
1289    case nir_op_feq32:
1290    case nir_op_fneu32: {
1291       brw_reg dest = result;
1292 
1293       const uint32_t bit_size =  nir_src_bit_size(instr->src[0].src);
1294       if (bit_size != 32) {
1295          dest = bld.vgrf(op[0].type);
1296          bld.UNDEF(dest);
1297       }
1298 
1299       bld.CMP(dest, op[0], op[1], brw_cmod_for_nir_comparison(instr->op));
1300 
1301       if (bit_size > 32) {
1302          bld.MOV(result, subscript(dest, BRW_TYPE_UD, 0));
1303       } else if(bit_size < 32) {
1304          /* When we convert the result to 32-bit we need to be careful and do
1305           * it as a signed conversion to get sign extension (for 32-bit true)
1306           */
1307          const brw_reg_type src_type =
1308             brw_type_with_size(BRW_TYPE_D, bit_size);
1309 
1310          bld.MOV(retype(result, BRW_TYPE_D), retype(dest, src_type));
1311       }
1312       break;
1313    }
1314 
1315    case nir_op_ilt32:
1316    case nir_op_ult32:
1317    case nir_op_ige32:
1318    case nir_op_uge32:
1319    case nir_op_ieq32:
1320    case nir_op_ine32: {
1321       brw_reg dest = result;
1322 
1323       const uint32_t bit_size = brw_type_size_bits(op[0].type);
1324       if (bit_size != 32) {
1325          dest = bld.vgrf(op[0].type);
1326          bld.UNDEF(dest);
1327       }
1328 
1329       bld.CMP(dest, op[0], op[1],
1330               brw_cmod_for_nir_comparison(instr->op));
1331 
1332       if (bit_size > 32) {
1333          bld.MOV(result, subscript(dest, BRW_TYPE_UD, 0));
1334       } else if (bit_size < 32) {
1335          /* When we convert the result to 32-bit we need to be careful and do
1336           * it as a signed conversion to get sign extension (for 32-bit true)
1337           */
1338          const brw_reg_type src_type =
1339             brw_type_with_size(BRW_TYPE_D, bit_size);
1340 
1341          bld.MOV(retype(result, BRW_TYPE_D), retype(dest, src_type));
1342       }
1343       break;
1344    }
1345 
1346    case nir_op_inot: {
1347       nir_alu_instr *inot_src_instr = nir_src_as_alu_instr(instr->src[0].src);
1348 
1349       if (inot_src_instr != NULL &&
1350           (inot_src_instr->op == nir_op_ior ||
1351            inot_src_instr->op == nir_op_ixor ||
1352            inot_src_instr->op == nir_op_iand)) {
1353          /* The sources of the source logical instruction are now the
1354           * sources of the instruction that will be generated.
1355           */
1356          prepare_alu_destination_and_sources(ntb, bld, inot_src_instr, op, false);
1357          resolve_inot_sources(ntb, bld, inot_src_instr, op);
1358 
1359          /* Smash all of the sources and destination to be signed.  This
1360           * doesn't matter for the operation of the instruction, but cmod
1361           * propagation fails on unsigned sources with negation (due to
1362           * fs_inst::can_do_cmod returning false).
1363           */
1364          result.type =
1365             brw_type_for_nir_type(devinfo,
1366                                   (nir_alu_type)(nir_type_int |
1367                                                  instr->def.bit_size));
1368          op[0].type =
1369             brw_type_for_nir_type(devinfo,
1370                                   (nir_alu_type)(nir_type_int |
1371                                                  nir_src_bit_size(inot_src_instr->src[0].src)));
1372          op[1].type =
1373             brw_type_for_nir_type(devinfo,
1374                                   (nir_alu_type)(nir_type_int |
1375                                                  nir_src_bit_size(inot_src_instr->src[1].src)));
1376 
1377          /* For XOR, only invert one of the sources.  Arbitrarily choose
1378           * the first source.
1379           */
1380          op[0].negate = !op[0].negate;
1381          if (inot_src_instr->op != nir_op_ixor)
1382             op[1].negate = !op[1].negate;
1383 
1384          switch (inot_src_instr->op) {
1385          case nir_op_ior:
1386             bld.AND(result, op[0], op[1]);
1387             return;
1388 
1389          case nir_op_iand:
1390             bld.OR(result, op[0], op[1]);
1391             return;
1392 
1393          case nir_op_ixor:
1394             bld.XOR(result, op[0], op[1]);
1395             return;
1396 
1397          default:
1398             unreachable("impossible opcode");
1399          }
1400       }
1401       op[0] = resolve_source_modifiers(bld, op[0]);
1402       bld.NOT(result, op[0]);
1403       break;
1404    }
1405 
1406    case nir_op_ixor:
1407       resolve_inot_sources(ntb, bld, instr, op);
1408       bld.XOR(result, op[0], op[1]);
1409       break;
1410    case nir_op_ior:
1411       resolve_inot_sources(ntb, bld, instr, op);
1412       bld.OR(result, op[0], op[1]);
1413       break;
1414    case nir_op_iand:
1415       resolve_inot_sources(ntb, bld, instr, op);
1416       bld.AND(result, op[0], op[1]);
1417       break;
1418 
1419    case nir_op_fdot2:
1420    case nir_op_fdot3:
1421    case nir_op_fdot4:
1422    case nir_op_b32all_fequal2:
1423    case nir_op_b32all_iequal2:
1424    case nir_op_b32all_fequal3:
1425    case nir_op_b32all_iequal3:
1426    case nir_op_b32all_fequal4:
1427    case nir_op_b32all_iequal4:
1428    case nir_op_b32any_fnequal2:
1429    case nir_op_b32any_inequal2:
1430    case nir_op_b32any_fnequal3:
1431    case nir_op_b32any_inequal3:
1432    case nir_op_b32any_fnequal4:
1433    case nir_op_b32any_inequal4:
1434       unreachable("Lowered by nir_lower_alu_reductions");
1435 
1436    case nir_op_ldexp:
1437       unreachable("not reached: should be handled by ldexp_to_arith()");
1438 
1439    case nir_op_fsqrt:
1440       bld.SQRT(result, op[0]);
1441       break;
1442 
1443    case nir_op_frsq:
1444       bld.RSQ(result, op[0]);
1445       break;
1446 
1447    case nir_op_ftrunc:
1448       bld.RNDZ(result, op[0]);
1449       break;
1450 
1451    case nir_op_fceil:
1452       bld.MOV(result, negate(bld.RNDD(negate(op[0]))));
1453       break;
1454    case nir_op_ffloor:
1455       bld.RNDD(result, op[0]);
1456       break;
1457    case nir_op_ffract:
1458       bld.FRC(result, op[0]);
1459       break;
1460    case nir_op_fround_even:
1461       bld.RNDE(result, op[0]);
1462       break;
1463 
1464    case nir_op_fquantize2f16: {
1465       brw_reg tmp16 = bld.vgrf(BRW_TYPE_D);
1466       brw_reg tmp32 = bld.vgrf(BRW_TYPE_F);
1467 
1468       /* The destination stride must be at least as big as the source stride. */
1469       tmp16 = subscript(tmp16, BRW_TYPE_HF, 0);
1470 
1471       /* Check for denormal */
1472       brw_reg abs_src0 = op[0];
1473       abs_src0.abs = true;
1474       bld.CMP(bld.null_reg_f(), abs_src0, brw_imm_f(ldexpf(1.0, -14)),
1475               BRW_CONDITIONAL_L);
1476       /* Get the appropriately signed zero */
1477       brw_reg zero = retype(bld.AND(retype(op[0], BRW_TYPE_UD),
1478                                    brw_imm_ud(0x80000000)), BRW_TYPE_F);
1479       /* Do the actual F32 -> F16 -> F32 conversion */
1480       bld.MOV(tmp16, op[0]);
1481       bld.MOV(tmp32, tmp16);
1482       /* Select that or zero based on normal status */
1483       inst = bld.SEL(result, zero, tmp32);
1484       inst->predicate = BRW_PREDICATE_NORMAL;
1485       break;
1486    }
1487 
1488    case nir_op_imin:
1489    case nir_op_umin:
1490    case nir_op_fmin:
1491       bld.emit_minmax(result, op[0], op[1], BRW_CONDITIONAL_L);
1492       break;
1493 
1494    case nir_op_imax:
1495    case nir_op_umax:
1496    case nir_op_fmax:
1497       bld.emit_minmax(result, op[0], op[1], BRW_CONDITIONAL_GE);
1498       break;
1499 
1500    case nir_op_pack_snorm_2x16:
1501    case nir_op_pack_snorm_4x8:
1502    case nir_op_pack_unorm_2x16:
1503    case nir_op_pack_unorm_4x8:
1504    case nir_op_unpack_snorm_2x16:
1505    case nir_op_unpack_snorm_4x8:
1506    case nir_op_unpack_unorm_2x16:
1507    case nir_op_unpack_unorm_4x8:
1508    case nir_op_unpack_half_2x16:
1509    case nir_op_pack_half_2x16:
1510       unreachable("not reached: should be handled by lower_packing_builtins");
1511 
1512    case nir_op_unpack_half_2x16_split_x:
1513       bld.MOV(result, subscript(op[0], BRW_TYPE_HF, 0));
1514       break;
1515 
1516    case nir_op_unpack_half_2x16_split_y:
1517       bld.MOV(result, subscript(op[0], BRW_TYPE_HF, 1));
1518       break;
1519 
1520    case nir_op_pack_64_2x32_split:
1521    case nir_op_pack_32_2x16_split:
1522       bld.emit(FS_OPCODE_PACK, result, op[0], op[1]);
1523       break;
1524 
1525    case nir_op_pack_32_4x8_split:
1526       bld.emit(FS_OPCODE_PACK, result, op, 4);
1527       break;
1528 
1529    case nir_op_unpack_64_2x32_split_x:
1530    case nir_op_unpack_64_2x32_split_y: {
1531       if (instr->op == nir_op_unpack_64_2x32_split_x)
1532          bld.MOV(result, subscript(op[0], BRW_TYPE_UD, 0));
1533       else
1534          bld.MOV(result, subscript(op[0], BRW_TYPE_UD, 1));
1535       break;
1536    }
1537 
1538    case nir_op_unpack_32_2x16_split_x:
1539    case nir_op_unpack_32_2x16_split_y: {
1540       if (instr->op == nir_op_unpack_32_2x16_split_x)
1541          bld.MOV(result, subscript(op[0], BRW_TYPE_UW, 0));
1542       else
1543          bld.MOV(result, subscript(op[0], BRW_TYPE_UW, 1));
1544       break;
1545    }
1546 
1547    case nir_op_fpow:
1548       bld.POW(result, op[0], op[1]);
1549       break;
1550 
1551    case nir_op_bitfield_reverse:
1552       assert(instr->def.bit_size == 32);
1553       assert(nir_src_bit_size(instr->src[0].src) == 32);
1554       bld.BFREV(result, op[0]);
1555       break;
1556 
1557    case nir_op_bit_count:
1558       assert(instr->def.bit_size == 32);
1559       assert(nir_src_bit_size(instr->src[0].src) < 64);
1560       bld.CBIT(result, op[0]);
1561       break;
1562 
1563    case nir_op_uclz:
1564       assert(instr->def.bit_size == 32);
1565       assert(nir_src_bit_size(instr->src[0].src) == 32);
1566       bld.LZD(retype(result, BRW_TYPE_UD), op[0]);
1567       break;
1568 
1569    case nir_op_ifind_msb: {
1570       assert(instr->def.bit_size == 32);
1571       assert(nir_src_bit_size(instr->src[0].src) == 32);
1572 
1573       brw_reg tmp = bld.FBH(retype(op[0], BRW_TYPE_D));
1574 
1575       /* FBH counts from the MSB side, while GLSL's findMSB() wants the count
1576        * from the LSB side. If FBH didn't return an error (0xFFFFFFFF), then
1577        * subtract the result from 31 to convert the MSB count into an LSB
1578        * count.
1579        */
1580       brw_reg count_from_lsb = bld.ADD(negate(tmp), brw_imm_w(31));
1581 
1582       /* The high word of the FBH result will be 0xffff or 0x0000. After
1583        * calculating 31 - fbh, we can obtain the correct result for
1584        * ifind_msb(0) by ORing the (sign extended) upper word of the
1585        * intermediate result.
1586        */
1587       bld.OR(result, count_from_lsb, subscript(tmp, BRW_TYPE_W, 1));
1588       break;
1589    }
1590 
1591    case nir_op_find_lsb:
1592       assert(instr->def.bit_size == 32);
1593       assert(nir_src_bit_size(instr->src[0].src) == 32);
1594       bld.FBL(result, op[0]);
1595       break;
1596 
1597    case nir_op_ubitfield_extract:
1598    case nir_op_ibitfield_extract:
1599       unreachable("should have been lowered");
1600    case nir_op_ubfe:
1601    case nir_op_ibfe:
1602       assert(instr->def.bit_size < 64);
1603       bld.BFE(result, op[2], op[1], op[0]);
1604       break;
1605    case nir_op_bfm:
1606       assert(instr->def.bit_size < 64);
1607       bld.BFI1(result, op[0], op[1]);
1608       break;
1609    case nir_op_bfi:
1610       assert(instr->def.bit_size < 64);
1611 
1612       /* bfi is ((...) | (~src0 & src2)). The second part is zero when src2 is
1613        * either 0 or src0. Replacing the 0 with another value can eliminate a
1614        * temporary register.
1615        */
1616       if (is_const_zero(instr->src[2].src))
1617          bld.BFI2(result, op[0], op[1], op[0]);
1618       else
1619          bld.BFI2(result, op[0], op[1], op[2]);
1620 
1621       break;
1622 
1623    case nir_op_bitfield_insert:
1624       unreachable("not reached: should have been lowered");
1625 
1626    /* With regards to implicit masking of the shift counts for 8- and 16-bit
1627     * types, the PRMs are **incorrect**. They falsely state that on Gen9+ only
1628     * the low bits of src1 matching the size of src0 (e.g., 4-bits for W or UW
1629     * src0) are used. The Bspec (backed by data from experimentation) state
1630     * that 0x3f is used for Q and UQ types, and 0x1f is used for **all** other
1631     * types.
1632     *
1633     * The match the behavior expected for the NIR opcodes, explicit masks for
1634     * 8- and 16-bit types must be added.
1635     */
1636    case nir_op_ishl:
1637       if (instr->def.bit_size < 32) {
1638          bld.SHL(result,
1639                  op[0],
1640                  bld.AND(op[1], brw_imm_ud(instr->def.bit_size - 1)));
1641       } else {
1642          bld.SHL(result, op[0], op[1]);
1643       }
1644 
1645       break;
1646    case nir_op_ishr:
1647       if (instr->def.bit_size < 32) {
1648          bld.ASR(result,
1649                  op[0],
1650                  bld.AND(op[1], brw_imm_ud(instr->def.bit_size - 1)));
1651       } else {
1652          bld.ASR(result, op[0], op[1]);
1653       }
1654 
1655       break;
1656    case nir_op_ushr:
1657       if (instr->def.bit_size < 32) {
1658          bld.SHR(result,
1659                  op[0],
1660                  bld.AND(op[1], brw_imm_ud(instr->def.bit_size - 1)));
1661       } else {
1662          bld.SHR(result, op[0], op[1]);
1663       }
1664 
1665       break;
1666 
1667    case nir_op_urol:
1668       bld.ROL(result, op[0], op[1]);
1669       break;
1670    case nir_op_uror:
1671       bld.ROR(result, op[0], op[1]);
1672       break;
1673 
1674    case nir_op_pack_half_2x16_split:
1675       bld.emit(FS_OPCODE_PACK_HALF_2x16_SPLIT, result, op[0], op[1]);
1676       break;
1677 
1678    case nir_op_sdot_4x8_iadd:
1679    case nir_op_sdot_4x8_iadd_sat:
1680       inst = bld.DP4A(retype(result, BRW_TYPE_D),
1681                       retype(op[2], BRW_TYPE_D),
1682                       retype(op[0], BRW_TYPE_D),
1683                       retype(op[1], BRW_TYPE_D));
1684 
1685       if (instr->op == nir_op_sdot_4x8_iadd_sat)
1686          inst->saturate = true;
1687       break;
1688 
1689    case nir_op_udot_4x8_uadd:
1690    case nir_op_udot_4x8_uadd_sat:
1691       inst = bld.DP4A(retype(result, BRW_TYPE_UD),
1692                       retype(op[2], BRW_TYPE_UD),
1693                       retype(op[0], BRW_TYPE_UD),
1694                       retype(op[1], BRW_TYPE_UD));
1695 
1696       if (instr->op == nir_op_udot_4x8_uadd_sat)
1697          inst->saturate = true;
1698       break;
1699 
1700    case nir_op_sudot_4x8_iadd:
1701    case nir_op_sudot_4x8_iadd_sat:
1702       inst = bld.DP4A(retype(result, BRW_TYPE_D),
1703                       retype(op[2], BRW_TYPE_D),
1704                       retype(op[0], BRW_TYPE_D),
1705                       retype(op[1], BRW_TYPE_UD));
1706 
1707       if (instr->op == nir_op_sudot_4x8_iadd_sat)
1708          inst->saturate = true;
1709       break;
1710 
1711    case nir_op_ffma:
1712       if (nir_has_any_rounding_mode_enabled(execution_mode)) {
1713          brw_rnd_mode rnd =
1714             brw_rnd_mode_from_execution_mode(execution_mode);
1715          bld.exec_all().emit(SHADER_OPCODE_RND_MODE, bld.null_reg_ud(),
1716                              brw_imm_d(rnd));
1717       }
1718 
1719       bld.MAD(result, op[2], op[1], op[0]);
1720       break;
1721 
1722    case nir_op_flrp:
1723       if (nir_has_any_rounding_mode_enabled(execution_mode)) {
1724          brw_rnd_mode rnd =
1725             brw_rnd_mode_from_execution_mode(execution_mode);
1726          bld.exec_all().emit(SHADER_OPCODE_RND_MODE, bld.null_reg_ud(),
1727                              brw_imm_d(rnd));
1728       }
1729 
1730       bld.LRP(result, op[0], op[1], op[2]);
1731       break;
1732 
1733    case nir_op_b32csel:
1734       if (optimize_frontfacing_ternary(ntb, instr, result))
1735          return;
1736 
1737       bld.CMP(bld.null_reg_d(), op[0], brw_imm_d(0), BRW_CONDITIONAL_NZ);
1738       inst = bld.SEL(result, op[1], op[2]);
1739       inst->predicate = BRW_PREDICATE_NORMAL;
1740       break;
1741 
1742    case nir_op_fcsel:
1743       bld.CSEL(result, op[1], op[2], op[0], BRW_CONDITIONAL_NZ);
1744       break;
1745 
1746    case nir_op_fcsel_gt:
1747       bld.CSEL(result, op[1], op[2], op[0], BRW_CONDITIONAL_G);
1748       break;
1749 
1750    case nir_op_fcsel_ge:
1751       bld.CSEL(result, op[1], op[2], op[0], BRW_CONDITIONAL_GE);
1752       break;
1753 
1754    case nir_op_extract_u8:
1755    case nir_op_extract_i8: {
1756       const brw_reg_type type = brw_int_type(1, instr->op == nir_op_extract_i8);
1757       unsigned byte = nir_src_as_uint(instr->src[1].src);
1758 
1759       /* The PRMs say:
1760        *
1761        *    BDW+
1762        *    There is no direct conversion from B/UB to Q/UQ or Q/UQ to B/UB.
1763        *    Use two instructions and a word or DWord intermediate integer type.
1764        */
1765       if (instr->def.bit_size == 64) {
1766          if (instr->op == nir_op_extract_i8) {
1767             /* If we need to sign extend, extract to a word first */
1768             brw_reg w_temp = bld.vgrf(BRW_TYPE_W);
1769             bld.MOV(w_temp, subscript(op[0], type, byte));
1770             bld.MOV(result, w_temp);
1771          } else if (byte & 1) {
1772             /* Extract the high byte from the word containing the desired byte
1773              * offset.
1774              */
1775             bld.SHR(result,
1776                     subscript(op[0], BRW_TYPE_UW, byte / 2),
1777                     brw_imm_uw(8));
1778          } else {
1779             /* Otherwise use an AND with 0xff and a word type */
1780             bld.AND(result,
1781                     subscript(op[0], BRW_TYPE_UW, byte / 2),
1782                     brw_imm_uw(0xff));
1783          }
1784       } else {
1785          bld.MOV(result, subscript(op[0], type, byte));
1786       }
1787       break;
1788    }
1789 
1790    case nir_op_extract_u16:
1791    case nir_op_extract_i16: {
1792       const brw_reg_type type = brw_int_type(2, instr->op == nir_op_extract_i16);
1793       unsigned word = nir_src_as_uint(instr->src[1].src);
1794       bld.MOV(result, subscript(op[0], type, word));
1795       break;
1796    }
1797 
1798    default:
1799       unreachable("unhandled instruction");
1800    }
1801 }
1802 
1803 static void
fs_nir_emit_load_const(nir_to_brw_state & ntb,nir_load_const_instr * instr)1804 fs_nir_emit_load_const(nir_to_brw_state &ntb,
1805                        nir_load_const_instr *instr)
1806 {
1807    const intel_device_info *devinfo = ntb.devinfo;
1808    const fs_builder &bld = ntb.bld;
1809 
1810    const brw_reg_type reg_type =
1811       brw_type_with_size(BRW_TYPE_D, instr->def.bit_size);
1812    brw_reg reg = bld.vgrf(reg_type, instr->def.num_components);
1813 
1814    brw_reg comps[NIR_MAX_VEC_COMPONENTS];
1815 
1816    switch (instr->def.bit_size) {
1817    case 8:
1818       for (unsigned i = 0; i < instr->def.num_components; i++)
1819          comps[i] = setup_imm_b(bld, instr->value[i].i8);
1820       break;
1821 
1822    case 16:
1823       for (unsigned i = 0; i < instr->def.num_components; i++)
1824          comps[i] = brw_imm_w(instr->value[i].i16);
1825       break;
1826 
1827    case 32:
1828       for (unsigned i = 0; i < instr->def.num_components; i++)
1829          comps[i] = brw_imm_d(instr->value[i].i32);
1830       break;
1831 
1832    case 64:
1833       if (!devinfo->has_64bit_int) {
1834          reg.type = BRW_TYPE_DF;
1835          for (unsigned i = 0; i < instr->def.num_components; i++)
1836             comps[i] = brw_imm_df(instr->value[i].f64);
1837       } else {
1838          for (unsigned i = 0; i < instr->def.num_components; i++)
1839             comps[i] = brw_imm_q(instr->value[i].i64);
1840       }
1841       break;
1842 
1843    default:
1844       unreachable("Invalid bit size");
1845    }
1846 
1847    bld.VEC(reg, comps, instr->def.num_components);
1848 
1849    ntb.ssa_values[instr->def.index] = reg;
1850 }
1851 
1852 static bool
get_nir_src_bindless(nir_to_brw_state & ntb,const nir_src & src)1853 get_nir_src_bindless(nir_to_brw_state &ntb, const nir_src &src)
1854 {
1855    return ntb.ssa_bind_infos[src.ssa->index].bindless;
1856 }
1857 
1858 static bool
is_resource_src(nir_src src)1859 is_resource_src(nir_src src)
1860 {
1861    return src.ssa->parent_instr->type == nir_instr_type_intrinsic &&
1862           nir_instr_as_intrinsic(src.ssa->parent_instr)->intrinsic == nir_intrinsic_resource_intel;
1863 }
1864 
1865 static brw_reg
get_resource_nir_src(nir_to_brw_state & ntb,const nir_src & src)1866 get_resource_nir_src(nir_to_brw_state &ntb, const nir_src &src)
1867 {
1868    if (!is_resource_src(src))
1869       return brw_reg();
1870    return ntb.uniform_values[src.ssa->index];
1871 }
1872 
1873 static brw_reg
get_nir_src(nir_to_brw_state & ntb,const nir_src & src)1874 get_nir_src(nir_to_brw_state &ntb, const nir_src &src)
1875 {
1876    nir_intrinsic_instr *load_reg = nir_load_reg_for_def(src.ssa);
1877 
1878    brw_reg reg;
1879    if (!load_reg) {
1880       if (nir_src_is_undef(src)) {
1881          const brw_reg_type reg_type =
1882             brw_type_with_size(BRW_TYPE_D, src.ssa->bit_size);
1883          reg = ntb.bld.vgrf(reg_type, src.ssa->num_components);
1884       } else {
1885          reg = ntb.ssa_values[src.ssa->index];
1886       }
1887    } else {
1888       nir_intrinsic_instr *decl_reg = nir_reg_get_decl(load_reg->src[0].ssa);
1889       /* We don't handle indirects on locals */
1890       assert(nir_intrinsic_base(load_reg) == 0);
1891       assert(load_reg->intrinsic != nir_intrinsic_load_reg_indirect);
1892       reg = ntb.ssa_values[decl_reg->def.index];
1893    }
1894 
1895    /* To avoid floating-point denorm flushing problems, set the type by
1896     * default to an integer type - instructions that need floating point
1897     * semantics will set this to F if they need to
1898     */
1899    reg.type = brw_type_with_size(BRW_TYPE_D, nir_src_bit_size(src));
1900 
1901    return reg;
1902 }
1903 
1904 /**
1905  * Return an IMM for constants; otherwise call get_nir_src() as normal.
1906  *
1907  * This function should not be called on any value which may be 64 bits.
1908  * We could theoretically support 64-bit on gfx8+ but we choose not to
1909  * because it wouldn't work in general (no gfx7 support) and there are
1910  * enough restrictions in 64-bit immediates that you can't take the return
1911  * value and treat it the same as the result of get_nir_src().
1912  */
1913 static brw_reg
get_nir_src_imm(nir_to_brw_state & ntb,const nir_src & src)1914 get_nir_src_imm(nir_to_brw_state &ntb, const nir_src &src)
1915 {
1916    assert(nir_src_bit_size(src) == 32);
1917    return nir_src_is_const(src) ?
1918           brw_reg(brw_imm_d(nir_src_as_int(src))) : get_nir_src(ntb, src);
1919 }
1920 
1921 static brw_reg
get_nir_def(nir_to_brw_state & ntb,const nir_def & def)1922 get_nir_def(nir_to_brw_state &ntb, const nir_def &def)
1923 {
1924    const fs_builder &bld = ntb.bld;
1925 
1926    nir_intrinsic_instr *store_reg = nir_store_reg_for_def(&def);
1927    if (!store_reg) {
1928       const brw_reg_type reg_type =
1929          brw_type_with_size(def.bit_size == 8 ? BRW_TYPE_D : BRW_TYPE_F,
1930                             def.bit_size);
1931       ntb.ssa_values[def.index] =
1932          bld.vgrf(reg_type, def.num_components);
1933 
1934       if (def.bit_size * bld.dispatch_width() < 8 * REG_SIZE)
1935          bld.UNDEF(ntb.ssa_values[def.index]);
1936 
1937       return ntb.ssa_values[def.index];
1938    } else {
1939       nir_intrinsic_instr *decl_reg =
1940          nir_reg_get_decl(store_reg->src[1].ssa);
1941       /* We don't handle indirects on locals */
1942       assert(nir_intrinsic_base(store_reg) == 0);
1943       assert(store_reg->intrinsic != nir_intrinsic_store_reg_indirect);
1944       return ntb.ssa_values[decl_reg->def.index];
1945    }
1946 }
1947 
1948 static nir_component_mask_t
get_nir_write_mask(const nir_def & def)1949 get_nir_write_mask(const nir_def &def)
1950 {
1951    nir_intrinsic_instr *store_reg = nir_store_reg_for_def(&def);
1952    if (!store_reg) {
1953       return nir_component_mask(def.num_components);
1954    } else {
1955       return nir_intrinsic_write_mask(store_reg);
1956    }
1957 }
1958 
1959 static fs_inst *
emit_pixel_interpolater_send(const fs_builder & bld,enum opcode opcode,const brw_reg & dst,const brw_reg & src,const brw_reg & desc,const brw_reg & flag_reg,glsl_interp_mode interpolation)1960 emit_pixel_interpolater_send(const fs_builder &bld,
1961                              enum opcode opcode,
1962                              const brw_reg &dst,
1963                              const brw_reg &src,
1964                              const brw_reg &desc,
1965                              const brw_reg &flag_reg,
1966                              glsl_interp_mode interpolation)
1967 {
1968    struct brw_wm_prog_data *wm_prog_data =
1969       brw_wm_prog_data(bld.shader->prog_data);
1970 
1971    brw_reg srcs[INTERP_NUM_SRCS];
1972    srcs[INTERP_SRC_OFFSET]       = src;
1973    srcs[INTERP_SRC_MSG_DESC]     = desc;
1974    srcs[INTERP_SRC_DYNAMIC_MODE] = flag_reg;
1975 
1976    fs_inst *inst = bld.emit(opcode, dst, srcs, INTERP_NUM_SRCS);
1977    /* 2 floats per slot returned */
1978    inst->size_written = 2 * dst.component_size(inst->exec_size);
1979    if (interpolation == INTERP_MODE_NOPERSPECTIVE) {
1980       inst->pi_noperspective = true;
1981       /* TGL BSpec says:
1982        *     This field cannot be set to "Linear Interpolation"
1983        *     unless Non-Perspective Barycentric Enable in 3DSTATE_CLIP is enabled"
1984        */
1985       wm_prog_data->uses_nonperspective_interp_modes = true;
1986    }
1987 
1988    wm_prog_data->pulls_bary = true;
1989 
1990    return inst;
1991 }
1992 
1993 /**
1994  * Return the specified component \p subreg of a per-polygon PS
1995  * payload register for the polygon corresponding to each channel
1996  * specified in the provided \p bld.
1997  *
1998  * \p reg specifies the payload register in REG_SIZE units for the
1999  * first polygon dispatched to the thread.  This function requires
2000  * that subsequent registers on the payload contain the corresponding
2001  * register for subsequent polygons, one GRF register per polygon, if
2002  * multiple polygons are being processed by the same PS thread.
2003  *
2004  * This can be used to access the value of a "Source Depth and/or W
2005  * Attribute Vertex Deltas", "Perspective Bary Planes" or
2006  * "Non-Perspective Bary Planes" payload field conveniently for
2007  * multiple polygons as a single brw_reg.
2008  */
2009 static brw_reg
fetch_polygon_reg(const fs_builder & bld,unsigned reg,unsigned subreg)2010 fetch_polygon_reg(const fs_builder &bld, unsigned reg, unsigned subreg)
2011 {
2012    const fs_visitor *shader = bld.shader;
2013    assert(shader->stage == MESA_SHADER_FRAGMENT);
2014 
2015    const struct intel_device_info *devinfo = shader->devinfo;
2016    const unsigned poly_width = shader->dispatch_width / shader->max_polygons;
2017    const unsigned poly_idx = bld.group() / poly_width;
2018    assert(bld.group() % poly_width == 0);
2019 
2020    if (bld.dispatch_width() > poly_width) {
2021       assert(bld.dispatch_width() <= 2 * poly_width);
2022       const unsigned reg_size = reg_unit(devinfo) * REG_SIZE;
2023       const unsigned vstride = reg_size / brw_type_size_bytes(BRW_TYPE_F);
2024       return stride(brw_vec1_grf(reg + reg_unit(devinfo) * poly_idx, subreg),
2025                     vstride, poly_width, 0);
2026    } else {
2027       return brw_vec1_grf(reg + reg_unit(devinfo) * poly_idx, subreg);
2028    }
2029 }
2030 
2031 /**
2032  * Interpolate per-polygon barycentrics at a specific offset relative
2033  * to each channel fragment coordinates, optionally using
2034  * perspective-correct interpolation if requested.  This is mostly
2035  * useful as replacement for the PI shared function that existed on
2036  * platforms prior to Xe2, but is expected to work on earlier
2037  * platforms since we can get the required polygon setup information
2038  * from the thread payload as far back as ICL.
2039  */
2040 static void
emit_pixel_interpolater_alu_at_offset(const fs_builder & bld,const brw_reg & dst,const brw_reg & offs,glsl_interp_mode interpolation)2041 emit_pixel_interpolater_alu_at_offset(const fs_builder &bld,
2042                                       const brw_reg &dst,
2043                                       const brw_reg &offs,
2044                                       glsl_interp_mode interpolation)
2045 {
2046    const fs_visitor *shader = bld.shader;
2047    assert(shader->stage == MESA_SHADER_FRAGMENT);
2048 
2049    const intel_device_info *devinfo = shader->devinfo;
2050    assert(devinfo->ver >= 11);
2051 
2052    const fs_thread_payload &payload = shader->fs_payload();
2053    const struct brw_wm_prog_data *wm_prog_data =
2054       brw_wm_prog_data(shader->prog_data);
2055 
2056    if (interpolation == INTERP_MODE_NOPERSPECTIVE) {
2057       assert(wm_prog_data->uses_npc_bary_coefficients &&
2058              wm_prog_data->uses_nonperspective_interp_modes);
2059    } else {
2060       assert(interpolation == INTERP_MODE_SMOOTH);
2061       assert(wm_prog_data->uses_pc_bary_coefficients &&
2062              wm_prog_data->uses_depth_w_coefficients);
2063    }
2064 
2065    /* Account for half-pixel X/Y coordinate offset. */
2066    const brw_reg off_x = bld.vgrf(BRW_TYPE_F);
2067    bld.ADD(off_x, offs, brw_imm_f(0.5));
2068 
2069    const brw_reg off_y = bld.vgrf(BRW_TYPE_F);
2070    bld.ADD(off_y, offset(offs, bld, 1), brw_imm_f(0.5));
2071 
2072    /* Process no more than two polygons at a time to avoid hitting
2073     * regioning restrictions.
2074     */
2075    const unsigned poly_width = shader->dispatch_width / shader->max_polygons;
2076 
2077    for (unsigned i = 0; i < DIV_ROUND_UP(shader->max_polygons, 2); i++) {
2078       const fs_builder ibld = bld.group(MIN2(bld.dispatch_width(), 2 * poly_width), i);
2079 
2080       /* Fetch needed parameters from the thread payload. */
2081       const unsigned bary_coef_reg = interpolation == INTERP_MODE_NOPERSPECTIVE ?
2082          payload.npc_bary_coef_reg : payload.pc_bary_coef_reg;
2083       const brw_reg start_x = devinfo->ver < 12 ? fetch_polygon_reg(ibld, 1, 1) :
2084          fetch_polygon_reg(ibld, bary_coef_reg,
2085                            devinfo->ver >= 20 ? 6 : 2);
2086       const brw_reg start_y = devinfo->ver < 12 ? fetch_polygon_reg(ibld, 1, 6) :
2087          fetch_polygon_reg(ibld, bary_coef_reg,
2088                            devinfo->ver >= 20 ? 7 : 6);
2089 
2090       const brw_reg bary1_c0 = fetch_polygon_reg(ibld, bary_coef_reg,
2091                                                 devinfo->ver >= 20 ? 2 : 3);
2092       const brw_reg bary1_cx = fetch_polygon_reg(ibld, bary_coef_reg, 1);
2093       const brw_reg bary1_cy = fetch_polygon_reg(ibld, bary_coef_reg, 0);
2094 
2095       const brw_reg bary2_c0 = fetch_polygon_reg(ibld, bary_coef_reg,
2096                                                 devinfo->ver >= 20 ? 5 : 7);
2097       const brw_reg bary2_cx = fetch_polygon_reg(ibld, bary_coef_reg,
2098                                                 devinfo->ver >= 20 ? 4 : 5);
2099       const brw_reg bary2_cy = fetch_polygon_reg(ibld, bary_coef_reg,
2100                                                 devinfo->ver >= 20 ? 3 : 4);
2101 
2102       const brw_reg rhw_c0 = devinfo->ver >= 20 ?
2103          fetch_polygon_reg(ibld, payload.depth_w_coef_reg + 1, 5) :
2104          fetch_polygon_reg(ibld, payload.depth_w_coef_reg, 7);
2105       const brw_reg rhw_cx = devinfo->ver >= 20 ?
2106          fetch_polygon_reg(ibld, payload.depth_w_coef_reg + 1, 4) :
2107          fetch_polygon_reg(ibld, payload.depth_w_coef_reg, 5);
2108       const brw_reg rhw_cy = devinfo->ver >= 20 ?
2109          fetch_polygon_reg(ibld, payload.depth_w_coef_reg + 1, 3) :
2110          fetch_polygon_reg(ibld, payload.depth_w_coef_reg, 4);
2111 
2112       /* Compute X/Y coordinate deltas relative to the origin of the polygon. */
2113       const brw_reg delta_x = ibld.vgrf(BRW_TYPE_F);
2114       ibld.ADD(delta_x, offset(shader->pixel_x, ibld, i), negate(start_x));
2115       ibld.ADD(delta_x, delta_x, offset(off_x, ibld, i));
2116 
2117       const brw_reg delta_y = ibld.vgrf(BRW_TYPE_F);
2118       ibld.ADD(delta_y, offset(shader->pixel_y, ibld, i), negate(start_y));
2119       ibld.ADD(delta_y, delta_y, offset(off_y, ibld, i));
2120 
2121       /* Evaluate the plane equations obtained above for the
2122        * barycentrics and RHW coordinate at the offset specified for
2123        * each channel.  Limit arithmetic to acc_width in order to
2124        * allow the accumulator to be used for linear interpolation.
2125        */
2126       const unsigned acc_width = 16 * reg_unit(devinfo);
2127       const brw_reg rhw = ibld.vgrf(BRW_TYPE_F);
2128       const brw_reg bary1 = ibld.vgrf(BRW_TYPE_F);
2129       const brw_reg bary2 = ibld.vgrf(BRW_TYPE_F);
2130 
2131       for (unsigned j = 0; j < DIV_ROUND_UP(ibld.dispatch_width(), acc_width); j++) {
2132          const fs_builder jbld = ibld.group(MIN2(ibld.dispatch_width(), acc_width), j);
2133          const brw_reg acc = suboffset(brw_acc_reg(16), jbld.group() % acc_width);
2134 
2135          if (interpolation != INTERP_MODE_NOPERSPECTIVE) {
2136             jbld.MAD(acc, horiz_offset(rhw_c0, acc_width * j),
2137                      horiz_offset(rhw_cx, acc_width * j), offset(delta_x, jbld, j));
2138             jbld.MAC(offset(rhw, jbld, j),
2139                      horiz_offset(rhw_cy, acc_width * j), offset(delta_y, jbld, j));
2140          }
2141 
2142          jbld.MAD(acc, horiz_offset(bary1_c0, acc_width * j),
2143                   horiz_offset(bary1_cx, acc_width * j), offset(delta_x, jbld, j));
2144          jbld.MAC(offset(bary1, jbld, j),
2145                   horiz_offset(bary1_cy, acc_width * j), offset(delta_y, jbld, j));
2146 
2147          jbld.MAD(acc, horiz_offset(bary2_c0, acc_width * j),
2148                   horiz_offset(bary2_cx, acc_width * j), offset(delta_x, jbld, j));
2149          jbld.MAC(offset(bary2, jbld, j),
2150                   horiz_offset(bary2_cy, acc_width * j), offset(delta_y, jbld, j));
2151       }
2152 
2153       /* Scale the results dividing by the interpolated RHW coordinate
2154        * if the interpolation is required to be perspective-correct.
2155        */
2156       if (interpolation == INTERP_MODE_NOPERSPECTIVE) {
2157          ibld.MOV(offset(dst, ibld, i), bary1);
2158          ibld.MOV(offset(offset(dst, bld, 1), ibld, i), bary2);
2159       } else {
2160          const brw_reg w = ibld.vgrf(BRW_TYPE_F);
2161          ibld.emit(SHADER_OPCODE_RCP, w, rhw);
2162          ibld.MUL(offset(dst, ibld, i), bary1, w);
2163          ibld.MUL(offset(offset(dst, bld, 1), ibld, i), bary2, w);
2164       }
2165    }
2166 }
2167 
2168 /**
2169  * Interpolate per-polygon barycentrics at a specified sample index,
2170  * optionally using perspective-correct interpolation if requested.
2171  * This is mostly useful as replacement for the PI shared function
2172  * that existed on platforms prior to Xe2, but is expected to work on
2173  * earlier platforms since we can get the required polygon setup
2174  * information from the thread payload as far back as ICL.
2175  */
2176 static void
emit_pixel_interpolater_alu_at_sample(const fs_builder & bld,const brw_reg & dst,const brw_reg & idx,glsl_interp_mode interpolation)2177 emit_pixel_interpolater_alu_at_sample(const fs_builder &bld,
2178                                       const brw_reg &dst,
2179                                       const brw_reg &idx,
2180                                       glsl_interp_mode interpolation)
2181 {
2182    const fs_thread_payload &payload = bld.shader->fs_payload();
2183    const struct brw_wm_prog_data *wm_prog_data =
2184       brw_wm_prog_data(bld.shader->prog_data);
2185    const fs_builder ubld = bld.exec_all().group(16, 0);
2186    const brw_reg sample_offs_xy = ubld.vgrf(BRW_TYPE_UD);
2187    assert(wm_prog_data->uses_sample_offsets);
2188 
2189    /* Interleave the X/Y coordinates of each sample in order to allow
2190     * a single indirect look-up, by using a MOV for the 16 X
2191     * coordinates, then another MOV for the 16 Y coordinates.
2192     */
2193    for (unsigned i = 0; i < 2; i++) {
2194       const brw_reg reg = retype(brw_vec16_grf(payload.sample_offsets_reg, 4 * i),
2195                                 BRW_TYPE_UB);
2196       ubld.MOV(subscript(sample_offs_xy, BRW_TYPE_UW, i), reg);
2197    }
2198 
2199    /* Use indirect addressing to fetch the X/Y offsets of the sample
2200     * index provided for each channel.
2201     */
2202    const brw_reg idx_b = bld.vgrf(BRW_TYPE_UD);
2203    bld.MUL(idx_b, idx, brw_imm_ud(brw_type_size_bytes(BRW_TYPE_UD)));
2204 
2205    const brw_reg off_xy = bld.vgrf(BRW_TYPE_UD);
2206    bld.emit(SHADER_OPCODE_MOV_INDIRECT, off_xy, component(sample_offs_xy, 0),
2207             idx_b, brw_imm_ud(16 * brw_type_size_bytes(BRW_TYPE_UD)));
2208 
2209    /* Convert the selected fixed-point offsets to floating-point
2210     * offsets.
2211     */
2212    const brw_reg offs = bld.vgrf(BRW_TYPE_F, 2);
2213 
2214    for (unsigned i = 0; i < 2; i++) {
2215       const brw_reg tmp = bld.vgrf(BRW_TYPE_F);
2216       bld.MOV(tmp, subscript(off_xy, BRW_TYPE_UW, i));
2217       bld.MUL(tmp, tmp, brw_imm_f(0.0625));
2218       bld.ADD(offset(offs, bld, i), tmp, brw_imm_f(-0.5));
2219    }
2220 
2221    /* Interpolate at the resulting offsets. */
2222    emit_pixel_interpolater_alu_at_offset(bld, dst, offs, interpolation);
2223 }
2224 
2225 /**
2226  * Computes 1 << x, given a D/UD register containing some value x.
2227  */
2228 static brw_reg
intexp2(const fs_builder & bld,const brw_reg & x)2229 intexp2(const fs_builder &bld, const brw_reg &x)
2230 {
2231    assert(x.type == BRW_TYPE_UD || x.type == BRW_TYPE_D);
2232 
2233    return bld.SHL(bld.MOV(retype(brw_imm_d(1), x.type)), x);
2234 }
2235 
2236 static void
emit_gs_end_primitive(nir_to_brw_state & ntb,const nir_src & vertex_count_nir_src)2237 emit_gs_end_primitive(nir_to_brw_state &ntb, const nir_src &vertex_count_nir_src)
2238 {
2239    fs_visitor &s = ntb.s;
2240    assert(s.stage == MESA_SHADER_GEOMETRY);
2241 
2242    struct brw_gs_prog_data *gs_prog_data = brw_gs_prog_data(s.prog_data);
2243 
2244    if (s.gs_compile->control_data_header_size_bits == 0)
2245       return;
2246 
2247    /* We can only do EndPrimitive() functionality when the control data
2248     * consists of cut bits.  Fortunately, the only time it isn't is when the
2249     * output type is points, in which case EndPrimitive() is a no-op.
2250     */
2251    if (gs_prog_data->control_data_format !=
2252        GFX7_GS_CONTROL_DATA_FORMAT_GSCTL_CUT) {
2253       return;
2254    }
2255 
2256    /* Cut bits use one bit per vertex. */
2257    assert(s.gs_compile->control_data_bits_per_vertex == 1);
2258 
2259    brw_reg vertex_count = get_nir_src(ntb, vertex_count_nir_src);
2260    vertex_count.type = BRW_TYPE_UD;
2261 
2262    /* Cut bit n should be set to 1 if EndPrimitive() was called after emitting
2263     * vertex n, 0 otherwise.  So all we need to do here is mark bit
2264     * (vertex_count - 1) % 32 in the cut_bits register to indicate that
2265     * EndPrimitive() was called after emitting vertex (vertex_count - 1);
2266     * vec4_gs_visitor::emit_control_data_bits() will take care of the rest.
2267     *
2268     * Note that if EndPrimitive() is called before emitting any vertices, this
2269     * will cause us to set bit 31 of the control_data_bits register to 1.
2270     * That's fine because:
2271     *
2272     * - If max_vertices < 32, then vertex number 31 (zero-based) will never be
2273     *   output, so the hardware will ignore cut bit 31.
2274     *
2275     * - If max_vertices == 32, then vertex number 31 is guaranteed to be the
2276     *   last vertex, so setting cut bit 31 has no effect (since the primitive
2277     *   is automatically ended when the GS terminates).
2278     *
2279     * - If max_vertices > 32, then the ir_emit_vertex visitor will reset the
2280     *   control_data_bits register to 0 when the first vertex is emitted.
2281     */
2282 
2283    const fs_builder abld = ntb.bld.annotate("end primitive");
2284 
2285    /* control_data_bits |= 1 << ((vertex_count - 1) % 32) */
2286    brw_reg prev_count = abld.ADD(vertex_count, brw_imm_ud(0xffffffffu));
2287    brw_reg mask = intexp2(abld, prev_count);
2288    /* Note: we're relying on the fact that the GEN SHL instruction only pays
2289     * attention to the lower 5 bits of its second source argument, so on this
2290     * architecture, 1 << (vertex_count - 1) is equivalent to 1 <<
2291     * ((vertex_count - 1) % 32).
2292     */
2293    abld.OR(s.control_data_bits, s.control_data_bits, mask);
2294 }
2295 
2296 brw_reg
gs_urb_per_slot_dword_index(const brw_reg & vertex_count)2297 fs_visitor::gs_urb_per_slot_dword_index(const brw_reg &vertex_count)
2298 {
2299    /* We use a single UD register to accumulate control data bits (32 bits
2300     * for each of the SIMD8 channels).  So we need to write a DWord (32 bits)
2301     * at a time.
2302     *
2303     * On platforms < Xe2:
2304     *    Unfortunately,the URB_WRITE_SIMD8 message uses 128-bit (OWord)
2305     *    offsets.  We have select a 128-bit group via the Global and Per-Slot
2306     *    Offsets, then use the Channel Mask phase to enable/disable which DWord
2307     *    within that group to write.  (Remember, different SIMD8 channels may
2308     *    have emitted different numbers of vertices, so we may need per-slot
2309     *    offsets.)
2310     *
2311     *    Channel masking presents an annoying problem: we may have to replicate
2312     *    the data up to 4 times:
2313     *
2314     *    Msg = Handles, Per-Slot Offsets, Channel Masks, Data, Data, Data,
2315     *          Data.
2316     *
2317     *    To avoid penalizing shaders that emit a small number of vertices, we
2318     *    can avoid these sometimes: if the size of the control data header is
2319     *    <= 128 bits, then there is only 1 OWord.  All SIMD8 channels will land
2320     *    land in the same 128-bit group, so we can skip per-slot offsets.
2321     *
2322     *    Similarly, if the control data header is <= 32 bits, there is only one
2323     *    DWord, so we can skip channel masks.
2324     */
2325    const fs_builder bld = fs_builder(this).at_end();
2326    const fs_builder abld = bld.annotate("urb per slot offset");
2327 
2328    /* Figure out which DWord we're trying to write to using the formula:
2329     *
2330     *    dword_index = (vertex_count - 1) * bits_per_vertex / 32
2331     *
2332     * Since bits_per_vertex is a power of two, and is known at compile
2333     * time, this can be optimized to:
2334     *
2335     *    dword_index = (vertex_count - 1) >> (6 - log2(bits_per_vertex))
2336     */
2337    brw_reg prev_count = abld.ADD(vertex_count, brw_imm_ud(0xffffffffu));
2338    unsigned log2_bits_per_vertex =
2339       util_last_bit(gs_compile->control_data_bits_per_vertex);
2340    return abld.SHR(prev_count, brw_imm_ud(6u - log2_bits_per_vertex));
2341 }
2342 
2343 brw_reg
gs_urb_channel_mask(const brw_reg & dword_index)2344 fs_visitor::gs_urb_channel_mask(const brw_reg &dword_index)
2345 {
2346    brw_reg channel_mask;
2347 
2348    /* Xe2+ can do URB loads with a byte offset, so we don't need to
2349     * construct a channel mask.
2350     */
2351    if (devinfo->ver >= 20)
2352       return channel_mask;
2353 
2354    /* Channel masking presents an annoying problem: we may have to replicate
2355     * the data up to 4 times:
2356     *
2357     * Msg = Handles, Per-Slot Offsets, Channel Masks, Data, Data, Data, Data.
2358     *
2359     * To avoid penalizing shaders that emit a small number of vertices, we
2360     * can avoid these sometimes: if the size of the control data header is
2361     * <= 128 bits, then there is only 1 OWord.  All SIMD8 channels will land
2362     * land in the same 128-bit group, so we can skip per-slot offsets.
2363     *
2364     * Similarly, if the control data header is <= 32 bits, there is only one
2365     * DWord, so we can skip channel masks.
2366     */
2367    if (gs_compile->control_data_header_size_bits <= 32)
2368       return channel_mask;
2369 
2370    const fs_builder bld = fs_builder(this).at_end();
2371    const fs_builder ubld = bld.exec_all();
2372 
2373    /* Set the channel masks to 1 << (dword_index % 4), so that we'll
2374     * write to the appropriate DWORD within the OWORD.
2375     */
2376    brw_reg channel = ubld.AND(dword_index, brw_imm_ud(3u));
2377    /* Then the channel masks need to be in bits 23:16. */
2378    return ubld.SHL(intexp2(ubld, channel), brw_imm_ud(16u));
2379 }
2380 
2381 void
emit_gs_control_data_bits(const brw_reg & vertex_count)2382 fs_visitor::emit_gs_control_data_bits(const brw_reg &vertex_count)
2383 {
2384    assert(stage == MESA_SHADER_GEOMETRY);
2385    assert(gs_compile->control_data_bits_per_vertex != 0);
2386 
2387    const struct brw_gs_prog_data *gs_prog_data = brw_gs_prog_data(prog_data);
2388 
2389    const fs_builder bld = fs_builder(this).at_end();
2390    const fs_builder abld = bld.annotate("emit control data bits");
2391 
2392    brw_reg dword_index = gs_urb_per_slot_dword_index(vertex_count);
2393    brw_reg channel_mask = gs_urb_channel_mask(dword_index);
2394    brw_reg per_slot_offset;
2395 
2396    const unsigned max_control_data_header_size_bits =
2397       devinfo->ver >= 20 ? 32 : 128;
2398 
2399    if (gs_compile->control_data_header_size_bits > max_control_data_header_size_bits) {
2400       /* Convert dword_index to bytes on Xe2+ since LSC can do operate on byte
2401        * offset granularity.
2402        */
2403       if (devinfo->ver >= 20) {
2404          per_slot_offset = abld.SHL(dword_index, brw_imm_ud(2u));
2405       } else {
2406          /* Set the per-slot offset to dword_index / 4, so that we'll write to
2407           * the appropriate OWord within the control data header.
2408           */
2409          per_slot_offset = abld.SHR(dword_index, brw_imm_ud(2u));
2410       }
2411    }
2412 
2413    /* If there are channel masks, add 3 extra copies of the data. */
2414    const unsigned length = 1 + 3 * unsigned(channel_mask.file != BAD_FILE);
2415    assert(length <= 4);
2416    brw_reg sources[4];
2417 
2418    for (unsigned i = 0; i < length; i++)
2419       sources[i] = this->control_data_bits;
2420 
2421    brw_reg srcs[URB_LOGICAL_NUM_SRCS];
2422    srcs[URB_LOGICAL_SRC_HANDLE] = gs_payload().urb_handles;
2423    srcs[URB_LOGICAL_SRC_PER_SLOT_OFFSETS] = per_slot_offset;
2424    srcs[URB_LOGICAL_SRC_CHANNEL_MASK] = channel_mask;
2425    srcs[URB_LOGICAL_SRC_DATA] = bld.vgrf(BRW_TYPE_F, length);
2426    srcs[URB_LOGICAL_SRC_COMPONENTS] = brw_imm_ud(length);
2427    abld.LOAD_PAYLOAD(srcs[URB_LOGICAL_SRC_DATA], sources, length, 0);
2428 
2429    fs_inst *inst = abld.emit(SHADER_OPCODE_URB_WRITE_LOGICAL, reg_undef,
2430                              srcs, ARRAY_SIZE(srcs));
2431 
2432    /* We need to increment Global Offset by 256-bits to make room for
2433     * Broadwell's extra "Vertex Count" payload at the beginning of the
2434     * URB entry.  Since this is an OWord message, Global Offset is counted
2435     * in 128-bit units, so we must set it to 2.
2436     */
2437    if (gs_prog_data->static_vertex_count == -1)
2438       inst->offset = 2;
2439 }
2440 
2441 static void
set_gs_stream_control_data_bits(nir_to_brw_state & ntb,const brw_reg & vertex_count,unsigned stream_id)2442 set_gs_stream_control_data_bits(nir_to_brw_state &ntb, const brw_reg &vertex_count,
2443                                 unsigned stream_id)
2444 {
2445    fs_visitor &s = ntb.s;
2446 
2447    /* control_data_bits |= stream_id << ((2 * (vertex_count - 1)) % 32) */
2448 
2449    /* Note: we are calling this *before* increasing vertex_count, so
2450     * this->vertex_count == vertex_count - 1 in the formula above.
2451     */
2452 
2453    /* Stream mode uses 2 bits per vertex */
2454    assert(s.gs_compile->control_data_bits_per_vertex == 2);
2455 
2456    /* Must be a valid stream */
2457    assert(stream_id < 4); /* MAX_VERTEX_STREAMS */
2458 
2459    /* Control data bits are initialized to 0 so we don't have to set any
2460     * bits when sending vertices to stream 0.
2461     */
2462    if (stream_id == 0)
2463       return;
2464 
2465    const fs_builder abld = ntb.bld.annotate("set stream control data bits");
2466 
2467    /* reg::sid = stream_id */
2468    brw_reg sid = abld.MOV(brw_imm_ud(stream_id));
2469 
2470    /* reg:shift_count = 2 * (vertex_count - 1) */
2471    brw_reg shift_count = abld.SHL(vertex_count, brw_imm_ud(1u));
2472 
2473    /* Note: we're relying on the fact that the GEN SHL instruction only pays
2474     * attention to the lower 5 bits of its second source argument, so on this
2475     * architecture, stream_id << 2 * (vertex_count - 1) is equivalent to
2476     * stream_id << ((2 * (vertex_count - 1)) % 32).
2477     */
2478    brw_reg mask = abld.SHL(sid, shift_count);
2479    abld.OR(s.control_data_bits, s.control_data_bits, mask);
2480 }
2481 
2482 static void
emit_gs_vertex(nir_to_brw_state & ntb,const nir_src & vertex_count_nir_src,unsigned stream_id)2483 emit_gs_vertex(nir_to_brw_state &ntb, const nir_src &vertex_count_nir_src,
2484                unsigned stream_id)
2485 {
2486    fs_visitor &s = ntb.s;
2487 
2488    assert(s.stage == MESA_SHADER_GEOMETRY);
2489 
2490    struct brw_gs_prog_data *gs_prog_data = brw_gs_prog_data(s.prog_data);
2491 
2492    brw_reg vertex_count = get_nir_src(ntb, vertex_count_nir_src);
2493    vertex_count.type = BRW_TYPE_UD;
2494 
2495    /* Haswell and later hardware ignores the "Render Stream Select" bits
2496     * from the 3DSTATE_STREAMOUT packet when the SOL stage is disabled,
2497     * and instead sends all primitives down the pipeline for rasterization.
2498     * If the SOL stage is enabled, "Render Stream Select" is honored and
2499     * primitives bound to non-zero streams are discarded after stream output.
2500     *
2501     * Since the only purpose of primives sent to non-zero streams is to
2502     * be recorded by transform feedback, we can simply discard all geometry
2503     * bound to these streams when transform feedback is disabled.
2504     */
2505    if (stream_id > 0 && !s.nir->info.has_transform_feedback_varyings)
2506       return;
2507 
2508    /* If we're outputting 32 control data bits or less, then we can wait
2509     * until the shader is over to output them all.  Otherwise we need to
2510     * output them as we go.  Now is the time to do it, since we're about to
2511     * output the vertex_count'th vertex, so it's guaranteed that the
2512     * control data bits associated with the (vertex_count - 1)th vertex are
2513     * correct.
2514     */
2515    if (s.gs_compile->control_data_header_size_bits > 32) {
2516       const fs_builder abld =
2517          ntb.bld.annotate("emit vertex: emit control data bits");
2518 
2519       /* Only emit control data bits if we've finished accumulating a batch
2520        * of 32 bits.  This is the case when:
2521        *
2522        *     (vertex_count * bits_per_vertex) % 32 == 0
2523        *
2524        * (in other words, when the last 5 bits of vertex_count *
2525        * bits_per_vertex are 0).  Assuming bits_per_vertex == 2^n for some
2526        * integer n (which is always the case, since bits_per_vertex is
2527        * always 1 or 2), this is equivalent to requiring that the last 5-n
2528        * bits of vertex_count are 0:
2529        *
2530        *     vertex_count & (2^(5-n) - 1) == 0
2531        *
2532        * 2^(5-n) == 2^5 / 2^n == 32 / bits_per_vertex, so this is
2533        * equivalent to:
2534        *
2535        *     vertex_count & (32 / bits_per_vertex - 1) == 0
2536        *
2537        * TODO: If vertex_count is an immediate, we could do some of this math
2538        *       at compile time...
2539        */
2540       fs_inst *inst =
2541          abld.AND(ntb.bld.null_reg_d(), vertex_count,
2542                   brw_imm_ud(32u / s.gs_compile->control_data_bits_per_vertex - 1u));
2543       inst->conditional_mod = BRW_CONDITIONAL_Z;
2544 
2545       abld.IF(BRW_PREDICATE_NORMAL);
2546       /* If vertex_count is 0, then no control data bits have been
2547        * accumulated yet, so we can skip emitting them.
2548        */
2549       abld.CMP(ntb.bld.null_reg_d(), vertex_count, brw_imm_ud(0u),
2550                BRW_CONDITIONAL_NEQ);
2551       abld.IF(BRW_PREDICATE_NORMAL);
2552       s.emit_gs_control_data_bits(vertex_count);
2553       abld.emit(BRW_OPCODE_ENDIF);
2554 
2555       /* Reset control_data_bits to 0 so we can start accumulating a new
2556        * batch.
2557        *
2558        * Note: in the case where vertex_count == 0, this neutralizes the
2559        * effect of any call to EndPrimitive() that the shader may have
2560        * made before outputting its first vertex.
2561        */
2562       abld.exec_all().MOV(s.control_data_bits, brw_imm_ud(0u));
2563       abld.emit(BRW_OPCODE_ENDIF);
2564    }
2565 
2566    s.emit_urb_writes(vertex_count);
2567 
2568    /* In stream mode we have to set control data bits for all vertices
2569     * unless we have disabled control data bits completely (which we do
2570     * do for MESA_PRIM_POINTS outputs that don't use streams).
2571     */
2572    if (s.gs_compile->control_data_header_size_bits > 0 &&
2573        gs_prog_data->control_data_format ==
2574           GFX7_GS_CONTROL_DATA_FORMAT_GSCTL_SID) {
2575       set_gs_stream_control_data_bits(ntb, vertex_count, stream_id);
2576    }
2577 }
2578 
2579 static void
brw_combine_with_vec(const fs_builder & bld,const brw_reg & dst,const brw_reg & src,unsigned n)2580 brw_combine_with_vec(const fs_builder &bld, const brw_reg &dst,
2581                      const brw_reg &src, unsigned n)
2582 {
2583    assert(n <= NIR_MAX_VEC_COMPONENTS);
2584    brw_reg comps[NIR_MAX_VEC_COMPONENTS];
2585    for (unsigned i = 0; i < n; i++)
2586       comps[i] = offset(src, bld, i);
2587    bld.VEC(dst, comps, n);
2588 }
2589 
2590 static void
emit_gs_input_load(nir_to_brw_state & ntb,const brw_reg & dst,const nir_src & vertex_src,unsigned base_offset,const nir_src & offset_src,unsigned num_components,unsigned first_component)2591 emit_gs_input_load(nir_to_brw_state &ntb, const brw_reg &dst,
2592                    const nir_src &vertex_src,
2593                    unsigned base_offset,
2594                    const nir_src &offset_src,
2595                    unsigned num_components,
2596                    unsigned first_component)
2597 {
2598    const fs_builder &bld = ntb.bld;
2599    const struct intel_device_info *devinfo = ntb.devinfo;
2600 
2601    fs_visitor &s = ntb.s;
2602 
2603    assert(brw_type_size_bytes(dst.type) == 4);
2604    struct brw_gs_prog_data *gs_prog_data = brw_gs_prog_data(s.prog_data);
2605    const unsigned push_reg_count = gs_prog_data->base.urb_read_length * 8;
2606 
2607    /* TODO: figure out push input layout for invocations == 1 */
2608    if (gs_prog_data->invocations == 1 &&
2609        nir_src_is_const(offset_src) && nir_src_is_const(vertex_src) &&
2610        4 * (base_offset + nir_src_as_uint(offset_src)) < push_reg_count) {
2611       int imm_offset = (base_offset + nir_src_as_uint(offset_src)) * 4 +
2612                        nir_src_as_uint(vertex_src) * push_reg_count;
2613 
2614       const brw_reg attr = offset(brw_attr_reg(0, dst.type), bld,
2615                                   first_component + imm_offset);
2616       brw_combine_with_vec(bld, dst, attr, num_components);
2617       return;
2618    }
2619 
2620    /* Resort to the pull model.  Ensure the VUE handles are provided. */
2621    assert(gs_prog_data->base.include_vue_handles);
2622 
2623    brw_reg start = s.gs_payload().icp_handle_start;
2624    brw_reg icp_handle = ntb.bld.vgrf(BRW_TYPE_UD);
2625    const unsigned grf_size_bytes = REG_SIZE * reg_unit(devinfo);
2626 
2627    if (gs_prog_data->invocations == 1) {
2628       if (nir_src_is_const(vertex_src)) {
2629          /* The vertex index is constant; just select the proper URB handle. */
2630          icp_handle =
2631             byte_offset(start, nir_src_as_uint(vertex_src) * grf_size_bytes);
2632       } else {
2633          /* The vertex index is non-constant.  We need to use indirect
2634           * addressing to fetch the proper URB handle.
2635           *
2636           * First, we start with the sequence <7, 6, 5, 4, 3, 2, 1, 0>
2637           * indicating that channel <n> should read the handle from
2638           * DWord <n>.  We convert that to bytes by multiplying by 4.
2639           *
2640           * Next, we convert the vertex index to bytes by multiplying
2641           * by 32/64 (shifting by 5/6), and add the two together.  This is
2642           * the final indirect byte offset.
2643           */
2644          brw_reg sequence = bld.LOAD_SUBGROUP_INVOCATION();
2645 
2646          /* channel_offsets = 4 * sequence = <28, 24, 20, 16, 12, 8, 4, 0> */
2647          brw_reg channel_offsets = bld.SHL(sequence, brw_imm_ud(2u));
2648          /* Convert vertex_index to bytes (multiply by 32/64) */
2649          assert(util_is_power_of_two_nonzero(grf_size_bytes)); /* for ffs() */
2650          brw_reg vertex_offset_bytes =
2651             bld.SHL(retype(get_nir_src(ntb, vertex_src), BRW_TYPE_UD),
2652                     brw_imm_ud(ffs(grf_size_bytes) - 1));
2653          brw_reg icp_offset_bytes =
2654             bld.ADD(vertex_offset_bytes, channel_offsets);
2655 
2656          /* Use first_icp_handle as the base offset.  There is one register
2657           * of URB handles per vertex, so inform the register allocator that
2658           * we might read up to nir->info.gs.vertices_in registers.
2659           */
2660          bld.emit(SHADER_OPCODE_MOV_INDIRECT, icp_handle, start,
2661                   brw_reg(icp_offset_bytes),
2662                   brw_imm_ud(s.nir->info.gs.vertices_in * grf_size_bytes));
2663       }
2664    } else {
2665       assert(gs_prog_data->invocations > 1);
2666 
2667       if (nir_src_is_const(vertex_src)) {
2668          unsigned vertex = nir_src_as_uint(vertex_src);
2669          bld.MOV(icp_handle, component(start, vertex));
2670       } else {
2671          /* The vertex index is non-constant.  We need to use indirect
2672           * addressing to fetch the proper URB handle.
2673           *
2674           * Convert vertex_index to bytes (multiply by 4)
2675           */
2676          brw_reg icp_offset_bytes =
2677             bld.SHL(retype(get_nir_src(ntb, vertex_src), BRW_TYPE_UD),
2678                     brw_imm_ud(2u));
2679 
2680          /* Use first_icp_handle as the base offset.  There is one DWord
2681           * of URB handles per vertex, so inform the register allocator that
2682           * we might read up to ceil(nir->info.gs.vertices_in / 8) registers.
2683           */
2684          bld.emit(SHADER_OPCODE_MOV_INDIRECT, icp_handle, start,
2685                   brw_reg(icp_offset_bytes),
2686                   brw_imm_ud(DIV_ROUND_UP(s.nir->info.gs.vertices_in, 8) *
2687                              grf_size_bytes));
2688       }
2689    }
2690 
2691    fs_inst *inst;
2692    brw_reg indirect_offset = get_nir_src(ntb, offset_src);
2693 
2694    if (nir_src_is_const(offset_src)) {
2695       brw_reg srcs[URB_LOGICAL_NUM_SRCS];
2696       srcs[URB_LOGICAL_SRC_HANDLE] = icp_handle;
2697 
2698       /* Constant indexing - use global offset. */
2699       if (first_component != 0) {
2700          unsigned read_components = num_components + first_component;
2701          brw_reg tmp = bld.vgrf(dst.type, read_components);
2702          inst = bld.emit(SHADER_OPCODE_URB_READ_LOGICAL, tmp, srcs,
2703                          ARRAY_SIZE(srcs));
2704          inst->size_written = read_components *
2705                               tmp.component_size(inst->exec_size);
2706          brw_combine_with_vec(bld, dst, offset(tmp, bld, first_component),
2707                               num_components);
2708       } else {
2709          inst = bld.emit(SHADER_OPCODE_URB_READ_LOGICAL, dst, srcs,
2710                          ARRAY_SIZE(srcs));
2711          inst->size_written = num_components *
2712                               dst.component_size(inst->exec_size);
2713       }
2714       inst->offset = base_offset + nir_src_as_uint(offset_src);
2715    } else {
2716       /* Indirect indexing - use per-slot offsets as well. */
2717       unsigned read_components = num_components + first_component;
2718       brw_reg tmp = bld.vgrf(dst.type, read_components);
2719 
2720       /* Convert oword offset to bytes on Xe2+ */
2721       if (devinfo->ver >= 20)
2722          indirect_offset = bld.SHL(indirect_offset, brw_imm_ud(4u));
2723 
2724       brw_reg srcs[URB_LOGICAL_NUM_SRCS];
2725       srcs[URB_LOGICAL_SRC_HANDLE] = icp_handle;
2726       srcs[URB_LOGICAL_SRC_PER_SLOT_OFFSETS] = indirect_offset;
2727 
2728       if (first_component != 0) {
2729          inst = bld.emit(SHADER_OPCODE_URB_READ_LOGICAL, tmp,
2730                          srcs, ARRAY_SIZE(srcs));
2731          inst->size_written = read_components *
2732                               tmp.component_size(inst->exec_size);
2733          brw_combine_with_vec(bld, dst, offset(tmp, bld, first_component),
2734                               num_components);
2735       } else {
2736          inst = bld.emit(SHADER_OPCODE_URB_READ_LOGICAL, dst,
2737                          srcs, ARRAY_SIZE(srcs));
2738          inst->size_written = num_components *
2739                               dst.component_size(inst->exec_size);
2740       }
2741       inst->offset = base_offset;
2742    }
2743 }
2744 
2745 static brw_reg
get_indirect_offset(nir_to_brw_state & ntb,nir_intrinsic_instr * instr)2746 get_indirect_offset(nir_to_brw_state &ntb, nir_intrinsic_instr *instr)
2747 {
2748    const intel_device_info *devinfo = ntb.devinfo;
2749    nir_src *offset_src = nir_get_io_offset_src(instr);
2750 
2751    if (nir_src_is_const(*offset_src)) {
2752       /* The only constant offset we should find is 0.  brw_nir.c's
2753        * add_const_offset_to_base() will fold other constant offsets
2754        * into the "base" index.
2755        */
2756       assert(nir_src_as_uint(*offset_src) == 0);
2757       return brw_reg();
2758    }
2759 
2760    brw_reg offset = get_nir_src(ntb, *offset_src);
2761 
2762    if (devinfo->ver < 20)
2763       return offset;
2764 
2765    /* Convert Owords (16-bytes) to bytes */
2766    return ntb.bld.SHL(retype(offset, BRW_TYPE_UD), brw_imm_ud(4u));
2767 }
2768 
2769 static void
fs_nir_emit_vs_intrinsic(nir_to_brw_state & ntb,nir_intrinsic_instr * instr)2770 fs_nir_emit_vs_intrinsic(nir_to_brw_state &ntb,
2771                          nir_intrinsic_instr *instr)
2772 {
2773    const fs_builder &bld = ntb.bld;
2774    fs_visitor &s = ntb.s;
2775    assert(s.stage == MESA_SHADER_VERTEX);
2776 
2777    brw_reg dest;
2778    if (nir_intrinsic_infos[instr->intrinsic].has_dest)
2779       dest = get_nir_def(ntb, instr->def);
2780 
2781    switch (instr->intrinsic) {
2782    case nir_intrinsic_load_vertex_id:
2783    case nir_intrinsic_load_base_vertex:
2784       unreachable("should be lowered by nir_lower_system_values()");
2785 
2786    case nir_intrinsic_load_input: {
2787       assert(instr->def.bit_size == 32);
2788       const brw_reg src = offset(brw_attr_reg(0, dest.type), bld,
2789                                 nir_intrinsic_base(instr) * 4 +
2790                                 nir_intrinsic_component(instr) +
2791                                 nir_src_as_uint(instr->src[0]));
2792       brw_combine_with_vec(bld, dest, src, instr->num_components);
2793       break;
2794    }
2795 
2796    case nir_intrinsic_load_vertex_id_zero_base:
2797    case nir_intrinsic_load_instance_id:
2798    case nir_intrinsic_load_base_instance:
2799    case nir_intrinsic_load_draw_id:
2800    case nir_intrinsic_load_first_vertex:
2801    case nir_intrinsic_load_is_indexed_draw:
2802       unreachable("lowered by brw_nir_lower_vs_inputs");
2803 
2804    default:
2805       fs_nir_emit_intrinsic(ntb, bld, instr);
2806       break;
2807    }
2808 }
2809 
2810 static brw_reg
get_tcs_single_patch_icp_handle(nir_to_brw_state & ntb,const fs_builder & bld,nir_intrinsic_instr * instr)2811 get_tcs_single_patch_icp_handle(nir_to_brw_state &ntb, const fs_builder &bld,
2812                                 nir_intrinsic_instr *instr)
2813 {
2814    fs_visitor &s = ntb.s;
2815 
2816    struct brw_tcs_prog_data *tcs_prog_data = brw_tcs_prog_data(s.prog_data);
2817    const nir_src &vertex_src = instr->src[0];
2818    nir_intrinsic_instr *vertex_intrin = nir_src_as_intrinsic(vertex_src);
2819 
2820    const brw_reg start = s.tcs_payload().icp_handle_start;
2821 
2822    brw_reg icp_handle;
2823 
2824    if (nir_src_is_const(vertex_src)) {
2825       /* Emit a MOV to resolve <0,1,0> regioning. */
2826       unsigned vertex = nir_src_as_uint(vertex_src);
2827       icp_handle = bld.MOV(component(start, vertex));
2828    } else if (tcs_prog_data->instances == 1 && vertex_intrin &&
2829               vertex_intrin->intrinsic == nir_intrinsic_load_invocation_id) {
2830       /* For the common case of only 1 instance, an array index of
2831        * gl_InvocationID means reading the handles from the start.  Skip all
2832        * the indirect work.
2833        */
2834       icp_handle = start;
2835    } else {
2836       /* The vertex index is non-constant.  We need to use indirect
2837        * addressing to fetch the proper URB handle.
2838        */
2839       icp_handle = bld.vgrf(BRW_TYPE_UD);
2840 
2841       /* Each ICP handle is a single DWord (4 bytes) */
2842       brw_reg vertex_offset_bytes =
2843          bld.SHL(retype(get_nir_src(ntb, vertex_src), BRW_TYPE_UD),
2844                  brw_imm_ud(2u));
2845 
2846       /* We might read up to 4 registers. */
2847       bld.emit(SHADER_OPCODE_MOV_INDIRECT, icp_handle,
2848                start, vertex_offset_bytes,
2849                brw_imm_ud(4 * REG_SIZE));
2850    }
2851 
2852    return icp_handle;
2853 }
2854 
2855 static brw_reg
get_tcs_multi_patch_icp_handle(nir_to_brw_state & ntb,const fs_builder & bld,nir_intrinsic_instr * instr)2856 get_tcs_multi_patch_icp_handle(nir_to_brw_state &ntb, const fs_builder &bld,
2857                                nir_intrinsic_instr *instr)
2858 {
2859    fs_visitor &s = ntb.s;
2860    const intel_device_info *devinfo = s.devinfo;
2861 
2862    struct brw_tcs_prog_key *tcs_key = (struct brw_tcs_prog_key *) s.key;
2863    const nir_src &vertex_src = instr->src[0];
2864    const unsigned grf_size_bytes = REG_SIZE * reg_unit(devinfo);
2865 
2866    const brw_reg start = s.tcs_payload().icp_handle_start;
2867 
2868    if (nir_src_is_const(vertex_src))
2869       return byte_offset(start, nir_src_as_uint(vertex_src) * grf_size_bytes);
2870 
2871    /* The vertex index is non-constant.  We need to use indirect
2872     * addressing to fetch the proper URB handle.
2873     *
2874     * First, we start with the sequence indicating that channel <n>
2875     * should read the handle from DWord <n>.  We convert that to bytes
2876     * by multiplying by 4.
2877     *
2878     * Next, we convert the vertex index to bytes by multiplying
2879     * by the GRF size (by shifting), and add the two together.  This is
2880     * the final indirect byte offset.
2881     */
2882    brw_reg sequence = bld.LOAD_SUBGROUP_INVOCATION();
2883 
2884    /* Offsets will be 0, 4, 8, ... */
2885    brw_reg channel_offsets = bld.SHL(sequence, brw_imm_ud(2u));
2886    /* Convert vertex_index to bytes (multiply by 32) */
2887    assert(util_is_power_of_two_nonzero(grf_size_bytes)); /* for ffs() */
2888    brw_reg vertex_offset_bytes =
2889       bld.SHL(retype(get_nir_src(ntb, vertex_src), BRW_TYPE_UD),
2890               brw_imm_ud(ffs(grf_size_bytes) - 1));
2891    brw_reg icp_offset_bytes =
2892       bld.ADD(vertex_offset_bytes, channel_offsets);
2893 
2894    /* Use start of ICP handles as the base offset.  There is one register
2895     * of URB handles per vertex, so inform the register allocator that
2896     * we might read up to nir->info.gs.vertices_in registers.
2897     */
2898    brw_reg icp_handle = bld.vgrf(BRW_TYPE_UD);
2899    bld.emit(SHADER_OPCODE_MOV_INDIRECT, icp_handle, start,
2900             icp_offset_bytes,
2901             brw_imm_ud(brw_tcs_prog_key_input_vertices(tcs_key) *
2902                        grf_size_bytes));
2903 
2904    return icp_handle;
2905 }
2906 
2907 static void
setup_barrier_message_payload_gfx125(const fs_builder & bld,const brw_reg & msg_payload)2908 setup_barrier_message_payload_gfx125(const fs_builder &bld,
2909                                      const brw_reg &msg_payload)
2910 {
2911    const fs_builder ubld = bld.exec_all().group(1, 0);
2912    const struct intel_device_info *devinfo = bld.shader->devinfo;
2913    assert(devinfo->verx10 >= 125);
2914 
2915    /* From BSpec: 54006, mov r0.2[31:24] into m0.2[31:24] and m0.2[23:16] */
2916    brw_reg m0_10ub = horiz_offset(retype(msg_payload, BRW_TYPE_UB), 10);
2917    brw_reg r0_11ub =
2918       stride(suboffset(retype(brw_vec1_grf(0, 0), BRW_TYPE_UB), 11),
2919              0, 1, 0);
2920    ubld.group(2, 0).MOV(m0_10ub, r0_11ub);
2921 
2922    if (devinfo->ver >= 20) {
2923       /* Use an active threads barrier. */
2924       const brw_reg m0_2ud = component(retype(msg_payload, BRW_TYPE_UD), 2);
2925       ubld.OR(m0_2ud, m0_2ud, brw_imm_ud(1u << 8));
2926    }
2927 }
2928 
2929 static void
emit_barrier(nir_to_brw_state & ntb)2930 emit_barrier(nir_to_brw_state &ntb)
2931 {
2932    const intel_device_info *devinfo = ntb.devinfo;
2933    const fs_builder &bld = ntb.bld;
2934    fs_visitor &s = ntb.s;
2935 
2936    /* We are getting the barrier ID from the compute shader header */
2937    assert(gl_shader_stage_uses_workgroup(s.stage));
2938 
2939    brw_reg payload = brw_vgrf(s.alloc.allocate(1), BRW_TYPE_UD);
2940 
2941    /* Clear the message payload */
2942    bld.exec_all().group(8, 0).MOV(payload, brw_imm_ud(0u));
2943 
2944    if (devinfo->verx10 >= 125) {
2945       setup_barrier_message_payload_gfx125(bld, payload);
2946    } else {
2947       assert(gl_shader_stage_is_compute(s.stage));
2948 
2949       uint32_t barrier_id_mask;
2950       switch (devinfo->ver) {
2951       case 7:
2952       case 8:
2953          barrier_id_mask = 0x0f000000u; break;
2954       case 9:
2955          barrier_id_mask = 0x8f000000u; break;
2956       case 11:
2957       case 12:
2958          barrier_id_mask = 0x7f000000u; break;
2959       default:
2960          unreachable("barrier is only available on gen >= 7");
2961       }
2962 
2963       /* Copy the barrier id from r0.2 to the message payload reg.2 */
2964       brw_reg r0_2 = brw_reg(retype(brw_vec1_grf(0, 2), BRW_TYPE_UD));
2965       bld.exec_all().group(1, 0).AND(component(payload, 2), r0_2,
2966                                      brw_imm_ud(barrier_id_mask));
2967    }
2968 
2969    /* Emit a gateway "barrier" message using the payload we set up, followed
2970     * by a wait instruction.
2971     */
2972    bld.exec_all().emit(SHADER_OPCODE_BARRIER, reg_undef, payload);
2973 }
2974 
2975 static void
emit_tcs_barrier(nir_to_brw_state & ntb)2976 emit_tcs_barrier(nir_to_brw_state &ntb)
2977 {
2978    const intel_device_info *devinfo = ntb.devinfo;
2979    const fs_builder &bld = ntb.bld;
2980    fs_visitor &s = ntb.s;
2981 
2982    assert(s.stage == MESA_SHADER_TESS_CTRL);
2983    struct brw_tcs_prog_data *tcs_prog_data = brw_tcs_prog_data(s.prog_data);
2984 
2985    brw_reg m0 = bld.vgrf(BRW_TYPE_UD);
2986    brw_reg m0_2 = component(m0, 2);
2987 
2988    const fs_builder chanbld = bld.exec_all().group(1, 0);
2989 
2990    /* Zero the message header */
2991    bld.exec_all().MOV(m0, brw_imm_ud(0u));
2992 
2993    if (devinfo->verx10 >= 125) {
2994       setup_barrier_message_payload_gfx125(bld, m0);
2995    } else if (devinfo->ver >= 11) {
2996       chanbld.AND(m0_2, retype(brw_vec1_grf(0, 2), BRW_TYPE_UD),
2997                   brw_imm_ud(INTEL_MASK(30, 24)));
2998 
2999       /* Set the Barrier Count and the enable bit */
3000       chanbld.OR(m0_2, m0_2,
3001                  brw_imm_ud(tcs_prog_data->instances << 8 | (1 << 15)));
3002    } else {
3003       /* Copy "Barrier ID" from r0.2, bits 16:13 */
3004       chanbld.AND(m0_2, retype(brw_vec1_grf(0, 2), BRW_TYPE_UD),
3005                   brw_imm_ud(INTEL_MASK(16, 13)));
3006 
3007       /* Shift it up to bits 27:24. */
3008       chanbld.SHL(m0_2, m0_2, brw_imm_ud(11));
3009 
3010       /* Set the Barrier Count and the enable bit */
3011       chanbld.OR(m0_2, m0_2,
3012                  brw_imm_ud(tcs_prog_data->instances << 9 | (1 << 15)));
3013    }
3014 
3015    bld.emit(SHADER_OPCODE_BARRIER, bld.null_reg_ud(), m0);
3016 }
3017 
3018 static void
fs_nir_emit_tcs_intrinsic(nir_to_brw_state & ntb,nir_intrinsic_instr * instr)3019 fs_nir_emit_tcs_intrinsic(nir_to_brw_state &ntb,
3020                           nir_intrinsic_instr *instr)
3021 {
3022    const intel_device_info *devinfo = ntb.devinfo;
3023    const fs_builder &bld = ntb.bld;
3024    fs_visitor &s = ntb.s;
3025 
3026    assert(s.stage == MESA_SHADER_TESS_CTRL);
3027    struct brw_tcs_prog_data *tcs_prog_data = brw_tcs_prog_data(s.prog_data);
3028    struct brw_vue_prog_data *vue_prog_data = &tcs_prog_data->base;
3029 
3030    brw_reg dst;
3031    if (nir_intrinsic_infos[instr->intrinsic].has_dest)
3032       dst = get_nir_def(ntb, instr->def);
3033 
3034    switch (instr->intrinsic) {
3035    case nir_intrinsic_load_primitive_id:
3036       bld.MOV(dst, s.tcs_payload().primitive_id);
3037       break;
3038    case nir_intrinsic_load_invocation_id:
3039       bld.MOV(retype(dst, s.invocation_id.type), s.invocation_id);
3040       break;
3041 
3042    case nir_intrinsic_barrier:
3043       if (nir_intrinsic_memory_scope(instr) != SCOPE_NONE)
3044          fs_nir_emit_intrinsic(ntb, bld, instr);
3045       if (nir_intrinsic_execution_scope(instr) == SCOPE_WORKGROUP) {
3046          if (tcs_prog_data->instances != 1)
3047             emit_tcs_barrier(ntb);
3048       }
3049       break;
3050 
3051    case nir_intrinsic_load_input:
3052       unreachable("nir_lower_io should never give us these.");
3053       break;
3054 
3055    case nir_intrinsic_load_per_vertex_input: {
3056       assert(instr->def.bit_size == 32);
3057       brw_reg indirect_offset = get_indirect_offset(ntb, instr);
3058       unsigned imm_offset = nir_intrinsic_base(instr);
3059       fs_inst *inst;
3060 
3061       const bool multi_patch =
3062          vue_prog_data->dispatch_mode == INTEL_DISPATCH_MODE_TCS_MULTI_PATCH;
3063 
3064       brw_reg icp_handle = multi_patch ?
3065          get_tcs_multi_patch_icp_handle(ntb, bld, instr) :
3066          get_tcs_single_patch_icp_handle(ntb, bld, instr);
3067 
3068       /* We can only read two double components with each URB read, so
3069        * we send two read messages in that case, each one loading up to
3070        * two double components.
3071        */
3072       unsigned num_components = instr->num_components;
3073       unsigned first_component = nir_intrinsic_component(instr);
3074 
3075       brw_reg srcs[URB_LOGICAL_NUM_SRCS];
3076       srcs[URB_LOGICAL_SRC_HANDLE] = icp_handle;
3077 
3078       if (indirect_offset.file == BAD_FILE) {
3079          /* Constant indexing - use global offset. */
3080          if (first_component != 0) {
3081             unsigned read_components = num_components + first_component;
3082             brw_reg tmp = bld.vgrf(dst.type, read_components);
3083             inst = bld.emit(SHADER_OPCODE_URB_READ_LOGICAL, tmp, srcs,
3084                             ARRAY_SIZE(srcs));
3085             brw_combine_with_vec(bld, dst, offset(tmp, bld, first_component),
3086                                  num_components);
3087          } else {
3088             inst = bld.emit(SHADER_OPCODE_URB_READ_LOGICAL, dst, srcs,
3089                             ARRAY_SIZE(srcs));
3090          }
3091          inst->offset = imm_offset;
3092       } else {
3093          /* Indirect indexing - use per-slot offsets as well. */
3094          srcs[URB_LOGICAL_SRC_PER_SLOT_OFFSETS] = indirect_offset;
3095 
3096          if (first_component != 0) {
3097             unsigned read_components = num_components + first_component;
3098             brw_reg tmp = bld.vgrf(dst.type, read_components);
3099             inst = bld.emit(SHADER_OPCODE_URB_READ_LOGICAL, tmp,
3100                             srcs, ARRAY_SIZE(srcs));
3101             brw_combine_with_vec(bld, dst, offset(tmp, bld, first_component),
3102                                  num_components);
3103          } else {
3104             inst = bld.emit(SHADER_OPCODE_URB_READ_LOGICAL, dst,
3105                             srcs, ARRAY_SIZE(srcs));
3106          }
3107          inst->offset = imm_offset;
3108       }
3109       inst->size_written = (num_components + first_component) *
3110                            inst->dst.component_size(inst->exec_size);
3111 
3112       /* Copy the temporary to the destination to deal with writemasking.
3113        *
3114        * Also attempt to deal with gl_PointSize being in the .w component.
3115        */
3116       if (inst->offset == 0 && indirect_offset.file == BAD_FILE) {
3117          assert(brw_type_size_bytes(dst.type) == 4);
3118          inst->dst = bld.vgrf(dst.type, 4);
3119          inst->size_written = 4 * REG_SIZE * reg_unit(devinfo);
3120          bld.MOV(dst, offset(inst->dst, bld, 3));
3121       }
3122       break;
3123    }
3124 
3125    case nir_intrinsic_load_output:
3126    case nir_intrinsic_load_per_vertex_output: {
3127       assert(instr->def.bit_size == 32);
3128       brw_reg indirect_offset = get_indirect_offset(ntb, instr);
3129       unsigned imm_offset = nir_intrinsic_base(instr);
3130       unsigned first_component = nir_intrinsic_component(instr);
3131 
3132       fs_inst *inst;
3133       if (indirect_offset.file == BAD_FILE) {
3134          /* This MOV replicates the output handle to all enabled channels
3135           * is SINGLE_PATCH mode.
3136           */
3137          brw_reg patch_handle = bld.MOV(s.tcs_payload().patch_urb_output);
3138 
3139          {
3140             brw_reg srcs[URB_LOGICAL_NUM_SRCS];
3141             srcs[URB_LOGICAL_SRC_HANDLE] = patch_handle;
3142 
3143             if (first_component != 0) {
3144                unsigned read_components =
3145                   instr->num_components + first_component;
3146                brw_reg tmp = bld.vgrf(dst.type, read_components);
3147                inst = bld.emit(SHADER_OPCODE_URB_READ_LOGICAL, tmp,
3148                                srcs, ARRAY_SIZE(srcs));
3149                inst->size_written = read_components * REG_SIZE * reg_unit(devinfo);
3150                brw_combine_with_vec(bld, dst, offset(tmp, bld, first_component),
3151                                     instr->num_components);
3152             } else {
3153                inst = bld.emit(SHADER_OPCODE_URB_READ_LOGICAL, dst,
3154                                srcs, ARRAY_SIZE(srcs));
3155                inst->size_written = instr->num_components * REG_SIZE * reg_unit(devinfo);
3156             }
3157             inst->offset = imm_offset;
3158          }
3159       } else {
3160          /* Indirect indexing - use per-slot offsets as well. */
3161          brw_reg srcs[URB_LOGICAL_NUM_SRCS];
3162          srcs[URB_LOGICAL_SRC_HANDLE] = s.tcs_payload().patch_urb_output;
3163          srcs[URB_LOGICAL_SRC_PER_SLOT_OFFSETS] = indirect_offset;
3164 
3165          if (first_component != 0) {
3166             unsigned read_components =
3167                instr->num_components + first_component;
3168             brw_reg tmp = bld.vgrf(dst.type, read_components);
3169             inst = bld.emit(SHADER_OPCODE_URB_READ_LOGICAL, tmp,
3170                             srcs, ARRAY_SIZE(srcs));
3171             inst->size_written = read_components * REG_SIZE * reg_unit(devinfo);
3172             brw_combine_with_vec(bld, dst, offset(tmp, bld, first_component),
3173                                  instr->num_components);
3174          } else {
3175             inst = bld.emit(SHADER_OPCODE_URB_READ_LOGICAL, dst,
3176                             srcs, ARRAY_SIZE(srcs));
3177             inst->size_written = instr->num_components * REG_SIZE * reg_unit(devinfo);
3178          }
3179          inst->offset = imm_offset;
3180       }
3181       break;
3182    }
3183 
3184    case nir_intrinsic_store_output:
3185    case nir_intrinsic_store_per_vertex_output: {
3186       assert(nir_src_bit_size(instr->src[0]) == 32);
3187       brw_reg value = get_nir_src(ntb, instr->src[0]);
3188       brw_reg indirect_offset = get_indirect_offset(ntb, instr);
3189       unsigned imm_offset = nir_intrinsic_base(instr);
3190       unsigned mask = nir_intrinsic_write_mask(instr);
3191 
3192       if (mask == 0)
3193          break;
3194 
3195       unsigned num_components = util_last_bit(mask);
3196       unsigned first_component = nir_intrinsic_component(instr);
3197       assert((first_component + num_components) <= 4);
3198 
3199       mask = mask << first_component;
3200 
3201       const bool has_urb_lsc = devinfo->ver >= 20;
3202 
3203       brw_reg mask_reg;
3204       if (mask != WRITEMASK_XYZW)
3205          mask_reg = brw_imm_ud(mask << 16);
3206 
3207       brw_reg sources[4];
3208 
3209       unsigned m = has_urb_lsc ? 0 : first_component;
3210       for (unsigned i = 0; i < num_components; i++) {
3211          int c = i + first_component;
3212          if (mask & (1 << c)) {
3213             sources[m++] = offset(value, bld, i);
3214          } else if (devinfo->ver < 20) {
3215             m++;
3216          }
3217       }
3218 
3219       assert(has_urb_lsc || m == (first_component + num_components));
3220 
3221       brw_reg srcs[URB_LOGICAL_NUM_SRCS];
3222       srcs[URB_LOGICAL_SRC_HANDLE] = s.tcs_payload().patch_urb_output;
3223       srcs[URB_LOGICAL_SRC_PER_SLOT_OFFSETS] = indirect_offset;
3224       srcs[URB_LOGICAL_SRC_CHANNEL_MASK] = mask_reg;
3225       srcs[URB_LOGICAL_SRC_DATA] = bld.vgrf(BRW_TYPE_F, m);
3226       srcs[URB_LOGICAL_SRC_COMPONENTS] = brw_imm_ud(m);
3227       bld.LOAD_PAYLOAD(srcs[URB_LOGICAL_SRC_DATA], sources, m, 0);
3228 
3229       fs_inst *inst = bld.emit(SHADER_OPCODE_URB_WRITE_LOGICAL, reg_undef,
3230                                srcs, ARRAY_SIZE(srcs));
3231       inst->offset = imm_offset;
3232       break;
3233    }
3234 
3235    default:
3236       fs_nir_emit_intrinsic(ntb, bld, instr);
3237       break;
3238    }
3239 }
3240 
3241 static void
fs_nir_emit_tes_intrinsic(nir_to_brw_state & ntb,nir_intrinsic_instr * instr)3242 fs_nir_emit_tes_intrinsic(nir_to_brw_state &ntb,
3243                           nir_intrinsic_instr *instr)
3244 {
3245    const intel_device_info *devinfo = ntb.devinfo;
3246    const fs_builder &bld = ntb.bld;
3247    fs_visitor &s = ntb.s;
3248 
3249    assert(s.stage == MESA_SHADER_TESS_EVAL);
3250    struct brw_tes_prog_data *tes_prog_data = brw_tes_prog_data(s.prog_data);
3251 
3252    brw_reg dest;
3253    if (nir_intrinsic_infos[instr->intrinsic].has_dest)
3254       dest = get_nir_def(ntb, instr->def);
3255 
3256    switch (instr->intrinsic) {
3257    case nir_intrinsic_load_primitive_id:
3258       bld.MOV(dest, s.tes_payload().primitive_id);
3259       break;
3260 
3261    case nir_intrinsic_load_tess_coord:
3262       for (unsigned i = 0; i < 3; i++)
3263          bld.MOV(offset(dest, bld, i), s.tes_payload().coords[i]);
3264       break;
3265 
3266    case nir_intrinsic_load_input:
3267    case nir_intrinsic_load_per_vertex_input: {
3268       assert(instr->def.bit_size == 32);
3269       brw_reg indirect_offset = get_indirect_offset(ntb, instr);
3270       unsigned imm_offset = nir_intrinsic_base(instr);
3271       unsigned first_component = nir_intrinsic_component(instr);
3272 
3273       fs_inst *inst;
3274       if (indirect_offset.file == BAD_FILE) {
3275          /* Arbitrarily only push up to 32 vec4 slots worth of data,
3276           * which is 16 registers (since each holds 2 vec4 slots).
3277           */
3278          const unsigned max_push_slots = 32;
3279          if (imm_offset < max_push_slots) {
3280             const brw_reg src = horiz_offset(brw_attr_reg(0, dest.type),
3281                                             4 * imm_offset + first_component);
3282             brw_reg comps[NIR_MAX_VEC_COMPONENTS];
3283             for (unsigned i = 0; i < instr->num_components; i++) {
3284                comps[i] = component(src, i);
3285             }
3286             bld.VEC(dest, comps, instr->num_components);
3287 
3288             tes_prog_data->base.urb_read_length =
3289                MAX2(tes_prog_data->base.urb_read_length,
3290                     (imm_offset / 2) + 1);
3291          } else {
3292             /* Replicate the patch handle to all enabled channels */
3293             brw_reg srcs[URB_LOGICAL_NUM_SRCS];
3294             srcs[URB_LOGICAL_SRC_HANDLE] = s.tes_payload().patch_urb_input;
3295 
3296             if (first_component != 0) {
3297                unsigned read_components =
3298                   instr->num_components + first_component;
3299                brw_reg tmp = bld.vgrf(dest.type, read_components);
3300                inst = bld.emit(SHADER_OPCODE_URB_READ_LOGICAL, tmp,
3301                                srcs, ARRAY_SIZE(srcs));
3302                inst->size_written = read_components * REG_SIZE * reg_unit(devinfo);
3303                brw_combine_with_vec(bld, dest, offset(tmp, bld, first_component),
3304                                     instr->num_components);
3305             } else {
3306                inst = bld.emit(SHADER_OPCODE_URB_READ_LOGICAL, dest,
3307                                srcs, ARRAY_SIZE(srcs));
3308                inst->size_written = instr->num_components * REG_SIZE * reg_unit(devinfo);
3309             }
3310             inst->offset = imm_offset;
3311          }
3312       } else {
3313          /* Indirect indexing - use per-slot offsets as well. */
3314 
3315          /* We can only read two double components with each URB read, so
3316           * we send two read messages in that case, each one loading up to
3317           * two double components.
3318           */
3319          unsigned num_components = instr->num_components;
3320 
3321          brw_reg srcs[URB_LOGICAL_NUM_SRCS];
3322          srcs[URB_LOGICAL_SRC_HANDLE] = s.tes_payload().patch_urb_input;
3323          srcs[URB_LOGICAL_SRC_PER_SLOT_OFFSETS] = indirect_offset;
3324 
3325          if (first_component != 0) {
3326             unsigned read_components =
3327                 num_components + first_component;
3328             brw_reg tmp = bld.vgrf(dest.type, read_components);
3329             inst = bld.emit(SHADER_OPCODE_URB_READ_LOGICAL, tmp,
3330                             srcs, ARRAY_SIZE(srcs));
3331             brw_combine_with_vec(bld, dest, offset(tmp, bld, first_component),
3332                                  num_components);
3333          } else {
3334             inst = bld.emit(SHADER_OPCODE_URB_READ_LOGICAL, dest,
3335                             srcs, ARRAY_SIZE(srcs));
3336          }
3337          inst->offset = imm_offset;
3338          inst->size_written = (num_components + first_component) *
3339                               inst->dst.component_size(inst->exec_size);
3340       }
3341       break;
3342    }
3343    default:
3344       fs_nir_emit_intrinsic(ntb, bld, instr);
3345       break;
3346    }
3347 }
3348 
3349 static void
fs_nir_emit_gs_intrinsic(nir_to_brw_state & ntb,nir_intrinsic_instr * instr)3350 fs_nir_emit_gs_intrinsic(nir_to_brw_state &ntb,
3351                          nir_intrinsic_instr *instr)
3352 {
3353    const fs_builder &bld = ntb.bld;
3354    fs_visitor &s = ntb.s;
3355 
3356    assert(s.stage == MESA_SHADER_GEOMETRY);
3357 
3358    brw_reg dest;
3359    if (nir_intrinsic_infos[instr->intrinsic].has_dest)
3360       dest = get_nir_def(ntb, instr->def);
3361 
3362    switch (instr->intrinsic) {
3363    case nir_intrinsic_load_primitive_id:
3364       assert(s.stage == MESA_SHADER_GEOMETRY);
3365       assert(brw_gs_prog_data(s.prog_data)->include_primitive_id);
3366       bld.MOV(retype(dest, BRW_TYPE_UD), s.gs_payload().primitive_id);
3367       break;
3368 
3369    case nir_intrinsic_load_input:
3370       unreachable("load_input intrinsics are invalid for the GS stage");
3371 
3372    case nir_intrinsic_load_per_vertex_input:
3373       emit_gs_input_load(ntb, dest, instr->src[0], nir_intrinsic_base(instr),
3374                          instr->src[1], instr->num_components,
3375                          nir_intrinsic_component(instr));
3376       break;
3377 
3378    case nir_intrinsic_emit_vertex_with_counter:
3379       emit_gs_vertex(ntb, instr->src[0], nir_intrinsic_stream_id(instr));
3380 
3381       /* After an EmitVertex() call, the values of all outputs are undefined.
3382        * If this is not in control flow, recreate a fresh set of output
3383        * registers to keep their live ranges separate.
3384        */
3385       if (instr->instr.block->cf_node.parent->type == nir_cf_node_function)
3386          fs_nir_setup_outputs(ntb);
3387       break;
3388 
3389    case nir_intrinsic_end_primitive_with_counter:
3390       emit_gs_end_primitive(ntb, instr->src[0]);
3391       break;
3392 
3393    case nir_intrinsic_set_vertex_and_primitive_count:
3394       bld.MOV(s.final_gs_vertex_count, get_nir_src(ntb, instr->src[0]));
3395       break;
3396 
3397    case nir_intrinsic_load_invocation_id: {
3398       brw_reg val = ntb.system_values[SYSTEM_VALUE_INVOCATION_ID];
3399       assert(val.file != BAD_FILE);
3400       dest.type = val.type;
3401       bld.MOV(dest, val);
3402       break;
3403    }
3404 
3405    default:
3406       fs_nir_emit_intrinsic(ntb, bld, instr);
3407       break;
3408    }
3409 }
3410 
3411 /**
3412  * Fetch the current render target layer index.
3413  */
3414 static brw_reg
fetch_render_target_array_index(const fs_builder & bld)3415 fetch_render_target_array_index(const fs_builder &bld)
3416 {
3417    const fs_visitor *v = bld.shader;
3418 
3419    if (bld.shader->devinfo->ver >= 20) {
3420       /* Gfx20+ has separate Render Target Array indices for each pair
3421        * of subspans in order to support multiple polygons, so we need
3422        * to use a <1;8,0> region in order to select the correct word
3423        * for each channel.
3424        */
3425       const brw_reg idx = bld.vgrf(BRW_TYPE_UD);
3426 
3427       for (unsigned i = 0; i < DIV_ROUND_UP(bld.dispatch_width(), 16); i++) {
3428          const fs_builder hbld = bld.group(16, i);
3429          const struct brw_reg reg = retype(brw_vec1_grf(2 * i + 1, 1),
3430                                            BRW_TYPE_UW);
3431          hbld.AND(offset(idx, hbld, i), stride(reg, 1, 8, 0),
3432                   brw_imm_uw(0x7ff));
3433       }
3434 
3435       return idx;
3436    } else if (bld.shader->devinfo->ver >= 12 && v->max_polygons == 2) {
3437       /* According to the BSpec "PS Thread Payload for Normal
3438        * Dispatch", the render target array index is stored as bits
3439        * 26:16 of either the R1.1 or R1.6 poly info dwords, for the
3440        * first and second polygons respectively in multipolygon PS
3441        * dispatch mode.
3442        */
3443       assert(bld.dispatch_width() == 16);
3444       const brw_reg idx = bld.vgrf(BRW_TYPE_UD);
3445 
3446       for (unsigned i = 0; i < v->max_polygons; i++) {
3447          const fs_builder hbld = bld.group(8, i);
3448          const struct brw_reg g1 = brw_uw1_reg(FIXED_GRF, 1, 3 + 10 * i);
3449          hbld.AND(offset(idx, hbld, i), g1, brw_imm_uw(0x7ff));
3450       }
3451 
3452       return idx;
3453    } else if (bld.shader->devinfo->ver >= 12) {
3454       /* The render target array index is provided in the thread payload as
3455        * bits 26:16 of r1.1.
3456        */
3457       const brw_reg idx = bld.vgrf(BRW_TYPE_UD);
3458       bld.AND(idx, brw_uw1_reg(FIXED_GRF, 1, 3),
3459               brw_imm_uw(0x7ff));
3460       return idx;
3461    } else {
3462       /* The render target array index is provided in the thread payload as
3463        * bits 26:16 of r0.0.
3464        */
3465       const brw_reg idx = bld.vgrf(BRW_TYPE_UD);
3466       bld.AND(idx, brw_uw1_reg(FIXED_GRF, 0, 1),
3467               brw_imm_uw(0x7ff));
3468       return idx;
3469    }
3470 }
3471 
3472 static brw_reg
fetch_viewport_index(const fs_builder & bld)3473 fetch_viewport_index(const fs_builder &bld)
3474 {
3475    const fs_visitor *v = bld.shader;
3476 
3477    if (bld.shader->devinfo->ver >= 20) {
3478       /* Gfx20+ has separate viewport indices for each pair
3479        * of subspans in order to support multiple polygons, so we need
3480        * to use a <1;8,0> region in order to select the correct word
3481        * for each channel.
3482        */
3483       const brw_reg idx = bld.vgrf(BRW_TYPE_UD);
3484 
3485       for (unsigned i = 0; i < DIV_ROUND_UP(bld.dispatch_width(), 16); i++) {
3486          const fs_builder hbld = bld.group(16, i);
3487          const struct brw_reg reg = retype(xe2_vec1_grf(i, 9),
3488                                            BRW_TYPE_UW);
3489          hbld.AND(offset(idx, hbld, i), stride(reg, 1, 8, 0),
3490                   brw_imm_uw(0xf000));
3491       }
3492 
3493       bld.SHR(idx, idx, brw_imm_ud(12));
3494       return idx;
3495    } else if (bld.shader->devinfo->ver >= 12 && v->max_polygons == 2) {
3496       /* According to the BSpec "PS Thread Payload for Normal
3497        * Dispatch", the viewport index is stored as bits
3498        * 30:27 of either the R1.1 or R1.6 poly info dwords, for the
3499        * first and second polygons respectively in multipolygon PS
3500        * dispatch mode.
3501        */
3502       assert(bld.dispatch_width() == 16);
3503       const brw_reg idx = bld.vgrf(BRW_TYPE_UD);
3504       brw_reg vp_idx_per_poly_dw[2] = {
3505          brw_ud1_reg(FIXED_GRF, 1, 1), /* R1.1 bits 30:27 */
3506          brw_ud1_reg(FIXED_GRF, 1, 6), /* R1.6 bits 30:27 */
3507       };
3508 
3509       for (unsigned i = 0; i < v->max_polygons; i++) {
3510          const fs_builder hbld = bld.group(8, i);
3511          hbld.SHR(offset(idx, hbld, i), vp_idx_per_poly_dw[i], brw_imm_ud(27));
3512       }
3513 
3514       return bld.AND(idx, brw_imm_ud(0xf));
3515    } else if (bld.shader->devinfo->ver >= 12) {
3516       /* The viewport index is provided in the thread payload as
3517        * bits 30:27 of r1.1.
3518        */
3519       const brw_reg idx = bld.vgrf(BRW_TYPE_UD);
3520       bld.SHR(idx,
3521               bld.AND(brw_uw1_reg(FIXED_GRF, 1, 3),
3522                       brw_imm_uw(0x7800)),
3523               brw_imm_ud(11));
3524       return idx;
3525    } else {
3526       /* The viewport index is provided in the thread payload as
3527        * bits 30:27 of r0.0.
3528        */
3529       const brw_reg idx = bld.vgrf(BRW_TYPE_UD);
3530       bld.SHR(idx,
3531               bld.AND(brw_uw1_reg(FIXED_GRF, 0, 1),
3532                       brw_imm_uw(0x7800)),
3533               brw_imm_ud(11));
3534       return idx;
3535    }
3536 }
3537 
3538 /* Sample from the MCS surface attached to this multisample texture. */
3539 static brw_reg
emit_mcs_fetch(nir_to_brw_state & ntb,const brw_reg & coordinate,unsigned components,const brw_reg & texture,const brw_reg & texture_handle)3540 emit_mcs_fetch(nir_to_brw_state &ntb, const brw_reg &coordinate, unsigned components,
3541                const brw_reg &texture,
3542                const brw_reg &texture_handle)
3543 {
3544    const fs_builder &bld = ntb.bld;
3545 
3546    const brw_reg dest = bld.vgrf(BRW_TYPE_UD, 4);
3547 
3548    brw_reg srcs[TEX_LOGICAL_NUM_SRCS];
3549    srcs[TEX_LOGICAL_SRC_COORDINATE] = coordinate;
3550    srcs[TEX_LOGICAL_SRC_SURFACE] = texture;
3551    srcs[TEX_LOGICAL_SRC_SAMPLER] = brw_imm_ud(0);
3552    srcs[TEX_LOGICAL_SRC_SURFACE_HANDLE] = texture_handle;
3553    srcs[TEX_LOGICAL_SRC_COORD_COMPONENTS] = brw_imm_d(components);
3554    srcs[TEX_LOGICAL_SRC_GRAD_COMPONENTS] = brw_imm_d(0);
3555    srcs[TEX_LOGICAL_SRC_RESIDENCY] = brw_imm_d(0);
3556 
3557    fs_inst *inst = bld.emit(SHADER_OPCODE_TXF_MCS_LOGICAL, dest, srcs,
3558                             ARRAY_SIZE(srcs));
3559 
3560    /* We only care about one or two regs of response, but the sampler always
3561     * writes 4/8.
3562     */
3563    inst->size_written = 4 * dest.component_size(inst->exec_size);
3564 
3565    return dest;
3566 }
3567 
3568 /**
3569  * Fake non-coherent framebuffer read implemented using TXF to fetch from the
3570  * framebuffer at the current fragment coordinates and sample index.
3571  */
3572 static fs_inst *
emit_non_coherent_fb_read(nir_to_brw_state & ntb,const fs_builder & bld,const brw_reg & dst,unsigned target)3573 emit_non_coherent_fb_read(nir_to_brw_state &ntb, const fs_builder &bld, const brw_reg &dst,
3574                           unsigned target)
3575 {
3576    fs_visitor &s = ntb.s;
3577    const struct intel_device_info *devinfo = s.devinfo;
3578 
3579    assert(bld.shader->stage == MESA_SHADER_FRAGMENT);
3580    const brw_wm_prog_key *wm_key =
3581       reinterpret_cast<const brw_wm_prog_key *>(s.key);
3582    assert(!wm_key->coherent_fb_fetch);
3583 
3584    /* Calculate the fragment coordinates. */
3585    const brw_reg coords = bld.vgrf(BRW_TYPE_UD, 3);
3586    bld.MOV(offset(coords, bld, 0), s.pixel_x);
3587    bld.MOV(offset(coords, bld, 1), s.pixel_y);
3588    bld.MOV(offset(coords, bld, 2), fetch_render_target_array_index(bld));
3589 
3590    /* Calculate the sample index and MCS payload when multisampling.  Luckily
3591     * the MCS fetch message behaves deterministically for UMS surfaces, so it
3592     * shouldn't be necessary to recompile based on whether the framebuffer is
3593     * CMS or UMS.
3594     */
3595    assert(wm_key->multisample_fbo == BRW_ALWAYS ||
3596           wm_key->multisample_fbo == BRW_NEVER);
3597    if (wm_key->multisample_fbo &&
3598        ntb.system_values[SYSTEM_VALUE_SAMPLE_ID].file == BAD_FILE)
3599       ntb.system_values[SYSTEM_VALUE_SAMPLE_ID] = emit_sampleid_setup(ntb);
3600 
3601    const brw_reg sample = ntb.system_values[SYSTEM_VALUE_SAMPLE_ID];
3602    const brw_reg mcs = wm_key->multisample_fbo ?
3603       emit_mcs_fetch(ntb, coords, 3, brw_imm_ud(target), brw_reg()) : brw_reg();
3604 
3605    /* Use either a normal or a CMS texel fetch message depending on whether
3606     * the framebuffer is single or multisample.  On SKL+ use the wide CMS
3607     * message just in case the framebuffer uses 16x multisampling, it should
3608     * be equivalent to the normal CMS fetch for lower multisampling modes.
3609     */
3610    opcode op;
3611    if (wm_key->multisample_fbo) {
3612       /* On SKL+ use the wide CMS message just in case the framebuffer uses 16x
3613        * multisampling, it should be equivalent to the normal CMS fetch for
3614        * lower multisampling modes.
3615        *
3616        * On Gfx12HP, there is only CMS_W variant available.
3617        */
3618       if (devinfo->verx10 >= 125)
3619          op = SHADER_OPCODE_TXF_CMS_W_GFX12_LOGICAL;
3620       else
3621          op = SHADER_OPCODE_TXF_CMS_W_LOGICAL;
3622    } else {
3623       op = SHADER_OPCODE_TXF_LOGICAL;
3624    }
3625 
3626    /* Emit the instruction. */
3627    brw_reg srcs[TEX_LOGICAL_NUM_SRCS];
3628    srcs[TEX_LOGICAL_SRC_COORDINATE]       = coords;
3629    srcs[TEX_LOGICAL_SRC_LOD]              = brw_imm_ud(0);
3630    srcs[TEX_LOGICAL_SRC_SAMPLE_INDEX]     = sample;
3631    srcs[TEX_LOGICAL_SRC_MCS]              = mcs;
3632    srcs[TEX_LOGICAL_SRC_SURFACE]          = brw_imm_ud(target);
3633    srcs[TEX_LOGICAL_SRC_SAMPLER]          = brw_imm_ud(0);
3634    srcs[TEX_LOGICAL_SRC_COORD_COMPONENTS] = brw_imm_ud(3);
3635    srcs[TEX_LOGICAL_SRC_GRAD_COMPONENTS]  = brw_imm_ud(0);
3636    srcs[TEX_LOGICAL_SRC_RESIDENCY]        = brw_imm_ud(0);
3637 
3638    fs_inst *inst = bld.emit(op, dst, srcs, ARRAY_SIZE(srcs));
3639    inst->size_written = 4 * inst->dst.component_size(inst->exec_size);
3640 
3641    return inst;
3642 }
3643 
3644 /**
3645  * Actual coherent framebuffer read implemented using the native render target
3646  * read message.  Requires SKL+.
3647  */
3648 static fs_inst *
emit_coherent_fb_read(const fs_builder & bld,const brw_reg & dst,unsigned target)3649 emit_coherent_fb_read(const fs_builder &bld, const brw_reg &dst, unsigned target)
3650 {
3651    fs_inst *inst = bld.emit(FS_OPCODE_FB_READ_LOGICAL, dst);
3652    inst->target = target;
3653    inst->size_written = 4 * inst->dst.component_size(inst->exec_size);
3654 
3655    return inst;
3656 }
3657 
3658 static brw_reg
alloc_temporary(const fs_builder & bld,unsigned size,brw_reg * regs,unsigned n)3659 alloc_temporary(const fs_builder &bld, unsigned size, brw_reg *regs, unsigned n)
3660 {
3661    if (n && regs[0].file != BAD_FILE) {
3662       return regs[0];
3663 
3664    } else {
3665       const brw_reg tmp = bld.vgrf(BRW_TYPE_F, size);
3666 
3667       for (unsigned i = 0; i < n; i++)
3668          regs[i] = tmp;
3669 
3670       return tmp;
3671    }
3672 }
3673 
3674 static brw_reg
alloc_frag_output(nir_to_brw_state & ntb,unsigned location)3675 alloc_frag_output(nir_to_brw_state &ntb, unsigned location)
3676 {
3677    fs_visitor &s = ntb.s;
3678 
3679    assert(s.stage == MESA_SHADER_FRAGMENT);
3680    const brw_wm_prog_key *const key =
3681       reinterpret_cast<const brw_wm_prog_key *>(s.key);
3682    const unsigned l = GET_FIELD(location, BRW_NIR_FRAG_OUTPUT_LOCATION);
3683    const unsigned i = GET_FIELD(location, BRW_NIR_FRAG_OUTPUT_INDEX);
3684 
3685    if (i > 0 || (key->force_dual_color_blend && l == FRAG_RESULT_DATA1))
3686       return alloc_temporary(ntb.bld, 4, &s.dual_src_output, 1);
3687 
3688    else if (l == FRAG_RESULT_COLOR)
3689       return alloc_temporary(ntb.bld, 4, s.outputs,
3690                              MAX2(key->nr_color_regions, 1));
3691 
3692    else if (l == FRAG_RESULT_DEPTH)
3693       return alloc_temporary(ntb.bld, 1, &s.frag_depth, 1);
3694 
3695    else if (l == FRAG_RESULT_STENCIL)
3696       return alloc_temporary(ntb.bld, 1, &s.frag_stencil, 1);
3697 
3698    else if (l == FRAG_RESULT_SAMPLE_MASK)
3699       return alloc_temporary(ntb.bld, 1, &s.sample_mask, 1);
3700 
3701    else if (l >= FRAG_RESULT_DATA0 &&
3702             l < FRAG_RESULT_DATA0 + BRW_MAX_DRAW_BUFFERS)
3703       return alloc_temporary(ntb.bld, 4,
3704                              &s.outputs[l - FRAG_RESULT_DATA0], 1);
3705 
3706    else
3707       unreachable("Invalid location");
3708 }
3709 
3710 static void
emit_is_helper_invocation(nir_to_brw_state & ntb,brw_reg result)3711 emit_is_helper_invocation(nir_to_brw_state &ntb, brw_reg result)
3712 {
3713    const fs_builder &bld = ntb.bld;
3714 
3715    /* Unlike the regular gl_HelperInvocation, that is defined at dispatch,
3716     * the helperInvocationEXT() (aka SpvOpIsHelperInvocationEXT) takes into
3717     * consideration demoted invocations.
3718     */
3719    result.type = BRW_TYPE_UD;
3720 
3721    bld.MOV(result, brw_imm_ud(0));
3722 
3723    /* See brw_sample_mask_reg() for why we split SIMD32 into SIMD16 here. */
3724    unsigned width = bld.dispatch_width();
3725    for (unsigned i = 0; i < DIV_ROUND_UP(width, 16); i++) {
3726       const fs_builder b = bld.group(MIN2(width, 16), i);
3727 
3728       fs_inst *mov = b.MOV(offset(result, b, i), brw_imm_ud(~0));
3729 
3730       /* The at() ensures that any code emitted to get the predicate happens
3731        * before the mov right above.  This is not an issue elsewhere because
3732        * lowering code already set up the builder this way.
3733        */
3734       brw_emit_predicate_on_sample_mask(b.at(NULL, mov), mov);
3735       mov->predicate_inverse = true;
3736    }
3737 }
3738 
3739 static brw_reg
emit_frontfacing_interpolation(nir_to_brw_state & ntb)3740 emit_frontfacing_interpolation(nir_to_brw_state &ntb)
3741 {
3742    const intel_device_info *devinfo = ntb.devinfo;
3743    const fs_builder &bld = ntb.bld;
3744    fs_visitor &s = ntb.s;
3745 
3746    brw_reg ff = bld.vgrf(BRW_TYPE_D);
3747 
3748    if (devinfo->ver >= 20) {
3749       /* Gfx20+ has separate back-facing bits for each pair of
3750        * subspans in order to support multiple polygons, so we need to
3751        * use a <1;8,0> region in order to select the correct word for
3752        * each channel.
3753        */
3754       const brw_reg tmp = bld.vgrf(BRW_TYPE_UW);
3755 
3756       for (unsigned i = 0; i < DIV_ROUND_UP(s.dispatch_width, 16); i++) {
3757          const fs_builder hbld = bld.group(16, i);
3758          const struct brw_reg gi_uw = retype(xe2_vec1_grf(i, 9),
3759                                              BRW_TYPE_UW);
3760          hbld.AND(offset(tmp, hbld, i), gi_uw, brw_imm_uw(0x800));
3761       }
3762 
3763       bld.CMP(ff, tmp, brw_imm_uw(0), BRW_CONDITIONAL_Z);
3764 
3765    } else if (devinfo->ver >= 12 && s.max_polygons == 2) {
3766       /* According to the BSpec "PS Thread Payload for Normal
3767        * Dispatch", the front/back facing interpolation bit is stored
3768        * as bit 15 of either the R1.1 or R1.6 poly info field, for the
3769        * first and second polygons respectively in multipolygon PS
3770        * dispatch mode.
3771        */
3772       assert(s.dispatch_width == 16);
3773       brw_reg tmp = bld.vgrf(BRW_TYPE_W);
3774 
3775       for (unsigned i = 0; i < s.max_polygons; i++) {
3776          const fs_builder hbld = bld.group(8, i);
3777          const struct brw_reg g1 = retype(brw_vec1_grf(1, 1 + 5 * i),
3778                                           BRW_TYPE_W);
3779          hbld.ASR(offset(tmp, hbld, i), g1, brw_imm_d(15));
3780       }
3781 
3782       bld.NOT(ff, tmp);
3783 
3784    } else if (devinfo->ver >= 12) {
3785       brw_reg g1 = brw_reg(retype(brw_vec1_grf(1, 1), BRW_TYPE_W));
3786 
3787       brw_reg tmp = bld.vgrf(BRW_TYPE_W);
3788       bld.ASR(tmp, g1, brw_imm_d(15));
3789       bld.NOT(ff, tmp);
3790    } else {
3791       /* Bit 15 of g0.0 is 0 if the polygon is front facing. We want to create
3792        * a boolean result from this (~0/true or 0/false).
3793        *
3794        * We can use the fact that bit 15 is the MSB of g0.0:W to accomplish
3795        * this task in only one instruction:
3796        *    - a negation source modifier will flip the bit; and
3797        *    - a W -> D type conversion will sign extend the bit into the high
3798        *      word of the destination.
3799        *
3800        * An ASR 15 fills the low word of the destination.
3801        */
3802       brw_reg g0 = brw_reg(retype(brw_vec1_grf(0, 0), BRW_TYPE_W));
3803 
3804       bld.ASR(ff, negate(g0), brw_imm_d(15));
3805    }
3806 
3807    return ff;
3808 }
3809 
3810 static brw_reg
emit_samplepos_setup(nir_to_brw_state & ntb)3811 emit_samplepos_setup(nir_to_brw_state &ntb)
3812 {
3813    const fs_builder &bld = ntb.bld;
3814    fs_visitor &s = ntb.s;
3815 
3816    assert(s.stage == MESA_SHADER_FRAGMENT);
3817    struct brw_wm_prog_data *wm_prog_data = brw_wm_prog_data(s.prog_data);
3818 
3819    const fs_builder abld = bld.annotate("compute sample position");
3820    brw_reg pos = abld.vgrf(BRW_TYPE_F, 2);
3821 
3822    if (wm_prog_data->persample_dispatch == BRW_NEVER) {
3823       /* From ARB_sample_shading specification:
3824        * "When rendering to a non-multisample buffer, or if multisample
3825        *  rasterization is disabled, gl_SamplePosition will always be
3826        *  (0.5, 0.5).
3827        */
3828       bld.MOV(offset(pos, bld, 0), brw_imm_f(0.5f));
3829       bld.MOV(offset(pos, bld, 1), brw_imm_f(0.5f));
3830       return pos;
3831    }
3832 
3833    /* WM will be run in MSDISPMODE_PERSAMPLE. So, only one of SIMD8 or SIMD16
3834     * mode will be enabled.
3835     *
3836     * From the Ivy Bridge PRM, volume 2 part 1, page 344:
3837     * R31.1:0         Position Offset X/Y for Slot[3:0]
3838     * R31.3:2         Position Offset X/Y for Slot[7:4]
3839     * .....
3840     *
3841     * The X, Y sample positions come in as bytes in  thread payload. So, read
3842     * the positions using vstride=16, width=8, hstride=2.
3843     */
3844    const brw_reg sample_pos_reg =
3845       fetch_payload_reg(abld, s.fs_payload().sample_pos_reg, BRW_TYPE_W);
3846 
3847    for (unsigned i = 0; i < 2; i++) {
3848       brw_reg tmp_d = bld.vgrf(BRW_TYPE_D);
3849       abld.MOV(tmp_d, subscript(sample_pos_reg, BRW_TYPE_B, i));
3850       /* Convert int_sample_pos to floating point */
3851       brw_reg tmp_f = bld.vgrf(BRW_TYPE_F);
3852       abld.MOV(tmp_f, tmp_d);
3853       /* Scale to the range [0, 1] */
3854       abld.MUL(offset(pos, abld, i), tmp_f, brw_imm_f(1 / 16.0f));
3855    }
3856 
3857    if (wm_prog_data->persample_dispatch == BRW_SOMETIMES) {
3858       check_dynamic_msaa_flag(abld, wm_prog_data,
3859                               INTEL_MSAA_FLAG_PERSAMPLE_DISPATCH);
3860       for (unsigned i = 0; i < 2; i++) {
3861          set_predicate(BRW_PREDICATE_NORMAL,
3862                        bld.SEL(offset(pos, abld, i), offset(pos, abld, i),
3863                                brw_imm_f(0.5f)));
3864       }
3865    }
3866 
3867    return pos;
3868 }
3869 
3870 static brw_reg
emit_sampleid_setup(nir_to_brw_state & ntb)3871 emit_sampleid_setup(nir_to_brw_state &ntb)
3872 {
3873    const intel_device_info *devinfo = ntb.devinfo;
3874    const fs_builder &bld = ntb.bld;
3875    fs_visitor &s = ntb.s;
3876 
3877    assert(s.stage == MESA_SHADER_FRAGMENT);
3878    ASSERTED brw_wm_prog_key *key = (brw_wm_prog_key*) s.key;
3879    struct brw_wm_prog_data *wm_prog_data = brw_wm_prog_data(s.prog_data);
3880 
3881    const fs_builder abld = bld.annotate("compute sample id");
3882    brw_reg sample_id = abld.vgrf(BRW_TYPE_UD);
3883 
3884    assert(key->multisample_fbo != BRW_NEVER);
3885 
3886    /* Sample ID comes in as 4-bit numbers in g1.0:
3887     *
3888     *    15:12 Slot 3 SampleID (only used in SIMD16)
3889     *     11:8 Slot 2 SampleID (only used in SIMD16)
3890     *      7:4 Slot 1 SampleID
3891     *      3:0 Slot 0 SampleID
3892     *
3893     * Each slot corresponds to four channels, so we want to replicate each
3894     * half-byte value to 4 channels in a row:
3895     *
3896     *    dst+0:    .7    .6    .5    .4    .3    .2    .1    .0
3897     *             7:4   7:4   7:4   7:4   3:0   3:0   3:0   3:0
3898     *
3899     *    dst+1:    .7    .6    .5    .4    .3    .2    .1    .0  (if SIMD16)
3900     *           15:12 15:12 15:12 15:12  11:8  11:8  11:8  11:8
3901     *
3902     * First, we read g1.0 with a <1,8,0>UB region, causing the first 8
3903     * channels to read the first byte (7:0), and the second group of 8
3904     * channels to read the second byte (15:8).  Then, we shift right by
3905     * a vector immediate of <4, 4, 4, 4, 0, 0, 0, 0>, moving the slot 1 / 3
3906     * values into place.  Finally, we AND with 0xf to keep the low nibble.
3907     *
3908     *    shr(16) tmp<1>W g1.0<1,8,0>B 0x44440000:V
3909     *    and(16) dst<1>D tmp<8,8,1>W  0xf:W
3910     *
3911     * TODO: These payload bits exist on Gfx7 too, but they appear to always
3912     *       be zero, so this code fails to work.  We should find out why.
3913     */
3914    const brw_reg tmp = abld.vgrf(BRW_TYPE_UW);
3915 
3916    for (unsigned i = 0; i < DIV_ROUND_UP(s.dispatch_width, 16); i++) {
3917       const fs_builder hbld = abld.group(MIN2(16, s.dispatch_width), i);
3918       /* According to the "PS Thread Payload for Normal Dispatch"
3919        * pages on the BSpec, the sample ids are stored in R0.8/R1.8
3920        * on gfx20+ and in R1.0/R2.0 on gfx8+.
3921        */
3922       const struct brw_reg id_reg = devinfo->ver >= 20 ? xe2_vec1_grf(i, 8) :
3923                                     brw_vec1_grf(i + 1, 0);
3924       hbld.SHR(offset(tmp, hbld, i),
3925                stride(retype(id_reg, BRW_TYPE_UB), 1, 8, 0),
3926                brw_imm_v(0x44440000));
3927    }
3928 
3929    abld.AND(sample_id, tmp, brw_imm_w(0xf));
3930 
3931    if (key->multisample_fbo == BRW_SOMETIMES) {
3932       check_dynamic_msaa_flag(abld, wm_prog_data,
3933                               INTEL_MSAA_FLAG_MULTISAMPLE_FBO);
3934       set_predicate(BRW_PREDICATE_NORMAL,
3935                     abld.SEL(sample_id, sample_id, brw_imm_ud(0)));
3936    }
3937 
3938    return sample_id;
3939 }
3940 
3941 static brw_reg
emit_samplemaskin_setup(nir_to_brw_state & ntb)3942 emit_samplemaskin_setup(nir_to_brw_state &ntb)
3943 {
3944    const fs_builder &bld = ntb.bld;
3945    fs_visitor &s = ntb.s;
3946 
3947    assert(s.stage == MESA_SHADER_FRAGMENT);
3948    struct brw_wm_prog_data *wm_prog_data = brw_wm_prog_data(s.prog_data);
3949 
3950    /* The HW doesn't provide us with expected values. */
3951    assert(wm_prog_data->coarse_pixel_dispatch != BRW_ALWAYS);
3952 
3953    brw_reg coverage_mask =
3954       fetch_payload_reg(bld, s.fs_payload().sample_mask_in_reg, BRW_TYPE_UD);
3955 
3956    if (wm_prog_data->persample_dispatch == BRW_NEVER)
3957       return coverage_mask;
3958 
3959    /* gl_SampleMaskIn[] comes from two sources: the input coverage mask,
3960     * and a mask representing which sample is being processed by the
3961     * current shader invocation.
3962     *
3963     * From the OES_sample_variables specification:
3964     * "When per-sample shading is active due to the use of a fragment input
3965     *  qualified by "sample" or due to the use of the gl_SampleID or
3966     *  gl_SamplePosition variables, only the bit for the current sample is
3967     *  set in gl_SampleMaskIn."
3968     */
3969    const fs_builder abld = bld.annotate("compute gl_SampleMaskIn");
3970 
3971    if (ntb.system_values[SYSTEM_VALUE_SAMPLE_ID].file == BAD_FILE)
3972       ntb.system_values[SYSTEM_VALUE_SAMPLE_ID] = emit_sampleid_setup(ntb);
3973 
3974    brw_reg one = abld.MOV(brw_imm_ud(1));
3975    brw_reg enabled_mask = abld.SHL(one, ntb.system_values[SYSTEM_VALUE_SAMPLE_ID]);
3976    brw_reg mask = abld.AND(enabled_mask, coverage_mask);
3977 
3978    if (wm_prog_data->persample_dispatch == BRW_ALWAYS)
3979       return mask;
3980 
3981    check_dynamic_msaa_flag(abld, wm_prog_data,
3982                            INTEL_MSAA_FLAG_PERSAMPLE_DISPATCH);
3983    set_predicate(BRW_PREDICATE_NORMAL, abld.SEL(mask, mask, coverage_mask));
3984 
3985    return mask;
3986 }
3987 
3988 static brw_reg
emit_shading_rate_setup(nir_to_brw_state & ntb)3989 emit_shading_rate_setup(nir_to_brw_state &ntb)
3990 {
3991    const intel_device_info *devinfo = ntb.devinfo;
3992    const fs_builder &bld = ntb.bld;
3993 
3994    assert(devinfo->ver >= 11);
3995 
3996    struct brw_wm_prog_data *wm_prog_data =
3997       brw_wm_prog_data(bld.shader->prog_data);
3998 
3999    /* Coarse pixel shading size fields overlap with other fields of not in
4000     * coarse pixel dispatch mode, so report 0 when that's not the case.
4001     */
4002    if (wm_prog_data->coarse_pixel_dispatch == BRW_NEVER)
4003       return brw_imm_ud(0);
4004 
4005    const fs_builder abld = bld.annotate("compute fragment shading rate");
4006 
4007    /* The shading rates provided in the shader are the actual 2D shading
4008     * rate while the SPIR-V built-in is the enum value that has the shading
4009     * rate encoded as a bitfield.  Fortunately, the bitfield value is just
4010     * the shading rate divided by two and shifted.
4011     */
4012 
4013    /* r1.0 - 0:7 ActualCoarsePixelShadingSize.X */
4014    brw_reg actual_x = brw_reg(retype(brw_vec1_grf(1, 0), BRW_TYPE_UB));
4015    /* r1.0 - 15:8 ActualCoarsePixelShadingSize.Y */
4016    brw_reg actual_y = byte_offset(actual_x, 1);
4017 
4018    brw_reg int_rate_y = abld.SHR(actual_y, brw_imm_ud(1));
4019    brw_reg int_rate_x = abld.SHR(actual_x, brw_imm_ud(1));
4020 
4021    brw_reg rate = abld.OR(abld.SHL(int_rate_x, brw_imm_ud(2)), int_rate_y);
4022 
4023    if (wm_prog_data->coarse_pixel_dispatch == BRW_ALWAYS)
4024       return rate;
4025 
4026    check_dynamic_msaa_flag(abld, wm_prog_data,
4027                            INTEL_MSAA_FLAG_COARSE_RT_WRITES);
4028    set_predicate(BRW_PREDICATE_NORMAL, abld.SEL(rate, rate, brw_imm_ud(0)));
4029 
4030    return rate;
4031 }
4032 
4033 /* Input data is organized with first the per-primitive values, followed
4034  * by per-vertex values.  The per-vertex will have interpolation information
4035  * associated, so use 4 components for each value.
4036  */
4037 
4038 /* The register location here is relative to the start of the URB
4039  * data.  It will get adjusted to be a real location before
4040  * generate_code() time.
4041  */
4042 static brw_reg
brw_interp_reg(const fs_builder & bld,unsigned location,unsigned channel,unsigned comp)4043 brw_interp_reg(const fs_builder &bld, unsigned location,
4044                unsigned channel, unsigned comp)
4045 {
4046    fs_visitor &s = *bld.shader;
4047    assert(s.stage == MESA_SHADER_FRAGMENT);
4048    assert(BITFIELD64_BIT(location) & ~s.nir->info.per_primitive_inputs);
4049 
4050    const struct brw_wm_prog_data *prog_data = brw_wm_prog_data(s.prog_data);
4051 
4052    assert(prog_data->urb_setup[location] >= 0);
4053    unsigned nr = prog_data->urb_setup[location];
4054    channel += prog_data->urb_setup_channel[location];
4055 
4056    /* Adjust so we start counting from the first per_vertex input. */
4057    assert(nr >= prog_data->num_per_primitive_inputs);
4058    nr -= prog_data->num_per_primitive_inputs;
4059 
4060    const unsigned per_vertex_start = prog_data->num_per_primitive_inputs;
4061    const unsigned regnr = per_vertex_start + (nr * 4) + channel;
4062 
4063    if (s.max_polygons > 1) {
4064       /* In multipolygon dispatch each plane parameter is a
4065        * dispatch_width-wide SIMD vector (see comment in
4066        * assign_urb_setup()), so we need to use offset() instead of
4067        * component() to select the specified parameter.
4068        */
4069       const brw_reg tmp = bld.vgrf(BRW_TYPE_UD);
4070       bld.MOV(tmp, offset(brw_attr_reg(regnr, BRW_TYPE_UD),
4071                           s.dispatch_width, comp));
4072       return retype(tmp, BRW_TYPE_F);
4073    } else {
4074       return component(brw_attr_reg(regnr, BRW_TYPE_F), comp);
4075    }
4076 }
4077 
4078 /* The register location here is relative to the start of the URB
4079  * data.  It will get adjusted to be a real location before
4080  * generate_code() time.
4081  */
4082 static brw_reg
brw_per_primitive_reg(const fs_builder & bld,int location,unsigned comp)4083 brw_per_primitive_reg(const fs_builder &bld, int location, unsigned comp)
4084 {
4085    fs_visitor &s = *bld.shader;
4086    assert(s.stage == MESA_SHADER_FRAGMENT);
4087    assert(BITFIELD64_BIT(location) & s.nir->info.per_primitive_inputs);
4088 
4089    const struct brw_wm_prog_data *prog_data = brw_wm_prog_data(s.prog_data);
4090 
4091    comp += prog_data->urb_setup_channel[location];
4092 
4093    assert(prog_data->urb_setup[location] >= 0);
4094 
4095    const unsigned regnr = prog_data->urb_setup[location] + comp / 4;
4096 
4097    assert(regnr < prog_data->num_per_primitive_inputs);
4098 
4099    if (s.max_polygons > 1) {
4100       /* In multipolygon dispatch each primitive constant is a
4101        * dispatch_width-wide SIMD vector (see comment in
4102        * assign_urb_setup()), so we need to use offset() instead of
4103        * component() to select the specified parameter.
4104        */
4105       const brw_reg tmp = bld.vgrf(BRW_TYPE_UD);
4106       bld.MOV(tmp, offset(brw_attr_reg(regnr, BRW_TYPE_UD),
4107                           s.dispatch_width, comp % 4));
4108       return retype(tmp, BRW_TYPE_F);
4109    } else {
4110       return component(brw_attr_reg(regnr, BRW_TYPE_F), comp % 4);
4111    }
4112 }
4113 
4114 static void
fs_nir_emit_fs_intrinsic(nir_to_brw_state & ntb,nir_intrinsic_instr * instr)4115 fs_nir_emit_fs_intrinsic(nir_to_brw_state &ntb,
4116                          nir_intrinsic_instr *instr)
4117 {
4118    const intel_device_info *devinfo = ntb.devinfo;
4119    const fs_builder &bld = ntb.bld;
4120    fs_visitor &s = ntb.s;
4121 
4122    assert(s.stage == MESA_SHADER_FRAGMENT);
4123 
4124    brw_reg dest;
4125    if (nir_intrinsic_infos[instr->intrinsic].has_dest)
4126       dest = get_nir_def(ntb, instr->def);
4127 
4128    switch (instr->intrinsic) {
4129    case nir_intrinsic_load_front_face:
4130       bld.MOV(retype(dest, BRW_TYPE_D), emit_frontfacing_interpolation(ntb));
4131       break;
4132 
4133    case nir_intrinsic_load_sample_pos:
4134    case nir_intrinsic_load_sample_pos_or_center: {
4135       brw_reg sample_pos = ntb.system_values[SYSTEM_VALUE_SAMPLE_POS];
4136       assert(sample_pos.file != BAD_FILE);
4137       dest.type = sample_pos.type;
4138       bld.MOV(dest, sample_pos);
4139       bld.MOV(offset(dest, bld, 1), offset(sample_pos, bld, 1));
4140       break;
4141    }
4142 
4143    case nir_intrinsic_load_layer_id:
4144       dest.type = BRW_TYPE_UD;
4145       bld.MOV(dest, fetch_render_target_array_index(bld));
4146       break;
4147 
4148    case nir_intrinsic_is_helper_invocation:
4149       emit_is_helper_invocation(ntb, dest);
4150       break;
4151 
4152    case nir_intrinsic_load_helper_invocation:
4153    case nir_intrinsic_load_sample_mask_in:
4154    case nir_intrinsic_load_sample_id:
4155    case nir_intrinsic_load_frag_shading_rate: {
4156       gl_system_value sv = nir_system_value_from_intrinsic(instr->intrinsic);
4157       brw_reg val = ntb.system_values[sv];
4158       assert(val.file != BAD_FILE);
4159       dest.type = val.type;
4160       bld.MOV(dest, val);
4161       break;
4162    }
4163 
4164    case nir_intrinsic_store_output: {
4165       const brw_reg src = get_nir_src(ntb, instr->src[0]);
4166       const unsigned store_offset = nir_src_as_uint(instr->src[1]);
4167       const unsigned location = nir_intrinsic_base(instr) +
4168          SET_FIELD(store_offset, BRW_NIR_FRAG_OUTPUT_LOCATION);
4169       const brw_reg new_dest =
4170          offset(retype(alloc_frag_output(ntb, location), src.type),
4171                 bld, nir_intrinsic_component(instr));
4172 
4173       brw_combine_with_vec(bld, new_dest, src, instr->num_components);
4174       break;
4175    }
4176 
4177    case nir_intrinsic_load_output: {
4178       const unsigned l = GET_FIELD(nir_intrinsic_base(instr),
4179                                    BRW_NIR_FRAG_OUTPUT_LOCATION);
4180       assert(l >= FRAG_RESULT_DATA0);
4181       const unsigned load_offset = nir_src_as_uint(instr->src[0]);
4182       const unsigned target = l - FRAG_RESULT_DATA0 + load_offset;
4183       const brw_reg tmp = bld.vgrf(dest.type, 4);
4184 
4185       if (reinterpret_cast<const brw_wm_prog_key *>(s.key)->coherent_fb_fetch)
4186          emit_coherent_fb_read(bld, tmp, target);
4187       else
4188          emit_non_coherent_fb_read(ntb, bld, tmp, target);
4189 
4190       brw_combine_with_vec(bld, dest,
4191                            offset(tmp, bld, nir_intrinsic_component(instr)),
4192                            instr->num_components);
4193       break;
4194    }
4195 
4196    case nir_intrinsic_demote:
4197    case nir_intrinsic_terminate:
4198    case nir_intrinsic_demote_if:
4199    case nir_intrinsic_terminate_if: {
4200       /* We track our discarded pixels in f0.1/f1.0.  By predicating on it, we
4201        * can update just the flag bits that aren't yet discarded.  If there's
4202        * no condition, we emit a CMP of g0 != g0, so all currently executing
4203        * channels will get turned off.
4204        */
4205       fs_inst *cmp = NULL;
4206       if (instr->intrinsic == nir_intrinsic_demote_if ||
4207           instr->intrinsic == nir_intrinsic_terminate_if) {
4208          nir_alu_instr *alu = nir_src_as_alu_instr(instr->src[0]);
4209 
4210          if (alu != NULL &&
4211              alu->op != nir_op_bcsel) {
4212             /* Re-emit the instruction that generated the Boolean value, but
4213              * do not store it.  Since this instruction will be conditional,
4214              * other instructions that want to use the real Boolean value may
4215              * get garbage.  This was a problem for piglit's fs-discard-exit-2
4216              * test.
4217              *
4218              * Ideally we'd detect that the instruction cannot have a
4219              * conditional modifier before emitting the instructions.  Alas,
4220              * that is nigh impossible.  Instead, we're going to assume the
4221              * instruction (or last instruction) generated can have a
4222              * conditional modifier.  If it cannot, fallback to the old-style
4223              * compare, and hope dead code elimination will clean up the
4224              * extra instructions generated.
4225              */
4226             fs_nir_emit_alu(ntb, alu, false);
4227 
4228             cmp = (fs_inst *) s.instructions.get_tail();
4229             if (cmp->conditional_mod == BRW_CONDITIONAL_NONE) {
4230                if (cmp->can_do_cmod())
4231                   cmp->conditional_mod = BRW_CONDITIONAL_Z;
4232                else
4233                   cmp = NULL;
4234             } else {
4235                /* The old sequence that would have been generated is,
4236                 * basically, bool_result == false.  This is equivalent to
4237                 * !bool_result, so negate the old modifier.
4238                 */
4239                cmp->conditional_mod = brw_negate_cmod(cmp->conditional_mod);
4240             }
4241          }
4242 
4243          if (cmp == NULL) {
4244             cmp = bld.CMP(bld.null_reg_f(), get_nir_src(ntb, instr->src[0]),
4245                           brw_imm_d(0), BRW_CONDITIONAL_Z);
4246          }
4247       } else {
4248          brw_reg some_reg = brw_reg(retype(brw_vec8_grf(0, 0), BRW_TYPE_UW));
4249          cmp = bld.CMP(bld.null_reg_f(), some_reg, some_reg, BRW_CONDITIONAL_NZ);
4250       }
4251 
4252       cmp->predicate = BRW_PREDICATE_NORMAL;
4253       cmp->flag_subreg = sample_mask_flag_subreg(s);
4254 
4255       fs_inst *jump = bld.emit(BRW_OPCODE_HALT);
4256       jump->flag_subreg = sample_mask_flag_subreg(s);
4257       jump->predicate_inverse = true;
4258 
4259       if (instr->intrinsic == nir_intrinsic_terminate ||
4260           instr->intrinsic == nir_intrinsic_terminate_if) {
4261          jump->predicate = BRW_PREDICATE_NORMAL;
4262       } else {
4263          /* Only jump when the whole quad is demoted.  For historical
4264           * reasons this is also used for discard.
4265           */
4266          jump->predicate = (devinfo->ver >= 20 ? XE2_PREDICATE_ANY :
4267                             BRW_PREDICATE_ALIGN1_ANY4H);
4268       }
4269       break;
4270    }
4271 
4272    case nir_intrinsic_load_input:
4273    case nir_intrinsic_load_per_primitive_input: {
4274       /* In Fragment Shaders load_input is used either for flat inputs or
4275        * per-primitive inputs.
4276        */
4277       assert(instr->def.bit_size == 32);
4278       unsigned base = nir_intrinsic_base(instr);
4279       unsigned comp = nir_intrinsic_component(instr);
4280       unsigned num_components = instr->num_components;
4281 
4282       /* TODO(mesh): Multiview. Verify and handle these special cases for Mesh. */
4283 
4284       if (base == VARYING_SLOT_LAYER) {
4285          dest.type = BRW_TYPE_UD;
4286          bld.MOV(dest, fetch_render_target_array_index(bld));
4287          break;
4288       } else if (base == VARYING_SLOT_VIEWPORT) {
4289          dest.type = BRW_TYPE_UD;
4290          bld.MOV(dest, fetch_viewport_index(bld));
4291          break;
4292       }
4293 
4294       if (BITFIELD64_BIT(base) & s.nir->info.per_primitive_inputs) {
4295          assert(base != VARYING_SLOT_PRIMITIVE_INDICES);
4296          for (unsigned int i = 0; i < num_components; i++) {
4297             bld.MOV(offset(dest, bld, i),
4298                     retype(brw_per_primitive_reg(bld, base, comp + i), dest.type));
4299          }
4300       } else {
4301          /* Gfx20+ packs the plane parameters of a single logical
4302           * input in a vec3 format instead of the previously used vec4
4303           * format.
4304           */
4305          const unsigned k = devinfo->ver >= 20 ? 0 : 3;
4306          for (unsigned int i = 0; i < num_components; i++) {
4307             bld.MOV(offset(dest, bld, i),
4308                     retype(brw_interp_reg(bld, base, comp + i, k), dest.type));
4309          }
4310       }
4311       break;
4312    }
4313 
4314    case nir_intrinsic_load_fs_input_interp_deltas: {
4315       assert(s.stage == MESA_SHADER_FRAGMENT);
4316       assert(nir_src_as_uint(instr->src[0]) == 0);
4317       const unsigned base = nir_intrinsic_base(instr);
4318       const unsigned comp = nir_intrinsic_component(instr);
4319       dest.type = BRW_TYPE_F;
4320 
4321       /* Gfx20+ packs the plane parameters of a single logical
4322        * input in a vec3 format instead of the previously used vec4
4323        * format.
4324        */
4325       if (devinfo->ver >= 20) {
4326          bld.MOV(offset(dest, bld, 0), brw_interp_reg(bld, base, comp, 0));
4327          bld.MOV(offset(dest, bld, 1), brw_interp_reg(bld, base, comp, 2));
4328          bld.MOV(offset(dest, bld, 2), brw_interp_reg(bld, base, comp, 1));
4329       } else {
4330          bld.MOV(offset(dest, bld, 0), brw_interp_reg(bld, base, comp, 3));
4331          bld.MOV(offset(dest, bld, 1), brw_interp_reg(bld, base, comp, 1));
4332          bld.MOV(offset(dest, bld, 2), brw_interp_reg(bld, base, comp, 0));
4333       }
4334 
4335       break;
4336    }
4337 
4338    case nir_intrinsic_load_barycentric_pixel:
4339    case nir_intrinsic_load_barycentric_centroid:
4340    case nir_intrinsic_load_barycentric_sample: {
4341       /* Use the delta_xy values computed from the payload */
4342       enum brw_barycentric_mode bary = brw_barycentric_mode(
4343          reinterpret_cast<const brw_wm_prog_key *>(s.key), instr);
4344       const brw_reg srcs[] = { offset(s.delta_xy[bary], bld, 0),
4345                               offset(s.delta_xy[bary], bld, 1) };
4346       bld.LOAD_PAYLOAD(dest, srcs, ARRAY_SIZE(srcs), 0);
4347       break;
4348    }
4349 
4350    case nir_intrinsic_load_barycentric_at_sample: {
4351       const glsl_interp_mode interpolation =
4352          (enum glsl_interp_mode) nir_intrinsic_interp_mode(instr);
4353 
4354       if (devinfo->ver >= 20) {
4355          emit_pixel_interpolater_alu_at_sample(
4356             bld, dest, retype(get_nir_src(ntb, instr->src[0]),
4357                               BRW_TYPE_UD),
4358             interpolation);
4359 
4360       } else {
4361          brw_reg msg_data;
4362          if (nir_src_is_const(instr->src[0])) {
4363             msg_data = brw_imm_ud(nir_src_as_uint(instr->src[0]) << 4);
4364          } else {
4365             const brw_reg sample_src = retype(get_nir_src(ntb, instr->src[0]),
4366                                              BRW_TYPE_UD);
4367             const brw_reg sample_id = bld.emit_uniformize(sample_src);
4368             msg_data = component(bld.group(8, 0).vgrf(BRW_TYPE_UD), 0);
4369             bld.exec_all().group(1, 0).SHL(msg_data, sample_id, brw_imm_ud(4u));
4370          }
4371 
4372          brw_reg flag_reg;
4373          struct brw_wm_prog_key *wm_prog_key = (struct brw_wm_prog_key *) s.key;
4374          if (wm_prog_key->multisample_fbo == BRW_SOMETIMES) {
4375             struct brw_wm_prog_data *wm_prog_data = brw_wm_prog_data(s.prog_data);
4376 
4377             check_dynamic_msaa_flag(bld.exec_all().group(8, 0),
4378                                     wm_prog_data,
4379                                     INTEL_MSAA_FLAG_MULTISAMPLE_FBO);
4380             flag_reg = brw_flag_reg(0, 0);
4381          }
4382 
4383          emit_pixel_interpolater_send(bld,
4384                                       FS_OPCODE_INTERPOLATE_AT_SAMPLE,
4385                                       dest,
4386                                       brw_reg(), /* src */
4387                                       msg_data,
4388                                       flag_reg,
4389                                       interpolation);
4390       }
4391       break;
4392    }
4393 
4394    case nir_intrinsic_load_barycentric_at_offset: {
4395       const glsl_interp_mode interpolation =
4396          (enum glsl_interp_mode) nir_intrinsic_interp_mode(instr);
4397 
4398       if (devinfo->ver >= 20) {
4399          emit_pixel_interpolater_alu_at_offset(
4400             bld, dest,
4401             retype(get_nir_src(ntb, instr->src[0]), BRW_TYPE_F),
4402             interpolation);
4403 
4404       } else if (nir_const_value *const_offset = nir_src_as_const_value(instr->src[0])) {
4405          assert(nir_src_bit_size(instr->src[0]) == 32);
4406          unsigned off_x = const_offset[0].u32 & 0xf;
4407          unsigned off_y = const_offset[1].u32 & 0xf;
4408 
4409          emit_pixel_interpolater_send(bld,
4410                                       FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET,
4411                                       dest,
4412                                       brw_reg(), /* src */
4413                                       brw_imm_ud(off_x | (off_y << 4)),
4414                                       brw_reg(), /* flag_reg */
4415                                       interpolation);
4416       } else {
4417          brw_reg src = retype(get_nir_src(ntb, instr->src[0]), BRW_TYPE_D);
4418          const enum opcode opcode = FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET;
4419          emit_pixel_interpolater_send(bld,
4420                                       opcode,
4421                                       dest,
4422                                       src,
4423                                       brw_imm_ud(0u),
4424                                       brw_reg(), /* flag_reg */
4425                                       interpolation);
4426       }
4427       break;
4428    }
4429 
4430    case nir_intrinsic_load_frag_coord: {
4431       brw_reg comps[4] = { s.pixel_x, s.pixel_y, s.pixel_z, s.wpos_w };
4432       bld.VEC(dest, comps, 4);
4433       break;
4434    }
4435 
4436    case nir_intrinsic_load_interpolated_input: {
4437       assert(instr->src[0].ssa &&
4438              instr->src[0].ssa->parent_instr->type == nir_instr_type_intrinsic);
4439       nir_intrinsic_instr *bary_intrinsic =
4440          nir_instr_as_intrinsic(instr->src[0].ssa->parent_instr);
4441       nir_intrinsic_op bary_intrin = bary_intrinsic->intrinsic;
4442       brw_reg dst_xy;
4443 
4444       if (bary_intrin == nir_intrinsic_load_barycentric_at_offset ||
4445           bary_intrin == nir_intrinsic_load_barycentric_at_sample) {
4446          /* Use the result of the PI message. */
4447          dst_xy = retype(get_nir_src(ntb, instr->src[0]), BRW_TYPE_F);
4448       } else {
4449          /* Use the delta_xy values computed from the payload */
4450          enum brw_barycentric_mode bary = brw_barycentric_mode(
4451             reinterpret_cast<const brw_wm_prog_key *>(s.key), bary_intrinsic);
4452          dst_xy = s.delta_xy[bary];
4453       }
4454 
4455       for (unsigned int i = 0; i < instr->num_components; i++) {
4456          brw_reg interp =
4457             brw_interp_reg(bld, nir_intrinsic_base(instr),
4458                            nir_intrinsic_component(instr) + i, 0);
4459          interp.type = BRW_TYPE_F;
4460          dest.type = BRW_TYPE_F;
4461 
4462          bld.PLN(offset(dest, bld, i), interp, dst_xy);
4463       }
4464       break;
4465    }
4466 
4467    default:
4468       fs_nir_emit_intrinsic(ntb, bld, instr);
4469       break;
4470    }
4471 }
4472 
4473 static unsigned
brw_workgroup_size(fs_visitor & s)4474 brw_workgroup_size(fs_visitor &s)
4475 {
4476    assert(gl_shader_stage_uses_workgroup(s.stage));
4477    assert(!s.nir->info.workgroup_size_variable);
4478    const struct brw_cs_prog_data *cs = brw_cs_prog_data(s.prog_data);
4479    return cs->local_size[0] * cs->local_size[1] * cs->local_size[2];
4480 }
4481 
4482 static void
fs_nir_emit_cs_intrinsic(nir_to_brw_state & ntb,nir_intrinsic_instr * instr)4483 fs_nir_emit_cs_intrinsic(nir_to_brw_state &ntb,
4484                          nir_intrinsic_instr *instr)
4485 {
4486    const intel_device_info *devinfo = ntb.devinfo;
4487    const fs_builder &bld = ntb.bld;
4488    fs_visitor &s = ntb.s;
4489 
4490    assert(gl_shader_stage_uses_workgroup(s.stage));
4491    struct brw_cs_prog_data *cs_prog_data = brw_cs_prog_data(s.prog_data);
4492 
4493    brw_reg dest;
4494    if (nir_intrinsic_infos[instr->intrinsic].has_dest)
4495       dest = get_nir_def(ntb, instr->def);
4496 
4497    switch (instr->intrinsic) {
4498    case nir_intrinsic_barrier:
4499       if (nir_intrinsic_memory_scope(instr) != SCOPE_NONE)
4500          fs_nir_emit_intrinsic(ntb, bld, instr);
4501       if (nir_intrinsic_execution_scope(instr) == SCOPE_WORKGROUP) {
4502          /* The whole workgroup fits in a single HW thread, so all the
4503           * invocations are already executed lock-step.  Instead of an actual
4504           * barrier just emit a scheduling fence, that will generate no code.
4505           */
4506          if (!s.nir->info.workgroup_size_variable &&
4507              brw_workgroup_size(s) <= s.dispatch_width) {
4508             bld.exec_all().group(1, 0).emit(FS_OPCODE_SCHEDULING_FENCE);
4509             break;
4510          }
4511 
4512          emit_barrier(ntb);
4513          cs_prog_data->uses_barrier = true;
4514       }
4515       break;
4516 
4517    case nir_intrinsic_load_subgroup_id:
4518       s.cs_payload().load_subgroup_id(bld, dest);
4519       break;
4520 
4521    case nir_intrinsic_load_local_invocation_id:
4522       /* This is only used for hardware generated local IDs. */
4523       assert(cs_prog_data->generate_local_id);
4524 
4525       dest.type = BRW_TYPE_UD;
4526 
4527       for (unsigned i = 0; i < 3; i++)
4528          bld.MOV(offset(dest, bld, i), s.cs_payload().local_invocation_id[i]);
4529       break;
4530 
4531    case nir_intrinsic_load_workgroup_id: {
4532       brw_reg val = ntb.system_values[SYSTEM_VALUE_WORKGROUP_ID];
4533       assert(val.file != BAD_FILE);
4534       dest.type = val.type;
4535       for (unsigned i = 0; i < 3; i++)
4536          bld.MOV(offset(dest, bld, i), offset(val, bld, i));
4537       break;
4538    }
4539 
4540    case nir_intrinsic_load_num_workgroups: {
4541       assert(instr->def.bit_size == 32);
4542 
4543       cs_prog_data->uses_num_work_groups = true;
4544 
4545       brw_reg srcs[MEMORY_LOGICAL_NUM_SRCS];
4546       srcs[MEMORY_LOGICAL_OPCODE] = brw_imm_ud(LSC_OP_LOAD);
4547       srcs[MEMORY_LOGICAL_MODE] = brw_imm_ud(MEMORY_MODE_UNTYPED);
4548       srcs[MEMORY_LOGICAL_BINDING_TYPE] = brw_imm_ud(LSC_ADDR_SURFTYPE_BTI);
4549       srcs[MEMORY_LOGICAL_BINDING] = brw_imm_ud(0);
4550       srcs[MEMORY_LOGICAL_ADDRESS] = brw_imm_ud(0);
4551       srcs[MEMORY_LOGICAL_COORD_COMPONENTS] = brw_imm_ud(1);
4552       srcs[MEMORY_LOGICAL_ALIGNMENT] = brw_imm_ud(4);
4553       srcs[MEMORY_LOGICAL_DATA_SIZE] = brw_imm_ud(LSC_DATA_SIZE_D32);
4554       srcs[MEMORY_LOGICAL_COMPONENTS] = brw_imm_ud(3);
4555       srcs[MEMORY_LOGICAL_FLAGS] = brw_imm_ud(0);
4556 
4557       fs_inst *inst =
4558          bld.emit(SHADER_OPCODE_MEMORY_LOAD_LOGICAL,
4559                   dest, srcs, MEMORY_LOGICAL_NUM_SRCS);
4560       inst->size_written = 3 * s.dispatch_width * 4;
4561       break;
4562    }
4563 
4564    case nir_intrinsic_load_workgroup_size: {
4565       /* Should have been lowered by brw_nir_lower_cs_intrinsics() or
4566        * crocus/iris_setup_uniforms() for the variable group size case.
4567        */
4568       unreachable("Should have been lowered");
4569       break;
4570    }
4571 
4572    case nir_intrinsic_dpas_intel: {
4573       const unsigned sdepth = nir_intrinsic_systolic_depth(instr);
4574       const unsigned rcount = nir_intrinsic_repeat_count(instr);
4575 
4576       const brw_reg_type dest_type =
4577          brw_type_for_nir_type(devinfo, nir_intrinsic_dest_type(instr));
4578       const brw_reg_type src_type =
4579          brw_type_for_nir_type(devinfo, nir_intrinsic_src_type(instr));
4580 
4581       dest = retype(dest, dest_type);
4582       brw_reg src0 = retype(get_nir_src(ntb, instr->src[0]), dest_type);
4583 
4584       fs_builder bld16 = bld.exec_all().group(16, 0);
4585       fs_builder bldn = devinfo->ver >= 20 ? bld16 : bld.exec_all().group(8, 0);
4586 
4587       bldn.DPAS(dest,
4588                 src0,
4589                 retype(get_nir_src(ntb, instr->src[2]), src_type),
4590                 retype(get_nir_src(ntb, instr->src[1]), src_type),
4591                 sdepth,
4592                 rcount)
4593          ->saturate = nir_intrinsic_saturate(instr);
4594 
4595       cs_prog_data->uses_systolic = true;
4596       break;
4597    }
4598 
4599    default:
4600       fs_nir_emit_intrinsic(ntb, bld, instr);
4601       break;
4602    }
4603 }
4604 
4605 static void
emit_rt_lsc_fence(const fs_builder & bld,enum lsc_fence_scope scope,enum lsc_flush_type flush_type)4606 emit_rt_lsc_fence(const fs_builder &bld,
4607                   enum lsc_fence_scope scope,
4608                   enum lsc_flush_type flush_type)
4609 {
4610    const intel_device_info *devinfo = bld.shader->devinfo;
4611 
4612    const fs_builder ubld = bld.exec_all().group(8, 0);
4613    brw_reg tmp = ubld.vgrf(BRW_TYPE_UD);
4614    fs_inst *send = ubld.emit(SHADER_OPCODE_SEND, tmp,
4615                              brw_imm_ud(0) /* desc */,
4616                              brw_imm_ud(0) /* ex_desc */,
4617                              brw_vec8_grf(0, 0) /* payload */);
4618    send->sfid = GFX12_SFID_UGM;
4619    send->desc = lsc_fence_msg_desc(devinfo, scope, flush_type, true);
4620    send->mlen = reg_unit(devinfo); /* g0 header */
4621    send->ex_mlen = 0;
4622    /* Temp write for scheduling */
4623    send->size_written = REG_SIZE * reg_unit(devinfo);
4624    send->send_has_side_effects = true;
4625 
4626    ubld.emit(FS_OPCODE_SCHEDULING_FENCE, ubld.null_reg_ud(), tmp);
4627 }
4628 
4629 
4630 static void
fs_nir_emit_bs_intrinsic(nir_to_brw_state & ntb,nir_intrinsic_instr * instr)4631 fs_nir_emit_bs_intrinsic(nir_to_brw_state &ntb,
4632                          nir_intrinsic_instr *instr)
4633 {
4634    const fs_builder &bld = ntb.bld;
4635    fs_visitor &s = ntb.s;
4636 
4637    assert(brw_shader_stage_is_bindless(s.stage));
4638    const bs_thread_payload &payload = s.bs_payload();
4639 
4640    brw_reg dest;
4641    if (nir_intrinsic_infos[instr->intrinsic].has_dest)
4642       dest = get_nir_def(ntb, instr->def);
4643 
4644    switch (instr->intrinsic) {
4645    case nir_intrinsic_load_btd_global_arg_addr_intel:
4646       bld.MOV(dest, retype(payload.global_arg_ptr, dest.type));
4647       break;
4648 
4649    case nir_intrinsic_load_btd_local_arg_addr_intel:
4650       bld.MOV(dest, retype(payload.local_arg_ptr, dest.type));
4651       break;
4652 
4653    case nir_intrinsic_load_btd_shader_type_intel:
4654       payload.load_shader_type(bld, dest);
4655       break;
4656 
4657    default:
4658       fs_nir_emit_intrinsic(ntb, bld, instr);
4659       break;
4660    }
4661 }
4662 
4663 static brw_reg
brw_nir_reduction_op_identity(const fs_builder & bld,nir_op op,brw_reg_type type)4664 brw_nir_reduction_op_identity(const fs_builder &bld,
4665                               nir_op op, brw_reg_type type)
4666 {
4667    nir_const_value value =
4668       nir_alu_binop_identity(op, brw_type_size_bits(type));
4669 
4670    switch (brw_type_size_bytes(type)) {
4671    case 1:
4672       if (type == BRW_TYPE_UB) {
4673          return brw_imm_uw(value.u8);
4674       } else {
4675          assert(type == BRW_TYPE_B);
4676          return brw_imm_w(value.i8);
4677       }
4678    case 2:
4679       return retype(brw_imm_uw(value.u16), type);
4680    case 4:
4681       return retype(brw_imm_ud(value.u32), type);
4682    case 8:
4683       if (type == BRW_TYPE_DF)
4684          return brw_imm_df(value.f64);
4685       else
4686          return retype(brw_imm_u64(value.u64), type);
4687    default:
4688       unreachable("Invalid type size");
4689    }
4690 }
4691 
4692 static opcode
brw_op_for_nir_reduction_op(nir_op op)4693 brw_op_for_nir_reduction_op(nir_op op)
4694 {
4695    switch (op) {
4696    case nir_op_iadd: return BRW_OPCODE_ADD;
4697    case nir_op_fadd: return BRW_OPCODE_ADD;
4698    case nir_op_imul: return BRW_OPCODE_MUL;
4699    case nir_op_fmul: return BRW_OPCODE_MUL;
4700    case nir_op_imin: return BRW_OPCODE_SEL;
4701    case nir_op_umin: return BRW_OPCODE_SEL;
4702    case nir_op_fmin: return BRW_OPCODE_SEL;
4703    case nir_op_imax: return BRW_OPCODE_SEL;
4704    case nir_op_umax: return BRW_OPCODE_SEL;
4705    case nir_op_fmax: return BRW_OPCODE_SEL;
4706    case nir_op_iand: return BRW_OPCODE_AND;
4707    case nir_op_ior:  return BRW_OPCODE_OR;
4708    case nir_op_ixor: return BRW_OPCODE_XOR;
4709    default:
4710       unreachable("Invalid reduction operation");
4711    }
4712 }
4713 
4714 static brw_conditional_mod
brw_cond_mod_for_nir_reduction_op(nir_op op)4715 brw_cond_mod_for_nir_reduction_op(nir_op op)
4716 {
4717    switch (op) {
4718    case nir_op_iadd: return BRW_CONDITIONAL_NONE;
4719    case nir_op_fadd: return BRW_CONDITIONAL_NONE;
4720    case nir_op_imul: return BRW_CONDITIONAL_NONE;
4721    case nir_op_fmul: return BRW_CONDITIONAL_NONE;
4722    case nir_op_imin: return BRW_CONDITIONAL_L;
4723    case nir_op_umin: return BRW_CONDITIONAL_L;
4724    case nir_op_fmin: return BRW_CONDITIONAL_L;
4725    case nir_op_imax: return BRW_CONDITIONAL_GE;
4726    case nir_op_umax: return BRW_CONDITIONAL_GE;
4727    case nir_op_fmax: return BRW_CONDITIONAL_GE;
4728    case nir_op_iand: return BRW_CONDITIONAL_NONE;
4729    case nir_op_ior:  return BRW_CONDITIONAL_NONE;
4730    case nir_op_ixor: return BRW_CONDITIONAL_NONE;
4731    default:
4732       unreachable("Invalid reduction operation");
4733    }
4734 }
4735 
4736 struct rebuild_resource {
4737    unsigned idx;
4738    std::vector<nir_def *> array;
4739 };
4740 
4741 static bool
skip_rebuild_instr(nir_instr * instr)4742 skip_rebuild_instr(nir_instr *instr)
4743 {
4744    if (instr->type != nir_instr_type_intrinsic)
4745       return false;
4746 
4747    nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
4748    switch (intrin->intrinsic) {
4749    case nir_intrinsic_load_ubo_uniform_block_intel:
4750    case nir_intrinsic_load_ssbo_uniform_block_intel:
4751    case nir_intrinsic_load_global_constant_uniform_block_intel:
4752       /* Those intrinsic are generated using NoMask so we can trust their
4753        * destination registers are fully populated. No need to rematerialize
4754        * further.
4755        */
4756       return true;
4757 
4758    default:
4759       return false;
4760    }
4761 }
4762 
4763 static bool
add_rebuild_src(nir_src * src,void * state)4764 add_rebuild_src(nir_src *src, void *state)
4765 {
4766    struct rebuild_resource *res = (struct rebuild_resource *) state;
4767 
4768    for (nir_def *def : res->array) {
4769       if (def == src->ssa)
4770          return true;
4771    }
4772 
4773    if (!skip_rebuild_instr(src->ssa->parent_instr))
4774       nir_foreach_src(src->ssa->parent_instr, add_rebuild_src, state);
4775    res->array.push_back(src->ssa);
4776    return true;
4777 }
4778 
4779 static brw_reg
try_rebuild_source(nir_to_brw_state & ntb,const brw::fs_builder & bld,nir_def * resource_def,bool a64=false)4780 try_rebuild_source(nir_to_brw_state &ntb, const brw::fs_builder &bld,
4781                    nir_def *resource_def, bool a64 = false)
4782 {
4783    /* Create a build at the location of the resource_intel intrinsic */
4784    fs_builder ubld = bld.exec_all().group(8 * reg_unit(ntb.devinfo), 0);
4785    const unsigned grf_size = REG_SIZE * reg_unit(ntb.devinfo);
4786 
4787    struct rebuild_resource resources = {};
4788    resources.idx = 0;
4789 
4790    if (!nir_foreach_src(resource_def->parent_instr,
4791                         add_rebuild_src, &resources))
4792       return brw_reg();
4793    resources.array.push_back(resource_def);
4794 
4795    if (resources.array.size() == 1) {
4796       nir_def *def = resources.array[0];
4797 
4798       if (def->parent_instr->type == nir_instr_type_load_const) {
4799          nir_load_const_instr *load_const =
4800             nir_instr_as_load_const(def->parent_instr);
4801          return brw_imm_ud(load_const->value[0].i32);
4802       } else {
4803          nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(def->parent_instr);
4804          switch (intrin->intrinsic) {
4805          case nir_intrinsic_load_uniform: {
4806             unsigned base_offset = nir_intrinsic_base(intrin);
4807             unsigned load_offset = nir_src_as_uint(intrin->src[0]);
4808             brw_reg src = brw_uniform_reg(base_offset / 4,
4809                                           brw_type_with_size(BRW_TYPE_D, intrin->def.bit_size));
4810             src.offset = load_offset + base_offset % 4;
4811             return src;
4812          }
4813 
4814          case nir_intrinsic_load_mesh_inline_data_intel: {
4815             assert(ntb.s.stage == MESA_SHADER_MESH ||
4816                    ntb.s.stage == MESA_SHADER_TASK);
4817             const task_mesh_thread_payload &payload = ntb.s.task_mesh_payload();
4818             brw_reg data = offset(payload.inline_parameter, 1,
4819                                   nir_intrinsic_align_offset(intrin));
4820             return retype(data, brw_type_with_size(BRW_TYPE_D, intrin->def.bit_size));
4821          }
4822 
4823          case nir_intrinsic_load_btd_local_arg_addr_intel: {
4824             assert(brw_shader_stage_is_bindless(ntb.s.stage));
4825             const bs_thread_payload &payload = ntb.s.bs_payload();
4826             return retype(payload.local_arg_ptr, BRW_TYPE_Q);
4827          }
4828 
4829          case nir_intrinsic_load_btd_global_arg_addr_intel: {
4830             assert(brw_shader_stage_is_bindless(ntb.s.stage));
4831             const bs_thread_payload &payload = ntb.s.bs_payload();
4832             return retype(payload.global_arg_ptr, BRW_TYPE_Q);
4833          }
4834 
4835          default:
4836             /* Execute the code below, since we have to generate new
4837              * instructions.
4838              */
4839             break;
4840          }
4841       }
4842    }
4843 
4844 #if 0
4845    fprintf(stderr, "Trying remat :\n");
4846    for (unsigned i = 0; i < resources.array.size(); i++) {
4847       fprintf(stderr, "   ");
4848       nir_print_instr(resources.array[i]->parent_instr, stderr);
4849       fprintf(stderr, "\n");
4850    }
4851 #endif
4852 
4853    for (unsigned i = 0; i < resources.array.size(); i++) {
4854       nir_def *def = resources.array[i];
4855 
4856       nir_instr *instr = def->parent_instr;
4857       switch (instr->type) {
4858       case nir_instr_type_load_const: {
4859          nir_load_const_instr *load_const =
4860             nir_instr_as_load_const(instr);
4861          ubld.MOV(brw_imm_d(load_const->value[0].i32),
4862                   &ntb.resource_insts[def->index]);
4863          break;
4864       }
4865 
4866       case nir_instr_type_alu: {
4867          nir_alu_instr *alu = nir_instr_as_alu(instr);
4868 
4869          /* Not supported ALU source count */
4870          if (nir_op_infos[alu->op].num_inputs > 3)
4871             break;
4872 
4873          brw_reg srcs[3];
4874          for (unsigned s = 0; s < nir_op_infos[alu->op].num_inputs; s++) {
4875             srcs[s] = offset(
4876                ntb.resource_insts[alu->src[s].src.ssa->index]->dst,
4877                ubld, alu->src[s].swizzle[0]);
4878             assert(srcs[s].file != BAD_FILE);
4879          }
4880 
4881          switch (alu->op) {
4882          case nir_op_iadd:
4883             ubld.ADD(srcs[0].file != IMM ? srcs[0] : srcs[1],
4884                      srcs[0].file != IMM ? srcs[1] : srcs[0],
4885                      &ntb.resource_insts[def->index]);
4886             break;
4887          case nir_op_iadd3: {
4888             brw_reg dst = ubld.vgrf(srcs[0].type);
4889             ntb.resource_insts[def->index] =
4890                ubld.ADD3(dst,
4891                          srcs[1].file == IMM ? srcs[1] : srcs[0],
4892                          srcs[1].file == IMM ? srcs[0] : srcs[1],
4893                          srcs[2]);
4894             break;
4895          }
4896          case nir_op_ushr: {
4897             enum brw_reg_type utype =
4898                brw_type_with_size(srcs[0].type,
4899                                   brw_type_size_bits(srcs[0].type));
4900             ubld.SHR(retype(srcs[0], utype),
4901                      retype(srcs[1], utype),
4902                      &ntb.resource_insts[def->index]);
4903             break;
4904          }
4905          case nir_op_iand:
4906             ubld.AND(srcs[0], srcs[1], &ntb.resource_insts[def->index]);
4907             break;
4908          case nir_op_ishl:
4909             ubld.SHL(srcs[0], srcs[1], &ntb.resource_insts[def->index]);
4910             break;
4911          case nir_op_mov:
4912             break;
4913          case nir_op_ult32: {
4914             if (brw_type_size_bits(srcs[0].type) != 32)
4915                break;
4916             brw_reg dst = ubld.vgrf(srcs[0].type);
4917             enum brw_reg_type utype =
4918                brw_type_with_size(srcs[0].type,
4919                                   brw_type_size_bits(srcs[0].type));
4920             ntb.resource_insts[def->index] =
4921                ubld.CMP(dst,
4922                         retype(srcs[0], utype),
4923                         retype(srcs[1], utype),
4924                         brw_cmod_for_nir_comparison(alu->op));
4925             break;
4926          }
4927          case nir_op_b2i32:
4928             ubld.MOV(negate(retype(srcs[0], BRW_TYPE_D)),
4929                      &ntb.resource_insts[def->index]);
4930             break;
4931          case nir_op_unpack_64_2x32_split_x:
4932             ubld.MOV(subscript(srcs[0], BRW_TYPE_D, 0),
4933                      &ntb.resource_insts[def->index]);
4934             break;
4935          case nir_op_unpack_64_2x32_split_y:
4936             ubld.MOV(subscript(srcs[0], BRW_TYPE_D, 1),
4937                      &ntb.resource_insts[def->index]);
4938             break;
4939          case nir_op_pack_64_2x32_split: {
4940             brw_reg dst = ubld.vgrf(BRW_TYPE_Q);
4941             ntb.resource_insts[def->index] =
4942                ubld.emit(FS_OPCODE_PACK, dst, srcs[0], srcs[1]);
4943          }
4944          default:
4945             break;
4946          }
4947          break;
4948       }
4949 
4950       case nir_instr_type_intrinsic: {
4951          nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
4952          switch (intrin->intrinsic) {
4953          case nir_intrinsic_resource_intel:
4954             ntb.resource_insts[def->index] =
4955                ntb.resource_insts[intrin->src[1].ssa->index];
4956             break;
4957 
4958          case nir_intrinsic_load_uniform: {
4959             if (!nir_src_is_const(intrin->src[0]))
4960                break;
4961 
4962             unsigned base_offset = nir_intrinsic_base(intrin);
4963             unsigned load_offset = nir_src_as_uint(intrin->src[0]);
4964 
4965             enum brw_reg_type type =
4966                brw_type_with_size(BRW_TYPE_D, intrin->def.bit_size);
4967             brw_reg dst_data = ubld.vgrf(type, intrin->def.num_components);
4968 
4969             for (unsigned i = 0; i < intrin->def.num_components; i++) {
4970                brw_reg src = brw_uniform_reg(base_offset / 4, type);
4971                src.offset = load_offset + base_offset % 4 + i * intrin->def.bit_size / 8;
4972                fs_inst *inst = ubld.MOV(byte_offset(dst_data, i * grf_size), src);
4973                if (i == 0)
4974                   ntb.resource_insts[def->index] = inst;
4975             }
4976             break;
4977          }
4978 
4979          case nir_intrinsic_load_mesh_inline_data_intel: {
4980             assert(ntb.s.stage == MESA_SHADER_MESH ||
4981                    ntb.s.stage == MESA_SHADER_TASK);
4982             const task_mesh_thread_payload &payload = ntb.s.task_mesh_payload();
4983             enum brw_reg_type type =
4984                brw_type_with_size(BRW_TYPE_D, intrin->def.bit_size);
4985             brw_reg dst_data = ubld.vgrf(type, intrin->def.num_components);
4986 
4987             for (unsigned i = 0; i < intrin->def.num_components; i++) {
4988                brw_reg src = retype(
4989                   offset(payload.inline_parameter, 1,
4990                          nir_intrinsic_align_offset(intrin) + i * intrin->def.bit_size / 8),
4991                   brw_type_with_size(BRW_TYPE_D, intrin->def.bit_size));
4992                fs_inst *inst = ubld.MOV(byte_offset(dst_data, i * grf_size), src);
4993                if (i == 0)
4994                   ntb.resource_insts[def->index] = inst;
4995             }
4996             break;
4997          }
4998 
4999          case nir_intrinsic_load_btd_local_arg_addr_intel: {
5000             assert(brw_shader_stage_is_bindless(ntb.s.stage));
5001             const bs_thread_payload &payload = ntb.s.bs_payload();
5002             ubld.MOV(retype(payload.local_arg_ptr, BRW_TYPE_Q),
5003                      &ntb.resource_insts[def->index]);
5004             break;
5005          }
5006 
5007          case nir_intrinsic_load_btd_global_arg_addr_intel: {
5008             assert(brw_shader_stage_is_bindless(ntb.s.stage));
5009             const bs_thread_payload &payload = ntb.s.bs_payload();
5010             ubld.MOV(retype(payload.global_arg_ptr, BRW_TYPE_Q),
5011                      &ntb.resource_insts[def->index]);
5012             break;
5013          }
5014 
5015          case nir_intrinsic_load_reloc_const_intel: {
5016             uint32_t id = nir_intrinsic_param_idx(intrin);
5017             brw_reg dst = ubld.vgrf(BRW_TYPE_D);
5018             ntb.resource_insts[def->index] =
5019                ubld.emit(SHADER_OPCODE_MOV_RELOC_IMM, dst,
5020                          brw_imm_ud(id), brw_imm_ud(0));
5021             break;
5022          }
5023 
5024          case nir_intrinsic_load_ubo_uniform_block_intel:
5025          case nir_intrinsic_load_ssbo_uniform_block_intel: {
5026             enum brw_reg_type type =
5027                brw_type_with_size(BRW_TYPE_D, intrin->def.bit_size);
5028             brw_reg src_data = retype(ntb.ssa_values[def->index], type);
5029             unsigned n_components = ntb.s.alloc.sizes[src_data.nr] /
5030                                     (bld.dispatch_width() / 8);
5031             brw_reg dst_data = ubld.vgrf(type, n_components);
5032             ntb.resource_insts[def->index] = ubld.MOV(dst_data, src_data);
5033             for (unsigned i = 1; i < n_components; i++) {
5034                ubld.MOV(offset(dst_data, ubld, i),
5035                         offset(src_data, bld, i));
5036             }
5037             break;
5038          }
5039 
5040          default:
5041             break;
5042          }
5043          break;
5044       }
5045 
5046       default:
5047          break;
5048       }
5049 
5050       if (ntb.resource_insts[def->index] == NULL) {
5051 #if 0
5052          if (a64) {
5053          fprintf(stderr, "Tried remat :\n");
5054          for (unsigned i = 0; i < resources.array.size(); i++) {
5055             fprintf(stderr, "   ");
5056             nir_print_instr(resources.array[i]->parent_instr, stderr);
5057             fprintf(stderr, "\n");
5058          }
5059          fprintf(stderr, "failed at! : ");
5060          nir_print_instr(instr, stderr);
5061          fprintf(stderr, "\n");
5062          }
5063 #endif
5064          return brw_reg();
5065       }
5066    }
5067 
5068    assert(ntb.resource_insts[resource_def->index] != NULL);
5069    return component(ntb.resource_insts[resource_def->index]->dst, 0);
5070 }
5071 
5072 static brw_reg
get_nir_image_intrinsic_image(nir_to_brw_state & ntb,const brw::fs_builder & bld,nir_intrinsic_instr * instr)5073 get_nir_image_intrinsic_image(nir_to_brw_state &ntb, const brw::fs_builder &bld,
5074                               nir_intrinsic_instr *instr)
5075 {
5076    if (is_resource_src(instr->src[0])) {
5077       brw_reg surf_index = get_resource_nir_src(ntb, instr->src[0]);
5078       if (surf_index.file != BAD_FILE)
5079          return surf_index;
5080    }
5081 
5082    brw_reg image = retype(get_nir_src_imm(ntb, instr->src[0]), BRW_TYPE_UD);
5083    brw_reg surf_index = image;
5084 
5085    return bld.emit_uniformize(surf_index);
5086 }
5087 
5088 static brw_reg
get_nir_buffer_intrinsic_index(nir_to_brw_state & ntb,const brw::fs_builder & bld,nir_intrinsic_instr * instr,bool * no_mask_handle=NULL)5089 get_nir_buffer_intrinsic_index(nir_to_brw_state &ntb, const brw::fs_builder &bld,
5090                                nir_intrinsic_instr *instr, bool *no_mask_handle = NULL)
5091 {
5092    /* SSBO stores are weird in that their index is in src[1] */
5093    const bool is_store =
5094       instr->intrinsic == nir_intrinsic_store_ssbo ||
5095       instr->intrinsic == nir_intrinsic_store_ssbo_block_intel;
5096    nir_src src = is_store ? instr->src[1] : instr->src[0];
5097 
5098    if (no_mask_handle)
5099       *no_mask_handle = false;
5100 
5101    if (nir_src_is_const(src)) {
5102       if (no_mask_handle)
5103          *no_mask_handle = true;
5104       return brw_imm_ud(nir_src_as_uint(src));
5105    } else if (is_resource_src(src)) {
5106       brw_reg surf_index = get_resource_nir_src(ntb, src);
5107       if (surf_index.file != BAD_FILE) {
5108          if (no_mask_handle)
5109             *no_mask_handle = true;
5110          return surf_index;
5111       }
5112    }
5113    return bld.emit_uniformize(get_nir_src(ntb, src));
5114 }
5115 
5116 /**
5117  * The offsets we get from NIR act as if each SIMD channel has it's own blob
5118  * of contiguous space.  However, if we actually place each SIMD channel in
5119  * it's own space, we end up with terrible cache performance because each SIMD
5120  * channel accesses a different cache line even when they're all accessing the
5121  * same byte offset.  To deal with this problem, we swizzle the address using
5122  * a simple algorithm which ensures that any time a SIMD message reads or
5123  * writes the same address, it's all in the same cache line.  We have to keep
5124  * the bottom two bits fixed so that we can read/write up to a dword at a time
5125  * and the individual element is contiguous.  We do this by splitting the
5126  * address as follows:
5127  *
5128  *    31                             4-6           2          0
5129  *    +-------------------------------+------------+----------+
5130  *    |        Hi address bits        | chan index | addr low |
5131  *    +-------------------------------+------------+----------+
5132  *
5133  * In other words, the bottom two address bits stay, and the top 30 get
5134  * shifted up so that we can stick the SIMD channel index in the middle.  This
5135  * way, we can access 8, 16, or 32-bit elements and, when accessing a 32-bit
5136  * at the same logical offset, the scratch read/write instruction acts on
5137  * continuous elements and we get good cache locality.
5138  */
5139 static brw_reg
swizzle_nir_scratch_addr(nir_to_brw_state & ntb,const brw::fs_builder & bld,const nir_src & nir_addr_src,bool in_dwords)5140 swizzle_nir_scratch_addr(nir_to_brw_state &ntb,
5141                          const brw::fs_builder &bld,
5142                          const nir_src &nir_addr_src,
5143                          bool in_dwords)
5144 {
5145    fs_visitor &s = ntb.s;
5146 
5147    const brw_reg chan_index = bld.LOAD_SUBGROUP_INVOCATION();
5148    const unsigned chan_index_bits = ffs(s.dispatch_width) - 1;
5149 
5150    if (nir_src_is_const(nir_addr_src)) {
5151       unsigned nir_addr = nir_src_as_uint(nir_addr_src);
5152       if (in_dwords) {
5153          /* In this case, we know the address is aligned to a DWORD and we want
5154           * the final address in DWORDs.
5155           */
5156          return bld.OR(chan_index,
5157                        brw_imm_ud(nir_addr << (chan_index_bits - 2)));
5158       } else {
5159          /* This case is substantially more annoying because we have to pay
5160           * attention to those pesky two bottom bits.
5161           */
5162          unsigned addr_hi = (nir_addr & ~0x3u) << chan_index_bits;
5163          unsigned addr_lo = (nir_addr &  0x3u);
5164 
5165          return bld.OR(bld.SHL(chan_index, brw_imm_ud(2)),
5166                        brw_imm_ud(addr_lo | addr_hi));
5167       }
5168    }
5169 
5170    const brw_reg nir_addr =
5171       retype(get_nir_src(ntb, nir_addr_src), BRW_TYPE_UD);
5172 
5173    if (in_dwords) {
5174       /* In this case, we know the address is aligned to a DWORD and we want
5175        * the final address in DWORDs.
5176        */
5177       return bld.OR(bld.SHL(nir_addr, brw_imm_ud(chan_index_bits - 2)),
5178                     chan_index);
5179    } else {
5180       /* This case substantially more annoying because we have to pay
5181        * attention to those pesky two bottom bits.
5182        */
5183       brw_reg chan_addr = bld.SHL(chan_index, brw_imm_ud(2));
5184       brw_reg addr_bits =
5185          bld.OR(bld.AND(nir_addr, brw_imm_ud(0x3u)),
5186                 bld.SHL(bld.AND(nir_addr, brw_imm_ud(~0x3u)),
5187                         brw_imm_ud(chan_index_bits)));
5188       return bld.OR(addr_bits, chan_addr);
5189    }
5190 }
5191 
5192 static unsigned
choose_oword_block_size_dwords(const struct intel_device_info * devinfo,unsigned dwords)5193 choose_oword_block_size_dwords(const struct intel_device_info *devinfo,
5194                                unsigned dwords)
5195 {
5196    unsigned block;
5197    if (devinfo->has_lsc && dwords >= 64) {
5198       block = 64;
5199    } else if (dwords >= 32) {
5200       block = 32;
5201    } else if (dwords >= 16) {
5202       block = 16;
5203    } else {
5204       block = 8;
5205    }
5206    assert(block <= dwords);
5207    return block;
5208 }
5209 
5210 static brw_reg
increment_a64_address(const fs_builder & _bld,brw_reg address,uint32_t v,bool use_no_mask)5211 increment_a64_address(const fs_builder &_bld, brw_reg address, uint32_t v, bool use_no_mask)
5212 {
5213    const fs_builder bld = use_no_mask ? _bld.exec_all().group(8, 0) : _bld;
5214 
5215    if (bld.shader->devinfo->has_64bit_int) {
5216       struct brw_reg imm = brw_imm_reg(address.type);
5217       imm.u64 = v;
5218       return bld.ADD(address, imm);
5219    } else {
5220       brw_reg dst = bld.vgrf(BRW_TYPE_UQ);
5221       brw_reg dst_low = subscript(dst, BRW_TYPE_UD, 0);
5222       brw_reg dst_high = subscript(dst, BRW_TYPE_UD, 1);
5223       brw_reg src_low = subscript(address, BRW_TYPE_UD, 0);
5224       brw_reg src_high = subscript(address, BRW_TYPE_UD, 1);
5225 
5226       /* Add low and if that overflows, add carry to high. */
5227       bld.ADD(dst_low, src_low, brw_imm_ud(v))->conditional_mod = BRW_CONDITIONAL_O;
5228       bld.ADD(dst_high, src_high, brw_imm_ud(0x1))->predicate = BRW_PREDICATE_NORMAL;
5229       return dst_low;
5230    }
5231 }
5232 
5233 static brw_reg
emit_fence(const fs_builder & bld,enum opcode opcode,uint8_t sfid,uint32_t desc,bool commit_enable,uint8_t bti)5234 emit_fence(const fs_builder &bld, enum opcode opcode,
5235            uint8_t sfid, uint32_t desc,
5236            bool commit_enable, uint8_t bti)
5237 {
5238    assert(opcode == SHADER_OPCODE_INTERLOCK ||
5239           opcode == SHADER_OPCODE_MEMORY_FENCE);
5240 
5241    brw_reg dst = bld.vgrf(BRW_TYPE_UD);
5242    fs_inst *fence = bld.emit(opcode, dst, brw_vec8_grf(0, 0),
5243                              brw_imm_ud(commit_enable),
5244                              brw_imm_ud(bti));
5245    fence->sfid = sfid;
5246    fence->desc = desc;
5247 
5248    return dst;
5249 }
5250 
5251 static uint32_t
lsc_fence_descriptor_for_intrinsic(const struct intel_device_info * devinfo,nir_intrinsic_instr * instr)5252 lsc_fence_descriptor_for_intrinsic(const struct intel_device_info *devinfo,
5253                                    nir_intrinsic_instr *instr)
5254 {
5255    assert(devinfo->has_lsc);
5256 
5257    enum lsc_fence_scope scope = LSC_FENCE_LOCAL;
5258    enum lsc_flush_type flush_type = LSC_FLUSH_TYPE_NONE;
5259 
5260    if (nir_intrinsic_has_memory_scope(instr)) {
5261       switch (nir_intrinsic_memory_scope(instr)) {
5262       case SCOPE_DEVICE:
5263       case SCOPE_QUEUE_FAMILY:
5264          scope = LSC_FENCE_TILE;
5265          flush_type = LSC_FLUSH_TYPE_EVICT;
5266          break;
5267       case SCOPE_WORKGROUP:
5268          scope = LSC_FENCE_THREADGROUP;
5269          break;
5270       case SCOPE_SHADER_CALL:
5271       case SCOPE_INVOCATION:
5272       case SCOPE_SUBGROUP:
5273       case SCOPE_NONE:
5274          break;
5275       }
5276    } else {
5277       /* No scope defined. */
5278       scope = LSC_FENCE_TILE;
5279       flush_type = LSC_FLUSH_TYPE_EVICT;
5280    }
5281    return lsc_fence_msg_desc(devinfo, scope, flush_type, true);
5282 }
5283 
5284 /**
5285  * Create a MOV to read the timestamp register.
5286  */
5287 static brw_reg
get_timestamp(const fs_builder & bld)5288 get_timestamp(const fs_builder &bld)
5289 {
5290    fs_visitor &s = *bld.shader;
5291 
5292    brw_reg ts = brw_reg(retype(brw_vec4_reg(ARF,
5293                                           BRW_ARF_TIMESTAMP, 0), BRW_TYPE_UD));
5294 
5295    brw_reg dst = brw_vgrf(s.alloc.allocate(1), BRW_TYPE_UD);
5296 
5297    /* We want to read the 3 fields we care about even if it's not enabled in
5298     * the dispatch.
5299     */
5300    bld.group(4, 0).exec_all().MOV(dst, ts);
5301 
5302    return dst;
5303 }
5304 
5305 static unsigned
component_from_intrinsic(nir_intrinsic_instr * instr)5306 component_from_intrinsic(nir_intrinsic_instr *instr)
5307 {
5308    if (nir_intrinsic_has_component(instr))
5309       return nir_intrinsic_component(instr);
5310    else
5311       return 0;
5312 }
5313 
5314 static void
adjust_handle_and_offset(const fs_builder & bld,brw_reg & urb_handle,unsigned & urb_global_offset)5315 adjust_handle_and_offset(const fs_builder &bld,
5316                          brw_reg &urb_handle,
5317                          unsigned &urb_global_offset)
5318 {
5319    /* Make sure that URB global offset is below 2048 (2^11), because
5320     * that's the maximum possible value encoded in Message Descriptor.
5321     */
5322    unsigned adjustment = (urb_global_offset >> 11) << 11;
5323 
5324    if (adjustment) {
5325       fs_builder ubld8 = bld.group(8, 0).exec_all();
5326       /* Allocate new register to not overwrite the shared URB handle. */
5327       urb_handle = ubld8.ADD(urb_handle, brw_imm_ud(adjustment));
5328       urb_global_offset -= adjustment;
5329    }
5330 }
5331 
5332 static void
emit_urb_direct_vec4_write(const fs_builder & bld,unsigned urb_global_offset,const brw_reg & src,brw_reg urb_handle,unsigned dst_comp_offset,unsigned comps,unsigned mask)5333 emit_urb_direct_vec4_write(const fs_builder &bld,
5334                            unsigned urb_global_offset,
5335                            const brw_reg &src,
5336                            brw_reg urb_handle,
5337                            unsigned dst_comp_offset,
5338                            unsigned comps,
5339                            unsigned mask)
5340 {
5341    for (unsigned q = 0; q < bld.dispatch_width() / 8; q++) {
5342       fs_builder bld8 = bld.group(8, q);
5343 
5344       brw_reg payload_srcs[8];
5345       unsigned length = 0;
5346 
5347       for (unsigned i = 0; i < dst_comp_offset; i++)
5348          payload_srcs[length++] = reg_undef;
5349 
5350       for (unsigned c = 0; c < comps; c++)
5351          payload_srcs[length++] = quarter(offset(src, bld, c), q);
5352 
5353       brw_reg srcs[URB_LOGICAL_NUM_SRCS];
5354       srcs[URB_LOGICAL_SRC_HANDLE] = urb_handle;
5355       srcs[URB_LOGICAL_SRC_CHANNEL_MASK] = brw_imm_ud(mask << 16);
5356       srcs[URB_LOGICAL_SRC_DATA] = brw_vgrf(bld.shader->alloc.allocate(length),
5357                                             BRW_TYPE_F);
5358       srcs[URB_LOGICAL_SRC_COMPONENTS] = brw_imm_ud(length);
5359       bld8.LOAD_PAYLOAD(srcs[URB_LOGICAL_SRC_DATA], payload_srcs, length, 0);
5360 
5361       fs_inst *inst = bld8.emit(SHADER_OPCODE_URB_WRITE_LOGICAL,
5362                                 reg_undef, srcs, ARRAY_SIZE(srcs));
5363       inst->offset = urb_global_offset;
5364       assert(inst->offset < 2048);
5365    }
5366 }
5367 
5368 static void
emit_urb_direct_writes(const fs_builder & bld,nir_intrinsic_instr * instr,const brw_reg & src,brw_reg urb_handle)5369 emit_urb_direct_writes(const fs_builder &bld, nir_intrinsic_instr *instr,
5370                        const brw_reg &src, brw_reg urb_handle)
5371 {
5372    assert(nir_src_bit_size(instr->src[0]) == 32);
5373 
5374    nir_src *offset_nir_src = nir_get_io_offset_src(instr);
5375    assert(nir_src_is_const(*offset_nir_src));
5376 
5377    const unsigned comps = nir_src_num_components(instr->src[0]);
5378    assert(comps <= 4);
5379 
5380    const unsigned offset_in_dwords = nir_intrinsic_base(instr) +
5381                                      nir_src_as_uint(*offset_nir_src) +
5382                                      component_from_intrinsic(instr);
5383 
5384    /* URB writes are vec4 aligned but the intrinsic offsets are in dwords.
5385     * We can write up to 8 dwords, so single vec4 write is enough.
5386     */
5387    const unsigned comp_shift = offset_in_dwords % 4;
5388    const unsigned mask = nir_intrinsic_write_mask(instr) << comp_shift;
5389 
5390    unsigned urb_global_offset = offset_in_dwords / 4;
5391    adjust_handle_and_offset(bld, urb_handle, urb_global_offset);
5392 
5393    emit_urb_direct_vec4_write(bld, urb_global_offset, src, urb_handle,
5394                               comp_shift, comps, mask);
5395 }
5396 
5397 static void
emit_urb_direct_vec4_write_xe2(const fs_builder & bld,unsigned offset_in_bytes,const brw_reg & src,brw_reg urb_handle,unsigned comps,unsigned mask)5398 emit_urb_direct_vec4_write_xe2(const fs_builder &bld,
5399                                unsigned offset_in_bytes,
5400                                const brw_reg &src,
5401                                brw_reg urb_handle,
5402                                unsigned comps,
5403                                unsigned mask)
5404 {
5405    const struct intel_device_info *devinfo = bld.shader->devinfo;
5406    const unsigned runit = reg_unit(devinfo);
5407    const unsigned write_size = 8 * runit;
5408 
5409    if (offset_in_bytes > 0) {
5410       fs_builder bldall = bld.group(write_size, 0).exec_all();
5411       urb_handle = bldall.ADD(urb_handle, brw_imm_ud(offset_in_bytes));
5412    }
5413 
5414    for (unsigned q = 0; q < bld.dispatch_width() / write_size; q++) {
5415       fs_builder hbld = bld.group(write_size, q);
5416 
5417       assert(comps <= 4);
5418       brw_reg payload_srcs[4];
5419 
5420       for (unsigned c = 0; c < comps; c++)
5421          payload_srcs[c] = horiz_offset(offset(src, bld, c), write_size * q);
5422 
5423       brw_reg srcs[URB_LOGICAL_NUM_SRCS];
5424       srcs[URB_LOGICAL_SRC_HANDLE] = urb_handle;
5425       srcs[URB_LOGICAL_SRC_CHANNEL_MASK] = brw_imm_ud(mask << 16);
5426       int nr = bld.shader->alloc.allocate(comps * runit);
5427       srcs[URB_LOGICAL_SRC_DATA] = brw_vgrf(nr, BRW_TYPE_F);
5428       srcs[URB_LOGICAL_SRC_COMPONENTS] = brw_imm_ud(comps);
5429       hbld.LOAD_PAYLOAD(srcs[URB_LOGICAL_SRC_DATA], payload_srcs, comps, 0);
5430 
5431       hbld.emit(SHADER_OPCODE_URB_WRITE_LOGICAL,
5432                 reg_undef, srcs, ARRAY_SIZE(srcs));
5433    }
5434 }
5435 
5436 static void
emit_urb_direct_writes_xe2(const fs_builder & bld,nir_intrinsic_instr * instr,const brw_reg & src,brw_reg urb_handle)5437 emit_urb_direct_writes_xe2(const fs_builder &bld, nir_intrinsic_instr *instr,
5438                            const brw_reg &src, brw_reg urb_handle)
5439 {
5440    assert(nir_src_bit_size(instr->src[0]) == 32);
5441 
5442    nir_src *offset_nir_src = nir_get_io_offset_src(instr);
5443    assert(nir_src_is_const(*offset_nir_src));
5444 
5445    const unsigned comps = nir_src_num_components(instr->src[0]);
5446    assert(comps <= 4);
5447 
5448    const unsigned offset_in_dwords = nir_intrinsic_base(instr) +
5449                                      nir_src_as_uint(*offset_nir_src) +
5450                                      component_from_intrinsic(instr);
5451 
5452    const unsigned mask = nir_intrinsic_write_mask(instr);
5453 
5454    emit_urb_direct_vec4_write_xe2(bld, offset_in_dwords * 4, src,
5455                                     urb_handle, comps, mask);
5456 }
5457 
5458 static void
emit_urb_indirect_vec4_write(const fs_builder & bld,const brw_reg & offset_src,unsigned base,const brw_reg & src,brw_reg urb_handle,unsigned dst_comp_offset,unsigned comps,unsigned mask)5459 emit_urb_indirect_vec4_write(const fs_builder &bld,
5460                              const brw_reg &offset_src,
5461                              unsigned base,
5462                              const brw_reg &src,
5463                              brw_reg urb_handle,
5464                              unsigned dst_comp_offset,
5465                              unsigned comps,
5466                              unsigned mask)
5467 {
5468    for (unsigned q = 0; q < bld.dispatch_width() / 8; q++) {
5469       fs_builder bld8 = bld.group(8, q);
5470 
5471       /* offset is always positive, so signedness doesn't matter */
5472       assert(offset_src.type == BRW_TYPE_D || offset_src.type == BRW_TYPE_UD);
5473       brw_reg qtr = bld8.MOV(quarter(retype(offset_src, BRW_TYPE_UD), q));
5474       brw_reg off = bld8.SHR(bld8.ADD(qtr, brw_imm_ud(base)), brw_imm_ud(2));
5475 
5476       brw_reg payload_srcs[8];
5477       unsigned length = 0;
5478 
5479       for (unsigned i = 0; i < dst_comp_offset; i++)
5480          payload_srcs[length++] = reg_undef;
5481 
5482       for (unsigned c = 0; c < comps; c++)
5483          payload_srcs[length++] = quarter(offset(src, bld, c), q);
5484 
5485       brw_reg srcs[URB_LOGICAL_NUM_SRCS];
5486       srcs[URB_LOGICAL_SRC_HANDLE] = urb_handle;
5487       srcs[URB_LOGICAL_SRC_PER_SLOT_OFFSETS] = off;
5488       srcs[URB_LOGICAL_SRC_CHANNEL_MASK] = brw_imm_ud(mask << 16);
5489       srcs[URB_LOGICAL_SRC_DATA] = brw_vgrf(bld.shader->alloc.allocate(length),
5490                                             BRW_TYPE_F);
5491       srcs[URB_LOGICAL_SRC_COMPONENTS] = brw_imm_ud(length);
5492       bld8.LOAD_PAYLOAD(srcs[URB_LOGICAL_SRC_DATA], payload_srcs, length, 0);
5493 
5494       fs_inst *inst = bld8.emit(SHADER_OPCODE_URB_WRITE_LOGICAL,
5495                                 reg_undef, srcs, ARRAY_SIZE(srcs));
5496       inst->offset = 0;
5497    }
5498 }
5499 
5500 static void
emit_urb_indirect_writes_mod(const fs_builder & bld,nir_intrinsic_instr * instr,const brw_reg & src,const brw_reg & offset_src,brw_reg urb_handle,unsigned mod)5501 emit_urb_indirect_writes_mod(const fs_builder &bld, nir_intrinsic_instr *instr,
5502                              const brw_reg &src, const brw_reg &offset_src,
5503                              brw_reg urb_handle, unsigned mod)
5504 {
5505    assert(nir_src_bit_size(instr->src[0]) == 32);
5506 
5507    const unsigned comps = nir_src_num_components(instr->src[0]);
5508    assert(comps <= 4);
5509 
5510    const unsigned base_in_dwords = nir_intrinsic_base(instr) +
5511                                    component_from_intrinsic(instr);
5512 
5513    const unsigned comp_shift = mod;
5514    const unsigned mask = nir_intrinsic_write_mask(instr) << comp_shift;
5515 
5516    emit_urb_indirect_vec4_write(bld, offset_src, base_in_dwords, src,
5517                                 urb_handle, comp_shift, comps, mask);
5518 }
5519 
5520 static void
emit_urb_indirect_writes_xe2(const fs_builder & bld,nir_intrinsic_instr * instr,const brw_reg & src,const brw_reg & offset_src,brw_reg urb_handle)5521 emit_urb_indirect_writes_xe2(const fs_builder &bld, nir_intrinsic_instr *instr,
5522                              const brw_reg &src, const brw_reg &offset_src,
5523                              brw_reg urb_handle)
5524 {
5525    assert(nir_src_bit_size(instr->src[0]) == 32);
5526 
5527    const struct intel_device_info *devinfo = bld.shader->devinfo;
5528    const unsigned runit = reg_unit(devinfo);
5529    const unsigned write_size = 8 * runit;
5530 
5531    const unsigned comps = nir_src_num_components(instr->src[0]);
5532    assert(comps <= 4);
5533 
5534    const unsigned base_in_dwords = nir_intrinsic_base(instr) +
5535                                    component_from_intrinsic(instr);
5536 
5537    if (base_in_dwords > 0) {
5538       fs_builder bldall = bld.group(write_size, 0).exec_all();
5539       urb_handle = bldall.ADD(urb_handle, brw_imm_ud(base_in_dwords * 4));
5540    }
5541 
5542    const unsigned mask = nir_intrinsic_write_mask(instr);
5543 
5544    for (unsigned q = 0; q < bld.dispatch_width() / write_size; q++) {
5545       fs_builder wbld = bld.group(write_size, q);
5546 
5547       brw_reg payload_srcs[4];
5548 
5549       for (unsigned c = 0; c < comps; c++)
5550          payload_srcs[c] = horiz_offset(offset(src, bld, c), write_size * q);
5551 
5552       brw_reg addr =
5553          wbld.ADD(wbld.SHL(retype(horiz_offset(offset_src, write_size * q),
5554                                   BRW_TYPE_UD),
5555                            brw_imm_ud(2)), urb_handle);
5556 
5557       brw_reg srcs[URB_LOGICAL_NUM_SRCS];
5558       srcs[URB_LOGICAL_SRC_HANDLE] = addr;
5559       srcs[URB_LOGICAL_SRC_CHANNEL_MASK] = brw_imm_ud(mask << 16);
5560       int nr = bld.shader->alloc.allocate(comps * runit);
5561       srcs[URB_LOGICAL_SRC_DATA] = brw_vgrf(nr, BRW_TYPE_F);
5562       srcs[URB_LOGICAL_SRC_COMPONENTS] = brw_imm_ud(comps);
5563       wbld.LOAD_PAYLOAD(srcs[URB_LOGICAL_SRC_DATA], payload_srcs, comps, 0);
5564 
5565       wbld.emit(SHADER_OPCODE_URB_WRITE_LOGICAL,
5566                 reg_undef, srcs, ARRAY_SIZE(srcs));
5567    }
5568 }
5569 
5570 static void
emit_urb_indirect_writes(const fs_builder & bld,nir_intrinsic_instr * instr,const brw_reg & src,const brw_reg & offset_src,brw_reg urb_handle)5571 emit_urb_indirect_writes(const fs_builder &bld, nir_intrinsic_instr *instr,
5572                          const brw_reg &src, const brw_reg &offset_src,
5573                          brw_reg urb_handle)
5574 {
5575    assert(nir_src_bit_size(instr->src[0]) == 32);
5576 
5577    const unsigned comps = nir_src_num_components(instr->src[0]);
5578    assert(comps <= 4);
5579 
5580    const unsigned base_in_dwords = nir_intrinsic_base(instr) +
5581                                    component_from_intrinsic(instr);
5582 
5583    /* Use URB write message that allow different offsets per-slot.  The offset
5584     * is in units of vec4s (128 bits), so we use a write for each component,
5585     * replicating it in the sources and applying the appropriate mask based on
5586     * the dword offset.
5587     */
5588 
5589    for (unsigned c = 0; c < comps; c++) {
5590       if (((1 << c) & nir_intrinsic_write_mask(instr)) == 0)
5591          continue;
5592 
5593       brw_reg src_comp = offset(src, bld, c);
5594 
5595       for (unsigned q = 0; q < bld.dispatch_width() / 8; q++) {
5596          fs_builder bld8 = bld.group(8, q);
5597 
5598          /* offset is always positive, so signedness doesn't matter */
5599          assert(offset_src.type == BRW_TYPE_D ||
5600                 offset_src.type == BRW_TYPE_UD);
5601 
5602          brw_reg off =
5603             bld8.ADD(quarter(retype(offset_src, BRW_TYPE_UD), q),
5604                      brw_imm_ud(c + base_in_dwords));
5605          brw_reg m = bld8.AND(off, brw_imm_ud(0x3));
5606          brw_reg t = bld8.SHL(bld8.MOV(brw_imm_ud(1)), m);
5607          brw_reg mask = bld8.SHL(t, brw_imm_ud(16));
5608          brw_reg final_offset = bld8.SHR(off, brw_imm_ud(2));
5609 
5610          brw_reg payload_srcs[4];
5611          unsigned length = 0;
5612 
5613          for (unsigned j = 0; j < 4; j++)
5614             payload_srcs[length++] = quarter(src_comp, q);
5615 
5616          brw_reg srcs[URB_LOGICAL_NUM_SRCS];
5617          srcs[URB_LOGICAL_SRC_HANDLE] = urb_handle;
5618          srcs[URB_LOGICAL_SRC_PER_SLOT_OFFSETS] = final_offset;
5619          srcs[URB_LOGICAL_SRC_CHANNEL_MASK] = mask;
5620          srcs[URB_LOGICAL_SRC_DATA] = brw_vgrf(bld.shader->alloc.allocate(length),
5621                                                BRW_TYPE_F);
5622          srcs[URB_LOGICAL_SRC_COMPONENTS] = brw_imm_ud(length);
5623          bld8.LOAD_PAYLOAD(srcs[URB_LOGICAL_SRC_DATA], payload_srcs, length, 0);
5624 
5625          fs_inst *inst = bld8.emit(SHADER_OPCODE_URB_WRITE_LOGICAL,
5626                                    reg_undef, srcs, ARRAY_SIZE(srcs));
5627          inst->offset = 0;
5628       }
5629    }
5630 }
5631 
5632 static void
emit_urb_direct_reads(const fs_builder & bld,nir_intrinsic_instr * instr,const brw_reg & dest,brw_reg urb_handle)5633 emit_urb_direct_reads(const fs_builder &bld, nir_intrinsic_instr *instr,
5634                       const brw_reg &dest, brw_reg urb_handle)
5635 {
5636    assert(instr->def.bit_size == 32);
5637 
5638    unsigned comps = instr->def.num_components;
5639    if (comps == 0)
5640       return;
5641 
5642    nir_src *offset_nir_src = nir_get_io_offset_src(instr);
5643    assert(nir_src_is_const(*offset_nir_src));
5644 
5645    const unsigned offset_in_dwords = nir_intrinsic_base(instr) +
5646                                      nir_src_as_uint(*offset_nir_src) +
5647                                      component_from_intrinsic(instr);
5648 
5649    unsigned urb_global_offset = offset_in_dwords / 4;
5650    adjust_handle_and_offset(bld, urb_handle, urb_global_offset);
5651 
5652    const unsigned comp_offset = offset_in_dwords % 4;
5653    const unsigned num_regs = comp_offset + comps;
5654 
5655    fs_builder ubld8 = bld.group(8, 0).exec_all();
5656    brw_reg data = ubld8.vgrf(BRW_TYPE_UD, num_regs);
5657    brw_reg srcs[URB_LOGICAL_NUM_SRCS];
5658    srcs[URB_LOGICAL_SRC_HANDLE] = urb_handle;
5659 
5660    fs_inst *inst = ubld8.emit(SHADER_OPCODE_URB_READ_LOGICAL, data,
5661                               srcs, ARRAY_SIZE(srcs));
5662    inst->offset = urb_global_offset;
5663    assert(inst->offset < 2048);
5664    inst->size_written = num_regs * REG_SIZE;
5665 
5666    for (unsigned c = 0; c < comps; c++) {
5667       brw_reg dest_comp = offset(dest, bld, c);
5668       brw_reg data_comp = horiz_stride(offset(data, ubld8, comp_offset + c), 0);
5669       bld.MOV(retype(dest_comp, BRW_TYPE_UD), data_comp);
5670    }
5671 }
5672 
5673 static void
emit_urb_direct_reads_xe2(const fs_builder & bld,nir_intrinsic_instr * instr,const brw_reg & dest,brw_reg urb_handle)5674 emit_urb_direct_reads_xe2(const fs_builder &bld, nir_intrinsic_instr *instr,
5675                           const brw_reg &dest, brw_reg urb_handle)
5676 {
5677    assert(instr->def.bit_size == 32);
5678 
5679    unsigned comps = instr->def.num_components;
5680    if (comps == 0)
5681       return;
5682 
5683    nir_src *offset_nir_src = nir_get_io_offset_src(instr);
5684    assert(nir_src_is_const(*offset_nir_src));
5685 
5686    fs_builder ubld16 = bld.group(16, 0).exec_all();
5687 
5688    const unsigned offset_in_dwords = nir_intrinsic_base(instr) +
5689                                      nir_src_as_uint(*offset_nir_src) +
5690                                      component_from_intrinsic(instr);
5691 
5692    if (offset_in_dwords > 0)
5693       urb_handle = ubld16.ADD(urb_handle, brw_imm_ud(offset_in_dwords * 4));
5694 
5695    brw_reg data = ubld16.vgrf(BRW_TYPE_UD, comps);
5696    brw_reg srcs[URB_LOGICAL_NUM_SRCS];
5697    srcs[URB_LOGICAL_SRC_HANDLE] = urb_handle;
5698 
5699    fs_inst *inst = ubld16.emit(SHADER_OPCODE_URB_READ_LOGICAL,
5700                                data, srcs, ARRAY_SIZE(srcs));
5701    inst->size_written = 2 * comps * REG_SIZE;
5702 
5703    for (unsigned c = 0; c < comps; c++) {
5704       brw_reg dest_comp = offset(dest, bld, c);
5705       brw_reg data_comp = horiz_stride(offset(data, ubld16, c), 0);
5706       bld.MOV(retype(dest_comp, BRW_TYPE_UD), data_comp);
5707    }
5708 }
5709 
5710 static void
emit_urb_indirect_reads(const fs_builder & bld,nir_intrinsic_instr * instr,const brw_reg & dest,const brw_reg & offset_src,brw_reg urb_handle)5711 emit_urb_indirect_reads(const fs_builder &bld, nir_intrinsic_instr *instr,
5712                         const brw_reg &dest, const brw_reg &offset_src, brw_reg urb_handle)
5713 {
5714    assert(instr->def.bit_size == 32);
5715 
5716    unsigned comps = instr->def.num_components;
5717    if (comps == 0)
5718       return;
5719 
5720    brw_reg seq_ud;
5721    {
5722       fs_builder ubld8 = bld.group(8, 0).exec_all();
5723       seq_ud = ubld8.vgrf(BRW_TYPE_UD, 1);
5724       brw_reg seq_uw = ubld8.vgrf(BRW_TYPE_UW, 1);
5725       ubld8.MOV(seq_uw, brw_reg(brw_imm_v(0x76543210)));
5726       ubld8.MOV(seq_ud, seq_uw);
5727       seq_ud = ubld8.SHL(seq_ud, brw_imm_ud(2));
5728    }
5729 
5730    const unsigned base_in_dwords = nir_intrinsic_base(instr) +
5731                                    component_from_intrinsic(instr);
5732 
5733    for (unsigned c = 0; c < comps; c++) {
5734       for (unsigned q = 0; q < bld.dispatch_width() / 8; q++) {
5735          fs_builder bld8 = bld.group(8, q);
5736 
5737          /* offset is always positive, so signedness doesn't matter */
5738          assert(offset_src.type == BRW_TYPE_D ||
5739                 offset_src.type == BRW_TYPE_UD);
5740          brw_reg off =
5741             bld8.ADD(bld8.MOV(quarter(retype(offset_src, BRW_TYPE_UD), q)),
5742                      brw_imm_ud(base_in_dwords + c));
5743 
5744          STATIC_ASSERT(IS_POT(REG_SIZE) && REG_SIZE > 1);
5745 
5746          brw_reg comp;
5747          comp = bld8.AND(off, brw_imm_ud(0x3));
5748          comp = bld8.SHL(comp, brw_imm_ud(ffs(REG_SIZE) - 1));
5749          comp = bld8.ADD(comp, seq_ud);
5750 
5751          off = bld8.SHR(off, brw_imm_ud(2));
5752 
5753          brw_reg srcs[URB_LOGICAL_NUM_SRCS];
5754          srcs[URB_LOGICAL_SRC_HANDLE] = urb_handle;
5755          srcs[URB_LOGICAL_SRC_PER_SLOT_OFFSETS] = off;
5756 
5757          brw_reg data = bld8.vgrf(BRW_TYPE_UD, 4);
5758 
5759          fs_inst *inst = bld8.emit(SHADER_OPCODE_URB_READ_LOGICAL,
5760                                    data, srcs, ARRAY_SIZE(srcs));
5761          inst->offset = 0;
5762          inst->size_written = 4 * REG_SIZE;
5763 
5764          brw_reg dest_comp = offset(dest, bld, c);
5765          bld8.emit(SHADER_OPCODE_MOV_INDIRECT,
5766                    retype(quarter(dest_comp, q), BRW_TYPE_UD),
5767                    data,
5768                    comp,
5769                    brw_imm_ud(4 * REG_SIZE));
5770       }
5771    }
5772 }
5773 
5774 static void
emit_urb_indirect_reads_xe2(const fs_builder & bld,nir_intrinsic_instr * instr,const brw_reg & dest,const brw_reg & offset_src,brw_reg urb_handle)5775 emit_urb_indirect_reads_xe2(const fs_builder &bld, nir_intrinsic_instr *instr,
5776                             const brw_reg &dest, const brw_reg &offset_src,
5777                             brw_reg urb_handle)
5778 {
5779    assert(instr->def.bit_size == 32);
5780 
5781    unsigned comps = instr->def.num_components;
5782    if (comps == 0)
5783       return;
5784 
5785    fs_builder ubld16 = bld.group(16, 0).exec_all();
5786 
5787    const unsigned offset_in_dwords = nir_intrinsic_base(instr) +
5788                                      component_from_intrinsic(instr);
5789 
5790    if (offset_in_dwords > 0)
5791       urb_handle = ubld16.ADD(urb_handle, brw_imm_ud(offset_in_dwords * 4));
5792 
5793    brw_reg data = ubld16.vgrf(BRW_TYPE_UD, comps);
5794 
5795    for (unsigned q = 0; q < bld.dispatch_width() / 16; q++) {
5796       fs_builder wbld = bld.group(16, q);
5797 
5798       brw_reg addr = wbld.SHL(retype(horiz_offset(offset_src, 16 * q),
5799                                      BRW_TYPE_UD),
5800                               brw_imm_ud(2));
5801 
5802       brw_reg srcs[URB_LOGICAL_NUM_SRCS];
5803       srcs[URB_LOGICAL_SRC_HANDLE] = wbld.ADD(addr, urb_handle);
5804 
5805       fs_inst *inst = wbld.emit(SHADER_OPCODE_URB_READ_LOGICAL,
5806                                  data, srcs, ARRAY_SIZE(srcs));
5807       inst->size_written = 2 * comps * REG_SIZE;
5808 
5809       for (unsigned c = 0; c < comps; c++) {
5810          brw_reg dest_comp = horiz_offset(offset(dest, bld, c), 16 * q);
5811          brw_reg data_comp = offset(data, wbld, c);
5812          wbld.MOV(retype(dest_comp, BRW_TYPE_UD), data_comp);
5813       }
5814    }
5815 }
5816 
5817 static void
emit_task_mesh_store(nir_to_brw_state & ntb,const fs_builder & bld,nir_intrinsic_instr * instr,const brw_reg & urb_handle)5818 emit_task_mesh_store(nir_to_brw_state &ntb,
5819                      const fs_builder &bld, nir_intrinsic_instr *instr,
5820                      const brw_reg &urb_handle)
5821 {
5822    brw_reg src = get_nir_src(ntb, instr->src[0]);
5823    nir_src *offset_nir_src = nir_get_io_offset_src(instr);
5824 
5825    if (nir_src_is_const(*offset_nir_src)) {
5826       if (bld.shader->devinfo->ver >= 20)
5827          emit_urb_direct_writes_xe2(bld, instr, src, urb_handle);
5828       else
5829          emit_urb_direct_writes(bld, instr, src, urb_handle);
5830    } else {
5831       if (bld.shader->devinfo->ver >= 20) {
5832          emit_urb_indirect_writes_xe2(bld, instr, src, get_nir_src(ntb, *offset_nir_src), urb_handle);
5833          return;
5834       }
5835       bool use_mod = false;
5836       unsigned mod;
5837 
5838       /* Try to calculate the value of (offset + base) % 4. If we can do
5839        * this, then we can do indirect writes using only 1 URB write.
5840        */
5841       use_mod = nir_mod_analysis(nir_get_scalar(offset_nir_src->ssa, 0), nir_type_uint, 4, &mod);
5842       if (use_mod) {
5843          mod += nir_intrinsic_base(instr) + component_from_intrinsic(instr);
5844          mod %= 4;
5845       }
5846 
5847       if (use_mod) {
5848          emit_urb_indirect_writes_mod(bld, instr, src, get_nir_src(ntb, *offset_nir_src), urb_handle, mod);
5849       } else {
5850          emit_urb_indirect_writes(bld, instr, src, get_nir_src(ntb, *offset_nir_src), urb_handle);
5851       }
5852    }
5853 }
5854 
5855 static void
emit_task_mesh_load(nir_to_brw_state & ntb,const fs_builder & bld,nir_intrinsic_instr * instr,const brw_reg & urb_handle)5856 emit_task_mesh_load(nir_to_brw_state &ntb,
5857                     const fs_builder &bld, nir_intrinsic_instr *instr,
5858                     const brw_reg &urb_handle)
5859 {
5860    brw_reg dest = get_nir_def(ntb, instr->def);
5861    nir_src *offset_nir_src = nir_get_io_offset_src(instr);
5862 
5863    /* TODO(mesh): for per_vertex and per_primitive, if we could keep around
5864     * the non-array-index offset, we could use to decide if we can perform
5865     * a single large aligned read instead one per component.
5866     */
5867 
5868    if (nir_src_is_const(*offset_nir_src)) {
5869       if (bld.shader->devinfo->ver >= 20)
5870          emit_urb_direct_reads_xe2(bld, instr, dest, urb_handle);
5871       else
5872          emit_urb_direct_reads(bld, instr, dest, urb_handle);
5873    } else {
5874       if (bld.shader->devinfo->ver >= 20)
5875          emit_urb_indirect_reads_xe2(bld, instr, dest, get_nir_src(ntb, *offset_nir_src), urb_handle);
5876       else
5877          emit_urb_indirect_reads(bld, instr, dest, get_nir_src(ntb, *offset_nir_src), urb_handle);
5878    }
5879 }
5880 
5881 static void
fs_nir_emit_task_mesh_intrinsic(nir_to_brw_state & ntb,const fs_builder & bld,nir_intrinsic_instr * instr)5882 fs_nir_emit_task_mesh_intrinsic(nir_to_brw_state &ntb, const fs_builder &bld,
5883                                 nir_intrinsic_instr *instr)
5884 {
5885    fs_visitor &s = ntb.s;
5886 
5887    assert(s.stage == MESA_SHADER_MESH || s.stage == MESA_SHADER_TASK);
5888    const task_mesh_thread_payload &payload = s.task_mesh_payload();
5889 
5890    brw_reg dest;
5891    if (nir_intrinsic_infos[instr->intrinsic].has_dest)
5892       dest = get_nir_def(ntb, instr->def);
5893 
5894    switch (instr->intrinsic) {
5895    case nir_intrinsic_load_mesh_inline_data_intel: {
5896       brw_reg data = offset(payload.inline_parameter, 1, nir_intrinsic_align_offset(instr));
5897       bld.MOV(dest, retype(data, dest.type));
5898       break;
5899    }
5900 
5901    case nir_intrinsic_load_draw_id:
5902       dest = retype(dest, BRW_TYPE_UD);
5903       bld.MOV(dest, payload.extended_parameter_0);
5904       break;
5905 
5906    case nir_intrinsic_load_local_invocation_id:
5907       unreachable("local invocation id should have been lowered earlier");
5908       break;
5909 
5910    case nir_intrinsic_load_local_invocation_index:
5911       dest = retype(dest, BRW_TYPE_UD);
5912       bld.MOV(dest, payload.local_index);
5913       break;
5914 
5915    case nir_intrinsic_load_num_workgroups:
5916       dest = retype(dest, BRW_TYPE_UD);
5917       bld.MOV(offset(dest, bld, 0), brw_uw1_grf(0, 13)); /* g0.6 >> 16 */
5918       bld.MOV(offset(dest, bld, 1), brw_uw1_grf(0, 8));  /* g0.4 & 0xffff */
5919       bld.MOV(offset(dest, bld, 2), brw_uw1_grf(0, 9));  /* g0.4 >> 16 */
5920       break;
5921 
5922    case nir_intrinsic_load_workgroup_index:
5923       dest = retype(dest, BRW_TYPE_UD);
5924       bld.MOV(dest, retype(brw_vec1_grf(0, 1), BRW_TYPE_UD));
5925       break;
5926 
5927    default:
5928       fs_nir_emit_cs_intrinsic(ntb, instr);
5929       break;
5930    }
5931 }
5932 
5933 static void
fs_nir_emit_task_intrinsic(nir_to_brw_state & ntb,nir_intrinsic_instr * instr)5934 fs_nir_emit_task_intrinsic(nir_to_brw_state &ntb,
5935                            nir_intrinsic_instr *instr)
5936 {
5937    const fs_builder &bld = ntb.bld;
5938    fs_visitor &s = ntb.s;
5939 
5940    assert(s.stage == MESA_SHADER_TASK);
5941    const task_mesh_thread_payload &payload = s.task_mesh_payload();
5942 
5943    switch (instr->intrinsic) {
5944    case nir_intrinsic_store_output:
5945    case nir_intrinsic_store_task_payload:
5946       emit_task_mesh_store(ntb, bld, instr, payload.urb_output);
5947       break;
5948 
5949    case nir_intrinsic_load_output:
5950    case nir_intrinsic_load_task_payload:
5951       emit_task_mesh_load(ntb, bld, instr, payload.urb_output);
5952       break;
5953 
5954    default:
5955       fs_nir_emit_task_mesh_intrinsic(ntb, bld, instr);
5956       break;
5957    }
5958 }
5959 
5960 static void
fs_nir_emit_mesh_intrinsic(nir_to_brw_state & ntb,nir_intrinsic_instr * instr)5961 fs_nir_emit_mesh_intrinsic(nir_to_brw_state &ntb,
5962                            nir_intrinsic_instr *instr)
5963 {
5964    const fs_builder &bld = ntb.bld;
5965    fs_visitor &s = ntb.s;
5966 
5967    assert(s.stage == MESA_SHADER_MESH);
5968    const task_mesh_thread_payload &payload = s.task_mesh_payload();
5969 
5970    switch (instr->intrinsic) {
5971    case nir_intrinsic_store_per_primitive_output:
5972    case nir_intrinsic_store_per_vertex_output:
5973    case nir_intrinsic_store_output:
5974       emit_task_mesh_store(ntb, bld, instr, payload.urb_output);
5975       break;
5976 
5977    case nir_intrinsic_load_per_vertex_output:
5978    case nir_intrinsic_load_per_primitive_output:
5979    case nir_intrinsic_load_output:
5980       emit_task_mesh_load(ntb, bld, instr, payload.urb_output);
5981       break;
5982 
5983    case nir_intrinsic_load_task_payload:
5984       emit_task_mesh_load(ntb, bld, instr, payload.task_urb_input);
5985       break;
5986 
5987    default:
5988       fs_nir_emit_task_mesh_intrinsic(ntb, bld, instr);
5989       break;
5990    }
5991 }
5992 
5993 static void
fs_nir_emit_intrinsic(nir_to_brw_state & ntb,const fs_builder & bld,nir_intrinsic_instr * instr)5994 fs_nir_emit_intrinsic(nir_to_brw_state &ntb,
5995                       const fs_builder &bld, nir_intrinsic_instr *instr)
5996 {
5997    const intel_device_info *devinfo = ntb.devinfo;
5998    fs_visitor &s = ntb.s;
5999 
6000    /* We handle this as a special case */
6001    if (instr->intrinsic == nir_intrinsic_decl_reg) {
6002       assert(nir_intrinsic_num_array_elems(instr) == 0);
6003       unsigned bit_size = nir_intrinsic_bit_size(instr);
6004       unsigned num_components = nir_intrinsic_num_components(instr);
6005       const brw_reg_type reg_type =
6006          brw_type_with_size(bit_size == 8 ? BRW_TYPE_D : BRW_TYPE_F,
6007                             bit_size);
6008 
6009       /* Re-use the destination's slot in the table for the register */
6010       ntb.ssa_values[instr->def.index] =
6011          bld.vgrf(reg_type, num_components);
6012       return;
6013    }
6014 
6015    brw_reg dest;
6016    if (nir_intrinsic_infos[instr->intrinsic].has_dest)
6017       dest = get_nir_def(ntb, instr->def);
6018 
6019    switch (instr->intrinsic) {
6020    case nir_intrinsic_resource_intel:
6021       ntb.ssa_bind_infos[instr->def.index].valid = true;
6022       ntb.ssa_bind_infos[instr->def.index].bindless =
6023          (nir_intrinsic_resource_access_intel(instr) &
6024           nir_resource_intel_bindless) != 0;
6025       ntb.ssa_bind_infos[instr->def.index].block =
6026          nir_intrinsic_resource_block_intel(instr);
6027       ntb.ssa_bind_infos[instr->def.index].set =
6028          nir_intrinsic_desc_set(instr);
6029       ntb.ssa_bind_infos[instr->def.index].binding =
6030          nir_intrinsic_binding(instr);
6031 
6032       if (nir_intrinsic_resource_access_intel(instr) &
6033            nir_resource_intel_non_uniform) {
6034          ntb.uniform_values[instr->def.index] = brw_reg();
6035       } else {
6036          ntb.uniform_values[instr->def.index] =
6037             try_rebuild_source(ntb, bld, instr->src[1].ssa);
6038       }
6039       ntb.ssa_values[instr->def.index] =
6040          get_nir_src(ntb, instr->src[1]);
6041       break;
6042 
6043    case nir_intrinsic_load_reg:
6044    case nir_intrinsic_store_reg:
6045       /* Nothing to do with these. */
6046       break;
6047 
6048    case nir_intrinsic_load_global_constant_uniform_block_intel:
6049       ntb.uniform_values[instr->src[0].ssa->index] =
6050          try_rebuild_source(ntb, bld, instr->src[0].ssa, true);
6051       FALLTHROUGH;
6052    case nir_intrinsic_load_ssbo_uniform_block_intel:
6053    case nir_intrinsic_load_shared_uniform_block_intel:
6054    case nir_intrinsic_load_global_block_intel:
6055    case nir_intrinsic_store_global_block_intel:
6056    case nir_intrinsic_load_shared_block_intel:
6057    case nir_intrinsic_store_shared_block_intel:
6058    case nir_intrinsic_load_ssbo_block_intel:
6059    case nir_intrinsic_store_ssbo_block_intel:
6060    case nir_intrinsic_image_load:
6061    case nir_intrinsic_image_store:
6062    case nir_intrinsic_image_atomic:
6063    case nir_intrinsic_image_atomic_swap:
6064    case nir_intrinsic_bindless_image_load:
6065    case nir_intrinsic_bindless_image_store:
6066    case nir_intrinsic_bindless_image_atomic:
6067    case nir_intrinsic_bindless_image_atomic_swap:
6068    case nir_intrinsic_load_shared:
6069    case nir_intrinsic_store_shared:
6070    case nir_intrinsic_shared_atomic:
6071    case nir_intrinsic_shared_atomic_swap:
6072    case nir_intrinsic_load_ssbo:
6073    case nir_intrinsic_store_ssbo:
6074    case nir_intrinsic_ssbo_atomic:
6075    case nir_intrinsic_ssbo_atomic_swap:
6076    case nir_intrinsic_load_global:
6077    case nir_intrinsic_load_global_constant:
6078    case nir_intrinsic_store_global:
6079    case nir_intrinsic_global_atomic:
6080    case nir_intrinsic_global_atomic_swap:
6081    case nir_intrinsic_load_scratch:
6082    case nir_intrinsic_store_scratch:
6083       fs_nir_emit_memory_access(ntb, bld, instr);
6084       break;
6085 
6086    case nir_intrinsic_image_size:
6087    case nir_intrinsic_bindless_image_size: {
6088       /* Cube image sizes should have previously been lowered to a 2D array */
6089       assert(nir_intrinsic_image_dim(instr) != GLSL_SAMPLER_DIM_CUBE);
6090 
6091       /* Unlike the [un]typed load and store opcodes, the TXS that this turns
6092        * into will handle the binding table index for us in the geneerator.
6093        * Incidentally, this means that we can handle bindless with exactly the
6094        * same code.
6095        */
6096       brw_reg image = retype(get_nir_src_imm(ntb, instr->src[0]), BRW_TYPE_UD);
6097       image = bld.emit_uniformize(image);
6098 
6099       assert(nir_src_as_uint(instr->src[1]) == 0);
6100 
6101       brw_reg srcs[TEX_LOGICAL_NUM_SRCS];
6102       if (instr->intrinsic == nir_intrinsic_image_size)
6103          srcs[TEX_LOGICAL_SRC_SURFACE] = image;
6104       else
6105          srcs[TEX_LOGICAL_SRC_SURFACE_HANDLE] = image;
6106       srcs[TEX_LOGICAL_SRC_SAMPLER] = brw_imm_d(0);
6107       srcs[TEX_LOGICAL_SRC_COORD_COMPONENTS] = brw_imm_d(0);
6108       srcs[TEX_LOGICAL_SRC_GRAD_COMPONENTS] = brw_imm_d(0);
6109       srcs[TEX_LOGICAL_SRC_RESIDENCY] = brw_imm_d(0);
6110 
6111       /* Since the image size is always uniform, we can just emit a SIMD8
6112        * query instruction and splat the result out.
6113        */
6114       const fs_builder ubld = bld.exec_all().group(8 * reg_unit(devinfo), 0);
6115 
6116       brw_reg tmp = ubld.vgrf(BRW_TYPE_UD, 4);
6117       fs_inst *inst = ubld.emit(SHADER_OPCODE_IMAGE_SIZE_LOGICAL,
6118                                 tmp, srcs, ARRAY_SIZE(srcs));
6119       inst->size_written = 4 * REG_SIZE * reg_unit(devinfo);
6120 
6121       for (unsigned c = 0; c < instr->def.num_components; ++c) {
6122          bld.MOV(offset(retype(dest, tmp.type), bld, c),
6123                  component(offset(tmp, ubld, c), 0));
6124       }
6125       break;
6126    }
6127 
6128    case nir_intrinsic_barrier:
6129    case nir_intrinsic_begin_invocation_interlock:
6130    case nir_intrinsic_end_invocation_interlock: {
6131       bool ugm_fence, slm_fence, tgm_fence, urb_fence;
6132       enum opcode opcode = BRW_OPCODE_NOP;
6133 
6134       /* Handling interlock intrinsics here will allow the logic for IVB
6135        * render cache (see below) to be reused.
6136        */
6137 
6138       switch (instr->intrinsic) {
6139       case nir_intrinsic_barrier: {
6140          /* Note we only care about the memory part of the
6141           * barrier.  The execution part will be taken care
6142           * of by the stage specific intrinsic handler functions.
6143           */
6144          nir_variable_mode modes = nir_intrinsic_memory_modes(instr);
6145          ugm_fence = modes & (nir_var_mem_ssbo | nir_var_mem_global);
6146          slm_fence = modes & nir_var_mem_shared;
6147          tgm_fence = modes & nir_var_image;
6148          urb_fence = modes & (nir_var_shader_out | nir_var_mem_task_payload);
6149          if (nir_intrinsic_memory_scope(instr) != SCOPE_NONE)
6150             opcode = SHADER_OPCODE_MEMORY_FENCE;
6151          break;
6152       }
6153 
6154       case nir_intrinsic_begin_invocation_interlock:
6155          /* For beginInvocationInterlockARB(), we will generate a memory fence
6156           * but with a different opcode so that generator can pick SENDC
6157           * instead of SEND.
6158           */
6159          assert(s.stage == MESA_SHADER_FRAGMENT);
6160          ugm_fence = tgm_fence = true;
6161          slm_fence = urb_fence = false;
6162          opcode = SHADER_OPCODE_INTERLOCK;
6163          break;
6164 
6165       case nir_intrinsic_end_invocation_interlock:
6166          /* For endInvocationInterlockARB(), we need to insert a memory fence which
6167           * stalls in the shader until the memory transactions prior to that
6168           * fence are complete.  This ensures that the shader does not end before
6169           * any writes from its critical section have landed.  Otherwise, you can
6170           * end up with a case where the next invocation on that pixel properly
6171           * stalls for previous FS invocation on its pixel to complete but
6172           * doesn't actually wait for the dataport memory transactions from that
6173           * thread to land before submitting its own.
6174           */
6175          assert(s.stage == MESA_SHADER_FRAGMENT);
6176          ugm_fence = tgm_fence = true;
6177          slm_fence = urb_fence = false;
6178          opcode = SHADER_OPCODE_MEMORY_FENCE;
6179          break;
6180 
6181       default:
6182          unreachable("invalid intrinsic");
6183       }
6184 
6185       if (opcode == BRW_OPCODE_NOP)
6186          break;
6187 
6188       if (s.nir->info.shared_size > 0) {
6189          assert(gl_shader_stage_uses_workgroup(s.stage));
6190       } else {
6191          slm_fence = false;
6192       }
6193 
6194       /* If the workgroup fits in a single HW thread, the messages for SLM are
6195        * processed in-order and the shader itself is already synchronized so
6196        * the memory fence is not necessary.
6197        *
6198        * TODO: Check if applies for many HW threads sharing same Data Port.
6199        */
6200       if (!s.nir->info.workgroup_size_variable &&
6201           slm_fence && brw_workgroup_size(s) <= s.dispatch_width)
6202          slm_fence = false;
6203 
6204       switch (s.stage) {
6205          case MESA_SHADER_TESS_CTRL:
6206          case MESA_SHADER_TASK:
6207          case MESA_SHADER_MESH:
6208             break;
6209          default:
6210             urb_fence = false;
6211             break;
6212       }
6213 
6214       unsigned fence_regs_count = 0;
6215       brw_reg fence_regs[4] = {};
6216 
6217       const fs_builder ubld = bld.group(8, 0);
6218 
6219       /* A memory barrier with acquire semantics requires us to
6220        * guarantee that memory operations of the specified storage
6221        * class sequenced-after the barrier aren't reordered before the
6222        * barrier, nor before any previous atomic operation
6223        * sequenced-before the barrier which may be synchronizing this
6224        * acquire barrier with a prior release sequence.
6225        *
6226        * In order to guarantee the latter we must make sure that any
6227        * such previous operation has completed execution before
6228        * invalidating the relevant caches, since otherwise some cache
6229        * could be polluted by a concurrent thread after its
6230        * invalidation but before the previous atomic completes, which
6231        * could lead to a violation of the expected memory ordering if
6232        * a subsequent memory read hits the polluted cacheline, which
6233        * would return a stale value read from memory before the
6234        * completion of the atomic sequenced-before the barrier.
6235        *
6236        * This ordering inversion can be avoided trivially if the
6237        * operations we need to order are all handled by a single
6238        * in-order cache, since the flush implied by the memory fence
6239        * occurs after any pending operations have completed, however
6240        * that doesn't help us when dealing with multiple caches
6241        * processing requests out of order, in which case we need to
6242        * explicitly stall the EU until any pending memory operations
6243        * have executed.
6244        *
6245        * Note that that might be somewhat heavy handed in some cases.
6246        * In particular when this memory fence was inserted by
6247        * spirv_to_nir() lowering an atomic with acquire semantics into
6248        * an atomic+barrier sequence we could do a better job by
6249        * synchronizing with respect to that one atomic *only*, but
6250        * that would require additional information not currently
6251        * available to the backend.
6252        *
6253        * XXX - Use an alternative workaround on IVB and ICL, since
6254        *       SYNC.ALLWR is only available on Gfx12+.
6255        */
6256       if (devinfo->ver >= 12 &&
6257           (!nir_intrinsic_has_memory_scope(instr) ||
6258            (nir_intrinsic_memory_semantics(instr) & NIR_MEMORY_ACQUIRE))) {
6259          ubld.exec_all().group(1, 0).SYNC(TGL_SYNC_ALLWR);
6260       }
6261 
6262       if (devinfo->has_lsc) {
6263          assert(devinfo->verx10 >= 125);
6264          uint32_t desc =
6265             lsc_fence_descriptor_for_intrinsic(devinfo, instr);
6266          if (ugm_fence) {
6267             fence_regs[fence_regs_count++] =
6268                emit_fence(ubld, opcode, GFX12_SFID_UGM, desc,
6269                           true /* commit_enable */,
6270                           0 /* bti; ignored for LSC */);
6271          }
6272 
6273          if (tgm_fence) {
6274             fence_regs[fence_regs_count++] =
6275                emit_fence(ubld, opcode, GFX12_SFID_TGM, desc,
6276                           true /* commit_enable */,
6277                           0 /* bti; ignored for LSC */);
6278          }
6279 
6280          if (slm_fence) {
6281             assert(opcode == SHADER_OPCODE_MEMORY_FENCE);
6282             if (intel_needs_workaround(devinfo, 14014063774)) {
6283                /* Wa_14014063774
6284                 *
6285                 * Before SLM fence compiler needs to insert SYNC.ALLWR in order
6286                 * to avoid the SLM data race.
6287                 */
6288                ubld.exec_all().group(1, 0).SYNC(TGL_SYNC_ALLWR);
6289             }
6290             fence_regs[fence_regs_count++] =
6291                emit_fence(ubld, opcode, GFX12_SFID_SLM, desc,
6292                           true /* commit_enable */,
6293                           0 /* BTI; ignored for LSC */);
6294          }
6295 
6296          if (urb_fence) {
6297             assert(opcode == SHADER_OPCODE_MEMORY_FENCE);
6298             fence_regs[fence_regs_count++] =
6299                emit_fence(ubld, opcode, BRW_SFID_URB, desc,
6300                           true /* commit_enable */,
6301                           0 /* BTI; ignored for LSC */);
6302          }
6303       } else if (devinfo->ver >= 11) {
6304          if (tgm_fence || ugm_fence || urb_fence) {
6305             fence_regs[fence_regs_count++] =
6306                emit_fence(ubld, opcode, GFX7_SFID_DATAPORT_DATA_CACHE, 0,
6307                           true /* commit_enable HSD ES # 1404612949 */,
6308                           0 /* BTI = 0 means data cache */);
6309          }
6310 
6311          if (slm_fence) {
6312             assert(opcode == SHADER_OPCODE_MEMORY_FENCE);
6313             fence_regs[fence_regs_count++] =
6314                emit_fence(ubld, opcode, GFX7_SFID_DATAPORT_DATA_CACHE, 0,
6315                           true /* commit_enable HSD ES # 1404612949 */,
6316                           GFX7_BTI_SLM);
6317          }
6318       } else {
6319          /* Simulation also complains on Gfx9 if we do not enable commit.
6320           */
6321          const bool commit_enable =
6322             instr->intrinsic == nir_intrinsic_end_invocation_interlock ||
6323             devinfo->ver == 9;
6324 
6325          if (tgm_fence || ugm_fence || slm_fence || urb_fence) {
6326             fence_regs[fence_regs_count++] =
6327                emit_fence(ubld, opcode, GFX7_SFID_DATAPORT_DATA_CACHE, 0,
6328                           commit_enable, 0 /* BTI */);
6329          }
6330       }
6331 
6332       assert(fence_regs_count <= ARRAY_SIZE(fence_regs));
6333 
6334       /* Be conservative in Gen11+ and always stall in a fence.  Since
6335        * there are two different fences, and shader might want to
6336        * synchronize between them.
6337        *
6338        * TODO: Use scope and visibility information for the barriers from NIR
6339        * to make a better decision on whether we need to stall.
6340        */
6341       bool force_stall = devinfo->ver >= 11;
6342 
6343       /* There are four cases where we want to insert a stall:
6344        *
6345        *  1. If we're a nir_intrinsic_end_invocation_interlock.  This is
6346        *     required to ensure that the shader EOT doesn't happen until
6347        *     after the fence returns.  Otherwise, we might end up with the
6348        *     next shader invocation for that pixel not respecting our fence
6349        *     because it may happen on a different HW thread.
6350        *
6351        *  2. If we have multiple fences.  This is required to ensure that
6352        *     they all complete and nothing gets weirdly out-of-order.
6353        *
6354        *  3. If we have no fences.  In this case, we need at least a
6355        *     scheduling barrier to keep the compiler from moving things
6356        *     around in an invalid way.
6357        *
6358        *  4. On Gen11+ and platforms with LSC, we have multiple fence types,
6359        *     without further information about the fence, we need to force a
6360        *     stall.
6361        */
6362       if (instr->intrinsic == nir_intrinsic_end_invocation_interlock ||
6363           fence_regs_count != 1 || devinfo->has_lsc || force_stall) {
6364          ubld.exec_all().group(1, 0).emit(
6365             FS_OPCODE_SCHEDULING_FENCE, ubld.null_reg_ud(),
6366             fence_regs, fence_regs_count);
6367       }
6368 
6369       break;
6370    }
6371 
6372    case nir_intrinsic_shader_clock: {
6373       /* We cannot do anything if there is an event, so ignore it for now */
6374       const brw_reg shader_clock = get_timestamp(bld);
6375       const brw_reg srcs[] = { component(shader_clock, 0),
6376                               component(shader_clock, 1) };
6377       bld.LOAD_PAYLOAD(dest, srcs, ARRAY_SIZE(srcs), 0);
6378       break;
6379    }
6380 
6381    case nir_intrinsic_load_reloc_const_intel: {
6382       uint32_t id = nir_intrinsic_param_idx(instr);
6383       uint32_t base = nir_intrinsic_base(instr);
6384 
6385       /* Emit the reloc in the smallest SIMD size to limit register usage. */
6386       const fs_builder ubld = bld.exec_all().group(1, 0);
6387       brw_reg small_dest = ubld.vgrf(dest.type);
6388       ubld.UNDEF(small_dest);
6389       ubld.exec_all().group(1, 0).emit(SHADER_OPCODE_MOV_RELOC_IMM, small_dest,
6390                                        brw_imm_ud(id), brw_imm_ud(base));
6391 
6392       /* Copy propagation will get rid of this MOV. */
6393       bld.MOV(dest, component(small_dest, 0));
6394       break;
6395    }
6396 
6397    case nir_intrinsic_load_uniform: {
6398       /* Offsets are in bytes but they should always aligned to
6399        * the type size
6400        */
6401       unsigned base_offset = nir_intrinsic_base(instr);
6402       assert(base_offset % 4 == 0 || base_offset % brw_type_size_bytes(dest.type) == 0);
6403 
6404       brw_reg src = brw_uniform_reg(base_offset / 4, dest.type);
6405 
6406       if (nir_src_is_const(instr->src[0])) {
6407          unsigned load_offset = nir_src_as_uint(instr->src[0]);
6408          assert(load_offset % brw_type_size_bytes(dest.type) == 0);
6409          /* The base offset can only handle 32-bit units, so for 16-bit
6410           * data take the modulo of the offset with 4 bytes and add it to
6411           * the offset to read from within the source register.
6412           */
6413          src.offset = load_offset + base_offset % 4;
6414 
6415          for (unsigned j = 0; j < instr->num_components; j++) {
6416             bld.MOV(offset(dest, bld, j), offset(src, bld, j));
6417          }
6418       } else {
6419          brw_reg indirect = retype(get_nir_src(ntb, instr->src[0]),
6420                                   BRW_TYPE_UD);
6421 
6422          /* We need to pass a size to the MOV_INDIRECT but we don't want it to
6423           * go past the end of the uniform.  In order to keep the n'th
6424           * component from running past, we subtract off the size of all but
6425           * one component of the vector.
6426           */
6427          assert(nir_intrinsic_range(instr) >=
6428                 instr->num_components * brw_type_size_bytes(dest.type));
6429          unsigned read_size = nir_intrinsic_range(instr) -
6430             (instr->num_components - 1) * brw_type_size_bytes(dest.type);
6431 
6432          bool supports_64bit_indirects = !intel_device_info_is_9lp(devinfo);
6433 
6434          if (brw_type_size_bytes(dest.type) != 8 || supports_64bit_indirects) {
6435             for (unsigned j = 0; j < instr->num_components; j++) {
6436                bld.emit(SHADER_OPCODE_MOV_INDIRECT,
6437                         offset(dest, bld, j), offset(src, bld, j),
6438                         indirect, brw_imm_ud(read_size));
6439             }
6440          } else {
6441             const unsigned num_mov_indirects =
6442                brw_type_size_bytes(dest.type) / brw_type_size_bytes(BRW_TYPE_UD);
6443             /* We read a little bit less per MOV INDIRECT, as they are now
6444              * 32-bits ones instead of 64-bit. Fix read_size then.
6445              */
6446             const unsigned read_size_32bit = read_size -
6447                 (num_mov_indirects - 1) * brw_type_size_bytes(BRW_TYPE_UD);
6448             for (unsigned j = 0; j < instr->num_components; j++) {
6449                for (unsigned i = 0; i < num_mov_indirects; i++) {
6450                   bld.emit(SHADER_OPCODE_MOV_INDIRECT,
6451                            subscript(offset(dest, bld, j), BRW_TYPE_UD, i),
6452                            subscript(offset(src, bld, j), BRW_TYPE_UD, i),
6453                            indirect, brw_imm_ud(read_size_32bit));
6454                }
6455             }
6456          }
6457       }
6458       break;
6459    }
6460 
6461    case nir_intrinsic_load_ubo:
6462    case nir_intrinsic_load_ubo_uniform_block_intel: {
6463       brw_reg surface, surface_handle;
6464       bool no_mask_handle = false;
6465 
6466       if (get_nir_src_bindless(ntb, instr->src[0]))
6467          surface_handle = get_nir_buffer_intrinsic_index(ntb, bld, instr, &no_mask_handle);
6468       else
6469          surface = get_nir_buffer_intrinsic_index(ntb, bld, instr, &no_mask_handle);
6470 
6471       if (!nir_src_is_const(instr->src[1])) {
6472          s.prog_data->has_ubo_pull = true;
6473 
6474          if (instr->intrinsic == nir_intrinsic_load_ubo) {
6475             /* load_ubo with non-uniform offset */
6476             brw_reg base_offset = retype(get_nir_src(ntb, instr->src[1]),
6477                                         BRW_TYPE_UD);
6478 
6479             const unsigned comps_per_load = brw_type_size_bytes(dest.type) == 8 ? 2 : 4;
6480 
6481             for (int i = 0; i < instr->num_components; i += comps_per_load) {
6482                const unsigned remaining = instr->num_components - i;
6483                bld.VARYING_PULL_CONSTANT_LOAD(offset(dest, bld, i),
6484                                               surface, surface_handle,
6485                                               base_offset,
6486                                               i * brw_type_size_bytes(dest.type),
6487                                               instr->def.bit_size / 8,
6488                                               MIN2(remaining, comps_per_load));
6489             }
6490          } else {
6491             /* load_ubo_uniform_block_intel with non-constant offset */
6492             fs_nir_emit_memory_access(ntb, bld, instr);
6493          }
6494       } else {
6495          /* Even if we are loading doubles, a pull constant load will load
6496           * a 32-bit vec4, so should only reserve vgrf space for that. If we
6497           * need to load a full dvec4 we will have to emit 2 loads. This is
6498           * similar to demote_pull_constants(), except that in that case we
6499           * see individual accesses to each component of the vector and then
6500           * we let CSE deal with duplicate loads. Here we see a vector access
6501           * and we have to split it if necessary.
6502           */
6503          const unsigned type_size = brw_type_size_bytes(dest.type);
6504          const unsigned load_offset = nir_src_as_uint(instr->src[1]);
6505          const unsigned ubo_block =
6506             brw_nir_ubo_surface_index_get_push_block(instr->src[0]);
6507          const unsigned offset_256b = load_offset / 32;
6508          const unsigned end_256b =
6509             DIV_ROUND_UP(load_offset + type_size * instr->num_components, 32);
6510 
6511          /* See if we've selected this as a push constant candidate */
6512          brw_reg push_reg;
6513          for (int i = 0; i < 4; i++) {
6514             const struct brw_ubo_range *range = &s.prog_data->ubo_ranges[i];
6515             if (range->block == ubo_block &&
6516                 offset_256b >= range->start &&
6517                 end_256b <= range->start + range->length) {
6518 
6519                push_reg = brw_uniform_reg(UBO_START + i, dest.type);
6520                push_reg.offset = load_offset - 32 * range->start;
6521                break;
6522             }
6523          }
6524 
6525          if (push_reg.file != BAD_FILE) {
6526             for (unsigned i = 0; i < instr->num_components; i++) {
6527                bld.MOV(offset(dest, bld, i),
6528                        byte_offset(push_reg, i * type_size));
6529             }
6530             break;
6531          }
6532 
6533          s.prog_data->has_ubo_pull = true;
6534 
6535          const unsigned block_sz = 64; /* Fetch one cacheline at a time. */
6536          const fs_builder ubld = bld.exec_all().group(block_sz / 4, 0);
6537 
6538          for (unsigned c = 0; c < instr->num_components;) {
6539             const unsigned base = load_offset + c * type_size;
6540             /* Number of usable components in the next block-aligned load. */
6541             const unsigned count = MIN2(instr->num_components - c,
6542                                         (block_sz - base % block_sz) / type_size);
6543 
6544             const brw_reg packed_consts = ubld.vgrf(BRW_TYPE_UD);
6545             brw_reg srcs[PULL_UNIFORM_CONSTANT_SRCS];
6546             srcs[PULL_UNIFORM_CONSTANT_SRC_SURFACE]        = surface;
6547             srcs[PULL_UNIFORM_CONSTANT_SRC_SURFACE_HANDLE] = surface_handle;
6548             srcs[PULL_UNIFORM_CONSTANT_SRC_OFFSET]         = brw_imm_ud(base & ~(block_sz - 1));
6549             srcs[PULL_UNIFORM_CONSTANT_SRC_SIZE]           = brw_imm_ud(block_sz);
6550 
6551             ubld.emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD, packed_consts,
6552                       srcs, PULL_UNIFORM_CONSTANT_SRCS);
6553 
6554             const brw_reg consts =
6555                retype(byte_offset(packed_consts, base & (block_sz - 1)),
6556                       dest.type);
6557 
6558             for (unsigned d = 0; d < count; d++)
6559                bld.MOV(offset(dest, bld, c + d), component(consts, d));
6560 
6561             c += count;
6562          }
6563       }
6564       break;
6565    }
6566 
6567    case nir_intrinsic_store_output: {
6568       assert(nir_src_bit_size(instr->src[0]) == 32);
6569       brw_reg src = get_nir_src(ntb, instr->src[0]);
6570 
6571       unsigned store_offset = nir_src_as_uint(instr->src[1]);
6572       unsigned num_components = instr->num_components;
6573       unsigned first_component = nir_intrinsic_component(instr);
6574 
6575       brw_reg new_dest = retype(offset(s.outputs[instr->const_index[0]], bld,
6576                                       4 * store_offset), src.type);
6577 
6578       brw_combine_with_vec(bld, offset(new_dest, bld, first_component),
6579                            src, num_components);
6580       break;
6581    }
6582 
6583    case nir_intrinsic_get_ssbo_size: {
6584       assert(nir_src_num_components(instr->src[0]) == 1);
6585 
6586       /* A resinfo's sampler message is used to get the buffer size.  The
6587        * SIMD8's writeback message consists of four registers and SIMD16's
6588        * writeback message consists of 8 destination registers (two per each
6589        * component).  Because we are only interested on the first channel of
6590        * the first returned component, where resinfo returns the buffer size
6591        * for SURFTYPE_BUFFER, we can just use the SIMD8 variant regardless of
6592        * the dispatch width.
6593        */
6594       const fs_builder ubld = bld.exec_all().group(8 * reg_unit(devinfo), 0);
6595       brw_reg ret_payload = ubld.vgrf(BRW_TYPE_UD, 4);
6596 
6597       /* Set LOD = 0 */
6598       brw_reg src_payload = ubld.MOV(brw_imm_ud(0));
6599 
6600       brw_reg srcs[GET_BUFFER_SIZE_SRCS];
6601       srcs[get_nir_src_bindless(ntb, instr->src[0]) ?
6602            GET_BUFFER_SIZE_SRC_SURFACE_HANDLE :
6603            GET_BUFFER_SIZE_SRC_SURFACE] =
6604          get_nir_buffer_intrinsic_index(ntb, bld, instr);
6605       srcs[GET_BUFFER_SIZE_SRC_LOD] = src_payload;
6606       fs_inst *inst = ubld.emit(SHADER_OPCODE_GET_BUFFER_SIZE, ret_payload,
6607                                 srcs, GET_BUFFER_SIZE_SRCS);
6608       inst->header_size = 0;
6609       inst->mlen = reg_unit(devinfo);
6610       inst->size_written = 4 * REG_SIZE * reg_unit(devinfo);
6611 
6612       /* SKL PRM, vol07, 3D Media GPGPU Engine, Bounds Checking and Faulting:
6613        *
6614        * "Out-of-bounds checking is always performed at a DWord granularity. If
6615        * any part of the DWord is out-of-bounds then the whole DWord is
6616        * considered out-of-bounds."
6617        *
6618        * This implies that types with size smaller than 4-bytes need to be
6619        * padded if they don't complete the last dword of the buffer. But as we
6620        * need to maintain the original size we need to reverse the padding
6621        * calculation to return the correct size to know the number of elements
6622        * of an unsized array. As we stored in the last two bits of the surface
6623        * size the needed padding for the buffer, we calculate here the
6624        * original buffer_size reversing the surface_size calculation:
6625        *
6626        * surface_size = isl_align(buffer_size, 4) +
6627        *                (isl_align(buffer_size) - buffer_size)
6628        *
6629        * buffer_size = surface_size & ~3 - surface_size & 3
6630        */
6631       brw_reg size_padding  = ubld.AND(ret_payload, brw_imm_ud(3));
6632       brw_reg size_aligned4 = ubld.AND(ret_payload, brw_imm_ud(~3));
6633       brw_reg buffer_size   = ubld.ADD(size_aligned4, negate(size_padding));
6634 
6635       bld.MOV(retype(dest, ret_payload.type), component(buffer_size, 0));
6636       break;
6637    }
6638 
6639    case nir_intrinsic_load_subgroup_size:
6640       /* This should only happen for fragment shaders because every other case
6641        * is lowered in NIR so we can optimize on it.
6642        */
6643       assert(s.stage == MESA_SHADER_FRAGMENT);
6644       bld.MOV(retype(dest, BRW_TYPE_D), brw_imm_d(s.dispatch_width));
6645       break;
6646 
6647    case nir_intrinsic_load_subgroup_invocation:
6648       bld.MOV(retype(dest, BRW_TYPE_UD), bld.LOAD_SUBGROUP_INVOCATION());
6649       break;
6650 
6651    case nir_intrinsic_load_subgroup_eq_mask:
6652    case nir_intrinsic_load_subgroup_ge_mask:
6653    case nir_intrinsic_load_subgroup_gt_mask:
6654    case nir_intrinsic_load_subgroup_le_mask:
6655    case nir_intrinsic_load_subgroup_lt_mask:
6656       unreachable("not reached");
6657 
6658    case nir_intrinsic_ddx_fine:
6659       bld.emit(FS_OPCODE_DDX_FINE, retype(dest, BRW_TYPE_F),
6660                retype(get_nir_src(ntb, instr->src[0]), BRW_TYPE_F));
6661       break;
6662    case nir_intrinsic_ddx:
6663    case nir_intrinsic_ddx_coarse:
6664       bld.emit(FS_OPCODE_DDX_COARSE, retype(dest, BRW_TYPE_F),
6665                retype(get_nir_src(ntb, instr->src[0]), BRW_TYPE_F));
6666       break;
6667    case nir_intrinsic_ddy_fine:
6668       bld.emit(FS_OPCODE_DDY_FINE, retype(dest, BRW_TYPE_F),
6669                retype(get_nir_src(ntb, instr->src[0]), BRW_TYPE_F));
6670       break;
6671    case nir_intrinsic_ddy:
6672    case nir_intrinsic_ddy_coarse:
6673       bld.emit(FS_OPCODE_DDY_COARSE, retype(dest, BRW_TYPE_F),
6674                retype(get_nir_src(ntb, instr->src[0]), BRW_TYPE_F));
6675       break;
6676 
6677    case nir_intrinsic_quad_vote_any:
6678    case nir_intrinsic_quad_vote_all: {
6679       struct brw_reg flag = brw_flag_reg(0, 0);
6680       if (s.dispatch_width == 32)
6681          flag.type = BRW_TYPE_UD;
6682 
6683       brw_reg cond = get_nir_src(ntb, instr->src[0]);
6684 
6685       /* Before Xe2, we can use specialized predicates. */
6686       if (devinfo->ver < 20) {
6687          const bool any = instr->intrinsic == nir_intrinsic_quad_vote_any;
6688 
6689          /* The any/all predicates do not consider channel enables. To prevent
6690           * dead channels from affecting the result, we initialize the flag with
6691           * with the identity value for the logical operation.
6692           */
6693          const unsigned identity = any ? 0 : 0xFFFFFFFF;
6694          bld.exec_all().group(1, 0).MOV(flag, retype(brw_imm_ud(identity), flag.type));
6695 
6696          bld.CMP(bld.null_reg_ud(), cond, brw_imm_ud(0u), BRW_CONDITIONAL_NZ);
6697          bld.exec_all().MOV(retype(dest, BRW_TYPE_UD), brw_imm_ud(0));
6698 
6699          const enum brw_predicate pred = any ? BRW_PREDICATE_ALIGN1_ANY4H
6700                                              : BRW_PREDICATE_ALIGN1_ALL4H;
6701 
6702          fs_inst *mov = bld.MOV(retype(dest, BRW_TYPE_D), brw_imm_d(-1));
6703          set_predicate(pred, mov);
6704          break;
6705       }
6706 
6707       /* This code is going to manipulate the results of flag mask, so clear it to
6708        * avoid any residual value from disabled channels.
6709        */
6710       bld.exec_all().group(1, 0).MOV(flag, retype(brw_imm_ud(0), flag.type));
6711 
6712       /* Mask of invocations where condition is true, note that mask is
6713        * replicated to each invocation.
6714        */
6715       bld.CMP(bld.null_reg_ud(), cond, brw_imm_ud(0u), BRW_CONDITIONAL_NZ);
6716       brw_reg cond_mask = bld.vgrf(BRW_TYPE_UD);
6717       bld.MOV(cond_mask, flag);
6718 
6719       /* Mask of invocations in the quad, each invocation will get
6720        * all the bits set for their quad, i.e. invocations 0-3 will have
6721        * 0b...1111, invocations 4-7 will have 0b...11110000 and so on.
6722        */
6723       brw_reg invoc_ud = bld.vgrf(BRW_TYPE_UD);
6724       bld.MOV(invoc_ud, bld.LOAD_SUBGROUP_INVOCATION());
6725       brw_reg quad_mask =
6726          bld.SHL(brw_imm_ud(0xF), bld.AND(invoc_ud, brw_imm_ud(0xFFFFFFFC)));
6727 
6728       /* An invocation will have bits set for each quad that passes the
6729        * condition.  This is uniform among each quad.
6730        */
6731       brw_reg tmp = bld.AND(cond_mask, quad_mask);
6732 
6733       if (instr->intrinsic == nir_intrinsic_quad_vote_any) {
6734          bld.CMP(retype(dest, BRW_TYPE_UD), tmp, brw_imm_ud(0), BRW_CONDITIONAL_NZ);
6735       } else {
6736          assert(instr->intrinsic == nir_intrinsic_quad_vote_all);
6737 
6738          /* Filter out quad_mask to include only active channels. */
6739          brw_reg active = bld.vgrf(BRW_TYPE_UD);
6740          bld.exec_all().emit(SHADER_OPCODE_LOAD_LIVE_CHANNELS, active);
6741          bld.MOV(active, brw_reg(component(active, 0)));
6742          bld.AND(quad_mask, quad_mask, active);
6743 
6744          bld.CMP(retype(dest, BRW_TYPE_UD), tmp, quad_mask, BRW_CONDITIONAL_Z);
6745       }
6746 
6747       break;
6748    }
6749 
6750    case nir_intrinsic_vote_any: {
6751       const fs_builder ubld1 = bld.exec_all().group(1, 0);
6752 
6753       /* The any/all predicates do not consider channel enables. To prevent
6754        * dead channels from affecting the result, we initialize the flag with
6755        * with the identity value for the logical operation.
6756        */
6757       if (s.dispatch_width == 32) {
6758          /* For SIMD32, we use a UD type so we fill both f0.0 and f0.1. */
6759          ubld1.MOV(retype(brw_flag_reg(0, 0), BRW_TYPE_UD),
6760                    brw_imm_ud(0));
6761       } else {
6762          ubld1.MOV(brw_flag_reg(0, 0), brw_imm_uw(0));
6763       }
6764       bld.CMP(bld.null_reg_d(), get_nir_src(ntb, instr->src[0]), brw_imm_d(0), BRW_CONDITIONAL_NZ);
6765 
6766       /* For some reason, the any/all predicates don't work properly with
6767        * SIMD32.  In particular, it appears that a SEL with a QtrCtrl of 2H
6768        * doesn't read the correct subset of the flag register and you end up
6769        * getting garbage in the second half.  Work around this by using a pair
6770        * of 1-wide MOVs and scattering the result.
6771        */
6772       const fs_builder ubld = devinfo->ver >= 20 ? bld.exec_all() : ubld1;
6773       brw_reg res1 = ubld.MOV(brw_imm_d(0));
6774       set_predicate(devinfo->ver >= 20 ? XE2_PREDICATE_ANY :
6775                     s.dispatch_width == 8  ? BRW_PREDICATE_ALIGN1_ANY8H :
6776                     s.dispatch_width == 16 ? BRW_PREDICATE_ALIGN1_ANY16H :
6777                                              BRW_PREDICATE_ALIGN1_ANY32H,
6778                     ubld.MOV(res1, brw_imm_d(-1)));
6779 
6780       bld.MOV(retype(dest, BRW_TYPE_D), component(res1, 0));
6781       break;
6782    }
6783    case nir_intrinsic_vote_all: {
6784       const fs_builder ubld1 = bld.exec_all().group(1, 0);
6785 
6786       /* The any/all predicates do not consider channel enables. To prevent
6787        * dead channels from affecting the result, we initialize the flag with
6788        * with the identity value for the logical operation.
6789        */
6790       if (s.dispatch_width == 32) {
6791          /* For SIMD32, we use a UD type so we fill both f0.0 and f0.1. */
6792          ubld1.MOV(retype(brw_flag_reg(0, 0), BRW_TYPE_UD),
6793                    brw_imm_ud(0xffffffff));
6794       } else {
6795          ubld1.MOV(brw_flag_reg(0, 0), brw_imm_uw(0xffff));
6796       }
6797       bld.CMP(bld.null_reg_d(), get_nir_src(ntb, instr->src[0]), brw_imm_d(0), BRW_CONDITIONAL_NZ);
6798 
6799       /* For some reason, the any/all predicates don't work properly with
6800        * SIMD32.  In particular, it appears that a SEL with a QtrCtrl of 2H
6801        * doesn't read the correct subset of the flag register and you end up
6802        * getting garbage in the second half.  Work around this by using a pair
6803        * of 1-wide MOVs and scattering the result.
6804        */
6805       const fs_builder ubld = devinfo->ver >= 20 ? bld.exec_all() : ubld1;
6806       brw_reg res1 = ubld.MOV(brw_imm_d(0));
6807       set_predicate(devinfo->ver >= 20 ? XE2_PREDICATE_ALL :
6808                     s.dispatch_width == 8  ? BRW_PREDICATE_ALIGN1_ALL8H :
6809                     s.dispatch_width == 16 ? BRW_PREDICATE_ALIGN1_ALL16H :
6810                                              BRW_PREDICATE_ALIGN1_ALL32H,
6811                     ubld.MOV(res1, brw_imm_d(-1)));
6812 
6813       bld.MOV(retype(dest, BRW_TYPE_D), component(res1, 0));
6814       break;
6815    }
6816    case nir_intrinsic_vote_feq:
6817    case nir_intrinsic_vote_ieq: {
6818       brw_reg value = get_nir_src(ntb, instr->src[0]);
6819       if (instr->intrinsic == nir_intrinsic_vote_feq) {
6820          const unsigned bit_size = nir_src_bit_size(instr->src[0]);
6821          value.type = bit_size == 8 ? BRW_TYPE_B :
6822             brw_type_with_size(BRW_TYPE_F, bit_size);
6823       }
6824 
6825       brw_reg uniformized = bld.emit_uniformize(value);
6826       const fs_builder ubld1 = bld.exec_all().group(1, 0);
6827 
6828       /* The any/all predicates do not consider channel enables. To prevent
6829        * dead channels from affecting the result, we initialize the flag with
6830        * with the identity value for the logical operation.
6831        */
6832       if (s.dispatch_width == 32) {
6833          /* For SIMD32, we use a UD type so we fill both f0.0 and f0.1. */
6834          ubld1.MOV(retype(brw_flag_reg(0, 0), BRW_TYPE_UD),
6835                          brw_imm_ud(0xffffffff));
6836       } else {
6837          ubld1.MOV(brw_flag_reg(0, 0), brw_imm_uw(0xffff));
6838       }
6839       bld.CMP(bld.null_reg_d(), value, uniformized, BRW_CONDITIONAL_Z);
6840 
6841       /* For some reason, the any/all predicates don't work properly with
6842        * SIMD32.  In particular, it appears that a SEL with a QtrCtrl of 2H
6843        * doesn't read the correct subset of the flag register and you end up
6844        * getting garbage in the second half.  Work around this by using a pair
6845        * of 1-wide MOVs and scattering the result.
6846        */
6847       const fs_builder ubld = devinfo->ver >= 20 ? bld.exec_all() : ubld1;
6848       brw_reg res1 = ubld.MOV(brw_imm_d(0));
6849       set_predicate(devinfo->ver >= 20 ? XE2_PREDICATE_ALL :
6850                     s.dispatch_width == 8  ? BRW_PREDICATE_ALIGN1_ALL8H :
6851                     s.dispatch_width == 16 ? BRW_PREDICATE_ALIGN1_ALL16H :
6852                                              BRW_PREDICATE_ALIGN1_ALL32H,
6853                     ubld.MOV(res1, brw_imm_d(-1)));
6854 
6855       bld.MOV(retype(dest, BRW_TYPE_D), component(res1, 0));
6856       break;
6857    }
6858 
6859    case nir_intrinsic_ballot: {
6860       if (instr->def.bit_size > 32) {
6861          dest.type = BRW_TYPE_UQ;
6862       } else {
6863          dest.type = BRW_TYPE_UD;
6864       }
6865 
6866       /* Implement a fast-path for ballot(true). */
6867       if (nir_src_is_const(instr->src[0]) &&
6868           nir_src_as_bool(instr->src[0])) {
6869          brw_reg tmp = bld.vgrf(BRW_TYPE_UD);
6870          bld.exec_all().emit(SHADER_OPCODE_LOAD_LIVE_CHANNELS, tmp);
6871          bld.MOV(dest, brw_reg(component(tmp, 0)));
6872          break;
6873       }
6874 
6875       const brw_reg value = retype(get_nir_src(ntb, instr->src[0]),
6876                                   BRW_TYPE_UD);
6877       struct brw_reg flag = brw_flag_reg(0, 0);
6878 
6879       if (s.dispatch_width == 32)
6880          flag.type = BRW_TYPE_UD;
6881 
6882       bld.exec_all().group(1, 0).MOV(flag, retype(brw_imm_ud(0u), flag.type));
6883       bld.CMP(bld.null_reg_ud(), value, brw_imm_ud(0u), BRW_CONDITIONAL_NZ);
6884       bld.MOV(dest, flag);
6885       break;
6886    }
6887 
6888    case nir_intrinsic_read_invocation: {
6889       const brw_reg value = get_nir_src(ntb, instr->src[0]);
6890       const brw_reg invocation = get_nir_src_imm(ntb, instr->src[1]);
6891 
6892       if (invocation.file == IMM) {
6893          unsigned i = invocation.ud & (bld.dispatch_width() - 1);
6894          bld.MOV(retype(dest, value.type), component(value, i));
6895          break;
6896       }
6897 
6898       brw_reg tmp = bld.vgrf(value.type);
6899 
6900       /* When for some reason the subgroup_size picked by NIR is larger than
6901        * the dispatch size picked by the backend (this could happen in RT,
6902        * FS), bound the invocation to the dispatch size.
6903        */
6904       brw_reg bound_invocation = retype(invocation, BRW_TYPE_UD);
6905       if (s.api_subgroup_size == 0 ||
6906           bld.dispatch_width() < s.api_subgroup_size) {
6907          bound_invocation =
6908             bld.AND(bound_invocation, brw_imm_ud(s.dispatch_width - 1));
6909       }
6910       bld.exec_all().emit(SHADER_OPCODE_BROADCAST, tmp, value,
6911                           bld.emit_uniformize(bound_invocation));
6912 
6913       bld.MOV(retype(dest, value.type), brw_reg(component(tmp, 0)));
6914       break;
6915    }
6916 
6917    case nir_intrinsic_read_first_invocation: {
6918       const brw_reg value = get_nir_src(ntb, instr->src[0]);
6919       bld.MOV(retype(dest, value.type), bld.emit_uniformize(value));
6920       break;
6921    }
6922 
6923    case nir_intrinsic_shuffle: {
6924       const brw_reg value = get_nir_src(ntb, instr->src[0]);
6925       const brw_reg index = get_nir_src(ntb, instr->src[1]);
6926 
6927       bld.emit(SHADER_OPCODE_SHUFFLE, retype(dest, value.type), value, index);
6928       break;
6929    }
6930 
6931    case nir_intrinsic_first_invocation: {
6932       brw_reg tmp = bld.vgrf(BRW_TYPE_UD);
6933       bld.exec_all().emit(SHADER_OPCODE_FIND_LIVE_CHANNEL, tmp);
6934       bld.MOV(retype(dest, BRW_TYPE_UD),
6935               brw_reg(component(tmp, 0)));
6936       break;
6937    }
6938 
6939    case nir_intrinsic_last_invocation: {
6940       brw_reg tmp = bld.vgrf(BRW_TYPE_UD);
6941       bld.exec_all().emit(SHADER_OPCODE_FIND_LAST_LIVE_CHANNEL, tmp);
6942       bld.MOV(retype(dest, BRW_TYPE_UD),
6943               brw_reg(component(tmp, 0)));
6944       break;
6945    }
6946 
6947    case nir_intrinsic_quad_broadcast: {
6948       const brw_reg value = get_nir_src(ntb, instr->src[0]);
6949       const unsigned index = nir_src_as_uint(instr->src[1]);
6950 
6951       bld.emit(SHADER_OPCODE_CLUSTER_BROADCAST, retype(dest, value.type),
6952                value, brw_imm_ud(index), brw_imm_ud(4));
6953       break;
6954    }
6955 
6956    case nir_intrinsic_quad_swap_horizontal: {
6957       const brw_reg value = get_nir_src(ntb, instr->src[0]);
6958       const brw_reg tmp = bld.vgrf(value.type);
6959 
6960       const fs_builder ubld = bld.exec_all().group(s.dispatch_width / 2, 0);
6961 
6962       const brw_reg src_left = horiz_stride(value, 2);
6963       const brw_reg src_right = horiz_stride(horiz_offset(value, 1), 2);
6964       const brw_reg tmp_left = horiz_stride(tmp, 2);
6965       const brw_reg tmp_right = horiz_stride(horiz_offset(tmp, 1), 2);
6966 
6967       ubld.MOV(tmp_left, src_right);
6968       ubld.MOV(tmp_right, src_left);
6969 
6970       bld.MOV(retype(dest, value.type), tmp);
6971       break;
6972    }
6973 
6974    case nir_intrinsic_quad_swap_vertical: {
6975       const brw_reg value = get_nir_src(ntb, instr->src[0]);
6976       if (nir_src_bit_size(instr->src[0]) == 32) {
6977          /* For 32-bit, we can use a SIMD4x2 instruction to do this easily */
6978          const brw_reg tmp = bld.vgrf(value.type);
6979          const fs_builder ubld = bld.exec_all();
6980          ubld.emit(SHADER_OPCODE_QUAD_SWIZZLE, tmp, value,
6981                    brw_imm_ud(BRW_SWIZZLE4(2,3,0,1)));
6982          bld.MOV(retype(dest, value.type), tmp);
6983       } else {
6984          /* For larger data types, we have to either emit dispatch_width many
6985           * MOVs or else fall back to doing indirects.
6986           */
6987          brw_reg idx = bld.vgrf(BRW_TYPE_W);
6988          bld.XOR(idx, bld.LOAD_SUBGROUP_INVOCATION(), brw_imm_w(0x2));
6989          bld.emit(SHADER_OPCODE_SHUFFLE, retype(dest, value.type), value, idx);
6990       }
6991       break;
6992    }
6993 
6994    case nir_intrinsic_quad_swap_diagonal: {
6995       const brw_reg value = get_nir_src(ntb, instr->src[0]);
6996       if (nir_src_bit_size(instr->src[0]) == 32) {
6997          /* For 32-bit, we can use a SIMD4x2 instruction to do this easily */
6998          const brw_reg tmp = bld.vgrf(value.type);
6999          const fs_builder ubld = bld.exec_all();
7000          ubld.emit(SHADER_OPCODE_QUAD_SWIZZLE, tmp, value,
7001                    brw_imm_ud(BRW_SWIZZLE4(3,2,1,0)));
7002          bld.MOV(retype(dest, value.type), tmp);
7003       } else {
7004          /* For larger data types, we have to either emit dispatch_width many
7005           * MOVs or else fall back to doing indirects.
7006           */
7007          brw_reg idx = bld.vgrf(BRW_TYPE_W);
7008          bld.XOR(idx, bld.LOAD_SUBGROUP_INVOCATION(), brw_imm_w(0x3));
7009          bld.emit(SHADER_OPCODE_SHUFFLE, retype(dest, value.type), value, idx);
7010       }
7011       break;
7012    }
7013 
7014    case nir_intrinsic_reduce: {
7015       brw_reg src = get_nir_src(ntb, instr->src[0]);
7016       nir_op redop = (nir_op)nir_intrinsic_reduction_op(instr);
7017       unsigned cluster_size = nir_intrinsic_cluster_size(instr);
7018       if (cluster_size == 0 || cluster_size > s.dispatch_width)
7019          cluster_size = s.dispatch_width;
7020 
7021       /* Figure out the source type */
7022       src.type = brw_type_for_nir_type(devinfo,
7023          (nir_alu_type)(nir_op_infos[redop].input_types[0] |
7024                         nir_src_bit_size(instr->src[0])));
7025 
7026       brw_reg identity = brw_nir_reduction_op_identity(bld, redop, src.type);
7027       opcode brw_op = brw_op_for_nir_reduction_op(redop);
7028       brw_conditional_mod cond_mod = brw_cond_mod_for_nir_reduction_op(redop);
7029 
7030       /* Set up a register for all of our scratching around and initialize it
7031        * to reduction operation's identity value.
7032        */
7033       brw_reg scan = bld.vgrf(src.type);
7034       bld.exec_all().emit(SHADER_OPCODE_SEL_EXEC, scan, src, identity);
7035 
7036       bld.emit_scan(brw_op, scan, cluster_size, cond_mod);
7037 
7038       dest.type = src.type;
7039       if (cluster_size * brw_type_size_bytes(src.type) >= REG_SIZE * 2) {
7040          /* In this case, CLUSTER_BROADCAST instruction isn't needed because
7041           * the distance between clusters is at least 2 GRFs.  In this case,
7042           * we don't need the weird striding of the CLUSTER_BROADCAST
7043           * instruction and can just do regular MOVs.
7044           */
7045          assert((cluster_size * brw_type_size_bytes(src.type)) % (REG_SIZE * 2) == 0);
7046          const unsigned groups =
7047             (s.dispatch_width * brw_type_size_bytes(src.type)) / (REG_SIZE * 2);
7048          const unsigned group_size = s.dispatch_width / groups;
7049          for (unsigned i = 0; i < groups; i++) {
7050             const unsigned cluster = (i * group_size) / cluster_size;
7051             const unsigned comp = cluster * cluster_size + (cluster_size - 1);
7052             bld.group(group_size, i).MOV(horiz_offset(dest, i * group_size),
7053                                          component(scan, comp));
7054          }
7055       } else {
7056          bld.emit(SHADER_OPCODE_CLUSTER_BROADCAST, dest, scan,
7057                   brw_imm_ud(cluster_size - 1), brw_imm_ud(cluster_size));
7058       }
7059       break;
7060    }
7061 
7062    case nir_intrinsic_inclusive_scan:
7063    case nir_intrinsic_exclusive_scan: {
7064       brw_reg src = get_nir_src(ntb, instr->src[0]);
7065       nir_op redop = (nir_op)nir_intrinsic_reduction_op(instr);
7066 
7067       /* Figure out the source type */
7068       src.type = brw_type_for_nir_type(devinfo,
7069          (nir_alu_type)(nir_op_infos[redop].input_types[0] |
7070                         nir_src_bit_size(instr->src[0])));
7071 
7072       brw_reg identity = brw_nir_reduction_op_identity(bld, redop, src.type);
7073       opcode brw_op = brw_op_for_nir_reduction_op(redop);
7074       brw_conditional_mod cond_mod = brw_cond_mod_for_nir_reduction_op(redop);
7075 
7076       /* Set up a register for all of our scratching around and initialize it
7077        * to reduction operation's identity value.
7078        */
7079       brw_reg scan = bld.vgrf(src.type);
7080       const fs_builder allbld = bld.exec_all();
7081       allbld.emit(SHADER_OPCODE_SEL_EXEC, scan, src, identity);
7082 
7083       if (instr->intrinsic == nir_intrinsic_exclusive_scan) {
7084          /* Exclusive scan is a bit harder because we have to do an annoying
7085           * shift of the contents before we can begin.  To make things worse,
7086           * we can't do this with a normal stride; we have to use indirects.
7087           */
7088          brw_reg shifted = bld.vgrf(src.type);
7089          brw_reg idx = bld.vgrf(BRW_TYPE_W);
7090          allbld.ADD(idx, bld.LOAD_SUBGROUP_INVOCATION(), brw_imm_w(-1));
7091          allbld.emit(SHADER_OPCODE_SHUFFLE, shifted, scan, idx);
7092          allbld.group(1, 0).MOV(horiz_offset(shifted, 0), identity);
7093          scan = shifted;
7094       }
7095 
7096       bld.emit_scan(brw_op, scan, s.dispatch_width, cond_mod);
7097 
7098       bld.MOV(retype(dest, src.type), scan);
7099       break;
7100    }
7101 
7102    case nir_intrinsic_load_topology_id_intel: {
7103       /* These move around basically every hardware generation, so don't
7104        * do any unbounded checks and fail if the platform hasn't explicitly
7105        * been enabled here.
7106        */
7107       assert(devinfo->ver >= 12 && devinfo->ver <= 20);
7108 
7109       /* Here is what the layout of SR0 looks like on Gfx12
7110        * https://gfxspecs.intel.com/Predator/Home/Index/47256
7111        *   [13:11] : Slice ID.
7112        *   [10:9]  : Dual-SubSlice ID
7113        *   [8]     : SubSlice ID
7114        *   [7]     : EUID[2] (aka EU Row ID)
7115        *   [6]     : Reserved
7116        *   [5:4]   : EUID[1:0]
7117        *   [2:0]   : Thread ID
7118        *
7119        * Xe2: Engine 3D and GPGPU Programs, EU Overview, Registers and
7120        * Register Regions, ARF Registers, State Register,
7121        * https://gfxspecs.intel.com/Predator/Home/Index/56623
7122        *   [15:11] : Slice ID.
7123        *   [9:8]   : SubSlice ID
7124        *   [6:4]   : EUID
7125        *   [2:0]   : Thread ID
7126        */
7127       brw_reg raw_id = bld.vgrf(BRW_TYPE_UD);
7128       bld.UNDEF(raw_id);
7129       bld.emit(SHADER_OPCODE_READ_ARCH_REG, raw_id, retype(brw_sr0_reg(0),
7130                                                            BRW_TYPE_UD));
7131       switch (nir_intrinsic_base(instr)) {
7132       case BRW_TOPOLOGY_ID_DSS:
7133          if (devinfo->ver >= 20) {
7134             /* Xe2+: 3D and GPGPU Programs, Shared Functions, Ray Tracing:
7135              * https://gfxspecs.intel.com/Predator/Home/Index/56936
7136              *
7137              * Note: DSSID in all formulas below is a logical identifier of an
7138              * XeCore (a value that goes from 0 to (number_of_slices *
7139              * number_of_XeCores_per_slice -1). SW can get this value from
7140              * either:
7141              *
7142              *  - Message Control Register LogicalSSID field (only in shaders
7143              *    eligible for Mid-Thread Preemption).
7144              *  - Calculated based of State Register with the following formula:
7145              *    DSSID = StateRegister.SliceID * GT_ARCH_SS_PER_SLICE +
7146              *    StateRRegister.SubSliceID where GT_SS_PER_SLICE is an
7147              *    architectural parameter defined per product SKU.
7148              *
7149              * We are using the state register to calculate the DSSID.
7150              */
7151             brw_reg slice_id =
7152                bld.SHR(bld.AND(raw_id, brw_imm_ud(INTEL_MASK(15, 11))),
7153                        brw_imm_ud(11));
7154 
7155             /* Assert that max subslices covers at least 2 bits that we use for
7156              * subslices.
7157              */
7158             unsigned slice_stride = devinfo->max_subslices_per_slice;
7159             assert(slice_stride >= (1 << 2));
7160             brw_reg subslice_id =
7161                bld.SHR(bld.AND(raw_id, brw_imm_ud(INTEL_MASK(9, 8))),
7162                        brw_imm_ud(8));
7163             bld.ADD(retype(dest, BRW_TYPE_UD),
7164                     bld.MUL(slice_id, brw_imm_ud(slice_stride)), subslice_id);
7165          } else {
7166             /* Get rid of anything below dualsubslice */
7167             bld.SHR(retype(dest, BRW_TYPE_UD),
7168                     bld.AND(raw_id, brw_imm_ud(0x3fff)), brw_imm_ud(9));
7169          }
7170          break;
7171       case BRW_TOPOLOGY_ID_EU_THREAD_SIMD: {
7172          s.limit_dispatch_width(16, "Topology helper for Ray queries, "
7173                               "not supported in SIMD32 mode.");
7174          brw_reg dst = retype(dest, BRW_TYPE_UD);
7175          brw_reg eu;
7176 
7177          if (devinfo->ver >= 20) {
7178             /* Xe2+: Graphics Engine, 3D and GPGPU Programs, Shared Functions
7179              * Ray Tracing,
7180              * https://gfxspecs.intel.com/Predator/Home/Index/56936
7181              *
7182              * SyncStackID = (EUID[2:0] <<  8) | (ThreadID[2:0] << 4) |
7183              *               SIMDLaneID[3:0];
7184              *
7185              * This section just deals with the EUID part.
7186              *
7187              * The 3bit EU[2:0] we need to build for ray query memory addresses
7188              * computations is a bit odd :
7189              *
7190              *   EU[2:0] = raw_id[6:4] (identified as EUID[2:0])
7191              */
7192             eu = bld.SHL(bld.AND(raw_id, brw_imm_ud(INTEL_MASK(6, 4))),
7193                          brw_imm_ud(4));
7194          } else {
7195             /* EU[3:0] << 7
7196              *
7197              * The 4bit EU[3:0] we need to build for ray query memory addresses
7198              * computations is a bit odd :
7199              *
7200              *   EU[1:0] = raw_id[5:4] (identified as EUID[1:0])
7201              *   EU[2]   = raw_id[8]   (identified as SubSlice ID)
7202              *   EU[3]   = raw_id[7]   (identified as EUID[2] or Row ID)
7203              */
7204             brw_reg raw5_4 = bld.AND(raw_id, brw_imm_ud(INTEL_MASK(5, 4)));
7205             brw_reg raw7   = bld.AND(raw_id, brw_imm_ud(INTEL_MASK(7, 7)));
7206             brw_reg raw8   = bld.AND(raw_id, brw_imm_ud(INTEL_MASK(8, 8)));
7207             eu = bld.OR(bld.SHL(raw5_4, brw_imm_ud(3)),
7208                         bld.OR(bld.SHL(raw7, brw_imm_ud(3)),
7209                                bld.SHL(raw8, brw_imm_ud(1))));
7210          }
7211 
7212          /* ThreadID[2:0] << 4 (ThreadID comes from raw_id[2:0]) */
7213          brw_reg tid =
7214             bld.SHL(bld.AND(raw_id, brw_imm_ud(INTEL_MASK(2, 0))),
7215                     brw_imm_ud(4));
7216 
7217          /* LaneID[0:3] << 0 (Use subgroup invocation) */
7218          assert(bld.dispatch_width() <= 16); /* Limit to 4 bits */
7219          bld.ADD(dst, bld.OR(eu, tid), bld.LOAD_SUBGROUP_INVOCATION());
7220          break;
7221       }
7222       default:
7223          unreachable("Invalid topology id type");
7224       }
7225       break;
7226    }
7227 
7228    case nir_intrinsic_load_btd_stack_id_intel:
7229       if (s.stage == MESA_SHADER_COMPUTE) {
7230          assert(brw_cs_prog_data(s.prog_data)->uses_btd_stack_ids);
7231       } else {
7232          assert(brw_shader_stage_is_bindless(s.stage));
7233       }
7234       /* Stack IDs are always in R1 regardless of whether we're coming from a
7235        * bindless shader or a regular compute shader.
7236        */
7237       bld.MOV(retype(dest, BRW_TYPE_UD),
7238               retype(brw_vec8_grf(1 * reg_unit(devinfo), 0), BRW_TYPE_UW));
7239       break;
7240 
7241    case nir_intrinsic_btd_spawn_intel:
7242       if (s.stage == MESA_SHADER_COMPUTE) {
7243          assert(brw_cs_prog_data(s.prog_data)->uses_btd_stack_ids);
7244       } else {
7245          assert(brw_shader_stage_is_bindless(s.stage));
7246       }
7247       /* Make sure all the pointers to resume shaders have landed where other
7248        * threads can see them.
7249        */
7250       emit_rt_lsc_fence(bld, LSC_FENCE_LOCAL, LSC_FLUSH_TYPE_NONE);
7251 
7252       bld.emit(SHADER_OPCODE_BTD_SPAWN_LOGICAL, bld.null_reg_ud(),
7253                bld.emit_uniformize(get_nir_src(ntb, instr->src[0])),
7254                get_nir_src(ntb, instr->src[1]));
7255       break;
7256 
7257    case nir_intrinsic_btd_retire_intel:
7258       if (s.stage == MESA_SHADER_COMPUTE) {
7259          assert(brw_cs_prog_data(s.prog_data)->uses_btd_stack_ids);
7260       } else {
7261          assert(brw_shader_stage_is_bindless(s.stage));
7262       }
7263       /* Make sure all the pointers to resume shaders have landed where other
7264        * threads can see them.
7265        */
7266       emit_rt_lsc_fence(bld, LSC_FENCE_LOCAL, LSC_FLUSH_TYPE_NONE);
7267       bld.emit(SHADER_OPCODE_BTD_RETIRE_LOGICAL);
7268       break;
7269 
7270    case nir_intrinsic_trace_ray_intel: {
7271       const bool synchronous = nir_intrinsic_synchronous(instr);
7272       assert(brw_shader_stage_is_bindless(s.stage) || synchronous);
7273 
7274       /* Make sure all the previous RT structure writes are visible to the RT
7275        * fixed function within the DSS, as well as stack pointers to resume
7276        * shaders.
7277        */
7278       emit_rt_lsc_fence(bld, LSC_FENCE_LOCAL, LSC_FLUSH_TYPE_NONE);
7279 
7280       brw_reg srcs[RT_LOGICAL_NUM_SRCS];
7281 
7282       brw_reg globals = get_nir_src(ntb, instr->src[0]);
7283       srcs[RT_LOGICAL_SRC_GLOBALS] = bld.emit_uniformize(globals);
7284       srcs[RT_LOGICAL_SRC_BVH_LEVEL] = get_nir_src(ntb, instr->src[1]);
7285       srcs[RT_LOGICAL_SRC_TRACE_RAY_CONTROL] = get_nir_src(ntb, instr->src[2]);
7286       srcs[RT_LOGICAL_SRC_SYNCHRONOUS] = brw_imm_ud(synchronous);
7287 
7288       /* Bspec 57508: Structure_SIMD16TraceRayMessage:: RayQuery Enable
7289        *
7290        *    "When this bit is set in the header, Trace Ray Message behaves like
7291        *    a Ray Query. This message requires a write-back message indicating
7292        *    RayQuery for all valid Rays (SIMD lanes) have completed."
7293        */
7294       brw_reg dst = (devinfo->ver >= 20 && synchronous) ?
7295                     bld.vgrf(BRW_TYPE_UD) :
7296                     bld.null_reg_ud();
7297 
7298       bld.emit(RT_OPCODE_TRACE_RAY_LOGICAL, dst, srcs, RT_LOGICAL_NUM_SRCS);
7299 
7300       /* There is no actual value to use in the destination register of the
7301        * synchronous trace instruction. All of the communication with the HW
7302        * unit happens through memory reads/writes. So to ensure that the
7303        * operation has completed before we go read the results in memory, we
7304        * need a barrier followed by an invalidate before accessing memory.
7305        */
7306       if (synchronous) {
7307          bld.SYNC(TGL_SYNC_ALLWR);
7308          emit_rt_lsc_fence(bld, LSC_FENCE_LOCAL, LSC_FLUSH_TYPE_INVALIDATE);
7309       }
7310       break;
7311    }
7312 
7313    default:
7314 #ifndef NDEBUG
7315       assert(instr->intrinsic < nir_num_intrinsics);
7316       fprintf(stderr, "intrinsic: %s\n", nir_intrinsic_infos[instr->intrinsic].name);
7317 #endif
7318       unreachable("unknown intrinsic");
7319    }
7320 }
7321 
7322 static enum lsc_data_size
lsc_bits_to_data_size(unsigned bit_size)7323 lsc_bits_to_data_size(unsigned bit_size)
7324 {
7325    switch (bit_size / 8) {
7326    case 1:  return LSC_DATA_SIZE_D8U32;
7327    case 2:  return LSC_DATA_SIZE_D16U32;
7328    case 4:  return LSC_DATA_SIZE_D32;
7329    case 8:  return LSC_DATA_SIZE_D64;
7330    default:
7331       unreachable("Unsupported data size.");
7332    }
7333 }
7334 
7335 static void
fs_nir_emit_memory_access(nir_to_brw_state & ntb,const fs_builder & bld,nir_intrinsic_instr * instr)7336 fs_nir_emit_memory_access(nir_to_brw_state &ntb,
7337                           const fs_builder &bld,
7338                           nir_intrinsic_instr *instr)
7339 {
7340    const intel_device_info *devinfo = ntb.devinfo;
7341    fs_visitor &s = ntb.s;
7342 
7343    brw_reg srcs[MEMORY_LOGICAL_NUM_SRCS];
7344 
7345    /* Start with some default values for most cases */
7346 
7347    enum lsc_opcode op = lsc_op_for_nir_intrinsic(instr);
7348    const bool is_store = !nir_intrinsic_infos[instr->intrinsic].has_dest;
7349    const bool is_atomic = lsc_opcode_is_atomic(op);
7350    const bool is_load = !is_store && !is_atomic;
7351    const bool include_helpers = nir_intrinsic_has_access(instr) &&
7352       (nir_intrinsic_access(instr) & ACCESS_INCLUDE_HELPERS);
7353    const unsigned align =
7354       nir_intrinsic_has_align(instr) ? nir_intrinsic_align(instr) : 0;
7355    bool no_mask_handle = false;
7356    int data_src = -1;
7357 
7358    srcs[MEMORY_LOGICAL_OPCODE] = brw_imm_ud(op);
7359    /* BINDING_TYPE, BINDING, and ADDRESS are handled in the switch */
7360    srcs[MEMORY_LOGICAL_COORD_COMPONENTS] = brw_imm_ud(1);
7361    srcs[MEMORY_LOGICAL_ALIGNMENT] = brw_imm_ud(align);
7362    /* DATA_SIZE and CHANNELS are handled below the switch */
7363    srcs[MEMORY_LOGICAL_FLAGS] =
7364       brw_imm_ud(include_helpers ? MEMORY_FLAG_INCLUDE_HELPERS : 0);
7365    /* DATA0 and DATA1 are handled below */
7366 
7367    switch (instr->intrinsic) {
7368    case nir_intrinsic_bindless_image_load:
7369    case nir_intrinsic_bindless_image_store:
7370    case nir_intrinsic_bindless_image_atomic:
7371    case nir_intrinsic_bindless_image_atomic_swap:
7372       srcs[MEMORY_LOGICAL_BINDING_TYPE] = brw_imm_ud(LSC_ADDR_SURFTYPE_BSS);
7373       FALLTHROUGH;
7374    case nir_intrinsic_image_load:
7375    case nir_intrinsic_image_store:
7376    case nir_intrinsic_image_atomic:
7377    case nir_intrinsic_image_atomic_swap:
7378       srcs[MEMORY_LOGICAL_MODE] = brw_imm_ud(MEMORY_MODE_TYPED);
7379       srcs[MEMORY_LOGICAL_BINDING] =
7380          get_nir_image_intrinsic_image(ntb, bld, instr);
7381 
7382       if (srcs[MEMORY_LOGICAL_BINDING_TYPE].file == BAD_FILE)
7383          srcs[MEMORY_LOGICAL_BINDING_TYPE] = brw_imm_ud(LSC_ADDR_SURFTYPE_BTI);
7384 
7385       srcs[MEMORY_LOGICAL_ADDRESS] = get_nir_src(ntb, instr->src[1]);
7386       srcs[MEMORY_LOGICAL_COORD_COMPONENTS] =
7387          brw_imm_ud(nir_image_intrinsic_coord_components(instr));
7388 
7389       data_src = 3;
7390       break;
7391 
7392    case nir_intrinsic_load_ubo_uniform_block_intel:
7393    case nir_intrinsic_load_ssbo:
7394    case nir_intrinsic_store_ssbo:
7395    case nir_intrinsic_ssbo_atomic:
7396    case nir_intrinsic_ssbo_atomic_swap:
7397    case nir_intrinsic_load_ssbo_block_intel:
7398    case nir_intrinsic_store_ssbo_block_intel:
7399    case nir_intrinsic_load_ssbo_uniform_block_intel:
7400       srcs[MEMORY_LOGICAL_MODE] = brw_imm_ud(MEMORY_MODE_UNTYPED);
7401       srcs[MEMORY_LOGICAL_BINDING_TYPE] =
7402          brw_imm_ud(get_nir_src_bindless(ntb, instr->src[is_store ? 1 : 0]) ?
7403                     LSC_ADDR_SURFTYPE_BSS : LSC_ADDR_SURFTYPE_BTI);
7404       srcs[MEMORY_LOGICAL_BINDING] =
7405          get_nir_buffer_intrinsic_index(ntb, bld, instr, &no_mask_handle);
7406       srcs[MEMORY_LOGICAL_ADDRESS] =
7407          get_nir_src(ntb, instr->src[is_store ? 2 : 1]);
7408 
7409       data_src = is_atomic ? 2 : 0;
7410       break;
7411    case nir_intrinsic_load_shared:
7412    case nir_intrinsic_store_shared:
7413    case nir_intrinsic_shared_atomic:
7414    case nir_intrinsic_shared_atomic_swap:
7415    case nir_intrinsic_load_shared_block_intel:
7416    case nir_intrinsic_store_shared_block_intel:
7417    case nir_intrinsic_load_shared_uniform_block_intel: {
7418       srcs[MEMORY_LOGICAL_MODE] = brw_imm_ud(MEMORY_MODE_SHARED_LOCAL);
7419       srcs[MEMORY_LOGICAL_BINDING_TYPE] = brw_imm_ud(LSC_ADDR_SURFTYPE_FLAT);
7420 
7421       const nir_src &nir_src = instr->src[is_store ? 1 : 0];
7422 
7423       srcs[MEMORY_LOGICAL_ADDRESS] = nir_src_is_const(nir_src) ?
7424          brw_imm_ud(nir_intrinsic_base(instr) + nir_src_as_uint(nir_src)) :
7425          bld.ADD(retype(get_nir_src(ntb, nir_src), BRW_TYPE_UD),
7426                  brw_imm_ud(nir_intrinsic_base(instr)));
7427 
7428       data_src = is_atomic ? 1 : 0;
7429       no_mask_handle = true;
7430       break;
7431    }
7432    case nir_intrinsic_load_scratch:
7433    case nir_intrinsic_store_scratch: {
7434       srcs[MEMORY_LOGICAL_MODE] = brw_imm_ud(MEMORY_MODE_SCRATCH);
7435 
7436       const nir_src &addr = instr->src[is_store ? 1 : 0];
7437 
7438       if (devinfo->verx10 >= 125) {
7439          srcs[MEMORY_LOGICAL_BINDING_TYPE] = brw_imm_ud(LSC_ADDR_SURFTYPE_SS);
7440 
7441          const fs_builder ubld = bld.exec_all().group(1, 0);
7442          brw_reg bind = component(ubld.vgrf(BRW_TYPE_UD), 0);
7443          ubld.AND(bind, retype(brw_vec1_grf(0, 5), BRW_TYPE_UD),
7444                         brw_imm_ud(INTEL_MASK(31, 10)));
7445          if (devinfo->ver >= 20)
7446             bind = component(ubld.SHR(bind, brw_imm_ud(4)), 0);
7447 
7448          srcs[MEMORY_LOGICAL_BINDING] = bind;
7449          srcs[MEMORY_LOGICAL_ADDRESS] =
7450             swizzle_nir_scratch_addr(ntb, bld, addr, false);
7451       } else {
7452          unsigned bit_size =
7453             is_store ? nir_src_bit_size(instr->src[0]) : instr->def.bit_size;
7454          bool dword_aligned = align >= 4 && bit_size == 32;
7455          srcs[MEMORY_LOGICAL_BINDING_TYPE] =
7456             brw_imm_ud(LSC_ADDR_SURFTYPE_FLAT);
7457          srcs[MEMORY_LOGICAL_ADDRESS] =
7458             swizzle_nir_scratch_addr(ntb, bld, addr, dword_aligned);
7459       }
7460 
7461       if (is_store)
7462          s.shader_stats.spill_count += DIV_ROUND_UP(s.dispatch_width, 16);
7463       else
7464          s.shader_stats.fill_count += DIV_ROUND_UP(s.dispatch_width, 16);
7465 
7466       data_src = 0;
7467       break;
7468    }
7469 
7470    case nir_intrinsic_load_global_constant_uniform_block_intel:
7471       no_mask_handle =
7472          ntb.uniform_values[instr->src[0].ssa->index].file != BAD_FILE;
7473       FALLTHROUGH;
7474    case nir_intrinsic_load_global:
7475    case nir_intrinsic_load_global_constant:
7476    case nir_intrinsic_store_global:
7477    case nir_intrinsic_global_atomic:
7478    case nir_intrinsic_global_atomic_swap:
7479    case nir_intrinsic_load_global_block_intel:
7480    case nir_intrinsic_store_global_block_intel:
7481       srcs[MEMORY_LOGICAL_MODE] = brw_imm_ud(MEMORY_MODE_UNTYPED);
7482       srcs[MEMORY_LOGICAL_BINDING_TYPE] = brw_imm_ud(LSC_ADDR_SURFTYPE_FLAT);
7483       srcs[MEMORY_LOGICAL_ADDRESS] = get_nir_src(ntb, instr->src[is_store ? 1 : 0]);
7484 
7485       data_src = is_atomic ? 1 : 0;
7486       break;
7487 
7488    default:
7489       unreachable("unknown memory intrinsic");
7490    }
7491 
7492    unsigned components = is_store ? instr->src[data_src].ssa->num_components
7493                                   : instr->def.num_components;
7494    if (components == 0)
7495       components = instr->num_components;
7496 
7497    srcs[MEMORY_LOGICAL_COMPONENTS] = brw_imm_ud(components);
7498 
7499    const unsigned nir_bit_size =
7500       is_store ? instr->src[data_src].ssa->bit_size : instr->def.bit_size;
7501    enum lsc_data_size data_size = lsc_bits_to_data_size(nir_bit_size);
7502    uint32_t data_bit_size = lsc_data_size_bytes(data_size) * 8;
7503 
7504    srcs[MEMORY_LOGICAL_DATA_SIZE] = brw_imm_ud(data_size);
7505 
7506    const brw_reg_type data_type =
7507       brw_type_with_size(BRW_TYPE_UD, data_bit_size);
7508    const brw_reg_type nir_data_type =
7509       brw_type_with_size(BRW_TYPE_UD, nir_bit_size);
7510    assert(data_bit_size >= nir_bit_size);
7511 
7512    if (!is_load) {
7513       for (unsigned i = 0; i < lsc_op_num_data_values(op); i++) {
7514          brw_reg nir_src =
7515             retype(get_nir_src(ntb, instr->src[data_src + i]), nir_data_type);
7516 
7517          if (data_bit_size > nir_bit_size) {
7518             /* Expand e.g. D16 to D16U32 */
7519             srcs[MEMORY_LOGICAL_DATA0 + i] = bld.vgrf(data_type, components);
7520             for (unsigned c = 0; c < components; c++) {
7521                bld.MOV(offset(srcs[MEMORY_LOGICAL_DATA0 + i], bld, c),
7522                        offset(nir_src, bld, c));
7523             }
7524          } else {
7525             srcs[MEMORY_LOGICAL_DATA0 + i] = nir_src;
7526          }
7527       }
7528    }
7529 
7530    brw_reg dest, nir_dest;
7531    if (!is_store) {
7532       nir_dest = retype(get_nir_def(ntb, instr->def), nir_data_type);
7533       dest = data_bit_size > nir_bit_size ? bld.vgrf(data_type, components)
7534                                           : nir_dest;
7535    }
7536 
7537    enum opcode opcode = is_load ? SHADER_OPCODE_MEMORY_LOAD_LOGICAL :
7538                         is_store ? SHADER_OPCODE_MEMORY_STORE_LOGICAL :
7539                         SHADER_OPCODE_MEMORY_ATOMIC_LOGICAL;
7540 
7541    const bool convergent_block_load =
7542       instr->intrinsic == nir_intrinsic_load_ubo_uniform_block_intel ||
7543       instr->intrinsic == nir_intrinsic_load_ssbo_uniform_block_intel ||
7544       instr->intrinsic == nir_intrinsic_load_shared_uniform_block_intel ||
7545       instr->intrinsic == nir_intrinsic_load_global_constant_uniform_block_intel;
7546    const bool block = convergent_block_load ||
7547       instr->intrinsic == nir_intrinsic_load_global_block_intel ||
7548       instr->intrinsic == nir_intrinsic_load_shared_block_intel ||
7549       instr->intrinsic == nir_intrinsic_load_ssbo_block_intel ||
7550       instr->intrinsic == nir_intrinsic_store_global_block_intel ||
7551       instr->intrinsic == nir_intrinsic_store_shared_block_intel ||
7552       instr->intrinsic == nir_intrinsic_store_ssbo_block_intel;
7553 
7554    fs_inst *inst;
7555 
7556    if (!block) {
7557       inst = bld.emit(opcode, dest, srcs, MEMORY_LOGICAL_NUM_SRCS);
7558       inst->size_written *= components;
7559 
7560       if (dest.file != BAD_FILE && data_bit_size > nir_bit_size) {
7561          /* Shrink e.g. D16U32 result back to D16 */
7562          for (unsigned i = 0; i < components; i++) {
7563             bld.MOV(offset(nir_dest, bld, i),
7564                     subscript(offset(dest, bld, i), nir_dest.type, 0));
7565          }
7566       }
7567    } else {
7568       assert(nir_bit_size == 32);
7569 
7570       srcs[MEMORY_LOGICAL_FLAGS] =
7571          brw_imm_ud(MEMORY_FLAG_TRANSPOSE | srcs[MEMORY_LOGICAL_FLAGS].ud);
7572       srcs[MEMORY_LOGICAL_ADDRESS] =
7573          instr->intrinsic == nir_intrinsic_load_global_constant_uniform_block_intel &&
7574          ntb.uniform_values[instr->src[0].ssa->index].file != BAD_FILE ?
7575          ntb.uniform_values[instr->src[0].ssa->index] :
7576          bld.emit_uniformize(srcs[MEMORY_LOGICAL_ADDRESS]);
7577 
7578       const fs_builder ubld = bld.exec_all().group(1, 0);
7579       unsigned total, done;
7580 
7581       if (convergent_block_load) {
7582          total = ALIGN(components, REG_SIZE * reg_unit(devinfo) / 4);
7583          dest = ubld.vgrf(BRW_TYPE_UD, total);
7584       } else {
7585          total = components * bld.dispatch_width();
7586          dest = nir_dest;
7587       }
7588 
7589       brw_reg src = srcs[MEMORY_LOGICAL_DATA0];
7590 
7591       unsigned block_comps = components;
7592 
7593       for (done = 0; done < total; done += block_comps) {
7594          block_comps = choose_oword_block_size_dwords(devinfo, total - done);
7595          const unsigned block_bytes = block_comps * (nir_bit_size / 8);
7596 
7597          srcs[MEMORY_LOGICAL_COMPONENTS] = brw_imm_ud(block_comps);
7598 
7599          brw_reg dst_offset = is_store ? brw_reg() :
7600             retype(byte_offset(dest, done * 4), BRW_TYPE_UD);
7601          if (is_store) {
7602             srcs[MEMORY_LOGICAL_DATA0] =
7603                retype(byte_offset(src, done * 4), BRW_TYPE_UD);
7604          }
7605 
7606          inst = ubld.emit(opcode, dst_offset, srcs, MEMORY_LOGICAL_NUM_SRCS);
7607          inst->has_no_mask_send_params = no_mask_handle;
7608          if (is_load)
7609             inst->size_written = block_bytes;
7610 
7611          if (brw_type_size_bits(srcs[MEMORY_LOGICAL_ADDRESS].type) == 64) {
7612             increment_a64_address(ubld, srcs[MEMORY_LOGICAL_ADDRESS],
7613                                   block_bytes, no_mask_handle);
7614          } else {
7615             srcs[MEMORY_LOGICAL_ADDRESS] =
7616                ubld.ADD(retype(srcs[MEMORY_LOGICAL_ADDRESS], BRW_TYPE_UD),
7617                         brw_imm_ud(block_bytes));
7618          }
7619       }
7620       assert(done == total);
7621 
7622       if (convergent_block_load) {
7623          for (unsigned c = 0; c < components; c++) {
7624             bld.MOV(retype(offset(nir_dest, bld, c), BRW_TYPE_UD),
7625                     component(dest, c));
7626          }
7627       }
7628    }
7629 }
7630 
7631 static void
fs_nir_emit_texture(nir_to_brw_state & ntb,nir_tex_instr * instr)7632 fs_nir_emit_texture(nir_to_brw_state &ntb,
7633                     nir_tex_instr *instr)
7634 {
7635    const intel_device_info *devinfo = ntb.devinfo;
7636    const fs_builder &bld = ntb.bld;
7637 
7638    brw_reg srcs[TEX_LOGICAL_NUM_SRCS];
7639 
7640    /* SKL PRMs: Volume 7: 3D-Media-GPGPU:
7641     *
7642     *    "The Pixel Null Mask field, when enabled via the Pixel Null Mask
7643     *     Enable will be incorect for sample_c when applied to a surface with
7644     *     64-bit per texel format such as R16G16BA16_UNORM. Pixel Null mask
7645     *     Enable may incorrectly report pixels as referencing a Null surface."
7646     *
7647     * We'll take care of this in NIR.
7648     */
7649    assert(!instr->is_sparse || srcs[TEX_LOGICAL_SRC_SHADOW_C].file == BAD_FILE);
7650 
7651    srcs[TEX_LOGICAL_SRC_RESIDENCY] = brw_imm_ud(instr->is_sparse);
7652 
7653    int lod_components = 0;
7654 
7655    /* The hardware requires a LOD for buffer textures */
7656    if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF)
7657       srcs[TEX_LOGICAL_SRC_LOD] = brw_imm_d(0);
7658 
7659    ASSERTED bool got_lod = false;
7660    ASSERTED bool got_bias = false;
7661    bool pack_lod_bias_and_offset = false;
7662    uint32_t header_bits = 0;
7663    for (unsigned i = 0; i < instr->num_srcs; i++) {
7664       nir_src nir_src = instr->src[i].src;
7665       brw_reg src = get_nir_src(ntb, nir_src);
7666       switch (instr->src[i].src_type) {
7667       case nir_tex_src_bias:
7668          assert(!got_lod);
7669          got_bias = true;
7670 
7671          srcs[TEX_LOGICAL_SRC_LOD] =
7672             retype(get_nir_src_imm(ntb, instr->src[i].src), BRW_TYPE_F);
7673          break;
7674       case nir_tex_src_comparator:
7675          srcs[TEX_LOGICAL_SRC_SHADOW_C] = retype(src, BRW_TYPE_F);
7676          break;
7677       case nir_tex_src_coord:
7678          switch (instr->op) {
7679          case nir_texop_txf:
7680          case nir_texop_txf_ms:
7681          case nir_texop_txf_ms_mcs_intel:
7682          case nir_texop_samples_identical:
7683             srcs[TEX_LOGICAL_SRC_COORDINATE] = retype(src, BRW_TYPE_D);
7684             break;
7685          default:
7686             srcs[TEX_LOGICAL_SRC_COORDINATE] = retype(src, BRW_TYPE_F);
7687             break;
7688          }
7689          break;
7690       case nir_tex_src_ddx:
7691          srcs[TEX_LOGICAL_SRC_LOD] = retype(src, BRW_TYPE_F);
7692          lod_components = nir_tex_instr_src_size(instr, i);
7693          break;
7694       case nir_tex_src_ddy:
7695          srcs[TEX_LOGICAL_SRC_LOD2] = retype(src, BRW_TYPE_F);
7696          break;
7697       case nir_tex_src_lod:
7698          assert(!got_bias);
7699          got_lod = true;
7700 
7701          switch (instr->op) {
7702          case nir_texop_txs:
7703             srcs[TEX_LOGICAL_SRC_LOD] =
7704                retype(get_nir_src_imm(ntb, instr->src[i].src), BRW_TYPE_UD);
7705             break;
7706          case nir_texop_txf:
7707             srcs[TEX_LOGICAL_SRC_LOD] =
7708                retype(get_nir_src_imm(ntb, instr->src[i].src), BRW_TYPE_D);
7709             break;
7710          default:
7711             srcs[TEX_LOGICAL_SRC_LOD] =
7712                retype(get_nir_src_imm(ntb, instr->src[i].src), BRW_TYPE_F);
7713             break;
7714          }
7715          break;
7716       case nir_tex_src_min_lod:
7717          srcs[TEX_LOGICAL_SRC_MIN_LOD] =
7718             retype(get_nir_src_imm(ntb, instr->src[i].src), BRW_TYPE_F);
7719          break;
7720       case nir_tex_src_ms_index:
7721          srcs[TEX_LOGICAL_SRC_SAMPLE_INDEX] = retype(src, BRW_TYPE_UD);
7722          break;
7723 
7724       case nir_tex_src_offset: {
7725          uint32_t offset_bits = 0;
7726          if (brw_texture_offset(instr, i, &offset_bits)) {
7727             header_bits |= offset_bits;
7728          } else {
7729             /* On gfx12.5+, if the offsets are not both constant and in the
7730              * {-8,7} range, nir_lower_tex() will have already lowered the
7731              * source offset. So we should never reach this point.
7732              */
7733             assert(devinfo->verx10 < 125);
7734             srcs[TEX_LOGICAL_SRC_TG4_OFFSET] =
7735                retype(src, BRW_TYPE_D);
7736          }
7737          break;
7738       }
7739 
7740       case nir_tex_src_projector:
7741          unreachable("should be lowered");
7742 
7743       case nir_tex_src_texture_offset: {
7744          assert(srcs[TEX_LOGICAL_SRC_SURFACE].file == BAD_FILE);
7745          /* Emit code to evaluate the actual indexing expression */
7746          if (instr->texture_index == 0 && is_resource_src(nir_src))
7747             srcs[TEX_LOGICAL_SRC_SURFACE] = get_resource_nir_src(ntb, nir_src);
7748          if (srcs[TEX_LOGICAL_SRC_SURFACE].file == BAD_FILE) {
7749             srcs[TEX_LOGICAL_SRC_SURFACE] =
7750                bld.emit_uniformize(bld.ADD(retype(src, BRW_TYPE_UD),
7751                                            brw_imm_ud(instr->texture_index)));
7752          }
7753          assert(srcs[TEX_LOGICAL_SRC_SURFACE].file != BAD_FILE);
7754          break;
7755       }
7756 
7757       case nir_tex_src_sampler_offset: {
7758          /* Emit code to evaluate the actual indexing expression */
7759          if (instr->sampler_index == 0 && is_resource_src(nir_src))
7760             srcs[TEX_LOGICAL_SRC_SAMPLER] = get_resource_nir_src(ntb, nir_src);
7761          if (srcs[TEX_LOGICAL_SRC_SAMPLER].file == BAD_FILE) {
7762             srcs[TEX_LOGICAL_SRC_SAMPLER] =
7763                bld.emit_uniformize(bld.ADD(retype(src, BRW_TYPE_UD),
7764                                            brw_imm_ud(instr->sampler_index)));
7765          }
7766          break;
7767       }
7768 
7769       case nir_tex_src_texture_handle:
7770          assert(nir_tex_instr_src_index(instr, nir_tex_src_texture_offset) == -1);
7771          srcs[TEX_LOGICAL_SRC_SURFACE] = brw_reg();
7772          if (is_resource_src(nir_src))
7773             srcs[TEX_LOGICAL_SRC_SURFACE_HANDLE] = get_resource_nir_src(ntb, nir_src);
7774          if (srcs[TEX_LOGICAL_SRC_SURFACE_HANDLE].file == BAD_FILE)
7775             srcs[TEX_LOGICAL_SRC_SURFACE_HANDLE] = bld.emit_uniformize(src);
7776          break;
7777 
7778       case nir_tex_src_sampler_handle:
7779          assert(nir_tex_instr_src_index(instr, nir_tex_src_sampler_offset) == -1);
7780          srcs[TEX_LOGICAL_SRC_SAMPLER] = brw_reg();
7781          if (is_resource_src(nir_src))
7782             srcs[TEX_LOGICAL_SRC_SAMPLER_HANDLE] = get_resource_nir_src(ntb, nir_src);
7783          if (srcs[TEX_LOGICAL_SRC_SAMPLER_HANDLE].file == BAD_FILE)
7784             srcs[TEX_LOGICAL_SRC_SAMPLER_HANDLE] = bld.emit_uniformize(src);
7785          break;
7786 
7787       case nir_tex_src_ms_mcs_intel:
7788          assert(instr->op == nir_texop_txf_ms);
7789          srcs[TEX_LOGICAL_SRC_MCS] = retype(src, BRW_TYPE_D);
7790          break;
7791 
7792       /* If this parameter is present, we are packing offset U, V and LOD/Bias
7793        * into a single (32-bit) value.
7794        */
7795       case nir_tex_src_backend2:
7796          assert(instr->op == nir_texop_tg4);
7797          pack_lod_bias_and_offset = true;
7798          srcs[TEX_LOGICAL_SRC_LOD] =
7799             retype(get_nir_src_imm(ntb, instr->src[i].src), BRW_TYPE_F);
7800          break;
7801 
7802       /* If this parameter is present, we are packing either the explicit LOD
7803        * or LOD bias and the array index into a single (32-bit) value when
7804        * 32-bit texture coordinates are used.
7805        */
7806       case nir_tex_src_backend1:
7807          assert(!got_lod && !got_bias);
7808          got_lod = true;
7809          assert(instr->op == nir_texop_txl || instr->op == nir_texop_txb);
7810          srcs[TEX_LOGICAL_SRC_LOD] =
7811             retype(get_nir_src_imm(ntb, instr->src[i].src), BRW_TYPE_F);
7812          break;
7813 
7814       default:
7815          unreachable("unknown texture source");
7816       }
7817    }
7818 
7819    /* If the surface or sampler were not specified through sources, use the
7820     * instruction index.
7821     */
7822    if (srcs[TEX_LOGICAL_SRC_SURFACE].file == BAD_FILE &&
7823        srcs[TEX_LOGICAL_SRC_SURFACE_HANDLE].file == BAD_FILE)
7824       srcs[TEX_LOGICAL_SRC_SURFACE] = brw_imm_ud(instr->texture_index);
7825    if (srcs[TEX_LOGICAL_SRC_SAMPLER].file == BAD_FILE &&
7826        srcs[TEX_LOGICAL_SRC_SAMPLER_HANDLE].file == BAD_FILE)
7827       srcs[TEX_LOGICAL_SRC_SAMPLER] = brw_imm_ud(instr->sampler_index);
7828 
7829    if (srcs[TEX_LOGICAL_SRC_MCS].file == BAD_FILE &&
7830        (instr->op == nir_texop_txf_ms ||
7831         instr->op == nir_texop_samples_identical)) {
7832       srcs[TEX_LOGICAL_SRC_MCS] =
7833          emit_mcs_fetch(ntb, srcs[TEX_LOGICAL_SRC_COORDINATE],
7834                         instr->coord_components,
7835                         srcs[TEX_LOGICAL_SRC_SURFACE],
7836                         srcs[TEX_LOGICAL_SRC_SURFACE_HANDLE]);
7837    }
7838 
7839    srcs[TEX_LOGICAL_SRC_COORD_COMPONENTS] = brw_imm_d(instr->coord_components);
7840    srcs[TEX_LOGICAL_SRC_GRAD_COMPONENTS] = brw_imm_d(lod_components);
7841 
7842    enum opcode opcode;
7843    switch (instr->op) {
7844    case nir_texop_tex:
7845       opcode = SHADER_OPCODE_TEX_LOGICAL;
7846       break;
7847    case nir_texop_txb:
7848       opcode = FS_OPCODE_TXB_LOGICAL;
7849       break;
7850    case nir_texop_txl:
7851       opcode = SHADER_OPCODE_TXL_LOGICAL;
7852       break;
7853    case nir_texop_txd:
7854       opcode = SHADER_OPCODE_TXD_LOGICAL;
7855       break;
7856    case nir_texop_txf:
7857       opcode = SHADER_OPCODE_TXF_LOGICAL;
7858       break;
7859    case nir_texop_txf_ms:
7860       /* On Gfx12HP there is only CMS_W available. From the Bspec: Shared
7861        * Functions - 3D Sampler - Messages - Message Format:
7862        *
7863        *   ld2dms REMOVEDBY(GEN:HAS:1406788836)
7864        */
7865       if (devinfo->verx10 >= 125)
7866          opcode = SHADER_OPCODE_TXF_CMS_W_GFX12_LOGICAL;
7867       else
7868          opcode = SHADER_OPCODE_TXF_CMS_W_LOGICAL;
7869       break;
7870    case nir_texop_txf_ms_mcs_intel:
7871       opcode = SHADER_OPCODE_TXF_MCS_LOGICAL;
7872       break;
7873    case nir_texop_query_levels:
7874    case nir_texop_txs:
7875       opcode = SHADER_OPCODE_TXS_LOGICAL;
7876       break;
7877    case nir_texop_lod:
7878       opcode = SHADER_OPCODE_LOD_LOGICAL;
7879       break;
7880    case nir_texop_tg4: {
7881       if (srcs[TEX_LOGICAL_SRC_TG4_OFFSET].file != BAD_FILE) {
7882          opcode = SHADER_OPCODE_TG4_OFFSET_LOGICAL;
7883       } else {
7884          opcode = SHADER_OPCODE_TG4_LOGICAL;
7885          if (devinfo->ver >= 20) {
7886             /* If SPV_AMD_texture_gather_bias_lod extension is enabled, all
7887              * texture gather functions (ie. the ones which do not take the
7888              * extra bias argument and the ones that do) fetch texels from
7889              * implicit LOD in fragment shader stage. In all other shader
7890              * stages, base level is used instead.
7891              */
7892             if (instr->is_gather_implicit_lod)
7893                opcode = SHADER_OPCODE_TG4_IMPLICIT_LOD_LOGICAL;
7894 
7895             if (got_bias)
7896                opcode = SHADER_OPCODE_TG4_BIAS_LOGICAL;
7897 
7898             if (got_lod)
7899                opcode = SHADER_OPCODE_TG4_EXPLICIT_LOD_LOGICAL;
7900 
7901             if (pack_lod_bias_and_offset) {
7902                if (got_lod)
7903                   opcode = SHADER_OPCODE_TG4_OFFSET_LOD_LOGICAL;
7904                if (got_bias)
7905                   opcode = SHADER_OPCODE_TG4_OFFSET_BIAS_LOGICAL;
7906             }
7907          }
7908       }
7909       break;
7910    }
7911    case nir_texop_texture_samples:
7912       opcode = SHADER_OPCODE_SAMPLEINFO_LOGICAL;
7913       break;
7914    case nir_texop_samples_identical: {
7915       brw_reg dst = retype(get_nir_def(ntb, instr->def), BRW_TYPE_D);
7916 
7917       /* If mcs is an immediate value, it means there is no MCS.  In that case
7918        * just return false.
7919        */
7920       if (srcs[TEX_LOGICAL_SRC_MCS].file == IMM) {
7921          bld.MOV(dst, brw_imm_ud(0u));
7922       } else {
7923          brw_reg tmp =
7924             bld.OR(srcs[TEX_LOGICAL_SRC_MCS],
7925                    offset(srcs[TEX_LOGICAL_SRC_MCS], bld, 1));
7926          bld.CMP(dst, tmp, brw_imm_ud(0u), BRW_CONDITIONAL_EQ);
7927       }
7928       return;
7929    }
7930    default:
7931       unreachable("unknown texture opcode");
7932    }
7933 
7934    if (instr->op == nir_texop_tg4) {
7935       header_bits |= instr->component << 16;
7936    }
7937 
7938    brw_reg nir_def_reg = get_nir_def(ntb, instr->def);
7939 
7940    bool is_simd8_16bit = nir_alu_type_get_type_size(instr->dest_type) == 16
7941       && bld.dispatch_width() == 8;
7942 
7943    brw_reg dst = bld.vgrf(brw_type_for_nir_type(devinfo, instr->dest_type),
7944       (is_simd8_16bit ? 8 : 4) + instr->is_sparse);
7945 
7946    fs_inst *inst = bld.emit(opcode, dst, srcs, ARRAY_SIZE(srcs));
7947    inst->offset = header_bits;
7948 
7949    const unsigned dest_size = nir_tex_instr_dest_size(instr);
7950    unsigned read_size = dest_size;
7951    if (instr->op != nir_texop_tg4 && instr->op != nir_texop_query_levels) {
7952       unsigned write_mask = nir_def_components_read(&instr->def);
7953       assert(write_mask != 0); /* dead code should have been eliminated */
7954       if (instr->is_sparse) {
7955          read_size = util_last_bit(write_mask) - 1;
7956          inst->size_written =
7957             (is_simd8_16bit ? 2 : 1) * read_size *
7958             inst->dst.component_size(inst->exec_size) +
7959             (reg_unit(devinfo) * REG_SIZE);
7960       } else {
7961          read_size = util_last_bit(write_mask);
7962          inst->size_written =
7963             (is_simd8_16bit ? 2 : 1) * read_size *
7964             inst->dst.component_size(inst->exec_size);
7965       }
7966    } else {
7967       inst->size_written = (is_simd8_16bit ? 2 : 1) * 4 *
7968                            inst->dst.component_size(inst->exec_size) +
7969                            (instr->is_sparse ? (reg_unit(devinfo) * REG_SIZE) : 0);
7970    }
7971 
7972    if (srcs[TEX_LOGICAL_SRC_SHADOW_C].file != BAD_FILE)
7973       inst->shadow_compare = true;
7974 
7975    /* Wa_14012688258:
7976     *
7977     * Don't trim zeros at the end of payload for sample operations
7978     * in cube and cube arrays.
7979     */
7980    if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE &&
7981        intel_needs_workaround(devinfo, 14012688258)) {
7982 
7983       /* Compiler should send U,V,R parameters even if V,R are 0. */
7984       if (srcs[TEX_LOGICAL_SRC_COORDINATE].file != BAD_FILE)
7985          assert(instr->coord_components >= 3u);
7986 
7987       /* See opt_zero_samples(). */
7988       inst->keep_payload_trailing_zeros = true;
7989    }
7990 
7991    if (instr->op != nir_texop_query_levels && !instr->is_sparse
7992       && !is_simd8_16bit) {
7993       /* In most cases we can write directly to the result. */
7994       inst->dst = nir_def_reg;
7995    } else {
7996       /* In other cases, we have to reorganize the sampler message's results
7997        * a bit to match the NIR intrinsic's expectations.
7998        */
7999       brw_reg nir_dest[5];
8000       for (unsigned i = 0; i < read_size; i++)
8001          nir_dest[i] = offset(dst, bld, (is_simd8_16bit ? 2 : 1) * i);
8002 
8003       for (unsigned i = read_size; i < dest_size; i++)
8004          nir_dest[i].type = dst.type;
8005 
8006       if (instr->op == nir_texop_query_levels) {
8007          /* # levels is in .w */
8008          if (devinfo->ver == 9) {
8009             /**
8010              * Wa_1940217:
8011              *
8012              * When a surface of type SURFTYPE_NULL is accessed by resinfo, the
8013              * MIPCount returned is undefined instead of 0.
8014              */
8015             fs_inst *mov = bld.MOV(bld.null_reg_d(), dst);
8016             mov->conditional_mod = BRW_CONDITIONAL_NZ;
8017             nir_dest[0] = bld.vgrf(BRW_TYPE_D);
8018             fs_inst *sel =
8019                bld.SEL(nir_dest[0], offset(dst, bld, 3), brw_imm_d(0));
8020             sel->predicate = BRW_PREDICATE_NORMAL;
8021          } else {
8022             nir_dest[0] = offset(dst, bld, 3);
8023          }
8024       }
8025 
8026       /* The residency bits are only in the first component. */
8027       if (instr->is_sparse) {
8028          nir_dest[dest_size - 1] =
8029             component(offset(dst, bld, dest_size - 1), 0);
8030       }
8031 
8032       bld.LOAD_PAYLOAD(nir_def_reg, nir_dest, dest_size, 0);
8033    }
8034 }
8035 
8036 static void
fs_nir_emit_jump(nir_to_brw_state & ntb,nir_jump_instr * instr)8037 fs_nir_emit_jump(nir_to_brw_state &ntb, nir_jump_instr *instr)
8038 {
8039    switch (instr->type) {
8040    case nir_jump_break:
8041       ntb.bld.emit(BRW_OPCODE_BREAK);
8042       break;
8043    case nir_jump_continue:
8044       ntb.bld.emit(BRW_OPCODE_CONTINUE);
8045       break;
8046    case nir_jump_halt:
8047       ntb.bld.emit(BRW_OPCODE_HALT);
8048       break;
8049    case nir_jump_return:
8050    default:
8051       unreachable("unknown jump");
8052    }
8053 }
8054 
8055 /*
8056  * This helper takes a source register and un/shuffles it into the destination
8057  * register.
8058  *
8059  * If source type size is smaller than destination type size the operation
8060  * needed is a component shuffle. The opposite case would be an unshuffle. If
8061  * source/destination type size is equal a shuffle is done that would be
8062  * equivalent to a simple MOV.
8063  *
8064  * For example, if source is a 16-bit type and destination is 32-bit. A 3
8065  * components .xyz 16-bit vector on SIMD8 would be.
8066  *
8067  *    |x1|x2|x3|x4|x5|x6|x7|x8|y1|y2|y3|y4|y5|y6|y7|y8|
8068  *    |z1|z2|z3|z4|z5|z6|z7|z8|  |  |  |  |  |  |  |  |
8069  *
8070  * This helper will return the following 2 32-bit components with the 16-bit
8071  * values shuffled:
8072  *
8073  *    |x1 y1|x2 y2|x3 y3|x4 y4|x5 y5|x6 y6|x7 y7|x8 y8|
8074  *    |z1   |z2   |z3   |z4   |z5   |z6   |z7   |z8   |
8075  *
8076  * For unshuffle, the example would be the opposite, a 64-bit type source
8077  * and a 32-bit destination. A 2 component .xy 64-bit vector on SIMD8
8078  * would be:
8079  *
8080  *    | x1l   x1h | x2l   x2h | x3l   x3h | x4l   x4h |
8081  *    | x5l   x5h | x6l   x6h | x7l   x7h | x8l   x8h |
8082  *    | y1l   y1h | y2l   y2h | y3l   y3h | y4l   y4h |
8083  *    | y5l   y5h | y6l   y6h | y7l   y7h | y8l   y8h |
8084  *
8085  * The returned result would be the following 4 32-bit components unshuffled:
8086  *
8087  *    | x1l | x2l | x3l | x4l | x5l | x6l | x7l | x8l |
8088  *    | x1h | x2h | x3h | x4h | x5h | x6h | x7h | x8h |
8089  *    | y1l | y2l | y3l | y4l | y5l | y6l | y7l | y8l |
8090  *    | y1h | y2h | y3h | y4h | y5h | y6h | y7h | y8h |
8091  *
8092  * - Source and destination register must not be overlapped.
8093  * - components units are measured in terms of the smaller type between
8094  *   source and destination because we are un/shuffling the smaller
8095  *   components from/into the bigger ones.
8096  * - first_component parameter allows skipping source components.
8097  */
8098 void
shuffle_src_to_dst(const fs_builder & bld,const brw_reg & dst,const brw_reg & src,uint32_t first_component,uint32_t components)8099 shuffle_src_to_dst(const fs_builder &bld,
8100                    const brw_reg &dst,
8101                    const brw_reg &src,
8102                    uint32_t first_component,
8103                    uint32_t components)
8104 {
8105    if (brw_type_size_bytes(src.type) == brw_type_size_bytes(dst.type)) {
8106       assert(!regions_overlap(dst,
8107          brw_type_size_bytes(dst.type) * bld.dispatch_width() * components,
8108          offset(src, bld, first_component),
8109          brw_type_size_bytes(src.type) * bld.dispatch_width() * components));
8110       for (unsigned i = 0; i < components; i++) {
8111          bld.MOV(retype(offset(dst, bld, i), src.type),
8112                  offset(src, bld, i + first_component));
8113       }
8114    } else if (brw_type_size_bytes(src.type) < brw_type_size_bytes(dst.type)) {
8115       /* Source is shuffled into destination */
8116       unsigned size_ratio = brw_type_size_bytes(dst.type) / brw_type_size_bytes(src.type);
8117       assert(!regions_overlap(dst,
8118          brw_type_size_bytes(dst.type) * bld.dispatch_width() *
8119          DIV_ROUND_UP(components, size_ratio),
8120          offset(src, bld, first_component),
8121          brw_type_size_bytes(src.type) * bld.dispatch_width() * components));
8122 
8123       brw_reg_type shuffle_type =
8124          brw_type_with_size(BRW_TYPE_D, brw_type_size_bits(src.type));
8125       for (unsigned i = 0; i < components; i++) {
8126          brw_reg shuffle_component_i =
8127             subscript(offset(dst, bld, i / size_ratio),
8128                       shuffle_type, i % size_ratio);
8129          bld.MOV(shuffle_component_i,
8130                  retype(offset(src, bld, i + first_component), shuffle_type));
8131       }
8132    } else {
8133       /* Source is unshuffled into destination */
8134       unsigned size_ratio = brw_type_size_bytes(src.type) / brw_type_size_bytes(dst.type);
8135       assert(!regions_overlap(dst,
8136          brw_type_size_bytes(dst.type) * bld.dispatch_width() * components,
8137          offset(src, bld, first_component / size_ratio),
8138          brw_type_size_bytes(src.type) * bld.dispatch_width() *
8139          DIV_ROUND_UP(components + (first_component % size_ratio),
8140                       size_ratio)));
8141 
8142       brw_reg_type shuffle_type =
8143          brw_type_with_size(BRW_TYPE_D, brw_type_size_bits(dst.type));
8144       for (unsigned i = 0; i < components; i++) {
8145          brw_reg shuffle_component_i =
8146             subscript(offset(src, bld, (first_component + i) / size_ratio),
8147                       shuffle_type, (first_component + i) % size_ratio);
8148          bld.MOV(retype(offset(dst, bld, i), shuffle_type),
8149                  shuffle_component_i);
8150       }
8151    }
8152 }
8153 
8154 void
shuffle_from_32bit_read(const fs_builder & bld,const brw_reg & dst,const brw_reg & src,uint32_t first_component,uint32_t components)8155 shuffle_from_32bit_read(const fs_builder &bld,
8156                         const brw_reg &dst,
8157                         const brw_reg &src,
8158                         uint32_t first_component,
8159                         uint32_t components)
8160 {
8161    assert(brw_type_size_bytes(src.type) == 4);
8162 
8163    /* This function takes components in units of the destination type while
8164     * shuffle_src_to_dst takes components in units of the smallest type
8165     */
8166    if (brw_type_size_bytes(dst.type) > 4) {
8167       assert(brw_type_size_bytes(dst.type) == 8);
8168       first_component *= 2;
8169       components *= 2;
8170    }
8171 
8172    shuffle_src_to_dst(bld, dst, src, first_component, components);
8173 }
8174 
8175 static void
fs_nir_emit_instr(nir_to_brw_state & ntb,nir_instr * instr)8176 fs_nir_emit_instr(nir_to_brw_state &ntb, nir_instr *instr)
8177 {
8178 #ifndef NDEBUG
8179    if (unlikely(ntb.annotate)) {
8180       /* Use shader mem_ctx since annotations outlive the NIR conversion. */
8181       ntb.bld = ntb.bld.annotate(nir_instr_as_str(instr, ntb.s.mem_ctx));
8182    }
8183 #endif
8184 
8185    switch (instr->type) {
8186    case nir_instr_type_alu:
8187       fs_nir_emit_alu(ntb, nir_instr_as_alu(instr), true);
8188       break;
8189 
8190    case nir_instr_type_deref:
8191       unreachable("All derefs should've been lowered");
8192       break;
8193 
8194    case nir_instr_type_intrinsic:
8195       switch (ntb.s.stage) {
8196       case MESA_SHADER_VERTEX:
8197          fs_nir_emit_vs_intrinsic(ntb, nir_instr_as_intrinsic(instr));
8198          break;
8199       case MESA_SHADER_TESS_CTRL:
8200          fs_nir_emit_tcs_intrinsic(ntb, nir_instr_as_intrinsic(instr));
8201          break;
8202       case MESA_SHADER_TESS_EVAL:
8203          fs_nir_emit_tes_intrinsic(ntb, nir_instr_as_intrinsic(instr));
8204          break;
8205       case MESA_SHADER_GEOMETRY:
8206          fs_nir_emit_gs_intrinsic(ntb, nir_instr_as_intrinsic(instr));
8207          break;
8208       case MESA_SHADER_FRAGMENT:
8209          fs_nir_emit_fs_intrinsic(ntb, nir_instr_as_intrinsic(instr));
8210          break;
8211       case MESA_SHADER_COMPUTE:
8212       case MESA_SHADER_KERNEL:
8213          fs_nir_emit_cs_intrinsic(ntb, nir_instr_as_intrinsic(instr));
8214          break;
8215       case MESA_SHADER_RAYGEN:
8216       case MESA_SHADER_ANY_HIT:
8217       case MESA_SHADER_CLOSEST_HIT:
8218       case MESA_SHADER_MISS:
8219       case MESA_SHADER_INTERSECTION:
8220       case MESA_SHADER_CALLABLE:
8221          fs_nir_emit_bs_intrinsic(ntb, nir_instr_as_intrinsic(instr));
8222          break;
8223       case MESA_SHADER_TASK:
8224          fs_nir_emit_task_intrinsic(ntb, nir_instr_as_intrinsic(instr));
8225          break;
8226       case MESA_SHADER_MESH:
8227          fs_nir_emit_mesh_intrinsic(ntb, nir_instr_as_intrinsic(instr));
8228          break;
8229       default:
8230          unreachable("unsupported shader stage");
8231       }
8232       break;
8233 
8234    case nir_instr_type_tex:
8235       fs_nir_emit_texture(ntb, nir_instr_as_tex(instr));
8236       break;
8237 
8238    case nir_instr_type_load_const:
8239       fs_nir_emit_load_const(ntb, nir_instr_as_load_const(instr));
8240       break;
8241 
8242    case nir_instr_type_undef:
8243       /* We create a new VGRF for undefs on every use (by handling
8244        * them in get_nir_src()), rather than for each definition.
8245        * This helps register coalescing eliminate MOVs from undef.
8246        */
8247       break;
8248 
8249    case nir_instr_type_jump:
8250       fs_nir_emit_jump(ntb, nir_instr_as_jump(instr));
8251       break;
8252 
8253    default:
8254       unreachable("unknown instruction type");
8255    }
8256 }
8257 
8258 static unsigned
brw_rnd_mode_from_nir(unsigned mode,unsigned * mask)8259 brw_rnd_mode_from_nir(unsigned mode, unsigned *mask)
8260 {
8261    unsigned brw_mode = 0;
8262    *mask = 0;
8263 
8264    if ((FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP16 |
8265         FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP32 |
8266         FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP64) &
8267        mode) {
8268       brw_mode |= BRW_RND_MODE_RTZ << BRW_CR0_RND_MODE_SHIFT;
8269       *mask |= BRW_CR0_RND_MODE_MASK;
8270    }
8271    if ((FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP16 |
8272         FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP32 |
8273         FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP64) &
8274        mode) {
8275       brw_mode |= BRW_RND_MODE_RTNE << BRW_CR0_RND_MODE_SHIFT;
8276       *mask |= BRW_CR0_RND_MODE_MASK;
8277    }
8278    if (mode & FLOAT_CONTROLS_DENORM_PRESERVE_FP16) {
8279       brw_mode |= BRW_CR0_FP16_DENORM_PRESERVE;
8280       *mask |= BRW_CR0_FP16_DENORM_PRESERVE;
8281    }
8282    if (mode & FLOAT_CONTROLS_DENORM_PRESERVE_FP32) {
8283       brw_mode |= BRW_CR0_FP32_DENORM_PRESERVE;
8284       *mask |= BRW_CR0_FP32_DENORM_PRESERVE;
8285    }
8286    if (mode & FLOAT_CONTROLS_DENORM_PRESERVE_FP64) {
8287       brw_mode |= BRW_CR0_FP64_DENORM_PRESERVE;
8288       *mask |= BRW_CR0_FP64_DENORM_PRESERVE;
8289    }
8290    if (mode & FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP16)
8291       *mask |= BRW_CR0_FP16_DENORM_PRESERVE;
8292    if (mode & FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP32)
8293       *mask |= BRW_CR0_FP32_DENORM_PRESERVE;
8294    if (mode & FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP64)
8295       *mask |= BRW_CR0_FP64_DENORM_PRESERVE;
8296    if (mode == FLOAT_CONTROLS_DEFAULT_FLOAT_CONTROL_MODE)
8297       *mask |= BRW_CR0_FP_MODE_MASK;
8298 
8299    if (*mask != 0)
8300       assert((*mask & brw_mode) == brw_mode);
8301 
8302    return brw_mode;
8303 }
8304 
8305 static void
emit_shader_float_controls_execution_mode(nir_to_brw_state & ntb)8306 emit_shader_float_controls_execution_mode(nir_to_brw_state &ntb)
8307 {
8308    const fs_builder &bld = ntb.bld;
8309    fs_visitor &s = ntb.s;
8310 
8311    unsigned execution_mode = s.nir->info.float_controls_execution_mode;
8312    if (execution_mode == FLOAT_CONTROLS_DEFAULT_FLOAT_CONTROL_MODE)
8313       return;
8314 
8315    fs_builder ubld = bld.exec_all().group(1, 0);
8316    fs_builder abld = ubld.annotate("shader floats control execution mode");
8317    unsigned mask, mode = brw_rnd_mode_from_nir(execution_mode, &mask);
8318 
8319    if (mask == 0)
8320       return;
8321 
8322    abld.emit(SHADER_OPCODE_FLOAT_CONTROL_MODE, bld.null_reg_ud(),
8323              brw_imm_d(mode), brw_imm_d(mask));
8324 }
8325 
8326 /**
8327  * Test the dispatch mask packing assumptions of
8328  * brw_stage_has_packed_dispatch().  Call this from e.g. the top of
8329  * nir_to_brw() to cause a GPU hang if any shader invocation is
8330  * executed with an unexpected dispatch mask.
8331  */
8332 static UNUSED void
brw_fs_test_dispatch_packing(const fs_builder & bld)8333 brw_fs_test_dispatch_packing(const fs_builder &bld)
8334 {
8335    const fs_visitor *shader = bld.shader;
8336    const gl_shader_stage stage = shader->stage;
8337    const bool uses_vmask =
8338       stage == MESA_SHADER_FRAGMENT &&
8339       brw_wm_prog_data(shader->prog_data)->uses_vmask;
8340 
8341    if (brw_stage_has_packed_dispatch(shader->devinfo, stage,
8342                                      shader->max_polygons,
8343                                      shader->prog_data)) {
8344       const fs_builder ubld = bld.exec_all().group(1, 0);
8345       const brw_reg tmp = component(bld.vgrf(BRW_TYPE_UD), 0);
8346       const brw_reg mask = uses_vmask ? brw_vmask_reg() : brw_dmask_reg();
8347 
8348       ubld.ADD(tmp, mask, brw_imm_ud(1));
8349       ubld.AND(tmp, mask, tmp);
8350 
8351       /* This will loop forever if the dispatch mask doesn't have the expected
8352        * form '2^n-1', in which case tmp will be non-zero.
8353        */
8354       bld.emit(BRW_OPCODE_DO);
8355       bld.CMP(bld.null_reg_ud(), tmp, brw_imm_ud(0), BRW_CONDITIONAL_NZ);
8356       set_predicate(BRW_PREDICATE_NORMAL, bld.emit(BRW_OPCODE_WHILE));
8357    }
8358 }
8359 
8360 void
nir_to_brw(fs_visitor * s)8361 nir_to_brw(fs_visitor *s)
8362 {
8363    nir_to_brw_state ntb = {
8364       .s       = *s,
8365       .nir     = s->nir,
8366       .devinfo = s->devinfo,
8367       .mem_ctx = ralloc_context(NULL),
8368       .bld     = fs_builder(s).at_end(),
8369    };
8370 
8371    if (INTEL_DEBUG(DEBUG_ANNOTATION))
8372       ntb.annotate = true;
8373 
8374    if (ENABLE_FS_TEST_DISPATCH_PACKING)
8375       brw_fs_test_dispatch_packing(ntb.bld);
8376 
8377    for (unsigned i = 0; i < s->nir->printf_info_count; i++) {
8378       brw_stage_prog_data_add_printf(s->prog_data,
8379                                      s->mem_ctx,
8380                                      &s->nir->printf_info[i]);
8381    }
8382 
8383    emit_shader_float_controls_execution_mode(ntb);
8384 
8385    /* emit the arrays used for inputs and outputs - load/store intrinsics will
8386     * be converted to reads/writes of these arrays
8387     */
8388    fs_nir_setup_outputs(ntb);
8389    fs_nir_setup_uniforms(ntb.s);
8390    fs_nir_emit_system_values(ntb);
8391    ntb.s.last_scratch = ALIGN(ntb.nir->scratch_size, 4) * ntb.s.dispatch_width;
8392 
8393    fs_nir_emit_impl(ntb, nir_shader_get_entrypoint((nir_shader *)ntb.nir));
8394 
8395    ntb.bld.emit(SHADER_OPCODE_HALT_TARGET);
8396 
8397    ralloc_free(ntb.mem_ctx);
8398 }
8399