xref: /aosp_15_r20/external/mesa3d/src/panfrost/midgard/midgard_emit.c (revision 6104692788411f58d303aa86923a9ff6ecaded22)
1 /*
2  * Copyright (C) 2018-2019 Alyssa Rosenzweig <[email protected]>
3  * Copyright (C) 2019-2020 Collabora, Ltd.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the next
13  * paragraph) shall be included in all copies or substantial portions of the
14  * Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
24 
25 #include "compiler.h"
26 #include "midgard_ops.h"
27 #include "midgard_quirks.h"
28 
29 static midgard_int_mod
30 mir_get_imod(bool shift, nir_alu_type T, bool half, bool scalar)
31 {
32    if (!half) {
33       assert(!shift);
34       /* Doesn't matter, src mods are only used when expanding */
35       return midgard_int_sign_extend;
36    }
37 
38    if (shift)
39       return midgard_int_left_shift;
40 
41    if (nir_alu_type_get_base_type(T) == nir_type_int)
42       return midgard_int_sign_extend;
43    else
44       return midgard_int_zero_extend;
45 }
46 
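/* A UBO index immediate is scattered across several load/store word fields:
 * bits 1:0 go to arg_comp, bits 4:2 to arg_reg, bit 5 to bitsize_toggle and
 * bits 7:6 to index_format. */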
47 void
48 midgard_pack_ubo_index_imm(midgard_load_store_word *word, unsigned index)
49 {
50    word->arg_comp = index & 0x3;
51    word->arg_reg = (index >> 2) & 0x7;
52    word->bitsize_toggle = (index >> 5) & 0x1;
53    word->index_format = (index >> 6) & 0x3;
54 }
55 
56 void
57 midgard_pack_varying_params(midgard_load_store_word *word,
58                             midgard_varying_params p)
59 {
60    /* Currently these parameters are not supported. */
61    assert(p.direct_sample_pos_x == 0 && p.direct_sample_pos_y == 0);
62 
63    unsigned u;
64    memcpy(&u, &p, sizeof(p));
65 
66    word->signed_offset |= u & 0x1FF;
67 }
68 
69 midgard_varying_params
70 midgard_unpack_varying_params(midgard_load_store_word word)
71 {
72    unsigned params = word.signed_offset & 0x1FF;
73 
74    midgard_varying_params p;
75    memcpy(&p, &params, sizeof(p));
76 
77    return p;
78 }
79 
80 unsigned
81 mir_pack_mod(midgard_instruction *ins, unsigned i, bool scalar)
82 {
83    bool integer = midgard_is_integer_op(ins->op);
84    unsigned base_size = max_bitsize_for_alu(ins);
85    unsigned sz = nir_alu_type_get_type_size(ins->src_types[i]);
86    bool half = (sz == (base_size >> 1));
87 
88    return integer
89              ? mir_get_imod(ins->src_shift[i], ins->src_types[i], half, scalar)
90              : ((ins->src_abs[i] << 0) | ((ins->src_neg[i] << 1)));
91 }
92 
93 /* Midgard IR only knows vector ALU types, but we sometimes need to actually
94  * use scalar ALU instructions, for functional or performance reasons. To do
95  * this, we just demote vector ALU payloads to scalar. */
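/* For example, an op whose writemask has a single bit set (say component 2 of
 * a 32-bit vec4) can be emitted as a scalar ALU word whose output_component
 * picks out just that lane; vector_to_scalar_alu below performs the demotion. */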
96 
97 static int
98 component_from_mask(unsigned mask)
99 {
100    for (int c = 0; c < 8; ++c) {
101       if (mask & (1 << c))
102          return c;
103    }
104 
105    assert(0);
106    return 0;
107 }
108 
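/* Scalar sources address the register in 16-bit halves, so a full 32-bit
 * component lands on an even half-index; hence the shift below. */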
109 static unsigned
110 mir_pack_scalar_source(unsigned mod, bool is_full, unsigned component)
111 {
112    midgard_scalar_alu_src s = {
113       .mod = mod,
114       .full = is_full,
115       .component = component << (is_full ? 1 : 0),
116    };
117 
118    unsigned o;
119    memcpy(&o, &s, sizeof(s));
120 
121    return o & ((1 << 6) - 1);
122 }
123 
124 static midgard_scalar_alu
125 vector_to_scalar_alu(midgard_vector_alu v, midgard_instruction *ins)
126 {
127    bool is_full = nir_alu_type_get_type_size(ins->dest_type) == 32;
128 
129    bool half_0 = nir_alu_type_get_type_size(ins->src_types[0]) == 16;
130    bool half_1 = nir_alu_type_get_type_size(ins->src_types[1]) == 16;
131    unsigned comp = component_from_mask(ins->mask);
132 
133    unsigned packed_src[2] = {
134       mir_pack_scalar_source(mir_pack_mod(ins, 0, true), !half_0,
135                              ins->swizzle[0][comp]),
136       mir_pack_scalar_source(mir_pack_mod(ins, 1, true), !half_1,
137                              ins->swizzle[1][comp])};
138 
139    /* The output component is from the mask */
140    midgard_scalar_alu s = {
141       .op = v.op,
142       .src1 = packed_src[0],
143       .src2 = packed_src[1],
144       .outmod = v.outmod,
145       .output_full = is_full,
146       .output_component = comp,
147    };
148 
149    /* Full components are physically spaced out */
150    if (is_full) {
151       assert(s.output_component < 4);
152       s.output_component <<= 1;
153    }
154 
155    /* Inline constant is passed along rather than trying to extract it
156     * from v */
157 
158    if (ins->has_inline_constant) {
159       uint16_t imm = 0;
160       int lower_11 = ins->inline_constant & ((1 << 12) - 1);
161       imm |= (lower_11 >> 9) & 3;
162       imm |= (lower_11 >> 6) & 4;
163       imm |= (lower_11 >> 2) & 0x38;
164       imm |= (lower_11 & 63) << 6;
165 
166       s.src2 = imm;
167    }
168 
169    return s;
170 }
171 
172 /* 64-bit swizzles are super easy since there are 2 components of 2 components
173  * in an 8-bit field ... lots of duplication to go around!
174  *
175  * Swizzles of 32-bit vectors accessed from 64-bit instructions are a little
176  * funny -- pack them *as if* they were native 64-bit, using rep_* flags to
177  * flag upper. For instance, xy would become 64-bit XY but that's just xyzw
178  * native. Likewise, zz would become 64-bit XX with rep* so it would be xyxy
179  * with rep. Pretty nifty, huh? */
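/* For example, the identity swizzle { x, y } of a native 64-bit source packs
 * as (COMPONENT_Y << 2) | COMPONENT_X in the low nibble and
 * (COMPONENT_W << 2) | COMPONENT_Z in the high nibble, i.e. 0xe4, the same
 * bit pattern as a native xyzw 32-bit swizzle. */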
180 
181 static unsigned
182 mir_pack_swizzle_64(unsigned *swizzle, unsigned max_component, bool expand_high)
183 {
184    unsigned packed = 0;
185    unsigned base = expand_high ? 2 : 0;
186 
187    for (unsigned i = base; i < base + 2; ++i) {
188       assert(swizzle[i] <= max_component);
189 
190       unsigned a = (swizzle[i] & 1) ? (COMPONENT_W << 2) | COMPONENT_Z
191                                     : (COMPONENT_Y << 2) | COMPONENT_X;
192 
193       if (i & 1)
194          packed |= a << 4;
195       else
196          packed |= a;
197    }
198 
199    return packed;
200 }
201 
202 static void
203 mir_pack_mask_alu(midgard_instruction *ins, midgard_vector_alu *alu)
204 {
205    unsigned effective = ins->mask;
206 
207    /* If we have a destination override, we need to figure out whether to
208     * override to the lower or upper half, shifting the effective mask in
209     * the latter, so AAAA.... becomes AAAA */
210 
211    unsigned inst_size = max_bitsize_for_alu(ins);
212    signed upper_shift = mir_upper_override(ins, inst_size);
213 
214    if (upper_shift >= 0) {
215       effective >>= upper_shift;
216       alu->shrink_mode =
217          upper_shift ? midgard_shrink_mode_upper : midgard_shrink_mode_lower;
218    } else {
219       alu->shrink_mode = midgard_shrink_mode_none;
220    }
221 
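   /* For 32- and 64-bit modes the hardware writemask is per 16-bit lane, so
    * each component's bit has to be replicated 2x or 4x respectively; that is
    * what the expand_writemask() calls below do. */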
222    if (inst_size == 32)
223       alu->mask = expand_writemask(effective, 2);
224    else if (inst_size == 64)
225       alu->mask = expand_writemask(effective, 1);
226    else
227       alu->mask = effective;
228 }
229 
230 static unsigned
231 mir_pack_swizzle(unsigned mask, unsigned *swizzle, unsigned sz,
232                  unsigned base_size, bool op_channeled,
233                  midgard_src_expand_mode *expand_mode)
234 {
235    unsigned packed = 0;
236 
237    *expand_mode = midgard_src_passthrough;
238 
239    midgard_reg_mode reg_mode = reg_mode_for_bitsize(base_size);
240 
241    if (reg_mode == midgard_reg_mode_64) {
242       assert(sz == 64 || sz == 32);
243       unsigned components = (sz == 32) ? 4 : 2;
244 
245       packed = mir_pack_swizzle_64(swizzle, components, mask & 0xc);
246 
247       if (sz == 32) {
248          ASSERTED bool dontcare = true;
249          bool hi = false;
250 
251          assert(util_bitcount(mask) <= 2);
252 
253          u_foreach_bit(i, mask) {
254             bool hi_i = swizzle[i] >= COMPONENT_Z;
255 
256             /* We can't mix halves */
257             assert(dontcare || (hi == hi_i));
258             hi = hi_i;
259             dontcare = false;
260          }
261 
262          *expand_mode = hi ? midgard_src_expand_high : midgard_src_expand_low;
263       } else if (sz < 32) {
264          unreachable("Cannot encode 8/16 swizzle in 64-bit");
265       }
266    } else {
267       /* For 32-bit, swizzle packing is stupid-simple. For 16-bit,
268        * the strategy is to check whether the nibble we're on is
269        * upper or lower. We need all components to be on the same
270        * "side"; that much is enforced by the ISA and should have
271        * been lowered. TODO: 8-bit packing. TODO: vec8 */
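      /* For example, a 16-bit source swizzled .yyyy out of its upper half
       * reads components 5,5,5,5; after the v &= 0x3 below that packs as
       * 0x55, with expand_mode set to midgard_src_rep_high. */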
272 
273       unsigned first = mask ? ffs(mask) - 1 : 0;
274       bool upper = swizzle[first] > 3;
275 
276       if (upper && mask)
277          assert(sz <= 16);
278 
279       bool dest_up = !op_channeled && (first >= 4);
280 
281       for (unsigned c = (dest_up ? 4 : 0); c < (dest_up ? 8 : 4); ++c) {
282          unsigned v = swizzle[c];
283 
284          ASSERTED bool t_upper = v > (sz == 8 ? 7 : 3);
285 
286          /* Ensure we're doing something sane */
287 
288          if (mask & (1 << c)) {
289             assert(t_upper == upper);
290             assert(v <= (sz == 8 ? 15 : 7));
291          }
292 
293          /* Use the non upper part */
294          v &= 0x3;
295 
296          packed |= v << (2 * (c % 4));
297       }
298 
299       /* Replicate for now... should really pick a side for
300        * dot products */
301 
302       if (reg_mode == midgard_reg_mode_16 && sz == 16) {
303          *expand_mode = upper ? midgard_src_rep_high : midgard_src_rep_low;
304       } else if (reg_mode == midgard_reg_mode_16 && sz == 8) {
305          if (base_size == 16) {
306             *expand_mode =
307                upper ? midgard_src_expand_high : midgard_src_expand_low;
308          } else if (upper) {
309             *expand_mode = midgard_src_swap;
310          }
311       } else if (reg_mode == midgard_reg_mode_32 && sz == 16) {
312          *expand_mode =
313             upper ? midgard_src_expand_high : midgard_src_expand_low;
314       } else if (reg_mode == midgard_reg_mode_8) {
315          unreachable("Unhandled reg mode");
316       }
317    }
318 
319    return packed;
320 }
321 
322 static void
323 mir_pack_vector_srcs(midgard_instruction *ins, midgard_vector_alu *alu)
324 {
325    bool channeled = GET_CHANNEL_COUNT(alu_opcode_props[ins->op].props);
326 
327    unsigned base_size = max_bitsize_for_alu(ins);
328 
329    for (unsigned i = 0; i < 2; ++i) {
330       if (ins->has_inline_constant && (i == 1))
331          continue;
332 
333       if (ins->src[i] == ~0)
334          continue;
335 
336       unsigned sz = nir_alu_type_get_type_size(ins->src_types[i]);
337       assert((sz == base_size) || (sz == base_size / 2));
338 
339       midgard_src_expand_mode expand_mode = midgard_src_passthrough;
340       unsigned swizzle = mir_pack_swizzle(ins->mask, ins->swizzle[i], sz,
341                                           base_size, channeled, &expand_mode);
342 
343       midgard_vector_alu_src pack = {
344          .mod = mir_pack_mod(ins, i, false),
345          .expand_mode = expand_mode,
346          .swizzle = swizzle,
347       };
348 
349       unsigned p = vector_alu_srco_unsigned(pack);
350 
351       if (i == 0)
352          alu->src1 = p;
353       else
354          alu->src2 = p;
355    }
356 }
357 
358 static void
359 mir_pack_swizzle_ldst(midgard_instruction *ins)
360 {
361    unsigned compsz = OP_IS_STORE(ins->op)
362                         ? nir_alu_type_get_type_size(ins->src_types[0])
363                         : nir_alu_type_get_type_size(ins->dest_type);
364    unsigned maxcomps = 128 / compsz;
365    unsigned step = DIV_ROUND_UP(32, compsz);
366 
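   /* The load/store swizzle addresses 32-bit lanes, so for sub-32-bit
    * components only every `step`-th component is visited and just the
    * containing lane (v / step) is encoded. */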
367    for (unsigned c = 0; c < maxcomps; c += step) {
368       unsigned v = ins->swizzle[0][c];
369 
370       /* Make sure the component index doesn't exceed the maximum
371        * number of components. */
372       assert(v <= maxcomps);
373 
374       if (compsz <= 32)
375          ins->load_store.swizzle |= (v / step) << (2 * (c / step));
376       else
377          ins->load_store.swizzle |=
378             ((v / step) << (4 * c)) | (((v / step) + 1) << ((4 * c) + 2));
379    }
380 
381    /* TODO: arg_1/2 */
382 }
383 
384 static void
385 mir_pack_swizzle_tex(midgard_instruction *ins)
386 {
387    for (unsigned i = 0; i < 2; ++i) {
388       unsigned packed = 0;
389 
390       for (unsigned c = 0; c < 4; ++c) {
391          unsigned v = ins->swizzle[i][c];
392 
393          /* Check vec4 */
394          assert(v <= 3);
395 
396          packed |= v << (2 * c);
397       }
398 
399       if (i == 0)
400          ins->texture.swizzle = packed;
401       else
402          ins->texture.in_reg_swizzle = packed;
403    }
404 
405    /* TODO: bias component */
406 }
407 
408 /*
409  * Up to 15 { ALU, LDST } bundles can execute in parallel with a texture op.
410  * Given a texture op, look ahead to see how many such bundles we can flag for
411  * OoO execution
412  */
413 static bool
414 mir_can_run_ooo(midgard_block *block, midgard_bundle *bundle,
415                 unsigned dependency)
416 {
417    /* Don't read out of bounds */
418    if (bundle >=
419        (midgard_bundle *)((char *)block->bundles.data + block->bundles.size))
420       return false;
421 
422    /* Texture ops can't execute with other texture ops */
423    if (!IS_ALU(bundle->tag) && bundle->tag != TAG_LOAD_STORE_4)
424       return false;
425 
426    for (unsigned i = 0; i < bundle->instruction_count; ++i) {
427       midgard_instruction *ins = bundle->instructions[i];
428 
429       /* No branches, jumps, or discards */
430       if (ins->compact_branch)
431          return false;
432 
433       /* No read-after-write data dependencies */
434       mir_foreach_src(ins, s) {
435          if (ins->src[s] == dependency)
436             return false;
437       }
438    }
439 
440    /* Otherwise, we're okay */
441    return true;
442 }
443 
444 static void
445 mir_pack_tex_ooo(midgard_block *block, midgard_bundle *bundle,
446                  midgard_instruction *ins)
447 {
448    unsigned count = 0;
449 
450    for (count = 0; count < 15; ++count) {
451       if (!mir_can_run_ooo(block, bundle + count + 1, ins->dest))
452          break;
453    }
454 
455    ins->texture.out_of_order = count;
456 }
457 
458 /* Load/store masks are 4 bits wide, and load/store ops pack for that.
459  * For most operations, vec4 is the natural mask width; vec8 is constrained to
460  * be in pairs, vec2 is duplicated. TODO: 8-bit?
461  * For common stores (i.e. ST.*), each bit masks a single byte in the 32-bit
462  * case, 2 bytes in the 64-bit case and 4 bytes in the 128-bit case.
463  */
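/* For example, writing only the low 32-bit word of a 128-bit store gives a
 * bytemask of 0x000f, which midgard_pack_common_store_mask() turns into the
 * 4-bit mask 0b0001. */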
464 
465 static unsigned
466 midgard_pack_common_store_mask(midgard_instruction *ins)
467 {
468    ASSERTED unsigned comp_sz = nir_alu_type_get_type_size(ins->src_types[0]);
469    unsigned bytemask = mir_bytemask(ins);
470    unsigned packed = 0;
471 
472    switch (ins->op) {
473    case midgard_op_st_u8:
474       return mir_bytemask(ins) & 1;
475    case midgard_op_st_u16:
476       return mir_bytemask(ins) & 3;
477    case midgard_op_st_32:
478       return mir_bytemask(ins);
479    case midgard_op_st_64:
480       assert(comp_sz >= 16);
481       for (unsigned i = 0; i < 4; i++) {
482          if (bytemask & (3 << (i * 2)))
483             packed |= 1 << i;
484       }
485       return packed;
486    case midgard_op_st_128:
487       assert(comp_sz >= 32);
488       for (unsigned i = 0; i < 4; i++) {
489          if (bytemask & (0xf << (i * 4)))
490             packed |= 1 << i;
491       }
492       return packed;
493    default:
494       unreachable("unexpected ldst opcode");
495    }
496 }
497 
498 static void
499 mir_pack_ldst_mask(midgard_instruction *ins)
500 {
501    unsigned sz = nir_alu_type_get_type_size(ins->dest_type);
502    unsigned packed = ins->mask;
503 
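   /* The 4-bit load/store mask addresses 32-bit lanes: a 64-bit component
    * covers two lanes, and sub-32-bit components must be written in whole
    * 32-bit groups, hence the duplication assert below. */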
504    if (OP_IS_COMMON_STORE(ins->op)) {
505       packed = midgard_pack_common_store_mask(ins);
506    } else {
507       if (sz == 64) {
508          packed = ((ins->mask & 0x2) ? (0x8 | 0x4) : 0) |
509                   ((ins->mask & 0x1) ? (0x2 | 0x1) : 0);
510       } else if (sz < 32) {
511          unsigned comps_per_32b = 32 / sz;
512 
513          packed = 0;
514 
515          for (unsigned i = 0; i < 4; ++i) {
516             unsigned submask = (ins->mask >> (i * comps_per_32b)) &
517                                BITFIELD_MASK(comps_per_32b);
518 
519             /* Make sure we're duplicated */
520             assert(submask == 0 || submask == BITFIELD_MASK(comps_per_32b));
521             packed |= (submask != 0) << i;
522          }
523       } else {
524          assert(sz == 32);
525       }
526    }
527 
528    ins->load_store.mask = packed;
529 }
530 
531 static void
532 mir_lower_inverts(midgard_instruction *ins)
533 {
534    bool inv[3] = {ins->src_invert[0], ins->src_invert[1], ins->src_invert[2]};
535 
536    switch (ins->op) {
537    case midgard_alu_op_iand:
538       /* a & ~b = iandnot(a, b) */
539       /* ~a & ~b = ~(a | b) = inor(a, b) */
540 
541       if (inv[0] && inv[1])
542          ins->op = midgard_alu_op_inor;
543       else if (inv[1])
544          ins->op = midgard_alu_op_iandnot;
545 
546       break;
547    case midgard_alu_op_ior:
548       /*  a | ~b = iornot(a, b) */
549       /* ~a | ~b = ~(a & b) = inand(a, b) */
550 
551       if (inv[0] && inv[1])
552          ins->op = midgard_alu_op_inand;
553       else if (inv[1])
554          ins->op = midgard_alu_op_iornot;
555 
556       break;
557 
558    case midgard_alu_op_ixor:
559       /* ~a ^ b = a ^ ~b = ~(a ^ b) = inxor(a, b) */
560       /* ~a ^ ~b = a ^ b */
561 
562       if (inv[0] ^ inv[1])
563          ins->op = midgard_alu_op_inxor;
564 
565       break;
566 
567    default:
568       break;
569    }
570 }
571 
572 /* Opcodes with ROUNDS are the base (rte/0) type so we can just add */
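/* e.g. an op scheduled with a non-default round mode simply selects the
 * neighbouring _rtz/_rtn/_rtp variant of the same opcode. */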
573 
574 static void
575 mir_lower_roundmode(midgard_instruction *ins)
576 {
577    if (alu_opcode_props[ins->op].props & MIDGARD_ROUNDS) {
578       assert(ins->roundmode <= 0x3);
579       ins->op += ins->roundmode;
580    }
581 }
582 
583 static midgard_load_store_word
584 load_store_from_instr(midgard_instruction *ins)
585 {
586    midgard_load_store_word ldst = ins->load_store;
587    ldst.op = ins->op;
588 
589    if (OP_IS_STORE(ldst.op)) {
590       ldst.reg = SSA_REG_FROM_FIXED(ins->src[0]) & 1;
591    } else {
592       ldst.reg = SSA_REG_FROM_FIXED(ins->dest);
593    }
594 
595    /* Atomic opcode swizzles have a special meaning:
596     *   - The first two bits say which component of the implicit register should
597     * be used
598     *   - The next two bits say if the implicit register is r26 or r27 */
599    if (OP_IS_ATOMIC(ins->op)) {
600       ldst.swizzle = 0;
601       ldst.swizzle |= ins->swizzle[3][0] & 3;
602       ldst.swizzle |= (SSA_REG_FROM_FIXED(ins->src[3]) & 1 ? 1 : 0) << 2;
603    }
604 
605    if (ins->src[1] != ~0) {
606       ldst.arg_reg = SSA_REG_FROM_FIXED(ins->src[1]) - REGISTER_LDST_BASE;
607       unsigned sz = nir_alu_type_get_type_size(ins->src_types[1]);
608       ldst.arg_comp = midgard_ldst_comp(ldst.arg_reg, ins->swizzle[1][0], sz);
609    }
610 
611    if (ins->src[2] != ~0) {
612       ldst.index_reg = SSA_REG_FROM_FIXED(ins->src[2]) - REGISTER_LDST_BASE;
613       unsigned sz = nir_alu_type_get_type_size(ins->src_types[2]);
614       ldst.index_comp =
615          midgard_ldst_comp(ldst.index_reg, ins->swizzle[2][0], sz);
616    }
617 
618    return ldst;
619 }
620 
621 static midgard_texture_word
622 texture_word_from_instr(midgard_instruction *ins)
623 {
624    midgard_texture_word tex = ins->texture;
625    tex.op = ins->op;
626 
627    unsigned src1 =
628       ins->src[1] == ~0 ? REGISTER_UNUSED : SSA_REG_FROM_FIXED(ins->src[1]);
629    tex.in_reg_select = src1 & 1;
630 
631    unsigned dest =
632       ins->dest == ~0 ? REGISTER_UNUSED : SSA_REG_FROM_FIXED(ins->dest);
633    tex.out_reg_select = dest & 1;
634 
635    if (ins->src[2] != ~0) {
636       midgard_tex_register_select sel = {
637          .select = SSA_REG_FROM_FIXED(ins->src[2]) & 1,
638          .full = 1,
639          .component = ins->swizzle[2][0],
640       };
641       uint8_t packed;
642       memcpy(&packed, &sel, sizeof(packed));
643       tex.bias = packed;
644    }
645 
646    if (ins->src[3] != ~0) {
647       unsigned x = ins->swizzle[3][0];
648       unsigned y = x + 1;
649       unsigned z = x + 2;
650 
651       /* Check range, TODO: half-registers */
652       assert(z < 4);
653 
654       unsigned offset_reg = SSA_REG_FROM_FIXED(ins->src[3]);
655       tex.offset = (1) |                   /* full */
656                    (offset_reg & 1) << 1 | /* select */
657                    (0 << 2) |              /* upper */
658                    (x << 3) |              /* swizzle */
659                    (y << 5) |              /* swizzle */
660                    (z << 7);               /* swizzle */
661    }
662 
663    return tex;
664 }
665 
666 static midgard_vector_alu
667 vector_alu_from_instr(midgard_instruction *ins)
668 {
669    midgard_vector_alu alu = {
670       .op = ins->op,
671       .outmod = ins->outmod,
672       .reg_mode = reg_mode_for_bitsize(max_bitsize_for_alu(ins)),
673    };
674 
675    if (ins->has_inline_constant) {
676       /* Encode inline 16-bit constant. See disassembler for
677        * where the algorithm is from */
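      /* Bits 10:8 of the constant land in imm[2:0] and bits 7:0 in
       * imm[10:3]; the result is then shifted up two bits into src2. */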
678 
679       int lower_11 = ins->inline_constant & ((1 << 12) - 1);
680       uint16_t imm = ((lower_11 >> 8) & 0x7) | ((lower_11 & 0xFF) << 3);
681 
682       alu.src2 = imm << 2;
683    }
684 
685    return alu;
686 }
687 
688 static midgard_branch_extended
689 midgard_create_branch_extended(midgard_condition cond,
690                                midgard_jmp_writeout_op op, unsigned dest_tag,
691                                signed quadword_offset)
692 {
693    /* The condition code is actually a LUT describing a function to
694     * combine multiple condition codes. However, we only support a single
695     * condition code at the moment, so we just duplicate it a bunch of
696     * times. */
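   /* Equivalently, duplicated_cond == cond * 0x5555 for a 2-bit condition. */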
697 
698    uint16_t duplicated_cond = (cond << 14) | (cond << 12) | (cond << 10) |
699                               (cond << 8) | (cond << 6) | (cond << 4) |
700                               (cond << 2) | (cond << 0);
701 
702    midgard_branch_extended branch = {
703       .op = op,
704       .dest_tag = dest_tag,
705       .offset = quadword_offset,
706       .cond = duplicated_cond,
707    };
708 
709    return branch;
710 }
711 
712 static void
713 emit_branch(midgard_instruction *ins, compiler_context *ctx,
714             midgard_block *block, midgard_bundle *bundle,
715             struct util_dynarray *emission)
716 {
717    /* Parse some basic branch info */
718    bool is_compact = ins->unit == ALU_ENAB_BR_COMPACT;
719    bool is_conditional = ins->branch.conditional;
720    bool is_inverted = ins->branch.invert_conditional;
721    bool is_discard = ins->branch.target_type == TARGET_DISCARD;
722    bool is_tilebuf_wait = ins->branch.target_type == TARGET_TILEBUF_WAIT;
723    bool is_special = is_discard || is_tilebuf_wait;
724    bool is_writeout = ins->writeout;
725 
726    /* Determine the block we're jumping to */
727    int target_number = ins->branch.target_block;
728 
729    /* Report the destination tag */
730    int dest_tag = is_discard ? 0
731                   : is_tilebuf_wait
732                      ? bundle->tag
733                      : midgard_get_first_tag_from_block(ctx, target_number);
734 
735    /* Count up the number of quadwords we're
736     * jumping over = number of quadwords until
737     * (br_block_idx, target_number) */
738 
739    int quadword_offset = 0;
740 
741    if (is_discard) {
742       /* Fixed encoding, not actually an offset */
743       quadword_offset = 0x2;
744    } else if (is_tilebuf_wait) {
745       quadword_offset = -1;
746    } else if (target_number > block->base.name) {
747       /* Jump forward */
748 
749       for (int idx = block->base.name + 1; idx < target_number; ++idx) {
750          midgard_block *blk = mir_get_block(ctx, idx);
751          assert(blk);
752 
753          quadword_offset += blk->quadword_count;
754       }
755    } else {
756       /* Jump backwards */
757 
758       for (int idx = block->base.name; idx >= target_number; --idx) {
759          midgard_block *blk = mir_get_block(ctx, idx);
760          assert(blk);
761 
762          quadword_offset -= blk->quadword_count;
763       }
764    }
765 
766    /* Unconditional extended branches (far jumps)
767     * have issues, so we always use a conditional
768     * branch, setting the condition to always for
769     * unconditional. For compact unconditional
770     * branches, cond isn't used so it doesn't
771     * matter what we pick. */
772 
773    midgard_condition cond = !is_conditional ? midgard_condition_always
774                             : is_inverted   ? midgard_condition_false
775                                             : midgard_condition_true;
776 
777    midgard_jmp_writeout_op op =
778       is_discard        ? midgard_jmp_writeout_op_discard
779       : is_tilebuf_wait ? midgard_jmp_writeout_op_tilebuffer_pending
780       : is_writeout     ? midgard_jmp_writeout_op_writeout
781       : (is_compact && !is_conditional) ? midgard_jmp_writeout_op_branch_uncond
782                                         : midgard_jmp_writeout_op_branch_cond;
783 
784    if (is_compact) {
785       unsigned size = sizeof(midgard_branch_cond);
786 
787       if (is_conditional || is_special) {
788          midgard_branch_cond branch = {
789             .op = op,
790             .dest_tag = dest_tag,
791             .offset = quadword_offset,
792             .cond = cond,
793          };
794          memcpy(util_dynarray_grow_bytes(emission, size, 1), &branch, size);
795       } else {
796          assert(op == midgard_jmp_writeout_op_branch_uncond);
797          midgard_branch_uncond branch = {
798             .op = op,
799             .dest_tag = dest_tag,
800             .offset = quadword_offset,
801             .call_mode = midgard_call_mode_default,
802          };
803          assert(branch.offset == quadword_offset);
804          memcpy(util_dynarray_grow_bytes(emission, size, 1), &branch, size);
805       }
806    } else { /* extended branch; `ins->compact_branch` is still set, hence the misnomer */
807       unsigned size = sizeof(midgard_branch_extended);
808 
809       midgard_branch_extended branch =
810          midgard_create_branch_extended(cond, op, dest_tag, quadword_offset);
811 
812       memcpy(util_dynarray_grow_bytes(emission, size, 1), &branch, size);
813    }
814 }
815 
816 static void
817 emit_alu_bundle(compiler_context *ctx, midgard_block *block,
818                 midgard_bundle *bundle, struct util_dynarray *emission,
819                 unsigned lookahead)
820 {
821    /* Emit the control word */
822    util_dynarray_append(emission, uint32_t, bundle->control | lookahead);
823 
824    /* Next up, emit register words */
825    for (unsigned i = 0; i < bundle->instruction_count; ++i) {
826       midgard_instruction *ins = bundle->instructions[i];
827 
828       /* Check if this instruction has registers */
829       if (ins->compact_branch)
830          continue;
831 
832       unsigned src2_reg = REGISTER_UNUSED;
833       if (ins->has_inline_constant)
834          src2_reg = ins->inline_constant >> 11;
835       else if (ins->src[1] != ~0)
836          src2_reg = SSA_REG_FROM_FIXED(ins->src[1]);
837 
838       /* Otherwise, just emit the registers */
839       uint16_t reg_word = 0;
840       midgard_reg_info registers = {
841          .src1_reg = (ins->src[0] == ~0 ? REGISTER_UNUSED
842                                         : SSA_REG_FROM_FIXED(ins->src[0])),
843          .src2_reg = src2_reg,
844          .src2_imm = ins->has_inline_constant,
845          .out_reg =
846             (ins->dest == ~0 ? REGISTER_UNUSED : SSA_REG_FROM_FIXED(ins->dest)),
847       };
848       memcpy(&reg_word, &registers, sizeof(uint16_t));
849       util_dynarray_append(emission, uint16_t, reg_word);
850    }
851 
852    /* Now, we emit the body itself */
853    for (unsigned i = 0; i < bundle->instruction_count; ++i) {
854       midgard_instruction *ins = bundle->instructions[i];
855 
856       if (!ins->compact_branch) {
857          mir_lower_inverts(ins);
858          mir_lower_roundmode(ins);
859       }
860 
861       if (midgard_is_branch_unit(ins->unit)) {
862          emit_branch(ins, ctx, block, bundle, emission);
863       } else if (ins->unit & UNITS_ANY_VECTOR) {
864          midgard_vector_alu source = vector_alu_from_instr(ins);
865          mir_pack_mask_alu(ins, &source);
866          mir_pack_vector_srcs(ins, &source);
867          unsigned size = sizeof(source);
868          memcpy(util_dynarray_grow_bytes(emission, size, 1), &source, size);
869       } else {
870          midgard_scalar_alu source =
871             vector_to_scalar_alu(vector_alu_from_instr(ins), ins);
872          unsigned size = sizeof(source);
873          memcpy(util_dynarray_grow_bytes(emission, size, 1), &source, size);
874       }
875    }
876 
877    /* Emit padding (all zero) */
878    if (bundle->padding) {
879       memset(util_dynarray_grow_bytes(emission, bundle->padding, 1), 0,
880              bundle->padding);
881    }
882 
883    /* Tack on constants */
884 
885    if (bundle->has_embedded_constants)
886       util_dynarray_append(emission, midgard_constants, bundle->constants);
887 }
888 
889 /* Shift applied to the immediate used as an offset. Probably this is papering
890  * over some other semantic distinction as well, but it unifies things in the
891  * compiler so I don't mind. */
892 
893 static void
894 mir_ldst_pack_offset(midgard_instruction *ins, int offset)
895 {
896    /* These opcodes don't support offsets */
897    assert(!OP_IS_REG2REG_LDST(ins->op) || ins->op == midgard_op_lea ||
898           ins->op == midgard_op_lea_image);
899 
900    if (OP_IS_UBO_READ(ins->op))
901       ins->load_store.signed_offset |= PACK_LDST_UBO_OFS(offset);
902    else if (OP_IS_IMAGE(ins->op))
903       ins->load_store.signed_offset |= PACK_LDST_ATTRIB_OFS(offset);
904    else if (OP_IS_SPECIAL(ins->op))
905       ins->load_store.signed_offset |= PACK_LDST_SELECTOR_OFS(offset);
906    else
907       ins->load_store.signed_offset |= PACK_LDST_MEM_OFS(offset);
908 }
909 
910 static enum mali_sampler_type
911 midgard_sampler_type(nir_alu_type t)
912 {
913    switch (nir_alu_type_get_base_type(t)) {
914    case nir_type_float:
915       return MALI_SAMPLER_FLOAT;
916    case nir_type_int:
917       return MALI_SAMPLER_SIGNED;
918    case nir_type_uint:
919       return MALI_SAMPLER_UNSIGNED;
920    default:
921       unreachable("Unknown sampler type");
922    }
923 }
924 
925 /* After everything is scheduled, emit whole bundles at a time */
926 
927 void
928 emit_binary_bundle(compiler_context *ctx, midgard_block *block,
929                    midgard_bundle *bundle, struct util_dynarray *emission,
930                    int next_tag)
931 {
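   /* For ALU bundles the next bundle's tag is OR'd into the control word just
    * above this bundle's own tag, so the hardware knows what to fetch next;
    * load/store and texture words carry it in their next_type field instead. */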
932    int lookahead = next_tag << 4;
933 
934    switch (bundle->tag) {
935    case TAG_ALU_4:
936    case TAG_ALU_8:
937    case TAG_ALU_12:
938    case TAG_ALU_16:
939    case TAG_ALU_4 + 4:
940    case TAG_ALU_8 + 4:
941    case TAG_ALU_12 + 4:
942    case TAG_ALU_16 + 4:
943       emit_alu_bundle(ctx, block, bundle, emission, lookahead);
944       break;
945 
946    case TAG_LOAD_STORE_4: {
947       /* One or two composing instructions */
948 
949       uint64_t current64, next64 = LDST_NOP;
950 
951       /* Copy masks */
952 
953       for (unsigned i = 0; i < bundle->instruction_count; ++i) {
954          midgard_instruction *ins = bundle->instructions[i];
955          mir_pack_ldst_mask(ins);
956 
957          /* Atomic ops don't use this swizzle the same way as other ops */
958          if (!OP_IS_ATOMIC(ins->op))
959             mir_pack_swizzle_ldst(ins);
960 
961          /* Apply a constant offset */
962          unsigned offset = ins->constants.u32[0];
963          if (offset)
964             mir_ldst_pack_offset(ins, offset);
965       }
966 
967       midgard_load_store_word ldst0 =
968          load_store_from_instr(bundle->instructions[0]);
969       memcpy(&current64, &ldst0, sizeof(current64));
970 
971       if (bundle->instruction_count == 2) {
972          midgard_load_store_word ldst1 =
973             load_store_from_instr(bundle->instructions[1]);
974          memcpy(&next64, &ldst1, sizeof(next64));
975       }
976 
977       midgard_load_store instruction = {
978          .type = bundle->tag,
979          .next_type = next_tag,
980          .word1 = current64,
981          .word2 = next64,
982       };
983 
984       util_dynarray_append(emission, midgard_load_store, instruction);
985 
986       break;
987    }
988 
989    case TAG_TEXTURE_4:
990    case TAG_TEXTURE_4_VTX:
991    case TAG_TEXTURE_4_BARRIER: {
992       /* Texture instructions are easy, since there is no pipelining
993        * nor VLIW to worry about. We may need to set .cont/.last
994        * flags. */
995 
996       midgard_instruction *ins = bundle->instructions[0];
997 
998       ins->texture.type = bundle->tag;
999       ins->texture.next_type = next_tag;
1000       ins->texture.exec = MIDGARD_PARTIAL_EXECUTION_NONE; /* default */
1001 
1002       /* Nothing else to pack for barriers */
1003       if (ins->op == midgard_tex_op_barrier) {
1004          ins->texture.op = ins->op;
1005          util_dynarray_append(emission, midgard_texture_word, ins->texture);
1006          return;
1007       }
1008 
1009       signed override = mir_upper_override(ins, 32);
1010 
1011       ins->texture.mask = override > 0 ? ins->mask >> override : ins->mask;
1012 
1013       mir_pack_swizzle_tex(ins);
1014 
1015       if (!(ctx->quirks & MIDGARD_NO_OOO))
1016          mir_pack_tex_ooo(block, bundle, ins);
1017 
1018       unsigned osz = nir_alu_type_get_type_size(ins->dest_type);
1019       unsigned isz = nir_alu_type_get_type_size(ins->src_types[1]);
1020 
1021       assert(osz == 32 || osz == 16);
1022       assert(isz == 32 || isz == 16);
1023 
1024       ins->texture.out_full = (osz == 32);
1025       ins->texture.out_upper = override > 0;
1026       ins->texture.in_reg_full = (isz == 32);
1027       ins->texture.sampler_type = midgard_sampler_type(ins->dest_type);
1028       ins->texture.outmod = ins->outmod;
1029 
1030       if (mir_op_computes_derivatives(ctx->stage, ins->op)) {
1031          if (ins->helper_terminate)
1032             ins->texture.exec = MIDGARD_PARTIAL_EXECUTION_KILL;
1033          else if (!ins->helper_execute)
1034             ins->texture.exec = MIDGARD_PARTIAL_EXECUTION_SKIP;
1035       }
1036 
1037       midgard_texture_word texture = texture_word_from_instr(ins);
1038       util_dynarray_append(emission, midgard_texture_word, texture);
1039       break;
1040    }
1041 
1042    default:
1043       unreachable("Unknown midgard instruction type\n");
1044    }
1045 }
1046