/*
 * Copyright (C) 2019 Alyssa Rosenzweig <[email protected]>
 * Copyright (C) 2019-2020 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "compiler.h"
#include "midgard_ops.h"

void
mir_rewrite_index_src_single(midgard_instruction *ins, unsigned old,
                             unsigned new)
{
   mir_foreach_src(ins, i) {
      if (ins->src[i] == old)
         ins->src[i] = new;
   }
}

void
mir_rewrite_index_dst_single(midgard_instruction *ins, unsigned old,
                             unsigned new)
{
   if (ins->dest == old)
      ins->dest = new;
}

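/* As above, but also composes the given swizzle on top of the existing
 * swizzle of any source that gets rewritten */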
static void
mir_rewrite_index_src_single_swizzle(midgard_instruction *ins, unsigned old,
                                     unsigned new, unsigned *swizzle)
{
   for (unsigned i = 0; i < ARRAY_SIZE(ins->src); ++i) {
      if (ins->src[i] != old)
         continue;

      ins->src[i] = new;
      mir_compose_swizzle(ins->swizzle[i], swizzle, ins->swizzle[i]);
   }
}

void
mir_rewrite_index_src(compiler_context *ctx, unsigned old, unsigned new)
{
   mir_foreach_instr_global(ctx, ins) {
      mir_rewrite_index_src_single(ins, old, new);
   }
}

void
mir_rewrite_index_src_swizzle(compiler_context *ctx, unsigned old, unsigned new,
                              unsigned *swizzle)
{
   mir_foreach_instr_global(ctx, ins) {
      mir_rewrite_index_src_single_swizzle(ins, old, new, swizzle);
   }
}

void
mir_rewrite_index_dst(compiler_context *ctx, unsigned old, unsigned new)
{
   mir_foreach_instr_global(ctx, ins) {
      mir_rewrite_index_dst_single(ins, old, new);
   }

   /* Implicitly written before the shader */
   if (ctx->blend_input == old)
      ctx->blend_input = new;

   if (ctx->blend_src1 == old)
      ctx->blend_src1 = new;
}

void
mir_rewrite_index(compiler_context *ctx, unsigned old, unsigned new)
{
   mir_rewrite_index_src(ctx, old, new);
   mir_rewrite_index_dst(ctx, old, new);
}

unsigned
mir_use_count(compiler_context *ctx, unsigned value)
{
   unsigned used_count = 0;

   mir_foreach_instr_global(ctx, ins) {
      if (mir_has_arg(ins, value))
         ++used_count;
   }

   if (ctx->blend_input == value)
      ++used_count;

   if (ctx->blend_src1 == value)
      ++used_count;

   return used_count;
}

/* Checks if a value is used only once (or totally dead), which is an important
 * heuristic to figure out if certain optimizations are Worth It (TM) */

bool
mir_single_use(compiler_context *ctx, unsigned value)
{
   /* We can replicate constants in places so who cares */
   if (value == SSA_FIXED_REGISTER(REGISTER_CONSTANT))
      return true;

   return mir_use_count(ctx, value) <= 1;
}

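/* Checks if source i of an instruction carries a nontrivial modifier: a shift
 * for integer ops, abs/neg for float ops, a type conversion, or (when
 * check_swizzle is set) a non-identity swizzle on the enabled components */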
bool
mir_nontrivial_mod(midgard_instruction *ins, unsigned i, bool check_swizzle)
{
   bool is_int = midgard_is_integer_op(ins->op);

   if (is_int) {
      if (ins->src_shift[i])
         return true;
   } else {
      if (ins->src_neg[i])
         return true;
      if (ins->src_abs[i])
         return true;
   }

   if (ins->dest_type != ins->src_types[i])
      return true;

   if (check_swizzle) {
      for (unsigned c = 0; c < 16; ++c) {
         if (!(ins->mask & (1 << c)))
            continue;
         if (ins->swizzle[i][c] != c)
            return true;
      }
   }

   return false;
}

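/* Checks if the output modifier is nontrivial: anything other than keeplo for
 * integer ops or none for float ops, or a result type differing from the type
 * of source 1 */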
bool
mir_nontrivial_outmod(midgard_instruction *ins)
{
   bool is_int = midgard_is_integer_op(ins->op);
   unsigned mod = ins->outmod;

   if (ins->dest_type != ins->src_types[1])
      return true;

   if (is_int)
      return mod != midgard_outmod_keeplo;
   else
      return mod != midgard_outmod_none;
}

/* 128 / sz = exp2(log2(128 / sz))
 *          = exp2(log2(128) - log2(sz))
 *          = exp2(7 - log2(sz))
 *          = 1 << (7 - log2(sz))
 */
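/* For example, 32-bit elements give 1 << (7 - 5) = 4 components per 128-bit
 * vector, and 16-bit elements give 1 << (7 - 4) = 8 */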

static unsigned
mir_components_for_bits(unsigned bits)
{
   return 1 << (7 - util_logbase2(bits));
}

unsigned
mir_components_for_type(nir_alu_type T)
{
   unsigned sz = nir_alu_type_get_type_size(T);
   return mir_components_for_bits(sz);
}

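/* Converts a per-byte mask into a per-component mask for a given type size,
 * asserting that the bytes within each component agree */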
uint16_t
mir_from_bytemask(uint16_t bytemask, unsigned bits)
{
   unsigned value = 0;
   unsigned count = bits / 8;

   for (unsigned c = 0, d = 0; c < 16; c += count, ++d) {
      bool a = (bytemask & (1 << c)) != 0;

      /* Every byte of the component must agree with its first byte */
      for (unsigned q = c; q < c + count; ++q)
         assert(((bytemask & (1 << q)) != 0) == a);

      value |= (a << d);
   }

   return value;
}

/* Rounds up a bytemask to fill a given component count. Iterate each
 * component, and check if any bytes in the component are masked on */
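/* For example, with 16-bit (2-byte) components, a bytemask of 0x1 covering
 * only the low byte of component 0 rounds up to 0x3, covering the whole
 * component */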

uint16_t
mir_round_bytemask_up(uint16_t mask, unsigned bits)
{
   unsigned bytes = bits / 8;
   unsigned maxmask = mask_of(bytes);
   unsigned channels = mir_components_for_bits(bits);

   for (unsigned c = 0; c < channels; ++c) {
      unsigned submask = maxmask << (c * bytes);

      if (mask & submask)
         mask |= submask;
   }

   return mask;
}

/* Grabs the per-byte mask of an instruction (as opposed to per-component) */

uint16_t
mir_bytemask(midgard_instruction *ins)
{
   unsigned type_size = nir_alu_type_get_type_size(ins->dest_type);
   return pan_to_bytemask(type_size, ins->mask);
}

void
mir_set_bytemask(midgard_instruction *ins, uint16_t bytemask)
{
   unsigned type_size = nir_alu_type_get_type_size(ins->dest_type);
   ins->mask = mir_from_bytemask(bytemask, type_size);
}

/*
 * Checks if we should use an upper destination override, rather than the lower
 * one in the IR. If yes, returns the number of components to shift by. If no,
 * returns zero for a lower override and negative for no override.
 */
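/* For example, a 16-bit destination in a 32-bit instruction has a threshold of
 * 4 components: a writemask of 0xF0 (upper half only) returns 4, whereas 0x0F
 * returns 0 */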
signed
mir_upper_override(midgard_instruction *ins, unsigned inst_size)
{
   unsigned type_size = nir_alu_type_get_type_size(ins->dest_type);

   /* If the sizes are the same, there's nothing to override */
   if (type_size == inst_size)
      return -1;

   /* There are 16 bytes per vector, so there are (16/bytes)
    * components per vector. So the magic half is half of
    * (16/bytes), which simplifies to 8/bytes = 8 / (bits / 8) = 64 / bits
    */

   unsigned threshold = mir_components_for_bits(type_size) >> 1;

   /* How many components did we shift over? */
   unsigned zeroes = __builtin_ctz(ins->mask);

   /* Did we hit the threshold? */
   return (zeroes >= threshold) ? threshold : 0;
}

/* Creates a mask of the components of a node read by an instruction, by
 * analyzing the swizzle with respect to the instruction's mask. E.g.:
 *
 *  fadd r0.xz, r1.yyyy, r2.zwyx
 *
 * will return a mask of Z/Y for r2
 */

static uint16_t
mir_bytemask_of_read_components_single(unsigned *swizzle, unsigned inmask,
                                       unsigned bits)
{
   unsigned cmask = 0;

   for (unsigned c = 0; c < MIR_VEC_COMPONENTS; ++c) {
      if (!(inmask & (1 << c)))
         continue;
      cmask |= (1 << swizzle[c]);
   }

   return pan_to_bytemask(bits, cmask);
}

uint16_t
mir_bytemask_of_read_components_index(midgard_instruction *ins, unsigned i)
{
   /* Conditional branches read one 32-bit component = 4 bytes (TODO: multi
    * branch??) */
   if (ins->compact_branch && ins->branch.conditional && (i == 0))
      return 0xF;

   /* ALU ops act componentwise so we need to pay attention to
    * their mask. Texture/ldst does not so we don't clamp source
    * readmasks based on the writemask */
   unsigned qmask = ~0;

   /* Handle dot products and things */
   if (ins->type == TAG_ALU_4 && !ins->compact_branch) {
      unsigned props = alu_opcode_props[ins->op].props;

      unsigned channel_override = GET_CHANNEL_COUNT(props);

      if (channel_override)
         qmask = mask_of(channel_override);
      else
         qmask = ins->mask;
   }

   return mir_bytemask_of_read_components_single(
      ins->swizzle[i], qmask, nir_alu_type_get_type_size(ins->src_types[i]));
}

uint16_t
mir_bytemask_of_read_components(midgard_instruction *ins, unsigned node)
{
   uint16_t mask = 0;

   if (node == ~0)
      return 0;

   mir_foreach_src(ins, i) {
      if (ins->src[i] != node)
         continue;
      mask |= mir_bytemask_of_read_components_index(ins, i);
   }

   return mask;
}

/* Register allocation occurs after instruction scheduling, which is fine until
 * we start needing to spill registers and therefore insert instructions into
 * an already-scheduled program. We don't have to be terribly efficient about
 * this, since spilling is already slow. So just semantically we need to insert
 * the instruction into a new bundle before/after the bundle of the instruction
 * in question */

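/* Wraps a single instruction in a fresh one-instruction bundle. For ALU, only
 * moves are expected here; they are placed on the VMUL unit and the bundle's
 * padding and control words are filled in accordingly. The helpers below
 * build such a bundle and splice it into the block's bundle array next to the
 * bundle containing the tagged instruction. A hypothetical spill store, for
 * instance, could be placed right after the instruction that produced the
 * spilled value:
 *
 *    midgard_instruction st = ...;   (build the scratch store)
 *    mir_insert_instruction_after_scheduled(ctx, block, ins, st);
 */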
static midgard_bundle
mir_bundle_for_op(compiler_context *ctx, midgard_instruction ins)
{
   midgard_instruction *u = mir_upload_ins(ctx, ins);

   midgard_bundle bundle = {
      .tag = ins.type,
      .instruction_count = 1,
      .instructions = {u},
   };

   if (bundle.tag == TAG_ALU_4) {
      assert(OP_IS_MOVE(u->op));
      u->unit = UNIT_VMUL;

      size_t bytes_emitted = sizeof(uint32_t) + sizeof(midgard_reg_info) +
                             sizeof(midgard_vector_alu);
      bundle.padding = ~(bytes_emitted - 1) & 0xF;
      bundle.control = ins.type | u->unit;
   }

   return bundle;
}

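/* Finds the index of the scheduled bundle containing a given instruction
 * within a block, aborting if the instruction is not scheduled there */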
static unsigned
mir_bundle_idx_for_ins(midgard_instruction *tag, midgard_block *block)
{
   midgard_bundle *bundles = (midgard_bundle *)block->bundles.data;

   size_t count = (block->bundles.size / sizeof(midgard_bundle));

   for (unsigned i = 0; i < count; ++i) {
      for (unsigned j = 0; j < bundles[i].instruction_count; ++j) {
         if (bundles[i].instructions[j] == tag)
            return i;
      }
   }

   mir_print_instruction(tag);
   unreachable("Instruction not scheduled in block");
}

midgard_instruction *
mir_insert_instruction_before_scheduled(compiler_context *ctx,
                                        midgard_block *block,
                                        midgard_instruction *tag,
                                        midgard_instruction ins)
{
   unsigned before = mir_bundle_idx_for_ins(tag, block);
   size_t count = util_dynarray_num_elements(&block->bundles, midgard_bundle);
   UNUSED void *unused = util_dynarray_grow(&block->bundles, midgard_bundle, 1);

   midgard_bundle *bundles = (midgard_bundle *)block->bundles.data;
   memmove(bundles + before + 1, bundles + before,
           (count - before) * sizeof(midgard_bundle));
   midgard_bundle *before_bundle = bundles + before + 1;

   midgard_bundle new = mir_bundle_for_op(ctx, ins);
   memcpy(bundles + before, &new, sizeof(new));

   list_addtail(&new.instructions[0]->link,
                &before_bundle->instructions[0]->link);
   block->quadword_count += midgard_tag_props[new.tag].size;

   return new.instructions[0];
}

midgard_instruction *
mir_insert_instruction_after_scheduled(compiler_context *ctx,
                                       midgard_block *block,
                                       midgard_instruction *tag,
                                       midgard_instruction ins)
{
   /* We need to grow the bundles array to add our new bundle */
   size_t count = util_dynarray_num_elements(&block->bundles, midgard_bundle);
   UNUSED void *unused = util_dynarray_grow(&block->bundles, midgard_bundle, 1);

   /* Find the bundle that we want to insert after */
   unsigned after = mir_bundle_idx_for_ins(tag, block);

   /* All the bundles after that one, we move ahead by one */
   midgard_bundle *bundles = (midgard_bundle *)block->bundles.data;
   memmove(bundles + after + 2, bundles + after + 1,
           (count - after - 1) * sizeof(midgard_bundle));
   midgard_bundle *after_bundle = bundles + after;

   midgard_bundle new = mir_bundle_for_op(ctx, ins);
   memcpy(bundles + after + 1, &new, sizeof(new));
   list_add(
      &new.instructions[0]->link,
      &after_bundle->instructions[after_bundle->instruction_count - 1]->link);
   block->quadword_count += midgard_tag_props[new.tag].size;

   return new.instructions[0];
}

/* Flip the first two arguments of a (binary) op. Currently ALU
 * only, no known uses for ldst/tex */

void
mir_flip(midgard_instruction *ins)
{
   unsigned temp = ins->src[0];
   ins->src[0] = ins->src[1];
   ins->src[1] = temp;

   assert(ins->type == TAG_ALU_4);

   temp = ins->src_types[0];
   ins->src_types[0] = ins->src_types[1];
   ins->src_types[1] = temp;

   temp = ins->src_abs[0];
   ins->src_abs[0] = ins->src_abs[1];
   ins->src_abs[1] = temp;

   temp = ins->src_neg[0];
   ins->src_neg[0] = ins->src_neg[1];
   ins->src_neg[1] = temp;

   temp = ins->src_invert[0];
   ins->src_invert[0] = ins->src_invert[1];
   ins->src_invert[1] = temp;

   unsigned temp_swizzle[16];
   memcpy(temp_swizzle, ins->swizzle[0], sizeof(ins->swizzle[0]));
   memcpy(ins->swizzle[0], ins->swizzle[1], sizeof(ins->swizzle[0]));
   memcpy(ins->swizzle[1], temp_swizzle, sizeof(ins->swizzle[0]));
}

/* Before squashing, calculate ctx->temp_count just by observing the MIR */

void
mir_compute_temp_count(compiler_context *ctx)
{
   unsigned max_index = 0;

   mir_foreach_instr_global(ctx, ins) {
      if (ins->dest < SSA_FIXED_MINIMUM)
         max_index = MAX2(max_index, ins->dest + 1);
   }

   if (ctx->blend_input != ~0)
      max_index = MAX2(max_index, ctx->blend_input + 1);

   if (ctx->blend_src1 != ~0)
      max_index = MAX2(max_index, ctx->blend_src1 + 1);

   ctx->temp_count = max_index;
}