xref: /aosp_15_r20/external/mesa3d/src/asahi/compiler/agx_lower_spill.c (revision 6104692788411f58d303aa86923a9ff6ecaded22)
/*
 * Copyright 2023 Alyssa Rosenzweig
 * SPDX-License-Identifier: MIT
 */

#include "util/macros.h"
#include "agx_builder.h"
#include "agx_compile.h"
#include "agx_compiler.h"

/* Lower moves involving memory registers (created when spilling) to concrete
 * spills and fills.
 */

static void
spill_fill(agx_builder *b, agx_instr *I, enum agx_size size, unsigned channels,
           unsigned component_offset)
{
   enum agx_format format =
      size == AGX_SIZE_16 ? AGX_FORMAT_I16 : AGX_FORMAT_I32;

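   /* component_offset counts channels; agx_size_align_16 gives the per-channel
    * size in 16-bit units, so multiply by 2 for a byte offset.
    */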
   unsigned offset_B = component_offset * agx_size_align_16(size) * 2;
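   /* 64-bit values are spilled as pairs of 32-bit channels, since the stack
    * access formats used here top out at 32-bit.
    */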
   unsigned effective_chans = size == AGX_SIZE_64 ? (channels * 2) : channels;
   unsigned mask = BITFIELD_MASK(effective_chans);

   assert(effective_chans <= 4);

   /* Pick off the memory and register parts of the move */
   agx_index mem = I->dest[0].memory ? I->dest[0] : I->src[0];
   agx_index reg = I->dest[0].memory ? I->src[0] : I->dest[0];

   assert(mem.type == AGX_INDEX_REGISTER && mem.memory);
   assert(reg.type == AGX_INDEX_REGISTER && !reg.memory);

   /* Slice the register according to the part of the spill we're handling */
   if (component_offset > 0 || channels != agx_channels(reg)) {
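      /* Register numbers count 16-bit halves, so advance by the per-channel
       * size in those units.
       */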
      reg.value += component_offset * agx_size_align_16(reg.size);
      reg.channels_m1 = channels - 1;
   }

   /* Calculate the stack offset in bytes. IR registers are 2 bytes each. */
   unsigned stack_offs_B = b->shader->spill_base + (mem.value * 2) + offset_B;

   /* Emit the spill/fill */
   if (I->dest[0].memory) {
      agx_stack_store(b, reg, agx_immediate(stack_offs_B), format, mask);
   } else {
      agx_stack_load_to(b, reg, agx_immediate(stack_offs_B), format, mask);
   }
}

void
agx_lower_spill(agx_context *ctx)
{
   agx_foreach_instr_global_safe(ctx, I) {
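      /* Only moves that touch a memory (spilled) register need lowering;
       * plain register-to-register moves are left alone.
       */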
      if (I->op != AGX_OPCODE_MOV || (!I->dest[0].memory && !I->src[0].memory))
         continue;

      enum agx_size size = I->dest[0].size;
      unsigned channels = agx_channels(I->dest[0]);

      assert(size == I->src[0].size);
      assert(channels == agx_channels(I->src[0]));

      /* Texture gradient sources can be vec6, and if such a vector is spilled,
       * we need to be able to spill/fill a vec6. Since stack_store/stack_load
       * only work up to vec4, we break up into (at most) vec4 components.
       */
      agx_builder b = agx_init_builder(ctx, agx_before_instr(I));

      for (unsigned c = 0; c < channels; c += 4) {
         spill_fill(&b, I, size, MIN2(channels - c, 4), c);
      }

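      /* The move is now fully replaced by the stack accesses emitted above. */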
      agx_remove_instruction(I);
   }
}