/*
 * Copyright © 2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "vc4_qir.h"
#include "compiler/nir/nir_builder.h"
#include "util/format/u_format.h"
#include "util/u_helpers.h"

/**
 * Walks the NIR generated by TGSI-to-NIR or GLSL-to-NIR to lower its I/O
 * intrinsics into something amenable to the VC4 architecture.
 *
 * Currently, it splits VS inputs and uniforms into scalars, drops any
 * non-position outputs in coordinate shaders, and fixes up the addressing on
 * indirect uniform loads.  FS input and VS output scalarization is handled by
 * nir_lower_io_to_scalar().
 */

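/**
 * Gathers the given scalar components back into a vector and rewrites all
 * uses of the original intrinsic's result to use it.
 */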
static void
replace_intrinsic_with_vec(nir_builder *b, nir_intrinsic_instr *intr,
                           nir_def **comps)
{
        /* Batch things back together into a vector.  This will get split by
         * the later ALU scalarization pass.
         */
        nir_def *vec = nir_vec(b, comps, intr->num_components);

        /* Replace the old intrinsic with a reference to our reconstructed
         * vector.
         */
        nir_def_replace(&intr->def, vec);
}

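/** Returns the 8 bit field as a zero-extended 32-bit value. */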
static nir_def *
vc4_nir_unpack_8i(nir_builder *b, nir_def *src, unsigned chan)
{
        return nir_ubitfield_extract(b,
                                     src,
                                     nir_imm_int(b, 8 * chan),
                                     nir_imm_int(b, 8));
}

/** Returns the 16 bit field as a sign-extended 32-bit value. */
static nir_def *
vc4_nir_unpack_16i(nir_builder *b, nir_def *src, unsigned chan)
{
        return nir_ibitfield_extract(b,
                                     src,
                                     nir_imm_int(b, 16 * chan),
                                     nir_imm_int(b, 16));
}

/** Returns the 16 bit field as an unsigned 32 bit value. */
static nir_def *
vc4_nir_unpack_16u(nir_builder *b, nir_def *src, unsigned chan)
{
        if (chan == 0) {
                return nir_iand_imm(b, src, 0xffff);
        } else {
                return nir_ushr_imm(b, src, 16);
        }
}

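/** Returns the 8 bit field as an unorm float in [0.0, 1.0]. */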
static nir_def *
vc4_nir_unpack_8f(nir_builder *b, nir_def *src, unsigned chan)
{
        return nir_channel(b, nir_unpack_unorm_4x8(b, src), chan);
}

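/**
 * Returns one channel of a vertex attribute, converted from the raw 32-bit
 * VPM words according to the format's channel description, or NULL if the
 * channel type is unsupported.
 */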
static nir_def *
vc4_nir_get_vattr_channel_vpm(struct vc4_compile *c,
                              nir_builder *b,
                              nir_def **vpm_reads,
                              uint8_t swiz,
                              const struct util_format_description *desc)
{
        if (swiz > PIPE_SWIZZLE_W)
                return vc4_nir_get_swizzled_channel(b, vpm_reads, swiz);

        const struct util_format_channel_description *chan =
                &desc->channel[swiz];

        if (chan->size == 32 && chan->type == UTIL_FORMAT_TYPE_FLOAT) {
                return vc4_nir_get_swizzled_channel(b, vpm_reads, swiz);
        } else if (chan->size == 32 && chan->type == UTIL_FORMAT_TYPE_SIGNED) {
                if (chan->normalized) {
                        return nir_fmul_imm(b,
                                            nir_i2f32(b, vpm_reads[swiz]),
                                            1.0 / 0x7fffffff);
                } else {
                        return nir_i2f32(b, vpm_reads[swiz]);
                }
        } else if (chan->size == 8 &&
                   (chan->type == UTIL_FORMAT_TYPE_UNSIGNED ||
                    chan->type == UTIL_FORMAT_TYPE_SIGNED)) {
                nir_def *vpm = vpm_reads[0];
                if (chan->type == UTIL_FORMAT_TYPE_SIGNED) {
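                        /* XORing the top bit of each byte turns the signed
                         * bytes into a biased (excess-128) encoding that the
                         * unsigned unpack helpers can handle; the bias is
                         * removed again below.
                         */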
                        nir_def *temp = nir_ixor(b, vpm, nir_imm_int(b, 0x80808080));
                        if (chan->normalized) {
                                return nir_fadd_imm(b, nir_fmul_imm(b,
                                                                    vc4_nir_unpack_8f(b, temp, swiz),
                                                                    2.0),
                                                    -1.0);
                        } else {
                                return nir_fadd_imm(b,
                                                    nir_i2f32(b,
                                                              vc4_nir_unpack_8i(b, temp,
                                                                                swiz)),
                                                    -128.0);
                        }
                } else {
                        if (chan->normalized) {
                                return vc4_nir_unpack_8f(b, vpm, swiz);
                        } else {
                                return nir_i2f32(b, vc4_nir_unpack_8i(b, vpm, swiz));
                        }
                }
        } else if (chan->size == 16 &&
                   (chan->type == UTIL_FORMAT_TYPE_UNSIGNED ||
                    chan->type == UTIL_FORMAT_TYPE_SIGNED)) {
                nir_def *vpm = vpm_reads[swiz / 2];
                nir_def *temp;

                /* Note that UNPACK_16F eats a half float, not ints, so we use
                 * UNPACK_16_I for all of these.
                 */
                if (chan->type == UTIL_FORMAT_TYPE_SIGNED) {
                        temp = nir_i2f32(b, vc4_nir_unpack_16i(b, vpm, swiz & 1));
                        if (chan->normalized) {
                                return nir_fmul_imm(b, temp, 1 / 32768.0f);
                        } else {
                                return temp;
                        }
                } else {
                        temp = nir_i2f32(b, vc4_nir_unpack_16u(b, vpm, swiz & 1));
                        if (chan->normalized) {
                                return nir_fmul_imm(b, temp, 1 / 65535.0);
                        } else {
                                return temp;
                        }
                }
        } else {
                return NULL;
        }
}

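/**
 * Lowers a vertex attribute load_input to scalar 32-bit VPM loads, converting
 * each channel according to the attribute's format before rebuilding the
 * destination vector.
 */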
static void
vc4_nir_lower_vertex_attr(struct vc4_compile *c, nir_builder *b,
                          nir_intrinsic_instr *intr)
{
        b->cursor = nir_before_instr(&intr->instr);

        int attr = nir_intrinsic_base(intr);
        enum pipe_format format = c->vs_key->attr_formats[attr];
        uint32_t attr_size = util_format_get_blocksize(format);

        /* We only accept direct (non-indirect) inputs, and TGSI only ever
         * gives them to us with an offset value of 0.
         */
        assert(nir_src_as_uint(intr->src[0]) == 0);

        /* Generate dword loads for the VPM values (since these intrinsics may
         * be reordered, the actual reads will be generated at the top of the
         * shader by ntq_setup_inputs()).
         */
        nir_def *vpm_reads[4];
        for (int i = 0; i < align(attr_size, 4) / 4; i++)
                vpm_reads[i] = nir_load_input(b, 1, 32, nir_imm_int(b, 0),
                                              .base = nir_intrinsic_base(intr),
                                              .component = i);

        bool format_warned = false;
        const struct util_format_description *desc =
                util_format_description(format);

        nir_def *dests[4];
        for (int i = 0; i < intr->num_components; i++) {
                uint8_t swiz = desc->swizzle[i];
                dests[i] = vc4_nir_get_vattr_channel_vpm(c, b, vpm_reads, swiz,
                                                         desc);

                if (!dests[i]) {
                        if (!format_warned) {
                                fprintf(stderr,
                                        "vtx element %d unsupported type: %s\n",
                                        attr, util_format_name(format));
                                format_warned = true;
                        }
                        dests[i] = nir_imm_float(b, 0.0);
                }
        }

        replace_intrinsic_with_vec(b, intr, dests);
}

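/**
 * Fixes up FS inputs that come from the point coordinate: gives Z/W defined
 * values, optionally flips Y for an upper-left origin, and substitutes 0 for
 * X/Y when points aren't being drawn.
 */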
static void
vc4_nir_lower_fs_input(struct vc4_compile *c, nir_builder *b,
                       nir_intrinsic_instr *intr)
{
        b->cursor = nir_after_instr(&intr->instr);

        unsigned int location = nir_intrinsic_io_semantics(intr).location;
        int comp = nir_intrinsic_component(intr);

        /* Lower away point coordinates, and fix up PNTC. */
        if (util_varying_is_point_coord(location,
                                        c->fs_key->point_sprite_mask)) {
                assert(intr->num_components == 1);

                nir_def *result = &intr->def;

                switch (comp) {
                case 0:
                case 1:
                        /* If we're not rendering points, we need to set a
                         * defined value for the input that would come from
                         * PNTC.
                         */
                        if (!c->fs_key->is_points)
                                result = nir_imm_float(b, 0.0);
                        break;
                case 2:
                        result = nir_imm_float(b, 0.0);
                        break;
                case 3:
                        result = nir_imm_float(b, 1.0);
                        break;
                }

                if (c->fs_key->point_coord_upper_left && comp == 1)
                        result = nir_fsub_imm(b, 1.0, result);

                if (result != &intr->def) {
                        nir_def_rewrite_uses_after(&intr->def,
                                                   result,
                                                   result->parent_instr);
                }
        }
}

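/**
 * In coordinate shaders, drops stores to any output other than position and
 * point size.
 */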
static void
vc4_nir_lower_output(struct vc4_compile *c, nir_builder *b,
                     nir_intrinsic_instr *intr)
{
        unsigned int location = nir_intrinsic_io_semantics(intr).location;

        if (c->stage == QSTAGE_COORD &&
            location != VARYING_SLOT_POS &&
            location != VARYING_SLOT_PSIZ) {
                nir_instr_remove(&intr->instr);
                return;
        }
}

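/**
 * Splits a vector uniform load into scalar loads, converting the base/offset
 * from vec4 units to bytes.
 */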
static void
vc4_nir_lower_uniform(struct vc4_compile *c, nir_builder *b,
                      nir_intrinsic_instr *intr)
{
        b->cursor = nir_before_instr(&intr->instr);

        /* Generate scalar loads equivalent to the original vector. */
        nir_def *dests[4];
        for (unsigned i = 0; i < intr->num_components; i++) {
                nir_intrinsic_instr *intr_comp =
                        nir_intrinsic_instr_create(c->s, intr->intrinsic);
                intr_comp->num_components = 1;
                nir_def_init(&intr_comp->instr, &intr_comp->def, 1,
                             intr->def.bit_size);

                /* Convert the uniform offset to bytes.  If it happens
                 * to be a constant, constant-folding will clean up
                 * the shift for us.
                 */
                nir_intrinsic_set_base(intr_comp,
                                       nir_intrinsic_base(intr) * 16 +
                                       i * 4);
                nir_intrinsic_set_range(intr_comp,
                                        nir_intrinsic_range(intr) * 16 - i * 4);

                intr_comp->src[0] =
                        nir_src_for_ssa(nir_ishl_imm(b, intr->src[0].ssa, 4));

                dests[i] = &intr_comp->def;

                nir_builder_instr_insert(b, &intr_comp->instr);
        }

        replace_intrinsic_with_vec(b, intr, dests);
}

static void
vc4_nir_lower_io_instr(struct vc4_compile *c, nir_builder *b,
                       struct nir_instr *instr)
{
        if (instr->type != nir_instr_type_intrinsic)
                return;
        nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

        switch (intr->intrinsic) {
        case nir_intrinsic_load_input:
                if (c->stage == QSTAGE_FRAG)
                        vc4_nir_lower_fs_input(c, b, intr);
                else
                        vc4_nir_lower_vertex_attr(c, b, intr);
                break;

        case nir_intrinsic_store_output:
                vc4_nir_lower_output(c, b, intr);
                break;

        case nir_intrinsic_load_uniform:
                vc4_nir_lower_uniform(c, b, intr);
                break;

        case nir_intrinsic_load_user_clip_plane:
        default:
                break;
        }
}

static bool
vc4_nir_lower_io_impl(struct vc4_compile *c, nir_function_impl *impl)
{
        nir_builder b = nir_builder_create(impl);

        nir_foreach_block(block, impl) {
                nir_foreach_instr_safe(instr, block)
                        vc4_nir_lower_io_instr(c, &b, instr);
        }

        nir_metadata_preserve(impl, nir_metadata_control_flow);

        return true;
}

void
vc4_nir_lower_io(nir_shader *s, struct vc4_compile *c)
{
        nir_foreach_function_impl(impl, s) {
                vc4_nir_lower_io_impl(c, impl);
        }
}