/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"

/**
 * \file nir_opt_intrinsics.c
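 *
 * A collection of small peephole optimizations on intrinsics, mostly for
 * subgroup operations: a bcsel of two shuffles of the same data becomes a
 * single shuffle, quad-wide boolean reductions become quad votes, exclusive
 * scans combined with their own source become inclusive scans, and
 * comparisons of gl_SampleMaskIn against zero become gl_HelperInvocation.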
 */

static bool
src_is_single_use_shuffle(nir_src src, nir_def **data, nir_def **index)
{
   nir_intrinsic_instr *shuffle = nir_src_as_intrinsic(src);
   if (shuffle == NULL || shuffle->intrinsic != nir_intrinsic_shuffle)
      return false;

   /* This is only called when src is part of an ALU op so requiring no if
    * uses is reasonable.  If we ever want to use this from an if statement,
    * we can change it then.
    */
   if (!list_is_singular(&shuffle->def.uses))
      return false;

   if (nir_def_used_by_if(&shuffle->def))
      return false;

   *data = shuffle->src[0].ssa;
   *index = shuffle->src[1].ssa;

   return true;
}

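/* Rewrites bcsel(cond, shuffle(data, index1), shuffle(data, index2)) into
 * shuffle(data, bcsel(cond, index1, index2)), replacing two subgroup
 * shuffles with one.  Both shuffles must be single-use, defined in the same
 * block as the bcsel, and read the same data.
 */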
static nir_def *
try_opt_bcsel_of_shuffle(nir_builder *b, nir_alu_instr *alu,
                         bool block_has_discard)
{
   assert(alu->op == nir_op_bcsel);

   /* If we've seen a discard in this block, don't do the optimization.  We
    * could try to do something fancy where we check if the shuffle is on our
    * side of the discard or not, but this is good enough for correctness for
    * now, and subgroup ops in the presence of discard aren't common.
    */
   if (block_has_discard)
      return NULL;

   if (!nir_alu_src_is_trivial_ssa(alu, 0))
      return NULL;

   nir_def *data1, *index1;
   if (!nir_alu_src_is_trivial_ssa(alu, 1) ||
       alu->src[1].src.ssa->parent_instr->block != alu->instr.block ||
       !src_is_single_use_shuffle(alu->src[1].src, &data1, &index1))
      return NULL;

   nir_def *data2, *index2;
   if (!nir_alu_src_is_trivial_ssa(alu, 2) ||
       alu->src[2].src.ssa->parent_instr->block != alu->instr.block ||
       !src_is_single_use_shuffle(alu->src[2].src, &data2, &index2))
      return NULL;

   if (data1 != data2)
      return NULL;

   nir_def *index = nir_bcsel(b, alu->src[0].src.ssa, index1, index2);
   nir_def *shuffle = nir_shuffle(b, data1, index);

   return shuffle;
}

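/* Matches an intrinsic that makes each invocation read some fixed lane of
 * its quad: a quad_broadcast with a constant lane index, one of the quad
 * swaps, or an AMD quad swizzle.  The intrinsic must be defined in the
 * given block.
 */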
static bool
src_is_quad_broadcast(nir_block *block, nir_src src, nir_intrinsic_instr **intrin)
{
   nir_intrinsic_instr *broadcast = nir_src_as_intrinsic(src);
   if (broadcast == NULL || broadcast->instr.block != block)
      return false;

   switch (broadcast->intrinsic) {
   case nir_intrinsic_quad_broadcast:
      if (!nir_src_is_const(broadcast->src[1]))
         return false;
      FALLTHROUGH;
   case nir_intrinsic_quad_swap_horizontal:
   case nir_intrinsic_quad_swap_vertical:
   case nir_intrinsic_quad_swap_diagonal:
   case nir_intrinsic_quad_swizzle_amd:
      *intrin = broadcast;
      return true;
   default:
      return false;
   }
}

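/* Matches a binary ALU instruction with the given opcode whose two sources
 * are trivial SSA values, and returns those sources.
 */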
static bool
src_is_alu(nir_op op, nir_src src, nir_src srcs[2])
{
   nir_alu_instr *alu = nir_src_as_alu_instr(src);
   if (alu == NULL || alu->op != op)
      return false;

   if (!nir_alu_src_is_trivial_ssa(alu, 0) || !nir_alu_src_is_trivial_ssa(alu, 1))
      return false;

   srcs[0] = alu->src[0].src;
   srcs[1] = alu->src[1].src;

   return true;
}

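/* Matches a 1-bit iand/ior tree whose four leaves are quad broadcasts (or
 * quad swaps/swizzles) of the same value, arranged so that every lane ends
 * up combining all four lanes of its quad.  For example:
 *
 *    quad_broadcast(x, 0) op quad_broadcast(x, 1) op
 *    quad_broadcast(x, 2) op quad_broadcast(x, 3)
 *
 * Such a tree is equivalent to quad_vote_all(x) for iand, or
 * quad_vote_any(x) for ior.
 */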
static nir_def *
try_opt_quad_vote(nir_builder *b, nir_alu_instr *alu, bool block_has_discard)
{
   if (block_has_discard)
      return NULL;

   if (!nir_alu_src_is_trivial_ssa(alu, 0) || !nir_alu_src_is_trivial_ssa(alu, 1))
      return NULL;

   nir_intrinsic_instr *quad_broadcasts[4];
   nir_src srcs[2][2];
   bool found = false;

   /* Match (broadcast0 op broadcast1) op (broadcast2 op broadcast3). */
   found = src_is_alu(alu->op, alu->src[0].src, srcs[0]) &&
           src_is_alu(alu->op, alu->src[1].src, srcs[1]) &&
           src_is_quad_broadcast(alu->instr.block, srcs[0][0], &quad_broadcasts[0]) &&
           src_is_quad_broadcast(alu->instr.block, srcs[0][1], &quad_broadcasts[1]) &&
           src_is_quad_broadcast(alu->instr.block, srcs[1][0], &quad_broadcasts[2]) &&
           src_is_quad_broadcast(alu->instr.block, srcs[1][1], &quad_broadcasts[3]);

   /* Match (((broadcast2 op broadcast3) op broadcast1) op broadcast0). */
   if (!found) {
      if ((src_is_alu(alu->op, alu->src[0].src, srcs[0]) &&
           src_is_quad_broadcast(alu->instr.block, alu->src[1].src, &quad_broadcasts[0])) ||
          (src_is_alu(alu->op, alu->src[1].src, srcs[0]) &&
           src_is_quad_broadcast(alu->instr.block, alu->src[0].src, &quad_broadcasts[0]))) {
         /* ((broadcast2 op broadcast3) op broadcast1) */
         if ((src_is_alu(alu->op, srcs[0][0], srcs[1]) &&
              src_is_quad_broadcast(alu->instr.block, srcs[0][1], &quad_broadcasts[1])) ||
             (src_is_alu(alu->op, srcs[0][1], srcs[1]) &&
              src_is_quad_broadcast(alu->instr.block, srcs[0][0], &quad_broadcasts[1]))) {
            /* (broadcast2 op broadcast3) */
            found = src_is_quad_broadcast(alu->instr.block, srcs[1][0], &quad_broadcasts[2]) &&
                    src_is_quad_broadcast(alu->instr.block, srcs[1][1], &quad_broadcasts[3]);
         }
      }
   }

   if (!found)
      return NULL;

   /* Check if each lane in a quad reduces all lanes in the quad, and if all
    * broadcasts read the same data.
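    *
    * lanes_read packs four 4-bit groups: group j is the set of source quad
    * lanes whose value reaches quad lane j through some broadcast.  The
    * rewrite is only valid when lanes_read == 0xffff, i.e. every lane
    * observes all four lanes of its quad.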
    */
   uint16_t lanes_read = 0;
   for (unsigned i = 0; i < 4; i++) {
      if (!nir_srcs_equal(quad_broadcasts[i]->src[0], quad_broadcasts[0]->src[0]))
         return NULL;

      for (unsigned j = 0; j < 4; j++) {
         unsigned lane;
         switch (quad_broadcasts[i]->intrinsic) {
         case nir_intrinsic_quad_broadcast:
            lane = nir_src_as_uint(quad_broadcasts[i]->src[1]) & 0x3;
            break;
         case nir_intrinsic_quad_swap_horizontal:
            lane = j ^ 1;
            break;
         case nir_intrinsic_quad_swap_vertical:
            lane = j ^ 2;
            break;
         case nir_intrinsic_quad_swap_diagonal:
            lane = 3 - j;
            break;
         case nir_intrinsic_quad_swizzle_amd:
            lane = (nir_intrinsic_swizzle_mask(quad_broadcasts[i]) >> (j * 2)) & 0x3;
            break;
         default:
            unreachable("not a quad broadcast intrinsic");
         }
         lanes_read |= (1 << lane) << (j * 4);
      }
   }

   if (lanes_read != 0xffff)
      return NULL;

   /* Create quad vote. */
   if (alu->op == nir_op_iand)
      return nir_quad_vote_all(b, 1, quad_broadcasts[0]->src[0].ssa);
   else
      return nir_quad_vote_any(b, 1, quad_broadcasts[0]->src[0].ssa);
}

static bool
opt_intrinsics_alu(nir_builder *b, nir_alu_instr *alu,
                   bool block_has_discard, const struct nir_shader_compiler_options *options)
{
   nir_def *replacement = NULL;

   switch (alu->op) {
   case nir_op_bcsel:
      replacement = try_opt_bcsel_of_shuffle(b, alu, block_has_discard);
      break;
   case nir_op_iand:
   case nir_op_ior:
      if (alu->def.bit_size == 1 && options->optimize_quad_vote_to_reduce)
         replacement = try_opt_quad_vote(b, alu, block_has_discard);
      break;
   default:
      break;
   }

   if (replacement) {
      nir_def_replace(&alu->def, replacement);
      return true;
   } else {
      return false;
   }
}

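/* If every use of an exclusive scan combines the scan result with the
 * scan's own source using the scan's reduction op, the combination is
 * exactly the inclusive scan:
 *
 *    op(exclusive_scan(op, x), x) == inclusive_scan(op, x)
 *
 * so the scan can be converted in place and the ALU uses folded away.
 */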
static bool
try_opt_exclusive_scan_to_inclusive(nir_intrinsic_instr *intrin)
{
   if (intrin->def.num_components != 1)
      return false;

   nir_foreach_use_including_if(src, &intrin->def) {
      if (nir_src_is_if(src) || nir_src_parent_instr(src)->type != nir_instr_type_alu)
         return false;

      nir_alu_instr *alu = nir_instr_as_alu(nir_src_parent_instr(src));

      if (alu->op != (nir_op)nir_intrinsic_reduction_op(intrin))
         return false;

      /* Don't reassociate exact float operations. */
      if (nir_alu_type_get_base_type(nir_op_infos[alu->op].output_type) == nir_type_float && alu->exact)
         return false;

      /* SPIR-V rules for fmax/fmin scans are *very* stupid.
       * The required identity is Inf instead of NaN, but if one input
       * is NaN, the other value has to be returned.
       *
       * This means for invocation 0:
       * min(subgroupExclusiveMin(NaN), NaN) -> Inf
       * subgroupInclusiveMin(NaN) -> undefined (NaN for any sane backend)
       *
       * SPIR-V [NF]Min/Max don't allow an undefined result, even with
       * standard float controls.
       */
      if (alu->op == nir_op_fmax || alu->op == nir_op_fmin)
         return false;

      if (alu->def.num_components != 1)
         return false;

      nir_alu_src *alu_src = list_entry(src, nir_alu_src, src);
      unsigned src_index = alu_src - alu->src;

      assert(src_index < 2 && nir_op_infos[alu->op].num_inputs == 2);

      nir_scalar scan_scalar = nir_scalar_resolved(intrin->src[0].ssa, 0);
      nir_scalar op_scalar = nir_scalar_resolved(alu->src[!src_index].src.ssa,
                                                 alu->src[!src_index].swizzle[0]);

      if (!nir_scalar_equal(scan_scalar, op_scalar))
         return false;
   }

   /* Convert to inclusive scan. */
   intrin->intrinsic = nir_intrinsic_inclusive_scan;

   nir_foreach_use_including_if_safe(src, &intrin->def) {
      /* Remove the now-redundant ALU instruction. */
      nir_alu_instr *alu = nir_instr_as_alu(nir_src_parent_instr(src));
      nir_def_replace(&alu->def, &intrin->def);
   }

   return true;
}

static bool
opt_intrinsics_intrin(nir_builder *b, nir_intrinsic_instr *intrin,
                      const struct nir_shader_compiler_options *options)
{
   switch (intrin->intrinsic) {
   case nir_intrinsic_load_sample_mask_in: {
      /* Transform:
       *   gl_SampleMaskIn == 0 ---> gl_HelperInvocation
       *   gl_SampleMaskIn != 0 ---> !gl_HelperInvocation
       */
      if (!options->optimize_sample_mask_in)
         return false;

      bool progress = false;
      nir_foreach_use_safe(use_src, &intrin->def) {
         if (nir_src_parent_instr(use_src)->type == nir_instr_type_alu) {
            nir_alu_instr *alu = nir_instr_as_alu(nir_src_parent_instr(use_src));

            if (alu->op == nir_op_ieq ||
                alu->op == nir_op_ine) {
               /* Check for 0 in either operand. */
               nir_const_value *const_val =
                  nir_src_as_const_value(alu->src[0].src);
               if (!const_val)
                  const_val = nir_src_as_const_value(alu->src[1].src);
               if (!const_val || const_val->i32 != 0)
                  continue;

               nir_def *new_expr = nir_load_helper_invocation(b, 1);

               if (alu->op == nir_op_ine)
                  new_expr = nir_inot(b, new_expr);

               nir_def_replace(&alu->def, new_expr);
               progress = true;
            }
         }
      }
      return progress;
   }
   case nir_intrinsic_exclusive_scan:
      return try_opt_exclusive_scan_to_inclusive(intrin);
   default:
      return false;
   }
}

static bool
opt_intrinsics_impl(nir_function_impl *impl,
                    const struct nir_shader_compiler_options *options)
{
   nir_builder b = nir_builder_create(impl);
   bool progress = false;

   nir_foreach_block(block, impl) {
      bool block_has_discard = false;

      nir_foreach_instr_safe(instr, block) {
         b.cursor = nir_before_instr(instr);

         switch (instr->type) {
         case nir_instr_type_alu:
            if (opt_intrinsics_alu(&b, nir_instr_as_alu(instr),
                                   block_has_discard, options))
               progress = true;
            break;

         case nir_instr_type_intrinsic: {
            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
            if (intrin->intrinsic == nir_intrinsic_demote ||
                intrin->intrinsic == nir_intrinsic_demote_if ||
                intrin->intrinsic == nir_intrinsic_terminate ||
                intrin->intrinsic == nir_intrinsic_terminate_if)
               block_has_discard = true;

            if (opt_intrinsics_intrin(&b, intrin, options))
               progress = true;
            break;
         }

         default:
            break;
         }
      }
   }

   return progress;
}

bool
nir_opt_intrinsics(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_function_impl(impl, shader) {
      if (opt_intrinsics_impl(impl, shader->options)) {
         progress = true;
         nir_metadata_preserve(impl, nir_metadata_control_flow);
      } else {
         nir_metadata_preserve(impl, nir_metadata_all);
      }
   }

   return progress;
}