/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/fast_idiv_by_const.h"
#include "util/u_math.h"
#include "nir.h"
#include "nir_builder.h"

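/* Build an unsigned division of n by the constant d.
 *
 * A zero divisor folds to zero and a power-of-two divisor becomes a right
 * shift.  Otherwise the magic-multiplier parameters from
 * util_compute_fast_udiv_info() are used, mirroring the sequence below:
 *
 *    q = umul_hi(sat((n >> pre_shift) + increment), multiplier) >> post_shift
 */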
static nir_def *
build_udiv(nir_builder *b, nir_def *n, uint64_t d)
{
   if (d == 0) {
      return nir_imm_intN_t(b, 0, n->bit_size);
   } else if (util_is_power_of_two_or_zero64(d)) {
      return nir_ushr_imm(b, n, util_logbase2_64(d));
   } else {
      struct util_fast_udiv_info m =
         util_compute_fast_udiv_info(d, n->bit_size, n->bit_size);

      if (m.pre_shift)
         n = nir_ushr_imm(b, n, m.pre_shift);
      if (m.increment)
         n = nir_uadd_sat(b, n, nir_imm_intN_t(b, m.increment, n->bit_size));
      n = nir_umul_high(b, n, nir_imm_intN_t(b, m.multiplier, n->bit_size));
      if (m.post_shift)
         n = nir_ushr_imm(b, n, m.post_shift);

      return n;
   }
}

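/* Build an unsigned modulus of n by the constant d: masking for powers of
 * two, otherwise n - (n / d) * d using build_udiv().
 */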
static nir_def *
build_umod(nir_builder *b, nir_def *n, uint64_t d)
{
   if (d == 0) {
      return nir_imm_intN_t(b, 0, n->bit_size);
   } else if (util_is_power_of_two_or_zero64(d)) {
      return nir_iand_imm(b, n, d - 1);
   } else {
      return nir_isub(b, n, nir_imul_imm(b, build_udiv(b, n, d), d));
   }
}

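/* Build a signed (truncating) division of n by the constant d.
 *
 * INT_MIN, 0, 1, -1 and power-of-two magnitudes are handled directly;
 * everything else uses the signed magic-multiplier parameters from
 * util_compute_fast_sdiv_info(), with a final correction that rounds
 * negative quotients toward zero.
 */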
static nir_def *
build_idiv(nir_builder *b, nir_def *n, int64_t d)
{
   /* Truncated n / INT_MIN is 1 when n == INT_MIN and 0 otherwise */
   int64_t int_min = u_intN_min(n->bit_size);
   if (d == int_min)
      return nir_b2iN(b, nir_ieq_imm(b, n, int_min), n->bit_size);

   uint64_t abs_d = d < 0 ? -d : d;

   if (d == 0) {
      return nir_imm_intN_t(b, 0, n->bit_size);
   } else if (d == 1) {
      return n;
   } else if (d == -1) {
      return nir_ineg(b, n);
   } else if (util_is_power_of_two_or_zero64(abs_d)) {
      nir_def *uq = nir_ushr_imm(b, nir_iabs(b, n), util_logbase2_64(abs_d));
      nir_def *n_neg = nir_ilt_imm(b, n, 0);
      nir_def *neg = d < 0 ? nir_inot(b, n_neg) : n_neg;
      return nir_bcsel(b, neg, nir_ineg(b, uq), uq);
   } else {
      struct util_fast_sdiv_info m =
         util_compute_fast_sdiv_info(d, n->bit_size);

      nir_def *res =
         nir_imul_high(b, n, nir_imm_intN_t(b, m.multiplier, n->bit_size));
      if (d > 0 && m.multiplier < 0)
         res = nir_iadd(b, res, n);
      if (d < 0 && m.multiplier > 0)
         res = nir_isub(b, res, n);
      if (m.shift)
         res = nir_ishr_imm(b, res, m.shift);
      /* Adding the sign bit rounds negative quotients toward zero */
      res = nir_iadd(b, res, nir_ushr_imm(b, res, n->bit_size - 1));

      return res;
   }
}

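/* Build a signed remainder (C-style, result has the sign of n) of n by the
 * constant d.  Power-of-two magnitudes are handled with masking; everything
 * else falls back to n - (n / d) * d using build_idiv().
 */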
static nir_def *
build_irem(nir_builder *b, nir_def *n, int64_t d)
{
   int64_t int_min = u_intN_min(n->bit_size);
   if (d == 0) {
      return nir_imm_intN_t(b, 0, n->bit_size);
   } else if (d == int_min) {
      /* The remainder is n, except for n == INT_MIN which divides exactly */
      return nir_bcsel(b, nir_ieq_imm(b, n, int_min), nir_imm_intN_t(b, 0, n->bit_size), n);
   } else {
      d = d < 0 ? -d : d;
      if (util_is_power_of_two_or_zero64(d)) {
         /* Bias negative n upward so the mask yields trunc(n / d) * d */
         nir_def *tmp = nir_bcsel(b, nir_ilt_imm(b, n, 0),
                                  nir_iadd_imm(b, n, d - 1), n);
         return nir_isub(b, n, nir_iand_imm(b, tmp, -d));
      } else {
         return nir_isub(b, n, nir_imul_imm(b, build_idiv(b, n, d), d));
      }
   }
}

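/* Build a signed modulus (GLSL-style, result has the sign of d) of n by the
 * constant d.  Power-of-two magnitudes have direct bit tricks; the general
 * case adjusts the truncated remainder from build_irem() when the signs of
 * n and d differ.
 */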
static nir_def *
build_imod(nir_builder *b, nir_def *n, int64_t d)
{
   int64_t int_min = u_intN_min(n->bit_size);
   if (d == 0) {
      return nir_imm_intN_t(b, 0, n->bit_size);
   } else if (d == int_min) {
      /* Negative n (other than INT_MIN) and zero are their own modulus;
       * otherwise add INT_MIN (INT_MIN itself wraps around to 0).
       */
      nir_def *int_min_def = nir_imm_intN_t(b, int_min, n->bit_size);
      nir_def *is_neg_not_int_min = nir_ult(b, int_min_def, n);
      nir_def *is_zero = nir_ieq_imm(b, n, 0);
      return nir_bcsel(b, nir_ior(b, is_neg_not_int_min, is_zero), n, nir_iadd(b, int_min_def, n));
   } else if (d > 0 && util_is_power_of_two_or_zero64(d)) {
      return nir_iand_imm(b, n, d - 1);
   } else if (d < 0 && util_is_power_of_two_or_zero64(-d)) {
      nir_def *d_def = nir_imm_intN_t(b, d, n->bit_size);
      nir_def *res = nir_ior(b, n, d_def);
      return nir_bcsel(b, nir_ieq(b, res, d_def), nir_imm_intN_t(b, 0, n->bit_size), res);
   } else {
      nir_def *rem = build_irem(b, n, d);
      nir_def *zero = nir_imm_intN_t(b, 0, n->bit_size);
      nir_def *sign_same = d < 0 ? nir_ilt(b, n, zero) : nir_ige(b, n, zero);
      nir_def *rem_zero = nir_ieq(b, rem, zero);
      /* imod matches irem unless the signs differ and the remainder is
       * non-zero, in which case d must be added.
       */
      return nir_bcsel(b, nir_ior(b, rem_zero, sign_same), rem, nir_iadd_imm(b, rem, d));
   }
}

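/* Replace a udiv/idiv/umod/imod/irem whose second source is constant with an
 * equivalent sequence of shifts, multiplies and adds, one channel at a time.
 */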
static bool
nir_opt_idiv_const_instr(nir_builder *b, nir_alu_instr *alu, void *user_data)
{
   unsigned *min_bit_size = user_data;

   if (alu->op != nir_op_udiv &&
       alu->op != nir_op_idiv &&
       alu->op != nir_op_umod &&
       alu->op != nir_op_imod &&
       alu->op != nir_op_irem)
      return false;

   if (alu->def.bit_size < *min_bit_size)
      return false;

   if (!nir_src_is_const(alu->src[1].src))
      return false;

   unsigned bit_size = alu->src[1].src.ssa->bit_size;

   b->cursor = nir_before_instr(&alu->instr);

   nir_def *q[NIR_MAX_VEC_COMPONENTS];
   for (unsigned comp = 0; comp < alu->def.num_components; comp++) {
      /* Get the numerator for the channel */
      nir_def *n = nir_channel(b, alu->src[0].src.ssa,
                               alu->src[0].swizzle[comp]);

      /* Get the denominator for the channel */
      int64_t d = nir_src_comp_as_int(alu->src[1].src,
                                      alu->src[1].swizzle[comp]);

      nir_alu_type d_type = nir_op_infos[alu->op].input_types[1];
      if (nir_alu_type_get_base_type(d_type) == nir_type_uint) {
         /* The code above sign-extended.  If we're lowering an unsigned op,
          * we need to mask it off to the correct number of bits so that a
          * cast to uint64_t will do the right thing.
          */
         if (bit_size < 64)
            d &= (1ull << bit_size) - 1;
      }

      switch (alu->op) {
      case nir_op_udiv:
         q[comp] = build_udiv(b, n, d);
         break;
      case nir_op_idiv:
         q[comp] = build_idiv(b, n, d);
         break;
      case nir_op_umod:
         q[comp] = build_umod(b, n, d);
         break;
      case nir_op_imod:
         q[comp] = build_imod(b, n, d);
         break;
      case nir_op_irem:
         q[comp] = build_irem(b, n, d);
         break;
      default:
         unreachable("Unknown integer division op");
      }
   }

   nir_def *qvec = nir_vec(b, q, alu->def.num_components);
   nir_def_replace(&alu->def, qvec);

   return true;
}

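/* Lower integer division and modulus by a constant to cheaper ALU sequences
 * for results of at least min_bit_size bits.
 */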
bool
nir_opt_idiv_const(nir_shader *shader, unsigned min_bit_size)
{
   return nir_shader_alu_pass(shader, nir_opt_idiv_const_instr,
                              nir_metadata_control_flow,
                              &min_bit_size);
}