/*
 * Copyright © 2010 Intel Corporation
 * Copyright © 2018 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"

/** nir_lower_alu.c
 *
 * NIR's home for miscellaneous ALU operation lowering implementations.
 *
 * Most NIR ALU lowering occurs in nir_opt_algebraic.py, since it's generally
 * easy to write them there. However, if terms appear multiple times in the
 * lowered code, it can get very verbose and cause a lot of work for CSE, so
 * it may end up being easier to write out in C code.
 *
 * The shader must be in SSA for this pass.
 */

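/* A driver opts into these lowerings through its nir_shader_compiler_options.
 * An illustrative sketch (the option fields below are the ones this file
 * checks; which ones a driver actually sets is hardware-specific):
 *
 *    static const nir_shader_compiler_options options = {
 *       .lower_bitfield_reverse = true,
 *       .lower_bit_count = true,
 *       .lower_mul_high = true,
 *       .lower_fminmax_signed_zero = true,
 *    };
 *
 *    ...
 *
 *    NIR_PASS(progress, shader, nir_lower_alu);
 */
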
static bool
lower_alu_instr(nir_builder *b, nir_alu_instr *instr, UNUSED void *cb_data)
{
   nir_def *lowered = NULL;

   b->cursor = nir_before_instr(&instr->instr);
   b->exact = instr->exact;
   b->fp_fast_math = instr->fp_fast_math;

   switch (instr->op) {
   case nir_op_bitfield_reverse:
      if (b->shader->options->lower_bitfield_reverse) {
         /* For more details, see:
          *
          * http://graphics.stanford.edu/~seander/bithacks.html#ReverseParallel
          */
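         /* Worked example, reversing x = 0x00000001:
          *
          *    swap odd/even bits:   0x00000001 -> 0x00000002
          *    swap pairs:           0x00000002 -> 0x00000008
          *    swap nibbles:         0x00000008 -> 0x00000080
          *    swap bytes:           0x00000080 -> 0x00008000
          *    swap 16-bit halves:   0x00008000 -> 0x80000000
          */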
         nir_def *c1 = nir_imm_int(b, 1);
         nir_def *c2 = nir_imm_int(b, 2);
         nir_def *c4 = nir_imm_int(b, 4);
         nir_def *c8 = nir_imm_int(b, 8);
         nir_def *c16 = nir_imm_int(b, 16);
         nir_def *c33333333 = nir_imm_int(b, 0x33333333);
         nir_def *c55555555 = nir_imm_int(b, 0x55555555);
         nir_def *c0f0f0f0f = nir_imm_int(b, 0x0f0f0f0f);
         nir_def *c00ff00ff = nir_imm_int(b, 0x00ff00ff);

         lowered = nir_ssa_for_alu_src(b, instr, 0);

         /* Swap odd and even bits. */
         lowered = nir_ior(b,
                           nir_iand(b, nir_ushr(b, lowered, c1), c55555555),
                           nir_ishl(b, nir_iand(b, lowered, c55555555), c1));

         /* Swap consecutive pairs. */
         lowered = nir_ior(b,
                           nir_iand(b, nir_ushr(b, lowered, c2), c33333333),
                           nir_ishl(b, nir_iand(b, lowered, c33333333), c2));

         /* Swap nibbles. */
         lowered = nir_ior(b,
                           nir_iand(b, nir_ushr(b, lowered, c4), c0f0f0f0f),
                           nir_ishl(b, nir_iand(b, lowered, c0f0f0f0f), c4));

         /* Swap bytes. */
         lowered = nir_ior(b,
                           nir_iand(b, nir_ushr(b, lowered, c8), c00ff00ff),
                           nir_ishl(b, nir_iand(b, lowered, c00ff00ff), c8));

         lowered = nir_ior(b,
                           nir_ushr(b, lowered, c16),
                           nir_ishl(b, lowered, c16));
      }
      break;

   case nir_op_bit_count:
      if (b->shader->options->lower_bit_count) {
         /* For more details, see:
          *
          * http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
          */
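         /* Worked example at 32 bits, counting x = 0x000000ff:
          *
          *    step 1: x - ((x >> 1) & 0x55555555)        -> 0x000000aa
          *            (each 2-bit field holds its own population count)
          *    step 2: sum adjacent 2-bit fields          -> 0x00000044
          *    step 3: sum adjacent 4-bit fields and mask -> 0x00000008
          *            (each byte holds its own count); multiplying by
          *            0x01010101 sums all bytes into the top byte, and the
          *            shift by bit_size - 8 extracts it: 8.
          */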

         lowered = nir_ssa_for_alu_src(b, instr, 0);
         unsigned bit_size = lowered->bit_size;

         nir_def *c1 = nir_imm_int(b, 1);
         nir_def *c2 = nir_imm_int(b, 2);
         nir_def *c4 = nir_imm_int(b, 4);
         nir_def *cshift = nir_imm_int(b, bit_size - 8);
         nir_def *c33333333 = nir_imm_intN_t(b, 0x33333333, bit_size);
         nir_def *c55555555 = nir_imm_intN_t(b, 0x55555555, bit_size);
         nir_def *c0f0f0f0f = nir_imm_intN_t(b, 0x0f0f0f0f, bit_size);
         nir_def *c01010101 = nir_imm_intN_t(b, 0x01010101, bit_size);

         lowered = nir_isub(b, lowered,
                            nir_iand(b, nir_ushr(b, lowered, c1), c55555555));

         lowered = nir_iadd(b,
                            nir_iand(b, lowered, c33333333),
                            nir_iand(b, nir_ushr(b, lowered, c2), c33333333));

         lowered = nir_ushr(b,
                            nir_imul(b,
                                     nir_iand(b,
                                              nir_iadd(b,
                                                       lowered,
                                                       nir_ushr(b, lowered, c4)),
                                              c0f0f0f0f),
                                     c01010101),
                            cshift);

         lowered = nir_u2u32(b, lowered);
      }
      break;

   case nir_op_imul_high:
   case nir_op_umul_high:
      if (b->shader->options->lower_mul_high) {
         nir_def *src0 = nir_ssa_for_alu_src(b, instr, 0);
         nir_def *src1 = nir_ssa_for_alu_src(b, instr, 1);
         if (src0->bit_size < 32) {
            /* Just do the math in 32-bit space and shift the result */
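            /* For example, a 16-bit umul_high(a, b) becomes
             * u16((u32(a) * u32(b)) >> 16).
             */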
            nir_alu_type base_type = nir_op_infos[instr->op].output_type;

            nir_def *src0_32 = nir_type_convert(b, src0, base_type,
                                                base_type | 32,
                                                nir_rounding_mode_undef);
            nir_def *src1_32 = nir_type_convert(b, src1, base_type,
                                                base_type | 32,
                                                nir_rounding_mode_undef);
            nir_def *dest_32 = nir_imul(b, src0_32, src1_32);
            nir_def *dest_shifted = nir_ishr_imm(b, dest_32, src0->bit_size);
            lowered = nir_type_convert(b, dest_shifted, base_type,
                                       base_type | src0->bit_size,
                                       nir_rounding_mode_undef);
         } else {
            nir_def *cshift = nir_imm_int(b, src0->bit_size / 2);
            nir_def *cmask = nir_imm_intN_t(b, (1ull << (src0->bit_size / 2)) - 1,
                                            src0->bit_size);
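            /* For a 32-bit multiply this is cshift = 16 and cmask = 0xffff,
             * i.e. each operand is split into 16-bit halves.
             */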
            nir_def *different_signs = NULL;
            if (instr->op == nir_op_imul_high) {
               nir_def *c0 = nir_imm_intN_t(b, 0, src0->bit_size);
               different_signs = nir_ixor(b,
                                          nir_ilt(b, src0, c0),
                                          nir_ilt(b, src1, c0));
               src0 = nir_iabs(b, src0);
               src1 = nir_iabs(b, src1);
            }

            /*   ABCD
             * * EFGH
             * ======
             * (GH * CD) + (GH * AB) << 16 + (EF * CD) << 16 + (EF * AB) << 32
             *
             * Start by splitting into the 4 multiplies.
             */
            nir_def *src0l = nir_iand(b, src0, cmask);
            nir_def *src1l = nir_iand(b, src1, cmask);
            nir_def *src0h = nir_ushr(b, src0, cshift);
            nir_def *src1h = nir_ushr(b, src1, cshift);

            nir_def *lo = nir_imul(b, src0l, src1l);
            nir_def *m1 = nir_imul(b, src0l, src1h);
            nir_def *m2 = nir_imul(b, src0h, src1l);
            nir_def *hi = nir_imul(b, src0h, src1h);

            nir_def *tmp;
            tmp = nir_ishl(b, m1, cshift);
            hi = nir_iadd(b, hi, nir_uadd_carry(b, lo, tmp));
            lo = nir_iadd(b, lo, tmp);
            hi = nir_iadd(b, hi, nir_ushr(b, m1, cshift));

            tmp = nir_ishl(b, m2, cshift);
            hi = nir_iadd(b, hi, nir_uadd_carry(b, lo, tmp));
            lo = nir_iadd(b, lo, tmp);
            hi = nir_iadd(b, hi, nir_ushr(b, m2, cshift));

            if (instr->op == nir_op_imul_high) {
               /* For channels where different_signs is set we have to perform
                * a full double-width negation. This is *not* the same as just
                * negating the high half. Consider -3 * 2. The high 32 bits
                * are 0, but the desired result is -1, not -0! Recall
                * -x == ~x + 1.
                */
               nir_def *c1 = nir_imm_intN_t(b, 1, src0->bit_size);
               hi = nir_bcsel(b, different_signs,
                              nir_iadd(b,
                                       nir_inot(b, hi),
                                       nir_uadd_carry(b, nir_inot(b, lo), c1)),
                              hi);
            }

            lowered = hi;
         }
      }
      break;

   case nir_op_fmin:
   case nir_op_fmax: {
      if (!b->shader->options->lower_fminmax_signed_zero ||
          !nir_alu_instr_is_signed_zero_preserve(instr))
         break;

      nir_def *s0 = nir_ssa_for_alu_src(b, instr, 0);
      nir_def *s1 = nir_ssa_for_alu_src(b, instr, 1);

      bool max = instr->op == nir_op_fmax;
      nir_def *iminmax = max ? nir_imax(b, s0, s1) : nir_imin(b, s0, s1);
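      /* When s0 == s1 as floats, the operands are either bit-identical or
       * +/-0.0. Reinterpreted as signed integers, -0.0 (sign bit set) is
       * negative, so imin/imax picks the correctly signed zero.
       */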

      /* Lower the fmin/fmax to a no_signed_zero fmin/fmax. This ensures that
       * nir_lower_alu is idempotent, and allows the backend to soundly
       * implement the no_signed_zero subset of fmin/fmax.
       */
      b->fp_fast_math &= ~FLOAT_CONTROLS_SIGNED_ZERO_PRESERVE;
      nir_def *fminmax = max ? nir_fmax(b, s0, s1) : nir_fmin(b, s0, s1);
      b->fp_fast_math = instr->fp_fast_math;

      lowered = nir_bcsel(b, nir_feq(b, s0, s1), iminmax, fminmax);
      break;
   }

   default:
      break;
   }

   if (lowered) {
      nir_def_replace(&instr->def, lowered);
      return true;
   } else {
      return false;
   }
}

bool
nir_lower_alu(nir_shader *shader)
{
   if (!shader->options->lower_bitfield_reverse &&
       !shader->options->lower_bit_count &&
       !shader->options->lower_mul_high &&
       !shader->options->lower_fminmax_signed_zero)
      return false;

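   /* The lowerings only emit straight-line ALU code in place of the original
    * instruction, so control flow (and thus block and dominance metadata) is
    * preserved.
    */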
   return nir_shader_alu_pass(shader, lower_alu_instr,
                              nir_metadata_control_flow, NULL);
}