/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir_builder.h"

/**
 * Some ALU operations may not be supported in hardware at specific
 * bit-sizes.  This pass allows implementations to selectively lower such
 * operations to a bit-size that is supported natively and then converts
 * the result back to the original bit-size.
 */
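/*
 * A minimal sketch of how a driver might use this pass (the callback name
 * and its 8/16-bit policy are hypothetical, for illustration only):
 *
 *    static unsigned
 *    lower_bit_size_cb(const nir_instr *instr, UNUSED void *data)
 *    {
 *       if (instr->type != nir_instr_type_alu)
 *          return 0;
 *
 *       nir_alu_instr *alu = nir_instr_as_alu(instr);
 *       if (alu->def.bit_size == 8 || alu->def.bit_size == 16)
 *          return 32;
 *
 *       return 0;
 *    }
 *
 *    ...
 *    nir_lower_bit_size(shader, lower_bit_size_cb, NULL);
 *
 * Returning 0 from the callback leaves the instruction untouched; any other
 * value is the bit size the instruction should be executed at.
 */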

static nir_def *
convert_to_bit_size(nir_builder *bld, nir_def *src,
                    nir_alu_type type, unsigned bit_size)
{
   assert(src->bit_size < bit_size);

   /* create b2i32(a) instead of i2i32(b2i8(a))/i2i32(b2i16(a)) */
   nir_alu_instr *alu = nir_src_as_alu_instr(nir_src_for_ssa(src));
   if ((type & (nir_type_uint | nir_type_int)) && bit_size == 32 &&
       alu && (alu->op == nir_op_b2i8 || alu->op == nir_op_b2i16)) {
      nir_alu_instr *instr = nir_alu_instr_create(bld->shader, nir_op_b2i32);
      nir_alu_src_copy(&instr->src[0], &alu->src[0]);
      return nir_builder_alu_instr_finish_and_insert(bld, instr);
   }

   return nir_convert_to_bit_size(bld, src, type, bit_size);
}

static void
lower_alu_instr(nir_builder *bld, nir_alu_instr *alu, unsigned bit_size)
{
   const nir_op op = alu->op;
   unsigned dst_bit_size = alu->def.bit_size;

   bld->cursor = nir_before_instr(&alu->instr);

   /* Convert each source to the requested bit-size */
   nir_def *srcs[NIR_MAX_VEC_COMPONENTS] = { NULL };
   for (unsigned i = 0; i < nir_op_infos[op].num_inputs; i++) {
      nir_def *src = nir_ssa_for_alu_src(bld, alu, i);

      nir_alu_type type = nir_op_infos[op].input_types[i];
      if (nir_alu_type_get_type_size(type) == 0)
         src = convert_to_bit_size(bld, src, type, bit_size);

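      /* Shift counts and bit-test indices only use the low bits of src1,
       * so mask off the bits that were ignored at the original bit size
       * before widening the operation.
       */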
      if (i == 1 && (op == nir_op_ishl || op == nir_op_ishr || op == nir_op_ushr ||
                     op == nir_op_bitz || op == nir_op_bitz8 || op == nir_op_bitz16 ||
                     op == nir_op_bitz32 || op == nir_op_bitnz || op == nir_op_bitnz8 ||
                     op == nir_op_bitnz16 || op == nir_op_bitnz32)) {
         assert(util_is_power_of_two_nonzero(dst_bit_size));
         src = nir_iand(bld, src, nir_imm_int(bld, dst_bit_size - 1));
      }

      srcs[i] = src;
   }

   /* Emit the lowered ALU instruction */
   nir_def *lowered_dst = NULL;
   if (op == nir_op_imul_high || op == nir_op_umul_high) {
      assert(dst_bit_size * 2 <= bit_size);
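      /* The widened multiply yields the full double-width product; the
       * high half is recovered by shifting right by the original bit size,
       * using an arithmetic or logical shift to match the signedness.
       */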
      lowered_dst = nir_imul(bld, srcs[0], srcs[1]);
      if (nir_op_infos[op].output_type & nir_type_uint)
         lowered_dst = nir_ushr_imm(bld, lowered_dst, dst_bit_size);
      else
         lowered_dst = nir_ishr_imm(bld, lowered_dst, dst_bit_size);
   } else if (op == nir_op_iadd_sat || op == nir_op_isub_sat || op == nir_op_uadd_sat ||
              op == nir_op_uadd_carry) {
      if (op == nir_op_isub_sat)
         lowered_dst = nir_isub(bld, srcs[0], srcs[1]);
      else
         lowered_dst = nir_iadd(bld, srcs[0], srcs[1]);

      /* The add_sat and sub_sat instructions need to clamp the result to the
       * range of the original type.
       */
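      /* For example, an 8-bit iadd_sat evaluated at 32 bits is clamped to
       * [-128, 127] here before the final down-cast.
       */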
      if (op == nir_op_iadd_sat || op == nir_op_isub_sat) {
         const int64_t int_max = u_intN_max(dst_bit_size);
         const int64_t int_min = u_intN_min(dst_bit_size);

         lowered_dst = nir_iclamp(bld, lowered_dst,
                                  nir_imm_intN_t(bld, int_min, bit_size),
                                  nir_imm_intN_t(bld, int_max, bit_size));
      } else if (op == nir_op_uadd_sat) {
         const uint64_t uint_max = u_uintN_max(dst_bit_size);

         lowered_dst = nir_umin(bld, lowered_dst,
                                nir_imm_intN_t(bld, uint_max, bit_size));
      } else {
         assert(op == nir_op_uadd_carry);
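         /* Both operands fit in dst_bit_size bits, so after the widened
          * add the carry out of the original type is bit dst_bit_size of
          * the sum.
          */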
         lowered_dst = nir_ushr_imm(bld, lowered_dst, dst_bit_size);
      }
   } else {
      lowered_dst = nir_build_alu_src_arr(bld, op, srcs);
   }

   /* Convert result back to the original bit-size */
   if (nir_alu_type_get_type_size(nir_op_infos[op].output_type) == 0 &&
       dst_bit_size != bit_size) {
      nir_alu_type type = nir_op_infos[op].output_type;
      nir_def *dst = nir_convert_to_bit_size(bld, lowered_dst, type, dst_bit_size);
      nir_def_rewrite_uses(&alu->def, dst);
   } else {
      nir_def_rewrite_uses(&alu->def, lowered_dst);
   }
}

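/* Lower a subgroup intrinsic by cloning it at the wider bit size, widening
 * its source, and down-casting the result back to the original bit size.
 */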
static void
lower_intrinsic_instr(nir_builder *b, nir_intrinsic_instr *intrin,
                      unsigned bit_size)
{
   switch (intrin->intrinsic) {
   case nir_intrinsic_read_invocation:
   case nir_intrinsic_read_first_invocation:
   case nir_intrinsic_shuffle:
   case nir_intrinsic_shuffle_xor:
   case nir_intrinsic_shuffle_up:
   case nir_intrinsic_shuffle_down:
   case nir_intrinsic_quad_broadcast:
   case nir_intrinsic_quad_swap_horizontal:
   case nir_intrinsic_quad_swap_vertical:
   case nir_intrinsic_quad_swap_diagonal:
   case nir_intrinsic_reduce:
   case nir_intrinsic_inclusive_scan:
   case nir_intrinsic_exclusive_scan: {
      const unsigned old_bit_size = intrin->def.bit_size;
      assert(old_bit_size < bit_size);

      nir_alu_type type = nir_type_uint;
      if (old_bit_size == 1)
         type = nir_type_bool;
      else if (nir_intrinsic_has_reduction_op(intrin))
         type = nir_op_infos[nir_intrinsic_reduction_op(intrin)].input_types[0];

      b->cursor = nir_before_instr(&intrin->instr);
      nir_intrinsic_instr *new_intrin =
         nir_instr_as_intrinsic(nir_instr_clone(b->shader, &intrin->instr));

      nir_def *new_src = nir_convert_to_bit_size(b, intrin->src[0].ssa,
                                                 type, bit_size);
      new_intrin->src[0] = nir_src_for_ssa(new_src);

      /* These return the same bit size as the source; we need to adjust
       * the size and then we'll have to emit a down-cast.
       */
      assert(intrin->src[0].ssa->bit_size == intrin->def.bit_size);
      new_intrin->def.bit_size = bit_size;

      nir_builder_instr_insert(b, &new_intrin->instr);

      nir_def *res = &new_intrin->def;
      if (intrin->intrinsic == nir_intrinsic_exclusive_scan) {
         /* For exclusive scan, we have to be careful because the identity
          * value for the higher bit size may get added into the mix by
          * disabled channels.  For some cases (imin/imax in particular),
          * this value won't convert to the right identity value when we
          * down-cast so we have to clamp it.
          */
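         /* For example, the 32-bit imin identity is INT32_MAX, which
          * truncates to -1 at 8 bits instead of the 8-bit identity
          * INT8_MAX; clamping before the down-cast fixes this up.
          */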
         switch (nir_intrinsic_reduction_op(intrin)) {
         case nir_op_imin: {
            int64_t int_max = u_intN_max(old_bit_size);
            res = nir_imin(b, res, nir_imm_intN_t(b, int_max, bit_size));
            break;
         }
         case nir_op_imax: {
            int64_t int_min = u_intN_min(old_bit_size);
            res = nir_imax(b, res, nir_imm_intN_t(b, int_min, bit_size));
            break;
         }
         default:
            break;
         }
      }

      res = nir_convert_to_bit_size(b, res, type, old_bit_size);

      nir_def_rewrite_uses(&intrin->def, res);
      break;
   }

   case nir_intrinsic_vote_feq:
   case nir_intrinsic_vote_ieq: {
      /* These return a Boolean; it's always 1-bit */
      assert(intrin->def.bit_size == 1);

      nir_alu_type type = nir_type_uint;
      if (intrin->intrinsic == nir_intrinsic_vote_feq)
         type = nir_type_float;
      else if (intrin->src[0].ssa->bit_size == 1)
         type = nir_type_bool;

      b->cursor = nir_before_instr(&intrin->instr);
      nir_def *new_src = nir_convert_to_bit_size(b, intrin->src[0].ssa,
                                                 type, bit_size);
      nir_src_rewrite(&intrin->src[0], new_src);
      break;
   }

   default:
      unreachable("Unsupported instruction");
   }
}

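/* Widen a phi in place: convert each predecessor's source at the end of its
 * block, bump the phi's bit size, and emit a single down-cast after the last
 * phi in the block so existing uses still see the original bit size.
 */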
static void
lower_phi_instr(nir_builder *b, nir_phi_instr *phi, unsigned bit_size,
                nir_phi_instr *last_phi)
{
   unsigned old_bit_size = phi->def.bit_size;
   assert(old_bit_size < bit_size);

   nir_foreach_phi_src(src, phi) {
      b->cursor = nir_after_block_before_jump(src->pred);
      nir_def *new_src = nir_u2uN(b, src->src.ssa, bit_size);

      nir_src_rewrite(&src->src, new_src);
   }

   phi->def.bit_size = bit_size;

   b->cursor = nir_after_instr(&last_phi->instr);

   nir_def *new_dest = nir_u2uN(b, &phi->def, old_bit_size);
   nir_def_rewrite_uses_after(&phi->def, new_dest,
                              new_dest->parent_instr);
}

static bool
lower_impl(nir_function_impl *impl,
           nir_lower_bit_size_callback callback,
           void *callback_data)
{
   nir_builder b = nir_builder_create(impl);
   bool progress = false;

   nir_foreach_block(block, impl) {
      /* Stash this so we can rewrite phi destinations quickly. */
      nir_phi_instr *last_phi = nir_block_last_phi_instr(block);

      nir_foreach_instr_safe(instr, block) {
         unsigned lower_bit_size = callback(instr, callback_data);
         if (lower_bit_size == 0)
            continue;

         switch (instr->type) {
         case nir_instr_type_alu:
            lower_alu_instr(&b, nir_instr_as_alu(instr), lower_bit_size);
            break;

         case nir_instr_type_intrinsic:
            lower_intrinsic_instr(&b, nir_instr_as_intrinsic(instr),
                                  lower_bit_size);
            break;

         case nir_instr_type_phi:
            lower_phi_instr(&b, nir_instr_as_phi(instr),
                            lower_bit_size, last_phi);
            break;

         default:
            unreachable("Unsupported instruction type");
         }
         progress = true;
      }
   }

   if (progress) {
      nir_metadata_preserve(impl, nir_metadata_control_flow);
   } else {
      nir_metadata_preserve(impl, nir_metadata_all);
   }

   return progress;
}

bool
nir_lower_bit_size(nir_shader *shader,
                   nir_lower_bit_size_callback callback,
                   void *callback_data)
{
   bool progress = false;

   nir_foreach_function_impl(impl, shader) {
      progress |= lower_impl(impl, callback, callback_data);
   }

   return progress;
}

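/* Split a 64-bit phi into two 32-bit phis holding the low and high halves,
 * then repack the halves after the phis so existing uses still see a single
 * 64-bit value.
 */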
static void
split_phi(nir_builder *b, nir_phi_instr *phi)
{
   nir_phi_instr *lowered[2] = {
      nir_phi_instr_create(b->shader),
      nir_phi_instr_create(b->shader)
   };
   int num_components = phi->def.num_components;
   assert(phi->def.bit_size == 64);

   nir_foreach_phi_src(src, phi) {
      assert(num_components == src->src.ssa->num_components);

      b->cursor = nir_before_src(&src->src);

      nir_def *x = nir_unpack_64_2x32_split_x(b, src->src.ssa);
      nir_def *y = nir_unpack_64_2x32_split_y(b, src->src.ssa);

      nir_phi_instr_add_src(lowered[0], src->pred, x);
      nir_phi_instr_add_src(lowered[1], src->pred, y);
   }

   nir_def_init(&lowered[0]->instr, &lowered[0]->def, num_components, 32);
   nir_def_init(&lowered[1]->instr, &lowered[1]->def, num_components, 32);

   b->cursor = nir_before_instr(&phi->instr);
   nir_builder_instr_insert(b, &lowered[0]->instr);
   nir_builder_instr_insert(b, &lowered[1]->instr);

   b->cursor = nir_after_phis(nir_cursor_current_block(b->cursor));
   nir_def *merged = nir_pack_64_2x32_split(b, &lowered[0]->def, &lowered[1]->def);
   nir_def_replace(&phi->def, merged);
}

static bool
lower_64bit_phi_instr(nir_builder *b, nir_instr *instr, UNUSED void *cb_data)
{
   if (instr->type != nir_instr_type_phi)
      return false;

   nir_phi_instr *phi = nir_instr_as_phi(instr);

   if (phi->def.bit_size <= 32)
      return false;

   split_phi(b, phi);
   return true;
}

bool
nir_lower_64bit_phis(nir_shader *shader)
{
   return nir_shader_instructions_pass(shader, lower_64bit_phi_instr,
                                       nir_metadata_control_flow,
                                       NULL);
}