/*
 * Copyright © 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/* These passes enable converting uniforms to literals when it's profitable,
 * effectively inlining uniform values in the IR. The main benefit is a
 * decrease in register usage, which leads to better SMT (hyperthreading).
 * This is accomplished by targeting uniforms that determine whether a
 * conditional branch is taken or a loop can be unrolled.
 *
 * Only uniforms used in these places are analyzed:
 *   1. if condition
 *   2. loop terminator
 *   3. init and update value of induction variable used in loop terminator
 *
 * nir_find_inlinable_uniforms finds uniforms that can be inlined and stores
 * that information in shader_info.
 *
 * nir_inline_uniforms inlines uniform values.
 *
 * (uniforms must be lowered to load_ubo before calling this)
 */
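
/* A minimal sketch of the expected driver flow (the helper
 * "fetch_ubo_dword" is hypothetical, not part of this API):
 *
 *    nir_find_inlinable_uniforms(nir);
 *
 *    // At variant-compile time, read the current value of each reported
 *    // uniform from the bound UBO 0...
 *    uint32_t values[MAX_INLINABLE_UNIFORMS];
 *    for (unsigned i = 0; i < nir->info.num_inlinable_uniforms; i++)
 *       values[i] = fetch_ubo_dword(nir->info.inlinable_uniform_dw_offsets[i]);
 *
 *    // ...and bake those values into this shader variant.
 *    nir_inline_uniforms(nir, nir->info.num_inlinable_uniforms, values,
 *                        nir->info.inlinable_uniform_dw_offsets);
 */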

#include "nir_builder.h"
#include "nir_loop_analyze.h"

/* Maximum byte offset that can be represented in
 * shader_info::inlinable_uniform_dw_offsets[], which stores dword offsets
 * as uint16_t.
 */
#define MAX_OFFSET (UINT16_MAX * 4)

#define MAX_NUM_BO 32
/**
 * Collect uniforms used in a source
 *
 * Recursively collects all of the UBO loads with constant UBO index and
 * constant offset (per the restrictions of \c max_num_bo and \c
 * max_offset). If any values are discovered that are non-constant, uniforms
 * that don't meet the restrictions, or if more than \c
 * MAX_INLINABLE_UNIFORMS are discovered for any one UBO, false is returned.
 *
 * When false is returned, the state of \c uni_offsets and \c num_offsets is
 * undefined.
 *
 * \param max_num_bo  Maximum number of uniform buffer objects
 * \param max_offset  Maximum offset within a UBO
 * \param uni_offsets Array of \c max_num_bo * \c MAX_INLINABLE_UNIFORMS
 *                    values used to store offsets of discovered uniform loads.
 * \param num_offsets Array of \c max_num_bo values used to store the number
 *                    of uniforms collected from each UBO.
 */
bool
nir_collect_src_uniforms(const nir_src *src, int component,
                         uint32_t *uni_offsets, uint8_t *num_offsets,
                         unsigned max_num_bo, unsigned max_offset)
{
   assert(max_num_bo > 0 && max_num_bo <= MAX_NUM_BO);
   assert(component < src->ssa->num_components);

   nir_instr *instr = src->ssa->parent_instr;

   switch (instr->type) {
   case nir_instr_type_alu: {
      nir_alu_instr *alu = nir_instr_as_alu(instr);

      /* Vector ops only need to check the corresponding component. */
      if (alu->op == nir_op_mov) {
         return nir_collect_src_uniforms(&alu->src[0].src,
                                         alu->src[0].swizzle[component],
                                         uni_offsets, num_offsets,
                                         max_num_bo, max_offset);
      } else if (nir_op_is_vec(alu->op)) {
         nir_alu_src *alu_src = alu->src + component;
         return nir_collect_src_uniforms(&alu_src->src, alu_src->swizzle[0],
                                         uni_offsets, num_offsets,
                                         max_num_bo, max_offset);
      }

      /* Return true if all sources return true. */
      for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
         nir_alu_src *alu_src = alu->src + i;
         int input_sizes = nir_op_infos[alu->op].input_sizes[i];

         if (input_sizes == 0) {
            /* For ops with no fixed input size, each component of the dest
             * is determined only by the same component of the srcs.
             */
            if (!nir_collect_src_uniforms(&alu_src->src, alu_src->swizzle[component],
                                          uni_offsets, num_offsets,
                                          max_num_bo, max_offset))
               return false;
         } else {
            /* For ops with a fixed input size, all components of the dest
             * are determined by all components of the srcs (except vec ops).
             */
            for (unsigned j = 0; j < input_sizes; j++) {
               if (!nir_collect_src_uniforms(&alu_src->src, alu_src->swizzle[j],
                                             uni_offsets, num_offsets,
                                             max_num_bo, max_offset))
                  return false;
            }
         }
      }
      return true;
   }

   case nir_instr_type_intrinsic: {
      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
      /* Return true if the intrinsic is a load from a UBO with a constant
       * index below max_num_bo and a constant offset within max_offset.
       */
      if (intr->intrinsic == nir_intrinsic_load_ubo &&
          nir_src_is_const(intr->src[0]) && nir_src_num_components(intr->src[0]) == 1 &&
          nir_src_as_uint(intr->src[0]) < max_num_bo &&
          nir_src_is_const(intr->src[1]) &&
          nir_src_as_uint(intr->src[1]) <= max_offset &&
          /* TODO: Can't handle other bit sizes for now. */
          intr->def.bit_size == 32) {
         /* num_offsets can be NULL if-and-only-if uni_offsets is NULL. */
         assert((num_offsets == NULL) == (uni_offsets == NULL));

         /* If we're just checking that it's a uniform load, don't check (or
          * add to) the table.
          */
         if (uni_offsets == NULL)
            return true;

         uint32_t offset = nir_src_as_uint(intr->src[1]) + component * 4;
         assert(offset < MAX_OFFSET);

         const unsigned ubo = nir_src_as_uint(intr->src[0]);

         /* Already recorded by a previous use. */
         for (int i = 0; i < num_offsets[ubo]; i++) {
            if (uni_offsets[ubo * MAX_INLINABLE_UNIFORMS + i] == offset)
               return true;
         }

         /* The limit on the number of inlinable uniforms was exceeded. */
         if (num_offsets[ubo] == MAX_INLINABLE_UNIFORMS)
            return false;

         /* Record the uniform offset. */
         uni_offsets[ubo * MAX_INLINABLE_UNIFORMS + num_offsets[ubo]++] = offset;
         return true;
      }
      return false;
   }

   case nir_instr_type_load_const:
      /* Always return true for constants. */
      return true;

   default:
      return false;
   }
}
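
/* For example, a condition like
 *
 *    if (uniform0 * 2 + uniform1 == 10)
 *
 * is collected successfully: the walk recurses through the ALU chain, and
 * every leaf is either a load_const or a qualifying load_ubo, so the
 * offsets of uniform0 and uniform1 get recorded. If any source chain
 * reaches another instruction type (e.g. a phi or an SSBO load), the
 * whole collection returns false.
 */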

static bool
is_induction_variable(const nir_src *src, int component, nir_loop_info *info,
                      uint32_t *uni_offsets, uint8_t *num_offsets,
                      unsigned max_num_bo, unsigned max_offset)
{
   assert(component < src->ssa->num_components);

   /* Return true for an induction variable (i.e. "i" in a for loop). */
   for (int i = 0; i < info->num_induction_vars; i++) {
      nir_loop_induction_variable *var = info->induction_vars + i;
      if (var->def == src->ssa) {
         /* An induction variable should have a constant initial value
          * (i.e. "i = 0"), a constant update value (i.e. "i++") and a
          * constant end condition (i.e. "i < 10"), so that we know the
          * exact loop count for unrolling the loop.
          *
          * Also collect the uniforms that need to be inlined for this
          * induction variable's initial and update values to become
          * constant, for example:
          *
          *    for (i = init; i < count; i += step)
          *
          * We collect the uniforms "init" and "step" here.
          */
         if (var->init_src) {
            if (!nir_collect_src_uniforms(var->init_src, component,
                                          uni_offsets, num_offsets,
                                          max_num_bo, max_offset))
               return false;
         }

         if (var->update_src) {
            nir_alu_src *alu_src = var->update_src;
            if (!nir_collect_src_uniforms(&alu_src->src,
                                          alu_src->swizzle[component],
                                          uni_offsets, num_offsets,
                                          max_num_bo, max_offset))
               return false;
         }

         return true;
      }
   }

   return false;
}

void
nir_add_inlinable_uniforms(const nir_src *cond, nir_loop_info *info,
                           uint32_t *uni_offsets, uint8_t *num_offsets,
                           unsigned max_num_bo, unsigned max_offset)
{
   /* Collect into a copy, so that a failed collection below leaves the
    * previously recorded uniforms in num_offsets intact.
    */
   uint8_t new_num[MAX_NUM_BO];
   memcpy(new_num, num_offsets, sizeof(new_num));

   /* An if condition's SSA def is always scalar, so the component is 0. */
   unsigned component = 0;

   /* Allow an induction variable, which indicates a loop terminator. */
   if (info) {
      nir_scalar cond_scalar = { cond->ssa, 0 };

      /* Limit the terminator condition to the case loop unrolling supports,
       * which is a simple comparison (i.e. "i < count" is supported, but
       * "i + 1 < count" is not).
       */
      if (nir_is_supported_terminator_condition(cond_scalar)) {
         if (nir_scalar_alu_op(cond_scalar) == nir_op_inot)
            cond_scalar = nir_scalar_chase_alu_src(cond_scalar, 0);

         nir_alu_instr *alu = nir_instr_as_alu(cond_scalar.def->parent_instr);

         /* One side of the comparison is the induction variable; the other
          * side must contain only uniforms.
          */
         for (int i = 0; i < 2; i++) {
            if (is_induction_variable(&alu->src[i].src, alu->src[i].swizzle[0],
                                      info, uni_offsets, new_num,
                                      max_num_bo, max_offset)) {
               cond = &alu->src[1 - i].src;
               component = alu->src[1 - i].swizzle[0];
               break;
            }
         }
      }
   }

   /* Only update the uniform count when all uniforms in the expression
    * can be inlined. Partially inlined uniforms can't eliminate the
    * if/loop.
    *
    * For example, if the number of uniforms that can be inlined in a
    * shader is limited to 4 and we have already added 3 uniforms, then
    * when handling
    *
    *    if (uniform0 + uniform1 == 10)
    *
    * only uniform0 could be inlined before the limit of 4 is exceeded.
    * But the if statement can be eliminated only when both uniform0 and
    * uniform1 are inlined.
    *
    * The same applies to loops whose induction variable's init and update
    * values also contain uniforms, like
    *
    *    for (i = uniform0; i < uniform1; i += uniform2)
    *
    * The loop can be unrolled only when uniform0, uniform1 and uniform2
    * are all inlined at once.
    */
   if (nir_collect_src_uniforms(cond, component, uni_offsets, new_num,
                                max_num_bo, max_offset))
      memcpy(num_offsets, new_num, sizeof(new_num[0]) * max_num_bo);
}

static void
process_node(nir_cf_node *node, nir_loop_info *info,
             uint32_t *uni_offsets, uint8_t *num_offsets)
{
   switch (node->type) {
   case nir_cf_node_if: {
      nir_if *if_node = nir_cf_node_as_if(node);
      const nir_src *cond = &if_node->condition;
      nir_add_inlinable_uniforms(cond, info, uni_offsets, num_offsets,
                                 1, MAX_OFFSET);

      /* Do not pass the loop info down, so that induction variables are
       * only allowed in the loop terminator "if":
       *
       *    for (i = 0; true; i++)
       *       if (i == count)
       *          if (i == num)
       *             <no break>
       *          break
       *
       * Here "num" won't be inlined because its "if" is not a terminator.
       */
      info = NULL;

      foreach_list_typed(nir_cf_node, nested_node, node, &if_node->then_list)
         process_node(nested_node, info, uni_offsets, num_offsets);
      foreach_list_typed(nir_cf_node, nested_node, node, &if_node->else_list)
         process_node(nested_node, info, uni_offsets, num_offsets);
      break;
   }

   case nir_cf_node_loop: {
      nir_loop *loop = nir_cf_node_as_loop(node);
      assert(!nir_loop_has_continue_construct(loop));

      /* Replace the loop info; there is no nested loop info currently:
       *
       *    for (i = 0; i < count0; i++)
       *       for (j = 0; j < count1; j++)
       *          if (i == num)
       *
       * Here "num" won't be inlined because "i" is an induction variable
       * of the outer loop.
       */
      info = loop->info;

      foreach_list_typed(nir_cf_node, nested_node, node, &loop->body) {
         bool is_terminator = false;
         list_for_each_entry(nir_loop_terminator, terminator,
                             &info->loop_terminator_list,
                             loop_terminator_link) {
            if (nested_node == &terminator->nif->cf_node) {
               is_terminator = true;
               break;
            }
         }

         /* Allow induction variables for the terminator "if" only:
          *
          *    for (i = 0; i < count; i++)
          *       if (i == num)
          *          <no break>
          *
          * Here "num" won't be inlined because its "if" is not a
          * terminator.
          */
         nir_loop_info *use_info = is_terminator ? info : NULL;
         process_node(nested_node, use_info, uni_offsets, num_offsets);
      }
      break;
   }

   default:
      break;
   }
}

void
nir_find_inlinable_uniforms(nir_shader *shader)
{
   uint32_t uni_offsets[MAX_INLINABLE_UNIFORMS];
   uint8_t num_offsets[MAX_NUM_BO] = {0};

   nir_foreach_function_impl(impl, shader) {
      nir_metadata_require(impl, nir_metadata_loop_analysis,
                           nir_var_all, false);

      foreach_list_typed(nir_cf_node, node, node, &impl->body)
         process_node(node, NULL, uni_offsets, num_offsets);
   }

   /* Convert the collected byte offsets to the dword offsets that
    * shader_info stores. Only UBO 0 is scanned, since process_node passes
    * max_num_bo == 1.
    */
   for (int i = 0; i < num_offsets[0]; i++)
      shader->info.inlinable_uniform_dw_offsets[i] = uni_offsets[i] / 4;
   shader->info.num_inlinable_uniforms = num_offsets[0];
}
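
/* An illustrative sketch (not actual pass output): if the analysis above
 * recorded a 32-bit uniform at byte offset 16 of UBO 0, then
 * inlinable_uniform_dw_offsets[0] == 4. With uniform_values[0] == 10,
 * nir_inline_uniforms() rewrites
 *
 *    vec1 32 ssa_2 = intrinsic load_ubo (ssa_0, ssa_1)  // offset 16
 *
 * into
 *
 *    vec1 32 ssa_2 = load_const (0x0000000a)
 */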

void
nir_inline_uniforms(nir_shader *shader, unsigned num_uniforms,
                    const uint32_t *uniform_values,
                    const uint16_t *uniform_dw_offsets)
{
   if (!num_uniforms)
      return;

   nir_foreach_function_impl(impl, shader) {
      nir_builder b = nir_builder_create(impl);
      nir_foreach_block(block, impl) {
         nir_foreach_instr_safe(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

            /* Only replace loads from UBO 0 with constant offsets. */
            if (intr->intrinsic == nir_intrinsic_load_ubo &&
                nir_src_is_const(intr->src[0]) &&
                nir_src_as_uint(intr->src[0]) == 0 &&
                nir_src_is_const(intr->src[1]) &&
                /* TODO: Can't handle other bit sizes for now. */
                intr->def.bit_size == 32) {
               int num_components = intr->def.num_components;
               uint32_t offset = nir_src_as_uint(intr->src[1]) / 4;

               if (num_components == 1) {
                  /* Just replace the uniform load with a constant load. */
                  for (unsigned i = 0; i < num_uniforms; i++) {
                     if (offset == uniform_dw_offsets[i]) {
                        b.cursor = nir_before_instr(&intr->instr);
                        nir_def *def = nir_imm_int(&b, uniform_values[i]);
                        nir_def_replace(&intr->def, def);
                        break;
                     }
                  }
               } else {
                  /* Lower the vector uniform load to scalars and replace
                   * each matched component with a constant load.
                   */
                  uint32_t max_offset = offset + num_components;
                  nir_def *components[NIR_MAX_VEC_COMPONENTS] = { 0 };
                  bool found = false;

                  b.cursor = nir_before_instr(&intr->instr);

                  /* Find the components to replace. */
                  for (unsigned i = 0; i < num_uniforms; i++) {
                     uint32_t uni_offset = uniform_dw_offsets[i];
                     if (uni_offset >= offset && uni_offset < max_offset) {
                        int index = uni_offset - offset;
                        components[index] = nir_imm_int(&b, uniform_values[i]);
                        found = true;
                     }
                  }

                  if (!found)
                     continue;

                  /* Create per-component uniform loads for the components
                   * that weren't replaced by constants.
                   */
                  for (unsigned i = 0; i < num_components; i++) {
                     if (!components[i]) {
                        uint32_t scalar_offset = (offset + i) * 4;
                        components[i] = nir_load_ubo(&b, 1, intr->def.bit_size,
                                                     intr->src[0].ssa,
                                                     nir_imm_int(&b, scalar_offset));
                        nir_intrinsic_instr *load =
                           nir_instr_as_intrinsic(components[i]->parent_instr);
                        nir_intrinsic_set_align(load, NIR_ALIGN_MUL_MAX, scalar_offset);
                        nir_intrinsic_set_range_base(load, scalar_offset);
                        nir_intrinsic_set_range(load, 4);
                     }
                  }

                  /* Replace the original vector uniform load. */
                  nir_def_replace(&intr->def,
                                  nir_vec(&b, components, num_components));
               }
            }
         }
      }

      nir_metadata_preserve(impl, nir_metadata_control_flow);
   }
}