/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"
#include "nir_deref.h"

#include "util/u_math.h"

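/* Helpers that move nir_const_value vectors to and from the raw constant
 * data blob. Booleans (bit_size 1) are special-cased: in memory they are
 * stored as 32-bit words holding 0 or -1.
 */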
static void
read_const_values(nir_const_value *dst, const void *src,
                  unsigned num_components, unsigned bit_size)
{
   memset(dst, 0, num_components * sizeof(*dst));

   switch (bit_size) {
   case 1:
      /* Booleans are special-cased to be 32-bit */
      assert(((uintptr_t)src & 0x3) == 0);
      for (unsigned i = 0; i < num_components; i++)
         dst[i].b = ((int32_t *)src)[i] != 0;
      break;

   case 8:
      for (unsigned i = 0; i < num_components; i++)
         dst[i].u8 = ((int8_t *)src)[i];
      break;

   case 16:
      assert(((uintptr_t)src & 0x1) == 0);
      for (unsigned i = 0; i < num_components; i++)
         dst[i].u16 = ((int16_t *)src)[i];
      break;

   case 32:
      assert(((uintptr_t)src & 0x3) == 0);
      for (unsigned i = 0; i < num_components; i++)
         dst[i].u32 = ((int32_t *)src)[i];
      break;

   case 64:
      assert(((uintptr_t)src & 0x7) == 0);
      for (unsigned i = 0; i < num_components; i++)
         dst[i].u64 = ((int64_t *)src)[i];
      break;

   default:
      unreachable("Invalid bit size");
   }
}

static void
write_const_values(void *dst, const nir_const_value *src,
                   nir_component_mask_t write_mask,
                   unsigned bit_size)
{
   switch (bit_size) {
   case 1:
      /* Booleans are special-cased to be 32-bit */
      assert(((uintptr_t)dst & 0x3) == 0);
      u_foreach_bit(i, write_mask)
         ((int32_t *)dst)[i] = -(int)src[i].b;
      break;

   case 8:
      u_foreach_bit(i, write_mask)
         ((int8_t *)dst)[i] = src[i].u8;
      break;

   case 16:
      assert(((uintptr_t)dst & 0x1) == 0);
      u_foreach_bit(i, write_mask)
         ((int16_t *)dst)[i] = src[i].u16;
      break;

   case 32:
      assert(((uintptr_t)dst & 0x3) == 0);
      u_foreach_bit(i, write_mask)
         ((int32_t *)dst)[i] = src[i].u32;
      break;

   case 64:
      assert(((uintptr_t)dst & 0x7) == 0);
      u_foreach_bit(i, write_mask)
         ((int64_t *)dst)[i] = src[i].u64;
      break;

   default:
      unreachable("Invalid bit size");
   }
}

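/* A "small" constant is a scalar array whose elements have been packed,
 * bit_stride bits per element, into a single 64-bit immediate. A load at
 * index i then becomes (data >> (i * bit_stride)) & MASK(bit_stride).
 *
 * Worked example (illustrative values): the array {3, 7, 1, 0, 5} needs
 * 3 bits per element (max value 7); rounding up to a power of two gives
 * bit_stride = 4, so data = 0x3 | 0x7 << 4 | 0x1 << 8 | 0x0 << 12 |
 * 0x5 << 16 = 0x50173.
 */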
struct small_constant {
   uint64_t data;
   uint32_t bit_size;
   bool is_float;
   uint32_t bit_stride;
};

struct var_info {
   nir_variable *var;

   bool is_constant;
   bool is_small;
   bool found_read;
   bool duplicate;

   /* Block that has all the variable stores. All the blocks with reads
    * should be dominated by this block.
    */
   nir_block *block;

   /* If is_constant, hold the collected constant data for this var. */
   uint32_t constant_data_size;
   void *constant_data;

   struct small_constant small_constant;
};

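/* qsort comparator: orders non-constant variables first, then constants by
 * ascending data size, then by content. This puts variables with identical
 * constant data next to each other, so duplicates can be found in one pass.
 */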
static int
var_info_cmp(const void *_a, const void *_b)
{
   const struct var_info *a = _a;
   const struct var_info *b = _b;
   uint32_t a_size = a->constant_data_size;
   uint32_t b_size = b->constant_data_size;

   if (a->is_constant != b->is_constant) {
      return (int)a->is_constant - (int)b->is_constant;
   } else if (a_size < b_size) {
      return -1;
   } else if (a_size > b_size) {
      return 1;
   } else if (a_size == 0) {
      /* Don't call memcmp with invalid pointers. */
      return 0;
   } else {
      return memcmp(a->constant_data, b->constant_data, a_size);
   }
}

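/* Replaces a load of a lowered variable with a load_constant intrinsic that
 * reads from the shader's constant data blob at the variable's assigned base
 * location plus the (possibly indirect) offset computed from the deref.
 */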
static nir_def *
build_constant_load(nir_builder *b, nir_deref_instr *deref,
                    glsl_type_size_align_func size_align)
{
   nir_variable *var = nir_deref_instr_get_variable(deref);

   const unsigned bit_size = glsl_get_bit_size(deref->type);
   const unsigned num_components = glsl_get_vector_elements(deref->type);

   UNUSED unsigned var_size, var_align;
   size_align(var->type, &var_size, &var_align);
   assert(var->data.location % var_align == 0);

   UNUSED unsigned deref_size, deref_align;
   size_align(deref->type, &deref_size, &deref_align);

   nir_def *src = nir_build_deref_offset(b, deref, size_align);
   nir_def *load =
      nir_load_constant(b, num_components, bit_size, src,
                        .base = var->data.location,
                        .range = var_size,
                        .align_mul = deref_align,
                        .align_offset = 0);

   if (load->bit_size < 8) {
      /* Booleans are special-cased to be 32-bit */
      assert(glsl_type_is_boolean(deref->type));
      assert(deref_size == num_components * 4);
      load->bit_size = 32;
      return nir_b2b1(b, load);
   } else {
      assert(deref_size == num_components * bit_size / 8);
      return load;
   }
}

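/* Accumulates a constant store into the variable's constant data blob. The
 * blob is lazily allocated (zero-filled) at the variable's full size on the
 * first store; stores at out-of-bounds constant offsets are simply dropped.
 */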
static void
handle_constant_store(void *mem_ctx, struct var_info *info,
                      nir_deref_instr *deref, nir_const_value *val,
                      nir_component_mask_t write_mask,
                      glsl_type_size_align_func size_align)
{
   assert(!nir_deref_instr_has_indirect(deref));
   const unsigned bit_size = glsl_get_bit_size(deref->type);
   const unsigned num_components = glsl_get_vector_elements(deref->type);

   if (info->constant_data_size == 0) {
      unsigned var_size, var_align;
      size_align(info->var->type, &var_size, &var_align);
      info->constant_data_size = var_size;
      info->constant_data = rzalloc_size(mem_ctx, var_size);
   }

   const unsigned offset = nir_deref_instr_get_const_offset(deref, size_align);
   if (offset >= info->constant_data_size)
      return;

   write_const_values((char *)info->constant_data + offset, val,
                      write_mask & nir_component_mask(num_components),
                      bit_size);
}

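/* Decides whether a constant variable qualifies as a "small" constant: a
 * tightly packed scalar array of 4 to 64 elements whose packed bits fit in
 * a single 64-bit immediate (see struct small_constant above).
 */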
static void
get_small_constant(struct var_info *info, glsl_type_size_align_func size_align)
{
   if (!glsl_type_is_array(info->var->type))
      return;

   const struct glsl_type *elem_type = glsl_get_array_element(info->var->type);
   if (!glsl_type_is_scalar(elem_type))
      return;

   uint32_t array_len = glsl_get_length(info->var->type);
   uint32_t bit_size = glsl_get_bit_size(elem_type);

   /* If our array is large, don't even bother */
   if (array_len > 64)
      return;

   /* Skip cases that can be lowered to a bcsel ladder more efficiently. */
   if (array_len <= 3)
      return;

   uint32_t elem_size, elem_align;
   size_align(elem_type, &elem_size, &elem_align);
   uint32_t stride = ALIGN_POT(elem_size, elem_align);

   if (stride != (bit_size == 1 ? 4 : bit_size / 8))
      return;

   nir_const_value values[64];
   read_const_values(values, info->constant_data, array_len, bit_size);

   bool is_float = true;
   if (bit_size < 16) {
      is_float = false;
   } else {
      for (unsigned i = 0; i < array_len; i++) {
         /* See if it's an easily convertible float.
          * TODO: Compute greatest common divisor to support non-integer floats.
          * TODO: Compute min value and add it to the result of
          * build_small_constant_load for handling negative floats.
          */
         uint64_t u = nir_const_value_as_float(values[i], bit_size);
         nir_const_value fc = nir_const_value_for_float(u, bit_size);
         is_float &= !memcmp(&fc, &values[i], bit_size / 8);
      }
   }

   uint32_t used_bits = 0;
   for (unsigned i = 0; i < array_len; i++) {
      uint64_t u64_elem = is_float ? nir_const_value_as_float(values[i], bit_size)
                                   : nir_const_value_as_uint(values[i], bit_size);
      if (!u64_elem)
         continue;

      uint32_t elem_bits = util_logbase2_64(u64_elem) + 1;
      used_bits = MAX2(used_bits, elem_bits);
   }

   /* Only use power-of-two numbers of bits so we end up with a shift
    * instead of a multiply on our index.
    */
   used_bits = util_next_power_of_two(used_bits);

   if (used_bits * array_len > 64)
      return;

   info->is_small = true;

   for (unsigned i = 0; i < array_len; i++) {
      uint64_t u64_elem = is_float ? nir_const_value_as_float(values[i], bit_size)
                                   : nir_const_value_as_uint(values[i], bit_size);

      info->small_constant.data |= u64_elem << (i * used_bits);
   }
   /* Use a bit_size of at least 32 to avoid unnecessary conversions when
    * the packed data is consumed.
    */
   info->small_constant.bit_size =
      MAX2(util_next_power_of_two(used_bits * array_len), 32);
   info->small_constant.is_float = is_float;
   info->small_constant.bit_stride = used_bits;
}

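/* Emits the bit-extraction sequence for a small constant: shift the packed
 * immediate right by index * bit_stride, mask off bit_stride bits, then
 * convert back to the element type. Continuing the example above, a load
 * of index 2 from data = 0x50173 with bit_stride = 4 computes
 * (0x50173 >> 8) & 0xf = 0x1.
 */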
static nir_def *
build_small_constant_load(nir_builder *b, nir_deref_instr *deref,
                          struct var_info *info, glsl_type_size_align_func size_align)
{
   struct small_constant *constant = &info->small_constant;

   nir_def *imm = nir_imm_intN_t(b, constant->data, constant->bit_size);

   assert(deref->deref_type == nir_deref_type_array);
   nir_def *index = deref->arr.index.ssa;

   nir_def *shift = nir_imul_imm(b, index, constant->bit_stride);

   nir_def *ret = nir_ushr(b, imm, nir_u2u32(b, shift));
   ret = nir_iand_imm(b, ret, BITFIELD64_MASK(constant->bit_stride));

   const unsigned bit_size = glsl_get_bit_size(deref->type);
   if (bit_size < 8) {
      /* Booleans are special-cased to be 32-bit */
      assert(glsl_type_is_boolean(deref->type));
      ret = nir_ine_imm(b, ret, 0);
   } else {
      if (constant->is_float)
         ret = nir_u2fN(b, ret, bit_size);
      else if (bit_size != constant->bit_size)
         ret = nir_u2uN(b, ret, bit_size);
   }

   return ret;
}

/** Lower large constant variables to shader constant data
 *
 * This pass looks for large (type_size(var->type) > threshold) variables
 * which are statically constant and moves them into shader constant data.
 * This is especially useful when large tables are baked into the shader
 * source code because they can be moved into a UBO by the driver to reduce
 * register pressure and make indirect access cheaper.
 */
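/* A sketch of a typical driver invocation (hypothetical; the threshold is a
 * per-driver tuning choice and NULL selects natural size/alignment):
 *
 *    NIR_PASS(progress, nir, nir_opt_large_constants, NULL, 32);
 */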
bool
nir_opt_large_constants(nir_shader *shader,
                        glsl_type_size_align_func size_align,
                        unsigned threshold)
{
   /* Default to a natural alignment if none is provided */
   if (size_align == NULL)
      size_align = glsl_get_natural_size_align_bytes;

   /* This only works with a single entrypoint */
   nir_function_impl *impl = nir_shader_get_entrypoint(shader);

   unsigned num_locals = nir_function_impl_index_vars(impl);

   if (num_locals == 0) {
      nir_shader_preserve_all_metadata(shader);
      return false;
   }

   struct var_info *var_infos = ralloc_array(NULL, struct var_info, num_locals);
   nir_foreach_function_temp_variable(var, impl) {
      var_infos[var->index] = (struct var_info){
         .var = var,
         .is_constant = true,
         .found_read = false,
      };
   }

   nir_metadata_require(impl, nir_metadata_dominance);

   /* First, walk through the shader and figure out what variables we can
    * lower to the constant blob.
    */
   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block) {
         if (instr->type == nir_instr_type_deref) {
            /* If we ever see a complex use of a deref_var, we have to assume
             * that variable is non-constant because we can't guarantee we
             * will find all of the writers of that variable.
             */
            nir_deref_instr *deref = nir_instr_as_deref(instr);
            if (deref->deref_type == nir_deref_type_var &&
                deref->var->data.mode == nir_var_function_temp &&
                nir_deref_instr_has_complex_use(deref, 0))
               var_infos[deref->var->index].is_constant = false;
            continue;
         }

         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

         bool src_is_const = false;
         nir_deref_instr *src_deref = NULL, *dst_deref = NULL;
         nir_component_mask_t write_mask = 0;
         switch (intrin->intrinsic) {
         case nir_intrinsic_store_deref:
            dst_deref = nir_src_as_deref(intrin->src[0]);
            src_is_const = nir_src_is_const(intrin->src[1]);
            write_mask = nir_intrinsic_write_mask(intrin);
            break;

         case nir_intrinsic_load_deref:
            src_deref = nir_src_as_deref(intrin->src[0]);
            break;

         case nir_intrinsic_copy_deref:
            assert(!"Lowering of copy_deref with large constants is prohibited");
            break;

         default:
            continue;
         }

         if (dst_deref && nir_deref_mode_must_be(dst_deref, nir_var_function_temp)) {
            nir_variable *var = nir_deref_instr_get_variable(dst_deref);
            if (var == NULL)
               continue;

            assert(var->data.mode == nir_var_function_temp);

            struct var_info *info = &var_infos[var->index];
            if (!info->is_constant)
               continue;

            if (!info->block)
               info->block = block;

            /* We only consider variables constant if they only have constant
             * stores, all the stores come before any reads, and all stores
             * come from the same block. We also can't handle indirect stores.
             */
            if (!src_is_const || info->found_read || block != info->block ||
                nir_deref_instr_has_indirect(dst_deref)) {
               info->is_constant = false;
            } else {
               nir_const_value *val = nir_src_as_const_value(intrin->src[1]);
               handle_constant_store(var_infos, info, dst_deref, val, write_mask,
                                     size_align);
            }
         }

         if (src_deref && nir_deref_mode_must_be(src_deref, nir_var_function_temp)) {
            nir_variable *var = nir_deref_instr_get_variable(src_deref);
            if (var == NULL)
               continue;

            assert(var->data.mode == nir_var_function_temp);

            /* We only consider variables constant if all the reads are
             * dominated by the block that writes to it.
             */
            struct var_info *info = &var_infos[var->index];
            if (!info->is_constant)
               continue;

            if (!info->block || !nir_block_dominates(info->block, block))
               info->is_constant = false;

            info->found_read = true;
         }
      }
   }

   bool has_constant = false;

   /* Allocate constant data space for each variable that just has constant
    * data. We sort them by size and content so we can easily find
    * duplicates.
    */
   const unsigned old_constant_data_size = shader->constant_data_size;
   qsort(var_infos, num_locals, sizeof(struct var_info), var_info_cmp);
   for (int i = 0; i < num_locals; i++) {
      struct var_info *info = &var_infos[i];

      /* Fix up indices after we sorted. */
      info->var->index = i;

      if (!info->is_constant)
         continue;

      get_small_constant(info, size_align);

      unsigned var_size, var_align;
      size_align(info->var->type, &var_size, &var_align);
      if ((var_size <= threshold && !info->is_small) || !info->found_read) {
         /* Don't bother lowering small stuff or data that's never read */
         info->is_constant = false;
         continue;
      }

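      /* Identical constant blobs are adjacent after sorting, so a variable
       * whose data matches its predecessor can share that location instead
       * of growing the blob.
       */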
      if (i > 0 && var_info_cmp(info, &var_infos[i - 1]) == 0) {
         info->var->data.location = var_infos[i - 1].var->data.location;
         info->duplicate = true;
      } else {
         info->var->data.location = ALIGN_POT(shader->constant_data_size, var_align);
         shader->constant_data_size = info->var->data.location + var_size;
      }

      has_constant |= info->is_constant;
   }

   if (!has_constant) {
      nir_shader_preserve_all_metadata(shader);
      ralloc_free(var_infos);
      return false;
   }

   if (shader->constant_data_size != old_constant_data_size) {
      assert(shader->constant_data_size > old_constant_data_size);
      shader->constant_data = rerzalloc_size(shader, shader->constant_data,
                                             old_constant_data_size,
                                             shader->constant_data_size);
      for (int i = 0; i < num_locals; i++) {
         struct var_info *info = &var_infos[i];
         if (!info->duplicate && info->is_constant) {
            memcpy((char *)shader->constant_data + info->var->data.location,
                   info->constant_data, info->constant_data_size);
         }
      }
   }

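   /* Second pass: rewrite loads of lowered variables into constant-data
    * loads (or small-constant bit extractions) and delete their stores.
    */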
   nir_builder b = nir_builder_create(impl);

   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

         switch (intrin->intrinsic) {
         case nir_intrinsic_load_deref: {
            nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
            if (!nir_deref_mode_is(deref, nir_var_function_temp))
               continue;

            nir_variable *var = nir_deref_instr_get_variable(deref);
            if (var == NULL)
               continue;

            struct var_info *info = &var_infos[var->index];
            if (info->is_small) {
               b.cursor = nir_after_instr(&intrin->instr);
               nir_def *val = build_small_constant_load(&b, deref, info, size_align);
               nir_def_replace(&intrin->def, val);
               nir_deref_instr_remove_if_unused(deref);
            } else if (info->is_constant) {
               b.cursor = nir_after_instr(&intrin->instr);
               nir_def *val = build_constant_load(&b, deref, size_align);
               nir_def_replace(&intrin->def, val);
               nir_deref_instr_remove_if_unused(deref);
            }
            break;
         }

         case nir_intrinsic_store_deref: {
            nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
            if (!nir_deref_mode_is(deref, nir_var_function_temp))
               continue;

            nir_variable *var = nir_deref_instr_get_variable(deref);
            if (var == NULL)
               continue;

            struct var_info *info = &var_infos[var->index];
            if (info->is_constant) {
               nir_instr_remove(&intrin->instr);
               nir_deref_instr_remove_if_unused(deref);
            }
            break;
         }
         case nir_intrinsic_copy_deref:
         default:
            continue;
         }
      }
   }

   /* Clean up the now unused variables */
   for (int i = 0; i < num_locals; i++) {
      struct var_info *info = &var_infos[i];
      if (info->is_constant)
         exec_node_remove(&info->var->node);
   }

   ralloc_free(var_infos);

   nir_metadata_preserve(impl, nir_metadata_control_flow);
   return true;
}