/*
 * Copyright © 2021 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Timur Kristóf
 *
 */

#include "nir.h"
#include "nir_builder.h"

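/*
 * nir_opt_offsets: fold constant additions found in the offset source of
 * load/store intrinsics into the instruction's constant offset (the BASE
 * index, or OFFSET0/OFFSET1 for shared2_amd), e.g.
 *
 *    load_shared(iadd(x, 16), base=0)  ->  load_shared(x, base=16)
 *
 * as long as the resulting constant stays within the per-intrinsic maximum
 * supplied through nir_opt_offsets_options.
 */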
typedef struct
{
   struct hash_table *range_ht;
   const nir_opt_offsets_options *options;
} opt_offsets_state;

static nir_scalar
try_extract_const_addition(nir_builder *b, nir_scalar val, opt_offsets_state *state, unsigned *out_const, uint32_t max)
{
   val = nir_scalar_chase_movs(val);

   if (!nir_scalar_is_alu(val))
      return val;

   nir_alu_instr *alu = nir_instr_as_alu(val.def->parent_instr);
   if (alu->op != nir_op_iadd)
      return val;

   nir_scalar src[2] = {
      { alu->src[0].src.ssa, alu->src[0].swizzle[val.comp] },
      { alu->src[1].src.ssa, alu->src[1].swizzle[val.comp] },
   };

   /* Make sure that we aren't taking out an addition that could trigger
    * unsigned wrapping in a way that would change the semantics of the load.
    * Ignored for ints-as-floats (lower_bitops is a proxy for that), where
    * unsigned wrapping doesn't make sense.
    */
   if (!state->options->allow_offset_wrap && !alu->no_unsigned_wrap && !b->shader->options->lower_bitops) {
      if (!state->range_ht) {
         /* Cache for nir_unsigned_upper_bound */
         state->range_ht = _mesa_pointer_hash_table_create(NULL);
      }

      /* Check if there can really be an unsigned wrap. */
      uint32_t ub0 = nir_unsigned_upper_bound(b->shader, state->range_ht, src[0], NULL);
      uint32_t ub1 = nir_unsigned_upper_bound(b->shader, state->range_ht, src[1], NULL);

      if ((UINT32_MAX - ub0) < ub1)
         return val;

      /* We proved that unsigned wrap won't be possible, so we can set the flag too. */
      alu->no_unsigned_wrap = true;
   }

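   /* If either source is a constant that still fits in the remaining budget,
    * fold it into *out_const and keep chasing additions in the other source.
    */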
   for (unsigned i = 0; i < 2; ++i) {
      src[i] = nir_scalar_chase_movs(src[i]);
      if (nir_scalar_is_const(src[i])) {
         uint32_t offset = nir_scalar_as_uint(src[i]);
         if (offset + *out_const <= max) {
            *out_const += offset;
            return try_extract_const_addition(b, src[1 - i], state, out_const, max);
         }
      }
   }

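   /* Neither source is directly foldable; recurse into both in case they are
    * themselves additions that contain constants.
    */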
   uint32_t orig_offset = *out_const;
   src[0] = try_extract_const_addition(b, src[0], state, out_const, max);
   src[1] = try_extract_const_addition(b, src[1], state, out_const, max);
   if (*out_const == orig_offset)
      return val;

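   /* Something was extracted, so rebuild the addition from the (possibly
    * rewritten) sources without the folded constant.
    */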
   b->cursor = nir_before_instr(&alu->instr);
   nir_def *r =
      nir_iadd(b, nir_channel(b, src[0].def, src[0].comp),
               nir_channel(b, src[1].def, src[1].comp));
   return nir_get_scalar(r, 0);
}

static bool
try_fold_load_store(nir_builder *b,
                    nir_intrinsic_instr *intrin,
                    opt_offsets_state *state,
                    unsigned offset_src_idx,
                    uint32_t max)
{
   /* Assume that BASE is the constant offset of a load/store.
    * Try to constant-fold additions to the offset source
    * into the actual const offset of the instruction.
    */

   unsigned off_const = nir_intrinsic_base(intrin);
   nir_src *off_src = &intrin->src[offset_src_idx];
   nir_def *replace_src = NULL;

   if (off_src->ssa->bit_size != 32)
      return false;

   if (!nir_src_is_const(*off_src)) {
      uint32_t add_offset = 0;
      nir_scalar val = { .def = off_src->ssa, .comp = 0 };
      val = try_extract_const_addition(b, val, state, &add_offset, max - off_const);
      if (add_offset == 0)
         return false;
      off_const += add_offset;
      b->cursor = nir_before_instr(&intrin->instr);
      replace_src = nir_channel(b, val.def, val.comp);
   } else if (nir_src_as_uint(*off_src) && off_const + nir_src_as_uint(*off_src) <= max) {
      off_const += nir_src_as_uint(*off_src);
      b->cursor = nir_before_instr(&intrin->instr);
      replace_src = nir_imm_zero(b, off_src->ssa->num_components, off_src->ssa->bit_size);
   }

   if (!replace_src)
      return false;

   nir_src_rewrite(&intrin->src[offset_src_idx], replace_src);

   assert(off_const <= max);
   nir_intrinsic_set_base(intrin, off_const);
   return true;
}

static bool
try_fold_shared2(nir_builder *b,
                 nir_intrinsic_instr *intrin,
                 opt_offsets_state *state,
                 unsigned offset_src_idx)
{
   unsigned comp_size = (intrin->intrinsic == nir_intrinsic_load_shared2_amd ? intrin->def.bit_size : intrin->src[0].ssa->bit_size) / 8;
   unsigned stride = (nir_intrinsic_st64(intrin) ? 64 : 1) * comp_size;
   unsigned offset0 = nir_intrinsic_offset0(intrin) * stride;
   unsigned offset1 = nir_intrinsic_offset1(intrin) * stride;
   nir_src *off_src = &intrin->src[offset_src_idx];

   if (!nir_src_is_const(*off_src))
      return false;

   unsigned const_offset = nir_src_as_uint(*off_src);
   offset0 += const_offset;
   offset1 += const_offset;
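   /* OFFSET0/OFFSET1 are 8-bit indices in units of the stride (one element,
    * or 64 elements when ST64 is set), so check which encoding still fits.
    */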
   bool st64 = offset0 % (64 * comp_size) == 0 && offset1 % (64 * comp_size) == 0;
   stride = (st64 ? 64 : 1) * comp_size;
   if (const_offset % stride || offset0 > 255 * stride || offset1 > 255 * stride)
      return false;

   b->cursor = nir_before_instr(&intrin->instr);
   nir_src_rewrite(off_src, nir_imm_zero(b, 1, 32));
   nir_intrinsic_set_offset0(intrin, offset0 / stride);
   nir_intrinsic_set_offset1(intrin, offset1 / stride);
   nir_intrinsic_set_st64(intrin, st64);

   return true;
}

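/* Return the maximum constant offset for this intrinsic: a nonzero
 * per-category limit from the options takes precedence over the optional
 * max_offset_cb callback.
 */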
static uint32_t
get_max(opt_offsets_state *state, nir_intrinsic_instr *intrin, uint32_t default_val)
{
   if (default_val)
      return default_val;
   if (state->options->max_offset_cb)
      return state->options->max_offset_cb(intrin, state->options->max_offset_data);
   return 0;
}

static bool
process_instr(nir_builder *b, nir_instr *instr, void *s)
{
   if (instr->type != nir_instr_type_intrinsic)
      return false;

   opt_offsets_state *state = (opt_offsets_state *)s;
   nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

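   /* The numeric argument is the index of the intrinsic source that holds
    * the offset to fold into the constant offset.
    */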
   switch (intrin->intrinsic) {
   case nir_intrinsic_load_uniform:
   case nir_intrinsic_load_const_ir3:
      return try_fold_load_store(b, intrin, state, 0, get_max(state, intrin, state->options->uniform_max));
   case nir_intrinsic_load_ubo_vec4:
      return try_fold_load_store(b, intrin, state, 1, get_max(state, intrin, state->options->ubo_vec4_max));
   case nir_intrinsic_load_shared:
   case nir_intrinsic_load_shared_ir3:
      return try_fold_load_store(b, intrin, state, 0, get_max(state, intrin, state->options->shared_max));
   case nir_intrinsic_store_shared:
   case nir_intrinsic_store_shared_ir3:
      return try_fold_load_store(b, intrin, state, 1, get_max(state, intrin, state->options->shared_max));
   case nir_intrinsic_load_shared2_amd:
      return try_fold_shared2(b, intrin, state, 0);
   case nir_intrinsic_store_shared2_amd:
      return try_fold_shared2(b, intrin, state, 1);
   case nir_intrinsic_load_buffer_amd:
      return try_fold_load_store(b, intrin, state, 1, state->options->buffer_max);
   case nir_intrinsic_store_buffer_amd:
   case nir_intrinsic_load_ssbo_ir3:
      return try_fold_load_store(b, intrin, state, 2, get_max(state, intrin, state->options->buffer_max));
   case nir_intrinsic_store_ssbo_ir3:
      return try_fold_load_store(b, intrin, state, 3, get_max(state, intrin, state->options->buffer_max));
   default:
      return false;
   }

   unreachable("Can't reach here.");
}

bool
nir_opt_offsets(nir_shader *shader, const nir_opt_offsets_options *options)
{
   opt_offsets_state state;
   state.range_ht = NULL;
   state.options = options;

   bool p = nir_shader_instructions_pass(shader, process_instr,
                                         nir_metadata_control_flow,
                                         &state);

   if (state.range_ht)
      _mesa_hash_table_destroy(state.range_ht, NULL);

   return p;
}