/*
 * Copyright © 2020 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

/*
 * Optimizes atomics (with uniform offsets) using subgroup operations to ensure
 * only one atomic operation is done per subgroup. So res = atomicAdd(addr, 1)
 * would become something like:
 *
 * uint tmp = subgroupAdd(1);
 * uint res;
 * if (subgroupElect())
 *    res = atomicAdd(addr, tmp);
 * res = subgroupBroadcastFirst(res) + subgroupExclusiveAdd(1);
 *
 * This pass requires and preserves LCSSA and divergence information.
 */

#include "nir/nir.h"
#include "nir/nir_builder.h"
static nir_op
atomic_op_to_alu(nir_atomic_op op)
{
   switch (op) {
   case nir_atomic_op_iadd:
      return nir_op_iadd;
   case nir_atomic_op_imin:
      return nir_op_imin;
   case nir_atomic_op_umin:
      return nir_op_umin;
   case nir_atomic_op_imax:
      return nir_op_imax;
   case nir_atomic_op_umax:
      return nir_op_umax;
   case nir_atomic_op_iand:
      return nir_op_iand;
   case nir_atomic_op_ior:
      return nir_op_ior;
   case nir_atomic_op_ixor:
      return nir_op_ixor;
   case nir_atomic_op_fadd:
      return nir_op_fadd;
   case nir_atomic_op_fmin:
      return nir_op_fmin;
   case nir_atomic_op_fmax:
      return nir_op_fmax;

   /* We don't handle exchanges or wraps */
   case nir_atomic_op_xchg:
   case nir_atomic_op_cmpxchg:
   case nir_atomic_op_fcmpxchg:
   case nir_atomic_op_inc_wrap:
   case nir_atomic_op_dec_wrap:
   case nir_atomic_op_ordered_add_gfx12_amd:
      return nir_num_opcodes;
   }

   unreachable("Unknown atomic op");
}

static nir_op
parse_atomic_op(nir_intrinsic_instr *intr, unsigned *offset_src,
                unsigned *data_src, unsigned *offset2_src)
{
   switch (intr->intrinsic) {
   case nir_intrinsic_ssbo_atomic:
      *offset_src = 1;
      *data_src = 2;
      *offset2_src = *offset_src;
      return atomic_op_to_alu(nir_intrinsic_atomic_op(intr));
   case nir_intrinsic_shared_atomic:
   case nir_intrinsic_global_atomic:
   case nir_intrinsic_deref_atomic:
      *offset_src = 0;
      *data_src = 1;
      *offset2_src = *offset_src;
      return atomic_op_to_alu(nir_intrinsic_atomic_op(intr));
   case nir_intrinsic_global_atomic_amd:
      *offset_src = 0;
      *data_src = 1;
      *offset2_src = 2;
      return atomic_op_to_alu(nir_intrinsic_atomic_op(intr));
   case nir_intrinsic_image_deref_atomic:
   case nir_intrinsic_image_atomic:
   case nir_intrinsic_bindless_image_atomic:
      *offset_src = 1;
      *data_src = 3;
      *offset2_src = *offset_src;
      return atomic_op_to_alu(nir_intrinsic_atomic_op(intr));

   default:
      return nir_num_opcodes;
   }
}

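/* Returns a bitmask of the invocation-ID dimensions that "scalar" is derived
 * from: bits 0-2 correspond to the x/y/z components of the local/global
 * invocation ID and bit 3 (0x8) to the subgroup invocation index. Returns 0
 * if the value is uniform or can't be analyzed.
 */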
static unsigned
get_dim(nir_scalar scalar)
{
   if (!scalar.def->divergent)
      return 0;

   if (nir_scalar_is_intrinsic(scalar)) {
      switch (nir_scalar_intrinsic_op(scalar)) {
      case nir_intrinsic_load_subgroup_invocation:
         return 0x8;
      case nir_intrinsic_load_global_invocation_index:
      case nir_intrinsic_load_local_invocation_index:
         return 0x7;
      case nir_intrinsic_load_global_invocation_id:
      case nir_intrinsic_load_local_invocation_id:
         return 1 << scalar.comp;
      default:
         break;
      }
   } else if (nir_scalar_is_alu(scalar)) {
      if (nir_scalar_alu_op(scalar) == nir_op_iadd ||
          nir_scalar_alu_op(scalar) == nir_op_imul) {
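         /* A sum/product covers the union of its sources' dimensions, but
          * only if every divergent source is itself derived from an
          * invocation ID.
          */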
         nir_scalar src0 = nir_scalar_chase_alu_src(scalar, 0);
         nir_scalar src1 = nir_scalar_chase_alu_src(scalar, 1);

         unsigned src0_dim = get_dim(src0);
         if (!src0_dim && src0.def->divergent)
            return 0;
         unsigned src1_dim = get_dim(src1);
         if (!src1_dim && src1.def->divergent)
            return 0;

         return src0_dim | src1_dim;
      } else if (nir_scalar_alu_op(scalar) == nir_op_ishl) {
         nir_scalar src0 = nir_scalar_chase_alu_src(scalar, 0);
         nir_scalar src1 = nir_scalar_chase_alu_src(scalar, 1);
         return src1.def->divergent ? 0 : get_dim(src0);
      }
   }

   return 0;
}

/* Returns a bitmask of invocation indices that are compared against a subgroup
 * uniform value.
 */
static unsigned
match_invocation_comparison(nir_scalar scalar)
{
   bool is_alu = nir_scalar_is_alu(scalar);
   if (is_alu && nir_scalar_alu_op(scalar) == nir_op_iand) {
      return match_invocation_comparison(nir_scalar_chase_alu_src(scalar, 0)) |
             match_invocation_comparison(nir_scalar_chase_alu_src(scalar, 1));
   } else if (is_alu && nir_scalar_alu_op(scalar) == nir_op_ieq) {
      if (!nir_scalar_chase_alu_src(scalar, 0).def->divergent)
         return get_dim(nir_scalar_chase_alu_src(scalar, 1));
      if (!nir_scalar_chase_alu_src(scalar, 1).def->divergent)
         return get_dim(nir_scalar_chase_alu_src(scalar, 0));
   } else if (scalar.def->parent_instr->type == nir_instr_type_intrinsic) {
      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(scalar.def->parent_instr);
      if (intrin->intrinsic == nir_intrinsic_elect) {
         return 0x8;
      } else if (intrin->intrinsic == nir_intrinsic_inverse_ballot) {
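         /* An inverse_ballot of a constant mask with at most one bit set can
          * pass for at most one invocation, which is as strong a guard as an
          * elect.
          */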
         unsigned bitcount = 0;
         for (unsigned i = 0; i < intrin->src[0].ssa->num_components; i++) {
            scalar = nir_scalar_resolved(intrin->src[0].ssa, i);
            if (!nir_scalar_is_const(scalar))
               return 0;
            bitcount += util_bitcount64(nir_scalar_as_uint(scalar));
         }
         if (bitcount <= 1)
            return 0x8;
      }
   }

   return 0;
}

/* Returns true if the intrinsic is already conditional so that at most one
 * invocation in the subgroup does the atomic.
 */
static bool
is_atomic_already_optimized(nir_shader *shader, nir_intrinsic_instr *instr)
{
   unsigned dims = 0;
   for (nir_cf_node *cf = &instr->instr.block->cf_node; cf; cf = cf->parent) {
      if (cf->type == nir_cf_node_if) {
         nir_block *first_then = nir_if_first_then_block(nir_cf_node_as_if(cf));
         nir_block *last_then = nir_if_last_then_block(nir_cf_node_as_if(cf));
         bool within_then = instr->instr.block->index >= first_then->index;
         within_then = within_then && instr->instr.block->index <= last_then->index;
         if (!within_then)
            continue;

         nir_scalar cond = { nir_cf_node_as_if(cf)->condition.ssa, 0 };
         dims |= match_invocation_comparison(cond);
      }
   }

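   /* For workgroup-based stages, it's enough if the enclosing conditions
    * single out an invocation in every dimension where the workgroup size
    * can exceed 1.
    */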
   if (gl_shader_stage_uses_workgroup(shader->info.stage)) {
      unsigned dims_needed = 0;
      for (unsigned i = 0; i < 3; i++)
         dims_needed |= (shader->info.workgroup_size_variable ||
                         shader->info.workgroup_size[i] > 1)
                        << i;
      if ((dims & dims_needed) == dims_needed)
         return true;
   }

   return dims & 0x8;
}

/* Perform a reduction and/or exclusive scan. */
static void
reduce_data(nir_builder *b, nir_op op, nir_def *data,
            nir_def **reduce, nir_def **scan)
{
   if (scan) {
      *scan = nir_exclusive_scan(b, data, .reduction_op = op);
      if (reduce) {
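         /* The total reduction is the last active invocation's exclusive-scan
          * value combined with its own data; read it back from that lane so
          * every invocation gets the full reduction.
          */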
         nir_def *last_lane = nir_last_invocation(b);
         nir_def *res = nir_build_alu(b, op, *scan, data, NULL, NULL);
         *reduce = nir_read_invocation(b, res, last_lane);
      }
   } else {
      *reduce = nir_reduce(b, data, .reduction_op = op);
   }
}

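/* Reduces the data across the subgroup, re-emits the atomic so that only one
 * elected invocation performs it with the reduced value and, if the result is
 * used, reconstructs each invocation's value as
 * read_first_invocation(atomic result) op exclusive_scan(data).
 */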
static nir_def *
optimize_atomic(nir_builder *b, nir_intrinsic_instr *intrin, bool return_prev)
{
   unsigned offset_src = 0;
   unsigned data_src = 0;
   unsigned offset2_src = 0;
   nir_op op = parse_atomic_op(intrin, &offset_src, &data_src, &offset2_src);
   nir_def *data = intrin->src[data_src].ssa;

   /* If the data is uniform, doing the reduction and scan separately is
    * faster than a combined scan+reduce. */
   bool combined_scan_reduce = return_prev && data->divergent;
   nir_def *reduce = NULL, *scan = NULL;
   reduce_data(b, op, data, &reduce, combined_scan_reduce ? &scan : NULL);

   nir_src_rewrite(&intrin->src[data_src], reduce);
   nir_update_instr_divergence(b->shader, &intrin->instr);

   nir_def *cond = nir_elect(b, 1);

   nir_if *nif = nir_push_if(b, cond);

   nir_instr_remove(&intrin->instr);
   nir_builder_instr_insert(b, &intrin->instr);

   if (return_prev) {
      nir_push_else(b, nif);

      nir_def *undef = nir_undef(b, 1, intrin->def.bit_size);

      nir_pop_if(b, nif);
      nir_def *result = nir_if_phi(b, &intrin->def, undef);
      result = nir_read_first_invocation(b, result);

      if (!combined_scan_reduce)
         reduce_data(b, op, data, NULL, &scan);

      return nir_build_alu(b, op, result, scan, NULL, NULL);
   } else {
      nir_pop_if(b, nif);
      return NULL;
   }
}

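/* In fragment shaders, helper invocations must not perform the atomic.
 * Unless the backend already predicates atomics on non-helper lanes
 * (fs_atomics_predicated), wrap the optimized atomic in an
 * if (!is_helper_invocation) block.
 */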
static void
optimize_and_rewrite_atomic(nir_builder *b, nir_intrinsic_instr *intrin,
                            bool fs_atomics_predicated)
{
   nir_if *helper_nif = NULL;
   if (b->shader->info.stage == MESA_SHADER_FRAGMENT && !fs_atomics_predicated) {
      nir_def *helper = nir_is_helper_invocation(b, 1);
      helper_nif = nir_push_if(b, nir_inot(b, helper));
   }

   ASSERTED bool original_result_divergent = intrin->def.divergent;
   bool return_prev = !nir_def_is_unused(&intrin->def);

   nir_def old_result = intrin->def;
   list_replace(&intrin->def.uses, &old_result.uses);
   nir_def_init(&intrin->instr, &intrin->def, 1,
                intrin->def.bit_size);

   nir_def *result = optimize_atomic(b, intrin, return_prev);

   if (helper_nif) {
      nir_push_else(b, helper_nif);
      nir_def *undef = result ? nir_undef(b, 1, result->bit_size) : NULL;
      nir_pop_if(b, helper_nif);
      if (result)
         result = nir_if_phi(b, result, undef);
   }

   if (result) {
      assert(result->divergent == original_result_divergent);
      nir_def_rewrite_uses(&old_result, result);
   }
}

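/* Walks the impl and rewrites every supported atomic whose offset sources are
 * subgroup-uniform and that isn't already guarded to a single invocation.
 */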
static bool
opt_uniform_atomics(nir_function_impl *impl, bool fs_atomics_predicated)
{
   bool progress = false;
   nir_builder b = nir_builder_create(impl);
   b.update_divergence = true;

   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         unsigned offset_src, data_src, offset2_src;
         if (parse_atomic_op(intrin, &offset_src, &data_src, &offset2_src) ==
             nir_num_opcodes)
            continue;

         if (nir_src_is_divergent(intrin->src[offset_src]))
            continue;
         if (nir_src_is_divergent(intrin->src[offset2_src]))
            continue;

         if (is_atomic_already_optimized(b.shader, intrin))
            continue;

         b.cursor = nir_before_instr(instr);
         optimize_and_rewrite_atomic(&b, intrin, fs_atomics_predicated);
         progress = true;
      }
   }

   return progress;
}

bool
nir_opt_uniform_atomics(nir_shader *shader, bool fs_atomics_predicated)
{
   bool progress = false;

   /* A 1x1x1 workgroup only ever has one active lane, so there's no point in
    * optimizing any atomics.
    */
   if (gl_shader_stage_uses_workgroup(shader->info.stage) &&
       !shader->info.workgroup_size_variable &&
       shader->info.workgroup_size[0] == 1 && shader->info.workgroup_size[1] == 1 &&
       shader->info.workgroup_size[2] == 1)
      return false;

   nir_foreach_function_impl(impl, shader) {
      nir_metadata_require(impl, nir_metadata_block_index);

      if (opt_uniform_atomics(impl, fs_atomics_predicated)) {
         progress = true;
         nir_metadata_preserve(impl, nir_metadata_none);
      } else {
         nir_metadata_preserve(impl, nir_metadata_all);
      }
   }

   return progress;
}