/*
 * Copyright © 2017 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Rob Clark <[email protected]>
 */

#include "nir.h"
#include "nir_builder.h"

/*
 * Remap atomic counters to SSBOs, starting from the shader's next SSBO slot
 * (info.num_ssbos).
 */
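/*
 * For example (a sketch, not exact NIR syntax): in a shader with N SSBOs,
 * an increment of a counter at binding 0 becomes an atomic add of +1 on
 * buffer index N:
 *
 *    atomic_counter_inc(offset)  ->  ssbo_atomic iadd (N, offset, 1)
 */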
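/* Look up (or create on first use) the hidden state variable that holds the
 * driver-provided buffer offset for the counter buffer at the given binding,
 * and return a deref to it.
 */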
static nir_deref_instr *
deref_offset_var(nir_builder *b, unsigned binding, unsigned offset_align_state)
{
   gl_state_index16 tokens[STATE_LENGTH] = { offset_align_state, binding };
   nir_variable *var = nir_find_state_variable(b->shader, tokens);
   if (!var) {
      var = nir_state_variable_create(b->shader, glsl_uint_type(),
                                      "offset", tokens);
      var->data.how_declared = nir_var_hidden;
   }
   return nir_build_deref_var(b, var);
}

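/* Rewrite a single atomic_counter_* intrinsic into the equivalent
 * load_ssbo/ssbo_atomic/ssbo_atomic_swap on buffer ssbo_offset + base.
 * Returns false (leaving the instruction alone) for any other intrinsic.
 */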
static bool
lower_instr(nir_intrinsic_instr *instr, unsigned ssbo_offset, nir_builder *b,
            unsigned offset_align_state)
{
   nir_intrinsic_op op = nir_intrinsic_ssbo_atomic;

   /* Initialize to something to avoid spurious compiler warning */
   nir_atomic_op atomic_op = nir_atomic_op_iadd;

   b->cursor = nir_before_instr(&instr->instr);

   switch (instr->intrinsic) {
   case nir_intrinsic_atomic_counter_inc:
   case nir_intrinsic_atomic_counter_add:
   case nir_intrinsic_atomic_counter_pre_dec:
   case nir_intrinsic_atomic_counter_post_dec:
      /* inc and dec get remapped to add: */
      atomic_op = nir_atomic_op_iadd;
      break;
   case nir_intrinsic_atomic_counter_read:
      op = nir_intrinsic_load_ssbo;
      break;
   case nir_intrinsic_atomic_counter_min:
      atomic_op = nir_atomic_op_umin;
      break;
   case nir_intrinsic_atomic_counter_max:
      atomic_op = nir_atomic_op_umax;
      break;
   case nir_intrinsic_atomic_counter_and:
      atomic_op = nir_atomic_op_iand;
      break;
   case nir_intrinsic_atomic_counter_or:
      atomic_op = nir_atomic_op_ior;
      break;
   case nir_intrinsic_atomic_counter_xor:
      atomic_op = nir_atomic_op_ixor;
      break;
   case nir_intrinsic_atomic_counter_exchange:
      atomic_op = nir_atomic_op_xchg;
      break;
   case nir_intrinsic_atomic_counter_comp_swap:
      op = nir_intrinsic_ssbo_atomic_swap;
      atomic_op = nir_atomic_op_cmpxchg;
      break;
   default:
      return false;
   }

   nir_def *buffer = nir_imm_int(b, ssbo_offset + nir_intrinsic_base(instr));
   nir_def *temp = NULL;

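   /* If the driver exposes counter-buffer offsets through a state variable
    * (offset_align_state != 0), load the per-binding offset here; it gets
    * added to the counter offset once the new intrinsic's sources are set
    * up below.
    */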
   nir_def *offset_load = NULL;
   if (offset_align_state) {
      nir_deref_instr *deref_offset =
         deref_offset_var(b, nir_intrinsic_base(instr), offset_align_state);
      offset_load = nir_load_deref(b, deref_offset);
   }
   nir_intrinsic_instr *new_instr =
      nir_intrinsic_instr_create(b->shader, op);
   if (nir_intrinsic_has_atomic_op(new_instr))
      nir_intrinsic_set_atomic_op(new_instr, atomic_op);

   /* A couple of instructions need special handling since they don't map
    * 1:1 onto the SSBO atomics:
    */
   switch (instr->intrinsic) {
   case nir_intrinsic_atomic_counter_inc:
      /* remapped to ssbo_atomic_add: { buffer_idx, offset, +1 } */
      temp = nir_imm_int(b, +1);
      new_instr->src[0] = nir_src_for_ssa(buffer);
      new_instr->src[1] = nir_src_for_ssa(instr->src[0].ssa);
      new_instr->src[2] = nir_src_for_ssa(temp);
      break;
   case nir_intrinsic_atomic_counter_pre_dec:
   case nir_intrinsic_atomic_counter_post_dec:
      /* remapped to ssbo_atomic_add: { buffer_idx, offset, -1 } */
      /* NOTE: the semantics differ, so the return value is adjusted below */
      temp = nir_imm_int(b, -1);
      new_instr->src[0] = nir_src_for_ssa(buffer);
      new_instr->src[1] = nir_src_for_ssa(instr->src[0].ssa);
      new_instr->src[2] = nir_src_for_ssa(temp);
      break;
   case nir_intrinsic_atomic_counter_read:
      /* remapped to load_ssbo: { buffer_idx, offset } */
      new_instr->src[0] = nir_src_for_ssa(buffer);
      new_instr->src[1] = nir_src_for_ssa(instr->src[0].ssa);
      break;
   default:
      /* remapped to ssbo_atomic_x: { buffer_idx, offset, data, (compare)? } */
      new_instr->src[0] = nir_src_for_ssa(buffer);
      new_instr->src[1] = nir_src_for_ssa(instr->src[0].ssa);
      new_instr->src[2] = nir_src_for_ssa(instr->src[1].ssa);
      if (op == nir_intrinsic_ssbo_atomic_swap)
         new_instr->src[3] = nir_src_for_ssa(instr->src[2].ssa);
      break;
   }

   if (offset_load)
      new_instr->src[1].ssa = nir_iadd(b, new_instr->src[1].ssa, offset_load);

   if (nir_intrinsic_range_base(instr))
      new_instr->src[1].ssa = nir_iadd(b, new_instr->src[1].ssa,
                                       nir_imm_int(b, nir_intrinsic_range_base(instr)));

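   /* Counter reads become plain SSBO loads; counters are 32-bit uints, so
    * the load is always 4-byte aligned.
    */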
   if (new_instr->intrinsic == nir_intrinsic_load_ssbo) {
      nir_intrinsic_set_align(new_instr, 4, 0);

      /* We could be replacing an intrinsic that has a fixed number of dest
       * components with one that takes a variable number, so it is safest
       * to take the count from the dest:
       */
      new_instr->num_components = instr->def.num_components;
   }

   nir_def_init(&new_instr->instr, &new_instr->def,
                instr->def.num_components, instr->def.bit_size);
   nir_instr_insert_before(&instr->instr, &new_instr->instr);
   nir_instr_remove(&instr->instr);

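   /* ssbo_atomic returns the pre-operation value, which is what post_dec
    * wants, but pre_dec must return the decremented value, so fold the -1
    * into the result:
    */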
   if (instr->intrinsic == nir_intrinsic_atomic_counter_pre_dec) {
      b->cursor = nir_after_instr(&new_instr->instr);
      nir_def *result = nir_iadd(b, &new_instr->def, temp);
      nir_def_rewrite_uses(&instr->def, result);
   } else {
      nir_def_rewrite_uses(&instr->def, &new_instr->def);
   }

   return true;
}

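/* Return true if the variable type is atomic_uint, or an array of them. */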
static bool
is_atomic_uint(const struct glsl_type *type)
{
   if (glsl_get_base_type(type) == GLSL_TYPE_ARRAY)
      return is_atomic_uint(glsl_get_array_element(type));
   return glsl_get_base_type(type) == GLSL_TYPE_ATOMIC_UINT;
}

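/* Pass entry point: rewrite every atomic_counter_* intrinsic to its SSBO
 * equivalent, then replace the atomic_uint uniforms themselves with
 * unsized-array SSBO variables (one per counter binding) and clear
 * info.num_abos.
 */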
bool
nir_lower_atomics_to_ssbo(nir_shader *shader, unsigned offset_align_state)
{
   unsigned ssbo_offset = shader->info.num_ssbos;
   bool progress = false;

   nir_foreach_function_impl(impl, shader) {
      nir_builder builder = nir_builder_create(impl);
      nir_foreach_block(block, impl) {
         nir_foreach_instr_safe(instr, block) {
            if (instr->type == nir_instr_type_intrinsic)
               progress |= lower_instr(nir_instr_as_intrinsic(instr),
                                       ssbo_offset, &builder,
                                       offset_align_state);
         }
      }

      nir_metadata_preserve(impl, nir_metadata_control_flow);
   }

   if (progress) {
      /* replace atomic_uint uniforms with SSBOs: */
      unsigned replaced = 0;
      nir_foreach_uniform_variable_safe(var, shader) {
         if (is_atomic_uint(var->type)) {
            exec_node_remove(&var->node);

            if (replaced & (1 << var->data.binding))
               continue;

            nir_variable *ssbo;
            char name[16];

            /* A length of 0 is used to denote unsized arrays */
            const struct glsl_type *type = glsl_array_type(glsl_uint_type(), 0, 0);

            snprintf(name, sizeof(name), "counter%d", var->data.binding);

            ssbo = nir_variable_create(shader, nir_var_mem_ssbo, type, name);
            ssbo->data.binding = ssbo_offset + var->data.binding;
            ssbo->data.explicit_binding = var->data.explicit_binding;

            /* We can't use num_abos, because it only counts the active
             * atomic counters and, unlike SSBOs, they currently aren't
             * compacted, so num_abos isn't a bound on the index passed to
             * nir_intrinsic_atomic_counter_*.  E.g. for a single atomic
             * counter declared like:
             *
             *    layout(binding=1) atomic_uint counter0;
             *
             * the lowered atomic_counter_* intrinsics will have 1 as the
             * index, but num_abos will still be 1.
             */
            shader->info.num_ssbos = MAX2(shader->info.num_ssbos,
                                          ssbo->data.binding + 1);

            struct glsl_struct_field field = {
               .type = type,
               .name = "counters",
               .location = -1,
            };

            ssbo->interface_type =
               glsl_interface_type(&field, 1, GLSL_INTERFACE_PACKING_STD430,
                                   false, "counters");

            replaced |= (1 << var->data.binding);
         }
      }

      shader->info.num_abos = 0;
   }

   return progress;
}
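/*
 * Typical call site in a driver's lowering chain (a sketch; whether the
 * driver passes a state-variable slot for the counter-buffer offsets, or 0
 * to skip that indirection, is driver-specific):
 *
 *    NIR_PASS(progress, nir, nir_lower_atomics_to_ssbo, 0);
 */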