/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"

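/* Recursively store the constant initializer `c` into the variable (or
 * sub-object) referenced by `deref`, splitting structs, arrays, matrices,
 * and cooperative matrices into per-element stores.
 */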
static void
build_constant_load(nir_builder *b, nir_deref_instr *deref, nir_constant *c)
{
   if (glsl_type_is_vector_or_scalar(deref->type)) {
      const unsigned num_components = glsl_get_vector_elements(deref->type);
      const unsigned bit_size = glsl_get_bit_size(deref->type);
      nir_def *imm = nir_build_imm(b, num_components, bit_size, c->values);
      nir_store_deref(b, deref, imm, ~0);
   } else if (glsl_type_is_struct_or_ifc(deref->type)) {
      unsigned len = glsl_get_length(deref->type);
      for (unsigned i = 0; i < len; i++) {
         build_constant_load(b, nir_build_deref_struct(b, deref, i),
                             c->elements[i]);
      }
   } else if (glsl_type_is_cmat(deref->type)) {
      const struct glsl_type *elem_type = glsl_get_cmat_element(deref->type);
      assert(glsl_type_is_scalar(elem_type));
      const unsigned bit_size = glsl_get_bit_size(elem_type);
      nir_def *elem = nir_build_imm(b, 1, bit_size, c->values);
      nir_cmat_construct(b, &deref->def, elem);
   } else {
      assert(glsl_type_is_array(deref->type) ||
             glsl_type_is_matrix(deref->type));
      unsigned len = glsl_get_length(deref->type);
      for (unsigned i = 0; i < len; i++) {
         build_constant_load(b,
                             nir_build_deref_array_imm(b, deref, i),
                             c->elements[i]);
      }
   }
}

static bool
lower_const_initializer(struct nir_builder *b, struct exec_list *var_list,
                        nir_variable_mode modes)
{
   bool progress = false;

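   /* Emit all initializer stores at the very top of the function so they
    * execute before anything else reads the variables.
    */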
   b->cursor = nir_before_impl(b->impl);

   nir_foreach_variable_in_list(var, var_list) {
      if (!(var->data.mode & modes))
         continue;

      if (var->constant_initializer) {
         build_constant_load(b, nir_build_deref_var(b, var),
                             var->constant_initializer);

         progress = true;
         var->constant_initializer = NULL;
      } else if (var->pointer_initializer) {
         nir_deref_instr *src_deref = nir_build_deref_var(b, var->pointer_initializer);
         nir_deref_instr *dst_deref = nir_build_deref_var(b, var);

         /* Note that this stores a pointer to src into dst */
         nir_store_deref(b, dst_deref, &src_deref->def, ~0);

         progress = true;
         var->pointer_initializer = NULL;
      }
   }

   return progress;
}

bool
nir_lower_variable_initializers(nir_shader *shader, nir_variable_mode modes)
{
   bool progress = false;

   /* Only some variables have initializers that we want to lower.  Others
    * such as uniforms have initializers which are useful later during linking
    * so we want to skip over those.  Restrict to only variable types where
    * initializers make sense so that callers can use nir_var_all.
    */
   modes &= nir_var_shader_out |
            nir_var_shader_temp |
            nir_var_function_temp |
            nir_var_system_value;

   nir_foreach_function_with_impl(func, impl, shader) {
      bool impl_progress = false;
      nir_builder builder = nir_builder_create(impl);

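      /* Shader-level (global) variables only need their initializers emitted
       * once, at the start of the entry point; function_temp locals are
       * handled per-function below.
       */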
      if ((modes & ~nir_var_function_temp) && func->is_entrypoint) {
         impl_progress |= lower_const_initializer(&builder,
                                                  &shader->variables,
                                                  modes);
      }

      if (modes & nir_var_function_temp) {
         impl_progress |= lower_const_initializer(&builder,
                                                  &impl->locals,
                                                  nir_var_function_temp);
      }

      if (impl_progress) {
         progress = true;
         nir_metadata_preserve(impl, nir_metadata_control_flow |
                                        nir_metadata_live_defs);
      } else {
         nir_metadata_preserve(impl, nir_metadata_all);
      }
   }

   return progress;
}

/* Zero-initialize shared_size bytes of shared memory by splitting the work
 * into writes of chunk_size bytes distributed among the invocations.
 *
 * Used for implementing VK_KHR_zero_initialize_workgroup_memory.
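 *
 * For example, with a 64-invocation workgroup, chunk_size = 16 and
 * shared_size = 4096, invocation i writes 16 bytes of zeros at offsets
 * 16*i, 16*i + 1024, 16*i + 2048 and 16*i + 3072.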
 */
bool
nir_zero_initialize_shared_memory(nir_shader *shader,
                                  const unsigned shared_size,
                                  const unsigned chunk_size)
{
   assert(shared_size > 0);
   assert(chunk_size > 0);
   assert(chunk_size % 4 == 0);

   nir_function_impl *impl = nir_shader_get_entrypoint(shader);
   nir_builder b = nir_builder_at(nir_before_impl(impl));

   assert(!shader->info.workgroup_size_variable);
   const unsigned local_count = shader->info.workgroup_size[0] *
                                shader->info.workgroup_size[1] *
                                shader->info.workgroup_size[2];

   /* The initialization logic is simplified if we can always split the memory
    * into full chunk_size units.
    */
   assert(shared_size % chunk_size == 0);

   const unsigned chunk_comps = chunk_size / 4;

   nir_variable *it = nir_local_variable_create(b.impl, glsl_uint_type(),
                                                "zero_init_iterator");
   nir_def *local_index = nir_load_local_invocation_index(&b);
   nir_def *first_offset = nir_imul_imm(&b, local_index, chunk_size);
   nir_store_var(&b, it, first_offset, 0x1);

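   /* Each invocation writes one chunk per iteration, then advances its offset
    * by the whole workgroup's worth of chunks until it runs past shared_size.
    */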
   nir_loop *loop = nir_push_loop(&b);
   {
      nir_def *offset = nir_load_var(&b, it);

      nir_push_if(&b, nir_uge_imm(&b, offset, shared_size));
      {
         nir_jump(&b, nir_jump_break);
      }
      nir_pop_if(&b, NULL);

      nir_store_shared(&b, nir_imm_zero(&b, chunk_comps, 32), offset,
                       .align_mul = chunk_size,
                       .write_mask = ((1 << chunk_comps) - 1));

      nir_def *new_offset = nir_iadd_imm(&b, offset, chunk_size * local_count);
      nir_store_var(&b, it, new_offset, 0x1);
   }
   nir_pop_loop(&b, loop);

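   /* Wait for every invocation to finish zeroing before any of them starts
    * running the original shader code, which may read shared memory.
    */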
   nir_barrier(&b, SCOPE_WORKGROUP, SCOPE_WORKGROUP, NIR_MEMORY_ACQ_REL,
               nir_var_mem_shared);

   nir_metadata_preserve(nir_shader_get_entrypoint(shader), nir_metadata_none);

   return true;
}


/** Clears all shared memory to zero at the end of the shader
 *
 * To easily get to the end of the shader, this relies on all exits having
 * already been lowered.  Designed to be called late in the lowering process;
 * e.g. it does not require variables to have been lowered to SSA.
 */
bool
nir_clear_shared_memory(nir_shader *shader,
                        const unsigned shared_size,
                        const unsigned chunk_size)
{
   assert(chunk_size > 0);
   assert(chunk_size % 4 == 0);

   if (shared_size == 0)
      return false;

   nir_function_impl *impl = nir_shader_get_entrypoint(shader);
   nir_builder b = nir_builder_at(nir_after_impl(impl));

   /* The clearing logic is simplified if we can always split the memory
    * into full chunk_size units.
    */
   assert(shared_size % chunk_size == 0);

   const unsigned chunk_comps = chunk_size / 4;

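   /* Make sure every invocation is done with shared memory before we start
    * overwriting it with zeros.
    */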
   nir_barrier(&b, SCOPE_WORKGROUP, SCOPE_WORKGROUP, NIR_MEMORY_ACQ_REL,
               nir_var_mem_shared);

   nir_def *local_index = nir_load_local_invocation_index(&b);
   nir_def *first_offset = nir_imul_imm(&b, local_index, chunk_size);

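   /* When the workgroup size is known at compile time we can compute how many
    * strided iterations each invocation needs; if that count is small enough,
    * the loop is unrolled manually below.
    */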
   unsigned iterations = UINT_MAX;
   unsigned size_per_iteration = 0;
   if (!shader->info.workgroup_size_variable) {
      size_per_iteration = nir_static_workgroup_size(shader) * chunk_size;
      iterations = DIV_ROUND_UP(shared_size, size_per_iteration);
   }

   if (iterations <= shader->options->max_unroll_iterations) {
      /* Do a manual unroll here because (a) we may not run any optimizations
       * afterwards and (b) the loop-unroll pass doesn't deal well with the
       * potential partial last iteration. */
      for (unsigned i = 0; i < iterations; ++i) {
         const unsigned base = size_per_iteration * i;
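         /* Only the final, potentially partial iteration can step past
          * shared_size; every earlier iteration is known to stay in bounds,
          * so only that last one needs a bounds check.
          */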
         bool use_check = i >= shared_size / size_per_iteration;
         if (use_check)
            nir_push_if(&b, nir_ult_imm(&b, first_offset, shared_size - base));

         nir_store_shared(&b, nir_imm_zero(&b, chunk_comps, 32),
                          nir_iadd_imm(&b, first_offset, base),
                          .align_mul = chunk_size,
                          .write_mask = ((1 << chunk_comps) - 1));
         if (use_check)
            nir_pop_if(&b, NULL);
      }
   } else {
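      /* The iteration count is unknown at compile time or too large to
       * unroll, so emit a run-time loop.  The offset is carried in a phi
       * built by hand because this pass is designed to run late and not rely
       * on nir_lower_vars_to_ssa (see the comment above).
       */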
      nir_phi_instr *offset_phi = nir_phi_instr_create(shader);
      nir_def_init(&offset_phi->instr, &offset_phi->def, 1, 32);
      nir_phi_instr_add_src(offset_phi, nir_cursor_current_block(b.cursor), first_offset);

      nir_def *size_per_iteration_def = shader->info.workgroup_size_variable ?
                             nir_imul_imm(&b, nir_load_workgroup_size(&b), chunk_size) :
                             nir_imm_int(&b, size_per_iteration);
      nir_def *value = nir_imm_zero(&b, chunk_comps, 32);

      nir_loop *loop = nir_push_loop(&b);
      nir_block *loop_block = nir_cursor_current_block(b.cursor);
      {
         nir_def *offset = &offset_phi->def;

         nir_push_if(&b, nir_uge_imm(&b, offset, shared_size));
         {
            nir_jump(&b, nir_jump_break);
         }
         nir_pop_if(&b, NULL);
         nir_store_shared(&b, value, offset,
                          .align_mul = chunk_size,
                          .write_mask = ((1 << chunk_comps) - 1));

         nir_def *new_offset = nir_iadd(&b, offset, size_per_iteration_def);
         nir_phi_instr_add_src(offset_phi, nir_cursor_current_block(b.cursor), new_offset);
      }
      nir_pop_loop(&b, loop);

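      /* Both phi sources (the initial offset from before the loop and the
       * incremented offset from the loop's back edge) are known now, so place
       * the phi at the top of the loop header block.
       */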
      b.cursor = nir_before_block(loop_block);
      nir_builder_instr_insert(&b, &offset_phi->instr);
   }

   nir_metadata_preserve(nir_shader_get_entrypoint(shader), nir_metadata_none);

   return true;
}