/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/*
 * Implements a pass that lowers output and/or input variables to a
 * temporary plus an output variable with a single copy at each exit
 * point of the shader and/or an input variable with a single copy
 * at the entrance point of the shader. This way the output variable
 * is only ever written once and/or the input is only ever read once,
 * and there are no indirect output/input accesses.
 */

#include "nir.h"
#include "nir_builder.h"
#include "nir_deref.h"

struct lower_io_state {
   nir_shader *shader;
   nir_function_impl *entrypoint;
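   /* The original I/O variables (which get turned into the shader temporaries)
    * and the new shader_in/shader_out variables created to replace them.
    */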
   struct exec_list old_outputs;
   struct exec_list old_inputs;
   struct exec_list new_outputs;
   struct exec_list new_inputs;

   /* map from temporary to new input */
   struct hash_table *input_map;
};

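/* Emit a copy from each variable in src_vars to the corresponding variable in
 * dest_vars, skipping pairs where the copy would be pointless or invalid.
 */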
static void
emit_copies(nir_builder *b, struct exec_list *dest_vars,
            struct exec_list *src_vars)
{
   assert(exec_list_length(dest_vars) == exec_list_length(src_vars));

   foreach_two_lists(dest_node, dest_vars, src_node, src_vars) {
      nir_variable *dest = exec_node_data(nir_variable, dest_node, node);
      nir_variable *src = exec_node_data(nir_variable, src_node, node);

      /* No need to copy the contents of a non-fb_fetch_output output variable
       * to the temporary allocated for it, since its initial value is
       * undefined.
       */
      if (src->data.mode == nir_var_shader_out &&
          !src->data.fb_fetch_output)
         continue;

      /* Can't copy the contents of the temporary back to a read-only
       * interface variable. The value of the temporary won't have been
       * modified by the shader anyway.
       */
      if (dest->data.read_only)
         continue;

      nir_copy_var(b, dest, src);
   }
}

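/* Emit copies from the output temporaries to the real output variables: right
 * before each EmitVertex() for geometry shaders, otherwise right before every
 * jump to the end block of the entrypoint.
 */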
static void
emit_output_copies_impl(struct lower_io_state *state, nir_function_impl *impl)
{
   nir_builder b = nir_builder_create(impl);

   if (state->shader->info.stage == MESA_SHADER_GEOMETRY) {
      /* For geometry shaders, we have to emit the output copies right
       * before each EmitVertex call.
       */
      nir_foreach_block(block, impl) {
         nir_foreach_instr(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
            if (intrin->intrinsic == nir_intrinsic_emit_vertex ||
                intrin->intrinsic == nir_intrinsic_emit_vertex_with_counter) {
               b.cursor = nir_before_instr(&intrin->instr);
               emit_copies(&b, &state->new_outputs, &state->old_outputs);
            }
         }
      }
   } else if (impl == state->entrypoint) {
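      /* Seed the temporaries with the initial values of any fb_fetch outputs;
       * emit_copies() skips every other output variable as a copy source.
       */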
      b.cursor = nir_before_impl(impl);
      emit_copies(&b, &state->old_outputs, &state->new_outputs);

      /* For all other shader types, we need to do the copies right before
       * the jumps to the end block.
       */
      set_foreach(impl->end_block->predecessors, block_entry) {
         struct nir_block *block = (void *)block_entry->key;
         b.cursor = nir_after_block_before_jump(block);
         emit_copies(&b, &state->new_outputs, &state->old_outputs);
      }
   }
}

/* For fragment shader inputs, when we lower to temporaries we'll invalidate
 * interpolateAt*() because now they'll be pointing to the temporary instead
 * of the actual variable. Since the caller presumably doesn't support
 * indirect indexing of inputs, we'll need to lower something like:
 *
 * in vec4 foo[3];
 *
 * ... = interpolateAtCentroid(foo[i]);
 *
 * to a sequence of interpolations that store to our temporary, then a
 * load at the end:
 *
 * in vec4 foo[3];
 * vec4 foo_tmp[3];
 *
 * foo_tmp[0] = interpolateAtCentroid(foo[0]);
 * foo_tmp[1] = interpolateAtCentroid(foo[1]);
 * ... = foo_tmp[i];
 */

/*
 * Recursively emit the interpolation instructions. Here old_interp_deref
 * refers to foo[i], temp_deref is foo_tmp[0/1], and new_interp_deref is
 * foo[0/1].
 */

static void
emit_interp(nir_builder *b, nir_deref_instr **old_interp_deref,
            nir_deref_instr *temp_deref, nir_deref_instr *new_interp_deref,
            nir_intrinsic_instr *interp)
{
   while (*old_interp_deref) {
      switch ((*old_interp_deref)->deref_type) {
      case nir_deref_type_struct:
         temp_deref =
            nir_build_deref_struct(b, temp_deref,
                                   (*old_interp_deref)->strct.index);
         new_interp_deref =
            nir_build_deref_struct(b, new_interp_deref,
                                   (*old_interp_deref)->strct.index);
         break;
      case nir_deref_type_array:
         if (nir_src_is_const((*old_interp_deref)->arr.index)) {
            temp_deref =
               nir_build_deref_array(b, temp_deref,
                                     (*old_interp_deref)->arr.index.ssa);
            new_interp_deref =
               nir_build_deref_array(b, new_interp_deref,
                                     (*old_interp_deref)->arr.index.ssa);
            break;
         } else {
            /* We have an indirect deref, so we have to emit interpolations
             * for every index. Recurse in case we have an array of arrays.
             */
            unsigned length = glsl_get_length(temp_deref->type);
            for (unsigned i = 0; i < length; i++) {
               nir_deref_instr *new_temp =
                  nir_build_deref_array_imm(b, temp_deref, i);
               nir_deref_instr *new_interp =
                  nir_build_deref_array_imm(b, new_interp_deref, i);

               emit_interp(b, old_interp_deref + 1, new_temp, new_interp,
                           interp);
            }

            return;
         }

      case nir_deref_type_var:
      case nir_deref_type_array_wildcard:
      case nir_deref_type_ptr_as_array:
      case nir_deref_type_cast:
         unreachable("bad deref type");
      }

      old_interp_deref++;
   }

   /* Now that we've constructed a fully-qualified deref with all the indirect
    * derefs replaced with direct ones, it's time to actually emit the new
    * interpolation instruction.
    */

   nir_intrinsic_instr *new_interp =
      nir_intrinsic_instr_create(b->shader, interp->intrinsic);

   new_interp->src[0] = nir_src_for_ssa(&new_interp_deref->def);
   if (interp->intrinsic == nir_intrinsic_interp_deref_at_sample ||
       interp->intrinsic == nir_intrinsic_interp_deref_at_offset ||
       interp->intrinsic == nir_intrinsic_interp_deref_at_vertex) {
      new_interp->src[1] = interp->src[1];
   }

   new_interp->num_components = interp->num_components;
   nir_def_init(&new_interp->instr, &new_interp->def,
                interp->def.num_components, interp->def.bit_size);

   nir_builder_instr_insert(b, &new_interp->instr);
   nir_store_deref(b, temp_deref, &new_interp->def,
                   (1 << interp->def.num_components) - 1);
}

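/* Rewrite a single interpolateAt* intrinsic so that it interpolates the real
 * input variable (with any indirect indexing unrolled) and stores the result
 * in the temporary, then loads the final value back from the temporary.
 */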
static void
fixup_interpolation_instr(struct lower_io_state *state,
                          nir_intrinsic_instr *interp, nir_builder *b)
{
   nir_deref_path interp_path;
   nir_deref_path_init(&interp_path, nir_src_as_deref(interp->src[0]), NULL);

   b->cursor = nir_before_instr(&interp->instr);

   /* The original interpolation instruction should contain a deref path
    * starting with the original variable, which is now the temporary.
    */
   nir_deref_instr *temp_root = interp_path.path[0];

   /* Fish out the newly-created input variable. */
   assert(temp_root->deref_type == nir_deref_type_var);
   struct hash_entry *entry = _mesa_hash_table_search(state->input_map,
                                                      temp_root->var);
   assert(entry);
   nir_variable *input = entry->data;
   nir_deref_instr *input_root = nir_build_deref_var(b, input);

   /* Emit the interpolation instructions. */
   emit_interp(b, interp_path.path + 1, temp_root, input_root, interp);

   /* Now the temporary contains the interpolation results, and we can just
    * load from it. We can reuse the original deref, since it points to the
    * correct part of the temporary.
    */
   nir_def *load = nir_load_deref(b, nir_src_as_deref(interp->src[0]));
   nir_def_replace(&interp->def, load);

   nir_deref_path_finish(&interp_path);
}

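/* Walk the impl and fix up every interpolateAt* intrinsic we find. */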
static void
fixup_interpolation(struct lower_io_state *state, nir_function_impl *impl,
                    nir_builder *b)
{
   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *interp = nir_instr_as_intrinsic(instr);

         if (interp->intrinsic == nir_intrinsic_interp_deref_at_centroid ||
             interp->intrinsic == nir_intrinsic_interp_deref_at_sample ||
             interp->intrinsic == nir_intrinsic_interp_deref_at_offset ||
             interp->intrinsic == nir_intrinsic_interp_deref_at_vertex) {
            fixup_interpolation_instr(state, interp, b);
         }
      }
   }
}

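/* Copy the real inputs into their temporaries once, at the very start of the
 * entrypoint, and fix up any interpolation intrinsics in fragment shaders.
 */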
static void
emit_input_copies_impl(struct lower_io_state *state, nir_function_impl *impl)
{
   if (impl == state->entrypoint) {
      nir_builder b = nir_builder_at(nir_before_impl(impl));
      emit_copies(&b, &state->old_inputs, &state->new_inputs);
      if (state->shader->info.stage == MESA_SHADER_FRAGMENT)
         fixup_interpolation(state, impl, &b);
   }
}

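/* Turn the given I/O variable into a shader temporary and return a fresh
 * variable that takes over its role as the real input/output.
 */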
static nir_variable *
create_shadow_temp(struct lower_io_state *state, nir_variable *var)
{
   nir_variable *nvar = ralloc(state->shader, nir_variable);
   memcpy(nvar, var, sizeof *nvar);
   nvar->data.cannot_coalesce = true;

   /* The original is now the temporary */
   nir_variable *temp = var;

   /* Reparent the name to the new variable */
   ralloc_steal(nvar, nvar->name);

   assert(nvar->constant_initializer == NULL && nvar->pointer_initializer == NULL);

   /* Give the original a new name with @<mode>-temp appended */
   const char *mode = (temp->data.mode == nir_var_shader_in) ? "in" : "out";
   temp->name = ralloc_asprintf(var, "%s@%s-temp", mode, nvar->name);
   temp->data.mode = nir_var_shader_temp;
   temp->data.read_only = false;
   temp->data.fb_fetch_output = false;
   temp->data.compact = false;

   return nvar;
}

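/* Move every variable of the given mode off the shader's variable list and
 * onto dst_list.
 */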
static void
move_variables_to_list(nir_shader *shader, nir_variable_mode mode,
                       struct exec_list *dst_list)
{
   nir_foreach_variable_with_modes_safe(var, shader, mode) {
      exec_node_remove(&var->node);
      exec_list_push_tail(dst_list, &var->node);
   }
}

bool
nir_lower_io_to_temporaries(nir_shader *shader, nir_function_impl *entrypoint,
                            bool outputs, bool inputs)
{
   struct lower_io_state state;

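   /* Outputs in these stages are shared across invocations (or otherwise have
    * special load/store semantics), so they can't be shadowed with
    * per-invocation temporaries; leave such shaders untouched.
    */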
   if (shader->info.stage == MESA_SHADER_TESS_CTRL ||
       shader->info.stage == MESA_SHADER_TASK ||
       shader->info.stage == MESA_SHADER_MESH) {
      nir_metadata_preserve(entrypoint, nir_metadata_all);
      return false;
   }

   state.shader = shader;
   state.entrypoint = entrypoint;
   state.input_map = _mesa_pointer_hash_table_create(NULL);

   exec_list_make_empty(&state.old_inputs);
   if (inputs)
      move_variables_to_list(shader, nir_var_shader_in, &state.old_inputs);

   exec_list_make_empty(&state.old_outputs);
   if (outputs)
      move_variables_to_list(shader, nir_var_shader_out, &state.old_outputs);

   exec_list_make_empty(&state.new_inputs);
   exec_list_make_empty(&state.new_outputs);

   /* Walk over all of the outputs, turn each output into a temporary, and
    * make a new variable for the actual output.
    */
   nir_foreach_variable_in_list(var, &state.old_outputs) {
      nir_variable *output = create_shadow_temp(&state, var);
      exec_list_push_tail(&state.new_outputs, &output->node);
   }

   /* and same for inputs: */
   nir_foreach_variable_in_list(var, &state.old_inputs) {
      nir_variable *input = create_shadow_temp(&state, var);
      exec_list_push_tail(&state.new_inputs, &input->node);
      _mesa_hash_table_insert(state.input_map, var, input);
   }

   nir_foreach_function_impl(impl, shader) {
      if (inputs)
         emit_input_copies_impl(&state, impl);

      if (outputs)
         emit_output_copies_impl(&state, impl);

      nir_metadata_preserve(impl, nir_metadata_control_flow);
   }

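   /* Put both the demoted temporaries and the new I/O variables back on the
    * shader's variable list.
    */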
   exec_list_append(&shader->variables, &state.old_inputs);
   exec_list_append(&shader->variables, &state.old_outputs);
   exec_list_append(&shader->variables, &state.new_inputs);
   exec_list_append(&shader->variables, &state.new_outputs);

   nir_fixup_deref_modes(shader);

   _mesa_hash_table_destroy(state.input_map, NULL);
   return true;
}