/*
 * Copyright © 2016 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"
#include "nir_deref.h"

/** @file nir_lower_io_to_scalar.c
 *
 * Replaces nir_load_input/nir_store_output operations that have
 * num_components != 1 with individual per-channel operations.
 */

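/* For example, a rough sketch of the transformation (illustrative NIR
 * listing, not verbatim compiler output):
 *
 *    vec3 32 ssa_2 = intrinsic load_input (ssa_1) (base=0, component=0, ...)
 *
 * becomes three single-component loads recombined into the original vector:
 *
 *    vec1 32 ssa_3 = intrinsic load_input (ssa_1) (base=0, component=0, ...)
 *    vec1 32 ssa_4 = intrinsic load_input (ssa_1) (base=0, component=1, ...)
 *    vec1 32 ssa_5 = intrinsic load_input (ssa_1) (base=0, component=2, ...)
 *    vec3 32 ssa_6 = vec3 ssa_3, ssa_4, ssa_5
 */

/* Copy the vector intrinsic's IO semantics onto a single-channel copy,
 * keeping only the 2-bit gs_streams selector that belongs to the given
 * component.
 */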
static void
set_io_semantics(nir_intrinsic_instr *scalar_intr,
                 nir_intrinsic_instr *vec_intr, unsigned component)
{
   nir_io_semantics sem = nir_intrinsic_io_semantics(vec_intr);
   sem.gs_streams = (sem.gs_streams >> (component * 2)) & 0x3;
   nir_intrinsic_set_io_semantics(scalar_intr, sem);
}

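/* Split a vector input/output load intrinsic into one single-component load
 * per channel, adjusting the component index (and bumping the IO offset when
 * a channel lands past component 3, as happens with 64-bit loads) and
 * rebuilding the original vector from the per-channel results.
 */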
static void
lower_load_input_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
{
   b->cursor = nir_before_instr(&intr->instr);

   nir_def *loads[NIR_MAX_VEC_COMPONENTS];

   for (unsigned i = 0; i < intr->num_components; i++) {
      bool is_64bit = (nir_intrinsic_instr_dest_type(intr) & NIR_ALU_TYPE_SIZE_MASK) == 64;
      unsigned newi = is_64bit ? i * 2 : i;
      unsigned newc = nir_intrinsic_component(intr);
      nir_intrinsic_instr *chan_intr =
         nir_intrinsic_instr_create(b->shader, intr->intrinsic);
      nir_def_init(&chan_intr->instr, &chan_intr->def, 1,
                   intr->def.bit_size);
      chan_intr->num_components = 1;

      if (intr->name)
         chan_intr->name = intr->name;
      nir_intrinsic_set_base(chan_intr, nir_intrinsic_base(intr));
      nir_intrinsic_set_component(chan_intr, (newc + newi) % 4);
      nir_intrinsic_set_dest_type(chan_intr, nir_intrinsic_dest_type(intr));
      set_io_semantics(chan_intr, intr, i);
      /* offset and vertex (if needed) */
      for (unsigned j = 0; j < nir_intrinsic_infos[intr->intrinsic].num_srcs; ++j)
         chan_intr->src[j] = nir_src_for_ssa(intr->src[j].ssa);
      if (newc + newi > 3) {
         nir_src *src = nir_get_io_offset_src(chan_intr);
         nir_def *offset = nir_iadd_imm(b, src->ssa, (newc + newi) / 4);
         *src = nir_src_for_ssa(offset);
      }

      nir_builder_instr_insert(b, &chan_intr->instr);

      loads[i] = &chan_intr->def;
   }

   nir_def_replace(&intr->def, nir_vec(b, loads, intr->num_components));
}

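/* Split a vector memory load (UBO/SSBO/shared/global) into one
 * single-component load per channel, stepping the byte offset and the
 * alignment offset by the channel size.
 */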
static void
lower_load_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
{
   b->cursor = nir_before_instr(&intr->instr);

   nir_def *loads[NIR_MAX_VEC_COMPONENTS];
   nir_def *base_offset = nir_get_io_offset_src(intr)->ssa;

   for (unsigned i = 0; i < intr->num_components; i++) {
      nir_intrinsic_instr *chan_intr =
         nir_intrinsic_instr_create(b->shader, intr->intrinsic);
      nir_def_init(&chan_intr->instr, &chan_intr->def, 1,
                   intr->def.bit_size);
      chan_intr->num_components = 1;

      if (intr->name)
         chan_intr->name = intr->name;
      nir_intrinsic_set_align_offset(chan_intr,
                                     (nir_intrinsic_align_offset(intr) +
                                      i * (intr->def.bit_size / 8)) %
                                        nir_intrinsic_align_mul(intr));
      nir_intrinsic_set_align_mul(chan_intr, nir_intrinsic_align_mul(intr));
      if (nir_intrinsic_has_access(intr))
         nir_intrinsic_set_access(chan_intr, nir_intrinsic_access(intr));
      if (nir_intrinsic_has_range(intr))
         nir_intrinsic_set_range(chan_intr, nir_intrinsic_range(intr));
      if (nir_intrinsic_has_range_base(intr))
         nir_intrinsic_set_range_base(chan_intr, nir_intrinsic_range_base(intr));
      if (nir_intrinsic_has_base(intr))
         nir_intrinsic_set_base(chan_intr, nir_intrinsic_base(intr));
      for (unsigned j = 0; j < nir_intrinsic_infos[intr->intrinsic].num_srcs - 1; j++)
         chan_intr->src[j] = nir_src_for_ssa(intr->src[j].ssa);

      /* increment offset per component */
      nir_def *offset = nir_iadd_imm(b, base_offset, i * (intr->def.bit_size / 8));
      *nir_get_io_offset_src(chan_intr) = nir_src_for_ssa(offset);

      nir_builder_instr_insert(b, &chan_intr->instr);

      loads[i] = &chan_intr->def;
   }

   nir_def_replace(&intr->def, nir_vec(b, loads, intr->num_components));
}

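/* Split a vector store_output-style intrinsic into per-channel stores,
 * scalarizing the transform feedback info and skipping channels that feed
 * neither a sysval output, nor the next shader stage, nor xfb.
 */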
static void
lower_store_output_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
{
   b->cursor = nir_before_instr(&intr->instr);

   nir_def *value = intr->src[0].ssa;

   for (unsigned i = 0; i < intr->num_components; i++) {
      if (!(nir_intrinsic_write_mask(intr) & (1 << i)))
         continue;

      bool is_64bit = (nir_intrinsic_instr_src_type(intr, 0) & NIR_ALU_TYPE_SIZE_MASK) == 64;
      unsigned newi = is_64bit ? i * 2 : i;
      unsigned newc = nir_intrinsic_component(intr);
      unsigned new_component = (newc + newi) % 4;
      nir_io_semantics sem = nir_intrinsic_io_semantics(intr);
      bool has_xfb = false;

      if (nir_intrinsic_has_io_xfb(intr)) {
         /* Find out which components are written via xfb. */
         for (unsigned c = 0; c <= new_component; c++) {
            nir_io_xfb xfb = c < 2 ? nir_intrinsic_io_xfb(intr) : nir_intrinsic_io_xfb2(intr);

            if (new_component < c + xfb.out[c % 2].num_components) {
               has_xfb = true;
               break;
            }
         }
      }

      /* After scalarization, some channels might not write anywhere - i.e.
       * they are not a sysval output, they don't feed the next shader, and
       * they don't write xfb. Don't create such stores.
       */
      if ((sem.no_sysval_output ||
           !nir_slot_is_sysval_output(sem.location, MESA_SHADER_NONE)) &&
          (sem.no_varying || !nir_slot_is_varying(sem.location)) &&
          !has_xfb)
         continue;

      nir_intrinsic_instr *chan_intr =
         nir_intrinsic_instr_create(b->shader, intr->intrinsic);
      chan_intr->num_components = 1;

      if (intr->name)
         chan_intr->name = intr->name;
      nir_intrinsic_set_base(chan_intr, nir_intrinsic_base(intr));
      nir_intrinsic_set_write_mask(chan_intr, 0x1);
      nir_intrinsic_set_component(chan_intr, new_component);
      nir_intrinsic_set_src_type(chan_intr, nir_intrinsic_src_type(intr));
      set_io_semantics(chan_intr, intr, i);

      if (nir_intrinsic_has_io_xfb(intr)) {
         /* Scalarize transform feedback info. */
         for (unsigned c = 0; c <= new_component; c++) {
            nir_io_xfb xfb = c < 2 ? nir_intrinsic_io_xfb(intr) : nir_intrinsic_io_xfb2(intr);

            if (new_component < c + xfb.out[c % 2].num_components) {
               nir_io_xfb scalar_xfb;

               memset(&scalar_xfb, 0, sizeof(scalar_xfb));
               scalar_xfb.out[new_component % 2].num_components = is_64bit ? 2 : 1;
               scalar_xfb.out[new_component % 2].buffer = xfb.out[c % 2].buffer;
               scalar_xfb.out[new_component % 2].offset = xfb.out[c % 2].offset +
                                                          new_component - c;
               if (new_component < 2)
                  nir_intrinsic_set_io_xfb(chan_intr, scalar_xfb);
               else
                  nir_intrinsic_set_io_xfb2(chan_intr, scalar_xfb);
               break;
            }
         }
      }

      /* value */
      chan_intr->src[0] = nir_src_for_ssa(nir_channel(b, value, i));
      /* offset and vertex (if needed) */
      for (unsigned j = 1; j < nir_intrinsic_infos[intr->intrinsic].num_srcs; ++j)
         chan_intr->src[j] = nir_src_for_ssa(intr->src[j].ssa);
      if (newc + newi > 3) {
         nir_src *src = nir_get_io_offset_src(chan_intr);
         nir_def *offset = nir_iadd_imm(b, src->ssa, (newc + newi) / 4);
         *src = nir_src_for_ssa(offset);
      }

      nir_builder_instr_insert(b, &chan_intr->instr);
   }

   nir_instr_remove(&intr->instr);
}

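/* Split a vector memory store (SSBO/shared/global) into one single-component
 * store per written channel, stepping the byte offset and the alignment
 * offset by the channel size.
 */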
static void
lower_store_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
{
   b->cursor = nir_before_instr(&intr->instr);

   nir_def *value = intr->src[0].ssa;
   nir_def *base_offset = nir_get_io_offset_src(intr)->ssa;

   /* iterate wrmask instead of num_components to handle split components */
   u_foreach_bit(i, nir_intrinsic_write_mask(intr)) {
      nir_intrinsic_instr *chan_intr =
         nir_intrinsic_instr_create(b->shader, intr->intrinsic);
      chan_intr->num_components = 1;

      if (intr->name)
         chan_intr->name = intr->name;
      nir_intrinsic_set_write_mask(chan_intr, 0x1);
      nir_intrinsic_set_align_offset(chan_intr,
                                     (nir_intrinsic_align_offset(intr) +
                                      i * (value->bit_size / 8)) %
                                        nir_intrinsic_align_mul(intr));
      nir_intrinsic_set_align_mul(chan_intr, nir_intrinsic_align_mul(intr));
      if (nir_intrinsic_has_access(intr))
         nir_intrinsic_set_access(chan_intr, nir_intrinsic_access(intr));
      if (nir_intrinsic_has_base(intr))
         nir_intrinsic_set_base(chan_intr, nir_intrinsic_base(intr));

      /* value */
      chan_intr->src[0] = nir_src_for_ssa(nir_channel(b, value, i));
      for (unsigned j = 1; j < nir_intrinsic_infos[intr->intrinsic].num_srcs - 1; j++)
         chan_intr->src[j] = nir_src_for_ssa(intr->src[j].ssa);

      /* increment offset per component */
      nir_def *offset = nir_iadd_imm(b, base_offset, i * (value->bit_size / 8));
      *nir_get_io_offset_src(chan_intr) = nir_src_for_ssa(offset);

      nir_builder_instr_insert(b, &chan_intr->instr);
   }

   nir_instr_remove(&intr->instr);
}

struct scalarize_state {
   nir_variable_mode mask;
   nir_instr_filter_cb filter;
   void *filter_data;
};

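/* Per-instruction callback: dispatch vector I/O and memory intrinsics that
 * match the mode mask (and the optional filter) to the scalarizers above.
 */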
static bool
nir_lower_io_to_scalar_instr(nir_builder *b, nir_instr *instr, void *data)
{
   struct scalarize_state *state = data;

   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

   if (intr->num_components == 1)
      return false;

   if ((intr->intrinsic == nir_intrinsic_load_input ||
        intr->intrinsic == nir_intrinsic_load_per_primitive_input ||
        intr->intrinsic == nir_intrinsic_load_per_vertex_input ||
        intr->intrinsic == nir_intrinsic_load_interpolated_input ||
        intr->intrinsic == nir_intrinsic_load_input_vertex) &&
       (state->mask & nir_var_shader_in) &&
       (!state->filter || state->filter(instr, state->filter_data))) {
      lower_load_input_to_scalar(b, intr);
      return true;
   }

   if ((intr->intrinsic == nir_intrinsic_load_output ||
        intr->intrinsic == nir_intrinsic_load_per_vertex_output ||
        intr->intrinsic == nir_intrinsic_load_per_primitive_output) &&
       (state->mask & nir_var_shader_out) &&
       (!state->filter || state->filter(instr, state->filter_data))) {
      lower_load_input_to_scalar(b, intr);
      return true;
   }

   if (((intr->intrinsic == nir_intrinsic_load_ubo && (state->mask & nir_var_mem_ubo)) ||
        (intr->intrinsic == nir_intrinsic_load_ssbo && (state->mask & nir_var_mem_ssbo)) ||
        (intr->intrinsic == nir_intrinsic_load_global && (state->mask & nir_var_mem_global)) ||
        (intr->intrinsic == nir_intrinsic_load_shared && (state->mask & nir_var_mem_shared))) &&
       (!state->filter || state->filter(instr, state->filter_data))) {
      lower_load_to_scalar(b, intr);
      return true;
   }

   if ((intr->intrinsic == nir_intrinsic_store_output ||
        intr->intrinsic == nir_intrinsic_store_per_vertex_output ||
        intr->intrinsic == nir_intrinsic_store_per_primitive_output) &&
       (state->mask & nir_var_shader_out) &&
       (!state->filter || state->filter(instr, state->filter_data))) {
      lower_store_output_to_scalar(b, intr);
      return true;
   }

   if (((intr->intrinsic == nir_intrinsic_store_ssbo && (state->mask & nir_var_mem_ssbo)) ||
        (intr->intrinsic == nir_intrinsic_store_global && (state->mask & nir_var_mem_global)) ||
        (intr->intrinsic == nir_intrinsic_store_shared && (state->mask & nir_var_mem_shared))) &&
       (!state->filter || state->filter(instr, state->filter_data))) {
      lower_store_to_scalar(b, intr);
      return true;
   }

   return false;
}

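/* A minimal usage sketch (the mode mask and the NULL filter are illustrative;
 * this pass operates on the explicit I/O intrinsics produced by
 * nir_lower_io()):
 *
 *    NIR_PASS(progress, shader, nir_lower_io_to_scalar,
 *             nir_var_shader_in | nir_var_shader_out, NULL, NULL);
 */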
bool
nir_lower_io_to_scalar(nir_shader *shader, nir_variable_mode mask,
                       nir_instr_filter_cb filter, void *filter_data)
{
   struct scalarize_state state = {
      mask,
      filter,
      filter_data
   };
   return nir_shader_instructions_pass(shader,
                                       nir_lower_io_to_scalar_instr,
                                       nir_metadata_control_flow,
                                       &state);
}

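/* Return the array of per-channel replacement variables for var, creating an
 * empty 4-entry array on first use.
 */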
static nir_variable **
get_channel_variables(struct hash_table *ht, nir_variable *var)
{
   nir_variable **chan_vars;
   struct hash_entry *entry = _mesa_hash_table_search(ht, var);
   if (!entry) {
      chan_vars = (nir_variable **)calloc(4, sizeof(nir_variable *));
      _mesa_hash_table_insert(ht, var, chan_vars);
   } else {
      chan_vars = (nir_variable **)entry->data;
   }

   return chan_vars;
}

/*
 * Note that the src deref that we are cloning is the head of the
 * chain of deref instructions from the original intrinsic, but
 * the dst we are cloning to is the tail (because chains of deref
 * instructions are created back to front)
 */
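/* For example (illustrative, assuming a plain array chain): for an original
 * access a[i][j], the intrinsic's deref (src_head) is the a[i][j] instruction.
 * Walking up the parents reaches the deref_var for "a", and on the way back we
 * emit a deref_array for i and then for j on top of the new per-channel
 * variable's deref_var (dst_tail).
 */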

static nir_deref_instr *
clone_deref_array(nir_builder *b, nir_deref_instr *dst_tail,
                  const nir_deref_instr *src_head)
{
   const nir_deref_instr *parent = nir_deref_instr_parent(src_head);

   if (!parent)
      return dst_tail;

   assert(src_head->deref_type == nir_deref_type_array);

   dst_tail = clone_deref_array(b, dst_tail, parent);

   return nir_build_deref_array(b, dst_tail,
                                src_head->arr.index.ssa);
}

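/* Deref-based (pre-nir_lower_io) variant: split a vector load or interpolate
 * intrinsic into per-channel loads from freshly created single-component
 * variables and rebuild the original vector.
 */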
static void
lower_load_to_scalar_early(nir_builder *b, nir_intrinsic_instr *intr,
                           nir_variable *var, struct hash_table *split_inputs,
                           struct hash_table *split_outputs)
{
   b->cursor = nir_before_instr(&intr->instr);

   nir_def *loads[NIR_MAX_VEC_COMPONENTS];

   nir_variable **chan_vars;
   if (var->data.mode == nir_var_shader_in) {
      chan_vars = get_channel_variables(split_inputs, var);
   } else {
      chan_vars = get_channel_variables(split_outputs, var);
   }

   for (unsigned i = 0; i < intr->num_components; i++) {
      nir_variable *chan_var = chan_vars[var->data.location_frac + i];
      if (!chan_vars[var->data.location_frac + i]) {
         chan_var = nir_variable_clone(var, b->shader);
         chan_var->data.location_frac = var->data.location_frac + i;
         chan_var->type = glsl_channel_type(chan_var->type);

         chan_vars[var->data.location_frac + i] = chan_var;

         nir_shader_add_variable(b->shader, chan_var);
      }

      nir_intrinsic_instr *chan_intr =
         nir_intrinsic_instr_create(b->shader, intr->intrinsic);
      nir_def_init(&chan_intr->instr, &chan_intr->def, 1,
                   intr->def.bit_size);
      chan_intr->num_components = 1;

      nir_deref_instr *deref = nir_build_deref_var(b, chan_var);

      deref = clone_deref_array(b, deref, nir_src_as_deref(intr->src[0]));

      chan_intr->src[0] = nir_src_for_ssa(&deref->def);

      if (intr->intrinsic == nir_intrinsic_interp_deref_at_offset ||
          intr->intrinsic == nir_intrinsic_interp_deref_at_sample ||
          intr->intrinsic == nir_intrinsic_interp_deref_at_vertex)
         chan_intr->src[1] = nir_src_for_ssa(intr->src[1].ssa);

      nir_builder_instr_insert(b, &chan_intr->instr);

      loads[i] = &chan_intr->def;
   }

   nir_def_replace(&intr->def, nir_vec(b, loads, intr->num_components));
}

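/* Deref-based (pre-nir_lower_io) variant: split a vector output store into
 * per-channel stores to freshly created single-component variables, skipping
 * channels that are not in the write mask.
 */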
static void
lower_store_output_to_scalar_early(nir_builder *b, nir_intrinsic_instr *intr,
                                   nir_variable *var,
                                   struct hash_table *split_outputs)
{
   b->cursor = nir_before_instr(&intr->instr);

   nir_def *value = intr->src[1].ssa;

   nir_variable **chan_vars = get_channel_variables(split_outputs, var);
   for (unsigned i = 0; i < intr->num_components; i++) {
      if (!(nir_intrinsic_write_mask(intr) & (1 << i)))
         continue;

      nir_variable *chan_var = chan_vars[var->data.location_frac + i];
      if (!chan_vars[var->data.location_frac + i]) {
         chan_var = nir_variable_clone(var, b->shader);
         chan_var->data.location_frac = var->data.location_frac + i;
         chan_var->type = glsl_channel_type(chan_var->type);

         chan_vars[var->data.location_frac + i] = chan_var;

         nir_shader_add_variable(b->shader, chan_var);
      }

      nir_intrinsic_instr *chan_intr =
         nir_intrinsic_instr_create(b->shader, intr->intrinsic);
      chan_intr->num_components = 1;

      nir_intrinsic_set_write_mask(chan_intr, 0x1);

      nir_deref_instr *deref = nir_build_deref_var(b, chan_var);

      deref = clone_deref_array(b, deref, nir_src_as_deref(intr->src[0]));

      chan_intr->src[0] = nir_src_for_ssa(&deref->def);
      chan_intr->src[1] = nir_src_for_ssa(nir_channel(b, value, i));

      nir_builder_instr_insert(b, &chan_intr->instr);
   }

   /* Remove the old store intrinsic */
   nir_instr_remove(&intr->instr);
}

struct io_to_scalar_early_state {
   struct hash_table *split_inputs, *split_outputs;
   nir_variable_mode mask;
};

static bool
nir_lower_io_to_scalar_early_instr(nir_builder *b, nir_instr *instr, void *data)
{
   struct io_to_scalar_early_state *state = data;

   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

   if (intr->num_components == 1)
      return false;

   if (intr->intrinsic != nir_intrinsic_load_deref &&
       intr->intrinsic != nir_intrinsic_store_deref &&
       intr->intrinsic != nir_intrinsic_interp_deref_at_centroid &&
       intr->intrinsic != nir_intrinsic_interp_deref_at_sample &&
       intr->intrinsic != nir_intrinsic_interp_deref_at_offset &&
       intr->intrinsic != nir_intrinsic_interp_deref_at_vertex)
      return false;

   nir_deref_instr *deref = nir_src_as_deref(intr->src[0]);
   if (!nir_deref_mode_is_one_of(deref, state->mask))
      return false;

   nir_variable *var = nir_deref_instr_get_variable(deref);
   nir_variable_mode mode = var->data.mode;

   /* TODO: add patch support */
   if (var->data.patch)
      return false;

   /* TODO: add doubles support */
   if (glsl_type_is_64bit(glsl_without_array(var->type)))
      return false;

   if (!(b->shader->info.stage == MESA_SHADER_VERTEX &&
         mode == nir_var_shader_in) &&
       var->data.location < VARYING_SLOT_VAR0 &&
       var->data.location >= 0)
      return false;

   /* Don't bother splitting if we can't opt away any unused
    * components.
    */
   if (var->data.always_active_io)
      return false;

   if (var->data.must_be_shader_input)
      return false;

   /* Skip types we cannot split */
   if (glsl_type_is_matrix(glsl_without_array(var->type)) ||
       glsl_type_is_struct_or_ifc(glsl_without_array(var->type)))
      return false;

   switch (intr->intrinsic) {
   case nir_intrinsic_interp_deref_at_centroid:
   case nir_intrinsic_interp_deref_at_sample:
   case nir_intrinsic_interp_deref_at_offset:
   case nir_intrinsic_interp_deref_at_vertex:
   case nir_intrinsic_load_deref:
      if ((state->mask & nir_var_shader_in && mode == nir_var_shader_in) ||
          (state->mask & nir_var_shader_out && mode == nir_var_shader_out)) {
         lower_load_to_scalar_early(b, intr, var, state->split_inputs,
                                    state->split_outputs);
         return true;
      }
      break;
   case nir_intrinsic_store_deref:
      if (state->mask & nir_var_shader_out &&
          mode == nir_var_shader_out) {
         lower_store_output_to_scalar_early(b, intr, var, state->split_outputs);
         return true;
      }
      break;
   default:
      break;
   }

   return false;
}

/*
 * This function is intended to be called earlier than nir_lower_io_to_scalar(),
 * i.e. before nir_lower_io() is called and while I/O is still expressed through
 * variable derefs.
 */
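/* A minimal usage sketch (the mode mask and the surrounding pass ordering are
 * illustrative):
 *
 *    NIR_PASS(progress, shader, nir_lower_io_to_scalar_early,
 *             nir_var_shader_in | nir_var_shader_out);
 *    ...
 *    NIR_PASS(_, shader, nir_lower_io, ...);
 */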
bool
nir_lower_io_to_scalar_early(nir_shader *shader, nir_variable_mode mask)
{
   struct io_to_scalar_early_state state = {
      .split_inputs = _mesa_pointer_hash_table_create(NULL),
      .split_outputs = _mesa_pointer_hash_table_create(NULL),
      .mask = mask
   };

   bool progress = nir_shader_instructions_pass(shader,
                                                nir_lower_io_to_scalar_early_instr,
                                                nir_metadata_control_flow,
                                                &state);

   /* Remove the old inputs from the shader's input list */
   hash_table_foreach(state.split_inputs, entry) {
      nir_variable *var = (nir_variable *)entry->key;
      exec_node_remove(&var->node);

      free(entry->data);
   }

   /* Remove the old outputs from the shader's output list */
   hash_table_foreach(state.split_outputs, entry) {
      nir_variable *var = (nir_variable *)entry->key;
      exec_node_remove(&var->node);

      free(entry->data);
   }

   _mesa_hash_table_destroy(state.split_inputs, NULL);
   _mesa_hash_table_destroy(state.split_outputs, NULL);

   nir_remove_dead_derefs(shader);

   return progress;
}
609