/*
 * Copyright © 2019 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/u_dynarray.h"
#include "nir.h"
#include "nir_builder.h"
#include "nir_deref.h"

/** @file nir_lower_io_to_vector.c
 *
 * Merges compatible input/output variables residing in different components
 * of the same location. It's expected that further passes such as
 * nir_lower_io_to_temporaries will combine loads and stores of the merged
 * variables, producing vector nir_load_input/nir_store_output instructions
 * when all is said and done.
 */

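/* As a rough sketch of the transformation (GLSL-style pseudocode, assuming a
 * fragment shader with two varyings packed into the same location):
 *
 *    layout(location = 0)                in vec2 a;
 *    layout(location = 0, component = 2) in vec2 b;
 *
 * becomes a single merged variable
 *
 *    layout(location = 0) in vec4 merged;
 *
 * with every load of a or b rewritten into a load of "merged" followed by a
 * swizzle that selects the original components.
 */
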
/* FRAG_RESULT_MAX + 1 instead of just FRAG_RESULT_MAX because of how this
 * pass handles dual-source blending. */
#define MAX_SLOTS MAX2(VARYING_SLOT_TESS_MAX, FRAG_RESULT_MAX + 1)

static unsigned
get_slot(const nir_variable *var)
{
   /* This handling of dual-source blending might not be correct when more than
    * one render target is supported, but it seems no driver supports more than
    * one. */
   return var->data.location + var->data.index;
}

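/* Returns the per-vertex element type of an arrayed I/O variable (e.g. a TCS
 * input), storing the vertex count in *num_vertices; for non-arrayed
 * variables it returns the type unchanged and sets *num_vertices to 0.
 */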
static const struct glsl_type *
get_per_vertex_type(const nir_shader *shader, const nir_variable *var,
                    unsigned *num_vertices)
{
   if (nir_is_arrayed_io(var, shader->info.stage)) {
      assert(glsl_type_is_array(var->type));
      if (num_vertices)
         *num_vertices = glsl_get_length(var->type);
      return glsl_get_array_element(var->type);
   } else {
      if (num_vertices)
         *num_vertices = 0;
      return var->type;
   }
}

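/* Rebuilds an (array of) vector/scalar type with the vector tail resized to
 * num_components components, keeping the array structure intact.
 */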
static const struct glsl_type *
resize_array_vec_type(const struct glsl_type *type, unsigned num_components)
{
   if (glsl_type_is_array(type)) {
      const struct glsl_type *arr_elem =
         resize_array_vec_type(glsl_get_array_element(type), num_components);
      return glsl_array_type(arr_elem, glsl_get_length(type), 0);
   } else {
      assert(glsl_type_is_vector_or_scalar(type));
      return glsl_vector_type(glsl_get_base_type(type), num_components);
   }
}

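/* Checks whether two variables at the same location are allowed to merge:
 * same arrayed-ness and compatible array structure (an exact match when
 * same_array_structure is set), matching 32-bit vector/scalar base types,
 * and compatible interpolation and XFB state.
 */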
static bool
variables_can_merge(const nir_shader *shader,
                    const nir_variable *a, const nir_variable *b,
                    bool same_array_structure)
{
   if (a->data.compact || b->data.compact)
      return false;

   if (a->data.per_view || b->data.per_view)
      return false;

   const struct glsl_type *a_type_tail = a->type;
   const struct glsl_type *b_type_tail = b->type;

   if (nir_is_arrayed_io(a, shader->info.stage) !=
       nir_is_arrayed_io(b, shader->info.stage))
      return false;

   /* They must have the same array structure */
   if (same_array_structure) {
      while (glsl_type_is_array(a_type_tail)) {
         if (!glsl_type_is_array(b_type_tail))
            return false;

         if (glsl_get_length(a_type_tail) != glsl_get_length(b_type_tail))
            return false;

         a_type_tail = glsl_get_array_element(a_type_tail);
         b_type_tail = glsl_get_array_element(b_type_tail);
      }
      if (glsl_type_is_array(b_type_tail))
         return false;
   } else {
      a_type_tail = glsl_without_array(a_type_tail);
      b_type_tail = glsl_without_array(b_type_tail);
   }

   if (!glsl_type_is_vector_or_scalar(a_type_tail) ||
       !glsl_type_is_vector_or_scalar(b_type_tail))
      return false;

   if (glsl_get_base_type(a_type_tail) != glsl_get_base_type(b_type_tail))
      return false;

   /* TODO: add 64/16-bit support? */
   if (glsl_get_bit_size(a_type_tail) != 32)
      return false;

   assert(a->data.mode == b->data.mode);
   if (shader->info.stage == MESA_SHADER_FRAGMENT &&
       a->data.mode == nir_var_shader_in &&
       (a->data.interpolation != b->data.interpolation ||
        a->data.centroid != b->data.centroid ||
        a->data.sample != b->data.sample))
      return false;

   if (shader->info.stage == MESA_SHADER_FRAGMENT &&
       a->data.mode == nir_var_shader_out &&
       a->data.index != b->data.index)
      return false;

   /* Merging XFB outputs is tricky: there must not be any overlaps by the
    * time nir_gather_xfb_info_with_varyings() runs later on, and merging
    * here would end up triggering an assert there.
    */
   if ((shader->info.stage == MESA_SHADER_VERTEX ||
        shader->info.stage == MESA_SHADER_TESS_EVAL ||
        shader->info.stage == MESA_SHADER_GEOMETRY) &&
       a->data.mode == nir_var_shader_out &&
       (a->data.explicit_xfb_buffer || b->data.explicit_xfb_buffer))
      return false;

   return true;
}

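/* Scans forward from *loc for a run of variables that can all be folded into
 * a single flat vec4 (or array-of-vec4) variable.  Advances *loc past the
 * scanned slots and returns the merged type, or NULL if there is nothing
 * mergeable at this location.
 */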
static const struct glsl_type *
get_flat_type(const nir_shader *shader, nir_variable *old_vars[MAX_SLOTS][4],
              unsigned *loc, nir_variable **first_var, unsigned *num_vertices)
{
   unsigned todo = 1;
   unsigned slots = 0;
   unsigned num_vars = 0;
   enum glsl_base_type base = GLSL_TYPE_ERROR;
   *num_vertices = 0;
   *first_var = NULL;

   while (todo) {
      assert(*loc < MAX_SLOTS);
      for (unsigned frac = 0; frac < 4; frac++) {
         nir_variable *var = old_vars[*loc][frac];
         if (!var)
            continue;
         if ((*first_var &&
              !variables_can_merge(shader, var, *first_var, false)) ||
             var->data.compact) {
            (*loc)++;
            return NULL;
         }

         if (!*first_var) {
            if (!glsl_type_is_vector_or_scalar(glsl_without_array(var->type))) {
               (*loc)++;
               return NULL;
            }
            *first_var = var;
            base = glsl_get_base_type(
               glsl_without_array(get_per_vertex_type(shader, var, NULL)));
         }

         bool vs_in = shader->info.stage == MESA_SHADER_VERTEX &&
                      var->data.mode == nir_var_shader_in;
         unsigned var_slots = glsl_count_attribute_slots(
            get_per_vertex_type(shader, var, num_vertices), vs_in);
         todo = MAX2(todo, var_slots);
         num_vars++;
      }
      todo--;
      slots++;
      (*loc)++;
   }

   if (num_vars <= 1)
      return NULL;

   if (slots == 1)
      return glsl_vector_type(base, 4);
   else
      return glsl_array_type(glsl_vector_type(base, 4), slots, 0);
}

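/* Builds the merged replacement variables for one mode (inputs or outputs).
 * On return, new_vars[loc][frac] maps each old location/component to its
 * merged variable, flat_vars marks the slots merged via the "flat" vec4
 * path, and every superseded variable is queued in demote_vars.  Returns
 * true if anything was merged.
 */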
static bool
create_new_io_vars(nir_shader *shader, nir_variable_mode mode,
                   nir_variable *new_vars[MAX_SLOTS][4],
                   bool flat_vars[MAX_SLOTS],
                   struct util_dynarray *demote_vars)
{
   nir_variable *old_vars[MAX_SLOTS][4] = { { 0 } };

   bool has_io_var = false;
   nir_foreach_variable_with_modes(var, shader, mode) {
      unsigned frac = var->data.location_frac;
      old_vars[get_slot(var)][frac] = var;
      has_io_var = true;
   }

   if (!has_io_var)
      return false;

   bool merged_any_vars = false;

   for (unsigned loc = 0; loc < MAX_SLOTS; loc++) {
      unsigned frac = 0;
      while (frac < 4) {
         nir_variable *first_var = old_vars[loc][frac];
         if (!first_var) {
            frac++;
            continue;
         }

         int first = frac;
         bool found_merge = false;

         while (frac < 4) {
            nir_variable *var = old_vars[loc][frac];
            if (!var)
               break;

            if (var != first_var) {
               if (!variables_can_merge(shader, first_var, var, true))
                  break;

               found_merge = true;
            }

            const unsigned num_components =
               glsl_get_components(glsl_without_array(var->type));
            if (!num_components) {
               assert(frac == 0);
               frac++;
               break; /* The type was a struct. */
            }

            /* We had better not have any overlapping vars */
            for (unsigned i = 1; i < num_components; i++)
               assert(old_vars[loc][frac + i] == NULL);

            frac += num_components;
         }

         if (!found_merge)
            continue;

         merged_any_vars = true;

         nir_variable *var = nir_variable_clone(old_vars[loc][first], shader);
         var->data.location_frac = first;
         var->type = resize_array_vec_type(var->type, frac - first);

         nir_shader_add_variable(shader, var);
         for (unsigned i = first; i < frac; i++) {
            new_vars[loc][i] = var;
            if (old_vars[loc][i]) {
               util_dynarray_append(demote_vars, nir_variable *, old_vars[loc][i]);
               old_vars[loc][i] = NULL;
            }
         }

         old_vars[loc][first] = var;
      }
   }

   /* "flat" mode: tries to ensure there is at most one variable per slot by
    * merging variables into vec4s
    */
   for (unsigned loc = 0; loc < MAX_SLOTS;) {
      nir_variable *first_var;
      unsigned num_vertices;
      unsigned new_loc = loc;
      const struct glsl_type *flat_type =
         get_flat_type(shader, old_vars, &new_loc, &first_var, &num_vertices);
      if (flat_type) {
         merged_any_vars = true;

         nir_variable *var = nir_variable_clone(first_var, shader);
         var->data.location_frac = 0;
         if (num_vertices)
            var->type = glsl_array_type(flat_type, num_vertices, 0);
         else
            var->type = flat_type;

         nir_shader_add_variable(shader, var);
         unsigned num_slots =
            glsl_type_is_array(flat_type) ? glsl_get_length(flat_type) : 1;
         for (unsigned i = 0; i < num_slots; i++) {
            for (unsigned j = 0; j < 4; j++)
               new_vars[loc + i][j] = var;
            flat_vars[loc + i] = true;
         }
      }
      loc = new_loc;
   }

   return merged_any_vars;
}

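/* Recreates the array deref chain of `leader` on top of the merged variable
 * so that the rewritten access walks the same array indices as the original.
 */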
static nir_deref_instr *
build_array_deref_of_new_var(nir_builder *b, nir_variable *new_var,
                             nir_deref_instr *leader)
{
   if (leader->deref_type == nir_deref_type_var)
      return nir_build_deref_var(b, new_var);

   nir_deref_instr *parent =
      build_array_deref_of_new_var(b, new_var, nir_deref_instr_parent(leader));

   return nir_build_deref_follower(b, parent, leader);
}

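/* Flattens a (possibly multi-dimensional) array deref chain into a single
 * slot index by accumulating base + index * slots_per_element at each array
 * level, skipping the outermost per-vertex dimension for arrayed I/O.
 */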
static nir_def *
build_array_index(nir_builder *b, nir_deref_instr *deref, nir_def *base,
                  bool vs_in, bool per_vertex)
{
   switch (deref->deref_type) {
   case nir_deref_type_var:
      return base;
   case nir_deref_type_array: {
      nir_def *index = nir_i2iN(b, deref->arr.index.ssa,
                                deref->def.bit_size);

      if (nir_deref_instr_parent(deref)->deref_type == nir_deref_type_var &&
          per_vertex)
         return base;

      return nir_iadd(
         b, build_array_index(b, nir_deref_instr_parent(deref), base, vs_in, per_vertex),
         nir_amul_imm(b, index, glsl_count_attribute_slots(deref->type, vs_in)));
   }
   default:
      unreachable("Invalid deref instruction type");
   }
}

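/* Builds a deref into a "flat" merged variable: first the per-vertex index
 * (if any), then a single computed slot index derived from the leader's
 * array indices plus the constant slot offset `base`.
 */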
static nir_deref_instr *
build_array_deref_of_new_var_flat(nir_shader *shader,
                                  nir_builder *b, nir_variable *new_var,
                                  nir_deref_instr *leader, unsigned base)
{
   nir_deref_instr *deref = nir_build_deref_var(b, new_var);

   bool per_vertex = nir_is_arrayed_io(new_var, shader->info.stage);
   if (per_vertex) {
      nir_deref_path path;
      nir_deref_path_init(&path, leader, NULL);

      assert(path.path[0]->deref_type == nir_deref_type_var);
      nir_deref_instr *p = path.path[1];
      nir_deref_path_finish(&path);

      nir_def *index = p->arr.index.ssa;
      deref = nir_build_deref_array(b, deref, index);
   }

   if (!glsl_type_is_array(deref->type))
      return deref;

   bool vs_in = shader->info.stage == MESA_SHADER_VERTEX &&
                new_var->data.mode == nir_var_shader_in;
   return nir_build_deref_array(b, deref,
                                build_array_index(b, leader, nir_imm_int(b, base), vs_in, per_vertex));
}

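/* Stages in which reading back an output variable is allowed; only used to
 * assert that we don't rewrite an output load in a stage that can't have
 * one.
 */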
ASSERTED static bool
nir_shader_can_read_output(const shader_info *info)
{
   switch (info->stage) {
   case MESA_SHADER_TESS_CTRL:
   case MESA_SHADER_FRAGMENT:
      return true;

   case MESA_SHADER_TASK:
   case MESA_SHADER_MESH:
      /* TODO(mesh): This will not be allowed on EXT. */
      return true;

   default:
      return false;
   }
}

static bool
nir_lower_io_to_vector_impl(nir_function_impl *impl, nir_variable_mode modes)
{
   assert(!(modes & ~(nir_var_shader_in | nir_var_shader_out)));

   nir_builder b = nir_builder_create(impl);

   nir_metadata_require(impl, nir_metadata_dominance);

   struct util_dynarray demote_vars;
   util_dynarray_init(&demote_vars, NULL);

   nir_shader *shader = impl->function->shader;
   nir_variable *new_inputs[MAX_SLOTS][4] = { { 0 } };
   nir_variable *new_outputs[MAX_SLOTS][4] = { { 0 } };
   bool flat_inputs[MAX_SLOTS] = { 0 };
   bool flat_outputs[MAX_SLOTS] = { 0 };

   if (modes & nir_var_shader_in) {
      /* Vertex shaders support overlapping inputs.  We don't handle those. */
      assert(b.shader->info.stage != MESA_SHADER_VERTEX);

      /* If we don't actually merge any variables, remove that bit from modes
       * so we don't bother doing extra non-work.
       */
      if (!create_new_io_vars(shader, nir_var_shader_in,
                              new_inputs, flat_inputs, &demote_vars))
         modes &= ~nir_var_shader_in;
   }

   if (modes & nir_var_shader_out) {
      /* If we don't actually merge any variables, remove that bit from modes
       * so we don't bother doing extra non-work.
       */
      if (!create_new_io_vars(shader, nir_var_shader_out,
                              new_outputs, flat_outputs, &demote_vars))
         modes &= ~nir_var_shader_out;
   }

   if (!modes)
      return false;

   bool progress = false;

   /* Actually lower all the IO load/store intrinsics.  Load instructions are
    * lowered to a vector load and an ALU instruction to grab the channels we
    * want.  Outputs are lowered to a write-masked store of the vector output.
    * For non-TCS outputs, we then run nir_lower_io_to_temporaries at the end
    * to clean up the partial writes.
    */
   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

         switch (intrin->intrinsic) {
         case nir_intrinsic_load_deref:
         case nir_intrinsic_interp_deref_at_centroid:
         case nir_intrinsic_interp_deref_at_sample:
         case nir_intrinsic_interp_deref_at_offset:
         case nir_intrinsic_interp_deref_at_vertex: {
            nir_deref_instr *old_deref = nir_src_as_deref(intrin->src[0]);
            if (!nir_deref_mode_is_one_of(old_deref, modes))
               break;

            if (nir_deref_mode_is(old_deref, nir_var_shader_out))
               assert(nir_shader_can_read_output(&b.shader->info));

            nir_variable *old_var = nir_deref_instr_get_variable(old_deref);

            const unsigned loc = get_slot(old_var);
            const unsigned old_frac = old_var->data.location_frac;
            nir_variable *new_var = old_var->data.mode == nir_var_shader_in ?
               new_inputs[loc][old_frac] : new_outputs[loc][old_frac];
            bool flat = old_var->data.mode == nir_var_shader_in ?
               flat_inputs[loc] : flat_outputs[loc];
            if (!new_var)
               break;

            const unsigned new_frac = new_var->data.location_frac;

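            /* Mask of the components covered by the old variable, expressed
             * in the (vec4) component space of the merged location.
             */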
            nir_component_mask_t vec4_comp_mask =
               ((1 << intrin->num_components) - 1) << old_frac;

            b.cursor = nir_before_instr(&intrin->instr);

            /* Rewrite the load to use the new variable and only select a
             * portion of the result.
             */
            nir_deref_instr *new_deref;
            if (flat) {
               new_deref = build_array_deref_of_new_var_flat(
                  shader, &b, new_var, old_deref, loc - get_slot(new_var));
            } else {
               assert(get_slot(new_var) == loc);
               new_deref = build_array_deref_of_new_var(&b, new_var, old_deref);
               assert(glsl_type_is_vector(new_deref->type));
            }
            nir_src_rewrite(&intrin->src[0], &new_deref->def);

            intrin->num_components =
               glsl_get_components(new_deref->type);
            intrin->def.num_components = intrin->num_components;

            b.cursor = nir_after_instr(&intrin->instr);

            nir_def *new_vec = nir_channels(&b, &intrin->def,
                                            vec4_comp_mask >> new_frac);
            nir_def_rewrite_uses_after(&intrin->def,
                                       new_vec,
                                       new_vec->parent_instr);

            progress = true;
            break;
         }

         case nir_intrinsic_store_deref: {
            nir_deref_instr *old_deref = nir_src_as_deref(intrin->src[0]);
            if (!nir_deref_mode_is(old_deref, nir_var_shader_out))
               break;

            nir_variable *old_var = nir_deref_instr_get_variable(old_deref);

            const unsigned loc = get_slot(old_var);
            const unsigned old_frac = old_var->data.location_frac;
            nir_variable *new_var = new_outputs[loc][old_frac];
            bool flat = flat_outputs[loc];
            if (!new_var)
               break;

            const unsigned new_frac = new_var->data.location_frac;

            b.cursor = nir_before_instr(&intrin->instr);

            /* Rewrite the store to be a masked store to the new variable */
            nir_deref_instr *new_deref;
            if (flat) {
               new_deref = build_array_deref_of_new_var_flat(
                  shader, &b, new_var, old_deref, loc - get_slot(new_var));
            } else {
               assert(get_slot(new_var) == loc);
               new_deref = build_array_deref_of_new_var(&b, new_var, old_deref);
               assert(glsl_type_is_vector(new_deref->type));
            }
            nir_src_rewrite(&intrin->src[0], &new_deref->def);

            intrin->num_components =
               glsl_get_components(new_deref->type);

            nir_component_mask_t old_wrmask = nir_intrinsic_write_mask(intrin);

            nir_def *old_value = intrin->src[1].ssa;
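            /* Shuffle the written channels of the old value into their
             * component positions within the merged vector; untouched
             * channels get undefs that the write mask ignores.
             */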
            nir_scalar comps[4];
            for (unsigned c = 0; c < intrin->num_components; c++) {
               if (new_frac + c >= old_frac &&
                   (old_wrmask & 1 << (new_frac + c - old_frac))) {
                  comps[c] = nir_get_scalar(old_value,
                                            new_frac + c - old_frac);
               } else {
                  comps[c] = nir_get_scalar(nir_undef(&b, old_value->num_components,
                                                      old_value->bit_size),
                                            0);
               }
            }
            nir_def *new_value = nir_vec_scalars(&b, comps, intrin->num_components);
            nir_src_rewrite(&intrin->src[1], new_value);

            nir_intrinsic_set_write_mask(intrin,
                                         old_wrmask << (old_frac - new_frac));

            progress = true;
            break;
         }

         default:
            break;
         }
      }
   }

   /* Demote the old vars to globals so that things like
    * nir_lower_io_to_temporaries() don't trigger on them.
    */
   util_dynarray_foreach(&demote_vars, nir_variable *, varp) {
      (*varp)->data.mode = nir_var_shader_temp;
   }
   nir_fixup_deref_modes(b.shader);
   util_dynarray_fini(&demote_vars);

   if (progress) {
      nir_metadata_preserve(impl, nir_metadata_control_flow);
   }

   return progress;
}

bool
nir_lower_io_to_vector(nir_shader *shader, nir_variable_mode modes)
{
   bool progress = false;

   nir_foreach_function_impl(impl, shader) {
      progress |= nir_lower_io_to_vector_impl(impl, modes);
   }

   return progress;
}

static bool
is_tess_level_variable(nir_variable *var)
{
   return var->data.location == VARYING_SLOT_TESS_LEVEL_OUTER ||
          var->data.location == VARYING_SLOT_TESS_LEVEL_INNER;
}

/* Make the tess factor variables vectors instead of compact arrays, so accesses
 * can be combined by nir_opt_cse()/nir_opt_combine_stores().
 */
bool
nir_vectorize_tess_levels(nir_shader *shader)
{
   nir_variable_mode mode;
   if (shader->info.stage == MESA_SHADER_TESS_CTRL)
      mode = nir_var_shader_out;
   else if (shader->info.stage == MESA_SHADER_TESS_EVAL)
      mode = nir_var_shader_in;
   else
      return false;

   bool progress = false;
   nir_foreach_variable_with_modes(var, shader, mode) {
      if (is_tess_level_variable(var)) {
         var->type = glsl_vector_type(GLSL_TYPE_FLOAT, glsl_get_length(var->type));
         var->data.compact = false;
         progress = true;
      }
   }

   if (progress) {
      nir_fixup_deref_types(shader);

      nir_lower_array_deref_of_vec(shader, mode, is_tess_level_variable,
                                   nir_lower_direct_array_deref_of_vec_load |
                                   nir_lower_indirect_array_deref_of_vec_load |
                                   nir_lower_direct_array_deref_of_vec_store |
                                   nir_lower_indirect_array_deref_of_vec_store);

      /* Remove dead array deref instructions so that nir_validate() doesn't
       * complain about array derefs on vector variables.
       */
      nir_opt_dce(shader);
   }

   return progress;
}
665