xref: /aosp_15_r20/external/mesa3d/src/compiler/nir/tests/vars_tests.cpp (revision 6104692788411f58d303aa86923a9ff6ecaded22)
1 /*
2  * Copyright © 2018 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include "nir_test.h"
25 #include "nir_deref.h"
26 
27 namespace {
28 
29 class nir_vars_test : public nir_test {
30 protected:
31    nir_vars_test();
32    ~nir_vars_test();
33 
create_var(nir_variable_mode mode,const glsl_type * type,const char * name)34    nir_variable *create_var(nir_variable_mode mode, const glsl_type *type,
35                             const char *name) {
36       if (mode == nir_var_function_temp)
37          return nir_local_variable_create(b->impl, type, name);
38       else
39          return nir_variable_create(b->shader, mode, type, name);
40    }
41 
create_int(nir_variable_mode mode,const char * name)42    nir_variable *create_int(nir_variable_mode mode, const char *name) {
43       return create_var(mode, glsl_int_type(), name);
44    }
45 
create_ivec2(nir_variable_mode mode,const char * name)46    nir_variable *create_ivec2(nir_variable_mode mode, const char *name) {
47       return create_var(mode, glsl_vector_type(GLSL_TYPE_INT, 2), name);
48    }
49 
create_ivec4(nir_variable_mode mode,const char * name)50    nir_variable *create_ivec4(nir_variable_mode mode, const char *name) {
51       return create_var(mode, glsl_vector_type(GLSL_TYPE_INT, 4), name);
52    }
53 
create_many_int(nir_variable_mode mode,const char * prefix,unsigned count)54    nir_variable **create_many_int(nir_variable_mode mode, const char *prefix, unsigned count) {
55       nir_variable **result = (nir_variable **)linear_alloc_child(lin_ctx, sizeof(nir_variable *) * count);
56       for (unsigned i = 0; i < count; i++)
57          result[i] = create_int(mode, linear_asprintf(lin_ctx, "%s%u", prefix, i));
58       return result;
59    }
60 
create_many_ivec2(nir_variable_mode mode,const char * prefix,unsigned count)61    nir_variable **create_many_ivec2(nir_variable_mode mode, const char *prefix, unsigned count) {
62       nir_variable **result = (nir_variable **)linear_alloc_child(lin_ctx, sizeof(nir_variable *) * count);
63       for (unsigned i = 0; i < count; i++)
64          result[i] = create_ivec2(mode, linear_asprintf(lin_ctx, "%s%u", prefix, i));
65       return result;
66    }
67 
create_many_ivec4(nir_variable_mode mode,const char * prefix,unsigned count)68    nir_variable **create_many_ivec4(nir_variable_mode mode, const char *prefix, unsigned count) {
69       nir_variable **result = (nir_variable **)linear_alloc_child(lin_ctx, sizeof(nir_variable *) * count);
70       for (unsigned i = 0; i < count; i++)
71          result[i] = create_ivec4(mode, linear_asprintf(lin_ctx, "%s%u", prefix, i));
72       return result;
73    }
74 
75    unsigned count_derefs(nir_deref_type deref_type);
76    unsigned count_intrinsics(nir_intrinsic_op intrinsic);
count_function_temp_vars(void)77    unsigned count_function_temp_vars(void) {
78       return exec_list_length(&b->impl->locals);
79    }
80 
count_shader_temp_vars(void)81    unsigned count_shader_temp_vars(void) {
82       unsigned count = 0;
83       nir_foreach_variable_with_modes(var, b->shader, nir_var_shader_temp)
84          count++;
85       return count;
86    }
87 
88    nir_intrinsic_instr *get_intrinsic(nir_intrinsic_op intrinsic,
89                                       unsigned index);
90 
91    nir_deref_instr *get_deref(nir_deref_type deref_type,
92                               unsigned index);
93    linear_ctx *lin_ctx;
94 };
95 
nir_vars_test::nir_vars_test()
   : nir_test::nir_test("nir_vars_test")
{
   /* Linear context for helper allocations; freed with the shader. */
   lin_ctx = linear_context(b->shader);
}
101 
~nir_vars_test()102 nir_vars_test::~nir_vars_test()
103 {
104    if (HasFailure()) {
105       printf("\nShader from the failed test:\n\n");
106       nir_print_shader(b->shader, stdout);
107    }
108 }
109 
110 unsigned
count_intrinsics(nir_intrinsic_op intrinsic)111 nir_vars_test::count_intrinsics(nir_intrinsic_op intrinsic)
112 {
113    unsigned count = 0;
114    nir_foreach_block(block, b->impl) {
115       nir_foreach_instr(instr, block) {
116          if (instr->type != nir_instr_type_intrinsic)
117             continue;
118          nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
119          if (intrin->intrinsic == intrinsic)
120             count++;
121       }
122    }
123    return count;
124 }
125 
126 unsigned
count_derefs(nir_deref_type deref_type)127 nir_vars_test::count_derefs(nir_deref_type deref_type)
128 {
129    unsigned count = 0;
130    nir_foreach_block(block, b->impl) {
131       nir_foreach_instr(instr, block) {
132          if (instr->type != nir_instr_type_deref)
133             continue;
134          nir_deref_instr *intrin = nir_instr_as_deref(instr);
135          if (intrin->deref_type == deref_type)
136             count++;
137       }
138    }
139    return count;
140 }
141 
142 nir_intrinsic_instr *
get_intrinsic(nir_intrinsic_op intrinsic,unsigned index)143 nir_vars_test::get_intrinsic(nir_intrinsic_op intrinsic,
144                              unsigned index)
145 {
146    nir_foreach_block(block, b->impl) {
147       nir_foreach_instr(instr, block) {
148          if (instr->type != nir_instr_type_intrinsic)
149             continue;
150          nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
151          if (intrin->intrinsic == intrinsic) {
152             if (index == 0)
153                return intrin;
154             index--;
155          }
156       }
157    }
158    return NULL;
159 }
160 
161 nir_deref_instr *
get_deref(nir_deref_type deref_type,unsigned index)162 nir_vars_test::get_deref(nir_deref_type deref_type,
163                          unsigned index)
164 {
165    nir_foreach_block(block, b->impl) {
166       nir_foreach_instr(instr, block) {
167          if (instr->type != nir_instr_type_deref)
168             continue;
169          nir_deref_instr *deref = nir_instr_as_deref(instr);
170          if (deref->deref_type == deref_type) {
171             if (index == 0)
172                return deref;
173             index--;
174          }
175       }
176    }
177    return NULL;
178 }
179 
/* Allow grouping the tests while still sharing the helpers.  Each empty
 * subclass only serves as a distinct gtest fixture name, so the test
 * binary groups tests by the pass under test.
 */
class nir_redundant_load_vars_test : public nir_vars_test {};
class nir_copy_prop_vars_test : public nir_vars_test {};
class nir_dead_write_vars_test : public nir_vars_test {};
class nir_combine_stores_test : public nir_vars_test {};
class nir_split_vars_test : public nir_vars_test {};
class nir_remove_dead_variables_test : public nir_vars_test {};
187 
188 } // namespace
189 
/* Like nir_load_var(), but marks the load with ACCESS_VOLATILE. */
static nir_def *
nir_load_var_volatile(nir_builder *b, nir_variable *var)
{
   return nir_load_deref_with_access(b, nir_build_deref_var(b, var),
                                     ACCESS_VOLATILE);
}
196 
/* Like nir_store_var(), but marks the store with ACCESS_VOLATILE. */
static void
nir_store_var_volatile(nir_builder *b, nir_variable *var,
                       nir_def *value, nir_component_mask_t writemask)
{
   nir_store_deref_with_access(b, nir_build_deref_var(b, var),
                               value, writemask, ACCESS_VOLATILE);
}
204 
TEST_F(nir_redundant_load_vars_test, duplicated_load)
{
   /* Load a variable twice in the same block.  One should be removed. */

   nir_variable *in = create_int(nir_var_mem_global, "in");
   nir_variable **out = create_many_int(nir_var_shader_out, "out", 2);

   nir_store_var(b, out[0], nir_load_var(b, in), 1);
   nir_store_var(b, out[1], nir_load_var(b, in), 1);

   nir_validate_shader(b->shader, NULL);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 2);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   /* The second load was redundant and should have been eliminated. */
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 1);
}
226 
TEST_F(nir_redundant_load_vars_test, duplicated_load_volatile)
{
   /* Load a variable three times, with a volatile load in the middle.
    * Only the redundant non-volatile load should be removed.
    */

   nir_variable *in = create_int(nir_var_mem_global, "in");
   nir_variable **out = create_many_int(nir_var_shader_out, "out", 3);

   /* Volatile prevents us from eliminating a load by combining it with
    * another.  It shouldn't however, prevent us from combining other
    * non-volatile loads.
    */
   nir_store_var(b, out[0], nir_load_var(b, in), 1);
   nir_store_var(b, out[1], nir_load_var_volatile(b, in), 1);
   nir_store_var(b, out[2], nir_load_var(b, in), 1);

   nir_validate_shader(b->shader, NULL);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 3);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   /* The volatile load stays; the third load reuses the first one. */
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 2);

   nir_intrinsic_instr *first_store = get_intrinsic(nir_intrinsic_store_deref, 0);

   nir_intrinsic_instr *third_store = get_intrinsic(nir_intrinsic_store_deref, 2);

   /* The first and third stores should now read the same SSA value. */
   EXPECT_EQ(first_store->src[1].ssa, third_store->src[1].ssa);
}
259 
TEST_F(nir_redundant_load_vars_test, duplicated_load_in_two_blocks)
{
   /* Load a variable twice in different blocks.  One should be removed. */

   nir_variable *in = create_int(nir_var_mem_global, "in");
   nir_variable **out = create_many_int(nir_var_shader_out, "out", 2);

   nir_store_var(b, out[0], nir_load_var(b, in), 1);

   /* Forces the stores to be in different blocks. */
   nir_pop_if(b, nir_push_if(b, nir_imm_int(b, 0)));

   nir_store_var(b, out[1], nir_load_var(b, in), 1);

   nir_validate_shader(b->shader, NULL);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 2);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   /* The load after the if is still redundant: nothing wrote `in`. */
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 1);
}
285 
TEST_F(nir_redundant_load_vars_test, invalidate_inside_if_block)
{
   /* Load variables, then write to some of them in different branches of
    * the if statement.  They should be invalidated accordingly.
    */

   nir_variable **g = create_many_int(nir_var_shader_temp, "g", 3);
   nir_variable **out = create_many_int(nir_var_shader_out, "out", 3);

   nir_load_var(b, g[0]);
   nir_load_var(b, g[1]);
   nir_load_var(b, g[2]);

   /* g[0] is written in the then-branch, g[1] in the else-branch; g[2] is
    * untouched by the if.
    */
   nir_if *if_stmt = nir_push_if(b, nir_imm_int(b, 0));
   nir_store_var(b, g[0], nir_imm_int(b, 10), 1);

   nir_push_else(b, if_stmt);
   nir_store_var(b, g[1], nir_imm_int(b, 20), 1);

   nir_pop_if(b, if_stmt);

   nir_store_var(b, out[0], nir_load_var(b, g[0]), 1);
   nir_store_var(b, out[1], nir_load_var(b, g[1]), 1);
   nir_store_var(b, out[2], nir_load_var(b, g[2]), 1);

   nir_validate_shader(b->shader, NULL);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   /* There are 3 initial loads, plus 2 loads for the values invalidated
    * inside the if statement.
    */
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 5);

   /* We only load g[2] once. */
   unsigned g2_load_count = 0;
   for (int i = 0; i < 5; i++) {
         nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_deref, i);
         if (nir_intrinsic_get_var(load, 0) == g[2])
            g2_load_count++;
   }
   EXPECT_EQ(g2_load_count, 1);
}
330 
TEST_F(nir_redundant_load_vars_test, invalidate_live_load_in_the_end_of_loop)
{
   /* Invalidating a load in the end of loop body will apply to the whole loop
    * body.
    */

   nir_variable *v = create_int(nir_var_mem_global, "v");

   nir_load_var(b, v);

   nir_loop *loop = nir_push_loop(b);

   nir_if *if_stmt = nir_push_if(b, nir_imm_int(b, 0));
   nir_jump(b, nir_jump_break);
   nir_pop_if(b, if_stmt);

   /* The store at the end of the loop body invalidates the in-loop load on
    * the next iteration, so neither load may be removed.
    */
   nir_load_var(b, v);
   nir_store_var(b, v, nir_imm_int(b, 10), 1);

   nir_pop_loop(b, loop);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   ASSERT_FALSE(progress);
}
355 
TEST_F(nir_copy_prop_vars_test, simple_copies)
{
   /* copy in -> temp -> out.  After copy propagation both copies should
    * read directly from `in`.
    */
   nir_variable *in   = create_int(nir_var_shader_in,     "in");
   nir_variable *temp = create_int(nir_var_function_temp, "temp");
   nir_variable *out  = create_int(nir_var_shader_out,    "out");

   nir_copy_var(b, temp, in);
   nir_copy_var(b, out, temp);

   nir_validate_shader(b->shader, NULL);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_copy_deref), 2);

   nir_intrinsic_instr *first_copy = get_intrinsic(nir_intrinsic_copy_deref, 0);

   nir_intrinsic_instr *second_copy = get_intrinsic(nir_intrinsic_copy_deref, 1);

   /* Both copies now share the same source deref (`in`). */
   EXPECT_EQ(first_copy->src[1].ssa, second_copy->src[1].ssa);
}
380 
TEST_F(nir_copy_prop_vars_test, self_copy)
{
   /* A copy of a variable onto itself is a no-op and should be deleted. */
   nir_variable *v = create_int(nir_var_mem_global, "v");

   nir_copy_var(b, v, v);

   nir_validate_shader(b->shader, NULL);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_copy_deref), 0);
}
396 
TEST_F(nir_copy_prop_vars_test, simple_store_load)
{
   /* Store a value, load it back, store it elsewhere.  The load should be
    * forwarded so both stores use the original SSA value.
    */
   nir_variable **v = create_many_ivec2(nir_var_function_temp, "v", 2);
   unsigned mask = 1 | 2;

   nir_def *stored_value = nir_imm_ivec2(b, 10, 20);
   nir_store_var(b, v[0], stored_value, mask);

   nir_def *read_value = nir_load_var(b, v[0]);
   nir_store_var(b, v[1], read_value, mask);

   nir_validate_shader(b->shader, NULL);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 2);

   for (int i = 0; i < 2; i++) {
      nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, i);
      EXPECT_EQ(store->src[1].ssa, stored_value);
   }
}
422 
TEST_F(nir_copy_prop_vars_test, store_store_load)
{
   /* Two full stores to the same variable; a load afterwards must observe
    * only the second value.
    */
   nir_variable **v = create_many_ivec2(nir_var_function_temp, "v", 2);
   unsigned mask = 1 | 2;

   nir_def *first_value = nir_imm_ivec2(b, 10, 20);
   nir_store_var(b, v[0], first_value, mask);

   nir_def *second_value = nir_imm_ivec2(b, 30, 40);
   nir_store_var(b, v[0], second_value, mask);

   nir_def *read_value = nir_load_var(b, v[0]);
   nir_store_var(b, v[1], read_value, mask);

   nir_validate_shader(b->shader, NULL);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 3);

   /* Store to v[1] should use second_value directly. */
   nir_intrinsic_instr *store_to_v1 = get_intrinsic(nir_intrinsic_store_deref, 2);
   ASSERT_EQ(nir_intrinsic_get_var(store_to_v1, 0), v[1]);
   EXPECT_EQ(store_to_v1->src[1].ssa, second_value);
}
451 
TEST_F(nir_copy_prop_vars_test, store_store_load_different_components)
{
   /* Two partial stores to disjoint components; a read of component 1 must
    * still see the first store's value.
    */
   nir_variable **v = create_many_ivec2(nir_var_function_temp, "v", 2);

   nir_def *first_value = nir_imm_ivec2(b, 10, 20);
   nir_store_var(b, v[0], first_value, 1 << 1);

   nir_def *second_value = nir_imm_ivec2(b, 30, 40);
   nir_store_var(b, v[0], second_value, 1 << 0);

   nir_def *read_value = nir_load_var(b, v[0]);
   nir_store_var(b, v[1], read_value, 1 << 1);

   nir_validate_shader(b->shader, NULL);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   /* Fold the vec/swizzle chain so the component value is a constant. */
   nir_opt_constant_folding(b->shader);
   nir_validate_shader(b->shader, NULL);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 3);

   /* Store to v[1] should use first_value directly.  The write of
    * second_value did not overwrite the component it uses.
    */
   nir_intrinsic_instr *store_to_v1 = get_intrinsic(nir_intrinsic_store_deref, 2);
   ASSERT_EQ(nir_intrinsic_get_var(store_to_v1, 0), v[1]);
   ASSERT_EQ(nir_src_comp_as_uint(store_to_v1->src[1], 1), 20);
}
484 
TEST_F(nir_copy_prop_vars_test, store_store_load_different_components_in_many_blocks)
{
   /* Same as store_store_load_different_components, but with the stores and
    * load separated by block boundaries.
    */
   nir_variable **v = create_many_ivec2(nir_var_function_temp, "v", 2);

   nir_def *first_value = nir_imm_ivec2(b, 10, 20);
   nir_store_var(b, v[0], first_value, 1 << 1);

   /* Adding an if statement will cause blocks to be created. */
   nir_pop_if(b, nir_push_if(b, nir_imm_int(b, 0)));

   nir_def *second_value = nir_imm_ivec2(b, 30, 40);
   nir_store_var(b, v[0], second_value, 1 << 0);

   /* Adding an if statement will cause blocks to be created. */
   nir_pop_if(b, nir_push_if(b, nir_imm_int(b, 0)));

   nir_def *read_value = nir_load_var(b, v[0]);
   nir_store_var(b, v[1], read_value, 1 << 1);

   nir_validate_shader(b->shader, NULL);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   /* Fold the vec/swizzle chain so the component value is a constant. */
   nir_opt_constant_folding(b->shader);
   nir_validate_shader(b->shader, NULL);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 3);

   /* Store to v[1] should use first_value directly.  The write of
    * second_value did not overwrite the component it uses.
    */
   nir_intrinsic_instr *store_to_v1 = get_intrinsic(nir_intrinsic_store_deref, 2);
   ASSERT_EQ(nir_intrinsic_get_var(store_to_v1, 0), v[1]);
   ASSERT_EQ(nir_src_comp_as_uint(store_to_v1->src[1], 1), 20);
}
523 
TEST_F(nir_copy_prop_vars_test, store_volatile)
{
   /* Three full stores to v[0], the middle one volatile, then a load that
    * is stored to v[1].
    */
   nir_variable **v = create_many_ivec2(nir_var_function_temp, "v", 2);
   unsigned mask = 1 | 2;

   nir_def *first_value = nir_imm_ivec2(b, 10, 20);
   nir_store_var(b, v[0], first_value, mask);

   nir_def *second_value = nir_imm_ivec2(b, 30, 40);
   nir_store_var_volatile(b, v[0], second_value, mask);

   nir_def *third_value = nir_imm_ivec2(b, 50, 60);
   nir_store_var(b, v[0], third_value, mask);

   nir_def *read_value = nir_load_var(b, v[0]);
   nir_store_var(b, v[1], read_value, mask);

   nir_validate_shader(b->shader, NULL);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 4);

   /* Our approach here is a bit scorched-earth.  We expect the volatile store
    * in the middle to cause both that store and the one before it to be kept.
    * Technically, volatile only prevents combining the volatile store with
    * another store and one could argue that the store before the volatile and
    * the one after it could be combined.  However, it seems safer to just
    * treat a volatile store like an atomic and prevent any combining across
    * it.
    */
   nir_intrinsic_instr *store_to_v1 = get_intrinsic(nir_intrinsic_store_deref, 3);
   ASSERT_EQ(nir_intrinsic_get_var(store_to_v1, 0), v[1]);
   EXPECT_EQ(store_to_v1->src[1].ssa, third_value);
}
562 
TEST_F(nir_copy_prop_vars_test, self_copy_volatile)
{
   /* Self-copies are no-ops and get deleted, but not when either side of
    * the copy is volatile.
    */
   nir_variable *v = create_int(nir_var_mem_global, "v");

   nir_copy_var(b, v, v);
   nir_copy_deref_with_access(b, nir_build_deref_var(b, v),
                                 nir_build_deref_var(b, v),
                                 (gl_access_qualifier)0, ACCESS_VOLATILE);
   nir_copy_deref_with_access(b, nir_build_deref_var(b, v),
                                 nir_build_deref_var(b, v),
                                 ACCESS_VOLATILE, (gl_access_qualifier)0);
   nir_copy_var(b, v, v);

   nir_validate_shader(b->shader, NULL);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_copy_deref), 2);

   /* Only the two volatile copies remain, in their original order:
    * volatile-source first, then volatile-destination.
    */
   nir_intrinsic_instr *first = get_intrinsic(nir_intrinsic_copy_deref, 0);
   nir_intrinsic_instr *second = get_intrinsic(nir_intrinsic_copy_deref, 1);
   ASSERT_EQ(nir_intrinsic_src_access(first), ACCESS_VOLATILE);
   ASSERT_EQ(nir_intrinsic_dst_access(first), (gl_access_qualifier)0);
   ASSERT_EQ(nir_intrinsic_src_access(second), (gl_access_qualifier)0);
   ASSERT_EQ(nir_intrinsic_dst_access(second), ACCESS_VOLATILE);
}
593 
TEST_F(nir_copy_prop_vars_test, memory_barrier_in_two_blocks)
{
   /* A full memory barrier between two loads invalidates the copy-prop
    * state for the load after it, but not for the load before it.
    */
   nir_variable **v = create_many_int(nir_var_mem_global, "v", 4);

   nir_store_var(b, v[0], nir_imm_int(b, 1), 1);
   nir_store_var(b, v[1], nir_imm_int(b, 2), 1);

   /* Split into many blocks. */
   nir_pop_if(b, nir_push_if(b, nir_imm_int(b, 0)));

   nir_store_var(b, v[2], nir_load_var(b, v[0]), 1);

   nir_scoped_memory_barrier(b, SCOPE_DEVICE, NIR_MEMORY_ACQ_REL,
                             nir_var_mem_global);

   nir_store_var(b, v[3], nir_load_var(b, v[1]), 1);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   ASSERT_TRUE(progress);

   /* Only the second load will remain after the optimization. */
   ASSERT_EQ(1, count_intrinsics(nir_intrinsic_load_deref));
   nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_deref, 0);
   ASSERT_EQ(nir_intrinsic_get_var(load, 0), v[1]);
}
619 
TEST_F(nir_redundant_load_vars_test, acquire_barrier_prevents_load_removal)
{
   /* An acquire barrier may make new values visible, so the load after it
    * cannot reuse the load before it.
    */
   nir_variable **x = create_many_int(nir_var_mem_global, "x", 1);

   nir_load_var(b, x[0]);

   nir_scoped_memory_barrier(b, SCOPE_DEVICE, NIR_MEMORY_ACQUIRE,
                             nir_var_mem_global);

   nir_load_var(b, x[0]);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   ASSERT_FALSE(progress);

   ASSERT_EQ(2, count_intrinsics(nir_intrinsic_load_deref));
}
636 
TEST_F(nir_redundant_load_vars_test, acquire_barrier_prevents_same_mode_load_removal)
{
   /* An acquire barrier invalidates all loads of the modes it covers. */
   nir_variable **x = create_many_int(nir_var_mem_global, "x", 2);

   nir_load_var(b, x[0]);
   nir_load_var(b, x[1]);

   nir_scoped_memory_barrier(b, SCOPE_DEVICE, NIR_MEMORY_ACQUIRE,
                             nir_var_mem_global);

   nir_load_var(b, x[0]);
   nir_load_var(b, x[1]);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   ASSERT_FALSE(progress);

   ASSERT_EQ(4, count_intrinsics(nir_intrinsic_load_deref));
}
655 
TEST_F(nir_redundant_load_vars_test, acquire_barrier_allows_different_mode_load_removal)
{
   /* An acquire barrier on global memory invalidates the global loads but
    * leaves the shared-memory loads eligible for removal.
    */
   nir_variable **x = create_many_int(nir_var_mem_global, "x", 2);
   nir_variable **y = create_many_int(nir_var_mem_shared, "y", 2);

   nir_load_var(b, x[0]);
   nir_load_var(b, x[1]);
   nir_load_var(b, y[0]);
   nir_load_var(b, y[1]);

   nir_scoped_memory_barrier(b, SCOPE_DEVICE, NIR_MEMORY_ACQUIRE,
                             nir_var_mem_global);

   nir_load_var(b, x[0]);
   nir_load_var(b, x[1]);
   nir_load_var(b, y[0]);
   nir_load_var(b, y[1]);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   ASSERT_TRUE(progress);

   /* The two shared loads after the barrier were removed. */
   ASSERT_EQ(6, count_intrinsics(nir_intrinsic_load_deref));

   nir_intrinsic_instr *load;

   load = get_intrinsic(nir_intrinsic_load_deref, 0);
   ASSERT_EQ(nir_intrinsic_get_var(load, 0), x[0]);
   load = get_intrinsic(nir_intrinsic_load_deref, 1);
   ASSERT_EQ(nir_intrinsic_get_var(load, 0), x[1]);

   load = get_intrinsic(nir_intrinsic_load_deref, 2);
   ASSERT_EQ(nir_intrinsic_get_var(load, 0), y[0]);
   load = get_intrinsic(nir_intrinsic_load_deref, 3);
   ASSERT_EQ(nir_intrinsic_get_var(load, 0), y[1]);

   load = get_intrinsic(nir_intrinsic_load_deref, 4);
   ASSERT_EQ(nir_intrinsic_get_var(load, 0), x[0]);
   load = get_intrinsic(nir_intrinsic_load_deref, 5);
   ASSERT_EQ(nir_intrinsic_get_var(load, 0), x[1]);
}
696 
TEST_F(nir_redundant_load_vars_test, release_barrier_allows_load_removal)
{
   /* A release barrier publishes our writes; it does not make other
    * threads' writes visible, so the second load can still be removed.
    */
   nir_variable **x = create_many_int(nir_var_mem_global, "x", 1);

   nir_load_var(b, x[0]);

   nir_scoped_memory_barrier(b, SCOPE_DEVICE, NIR_MEMORY_RELEASE,
                             nir_var_mem_global);

   nir_load_var(b, x[0]);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   ASSERT_TRUE(progress);

   ASSERT_EQ(1, count_intrinsics(nir_intrinsic_load_deref));
}
713 
TEST_F(nir_redundant_load_vars_test, release_barrier_allows_same_mode_load_removal)
{
   /* A release barrier does not invalidate loads, even of the same mode. */
   nir_variable **x = create_many_int(nir_var_mem_global, "x", 2);

   nir_load_var(b, x[0]);
   nir_load_var(b, x[1]);

   nir_scoped_memory_barrier(b, SCOPE_DEVICE, NIR_MEMORY_RELEASE,
                             nir_var_mem_global);

   nir_load_var(b, x[0]);
   nir_load_var(b, x[1]);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   ASSERT_TRUE(progress);

   ASSERT_EQ(2, count_intrinsics(nir_intrinsic_load_deref));
}
732 
TEST_F(nir_redundant_load_vars_test, release_barrier_allows_different_mode_load_removal)
{
   /* A release barrier on global memory does not invalidate any loads:
    * all four loads after the barrier should be removed.
    */
   nir_variable **x = create_many_int(nir_var_mem_global, "x", 2);
   nir_variable **y = create_many_int(nir_var_mem_shared, "y", 2);

   nir_load_var(b, x[0]);
   nir_load_var(b, x[1]);
   nir_load_var(b, y[0]);
   nir_load_var(b, y[1]);

   nir_scoped_memory_barrier(b, SCOPE_DEVICE, NIR_MEMORY_RELEASE,
                             nir_var_mem_global);

   nir_load_var(b, x[0]);
   nir_load_var(b, x[1]);
   nir_load_var(b, y[0]);
   nir_load_var(b, y[1]);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   ASSERT_TRUE(progress);

   ASSERT_EQ(4, count_intrinsics(nir_intrinsic_load_deref));

   nir_intrinsic_instr *load;

   load = get_intrinsic(nir_intrinsic_load_deref, 0);
   ASSERT_EQ(nir_intrinsic_get_var(load, 0), x[0]);
   load = get_intrinsic(nir_intrinsic_load_deref, 1);
   ASSERT_EQ(nir_intrinsic_get_var(load, 0), x[1]);

   load = get_intrinsic(nir_intrinsic_load_deref, 2);
   ASSERT_EQ(nir_intrinsic_get_var(load, 0), y[0]);
   load = get_intrinsic(nir_intrinsic_load_deref, 3);
   ASSERT_EQ(nir_intrinsic_get_var(load, 0), y[1]);
}
768 
TEST_F(nir_copy_prop_vars_test, acquire_barrier_prevents_propagation)
{
   /* The stored value may not be forwarded to a load across an acquire
    * barrier: another thread's write may have become visible.
    */
   nir_variable **x = create_many_int(nir_var_mem_global, "x", 1);

   nir_store_var(b, x[0], nir_imm_int(b, 10), 1);

   nir_scoped_memory_barrier(b, SCOPE_DEVICE, NIR_MEMORY_ACQUIRE,
                             nir_var_mem_global);

   nir_load_var(b, x[0]);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   ASSERT_FALSE(progress);

   ASSERT_EQ(1, count_intrinsics(nir_intrinsic_store_deref));
   ASSERT_EQ(1, count_intrinsics(nir_intrinsic_load_deref));
}
786 
TEST_F(nir_copy_prop_vars_test, acquire_barrier_prevents_same_mode_propagation)
{
   /* An acquire barrier blocks store-to-load forwarding for every variable
    * of the modes it covers.
    */
   nir_variable **x = create_many_int(nir_var_mem_global, "x", 2);

   nir_store_var(b, x[0], nir_imm_int(b, 10), 1);
   nir_store_var(b, x[1], nir_imm_int(b, 20), 1);

   nir_scoped_memory_barrier(b, SCOPE_DEVICE, NIR_MEMORY_ACQUIRE,
                             nir_var_mem_global);

   nir_load_var(b, x[0]);
   nir_load_var(b, x[1]);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   ASSERT_FALSE(progress);

   ASSERT_EQ(2, count_intrinsics(nir_intrinsic_store_deref));
   ASSERT_EQ(2, count_intrinsics(nir_intrinsic_load_deref));
}
806 
TEST_F(nir_copy_prop_vars_test, acquire_barrier_allows_different_mode_propagation)
{
   /* An acquire barrier on global memory blocks forwarding for the global
    * variables but not for the shared ones, whose loads get eliminated.
    */
   nir_variable **x = create_many_int(nir_var_mem_global, "x", 2);
   nir_variable **y = create_many_int(nir_var_mem_shared, "y", 2);

   nir_store_var(b, x[0], nir_imm_int(b, 10), 1);
   nir_store_var(b, x[1], nir_imm_int(b, 20), 1);
   nir_store_var(b, y[0], nir_imm_int(b, 30), 1);
   nir_store_var(b, y[1], nir_imm_int(b, 40), 1);

   nir_scoped_memory_barrier(b, SCOPE_DEVICE, NIR_MEMORY_ACQUIRE,
                             nir_var_mem_global);

   nir_load_var(b, x[0]);
   nir_load_var(b, x[1]);
   nir_load_var(b, y[0]);
   nir_load_var(b, y[1]);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   ASSERT_TRUE(progress);

   /* Only the two global loads survive; the shared loads were forwarded. */
   ASSERT_EQ(4, count_intrinsics(nir_intrinsic_store_deref));
   ASSERT_EQ(2, count_intrinsics(nir_intrinsic_load_deref));

   nir_intrinsic_instr *store;

   store = get_intrinsic(nir_intrinsic_store_deref, 0);
   ASSERT_EQ(nir_intrinsic_get_var(store, 0), x[0]);
   store = get_intrinsic(nir_intrinsic_store_deref, 1);
   ASSERT_EQ(nir_intrinsic_get_var(store, 0), x[1]);

   store = get_intrinsic(nir_intrinsic_store_deref, 2);
   ASSERT_EQ(nir_intrinsic_get_var(store, 0), y[0]);
   store = get_intrinsic(nir_intrinsic_store_deref, 3);
   ASSERT_EQ(nir_intrinsic_get_var(store, 0), y[1]);

   nir_intrinsic_instr *load;

   load = get_intrinsic(nir_intrinsic_load_deref, 0);
   ASSERT_EQ(nir_intrinsic_get_var(load, 0), x[0]);
   load = get_intrinsic(nir_intrinsic_load_deref, 1);
   ASSERT_EQ(nir_intrinsic_get_var(load, 0), x[1]);
}
850 
TEST_F(nir_copy_prop_vars_test, release_barrier_allows_propagation)
{
   /* A release barrier does not invalidate previously observed values, so
    * the load after the barrier can be replaced with the stored value.
    */
   nir_variable **x = create_many_int(nir_var_mem_global, "x", 1);

   nir_store_var(b, x[0], nir_imm_int(b, 10), 1);

   nir_scoped_memory_barrier(b, SCOPE_DEVICE, NIR_MEMORY_RELEASE,
                             nir_var_mem_global);

   nir_load_var(b, x[0]);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   ASSERT_TRUE(progress);

   ASSERT_EQ(1, count_intrinsics(nir_intrinsic_store_deref));
   /* Match the same_mode variant of this test: the propagated load must be
    * removed entirely.
    */
   ASSERT_EQ(0, count_intrinsics(nir_intrinsic_load_deref));
}
867 
TEST_F(nir_copy_prop_vars_test, release_barrier_allows_same_mode_propagation)
{
   /* Two global stores, a release barrier on the global mode, then loads of
    * both variables: the loads can be replaced by the stored values.
    */
   nir_variable **x = create_many_int(nir_var_mem_global, "x", 2);

   nir_store_var(b, x[0], nir_imm_int(b, 10), 1);
   nir_store_var(b, x[1], nir_imm_int(b, 20), 1);

   nir_scoped_memory_barrier(b, SCOPE_DEVICE, NIR_MEMORY_RELEASE,
                             nir_var_mem_global);

   nir_load_var(b, x[0]);
   nir_load_var(b, x[1]);

   ASSERT_TRUE(nir_opt_copy_prop_vars(b->shader));

   /* The stores stay; both loads are propagated away. */
   ASSERT_EQ(2, count_intrinsics(nir_intrinsic_store_deref));
   ASSERT_EQ(0, count_intrinsics(nir_intrinsic_load_deref));
}
887 
TEST_F(nir_copy_prop_vars_test, release_barrier_allows_different_mode_propagation)
{
   /* With only a release barrier in between, loads in both the global and
    * shared modes can be propagated away; every store must stay.
    */
   nir_variable **x = create_many_int(nir_var_mem_global, "x", 2);
   nir_variable **y = create_many_int(nir_var_mem_shared, "y", 2);

   nir_store_var(b, x[0], nir_imm_int(b, 10), 1);
   nir_store_var(b, x[1], nir_imm_int(b, 20), 1);
   nir_store_var(b, y[0], nir_imm_int(b, 30), 1);
   nir_store_var(b, y[1], nir_imm_int(b, 40), 1);

   nir_scoped_memory_barrier(b, SCOPE_DEVICE, NIR_MEMORY_RELEASE,
                             nir_var_mem_global);

   nir_load_var(b, x[0]);
   nir_load_var(b, x[1]);
   nir_load_var(b, y[0]);
   nir_load_var(b, y[1]);

   ASSERT_TRUE(nir_opt_copy_prop_vars(b->shader));

   ASSERT_EQ(4, count_intrinsics(nir_intrinsic_store_deref));
   ASSERT_EQ(0, count_intrinsics(nir_intrinsic_load_deref));

   /* Stores appear in order: x[0], x[1], y[0], y[1]. */
   for (unsigned i = 0; i < 4; i++) {
      nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, i);
      ASSERT_EQ(nir_intrinsic_get_var(store, 0), i < 2 ? x[i] : y[i - 2]);
   }
}
924 
TEST_F(nir_copy_prop_vars_test, acquire_barrier_prevents_propagation_from_copy)
{
   /* x[1] = x[0]; acquire barrier; x[2] = x[1].  The barrier discards the
    * knowledge that x[1] holds x[0]'s value, so the second copy must keep
    * x[1] as its source.
    */
   nir_variable **x = create_many_int(nir_var_mem_global, "x", 3);

   nir_copy_var(b, x[1], x[0]);
   nir_scoped_memory_barrier(b, SCOPE_DEVICE, NIR_MEMORY_ACQUIRE,
                             nir_var_mem_global);
   nir_copy_var(b, x[2], x[1]);

   ASSERT_FALSE(nir_opt_copy_prop_vars(b->shader));

   ASSERT_EQ(2, count_intrinsics(nir_intrinsic_copy_deref));

   nir_intrinsic_instr *first = get_intrinsic(nir_intrinsic_copy_deref, 0);
   ASSERT_EQ(nir_intrinsic_get_var(first, 1), x[0]);

   nir_intrinsic_instr *second = get_intrinsic(nir_intrinsic_copy_deref, 1);
   ASSERT_EQ(nir_intrinsic_get_var(second, 1), x[1]);
}
949 
TEST_F(nir_copy_prop_vars_test, acquire_barrier_prevents_propagation_from_copy_to_different_mode)
{
   /* y[0] (shared) is copied from x[0] (global).  The acquire barrier on the
    * global mode prevents the second copy from being rewritten to read x[0];
    * it must keep reading from y[0].
    */
   nir_variable **x = create_many_int(nir_var_mem_global, "x", 2);
   nir_variable **y = create_many_int(nir_var_mem_shared, "y", 1);

   nir_copy_var(b, y[0], x[0]);
   nir_scoped_memory_barrier(b, SCOPE_DEVICE, NIR_MEMORY_ACQUIRE,
                             nir_var_mem_global);
   nir_copy_var(b, x[1], y[0]);

   ASSERT_FALSE(nir_opt_copy_prop_vars(b->shader));

   ASSERT_EQ(2, count_intrinsics(nir_intrinsic_copy_deref));

   nir_intrinsic_instr *first = get_intrinsic(nir_intrinsic_copy_deref, 0);
   ASSERT_EQ(nir_intrinsic_get_var(first, 1), x[0]);

   nir_intrinsic_instr *second = get_intrinsic(nir_intrinsic_copy_deref, 1);
   ASSERT_EQ(nir_intrinsic_get_var(second, 1), y[0]);
}
975 
TEST_F(nir_copy_prop_vars_test, release_barrier_allows_propagation_from_copy)
{
   /* x[1] = x[0]; release barrier; x[2] = x[1].  A release barrier keeps the
    * copy information alive, so the second copy is rewritten to read x[0]
    * directly.
    */
   nir_variable **x = create_many_int(nir_var_mem_global, "x", 3);

   nir_copy_var(b, x[1], x[0]);
   nir_scoped_memory_barrier(b, SCOPE_DEVICE, NIR_MEMORY_RELEASE,
                             nir_var_mem_global);
   nir_copy_var(b, x[2], x[1]);

   ASSERT_TRUE(nir_opt_copy_prop_vars(b->shader));

   ASSERT_EQ(2, count_intrinsics(nir_intrinsic_copy_deref));

   /* After the pass both copies read from x[0]. */
   for (unsigned i = 0; i < 2; i++) {
      nir_intrinsic_instr *copy = get_intrinsic(nir_intrinsic_copy_deref, i);
      ASSERT_EQ(nir_intrinsic_get_var(copy, 1), x[0]);
   }
}
1000 
TEST_F(nir_copy_prop_vars_test, release_barrier_allows_propagation_from_copy_to_different_mode)
{
   /* y[0] (shared) is copied from x[0] (global).  A release barrier does not
    * drop that information, so the copy into x[1] can read x[0] directly.
    */
   nir_variable **x = create_many_int(nir_var_mem_global, "x", 2);
   nir_variable **y = create_many_int(nir_var_mem_shared, "y", 1);

   nir_copy_var(b, y[0], x[0]);
   nir_scoped_memory_barrier(b, SCOPE_DEVICE, NIR_MEMORY_RELEASE,
                             nir_var_mem_global);
   nir_copy_var(b, x[1], y[0]);

   ASSERT_TRUE(nir_opt_copy_prop_vars(b->shader));

   ASSERT_EQ(2, count_intrinsics(nir_intrinsic_copy_deref));

   /* After the pass both copies read from x[0]. */
   for (unsigned i = 0; i < 2; i++) {
      nir_intrinsic_instr *copy = get_intrinsic(nir_intrinsic_copy_deref, i);
      ASSERT_EQ(nir_intrinsic_get_var(copy, 1), x[0]);
   }
}
1026 
TEST_F(nir_copy_prop_vars_test, simple_store_load_in_two_blocks)
{
   /* A store and a load of the same variable separated by a control-flow
    * split: the propagation must work across basic blocks.
    */
   nir_variable **v = create_many_ivec2(nir_var_function_temp, "v", 2);
   const unsigned write_mask = 1 | 2;

   nir_def *stored_value = nir_imm_ivec2(b, 10, 20);
   nir_store_var(b, v[0], stored_value, write_mask);

   /* An empty if forces the load below into a different block. */
   nir_pop_if(b, nir_push_if(b, nir_imm_int(b, 0)));

   nir_store_var(b, v[1], nir_load_var(b, v[0]), write_mask);

   nir_validate_shader(b->shader, NULL);

   EXPECT_TRUE(nir_opt_copy_prop_vars(b->shader));

   nir_validate_shader(b->shader, NULL);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 2);

   /* Both stores should now write the original immediate. */
   for (int i = 0; i < 2; i++) {
      nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, i);
      EXPECT_EQ(store->src[1].ssa, stored_value);
   }
}
1055 
TEST_F(nir_copy_prop_vars_test, load_direct_array_deref_on_vector_reuses_previous_load)
{
   nir_variable *in0 = create_ivec2(nir_var_mem_global, "in0");
   nir_variable *in1 = create_ivec2(nir_var_mem_global, "in1");
   nir_variable *vec = create_ivec2(nir_var_mem_global, "vec");
   nir_variable *out = create_int(nir_var_mem_global, "out");

   nir_store_var(b, vec, nir_load_var(b, in0), 1 << 0);
   nir_store_var(b, vec, nir_load_var(b, in1), 1 << 1);

   /* vec.y is already known from the store above, so this load of vec[1]
    * should be eliminated.
    */
   nir_deref_instr *vec_y =
      nir_build_deref_array_imm(b, nir_build_deref_var(b, vec), 1);
   nir_def *loaded_y = nir_load_deref(b, vec_y);

   /* This store should end up using the value that came from in1. */
   nir_store_var(b, out, loaded_y, 1 << 0);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 3);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 3);

   EXPECT_TRUE(nir_opt_copy_prop_vars(b->shader));

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 2);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 3);

   nir_intrinsic_instr *last_store = get_intrinsic(nir_intrinsic_store_deref, 2);

   /* Extracting .y out of the in1 value happens through an ALU instruction. */
   ASSERT_TRUE(nir_src_as_alu_instr(last_store->src[1]));
}
1090 
TEST_F(nir_copy_prop_vars_test, load_direct_array_deref_on_vector_reuses_previous_copy)
{
   nir_variable *in0 = create_ivec2(nir_var_mem_global, "in0");
   nir_variable *vec = create_ivec2(nir_var_mem_global, "vec");

   nir_copy_var(b, vec, in0);

   /* Since vec is a copy of in0, this load of vec[1] should be rewritten to
    * read from in0 instead.
    */
   nir_deref_instr *vec_y =
      nir_build_deref_array_imm(b, nir_build_deref_var(b, vec), 1);
   nir_load_deref(b, vec_y);

   nir_validate_shader(b->shader, NULL);

   EXPECT_TRUE(nir_opt_copy_prop_vars(b->shader));

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 1);

   nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_deref, 0);
   ASSERT_EQ(nir_intrinsic_get_var(load, 0), in0);
}
1114 
TEST_F(nir_copy_prop_vars_test, load_direct_array_deref_on_vector_gets_reused)
{
   nir_variable *in0 = create_ivec2(nir_var_mem_global, "in0");
   nir_variable *vec = create_ivec2(nir_var_mem_global, "vec");
   nir_variable *out = create_ivec2(nir_var_mem_global, "out");

   /* Loading vec[1] records what vec.y contains. */
   nir_deref_instr *vec_y =
      nir_build_deref_array_imm(b, nir_build_deref_var(b, vec), 1);
   nir_load_deref(b, vec_y);

   /* Overwrite only vec.x. */
   nir_store_var(b, vec, nir_load_var(b, in0), 1 << 0);

   /* Both vec.x and vec.y are known at this point, so the full load of vec
    * should be removed.
    */
   nir_store_var(b, out, nir_load_var(b, vec), 0x3);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 3);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 2);

   EXPECT_TRUE(nir_opt_copy_prop_vars(b->shader));

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 2);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 2);

   /* Recombining the components happens through an ALU instruction. */
   nir_intrinsic_instr *last_store = get_intrinsic(nir_intrinsic_store_deref, 1);
   ASSERT_TRUE(nir_src_as_alu_instr(last_store->src[1]));
}
1147 
TEST_F(nir_copy_prop_vars_test, store_load_direct_array_deref_on_vector)
{
   nir_variable *vec = create_ivec2(nir_var_mem_global, "vec");
   nir_variable *out0 = create_int(nir_var_mem_global, "out0");
   nir_variable *out1 = create_ivec2(nir_var_mem_global, "out1");

   /* Write vec[1] and then vec[0] through direct array derefs. */
   nir_deref_instr *vec_y_dst =
      nir_build_deref_array_imm(b, nir_build_deref_var(b, vec), 1);
   nir_store_deref(b, vec_y_dst, nir_imm_int(b, 20), 1);

   nir_deref_instr *vec_x_dst =
      nir_build_deref_array_imm(b, nir_build_deref_var(b, vec), 0);
   nir_store_deref(b, vec_x_dst, nir_imm_int(b, 10), 1);

   /* With both components known, the two loads below should be removed. */
   nir_deref_instr *vec_y_src =
      nir_build_deref_array_imm(b, nir_build_deref_var(b, vec), 1);
   nir_store_var(b, out0, nir_load_deref(b, vec_y_src), 1);

   nir_store_var(b, out1, nir_load_var(b, vec), 1);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 2);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 4);

   EXPECT_TRUE(nir_opt_copy_prop_vars(b->shader));

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 0);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 4);

   /* The store to out0 reuses the value written to vec[1]. */
   nir_intrinsic_instr *store0 = get_intrinsic(nir_intrinsic_store_deref, 0);
   nir_intrinsic_instr *store2 = get_intrinsic(nir_intrinsic_store_deref, 2);
   EXPECT_EQ(store2->src[1].ssa, store0->src[1].ssa);

   /* The store to out1 combines both component values via an ALU op. */
   nir_intrinsic_instr *store3 = get_intrinsic(nir_intrinsic_store_deref, 3);
   EXPECT_TRUE(nir_src_as_alu_instr(store3->src[1]));
}
1190 
TEST_F(nir_copy_prop_vars_test, store_load_indirect_array_deref_on_vector)
{
   nir_variable *vec = create_ivec2(nir_var_mem_global, "vec");
   nir_variable *idx = create_int(nir_var_mem_global, "idx");
   nir_variable *out = create_int(nir_var_mem_global, "out");

   nir_def *index = nir_load_var(b, idx);

   /* Write vec[idx]. */
   nir_deref_instr *dst =
      nir_build_deref_array(b, nir_build_deref_var(b, vec), index);
   nir_store_deref(b, dst, nir_imm_int(b, 20), 1);

   /* Read vec[idx] back for out.  Same indirect index, so removable. */
   nir_deref_instr *src =
      nir_build_deref_array(b, nir_build_deref_var(b, vec), index);
   nir_store_var(b, out, nir_load_deref(b, src), 1);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 2);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 2);

   EXPECT_TRUE(nir_opt_copy_prop_vars(b->shader));

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 1);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 2);

   /* The value stored to vec[idx] reaches the store to out. */
   nir_intrinsic_instr *store0 = get_intrinsic(nir_intrinsic_store_deref, 0);
   nir_intrinsic_instr *store1 = get_intrinsic(nir_intrinsic_store_deref, 1);
   EXPECT_EQ(store0->src[1].ssa, store1->src[1].ssa);
}
1225 
TEST_F(nir_copy_prop_vars_test, store_load_direct_and_indirect_array_deref_on_vector)
{
   nir_variable *vec = create_ivec2(nir_var_mem_global, "vec");
   nir_variable *idx = create_int(nir_var_mem_global, "idx");
   nir_variable **out = create_many_int(nir_var_mem_global, "out", 2);

   nir_def *index = nir_load_var(b, idx);

   /* Fully initialize vec with a direct store. */
   nir_store_var(b, vec, nir_imm_ivec2(b, 10, 10), 1 | 2);

   /* First indirect read of vec[idx].  This one is currently not removed. */
   nir_deref_instr *vec_idx =
      nir_build_deref_array(b, nir_build_deref_var(b, vec), index);
   nir_store_var(b, out[0], nir_load_deref(b, vec_idx), 1);

   /* A second identical read should reuse the first and be removed. */
   nir_store_var(b, out[1], nir_load_deref(b, vec_idx), 1);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 3);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 3);

   EXPECT_TRUE(nir_opt_copy_prop_vars(b->shader));

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 2);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 3);

   /* Both out stores now use the same loaded value. */
   nir_intrinsic_instr *store1 = get_intrinsic(nir_intrinsic_store_deref, 1);
   nir_intrinsic_instr *store2 = get_intrinsic(nir_intrinsic_store_deref, 2);
   EXPECT_EQ(store1->src[1].ssa, store2->src[1].ssa);
}
1261 
TEST_F(nir_copy_prop_vars_test, store_load_indirect_array_deref)
{
   nir_variable *arr = create_var(nir_var_mem_global,
                                  glsl_array_type(glsl_int_type(), 10, 0),
                                  "arr");
   nir_variable *idx = create_int(nir_var_mem_global, "idx");
   nir_variable *out = create_int(nir_var_mem_global, "out");

   nir_def *index = nir_load_var(b, idx);

   /* Write arr[idx]. */
   nir_deref_instr *dst =
      nir_build_deref_array(b, nir_build_deref_var(b, arr), index);
   nir_store_deref(b, dst, nir_imm_int(b, 20), 1);

   /* Read arr[idx] back for out.  Same indirect index, so removable. */
   nir_deref_instr *src =
      nir_build_deref_array(b, nir_build_deref_var(b, arr), index);
   nir_store_var(b, out, nir_load_deref(b, src), 1);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 2);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 2);

   EXPECT_TRUE(nir_opt_copy_prop_vars(b->shader));

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 1);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 2);

   /* The value stored to arr[idx] reaches the store to out. */
   nir_intrinsic_instr *store0 = get_intrinsic(nir_intrinsic_store_deref, 0);
   nir_intrinsic_instr *store1 = get_intrinsic(nir_intrinsic_store_deref, 1);
   EXPECT_EQ(store0->src[1].ssa, store1->src[1].ssa);
}
1298 
TEST_F(nir_copy_prop_vars_test, restrict_ssbo_bindings)
{
   /* All three SSBO bindings are ACCESS_RESTRICT, so they cannot alias and
    * the value stored to ssbo0.x survives the store to ssbo1.x.
    */
   glsl_struct_field field = glsl_struct_field();
   field.type = glsl_int_type();
   field.name = "x";
   const glsl_type *ifc_type =
      glsl_interface_type(&field, 1, GLSL_INTERFACE_PACKING_STD430,
                          false /* row_major */, "b");
   nir_variable *ssbo0 = create_var(nir_var_mem_ssbo, ifc_type, "ssbo0");
   nir_variable *ssbo1 = create_var(nir_var_mem_ssbo, ifc_type, "ssbo1");
   ssbo0->data.access = ssbo1->data.access = ACCESS_RESTRICT;
   nir_variable *out = create_var(nir_var_mem_ssbo, ifc_type, "out");
   out->data.access = ACCESS_RESTRICT;

   nir_deref_instr *ssbo0_x =
      nir_build_deref_struct(b, nir_build_deref_var(b, ssbo0), 0);
   nir_store_deref(b, ssbo0_x, nir_imm_int(b, 20), 1);

   nir_deref_instr *ssbo1_x =
      nir_build_deref_struct(b, nir_build_deref_var(b, ssbo1), 0);
   nir_store_deref(b, ssbo1_x, nir_imm_int(b, 30), 1);

   /* Copy ssbo0.x into out.x; this load should be dropped. */
   nir_deref_instr *out_x =
      nir_build_deref_struct(b, nir_build_deref_var(b, out), 0);
   nir_store_deref(b, out_x, nir_load_deref(b, ssbo0_x), 1);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 1);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 3);

   EXPECT_TRUE(nir_opt_copy_prop_vars(b->shader));

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 0);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 3);

   /* The value stored to ssbo0.x is propagated to the store to out.x. */
   nir_intrinsic_instr *store0 = get_intrinsic(nir_intrinsic_store_deref, 0);
   nir_intrinsic_instr *store2 = get_intrinsic(nir_intrinsic_store_deref, 2);
   EXPECT_EQ(store0->src[1].ssa, store2->src[1].ssa);
}
1342 
TEST_F(nir_copy_prop_vars_test, aliasing_ssbo_bindings)
{
   /* ssbo0 and ssbo1 are not marked ACCESS_RESTRICT, so they may alias: the
    * store to ssbo1.x invalidates what is known about ssbo0.x and the load
    * must stay.
    */
   glsl_struct_field field = glsl_struct_field();
   field.type = glsl_int_type();
   field.name = "x";
   const glsl_type *ifc_type =
      glsl_interface_type(&field, 1, GLSL_INTERFACE_PACKING_STD430,
                          false /* row_major */, "b");
   nir_variable *ssbo0 = create_var(nir_var_mem_ssbo, ifc_type, "ssbo0");
   nir_variable *ssbo1 = create_var(nir_var_mem_ssbo, ifc_type, "ssbo1");
   nir_variable *out = create_var(nir_var_mem_ssbo, ifc_type, "out");
   out->data.access = ACCESS_RESTRICT;

   nir_deref_instr *ssbo0_x =
      nir_build_deref_struct(b, nir_build_deref_var(b, ssbo0), 0);
   nir_store_deref(b, ssbo0_x, nir_imm_int(b, 20), 1);

   nir_deref_instr *ssbo1_x =
      nir_build_deref_struct(b, nir_build_deref_var(b, ssbo1), 0);
   nir_store_deref(b, ssbo1_x, nir_imm_int(b, 30), 1);

   /* Copy ssbo0.x into out.x; this load must not be dropped. */
   nir_deref_instr *out_x =
      nir_build_deref_struct(b, nir_build_deref_var(b, out), 0);
   nir_store_deref(b, out_x, nir_load_deref(b, ssbo0_x), 1);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 1);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 3);

   EXPECT_FALSE(nir_opt_copy_prop_vars(b->shader));

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 1);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 3);
}
1380 
TEST_F(nir_copy_prop_vars_test, ssbo_array_binding_indirect)
{
   /* The indirect store to ssbo_arr[i].x may hit element 0, so the value
    * known for ssbo_arr[0].x is invalidated and the load must stay.
    */
   glsl_struct_field field = glsl_struct_field();
   field.type = glsl_int_type();
   field.name = "x";
   const glsl_type *ifc_type =
      glsl_interface_type(&field, 1, GLSL_INTERFACE_PACKING_STD430,
                          false /* row_major */, "b");
   const glsl_type *arr_ifc_type = glsl_array_type(ifc_type, 2, 0);
   nir_variable *ssbo_arr = create_var(nir_var_mem_ssbo, arr_ifc_type,
                                       "ssbo_arr");
   ssbo_arr->data.access = ACCESS_RESTRICT;
   nir_variable *out = create_var(nir_var_mem_ssbo, ifc_type, "out");
   out->data.access = ACCESS_RESTRICT;

   nir_deref_instr *ssbo_0 =
      nir_build_deref_array_imm(b, nir_build_deref_var(b, ssbo_arr), 0);
   nir_deref_instr *ssbo_0_x = nir_build_deref_struct(b, ssbo_0, 0);
   nir_store_deref(b, ssbo_0_x, nir_imm_int(b, 20), 1);

   nir_deref_instr *ssbo_i =
      nir_build_deref_array(b, nir_build_deref_var(b, ssbo_arr),
                               nir_load_local_invocation_index(b));
   nir_deref_instr *ssbo_i_x = nir_build_deref_struct(b, ssbo_i, 0);
   nir_store_deref(b, ssbo_i_x, nir_imm_int(b, 30), 1);

   /* Copy ssbo_arr[0].x into out.x; this load must not be dropped. */
   nir_deref_instr *out_x =
      nir_build_deref_struct(b, nir_build_deref_var(b, out), 0);
   nir_store_deref(b, out_x, nir_load_deref(b, ssbo_0_x), 1);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 1);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 3);

   EXPECT_FALSE(nir_opt_copy_prop_vars(b->shader));

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 1);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 3);
}
1423 
TEST_F(nir_copy_prop_vars_test, restrict_ssbo_array_binding)
{
   /* Distinct direct elements of a restrict SSBO array cannot alias, so the
    * value written to ssbo_arr[0].x survives the store to ssbo_arr[1].x.
    */
   glsl_struct_field field = glsl_struct_field();
   field.type = glsl_int_type();
   field.name = "x";
   const glsl_type *ifc_type =
      glsl_interface_type(&field, 1, GLSL_INTERFACE_PACKING_STD430,
                          false /* row_major */, "b");
   const glsl_type *arr_ifc_type = glsl_array_type(ifc_type, 2, 0);
   nir_variable *ssbo_arr = create_var(nir_var_mem_ssbo, arr_ifc_type,
                                       "ssbo_arr");
   ssbo_arr->data.access = ACCESS_RESTRICT;
   nir_variable *out = create_var(nir_var_mem_ssbo, ifc_type, "out");
   out->data.access = ACCESS_RESTRICT;

   nir_deref_instr *ssbo_0 =
      nir_build_deref_array_imm(b, nir_build_deref_var(b, ssbo_arr), 0);
   nir_deref_instr *ssbo_0_x = nir_build_deref_struct(b, ssbo_0, 0);
   nir_store_deref(b, ssbo_0_x, nir_imm_int(b, 20), 1);

   nir_deref_instr *ssbo_1 =
      nir_build_deref_array_imm(b, nir_build_deref_var(b, ssbo_arr), 1);
   nir_deref_instr *ssbo_1_x = nir_build_deref_struct(b, ssbo_1, 0);
   nir_store_deref(b, ssbo_1_x, nir_imm_int(b, 30), 1);

   /* Copy ssbo_arr[0].x into out.x; this load should be dropped. */
   nir_deref_instr *out_x =
      nir_build_deref_struct(b, nir_build_deref_var(b, out), 0);
   nir_store_deref(b, out_x, nir_load_deref(b, ssbo_0_x), 1);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 1);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 3);

   EXPECT_TRUE(nir_opt_copy_prop_vars(b->shader));

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 0);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 3);

   /* The value stored to ssbo_arr[0].x is propagated to the out.x store. */
   nir_intrinsic_instr *store0 = get_intrinsic(nir_intrinsic_store_deref, 0);
   nir_intrinsic_instr *store2 = get_intrinsic(nir_intrinsic_store_deref, 2);
   EXPECT_EQ(store0->src[1].ssa, store2->src[1].ssa);
}
1470 
TEST_F(nir_copy_prop_vars_test, aliasing_ssbo_array_binding)
{
   /* ssbo_arr is not marked ACCESS_RESTRICT, so its elements may alias other
    * bindings: the store to ssbo_arr[1].x invalidates what is known about
    * ssbo_arr[0].x and the load must stay.
    */
   glsl_struct_field field = glsl_struct_field();
   field.type = glsl_int_type();
   field.name = "x";
   const glsl_type *ifc_type =
      glsl_interface_type(&field, 1, GLSL_INTERFACE_PACKING_STD430,
                          false /* row_major */, "b");
   const glsl_type *arr_ifc_type = glsl_array_type(ifc_type, 2, 0);
   nir_variable *ssbo_arr = create_var(nir_var_mem_ssbo, arr_ifc_type,
                                       "ssbo_arr");
   nir_variable *out = create_var(nir_var_mem_ssbo, ifc_type, "out");
   out->data.access = ACCESS_RESTRICT;

   nir_deref_instr *ssbo_0 =
      nir_build_deref_array_imm(b, nir_build_deref_var(b, ssbo_arr), 0);
   nir_deref_instr *ssbo_0_x = nir_build_deref_struct(b, ssbo_0, 0);
   nir_store_deref(b, ssbo_0_x, nir_imm_int(b, 20), 1);

   nir_deref_instr *ssbo_1 =
      nir_build_deref_array_imm(b, nir_build_deref_var(b, ssbo_arr), 1);
   nir_deref_instr *ssbo_1_x = nir_build_deref_struct(b, ssbo_1, 0);
   nir_store_deref(b, ssbo_1_x, nir_imm_int(b, 30), 1);

   /* Copy ssbo_arr[0].x into out.x; this load must not be dropped. */
   nir_deref_instr *out_x =
      nir_build_deref_struct(b, nir_build_deref_var(b, out), 0);
   nir_store_deref(b, out_x, nir_load_deref(b, ssbo_0_x), 1);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 1);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 3);

   EXPECT_FALSE(nir_opt_copy_prop_vars(b->shader));

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 1);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 3);
}
1511 
TEST_F(nir_dead_write_vars_test, no_dead_writes_in_block)
{
   /* A single store that is never overwritten is not a dead write. */
   nir_variable **v = create_many_int(nir_var_mem_global, "v", 2);

   nir_store_var(b, v[0], nir_load_var(b, v[1]), 1);

   ASSERT_FALSE(nir_opt_dead_write_vars(b->shader));
}
1521 
TEST_F(nir_dead_write_vars_test, no_dead_writes_different_components_in_block)
{
   /* Two stores to the same variable but with disjoint write masks touch
    * different components, so neither write is dead.
    */
   nir_variable **v = create_many_ivec2(nir_var_mem_global, "v", 3);

   nir_store_var(b, v[0], nir_load_var(b, v[1]), 1 << 0);
   nir_store_var(b, v[0], nir_load_var(b, v[2]), 1 << 1);

   ASSERT_FALSE(nir_opt_dead_write_vars(b->shader));
}
1532 
TEST_F(nir_dead_write_vars_test, volatile_write)
{
   nir_variable *v = create_int(nir_var_mem_global, "v");

   nir_store_var(b, v, nir_imm_int(b, 0), 0x1);
   nir_store_var_volatile(b, v, nir_imm_int(b, 1), 0x1);
   nir_store_var(b, v, nir_imm_int(b, 2), 0x1);

   /* The pass is deliberately conservative around volatile accesses: the
    * volatile store in the middle keeps both itself and the preceding store
    * alive.  Strictly speaking volatile only forbids combining the volatile
    * store itself with another store, and one could argue the stores on
    * either side of it are combinable, but treating a volatile store like an
    * atomic and disallowing any combining across it is the safer choice.
    */
   ASSERT_FALSE(nir_opt_dead_write_vars(b->shader));
}
1552 
/* Same as volatile_write, but using copy_deref instead of store_deref: a
 * volatile copy must keep itself and the copy before it alive.
 */
TEST_F(nir_dead_write_vars_test, volatile_copies)
{
   nir_variable **v = create_many_int(nir_var_mem_global, "v", 2);

   nir_copy_var(b, v[0], v[1]);
   nir_copy_deref_with_access(b, nir_build_deref_var(b, v[0]),
                                 nir_build_deref_var(b, v[1]),
                                 ACCESS_VOLATILE, (gl_access_qualifier)0);
   nir_copy_var(b, v[0], v[1]);

   /* Our approach here is a bit scorched-earth.  We expect the volatile store
    * in the middle to cause both that store and the one before it to be kept.
    * Technically, volatile only prevents combining the volatile store with
    * another store and one could argue that the store before the volatile and
    * the one after it could be combined.  However, it seems safer to just
    * treat a volatile store like an atomic and prevent any combining across
    * it.
    */
   bool progress = nir_opt_dead_write_vars(b->shader);
   ASSERT_FALSE(progress);
}
1574 
/* A write is only dead if every control-flow path overwrites it; here each
 * if arm overwrites a different store, so neither pre-if store is dead.
 */
TEST_F(nir_dead_write_vars_test, no_dead_writes_in_if_statement)
{
   nir_variable **v = create_many_int(nir_var_mem_global, "v", 6);

   nir_store_var(b, v[2], nir_load_var(b, v[0]), 1);
   nir_store_var(b, v[3], nir_load_var(b, v[1]), 1);

   /* Each arm of the if statement will overwrite one store. */
   nir_if *if_stmt = nir_push_if(b, nir_imm_int(b, 0));
   nir_store_var(b, v[2], nir_load_var(b, v[4]), 1);

   nir_push_else(b, if_stmt);
   nir_store_var(b, v[3], nir_load_var(b, v[5]), 1);

   nir_pop_if(b, if_stmt);

   bool progress = nir_opt_dead_write_vars(b->shader);
   ASSERT_FALSE(progress);
}
1594 
/* A store inside a loop body may never execute (the loop can break first),
 * so it must not kill the store before the loop.
 */
TEST_F(nir_dead_write_vars_test, no_dead_writes_in_loop_statement)
{
   nir_variable **v = create_many_int(nir_var_mem_global, "v", 3);

   nir_store_var(b, v[0], nir_load_var(b, v[1]), 1);

   /* Loop will write other value.  Since it might not be executed, it doesn't
    * kill the first write.
    */
   nir_loop *loop = nir_push_loop(b);

   nir_if *if_stmt = nir_push_if(b, nir_imm_int(b, 0));
   nir_jump(b, nir_jump_break);
   nir_pop_if(b, if_stmt);

   nir_store_var(b, v[0], nir_load_var(b, v[2]), 1);
   nir_pop_loop(b, loop);

   bool progress = nir_opt_dead_write_vars(b->shader);
   ASSERT_FALSE(progress);
}
1616 
TEST_F(nir_dead_write_vars_test, dead_write_in_block)
{
   /* The first store to v[0] is overwritten before any read, making it dead;
    * only the second store should survive.
    */
   nir_variable **vars = create_many_int(nir_var_mem_global, "v", 3);

   nir_store_var(b, vars[0], nir_load_var(b, vars[1]), 1);
   nir_def *final_value = nir_load_var(b, vars[2]);
   nir_store_var(b, vars[0], final_value, 1);

   ASSERT_TRUE(nir_opt_dead_write_vars(b->shader));

   EXPECT_EQ(1, count_intrinsics(nir_intrinsic_store_deref));

   /* The surviving store must be the one writing the second value. */
   nir_intrinsic_instr *surviving = get_intrinsic(nir_intrinsic_store_deref, 0);
   EXPECT_EQ(surviving->src[1].ssa, final_value);
}
1633 
TEST_F(nir_dead_write_vars_test, dead_write_components_in_block)
{
   /* Both stores write only component x of v[0], so the first store is fully
    * shadowed by the second and must be removed.
    */
   nir_variable **vars = create_many_ivec2(nir_var_mem_global, "v", 3);

   nir_store_var(b, vars[0], nir_load_var(b, vars[1]), 1 << 0);
   nir_def *final_value = nir_load_var(b, vars[2]);
   nir_store_var(b, vars[0], final_value, 1 << 0);

   ASSERT_TRUE(nir_opt_dead_write_vars(b->shader));

   EXPECT_EQ(1, count_intrinsics(nir_intrinsic_store_deref));

   /* The surviving store must be the one writing the second value. */
   nir_intrinsic_instr *surviving = get_intrinsic(nir_intrinsic_store_deref, 0);
   EXPECT_EQ(surviving->src[1].ssa, final_value);
}
1650 
1651 
/* TODO: The DISABLED tests below depend on the dead write removal being able
 * to identify dead writes between multiple blocks.  This is still not
 * implemented.
 */
1656 
/* DISABLED: requires cross-block dead-write analysis.  The first store to
 * v[0] is overwritten in a later block with no intervening read, so it
 * should be removed once that analysis exists.
 */
TEST_F(nir_dead_write_vars_test, DISABLED_dead_write_in_two_blocks)
{
   nir_variable **v = create_many_int(nir_var_mem_global, "v", 3);

   nir_store_var(b, v[0], nir_load_var(b, v[1]), 1);
   nir_def *load_v2 = nir_load_var(b, v[2]);

   /* Causes the stores to be in different blocks. */
   nir_pop_if(b, nir_push_if(b, nir_imm_int(b, 0)));

   nir_store_var(b, v[0], load_v2, 1);

   bool progress = nir_opt_dead_write_vars(b->shader);
   ASSERT_TRUE(progress);

   EXPECT_EQ(1, count_intrinsics(nir_intrinsic_store_deref));

   nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, 0);
   EXPECT_EQ(store->src[1].ssa, load_v2);
}
1677 
/* DISABLED: requires cross-block dead-write analysis.  Same as above but
 * with a component mask: the x-only store in the first block is shadowed by
 * the x-only store in the later block.
 */
TEST_F(nir_dead_write_vars_test, DISABLED_dead_write_components_in_two_blocks)
{
   nir_variable **v = create_many_ivec2(nir_var_mem_global, "v", 3);

   nir_store_var(b, v[0], nir_load_var(b, v[1]), 1 << 0);

   /* Causes the stores to be in different blocks. */
   nir_pop_if(b, nir_push_if(b, nir_imm_int(b, 0)));

   nir_def *load_v2 = nir_load_var(b, v[2]);
   nir_store_var(b, v[0], load_v2, 1 << 0);

   bool progress = nir_opt_dead_write_vars(b->shader);
   ASSERT_TRUE(progress);

   EXPECT_EQ(1, count_intrinsics(nir_intrinsic_store_deref));

   nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, 0);
   EXPECT_EQ(store->src[1].ssa, load_v2);
}
1698 
/* DISABLED: requires cross-block dead-write analysis.  Both if arms
 * overwrite v[0], so the store before the if is dead on every path and only
 * the two in-branch stores should remain.
 */
TEST_F(nir_dead_write_vars_test, DISABLED_dead_writes_in_if_statement)
{
   nir_variable **v = create_many_int(nir_var_mem_global, "v", 4);

   /* Both branches will overwrite, making the previous store dead. */
   nir_store_var(b, v[0], nir_load_var(b, v[1]), 1);

   nir_if *if_stmt = nir_push_if(b, nir_imm_int(b, 0));
   nir_def *load_v2 = nir_load_var(b, v[2]);
   nir_store_var(b, v[0], load_v2, 1);

   nir_push_else(b, if_stmt);
   nir_def *load_v3 = nir_load_var(b, v[3]);
   nir_store_var(b, v[0], load_v3, 1);

   nir_pop_if(b, if_stmt);

   bool progress = nir_opt_dead_write_vars(b->shader);
   ASSERT_TRUE(progress);
   EXPECT_EQ(2, count_intrinsics(nir_intrinsic_store_deref));

   nir_intrinsic_instr *first_store = get_intrinsic(nir_intrinsic_store_deref, 0);
   EXPECT_EQ(first_store->src[1].ssa, load_v2);

   nir_intrinsic_instr *second_store = get_intrinsic(nir_intrinsic_store_deref, 1);
   EXPECT_EQ(second_store->src[1].ssa, load_v3);
}
1726 
/* DISABLED: requires cross-block dead-write analysis.  A memory barrier
 * keeps the stores before it alive, but a pre-barrier overwrite still kills
 * the earlier store to the same variable (3 of the 4 stores remain).
 */
TEST_F(nir_dead_write_vars_test, DISABLED_memory_barrier_in_two_blocks)
{
   nir_variable **v = create_many_int(nir_var_mem_global, "v", 2);

   nir_store_var(b, v[0], nir_imm_int(b, 1), 1);
   nir_store_var(b, v[1], nir_imm_int(b, 2), 1);

   /* Split into many blocks. */
   nir_pop_if(b, nir_push_if(b, nir_imm_int(b, 0)));

   /* Because it is before the barrier, this will kill the previous store to that target. */
   nir_store_var(b, v[0], nir_imm_int(b, 3), 1);

   nir_scoped_memory_barrier(b, SCOPE_DEVICE, NIR_MEMORY_ACQ_REL,
                             nir_var_mem_global);

   nir_store_var(b, v[1], nir_imm_int(b, 4), 1);

   bool progress = nir_opt_dead_write_vars(b->shader);
   ASSERT_TRUE(progress);

   EXPECT_EQ(3, count_intrinsics(nir_intrinsic_store_deref));
}
1750 
/* DISABLED: requires cross-block dead-write analysis.  nir_emit_vertex
 * consumes shader outputs, so both stores to "out" must stay, but it should
 * not preserve the first store to the global v[0], which is overwritten
 * after the emit with no intervening read.
 */
TEST_F(nir_dead_write_vars_test, DISABLED_unrelated_barrier_in_two_blocks)
{
   nir_variable **v = create_many_int(nir_var_mem_global, "v", 3);
   nir_variable *out = create_int(nir_var_shader_out, "out");

   nir_store_var(b, out, nir_load_var(b, v[1]), 1);
   nir_store_var(b, v[0], nir_load_var(b, v[1]), 1);

   /* Split into many blocks. */
   nir_pop_if(b, nir_push_if(b, nir_imm_int(b, 0)));

   /* Emit vertex will ensure writes to output variables are considered used,
    * but should not affect other types of variables. */

   nir_emit_vertex(b);

   nir_store_var(b, out, nir_load_var(b, v[2]), 1);
   nir_store_var(b, v[0], nir_load_var(b, v[2]), 1);

   bool progress = nir_opt_dead_write_vars(b->shader);
   ASSERT_TRUE(progress);

   /* Verify the first write to v[0] was removed. */
   EXPECT_EQ(3, count_intrinsics(nir_intrinsic_store_deref));

   nir_intrinsic_instr *first_store = get_intrinsic(nir_intrinsic_store_deref, 0);
   EXPECT_EQ(nir_intrinsic_get_var(first_store, 0), out);

   nir_intrinsic_instr *second_store = get_intrinsic(nir_intrinsic_store_deref, 1);
   EXPECT_EQ(nir_intrinsic_get_var(second_store, 0), out);

   nir_intrinsic_instr *third_store = get_intrinsic(nir_intrinsic_store_deref, 2);
   EXPECT_EQ(nir_intrinsic_get_var(third_store, 0), v[0]);
}
1785 
/* Four stores with disjoint single-component write masks to the same output
 * must be combined into a single store with mask 0xf, whose source is a vec4
 * gathering component i from v[i].
 */
TEST_F(nir_combine_stores_test, non_overlapping_stores)
{
   nir_variable **v = create_many_ivec4(nir_var_mem_global, "v", 4);
   nir_variable *out = create_ivec4(nir_var_shader_out, "out");

   for (int i = 0; i < 4; i++)
      nir_store_var(b, out, nir_load_var(b, v[i]), 1 << i);

   nir_validate_shader(b->shader, NULL);

   bool progress = nir_opt_combine_stores(b->shader, nir_var_shader_out);
   ASSERT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   /* Clean up to verify from where the values in combined store are coming. */
   nir_copy_prop(b->shader);
   nir_opt_dce(b->shader);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 1);
   nir_intrinsic_instr *combined = get_intrinsic(nir_intrinsic_store_deref, 0);
   ASSERT_EQ(nir_intrinsic_write_mask(combined), 0xf);
   ASSERT_EQ(nir_intrinsic_get_var(combined, 0), out);

   /* The combined value is a vec4 picking component i from the load of v[i]. */
   nir_alu_instr *vec = nir_src_as_alu_instr(combined->src[1]);
   ASSERT_TRUE(vec);
   for (int i = 0; i < 4; i++) {
      nir_intrinsic_instr *load = nir_src_as_intrinsic(vec->src[i].src);
      ASSERT_EQ(load->intrinsic, nir_intrinsic_load_deref);
      ASSERT_EQ(nir_intrinsic_get_var(load, 0), v[i])
         << "Source value for component " << i << " of store is wrong";
      ASSERT_EQ(vec->src[i].swizzle[0], i)
         << "Source component for component " << i << " of store is wrong";
   }
}
1821 
/* Stores with overlapping masks (xy, yz, zw): the later store wins for each
 * overlapped component, so the combined vec4 takes x from v[0], y from v[1],
 * and z/w from v[2].
 */
TEST_F(nir_combine_stores_test, overlapping_stores)
{
   nir_variable **v = create_many_ivec4(nir_var_mem_global, "v", 3);
   nir_variable *out = create_ivec4(nir_var_shader_out, "out");

   /* Make stores with xy, yz and zw masks. */
   for (int i = 0; i < 3; i++) {
      nir_component_mask_t mask = (1 << i) | (1 << (i + 1));
      nir_store_var(b, out, nir_load_var(b, v[i]), mask);
   }

   nir_validate_shader(b->shader, NULL);

   bool progress = nir_opt_combine_stores(b->shader, nir_var_shader_out);
   ASSERT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   /* Clean up to verify from where the values in combined store are coming. */
   nir_copy_prop(b->shader);
   nir_opt_dce(b->shader);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 1);
   nir_intrinsic_instr *combined = get_intrinsic(nir_intrinsic_store_deref, 0);
   ASSERT_EQ(nir_intrinsic_write_mask(combined), 0xf);
   ASSERT_EQ(nir_intrinsic_get_var(combined, 0), out);

   nir_alu_instr *vec = nir_src_as_alu_instr(combined->src[1]);
   ASSERT_TRUE(vec);

   /* Component x comes from v[0]. */
   nir_intrinsic_instr *load_for_x = nir_src_as_intrinsic(vec->src[0].src);
   ASSERT_EQ(nir_intrinsic_get_var(load_for_x, 0), v[0]);
   ASSERT_EQ(vec->src[0].swizzle[0], 0);

   /* Component y comes from v[1]. */
   nir_intrinsic_instr *load_for_y = nir_src_as_intrinsic(vec->src[1].src);
   ASSERT_EQ(nir_intrinsic_get_var(load_for_y, 0), v[1]);
   ASSERT_EQ(vec->src[1].swizzle[0], 1);

   /* Components z and w come from v[2], sharing a single load. */
   nir_intrinsic_instr *load_for_z = nir_src_as_intrinsic(vec->src[2].src);
   nir_intrinsic_instr *load_for_w = nir_src_as_intrinsic(vec->src[3].src);
   ASSERT_EQ(load_for_z, load_for_w);
   ASSERT_EQ(nir_intrinsic_get_var(load_for_z, 0), v[2]);
   ASSERT_EQ(vec->src[2].swizzle[0], 2);
   ASSERT_EQ(vec->src[3].swizzle[0], 3);
}
1870 
/* Stores through constant-index array derefs into a vector must combine with
 * whole-vector stores: the result is one mask-0xf store whose components
 * come from v[0] (x), v[1] (y), s[0] (z, overwriting v[1]'s z) and s[1] (w).
 */
TEST_F(nir_combine_stores_test, direct_array_derefs)
{
   nir_variable **v = create_many_ivec4(nir_var_mem_global, "vec", 2);
   nir_variable **s = create_many_int(nir_var_mem_global, "scalar", 2);
   nir_variable *out = create_ivec4(nir_var_mem_global, "out");

   nir_deref_instr *out_deref = nir_build_deref_var(b, out);

   /* Store to vector with mask x. */
   nir_store_deref(b, out_deref, nir_load_var(b, v[0]),
                   1 << 0);

   /* Store to vector with mask yz. */
   nir_store_deref(b, out_deref, nir_load_var(b, v[1]),
                   (1 << 2) | (1 << 1));

   /* Store to vector[2], overlapping with previous store. */
   nir_store_deref(b,
                   nir_build_deref_array_imm(b, out_deref, 2),
                   nir_load_var(b, s[0]),
                   1 << 0);

   /* Store to vector[3], no overlap. */
   nir_store_deref(b,
                   nir_build_deref_array_imm(b, out_deref, 3),
                   nir_load_var(b, s[1]),
                   1 << 0);

   nir_validate_shader(b->shader, NULL);

   bool progress = nir_opt_combine_stores(b->shader, nir_var_mem_global);
   ASSERT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   /* Clean up to verify from where the values in combined store are coming. */
   nir_copy_prop(b->shader);
   nir_opt_dce(b->shader);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 1);
   nir_intrinsic_instr *combined = get_intrinsic(nir_intrinsic_store_deref, 0);
   ASSERT_EQ(nir_intrinsic_write_mask(combined), 0xf);
   ASSERT_EQ(nir_intrinsic_get_var(combined, 0), out);

   nir_alu_instr *vec = nir_src_as_alu_instr(combined->src[1]);
   ASSERT_TRUE(vec);

   /* Component x comes from v[0]. */
   nir_intrinsic_instr *load_for_x = nir_src_as_intrinsic(vec->src[0].src);
   ASSERT_EQ(nir_intrinsic_get_var(load_for_x, 0), v[0]);
   ASSERT_EQ(vec->src[0].swizzle[0], 0);

   /* Component y comes from v[1]. */
   nir_intrinsic_instr *load_for_y = nir_src_as_intrinsic(vec->src[1].src);
   ASSERT_EQ(nir_intrinsic_get_var(load_for_y, 0), v[1]);
   ASSERT_EQ(vec->src[1].swizzle[0], 1);

   /* Component z comes from s[0] (a scalar, hence swizzle 0). */
   nir_intrinsic_instr *load_for_z = nir_src_as_intrinsic(vec->src[2].src);
   ASSERT_EQ(nir_intrinsic_get_var(load_for_z, 0), s[0]);
   ASSERT_EQ(vec->src[2].swizzle[0], 0);

   /* Component w comes from s[1] (a scalar, hence swizzle 0). */
   nir_intrinsic_instr *load_for_w = nir_src_as_intrinsic(vec->src[3].src);
   ASSERT_EQ(nir_intrinsic_get_var(load_for_w, 0), s[1]);
   ASSERT_EQ(vec->src[3].swizzle[0], 0);
}
1938 
/* Return component "comp" of the source "src" as a constant integer.
 * Handles both a directly-constant source and a vec/mov ALU instruction
 * whose per-component sources are constants (asserts otherwise).
 */
static int64_t
vec_src_comp_as_int(nir_src src, unsigned comp)
{
   if (nir_src_is_const(src))
      return nir_src_comp_as_int(src, comp);

   /* Not directly const: chase the requested component through the vec/mov. */
   nir_scalar s = { src.ssa, comp };
   assert(nir_op_is_vec_or_mov(nir_scalar_alu_op(s)));
   return nir_scalar_as_int(nir_scalar_chase_alu_src(s, comp));
}
1949 
/* A volatile store must not be combined with anything, but the stores on
 * each side of it may still be combined among themselves: 5 stores collapse
 * to 3 (xy combined, the volatile 0xf store, zw combined).
 */
TEST_F(nir_combine_stores_test, store_volatile)
{
   nir_variable *out = create_ivec4(nir_var_shader_out, "out");

   nir_store_var(b, out, nir_imm_ivec4(b, 0, 0, 0, 0), 1 << 0);
   nir_store_var(b, out, nir_imm_ivec4(b, 1, 1, 1, 1), 1 << 1);
   nir_store_var_volatile(b, out, nir_imm_ivec4(b, -1, -2, -3, -4), 0xf);
   nir_store_var(b, out, nir_imm_ivec4(b, 2, 2, 2, 2), 1 << 2);
   nir_store_var(b, out, nir_imm_ivec4(b, 3, 3, 3, 3), 1 << 3);

   nir_validate_shader(b->shader, NULL);

   bool progress = nir_opt_combine_stores(b->shader, nir_var_shader_out);
   ASSERT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   /* Clean up the stored values */
   nir_opt_constant_folding(b->shader);
   nir_opt_dce(b->shader);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 3);

   /* First store: the combined xy write from before the volatile store. */
   nir_intrinsic_instr *first = get_intrinsic(nir_intrinsic_store_deref, 0);
   ASSERT_EQ(nir_intrinsic_write_mask(first), 0x3);
   ASSERT_EQ(vec_src_comp_as_int(first->src[1], 0), 0);
   ASSERT_EQ(vec_src_comp_as_int(first->src[1], 1), 1);

   /* Second store: the untouched volatile store of all four components. */
   nir_intrinsic_instr *second = get_intrinsic(nir_intrinsic_store_deref, 1);
   ASSERT_EQ(nir_intrinsic_write_mask(second), 0xf);
   ASSERT_EQ(vec_src_comp_as_int(second->src[1], 0), -1);
   ASSERT_EQ(vec_src_comp_as_int(second->src[1], 1), -2);
   ASSERT_EQ(vec_src_comp_as_int(second->src[1], 2), -3);
   ASSERT_EQ(vec_src_comp_as_int(second->src[1], 3), -4);

   /* Third store: the combined zw write from after the volatile store. */
   nir_intrinsic_instr *third = get_intrinsic(nir_intrinsic_store_deref, 2);
   ASSERT_EQ(nir_intrinsic_write_mask(third), 0xc);
   ASSERT_EQ(vec_src_comp_as_int(third->src[1], 2), 2);
   ASSERT_EQ(vec_src_comp_as_int(third->src[1], 3), 3);
}
1990 
TEST_F(nir_split_vars_test, simple_split)
{
   /* An int[4] temp accessed only through constant indices must be split
    * into four scalar temps, eliminating every array deref.
    */
   nir_variable **in = create_many_int(nir_var_shader_in, "in", 4);
   nir_variable *temp = create_var(nir_var_function_temp, glsl_array_type(glsl_int_type(), 4, 0),
                                   "temp");
   nir_deref_instr *temp_deref = nir_build_deref_var(b, temp);
   for (int idx = 0; idx < 4; idx++) {
      nir_deref_instr *elem = nir_build_deref_array_imm(b, temp_deref, idx);
      nir_store_deref(b, elem, nir_load_var(b, in[idx]), 1);
   }

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_derefs(nir_deref_type_array), 4);
   ASSERT_EQ(count_function_temp_vars(), 1);

   EXPECT_TRUE(nir_split_array_vars(b->shader, nir_var_function_temp));

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_derefs(nir_deref_type_array), 0);
   ASSERT_EQ(count_function_temp_vars(), 4);
}
2011 
/* An array of structs must not be split, while a plain int array in the same
 * shader is: afterwards temp2's 4 array derefs are gone (4 new scalar vars),
 * but temp's 4 struct-typed array derefs and its struct derefs remain.
 */
TEST_F(nir_split_vars_test, simple_no_split_array_struct)
{
   nir_variable **in = create_many_int(nir_var_shader_in, "in", 4);
   /* NOTE(review): only type/name/location/offset are set here; presumably
    * glsl_struct_field's default constructor initializes the remaining
    * members — confirm against glsl_types.h.
    */
   struct glsl_struct_field field;

   field.type = glsl_float_type();
   field.name = ralloc_asprintf(b->shader, "field1");
   field.location = -1;
   field.offset = 0;

   const struct glsl_type *st_type = glsl_struct_type(&field, 1, "struct", false);
   nir_variable *temp = create_var(nir_var_function_temp, glsl_array_type(st_type, 4, 0),
                                   "temp");

   nir_variable *temp2 = create_var(nir_var_function_temp, glsl_array_type(glsl_int_type(), 4, 0), "temp2");

   nir_deref_instr *temp_deref = nir_build_deref_var(b, temp);
   nir_deref_instr *temp2_deref = nir_build_deref_var(b, temp2);
   for (int i = 0; i < 4; i++)
      nir_store_deref(b, nir_build_deref_array_imm(b, temp2_deref, i), nir_load_var(b, in[i]), 1);

   for (int i = 0; i < 4; i++)
      nir_store_deref(b, nir_build_deref_struct(b, nir_build_deref_array_imm(b, temp_deref, i), 0), nir_load_var(b, in[i]), 1);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_derefs(nir_deref_type_array), 8);
   ASSERT_EQ(count_derefs(nir_deref_type_struct), 4);
   ASSERT_EQ(count_function_temp_vars(), 2);

   bool progress = nir_split_array_vars(b->shader, nir_var_function_temp);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   /* Only temp2's derefs were removed; the remaining 4 array derefs all
    * belong to the unsplit array-of-struct. */
   ASSERT_EQ(count_derefs(nir_deref_type_array), 4);
   ASSERT_EQ(count_derefs(nir_deref_type_struct), 4);
   for (int i = 0; i < 4; i++) {
      nir_deref_instr *deref = get_deref(nir_deref_type_array, i);
      ASSERT_TRUE(deref);
      ASSERT_TRUE(glsl_type_is_struct(deref->type));
   }

   /* temp (unsplit) + 4 scalars from temp2. */
   ASSERT_EQ(count_function_temp_vars(), 5);
}
2056 
/* Same as simple_split but for nir_var_shader_temp: the pass must also
 * split constant-indexed arrays in that mode.
 */
TEST_F(nir_split_vars_test, simple_split_shader_temp)
{
   nir_variable **in = create_many_int(nir_var_shader_in, "in", 4);
   nir_variable *temp = create_var(nir_var_shader_temp, glsl_array_type(glsl_int_type(), 4, 0),
                                   "temp");
   nir_deref_instr *temp_deref = nir_build_deref_var(b, temp);

   for (int i = 0; i < 4; i++)
      nir_store_deref(b, nir_build_deref_array_imm(b, temp_deref, i), nir_load_var(b, in[i]), 1);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_derefs(nir_deref_type_array), 4);
   ASSERT_EQ(count_shader_temp_vars(), 1);

   bool progress = nir_split_array_vars(b->shader, nir_var_shader_temp);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_derefs(nir_deref_type_array), 0);
   ASSERT_EQ(count_shader_temp_vars(), 4);
}
2078 
/* Constant indices 4 and 5 are out of bounds for the int[4] temp.  They must
 * not block splitting: afterwards there are only the 4 in-bounds scalar vars
 * and no array derefs, i.e. the OOB accesses were dropped by the split.
 */
TEST_F(nir_split_vars_test, simple_oob)
{
   nir_variable **in = create_many_int(nir_var_shader_in, "in", 6);
   nir_variable *temp = create_var(nir_var_function_temp, glsl_array_type(glsl_int_type(), 4, 0),
                                   "temp");
   nir_deref_instr *temp_deref = nir_build_deref_var(b, temp);

   for (int i = 0; i < 6; i++)
      nir_store_deref(b, nir_build_deref_array_imm(b, temp_deref, i), nir_load_var(b, in[i]), 1);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_derefs(nir_deref_type_array), 6);
   ASSERT_EQ(count_function_temp_vars(), 1);

   bool progress = nir_split_array_vars(b->shader, nir_var_function_temp);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_derefs(nir_deref_type_array), 0);
   ASSERT_EQ(count_function_temp_vars(), 4);
}
2100 
/* Only elements 0 and 1 of the int[4] temp are written; the split still
 * produces a variable for every element.
 */
TEST_F(nir_split_vars_test, simple_unused)
{
   nir_variable **in = create_many_int(nir_var_shader_in, "in", 2);
   nir_variable *temp = create_var(nir_var_function_temp, glsl_array_type(glsl_int_type(), 4, 0),
                                   "temp");
   nir_deref_instr *temp_deref = nir_build_deref_var(b, temp);

   for (int i = 0; i < 2; i++)
      nir_store_deref(b, nir_build_deref_array_imm(b, temp_deref, i), nir_load_var(b, in[i]), 1);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_derefs(nir_deref_type_array), 2);
   ASSERT_EQ(count_function_temp_vars(), 1);

   bool progress = nir_split_array_vars(b->shader, nir_var_function_temp);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_derefs(nir_deref_type_array), 0);
   /* this pass doesn't remove the unused ones */
   ASSERT_EQ(count_function_temp_vars(), 4);
}
2123 
/* A nested int[4][4] temp with constant indices at both levels splits fully
 * into 16 scalars.  Before the split there are 20 array derefs: 4 outer plus
 * 16 inner.
 */
TEST_F(nir_split_vars_test, two_level_split)
{
   nir_variable **in = create_many_int(nir_var_shader_in, "in", 4);
   nir_variable *temp = create_var(nir_var_function_temp, glsl_array_type(glsl_array_type(glsl_int_type(), 4, 0), 4, 0),
                                   "temp");
   nir_deref_instr *temp_deref = nir_build_deref_var(b, temp);
   for (int i = 0; i < 4; i++) {
      nir_deref_instr *level0 = nir_build_deref_array_imm(b, temp_deref, i);
      for (int j = 0; j < 4; j++) {
         nir_deref_instr *level1 = nir_build_deref_array_imm(b, level0, j);
         nir_store_deref(b, level1, nir_load_var(b, in[i]), 1);
      }
   }

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_derefs(nir_deref_type_array), 20);
   ASSERT_EQ(count_function_temp_vars(), 1);

   bool progress = nir_split_array_vars(b->shader, nir_var_function_temp);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_derefs(nir_deref_type_array), 0);
   ASSERT_EQ(count_function_temp_vars(), 16);
}
2149 
/* An array indexed with a non-constant (indirect) value cannot be split;
 * the pass must report no progress and leave everything untouched.
 */
TEST_F(nir_split_vars_test, simple_dont_split)
{
   nir_variable **in = create_many_int(nir_var_shader_in, "in", 4);
   nir_variable *temp = create_var(nir_var_function_temp, glsl_array_type(glsl_int_type(), 4, 0),
                                   "temp");
   nir_variable *ind = create_int(nir_var_shader_in, "ind");

   nir_deref_instr *ind_deref = nir_build_deref_var(b, ind);
   nir_deref_instr *temp_deref = nir_build_deref_var(b, temp);

   for (int i = 0; i < 4; i++)
      nir_store_deref(b, nir_build_deref_array(b, temp_deref, &ind_deref->def), nir_load_var(b, in[i]), 1);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_derefs(nir_deref_type_array), 4);
   ASSERT_EQ(count_function_temp_vars(), 1);

   bool progress = nir_split_array_vars(b->shader, nir_var_function_temp);
   EXPECT_FALSE(progress);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_derefs(nir_deref_type_array), 4);
   ASSERT_EQ(count_function_temp_vars(), 1);
}
2174 
/* Nested int[4][6] temp: the outer index is indirect, the inner one is
 * constant.  Only the inner dimension splits — yielding 6 vars of int[4] —
 * and each of the 24 stores keeps one indirect array deref.
 */
TEST_F(nir_split_vars_test, twolevel_dont_split_lvl_0)
{
   nir_variable **in = create_many_int(nir_var_shader_in, "in", 4);
   nir_variable *temp = create_var(nir_var_function_temp, glsl_array_type(glsl_array_type(glsl_int_type(), 6, 0), 4, 0),
                                   "temp");
   nir_variable *ind = create_int(nir_var_shader_in, "ind");

   nir_deref_instr *ind_deref = nir_build_deref_var(b, ind);
   nir_deref_instr *temp_deref = nir_build_deref_var(b, temp);

   for (int i = 0; i < 4; i++) {
      nir_deref_instr *level0 = nir_build_deref_array(b, temp_deref, &ind_deref->def);
      for (int j = 0; j < 6; j++) {
         nir_deref_instr *level1 = nir_build_deref_array_imm(b, level0, j);
         nir_store_deref(b, level1, nir_load_var(b, in[i]), 1);
      }
   }

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_derefs(nir_deref_type_array), 28);
   ASSERT_EQ(count_function_temp_vars(), 1);

   bool progress = nir_split_array_vars(b->shader, nir_var_function_temp);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_derefs(nir_deref_type_array), 24);
   ASSERT_EQ(count_function_temp_vars(), 6);
}
2204 
/* Nested int[4][6] temp: the outer index is constant, the inner one is
 * indirect.  Only the outer dimension splits — yielding 4 vars of int[6] —
 * and the 24 indirect inner derefs remain.
 */
TEST_F(nir_split_vars_test, twolevel_dont_split_lvl_1)
{
   nir_variable **in = create_many_int(nir_var_shader_in, "in", 6);
   nir_variable *temp = create_var(nir_var_function_temp, glsl_array_type(glsl_array_type(glsl_int_type(), 6, 0), 4, 0),
                                   "temp");
   nir_variable *ind = create_int(nir_var_shader_in, "ind");

   nir_deref_instr *ind_deref = nir_build_deref_var(b, ind);
   nir_deref_instr *temp_deref = nir_build_deref_var(b, temp);

   for (int i = 0; i < 4; i++) {
      nir_deref_instr *level0 = nir_build_deref_array_imm(b, temp_deref, i);
      for (int j = 0; j < 6; j++) {
         /* just add the inner index to get some different derefs */
         nir_deref_instr *level1 = nir_build_deref_array(b, level0, nir_iadd_imm(b, &ind_deref->def, j));
         nir_store_deref(b, level1, nir_load_var(b, in[i]), 1);
      }
   }

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_derefs(nir_deref_type_array), 28);
   ASSERT_EQ(count_function_temp_vars(), 1);

   bool progress = nir_split_array_vars(b->shader, nir_var_function_temp);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_derefs(nir_deref_type_array), 24);
   ASSERT_EQ(count_function_temp_vars(), 4);
}
2235 
TEST_F(nir_split_vars_test, split_multiple_store)
{
   /* Two independent int[4] temps, each written only through constant
    * indices; both must be split, yielding four scalars apiece.
    */
   nir_variable **in = create_many_int(nir_var_shader_in, "in", 4);
   nir_variable *temp = create_var(nir_var_function_temp, glsl_array_type(glsl_int_type(), 4, 0),
                                   "temp");
   nir_variable *temp2 = create_var(nir_var_function_temp, glsl_array_type(glsl_int_type(), 4, 0),
                                    "temp2");

   nir_deref_instr *first_arr = nir_build_deref_var(b, temp);
   nir_deref_instr *second_arr = nir_build_deref_var(b, temp2);

   for (int idx = 0; idx < 4; idx++)
      nir_store_deref(b, nir_build_deref_array_imm(b, first_arr, idx), nir_load_var(b, in[idx]), 1);

   for (int idx = 0; idx < 4; idx++)
      nir_store_deref(b, nir_build_deref_array_imm(b, second_arr, idx), nir_load_var(b, in[idx]), 1);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_derefs(nir_deref_type_array), 8);
   ASSERT_EQ(count_function_temp_vars(), 2);

   EXPECT_TRUE(nir_split_array_vars(b->shader, nir_var_function_temp));

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_derefs(nir_deref_type_array), 0);
   ASSERT_EQ(count_function_temp_vars(), 8);
}
2264 
/* Splitting must rewrite loads as well as stores: temp is both written and
 * then read element-wise into temp2, and afterwards no array derefs remain
 * while both arrays have been split (8 scalar vars total).
 */
TEST_F(nir_split_vars_test, split_load_store)
{
   nir_variable **in = create_many_int(nir_var_shader_in, "in", 4);
   nir_variable *temp = create_var(nir_var_function_temp, glsl_array_type(glsl_int_type(), 4, 0),
                                   "temp");
   nir_variable *temp2 = create_var(nir_var_function_temp, glsl_array_type(glsl_int_type(), 4, 0),
                                    "temp2");

   nir_deref_instr *temp_deref = nir_build_deref_var(b, temp);
   nir_deref_instr *temp2_deref = nir_build_deref_var(b, temp2);

   for (int i = 0; i < 4; i++)
      nir_store_deref(b, nir_build_deref_array_imm(b, temp_deref, i), nir_load_var(b, in[i]), 1);

   /* Copy temp into temp2 element by element through constant indices. */
   for (int i = 0; i < 4; i++) {
      nir_deref_instr *store_deref = nir_build_deref_array_imm(b, temp2_deref, i);
      nir_deref_instr *load_deref = nir_build_deref_array_imm(b, temp_deref, i);
      nir_store_deref(b, store_deref, nir_load_deref(b, load_deref), 1);
   }

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_derefs(nir_deref_type_array), 12);
   ASSERT_EQ(count_function_temp_vars(), 2);

   bool progress = nir_split_array_vars(b->shader, nir_var_function_temp);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_derefs(nir_deref_type_array), 0);
   ASSERT_EQ(count_function_temp_vars(), 8);
}
2296 
/* Element-wise copy_deref between two function-temp int[4] arrays:
 * nir_split_array_vars must split both arrays, rewriting the per-element
 * copies onto the new scalar variables.
 */
TEST_F(nir_split_vars_test, split_copy)
{
   nir_variable **in = create_many_int(nir_var_shader_in, "in", 4);
   nir_variable *first = create_var(nir_var_function_temp,
                                    glsl_array_type(glsl_int_type(), 4, 0),
                                    "temp");
   nir_variable *second = create_var(nir_var_function_temp,
                                     glsl_array_type(glsl_int_type(), 4, 0),
                                     "temp2");

   nir_deref_instr *first_deref = nir_build_deref_var(b, first);
   nir_deref_instr *second_deref = nir_build_deref_var(b, second);

   /* Populate the first array from the shader inputs. */
   for (int elem = 0; elem < 4; elem++)
      nir_store_deref(b, nir_build_deref_array_imm(b, first_deref, elem),
                      nir_load_var(b, in[elem]), 1);

   /* Copy each element with copy_deref rather than load/store. */
   for (int elem = 0; elem < 4; elem++) {
      nir_deref_instr *dst = nir_build_deref_array_imm(b, second_deref, elem);
      nir_deref_instr *src = nir_build_deref_array_imm(b, first_deref, elem);
      nir_copy_deref(b, dst, src);
   }

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_derefs(nir_deref_type_array), 12);
   ASSERT_EQ(count_function_temp_vars(), 2);

   bool progress = nir_split_array_vars(b->shader, nir_var_function_temp);
   EXPECT_TRUE(progress);

   /* Both int[4] arrays became four scalar vars each. */
   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_derefs(nir_deref_type_array), 0);
   ASSERT_EQ(count_function_temp_vars(), 8);
}
2328 
/* A single wildcard copy between two function-temp int[4] arrays:
 * nir_split_array_vars must expand the one copy_deref into one copy per
 * element of the split variables.
 */
TEST_F(nir_split_vars_test, split_wildcard_copy)
{
   nir_variable **in = create_many_int(nir_var_shader_in, "in", 4);
   nir_variable *first = create_var(nir_var_function_temp,
                                    glsl_array_type(glsl_int_type(), 4, 0),
                                    "temp");
   nir_variable *second = create_var(nir_var_function_temp,
                                     glsl_array_type(glsl_int_type(), 4, 0),
                                     "temp2");

   nir_deref_instr *first_deref = nir_build_deref_var(b, first);
   nir_deref_instr *second_deref = nir_build_deref_var(b, second);

   /* Populate the source array from the shader inputs. */
   for (int elem = 0; elem < 4; elem++)
      nir_store_deref(b, nir_build_deref_array_imm(b, first_deref, elem),
                      nir_load_var(b, in[elem]), 1);

   /* One whole-array copy expressed with wildcard derefs. */
   nir_deref_instr *src_wildcard = nir_build_deref_array_wildcard(b, first_deref);
   nir_deref_instr *dst_wildcard = nir_build_deref_array_wildcard(b, second_deref);

   nir_copy_deref(b, dst_wildcard, src_wildcard);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_derefs(nir_deref_type_array), 4);
   ASSERT_EQ(count_derefs(nir_deref_type_array_wildcard), 2);
   ASSERT_EQ(count_function_temp_vars(), 2);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_copy_deref), 1);

   bool progress = nir_split_array_vars(b->shader, nir_var_function_temp);
   EXPECT_TRUE(progress);

   /* The wildcard copy was expanded into four scalar-to-scalar copies. */
   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_derefs(nir_deref_type_array), 0);
   ASSERT_EQ(count_derefs(nir_deref_type_array_wildcard), 0);
   ASSERT_EQ(count_function_temp_vars(), 8);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_copy_deref), 4);
}
2363 
/* Split a nir_var_mem_constant variable of type outer[2], where
 * outer = struct { inner as[2]; inner bs[2]; } and
 * inner = struct { int a; int b; }.
 * nir_split_struct_vars must split the variable by struct fields and
 * carry the constant initializer along into each split variable.
 */
TEST_F(nir_split_vars_test, split_nested_struct_const_init)
{
   const struct glsl_struct_field inner_struct_types[] = {
      { glsl_int_type(), "a"},
      { glsl_int_type(), "b"},
   };
   const struct glsl_type *inner_struct = glsl_struct_type(inner_struct_types, 2, "inner", false);
   const struct glsl_struct_field outer_struct_types[] = {
      { glsl_array_type(inner_struct, 2, 0), "as" },
      { glsl_array_type(inner_struct, 2, 0), "bs" },
   };
   const struct glsl_type *outer_struct = glsl_struct_type(outer_struct_types, 2, "outer", false);
   nir_variable *var = create_var(nir_var_mem_constant, glsl_array_type(outer_struct, 2, 0), "consts");

   /* Give every scalar leaf of the initializer a distinct value. */
   uint32_t literal_val = 0;
   auto get_inner_struct_val = [&]() {
      nir_constant ret = {};
      ret.values[0].u32 = literal_val++;
      return ret;
   };
   /* Build a two-element aggregate nir_constant whose children come from
    * get_inner_val().  The child storage is ralloc'ed off the shader and
    * survives the lambda; the returned nir_constant itself is copied by
    * value by the caller, which keeps the elements pointers valid. */
   auto get_nested_constant = [&](auto &get_inner_val) {
      nir_constant *arr = ralloc_array(b->shader, nir_constant, 2);
      arr[0] = get_inner_val();
      arr[1] = get_inner_val();
      nir_constant **arr2 = ralloc_array(b->shader, nir_constant *, 2);
      arr2[0] = &arr[0];
      arr2[1] = &arr[1];
      nir_constant ret = {};
      ret.num_elements = 2;
      ret.elements = arr2;
      return ret;
   };
   /* Compose the nesting bottom-up to mirror the type:
    * inner struct -> inner[2] -> outer struct -> outer[2]. */
   auto get_inner_struct_constant = [&]() { return get_nested_constant(get_inner_struct_val); };
   auto get_inner_array_constant = [&]() { return get_nested_constant(get_inner_struct_constant); };
   auto get_outer_struct_constant = [&]() { return get_nested_constant(get_inner_array_constant); };
   auto get_outer_array_constant = [&]() { return get_nested_constant(get_outer_struct_constant); };
   nir_constant var_constant = get_outer_array_constant();
   /* NOTE(review): this points the initializer at a stack-local
    * nir_constant; it works here because the shader is only processed
    * within this test body — confirm before reusing the pattern. */
   var->constant_initializer = &var_constant;

   /* Read consts[1].as[1].b so the constant variable is actually used. */
   nir_variable *out = create_int(nir_var_shader_out, "out");
   nir_store_var(b, out,
      nir_load_deref(b,
         nir_build_deref_struct(b,
            nir_build_deref_array_imm(b,
               nir_build_deref_struct(b,
                  nir_build_deref_array_imm(b, nir_build_deref_var(b, var), 1),
                                      0),
                                      1),
                                1)
                     ),
                 0xff);

   nir_validate_shader(b->shader, NULL);

   bool progress = nir_split_struct_vars(b->shader, nir_var_mem_constant);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   /* Expect four split vars (one per leaf field: as.a, as.b, bs.a, bs.b),
    * each a nested int array with 4 scalars total and outer length 2. */
   unsigned count = 0;
   nir_foreach_variable_with_modes(var, b->shader, nir_var_mem_constant) {
      EXPECT_EQ(glsl_get_aoa_size(var->type), 4);
      EXPECT_EQ(glsl_get_length(var->type), 2);
      EXPECT_EQ(glsl_without_array(var->type), glsl_int_type());
      count++;
   }

   ASSERT_EQ(count, 4);
}
2433 
/* A shader-temp that is referenced only as another variable's
 * pointer_initializer must survive dead-variable removal as long as the
 * initialized variable itself is used.
 */
TEST_F(nir_remove_dead_variables_test, pointer_initializer_used)
{
   nir_variable *pointee = create_int(nir_var_shader_temp, "x");
   nir_variable *initialized = create_int(nir_var_shader_temp, "y");
   initialized->pointer_initializer = pointee;
   nir_variable *result = create_int(nir_var_shader_out, "out");

   nir_validate_shader(b->shader, NULL);

   /* Use the initialized variable so it — and transitively its
    * pointer_initializer — is live. */
   nir_copy_var(b, result, initialized);

   bool progress = nir_remove_dead_variables(b->shader, nir_var_all, NULL);
   EXPECT_FALSE(progress);

   nir_validate_shader(b->shader, NULL);

   /* All three variables must still be present. */
   unsigned remaining = 0;
   nir_foreach_variable_in_shader(var, b->shader)
      remaining++;

   ASSERT_EQ(remaining, 3);
}
2456 
/* A chain of shader-temps linked only through pointer_initializer, with
 * no actual use anywhere, must be removed entirely by dead-variable
 * elimination.
 */
TEST_F(nir_remove_dead_variables_test, pointer_initializer_dead)
{
   nir_variable *first = create_int(nir_var_shader_temp, "x");
   nir_variable *second = create_int(nir_var_shader_temp, "y");
   nir_variable *third = create_int(nir_var_shader_temp, "z");
   second->pointer_initializer = first;
   third->pointer_initializer = second;

   nir_validate_shader(b->shader, NULL);

   bool progress = nir_remove_dead_variables(b->shader, nir_var_all, NULL);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   /* Nothing references the chain, so no variable should survive. */
   unsigned remaining = 0;
   nir_foreach_variable_in_shader(var, b->shader)
      remaining++;

   ASSERT_EQ(remaining, 0);
}
2478 
2479 
2480