/*
 * Copyright © Microsoft Corporation
 * Copyright © 2022 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "nir_builder.h"
#include "nir_builtin_builder.h"

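/* This pass rewrites cube-map texture instructions into equivalent 2D-array
 * texture instructions for the samplers selected by the caller-provided
 * "non-seamless" mask, so that non-seamless edge behavior can be emulated on
 * top of Vulkan's always-seamless cube sampling.
 */
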
static const struct glsl_type *
make_2darray_sampler_from_cubemap(const struct glsl_type *type)
{
   return glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_CUBE ?
            glsl_sampler_type(
               GLSL_SAMPLER_DIM_2D,
               false, true,
               glsl_get_sampler_result_type(type)) : type;
}

static const struct glsl_type *
make_2darray_from_cubemap_with_array(const struct glsl_type *type)
{
   if (glsl_type_is_array(type)) {
      const struct glsl_type *new_type = glsl_without_array(type);
      return new_type != type ? glsl_array_type(make_2darray_from_cubemap_with_array(glsl_without_array(type)),
                                                glsl_get_length(type), 0) : type;
   }
   return make_2darray_sampler_from_cubemap(type);
}

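/* Filter: only lower cube-dimension texture ops of the listed kinds, and only
 * when the sampler variable's driver_location is set in the non-seamless cube
 * mask passed to the pass.
 */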
static bool
lower_cubemap_to_array_filter(const nir_instr *instr, const void *mask)
{
   const uint32_t *nonseamless_cube_mask = mask;
   if (instr->type == nir_instr_type_tex) {
      nir_tex_instr *tex = nir_instr_as_tex(instr);
      nir_variable *var = nir_deref_instr_get_variable(nir_instr_as_deref(tex->src[nir_tex_instr_src_index(tex, nir_tex_src_texture_deref)].src.ssa->parent_instr));

      if (tex->sampler_dim != GLSL_SAMPLER_DIM_CUBE)
         return false;

      switch (tex->op) {
      case nir_texop_tex:
      case nir_texop_txb:
      case nir_texop_txd:
      case nir_texop_txl:
      case nir_texop_txs:
      case nir_texop_lod:
      case nir_texop_tg4:
         break;
      default:
         return false;
      }
      return (BITFIELD_BIT(var->data.driver_location) & (*nonseamless_cube_mask)) != 0;
   }

   return false;
}

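/* Cube-map lookup vector split into its signed components (rx, ry, rz) and
 * their absolute values (arx, ary, arz); for cube arrays, 'array' holds the
 * layer index already scaled by 6 faces per layer.
 */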
typedef struct {
   nir_def *rx;
   nir_def *ry;
   nir_def *rz;
   nir_def *arx;
   nir_def *ary;
   nir_def *arz;
   nir_def *array;
} coord_t;


/* This is taken from sp_tex_sample:convert_cube */
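/* Each evaluate_face_* helper handles the case where the corresponding axis
 * has the largest magnitude: the two minor components are scaled by
 * 0.5 / |major axis| and biased by 0.5 to produce normalized face
 * coordinates, and the face index (0..5 for +X,-X,+Y,-Y,+Z,-Z) is picked
 * from the major axis' sign, plus the pre-scaled array offset if present.
 */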
static nir_def *
evaluate_face_x(nir_builder *b, coord_t *coord)
{
   nir_def *sign = nir_fsign(b, coord->rx);
   nir_def *positive = nir_fge_imm(b, coord->rx, 0.0);
   nir_def *ima = nir_fdiv(b, nir_imm_float(b, -0.5), coord->arx);

   nir_def *x = nir_fadd_imm(b, nir_fmul(b, nir_fmul(b, sign, ima), coord->rz), 0.5);
   nir_def *y = nir_fadd_imm(b, nir_fmul(b, ima, coord->ry), 0.5);
   nir_def *face = nir_bcsel(b, positive, nir_imm_float(b, 0.0), nir_imm_float(b, 1.0));

   if (coord->array)
      face = nir_fadd(b, face, coord->array);

   return nir_vec3(b, x, y, face);
}

static nir_def *
evaluate_face_y(nir_builder *b, coord_t *coord)
{
   nir_def *sign = nir_fsign(b, coord->ry);
   nir_def *positive = nir_fge_imm(b, coord->ry, 0.0);
   nir_def *ima = nir_fdiv(b, nir_imm_float(b, 0.5), coord->ary);

   nir_def *x = nir_fadd_imm(b, nir_fmul(b, ima, coord->rx), 0.5);
   nir_def *y = nir_fadd_imm(b, nir_fmul(b, nir_fmul(b, sign, ima), coord->rz), 0.5);
   nir_def *face = nir_bcsel(b, positive, nir_imm_float(b, 2.0), nir_imm_float(b, 3.0));

   if (coord->array)
      face = nir_fadd(b, face, coord->array);

   return nir_vec3(b, x, y, face);
}

static nir_def *
evaluate_face_z(nir_builder *b, coord_t *coord)
{
   nir_def *sign = nir_fsign(b, coord->rz);
   nir_def *positive = nir_fge_imm(b, coord->rz, 0.0);
   nir_def *ima = nir_fdiv(b, nir_imm_float(b, -0.5), coord->arz);

   nir_def *x = nir_fadd_imm(b, nir_fmul(b, nir_fmul(b, sign, ima), nir_fneg(b, coord->rx)), 0.5);
   nir_def *y = nir_fadd_imm(b, nir_fmul(b, ima, coord->ry), 0.5);
   nir_def *face = nir_bcsel(b, positive, nir_imm_float(b, 4.0), nir_imm_float(b, 5.0));

   if (coord->array)
      face = nir_fadd(b, face, coord->array);

   return nir_vec3(b, x, y, face);
}

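/* Re-emit the given cube texture instruction as a 2D-array instruction with
 * the requested op, substituting the (x, y, layer) coordinate and copying
 * every other source. The comparator source is dropped for txf, which has no
 * depth comparison.
 */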
static nir_def *
create_array_tex_from_cube_tex(nir_builder *b, nir_tex_instr *tex, nir_def *coord, nir_texop op)
{
   nir_tex_instr *array_tex;

   unsigned num_srcs = tex->num_srcs;
   if (op == nir_texop_txf && nir_tex_instr_src_index(tex, nir_tex_src_comparator) != -1)
      num_srcs--;
   array_tex = nir_tex_instr_create(b->shader, num_srcs);
   array_tex->op = op;
   array_tex->sampler_dim = GLSL_SAMPLER_DIM_2D;
   array_tex->is_array = true;
   array_tex->is_shadow = tex->is_shadow;
   array_tex->is_sparse = tex->is_sparse;
   array_tex->is_new_style_shadow = tex->is_new_style_shadow;
   array_tex->texture_index = tex->texture_index;
   array_tex->sampler_index = tex->sampler_index;
   array_tex->dest_type = tex->dest_type;
   array_tex->coord_components = 3;

   nir_src coord_src = nir_src_for_ssa(coord);
   unsigned s = 0;
   for (unsigned i = 0; i < tex->num_srcs; i++) {
      if (op == nir_texop_txf && tex->src[i].src_type == nir_tex_src_comparator)
         continue;
      nir_src *psrc = (tex->src[i].src_type == nir_tex_src_coord) ?
                         &coord_src : &tex->src[i].src;

      array_tex->src[s].src_type = tex->src[i].src_type;
      if (psrc->ssa->num_components != nir_tex_instr_src_size(array_tex, s)) {
         nir_def *c = nir_trim_vector(b, psrc->ssa,
                                          nir_tex_instr_src_size(array_tex, s));
         array_tex->src[s].src = nir_src_for_ssa(c);
      } else
         array_tex->src[s].src = nir_src_for_ssa(psrc->ssa);
      s++;
   }

   nir_def_init(&array_tex->instr, &array_tex->def,
                nir_tex_instr_dest_size(array_tex),
                tex->def.bit_size);
   nir_builder_instr_insert(b, &array_tex->instr);
   return &array_tex->def;
}

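/* Given integer texel coordinates that may have stepped off the current face
 * in at most one direction, remap them onto the neighboring face. The remap
 * tables are indexed first by the overflow direction (-X, +X, -Y, +Y) and
 * then by the original face, and express the new x, y, and face in terms of
 * the old coordinates and the (square) texture size.
 */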
static nir_def *
handle_cube_edge(nir_builder *b, nir_def *x, nir_def *y, nir_def *face, nir_def *array_slice_cube_base, nir_def *tex_size)
{
   enum cube_remap
   {
      cube_remap_zero = 0,
      cube_remap_x,
      cube_remap_y,
      cube_remap_tex_size,
      cube_remap_tex_size_minus_x,
      cube_remap_tex_size_minus_y,

      cube_remap_size,
   };

   struct cube_remap_table
   {
      enum cube_remap remap_x;
      enum cube_remap remap_y;
      uint32_t        remap_face;
   };

   static const struct cube_remap_table cube_remap_neg_x[6] =
   {
       {cube_remap_tex_size,         cube_remap_y,         4},
       {cube_remap_tex_size,         cube_remap_y,         5},
       {cube_remap_y,                cube_remap_zero,      1},
       {cube_remap_tex_size_minus_y, cube_remap_tex_size,  1},
       {cube_remap_tex_size,         cube_remap_y,         1},
       {cube_remap_tex_size,         cube_remap_y,         0},
   };

   static const struct cube_remap_table cube_remap_pos_x[6] =
   {
       {cube_remap_zero,             cube_remap_y,         5},
       {cube_remap_zero,             cube_remap_y,         4},
       {cube_remap_tex_size_minus_y, cube_remap_zero,      0},
       {cube_remap_y,                cube_remap_tex_size,  0},
       {cube_remap_zero,             cube_remap_y,         0},
       {cube_remap_zero,             cube_remap_y,         1},
   };

   static const struct cube_remap_table cube_remap_neg_y[6] =
   {
       {cube_remap_tex_size,         cube_remap_tex_size_minus_x, 2},
       {cube_remap_zero,             cube_remap_x,                2},
       {cube_remap_tex_size_minus_x, cube_remap_zero,             5},
       {cube_remap_x,                cube_remap_tex_size,         4},
       {cube_remap_x,                cube_remap_tex_size,         2},
       {cube_remap_tex_size_minus_x, cube_remap_zero,             2},
   };

   static const struct cube_remap_table cube_remap_pos_y[6] =
   {
       {cube_remap_tex_size,         cube_remap_x,                   3},
       {cube_remap_zero,             cube_remap_tex_size_minus_x,    3},
       {cube_remap_x,                cube_remap_zero,                4},
       {cube_remap_tex_size_minus_x, cube_remap_tex_size,            5},
       {cube_remap_x,                cube_remap_zero,                3},
       {cube_remap_tex_size_minus_x, cube_remap_tex_size,            3},
   };

   static const struct cube_remap_table* remap_tables[4] = {
      cube_remap_neg_x,
      cube_remap_pos_x,
      cube_remap_neg_y,
      cube_remap_pos_y
   };

   nir_def *zero = nir_imm_int(b, 0);

   /* Doesn't matter since the texture is square */
   tex_size = nir_channel(b, tex_size, 0);

   nir_def *x_on = nir_iand(b, nir_ige(b, x, zero), nir_ige(b, tex_size, x));
   nir_def *y_on = nir_iand(b, nir_ige(b, y, zero), nir_ige(b, tex_size, y));
   nir_def *one_on = nir_ixor(b, x_on, y_on);

   /* If the sample did not fall off the face in either dimension, then set output = input */
   nir_def *x_result = x;
   nir_def *y_result = y;
   nir_def *face_result = face;

   /* otherwise, if the sample fell off the face in either the X or the Y direction, remap to the new face */
   nir_def *remap_predicates[4] =
   {
      nir_iand(b, one_on, nir_ilt(b, x, zero)),
      nir_iand(b, one_on, nir_ilt(b, tex_size, x)),
      nir_iand(b, one_on, nir_ilt(b, y, zero)),
      nir_iand(b, one_on, nir_ilt(b, tex_size, y)),
   };

   nir_def *remap_array[cube_remap_size];

   remap_array[cube_remap_zero] = zero;
   remap_array[cube_remap_x] = x;
   remap_array[cube_remap_y] = y;
   remap_array[cube_remap_tex_size] = tex_size;
   remap_array[cube_remap_tex_size_minus_x] = nir_isub(b, tex_size, x);
   remap_array[cube_remap_tex_size_minus_y] = nir_isub(b, tex_size, y);

   /* For each possible way the sample could have fallen off */
   for (unsigned i = 0; i < 4; i++) {
      const struct cube_remap_table* remap_table = remap_tables[i];

      /* For each possible original face */
      for (unsigned j = 0; j < 6; j++) {
         nir_def *predicate = nir_iand(b, remap_predicates[i], nir_ieq_imm(b, face, j));

         x_result = nir_bcsel(b, predicate, remap_array[remap_table[j].remap_x], x_result);
         y_result = nir_bcsel(b, predicate, remap_array[remap_table[j].remap_y], y_result);
         face_result = nir_bcsel(b, predicate, remap_array[remap_table[j].remap_face], face_result);
      }
   }

   return nir_vec3(b, x_result, y_result, nir_iadd(b, face_result, array_slice_cube_base));
}

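/* Emulate tg4 (textureGather) on the lowered 2D array: compute the four
 * integer texel coordinates around the sample point, remap any that fall off
 * the face with handle_cube_edge(), fetch each one with txf, and pack the
 * gathered component into a vec4.
 */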
static nir_def *
handle_cube_gather(nir_builder *b, nir_tex_instr *tex, nir_def *coord)
{
   tex->is_array = true;
   nir_def *tex_size = nir_get_texture_size(b, tex);

   /* nir_get_texture_size puts the cursor before the tex op */
   b->cursor = nir_after_instr(coord->parent_instr);

   nir_def *const_05 = nir_imm_float(b, 0.5f);
   nir_def *texel_coords = nir_fmul(b, nir_trim_vector(b, coord, 2),
                                        nir_i2f32(b, nir_trim_vector(b, tex_size, 2)));

   nir_def *x_orig = nir_channel(b, texel_coords, 0);
   nir_def *y_orig = nir_channel(b, texel_coords, 1);

   nir_def *x_pos = nir_f2i32(b, nir_fadd(b, x_orig, const_05));
   nir_def *x_neg = nir_f2i32(b, nir_fsub(b, x_orig, const_05));
   nir_def *y_pos = nir_f2i32(b, nir_fadd(b, y_orig, const_05));
   nir_def *y_neg = nir_f2i32(b, nir_fsub(b, y_orig, const_05));
   nir_def *coords[4][2] = {
      { x_neg, y_pos },
      { x_pos, y_pos },
      { x_pos, y_neg },
      { x_neg, y_neg },
   };

   nir_def *array_slice_2d = nir_f2i32(b, nir_channel(b, coord, 2));
   nir_def *face = nir_imod_imm(b, array_slice_2d, 6);
   nir_def *array_slice_cube_base = nir_isub(b, array_slice_2d, face);

   nir_def *channels[4];
   for (unsigned i = 0; i < 4; ++i) {
      nir_def *final_coord = handle_cube_edge(b, coords[i][0], coords[i][1], face, array_slice_cube_base, tex_size);
      nir_def *sampled_val = create_array_tex_from_cube_tex(b, tex, final_coord, nir_texop_txf);
      channels[i] = nir_channel(b, sampled_val, tex->component);
   }

   return nir_vec(b, channels, 4);
}

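/* Convert a cube-map direction (plus optional array layer) into the
 * (x, y, layer) coordinate of the equivalent 2D-array lookup. The dominant
 * axis is selected with if/else control flow and the per-face results are
 * joined back together with phis.
 */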
static nir_def *
lower_cube_coords(nir_builder *b, nir_def *coord, bool is_array)
{
   coord_t coords;
   coords.rx = nir_channel(b, coord, 0);
   coords.ry = nir_channel(b, coord, 1);
   coords.rz = nir_channel(b, coord, 2);
   coords.arx = nir_fabs(b, coords.rx);
   coords.ary = nir_fabs(b, coords.ry);
   coords.arz = nir_fabs(b, coords.rz);
   coords.array = NULL;
   if (is_array)
      coords.array = nir_fmul_imm(b, nir_channel(b, coord, 3), 6.0f);

   nir_def *use_face_x = nir_iand(b,
                                      nir_fge(b, coords.arx, coords.ary),
                                      nir_fge(b, coords.arx, coords.arz));

   nir_if *use_face_x_if = nir_push_if(b, use_face_x);
   nir_def *face_x_coord = evaluate_face_x(b, &coords);
   nir_if *use_face_x_else = nir_push_else(b, use_face_x_if);

   nir_def *use_face_y = nir_iand(b,
                                      nir_fge(b, coords.ary, coords.arx),
                                      nir_fge(b, coords.ary, coords.arz));

   nir_if *use_face_y_if = nir_push_if(b, use_face_y);
   nir_def *face_y_coord = evaluate_face_y(b, &coords);
   nir_if *use_face_y_else = nir_push_else(b, use_face_y_if);

   nir_def *face_z_coord = evaluate_face_z(b, &coords);

   nir_pop_if(b, use_face_y_else);
   nir_def *face_y_or_z_coord = nir_if_phi(b, face_y_coord, face_z_coord);
   nir_pop_if(b, use_face_x_else);

   // This contains in xy the normalized sample coordinates, and in z the face index
   nir_def *coord_and_face = nir_if_phi(b, face_x_coord, face_y_or_z_coord);

   return coord_and_face;
}

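/* Retype the sampler variable feeding this tex instruction from (an array of)
 * cube sampler type to the matching 2D-array sampler type.
 */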
static void
rewrite_cube_var_type(nir_builder *b, nir_tex_instr *tex)
{
   nir_variable *sampler = nir_deref_instr_get_variable(nir_instr_as_deref(tex->src[nir_tex_instr_src_index(tex, nir_tex_src_texture_deref)].src.ssa->parent_instr));
   assert(sampler);
   sampler->type = make_2darray_from_cubemap_with_array(sampler->type);
}

/* txb(s, coord, bias) = txl(s, coord, lod(s, coord).y + bias) */
/* tex(s, coord) = txl(s, coord, lod(s, coord).x) */
static nir_tex_instr *
lower_tex_to_txl(nir_builder *b, nir_tex_instr *tex)
{
   b->cursor = nir_after_instr(&tex->instr);
   int bias_idx = nir_tex_instr_src_index(tex, nir_tex_src_bias);
   unsigned num_srcs = bias_idx >= 0 ? tex->num_srcs : tex->num_srcs + 1;
   nir_tex_instr *txl = nir_tex_instr_create(b->shader, num_srcs);

   txl->op = nir_texop_txl;
   txl->sampler_dim = tex->sampler_dim;
   txl->dest_type = tex->dest_type;
   txl->coord_components = tex->coord_components;
   txl->texture_index = tex->texture_index;
   txl->sampler_index = tex->sampler_index;
   txl->is_array = tex->is_array;
   txl->is_shadow = tex->is_shadow;
   txl->is_sparse = tex->is_sparse;
   txl->is_new_style_shadow = tex->is_new_style_shadow;

   unsigned s = 0;
   for (int i = 0; i < tex->num_srcs; i++) {
      if (i == bias_idx)
         continue;
      txl->src[s].src = nir_src_for_ssa(tex->src[i].src.ssa);
      txl->src[s].src_type = tex->src[i].src_type;
      s++;
   }
   nir_def *lod = nir_get_texture_lod(b, tex);

   if (bias_idx >= 0)
      lod = nir_fadd(b, lod, tex->src[bias_idx].src.ssa);
   lod = nir_fadd_imm(b, lod, -1.0);
   txl->src[s] = nir_tex_src_for_ssa(nir_tex_src_lod, lod);

   b->cursor = nir_before_instr(&tex->instr);
   nir_def_init(&txl->instr, &txl->def,
                tex->def.num_components,
                tex->def.bit_size);
   nir_builder_instr_insert(b, &txl->instr);
   nir_def_rewrite_uses(&tex->def, &txl->def);
   return txl;
}

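/* Lower a cube-map sampling op. Implicit-LOD tex/txb are first converted to
 * explicit-LOD txl (the LOD is queried while the coordinate is still a cube
 * vector), then the coordinate is rewritten to (x, y, layer), the sampler
 * variable is retyped, and the op is re-emitted as a 2D-array instruction;
 * non-shadow tg4 instead goes through the txf-based gather emulation.
 */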
static nir_def *
lower_cube_sample(nir_builder *b, nir_tex_instr *tex)
{
   if (!tex->is_shadow && (tex->op == nir_texop_txb || tex->op == nir_texop_tex)) {
      tex = lower_tex_to_txl(b, tex);
   }

   int coord_index = nir_tex_instr_src_index(tex, nir_tex_src_coord);
   assert(coord_index >= 0);

   /* Evaluate the face and the xy coordinates for a 2D tex op */
   nir_def *coord = tex->src[coord_index].src.ssa;
   nir_def *coord_and_face = lower_cube_coords(b, coord, tex->is_array);

   rewrite_cube_var_type(b, tex);

   if (tex->op == nir_texop_tg4 && !tex->is_shadow)
      return handle_cube_gather(b, tex, coord_and_face);
   else
      return create_array_tex_from_cube_tex(b, tex, coord_and_face, tex->op);
}

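/* textureSize() on the lowered 2D array reports the layer count in .z, so
 * divide it by 6 to recover the cube(-array) dimension the shader expects.
 */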
static nir_def *
lower_cube_txs(nir_builder *b, nir_tex_instr *tex)
{
   b->cursor = nir_after_instr(&tex->instr);

   rewrite_cube_var_type(b, tex);
   unsigned num_components = tex->def.num_components;
   /* force max components to unbreak textureSize().xy */
   tex->def.num_components = 3;
   tex->is_array = true;
   nir_def *array_dim = nir_channel(b, &tex->def, 2);
   nir_def *cube_array_dim = nir_idiv(b, array_dim, nir_imm_int(b, 6));
   nir_def *size = nir_vec3(b, nir_channel(b, &tex->def, 0),
                                   nir_channel(b, &tex->def, 1),
                                   cube_array_dim);
   return nir_trim_vector(b, size, num_components);
}

static nir_def *
lower_cubemap_to_array_tex(nir_builder *b, nir_tex_instr *tex)
{
   switch (tex->op) {
   case nir_texop_tex:
   case nir_texop_txb:
   case nir_texop_txd:
   case nir_texop_txl:
   case nir_texop_lod:
   case nir_texop_tg4:
      return lower_cube_sample(b, tex);
   case nir_texop_txs:
      return lower_cube_txs(b, tex);
   default:
      unreachable("Unsupported cube map texture operation");
   }
}

static nir_def *
lower_cubemap_to_array_impl(nir_builder *b, nir_instr *instr,
                            UNUSED void *_options)
{
   if (instr->type == nir_instr_type_tex)
      return lower_cubemap_to_array_tex(b, nir_instr_as_tex(instr));
   return NULL;
}

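/* Pass entry point: nonseamless_cube_mask is a bitfield of sampler
 * driver_locations whose cube-map accesses should be lowered to 2D-array
 * accesses.
 */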
bool
zink_lower_cubemap_to_array(nir_shader *s, uint32_t nonseamless_cube_mask);
bool
zink_lower_cubemap_to_array(nir_shader *s, uint32_t nonseamless_cube_mask)
{
   return nir_shader_lower_instructions(s,
                                        lower_cubemap_to_array_filter,
                                        lower_cubemap_to_array_impl,
                                        &nonseamless_cube_mask);
}