/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "nir.h"
#include "nir_builder.h"

/*
 * Lowers the whole-vector pack/unpack opcodes to their per-component
 * "split" equivalents, for example:
 *
 * pack_64_2x32(foo) -> pack_64_2x32_split(foo.x, foo.y)
 * unpack_64_2x32(foo) -> vec2(unpack_64_2x32_split_x(foo),
 *                             unpack_64_2x32_split_y(foo))
 *
 * and likewise for the 64_4x16, 32_2x16, and 32_4x8 variants.
 */

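/* pack_64_2x32: fuse a vec2 of 32-bit values into a single 64-bit value. */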
static nir_def *
lower_pack_64_from_32(nir_builder *b, nir_def *src)
{
   return nir_pack_64_2x32_split(b, nir_channel(b, src, 0),
                                 nir_channel(b, src, 1));
}

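/* unpack_64_2x32: split a 64-bit value into a vec2 of 32-bit halves. */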
static nir_def *
lower_unpack_64_to_32(nir_builder *b, nir_def *src)
{
   return nir_vec2(b, nir_unpack_64_2x32_split_x(b, src),
                   nir_unpack_64_2x32_split_y(b, src));
}

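/* pack_32_2x16: fuse a vec2 of 16-bit values into a single 32-bit value. */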
static nir_def *
lower_pack_32_from_16(nir_builder *b, nir_def *src)
{
   return nir_pack_32_2x16_split(b, nir_channel(b, src, 0),
                                 nir_channel(b, src, 1));
}

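/* unpack_32_2x16: split a 32-bit value into a vec2 of 16-bit halves. */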
static nir_def *
lower_unpack_32_to_16(nir_builder *b, nir_def *src)
{
   return nir_vec2(b, nir_unpack_32_2x16_split_x(b, src),
                   nir_unpack_32_2x16_split_y(b, src));
}

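/*
 * pack_64_4x16: pack each pair of 16-bit components into a 32-bit half,
 * then fuse the two halves into a 64-bit value.
 */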
static nir_def *
lower_pack_64_from_16(nir_builder *b, nir_def *src)
{
   nir_def *xy = nir_pack_32_2x16_split(b, nir_channel(b, src, 0),
                                        nir_channel(b, src, 1));

   nir_def *zw = nir_pack_32_2x16_split(b, nir_channel(b, src, 2),
                                        nir_channel(b, src, 3));

   return nir_pack_64_2x32_split(b, xy, zw);
}

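/*
 * unpack_64_4x16: split the 64-bit value into two 32-bit halves, then
 * split each half into its two 16-bit components.
 */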
static nir_def *
lower_unpack_64_to_16(nir_builder *b, nir_def *src)
{
   nir_def *xy = nir_unpack_64_2x32_split_x(b, src);
   nir_def *zw = nir_unpack_64_2x32_split_y(b, src);

   return nir_vec4(b, nir_unpack_32_2x16_split_x(b, xy),
                   nir_unpack_32_2x16_split_y(b, xy),
                   nir_unpack_32_2x16_split_x(b, zw),
                   nir_unpack_32_2x16_split_y(b, zw));
}

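/*
 * pack_32_4x8: use the native 4x8 split opcode when the backend has one;
 * otherwise zero-extend each byte to 32 bits and assemble the result as
 * src.x | (src.y << 8) | (src.z << 16) | (src.w << 24).
 */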
static nir_def *
lower_pack_32_from_8(nir_builder *b, nir_def *src)
{
   if (b->shader->options->has_pack_32_4x8) {
      return nir_pack_32_4x8_split(b,
                                   nir_channel(b, src, 0),
                                   nir_channel(b, src, 1),
                                   nir_channel(b, src, 2),
                                   nir_channel(b, src, 3));
   } else {
      nir_def *src32 = nir_u2u32(b, src);

      return nir_ior(b,
                     nir_ior(b, nir_channel(b, src32, 0),
                             nir_ishl_imm(b, nir_channel(b, src32, 1), 8)),
                     nir_ior(b, nir_ishl_imm(b, nir_channel(b, src32, 2), 16),
                             nir_ishl_imm(b, nir_channel(b, src32, 3), 24)));
   }
}

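/*
 * unpack_32_4x8: extract each byte of the 32-bit value as an 8-bit
 * component, via shifts or the extract_u8 opcode as appropriate.
 */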
static nir_def *
lower_unpack_32_to_8(nir_builder *b, nir_def *src)
{
   /* Some drivers call nir_lower_pack after the last time nir_opt_algebraic
    * is called. To prevent issues there, don't generate byte extraction
    * instructions when the lowering flag is set.
    */
   if (b->shader->options->lower_extract_byte) {
      return nir_vec4(b, nir_u2u8(b, src),
                         nir_u2u8(b, nir_ushr_imm(b, src, 8)),
                         nir_u2u8(b, nir_ushr_imm(b, src, 16)),
                         nir_u2u8(b, nir_ushr_imm(b, src, 24)));
   } else {
      return nir_vec4(b, nir_u2u8(b, nir_extract_u8_imm(b, src, 0)),
                         nir_u2u8(b, nir_extract_u8_imm(b, src, 1)),
                         nir_u2u8(b, nir_extract_u8_imm(b, src, 2)),
                         nir_u2u8(b, nir_extract_u8_imm(b, src, 3)));
   }
}

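/*
 * Maps a pack/unpack ALU instruction to the matching lowering helper,
 * honoring the backend's skip_lower_packing_ops mask, and replaces the
 * instruction's destination with the lowered expression.
 */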
static bool
lower_pack_instr(nir_builder *b, nir_alu_instr *alu_instr, void *data)
{
   nir_lower_packing_op op;
   switch (alu_instr->op) {
   case nir_op_pack_64_2x32:
      op = nir_lower_packing_op_pack_64_2x32;
      break;
   case nir_op_unpack_64_2x32:
      op = nir_lower_packing_op_unpack_64_2x32;
      break;
   case nir_op_pack_64_4x16:
      op = nir_lower_packing_op_pack_64_4x16;
      break;
   case nir_op_unpack_64_4x16:
      op = nir_lower_packing_op_unpack_64_4x16;
      break;
   case nir_op_pack_32_2x16:
      op = nir_lower_packing_op_pack_32_2x16;
      break;
   case nir_op_unpack_32_2x16:
      op = nir_lower_packing_op_unpack_32_2x16;
      break;
   case nir_op_pack_32_4x8:
      op = nir_lower_packing_op_pack_32_4x8;
      break;
   case nir_op_unpack_32_4x8:
      op = nir_lower_packing_op_unpack_32_4x8;
      break;
   default:
      return false;
   }

   if (b->shader->options->skip_lower_packing_ops & BITFIELD_BIT(op))
      return false;

   b->cursor = nir_before_instr(&alu_instr->instr);

   typedef nir_def *(*lower_func_t)(nir_builder *b, nir_def *src);
   static const lower_func_t lower_funcs[nir_lower_packing_num_ops] = {
      [nir_lower_packing_op_pack_64_2x32]   = lower_pack_64_from_32,
      [nir_lower_packing_op_unpack_64_2x32] = lower_unpack_64_to_32,
      [nir_lower_packing_op_pack_64_4x16]   = lower_pack_64_from_16,
      [nir_lower_packing_op_unpack_64_4x16] = lower_unpack_64_to_16,
      [nir_lower_packing_op_pack_32_2x16]   = lower_pack_32_from_16,
      [nir_lower_packing_op_unpack_32_2x16] = lower_unpack_32_to_16,
      [nir_lower_packing_op_pack_32_4x8]    = lower_pack_32_from_8,
      [nir_lower_packing_op_unpack_32_4x8]  = lower_unpack_32_to_8,
   };

   nir_def *src = nir_ssa_for_alu_src(b, alu_instr, 0);
   nir_def *dest = lower_funcs[op](b, src);
   nir_def_replace(&alu_instr->def, dest);

   return true;
}

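/*
 * Runs the lowering over every ALU instruction in the shader. A typical
 * (hypothetical) call site in a driver's pass pipeline; where this runs
 * relative to nir_opt_algebraic varies by driver:
 *
 *    NIR_PASS(progress, shader, nir_lower_pack);
 */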
bool
nir_lower_pack(nir_shader *shader)
{
   return nir_shader_alu_pass(shader, lower_pack_instr,
                              nir_metadata_control_flow, NULL);
}