/*
 * Copyright (C) 2020 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors (Collabora):
 *      Alyssa Rosenzweig <[email protected]>
 */

/**
 * Implements framebuffer format conversions in software for Midgard/Bifrost
 * blend shaders. This pass is designed for a single render target; Midgard
 * duplicates blend shaders for MRT to simplify everything. A particular
 * framebuffer format may be categorized as 1) typed load available, 2) typed
 * unpack available, or 3) software unpack only, and likewise for stores. The
 * first two types are handled in the compiler backend directly, so this module
 * is responsible for identifying type 3 formats (hardware dependent) and
 * inserting appropriate ALU code to perform the conversion from the packed
 * type to a designated unpacked type, and vice versa.
 *
 * The unpacked type depends on the format:
 *
 *      - For 32-bit float formats or >8-bit UNORM/SNORM, 32-bit floats.
 *      - For other floats, 16-bit floats.
 *      - For 32-bit ints, 32-bit ints.
 *      - For 8-bit ints, 8-bit ints.
 *      - For other ints, 16-bit ints.
 *
 * The rationale is to optimize blending and logic op instructions by using the
 * smallest precision necessary to store the pixel losslessly.
 */

#include "pan_lower_framebuffer.h"
#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "compiler/nir/nir_format_convert.h"
#include "util/format/u_format.h"

/* Determines the unpacked type best suiting a given format, so the rest of the
 * pipeline may be adjusted accordingly */

nir_alu_type
pan_unpacked_type_for_format(const struct util_format_description *desc)
{
   int c = util_format_get_first_non_void_channel(desc->format);

   if (c == -1)
      unreachable("Void format not renderable");

   bool large = (desc->channel[c].size > 16);
   bool large_norm = (desc->channel[c].size > 8);
   bool bit8 = (desc->channel[c].size == 8);
   assert(desc->channel[c].size <= 32);

   if (desc->channel[c].normalized)
      return large_norm ? nir_type_float32 : nir_type_float16;

   switch (desc->channel[c].type) {
   case UTIL_FORMAT_TYPE_UNSIGNED:
      return bit8 ? nir_type_uint8 : large ? nir_type_uint32 : nir_type_uint16;
   case UTIL_FORMAT_TYPE_SIGNED:
      return bit8 ? nir_type_int8 : large ? nir_type_int32 : nir_type_int16;
   case UTIL_FORMAT_TYPE_FLOAT:
      return large ? nir_type_float32 : nir_type_float16;
   default:
      unreachable("Format not renderable");
   }
}

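/* Illustrative mappings derived from the rules above (not an exhaustive
 * list): RGBA8_UNORM and R16G16B16A16_FLOAT unpack as nir_type_float16,
 * RGBA16_UNORM and R32G32B32A32_FLOAT as nir_type_float32, RGBA8UI as
 * nir_type_uint8, R10G10B10A2_UINT as nir_type_uint16 and
 * R32G32B32A32_UINT as nir_type_uint32. */
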
static bool
pan_is_format_native(const struct util_format_description *desc,
                     bool broken_ld_special, bool is_store)
{
   if (is_store || broken_ld_special)
      return false;

   if (util_format_is_pure_integer(desc->format) ||
       util_format_is_float(desc->format))
      return false;

   /* Some formats are missing as typed but have unpacks */
   if (desc->format == PIPE_FORMAT_R11G11B10_FLOAT)
      return false;

   if (desc->is_array) {
      int c = util_format_get_first_non_void_channel(desc->format);
      assert(c >= 0);
      if (desc->channel[c].size > 8)
         return false;
   }

   return true;
}

/* Software packs/unpacks, by format class. Packs take in the pixel value typed
 * as `pan_unpacked_type_for_format` of the format and return an i32vec4
 * suitable for storing (with components replicated to fill). Unpacks do the
 * reverse but cannot rely on replication. */

static nir_def *
pan_replicate(nir_builder *b, nir_def *v, unsigned num_components)
{
   nir_def *replicated[4];

   for (unsigned i = 0; i < 4; ++i)
      replicated[i] = nir_channel(b, v, i % num_components);

   return nir_vec(b, replicated, 4);
}

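/* For instance (illustrative), replicating a 2-component value (r, g) yields
 * the vec4 (r, g, r, g), and a 3-component (r, g, b) yields (r, g, b, r). */
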
/* Pure x16 formats are x16 unpacked, so it's similar, but we need to pack
 * upper/lower halves of course */

static nir_def *
pan_pack_pure_16(nir_builder *b, nir_def *v, unsigned num_components)
{
   nir_def *v4 = pan_replicate(b, v, num_components);

   nir_def *lo = nir_pack_32_2x16(b, nir_channels(b, v4, 0x3 << 0));
   nir_def *hi = nir_pack_32_2x16(b, nir_channels(b, v4, 0x3 << 2));

   return nir_vec4(b, lo, hi, lo, hi);
}

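/* Worked example (illustrative): packing an RG16UI pixel with r = 0x1234 and
 * g = 0xABCD first replicates to (r, g, r, g), so both 32-bit halves pack to
 * 0xABCD1234 (g in the upper 16 bits) and that word ends up in all four
 * lanes of the result. */
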
static nir_def *
pan_unpack_pure_16(nir_builder *b, nir_def *pack, unsigned num_components)
{
   nir_def *unpacked[4];

   assert(num_components <= 4);

   for (unsigned i = 0; i < num_components; i += 2) {
      nir_def *halves = nir_unpack_32_2x16(b, nir_channel(b, pack, i >> 1));

      unpacked[i + 0] = nir_channel(b, halves, 0);
      unpacked[i + 1] = nir_channel(b, halves, 1);
   }

   return nir_pad_vec4(b, nir_vec(b, unpacked, num_components));
}

static nir_def *
pan_pack_reorder(nir_builder *b, const struct util_format_description *desc,
                 nir_def *v)
{
   unsigned swizzle[4] = {0, 1, 2, 3};

   for (unsigned i = 0; i < v->num_components; i++) {
      if (desc->swizzle[i] <= PIPE_SWIZZLE_W)
         swizzle[i] = desc->swizzle[i];
   }

   return nir_swizzle(b, v, swizzle, v->num_components);
}

static nir_def *
pan_unpack_reorder(nir_builder *b, const struct util_format_description *desc,
                   nir_def *v)
{
   unsigned swizzle[4] = {0, 1, 2, 3};

   for (unsigned i = 0; i < v->num_components; i++) {
      if (desc->swizzle[i] <= PIPE_SWIZZLE_W)
         swizzle[desc->swizzle[i]] = i;
   }

   return nir_swizzle(b, v, swizzle, v->num_components);
}

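/* For illustration: a BGRA-ordered format has desc->swizzle = {2, 1, 0, 3},
 * so pan_pack_reorder() turns the shader's (r, g, b, a) into (b, g, r, a)
 * before packing, and pan_unpack_reorder() applies the inverse permutation
 * (the same swizzle in this case) after unpacking. */
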
static nir_def *
pan_pack_pure_8(nir_builder *b, nir_def *v, unsigned num_components)
{
   return nir_replicate(
      b, nir_pack_32_4x8(b, pan_replicate(b, v, num_components)), 4);
}

static nir_def *
pan_unpack_pure_8(nir_builder *b, nir_def *pack, unsigned num_components)
{
   nir_def *unpacked = nir_unpack_32_4x8(b, nir_channel(b, pack, 0));
   return nir_trim_vector(b, unpacked, num_components);
}

static nir_def *
pan_fsat(nir_builder *b, nir_def *v, bool is_signed)
{
   if (is_signed)
      return nir_fsat_signed_mali(b, v);
   else
      return nir_fsat(b, v);
}

static float
norm_scale(bool snorm, unsigned bits)
{
   if (snorm)
      return (1 << (bits - 1)) - 1;
   else
      return (1 << bits) - 1;
}

/* For <= 8-bits per channel, [U,S]NORM formats are packed like [U,S]NORM 8,
 * with zeroes spacing out each component as needed */

static nir_def *
pan_pack_norm(nir_builder *b, nir_def *v, unsigned x, unsigned y, unsigned z,
              unsigned w, bool is_signed)
{
   /* If a channel has N bits, 1.0 is encoded as 2^N - 1 for UNORMs and
    * 2^(N-1) - 1 for SNORMs */
   nir_def *scales =
      is_signed ? nir_imm_vec4_16(b, (1 << (x - 1)) - 1, (1 << (y - 1)) - 1,
                                  (1 << (z - 1)) - 1, (1 << (w - 1)) - 1)
                : nir_imm_vec4_16(b, (1 << x) - 1, (1 << y) - 1, (1 << z) - 1,
                                  (1 << w) - 1);

   /* If a channel has N bits, we pad out to the byte by (8 - N) bits */
   nir_def *shifts = nir_imm_ivec4(b, 8 - x, 8 - y, 8 - z, 8 - w);
   nir_def *clamped = pan_fsat(b, nir_pad_vec4(b, v), is_signed);

   nir_def *f = nir_fmul(b, clamped, scales);
   nir_def *u8 = nir_f2u8(b, nir_fround_even(b, f));
   nir_def *s = nir_ishl(b, u8, shifts);
   nir_def *repl = nir_pack_32_4x8(b, s);

   return nir_replicate(b, repl, 4);
}

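/* Worked example (illustrative): packing (1.0, 0.0, 1.0, 1.0) as a 4-bit
 * UNORM (x = y = z = w = 4) scales each channel by 15 and rounds, giving the
 * bytes (0x0F, 0x00, 0x0F, 0x0F); each byte is then shifted left by
 * 8 - 4 = 4 so the value sits in the top nibble, and nir_pack_32_4x8()
 * places component 0 in the low byte, yielding 0xF0F000F0 in every lane. */
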
static nir_def *
pan_pack_unorm(nir_builder *b, nir_def *v, unsigned x, unsigned y, unsigned z,
               unsigned w)
{
   return pan_pack_norm(b, v, x, y, z, w, false);
}

/* RGB10_A2 is packed in the tilebuffer as the bottom 3 bytes being the top
 * 8-bits of RGB and the top byte being RGBA as 2-bits packed. As imirkin
 * pointed out, this means free conversion to RGBX8 */

static nir_def *
pan_pack_unorm_1010102(nir_builder *b, nir_def *v)
{
   nir_def *scale = nir_imm_vec4(b, 1023.0, 1023.0, 1023.0, 3.0);
   nir_def *s =
      nir_f2u32(b, nir_fround_even(b, nir_fmul(b, nir_fsat(b, v), scale)));

   nir_def *top8 = nir_ushr(b, s, nir_imm_ivec4(b, 0x2, 0x2, 0x2, 0x2));
   nir_def *top8_rgb = nir_pack_32_4x8(b, nir_u2u8(b, top8));

   nir_def *bottom2 = nir_iand(b, s, nir_imm_ivec4(b, 0x3, 0x3, 0x3, 0x3));

   nir_def *top =
      nir_ior(b,
              nir_ior(b, nir_ishl_imm(b, nir_channel(b, bottom2, 0), 24 + 0),
                      nir_ishl_imm(b, nir_channel(b, bottom2, 1), 24 + 2)),
              nir_ior(b, nir_ishl_imm(b, nir_channel(b, bottom2, 2), 24 + 4),
                      nir_ishl_imm(b, nir_channel(b, bottom2, 3), 24 + 6)));

   nir_def *p = nir_ior(b, top, top8_rgb);
   return nir_replicate(b, p, 4);
}

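/* Resulting layout (illustrative): byte 0 holds R[9:2], byte 1 holds G[9:2],
 * byte 2 holds B[9:2], and byte 3 packs the leftover low bits as
 * R[1:0] | G[1:0] << 2 | B[1:0] << 4 | A << 6, which is what makes the RGBX8
 * reinterpretation mentioned above free. */
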
/* On the other hand, the pure int RGB10_A2 is identical to the spec */

static nir_def *
pan_pack_int_1010102(nir_builder *b, nir_def *v, bool is_signed)
{
   v = nir_u2u32(b, v);

   /* Clamp the values */
   if (is_signed) {
      v = nir_imin(b, v, nir_imm_ivec4(b, 511, 511, 511, 1));
      v = nir_imax(b, v, nir_imm_ivec4(b, -512, -512, -512, -2));
   } else {
      v = nir_umin(b, v, nir_imm_ivec4(b, 1023, 1023, 1023, 3));
   }

   v = nir_ishl(b, v, nir_imm_ivec4(b, 0, 10, 20, 30));
   v = nir_ior(b, nir_ior(b, nir_channel(b, v, 0), nir_channel(b, v, 1)),
               nir_ior(b, nir_channel(b, v, 2), nir_channel(b, v, 3)));

   return nir_replicate(b, v, 4);
}

static nir_def *
pan_unpack_int_1010102(nir_builder *b, nir_def *packed, bool is_signed)
{
   nir_def *v = nir_replicate(b, nir_channel(b, packed, 0), 4);

   /* Left shift all components so the sign bit is on the MSB, and
    * can be extended by ishr(). The ishl()+[u,i]shr() combination
    * sets all unused bits to 0 without requiring a mask.
    */
   v = nir_ishl(b, v, nir_imm_ivec4(b, 22, 12, 2, 0));

   if (is_signed)
      v = nir_ishr(b, v, nir_imm_ivec4(b, 22, 22, 22, 30));
   else
      v = nir_ushr(b, v, nir_imm_ivec4(b, 22, 22, 22, 30));

   return nir_i2i16(b, v);
}

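/* For illustration: with R in bits [9:0], G in [19:10], B in [29:20] and A in
 * [31:30], the green channel is recovered as (p << 12) >> 22: its ten bits
 * are first moved to the top of the word (putting the channel's sign bit at
 * the MSB for the signed case), then shifted back down, which also clears
 * the neighbouring channels without an explicit mask. */
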
/* NIR means we can *finally* catch a break */

static nir_def *
pan_pack_r11g11b10(nir_builder *b, nir_def *v)
{
   return nir_replicate(b, nir_format_pack_11f11f10f(b, nir_f2f32(b, v)), 4);
}

static nir_def *
pan_unpack_r11g11b10(nir_builder *b, nir_def *v)
{
   nir_def *f32 = nir_format_unpack_11f11f10f(b, nir_channel(b, v, 0));
   nir_def *f16 = nir_f2fmp(b, f32);

   /* Extend to vec4 with alpha */
   nir_def *components[4] = {nir_channel(b, f16, 0), nir_channel(b, f16, 1),
                             nir_channel(b, f16, 2), nir_imm_float16(b, 1.0)};

   return nir_vec(b, components, 4);
}

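/* The packed layout here is the standard R11G11B10_FLOAT one: an 11-bit
 * float R in bits [10:0], an 11-bit float G in [21:11] and a 10-bit float B
 * in [31:22]; since the format has no alpha channel, the unpack above
 * substitutes a constant 1.0. */
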
/* Wrapper around sRGB conversion */

static nir_def *
pan_linear_to_srgb(nir_builder *b, nir_def *linear)
{
   nir_def *rgb = nir_trim_vector(b, linear, 3);

   /* TODO: fp16 native conversion */
   nir_def *srgb =
      nir_f2fmp(b, nir_format_linear_to_srgb(b, nir_f2f32(b, rgb)));

   nir_def *comp[4] = {
      nir_channel(b, srgb, 0),
      nir_channel(b, srgb, 1),
      nir_channel(b, srgb, 2),
      nir_channel(b, linear, 3),
   };

   return nir_vec(b, comp, 4);
}

static nir_def *
pan_unpack_pure(nir_builder *b, nir_def *packed, unsigned size, unsigned nr)
{
   switch (size) {
   case 32:
      return nir_trim_vector(b, packed, nr);
   case 16:
      return pan_unpack_pure_16(b, packed, nr);
   case 8:
      return pan_unpack_pure_8(b, packed, nr);
   default:
      unreachable("Unrenderable size");
   }
}

/* Generic dispatches for un/pack regardless of format */

static nir_def *
pan_unpack(nir_builder *b, const struct util_format_description *desc,
           nir_def *packed)
{
   if (desc->is_array) {
      int c = util_format_get_first_non_void_channel(desc->format);
      assert(c >= 0);
      struct util_format_channel_description d = desc->channel[c];
      nir_def *unpacked = pan_unpack_pure(b, packed, d.size, desc->nr_channels);

      /* Normalized formats are unpacked as integers. We need to
       * convert to float for the final result.
       */
      if (d.normalized) {
         bool snorm = desc->is_snorm;
         unsigned float_sz = (d.size <= 8 ? 16 : 32);
         float multiplier = norm_scale(snorm, d.size);

         nir_def *as_float = snorm ? nir_i2fN(b, unpacked, float_sz)
                                   : nir_u2fN(b, unpacked, float_sz);

         return nir_fmul_imm(b, as_float, 1.0 / multiplier);
      } else {
         return unpacked;
      }
   }

   switch (desc->format) {
   case PIPE_FORMAT_R10G10B10A2_UINT:
   case PIPE_FORMAT_B10G10R10A2_UINT:
      return pan_unpack_int_1010102(b, packed, false);
   case PIPE_FORMAT_R10G10B10A2_SINT:
   case PIPE_FORMAT_B10G10R10A2_SINT:
      return pan_unpack_int_1010102(b, packed, true);
   case PIPE_FORMAT_R11G11B10_FLOAT:
      return pan_unpack_r11g11b10(b, packed);
   default:
      break;
   }

   fprintf(stderr, "%s\n", desc->name);
   unreachable("Unknown format");
}

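/* Worked example (illustrative): RGBA16_UNORM is an array format with 16-bit
 * channels, so it takes the path above: the raw value is unpacked with
 * pan_unpack_pure_16(), converted to 32-bit float (d.size > 8) and scaled by
 * 1.0 / 65535.0. The matching pack path below multiplies by 65535.0 and
 * converts back to 16-bit integers before packing. */
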
static nir_def *pan_pack(nir_builder *b,
                         const struct util_format_description *desc,
                         nir_def *unpacked)
{
   if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB)
      unpacked = pan_linear_to_srgb(b, unpacked);

   if (desc->is_array) {
      int c = util_format_get_first_non_void_channel(desc->format);
      assert(c >= 0);
      struct util_format_channel_description d = desc->channel[c];

      /* Pure formats are packed as-is */
      nir_def *raw = unpacked;

      /* Normalized formats get normalized first */
      if (d.normalized) {
         bool snorm = desc->is_snorm;
         float multiplier = norm_scale(snorm, d.size);
         nir_def *clamped = pan_fsat(b, unpacked, snorm);
         nir_def *normed = nir_fmul_imm(b, clamped, multiplier);

         raw = nir_f2uN(b, normed, d.size);
      }

      /* Pack the raw format */
      switch (d.size) {
      case 32:
         return pan_replicate(b, raw, desc->nr_channels);
      case 16:
         return pan_pack_pure_16(b, raw, desc->nr_channels);
      case 8:
         return pan_pack_pure_8(b, raw, desc->nr_channels);
      default:
         unreachable("Unrenderable size");
      }
   }

   switch (desc->format) {
   case PIPE_FORMAT_B4G4R4A4_UNORM:
   case PIPE_FORMAT_B4G4R4X4_UNORM:
   case PIPE_FORMAT_A4R4_UNORM:
   case PIPE_FORMAT_R4A4_UNORM:
   case PIPE_FORMAT_A4B4G4R4_UNORM:
   case PIPE_FORMAT_R4G4B4A4_UNORM:
      return pan_pack_unorm(b, unpacked, 4, 4, 4, 4);
   case PIPE_FORMAT_B5G5R5A1_UNORM:
   case PIPE_FORMAT_R5G5B5A1_UNORM:
      return pan_pack_unorm(b, unpacked, 5, 6, 5, 1);
   case PIPE_FORMAT_R5G6B5_UNORM:
   case PIPE_FORMAT_B5G6R5_UNORM:
      return pan_pack_unorm(b, unpacked, 5, 6, 5, 0);
   case PIPE_FORMAT_R10G10B10A2_UNORM:
   case PIPE_FORMAT_B10G10R10A2_UNORM:
      return pan_pack_unorm_1010102(b, unpacked);
   case PIPE_FORMAT_R10G10B10A2_UINT:
   case PIPE_FORMAT_B10G10R10A2_UINT:
      return pan_pack_int_1010102(b, unpacked, false);
   case PIPE_FORMAT_R10G10B10A2_SINT:
   case PIPE_FORMAT_B10G10R10A2_SINT:
      return pan_pack_int_1010102(b, unpacked, true);
   case PIPE_FORMAT_R11G11B10_FLOAT:
      return pan_pack_r11g11b10(b, unpacked);
   default:
      break;
   }

   fprintf(stderr, "%s\n", desc->name);
   unreachable("Unknown format");
}

static void
pan_lower_fb_store(nir_builder *b, nir_intrinsic_instr *intr,
                   const struct util_format_description *desc,
                   bool reorder_comps, unsigned nr_samples)
{
   /* For stores, add conversion before */
   nir_def *unpacked = intr->src[0].ssa;
   unpacked = nir_pad_vec4(b, unpacked);

   /* Re-order the components */
   if (reorder_comps)
      unpacked = pan_pack_reorder(b, desc, unpacked);

   nir_def *packed = pan_pack(b, desc, unpacked);

   /* We have to split writeout in 128 bit chunks */
   unsigned iterations = DIV_ROUND_UP(desc->block.bits * nr_samples, 128);

   for (unsigned s = 0; s < iterations; ++s) {
      nir_store_raw_output_pan(b, packed,
                               .io_semantics = nir_intrinsic_io_semantics(intr),
                               .base = s);
   }
}

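/* For illustration: a 32-bit-per-pixel render target at 4x MSAA needs
 * 32 * 4 = 128 bits of writeout, i.e. a single store, while the same target
 * at 8x would emit two stores with .base = 0 and .base = 1. */
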
static nir_def *
pan_sample_id(nir_builder *b, int sample)
{
   return (sample >= 0) ? nir_imm_int(b, sample) : nir_load_sample_id(b);
}

static void
pan_lower_fb_load(nir_builder *b, nir_intrinsic_instr *intr,
                  const struct util_format_description *desc,
                  bool reorder_comps, int sample)
{
   nir_def *packed =
      nir_load_raw_output_pan(b, 4, 32, pan_sample_id(b, sample),
                              .io_semantics = nir_intrinsic_io_semantics(intr));

   /* Convert the raw value */
   nir_def *unpacked = pan_unpack(b, desc, packed);

   /* Convert to the size of the load intrinsic.
    *
    * We can assume that the type will match with the framebuffer format:
    *
    * Page 170 of the PDF of the OpenGL ES 3.0.6 spec says:
    *
    * If [UNORM or SNORM, convert to fixed-point]; otherwise no type
    * conversion is applied. If the values written by the fragment shader
    * do not match the format(s) of the corresponding color buffer(s),
    * the result is undefined.
    */

   unsigned bits = intr->def.bit_size;

   nir_alu_type src_type =
      nir_alu_type_get_base_type(pan_unpacked_type_for_format(desc));

   unpacked = nir_convert_to_bit_size(b, unpacked, src_type, bits);
   unpacked = nir_resize_vector(b, unpacked, intr->def.num_components);

   /* Reorder the components */
   if (reorder_comps)
      unpacked = pan_unpack_reorder(b, desc, unpacked);

   nir_def_rewrite_uses_after(&intr->def, unpacked, &intr->instr);
}

struct inputs {
   const enum pipe_format *rt_fmts;
   uint8_t raw_fmt_mask;
   bool is_blend;
   bool broken_ld_special;
   unsigned nr_samples;
};

static bool
lower(nir_builder *b, nir_instr *instr, void *data)
{
   struct inputs *inputs = data;
   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
   bool is_load = intr->intrinsic == nir_intrinsic_load_output;
   bool is_store = intr->intrinsic == nir_intrinsic_store_output;

   if (!(is_load || (is_store && inputs->is_blend)))
      return false;

   nir_io_semantics sem = nir_intrinsic_io_semantics(intr);
   if (sem.location < FRAG_RESULT_DATA0)
      return false;

   unsigned rt = sem.location - FRAG_RESULT_DATA0;
   if (inputs->rt_fmts[rt] == PIPE_FORMAT_NONE)
      return false;

   const struct util_format_description *desc =
      util_format_description(inputs->rt_fmts[rt]);

   /* Don't lower */
   if (pan_is_format_native(desc, inputs->broken_ld_special, is_store))
      return false;

   /* EXT_shader_framebuffer_fetch requires per-sample loads. MSAA blend
    * shaders are not yet handled, so for now always load sample 0.
    */
   int sample = inputs->is_blend ? 0 : -1;
   bool reorder_comps = inputs->raw_fmt_mask & BITFIELD_BIT(rt);

   if (is_store) {
      b->cursor = nir_before_instr(instr);
      pan_lower_fb_store(b, intr, desc, reorder_comps, inputs->nr_samples);
   } else {
      b->cursor = nir_after_instr(instr);
      pan_lower_fb_load(b, intr, desc, reorder_comps, sample);
   }

   nir_instr_remove(instr);
   return true;
}

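/* A minimal invocation sketch (illustrative; the shader-key fields below are
 * hypothetical, only pan_lower_framebuffer() itself is real):
 *
 *    NIR_PASS(progress, nir, pan_lower_framebuffer, key->rt_formats,
 *             key->raw_fmt_mask, key->nr_samples, dev->broken_ld_special);
 *
 * Passing a non-zero blend_shader_nr_samples marks the shader as a blend
 * shader, which enables store lowering and forces loads to sample 0. */
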
bool
pan_lower_framebuffer(nir_shader *shader, const enum pipe_format *rt_fmts,
                      uint8_t raw_fmt_mask, unsigned blend_shader_nr_samples,
                      bool broken_ld_special)
{
   assert(shader->info.stage == MESA_SHADER_FRAGMENT);

   return nir_shader_instructions_pass(
      shader, lower, nir_metadata_control_flow,
      &(struct inputs){
         .rt_fmts = rt_fmts,
         .raw_fmt_mask = raw_fmt_mask,
         .nr_samples = blend_shader_nr_samples,
         .is_blend = blend_shader_nr_samples > 0,
         .broken_ld_special = broken_ld_special,
      });
}
637