/*
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * SPDX-License-Identifier: MIT
 */

#include "ac_nir_to_llvm.h"
#include "ac_gpu_info.h"
#include "ac_binary.h"
#include "ac_llvm_build.h"
#include "ac_llvm_util.h"
#include "ac_shader_abi.h"
#include "ac_shader_util.h"
#include "ac_nir.h"
#include "nir/nir.h"
#include "nir/nir_deref.h"
#include "sid.h"
#include "util/bitscan.h"
#include "util/u_math.h"
#include <llvm/Config/llvm-config.h>

struct ac_nir_context {
   struct ac_llvm_context ac;
   struct ac_shader_abi *abi;
   const struct ac_shader_args *args;

   gl_shader_stage stage;
   shader_info *info;

   LLVMValueRef *ssa_defs;

   struct ac_llvm_pointer scratch;
   struct ac_llvm_pointer constant_data;

   struct hash_table *defs;
   struct hash_table *phis;
   struct hash_table *verified_interp;

   LLVMValueRef main_function;
   LLVMBasicBlockRef continue_block;
   LLVMBasicBlockRef break_block;
};

static LLVMTypeRef get_def_type(struct ac_nir_context *ctx, const nir_def *def)
{
   LLVMTypeRef type = LLVMIntTypeInContext(ctx->ac.context, def->bit_size);
   if (def->num_components > 1) {
      type = LLVMVectorType(type, def->num_components);
   }
   return type;
}

static LLVMValueRef get_src(struct ac_nir_context *nir, nir_src src)
{
   return nir->ssa_defs[src.ssa->index];
}

static LLVMValueRef get_memory_ptr(struct ac_nir_context *ctx, nir_src src, unsigned c_off)
{
   LLVMValueRef ptr = get_src(ctx, src);
   ptr = LLVMBuildAdd(ctx->ac.builder, ptr, LLVMConstInt(ctx->ac.i32, c_off, 0), "");
   /* LDS is used here as an i8 pointer. */
   return LLVMBuildGEP2(ctx->ac.builder, ctx->ac.i8, ctx->ac.lds.value, &ptr, 1, "");
}

static LLVMBasicBlockRef get_block(struct ac_nir_context *nir, const struct nir_block *b)
{
   struct hash_entry *entry = _mesa_hash_table_search(nir->defs, b);
   return (LLVMBasicBlockRef)entry->data;
}

static LLVMValueRef get_alu_src(struct ac_nir_context *ctx, nir_alu_src src,
                                unsigned num_components)
{
   LLVMValueRef value = get_src(ctx, src.src);
   bool need_swizzle = false;

   assert(value);
   unsigned src_components = ac_get_llvm_num_components(value);
   for (unsigned i = 0; i < num_components; ++i) {
      assert(src.swizzle[i] < src_components);
      if (src.swizzle[i] != i)
         need_swizzle = true;
   }

   if (need_swizzle || num_components != src_components) {
      LLVMValueRef masks[] = {LLVMConstInt(ctx->ac.i32, src.swizzle[0], false),
                              LLVMConstInt(ctx->ac.i32, src.swizzle[1], false),
                              LLVMConstInt(ctx->ac.i32, src.swizzle[2], false),
                              LLVMConstInt(ctx->ac.i32, src.swizzle[3], false)};

      if (src_components > 1 && num_components == 1) {
         value = LLVMBuildExtractElement(ctx->ac.builder, value, masks[0], "");
      } else if (src_components == 1 && num_components > 1) {
         LLVMValueRef values[] = {value, value, value, value};
         value = ac_build_gather_values(&ctx->ac, values, num_components);
      } else {
         LLVMValueRef swizzle = LLVMConstVector(masks, num_components);
         value = LLVMBuildShuffleVector(ctx->ac.builder, value, value, swizzle, "");
      }
   }
   return value;
}
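
/* Example: in get_alu_src, a vec4 source with swizzle zyxw takes the
 * shufflevector path with mask <2, 1, 0, 3>, while a scalar source read as a
 * vec3 is broadcast through the gather path instead. */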

static LLVMValueRef emit_int_cmp(struct ac_llvm_context *ctx, LLVMIntPredicate pred,
                                 LLVMValueRef src0, LLVMValueRef src1)
{
   src0 = ac_to_integer(ctx, src0);
   src1 = ac_to_integer(ctx, src1);
   return LLVMBuildICmp(ctx->builder, pred, src0, src1, "");
}

static LLVMValueRef emit_float_cmp(struct ac_llvm_context *ctx, LLVMRealPredicate pred,
                                   LLVMValueRef src0, LLVMValueRef src1)
{
   src0 = ac_to_float(ctx, src0);
   src1 = ac_to_float(ctx, src1);
   return LLVMBuildFCmp(ctx->builder, pred, src0, src1, "");
}

static LLVMValueRef emit_intrin_1f_param(struct ac_llvm_context *ctx, const char *intrin,
                                         LLVMTypeRef result_type, LLVMValueRef src0)
{
   char name[64], type[64];
   LLVMValueRef params[] = {
      ac_to_float(ctx, src0),
   };

   ac_build_type_name_for_intr(LLVMTypeOf(params[0]), type, sizeof(type));
   ASSERTED const int length = snprintf(name, sizeof(name), "%s.%s", intrin, type);
   assert(length < sizeof(name));
   return ac_build_intrinsic(ctx, name, result_type, params, 1, 0);
}

static LLVMValueRef emit_intrin_1f_param_scalar(struct ac_llvm_context *ctx, const char *intrin,
                                                LLVMTypeRef result_type, LLVMValueRef src0)
{
   if (LLVMGetTypeKind(result_type) != LLVMVectorTypeKind)
      return emit_intrin_1f_param(ctx, intrin, result_type, src0);

   LLVMTypeRef elem_type = LLVMGetElementType(result_type);
   LLVMValueRef ret = LLVMGetUndef(result_type);

   /* Scalarize the intrinsic, because vectors are not supported. */
   for (unsigned i = 0; i < LLVMGetVectorSize(result_type); i++) {
      char name[64], type[64];
      LLVMValueRef params[] = {
         ac_to_float(ctx, ac_llvm_extract_elem(ctx, src0, i)),
      };

      ac_build_type_name_for_intr(LLVMTypeOf(params[0]), type, sizeof(type));
      ASSERTED const int length = snprintf(name, sizeof(name), "%s.%s", intrin, type);
      assert(length < sizeof(name));
      ret = LLVMBuildInsertElement(
         ctx->builder, ret,
         ac_build_intrinsic(ctx, name, elem_type, params, 1, 0),
         LLVMConstInt(ctx->i32, i, 0), "");
   }
   return ret;
}

static LLVMValueRef emit_intrin_2f_param(struct ac_llvm_context *ctx, const char *intrin,
                                         LLVMTypeRef result_type, LLVMValueRef src0,
                                         LLVMValueRef src1)
{
   char name[64], type[64];
   LLVMValueRef params[] = {
      ac_to_float(ctx, src0),
      ac_to_float(ctx, src1),
   };

   ac_build_type_name_for_intr(LLVMTypeOf(params[0]), type, sizeof(type));
   ASSERTED const int length = snprintf(name, sizeof(name), "%s.%s", intrin, type);
   assert(length < sizeof(name));
   return ac_build_intrinsic(ctx, name, result_type, params, 2, 0);
}

static LLVMValueRef emit_intrin_3f_param(struct ac_llvm_context *ctx, const char *intrin,
                                         LLVMTypeRef result_type, LLVMValueRef src0,
                                         LLVMValueRef src1, LLVMValueRef src2)
{
   char name[64], type[64];
   LLVMValueRef params[] = {
      ac_to_float(ctx, src0),
      ac_to_float(ctx, src1),
      ac_to_float(ctx, src2),
   };

   ac_build_type_name_for_intr(LLVMTypeOf(params[0]), type, sizeof(type));
   ASSERTED const int length = snprintf(name, sizeof(name), "%s.%s", intrin, type);
   assert(length < sizeof(name));
   return ac_build_intrinsic(ctx, name, result_type, params, 3, 0);
}

static LLVMValueRef emit_bcsel(struct ac_llvm_context *ctx, LLVMValueRef src0, LLVMValueRef src1,
                               LLVMValueRef src2)
{
   LLVMTypeRef src1_type = LLVMTypeOf(src1);
   LLVMTypeRef src2_type = LLVMTypeOf(src2);

   if (LLVMGetTypeKind(src1_type) == LLVMPointerTypeKind &&
       LLVMGetTypeKind(src2_type) != LLVMPointerTypeKind) {
      src2 = LLVMBuildIntToPtr(ctx->builder, src2, src1_type, "");
   } else if (LLVMGetTypeKind(src2_type) == LLVMPointerTypeKind &&
              LLVMGetTypeKind(src1_type) != LLVMPointerTypeKind) {
      src1 = LLVMBuildIntToPtr(ctx->builder, src1, src2_type, "");
   }

   return LLVMBuildSelect(ctx->builder, src0, ac_to_integer_or_pointer(ctx, src1),
                          ac_to_integer_or_pointer(ctx, src2), "");
}

static LLVMValueRef emit_iabs(struct ac_llvm_context *ctx, LLVMValueRef src0)
{
   return ac_build_imax(ctx, src0, LLVMBuildNeg(ctx->builder, src0, ""));
}
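
/* Note: emit_iabs(INT_MIN) yields INT_MIN, since -INT_MIN wraps back to
 * INT_MIN and imax then picks it; this matches the usual two's-complement
 * abs behaviour. */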

static LLVMValueRef emit_uint_carry(struct ac_llvm_context *ctx, const char *intrin,
                                    LLVMValueRef src0, LLVMValueRef src1)
{
   LLVMTypeRef ret_type;
   LLVMTypeRef types[] = {ctx->i32, ctx->i1};
   LLVMValueRef res;
   LLVMValueRef params[] = {src0, src1};
   ret_type = LLVMStructTypeInContext(ctx->context, types, 2, false);

   res = ac_build_intrinsic(ctx, intrin, ret_type, params, 2, 0);

   res = LLVMBuildExtractValue(ctx->builder, res, 1, "");
   res = LLVMBuildZExt(ctx->builder, res, ctx->i32, "");
   return res;
}

static LLVMValueRef emit_b2f(struct ac_llvm_context *ctx, LLVMValueRef src0, unsigned bitsize)
{
   assert(ac_get_elem_bits(ctx, LLVMTypeOf(src0)) == 1);

   switch (bitsize) {
   case 16:
      if (LLVMGetTypeKind(LLVMTypeOf(src0)) == LLVMVectorTypeKind) {
         assert(LLVMGetVectorSize(LLVMTypeOf(src0)) == 2);
         LLVMValueRef f[] = {
            LLVMBuildSelect(ctx->builder, ac_llvm_extract_elem(ctx, src0, 0),
                            ctx->f16_1, ctx->f16_0, ""),
            LLVMBuildSelect(ctx->builder, ac_llvm_extract_elem(ctx, src0, 1),
                            ctx->f16_1, ctx->f16_0, ""),
         };
         return ac_build_gather_values(ctx, f, 2);
      }
      return LLVMBuildSelect(ctx->builder, src0, ctx->f16_1, ctx->f16_0, "");
   case 32:
      return LLVMBuildSelect(ctx->builder, src0, ctx->f32_1, ctx->f32_0, "");
   case 64:
      return LLVMBuildSelect(ctx->builder, src0, ctx->f64_1, ctx->f64_0, "");
   default:
      unreachable("Unsupported bit size.");
   }
}

static LLVMValueRef emit_b2i(struct ac_llvm_context *ctx, LLVMValueRef src0, unsigned bitsize)
{
   switch (bitsize) {
   case 8:
      return LLVMBuildSelect(ctx->builder, src0, ctx->i8_1, ctx->i8_0, "");
   case 16:
      if (LLVMGetTypeKind(LLVMTypeOf(src0)) == LLVMVectorTypeKind) {
         assert(LLVMGetVectorSize(LLVMTypeOf(src0)) == 2);
         LLVMValueRef i[] = {
            LLVMBuildSelect(ctx->builder, ac_llvm_extract_elem(ctx, src0, 0),
                            ctx->i16_1, ctx->i16_0, ""),
            LLVMBuildSelect(ctx->builder, ac_llvm_extract_elem(ctx, src0, 1),
                            ctx->i16_1, ctx->i16_0, ""),
         };
         return ac_build_gather_values(ctx, i, 2);
      }
      return LLVMBuildSelect(ctx->builder, src0, ctx->i16_1, ctx->i16_0, "");
   case 32:
      return LLVMBuildSelect(ctx->builder, src0, ctx->i32_1, ctx->i32_0, "");
   case 64:
      return LLVMBuildSelect(ctx->builder, src0, ctx->i64_1, ctx->i64_0, "");
   default:
      unreachable("Unsupported bit size.");
   }
}

static LLVMValueRef emit_i2b(struct ac_llvm_context *ctx, LLVMValueRef src0)
{
   LLVMValueRef zero = LLVMConstNull(LLVMTypeOf(src0));
   return LLVMBuildICmp(ctx->builder, LLVMIntNE, src0, zero, "");
}

static LLVMValueRef emit_f2f16(struct ac_llvm_context *ctx, LLVMValueRef src0)
{
   LLVMValueRef result;
   LLVMValueRef cond = NULL;

   src0 = ac_to_float(ctx, src0);
   result = LLVMBuildFPTrunc(ctx->builder, src0, ctx->f16, "");

   if (ctx->gfx_level >= GFX8) {
      LLVMValueRef args[2];
      /* Check if the result is a denormal and flush it to 0 if so. */
      args[0] = result;
      args[1] = LLVMConstInt(ctx->i32, N_SUBNORMAL | P_SUBNORMAL, false);
      cond =
         ac_build_intrinsic(ctx, "llvm.amdgcn.class.f16", ctx->i1, args, 2, 0);
   }

   /* need to convert back up to f32 */
   result = LLVMBuildFPExt(ctx->builder, result, ctx->f32, "");

   if (ctx->gfx_level >= GFX8)
      result = LLVMBuildSelect(ctx->builder, cond, ctx->f32_0, result, "");
   else {
      /* for GFX6-GFX7 */
      /* 0x38800000 is the smallest half-float value (2^-14) in 32-bit float,
       * so compare the result and flush to 0 if it's smaller.
       */
      LLVMValueRef temp, cond2;
      temp = emit_intrin_1f_param(ctx, "llvm.fabs", ctx->f32, result);
      cond = LLVMBuildFCmp(
         ctx->builder, LLVMRealOGT,
         LLVMBuildBitCast(ctx->builder, LLVMConstInt(ctx->i32, 0x38800000, false), ctx->f32, ""),
         temp, "");
      cond2 = LLVMBuildFCmp(ctx->builder, LLVMRealONE, temp, ctx->f32_0, "");
      cond = LLVMBuildAnd(ctx->builder, cond, cond2, "");
      result = LLVMBuildSelect(ctx->builder, cond, ctx->f32_0, result, "");
   }
   return result;
}

static LLVMValueRef emit_umul_high(struct ac_llvm_context *ctx, LLVMValueRef src0,
                                   LLVMValueRef src1)
{
   LLVMValueRef dst64, result;

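   /* Pre-LLVM-20 workaround: keep at least one operand opaque so the 64-bit
    * multiply below is selected as a mul_hi rather than being folded away
    * when both operands are constant (the guard suggests LLVM 20 no longer
    * needs this). */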
#if LLVM_VERSION_MAJOR < 20
   if (LLVMIsConstant(src0))
      ac_build_optimization_barrier(ctx, &src1, false);
   else
      ac_build_optimization_barrier(ctx, &src0, false);
#endif

   src0 = LLVMBuildZExt(ctx->builder, src0, ctx->i64, "");
   src1 = LLVMBuildZExt(ctx->builder, src1, ctx->i64, "");

   dst64 = LLVMBuildMul(ctx->builder, src0, src1, "");
   dst64 = LLVMBuildLShr(ctx->builder, dst64, LLVMConstInt(ctx->i64, 32, false), "");
   result = LLVMBuildTrunc(ctx->builder, dst64, ctx->i32, "");
   return result;
}

static LLVMValueRef emit_imul_high(struct ac_llvm_context *ctx, LLVMValueRef src0,
                                   LLVMValueRef src1)
{
   LLVMValueRef dst64, result;
   src0 = LLVMBuildSExt(ctx->builder, src0, ctx->i64, "");
   src1 = LLVMBuildSExt(ctx->builder, src1, ctx->i64, "");

   dst64 = LLVMBuildMul(ctx->builder, src0, src1, "");
   dst64 = LLVMBuildAShr(ctx->builder, dst64, LLVMConstInt(ctx->i64, 32, false), "");
   result = LLVMBuildTrunc(ctx->builder, dst64, ctx->i32, "");
   return result;
}

static LLVMValueRef emit_bfm(struct ac_llvm_context *ctx, LLVMValueRef bits, LLVMValueRef offset)
{
   /* mask = ((1 << bits) - 1) << offset */
   return LLVMBuildShl(
      ctx->builder,
      LLVMBuildSub(ctx->builder, LLVMBuildShl(ctx->builder, ctx->i32_1, bits, ""), ctx->i32_1, ""),
      offset, "");
}
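
/* Worked example: bits = 4, offset = 8 gives ((1 << 4) - 1) << 8 = 0xF00,
 * i.e. a 4-bit mask starting at bit 8. */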

static LLVMValueRef emit_bitfield_select(struct ac_llvm_context *ctx, LLVMValueRef mask,
                                         LLVMValueRef insert, LLVMValueRef base)
{
   /* Calculate:
    *   (mask & insert) | (~mask & base) = base ^ (mask & (insert ^ base))
    * Use the right-hand side, which the LLVM backend can convert to V_BFI.
    */
   return LLVMBuildXor(
      ctx->builder, base,
      LLVMBuildAnd(ctx->builder, mask, LLVMBuildXor(ctx->builder, insert, base, ""), ""), "");
}

static LLVMValueRef emit_pack_2x16(struct ac_llvm_context *ctx, LLVMValueRef src0,
                                   LLVMValueRef (*pack)(struct ac_llvm_context *ctx,
                                                        LLVMValueRef args[2]))
{
   LLVMValueRef comp[2];

   src0 = ac_to_float(ctx, src0);
   comp[0] = LLVMBuildExtractElement(ctx->builder, src0, ctx->i32_0, "");
   comp[1] = LLVMBuildExtractElement(ctx->builder, src0, ctx->i32_1, "");

   return LLVMBuildBitCast(ctx->builder, pack(ctx, comp), ctx->i32, "");
}

static LLVMValueRef emit_unpack_half_2x16(struct ac_llvm_context *ctx, LLVMValueRef src0)
{
   LLVMValueRef const16 = LLVMConstInt(ctx->i32, 16, false);
   LLVMValueRef temps[2], val;
   int i;

   for (i = 0; i < 2; i++) {
      val = i == 1 ? LLVMBuildLShr(ctx->builder, src0, const16, "") : src0;
      val = LLVMBuildTrunc(ctx->builder, val, ctx->i16, "");
      val = LLVMBuildBitCast(ctx->builder, val, ctx->f16, "");
      temps[i] = LLVMBuildFPExt(ctx->builder, val, ctx->f32, "");
   }
   return ac_build_gather_values(ctx, temps, 2);
}
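
/* Example: src0 = 0x40003C00 unpacks to (1.0, 2.0): the low half 0x3C00 is
 * f16 1.0 and the high half 0x4000 is f16 2.0. */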

static LLVMValueRef emit_ddxy(struct ac_nir_context *ctx, nir_intrinsic_op op, LLVMValueRef src0)
{
   unsigned mask;
   int idx;
   LLVMValueRef result;

   if (op == nir_intrinsic_ddx_fine)
      mask = AC_TID_MASK_LEFT;
   else if (op == nir_intrinsic_ddy_fine)
      mask = AC_TID_MASK_TOP;
   else
      mask = AC_TID_MASK_TOP_LEFT;

   /* For DDX we want the next X pixel, for DDY the next Y pixel. */
   if (op == nir_intrinsic_ddx_fine || op == nir_intrinsic_ddx_coarse || op == nir_intrinsic_ddx)
      idx = 1;
   else
      idx = 2;

   result = ac_build_ddxy(&ctx->ac, mask, idx, src0);
   return result;
}

struct waterfall_context {
   LLVMBasicBlockRef phi_bb[2];
   bool use_waterfall;
};

/* To deal with divergent descriptors we can create a loop that handles all
 * lanes with the same descriptor on a given iteration (henceforth a
 * waterfall loop).
 *
 * These helpers create the beginning and end of the loop, leaving the caller
 * to implement the body.
 *
 * params:
 *  - ctx is the usual nir context
 *  - wctx is a temporary struct containing some loop info. Can be left uninitialized.
 *  - value is the possibly divergent value for which we build the loop
 *  - divergent is whether value is actually divergent. If false we just pass
 *     things through.
 */
static LLVMValueRef enter_waterfall(struct ac_nir_context *ctx, struct waterfall_context *wctx,
                                    LLVMValueRef value, bool divergent)
{
   /* If the app claims the value is divergent but it is constant we can
    * end up with a dynamic index of NULL. */
   if (!value)
      divergent = false;

   wctx->use_waterfall = divergent;
   if (!divergent)
      return value;

   ac_build_bgnloop(&ctx->ac, 6000);

   LLVMValueRef active = ctx->ac.i1true;
   LLVMValueRef scalar_value[NIR_MAX_VEC_COMPONENTS];

   for (unsigned i = 0; i < ac_get_llvm_num_components(value); i++) {
      LLVMValueRef comp = ac_llvm_extract_elem(&ctx->ac, value, i);
      scalar_value[i] = ac_build_readlane(&ctx->ac, comp, NULL);
      active = LLVMBuildAnd(ctx->ac.builder, active,
                            LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, comp, scalar_value[i], ""), "");
   }

   wctx->phi_bb[0] = LLVMGetInsertBlock(ctx->ac.builder);
   ac_build_ifcc(&ctx->ac, active, 6001);

   return ac_build_gather_values(&ctx->ac, scalar_value, ac_get_llvm_num_components(value));
}

static LLVMValueRef exit_waterfall(struct ac_nir_context *ctx, struct waterfall_context *wctx,
                                   LLVMValueRef value)
{
   LLVMValueRef ret = NULL;
   LLVMValueRef phi_src[2];
   LLVMValueRef cc_phi_src[2] = {
      ctx->ac.i32_0,
      LLVMConstInt(ctx->ac.i32, 0xffffffff, false),
   };

   if (!wctx->use_waterfall)
      return value;

   wctx->phi_bb[1] = LLVMGetInsertBlock(ctx->ac.builder);

   ac_build_endif(&ctx->ac, 6001);

   if (value) {
      phi_src[0] = LLVMGetUndef(LLVMTypeOf(value));
      phi_src[1] = value;

      ret = ac_build_phi(&ctx->ac, LLVMTypeOf(value), 2, phi_src, wctx->phi_bb);
   }

   /*
    * By using the optimization barrier on the exit decision, we decouple
    * the operations from the break, and hence avoid LLVM hoisting the
    * operation into the break block.
    */
   LLVMValueRef cc = ac_build_phi(&ctx->ac, ctx->ac.i32, 2, cc_phi_src, wctx->phi_bb);
   ac_build_optimization_barrier(&ctx->ac, &cc, false);

   LLVMValueRef active =
      LLVMBuildICmp(ctx->ac.builder, LLVMIntNE, cc, ctx->ac.i32_0, "uniform_active2");
   ac_build_ifcc(&ctx->ac, active, 6002);
   ac_build_break(&ctx->ac);
   ac_build_endif(&ctx->ac, 6002);

   ac_build_endloop(&ctx->ac, 6000);
   return ret;
}
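
/* Typical usage of the waterfall helpers (a sketch; the names are
 * illustrative):
 *
 *    struct waterfall_context wctx;
 *    LLVMValueRef index = enter_waterfall(ctx, &wctx, dynamic_index, divergent);
 *    ...emit the body using the now-uniform index...
 *    result = exit_waterfall(ctx, &wctx, result);
 */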

static LLVMValueRef
ac_build_const_int_vec(struct ac_llvm_context *ctx, LLVMTypeRef type, long long val, bool sign_extend)
{
   unsigned num_components = LLVMGetTypeKind(type) == LLVMVectorTypeKind ? LLVMGetVectorSize(type) : 1;

   if (num_components == 1)
      return LLVMConstInt(type, val, sign_extend);

   assert(num_components == 2);
   assert(ac_get_elem_bits(ctx, type) == 16);

   LLVMTypeRef elem_type = LLVMGetElementType(type);

   LLVMValueRef elems[2];
   for (unsigned i = 0; i < 2; ++i)
      elems[i] = LLVMConstInt(elem_type, val, sign_extend);

   return LLVMConstVector(elems, 2);
}
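
/* Example: ac_build_const_int_vec(ctx, ctx->v2i16, 3, false) yields the
 * constant <i16 3, i16 3>; for non-vector types it is just LLVMConstInt. */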

static bool visit_alu(struct ac_nir_context *ctx, const nir_alu_instr *instr)
{
   LLVMValueRef src[16], result = NULL;
   unsigned num_components = instr->def.num_components;
   LLVMTypeRef def_type = get_def_type(ctx, &instr->def);

   assert(nir_op_infos[instr->op].num_inputs <= ARRAY_SIZE(src));
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
      src[i] = get_alu_src(ctx, instr->src[i], nir_ssa_alu_instr_src_components(instr, i));

   switch (instr->op) {
   case nir_op_mov:
      result = src[0];
      break;
   case nir_op_fneg:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = LLVMBuildFNeg(ctx->ac.builder, src[0], "");
      break;
   case nir_op_inot:
      result = LLVMBuildNot(ctx->ac.builder, src[0], "");
      break;
   case nir_op_iadd:
      if (instr->no_unsigned_wrap)
         result = LLVMBuildNUWAdd(ctx->ac.builder, src[0], src[1], "");
      else if (instr->no_signed_wrap)
         result = LLVMBuildNSWAdd(ctx->ac.builder, src[0], src[1], "");
      else
         result = LLVMBuildAdd(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_uadd_sat:
   case nir_op_iadd_sat: {
      char name[64], type[64];
      ac_build_type_name_for_intr(def_type, type, sizeof(type));
      snprintf(name, sizeof(name), "llvm.%cadd.sat.%s",
               instr->op == nir_op_uadd_sat ? 'u' : 's', type);
      result = ac_build_intrinsic(&ctx->ac, name, def_type, src, 2, 0);
      break;
   }
   case nir_op_usub_sat:
   case nir_op_isub_sat: {
      char name[64], type[64];
      ac_build_type_name_for_intr(def_type, type, sizeof(type));
      snprintf(name, sizeof(name), "llvm.%csub.sat.%s",
               instr->op == nir_op_usub_sat ? 'u' : 's', type);
      result = ac_build_intrinsic(&ctx->ac, name, def_type, src, 2, 0);
      break;
   }
   case nir_op_fadd:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      src[1] = ac_to_float(&ctx->ac, src[1]);
      result = LLVMBuildFAdd(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_fsub:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      src[1] = ac_to_float(&ctx->ac, src[1]);
      result = LLVMBuildFSub(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_isub:
      if (instr->no_unsigned_wrap)
         result = LLVMBuildNUWSub(ctx->ac.builder, src[0], src[1], "");
      else if (instr->no_signed_wrap)
         result = LLVMBuildNSWSub(ctx->ac.builder, src[0], src[1], "");
      else
         result = LLVMBuildSub(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_imul:
      if (instr->no_unsigned_wrap)
         result = LLVMBuildNUWMul(ctx->ac.builder, src[0], src[1], "");
      else if (instr->no_signed_wrap)
         result = LLVMBuildNSWMul(ctx->ac.builder, src[0], src[1], "");
      else
         result = LLVMBuildMul(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_fmul:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      src[1] = ac_to_float(&ctx->ac, src[1]);
      result = LLVMBuildFMul(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_fmulz:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      src[1] = ac_to_float(&ctx->ac, src[1]);
      result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.fmul.legacy", ctx->ac.f32,
                                  src, 2, 0);
      break;
   case nir_op_frcp:
      result = emit_intrin_1f_param_scalar(&ctx->ac, "llvm.amdgcn.rcp",
                                           ac_to_float_type(&ctx->ac, def_type), src[0]);
      if (ctx->abi->clamp_div_by_zero)
         result = ac_build_fmin(&ctx->ac, result,
                                LLVMConstReal(ac_to_float_type(&ctx->ac, def_type), FLT_MAX));
      break;
   case nir_op_iand:
      result = LLVMBuildAnd(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_ior:
      result = LLVMBuildOr(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_ixor:
      result = LLVMBuildXor(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_ishl:
   case nir_op_ishr:
   case nir_op_ushr: {
      if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) <
          ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
         src[1] = LLVMBuildZExt(ctx->ac.builder, src[1], LLVMTypeOf(src[0]), "");
      else if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) >
               ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
         src[1] = LLVMBuildTrunc(ctx->ac.builder, src[1], LLVMTypeOf(src[0]), "");
      LLVMTypeRef type = LLVMTypeOf(src[1]);
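      /* NIR defines the shift count as masked to bitsize-1 (matching the
       * hardware), while LLVM shifts yield poison for out-of-range counts,
       * so apply the mask explicitly. */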
      src[1] = LLVMBuildAnd(ctx->ac.builder, src[1],
                            ac_build_const_int_vec(&ctx->ac, type, ac_get_elem_bits(&ctx->ac, type) - 1, false), "");
      switch (instr->op) {
      case nir_op_ishl:
         result = LLVMBuildShl(ctx->ac.builder, src[0], src[1], "");
         break;
      case nir_op_ishr:
         result = LLVMBuildAShr(ctx->ac.builder, src[0], src[1], "");
         break;
      case nir_op_ushr:
         result = LLVMBuildLShr(ctx->ac.builder, src[0], src[1], "");
         break;
      default:
         break;
      }
      break;
   }
   case nir_op_ilt:
      result = emit_int_cmp(&ctx->ac, LLVMIntSLT, src[0], src[1]);
      break;
   case nir_op_ine:
      result = emit_int_cmp(&ctx->ac, LLVMIntNE, src[0], src[1]);
      break;
   case nir_op_ieq:
      result = emit_int_cmp(&ctx->ac, LLVMIntEQ, src[0], src[1]);
      break;
   case nir_op_ige:
      result = emit_int_cmp(&ctx->ac, LLVMIntSGE, src[0], src[1]);
      break;
   case nir_op_ult:
      result = emit_int_cmp(&ctx->ac, LLVMIntULT, src[0], src[1]);
      break;
   case nir_op_uge:
      result = emit_int_cmp(&ctx->ac, LLVMIntUGE, src[0], src[1]);
      break;
   case nir_op_feq:
      result = emit_float_cmp(&ctx->ac, LLVMRealOEQ, src[0], src[1]);
      break;
   case nir_op_fneu:
      result = emit_float_cmp(&ctx->ac, LLVMRealUNE, src[0], src[1]);
      break;
   case nir_op_fequ:
      result = emit_float_cmp(&ctx->ac, LLVMRealUEQ, src[0], src[1]);
      break;
   case nir_op_fneo:
      result = emit_float_cmp(&ctx->ac, LLVMRealONE, src[0], src[1]);
      break;
   case nir_op_flt:
      result = emit_float_cmp(&ctx->ac, LLVMRealOLT, src[0], src[1]);
      break;
   case nir_op_fge:
      result = emit_float_cmp(&ctx->ac, LLVMRealOGE, src[0], src[1]);
      break;
   case nir_op_fltu:
      result = emit_float_cmp(&ctx->ac, LLVMRealULT, src[0], src[1]);
      break;
   case nir_op_fgeu:
      result = emit_float_cmp(&ctx->ac, LLVMRealUGE, src[0], src[1]);
      break;
   case nir_op_funord:
      result = emit_float_cmp(&ctx->ac, LLVMRealUNO, src[0], src[1]);
      break;
   case nir_op_ford:
      result = emit_float_cmp(&ctx->ac, LLVMRealORD, src[0], src[1]);
      break;
   case nir_op_fabs:
      result =
         emit_intrin_1f_param(&ctx->ac, "llvm.fabs", ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_fsat:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = ac_build_fsat(&ctx->ac, src[0],
                             ac_to_float_type(&ctx->ac, def_type));
      break;
   case nir_op_iabs:
      result = emit_iabs(&ctx->ac, src[0]);
      break;
   case nir_op_imax:
      result = ac_build_imax(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_imin:
      result = ac_build_imin(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_umax:
      result = ac_build_umax(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_umin:
      result = ac_build_umin(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_isign:
      result = ac_build_isign(&ctx->ac, src[0]);
      break;
   case nir_op_fsign:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = ac_build_fsign(&ctx->ac, src[0]);
      break;
   case nir_op_ffloor:
      result =
         emit_intrin_1f_param(&ctx->ac, "llvm.floor", ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_ftrunc:
      result =
         emit_intrin_1f_param(&ctx->ac, "llvm.trunc", ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_fceil:
      result =
         emit_intrin_1f_param(&ctx->ac, "llvm.ceil", ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_fround_even:
      result =
         emit_intrin_1f_param(&ctx->ac, "llvm.rint", ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_ffract:
      result = emit_intrin_1f_param_scalar(&ctx->ac, "llvm.amdgcn.fract",
                                           ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_fsin_amd:
   case nir_op_fcos_amd:
      /* before GFX9, v_sin_f32 and v_cos_f32 had a valid input domain of [-256, +256] */
      if (ctx->ac.gfx_level < GFX9)
         src[0] = emit_intrin_1f_param_scalar(&ctx->ac, "llvm.amdgcn.fract",
                                              ac_to_float_type(&ctx->ac, def_type), src[0]);
      result =
         emit_intrin_1f_param(&ctx->ac, instr->op == nir_op_fsin_amd ? "llvm.amdgcn.sin" : "llvm.amdgcn.cos",
                              ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_fsqrt:
      result =
         emit_intrin_1f_param(&ctx->ac, "llvm.sqrt", ac_to_float_type(&ctx->ac, def_type), src[0]);
      LLVMSetMetadata(result, ctx->ac.fpmath_md_kind, ctx->ac.three_md);
      break;
   case nir_op_fexp2:
      result =
         emit_intrin_1f_param(&ctx->ac, "llvm.exp2", ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_flog2:
      result =
         emit_intrin_1f_param(&ctx->ac, "llvm.log2", ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_frsq:
      result = emit_intrin_1f_param_scalar(&ctx->ac, "llvm.amdgcn.rsq",
                                           ac_to_float_type(&ctx->ac, def_type), src[0]);
      if (ctx->abi->clamp_div_by_zero)
         result = ac_build_fmin(&ctx->ac, result,
                                LLVMConstReal(ac_to_float_type(&ctx->ac, def_type), FLT_MAX));
      break;
   case nir_op_frexp_exp:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = ac_build_frexp_exp(&ctx->ac, src[0], ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])));
      if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) == 16)
         result = LLVMBuildSExt(ctx->ac.builder, result, ctx->ac.i32, "");
      break;
   case nir_op_frexp_sig:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = ac_build_frexp_mant(&ctx->ac, src[0], instr->def.bit_size);
      break;
   case nir_op_fmax:
      result = emit_intrin_2f_param(&ctx->ac, "llvm.maxnum", ac_to_float_type(&ctx->ac, def_type),
                                    src[0], src[1]);
      if (ctx->ac.gfx_level < GFX9 && instr->def.bit_size == 32) {
         /* Only pre-GFX9 chips do not flush denorms. */
         result = ac_build_canonicalize(&ctx->ac, result, instr->def.bit_size);
      }
      break;
   case nir_op_fmin:
      result = emit_intrin_2f_param(&ctx->ac, "llvm.minnum", ac_to_float_type(&ctx->ac, def_type),
                                    src[0], src[1]);
      if (ctx->ac.gfx_level < GFX9 && instr->def.bit_size == 32) {
         /* Only pre-GFX9 chips do not flush denorms. */
         result = ac_build_canonicalize(&ctx->ac, result, instr->def.bit_size);
      }
      break;
   case nir_op_ffma:
      /* FMA is slow on gfx6-8, so it shouldn't be used. */
      assert(instr->def.bit_size != 32 || ctx->ac.gfx_level >= GFX9);
      result = emit_intrin_3f_param(&ctx->ac, "llvm.fma", ac_to_float_type(&ctx->ac, def_type),
                                    src[0], src[1], src[2]);
      break;
   case nir_op_ffmaz:
      assert(ctx->ac.gfx_level >= GFX10_3);
      src[0] = ac_to_float(&ctx->ac, src[0]);
      src[1] = ac_to_float(&ctx->ac, src[1]);
      src[2] = ac_to_float(&ctx->ac, src[2]);
      result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.fma.legacy", ctx->ac.f32,
                                  src, 3, 0);
      break;
   case nir_op_ldexp:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      if (ac_get_elem_bits(&ctx->ac, def_type) == 32)
         result = ac_build_intrinsic(&ctx->ac,
                                     LLVM_VERSION_MAJOR >= 18 ? "llvm.ldexp.f32.i32"
                                                              : "llvm.amdgcn.ldexp.f32",
                                     ctx->ac.f32, src, 2, 0);
      else if (ac_get_elem_bits(&ctx->ac, def_type) == 16)
         result = ac_build_intrinsic(&ctx->ac,
                                     LLVM_VERSION_MAJOR >= 18 ? "llvm.ldexp.f16.i32"
                                                              : "llvm.amdgcn.ldexp.f16",
                                     ctx->ac.f16, src, 2, 0);
      else
         result = ac_build_intrinsic(&ctx->ac,
                                     LLVM_VERSION_MAJOR >= 18 ? "llvm.ldexp.f64.i32"
                                                              : "llvm.amdgcn.ldexp.f64",
                                     ctx->ac.f64, src, 2, 0);
      break;
   case nir_op_bfm:
      result = emit_bfm(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_bitfield_select:
      result = emit_bitfield_select(&ctx->ac, src[0], src[1], src[2]);
      break;
   case nir_op_ubfe:
      result = ac_build_bfe(&ctx->ac, src[0], src[1], src[2], false);
      break;
   case nir_op_ibfe:
      result = ac_build_bfe(&ctx->ac, src[0], src[1], src[2], true);
      break;
   case nir_op_bitfield_reverse:
      result = ac_build_bitfield_reverse(&ctx->ac, src[0]);
      break;
   case nir_op_bit_count:
      result = ac_build_bit_count(&ctx->ac, src[0]);
      break;
   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
   case nir_op_vec5:
   case nir_op_vec8:
   case nir_op_vec16:
      for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
         src[i] = ac_to_integer(&ctx->ac, src[i]);
      result = ac_build_gather_values(&ctx->ac, src, num_components);
      break;
   case nir_op_f2i8:
   case nir_op_f2i16:
   case nir_op_f2i32:
   case nir_op_f2i64:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = LLVMBuildFPToSI(ctx->ac.builder, src[0], def_type, "");
      break;
   case nir_op_f2u8:
   case nir_op_f2u16:
   case nir_op_f2u32:
   case nir_op_f2u64:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = LLVMBuildFPToUI(ctx->ac.builder, src[0], def_type, "");
      break;
   case nir_op_i2f16:
   case nir_op_i2f32:
   case nir_op_i2f64:
      result = LLVMBuildSIToFP(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
      break;
   case nir_op_u2f16:
   case nir_op_u2f32:
   case nir_op_u2f64:
      result = LLVMBuildUIToFP(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
      break;
   case nir_op_f2f16_rtz: {
      src[0] = ac_to_float(&ctx->ac, src[0]);

      if (LLVMTypeOf(src[0]) == ctx->ac.f64)
         src[0] = LLVMBuildFPTrunc(ctx->ac.builder, src[0], ctx->ac.f32, "");

      /* Fast path conversion. This only works if NIR is vectorized
       * to vec2 16-bit.
       */
      if (LLVMTypeOf(src[0]) == ctx->ac.v2f32) {
         LLVMValueRef args[] = {
            ac_llvm_extract_elem(&ctx->ac, src[0], 0),
            ac_llvm_extract_elem(&ctx->ac, src[0], 1),
         };
         result = ac_build_cvt_pkrtz_f16(&ctx->ac, args);
         break;
      }

      assert(ac_get_llvm_num_components(src[0]) == 1);
      LLVMValueRef param[2] = {src[0], LLVMGetUndef(ctx->ac.f32)};
      result = ac_build_cvt_pkrtz_f16(&ctx->ac, param);
      result = LLVMBuildExtractElement(ctx->ac.builder, result, ctx->ac.i32_0, "");
      break;
   }
   case nir_op_f2f16:
   case nir_op_f2f16_rtne:
   case nir_op_f2f32:
   case nir_op_f2f64:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) < ac_get_elem_bits(&ctx->ac, def_type))
         result = LLVMBuildFPExt(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
      else
         result =
            LLVMBuildFPTrunc(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
      break;
   case nir_op_u2u8:
   case nir_op_u2u16:
   case nir_op_u2u32:
   case nir_op_u2u64:
      if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) < ac_get_elem_bits(&ctx->ac, def_type))
         result = LLVMBuildZExt(ctx->ac.builder, src[0], def_type, "");
      else
         result = LLVMBuildTrunc(ctx->ac.builder, src[0], def_type, "");
      break;
   case nir_op_i2i8:
   case nir_op_i2i16:
   case nir_op_i2i32:
   case nir_op_i2i64:
      if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) < ac_get_elem_bits(&ctx->ac, def_type))
         result = LLVMBuildSExt(ctx->ac.builder, src[0], def_type, "");
      else
         result = LLVMBuildTrunc(ctx->ac.builder, src[0], def_type, "");
      break;
   case nir_op_bcsel:
      result = emit_bcsel(&ctx->ac, src[0], src[1], src[2]);
      break;
   case nir_op_find_lsb:
      result = ac_find_lsb(&ctx->ac, ctx->ac.i32, src[0]);
      break;
   case nir_op_ufind_msb:
      result = ac_build_umsb(&ctx->ac, src[0], ctx->ac.i32, false);
      break;
   case nir_op_ifind_msb:
      result = ac_build_imsb(&ctx->ac, src[0], ctx->ac.i32);
      break;
   case nir_op_ufind_msb_rev:
      result = ac_build_umsb(&ctx->ac, src[0], ctx->ac.i32, true);
      break;
   case nir_op_ifind_msb_rev:
      result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.sffbh.i32", ctx->ac.i32, &src[0], 1,
                                  0);
      break;
   case nir_op_uclz: {
      LLVMValueRef params[2] = {
         src[0],
         ctx->ac.i1false,
      };
      result = ac_build_intrinsic(&ctx->ac, "llvm.ctlz.i32", ctx->ac.i32, params, 2, 0);
      break;
   }
   case nir_op_uadd_carry:
      result = emit_uint_carry(&ctx->ac, "llvm.uadd.with.overflow.i32", src[0], src[1]);
      break;
   case nir_op_usub_borrow:
      result = emit_uint_carry(&ctx->ac, "llvm.usub.with.overflow.i32", src[0], src[1]);
      break;
   case nir_op_b2f16:
   case nir_op_b2f32:
   case nir_op_b2f64:
      result = emit_b2f(&ctx->ac, src[0], instr->def.bit_size);
      break;
   case nir_op_b2i8:
   case nir_op_b2i16:
   case nir_op_b2i32:
   case nir_op_b2i64:
      result = emit_b2i(&ctx->ac, src[0], instr->def.bit_size);
      break;
   case nir_op_b2b1: /* after loads */
      result = emit_i2b(&ctx->ac, src[0]);
      break;
   case nir_op_b2b16: /* before stores */
      result = LLVMBuildZExt(ctx->ac.builder, src[0], ctx->ac.i16, "");
      break;
   case nir_op_b2b32: /* before stores */
      result = LLVMBuildZExt(ctx->ac.builder, src[0], ctx->ac.i32, "");
      break;
   case nir_op_fquantize2f16:
      result = emit_f2f16(&ctx->ac, src[0]);
      break;
   case nir_op_umul_high:
      result = emit_umul_high(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_imul_high:
      result = emit_imul_high(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_pack_half_2x16_rtz_split:
   case nir_op_pack_half_2x16_split:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      src[1] = ac_to_float(&ctx->ac, src[1]);
      result = LLVMBuildBitCast(ctx->ac.builder,
                                ac_build_cvt_pkrtz_f16(&ctx->ac, src),
                                ctx->ac.i32, "");
      break;
   case nir_op_pack_snorm_2x16:
   case nir_op_pack_unorm_2x16: {
      unsigned bit_size = instr->src[0].src.ssa->bit_size;
      /* Only 16-bit and 32-bit sources are supported. */
      assert(bit_size == 16 || bit_size == 32);

      LLVMValueRef data = src[0];
      /* Workaround for pre-GFX9 GPUs, which don't have the fp16 pknorm instructions. */
      if (bit_size == 16 && ctx->ac.gfx_level < GFX9) {
         data = LLVMBuildFPExt(ctx->ac.builder, data, ctx->ac.v2f32, "");
         bit_size = 32;
      }

      LLVMValueRef (*pack)(struct ac_llvm_context *ctx, LLVMValueRef args[2]);
      if (bit_size == 32) {
         pack = instr->op == nir_op_pack_snorm_2x16 ?
            ac_build_cvt_pknorm_i16 : ac_build_cvt_pknorm_u16;
      } else {
         pack = instr->op == nir_op_pack_snorm_2x16 ?
            ac_build_cvt_pknorm_i16_f16 : ac_build_cvt_pknorm_u16_f16;
      }
      result = emit_pack_2x16(&ctx->ac, data, pack);
      break;
   }
   case nir_op_pack_uint_2x16: {
      LLVMValueRef comp[2];

      comp[0] = LLVMBuildExtractElement(ctx->ac.builder, src[0], ctx->ac.i32_0, "");
      comp[1] = LLVMBuildExtractElement(ctx->ac.builder, src[0], ctx->ac.i32_1, "");

      result = ac_build_cvt_pk_u16(&ctx->ac, comp, 16, false);
      break;
   }
   case nir_op_pack_sint_2x16: {
      LLVMValueRef comp[2];

      comp[0] = LLVMBuildExtractElement(ctx->ac.builder, src[0], ctx->ac.i32_0, "");
      comp[1] = LLVMBuildExtractElement(ctx->ac.builder, src[0], ctx->ac.i32_1, "");

      result = ac_build_cvt_pk_i16(&ctx->ac, comp, 16, false);
      break;
   }
   case nir_op_unpack_half_2x16_split_x: {
      assert(ac_get_llvm_num_components(src[0]) == 1);
      LLVMValueRef tmp = emit_unpack_half_2x16(&ctx->ac, src[0]);
      result = LLVMBuildExtractElement(ctx->ac.builder, tmp, ctx->ac.i32_0, "");
      break;
   }
   case nir_op_unpack_half_2x16_split_y: {
      assert(ac_get_llvm_num_components(src[0]) == 1);
      LLVMValueRef tmp = emit_unpack_half_2x16(&ctx->ac, src[0]);
      result = LLVMBuildExtractElement(ctx->ac.builder, tmp, ctx->ac.i32_1, "");
      break;
   }
   case nir_op_unpack_64_4x16: {
      result = LLVMBuildBitCast(ctx->ac.builder, src[0], ctx->ac.v4i16, "");
      break;
   }

   case nir_op_unpack_64_2x32: {
      result = LLVMBuildBitCast(ctx->ac.builder, src[0],
            ctx->ac.v2i32, "");
      break;
   }
   case nir_op_unpack_64_2x32_split_x: {
      assert(ac_get_llvm_num_components(src[0]) == 1);
      LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, src[0], ctx->ac.v2i32, "");
      result = LLVMBuildExtractElement(ctx->ac.builder, tmp, ctx->ac.i32_0, "");
      break;
   }
   case nir_op_unpack_64_2x32_split_y: {
      assert(ac_get_llvm_num_components(src[0]) == 1);
      LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, src[0], ctx->ac.v2i32, "");
      result = LLVMBuildExtractElement(ctx->ac.builder, tmp, ctx->ac.i32_1, "");
      break;
   }

   case nir_op_pack_64_2x32_split: {
      LLVMValueRef tmp = ac_build_gather_values(&ctx->ac, src, 2);
      result = LLVMBuildBitCast(ctx->ac.builder, tmp, ctx->ac.i64, "");
      break;
   }

   case nir_op_pack_32_4x8: {
      result = LLVMBuildBitCast(ctx->ac.builder, src[0],
            ctx->ac.i32, "");
      break;
   }
   case nir_op_pack_32_2x16_split: {
      LLVMValueRef tmp = ac_build_gather_values(&ctx->ac, src, 2);
      result = LLVMBuildBitCast(ctx->ac.builder, tmp, ctx->ac.i32, "");
      break;
   }

   case nir_op_unpack_32_4x8:
      result = LLVMBuildBitCast(ctx->ac.builder, src[0], ctx->ac.v4i8, "");
      break;
   case nir_op_unpack_32_2x16: {
      result = LLVMBuildBitCast(ctx->ac.builder, src[0],
            ctx->ac.v2i16, "");
      break;
   }
   case nir_op_unpack_32_2x16_split_x: {
      LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, src[0], ctx->ac.v2i16, "");
      result = LLVMBuildExtractElement(ctx->ac.builder, tmp, ctx->ac.i32_0, "");
      break;
   }
   case nir_op_unpack_32_2x16_split_y: {
      LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, src[0], ctx->ac.v2i16, "");
      result = LLVMBuildExtractElement(ctx->ac.builder, tmp, ctx->ac.i32_1, "");
      break;
   }

   case nir_op_cube_amd: {
      src[0] = ac_to_float(&ctx->ac, src[0]);
      LLVMValueRef results[4];
      LLVMValueRef in[3];
      for (unsigned chan = 0; chan < 3; chan++)
         in[chan] = ac_llvm_extract_elem(&ctx->ac, src[0], chan);
      results[0] = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubetc", ctx->ac.f32, in, 3, 0);
      results[1] = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubesc", ctx->ac.f32, in, 3, 0);
      results[2] = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubema", ctx->ac.f32, in, 3, 0);
      results[3] = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubeid", ctx->ac.f32, in, 3, 0);
      result = ac_build_gather_values(&ctx->ac, results, 4);
      break;
   }

   case nir_op_extract_u8:
   case nir_op_extract_i8:
   case nir_op_extract_u16:
   case nir_op_extract_i16: {
      bool is_signed = instr->op == nir_op_extract_i16 || instr->op == nir_op_extract_i8;
      unsigned size = instr->op == nir_op_extract_u8 || instr->op == nir_op_extract_i8 ? 8 : 16;
      LLVMValueRef offset = LLVMConstInt(LLVMTypeOf(src[0]), nir_src_as_uint(instr->src[1].src) * size, false);
      result = LLVMBuildLShr(ctx->ac.builder, src[0], offset, "");
      result = LLVMBuildTrunc(ctx->ac.builder, result, LLVMIntTypeInContext(ctx->ac.context, size), "");
      if (is_signed)
         result = LLVMBuildSExt(ctx->ac.builder, result, LLVMTypeOf(src[0]), "");
      else
         result = LLVMBuildZExt(ctx->ac.builder, result, LLVMTypeOf(src[0]), "");
      break;
   }

   case nir_op_insert_u8:
   case nir_op_insert_u16: {
      unsigned size = instr->op == nir_op_insert_u8 ? 8 : 16;
      LLVMValueRef offset = LLVMConstInt(LLVMTypeOf(src[0]), nir_src_as_uint(instr->src[1].src) * size, false);
      LLVMValueRef mask = LLVMConstInt(LLVMTypeOf(src[0]), u_bit_consecutive(0, size), false);
      result = LLVMBuildShl(ctx->ac.builder, LLVMBuildAnd(ctx->ac.builder, src[0], mask, ""), offset, "");
      break;
   }

   case nir_op_sdot_4x8_iadd:
   case nir_op_sdot_4x8_iadd_sat: {
      if (ctx->ac.gfx_level >= GFX11) {
         result = ac_build_sudot_4x8(&ctx->ac, src[0], src[1], src[2],
                                     instr->op == nir_op_sdot_4x8_iadd_sat, 0x3);
      } else {
         const char *name = "llvm.amdgcn.sdot4";
         src[3] = LLVMConstInt(ctx->ac.i1, instr->op == nir_op_sdot_4x8_iadd_sat, false);
         result = ac_build_intrinsic(&ctx->ac, name, def_type, src, 4, 0);
      }
      break;
   }
   case nir_op_sudot_4x8_iadd:
   case nir_op_sudot_4x8_iadd_sat: {
      result = ac_build_sudot_4x8(&ctx->ac, src[0], src[1], src[2],
                                  instr->op == nir_op_sudot_4x8_iadd_sat, 0x1);
      break;
   }
   case nir_op_udot_4x8_uadd:
   case nir_op_udot_4x8_uadd_sat: {
      const char *name = "llvm.amdgcn.udot4";
      src[3] = LLVMConstInt(ctx->ac.i1, instr->op == nir_op_udot_4x8_uadd_sat, false);
      result = ac_build_intrinsic(&ctx->ac, name, def_type, src, 4, 0);
      break;
   }

   case nir_op_sdot_2x16_iadd:
   case nir_op_udot_2x16_uadd:
   case nir_op_sdot_2x16_iadd_sat:
   case nir_op_udot_2x16_uadd_sat: {
      const char *name = instr->op == nir_op_sdot_2x16_iadd ||
                         instr->op == nir_op_sdot_2x16_iadd_sat
                         ? "llvm.amdgcn.sdot2" : "llvm.amdgcn.udot2";
      src[0] = LLVMBuildBitCast(ctx->ac.builder, src[0], ctx->ac.v2i16, "");
      src[1] = LLVMBuildBitCast(ctx->ac.builder, src[1], ctx->ac.v2i16, "");
      src[3] = LLVMConstInt(ctx->ac.i1, instr->op == nir_op_sdot_2x16_iadd_sat ||
                                        instr->op == nir_op_udot_2x16_uadd_sat, false);
      result = ac_build_intrinsic(&ctx->ac, name, def_type, src, 4, 0);
      break;
   }

   case nir_op_msad_4x8:
      result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.msad.u8", ctx->ac.i32,
                                  (LLVMValueRef[]){src[1], src[0], src[2]}, 3, 0);
      break;

   case nir_op_mqsad_4x8:
      src[1] = LLVMBuildBitCast(ctx->ac.builder, src[1], ctx->ac.i64, "");
      result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.mqsad.u32.u8", ctx->ac.v4i32,
                                  (LLVMValueRef[]){src[1], src[0], src[2]}, 3, 0);
      break;

   case nir_op_shfr:
      result = ac_build_intrinsic(&ctx->ac, "llvm.fshr.i32", ctx->ac.i32,
                                  (LLVMValueRef[]){src[0], src[1], src[2]}, 3, 0);
      break;

   default:
      fprintf(stderr, "Unknown NIR alu instr: ");
      nir_print_instr(&instr->instr, stderr);
      fprintf(stderr, "\n");
      return false;
   }

   if (result) {
      LLVMTypeKind type_kind = LLVMGetTypeKind(LLVMTypeOf(result));
      bool is_float = type_kind == LLVMHalfTypeKind || type_kind == LLVMFloatTypeKind || type_kind == LLVMDoubleTypeKind;
      if (ctx->ac.float_mode == AC_FLOAT_MODE_DENORM_FLUSH_TO_ZERO && is_float)
         result = ac_build_canonicalize(&ctx->ac, result, instr->def.bit_size);

      result = ac_to_integer_or_pointer(&ctx->ac, result);
      ctx->ssa_defs[instr->def.index] = result;
   }
   return true;
}

static bool visit_load_const(struct ac_nir_context *ctx, const nir_load_const_instr *instr)
{
   LLVMValueRef values[16], value = NULL;
   LLVMTypeRef element_type = LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);

   for (unsigned i = 0; i < instr->def.num_components; ++i) {
      switch (instr->def.bit_size) {
      case 1:
         values[i] = LLVMConstInt(element_type, instr->value[i].b, false);
         break;
      case 8:
         values[i] = LLVMConstInt(element_type, instr->value[i].u8, false);
         break;
      case 16:
         values[i] = LLVMConstInt(element_type, instr->value[i].u16, false);
         break;
      case 32:
         values[i] = LLVMConstInt(element_type, instr->value[i].u32, false);
         break;
      case 64:
         values[i] = LLVMConstInt(element_type, instr->value[i].u64, false);
         break;
      default:
         fprintf(stderr, "unsupported nir load_const bit_size: %d\n", instr->def.bit_size);
         return false;
      }
   }
   if (instr->def.num_components > 1) {
      value = LLVMConstVector(values, instr->def.num_components);
   } else
      value = values[0];

   ctx->ssa_defs[instr->def.index] = value;
   return true;
}
1301 
1302 /* Gather4 should follow the same rules as bilinear filtering, but the hardware
1303  * incorrectly forces nearest filtering if the texture format is integer.
1304  * The only effect it has on Gather4, which always returns 4 texels for
1305  * bilinear filtering, is that the final coordinates are off by 0.5 of
1306  * the texel size.
1307  *
1308  * The workaround is to subtract 0.5 from the unnormalized coordinates,
1309  * or (0.5 / size) from the normalized coordinates.
1310  *
1311  * However, cube textures with 8_8_8_8 data formats require a different
1312  * workaround of overriding the num format to USCALED/SSCALED. This would lose
1313  * precision in 32-bit data formats, so it needs to be applied dynamically at
1314  * runtime. In this case, return an i1 value that indicates whether the
1315  * descriptor was overridden (and hence a fixup of the sampler result is needed).
1316  */
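/* Illustrative example: for a normalized gather from a 64x64 2D texture, the
 * code below computes half_texel = -0.5 / 64 per axis and adds it to the
 * coordinates, nudging the sample point back by half a texel.
 */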
1317 static LLVMValueRef lower_gather4_integer(struct ac_llvm_context *ctx, struct ac_image_args *args,
1318                                           const nir_tex_instr *instr)
1319 {
1320    nir_alu_type stype = nir_alu_type_get_base_type(instr->dest_type);
1321    LLVMValueRef wa_8888 = NULL;
1322    LLVMValueRef half_texel[2];
1323    LLVMValueRef result;
1324 
1325    assert(stype == nir_type_int || stype == nir_type_uint);
1326 
1327    if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
1328       LLVMValueRef formats;
1329       LLVMValueRef data_format;
1330       LLVMValueRef wa_formats;
1331 
1332       formats = LLVMBuildExtractElement(ctx->builder, args->resource, ctx->i32_1, "");
1333 
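      /* DATA_FORMAT lives in bits [25:20] of dword 1 of the image descriptor;
       * NUM_FORMAT in bits [29:26] is what gets patched below.
       */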
1334       data_format = LLVMBuildLShr(ctx->builder, formats, LLVMConstInt(ctx->i32, 20, false), "");
1335       data_format =
1336          LLVMBuildAnd(ctx->builder, data_format, LLVMConstInt(ctx->i32, (1u << 6) - 1, false), "");
1337       wa_8888 = LLVMBuildICmp(ctx->builder, LLVMIntEQ, data_format,
1338                               LLVMConstInt(ctx->i32, V_008F14_IMG_DATA_FORMAT_8_8_8_8, false), "");
1339 
1340       uint32_t wa_num_format = stype == nir_type_uint
1341                                   ? S_008F14_NUM_FORMAT(V_008F14_IMG_NUM_FORMAT_USCALED)
1342                                   : S_008F14_NUM_FORMAT(V_008F14_IMG_NUM_FORMAT_SSCALED);
1343       wa_formats = LLVMBuildAnd(ctx->builder, formats,
1344                                 LLVMConstInt(ctx->i32, C_008F14_NUM_FORMAT, false), "");
1345       wa_formats =
1346          LLVMBuildOr(ctx->builder, wa_formats, LLVMConstInt(ctx->i32, wa_num_format, false), "");
1347 
1348       formats = LLVMBuildSelect(ctx->builder, wa_8888, wa_formats, formats, "");
1349       args->resource =
1350          LLVMBuildInsertElement(ctx->builder, args->resource, formats, ctx->i32_1, "");
1351    }
1352 
1353    if (instr->sampler_dim == GLSL_SAMPLER_DIM_RECT) {
1354       assert(!wa_8888);
1355       half_texel[0] = half_texel[1] = LLVMConstReal(ctx->f32, -0.5);
1356    } else {
1357       struct ac_image_args resinfo = {0};
1358       LLVMBasicBlockRef bbs[2];
1359 
1360       LLVMValueRef unnorm = NULL;
1361       LLVMValueRef default_offset = ctx->f32_0;
1362       if (instr->sampler_dim == GLSL_SAMPLER_DIM_2D && !instr->is_array) {
1363          /* In Vulkan, whether the sampler uses unnormalized
1364           * coordinates or not is a dynamic property of the
1365           * sampler. Hence, to figure out whether or not we
1366           * need to divide by the texture size, we need to test
1367           * the sampler at runtime. This tests the bit set by
1368           * radv_init_sampler().
1369           */
1370          LLVMValueRef sampler0 =
1371             LLVMBuildExtractElement(ctx->builder, args->sampler, ctx->i32_0, "");
1372          sampler0 = LLVMBuildLShr(ctx->builder, sampler0, LLVMConstInt(ctx->i32, 15, false), "");
1373          sampler0 = LLVMBuildAnd(ctx->builder, sampler0, ctx->i32_1, "");
1374          unnorm = LLVMBuildICmp(ctx->builder, LLVMIntEQ, sampler0, ctx->i32_1, "");
1375          default_offset = LLVMConstReal(ctx->f32, -0.5);
1376       }
1377 
1378       bbs[0] = LLVMGetInsertBlock(ctx->builder);
1379       if (wa_8888 || unnorm) {
1380          assert(!(wa_8888 && unnorm));
1381          LLVMValueRef not_needed = wa_8888 ? wa_8888 : unnorm;
1382          /* Skip the texture size query entirely if we don't need it. */
1383          ac_build_ifcc(ctx, LLVMBuildNot(ctx->builder, not_needed, ""), 2000);
1384          bbs[1] = LLVMGetInsertBlock(ctx->builder);
1385       }
1386 
1387       /* Query the texture size. */
1388       resinfo.dim = ac_get_sampler_dim(ctx->gfx_level, instr->sampler_dim, instr->is_array);
1389       resinfo.opcode = ac_image_get_resinfo;
1390       resinfo.dmask = 0xf;
1391       resinfo.lod = ctx->i32_0;
1392       resinfo.resource = args->resource;
1393       resinfo.attributes = AC_ATTR_INVARIANT_LOAD;
1394       LLVMValueRef size = ac_build_image_opcode(ctx, &resinfo);
1395 
1396       /* Compute -0.5 / size. */
1397       for (unsigned c = 0; c < 2; c++) {
1398          half_texel[c] =
1399             LLVMBuildExtractElement(ctx->builder, size, LLVMConstInt(ctx->i32, c, 0), "");
1400          half_texel[c] = LLVMBuildUIToFP(ctx->builder, half_texel[c], ctx->f32, "");
1401          half_texel[c] = ac_build_fdiv(ctx, ctx->f32_1, half_texel[c]);
1402          half_texel[c] =
1403             LLVMBuildFMul(ctx->builder, half_texel[c], LLVMConstReal(ctx->f32, -0.5), "");
1404       }
1405 
1406       if (wa_8888 || unnorm) {
1407          ac_build_endif(ctx, 2000);
1408 
1409          for (unsigned c = 0; c < 2; c++) {
1410             LLVMValueRef values[2] = {default_offset, half_texel[c]};
1411             half_texel[c] = ac_build_phi(ctx, ctx->f32, 2, values, bbs);
1412          }
1413       }
1414    }
1415 
1416    for (unsigned c = 0; c < 2; c++) {
1417       LLVMValueRef tmp;
1418       tmp = LLVMBuildBitCast(ctx->builder, args->coords[c], ctx->f32, "");
1419       args->coords[c] = LLVMBuildFAdd(ctx->builder, tmp, half_texel[c], "");
1420    }
1421 
1422    args->attributes = AC_ATTR_INVARIANT_LOAD;
1423    result = ac_build_image_opcode(ctx, args);
1424 
1425    if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
1426       LLVMValueRef tmp, tmp2;
1427 
1428       /* if the cube workaround is in place, f2i the result. */
1429       for (unsigned c = 0; c < 4; c++) {
1430          tmp = LLVMBuildExtractElement(ctx->builder, result, LLVMConstInt(ctx->i32, c, false), "");
1431          if (stype == nir_type_uint)
1432             tmp2 = LLVMBuildFPToUI(ctx->builder, tmp, ctx->i32, "");
1433          else
1434             tmp2 = LLVMBuildFPToSI(ctx->builder, tmp, ctx->i32, "");
1435          tmp = LLVMBuildBitCast(ctx->builder, tmp, ctx->i32, "");
1436          tmp2 = LLVMBuildBitCast(ctx->builder, tmp2, ctx->i32, "");
1437          tmp = LLVMBuildSelect(ctx->builder, wa_8888, tmp2, tmp, "");
1438          tmp = LLVMBuildBitCast(ctx->builder, tmp, ctx->f32, "");
1439          result =
1440             LLVMBuildInsertElement(ctx->builder, result, tmp, LLVMConstInt(ctx->i32, c, false), "");
1441       }
1442    }
1443    return result;
1444 }
1445 
1446 static LLVMValueRef build_tex_intrinsic(struct ac_nir_context *ctx, const nir_tex_instr *instr,
1447                                         struct ac_image_args *args)
1448 {
1449    assert((!args->tfe || !args->d16) && "unsupported");
1450 
1451    if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF) {
1452       unsigned mask = nir_def_components_read(&instr->def);
1453 
1454       /* Buffers don't support A16. */
1455       if (args->a16)
1456          args->coords[0] = LLVMBuildZExt(ctx->ac.builder, args->coords[0], ctx->ac.i32, "");
1457 
1458       return ac_build_buffer_load_format(&ctx->ac, args->resource, args->coords[0], ctx->ac.i32_0,
1459                                          util_last_bit(mask), 0, true,
1460                                          instr->def.bit_size == 16,
1461                                          args->tfe);
1462    }
1463 
1464    args->opcode = ac_image_sample;
1465 
1466    switch (instr->op) {
1467    case nir_texop_txf:
1468    case nir_texop_txf_ms:
1469       args->opcode = args->level_zero || instr->sampler_dim == GLSL_SAMPLER_DIM_MS
1470                         ? ac_image_load
1471                         : ac_image_load_mip;
1472       args->level_zero = false;
1473       break;
1474    case nir_texop_txs:
1475    case nir_texop_query_levels:
1476    case nir_texop_texture_samples:
1477       assert(!"should have been lowered");
1478       break;
1479    case nir_texop_tex:
1480       if (ctx->stage != MESA_SHADER_FRAGMENT &&
1481           (!gl_shader_stage_is_compute(ctx->stage) ||
1482            ctx->info->derivative_group == DERIVATIVE_GROUP_NONE)) {
1483          assert(!args->lod);
1484          args->level_zero = true;
1485       }
1486       break;
1487    case nir_texop_tg4:
1488       args->opcode = ac_image_gather4;
1489       if (!args->lod && !instr->is_gather_implicit_lod)
1490          args->level_zero = true;
1491       /* GFX11 supports implicit LOD, but the extension is unsupported. */
1492       assert(args->level_zero || ctx->ac.gfx_level < GFX11);
1493       break;
1494    case nir_texop_lod:
1495       args->opcode = ac_image_get_lod;
1496       break;
1497    case nir_texop_fragment_fetch_amd:
1498    case nir_texop_fragment_mask_fetch_amd:
1499       args->opcode = ac_image_load;
1500       args->level_zero = false;
1501       break;
1502    default:
1503       break;
1504    }
1505 
1506    /* MI200 doesn't have image_sample_lz, but image_sample behaves like lz. */
1507    if (!ctx->ac.info->has_3d_cube_border_color_mipmap)
1508       args->level_zero = false;
1509 
1510    if (instr->op == nir_texop_tg4 && ctx->ac.gfx_level <= GFX8 &&
1511        (instr->dest_type & (nir_type_int | nir_type_uint))) {
1512       return lower_gather4_integer(&ctx->ac, args, instr);
1513    }
1514 
1515    args->attributes = AC_ATTR_INVARIANT_LOAD;
1516    bool cs_derivs =
1517       gl_shader_stage_is_compute(ctx->stage) && ctx->info->derivative_group != DERIVATIVE_GROUP_NONE;
1518    if (ctx->stage == MESA_SHADER_FRAGMENT || cs_derivs) {
1519       /* Prevent texture instructions with implicit derivatives from being
1520        * sunk into branches. */
1521       switch (instr->op) {
1522       case nir_texop_tex:
1523       case nir_texop_txb:
1524       case nir_texop_lod:
1525          args->attributes |= AC_ATTR_CONVERGENT;
1526          break;
1527       default:
1528          break;
1529       }
1530    }
1531 
1532    return ac_build_image_opcode(&ctx->ac, args);
1533 }
1534 
1535 static LLVMValueRef visit_get_ssbo_size(struct ac_nir_context *ctx,
1536                                         const nir_intrinsic_instr *instr)
1537 {
1538    bool non_uniform = nir_intrinsic_access(instr) & ACCESS_NON_UNIFORM;
1539 
1540    LLVMValueRef rsrc = get_src(ctx, instr->src[0]);
1541    if (ctx->abi->load_ssbo)
1542       rsrc = ctx->abi->load_ssbo(ctx->abi, rsrc, false, non_uniform);
1543 
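   /* The buffer size (NUM_RECORDS) is dword 2 of the V# buffer descriptor. */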
1544    return LLVMBuildExtractElement(ctx->ac.builder, rsrc, LLVMConstInt(ctx->ac.i32, 2, false), "");
1545 }
1546 
1547 static LLVMValueRef extract_vector_range(struct ac_llvm_context *ctx, LLVMValueRef src,
1548                                          unsigned start, unsigned count)
1549 {
1550    LLVMValueRef mask[] = {ctx->i32_0, ctx->i32_1, LLVMConstInt(ctx->i32, 2, false),
1551                           LLVMConstInt(ctx->i32, 3, false)};
1552 
1553    unsigned src_elements = ac_get_llvm_num_components(src);
1554 
1555    if (count == src_elements) {
1556       assert(start == 0);
1557       return src;
1558    } else if (count == 1) {
1559       assert(start < src_elements);
1560       return LLVMBuildExtractElement(ctx->builder, src, mask[start], "");
1561    } else {
1562       assert(start + count <= src_elements);
1563       assert(count <= 4);
1564       LLVMValueRef swizzle = LLVMConstVector(&mask[start], count);
1565       return LLVMBuildShuffleVector(ctx->builder, src, src, swizzle, "");
1566    }
1567 }
1568 
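/* A "waterfall" loop handles divergent (non-uniform) descriptors: it
 * repeatedly picks the descriptor of the first active lane, executes the
 * operation for all lanes that share that descriptor, and masks those lanes
 * off until every lane has been serviced.
 */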
1569 static LLVMValueRef enter_waterfall_ssbo(struct ac_nir_context *ctx, struct waterfall_context *wctx,
1570                                          const nir_intrinsic_instr *instr, nir_src src)
1571 {
1572    return enter_waterfall(ctx, wctx, get_src(ctx, src),
1573                           nir_intrinsic_access(instr) & ACCESS_NON_UNIFORM);
1574 }
1575 
1576 static void visit_store_ssbo(struct ac_nir_context *ctx, nir_intrinsic_instr *instr)
1577 {
1578    LLVMValueRef src_data = get_src(ctx, instr->src[0]);
1579    int elem_size_bytes = ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src_data)) / 8;
1580    unsigned writemask = nir_intrinsic_write_mask(instr);
1581    enum gl_access_qualifier access = ac_get_mem_access_flags(instr);
1582 
1583    struct waterfall_context wctx;
1584    LLVMValueRef rsrc_base = enter_waterfall_ssbo(ctx, &wctx, instr, instr->src[1]);
1585 
1586    LLVMValueRef rsrc = ctx->abi->load_ssbo ?
1587       ctx->abi->load_ssbo(ctx->abi, rsrc_base, true, false) : rsrc_base;
1588 
1589    LLVMValueRef base_data = src_data;
1590    base_data = ac_trim_vector(&ctx->ac, base_data, instr->num_components);
1591    LLVMValueRef base_offset = get_src(ctx, instr->src[2]);
1592 
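   /* Split the writemask into contiguous runs. For example, with 4-byte
    * elements, a writemask of 0b1011 becomes one 8-byte store at offset 0
    * followed by one 4-byte store at offset 12.
    */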
1593    while (writemask) {
1594       int start, count;
1595       LLVMValueRef data, offset;
1596       LLVMTypeRef data_type;
1597 
1598       u_bit_scan_consecutive_range(&writemask, &start, &count);
1599 
1600       if (count == 3 && elem_size_bytes != 4) {
1601          writemask |= 1 << (start + 2);
1602          count = 2;
1603       }
1604       int num_bytes = count * elem_size_bytes; /* size of this store in bytes */
1605 
1606       /* We can only store 4 dwords at a time; anything larger
1607        * can only happen for 64-bit vectors. */
1608       if (num_bytes > 16) {
1609          writemask |= ((1u << (count - 2)) - 1u) << (start + 2);
1610          count = 2;
1611          num_bytes = 16;
1612       }
1613 
1614       /* Check the alignment of 16-bit stores. */
1615       if (elem_size_bytes == 2 && num_bytes > 2 && (start % 2) == 1) {
1616          writemask |= ((1u << (count - 1)) - 1u) << (start + 1);
1617          count = 1;
1618          num_bytes = 2;
1619       }
1620 
1621       /* Due to alignment issues, split stores of 8-bit/16-bit
1622        * vectors.
1623        */
1624       if (ctx->ac.gfx_level == GFX6 && count > 1 && elem_size_bytes < 4) {
1625          writemask |= ((1u << (count - 1)) - 1u) << (start + 1);
1626          count = 1;
1627          num_bytes = elem_size_bytes;
1628       }
1629 
1630       data = extract_vector_range(&ctx->ac, base_data, start, count);
1631 
1632       offset = LLVMBuildAdd(ctx->ac.builder, base_offset,
1633                             LLVMConstInt(ctx->ac.i32, start * elem_size_bytes, false), "");
1634 
1635       if (num_bytes == 1) {
1636          ac_build_buffer_store_byte(&ctx->ac, rsrc, data, offset, ctx->ac.i32_0, access);
1637       } else if (num_bytes == 2) {
1638          ac_build_buffer_store_short(&ctx->ac, rsrc, data, offset, ctx->ac.i32_0, access);
1639       } else {
1640          switch (num_bytes) {
1641          case 16: /* v4f32 */
1642             data_type = ctx->ac.v4f32;
1643             break;
1644          case 12: /* v3f32 */
1645             data_type = ctx->ac.v3f32;
1646             break;
1647          case 8: /* v2f32 */
1648             data_type = ctx->ac.v2f32;
1649             break;
1650          case 4: /* f32 */
1651             data_type = ctx->ac.f32;
1652             break;
1653          default:
1654             unreachable("Malformed vector store.");
1655          }
1656          data = LLVMBuildBitCast(ctx->ac.builder, data, data_type, "");
1657 
1658          ac_build_buffer_store_dword(&ctx->ac, rsrc, data, NULL, offset,
1659                                      ctx->ac.i32_0, access);
1660       }
1661    }
1662 
1663    exit_waterfall(ctx, &wctx, NULL);
1664 }
1665 
1666 static LLVMValueRef emit_ssbo_comp_swap_64(struct ac_nir_context *ctx, LLVMValueRef descriptor,
1667                                            LLVMValueRef offset, LLVMValueRef compare,
1668                                            LLVMValueRef exchange, bool image)
1669 {
1670    LLVMBasicBlockRef start_block = NULL, then_block = NULL;
1671    if (ctx->abi->robust_buffer_access || image) {
1672       LLVMValueRef size = ac_llvm_extract_elem(&ctx->ac, descriptor, 2);
1673 
1674       LLVMValueRef cond = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, offset, size, "");
1675       start_block = LLVMGetInsertBlock(ctx->ac.builder);
1676 
1677       ac_build_ifcc(&ctx->ac, cond, -1);
1678 
1679       then_block = LLVMGetInsertBlock(ctx->ac.builder);
1680    }
1681 
1682    if (image)
1683       offset = LLVMBuildMul(ctx->ac.builder, offset, LLVMConstInt(ctx->ac.i32, 8, false), "");
1684 
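   /* Reconstruct the 64-bit base address from the descriptor: dword 0 holds
    * bits [31:0] and the low 16 bits of dword 1 hold bits [47:32], which are
    * sign-extended here to form a canonical 64-bit pointer.
    */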
1685    LLVMValueRef ptr_parts[2] = {
1686       ac_llvm_extract_elem(&ctx->ac, descriptor, 0),
1687       LLVMBuildAnd(ctx->ac.builder, ac_llvm_extract_elem(&ctx->ac, descriptor, 1),
1688                    LLVMConstInt(ctx->ac.i32, 65535, 0), "")};
1689 
1690    ptr_parts[1] = LLVMBuildTrunc(ctx->ac.builder, ptr_parts[1], ctx->ac.i16, "");
1691    ptr_parts[1] = LLVMBuildSExt(ctx->ac.builder, ptr_parts[1], ctx->ac.i32, "");
1692 
1693    offset = LLVMBuildZExt(ctx->ac.builder, offset, ctx->ac.i64, "");
1694 
1695    LLVMValueRef ptr = ac_build_gather_values(&ctx->ac, ptr_parts, 2);
1696    ptr = LLVMBuildBitCast(ctx->ac.builder, ptr, ctx->ac.i64, "");
1697    ptr = LLVMBuildAdd(ctx->ac.builder, ptr, offset, "");
1698    ptr = LLVMBuildIntToPtr(ctx->ac.builder, ptr, LLVMPointerType(ctx->ac.i64, AC_ADDR_SPACE_GLOBAL),
1699                            "");
1700 
1701    LLVMValueRef result =
1702       ac_build_atomic_cmp_xchg(&ctx->ac, ptr, compare, exchange, "singlethread-one-as");
1703    result = LLVMBuildExtractValue(ctx->ac.builder, result, 0, "");
1704 
1705    if (ctx->abi->robust_buffer_access || image) {
1706       ac_build_endif(&ctx->ac, -1);
1707 
1708       LLVMBasicBlockRef incoming_blocks[2] = {
1709          start_block,
1710          then_block,
1711       };
1712 
1713       LLVMValueRef incoming_values[2] = {
1714          ctx->ac.i64_0,
1715          result,
1716       };
1717       LLVMValueRef ret = LLVMBuildPhi(ctx->ac.builder, ctx->ac.i64, "");
1718       LLVMAddIncoming(ret, incoming_values, incoming_blocks, 2);
1719       return ret;
1720    } else {
1721       return result;
1722    }
1723 }
1724 
1725 static const char *
1726 translate_atomic_op_str(nir_atomic_op op)
1727 {
1728    switch (op) {
1729    case nir_atomic_op_iadd:     return "add";
1730    case nir_atomic_op_imin:     return "smin";
1731    case nir_atomic_op_umin:     return "umin";
1732    case nir_atomic_op_imax:     return "smax";
1733    case nir_atomic_op_umax:     return "umax";
1734    case nir_atomic_op_iand:     return "and";
1735    case nir_atomic_op_ior:      return "or";
1736    case nir_atomic_op_ixor:     return "xor";
1737    case nir_atomic_op_fadd:     return "fadd";
1738    case nir_atomic_op_fmin:     return "fmin";
1739    case nir_atomic_op_fmax:     return "fmax";
1740    case nir_atomic_op_xchg:     return "swap";
1741    case nir_atomic_op_cmpxchg:  return "cmpswap";
1742    case nir_atomic_op_inc_wrap: return "inc";
1743    case nir_atomic_op_dec_wrap: return "dec";
1744    case nir_atomic_op_ordered_add_gfx12_amd: return "ordered.add";
1745    default: abort();
1746    }
1747 }
1748 
1749 static LLVMAtomicRMWBinOp
1750 translate_atomic_op(nir_atomic_op op)
1751 {
1752    switch (op) {
1753    case nir_atomic_op_iadd: return LLVMAtomicRMWBinOpAdd;
1754    case nir_atomic_op_xchg: return LLVMAtomicRMWBinOpXchg;
1755    case nir_atomic_op_iand: return LLVMAtomicRMWBinOpAnd;
1756    case nir_atomic_op_ior:  return LLVMAtomicRMWBinOpOr;
1757    case nir_atomic_op_ixor: return LLVMAtomicRMWBinOpXor;
1758    case nir_atomic_op_umin: return LLVMAtomicRMWBinOpUMin;
1759    case nir_atomic_op_umax: return LLVMAtomicRMWBinOpUMax;
1760    case nir_atomic_op_imin: return LLVMAtomicRMWBinOpMin;
1761    case nir_atomic_op_imax: return LLVMAtomicRMWBinOpMax;
1762    case nir_atomic_op_fadd: return LLVMAtomicRMWBinOpFAdd;
1763    default: unreachable("Unexpected atomic");
1764    }
1765 }
1766 
1767 static LLVMValueRef visit_atomic_ssbo(struct ac_nir_context *ctx, nir_intrinsic_instr *instr)
1768 {
1769    nir_atomic_op nir_op = nir_intrinsic_atomic_op(instr);
1770    const char *op = translate_atomic_op_str(nir_op);
1771    bool is_float = nir_atomic_op_type(nir_op) == nir_type_float;
1772 
1773    LLVMTypeRef return_type = LLVMTypeOf(get_src(ctx, instr->src[2]));
1774    char name[64], type[8];
1775    LLVMValueRef params[6], descriptor;
1776    LLVMValueRef result;
1777    int arg_count = 0;
1778 
1779    struct waterfall_context wctx;
1780    LLVMValueRef rsrc_base = enter_waterfall_ssbo(ctx, &wctx, instr, instr->src[0]);
1781 
1782    descriptor = ctx->abi->load_ssbo ?
1783       ctx->abi->load_ssbo(ctx->abi, rsrc_base, true, false) : rsrc_base;
1784 
1785    if (instr->intrinsic == nir_intrinsic_ssbo_atomic_swap && return_type == ctx->ac.i64) {
1786       result = emit_ssbo_comp_swap_64(ctx, descriptor, get_src(ctx, instr->src[1]),
1787                                       get_src(ctx, instr->src[2]), get_src(ctx, instr->src[3]), false);
1788    } else {
1789       LLVMValueRef data = ac_llvm_extract_elem(&ctx->ac, get_src(ctx, instr->src[2]), 0);
1790 
1791       if (instr->intrinsic == nir_intrinsic_ssbo_atomic_swap) {
1792          params[arg_count++] = ac_llvm_extract_elem(&ctx->ac, get_src(ctx, instr->src[3]), 0);
1793       }
1794       if (is_float) {
1795          data = ac_to_float(&ctx->ac, data);
1796          return_type = LLVMTypeOf(data);
1797       }
1798 
1799       unsigned cache_flags =
1800          ac_get_hw_cache_flags(ctx->ac.gfx_level,
1801 			       ac_get_mem_access_flags(instr) | ACCESS_TYPE_ATOMIC).value;
1802 
1803       params[arg_count++] = data;
1804       params[arg_count++] = descriptor;
1805       params[arg_count++] = get_src(ctx, instr->src[1]); /* voffset */
1806       params[arg_count++] = ctx->ac.i32_0;               /* soffset */
1807       params[arg_count++] = LLVMConstInt(ctx->ac.i32, cache_flags, 0);
1808 
1809       ac_build_type_name_for_intr(return_type, type, sizeof(type));
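      /* E.g. a 32-bit iadd becomes "llvm.amdgcn.raw.buffer.atomic.add.i32". */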
1810       snprintf(name, sizeof(name), "llvm.amdgcn.raw.buffer.atomic.%s.%s", op, type);
1811 
1812       result = ac_build_intrinsic(&ctx->ac, name, return_type, params, arg_count, 0);
1813 
1814       if (is_float) {
1815          result = ac_to_integer(&ctx->ac, result);
1816       }
1817    }
1818 
1819    return exit_waterfall(ctx, &wctx, result);
1820 }
1821 
1822 static LLVMValueRef visit_load_buffer(struct ac_nir_context *ctx, nir_intrinsic_instr *instr)
1823 {
1824    struct waterfall_context wctx;
1825    LLVMValueRef rsrc_base = enter_waterfall_ssbo(ctx, &wctx, instr, instr->src[0]);
1826 
1827    int elem_size_bytes = instr->def.bit_size / 8;
1828    int num_components = instr->num_components;
1829    enum gl_access_qualifier access = ac_get_mem_access_flags(instr);
1830 
1831    LLVMValueRef offset = get_src(ctx, instr->src[1]);
1832    LLVMValueRef rsrc = ctx->abi->load_ssbo ?
1833       ctx->abi->load_ssbo(ctx->abi, rsrc_base, false, false) : rsrc_base;
1834 
1835    LLVMTypeRef def_type = get_def_type(ctx, &instr->def);
1836    LLVMTypeRef def_elem_type = num_components > 1 ? LLVMGetElementType(def_type) : def_type;
1837 
1838    LLVMValueRef results[4];
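   /* Load in chunks of at most 16 bytes (4 dwords). For example, a
    * 4-component 64-bit load is split into two 16-byte loads of two
    * elements each.
    */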
1839    for (int i = 0; i < num_components;) {
1840       int num_elems = num_components - i;
1841       /* Multi-component subdword loads are lowered by ac_nir_lower_subdword_loads. */
1842       assert(elem_size_bytes >= 4 || num_elems == 1);
1843 
1844       if (num_elems * elem_size_bytes > 16)
1845          num_elems = 16 / elem_size_bytes;
1846       int load_bytes = num_elems * elem_size_bytes;
1847 
1848       LLVMValueRef immoffset = LLVMConstInt(ctx->ac.i32, i * elem_size_bytes, false);
1849       LLVMValueRef voffset = LLVMBuildAdd(ctx->ac.builder, offset, immoffset, "");
1850 
1851       LLVMValueRef ret;
1852 
1853       if (load_bytes == 1) {
1854          ret = ac_build_buffer_load_byte(&ctx->ac, rsrc, voffset, ctx->ac.i32_0,
1855                                           access);
1856       } else if (load_bytes == 2) {
1857          ret = ac_build_buffer_load_short(&ctx->ac, rsrc, voffset, ctx->ac.i32_0,
1858                                            access);
1859       } else {
1860          assert(elem_size_bytes >= 4);
1861          int num_channels = load_bytes / 4;
1862          bool can_speculate = access & ACCESS_CAN_REORDER;
1863 
1864          ret = ac_build_buffer_load(&ctx->ac, rsrc, num_channels, NULL, voffset, ctx->ac.i32_0,
1865                                     ctx->ac.f32, access, can_speculate, false);
1866       }
1867 
1868       LLVMTypeRef ret_type = LLVMVectorType(def_elem_type, num_elems);
1869       ret = LLVMBuildBitCast(ctx->ac.builder, ret, ret_type, "");
1870 
1871       for (unsigned j = 0; j < num_elems; j++) {
1872          results[i + j] =
1873             LLVMBuildExtractElement(ctx->ac.builder, ret, LLVMConstInt(ctx->ac.i32, j, false), "");
1874       }
1875       i += num_elems;
1876    }
1877 
1878    LLVMValueRef ret = ac_build_gather_values(&ctx->ac, results, num_components);
1879    return exit_waterfall(ctx, &wctx, ret);
1880 }
1881 
1882 static LLVMValueRef enter_waterfall_ubo(struct ac_nir_context *ctx, struct waterfall_context *wctx,
1883                                         const nir_intrinsic_instr *instr)
1884 {
1885    return enter_waterfall(ctx, wctx, get_src(ctx, instr->src[0]),
1886                           nir_intrinsic_access(instr) & ACCESS_NON_UNIFORM);
1887 }
1888 
1889 static LLVMValueRef get_global_address(struct ac_nir_context *ctx,
1890                                        nir_intrinsic_instr *instr,
1891                                        LLVMTypeRef type)
1892 {
1893    bool is_store = instr->intrinsic == nir_intrinsic_store_global_amd;
1894    LLVMValueRef addr = get_src(ctx, instr->src[is_store ? 1 : 0]);
1895 
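   /* Effective address = base pointer + (intrinsic base + dynamic offset),
    * computed with a byte-granularity (i8) GEP so it works for any type.
    */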
1896    LLVMTypeRef ptr_type = LLVMPointerType(type, AC_ADDR_SPACE_GLOBAL);
1897 
1898    uint32_t base = nir_intrinsic_base(instr);
1899    unsigned num_src = nir_intrinsic_infos[instr->intrinsic].num_srcs;
1900    LLVMValueRef offset = get_src(ctx, instr->src[num_src - 1]);
1901    offset = LLVMBuildAdd(ctx->ac.builder, offset, LLVMConstInt(ctx->ac.i32, base, false), "");
1902 
1903    LLVMTypeRef i8_ptr_type = LLVMPointerType(ctx->ac.i8, AC_ADDR_SPACE_GLOBAL);
1904    addr = LLVMBuildIntToPtr(ctx->ac.builder, addr, i8_ptr_type, "");
1905    addr = LLVMBuildGEP2(ctx->ac.builder, ctx->ac.i8, addr, &offset, 1, "");
1906    return LLVMBuildPointerCast(ctx->ac.builder, addr, ptr_type, "");
1907 }
1908 
1909 static LLVMValueRef visit_load_global(struct ac_nir_context *ctx,
1910                                       nir_intrinsic_instr *instr)
1911 {
1912    LLVMTypeRef result_type = get_def_type(ctx, &instr->def);
1913    LLVMValueRef val;
1914    LLVMValueRef addr = get_global_address(ctx, instr, result_type);
1915 
1916    val = LLVMBuildLoad2(ctx->ac.builder, result_type, addr, "");
1917 
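   /* Coherent/volatile loads are emitted as monotonic (relaxed) atomics,
    * which keeps LLVM from merging or tearing them; atomic accesses also
    * require an explicit alignment.
    */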
1918    if (nir_intrinsic_access(instr) & (ACCESS_COHERENT | ACCESS_VOLATILE)) {
1919       LLVMSetOrdering(val, LLVMAtomicOrderingMonotonic);
1920       LLVMSetAlignment(val, ac_get_type_size(result_type));
1921    }
1922 
1923    return val;
1924 }
1925 
1926 static void visit_store_global(struct ac_nir_context *ctx,
1927 				     nir_intrinsic_instr *instr)
1928 {
1929    LLVMValueRef data = get_src(ctx, instr->src[0]);
1930    LLVMTypeRef type = LLVMTypeOf(data);
1931    LLVMValueRef addr = get_global_address(ctx, instr, type);
1932    LLVMValueRef val;
1933    /* nir_opt_shrink_stores should be enough to simplify the writemask. Store writemasks should
1934     * have no holes.
1935     */
1936    assert(nir_intrinsic_write_mask(instr) == BITFIELD_MASK(instr->src[0].ssa->num_components));
1937 
1938    val = LLVMBuildStore(ctx->ac.builder, data, addr);
1939 
1940    if (nir_intrinsic_access(instr) & (ACCESS_COHERENT | ACCESS_VOLATILE)) {
1941       LLVMSetOrdering(val, LLVMAtomicOrderingMonotonic);
1942       LLVMSetAlignment(val, ac_get_type_size(type));
1943    }
1944 }
1945 
1946 static LLVMValueRef visit_global_atomic(struct ac_nir_context *ctx,
1947 					nir_intrinsic_instr *instr)
1948 {
1949    LLVMValueRef data = get_src(ctx, instr->src[1]);
1950    LLVMAtomicRMWBinOp op;
1951    LLVMValueRef result;
1952 
1953    /* use "singlethread" sync scope to implement relaxed ordering */
1954    const char *sync_scope = "singlethread-one-as";
1955 
1956    nir_atomic_op nir_op = nir_intrinsic_atomic_op(instr);
1957    bool is_float = nir_atomic_op_type(nir_op) == nir_type_float;
1958 
1959    LLVMTypeRef data_type = LLVMTypeOf(data);
1960 
1961    assert(instr->src[1].ssa->num_components == 1);
1962    if (is_float) {
1963       switch (instr->src[1].ssa->bit_size) {
1964       case 32:
1965          data_type = ctx->ac.f32;
1966          break;
1967       case 64:
1968          data_type = ctx->ac.f64;
1969          break;
1970       default:
1971          unreachable("Unsupported float bit size");
1972       }
1973 
1974       data = LLVMBuildBitCast(ctx->ac.builder, data, data_type, "");
1975    }
1976 
1977    LLVMValueRef addr = get_global_address(ctx, instr, data_type);
1978 
1979    if (instr->intrinsic == nir_intrinsic_global_atomic_swap_amd) {
1980       LLVMValueRef data1 = get_src(ctx, instr->src[2]);
1981       result = ac_build_atomic_cmp_xchg(&ctx->ac, addr, data, data1, sync_scope);
1982       result = LLVMBuildExtractValue(ctx->ac.builder, result, 0, "");
1983    } else if (nir_op == nir_atomic_op_ordered_add_gfx12_amd) {
1984       result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.global.atomic.ordered.add.b64", ctx->ac.i64,
1985                                   (LLVMValueRef[]){addr, data}, 2, 0);
1986    } else if (is_float) {
1987       const char *op = translate_atomic_op_str(nir_op);
1988       char name[64], type[8];
1989       LLVMValueRef params[2];
1990       int arg_count = 0;
1991 
1992       params[arg_count++] = addr;
1993       params[arg_count++] = data;
1994 
1995       ac_build_type_name_for_intr(data_type, type, sizeof(type));
1996       snprintf(name, sizeof(name), "llvm.amdgcn.global.atomic.%s.%s.p1.%s", op, type, type);
1997 
1998       result = ac_build_intrinsic(&ctx->ac, name, data_type, params, arg_count, 0);
1999    } else {
2000       op = translate_atomic_op(nir_op);
2001       result = ac_build_atomic_rmw(&ctx->ac, op, addr, ac_to_integer(&ctx->ac, data), sync_scope);
2002    }
2003 
2004    result = ac_to_integer(&ctx->ac, result);
2005 
2006    return result;
2007 }
2008 
2009 static LLVMValueRef visit_load_ubo_buffer(struct ac_nir_context *ctx, nir_intrinsic_instr *instr)
2010 {
2011    struct waterfall_context wctx;
2012    LLVMValueRef rsrc_base = enter_waterfall_ubo(ctx, &wctx, instr);
2013 
2014    LLVMValueRef ret;
2015    LLVMValueRef rsrc = rsrc_base;
2016    LLVMValueRef offset = get_src(ctx, instr->src[1]);
2017    int num_components = instr->num_components;
2018 
2019    assert(instr->def.bit_size >= 32 && instr->def.bit_size % 32 == 0);
2020 
2021    if (ctx->abi->load_ubo)
2022       rsrc = ctx->abi->load_ubo(ctx->abi, rsrc);
2023 
2024    /* Convert to a 32-bit load. */
2025    if (instr->def.bit_size == 64)
2026       num_components *= 2;
2027 
2028    ret = ac_build_buffer_load(&ctx->ac, rsrc, num_components, NULL, offset, NULL,
2029                               ctx->ac.f32, 0, true, true);
2030    ret = LLVMBuildBitCast(ctx->ac.builder, ret, get_def_type(ctx, &instr->def), "");
2031 
2032    return exit_waterfall(ctx, &wctx, ret);
2033 }
2034 
2035 static void visit_store_output(struct ac_nir_context *ctx, nir_intrinsic_instr *instr)
2036 {
2037    unsigned base = nir_intrinsic_base(instr);
2038    unsigned writemask = nir_intrinsic_write_mask(instr);
2039    unsigned component = nir_intrinsic_component(instr);
2040    LLVMValueRef src = ac_to_float(&ctx->ac, get_src(ctx, instr->src[0]));
2041    ASSERTED unsigned bit_size = ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src));
2042    ASSERTED nir_src offset = *nir_get_io_offset_src(instr);
2043 
2044    /* Non-monolithic PS and also LS before TCS in radeonsi use this to forward outputs to
2045     * registers.
2046     */
2047    assert(bit_size == 16 || bit_size == 32);
2048    /* No indirect indexing is allowed here. */
2049    assert(nir_src_is_const(offset) && nir_src_as_uint(offset) == 0);
2050 
2051    writemask <<= component;
2052 
2053    for (unsigned chan = 0; chan < 8; chan++) {
2054       if (!(writemask & (1 << chan)))
2055          continue;
2056 
2057       LLVMValueRef value = ac_llvm_extract_elem(&ctx->ac, src, chan - component);
2058       LLVMValueRef output_addr = ctx->abi->outputs[base * 4 + chan];
2059 
2060       if (!ctx->abi->is_16bit[base * 4 + chan] &&
2061           LLVMTypeOf(value) == ctx->ac.f16) {
2062          LLVMValueRef output, index;
2063 
2064          /* Insert the 16-bit value into the low or high bits of the 32-bit output
2065           * using read-modify-write.
2066           */
2067          index = LLVMConstInt(ctx->ac.i32, nir_intrinsic_io_semantics(instr).high_16bits, 0);
2068 
2069          output = LLVMBuildLoad2(ctx->ac.builder, ctx->ac.v2f16, output_addr, "");
2070          output = LLVMBuildInsertElement(ctx->ac.builder, output, value, index, "");
2071          value = LLVMBuildBitCast(ctx->ac.builder, output, ctx->ac.f32, "");
2072       }
2073       LLVMBuildStore(ctx->ac.builder, value, output_addr);
2074    }
2075 }
2076 
2077 static int image_type_to_components_count(enum glsl_sampler_dim dim, bool array)
2078 {
2079    switch (dim) {
2080    case GLSL_SAMPLER_DIM_BUF:
2081       return 1;
2082    case GLSL_SAMPLER_DIM_1D:
2083       return array ? 2 : 1;
2084    case GLSL_SAMPLER_DIM_2D:
2085       return array ? 3 : 2;
2086    case GLSL_SAMPLER_DIM_MS:
2087       return array ? 4 : 3;
2088    case GLSL_SAMPLER_DIM_3D:
2089    case GLSL_SAMPLER_DIM_CUBE:
2090       return 3;
2091    case GLSL_SAMPLER_DIM_RECT:
2092    case GLSL_SAMPLER_DIM_SUBPASS:
2093       return 2;
2094    case GLSL_SAMPLER_DIM_SUBPASS_MS:
2095       return 3;
2096    default:
2097       break;
2098    }
2099    return 0;
2100 }
2101 
2102 static void get_image_coords(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr,
2103                              LLVMValueRef dynamic_desc_index, struct ac_image_args *args,
2104                              enum glsl_sampler_dim dim, bool is_array)
2105 {
2106    LLVMValueRef src0 = get_src(ctx, instr->src[1]);
2107    int count;
2108    ASSERTED bool add_frag_pos =
2109       (dim == GLSL_SAMPLER_DIM_SUBPASS || dim == GLSL_SAMPLER_DIM_SUBPASS_MS);
2110    bool is_ms = (dim == GLSL_SAMPLER_DIM_MS || dim == GLSL_SAMPLER_DIM_SUBPASS_MS);
2111    bool gfx9_1d = ctx->ac.gfx_level == GFX9 && dim == GLSL_SAMPLER_DIM_1D;
2112    assert(!add_frag_pos && "Input attachments should be lowered by this point.");
2113    count = image_type_to_components_count(dim, is_array);
2114 
2115    if (count == 1 && !gfx9_1d) {
2116       if (instr->src[1].ssa->num_components)
2117          args->coords[0] = ac_llvm_extract_elem(&ctx->ac, src0, 0);
2118       else
2119          args->coords[0] = src0;
2120    } else {
2121       int chan;
2122       if (is_ms)
2123          count--;
2124       for (chan = 0; chan < count; ++chan) {
2125          args->coords[chan] = ac_llvm_extract_elem(&ctx->ac, src0, chan);
2126       }
2127 
2128       if (gfx9_1d) {
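      /* GFX9 addresses 1D textures as 2D: insert a zero Y coordinate and,
       * for arrays, move the layer index up to the Z slot.
       */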
2129          if (is_array)
2130             args->coords[2] = args->coords[1];
2131          args->coords[1] = LLVMConstInt(LLVMTypeOf(args->coords[0]), 0, 0);
2132          count++;
2133       }
2134       if (ctx->ac.gfx_level == GFX9 && dim == GLSL_SAMPLER_DIM_2D && !is_array) {
2135          /* The hw can't bind a slice of a 3D image as a 2D
2136           * image, because it ignores BASE_ARRAY if the target
2137           * is 3D. The workaround is to read BASE_ARRAY and set
2138           * it as the 3rd address operand for all 2D images.
2139           */
2140          LLVMValueRef first_layer, const5, mask;
2141 
2142          const5 = LLVMConstInt(ctx->ac.i32, 5, 0);
2143          mask = LLVMConstInt(ctx->ac.i32, S_008F24_BASE_ARRAY(~0), 0);
2144          first_layer = LLVMBuildExtractElement(ctx->ac.builder, args->resource, const5, "");
2145          first_layer = LLVMBuildAnd(ctx->ac.builder, first_layer, mask, "");
2146 
2147          if (instr->intrinsic == nir_intrinsic_bindless_image_load ||
2148              instr->intrinsic == nir_intrinsic_bindless_image_sparse_load ||
2149              instr->intrinsic == nir_intrinsic_bindless_image_store) {
2150             int lod_index = instr->intrinsic == nir_intrinsic_bindless_image_store ? 4 : 3;
2151             bool has_lod = !nir_src_is_const(instr->src[lod_index]) ||
2152                            nir_src_as_uint(instr->src[lod_index]) != 0;
2153             if (has_lod) {
2154                /* If there's a lod parameter, it matters whether the image is 3D or
2155                 * 2D, because the hw reads the lod from either the fourth or the third
2156                 * component. So detect 3D images and otherwise place the lod in the
2157                 * third component.
2158                 */
2158                LLVMValueRef const3, const28, const4, rword3, type3d, type, is_3d, lod;
2159                const3 = LLVMConstInt(ctx->ac.i32, 3, 0);
2160                const28 = LLVMConstInt(ctx->ac.i32, 28, 0);
2161                const4 = LLVMConstInt(ctx->ac.i32, 4, 0);
2162                type3d = LLVMConstInt(ctx->ac.i32, V_008F1C_SQ_RSRC_IMG_3D, 0);
2163                rword3 = LLVMBuildExtractElement(ctx->ac.builder, args->resource, const3, "");
2164                type = ac_build_bfe(&ctx->ac, rword3, const28, const4, false);
2165                is_3d = emit_int_cmp(&ctx->ac, LLVMIntEQ, type, type3d);
2166                lod = get_src(ctx, instr->src[lod_index]);
2167                first_layer = emit_bcsel(&ctx->ac, is_3d, first_layer, lod);
2168             }
2169          }
2170 
2171          args->coords[count] = LLVMBuildTrunc(ctx->ac.builder, first_layer,
2172                                               LLVMTypeOf(args->coords[0]), "");
2173          count++;
2174       }
2175 
2176       if (is_ms) {
2177          /* sample index */
2178          args->coords[count] = ac_llvm_extract_elem(&ctx->ac, get_src(ctx, instr->src[2]), 0);
2179          count++;
2180       }
2181    }
2182 }
2183 
2184 static LLVMValueRef enter_waterfall_image(struct ac_nir_context *ctx,
2185                                           struct waterfall_context *wctx,
2186                                           const nir_intrinsic_instr *instr)
2187 {
2188    /* src0 is the descriptor when uniform, or the descriptor index when non-uniform. */
2189    LLVMValueRef value = get_src(ctx, instr->src[0]);
2190 
2191    return enter_waterfall(ctx, wctx, value, nir_intrinsic_access(instr) & ACCESS_NON_UNIFORM);
2192 }
2193 
2194 static LLVMValueRef visit_image_load(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr)
2195 {
2196    LLVMValueRef res;
2197 
2198    enum glsl_sampler_dim dim = nir_intrinsic_image_dim(instr);
2199    enum gl_access_qualifier access = nir_intrinsic_access(instr);
2200    bool is_array = nir_intrinsic_image_array(instr);
2201 
2202    struct waterfall_context wctx;
2203    LLVMValueRef dynamic_index = enter_waterfall_image(ctx, &wctx, instr);
2204 
2205    struct ac_image_args args = {0};
2206 
2207    args.access = ac_get_mem_access_flags(instr);
2208    args.tfe = instr->intrinsic == nir_intrinsic_bindless_image_sparse_load;
2209 
2210    if (dim == GLSL_SAMPLER_DIM_BUF) {
2211       unsigned num_channels = util_last_bit(nir_def_components_read(&instr->def));
2212       if (instr->def.bit_size == 64)
2213          num_channels = num_channels < 4 ? 2 : 4;
2214       LLVMValueRef rsrc, vindex;
2215 
2216       rsrc = ctx->abi->load_sampler_desc(ctx->abi, dynamic_index, AC_DESC_BUFFER);
2217       vindex =
2218          LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[1]), ctx->ac.i32_0, "");
2219 
2220       bool can_speculate = access & ACCESS_CAN_REORDER;
2221       res = ac_build_buffer_load_format(&ctx->ac, rsrc, vindex, ctx->ac.i32_0, num_channels,
2222                                         args.access, can_speculate,
2223                                         instr->def.bit_size == 16,
2224                                         args.tfe);
2225       res = ac_build_expand(&ctx->ac, res, num_channels, args.tfe ? 5 : 4);
2226 
2227       res = ac_trim_vector(&ctx->ac, res, instr->def.num_components);
2228       res = ac_to_integer(&ctx->ac, res);
2229    } else if (instr->intrinsic == nir_intrinsic_bindless_image_fragment_mask_load_amd) {
2230       assert(ctx->ac.gfx_level < GFX11);
2231 
2232       args.opcode = ac_image_load;
2233       args.resource = ctx->abi->load_sampler_desc(ctx->abi, dynamic_index, AC_DESC_FMASK);
2234       get_image_coords(ctx, instr, dynamic_index, &args, GLSL_SAMPLER_DIM_2D, is_array);
2235       args.dmask = 0x1;
2236       args.dim = is_array ? ac_image_2darray : ac_image_2d;
2237       args.attributes = AC_ATTR_INVARIANT_LOAD;
2238       args.a16 = ac_get_elem_bits(&ctx->ac, LLVMTypeOf(args.coords[0])) == 16;
2239 
2240       res = ac_build_image_opcode(&ctx->ac, &args);
2241    } else {
2242       bool level_zero = nir_src_is_const(instr->src[3]) && nir_src_as_uint(instr->src[3]) == 0;
2243 
2244       args.opcode = level_zero ? ac_image_load : ac_image_load_mip;
2245       args.resource = ctx->abi->load_sampler_desc(ctx->abi, dynamic_index, AC_DESC_IMAGE);
2246       get_image_coords(ctx, instr, dynamic_index, &args, dim, is_array);
2247       args.dim = ac_get_image_dim(ctx->ac.gfx_level, dim, is_array);
2248       if (!level_zero)
2249          args.lod = get_src(ctx, instr->src[3]);
2250       /* TODO: Fix in LLVM. LLVM doesn't reduce DMASK for D16 if optimization barriers are
2251        * present and even if the vector is trimmed before the optimization barriers.
2252        */
2253       args.dmask = BITFIELD_MASK(instr->def.num_components);
2254       args.attributes = access & ACCESS_CAN_REORDER ? AC_ATTR_INVARIANT_LOAD : 0;
2255       args.d16 = instr->def.bit_size == 16;
2256       args.a16 = ac_get_elem_bits(&ctx->ac, LLVMTypeOf(args.coords[0])) == 16;
2257 
2258       res = ac_build_image_opcode(&ctx->ac, &args);
2259    }
2260 
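   /* 64-bit image loads (R64 formats) come back as pairs of 32-bit channels;
    * rebuild the i64 vec4 as (x, 0, 0, w) from the two loaded halves,
    * appending the TFE residency code when present.
    */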
2261    if (instr->def.bit_size == 64) {
2262       LLVMValueRef code = NULL;
2263       if (args.tfe) {
2264          code = ac_llvm_extract_elem(&ctx->ac, res, 4);
2265          res = ac_trim_vector(&ctx->ac, res, 4);
2266       }
2267 
2268       res = LLVMBuildBitCast(ctx->ac.builder, res, LLVMVectorType(ctx->ac.i64, 2), "");
2269       LLVMValueRef x = LLVMBuildExtractElement(ctx->ac.builder, res, ctx->ac.i32_0, "");
2270       LLVMValueRef w = LLVMBuildExtractElement(ctx->ac.builder, res, ctx->ac.i32_1, "");
2271 
2272       if (code)
2273          code = LLVMBuildZExt(ctx->ac.builder, code, ctx->ac.i64, "");
2274       LLVMValueRef values[5] = {x, ctx->ac.i64_0, ctx->ac.i64_0, w, code};
2275       res = ac_build_gather_values(&ctx->ac, values, 4 + args.tfe);
2276    }
2277 
2278    if (instr->def.num_components < 4)
2279       res = ac_trim_vector(&ctx->ac, res, instr->def.num_components);
2280 
2281    return exit_waterfall(ctx, &wctx, res);
2282 }
2283 
2284 static void visit_image_store(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr)
2285 {
2286    enum glsl_sampler_dim dim = nir_intrinsic_image_dim(instr);
2287    bool is_array = nir_intrinsic_image_array(instr);
2288 
2289    struct waterfall_context wctx;
2290    LLVMValueRef dynamic_index = enter_waterfall_image(ctx, &wctx, instr);
2291 
2292    struct ac_image_args args = {0};
2293    args.access = ac_get_mem_access_flags(instr);
2294 
2295    LLVMValueRef src = get_src(ctx, instr->src[3]);
2296    if (instr->src[3].ssa->bit_size == 64) {
2297       /* Only R64_UINT and R64_SINT are supported. */
2298       src = ac_llvm_extract_elem(&ctx->ac, src, 0);
2299       src = LLVMBuildBitCast(ctx->ac.builder, src, ctx->ac.v2f32, "");
2300    } else {
2301       src = ac_to_float(&ctx->ac, src);
2302    }
2303 
2304    if (dim == GLSL_SAMPLER_DIM_BUF) {
2305       LLVMValueRef rsrc = ctx->abi->load_sampler_desc(ctx->abi, dynamic_index, AC_DESC_BUFFER);
2306       unsigned src_channels = ac_get_llvm_num_components(src);
2307       LLVMValueRef vindex;
2308 
2309       if (src_channels == 3)
2310          src = ac_build_expand_to_vec4(&ctx->ac, src, 3);
2311 
2312       vindex =
2313          LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[1]), ctx->ac.i32_0, "");
2314 
2315       ac_build_buffer_store_format(&ctx->ac, rsrc, src, vindex, ctx->ac.i32_0, args.access);
2316    } else {
2317       bool level_zero = nir_src_is_const(instr->src[4]) && nir_src_as_uint(instr->src[4]) == 0;
2318 
2319       args.opcode = level_zero ? ac_image_store : ac_image_store_mip;
2320       args.data[0] = src;
2321       args.resource = ctx->abi->load_sampler_desc(ctx->abi, dynamic_index, AC_DESC_IMAGE);
2322       get_image_coords(ctx, instr, dynamic_index, &args, dim, is_array);
2323       args.dim = ac_get_image_dim(ctx->ac.gfx_level, dim, is_array);
2324       if (!level_zero)
2325          args.lod = get_src(ctx, instr->src[4]);
2326       args.dmask = 15;
2327       args.d16 = ac_get_elem_bits(&ctx->ac, LLVMTypeOf(args.data[0])) == 16;
2328       args.a16 = ac_get_elem_bits(&ctx->ac, LLVMTypeOf(args.coords[0])) == 16;
2329 
2330       ac_build_image_opcode(&ctx->ac, &args);
2331    }
2332 
2333    exit_waterfall(ctx, &wctx, NULL);
2334 }
2335 
2336 static LLVMValueRef visit_image_atomic(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr)
2337 {
2338    LLVMValueRef params[7];
2339    int param_count = 0;
2340 
2341    nir_atomic_op op = nir_intrinsic_atomic_op(instr);
2342    bool cmpswap = op == nir_atomic_op_cmpxchg;
2343    const char *atomic_name = translate_atomic_op_str(op);
2344    char intrinsic_name[64];
2345    enum ac_atomic_op atomic_subop;
2346    ASSERTED int length;
2347 
2348    enum glsl_sampler_dim dim = nir_intrinsic_image_dim(instr);
2349    bool is_array = nir_intrinsic_image_array(instr);
2350 
2351    struct waterfall_context wctx;
2352    LLVMValueRef dynamic_index = enter_waterfall_image(ctx, &wctx, instr);
2353 
2354    switch (op) {
2355    case nir_atomic_op_iadd:
2356       atomic_subop = ac_atomic_add;
2357       break;
2358    case nir_atomic_op_imin:
2359       atomic_subop = ac_atomic_smin;
2360       break;
2361    case nir_atomic_op_umin:
2362       atomic_subop = ac_atomic_umin;
2363       break;
2364    case nir_atomic_op_imax:
2365       atomic_subop = ac_atomic_smax;
2366       break;
2367    case nir_atomic_op_umax:
2368       atomic_subop = ac_atomic_umax;
2369       break;
2370    case nir_atomic_op_iand:
2371       atomic_subop = ac_atomic_and;
2372       break;
2373    case nir_atomic_op_ior:
2374       atomic_subop = ac_atomic_or;
2375       break;
2376    case nir_atomic_op_ixor:
2377       atomic_subop = ac_atomic_xor;
2378       break;
2379    case nir_atomic_op_xchg:
2380       atomic_subop = ac_atomic_swap;
2381       break;
2382    case nir_atomic_op_cmpxchg:
2383       atomic_subop = 0; /* not used */
2384       break;
2385    case nir_atomic_op_inc_wrap:
2386       atomic_subop = ac_atomic_inc_wrap;
2387       break;
2388    case nir_atomic_op_dec_wrap:
2389       atomic_subop = ac_atomic_dec_wrap;
2390       break;
2391    case nir_atomic_op_fadd:
2392       atomic_subop = ac_atomic_fmin; /* Non-buffer fadd atomics are not supported. */
2393       break;
2394    case nir_atomic_op_fmin:
2395       atomic_subop = ac_atomic_fmin;
2396       break;
2397    case nir_atomic_op_fmax:
2398       atomic_subop = ac_atomic_fmax;
2399       break;
2400    default:
2401       abort();
2402    }
2403 
2404    if (cmpswap)
2405       params[param_count++] = get_src(ctx, instr->src[4]);
2406    params[param_count++] = get_src(ctx, instr->src[3]);
2407 
2408    if (atomic_subop == ac_atomic_fmin || atomic_subop == ac_atomic_fmax)
2409       params[0] = ac_to_float(&ctx->ac, params[0]);
2410 
2411    LLVMValueRef result;
2412    if (dim == GLSL_SAMPLER_DIM_BUF) {
2413       params[param_count++] = ctx->abi->load_sampler_desc(ctx->abi, dynamic_index, AC_DESC_BUFFER);
2414       params[param_count++] = LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[1]),
2415                                                       ctx->ac.i32_0, ""); /* vindex */
2416       params[param_count++] = ctx->ac.i32_0;                              /* voffset */
2417       if (cmpswap && instr->def.bit_size == 64) {
2418          result = emit_ssbo_comp_swap_64(ctx, params[2], params[3], params[1], params[0], true);
2419       } else {
2420          LLVMTypeRef data_type = LLVMTypeOf(params[0]);
2421          char type[8];
2422          unsigned cache_flags =
2423             ac_get_hw_cache_flags(ctx->ac.gfx_level,
2424 				  ac_get_mem_access_flags(instr) | ACCESS_TYPE_ATOMIC).value;
2425 
2426          params[param_count++] = ctx->ac.i32_0; /* soffset */
2427          params[param_count++] = LLVMConstInt(ctx->ac.i32, cache_flags, 0);
2428 
2429          ac_build_type_name_for_intr(data_type, type, sizeof(type));
2430          length = snprintf(intrinsic_name, sizeof(intrinsic_name),
2431                            "llvm.amdgcn.struct.buffer.atomic.%s.%s",
2432                            atomic_name, type);
2433 
2434          assert(length < sizeof(intrinsic_name));
2435          result = ac_build_intrinsic(&ctx->ac, intrinsic_name, LLVMTypeOf(params[0]), params, param_count, 0);
2436       }
2437    } else {
2438       struct ac_image_args args = {0};
2439       args.opcode = cmpswap ? ac_image_atomic_cmpswap : ac_image_atomic;
2440       args.atomic = atomic_subop;
2441       args.data[0] = params[0];
2442       if (cmpswap)
2443          args.data[1] = params[1];
2444       args.resource = ctx->abi->load_sampler_desc(ctx->abi, dynamic_index, AC_DESC_IMAGE);
2445       get_image_coords(ctx, instr, dynamic_index, &args, dim, is_array);
2446       args.dim = ac_get_image_dim(ctx->ac.gfx_level, dim, is_array);
2447       args.a16 = ac_get_elem_bits(&ctx->ac, LLVMTypeOf(args.coords[0])) == 16;
2448       args.access = ac_get_mem_access_flags(instr);
2449 
2450       result = ac_build_image_opcode(&ctx->ac, &args);
2451    }
2452 
2453    return exit_waterfall(ctx, &wctx, result);
2454 }
2455 
2456 static void emit_discard(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr)
2457 {
2458    LLVMValueRef cond;
2459 
2460    if (instr->intrinsic == nir_intrinsic_terminate_if) {
2461       cond = LLVMBuildNot(ctx->ac.builder, get_src(ctx, instr->src[0]), "");
2462    } else {
2463       assert(instr->intrinsic == nir_intrinsic_terminate);
2464       cond = ctx->ac.i1false;
2465    }
2466 
2467    ac_build_kill_if_false(&ctx->ac, cond);
2468 }
2469 
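/* Unlike terminate, demote keeps the demoted lanes running as helper
 * invocations so that derivatives (and thus implicit-lod sampling) remain
 * well-defined; the lanes merely stop writing outputs.
 */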
2470 static void emit_demote(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr)
2471 {
2472    LLVMValueRef cond;
2473 
2474    if (instr->intrinsic == nir_intrinsic_demote_if) {
2475       cond = LLVMBuildNot(ctx->ac.builder, get_src(ctx, instr->src[0]), "");
2476    } else {
2477       assert(instr->intrinsic == nir_intrinsic_demote);
2478       cond = ctx->ac.i1false;
2479    }
2480 
2481    /* This demotes the pixel if the condition is false. */
2482    ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.wqm.demote", ctx->ac.voidt, &cond, 1, 0);
2483 }
2484 
2485 static LLVMValueRef visit_load_subgroup_id(struct ac_nir_context *ctx)
2486 {
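   /* The wave id comes from a different place per generation: a dedicated
    * intrinsic on GFX12+, tg_size bits [24:20] on GFX10.3+, and tg_size
    * bits [11:6] on older chips.
    */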
2487    if (gl_shader_stage_is_compute(ctx->stage)) {
2488       if (ctx->ac.gfx_level >= GFX12)
2489          return ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.wave.id", ctx->ac.i32, NULL, 0, 0);
2490       else if (ctx->ac.gfx_level >= GFX10_3)
2491          return ac_unpack_param(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->tg_size), 20, 5);
2492       else
2493          return ac_unpack_param(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->tg_size), 6, 6);
2494    } else if (ctx->args->tcs_wave_id.used) {
2495       return ac_unpack_param(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->tcs_wave_id), 0, 3);
2496    } else if (ctx->args->merged_wave_info.used) {
2497       return ac_unpack_param(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->merged_wave_info), 24, 4);
2498    } else {
2499       return ctx->ac.i32_0;
2500    }
2501 }
2502 
2503 static LLVMValueRef visit_load_local_invocation_index(struct ac_nir_context *ctx)
2504 {
2505    if (ctx->abi->vs_rel_patch_id)
2506       return ctx->abi->vs_rel_patch_id;
2507 
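   /* Flat local index = subgroup_id * wave_size + lane id within the wave. */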
2508    return ac_build_imad(&ctx->ac, visit_load_subgroup_id(ctx),
2509                         LLVMConstInt(ctx->ac.i32, ctx->ac.wave_size, 0),
2510                         ac_get_thread_id(&ctx->ac));
2511 }
2512 
2513 static LLVMValueRef visit_first_invocation(struct ac_nir_context *ctx)
2514 {
2515    LLVMValueRef active_set = ac_build_ballot(&ctx->ac, ctx->ac.i32_1);
2516    const char *intr = ctx->ac.wave_size == 32 ? "llvm.cttz.i32" : "llvm.cttz.i64";
2517 
2518    /* The second argument is whether cttz(0) should be defined, but we do not care. */
2519    LLVMValueRef args[] = {active_set, ctx->ac.i1false};
2520    LLVMValueRef result = ac_build_intrinsic(&ctx->ac, intr, ctx->ac.iN_wavemask, args, 2, 0);
2521 
2522    return LLVMBuildTrunc(ctx->ac.builder, result, ctx->ac.i32, "");
2523 }
2524 
2525 static LLVMValueRef visit_load_shared(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr)
2526 {
2527    LLVMValueRef values[16], derived_ptr, index, ret;
2528    unsigned const_off = nir_intrinsic_base(instr);
2529 
2530    LLVMTypeRef elem_type = LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);
2531    LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[0], const_off);
2532 
2533    for (int chan = 0; chan < instr->num_components; chan++) {
2534       index = LLVMConstInt(ctx->ac.i32, chan, 0);
2535       derived_ptr = LLVMBuildGEP2(ctx->ac.builder, elem_type, ptr, &index, 1, "");
2536       values[chan] = LLVMBuildLoad2(ctx->ac.builder, elem_type, derived_ptr, "");
2537    }
2538 
2539    ret = ac_build_gather_values(&ctx->ac, values, instr->num_components);
2540 
2541    return LLVMBuildBitCast(ctx->ac.builder, ret, get_def_type(ctx, &instr->def), "");
2542 }
2543 
2544 static void visit_store_shared(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr)
2545 {
2546    LLVMValueRef derived_ptr, data, index;
2547    LLVMBuilderRef builder = ctx->ac.builder;
2548 
2549    unsigned const_off = nir_intrinsic_base(instr);
2550    LLVMTypeRef elem_type = LLVMIntTypeInContext(ctx->ac.context, instr->src[0].ssa->bit_size);
2551    LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[1], const_off);
2552    LLVMValueRef src = get_src(ctx, instr->src[0]);
2553 
2554    int writemask = nir_intrinsic_write_mask(instr);
2555    for (int chan = 0; chan < 16; chan++) {
2556       if (!(writemask & (1 << chan))) {
2557          continue;
2558       }
2559       data = ac_llvm_extract_elem(&ctx->ac, src, chan);
2560       index = LLVMConstInt(ctx->ac.i32, chan, 0);
2561       derived_ptr = LLVMBuildGEP2(builder, elem_type, ptr, &index, 1, "");
2562       LLVMBuildStore(builder, data, derived_ptr);
2563    }
2564 }
2565 
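/* load_shared2_amd corresponds to the two-address LDS reads (ds_read2_b32/b64
 * and their st64 variants): two elements are loaded from offset0 and offset1,
 * each scaled by 64 elements when st64 is set.
 */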
2566 static LLVMValueRef visit_load_shared2_amd(struct ac_nir_context *ctx,
2567                                            const nir_intrinsic_instr *instr)
2568 {
2569    LLVMTypeRef pointee_type = LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);
2570    LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[0], 0);
2571 
2572    LLVMValueRef values[2];
2573    uint8_t offsets[] = {nir_intrinsic_offset0(instr), nir_intrinsic_offset1(instr)};
2574    unsigned stride = nir_intrinsic_st64(instr) ? 64 : 1;
2575    for (unsigned i = 0; i < 2; i++) {
2576       LLVMValueRef index = LLVMConstInt(ctx->ac.i32, offsets[i] * stride, 0);
2577       LLVMValueRef derived_ptr = LLVMBuildGEP2(ctx->ac.builder, pointee_type, ptr, &index, 1, "");
2578       values[i] = LLVMBuildLoad2(ctx->ac.builder, pointee_type, derived_ptr, "");
2579    }
2580 
2581    LLVMValueRef ret = ac_build_gather_values(&ctx->ac, values, 2);
2582    return LLVMBuildBitCast(ctx->ac.builder, ret, get_def_type(ctx, &instr->def), "");
2583 }
2584 
2585 static void visit_store_shared2_amd(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr)
2586 {
2587    LLVMTypeRef pointee_type = LLVMIntTypeInContext(ctx->ac.context, instr->src[0].ssa->bit_size);
2588    LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[1], 0);
2589    LLVMValueRef src = get_src(ctx, instr->src[0]);
2590 
2591    uint8_t offsets[] = {nir_intrinsic_offset0(instr), nir_intrinsic_offset1(instr)};
2592    unsigned stride = nir_intrinsic_st64(instr) ? 64 : 1;
2593    for (unsigned i = 0; i < 2; i++) {
2594       LLVMValueRef index = LLVMConstInt(ctx->ac.i32, offsets[i] * stride, 0);
2595       LLVMValueRef derived_ptr = LLVMBuildGEP2(ctx->ac.builder, pointee_type, ptr, &index, 1, "");
2596       LLVMBuildStore(ctx->ac.builder, ac_llvm_extract_elem(&ctx->ac, src, i), derived_ptr);
2597    }
2598 }
2599 
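/* Shared-memory atomics take one of three paths: compare-and-swap uses the
 * cmpxchg builder, float min/max are emitted as llvm.amdgcn.ds.* intrinsics,
 * and everything else becomes a plain atomicrmw. The LLVM atomics use the
 * workgroup-one-as synchronization scope.
 */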
2600 static LLVMValueRef visit_var_atomic(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr,
2601                                      LLVMValueRef ptr, int src_idx)
2602 {
2603    LLVMValueRef result;
2604    LLVMValueRef src = get_src(ctx, instr->src[src_idx]);
2605    nir_atomic_op nir_op = nir_intrinsic_atomic_op(instr);
2606 
2607    const char *sync_scope = "workgroup-one-as";
2608 
2609    if (nir_op == nir_atomic_op_cmpxchg) {
2610       LLVMValueRef src1 = get_src(ctx, instr->src[src_idx + 1]);
2611       result = ac_build_atomic_cmp_xchg(&ctx->ac, ptr, src, src1, sync_scope);
2612       result = LLVMBuildExtractValue(ctx->ac.builder, result, 0, "");
2613    } else if (nir_op == nir_atomic_op_fmin || nir_op == nir_atomic_op_fmax) {
2614       const char *op = translate_atomic_op_str(nir_op);
2615       char name[64], type[8];
2616       LLVMValueRef params[5];
2617       LLVMTypeRef src_type;
2618       int arg_count = 0;
2619 
2620       src = ac_to_float(&ctx->ac, src);
2621       src_type = LLVMTypeOf(src);
2622 
2623       params[arg_count++] = ptr;
2624       params[arg_count++] = src;
2625       params[arg_count++] = ctx->ac.i32_0;
2626       params[arg_count++] = ctx->ac.i32_0;
2627       params[arg_count++] = ctx->ac.i1false;
2628 
2629       ac_build_type_name_for_intr(src_type, type, sizeof(type));
2630       snprintf(name, sizeof(name), "llvm.amdgcn.ds.%s.%s", op, type);
2631 
2632       result = ac_build_intrinsic(&ctx->ac, name, src_type, params, arg_count, 0);
2633       result = ac_to_integer(&ctx->ac, result);
2634    } else {
2635       LLVMAtomicRMWBinOp op = translate_atomic_op(nir_op);
2636       LLVMValueRef val;
2637 
2638       if (nir_op == nir_atomic_op_fadd) {
2639          val = ac_to_float(&ctx->ac, src);
2640       } else {
2641          val = ac_to_integer(&ctx->ac, src);
2642       }
2643 
2644       result = ac_build_atomic_rmw(&ctx->ac, op, ptr, val, sync_scope);
2645 
2646       if (nir_op == nir_atomic_op_fadd) {
2647          result = ac_to_integer(&ctx->ac, result);
2648       }
2649    }
2650 
2651    return result;
2652 }
2653 
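/* The sample position within a pixel is the fractional part of the fragment
 * x/y coordinate at that sample.
 */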
2654 static LLVMValueRef load_sample_pos(struct ac_nir_context *ctx)
2655 {
2656    LLVMValueRef values[2];
2657    LLVMValueRef pos[2];
2658 
2659    pos[0] = ac_to_float(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->frag_pos[0]));
2660    pos[1] = ac_to_float(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->frag_pos[1]));
2661 
2662    values[0] = ac_build_fract(&ctx->ac, pos[0], 32);
2663    values[1] = ac_build_fract(&ctx->ac, pos[1], 32);
2664    return ac_build_gather_values(&ctx->ac, values, 2);
2665 }
2666 
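/* Return the (i, j) interpolation argument for the given mode and location.
 * Flat inputs are not interpolated, so they have no (i, j) and return NULL.
 */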
2667 static LLVMValueRef lookup_interp_param(struct ac_nir_context *ctx, enum glsl_interp_mode interp,
2668                                         unsigned location)
2669 {
2670    switch (interp) {
2671    case INTERP_MODE_FLAT:
2672    default:
2673       return NULL;
2674    case INTERP_MODE_SMOOTH:
2675    case INTERP_MODE_NONE:
2676       if (location == INTERP_CENTER)
2677          return ac_get_arg(&ctx->ac, ctx->args->persp_center);
2678       else if (location == INTERP_CENTROID)
2679          return ac_get_arg(&ctx->ac, ctx->args->persp_centroid);
2680       else if (location == INTERP_SAMPLE)
2681          return ac_get_arg(&ctx->ac, ctx->args->persp_sample);
2682       break;
2683    case INTERP_MODE_NOPERSPECTIVE:
2684       if (location == INTERP_CENTER)
2685          return ac_get_arg(&ctx->ac, ctx->args->linear_center);
2686       else if (location == INTERP_CENTROID)
2687          return ac_get_arg(&ctx->ac, ctx->args->linear_centroid);
2688       else if (location == INTERP_SAMPLE)
2689          return ac_get_arg(&ctx->ac, ctx->args->linear_sample);
2690       break;
2691    }
2692    return NULL;
2693 }
2694 
2695 static LLVMValueRef barycentric_center(struct ac_nir_context *ctx, unsigned mode)
2696 {
2697    LLVMValueRef interp_param = lookup_interp_param(ctx, mode, INTERP_CENTER);
2698    return LLVMBuildBitCast(ctx->ac.builder, interp_param, ctx->ac.v2i32, "");
2699 }
2700 
2701 static LLVMValueRef barycentric_offset(struct ac_nir_context *ctx, unsigned mode,
2702                                        LLVMValueRef offset)
2703 {
2704    LLVMValueRef interp_param = lookup_interp_param(ctx, mode, INTERP_CENTER);
2705    LLVMValueRef src_c0 =
2706       ac_to_float(&ctx->ac, LLVMBuildExtractElement(ctx->ac.builder, offset, ctx->ac.i32_0, ""));
2707    LLVMValueRef src_c1 =
2708       ac_to_float(&ctx->ac, LLVMBuildExtractElement(ctx->ac.builder, offset, ctx->ac.i32_1, ""));
2709 
2710    LLVMValueRef ij_out[2];
2711    LLVMValueRef ddxy_out = ac_build_ddxy_interp(&ctx->ac, interp_param);
2712 
2713    /*
2714     * Take the I then J parameters and their DDX/DDY derivatives, and
2715     * compute the offset (I, J) inputs for the interpolator:
2716     * temp1 = ddx * offset/sample.x + I;
2717     * interp_param.I = ddy * offset/sample.y + temp1;
2718     * temp1 = ddx * offset/sample.x + J;
2719     * interp_param.J = ddy * offset/sample.y + temp1;
2720     */
2721    for (unsigned i = 0; i < 2; i++) {
2722       LLVMValueRef ix_ll = LLVMConstInt(ctx->ac.i32, i, false);
2723       LLVMValueRef iy_ll = LLVMConstInt(ctx->ac.i32, i + 2, false);
2724       LLVMValueRef ddx_el = LLVMBuildExtractElement(ctx->ac.builder, ddxy_out, ix_ll, "");
2725       LLVMValueRef ddy_el = LLVMBuildExtractElement(ctx->ac.builder, ddxy_out, iy_ll, "");
2726       LLVMValueRef interp_el = LLVMBuildExtractElement(ctx->ac.builder, interp_param, ix_ll, "");
2727       LLVMValueRef temp1, temp2;
2728 
2729       interp_el = LLVMBuildBitCast(ctx->ac.builder, interp_el, ctx->ac.f32, "");
2730 
2731       temp1 = ac_build_fmad(&ctx->ac, ddx_el, src_c0, interp_el);
2732       temp2 = ac_build_fmad(&ctx->ac, ddy_el, src_c1, temp1);
2733 
2734       ij_out[i] = LLVMBuildBitCast(ctx->ac.builder, temp2, ctx->ac.i32, "");
2735    }
2736    interp_param = ac_build_gather_values(&ctx->ac, ij_out, 2);
2737    return LLVMBuildBitCast(ctx->ac.builder, interp_param, ctx->ac.v2i32, "");
2738 }
2739 
2740 static LLVMValueRef barycentric_centroid(struct ac_nir_context *ctx, unsigned mode)
2741 {
2742    LLVMValueRef interp_param = lookup_interp_param(ctx, mode, INTERP_CENTROID);
2743    return LLVMBuildBitCast(ctx->ac.builder, interp_param, ctx->ac.v2i32, "");
2744 }
2745 
2746 static LLVMValueRef barycentric_sample(struct ac_nir_context *ctx, unsigned mode)
2747 {
2748    LLVMValueRef interp_param = lookup_interp_param(ctx, mode, INTERP_SAMPLE);
2749    return LLVMBuildBitCast(ctx->ac.builder, interp_param, ctx->ac.v2i32, "");
2750 }
2751 
2752 static LLVMValueRef barycentric_model(struct ac_nir_context *ctx)
2753 {
2754    return LLVMBuildBitCast(ctx->ac.builder, ac_get_arg(&ctx->ac, ctx->args->pull_model),
2755                            ctx->ac.v3i32, "");
2756 }
2757 
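/* Interpolate one input attribute: split the (i, j) pair and emit one
 * fs_interp per component, using the f16 variant (with a high-half select)
 * for 16-bit inputs.
 */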
2758 static LLVMValueRef load_interpolated_input(struct ac_nir_context *ctx, LLVMValueRef interp_param,
2759                                             unsigned index, unsigned comp_start,
2760                                             unsigned num_components, unsigned bitsize,
2761                                             bool high_16bits)
2762 {
2763    LLVMValueRef attr_number = LLVMConstInt(ctx->ac.i32, index, false);
2764    LLVMValueRef interp_param_f;
2765 
2766    interp_param_f = LLVMBuildBitCast(ctx->ac.builder, interp_param, ctx->ac.v2f32, "");
2767    LLVMValueRef i = LLVMBuildExtractElement(ctx->ac.builder, interp_param_f, ctx->ac.i32_0, "");
2768    LLVMValueRef j = LLVMBuildExtractElement(ctx->ac.builder, interp_param_f, ctx->ac.i32_1, "");
2769 
2770    /* Workaround for issue 2647: kill threads with infinite interpolation coeffs */
2771    if (ctx->verified_interp && !_mesa_hash_table_search(ctx->verified_interp, interp_param)) {
2772       LLVMValueRef cond = ac_build_is_inf_or_nan(&ctx->ac, i);
2773       ac_build_kill_if_false(&ctx->ac, LLVMBuildNot(ctx->ac.builder, cond, ""));
2774       _mesa_hash_table_insert(ctx->verified_interp, interp_param, interp_param);
2775    }
2776 
2777    LLVMValueRef values[4];
2778    assert(bitsize == 16 || bitsize == 32);
2779    for (unsigned comp = 0; comp < num_components; comp++) {
2780       LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, comp_start + comp, false);
2781       if (bitsize == 16) {
2782          values[comp] = ac_build_fs_interp_f16(&ctx->ac, llvm_chan, attr_number,
2783                                                ac_get_arg(&ctx->ac, ctx->args->prim_mask), i, j,
2784                                                high_16bits);
2785       } else {
2786          values[comp] = ac_build_fs_interp(&ctx->ac, llvm_chan, attr_number,
2787                                            ac_get_arg(&ctx->ac, ctx->args->prim_mask), i, j);
2788       }
2789    }
2790 
2791    return ac_to_integer(&ctx->ac, ac_build_gather_values(&ctx->ac, values, num_components));
2792 }
2793 
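/* Handle lowered input loads that need no (i, j) interpolation: TCS inputs
 * come from the ABI callback (radeonsi passes them in VGPRs), and FS
 * flat/per-vertex inputs are read from a single vertex with fs_interp_mov.
 */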
2794 static LLVMValueRef visit_load(struct ac_nir_context *ctx, nir_intrinsic_instr *instr)
2795 {
2796    LLVMValueRef values[8];
2797    LLVMTypeRef dest_type = get_def_type(ctx, &instr->def);
2798    unsigned base = nir_intrinsic_base(instr);
2799    unsigned component = nir_intrinsic_component(instr);
2800    unsigned count = instr->def.num_components;
2801    nir_src offset = *nir_get_io_offset_src(instr);
2802 
2803    assert(instr->def.bit_size == 16 || instr->def.bit_size == 32);
2804    /* No indirect indexing allowed. */
2805    assert(nir_src_is_const(offset) && nir_src_as_uint(offset) == 0);
2806 
2807    /* This is used to load TCS inputs from VGPRs in radeonsi. */
2808    if (ctx->stage == MESA_SHADER_TESS_CTRL) {
2809       LLVMTypeRef component_type = LLVMGetTypeKind(dest_type) == LLVMVectorTypeKind ?
2810                                       LLVMGetElementType(dest_type) : dest_type;
2811 
2812       LLVMValueRef result = ctx->abi->load_tess_varyings(ctx->abi, component_type,
2813                                                          base, component, count);
2814       if (instr->def.bit_size == 16) {
2815          result = ac_to_integer(&ctx->ac, result);
2816          result = LLVMBuildTrunc(ctx->ac.builder, result, dest_type, "");
2817       }
2818       return LLVMBuildBitCast(ctx->ac.builder, result, dest_type, "");
2819    }
2820 
2821    assert(ctx->stage == MESA_SHADER_FRAGMENT);
2822    unsigned vertex_id = 0; /* P0 */
2823 
2824    if (instr->intrinsic == nir_intrinsic_load_input_vertex)
2825       vertex_id = nir_src_as_uint(instr->src[0]);
2826 
2827    LLVMValueRef attr_number = LLVMConstInt(ctx->ac.i32, base, false);
2828 
2829    for (unsigned chan = 0; chan < count; chan++) {
2830       LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, (component + chan) % 4, false);
2831       values[chan] = ac_build_fs_interp_mov(&ctx->ac, vertex_id, llvm_chan, attr_number,
2832                                             ac_get_arg(&ctx->ac, ctx->args->prim_mask));
2833       values[chan] = LLVMBuildBitCast(ctx->ac.builder, values[chan], ctx->ac.i32, "");
2834       if (instr->def.bit_size == 16 &&
2835           nir_intrinsic_io_semantics(instr).high_16bits)
2836          values[chan] = LLVMBuildLShr(ctx->ac.builder, values[chan], LLVMConstInt(ctx->ac.i32, 16, 0), "");
2837       values[chan] =
2838          LLVMBuildTruncOrBitCast(ctx->ac.builder, values[chan],
2839                                  instr->def.bit_size == 16 ? ctx->ac.i16 : ctx->ac.i32, "");
2840    }
2841 
2842    LLVMValueRef result = ac_build_gather_values(&ctx->ac, values, count);
2843    return LLVMBuildBitCast(ctx->ac.builder, result, dest_type, "");
2844 }
2845 
2846 static LLVMValueRef
2847 emit_load_frag_shading_rate(struct ac_nir_context *ctx)
2848 {
2849    LLVMValueRef x_rate, y_rate, cond;
2850 
2851    /* VRS Rate X = Ancillary[2:3]
2852     * VRS Rate Y = Ancillary[4:5]
2853     */
2854    x_rate = ac_unpack_param(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->ancillary), 2, 2);
2855    y_rate = ac_unpack_param(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->ancillary), 4, 2);
2856 
2857    /* xRate = xRate == 0x1 ? Horizontal2Pixels : None. */
2858    cond = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, x_rate, ctx->ac.i32_1, "");
2859    x_rate = LLVMBuildSelect(ctx->ac.builder, cond,
2860                             LLVMConstInt(ctx->ac.i32, 4, false), ctx->ac.i32_0, "");
2861 
2862    /* yRate = yRate == 0x1 ? Vertical2Pixels : None. */
2863    cond = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, y_rate, ctx->ac.i32_1, "");
2864    y_rate = LLVMBuildSelect(ctx->ac.builder, cond,
2865                             ctx->ac.i32_1, ctx->ac.i32_0, "");
2866 
2867    return LLVMBuildOr(ctx->ac.builder, x_rate, y_rate, "");
2868 }
2869 
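/* gl_FragCoord: x/y/z come straight from the position inputs, and .w is
 * defined as 1/W, hence the reciprocal of the fourth input.
 */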
2870 static LLVMValueRef
2871 emit_load_frag_coord(struct ac_nir_context *ctx)
2872 {
2873    LLVMValueRef values[4] = {
2874       ac_get_arg(&ctx->ac, ctx->args->frag_pos[0]), ac_get_arg(&ctx->ac, ctx->args->frag_pos[1]),
2875       ac_get_arg(&ctx->ac, ctx->args->frag_pos[2]),
2876       ac_build_fdiv(&ctx->ac, ctx->ac.f32_1, ac_get_arg(&ctx->ac, ctx->args->frag_pos[3]))};
2877 
2878    return ac_to_integer(&ctx->ac, ac_build_gather_values(&ctx->ac, values, 4));
2879 }
2880 
2881 static bool visit_intrinsic(struct ac_nir_context *ctx, nir_intrinsic_instr *instr)
2882 {
2883    LLVMValueRef result = NULL;
2884 
2885    switch (instr->intrinsic) {
2886    case nir_intrinsic_ddx:
2887    case nir_intrinsic_ddy:
2888    case nir_intrinsic_ddx_fine:
2889    case nir_intrinsic_ddy_fine:
2890    case nir_intrinsic_ddx_coarse:
2891    case nir_intrinsic_ddy_coarse:
2892       result = emit_ddxy(ctx, instr->intrinsic, get_src(ctx, instr->src[0]));
2893       break;
2894    case nir_intrinsic_ballot:
2895    case nir_intrinsic_ballot_relaxed:
2896       result = ac_build_ballot(&ctx->ac, get_src(ctx, instr->src[0]));
2897       if (instr->def.bit_size > ctx->ac.wave_size) {
2898          LLVMTypeRef dest_type = LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);
2899          result = LLVMBuildZExt(ctx->ac.builder, result, dest_type, "");
2900       }
2901       break;
2902    case nir_intrinsic_inverse_ballot: {
2903       LLVMValueRef src = get_src(ctx, instr->src[0]);
2904       if (instr->src[0].ssa->bit_size > ctx->ac.wave_size) {
2905          LLVMTypeRef src_type = LLVMIntTypeInContext(ctx->ac.context, ctx->ac.wave_size);
2906          src = LLVMBuildTrunc(ctx->ac.builder, src, src_type, "");
2907       }
2908       result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.inverse.ballot", ctx->ac.i1, &src, 1, 0);
2909       break;
2910    }
2911    case nir_intrinsic_read_invocation:
2912       result =
2913          ac_build_readlane(&ctx->ac, get_src(ctx, instr->src[0]), get_src(ctx, instr->src[1]));
2914       break;
2915    case nir_intrinsic_read_first_invocation:
2916    case nir_intrinsic_as_uniform:
2917       result = ac_build_readlane(&ctx->ac, get_src(ctx, instr->src[0]), NULL);
2918       break;
2919    case nir_intrinsic_load_subgroup_invocation:
2920       result = ac_get_thread_id(&ctx->ac);
2921       break;
2922    case nir_intrinsic_load_workgroup_id: {
2923       LLVMValueRef values[3] = {ctx->ac.i32_0, ctx->ac.i32_0, ctx->ac.i32_0};
2924 
2925       for (int i = 0; i < 3; i++) {
2926          if (ctx->args->workgroup_ids[i].used) {
2927             if (ctx->ac.gfx_level >= GFX12) {
2928                char intr_name[256];
2929                snprintf(intr_name, sizeof(intr_name), "llvm.amdgcn.workgroup.id.%c", "xyz"[i]);
2930                values[i] = ac_build_intrinsic(&ctx->ac, intr_name, ctx->ac.i32, NULL, 0, 0);
2931             } else {
2932                values[i] = ac_get_arg(&ctx->ac, ctx->args->workgroup_ids[i]);
2933             }
2934          }
2935       }
2936       result = ac_build_gather_values(&ctx->ac, values, 3);
2937       break;
2938    }
2939    case nir_intrinsic_load_tess_rel_patch_id_amd:
2940       switch (ctx->stage) {
2941       case MESA_SHADER_TESS_CTRL:
2942          result = ac_unpack_param(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->tcs_rel_ids), 0, 8);
2943          break;
2944       case MESA_SHADER_TESS_EVAL:
2945          result = ctx->abi->tes_rel_patch_id_replaced ? ctx->abi->tes_rel_patch_id_replaced :
2946                   ac_get_arg(&ctx->ac, ctx->args->tes_rel_patch_id);
2947          break;
2948       default:
2949          unreachable("invalid stage");
2950       }
2951       break;
2952    case nir_intrinsic_load_base_vertex:
2953    case nir_intrinsic_load_first_vertex:
2954    case nir_intrinsic_load_ring_attr_amd:
2955    case nir_intrinsic_load_lds_ngg_scratch_base_amd:
2956    case nir_intrinsic_load_lds_ngg_gs_out_vertex_base_amd:
2957       result = ctx->abi->intrinsic_load(ctx->abi, instr);
2958       break;
2959    case nir_intrinsic_load_vertex_id_zero_base:
2960       result = ctx->abi->vertex_id_replaced ? ctx->abi->vertex_id_replaced : ctx->abi->vertex_id;
2961       break;
2962    case nir_intrinsic_load_local_invocation_id: {
2963       LLVMValueRef ids = ac_get_arg(&ctx->ac, ctx->args->local_invocation_ids);
2964 
2965       if (LLVMGetTypeKind(LLVMTypeOf(ids)) == LLVMIntegerTypeKind) {
2966          /* Thread IDs are packed in VGPR0, 10 bits per component. */
2967          LLVMValueRef id[3];
2968 
2969          for (unsigned i = 0; i < 3; i++)
2970             id[i] = ac_unpack_param(&ctx->ac, ids, i * 10, 10);
2971 
2972          result = ac_build_gather_values(&ctx->ac, id, 3);
2973       } else {
2974          result = ids;
2975       }
2976       break;
2977    }
2978    case nir_intrinsic_load_base_instance:
2979       result = ac_get_arg(&ctx->ac, ctx->args->start_instance);
2980       break;
2981    case nir_intrinsic_load_draw_id:
2982       result = ac_get_arg(&ctx->ac, ctx->args->draw_id);
2983       break;
2984    case nir_intrinsic_load_view_index:
2985       result = ac_get_arg(&ctx->ac, ctx->args->view_index);
2986       break;
2987    case nir_intrinsic_load_invocation_id:
2988       assert(ctx->stage == MESA_SHADER_TESS_CTRL || ctx->stage == MESA_SHADER_GEOMETRY);
2989       if (ctx->stage == MESA_SHADER_TESS_CTRL) {
2990          result = ac_unpack_param(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->tcs_rel_ids), 8, 5);
2991       } else if (ctx->ac.gfx_level >= GFX12) {
2992          result = ac_unpack_param(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->gs_vtx_offset[0]), 27, 5);
2993       } else if (ctx->ac.gfx_level >= GFX10) {
2994          result = ac_unpack_param(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->gs_invocation_id), 0, 7);
2995       } else {
2996          result = ac_get_arg(&ctx->ac, ctx->args->gs_invocation_id);
2997       }
2998       break;
2999    case nir_intrinsic_load_primitive_id:
3000       if (ctx->stage == MESA_SHADER_GEOMETRY) {
3001          result = ac_get_arg(&ctx->ac, ctx->args->gs_prim_id);
3002       } else if (ctx->stage == MESA_SHADER_TESS_CTRL) {
3003          result = ac_get_arg(&ctx->ac, ctx->args->tcs_patch_id);
3004       } else if (ctx->stage == MESA_SHADER_TESS_EVAL) {
3005          result = ctx->abi->tes_patch_id_replaced ?
3006             ctx->abi->tes_patch_id_replaced : ac_get_arg(&ctx->ac, ctx->args->tes_patch_id);
3007       } else if (ctx->stage == MESA_SHADER_VERTEX) {
3008          if (ctx->args->vs_prim_id.used)
3009             result = ac_get_arg(&ctx->ac, ctx->args->vs_prim_id); /* legacy */
3010          else
3011             result = ac_get_arg(&ctx->ac, ctx->args->gs_prim_id); /* NGG */
3012       } else
3013       fprintf(stderr, "Unknown primitive id intrinsic: %d\n", ctx->stage);
3014       break;
3015    case nir_intrinsic_load_sample_id:
3016       result = ac_unpack_param(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->ancillary), 8, 4);
3017       break;
3018    case nir_intrinsic_load_sample_pos:
3019       result = load_sample_pos(ctx);
3020       break;
3021    case nir_intrinsic_load_frag_coord:
3022       result = emit_load_frag_coord(ctx);
3023       break;
3024    case nir_intrinsic_load_frag_shading_rate:
3025       result = emit_load_frag_shading_rate(ctx);
3026       break;
3027    case nir_intrinsic_load_front_face:
3028       result = emit_i2b(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->front_face));
3029       break;
3030    case nir_intrinsic_load_helper_invocation:
3031    case nir_intrinsic_is_helper_invocation:
3032       result = ac_build_load_helper_invocation(&ctx->ac);
3033       break;
3034    case nir_intrinsic_load_instance_id:
3035       result = ctx->abi->instance_id_replaced ?
3036          ctx->abi->instance_id_replaced : ctx->abi->instance_id;
3037       break;
3038    case nir_intrinsic_load_num_workgroups:
3039       if (ctx->abi->load_grid_size_from_user_sgpr) {
3040          result = ac_get_arg(&ctx->ac, ctx->args->num_work_groups);
3041       } else {
3042          result = ac_build_load_invariant(&ctx->ac,
3043             ac_get_ptr_arg(&ctx->ac, ctx->args, ctx->args->num_work_groups), ctx->ac.i32_0);
3044       }
3045       break;
3046    case nir_intrinsic_load_subgroup_id:
3047       result = visit_load_subgroup_id(ctx);
3048       break;
3049    case nir_intrinsic_load_local_invocation_index:
3050       result = visit_load_local_invocation_index(ctx);
3051       break;
3052    case nir_intrinsic_first_invocation:
3053       result = visit_first_invocation(ctx);
3054       break;
3055    case nir_intrinsic_store_ssbo:
3056       visit_store_ssbo(ctx, instr);
3057       break;
3058    case nir_intrinsic_load_ssbo:
3059       result = visit_load_buffer(ctx, instr);
3060       break;
3061    case nir_intrinsic_load_global_amd:
3062       result = visit_load_global(ctx, instr);
3063       break;
3064    case nir_intrinsic_store_global_amd:
3065       visit_store_global(ctx, instr);
3066       break;
3067    case nir_intrinsic_global_atomic_amd:
3068    case nir_intrinsic_global_atomic_swap_amd:
3069       result = visit_global_atomic(ctx, instr);
3070       break;
3071    case nir_intrinsic_ssbo_atomic:
3072    case nir_intrinsic_ssbo_atomic_swap:
3073       result = visit_atomic_ssbo(ctx, instr);
3074       break;
3075    case nir_intrinsic_load_ubo:
3076       result = visit_load_ubo_buffer(ctx, instr);
3077       break;
3078    case nir_intrinsic_get_ssbo_size:
3079       result = visit_get_ssbo_size(ctx, instr);
3080       break;
3081    case nir_intrinsic_load_input:
3082    case nir_intrinsic_load_per_primitive_input:
3083    case nir_intrinsic_load_input_vertex:
3084    case nir_intrinsic_load_per_vertex_input:
3085       result = visit_load(ctx, instr);
3086       break;
3087    case nir_intrinsic_store_output:
3088       visit_store_output(ctx, instr);
3089       break;
3090    case nir_intrinsic_load_shared:
3091       result = visit_load_shared(ctx, instr);
3092       break;
3093    case nir_intrinsic_store_shared:
3094       visit_store_shared(ctx, instr);
3095       break;
3096    case nir_intrinsic_load_shared2_amd:
3097       result = visit_load_shared2_amd(ctx, instr);
3098       break;
3099    case nir_intrinsic_store_shared2_amd:
3100       visit_store_shared2_amd(ctx, instr);
3101       break;
3102    case nir_intrinsic_bindless_image_load:
3103    case nir_intrinsic_bindless_image_sparse_load:
3104    case nir_intrinsic_bindless_image_fragment_mask_load_amd:
3105       result = visit_image_load(ctx, instr);
3106       break;
3107    case nir_intrinsic_bindless_image_store:
3108       visit_image_store(ctx, instr);
3109       break;
3110    case nir_intrinsic_bindless_image_atomic:
3111    case nir_intrinsic_bindless_image_atomic_swap:
3112       result = visit_image_atomic(ctx, instr);
3113       break;
3114    case nir_intrinsic_shader_clock:
3115       result = ac_build_shader_clock(&ctx->ac, nir_intrinsic_memory_scope(instr));
3116       break;
3117    case nir_intrinsic_terminate:
3118    case nir_intrinsic_terminate_if:
3119       emit_discard(ctx, instr);
3120       break;
3121    case nir_intrinsic_demote:
3122    case nir_intrinsic_demote_if:
3123       emit_demote(ctx, instr);
3124       break;
3125    case nir_intrinsic_barrier: {
3126       assert(!(nir_intrinsic_memory_semantics(instr) &
3127                (NIR_MEMORY_MAKE_AVAILABLE | NIR_MEMORY_MAKE_VISIBLE)));
3128 
3129       nir_variable_mode modes = nir_intrinsic_memory_modes(instr);
3130 
3131       unsigned wait_flags = 0;
3132       if (modes & (nir_var_mem_global | nir_var_mem_ssbo | nir_var_image))
3133          wait_flags |= AC_WAIT_LOAD | AC_WAIT_STORE;
3134       if (modes & nir_var_mem_shared)
3135          wait_flags |= AC_WAIT_DS;
3136 
3137       if (wait_flags)
3138          ac_build_waitcnt(&ctx->ac, wait_flags);
3139 
3140       if (nir_intrinsic_execution_scope(instr) == SCOPE_WORKGROUP)
3141          ac_build_s_barrier(&ctx->ac, ctx->stage);
3142       break;
3143    }
3144    case nir_intrinsic_optimization_barrier_vgpr_amd:
3145       result = get_src(ctx, instr->src[0]);
3146       ac_build_optimization_barrier(&ctx->ac, &result, false);
3147       break;
3148    case nir_intrinsic_optimization_barrier_sgpr_amd:
3149       result = get_src(ctx, instr->src[0]);
3150       ac_build_optimization_barrier(&ctx->ac, &result, true);
3151       break;
3152    case nir_intrinsic_shared_atomic:
3153    case nir_intrinsic_shared_atomic_swap: {
3154       LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[0], 0);
3155       result = visit_var_atomic(ctx, instr, ptr, 1);
3156       break;
3157    }
3158    case nir_intrinsic_load_barycentric_pixel:
3159       result = barycentric_center(ctx, nir_intrinsic_interp_mode(instr));
3160       break;
3161    case nir_intrinsic_load_barycentric_centroid:
3162       result = barycentric_centroid(ctx, nir_intrinsic_interp_mode(instr));
3163       break;
3164    case nir_intrinsic_load_barycentric_sample:
3165       result = barycentric_sample(ctx, nir_intrinsic_interp_mode(instr));
3166       break;
3167    case nir_intrinsic_load_barycentric_model:
3168       result = barycentric_model(ctx);
3169       break;
3170    case nir_intrinsic_load_barycentric_at_offset: {
3171       LLVMValueRef offset = ac_to_float(&ctx->ac, get_src(ctx, instr->src[0]));
3172       result = barycentric_offset(ctx, nir_intrinsic_interp_mode(instr), offset);
3173       break;
3174    }
3175    case nir_intrinsic_load_interpolated_input: {
3176       /* We assume any indirect loads have been lowered away */
3177       ASSERTED nir_const_value *offset = nir_src_as_const_value(instr->src[1]);
3178       assert(offset);
3179       assert(offset[0].i32 == 0);
3180 
3181       LLVMValueRef interp_param = get_src(ctx, instr->src[0]);
3182       unsigned index = nir_intrinsic_base(instr);
3183       unsigned component = nir_intrinsic_component(instr);
3184       result = load_interpolated_input(ctx, interp_param, index, component,
3185                                        instr->def.num_components, instr->def.bit_size,
3186                                        nir_intrinsic_io_semantics(instr).high_16bits);
3187       break;
3188    }
3189    case nir_intrinsic_sendmsg_amd: {
3190       unsigned imm = nir_intrinsic_base(instr);
3191       LLVMValueRef m0_content = get_src(ctx, instr->src[0]);
3192       ac_build_sendmsg(&ctx->ac, imm, m0_content);
3193       break;
3194    }
3195    case nir_intrinsic_load_gs_wave_id_amd: {
3196       if (ctx->args->merged_wave_info.used)
3197          result = ac_unpack_param(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->merged_wave_info), 16, 8);
3198       else if (ctx->args->gs_wave_id.used)
3199          result = ac_get_arg(&ctx->ac, ctx->args->gs_wave_id);
3200       else
3201          unreachable("Shader doesn't have GS wave ID.");
3202       break;
3203    }
3204    case nir_intrinsic_load_tess_coord: {
3205       LLVMValueRef coord[] = {
3206          ctx->abi->tes_u_replaced ? ctx->abi->tes_u_replaced : ac_get_arg(&ctx->ac, ctx->args->tes_u),
3207          ctx->abi->tes_v_replaced ? ctx->abi->tes_v_replaced : ac_get_arg(&ctx->ac, ctx->args->tes_v),
3208          ctx->ac.f32_0,
3209       };
3210 
3211       /* For triangles, the vector should be (u, v, 1-u-v). */
3212       if (ctx->info->tess._primitive_mode == TESS_PRIMITIVE_TRIANGLES) {
3213          coord[2] = LLVMBuildFSub(ctx->ac.builder, ctx->ac.f32_1,
3214                                   LLVMBuildFAdd(ctx->ac.builder, coord[0], coord[1], ""), "");
3215       }
3216       result = ac_build_gather_values(&ctx->ac, coord, 3);
3217       break;
3218    }
3219    case nir_intrinsic_vote_all: {
3220       result = ac_build_vote_all(&ctx->ac, get_src(ctx, instr->src[0]));
3221       if (ctx->info->stage == MESA_SHADER_FRAGMENT)
3222          result = ac_build_wqm(&ctx->ac, result);
3223       break;
3224    }
3225    case nir_intrinsic_vote_any: {
3226       result = ac_build_vote_any(&ctx->ac, get_src(ctx, instr->src[0]));
3227       if (ctx->info->stage == MESA_SHADER_FRAGMENT)
3228          result = ac_build_wqm(&ctx->ac, result);
3229       break;
3230    }
3231    case nir_intrinsic_quad_vote_any: {
3232       result = ac_build_wqm_vote(&ctx->ac, get_src(ctx, instr->src[0]));
3233       break;
3234    }
3235    case nir_intrinsic_quad_vote_all: {
3236       LLVMValueRef src = LLVMBuildNot(ctx->ac.builder, get_src(ctx, instr->src[0]), "");
3237       result = LLVMBuildNot(ctx->ac.builder, ac_build_wqm_vote(&ctx->ac, src), "");
3238       break;
3239    }
3240    case nir_intrinsic_shuffle:
3241       if (ctx->ac.gfx_level == GFX8 || ctx->ac.gfx_level == GFX9 ||
3242           (ctx->ac.gfx_level >= GFX10 && ctx->ac.wave_size == 32)) {
3243          result =
3244             ac_build_shuffle(&ctx->ac, get_src(ctx, instr->src[0]), get_src(ctx, instr->src[1]));
3245       } else {
3246          LLVMValueRef src = get_src(ctx, instr->src[0]);
3247          LLVMValueRef index = get_src(ctx, instr->src[1]);
3248          LLVMTypeRef type = LLVMTypeOf(src);
3249          struct waterfall_context wctx;
3250          LLVMValueRef index_val;
3251 
3252          index_val = enter_waterfall(ctx, &wctx, index, true);
3253 
3254          src = LLVMBuildZExt(ctx->ac.builder, src, ctx->ac.i32, "");
3255 
3256          result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.readlane", ctx->ac.i32,
3257                                      (LLVMValueRef[]){src, index_val}, 2, 0);
3258 
3259          result = LLVMBuildTrunc(ctx->ac.builder, result, type, "");
3260 
3261          result = exit_waterfall(ctx, &wctx, result);
3262       }
3263       break;
3264    case nir_intrinsic_reduce:
3265       result = ac_build_reduce(&ctx->ac, get_src(ctx, instr->src[0]), instr->const_index[0],
3266                                instr->const_index[1]);
3267       break;
3268    case nir_intrinsic_inclusive_scan:
3269       result =
3270          ac_build_inclusive_scan(&ctx->ac, get_src(ctx, instr->src[0]), instr->const_index[0]);
3271       break;
3272    case nir_intrinsic_exclusive_scan:
3273       result =
3274          ac_build_exclusive_scan(&ctx->ac, get_src(ctx, instr->src[0]), instr->const_index[0]);
3275       break;
3276    case nir_intrinsic_quad_broadcast: {
3277       unsigned lane = nir_src_as_uint(instr->src[1]);
3278       result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]), lane, lane, lane, lane);
3279       if (ctx->info->stage == MESA_SHADER_FRAGMENT)
3280          result = ac_build_wqm(&ctx->ac, result);
3281       break;
3282    }
3283    case nir_intrinsic_quad_swap_horizontal:
3284       result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]), 1, 0, 3, 2);
3285       if (ctx->info->stage == MESA_SHADER_FRAGMENT)
3286          result = ac_build_wqm(&ctx->ac, result);
3287       break;
3288    case nir_intrinsic_quad_swap_vertical:
3289       result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]), 2, 3, 0, 1);
3290       if (ctx->info->stage == MESA_SHADER_FRAGMENT)
3291          result = ac_build_wqm(&ctx->ac, result);
3292       break;
3293    case nir_intrinsic_quad_swap_diagonal:
3294       result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]), 3, 2, 1, 0);
3295       if (ctx->info->stage == MESA_SHADER_FRAGMENT)
3296          result = ac_build_wqm(&ctx->ac, result);
3297       break;
3298    case nir_intrinsic_quad_swizzle_amd: {
3299       uint32_t mask = nir_intrinsic_swizzle_mask(instr);
3300       result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]), mask & 0x3,
3301                                      (mask >> 2) & 0x3, (mask >> 4) & 0x3, (mask >> 6) & 0x3);
3302       if (ctx->info->stage == MESA_SHADER_FRAGMENT)
3303          result = ac_build_wqm(&ctx->ac, result);
3304       break;
3305    }
3306    case nir_intrinsic_masked_swizzle_amd: {
3307       uint32_t mask = nir_intrinsic_swizzle_mask(instr);
3308       result = ac_build_ds_swizzle(&ctx->ac, get_src(ctx, instr->src[0]), mask);
3309       break;
3310    }
3311    case nir_intrinsic_write_invocation_amd:
3312       result = ac_build_writelane(&ctx->ac, get_src(ctx, instr->src[0]),
3313                                   get_src(ctx, instr->src[1]), get_src(ctx, instr->src[2]));
3314       break;
3315    case nir_intrinsic_mbcnt_amd:
3316       result = ac_build_mbcnt_add(&ctx->ac, get_src(ctx, instr->src[0]), get_src(ctx, instr->src[1]));
3317       break;
3318    case nir_intrinsic_load_scratch: {
3319       LLVMValueRef offset = get_src(ctx, instr->src[0]);
3320       LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->scratch, offset);
3321       LLVMTypeRef comp_type = LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);
3322       LLVMTypeRef vec_type = instr->def.num_components == 1
3323                                 ? comp_type
3324                                 : LLVMVectorType(comp_type, instr->def.num_components);
3325       result = LLVMBuildLoad2(ctx->ac.builder, vec_type, ptr, "");
3326       break;
3327    }
3328    case nir_intrinsic_store_scratch: {
3329       LLVMValueRef offset = get_src(ctx, instr->src[1]);
3330       LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->scratch, offset);
3331       LLVMTypeRef comp_type = LLVMIntTypeInContext(ctx->ac.context, instr->src[0].ssa->bit_size);
3332       LLVMValueRef src = get_src(ctx, instr->src[0]);
3333       unsigned wrmask = nir_intrinsic_write_mask(instr);
3334       while (wrmask) {
3335          int start, count;
3336          u_bit_scan_consecutive_range(&wrmask, &start, &count);
3337 
3338          LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, start, false);
3339          LLVMValueRef offset_ptr = LLVMBuildGEP2(ctx->ac.builder, comp_type, ptr, &offset, 1, "");
3340          LLVMValueRef offset_src = ac_extract_components(&ctx->ac, src, start, count);
3341          LLVMBuildStore(ctx->ac.builder, offset_src, offset_ptr);
3342       }
3343       break;
3344    }
3345    case nir_intrinsic_load_constant: {
3346       unsigned base = nir_intrinsic_base(instr);
3347       unsigned range = nir_intrinsic_range(instr);
3348 
3349       LLVMValueRef offset = get_src(ctx, instr->src[0]);
3350       offset = LLVMBuildAdd(ctx->ac.builder, offset, LLVMConstInt(ctx->ac.i32, base, false), "");
3351 
3352       /* Clamp the offset to avoid out-of-bounds accesses, which global
3353        * instructions can't handle.
3354        */
3355       LLVMValueRef size = LLVMConstInt(ctx->ac.i32, base + range, false);
3356       LLVMValueRef cond = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, offset, size, "");
3357       offset = LLVMBuildSelect(ctx->ac.builder, cond, offset, size, "");
3358 
3359       LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->constant_data, offset);
3360 
3361       /* TODO: LLVM doesn't sign-extend the result of s_getpc_b64 correctly, causing hangs.
3362        * Do it manually here.
3363        */
3364       if (ctx->ac.gfx_level == GFX12) {
3365          LLVMValueRef addr = LLVMBuildPtrToInt(ctx->ac.builder, ptr, ctx->ac.i64, "");
3366          addr = LLVMBuildOr(ctx->ac.builder, addr,
3367                             LLVMConstInt(ctx->ac.i64, 0xffff000000000000ull, 0), "");
3368          ptr = LLVMBuildIntToPtr(ctx->ac.builder, addr, LLVMTypeOf(ptr), "");
3369       }
3370 
3371       LLVMTypeRef comp_type = LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);
3372       LLVMTypeRef vec_type = instr->def.num_components == 1
3373                                 ? comp_type
3374                                 : LLVMVectorType(comp_type, instr->def.num_components);
3375       result = LLVMBuildLoad2(ctx->ac.builder, vec_type, ptr, "");
3376       break;
3377    }
3378    case nir_intrinsic_set_vertex_and_primitive_count:
3379       /* Currently ignored. */
3380       break;
3381    case nir_intrinsic_load_typed_buffer_amd:
3382    case nir_intrinsic_load_buffer_amd:
3383    case nir_intrinsic_store_buffer_amd: {
3384       unsigned src_base = instr->intrinsic == nir_intrinsic_store_buffer_amd ? 1 : 0;
3385       bool idxen = !nir_src_is_const(instr->src[src_base + 3]) ||
3386                    nir_src_as_uint(instr->src[src_base + 3]);
3387 
3388       LLVMValueRef store_data = get_src(ctx, instr->src[0]);
3389       LLVMValueRef descriptor = get_src(ctx, instr->src[src_base + 0]);
3390       LLVMValueRef addr_voffset = get_src(ctx, instr->src[src_base + 1]);
3391       LLVMValueRef addr_soffset = get_src(ctx, instr->src[src_base + 2]);
3392       LLVMValueRef vidx = idxen ? get_src(ctx, instr->src[src_base + 3]) : NULL;
3393       unsigned num_components = instr->def.num_components;
3394       unsigned const_offset = nir_intrinsic_base(instr);
3395       bool reorder = nir_intrinsic_can_reorder(instr);
3396       enum gl_access_qualifier access = ac_get_mem_access_flags(instr);
3397       bool uses_format = access & ACCESS_USES_FORMAT_AMD;
3398 
3399       LLVMValueRef voffset = LLVMBuildAdd(ctx->ac.builder, addr_voffset,
3400                                           LLVMConstInt(ctx->ac.i32, const_offset, 0), "");
3401 
3402       if (instr->intrinsic == nir_intrinsic_load_buffer_amd && uses_format) {
3403          assert(instr->def.bit_size == 16 || instr->def.bit_size == 32);
3404          result = ac_build_buffer_load_format(&ctx->ac, descriptor, vidx, voffset, num_components,
3405                                               access, reorder,
3406                                               instr->def.bit_size == 16, false);
3407          result = ac_to_integer(&ctx->ac, result);
3408       } else if (instr->intrinsic == nir_intrinsic_store_buffer_amd && uses_format) {
3409          assert(instr->src[0].ssa->bit_size == 16 || instr->src[0].ssa->bit_size == 32);
3410          ac_build_buffer_store_format(&ctx->ac, descriptor, store_data, vidx, voffset, access);
3411       } else if (instr->intrinsic == nir_intrinsic_load_buffer_amd ||
3412                  instr->intrinsic == nir_intrinsic_load_typed_buffer_amd) {
3413          /* LLVM is unable to select instructions for channel types larger than
3414           * 32 bits. Work around this by loading i32s and casting to the correct
3415           * type later.
3416           */
3416          const unsigned fetch_num_components =
3417             num_components * MAX2(32, instr->def.bit_size) / 32;
3418 
3419          LLVMTypeRef channel_type =
3420             LLVMIntTypeInContext(ctx->ac.context, MIN2(32, instr->def.bit_size));
3421 
3422          if (instr->intrinsic == nir_intrinsic_load_buffer_amd) {
3423             result = ac_build_buffer_load(&ctx->ac, descriptor, fetch_num_components, vidx, voffset,
3424                                           addr_soffset, channel_type, access, reorder, false);
3425          } else {
3426             const unsigned align_offset = nir_intrinsic_align_offset(instr);
3427             const unsigned align_mul = nir_intrinsic_align_mul(instr);
3428             const enum pipe_format format = nir_intrinsic_format(instr);
3429 
3430             result =
3431                ac_build_safe_tbuffer_load(&ctx->ac, descriptor, vidx, addr_voffset, addr_soffset,
3432                                           format, MIN2(32, instr->def.bit_size), const_offset, align_offset,
3433                                           align_mul, fetch_num_components, access, reorder);
3434          }
3435 
3436          /* Trim to needed vector components. */
3437          result = ac_trim_vector(&ctx->ac, result, fetch_num_components);
3438 
3439          /* Cast to larger than 32-bit sized components if needed. */
3440          if (instr->def.bit_size > 32) {
3441             LLVMTypeRef cast_channel_type =
3442                LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);
3443             LLVMTypeRef cast_type =
3444                num_components == 1 ? cast_channel_type :
3445                LLVMVectorType(cast_channel_type, num_components);
3446             result = LLVMBuildBitCast(ctx->ac.builder, result, cast_type, "");
3447          }
3448 
3449          /* Cast the result to an integer (or vector of integers). */
3450          result = ac_to_integer(&ctx->ac, result);
3451       } else {
3452          unsigned writemask = nir_intrinsic_write_mask(instr);
3453          while (writemask) {
3454             int start, count;
3455             u_bit_scan_consecutive_range(&writemask, &start, &count);
3456 
3457             LLVMValueRef voffset = LLVMBuildAdd(
3458                ctx->ac.builder, addr_voffset,
3459                LLVMConstInt(ctx->ac.i32, const_offset + start * 4, 0), "");
3460 
3461             LLVMValueRef data = extract_vector_range(&ctx->ac, store_data, start, count);
3462             ac_build_buffer_store_dword(&ctx->ac, descriptor, data, vidx, voffset, addr_soffset,
3463                                         access);
3464          }
3465       }
3466       break;
3467    }
3468    case nir_intrinsic_is_subgroup_invocation_lt_amd: {
3469       LLVMValueRef count = LLVMBuildAnd(ctx->ac.builder, get_src(ctx, instr->src[0]),
3470                                         LLVMConstInt(ctx->ac.i32, 0xff, 0), "");
3471       result = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), count, "");
3472       break;
3473    }
3474    case nir_intrinsic_overwrite_vs_arguments_amd:
3475       ctx->abi->vertex_id_replaced = get_src(ctx, instr->src[0]);
3476       ctx->abi->instance_id_replaced = get_src(ctx, instr->src[1]);
3477       break;
3478    case nir_intrinsic_overwrite_tes_arguments_amd:
3479       ctx->abi->tes_u_replaced = ac_to_float(&ctx->ac, get_src(ctx, instr->src[0]));
3480       ctx->abi->tes_v_replaced = ac_to_float(&ctx->ac, get_src(ctx, instr->src[1]));
3481       ctx->abi->tes_rel_patch_id_replaced = get_src(ctx, instr->src[3]);
3482       ctx->abi->tes_patch_id_replaced = get_src(ctx, instr->src[2]);
3483       break;
3484    case nir_intrinsic_gds_atomic_add_amd: {
3485       LLVMValueRef store_val = get_src(ctx, instr->src[0]);
3486       LLVMValueRef addr = get_src(ctx, instr->src[1]);
3487       LLVMTypeRef gds_ptr_type = LLVMPointerType(ctx->ac.i32, AC_ADDR_SPACE_GDS);
3488       LLVMValueRef gds_base = LLVMBuildIntToPtr(ctx->ac.builder, addr, gds_ptr_type, "");
3489       ac_build_atomic_rmw(&ctx->ac, LLVMAtomicRMWBinOpAdd, gds_base, store_val, "workgroup-one-as");
3490       break;
3491    }
3492    case nir_intrinsic_elect:
3493       result = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, visit_first_invocation(ctx),
3494                              ac_get_thread_id(&ctx->ac), "");
3495       break;
3496    case nir_intrinsic_lane_permute_16_amd:
3497       result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.permlane16", ctx->ac.i32,
3498                                   (LLVMValueRef[]){get_src(ctx, instr->src[0]),
3499                                                    get_src(ctx, instr->src[0]),
3500                                                    get_src(ctx, instr->src[1]),
3501                                                    get_src(ctx, instr->src[2]),
3502                                                    ctx->ac.i1false,
3503                                                    ctx->ac.i1false}, 6, 0);
3504       break;
3505    case nir_intrinsic_load_scalar_arg_amd:
3506    case nir_intrinsic_load_vector_arg_amd: {
3507       assert(nir_intrinsic_base(instr) < AC_MAX_ARGS);
3508       struct ac_arg arg;
3509       arg.arg_index = nir_intrinsic_base(instr);
3510       arg.used = true;
3511       result = ac_to_integer(&ctx->ac, ac_get_arg(&ctx->ac, arg));
3512       if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(result)) != 32)
3513          result = LLVMBuildBitCast(ctx->ac.builder, result, get_def_type(ctx, &instr->def), "");
3514       break;
3515    }
3516    case nir_intrinsic_load_smem_amd: {
3517       LLVMValueRef base = get_src(ctx, instr->src[0]);
3518       LLVMValueRef offset = get_src(ctx, instr->src[1]);
3519 
3520       bool is_addr_32bit = nir_src_bit_size(instr->src[0]) == 32;
3521       int addr_space = is_addr_32bit ? AC_ADDR_SPACE_CONST_32BIT : AC_ADDR_SPACE_CONST;
3522 
3523       LLVMTypeRef result_type = get_def_type(ctx, &instr->def);
3524       LLVMTypeRef byte_ptr_type = LLVMPointerType(ctx->ac.i8, addr_space);
3525 
3526       LLVMValueRef addr = LLVMBuildIntToPtr(ctx->ac.builder, base, byte_ptr_type, "");
3527       /* see ac_build_load_custom() for 32bit/64bit addr GEP difference */
3528       addr = is_addr_32bit ?
3529          LLVMBuildInBoundsGEP2(ctx->ac.builder, ctx->ac.i8, addr, &offset, 1, "") :
3530          LLVMBuildGEP2(ctx->ac.builder, ctx->ac.i8, addr, &offset, 1, "");
3531 
3532       LLVMSetMetadata(addr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);
3533       result = LLVMBuildLoad2(ctx->ac.builder, result_type, addr, "");
3534       LLVMSetMetadata(result, ctx->ac.invariant_load_md_kind, ctx->ac.empty_md);
3535       break;
3536    }
3537    case nir_intrinsic_ordered_xfb_counter_add_gfx11_amd: {
3538       /* GFX11 GDS instructions only operate on the first active lane; all other lanes
3539        * and their EXEC bits are ignored. This uses the mutex feature of ds_ordered_count
3540        * to emulate a multi-dword atomic.
3541        *
3542        * This is the expected code:
3543        *    ds_ordered_count release=0 done=0   // lock mutex
3544        *    ds_add_gs_reg_rtn GDS_STRMOUT_DWORDS_WRITTEN_0
3545        *    ds_add_gs_reg_rtn GDS_STRMOUT_DWORDS_WRITTEN_1
3546        *    ds_add_gs_reg_rtn GDS_STRMOUT_DWORDS_WRITTEN_2
3547        *    ds_add_gs_reg_rtn GDS_STRMOUT_DWORDS_WRITTEN_3
3548        *    ds_ordered_count release=1 done=1   // unlock mutex
3549        *
3550        * GDS_STRMOUT_DWORDS_WRITTEN_n are just general-purpose global registers. We use them
3551        * because MCBP (mid-command-buffer preemption) saves and restores them, and it doesn't
3552        * save and restore GDS memory.
3553        */
3554       LLVMValueRef args[8] = {
3555          LLVMBuildIntToPtr(ctx->ac.builder, get_src(ctx, instr->src[0]),
3556                            LLVMPointerType(ctx->ac.i32, AC_ADDR_SPACE_GDS), ""),
3557          ctx->ac.i32_0,                             /* value to add */
3558          ctx->ac.i32_0,                             /* ordering */
3559          ctx->ac.i32_0,                             /* scope */
3560          ctx->ac.i1false,                           /* isVolatile */
3561          LLVMConstInt(ctx->ac.i32, 1 << 24, false), /* OA index, bits 24+: lane count */
3562          ctx->ac.i1false,                           /* wave release */
3563          ctx->ac.i1false,                           /* wave done */
3564       };
3565 
3566       /* Set release=0 to start a GDS mutex. Set done=0 because it's not the last one. */
3567       ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ds.ordered.add", ctx->ac.i32,
3568                          args, ARRAY_SIZE(args), 0);
3569       ac_build_waitcnt(&ctx->ac, AC_WAIT_DS);
3570 
3571       LLVMValueRef global_count[4];
3572       LLVMValueRef count_vec = get_src(ctx, instr->src[1]);
3573       unsigned write_mask = nir_intrinsic_write_mask(instr);
3574       for (unsigned i = 0; i < instr->num_components; i++) {
3575          LLVMValueRef value =
3576             LLVMBuildExtractElement(ctx->ac.builder, count_vec,
3577                                     LLVMConstInt(ctx->ac.i32, i, false), "");
3578          if (write_mask & (1 << i)) {
3579             /* The offset is a relative offset from GDS_STRMOUT_DWORDS_WRITTEN_0. */
3580             global_count[i] =
3581                ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ds.add.gs.reg.rtn.i32", ctx->ac.i32,
3582                                   (LLVMValueRef[]){value, LLVMConstInt(ctx->ac.i32, i * 4, 0)},
3583                                   2, 0);
3584          } else {
3585             global_count[i] = LLVMGetUndef(ctx->ac.i32);
3586          }
3587       }
3588 
3589       ac_build_waitcnt(&ctx->ac, AC_WAIT_DS);
3590 
3591       /* Set release=1 to end a GDS mutex. Set done=1 because it's the last one. */
3592       args[6] = args[7] = ctx->ac.i1true;
3593       ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ds.ordered.add", ctx->ac.i32,
3594                          args, ARRAY_SIZE(args), 0);
3595       result = ac_build_gather_values(&ctx->ac, global_count, instr->num_components);
3596       break;
3597    }
3598    case nir_intrinsic_xfb_counter_sub_gfx11_amd: {
3599       /* Must be called from a single lane of a workgroup. */
3600       LLVMValueRef sub_vec = get_src(ctx, instr->src[0]);
3601       unsigned write_mask = nir_intrinsic_write_mask(instr);
3602 
3603       for (unsigned i = 0; i < instr->num_components; i++) {
3604          if (write_mask & (1 << i)) {
3605             LLVMValueRef value =
3606                LLVMBuildExtractElement(ctx->ac.builder, sub_vec,
3607                                        LLVMConstInt(ctx->ac.i32, i, false), "");
3608             /* The offset is a relative offset from GDS_STRMOUT_DWORDS_WRITTEN_0. */
3609             ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ds.sub.gs.reg.rtn.i32", ctx->ac.i32,
3610                                (LLVMValueRef[]){value, LLVMConstInt(ctx->ac.i32, i * 4, 0)},
3611                                2, 0);
3612          }
3613       }
3614       break;
3615    }
3616    case nir_intrinsic_ordered_add_loop_gfx12_amd: {
3617       const unsigned num_atomics = 6;
3618       char code[2048];
3619       char *ptr = code;
3620 
3621       /* Assembly outputs:
3622        *    i32 VGPR $0 = previous value in memory
3623        *
3624        * Assembly inputs:
3625        *    EXEC = one lane per counter (use nir_push_if, streamout should always enable 4 lanes)
3626        *    i64 SGPR $1 = atomic base address
3627        *    i32 VGPR $2 = 32-bit VGPR voffset (streamout should set local_invocation_index * 8)
3628        *    i32 SGPR $3 = orderedID
3629        *    i64 VGPR $4 = 64-bit VGPR atomic src (streamout should set {orderedID, numDwords})
3630        */
3631 
3632       /* Issue (num_atomics - 1) atomics to initialize the results.
3633        * There are no s_sleeps here because the atomics must be pipelined.
3634        */
3635       for (int i = 0; i < num_atomics - 1; i++) {
3636          /* global_atomic_ordered_add_b64 dst, offset, data, address */
3637          ptr += sprintf(ptr,
3638                         "global_atomic_ordered_add_b64 v[%u:%u], $2, $4, $1 th:TH_ATOMIC_RETURN\n"
3639                         "s_nop 15\n"
3640                         "s_nop 7\n",
3641                         3 + i * 2,
3642                         3 + i * 2 + 1);
3643       }
3644 
3645       /* This is an infinite while loop with breaks. The loop body executes "num_atomics"
3646        * atomics and the same number of conditional breaks.
3647        *
3648        * It's pipelined such that we only wait for the oldest atomic, so there are always
3649        * "num_atomics" atomics in flight while the shader is waiting.
3650        */
3651       unsigned inst_block_size = 3 + 1 + 3; /* size of the next sprintf in dwords */
3652 
3653       for (unsigned i = 0; i < num_atomics; i++) {
3654          unsigned issue_index = (num_atomics - 1 + i) % num_atomics;
3655          unsigned read_index = i;
3656 
3657          ptr += sprintf(ptr,
3658                         /* Issue (or repeat) the attempt. */
3659                         "global_atomic_ordered_add_b64 v[%u:%u], $2, $4, $1 th:TH_ATOMIC_RETURN\n"
3660                         "s_wait_loadcnt 0x%x\n"
3661                         /* if (result[check_index].ordered_id == ordered_id) {
3662                          *    return_value = result[check_index].value;
3663                          *    break;
3664                          * }
3665                          */
3666                         "v_cmp_eq_u32 %s, $3, v%u\n"
3667                         "v_mov_b32 $0, v%u\n"
3668                         "s_cbranch_vccnz 0x%x\n",
3669                         3 + issue_index * 2,
3670                         3 + issue_index * 2 + 1,
3671                         num_atomics - 1, /* wait count */
3672                         ctx->ac.wave_size == 32 ? "vcc_lo" : "vcc",
3673                         3 + read_index * 2, /* v_cmp_eq: src1 */
3674                         3 + read_index * 2 + 1, /* output */
3675                         inst_block_size * (num_atomics - i - 1) + 1); /* forward s_cbranch as loop break */
3676       }
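      /* For illustration (num_atomics = 6, wave32), the first unrolled
       * iteration (i = 0) expands to:
       *    global_atomic_ordered_add_b64 v[13:14], $2, $4, $1 th:TH_ATOMIC_RETURN
       *    s_wait_loadcnt 0x5
       *    v_cmp_eq_u32 vcc_lo, $3, v3
       *    v_mov_b32 $0, v4
       *    s_cbranch_vccnz 0x24
       * i.e. it re-issues slot 5 while checking the oldest result in v[3:4];
       * the 0x24 (36-dword) branch skips the remaining 5 blocks of 7 dwords
       * plus the trailing s_branch.
       */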
3677 
3678       /* Jump to the beginning of the loop. */
3679       ptr += sprintf(ptr,
3680                      "s_branch 0x%x\n"
3681                      "s_wait_alu 0xfffe\n"
3682                      "s_wait_loadcnt 0x0\n",
3683                      (inst_block_size * -(int)num_atomics - 1) & 0xffff);
3684 
3685       LLVMTypeRef param_types[] = {ctx->ac.i64, ctx->ac.i32, ctx->ac.i32, ctx->ac.i64};
3686       LLVMTypeRef calltype = LLVMFunctionType(ctx->ac.i32, param_types, 4, false);
3687 
3688       /* =v means a VGPR output, =& means the dst register must be different from src registers,
3689        * s means an SGPR input, v means a VGPR input, ~{reg} means that the register is clobbered
3690        *
3691        * We need to list the registers manually because the clobber constraint doesn't prevent
3692        * input and output registers from being assigned the same registers as the ones that are
3693        * clobbered.
3694        *
3695        * Since registers in the clobber constraints are ignored by LLVM during computation of
3696        * register usage, we have to set the input register to the highest used register because
3697        * that one is included in the register usage computation.
3698        */
3699       char constraint[128];
3700       snprintf(constraint, sizeof(constraint), "=&{v0},{s[8:9]},{v%u},{s12},{v[1:2]},~{%s},~{v[3:%u]}",
3701                3 + num_atomics * 2,
3702                ctx->ac.wave_size == 32 ? "vcc_lo" : "vcc",
3703                3 + num_atomics * 2 - 1);
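      /* For example, with num_atomics = 6 on wave32 this expands to:
       *    "=&{v0},{s[8:9]},{v15},{s12},{v[1:2]},~{vcc_lo},~{v[3:14]}"
       * pinning the voffset input to v15, one register past the clobbered
       * v[3:14] range, so LLVM's register-usage accounting covers everything
       * the assembly touches.
       */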
3704 
3705       LLVMValueRef inlineasm = LLVMConstInlineAsm(calltype, code, constraint, true, false);
3706 
3707       LLVMValueRef args[] = {
3708          get_src(ctx, instr->src[0]),
3709          get_src(ctx, instr->src[1]),
3710          get_src(ctx, instr->src[2]),
3711          get_src(ctx, instr->src[3]),
3712       };
3713       result = LLVMBuildCall2(ctx->ac.builder, calltype, inlineasm, args, 4, "");
3714 
3715       assert(ptr < code + sizeof(code));
3716       break;
3717    }
3718    case nir_intrinsic_export_amd: {
3719       unsigned flags = nir_intrinsic_flags(instr);
3720       unsigned target = nir_intrinsic_base(instr);
3721       unsigned write_mask = nir_intrinsic_write_mask(instr);
3722 
3723       struct ac_export_args args = {
3724          .target = target,
3725          .enabled_channels = write_mask,
3726          .compr = flags & AC_EXP_FLAG_COMPRESSED,
3727          .done = flags & AC_EXP_FLAG_DONE,
3728          .valid_mask = flags & AC_EXP_FLAG_VALID_MASK,
3729       };
3730 
3731       LLVMValueRef value = get_src(ctx, instr->src[0]);
3732       int num_components = ac_get_llvm_num_components(value);
3733       for (int i = 0; i < num_components; i++)
3734          args.out[i] = ac_llvm_extract_elem(&ctx->ac, value, i);
3735 
3736       ac_build_export(&ctx->ac, &args);
3737       break;
3738    }
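   /* The struct above mirrors the fields of the hardware export instruction:
    * "target" selects the MRT/position/parameter slot, "enabled_channels" is
    * the channel write mask, "compr" requests 16-bit compressed exports, and
    * "done" marks the stage's final export.
    */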
3739    case nir_intrinsic_bvh64_intersect_ray_amd: {
3740       LLVMValueRef desc = get_src(ctx, instr->src[0]);
3741       LLVMValueRef node_id =
3742          LLVMBuildBitCast(ctx->ac.builder, get_src(ctx, instr->src[1]), ctx->ac.i64, "");
3743       LLVMValueRef t_max =
3744          LLVMBuildBitCast(ctx->ac.builder, get_src(ctx, instr->src[2]), ctx->ac.f32, "");
3745       LLVMValueRef origin =
3746          LLVMBuildBitCast(ctx->ac.builder, get_src(ctx, instr->src[3]), ctx->ac.v3f32, "");
3747       LLVMValueRef dir =
3748          LLVMBuildBitCast(ctx->ac.builder, get_src(ctx, instr->src[4]), ctx->ac.v3f32, "");
3749       LLVMValueRef inv_dir =
3750          LLVMBuildBitCast(ctx->ac.builder, get_src(ctx, instr->src[5]), ctx->ac.v3f32, "");
3751 
3752       LLVMValueRef args[6] = {
3753          node_id, t_max, origin, dir, inv_dir, desc,
3754       };
3755 
3756       result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.image.bvh.intersect.ray.i64.v3f32",
3757                                   ctx->ac.v4i32, args, ARRAY_SIZE(args), 0);
3758       break;
3759    }
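   /* The operand order above follows the intrinsic: 64-bit node pointer, ray
    * extent (t_max), origin, direction, inverse direction, then the BVH
    * resource descriptor. The intersection result comes back as a vec4 of
    * i32 that the NIR consumers decode.
    */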
3760    case nir_intrinsic_sleep_amd: {
3761       LLVMValueRef arg = LLVMConstInt(ctx->ac.i32, nir_intrinsic_base(instr), 0);
3762       ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.s.sleep", ctx->ac.voidt, &arg, 1, 0);
3763       break;
3764    }
3765    case nir_intrinsic_nop_amd: {
3766       LLVMValueRef arg = LLVMConstInt(ctx->ac.i16, nir_intrinsic_base(instr), 0);
3767       ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.s.nop", ctx->ac.voidt, &arg, 1, 0);
3768       break;
3769    }
3770    default:
3771       fprintf(stderr, "Unknown intrinsic: ");
3772       nir_print_instr(&instr->instr, stderr);
3773       fprintf(stderr, "\n");
3774       return false;
3775    }
3776    if (result) {
3777       ctx->ssa_defs[instr->def.index] = result;
3778    }
3779    return true;
3780 }
3781 
3782 /* Disable anisotropic filtering if BASE_LEVEL == LAST_LEVEL.
3783  *
3784  * GFX6-GFX7:
3785  *   If BASE_LEVEL == LAST_LEVEL, the shader must disable anisotropic
3786  *   filtering manually. The driver sets img7 to a mask clearing
3787  *   MAX_ANISO_RATIO if BASE_LEVEL == LAST_LEVEL. The shader must do:
3788  *     s_and_b32 samp0, samp0, img7
3789  *
3790  * GFX8:
3791  *   The ANISO_OVERRIDE sampler field enables this fix in TA.
3792  */
3793 static LLVMValueRef sici_fix_sampler_aniso(struct ac_nir_context *ctx,
3794                                            LLVMValueRef samp)
3795 {
3796    LLVMBuilderRef builder = ctx->ac.builder;
3797    LLVMValueRef img7, samp0;
3798 
3799    if (ctx->ac.gfx_level >= GFX8)
3800       return samp;
3801 
3802    img7 = LLVMBuildExtractElement(builder, res, LLVMConstInt(ctx->ac.i32, 7, 0), "");
3803    samp0 = LLVMBuildExtractElement(builder, samp, ctx->ac.i32_0, "");
3804    samp0 = LLVMBuildAnd(builder, samp0, img7, "");
3805    return LLVMBuildInsertElement(builder, samp, samp0, ctx->ac.i32_0, "");
3806 }
3807 
3808 static void tex_fetch_ptrs(struct ac_nir_context *ctx, nir_tex_instr *instr,
3809                            struct waterfall_context *wctx, LLVMValueRef *res_ptr,
3810                            LLVMValueRef *samp_ptr)
3811 {
3812    LLVMValueRef texture_dynamic_handle = NULL;
3813    LLVMValueRef sampler_dynamic_handle = NULL;
3814    int plane = -1;
3815 
3816    *res_ptr = NULL;
3817    *samp_ptr = NULL;
3818    for (unsigned i = 0; i < instr->num_srcs; i++) {
3819       switch (instr->src[i].src_type) {
3820       case nir_tex_src_texture_handle:
3821       case nir_tex_src_sampler_handle: {
3822          LLVMValueRef val = get_src(ctx, instr->src[i].src);
3823          if (LLVMGetTypeKind(LLVMTypeOf(val)) == LLVMVectorTypeKind) {
3824             if (instr->src[i].src_type == nir_tex_src_texture_handle)
3825                *res_ptr = val;
3826             else
3827                *samp_ptr = val;
3828          } else {
3829             if (instr->src[i].src_type == nir_tex_src_texture_handle)
3830                texture_dynamic_handle = val;
3831             else
3832                sampler_dynamic_handle = val;
3833          }
3834          break;
3835       }
3836       case nir_tex_src_plane:
3837          plane = nir_src_as_int(instr->src[i].src);
3838          break;
3839       default:
3840          break;
3841       }
3842    }
3843 
3844    enum ac_descriptor_type main_descriptor =
3845       instr->sampler_dim == GLSL_SAMPLER_DIM_BUF ? AC_DESC_BUFFER : AC_DESC_IMAGE;
3846 
3847    if (plane >= 0) {
3848       assert(instr->op != nir_texop_txf_ms);
3849       assert(instr->sampler_dim != GLSL_SAMPLER_DIM_BUF);
3850 
3851       main_descriptor = AC_DESC_PLANE_0 + plane;
3852    }
3853 
3854    if (instr->op == nir_texop_fragment_mask_fetch_amd) {
3855       /* The fragment mask is fetched from the compressed
3856        * multisampled surface.
3857        */
3858       assert(ctx->ac.gfx_level < GFX11);
3859       main_descriptor = AC_DESC_FMASK;
3860    }
3861 
3862    /* descriptor handles given through nir_tex_src_{texture,sampler}_handle */
3863    if (instr->texture_non_uniform)
3864       texture_dynamic_handle = enter_waterfall(ctx, &wctx[0], texture_dynamic_handle, true);
3865 
3866    if (instr->sampler_non_uniform)
3867       sampler_dynamic_handle = enter_waterfall(ctx, &wctx[1], sampler_dynamic_handle, true);
3868 
3869    if (texture_dynamic_handle)
3870       *res_ptr = ctx->abi->load_sampler_desc(ctx->abi, texture_dynamic_handle, main_descriptor);
3871 
3872    if (sampler_dynamic_handle) {
3873       *samp_ptr = ctx->abi->load_sampler_desc(ctx->abi, sampler_dynamic_handle, AC_DESC_SAMPLER);
3874 
3875       if (ctx->abi->disable_aniso_single_level && instr->sampler_dim < GLSL_SAMPLER_DIM_RECT)
3876          *samp_ptr = sici_fix_sampler_aniso(ctx, *res_ptr, *samp_ptr);
3877    }
3878 }
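/* Non-uniform descriptor handles are wrapped in a "waterfall" loop:
 * enter_waterfall() above scalarizes the handle one uniform value at a time,
 * and visit_tex() later unwinds both wctx entries with exit_waterfall() on
 * the texturing result.
 */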
3879 
3880 static void visit_tex(struct ac_nir_context *ctx, nir_tex_instr *instr)
3881 {
3882    LLVMValueRef result = NULL;
3883    struct ac_image_args args = {0};
3884    LLVMValueRef sample_index = NULL;
3885    LLVMValueRef ddx = NULL, ddy = NULL;
3886    struct waterfall_context wctx[2] = {{{0}}};
3887 
3888    tex_fetch_ptrs(ctx, instr, wctx, &args.resource, &args.sampler);
3889 
3890    for (unsigned i = 0; i < instr->num_srcs; i++) {
3891       switch (instr->src[i].src_type) {
3892       case nir_tex_src_coord: {
3893          LLVMValueRef coord = get_src(ctx, instr->src[i].src);
3894          args.a16 = instr->src[i].src.ssa->bit_size == 16;
3895          for (unsigned chan = 0; chan < instr->coord_components; ++chan)
3896             args.coords[chan] = ac_llvm_extract_elem(&ctx->ac, coord, chan);
3897          break;
3898       }
3899       case nir_tex_src_projector:
3900          break;
3901       case nir_tex_src_comparator:
3902          if (instr->is_shadow) {
3903             args.compare = get_src(ctx, instr->src[i].src);
3904             args.compare = ac_to_float(&ctx->ac, args.compare);
3905             assert(instr->src[i].src.ssa->bit_size == 32);
3906          }
3907          break;
3908       case nir_tex_src_offset:
3909          args.offset = get_src(ctx, instr->src[i].src);
3910          /* We pack it with bit shifts, so we need it to be 32-bit. */
3911          assert(ac_get_elem_bits(&ctx->ac, LLVMTypeOf(args.offset)) == 32);
3912          break;
3913       case nir_tex_src_bias:
3914          args.bias = get_src(ctx, instr->src[i].src);
3915          break;
3916       case nir_tex_src_lod:
3917          if (nir_src_is_const(instr->src[i].src) && nir_src_as_uint(instr->src[i].src) == 0)
3918             args.level_zero = true;
3919          else
3920             args.lod = get_src(ctx, instr->src[i].src);
3921          break;
3922       case nir_tex_src_ms_index:
3923          sample_index = get_src(ctx, instr->src[i].src);
3924          break;
3925       case nir_tex_src_ddx:
3926          ddx = get_src(ctx, instr->src[i].src);
3927          args.g16 = instr->src[i].src.ssa->bit_size == 16;
3928          break;
3929       case nir_tex_src_ddy:
3930          ddy = get_src(ctx, instr->src[i].src);
3931          assert(LLVMTypeOf(ddy) == LLVMTypeOf(ddx));
3932          break;
3933       case nir_tex_src_min_lod:
3934          args.min_lod = get_src(ctx, instr->src[i].src);
3935          break;
3936       case nir_tex_src_texture_offset:
3937       case nir_tex_src_sampler_offset:
3938       case nir_tex_src_plane:
3939       default:
3940          break;
3941       }
3942    }
3943 
3944    if (args.offset) {
3945       /* The offset for txf has been lowered in NIR. */
3946       assert(instr->op != nir_texop_txf);
3947 
3948       LLVMValueRef offset[3], pack;
3949       for (unsigned chan = 0; chan < 3; ++chan)
3950          offset[chan] = ctx->ac.i32_0;
3951 
3952       unsigned num_components = ac_get_llvm_num_components(args.offset);
3953       for (unsigned chan = 0; chan < num_components; chan++) {
3954          offset[chan] = ac_llvm_extract_elem(&ctx->ac, args.offset, chan);
3955          offset[chan] =
3956             LLVMBuildAnd(ctx->ac.builder, offset[chan], LLVMConstInt(ctx->ac.i32, 0x3f, false), "");
3957          if (chan)
3958             offset[chan] = LLVMBuildShl(ctx->ac.builder, offset[chan],
3959                                         LLVMConstInt(ctx->ac.i32, chan * 8, false), "");
3960       }
3961       pack = LLVMBuildOr(ctx->ac.builder, offset[0], offset[1], "");
3962       pack = LLVMBuildOr(ctx->ac.builder, pack, offset[2], "");
3963       args.offset = pack;
3964    }
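   /* E.g. a constant offset of (1, 2, 3) packs to 0x030201: each component is
    * masked to 6 bits and placed at bit offset 0, 8 and 16 respectively.
    */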
3965 
3966    /* Section 8.23.1 (Depth Texture Comparison Mode) of the
3967     * OpenGL 4.5 spec says:
3968     *
3969     *    "If the texture’s internal format indicates a fixed-point
3970     *     depth texture, then D_t and D_ref are clamped to the
3971     *     range [0, 1]; otherwise no clamping is performed."
3972     *
3973     * TC-compatible HTILE promotes Z16 and Z24 to Z32_FLOAT,
3974     * so the depth comparison value isn't clamped for Z16 and
3975     * Z24 anymore. Do it manually here for GFX8-9; GFX10 has
3976     * an explicitly clamped 32-bit float format.
3977     */
3978    if (args.compare && ctx->ac.gfx_level >= GFX8 && ctx->ac.gfx_level <= GFX9 &&
3979        ctx->abi->clamp_shadow_reference) {
3980       LLVMValueRef upgraded, clamped;
3981 
3982       upgraded = LLVMBuildExtractElement(ctx->ac.builder, args.sampler,
3983                                          LLVMConstInt(ctx->ac.i32, 3, false), "");
3984       upgraded = LLVMBuildLShr(ctx->ac.builder, upgraded, LLVMConstInt(ctx->ac.i32, 29, false), "");
3985       upgraded = LLVMBuildTrunc(ctx->ac.builder, upgraded, ctx->ac.i1, "");
3986       clamped = ac_build_clamp(&ctx->ac, args.compare);
3987       args.compare = LLVMBuildSelect(ctx->ac.builder, upgraded, clamped, args.compare, "");
3988    }
3989 
3990    /* pack derivatives */
3991    if (ddx || ddy) {
3992       int num_deriv_channels;
3993       switch (instr->sampler_dim) {
3994       case GLSL_SAMPLER_DIM_3D:
3995          num_deriv_channels = 3;
3996          break;
3997       case GLSL_SAMPLER_DIM_2D:
3998       case GLSL_SAMPLER_DIM_CUBE:
3999       default:
4000          num_deriv_channels = 2;
4001          break;
4002       case GLSL_SAMPLER_DIM_1D:
4003          num_deriv_channels = 1;
4004          break;
4005       }
4006 
4007       for (unsigned i = 0; i < num_deriv_channels; i++) {
4008          args.derivs[i] = ac_to_float(&ctx->ac, ac_llvm_extract_elem(&ctx->ac, ddx, i));
4009          args.derivs[num_deriv_channels + i] =
4010             ac_to_float(&ctx->ac, ac_llvm_extract_elem(&ctx->ac, ddy, i));
4011       }
4012    }
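   /* args.derivs is laid out ddx-first; e.g. for a 2D sampler:
    *    derivs = { du/dx, dv/dx, du/dy, dv/dy }
    */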
4013 
4014    /* Pack sample index */
4015    if (sample_index && (instr->op == nir_texop_txf_ms || instr->op == nir_texop_fragment_fetch_amd))
4016       args.coords[instr->coord_components] = sample_index;
4017 
4018    bool is_new_style_shadow = instr->is_shadow && instr->is_new_style_shadow &&
4019                               instr->op != nir_texop_lod && instr->op != nir_texop_tg4;
4020    unsigned num_components = util_last_bit(nir_def_components_read(&instr->def));
4021 
4022    /* DMASK was repurposed for GATHER4. 4 components are always
4023     * returned and DMASK works like a swizzle - it selects
4024     * the component to fetch. The only valid DMASK values are
4025     * 1=red, 2=green, 4=blue, 8=alpha. (e.g. 1 returns
4026     * (red,red,red,red) etc.) The ISA document doesn't mention
4027     * this.
4028     */
4029    if (instr->op == nir_texop_tg4) {
4030       if (instr->is_shadow)
4031          args.dmask = 1;
4032       else
4033          args.dmask = 1 << instr->component;
4034    } else if (is_new_style_shadow || instr->op == nir_texop_fragment_mask_fetch_amd) {
4035       args.dmask = 1;
4036    } else {
4037       args.dmask = BITFIELD_MASK(num_components);
4038    }
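   /* E.g. textureGather with component = 2 yields dmask = 0x4, returning the
    * blue component of each of the four gathered texels.
    */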
4039 
4040    if (instr->sampler_dim != GLSL_SAMPLER_DIM_BUF) {
4041       args.dim = ac_get_sampler_dim(ctx->ac.gfx_level, instr->sampler_dim, instr->is_array);
4042       args.unorm = instr->sampler_dim == GLSL_SAMPLER_DIM_RECT;
4043    }
4044 
4045    /* Adjust the number of coordinates because we only need (x,y) for 2D
4046     * multisampled images and (x,y,layer) for 2D multisampled layered
4047     * images or for multisampled input attachments.
4048     */
4049    if (instr->op == nir_texop_fragment_mask_fetch_amd) {
4050       if (args.dim == ac_image_2dmsaa) {
4051          args.dim = ac_image_2d;
4052       } else {
4053          assert(args.dim == ac_image_2darraymsaa);
4054          args.dim = ac_image_2darray;
4055       }
4056    }
4057 
4058    /* Set TRUNC_COORD=0 for textureGather(). */
4059    if (instr->op == nir_texop_tg4 && !ctx->ac.info->conformant_trunc_coord) {
4060       LLVMValueRef dword0 = LLVMBuildExtractElement(ctx->ac.builder, args.sampler, ctx->ac.i32_0, "");
4061       dword0 = LLVMBuildAnd(ctx->ac.builder, dword0, LLVMConstInt(ctx->ac.i32, C_008F30_TRUNC_COORD, 0), "");
4062       args.sampler = LLVMBuildInsertElement(ctx->ac.builder, args.sampler, dword0, ctx->ac.i32_0, "");
4063    }
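   /* C_008F30_TRUNC_COORD (from sid.h) is the complement of the TRUNC_COORD
    * field mask, so the AND clears just that bit of sampler dword 0.
    */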
4064 
4065    args.d16 = instr->def.bit_size == 16;
4066    args.tfe = instr->is_sparse;
4067 
4068    result = build_tex_intrinsic(ctx, instr, &args);
4069 
4070    LLVMValueRef code = NULL;
4071    if (instr->is_sparse) {
4072       code = ac_llvm_extract_elem(&ctx->ac, result, 4);
4073       result = ac_trim_vector(&ctx->ac, result, 4);
4074    }
4075 
4076    if (is_new_style_shadow)
4077       result = LLVMBuildExtractElement(ctx->ac.builder, result, ctx->ac.i32_0, "");
4078    else if (instr->op == nir_texop_fragment_mask_fetch_amd) {
4079       /* Use 0x76543210 if the image doesn't have FMASK. */
4080       LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, args.resource, ctx->ac.v8i32, "");
4081       tmp = LLVMBuildExtractElement(ctx->ac.builder, tmp, ctx->ac.i32_1, "");
4082       tmp = LLVMBuildICmp(ctx->ac.builder, LLVMIntNE, tmp, ctx->ac.i32_0, "");
4083       result = LLVMBuildSelect(ctx->ac.builder, tmp,
4084                                LLVMBuildExtractElement(ctx->ac.builder, result, ctx->ac.i32_0, ""),
4085                                LLVMConstInt(ctx->ac.i32, 0x76543210, false), "");
4086    } else
4087       result = ac_trim_vector(&ctx->ac, result, num_components);
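   /* 0x76543210 is the identity mapping (sample i -> fragment i). Resource
    * dword 1 holding zero is taken to mean no FMASK surface was allocated.
    */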
4088 
4089    if (instr->is_sparse)
4090       result = ac_build_concat(&ctx->ac, result, code);
4091 
4092    if (result) {
4093       result = ac_to_integer(&ctx->ac, result);
4094 
4095       for (int i = ARRAY_SIZE(wctx); --i >= 0;) {
4096          result = exit_waterfall(ctx, wctx + i, result);
4097       }
4098 
4099       ctx->ssa_defs[instr->def.index] = result;
4100    }
4101 }
4102 
4103 static void visit_phi(struct ac_nir_context *ctx, nir_phi_instr *instr)
4104 {
4105    LLVMTypeRef type = get_def_type(ctx, &instr->def);
4106    LLVMValueRef result = LLVMBuildPhi(ctx->ac.builder, type, "");
4107 
4108    ctx->ssa_defs[instr->def.index] = result;
4109    _mesa_hash_table_insert(ctx->phis, instr, result);
4110 }
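/* Incoming values can't be added here because some predecessor blocks may not
 * have been translated yet; phi_post_pass() below fills them in once every
 * block has its final LLVM basic block recorded in ctx->defs.
 */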
4111 
4112 static void visit_post_phi(struct ac_nir_context *ctx, nir_phi_instr *instr, LLVMValueRef llvm_phi)
4113 {
4114    nir_foreach_phi_src (src, instr) {
4115       LLVMBasicBlockRef block = get_block(ctx, src->pred);
4116       LLVMValueRef llvm_src = get_src(ctx, src->src);
4117 
4118       LLVMAddIncoming(llvm_phi, &llvm_src, &block, 1);
4119    }
4120 }
4121 
4122 static void phi_post_pass(struct ac_nir_context *ctx)
4123 {
4124    hash_table_foreach(ctx->phis, entry)
4125    {
4126       visit_post_phi(ctx, (nir_phi_instr *)entry->key, (LLVMValueRef)entry->data);
4127    }
4128 }
4129 
4130 static void visit_ssa_undef(struct ac_nir_context *ctx, const nir_undef_instr *instr)
4131 {
4132    unsigned num_components = instr->def.num_components;
4133    LLVMTypeRef type = LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);
4134 
4135    LLVMValueRef undef;
4136 
4137    if (num_components == 1)
4138       undef = LLVMGetUndef(type);
4139    else {
4140       undef = LLVMGetUndef(LLVMVectorType(type, num_components));
4141    }
4142    ctx->ssa_defs[instr->def.index] = undef;
4143 }
4144 
4145 static bool visit_jump(struct ac_llvm_context *ctx, const nir_jump_instr *instr)
4146 {
4147    switch (instr->type) {
4148    case nir_jump_break:
4149       ac_build_break(ctx);
4150       break;
4151    case nir_jump_continue:
4152       ac_build_continue(ctx);
4153       break;
4154    default:
4155       fprintf(stderr, "Unknown NIR jump instr: ");
4156       nir_print_instr(&instr->instr, stderr);
4157       fprintf(stderr, "\n");
4158       return false;
4159    }
4160    return true;
4161 }
4162 
4163 static bool visit_cf_list(struct ac_nir_context *ctx, struct exec_list *list);
4164 
4165 static bool visit_block(struct ac_nir_context *ctx, nir_block *block)
4166 {
4167    LLVMBasicBlockRef blockref = LLVMGetInsertBlock(ctx->ac.builder);
4168    LLVMValueRef first = LLVMGetFirstInstruction(blockref);
4169    if (first) {
4170       /* ac_branch_exited() might have already inserted non-phis */
4171       LLVMPositionBuilderBefore(ctx->ac.builder, LLVMGetFirstInstruction(blockref));
4172    }
4173 
4174    nir_foreach_phi(phi, block) {
4175       visit_phi(ctx, phi);
4176    }
4177 
4178    LLVMPositionBuilderAtEnd(ctx->ac.builder, blockref);
4179 
4180    nir_foreach_instr (instr, block) {
4181       switch (instr->type) {
4182       case nir_instr_type_alu:
4183          if (!visit_alu(ctx, nir_instr_as_alu(instr)))
4184             return false;
4185          break;
4186       case nir_instr_type_load_const:
4187          if (!visit_load_const(ctx, nir_instr_as_load_const(instr)))
4188             return false;
4189          break;
4190       case nir_instr_type_intrinsic:
4191          if (!visit_intrinsic(ctx, nir_instr_as_intrinsic(instr)))
4192             return false;
4193          break;
4194       case nir_instr_type_tex:
4195          visit_tex(ctx, nir_instr_as_tex(instr));
4196          break;
4197       case nir_instr_type_phi:
4198          break;
4199       case nir_instr_type_undef:
4200          visit_ssa_undef(ctx, nir_instr_as_undef(instr));
4201          break;
4202       case nir_instr_type_jump:
4203          if (!visit_jump(&ctx->ac, nir_instr_as_jump(instr)))
4204             return false;
4205          break;
4206       case nir_instr_type_deref:
4207          assert(!nir_deref_mode_is_one_of(nir_instr_as_deref(instr),
4208                                           nir_var_mem_shared | nir_var_mem_global));
4209          break;
4210       default:
4211          fprintf(stderr, "Unknown NIR instr type: ");
4212          nir_print_instr(instr, stderr);
4213          fprintf(stderr, "\n");
4214          return false;
4215       }
4216    }
4217 
4218    _mesa_hash_table_insert(ctx->defs, block, LLVMGetInsertBlock(ctx->ac.builder));
4219 
4220    return true;
4221 }
4222 
4223 static bool visit_if(struct ac_nir_context *ctx, nir_if *if_stmt)
4224 {
4225    LLVMValueRef value = get_src(ctx, if_stmt->condition);
4226 
4227    nir_block *then_block = (nir_block *)exec_list_get_head(&if_stmt->then_list);
4228 
4229    ac_build_ifcc(&ctx->ac, value, then_block->index);
4230 
4231    if (!visit_cf_list(ctx, &if_stmt->then_list))
4232       return false;
4233 
4234    if (!exec_list_is_empty(&if_stmt->else_list)) {
4235       nir_block *else_block = (nir_block *)exec_list_get_head(&if_stmt->else_list);
4236 
4237       ac_build_else(&ctx->ac, else_block->index);
4238       if (!visit_cf_list(ctx, &if_stmt->else_list))
4239          return false;
4240    }
4241 
4242    ac_build_endif(&ctx->ac, then_block->index);
4243    return true;
4244 }
4245 
4246 static bool visit_loop(struct ac_nir_context *ctx, nir_loop *loop)
4247 {
4248    assert(!nir_loop_has_continue_construct(loop));
4249    nir_block *first_loop_block = (nir_block *)exec_list_get_head(&loop->body);
4250 
4251    ac_build_bgnloop(&ctx->ac, first_loop_block->index);
4252 
4253    if (!visit_cf_list(ctx, &loop->body))
4254       return false;
4255 
4256    ac_build_endloop(&ctx->ac, first_loop_block->index);
4257    return true;
4258 }
4259 
4260 static bool visit_cf_list(struct ac_nir_context *ctx, struct exec_list *list)
4261 {
4262    foreach_list_typed(nir_cf_node, node, node, list)
4263    {
4264       switch (node->type) {
4265       case nir_cf_node_block:
4266          if (!visit_block(ctx, nir_cf_node_as_block(node)))
4267             return false;
4268          break;
4269 
4270       case nir_cf_node_if:
4271          if (!visit_if(ctx, nir_cf_node_as_if(node)))
4272             return false;
4273          break;
4274 
4275       case nir_cf_node_loop:
4276          if (!visit_loop(ctx, nir_cf_node_as_loop(node)))
4277             return false;
4278          break;
4279 
4280       default:
4281          return false;
4282       }
4283    }
4284    return true;
4285 }
4286 
4287 static void setup_scratch(struct ac_nir_context *ctx, struct nir_shader *shader)
4288 {
4289    if (shader->scratch_size == 0)
4290       return;
4291 
4292    LLVMTypeRef type = LLVMArrayType(ctx->ac.i8, shader->scratch_size);
4293    ctx->scratch = (struct ac_llvm_pointer) {
4294       .value = ac_build_alloca_undef(&ctx->ac, type, "scratch"),
4295       .pointee_type = type
4296    };
4297 }
4298 
4299 static void setup_constant_data(struct ac_nir_context *ctx, struct nir_shader *shader)
4300 {
4301    if (!shader->constant_data)
4302       return;
4303 
4304    LLVMValueRef data = LLVMConstStringInContext(ctx->ac.context, shader->constant_data,
4305                                                 shader->constant_data_size, true);
4306    LLVMTypeRef type = LLVMArrayType(ctx->ac.i8, shader->constant_data_size);
4307    LLVMValueRef global =
4308       LLVMAddGlobalInAddressSpace(ctx->ac.module, type, "const_data", AC_ADDR_SPACE_CONST);
4309 
4310    LLVMSetInitializer(global, data);
4311    LLVMSetGlobalConstant(global, true);
4312    LLVMSetVisibility(global, LLVMHiddenVisibility);
4313    ctx->constant_data = (struct ac_llvm_pointer) {
4314       .value = global,
4315       .pointee_type = type
4316    };
4317 }
4318 
4319 static void setup_shared(struct ac_nir_context *ctx, struct nir_shader *nir)
4320 {
4321    if (ctx->ac.lds.value)
4322       return;
4323 
4324    LLVMTypeRef type = LLVMArrayType(ctx->ac.i8, nir->info.shared_size);
4325 
4326    LLVMValueRef lds =
4327       LLVMAddGlobalInAddressSpace(ctx->ac.module, type, "compute_lds", AC_ADDR_SPACE_LDS);
4328    LLVMSetAlignment(lds, 64 * 1024);
4329 
4330    ctx->ac.lds = (struct ac_llvm_pointer) {
4331       .value = lds,
4332       .pointee_type = type
4333    };
4334 }
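/* Note: declaring LDS as an i8 array allows byte-offset addressing, and the
 * 64 KiB alignment effectively places the variable at LDS address 0.
 */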
4335 
4336 static void setup_gds(struct ac_nir_context *ctx, nir_function_impl *impl)
4337 {
4338    bool has_gds_atomic = false;
4339 
4340    if (ctx->ac.gfx_level >= GFX10 &&
4341        (ctx->stage == MESA_SHADER_VERTEX ||
4342         ctx->stage == MESA_SHADER_TESS_EVAL ||
4343         ctx->stage == MESA_SHADER_GEOMETRY)) {
4344 
4345       nir_foreach_block(block, impl) {
4346          nir_foreach_instr(instr, block) {
4347             if (instr->type != nir_instr_type_intrinsic)
4348                continue;
4349 
4350             nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
4351             has_gds_atomic |= intrin->intrinsic == nir_intrinsic_gds_atomic_add_amd;
4352          }
4353       }
4354    }
4355 
4356    unsigned gds_size = has_gds_atomic ? 0x100 : 0;
4357 
4358    if (gds_size)
4359       ac_llvm_add_target_dep_function_attr(ctx->main_function, "amdgpu-gds-size", gds_size);
4360 }
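/* The 0x100 bytes reserved here appear to cover the GDS counters used by
 * nir_intrinsic_gds_atomic_add_amd; the "amdgpu-gds-size" attribute tells the
 * LLVM backend how much GDS the shader may access.
 */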
4361 
4362 bool ac_nir_translate(struct ac_llvm_context *ac, struct ac_shader_abi *abi,
4363                       const struct ac_shader_args *args, struct nir_shader *nir)
4364 {
4365    struct ac_nir_context ctx = {0};
4366    struct nir_function *func;
4367    bool ret;
4368 
4369    ctx.ac = *ac;
4370    ctx.abi = abi;
4371    ctx.args = args;
4372 
4373    ctx.stage = nir->info.stage;
4374    ctx.info = &nir->info;
4375 
4376    ctx.main_function = LLVMGetBasicBlockParent(LLVMGetInsertBlock(ctx.ac.builder));
4377 
4378    ctx.defs = _mesa_hash_table_create(NULL, _mesa_hash_pointer, _mesa_key_pointer_equal);
4379    ctx.phis = _mesa_hash_table_create(NULL, _mesa_hash_pointer, _mesa_key_pointer_equal);
4380 
4381    if (ctx.abi->kill_ps_if_inf_interp)
4382       ctx.verified_interp =
4383          _mesa_hash_table_create(NULL, _mesa_hash_pointer, _mesa_key_pointer_equal);
4384 
4385    func = (struct nir_function *)exec_list_get_head(&nir->functions);
4386 
4387    nir_index_ssa_defs(func->impl);
4388    ctx.ssa_defs = calloc(func->impl->ssa_alloc, sizeof(LLVMValueRef));
4389 
4390    setup_scratch(&ctx, nir);
4391    setup_constant_data(&ctx, nir);
4392    setup_gds(&ctx, func->impl);
4393 
4394    if (gl_shader_stage_is_compute(nir->info.stage))
4395       setup_shared(&ctx, nir);
4396 
4397    if ((ret = visit_cf_list(&ctx, &func->impl->body)))
4398       phi_post_pass(&ctx);
4399 
4400    free(ctx.ssa_defs);
4401    ralloc_free(ctx.defs);
4402    ralloc_free(ctx.phis);
4403    if (ctx.abi->kill_ps_if_inf_interp)
4404       ralloc_free(ctx.verified_interp);
4405 
4406    return ret;
4407 }
4408 
4409 /* Fix up the input VGPRs because the HW doesn't emit the TCS regs if there are no HS threads. */
4410 void ac_fixup_ls_hs_input_vgprs(struct ac_llvm_context *ac, struct ac_shader_abi *abi,
4411                                 const struct ac_shader_args *args)
4412 {
4413    LLVMValueRef count = ac_unpack_param(ac, ac_get_arg(ac, args->merged_wave_info), 8, 8);
4414    LLVMValueRef hs_empty = LLVMBuildICmp(ac->builder, LLVMIntEQ, count, ac->i32_0, "");
4415 
4416    abi->instance_id =
4417       LLVMBuildSelect(ac->builder, hs_empty, ac_get_arg(ac, args->vertex_id),
4418                       abi->instance_id, "");
4419 
4420    abi->vs_rel_patch_id =
4421       LLVMBuildSelect(ac->builder, hs_empty, ac_get_arg(ac, args->tcs_rel_ids),
4422                       abi->vs_rel_patch_id, "");
4423 
4424    abi->vertex_id =
4425       LLVMBuildSelect(ac->builder, hs_empty, ac_get_arg(ac, args->tcs_patch_id),
4426                       abi->vertex_id, "");
4427 }
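/* When the HS half of a merged LS-HS wave is empty (bits 8..15 of
 * merged_wave_info), the hardware leaves the LS system values in the VGPRs
 * normally used for TCS inputs, so each value is selected from the register
 * it actually arrived in.
 */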
4428